| edited_code | original_code |
|---|---|
from __future__ import unicode_literals
import os
import asyncio
import subprocess
import youtube_dl
from Python_ARQ import ARQ
from pytgcalls import GroupCall
from sys import version as pyver
from pyrogram import Client, filters
from misc import HELP_TEXT, START_TEXT, REPO_TEXT
from functions import (
transcode,
download_and_transcode_song,
convert_seconds,
time_to_seconds,
generate_cover,
generate_cover_square,
)
# TODO Make it look less messed up
is_config = os.path.exists("config.py")
if is_config:
from config import (
API_ID, API_HASH,
SUDO_CHAT_ID,
SUDOERS, ARQ_API, HEROKU
)
elif not is_config:
from sample_config import (
API_ID, API_HASH,
SUDO_CHAT_ID,
SUDOERS, ARQ_API, HEROKU
)
if HEROKU:
if is_config:
from config import SESSION_STRING
elif not is_config:
from sample_config import SESSION_STRING
queue = [] # This is where the whole song queue is stored
playing = False # Tells if something is playing or not
# Pyrogram Client
if not HEROKU:
app = Client("tgvc", api_id=API_ID, api_hash=API_HASH)
else:
app = Client(SESSION_STRING, api_id=API_ID, api_hash=API_HASH)
# Pytgcalls Client
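# GroupCall streams raw PCM audio from input_filename, so the transcode helpers
# imported from functions are expected to write each track out as "input.raw".
# (Assumption: play_on_repeat simply keeps replaying that file between tracks.)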
vc = GroupCall(
client=app,
input_filename="input.raw",
play_on_repeat=True,
enable_logs_to_console=False,
)
# Arq Client
arq = ARQ(ARQ_API)
async def delete(message):
await asyncio.sleep(10)
await message.delete()
@app.on_message(filters.command("start") & filters.user(SUDOERS))
async def start(_, message):
await send(START_TEXT)
@app.on_message(filters.command("help") & filters.user(SUDOERS))
async def help(_, message):
await send(HELP_TEXT)
@app.on_message(filters.command("repo") & filters.user(SUDOERS))
async def repo(_, message):
await send(REPO_TEXT)
@app.on_message(filters.command("joinvc") & filters.user(SUDOERS))
async def joinvc(_, message):
try:
if vc.is_connected:
await send("__**Allaqachon ovozli chatdaman.**__")
return
chat_id = message.chat.id
await vc.start(chat_id)
await send("__**Ovozli chatga qo'shildim.**__")
except Exception as e:
print(str(e))
await send(str(e))
@app.on_message(filters.command("rejoinvc") & filters.user(SUDOERS))
async def rejoinvc(_, message):
try:
if vc.is_connected:
await send("__**Allaqachon ovozli chatdaman.**__")
return
chat_id = message.chat.id
await vc.reconnect()
await send("__**Ovozli chatga qo'shildim.**__")
except Exception as e:
print(str(e))
await send(str(e))
@app.on_message(filters.command("leavevc") & filters.user(SUDOERS))
async def leavevc(_, message):
if not vc.is_connected:
await send("__**Ovozli chatdan allaqachon chiqib ketganman.**__")
return
await vc.leave_current_group_call()
await vc.stop()
await send("__**Ovozli chatni tark etdim , yangilanish....**__")
os.execvp(
f"python{str(pyver.split(" ")[0])[:3]}",
[f"python{str(pyver.split(" ")[0])[:3]}", "main.py"],
)
@app.on_message(filters.command("update") & filters.user(SUDOERS))
async def update_restart(_, message):
await send(
f'```{subprocess.check_output(['git', 'pull']).decode('UTF-8')}```'
)
os.execvp(
f"python{str(pyver.split(" ")[0])[:3]}",
[f"python{str(pyver.split(" ")[0])[:3]}", "main.py"],
)
@app.on_message(filters.command("pause") & filters.user(SUDOERS))
async def pause_song(_, message):
vc.pause_playout()
await send("**To'xtatildi, davom ettirish uchun /resume buyrug'ini bering.**")
@app.on_message(filters.command("resume") & filters.chat(SUDO_CHAT_ID))
async def resume_song(_, message):
vc.resume_playout()
await send("**Davom etmoqda, to'xtatish uchun /pause buyrug'ini bering.**")
@app.on_message(filters.command("volume") & filters.user(SUDOERS))
async def volume_bot(_, message):
usage = "**Ishlatish uchun:**\n/volume [1-200] yozing"
if len(message.command) != 2:
await send(usage)
return
volume = int(message.text.split(None, 1)[1])
if (volume < 1) or (volume > 200):
await send(usage)
return
try:
await vc.set_my_volume(volume=volume)
except ValueError:
await send(usage)
return
await send(f"**Volume Set To {volume}**")
@app.on_message(filters.command("play") & filters.chat(SUDO_CHAT_ID))
async def queuer(_, message):
usage = "**Usage:**\n__**/play youtube Qo'shiq_Nomi**__"
if len(message.command) < 3:
await send(usage)
return
text = message.text.split(None, 2)[1:]
service = text[0].lower()
song_name = text[1]
requested_by = message.from_user.first_name
services = ["youtube", "deezer", "saavn"]
if service not in services:
await send(usage)
return
if len(queue) > 0:
await message.delete()
await send("__**Navbatga qo'shdim.__**")
queue.append(
{
"service": service,
"song": song_name,
"requested_by": requested_by,
}
)
await play()
return
await message.delete()
queue.append(
{
"service": service,
"song": song_name,
"requested_by": requested_by,
}
)
await play()
@app.on_message(
filters.command("skip") & filters.user(SUDOERS) & ~filters.edited
)
async def skip(_, message):
global playing
if len(queue) == 0:
await send("__**Navbat bo'm-bo'sh.**__")
return
playing = False
await send("__**Keyingisiga o'tkazildi!**__")
await play()
@app.on_message(filters.command("queue") & filters.chat(SUDO_CHAT_ID))
async def queue_list(_, message):
if len(queue) != 0:
i = 1
text = ""
for song in queue:
text += f"**{i}. Platforma:** __**{song["service"]}**__ " \
+ f"| **Musiqa:** __**{song["song"]}**__\n"
i += 1
m = await send(text)
await delete(message)
await m.delete()
else:
m = await send("__**Navbatda musiqa yo'q.**__")
await delete(message)
await m.delete()
# Queue handler
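# play() is a simple poller: it sleeps in 2-second steps while a track is
# playing, then reads the head of `queue`, sets `playing`, removes the entry
# and dispatches it to ytplay(), jiosaavn() or deezer(). The handlers (and the
# except blocks) reset `playing` so the next queued song can start.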
async def play():
global queue, playing
while not playing:
await asyncio.sleep(2)
if len(queue) != 0:
service = queue[0]["service"]
song = queue[0]["song"]
requested_by = queue[0]["requested_by"]
if service == "youtube":
playing = True
del queue[0]
try:
await ytplay(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
elif service == "saavn":
playing = True
del queue[0]
try:
await jiosaavn(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
elif service == "deezer":
playing = True
del queue[0]
try:
await deezer(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
# Deezer----------------------------------------------------------------------------------------
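# Every service handler follows the same flow: search via ARQ, render a cover
# with generate_cover()/generate_cover_square(), download and transcode the
# track, post the cover to SUDO_CHAT_ID, sleep for the song's duration, then
# clear `playing`.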
async def deezer(requested_by, query):
global playing
m = await send(f"__**Searching for {query} on Deezer.**__")
try:
songs = await arq.deezer(query, 1)
title = songs[0].title
duration = convert_seconds(int(songs[0].duration))
thumbnail = songs[0].thumbnail
artist = songs[0].artist
url = songs[0].url
except Exception:
await m.edit("__**Found No Song Matching Your Query.**__")
playing = False
return
await m.edit("__**Generating Thumbnail.**__")
await generate_cover_square(
requested_by, title, artist, duration, thumbnail
)
await m.edit("__**Downloading And Transcoding.**__")
await download_and_transcode_song(url)
await m.delete()
caption = f"🏷 **Name:** [{title[:35]}]({url})\n⏳ **Duration:** {duration}\n" \
+ f"🎧 **Requested By:** {requested_by}\n📡 **Platform:** Deezer"
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
photo="final.png",
caption=caption,
)
os.remove("final.png")
await asyncio.sleep(int(songs[0]["duration"]))
await m.delete()
playing = False
# Jiosaavn--------------------------------------------------------------------------------------
async def jiosaavn(requested_by, query):
global playing
m = await send(f"__**Searching for {query} on JioSaavn.**__")
try:
songs = await arq.saavn(query)
sname = songs[0].song
slink = songs[0].media_url
ssingers = songs[0].singers
sthumb = songs[0].image
sduration = songs[0].duration
sduration_converted = convert_seconds(int(sduration))
except Exception as e:
await m.edit("__**Found No Song Matching Your Query.**__")
print(str(e))
playing = False
return
await m.edit("__**Processing Thumbnail.**__")
await generate_cover_square(
requested_by, sname, ssingers, sduration_converted, sthumb
)
await m.edit("__**Downloading And Transcoding.**__")
await download_and_transcode_song(slink)
await m.delete()
caption = f"🏷 **Name:** {sname[:35]}\n⏳ **Duration:** {sduration_converted}\n" \
+ f"🎧 **Requested By:** {requested_by}\n📡 **Platform:** JioSaavn"
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
caption=caption,
photo="final.png",
)
os.remove("final.png")
await asyncio.sleep(int(sduration))
await m.delete()
playing = False
# Youtube Play-----------------------------------------------------
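# youtube-dl is driven manually here: extract_info(download=False) fetches the
# metadata, prepare_filename() gives the output path, and process_info()
# performs the actual download before the file is renamed to audio.webm and
# transcoded.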
async def ytplay(requested_by, query):
global playing
ydl_opts = {"format": "bestaudio"}
m = await send(f"__**{query} YouTubedan izlanmoqda.**__")
try:
results = await arq.youtube(query)
link = f"https://youtube.com{results[0].url_suffix}"
title = results[0].title
thumbnail = results[0].thumbnails[0]
duration = results[0].duration
views = results[0].views
await app.update_profile(first_name=f"🔉{title} ", bio=f"{title} ijro etilmoqda")
if time_to_seconds(duration) >= 1800:
await m.edit("__**Yo'q, faqat 30 daqiqadan oshmagan musiqalar mumkin.**__")
playing = False
return
except Exception as e:
await m.edit("__**Siz izlagan musiqa topilmadi.**__")
playing = False
print(str(e))
return
await m.edit("__**1 soniya.**__")
await generate_cover(requested_by, title, views, duration, thumbnail)
await m.edit("__**yuklanmoqda ....**__")
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=False)
audio_file = ydl.prepare_filename(info_dict)
ydl.process_info(info_dict)
await m.edit("__**1 soniya.**__")
os.rename(audio_file, "audio.webm")
transcode("audio.webm")
await m.delete()
caption = f"🏷 **Nomi:** [{title[:35]}]({link})\n⏳ **Davomiyligi:** {duration}\n" \
+ f"🎧 {requested_by} **tomonidan ijro etildi**\n📡 **Platforma:** YouTube"
await app.set_profile_photo(photo="final.png")
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
caption=caption,
photo="final.png",
)
os.remove("final.png")
await asyncio.sleep(int(time_to_seconds(duration)))
playing = False
await m.delete()
# Telegram Audio------------------------------------
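# /telegram plays a replied-to audio file directly. It rejects files of
# 100 MiB or more and audio without a duration, because the sleep below relies
# on that duration to know when the track ends.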
@app.on_message(
filters.command("telegram") & filters.user(SUDOERS) & ~filters.edited
)
async def tgplay(_, message):
global playing
if len(queue) != 0:
await send("__**You Can Only Play Telegram Files After The Queue Gets "
+ "Finished.**__")
return
if not message.reply_to_message:
await send("__**Reply to an audio.**__")
return
if message.reply_to_message.audio:
if int(message.reply_to_message.audio.file_size) >= 104857600:
await send("__**Bruh! Only songs within 100 MB.**__")
playing = False
return
duration = message.reply_to_message.audio.duration
if not duration:
await send("__**Only Songs With Duration Are Supported.**__")
return
m = await send("__**Downloading.**__")
song = await message.reply_to_message.download()
await m.edit("__**Transcoding.**__")
transcode(song)
await m.edit(f"**Playing** __**{message.reply_to_message.link}.**__")
await asyncio.sleep(duration)
playing = False
return
await send("__**Only Audio Files (Not Document) Are Supported.**__")
async def send(text):
m = await app.send_message(
SUDO_CHAT_ID, text=text, disable_web_page_preview=True
)
return m
print(
"\nBot Starting..."
)
app.run()
| from __future__ import unicode_literals
import os
import asyncio
import subprocess
import youtube_dl
from Python_ARQ import ARQ
from pytgcalls import GroupCall
from sys import version as pyver
from pyrogram import Client, filters
from misc import HELP_TEXT, START_TEXT, REPO_TEXT
from functions import (
transcode,
download_and_transcode_song,
convert_seconds,
time_to_seconds,
generate_cover,
generate_cover_square,
)
# TODO Make it look less messed up
is_config = os.path.exists("config.py")
if is_config:
from config import (
API_ID, API_HASH,
SUDO_CHAT_ID,
SUDOERS, ARQ_API, HEROKU
)
elif not is_config:
from sample_config import (
API_ID, API_HASH,
SUDO_CHAT_ID,
SUDOERS, ARQ_API, HEROKU
)
if HEROKU:
if is_config:
from config import SESSION_STRING
elif not is_config:
from sample_config import SESSION_STRING
queue = [] # This is where the whole song queue is stored
playing = False # Tells if something is playing or not
# Pyrogram Client
if not HEROKU:
app = Client("tgvc", api_id=API_ID, api_hash=API_HASH)
else:
app = Client(SESSION_STRING, api_id=API_ID, api_hash=API_HASH)
# Pytgcalls Client
vc = GroupCall(
client=app,
input_filename="input.raw",
play_on_repeat=True,
enable_logs_to_console=False,
)
# Arq Client
arq = ARQ(ARQ_API)
async def delete(message):
await asyncio.sleep(10)
await message.delete()
@app.on_message(filters.command("start") & filters.user(SUDOERS))
async def start(_, message):
await send(START_TEXT)
@app.on_message(filters.command("help") & filters.user(SUDOERS))
async def help(_, message):
await send(HELP_TEXT)
@app.on_message(filters.command("repo") & filters.user(SUDOERS))
async def repo(_, message):
await send(REPO_TEXT)
@app.on_message(filters.command("joinvc") & filters.user(SUDOERS))
async def joinvc(_, message):
try:
if vc.is_connected:
await send("__**Allaqachon ovozli chatdaman.**__")
return
chat_id = message.chat.id
await vc.start(chat_id)
await send("__**Ovozli chatga qo'shildim.**__")
except Exception as e:
print(str(e))
await send(str(e))
@app.on_message(filters.command("rejoinvc") & filters.user(SUDOERS))
async def joinvc(_, message):
try:
if vc.is_connected:
await send("__**Allaqachon ovozli chatdaman.**__")
return
chat_id = message.chat.id
await vc.reconnect()
await send("__**Ovozli chatga qo'shildim.**__")
except Exception as e:
print(str(e))
await send(str(e))
@app.on_message(filters.command("leavevc") & filters.user(SUDOERS))
async def leavevc(_, message):
if not vc.is_connected:
await send("__**Ovozli chatdan allaqachon chiqib ketganman.**__")
return
await vc.leave_current_group_call()
await vc.stop()
await send("__**Ovozli chatni tark etdim , yangilanish....**__")
os.execvp(
f"python{str(pyver.split(' ')[0])[:3]}",
[f"python{str(pyver.split(' ')[0])[:3]}", "main.py"],
)
@app.on_message(filters.command("update") & filters.user(SUDOERS))
async def update_restart(_, message):
await send(
f'```{subprocess.check_output(["git", "pull"]).decode("UTF-8")}```'
)
os.execvp(
f"python{str(pyver.split(' ')[0])[:3]}",
[f"python{str(pyver.split(' ')[0])[:3]}", "main.py"],
)
@app.on_message(filters.command("pause") & filters.user(SUDOERS))
async def pause_song(_, message):
vc.pause_playout()
await send("**To'xtatildi, davom ettirish uchun /resume buyrug'ini bering.**")
@app.on_message(filters.command("resume") & filters.chat(SUDO_CHAT_ID))
async def resume_song(_, message):
vc.resume_playout()
await send("**Davom etmoqda, to'xtatish uchun /pause buyrug'ini bering.**")
@app.on_message(filters.command("volume") & filters.user(SUDOERS))
async def volume_bot(_, message):
usage = "**Ishlatish uchun:**\n/volume [1-200] yozing"
if len(message.command) != 2:
await send(usage)
return
volume = int(message.text.split(None, 1)[1])
if (volume < 1) or (volume > 200):
await send(usage)
return
try:
await vc.set_my_volume(volume=volume)
except ValueError:
await send(usage)
return
await send(f"**Volume Set To {volume}**")
@app.on_message(filters.command("play") & filters.chat(SUDO_CHAT_ID))
async def queuer(_, message):
usage = "**Usage:**\n__**/play youtube Qo'shiq_Nomi**__"
if len(message.command) < 3:
await send(usage)
return
text = message.text.split(None, 2)[1:]
service = text[0].lower()
song_name = text[1]
requested_by = message.from_user.first_name
services = ["youtube", "deezer", "saavn"]
if service not in services:
await send(usage)
return
if len(queue) > 0:
await message.delete()
await send("__**Navbatga qo'shdim.__**")
queue.append(
{
"service": service,
"song": song_name,
"requested_by": requested_by,
}
)
await play()
return
await message.delete()
queue.append(
{
"service": service,
"song": song_name,
"requested_by": requested_by,
}
)
await play()
@app.on_message(
filters.command("skip") & filters.user(SUDOERS) & ~filters.edited
)
async def skip(_, message):
global playing
if len(queue) == 0:
await send("__**Navbat bo'm-bo'sh.**__")
return
playing = False
await send("__**Keyingisiga o'tkazildi!**__")
await play()
@app.on_message(filters.command("queue") & filters.chat(SUDO_CHAT_ID))
async def queue_list(_, message):
if len(queue) != 0:
i = 1
text = ""
for song in queue:
text += f"**{i}. Platforma:** __**{song['service']}**__ " \
+ f"| **Musiqa:** __**{song['song']}**__\n"
i += 1
m = await send(text)
await delete(message)
await m.delete()
else:
m = await send("__**Navbatda musiqa yo'q.**__")
await delete(message)
await m.delete()
# Queue handler
async def play():
global queue, playing
while not playing:
await asyncio.sleep(2)
if len(queue) != 0:
service = queue[0]["service"]
song = queue[0]["song"]
requested_by = queue[0]["requested_by"]
if service == "youtube":
playing = True
del queue[0]
try:
await ytplay(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
elif service == "saavn":
playing = True
del queue[0]
try:
await jiosaavn(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
elif service == "deezer":
playing = True
del queue[0]
try:
await deezer(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
# Deezer----------------------------------------------------------------------------------------
async def deezer(requested_by, query):
global playing
m = await send(f"__**Searching for {query} on Deezer.**__")
try:
songs = await arq.deezer(query, 1)
title = songs[0].title
duration = convert_seconds(int(songs[0].duration))
thumbnail = songs[0].thumbnail
artist = songs[0].artist
url = songs[0].url
except Exception:
await m.edit("__**Found No Song Matching Your Query.**__")
playing = False
return
await m.edit("__**Generating Thumbnail.**__")
await generate_cover_square(
requested_by, title, artist, duration, thumbnail
)
await m.edit("__**Downloading And Transcoding.**__")
await download_and_transcode_song(url)
await m.delete()
caption = f"🏷 **Name:** [{title[:35]}]({url})\n⏳ **Duration:** {duration}\n" \
+ f"🎧 **Requested By:** {requested_by}\n📡 **Platform:** Deezer"
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
photo="final.png",
caption=caption,
)
os.remove("final.png")
await asyncio.sleep(int(songs[0]["duration"]))
await m.delete()
playing = False
# Jiosaavn--------------------------------------------------------------------------------------
async def jiosaavn(requested_by, query):
global playing
m = await send(f"__**Searching for {query} on JioSaavn.**__")
try:
songs = await arq.saavn(query)
sname = songs[0].song
slink = songs[0].media_url
ssingers = songs[0].singers
sthumb = songs[0].image
sduration = songs[0].duration
sduration_converted = convert_seconds(int(sduration))
except Exception as e:
await m.edit("__**Found No Song Matching Your Query.**__")
print(str(e))
playing = False
return
await m.edit("__**Processing Thumbnail.**__")
await generate_cover_square(
requested_by, sname, ssingers, sduration_converted, sthumb
)
await m.edit("__**Downloading And Transcoding.**__")
await download_and_transcode_song(slink)
await m.delete()
caption = f"🏷 **Name:** {sname[:35]}\n⏳ **Duration:** {sduration_converted}\n" \
+ f"🎧 **Requested By:** {requested_by}\n📡 **Platform:** JioSaavn"
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
caption=caption,
photo="final.png",
)
os.remove("final.png")
await asyncio.sleep(int(sduration))
await m.delete()
playing = False
# Youtube Play-----------------------------------------------------
async def ytplay(requested_by, query):
global playing
ydl_opts = {"format": "bestaudio"}
m = await send(f"__**{query} YouTubedan izlanmoqda.**__")
try:
results = await arq.youtube(query)
link = f"https://youtube.com{results[0].url_suffix}"
title = results[0].title
thumbnail = results[0].thumbnails[0]
duration = results[0].duration
views = results[0].views
await app.update_profile(first_name=f"🔉{title} ",bio = f"{title} ijro etilmoqda")
if time_to_seconds(duration) >= 1800:
await m.edit("__**Yo'q, faqat 30 daqiqadan oshmagan musiqalar mumkin.**__")
playing = False
return
except Exception as e:
await m.edit("__**Siz izlagan musiqa topilmadi.**__")
playing = False
print(str(e))
return
await m.edit("__**1 soniya.**__")
await generate_cover(requested_by, title, views, duration, thumbnail)
await m.edit("__**yuklanmoqda ....**__")
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=False)
audio_file = ydl.prepare_filename(info_dict)
ydl.process_info(info_dict)
await m.edit("__**1 soniya.**__")
os.rename(audio_file, "audio.webm")
transcode("audio.webm")
await m.delete()
caption = f"🏷 **Nomi:** [{title[:35]}]({link})\n⏳ **Davomiyligi:** {duration}\n" \
+ f"🎧 {requested_by} **tomonidan ijro etildi**\n📡 **Platforma:** YouTube"
await app.set_profile_photo(photo="final.png")
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
caption=caption,
photo="final.png",
)
os.remove("final.png")
await asyncio.sleep(int(time_to_seconds(duration)))
playing = False
await m.delete()
# Telegram Audio------------------------------------
@app.on_message(
filters.command("telegram") & filters.user(SUDOERS) & ~filters.edited
)
async def tgplay(_, message):
global playing
if len(queue) != 0:
await send("__**You Can Only Play Telegram Files After The Queue Gets "
+ "Finished.**__")
return
if not message.reply_to_message:
await send("__**Reply to an audio.**__")
return
if message.reply_to_message.audio:
if int(message.reply_to_message.audio.file_size) >= 104857600:
await send("__**Bruh! Only songs within 100 MB.**__")
playing = False
return
duration = message.reply_to_message.audio.duration
if not duration:
await send("__**Only Songs With Duration Are Supported.**__")
return
m = await send("__**Downloading.**__")
song = await message.reply_to_message.download()
await m.edit("__**Transcoding.**__")
transcode(song)
await m.edit(f"**Playing** __**{message.reply_to_message.link}.**__")
await asyncio.sleep(duration)
playing = False
return
await send("__**Only Audio Files (Not Document) Are Supported.**__")
async def send(text):
m = await app.send_message(
SUDO_CHAT_ID, text=text, disable_web_page_preview=True
)
return m
print(
"\nBot Starting..."
)
app.run()
|
"""
SPDX-License-Identifier: MIT
Copyright (c) 2021, SCANOSS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import os.path
import sys
import hashlib
import time
class CycloneDx:
"""
CycloneDX management class
Handle all interaction with CycloneDX formatting
"""
def __init__(self, debug: bool = False, output_file: str = None):
"""
Initialise the CycloneDX class
"""
self.output_file = output_file
self.debug = debug
self.quiet = False  # print_msg() below checks this flag; no quiet option is passed in, so default to verbose
@staticmethod
def print_stderr(*args, **kwargs):
"""
Print the given message to STDERR
"""
print(*args, file=sys.stderr, **kwargs)
def print_msg(self, *args, **kwargs):
"""
Print message if quiet mode is not enabled
"""
if not self.quiet:
self.print_stderr(*args, **kwargs)
def print_debug(self, *args, **kwargs):
"""
Print debug message if enabled
"""
if self.debug:
self.print_stderr(*args, **kwargs)
def parse(self, data: json):
"""
Parse the given input (raw/plain) JSON string and return CycloneDX summary
:param data: json - JSON object
:return: CycloneDX dictionary
"""
if not data:
self.print_stderr('ERROR: No JSON data provided to parse.')
return None
self.print_debug(f'Processing raw results into CycloneDX format...')
cdx = {}
for f in data:
file_details = data.get(f)
# print(f'File: {f}: {file_details}')
for d in file_details:
id_details = d.get("id")
if not id_details or id_details == 'none':
# print(f'No ID for {f}')
continue
purl = None
purls = d.get('purl')
if not purls:
self.print_stderr(f'Purl block missing for {f}: {file_details}')
continue
for p in purls:
self.print_debug(f'Purl: {p}')
purl = p
break
if not purl:
self.print_stderr(f'Warning: No PURL found for {f}: {file_details}')
continue
if cdx.get(purl):
self.print_debug(f'Component {purl} already stored: {cdx.get(purl)}')
continue
fd = {}
# print(f'Vendor: {d.get('vendor')}, Comp: {d.get('component')}, Ver: {d.get('version')},'
# f' Latest: {d.get('latest')} ID: {d.get('id')}')
for field in ['id', 'vendor', 'component', 'version', 'latest']:
fd[field] = d.get(field)
licenses = d.get('licenses')
fdl = []
for lic in licenses:
# print(f'License: {lic.get('name')}')
fdl.append({'id':lic.get("name")})
fd['licenses'] = fdl
cdx[purl] = fd
# print(f'License summary: {cdx}')
return cdx
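# parse() returns a dict keyed by PURL, roughly:
# {"pkg:github/foo/bar": {"id": ..., "vendor": ..., "component": ...,
#   "version": ..., "latest": ..., "licenses": [{"id": "MIT"}]}}
# (illustrative shape only; the keys mirror the fields copied above).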
def produce_from_file(self, json_file: str, output_file: str = None) -> bool:
"""
Parse plain/raw input JSON file and produce CycloneDX output
:param json_file:
:param output_file:
:return: True if successful, False otherwise
"""
if not json_file:
self.print_stderr('ERROR: No JSON file provided to parse.')
return False
if not os.path.isfile(json_file):
self.print_stderr(f'ERROR: JSON file does not exist or is not a file: {json_file}')
return False
success = True
with open(json_file, 'r') as f:
success = self.produce_from_str(f.read(), output_file)
return success
def produce_from_json(self, data: json, output_file: str = None) -> bool:
"""
Produce the CycloneDX output from the input JSON object
:param data: JSON object
:param output_file: Output file (optional)
:return: True if successful, False otherwise
"""
cdx = self.parse(data)
if not cdx:
self.print_stderr('ERROR: No CycloneDX data returned for the JSON string provided.')
return False
md5hex = hashlib.md5(f'{time.time()}'.encode('utf-8')).hexdigest()
data = {}
data['bomFormat'] = 'CycloneDX'
data['specVersion'] = '1.2'
data['serialNumber'] = f'scanoss:SCANOSS-PY - SCANOSS CLI-{md5hex}'
data['version'] = '1'
data['components'] = []
for purl in cdx:
comp = cdx.get(purl)
lic = []
licenses = comp.get('licenses')
if licenses:
for l in licenses:
lic.append({'license': { 'id': l.get('id')}})
m_type = 'Snippet' if comp.get('id') == 'snippet' else 'Library'
data['components'].append({
'type': m_type,
'name': comp.get('component'),
'publisher': comp.get('vendor'),
'version': comp.get('version'),
'purl': purl,
'licenses': lic
# 'licenses': [{
# 'license': {
# 'id': comp.get('license')
# }
# }]
})
# End for loop
file = sys.stdout
if not output_file and self.output_file:
output_file = self.output_file
if output_file:
file = open(output_file, 'w')
print(json.dumps(data, indent=2), file=file)
if output_file:
file.close()
return True
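# The emitted document is a minimal CycloneDX 1.2 BOM: bomFormat, specVersion,
# a time-based serialNumber, version and one components entry per PURL.
# It is printed to output_file when one is given, otherwise to stdout.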
def produce_from_str(self, json_str: str, output_file: str = None) -> bool:
"""
Produce CycloneDX output from input JSON string
:param json_str: input JSON string
:param output_file: Output file (optional)
:return: True if successful, False otherwise
"""
if not json_str:
self.print_stderr('ERROR: No JSON string provided to parse.')
return False
data = None
try:
data = json.loads(json_str)
except Exception as e:
self.print_stderr(f'ERROR: Problem parsing input JSON: {e}')
return False
else:
return self.produce_from_json(data, output_file)
return False
#
# End of CycloneDX Class
# | """
SPDX-License-Identifier: MIT
Copyright (c) 2021, SCANOSS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import os.path
import sys
import hashlib
import time
class CycloneDx:
"""
CycloneDX management class
Handle all interaction with CycloneDX formatting
"""
def __init__(self, debug: bool = False, output_file: str = None):
"""
Initialise the CycloneDX class
"""
self.output_file = output_file
self.debug = debug
@staticmethod
def print_stderr(*args, **kwargs):
"""
Print the given message to STDERR
"""
print(*args, file=sys.stderr, **kwargs)
def print_msg(self, *args, **kwargs):
"""
Print message if quite mode is not enabled
"""
if not self.quiet:
self.print_stderr(*args, **kwargs)
def print_debug(self, *args, **kwargs):
"""
Print debug message if enabled
"""
if self.debug:
self.print_stderr(*args, **kwargs)
def parse(self, data: json):
"""
Parse the given input (raw/plain) JSON string and return CycloneDX summary
:param data: json - JSON object
:return: CycloneDX dictionary
"""
if not data:
self.print_stderr('ERROR: No JSON data provided to parse.')
return None
self.print_debug(f'Processing raw results into CycloneDX format...')
cdx = {}
for f in data:
file_details = data.get(f)
# print(f'File: {f}: {file_details}')
for d in file_details:
id_details = d.get("id")
if not id_details or id_details == 'none':
# print(f'No ID for {f}')
continue
purl = None
purls = d.get('purl')
if not purls:
self.print_stderr(f'Purl block missing for {f}: {file_details}')
continue
for p in purls:
self.print_debug(f'Purl: {p}')
purl = p
break
if not purl:
self.print_stderr(f'Warning: No PURL found for {f}: {file_details}')
continue
if cdx.get(purl):
self.print_debug(f'Component {purl} already stored: {cdx.get(purl)}')
continue
fd = {}
# print(f'Vendor: {d.get("vendor")}, Comp: {d.get("component")}, Ver: {d.get("version")},'
# f' Latest: {d.get("latest")} ID: {d.get("id")}')
for field in ['id', 'vendor', 'component', 'version', 'latest']:
fd[field] = d.get(field)
licenses = d.get('licenses')
fdl = []
for lic in licenses:
# print(f'License: {lic.get("name")}')
fdl.append({'id':lic.get("name")})
fd['licenses'] = fdl
cdx[p] = fd
# print(f'License summary: {cdx}')
return cdx
def produce_from_file(self, json_file: str, output_file: str = None) -> bool:
"""
Parse plain/raw input JSON file and produce CycloneDX output
:param json_file:
:param output_file:
:return: True if successful, False otherwise
"""
if not json_file:
self.print_stderr('ERROR: No JSON file provided to parse.')
return False
if not os.path.isfile(json_file):
self.print_stderr(f'ERROR: JSON file does not exist or is not a file: {json_file}')
return False
success = True
with open(json_file, 'r') as f:
success = self.produce_from_str(f.read(), output_file)
return success
def produce_from_json(self, data: json, output_file: str = None) -> bool:
"""
Produce the CycloneDX output from the input JSON object
:param data: JSON object
:param output_file: Output file (optional)
:return: True if successful, False otherwise
"""
cdx = self.parse(data)
if not cdx:
self.print_stderr('ERROR: No CycloneDX data returned for the JSON string provided.')
return False
md5hex = hashlib.md5(f'{time.time()}'.encode('utf-8')).hexdigest()
data = {}
data['bomFormat'] = 'CycloneDX'
data['specVersion'] = '1.2'
data['serialNumber'] = f'scanoss:SCANOSS-PY - SCANOSS CLI-{md5hex}'
data['version'] = '1'
data['components'] = []
for purl in cdx:
comp = cdx.get(purl)
lic = []
licenses = comp.get('licenses')
if licenses:
for l in licenses:
lic.append({'license': { 'id': l.get('id')}})
m_type = 'Snippet' if comp.get('id') == 'snippet' else 'Library'
data['components'].append({
'type': m_type,
'name': comp.get('component'),
'publisher': comp.get('vendor'),
'version': comp.get('version'),
'purl': purl,
'licenses': lic
# 'licenses': [{
# 'license': {
# 'id': comp.get('license')
# }
# }]
})
# End for loop
file = sys.stdout
if not output_file and self.output_file:
output_file = self.output_file
if output_file:
file = open(output_file, 'w')
print(json.dumps(data, indent=2), file=file)
if output_file:
file.close()
return True
def produce_from_str(self, json_str: str, output_file: str = None) -> bool:
"""
Produce CycloneDX output from input JSON string
:param json_str: input JSON string
:param output_file: Output file (optional)
:return: True if successful, False otherwise
"""
if not json_str:
self.print_stderr('ERROR: No JSON string provided to parse.')
return False
data = None
try:
data = json.loads(json_str)
except Exception as e:
self.print_stderr(f'ERROR: Problem parsing input JSON: {e}')
return False
else:
return self.produce_from_json(data, output_file)
return False
#
# End of CycloneDX Class
# |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) DeFi Blockchain Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test Futures contract RPC."""
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from decimal import Decimal
import time
def sort_history(e):
return e['txn']
class FuturesTest(DefiTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-txnotokens=0', '-amkheight=1', '-bayfrontheight=1', '-eunosheight=1', '-fortcanningheight=1', '-fortcanninghillheight=1', '-fortcanningroadheight=150', '-subsidytest=1']]
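# fortcanningroadheight=150 is the fork height this test relies on:
# futures_setup() first generates up to block 150, after which the DFIP2203
# Gov variables can be set and future swaps become possible.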
def run_test(self):
self.nodes[0].generate(101)
# Set up oracles and tokens
self.setup_test()
# Test setting of futures Gov vars
self.futures_setup()
# Test dToken to DUSD
self.test_dtoken_to_dusd()
# Test DUSD to dToken
self.test_dusd_to_dtoken()
# Test futures block range
self.check_swap_block_range()
# Test multiple swaps per account
self.check_multiple_swaps()
# Test withdrawal
self.check_withdrawals()
# Test Satoshi swaps
self.check_minimum_swaps()
# Test changing Gov vars
self.check_gov_var_change()
# Test refunding of unpaid futures
self.unpaid_contract()
# Test list future swap history
self.rpc_history()
def setup_test(self):
# Store addresses
self.address = self.nodes[0].get_genesis_keys().ownerAuthAddress
self.contract_address = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpsqgljc'
# Store interval
self.futures_interval = 25
# RPC history checks
self.list_history = []
# Set token symbols
self.symbolDFI = 'DFI'
self.symbolDUSD = 'DUSD'
self.symbolTSLA = 'TSLA'
self.symbolGOOGL = 'GOOGL'
self.symbolTWTR = 'TWTR'
self.symbolMSFT = 'MSFT'
self.symbolBTC = 'BTC'
# Setup oracle
oracle_address = self.nodes[0].getnewaddress("", "legacy")
price_feeds = [
{"currency": "USD", "token": self.symbolDFI},
{"currency": "USD", "token": self.symbolTSLA},
{"currency": "USD", "token": self.symbolGOOGL},
{"currency": "USD", "token": self.symbolTWTR},
{"currency": "USD", "token": self.symbolMSFT}
]
self.oracle_id = self.nodes[0].appointoracle(oracle_address, price_feeds, 10)
self.nodes[0].generate(1)
# Create Oracle prices
self.price_tsla = 870
self.price_googl = 2600
self.price_twtr = 37
self.price_msft = 295
# Calculate future swap prices
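# premiumPrice/discountPrice are the oracle price +/- 5%, matching the 0.05
# reward_pct set in futures_setup(): DUSD->dToken swaps pay the premium while
# dToken->DUSD swaps receive the discount (checked against minted totals below).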
self.prices = []
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_tsla)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_tsla)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_googl)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_googl)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_twtr)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_twtr)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_msft)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_msft)) * Decimal('0.95000000')))
})
# Feed oracle
oracle_prices = [
{"currency": "USD", "tokenAmount": f'{self.price_tsla}@{self.symbolTSLA}'},
{"currency": "USD", "tokenAmount": f'{self.price_googl}@{self.symbolGOOGL}'},
{"currency": "USD", "tokenAmount": f'{self.price_twtr}@{self.symbolTWTR}'},
{"currency": "USD", "tokenAmount": f'{self.price_msft}@{self.symbolMSFT}'},
]
self.nodes[0].setoracledata(self.oracle_id, int(time.time()), oracle_prices)
self.nodes[0].generate(10)
# Set up non-loan token for failure test
self.nodes[0].createtoken({
"symbol": self.symbolBTC,
"name": self.symbolBTC,
"isDAT": True,
"collateralAddress": self.address
})
self.nodes[0].generate(1)
# Setup loan tokens
self.nodes[0].setloantoken({
'symbol': self.symbolDUSD,
'name': self.symbolDUSD,
'fixedIntervalPriceId': f'{self.symbolDUSD}/USD',
'mintable': True,
'interest': 0})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolTSLA,
'name': self.symbolTSLA,
'fixedIntervalPriceId': f'{self.symbolTSLA}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolGOOGL,
'name': self.symbolGOOGL,
'fixedIntervalPriceId': f'{self.symbolGOOGL}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolTWTR,
'name': self.symbolTWTR,
'fixedIntervalPriceId': f'{self.symbolTWTR}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolMSFT,
'name': self.symbolMSFT,
'fixedIntervalPriceId': f'{self.symbolMSFT}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
# Set token ids
self.idDUSD = list(self.nodes[0].gettoken(self.symbolDUSD).keys())[0]
self.idTSLA = list(self.nodes[0].gettoken(self.symbolTSLA).keys())[0]
self.idGOOGL = list(self.nodes[0].gettoken(self.symbolGOOGL).keys())[0]
self.idTWTR = list(self.nodes[0].gettoken(self.symbolTWTR).keys())[0]
self.idMSFT = list(self.nodes[0].gettoken(self.symbolMSFT).keys())[0]
self.idBTC = list(self.nodes[0].gettoken(self.symbolBTC).keys())[0]
# Mint tokens for swapping
self.nodes[0].minttokens([f'100000@{self.idDUSD}'])
self.nodes[0].minttokens([f'100000@{self.idTSLA}'])
self.nodes[0].minttokens([f'100000@{self.idGOOGL}'])
self.nodes[0].minttokens([f'100000@{self.idTWTR}'])
self.nodes[0].minttokens([f'100000@{self.idMSFT}'])
self.nodes[0].generate(1)
def futures_setup(self):
# Move to fork block
self.nodes[0].generate(150 - self.nodes[0].getblockcount())
# Create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Try futureswap before feature is active
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
# Set partial futures attributes
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Try futureswap before feature is fully active
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
# Set all futures attributes but set active to false
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false','v0/params/dfip2203/reward_pct':'0.05','v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
self.nodes[0].generate(1)
# Try futureswap with DFIP2203 active set to false
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
# Fully enable DFIP2203
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Verify Gov vars
result = self.nodes[0].getgov('ATTRIBUTES')['ATTRIBUTES']
assert_equal(result['v0/params/dfip2203/active'], 'true')
assert_equal(result['v0/params/dfip2203/reward_pct'], '0.05')
assert_equal(result['v0/params/dfip2203/block_period'], str(self.futures_interval))
# Disable DUSD
self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idDUSD)}/dfip2203':'false'}})
self.nodes[0].generate(1)
# Verify Gov vars
result = self.nodes[0].getgov('ATTRIBUTES')['ATTRIBUTES']
assert_equal(result[f'v0/token/{self.idDUSD}/dfip2203'], 'false')
# Check futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
assert_equal(next_futures_block, self.nodes[0].getfutureswapblock())
def test_dtoken_to_dusd(self):
# Create addresses for futures
address_msft = self.nodes[0].getnewaddress("", "legacy")
address_googl = self.nodes[0].getnewaddress("", "legacy")
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_twtr = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_msft: f'1@{self.symbolMSFT}'})
self.nodes[0].accounttoaccount(self.address, {address_googl: f'1@{self.symbolGOOGL}'})
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'1@{self.symbolTSLA}'})
self.nodes[0].accounttoaccount(self.address, {address_twtr: f'1@{self.symbolTWTR}'})
self.nodes[0].generate(1)
# Test futureswap failures
assert_raises_rpc_error(-32600, f'Could not get source loan token {self.idBTC}', self.nodes[0].futureswap, self.address, f'1@{self.symbolBTC}')
assert_raises_rpc_error(-32600, f'DFIP2203 currently disabled for token {self.idDUSD}', self.nodes[0].futureswap, self.address, f'1@{self.symbolDUSD}', int(self.idDUSD))
assert_raises_rpc_error(-32600, f'Could not get destination loan token {self.idBTC}. Set valid destination.', self.nodes[0].futureswap, self.address, f'1@{self.symbolDUSD}', int(self.idBTC))
assert_raises_rpc_error(-32600, 'Destination should not be set when source amount is a dToken', self.nodes[0].futureswap, self.address, f'1@{self.symbolTSLA}', int(self.idBTC))
assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 1.00000000', self.nodes[0].futureswap, address_twtr, f'1@{self.symbolTSLA}')
# Create user futures contracts
self.nodes[0].futureswap(address_twtr, f'1@{self.symbolTWTR}')
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_tsla, f'1@{self.symbolTSLA}')
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_googl, f'1@{self.symbolGOOGL}')
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_msft, f'1@{self.symbolMSFT}')
self.nodes[0].generate(1)
# List user futures contracts
result = self.nodes[0].listpendingfutureswaps()
assert_equal(result[0]['owner'], address_msft)
assert_equal(result[0]['source'], f'{Decimal('1.00000000')}@{self.symbolMSFT}')
assert_equal(result[0]['destination'], self.symbolDUSD)
assert_equal(result[1]['owner'], address_googl)
assert_equal(result[1]['source'], f'{Decimal('1.00000000')}@{self.symbolGOOGL}')
assert_equal(result[1]['destination'], self.symbolDUSD)
assert_equal(result[2]['owner'], address_tsla)
assert_equal(result[2]['source'], f'{Decimal('1.00000000')}@{self.symbolTSLA}')
assert_equal(result[2]['destination'], self.symbolDUSD)
assert_equal(result[3]['owner'], address_twtr)
assert_equal(result[3]['source'], f'{Decimal('1.00000000')}@{self.symbolTWTR}')
assert_equal(result[3]['destination'], self.symbolDUSD)
# Get user MSFT futures swap by address
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(result['values'][0]['source'], f'{Decimal('1.00000000')}@{self.symbolMSFT}')
assert_equal(result['values'][0]['destination'], self.symbolDUSD)
# Get user GOOGL futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(result['values'][0]['source'], f'{Decimal('1.00000000')}@{self.symbolGOOGL}')
assert_equal(result['values'][0]['destination'], self.symbolDUSD)
# Get user TSLA futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(result['values'][0]['source'], f'{Decimal('1.00000000')}@{self.symbolTSLA}')
assert_equal(result['values'][0]['destination'], self.symbolDUSD)
# Get user TWTR futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{Decimal('1.00000000')}@{self.symbolTWTR}')
assert_equal(result['values'][0]['destination'], self.symbolDUSD)
# Check DFI2203 amounts do not show up as burns yet
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [])
# Check DFI2203 address on listgovs, current shows pending, burn should be empty.
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
assert('v0/live/economy/dfip2203_burned' not in result)
assert('v0/live/economy/dfip2203_minted' not in result)
# Get token total minted before future swap
total_dusd = Decimal(self.nodes[0].gettoken(self.idDUSD)[self.idDUSD]['minted'])
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check total minted incremented as expected
new_total_dusd = Decimal(self.nodes[0].gettoken(self.idDUSD)[self.idDUSD]['minted'])
assert_equal(total_dusd + self.prices[0]["discountPrice"] + self.prices[1]["discountPrice"] + self.prices[2]["discountPrice"] + self.prices[3]["discountPrice"], new_total_dusd)
# Check TXN ordering
txn_first = 4294967295
result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'q'})
result.sort(key = sort_history, reverse = True)
for result_entry in result:
assert_equal(result_entry['txn'], txn_first)
txn_first -= 1
# Pending futures should now be empty
result = self.nodes[0].listpendingfutureswaps()
assert_equal(len(result), 0)
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(len(result['values']), 0)
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on listgovs
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on getburninfo
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check that futures have been executed
result = self.nodes[0].getaccount(address_msft)
assert_equal(result, [f'{self.prices[3]['discountPrice']}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{self.prices[1]['discountPrice']}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'{self.prices[0]['discountPrice']}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_twtr)
assert_equal(result, [f'{self.prices[2]['discountPrice']}@{self.symbolDUSD}'])
# Populate RPC check
self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
{'address': address_tsla, 'destination': f'{self.prices[0]['discountPrice']}@{self.symbolDUSD}'},
{'address': address_googl, 'destination': f'{self.prices[1]['discountPrice']}@{self.symbolDUSD}'},
{'address': address_twtr, 'destination': f'{self.prices[2]['discountPrice']}@{self.symbolDUSD}'},
{'address': address_msft, 'destination': f'{self.prices[3]['discountPrice']}@{self.symbolDUSD}'},
]})
def test_dusd_to_dtoken(self):
# Create addresses for futures
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_twtr = self.nodes[0].getnewaddress("", "legacy")
address_googl = self.nodes[0].getnewaddress("", "legacy")
address_msft = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]['premiumPrice']}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]['premiumPrice']}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]['premiumPrice']}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_msft: f'{self.prices[3]['premiumPrice']}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_msft, f'{self.prices[3]['premiumPrice']}@{self.symbolDUSD}', self.idMSFT)
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]['premiumPrice']}@{self.symbolDUSD}', self.idTWTR)
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_googl, f'{self.prices[1]['premiumPrice']}@{self.symbolDUSD}', self.symbolGOOGL)
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]['premiumPrice']}@{self.symbolDUSD}', self.symbolTSLA)
self.nodes[0].generate(1)
# List user futures contracts
result = self.nodes[0].listpendingfutureswaps()
assert_equal(result[0]['owner'], address_tsla)
assert_equal(result[0]['source'], f'{self.prices[0]['premiumPrice']}@{self.symbolDUSD}')
assert_equal(result[0]['destination'], self.symbolTSLA)
assert_equal(result[1]['owner'], address_googl)
assert_equal(result[1]['source'], f'{self.prices[1]['premiumPrice']}@{self.symbolDUSD}')
assert_equal(result[1]['destination'], self.symbolGOOGL)
assert_equal(result[2]['owner'], address_twtr)
assert_equal(result[2]['source'], f'{self.prices[2]['premiumPrice']}@{self.symbolDUSD}')
assert_equal(result[2]['destination'], self.symbolTWTR)
assert_equal(result[3]['owner'], address_msft)
assert_equal(result[3]['source'], f'{self.prices[3]['premiumPrice']}@{self.symbolDUSD}')
assert_equal(result[3]['destination'], self.symbolMSFT)
# Get user TSLA futures swap by address
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(result['values'][0]['source'], f'{self.prices[0]['premiumPrice']}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTSLA)
# Get user GOOGL futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(result['values'][0]['source'], f'{self.prices[1]['premiumPrice']}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
# Get user TWTR futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{self.prices[2]['premiumPrice']}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
# Get user MSFT futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(result['values'][0]['source'], f'{self.prices[3]['premiumPrice']}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolMSFT)
# Check new DFI2203 amounts do not show up as burns yet
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on listgovs, current shows pending if any, burned shows
# deposits from executed swaps and minted shows output from executed swaps.
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
assert_equal(result['v0/live/economy/dfip2203_burned'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
assert_equal(result['v0/live/economy/dfip2203_minted'], [f'{self.prices[0]['discountPrice'] + self.prices[1]['discountPrice'] + self.prices[2]['discountPrice'] + self.prices[3]['discountPrice']}@{self.symbolDUSD}'])
# Get token total minted before future swap
total_tsla = Decimal(self.nodes[0].gettoken(self.idTSLA)[self.idTSLA]['minted'])
total_googl = Decimal(self.nodes[0].gettoken(self.idGOOGL)[self.idGOOGL]['minted'])
total_twtr = Decimal(self.nodes[0].gettoken(self.idTWTR)[self.idTWTR]['minted'])
total_msft = Decimal(self.nodes[0].gettoken(self.idMSFT)[self.idMSFT]['minted'])
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check minted totals incremented as expected
new_total_tsla = Decimal(self.nodes[0].gettoken(self.idTSLA)[self.idTSLA]['minted'])
new_total_googl = Decimal(self.nodes[0].gettoken(self.idGOOGL)[self.idGOOGL]['minted'])
new_total_twtr = Decimal(self.nodes[0].gettoken(self.idTWTR)[self.idTWTR]['minted'])
new_total_msft = Decimal(self.nodes[0].gettoken(self.idMSFT)[self.idMSFT]['minted'])
assert_equal(total_tsla + Decimal('1.00000000'), new_total_tsla)
assert_equal(total_googl + Decimal('1.00000000'), new_total_googl)
assert_equal(total_twtr + Decimal('1.00000000'), new_total_twtr)
assert_equal(total_msft + Decimal('1.00000000'), new_total_msft)
# Pending futures should now be empty
result = self.nodes[0].listpendingfutureswaps()
assert_equal(len(result), 0)
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(len(result['values']), 0)
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on listgovs
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on getburninfo
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check that futures have been executed
result = self.nodes[0].getaccount(address_msft)
assert_equal(result, [f'1.00000000@{self.symbolMSFT}'])
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'1.00000000@{self.symbolGOOGL}'])
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'1.00000000@{self.symbolTSLA}'])
result = self.nodes[0].getaccount(address_twtr)
assert_equal(result, [f'1.00000000@{self.symbolTWTR}'])
# Populate RPC check
self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
{'address': address_tsla, 'destination': f'1.00000000@{self.symbolTSLA}'},
{'address': address_googl, 'destination': f'1.00000000@{self.symbolGOOGL}'},
{'address': address_twtr, 'destination': f'1.00000000@{self.symbolTWTR}'},
{'address': address_msft, 'destination': f'1.00000000@{self.symbolMSFT}'},
]})
def check_swap_block_range(self):
# Create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]['premiumPrice'] * 2}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Move to just before futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount() - 1)
# Create user futures contracts on futures block
self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
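# The swap above is mined in the futures block itself, so it settles in that same
# block rather than waiting for the next interval.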
# Check that futures have been executed
result = self.nodes[0].getaccount(address)
assert_equal(result, [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}'])
# Check all pending swaps shows no entries
result = self.nodes[0].listpendingfutureswaps()
assert_equal(len(result), 0)
# Check user pending swaps is empty
result = self.nodes[0].getpendingfutureswaps(address)
assert_equal(len(result['values']), 0)
# Try and withdraw smallest amount now contract has been paid
assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 0.00000001', self.nodes[0].withdrawfutureswap, address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
# Populate RPC check
self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
{'address': address, 'destination': f'1.00000000@{self.symbolTSLA}'},
]})
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check that futures have not been executed again
result = self.nodes[0].getaccount(address)
assert_equal(result, [f'913.50000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'4905.60000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_multiple_swaps(self):
# Create addresses for futures
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_twtr = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create two user futures contracts
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].generate(1)
# Get user TSLA futures swap by address
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTSLA)
assert_equal(result['values'][1]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolTSLA)
# Get user TWTR futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
assert_equal(result['values'][1]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolTWTR)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check that futures have been executed
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'2.00000000@{self.symbolTSLA}'])
result = self.nodes[0].getaccount(address_twtr)
assert_equal(result, [f'2.00000000@{self.symbolTWTR}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'6810.30000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_withdrawals(self):
# Create addresses for futures
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_twtr = self.nodes[0].getnewaddress("", "legacy")
address_googl = self.nodes[0].getnewaddress("", "legacy")
address_msft = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_msft: f'{self.prices[3]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', int(self.idMSFT))
self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', int(self.idMSFT))
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Get user TSLA futures swaps by address
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTSLA)
assert_equal(result['values'][1]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolTSLA)
# Get user GOOGL futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(result['values'][0]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
assert_equal(result['values'][1]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolGOOGL)
# Get user TWTR futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
assert_equal(result['values'][1]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolTWTR)
# Get user MSFT futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(result['values'][0]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolMSFT)
assert_equal(result['values'][1]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolMSFT)
# Check withdrawal failures
assert_raises_rpc_error(-32600, f'amount 0.00000000 is less than {self.prices[2]["premiumPrice"] * 2}', self.nodes[0].withdrawfutureswap, address_tsla, f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}', int(self.idTWTR))
assert_raises_rpc_error(-32600, f'amount {self.prices[0]["premiumPrice"] * 2} is less than {(self.prices[0]["premiumPrice"] * 2) + Decimal("0.00000001")}', self.nodes[0].withdrawfutureswap, address_tsla, f'{(self.prices[0]["premiumPrice"] * 2) + Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
# Withdraw both TSLA contracts
self.nodes[0].withdrawfutureswap(address_tsla, f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Check user pending swap is empty
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(len(result['values']), 0)
# Try and withdraw smallest amount now contract empty
assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 0.00000001', self.nodes[0].withdrawfutureswap, address_tsla, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
# Withdraw from GOOGL everything but one Sat
self.nodes[0].withdrawfutureswap(address_googl, f'{(self.prices[1]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].generate(1)
# Check user pending swap
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(result['values'][0]['source'], f'0.00000001@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
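# The single remaining Satoshi stays pending; at the futures block it is worth less
# than one GOOGL Satoshi at the premium price, so the final balance check later in
# this method shows only DUSD for this address.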
# Withdraw one TWTR contract plus 1 Sat of the second one
self.nodes[0].withdrawfutureswap(address_twtr, f'{self.prices[2]["premiumPrice"] + Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].generate(1)
# Check user pending swap
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"] - Decimal("0.00000001")}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
# Withdraw one Sat
self.nodes[0].withdrawfutureswap(address_msft, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idMSFT))
self.nodes[0].generate(1)
# Check user pending swap
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(result['values'][0]['source'], f'{(self.prices[3]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolMSFT)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check final balances
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_twtr)
assert_equal(result, [f'{self.prices[2]["premiumPrice"] + Decimal("0.00000001")}@{self.symbolDUSD}', f'0.99999999@{self.symbolTWTR}'])
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{(self.prices[1]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_msft)
assert_equal(result, [f'0.00000001@{self.symbolDUSD}', f'1.99999999@{self.symbolMSFT}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on listgovs
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on getburninfo
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_minimum_swaps(self):
# Create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contract with 1 Satoshi
self.nodes[0].futureswap(address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check one Satoshi swap yields no TSLA
result = self.nodes[0].getaccount(address)
assert_equal(result, [f'{self.prices[0]["premiumPrice"] - Decimal("0.00000001")}@{self.symbolDUSD}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.65000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Create user futures contract to purchase one Satoshi of TSLA
min_purchase = round(self.prices[0]["premiumPrice"] / 100000000, 8)
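# 913.50000000 DUSD (the TSLA premium price) / 100000000 = 0.000009135, which rounds
# (banker's rounding) to 0.00000914 DUSD, the smallest deposit that still yields one
# TSLA Satoshi; the contract balance assertions below reflect this amount.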
self.nodes[0].futureswap(address, f'{min_purchase}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check one Satoshi swap yields one TSLA Satoshi
result = self.nodes[0].getaccount(address)
assert_equal(result, [f'{self.prices[0]["premiumPrice"] - Decimal("0.00000001") - Decimal(min_purchase)}@{self.symbolDUSD}', f'0.00000001@{self.symbolTSLA}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.65000914@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_gov_var_change(self):
# Set up for block range change, create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Move to before next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval)) - 1
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Create user futures contract with 1 Satoshi to invalidate block period change
self.nodes[0].futureswap(address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Check contract address has updated
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Test changing block period while DFIP2203 still active
assert_raises_rpc_error(-32600, 'Cannot set block period while DFIP2203 is active', self.nodes[0].setgov, {"ATTRIBUTES":{'v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
# Disable DFIP2203 to be able to change block period
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false'}})
self.nodes[0].generate(1)
# Check contract address has not changed, no refund on disabling DFIP2203.
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Now set the new block period
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
self.nodes[0].generate(1)
# Enable DFIP2203
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Create addresses
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_googl = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Disable DFIP2203
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false'}})
self.nodes[0].generate(1)
# Check TXN ordering on Gov var refunds
txn_first = 4294967295
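# 4294967295 is UINT32_MAX (0xFFFFFFFF); refund records are expected to occupy the
# highest txn positions in the block, counting down, which the loop below asserts.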
result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'w'})
result.sort(key = sort_history, reverse = True)
for result_entry in result:
assert_equal(result_entry['blockHeight'], self.nodes[0].getblockcount())
assert_equal(result_entry['type'], 'FutureSwapRefund')
assert_equal(result_entry['txn'], txn_first)
txn_first -= 1
# Check other refund entries
assert_equal(result[0]['owner'], self.contract_address)
assert_equal(result[2]['owner'], self.contract_address)
if result[0]['amounts'] != [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}']:
assert_equal(result[0]['amounts'], [f'{-self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
if result[2]['amounts'] != [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}']:
assert_equal(result[2]['amounts'], [f'{-self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
if result[1]['owner'] == address_googl:
assert_equal(result[1]['amounts'], [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
else:
assert_equal(result[1]['owner'], address_tsla)
assert_equal(result[1]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
if result[3]['owner'] == address_googl:
assert_equal(result[3]['amounts'], [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
else:
assert_equal(result[3]['owner'], address_tsla)
assert_equal(result[3]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
# Balances should be restored
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
# Check contract address remains the same
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Enable DFIP2203
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Disable GOOGL
self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'false'}})
self.nodes[0].generate(1)
# Only TSLA contract should remain
result = self.nodes[0].listpendingfutureswaps()
assert_equal(len(result), 1)
assert_equal(result[0]['owner'], address_tsla)
assert_equal(result[0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result[0]['destination'], self.symbolTSLA)
# Balance should be restored
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
# TSLA balance should be empty
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [])
# Enable GOOGL
self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'true'}})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].generate(1)
# GOOGL balance should be empty
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [])
# Disable GOOGL
self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'false'}})
self.nodes[0].generate(1)
# Balance should be restored
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check all balances
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'1.00000000@{self.symbolTSLA}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
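# 7468.65000915 (previous contract balance) + 913.50000000 deposited for the TSLA
# swap that was left in place = 8382.15000915; the refunded GOOGL deposits never stay
# in the contract.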
# Check DFI2203 address on listgovs
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on getburninfo
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def unpaid_contract(self):
# Create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contract
self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Remove Oracle
self.nodes[0].removeoracle(self.oracle_id)
self.nodes[0].generate(1)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
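# With the oracle removed there is no live TSLA price at the futures block, so the
# swaps cannot execute and both deposits are refunded, which the history and balance
# checks below verify.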
# Check refund in history
result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'w'})
result.sort(key = sort_history, reverse = True)
assert_equal(result[0]['owner'], self.contract_address)
assert_equal(result[0]['type'], 'FutureSwapRefund')
assert_equal(result[0]['amounts'], [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
assert_equal(result[1]['owner'], address)
assert_equal(result[1]['type'], 'FutureSwapRefund')
assert_equal(result[1]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
assert_equal(result[2]['owner'], self.contract_address)
assert_equal(result[2]['type'], 'FutureSwapRefund')
assert_equal(result[2]['amounts'], [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
assert_equal(result[3]['owner'], address)
assert_equal(result[3]['type'], 'FutureSwapRefund')
assert_equal(result[3]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
# Check user has been refunded
result = self.nodes[0].getaccount(address)
assert_equal(result, [f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on listgovs
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on getburninfo
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def rpc_history(self):
# Check some historical swaps
for history in self.list_history:
result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":history['height'], 'depth':0, 'txtype':'q'})
for history_entry in history['swaps']:
found = False
for result_entry in result:
assert_equal(history['height'], result_entry['blockHeight'])
if result_entry['owner'] == history_entry['address']:
assert_equal(result_entry['owner'], history_entry['address'])
assert_equal(result_entry['type'], 'FutureSwapExecution')
assert_equal(result_entry['amounts'], [history_entry['destination']])
found = True
assert(found)
# Check all swaps present
result = self.nodes[0].listaccounthistory('all', {'txtype':'q'})
assert_equal(len(result), 17)
# Check all swap refunds present
result = self.nodes[0].listaccounthistory('all', {'txtype':'w'})
assert_equal(len(result), 12)
# Check swap by specific address
result = self.nodes[0].listaccounthistory(self.list_history[0]['swaps'][0]['address'], {'txtype':'q'})
assert_equal(len(result), 1)
assert_equal(result[0]['blockHeight'], self.list_history[0]['height'])
assert_equal(result[0]['owner'], self.list_history[0]['swaps'][0]['address'])
assert_equal(result[0]['amounts'], [self.list_history[0]['swaps'][0]['destination']])
if __name__ == '__main__':
FuturesTest().main()
| #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) DeFi Blockchain Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test Futures contract RPC."""
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from decimal import Decimal
import time
def sort_history(e):
return e['txn']
class FuturesTest(DefiTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-txnotokens=0', '-amkheight=1', '-bayfrontheight=1', '-eunosheight=1', '-fortcanningheight=1', '-fortcanninghillheight=1', '-fortcanningroadheight=150', '-subsidytest=1']]
def run_test(self):
self.nodes[0].generate(101)
# Set up oracles and tokens
self.setup_test()
# Test setting of futures Gov vars
self.futures_setup()
# Test dToken to DUSD
self.test_dtoken_to_dusd()
# Test DUSD to dToken
self.test_dusd_to_dtoken()
# Test futures block range
self.check_swap_block_range()
# Test multiple swaps per account
self.check_multiple_swaps()
# Test withdrawal
self.check_withdrawals()
# Test Satoshi swaps
self.check_minimum_swaps()
# Test changing Gov vars
self.check_gov_var_change()
# Test refunding of unpaid futures
self.unpaid_contract()
# Test list future swap history
self.rpc_history()
def setup_test(self):
# Store addresses
self.address = self.nodes[0].get_genesis_keys().ownerAuthAddress
self.contract_address = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpsqgljc'
# Store interval
self.futures_interval = 25
# RPC history checks
self.list_history = []
# Set token symbols
self.symbolDFI = 'DFI'
self.symbolDUSD = 'DUSD'
self.symbolTSLA = 'TSLA'
self.symbolGOOGL = 'GOOGL'
self.symbolTWTR = 'TWTR'
self.symbolMSFT = 'MSFT'
self.symbolBTC = 'BTC'
# Setup oracle
oracle_address = self.nodes[0].getnewaddress("", "legacy")
price_feeds = [
{"currency": "USD", "token": self.symbolDFI},
{"currency": "USD", "token": self.symbolTSLA},
{"currency": "USD", "token": self.symbolGOOGL},
{"currency": "USD", "token": self.symbolTWTR},
{"currency": "USD", "token": self.symbolMSFT}
]
self.oracle_id = self.nodes[0].appointoracle(oracle_address, price_feeds, 10)
self.nodes[0].generate(1)
# Create Oracle prices
self.price_tsla = 870
self.price_googl = 2600
self.price_twtr = 37
self.price_msft = 295
# Calculate future swap prices
self.prices = []
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_tsla)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_tsla)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_googl)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_googl)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_twtr)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_twtr)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_msft)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_msft)) * Decimal('0.95000000')))
})
# Feed oracle
oracle_prices = [
{"currency": "USD", "tokenAmount": f'{self.price_tsla}@{self.symbolTSLA}'},
{"currency": "USD", "tokenAmount": f'{self.price_googl}@{self.symbolGOOGL}'},
{"currency": "USD", "tokenAmount": f'{self.price_twtr}@{self.symbolTWTR}'},
{"currency": "USD", "tokenAmount": f'{self.price_msft}@{self.symbolMSFT}'},
]
self.nodes[0].setoracledata(self.oracle_id, int(time.time()), oracle_prices)
self.nodes[0].generate(10)
# Set up non-loan token for failure test
self.nodes[0].createtoken({
"symbol": self.symbolBTC,
"name": self.symbolBTC,
"isDAT": True,
"collateralAddress": self.address
})
self.nodes[0].generate(1)
# Setup loan tokens
self.nodes[0].setloantoken({
'symbol': self.symbolDUSD,
'name': self.symbolDUSD,
'fixedIntervalPriceId': f'{self.symbolDUSD}/USD',
'mintable': True,
'interest': 0})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolTSLA,
'name': self.symbolTSLA,
'fixedIntervalPriceId': f'{self.symbolTSLA}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolGOOGL,
'name': self.symbolGOOGL,
'fixedIntervalPriceId': f'{self.symbolGOOGL}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolTWTR,
'name': self.symbolTWTR,
'fixedIntervalPriceId': f'{self.symbolTWTR}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolMSFT,
'name': self.symbolMSFT,
'fixedIntervalPriceId': f'{self.symbolMSFT}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
# Set token ids
self.idDUSD = list(self.nodes[0].gettoken(self.symbolDUSD).keys())[0]
self.idTSLA = list(self.nodes[0].gettoken(self.symbolTSLA).keys())[0]
self.idGOOGL = list(self.nodes[0].gettoken(self.symbolGOOGL).keys())[0]
self.idTWTR = list(self.nodes[0].gettoken(self.symbolTWTR).keys())[0]
self.idMSFT = list(self.nodes[0].gettoken(self.symbolMSFT).keys())[0]
self.idBTC = list(self.nodes[0].gettoken(self.symbolBTC).keys())[0]
# Mint tokens for swapping
self.nodes[0].minttokens([f'100000@{self.idDUSD}'])
self.nodes[0].minttokens([f'100000@{self.idTSLA}'])
self.nodes[0].minttokens([f'100000@{self.idGOOGL}'])
self.nodes[0].minttokens([f'100000@{self.idTWTR}'])
self.nodes[0].minttokens([f'100000@{self.idMSFT}'])
self.nodes[0].generate(1)
def futures_setup(self):
# Move to fork block
self.nodes[0].generate(150 - self.nodes[0].getblockcount())
# Create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Try futureswap before feature is active
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
# Set partial futures attributes
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Try futureswap before feature is fully active
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
# Set all futures attributes but set active to false
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false','v0/params/dfip2203/reward_pct':'0.05','v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
self.nodes[0].generate(1)
# Try futureswap with DFIP2203 active set to false
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
# Fully enable DFIP2203
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Verify Gov vars
result = self.nodes[0].getgov('ATTRIBUTES')['ATTRIBUTES']
assert_equal(result['v0/params/dfip2203/active'], 'true')
assert_equal(result['v0/params/dfip2203/reward_pct'], '0.05')
assert_equal(result['v0/params/dfip2203/block_period'], str(self.futures_interval))
# Disable DUSD
self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idDUSD)}/dfip2203':'false'}})
self.nodes[0].generate(1)
# Verify Gov vars
result = self.nodes[0].getgov('ATTRIBUTES')['ATTRIBUTES']
assert_equal(result[f'v0/token/{self.idDUSD}/dfip2203'], 'false')
# Check futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
assert_equal(next_futures_block, self.nodes[0].getfutureswapblock())
def test_dtoken_to_dusd(self):
# Create addresses for futures
address_msft = self.nodes[0].getnewaddress("", "legacy")
address_googl = self.nodes[0].getnewaddress("", "legacy")
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_twtr = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_msft: f'1@{self.symbolMSFT}'})
self.nodes[0].accounttoaccount(self.address, {address_googl: f'1@{self.symbolGOOGL}'})
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'1@{self.symbolTSLA}'})
self.nodes[0].accounttoaccount(self.address, {address_twtr: f'1@{self.symbolTWTR}'})
self.nodes[0].generate(1)
# Test futureswap failures
assert_raises_rpc_error(-32600, f'Could not get source loan token {self.idBTC}', self.nodes[0].futureswap, self.address, f'1@{self.symbolBTC}')
assert_raises_rpc_error(-32600, f'DFIP2203 currently disabled for token {self.idDUSD}', self.nodes[0].futureswap, self.address, f'1@{self.symbolDUSD}', int(self.idDUSD))
assert_raises_rpc_error(-32600, f'Could not get destination loan token {self.idBTC}. Set valid destination.', self.nodes[0].futureswap, self.address, f'1@{self.symbolDUSD}', int(self.idBTC))
assert_raises_rpc_error(-32600, 'Destination should not be set when source amount is a dToken', self.nodes[0].futureswap, self.address, f'1@{self.symbolTSLA}', int(self.idBTC))
assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 1.00000000', self.nodes[0].futureswap, address_twtr, f'1@{self.symbolTSLA}')
# Create user futures contracts
self.nodes[0].futureswap(address_twtr, f'1@{self.symbolTWTR}')
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_tsla, f'1@{self.symbolTSLA}')
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_googl, f'1@{self.symbolGOOGL}')
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_msft, f'1@{self.symbolMSFT}')
self.nodes[0].generate(1)
# List user futures contracts
result = self.nodes[0].listpendingfutureswaps()
assert_equal(result[0]['owner'], address_msft)
assert_equal(result[0]['source'], f'{Decimal("1.00000000")}@{self.symbolMSFT}')
assert_equal(result[0]['destination'], self.symbolDUSD)
assert_equal(result[1]['owner'], address_googl)
assert_equal(result[1]['source'], f'{Decimal("1.00000000")}@{self.symbolGOOGL}')
assert_equal(result[1]['destination'], self.symbolDUSD)
assert_equal(result[2]['owner'], address_tsla)
assert_equal(result[2]['source'], f'{Decimal("1.00000000")}@{self.symbolTSLA}')
assert_equal(result[2]['destination'], self.symbolDUSD)
assert_equal(result[3]['owner'], address_twtr)
assert_equal(result[3]['source'], f'{Decimal("1.00000000")}@{self.symbolTWTR}')
assert_equal(result[3]['destination'], self.symbolDUSD)
# Get user MSFT futures swap by address
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolMSFT}')
assert_equal(result['values'][0]['destination'], self.symbolDUSD)
# Get user GOOGL futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolGOOGL}')
assert_equal(result['values'][0]['destination'], self.symbolDUSD)
# Get user TSLA futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolTSLA}')
assert_equal(result['values'][0]['destination'], self.symbolDUSD)
# Get user TWTR futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolTWTR}')
assert_equal(result['values'][0]['destination'], self.symbolDUSD)
# Check DFI2203 amounts do not show up as burns yet
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [])
# Check DFI2203 address on listgovs, current shows pending, burn should be empty.
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
assert('v0/live/economy/dfip2203_burned' not in result)
assert('v0/live/economy/dfip2203_minted' not in result)
# Get token total minted before future swap
total_dusd = Decimal(self.nodes[0].gettoken(self.idDUSD)[self.idDUSD]['minted'])
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check total minted incremented as expected
new_total_dusd = Decimal(self.nodes[0].gettoken(self.idDUSD)[self.idDUSD]['minted'])
assert_equal(total_dusd + self.prices[0]["discountPrice"] + self.prices[1]["discountPrice"] + self.prices[2]["discountPrice"] + self.prices[3]["discountPrice"], new_total_dusd)
# Check TXN ordering
txn_first = 4294967295
result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'q'})
result.sort(key = sort_history, reverse = True)
for result_entry in result:
assert_equal(result_entry['txn'], txn_first)
txn_first -= 1
# Pending futures should now be empty
result = self.nodes[0].listpendingfutureswaps()
assert_equal(len(result), 0)
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(len(result['values']), 0)
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on listgovs
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on getburninfo
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check that futures have been executed
result = self.nodes[0].getaccount(address_msft)
assert_equal(result, [f'{self.prices[3]["discountPrice"]}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{self.prices[1]["discountPrice"]}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'{self.prices[0]["discountPrice"]}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_twtr)
assert_equal(result, [f'{self.prices[2]["discountPrice"]}@{self.symbolDUSD}'])
# Populate RPC check
self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
{'address': address_tsla, 'destination': f'{self.prices[0]["discountPrice"]}@{self.symbolDUSD}'},
{'address': address_googl, 'destination': f'{self.prices[1]["discountPrice"]}@{self.symbolDUSD}'},
{'address': address_twtr, 'destination': f'{self.prices[2]["discountPrice"]}@{self.symbolDUSD}'},
{'address': address_msft, 'destination': f'{self.prices[3]["discountPrice"]}@{self.symbolDUSD}'},
]})
def test_dusd_to_dtoken(self):
# Create addresses for futures
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_twtr = self.nodes[0].getnewaddress("", "legacy")
address_googl = self.nodes[0].getnewaddress("", "legacy")
address_msft = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_msft: f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', self.idMSFT)
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', self.idTWTR)
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', self.symbolGOOGL)
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', self.symbolTSLA)
self.nodes[0].generate(1)
# List user futures contracts
result = self.nodes[0].listpendingfutureswaps()
assert_equal(result[0]['owner'], address_tsla)
assert_equal(result[0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result[0]['destination'], self.symbolTSLA)
assert_equal(result[1]['owner'], address_googl)
assert_equal(result[1]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result[1]['destination'], self.symbolGOOGL)
assert_equal(result[2]['owner'], address_twtr)
assert_equal(result[2]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result[2]['destination'], self.symbolTWTR)
assert_equal(result[3]['owner'], address_msft)
assert_equal(result[3]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result[3]['destination'], self.symbolMSFT)
# Get user TSLA futures swap by address
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTSLA)
# Get user GOOGL futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(result['values'][0]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
# Get user TWTR futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
# Get user MSFT futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(result['values'][0]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolMSFT)
# Check new DFI2203 amounts do not show up as burns yet
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on listgovs, current shows pending if any, burned shows
# deposits from executed swaps and minted shows output from executed swaps.
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
assert_equal(result['v0/live/economy/dfip2203_burned'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
assert_equal(result['v0/live/economy/dfip2203_minted'], [f'{self.prices[0]["discountPrice"] + self.prices[1]["discountPrice"] + self.prices[2]["discountPrice"] + self.prices[3]["discountPrice"]}@{self.symbolDUSD}'])
# Get token total minted before future swap
total_tsla = Decimal(self.nodes[0].gettoken(self.idTSLA)[self.idTSLA]['minted'])
total_googl = Decimal(self.nodes[0].gettoken(self.idGOOGL)[self.idGOOGL]['minted'])
total_twtr = Decimal(self.nodes[0].gettoken(self.idTWTR)[self.idTWTR]['minted'])
total_msft = Decimal(self.nodes[0].gettoken(self.idMSFT)[self.idMSFT]['minted'])
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check minted totals incremented as expected
new_total_tsla = Decimal(self.nodes[0].gettoken(self.idTSLA)[self.idTSLA]['minted'])
new_total_googl = Decimal(self.nodes[0].gettoken(self.idGOOGL)[self.idGOOGL]['minted'])
new_total_twtr = Decimal(self.nodes[0].gettoken(self.idTWTR)[self.idTWTR]['minted'])
new_total_msft = Decimal(self.nodes[0].gettoken(self.idMSFT)[self.idMSFT]['minted'])
assert_equal(total_tsla + Decimal('1.00000000'), new_total_tsla)
assert_equal(total_googl + Decimal('1.00000000'), new_total_googl)
assert_equal(total_twtr + Decimal('1.00000000'), new_total_twtr)
assert_equal(total_msft + Decimal('1.00000000'), new_total_msft)
# Pending futures should now be empty
result = self.nodes[0].listpendingfutureswaps()
assert_equal(len(result), 0)
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(len(result['values']), 0)
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(len(result['values']), 0)
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on listgovs
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check DFI2203 address on getburninfo
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Check that futures have been executed
result = self.nodes[0].getaccount(address_msft)
assert_equal(result, [f'1.00000000@{self.symbolMSFT}'])
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'1.00000000@{self.symbolGOOGL}'])
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'1.00000000@{self.symbolTSLA}'])
result = self.nodes[0].getaccount(address_twtr)
assert_equal(result, [f'1.00000000@{self.symbolTWTR}'])
# Populate RPC check
self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
{'address': address_tsla, 'destination': f'1.00000000@{self.symbolTSLA}'},
{'address': address_googl, 'destination': f'1.00000000@{self.symbolGOOGL}'},
{'address': address_twtr, 'destination': f'1.00000000@{self.symbolTWTR}'},
{'address': address_msft, 'destination': f'1.00000000@{self.symbolMSFT}'},
]})
def check_swap_block_range(self):
# Create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Move to just before futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount() - 1)
# Create user futures contracts on futures block
self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Check that futures have been executed
result = self.nodes[0].getaccount(address)
assert_equal(result, [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}'])
# Check all pending swaps shows no entries
result = self.nodes[0].listpendingfutureswaps()
assert_equal(len(result), 0)
# Check user pending swaps is empty
result = self.nodes[0].getpendingfutureswaps(address)
assert_equal(len(result['values']), 0)
# Try and withdraw smallest amount now contract has been paid
assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 0.00000001', self.nodes[0].withdrawfutureswap, address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
# Populate RPC check
self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
{'address': address, 'destination': f'1.00000000@{self.symbolTSLA}'},
]})
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check that futures have not been executed again
result = self.nodes[0].getaccount(address)
assert_equal(result, [f'913.50000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'4905.60000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_multiple_swaps(self):
# Create addresses for futures
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_twtr = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create two user futures contracts
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].generate(1)
# Get user TSLA futures swap by address
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTSLA)
assert_equal(result['values'][1]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolTSLA)
# Get user TWTR futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check that futures have been executed
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'2.00000000@{self.symbolTSLA}'])
result = self.nodes[0].getaccount(address_twtr)
assert_equal(result, [f'2.00000000@{self.symbolTWTR}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'6810.30000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_withdrawals(self):
# Create addresses for futures
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_twtr = self.nodes[0].getnewaddress("", "legacy")
address_googl = self.nodes[0].getnewaddress("", "legacy")
address_msft = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_msft: f'{self.prices[3]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', int(self.idMSFT))
self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', int(self.idMSFT))
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Get user TSLA futures swaps by address
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTSLA)
assert_equal(result['values'][1]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolTSLA)
# Get user GOOGL futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(result['values'][0]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
assert_equal(result['values'][1]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolGOOGL)
        # Get user TWTR futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
assert_equal(result['values'][1]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolTWTR)
        # Get user MSFT futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(result['values'][0]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolMSFT)
assert_equal(result['values'][1]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolMSFT)
# Check withdrawal failures
assert_raises_rpc_error(-32600, f'amount 0.00000000 is less than {self.prices[2]["premiumPrice"] * 2}', self.nodes[0].withdrawfutureswap, address_tsla, f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}', int(self.idTWTR))
assert_raises_rpc_error(-32600, f'amount {self.prices[0]["premiumPrice"] * 2} is less than {(self.prices[0]["premiumPrice"] * 2) + Decimal("0.00000001")}', self.nodes[0].withdrawfutureswap, address_tsla, f'{(self.prices[0]["premiumPrice"] * 2) + Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
# Withdraw both TSLA contracts
self.nodes[0].withdrawfutureswap(address_tsla, f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Check user pending swap is empty
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(len(result['values']), 0)
        # Try to withdraw the smallest amount now that the contract is empty
assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 0.00000001', self.nodes[0].withdrawfutureswap, address_tsla, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
        # Withdraw everything from GOOGL but one Sat
self.nodes[0].withdrawfutureswap(address_googl, f'{(self.prices[1]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].generate(1)
# Check user pending swap
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(result['values'][0]['source'], f'0.00000001@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
# Withdraw one TWTR contract plus 1 Sat of the second one
self.nodes[0].withdrawfutureswap(address_twtr, f'{self.prices[2]["premiumPrice"] + Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].generate(1)
# Check user pending swap
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"] - Decimal("0.00000001")}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
# Withdraw one Sat
self.nodes[0].withdrawfutureswap(address_msft, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idMSFT))
self.nodes[0].generate(1)
# Check user pending swap
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(result['values'][0]['source'], f'{(self.prices[3]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolMSFT)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check final balances
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_twtr)
assert_equal(result, [f'{self.prices[2]["premiumPrice"] + Decimal("0.00000001")}@{self.symbolDUSD}', f'0.99999999@{self.symbolTWTR}'])
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{(self.prices[1]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_msft)
assert_equal(result, [f'0.00000001@{self.symbolDUSD}', f'1.99999999@{self.symbolMSFT}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFIP2203 address on listgovs
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFIP2203 address on getburninfo
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_minimum_swaps(self):
# Create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contract with 1 Satoshi
self.nodes[0].futureswap(address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check one Satoshi swap yields no TSLA
result = self.nodes[0].getaccount(address)
assert_equal(result, [f'{self.prices[0]["premiumPrice"] - Decimal("0.00000001")}@{self.symbolDUSD}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.65000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Create user futures contract to purchase one Satoshi of TSLA
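        # One Satoshi of TSLA costs premiumPrice / 100,000,000 DUSD at the futures swap rate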
min_purchase = round(self.prices[0]["premiumPrice"] / 100000000, 8)
self.nodes[0].futureswap(address, f'{min_purchase}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check one Satoshi swap yields one TSLA Satoshi
result = self.nodes[0].getaccount(address)
assert_equal(result, [f'{self.prices[0]["premiumPrice"] - Decimal("0.00000001") - Decimal(min_purchase)}@{self.symbolDUSD}', f'0.00000001@{self.symbolTSLA}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.65000914@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_gov_var_change(self):
# Set up for block range change, create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Move to before next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval)) - 1
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Create user futures contract with 1 Satoshi to invalidate block period change
self.nodes[0].futureswap(address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Check contract address has updated
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Test changing block period while DFIP2203 still active
assert_raises_rpc_error(-32600, 'Cannot set block period while DFIP2203 is active', self.nodes[0].setgov, {"ATTRIBUTES":{'v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
# Disable DFIP2203 to be able to change block period
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false'}})
self.nodes[0].generate(1)
# Check contract address has not changed, no refund on disabling DFIP2203.
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Now set the new block period
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
self.nodes[0].generate(1)
# Enable DFIP2203
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Create addresses
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_googl = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Disable DFIP2203
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false'}})
self.nodes[0].generate(1)
# Check TXN ordering on Gov var refunds
txn_first = 4294967295
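        # 4294967295 is the uint32 maximum; refund entries should occupy txn indices counting down from it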
result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'w'})
result.sort(key = sort_history, reverse = True)
for result_entry in result:
assert_equal(result_entry['blockHeight'], self.nodes[0].getblockcount())
assert_equal(result_entry['type'], 'FutureSwapRefund')
assert_equal(result_entry['txn'], txn_first)
txn_first -= 1
# Check other refund entries
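        # The order of the GOOGL and TSLA refund entries is not deterministic, so accept either ordering below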
assert_equal(result[0]['owner'], self.contract_address)
assert_equal(result[2]['owner'], self.contract_address)
if result[0]['amounts'] != [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}']:
assert_equal(result[0]['amounts'], [f'{-self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
if result[2]['amounts'] != [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}']:
assert_equal(result[2]['amounts'], [f'{-self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
if result[1]['owner'] == address_googl:
assert_equal(result[1]['amounts'], [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
else:
assert_equal(result[1]['owner'], address_tsla)
assert_equal(result[1]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
if result[3]['owner'] == address_googl:
assert_equal(result[3]['amounts'], [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
else:
assert_equal(result[3]['owner'], address_tsla)
assert_equal(result[3]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
# Balances should be restored
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
# Check contract address remains the same
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
# Enable DFIP2203
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Disable GOOGL
self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'false'}})
self.nodes[0].generate(1)
# Only TSLA contract should remain
result = self.nodes[0].listpendingfutureswaps()
assert_equal(len(result), 1)
assert_equal(result[0]['owner'], address_tsla)
assert_equal(result[0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result[0]['destination'], self.symbolTSLA)
# Balance should be restored
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
# TSLA balance should be empty
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [])
# Enable GOOGL
self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'true'}})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
self.nodes[0].generate(1)
# GOOGL balance should be empty
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [])
# Disable GOOGL
self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'false'}})
self.nodes[0].generate(1)
# Balance should be restored
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check all balances
result = self.nodes[0].getaccount(address_googl)
assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'1.00000000@{self.symbolTSLA}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFIP2203 address on listgovs
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFIP2203 address on getburninfo
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def unpaid_contract(self):
# Create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contract
self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].generate(1)
# Remove Oracle
self.nodes[0].removeoracle(self.oracle_id)
self.nodes[0].generate(1)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check refund in history
result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'w'})
result.sort(key = sort_history, reverse = True)
assert_equal(result[0]['owner'], self.contract_address)
assert_equal(result[0]['type'], 'FutureSwapRefund')
assert_equal(result[0]['amounts'], [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
assert_equal(result[1]['owner'], address)
assert_equal(result[1]['type'], 'FutureSwapRefund')
assert_equal(result[1]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
assert_equal(result[2]['owner'], self.contract_address)
assert_equal(result[2]['type'], 'FutureSwapRefund')
assert_equal(result[2]['amounts'], [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
assert_equal(result[3]['owner'], address)
assert_equal(result[3]['type'], 'FutureSwapRefund')
assert_equal(result[3]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
# Check user has been refunded
result = self.nodes[0].getaccount(address)
assert_equal(result, [f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFIP2203 address on listgovs
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFIP2203 address on getburninfo
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def rpc_history(self):
# Check some historical swaps
for history in self.list_history:
result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":history['height'], 'depth':0, 'txtype':'q'})
for history_entry in history['swaps']:
found = False
for result_entry in result:
assert_equal(history['height'], result_entry['blockHeight'])
if result_entry['owner'] == history_entry['address']:
assert_equal(result_entry['owner'], history_entry['address'])
assert_equal(result_entry['type'], 'FutureSwapExecution')
assert_equal(result_entry['amounts'], [history_entry['destination']])
found = True
assert(found)
# Check all swaps present
result = self.nodes[0].listaccounthistory('all', {'txtype':'q'})
assert_equal(len(result), 17)
# Check all swap refunds present
result = self.nodes[0].listaccounthistory('all', {'txtype':'w'})
assert_equal(len(result), 12)
# Check swap by specific address
result = self.nodes[0].listaccounthistory(self.list_history[0]['swaps'][0]['address'], {'txtype':'q'})
assert_equal(len(result), 1)
assert_equal(result[0]['blockHeight'], self.list_history[0]['height'])
assert_equal(result[0]['owner'], self.list_history[0]['swaps'][0]['address'])
assert_equal(result[0]['amounts'], [self.list_history[0]['swaps'][0]['destination']])
if __name__ == '__main__':
FuturesTest().main()
|
""" manage PyTables query interface via Expressions """
from __future__ import annotations
import ast
from functools import partial
from typing import Any
import numpy as np
from pandas._libs.tslibs import (
Timedelta,
Timestamp,
)
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
from pandas.core.computation import (
expr,
ops,
scope as _scope,
)
from pandas.core.computation.common import ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import (
UndefinedVariableError,
is_term,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.base import Index
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
class PyTablesScope(_scope.Scope):
__slots__ = ("queryables",)
queryables: dict[str, Any]
def __init__(
self,
level: int,
global_dict=None,
local_dict=None,
queryables: dict[str, Any] | None = None,
):
super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
self.queryables = queryables or {}
class Term(ops.Term):
env: PyTablesScope
def __new__(cls, name, env, side=None, encoding=None):
if isinstance(name, str):
klass = cls
else:
klass = Constant
return object.__new__(klass)
def __init__(self, name, env: PyTablesScope, side=None, encoding=None):
super().__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
        # must be a queryable
if self.side == "left":
# Note: The behavior of __new__ ensures that self.name is a str here
if self.name not in self.env.queryables:
raise NameError(f"name {repr(self.name)} is not defined")
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
# read-only property overwriting read/write property
@property # type: ignore[misc]
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env: PyTablesScope, side=None, encoding=None):
assert isinstance(env, PyTablesScope), type(env)
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
op: str
queryables: dict[str, Any]
condition: str | None
def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding):
super().__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
"""create and return a new specialized BinOp from myself"""
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if isinstance(right, ConditionBinOp):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if isinstance(right, FilterBinOp):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(
self.op, left, right, queryables=self.queryables, encoding=self.encoding
).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
"""inplace conform rhs"""
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self) -> bool:
"""return True if this is a valid field"""
return self.lhs in self.queryables
@property
def is_in_table(self) -> bool:
"""
return True if this is a valid column name for generation (e.g. an
actual column in the table)
"""
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
"""the kind of my field"""
return getattr(self.queryables.get(self.lhs), "kind", None)
@property
def meta(self):
"""the meta of my field"""
return getattr(self.queryables.get(self.lhs), "meta", None)
@property
def metadata(self):
"""the metadata of my field"""
return getattr(self.queryables.get(self.lhs), "metadata", None)
def generate(self, v) -> str:
"""create and return the op string for this TermValue"""
val = v.tostring(self.encoding)
return f"({self.lhs} {self.op} {val})"
def convert_value(self, v) -> TermValue:
"""
convert the expression that is in the term to something that is
accepted by pytables
"""
def stringify(value):
if self.encoding is not None:
return pprint_thing_encoded(value, encoding=self.encoding)
return pprint_thing(value)
kind = ensure_decoded(self.kind)
meta = ensure_decoded(self.meta)
if kind == "datetime64" or kind == "datetime":
if isinstance(v, (int, float)):
v = stringify(v)
v = ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert("UTC")
return TermValue(v, v.value, kind)
elif kind == "timedelta64" or kind == "timedelta":
if isinstance(v, str):
v = Timedelta(v).value
else:
v = Timedelta(v, unit="s").value
return TermValue(int(v), v, kind)
elif meta == "category":
metadata = extract_array(self.metadata, extract_numpy=True)
if v not in metadata:
result = -1
else:
# error: Incompatible types in assignment (expression has type
# "Union[Any, ndarray]", variable has type "int")
result = metadata.searchsorted( # type: ignore[assignment]
v, side="left"
)
return TermValue(result, result, "integer")
elif kind == "integer":
v = int(float(v))
return TermValue(v, v, kind)
elif kind == "float":
v = float(v)
return TermValue(v, v, kind)
elif kind == "bool":
if isinstance(v, str):
v = not v.strip().lower() in [
"false",
"f",
"no",
"n",
"none",
"0",
"[]",
"{}",
"",
]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, str):
# string quoting
return TermValue(v, stringify(v), "string")
else:
raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")
def convert_values(self):
pass
class FilterBinOp(BinOp):
filter: tuple[Any, Any, Index] | None = None
def __repr__(self) -> str:
if self.filter is None:
return "Filter: Not Initialized"
return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")
def invert(self):
"""invert the filter"""
if self.filter is not None:
self.filter = (
self.filter[0],
self.generate_filter_op(invert=True),
self.filter[2],
)
return self
def format(self):
"""return the actual filter format"""
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
rhs = self.conform(self.rhs)
values = list(rhs)
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ["==", "!="] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, Index(values))
return self
return None
# equality conditions
if self.op in ["==", "!="]:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, Index(values))
else:
raise TypeError(
f"passing a filterable condition to a non-table indexer [{self}]"
)
return self
def generate_filter_op(self, invert: bool = False):
if (self.op == "!=" and not invert) or (self.op == "==" and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __repr__(self) -> str:
return pprint_thing(f"[Condition : [{self.condition}]]")
def invert(self):
"""invert the condition"""
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError(
"cannot use an invert condition when passing to numexpr"
)
def format(self):
"""return the actual ne format"""
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ["==", "!="]:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = f"({" | ".join(vs)})"
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != "~":
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
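        # Only invert when pruning produced the requested kind of op with something to invert:
        # a ConditionBinOp with a condition set, or a FilterBinOp with a filter set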
if operand is not None and (
issubclass(klass, ConditionBinOp)
and operand.condition is not None
or not issubclass(klass, ConditionBinOp)
and issubclass(klass, FilterBinOp)
and operand.filter is not None
):
return operand.invert()
return None
class PyTablesExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super().__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(
self,
f"visit_{bin_node}",
lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
)
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp("~", self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError("Unary addition not supported")
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(
ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
)
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
# only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
if isinstance(slobj, Term):
# In py39 np.ndarray lookups with Term containing int raise
slobj = slobj.value
try:
return self.const_type(value[slobj], self.env)
except TypeError as err:
raise ValueError(
f"cannot subscript {repr(value)} with {repr(slobj)}"
) from err
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = type(node.ctx)
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError(f"Invalid Attribute context {ctx.__name__}")
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
raise TypeError(
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
return w
class PyTablesExpr(expr.Expr):
"""
Hold a pytables-like expression, comprised of possibly multiple 'terms'.
Parameters
----------
where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
a PyTablesExpr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
_visitor: PyTablesExprVisitor | None
env: PyTablesScope
expr: str
def __init__(
self,
where,
queryables: dict[str, Any] | None = None,
encoding=None,
scope_level: int = 0,
):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict: DeepChainMap[Any, Any] = DeepChainMap()
if isinstance(where, PyTablesExpr):
local_dict = where.env.scope
_where = where.expr
elif is_list_like(where):
where = list(where)
for idx, w in enumerate(where):
if isinstance(w, PyTablesExpr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
_where = " & ".join(f"({w})" for w in com.flatten(where))
else:
# _validate_where ensures we otherwise have a string
_where = where
self.expr = _where
self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, str):
self.env.queryables.update(queryables)
self._visitor = PyTablesExprVisitor(
self.env,
queryables=queryables,
parser="pytables",
engine="pytables",
encoding=encoding,
)
self.terms = self.parse()
def __repr__(self) -> str:
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
"""create and return the numexpr condition and filter"""
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError as err:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid condition"
) from err
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError as err:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid filter"
) from err
return self.condition, self.filter
class TermValue:
"""hold a term value the we use to construct a condition/filter"""
def __init__(self, value, converted, kind: str):
assert isinstance(kind, str), kind
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding) -> str:
"""quote the string if not encoded else encode and return"""
if self.kind == "string":
if encoding is not None:
return str(self.converted)
return f'"{self.converted}"'
elif self.kind == "float":
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return str(self.converted)
def maybe_expression(s) -> bool:
"""loose checking if s is a pytables-acceptable expression"""
if not isinstance(s, str):
return False
ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
# make sure we have an op at least
return any(op in s for op in ops)
| """ manage PyTables query interface via Expressions """
from __future__ import annotations
import ast
from functools import partial
from typing import Any
import numpy as np
from pandas._libs.tslibs import (
Timedelta,
Timestamp,
)
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
from pandas.core.computation import (
expr,
ops,
scope as _scope,
)
from pandas.core.computation.common import ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import (
UndefinedVariableError,
is_term,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.base import Index
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
class PyTablesScope(_scope.Scope):
__slots__ = ("queryables",)
queryables: dict[str, Any]
def __init__(
self,
level: int,
global_dict=None,
local_dict=None,
queryables: dict[str, Any] | None = None,
):
super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
self.queryables = queryables or {}
class Term(ops.Term):
env: PyTablesScope
def __new__(cls, name, env, side=None, encoding=None):
if isinstance(name, str):
klass = cls
else:
klass = Constant
return object.__new__(klass)
def __init__(self, name, env: PyTablesScope, side=None, encoding=None):
super().__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
        # must be a queryable
if self.side == "left":
# Note: The behavior of __new__ ensures that self.name is a str here
if self.name not in self.env.queryables:
raise NameError(f"name {repr(self.name)} is not defined")
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
# read-only property overwriting read/write property
@property # type: ignore[misc]
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env: PyTablesScope, side=None, encoding=None):
assert isinstance(env, PyTablesScope), type(env)
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
op: str
queryables: dict[str, Any]
condition: str | None
def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding):
super().__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
"""create and return a new specialized BinOp from myself"""
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if isinstance(right, ConditionBinOp):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if isinstance(right, FilterBinOp):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(
self.op, left, right, queryables=self.queryables, encoding=self.encoding
).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
"""inplace conform rhs"""
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self) -> bool:
"""return True if this is a valid field"""
return self.lhs in self.queryables
@property
def is_in_table(self) -> bool:
"""
return True if this is a valid column name for generation (e.g. an
actual column in the table)
"""
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
"""the kind of my field"""
return getattr(self.queryables.get(self.lhs), "kind", None)
@property
def meta(self):
"""the meta of my field"""
return getattr(self.queryables.get(self.lhs), "meta", None)
@property
def metadata(self):
"""the metadata of my field"""
return getattr(self.queryables.get(self.lhs), "metadata", None)
def generate(self, v) -> str:
"""create and return the op string for this TermValue"""
val = v.tostring(self.encoding)
return f"({self.lhs} {self.op} {val})"
def convert_value(self, v) -> TermValue:
"""
convert the expression that is in the term to something that is
accepted by pytables
"""
def stringify(value):
if self.encoding is not None:
return pprint_thing_encoded(value, encoding=self.encoding)
return pprint_thing(value)
kind = ensure_decoded(self.kind)
meta = ensure_decoded(self.meta)
if kind == "datetime64" or kind == "datetime":
if isinstance(v, (int, float)):
v = stringify(v)
v = ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert("UTC")
return TermValue(v, v.value, kind)
elif kind == "timedelta64" or kind == "timedelta":
if isinstance(v, str):
v = Timedelta(v).value
else:
v = Timedelta(v, unit="s").value
return TermValue(int(v), v, kind)
elif meta == "category":
metadata = extract_array(self.metadata, extract_numpy=True)
if v not in metadata:
result = -1
else:
# error: Incompatible types in assignment (expression has type
# "Union[Any, ndarray]", variable has type "int")
result = metadata.searchsorted( # type: ignore[assignment]
v, side="left"
)
return TermValue(result, result, "integer")
elif kind == "integer":
v = int(float(v))
return TermValue(v, v, kind)
elif kind == "float":
v = float(v)
return TermValue(v, v, kind)
elif kind == "bool":
if isinstance(v, str):
v = not v.strip().lower() in [
"false",
"f",
"no",
"n",
"none",
"0",
"[]",
"{}",
"",
]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, str):
# string quoting
return TermValue(v, stringify(v), "string")
else:
raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")
def convert_values(self):
pass
class FilterBinOp(BinOp):
filter: tuple[Any, Any, Index] | None = None
def __repr__(self) -> str:
if self.filter is None:
return "Filter: Not Initialized"
return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")
def invert(self):
"""invert the filter"""
if self.filter is not None:
self.filter = (
self.filter[0],
self.generate_filter_op(invert=True),
self.filter[2],
)
return self
def format(self):
"""return the actual filter format"""
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
rhs = self.conform(self.rhs)
values = list(rhs)
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ["==", "!="] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, Index(values))
return self
return None
# equality conditions
if self.op in ["==", "!="]:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, Index(values))
else:
raise TypeError(
f"passing a filterable condition to a non-table indexer [{self}]"
)
return self
def generate_filter_op(self, invert: bool = False):
if (self.op == "!=" and not invert) or (self.op == "==" and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __repr__(self) -> str:
return pprint_thing(f"[Condition : [{self.condition}]]")
def invert(self):
"""invert the condition"""
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError(
"cannot use an invert condition when passing to numexpr"
)
def format(self):
"""return the actual ne format"""
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ["==", "!="]:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = f"({' | '.join(vs)})"
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != "~":
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None and (
issubclass(klass, ConditionBinOp)
and operand.condition is not None
or not issubclass(klass, ConditionBinOp)
and issubclass(klass, FilterBinOp)
and operand.filter is not None
):
return operand.invert()
return None
class PyTablesExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super().__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(
self,
f"visit_{bin_node}",
lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
)
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp("~", self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError("Unary addition not supported")
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(
ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
)
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
# only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
if isinstance(slobj, Term):
# In py39 np.ndarray lookups with Term containing int raise
slobj = slobj.value
try:
return self.const_type(value[slobj], self.env)
except TypeError as err:
raise ValueError(
f"cannot subscript {repr(value)} with {repr(slobj)}"
) from err
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = type(node.ctx)
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError(f"Invalid Attribute context {ctx.__name__}")
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
raise TypeError(
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
return w
class PyTablesExpr(expr.Expr):
"""
Hold a pytables-like expression, comprised of possibly multiple 'terms'.
Parameters
----------
where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
a PyTablesExpr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
_visitor: PyTablesExprVisitor | None
env: PyTablesScope
expr: str
def __init__(
self,
where,
queryables: dict[str, Any] | None = None,
encoding=None,
scope_level: int = 0,
):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict: DeepChainMap[Any, Any] = DeepChainMap()
if isinstance(where, PyTablesExpr):
local_dict = where.env.scope
_where = where.expr
elif is_list_like(where):
where = list(where)
for idx, w in enumerate(where):
if isinstance(w, PyTablesExpr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
_where = " & ".join(f"({w})" for w in com.flatten(where))
else:
# _validate_where ensures we otherwise have a string
_where = where
self.expr = _where
self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, str):
self.env.queryables.update(queryables)
self._visitor = PyTablesExprVisitor(
self.env,
queryables=queryables,
parser="pytables",
engine="pytables",
encoding=encoding,
)
self.terms = self.parse()
def __repr__(self) -> str:
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
"""create and return the numexpr condition and filter"""
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError as err:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid condition"
) from err
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError as err:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid filter"
) from err
return self.condition, self.filter
class TermValue:
"""hold a term value the we use to construct a condition/filter"""
def __init__(self, value, converted, kind: str):
assert isinstance(kind, str), kind
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding) -> str:
"""quote the string if not encoded else encode and return"""
if self.kind == "string":
if encoding is not None:
return str(self.converted)
return f'"{self.converted}"'
elif self.kind == "float":
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return str(self.converted)
def maybe_expression(s) -> bool:
"""loose checking if s is a pytables-acceptable expression"""
if not isinstance(s, str):
return False
ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
# make sure we have an op at least
return any(op in s for op in ops)
|
"""
The typing module: Support for gradual typing as defined by PEP 484.
At large scale, the structure of the module is following:
* Imports and exports, all public names should be explicitly added to __all__.
* Internal helper functions: these should never be used in code outside this module.
* _SpecialForm and its instances (special forms):
Any, NoReturn, ClassVar, Union, Optional, Concatenate
* Classes whose instances can be type arguments in addition to types:
ForwardRef, TypeVar and ParamSpec
* The core of internal generics API: _GenericAlias and _VariadicGenericAlias, the latter is
currently only used by Tuple and Callable. All subscripted types like X[int], Union[int, str],
etc., are instances of either of these classes.
* The public counterpart of the generics API consists of two classes: Generic and Protocol.
* Public helper functions: get_type_hints, overload, cast, no_type_check,
no_type_check_decorator.
* Generic aliases for collections.abc ABCs and few additional protocols.
* Special types: NewType, NamedTuple, TypedDict.
* Wrapper submodules for re and io related types.
"""
from abc import abstractmethod, ABCMeta
import collections
import collections.abc
import contextlib
import functools
import operator
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
import warnings
from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias
try:
from _typing import _idfunc
except ImportError:
def _idfunc(_, x):
return x
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'Annotated',
'Any',
'Callable',
'ClassVar',
'Concatenate',
'Final',
'ForwardRef',
'Generic',
'Literal',
'Optional',
'ParamSpec',
'Protocol',
'Tuple',
'Type',
'TypeVar',
'Union',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'ByteString',
'Container',
'ContextManager',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'Collection',
'AsyncGenerator',
'AsyncContextManager',
# Structural checks, a.k.a. protocols.
'Reversible',
'SupportsAbs',
'SupportsBytes',
'SupportsComplex',
'SupportsFloat',
'SupportsIndex',
'SupportsInt',
'SupportsRound',
# Concrete collection types.
'ChainMap',
'Counter',
'Deque',
'Dict',
'DefaultDict',
'List',
'OrderedDict',
'Set',
'FrozenSet',
'NamedTuple', # Not really a type.
'TypedDict', # Not really a type.
'Generator',
# Other concrete types.
'BinaryIO',
'IO',
'Match',
'Pattern',
'TextIO',
# One-off things.
'AnyStr',
'cast',
'final',
'get_args',
'get_origin',
'get_type_hints',
'is_typeddict',
'NewType',
'no_type_check',
'no_type_check_decorator',
'NoReturn',
'overload',
'ParamSpecArgs',
'ParamSpecKwargs',
'reveal_type',
'runtime_checkable',
'Text',
'TYPE_CHECKING',
'TypeAlias',
'TypeGuard',
]
# The pseudo-submodules 're' and 'io' are part of the public
# namespace, but excluded from __all__ because they might stomp on
# legitimate imports of those modules.
def _type_convert(arg, module=None, *, allow_special_forms=False):
"""For converting None to type(None), and strings to ForwardRef."""
if arg is None:
return type(None)
if isinstance(arg, str):
return ForwardRef(arg, module=module, is_class=allow_special_forms)
return arg
def _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False):
"""Check that the argument is a type, and return it (internal helper).
As a special case, accept None and return type(None) instead. Also wrap strings
into ForwardRef instances. Consider several corner cases, for example plain
special forms like Union are not valid, while Union[int, str] is OK, etc.
The msg argument is a human-readable error message, e.g::
"Union[arg, ...]: arg should be a type."
We append the repr() of the actual value (truncated to 100 chars).
"""
invalid_generic_forms = (Generic, Protocol)
if not allow_special_forms:
invalid_generic_forms += (ClassVar,)
if is_argument:
invalid_generic_forms += (Final,)
arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms)
if (isinstance(arg, _GenericAlias) and
arg.__origin__ in invalid_generic_forms):
raise TypeError(f"{arg} is not valid as type argument")
if arg in (Any, NoReturn, ClassVar, Final, TypeAlias):
return arg
if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
raise TypeError(f"Plain {arg} is not valid as type argument")
if isinstance(arg, (type, TypeVar, ForwardRef, types.UnionType, ParamSpec)):
return arg
if not callable(arg):
raise TypeError(f"{msg} Got {arg!r:.100}.")
return arg
def _is_param_expr(arg):
return arg is ... or isinstance(arg,
(tuple, list, ParamSpec, _ConcatenateGenericAlias))
def _type_repr(obj):
"""Return the repr() of an object, special-casing types (internal helper).
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, types.GenericAlias):
return repr(obj)
if isinstance(obj, type):
if obj.__module__ == 'builtins':
return obj.__qualname__
return f'{obj.__module__}.{obj.__qualname__}'
if obj is ...:
        return '...'
if isinstance(obj, types.FunctionType):
return obj.__name__
return repr(obj)
def _collect_type_vars(types_, typevar_types=None):
"""Collect all type variable contained
in types in order of first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
"""
if typevar_types is None:
typevar_types = TypeVar
tvars = []
for t in types_:
if isinstance(t, typevar_types) and t not in tvars:
tvars.append(t)
if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
def _check_generic(cls, parameters, elen):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
alen = len(parameters)
if alen != elen:
raise TypeError(f"Too {"many" if alen > elen else "few"} arguments for {cls};"
f" actual {alen}, expected {elen}")
def _prepare_paramspec_params(cls, params):
"""Prepares the parameters for a Generic containing ParamSpec
variables (internal helper).
"""
# Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
if (len(cls.__parameters__) == 1
and params and not _is_param_expr(params[0])):
assert isinstance(cls.__parameters__[0], ParamSpec)
return (params,)
else:
_check_generic(cls, params, len(cls.__parameters__))
_params = []
# Convert lists to tuples to help other libraries cache the results.
for p, tvar in zip(params, cls.__parameters__):
if isinstance(tvar, ParamSpec) and isinstance(p, list):
p = tuple(p)
_params.append(p)
return tuple(_params)
def _deduplicate(params):
# Weed out strict duplicates, preserving the first of each occurrence.
all_params = set(params)
if len(all_params) < len(params):
new_params = []
for t in params:
if t in all_params:
new_params.append(t)
all_params.remove(t)
params = new_params
assert not all_params, all_params
return params
def _remove_dups_flatten(parameters):
"""An internal helper for Union creation and substitution: flatten Unions
among parameters, then remove duplicates.
"""
# Flatten out Union[Union[...], ...].
params = []
for p in parameters:
if isinstance(p, (_UnionGenericAlias, types.UnionType)):
params.extend(p.__args__)
else:
params.append(p)
return tuple(_deduplicate(params))
def _flatten_literal_params(parameters):
"""An internal helper for Literal creation: flatten Literals among parameters"""
params = []
for p in parameters:
if isinstance(p, _LiteralGenericAlias):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
_cleanups = []
def _tp_cache(func=None, /, *, typed=False):
"""Internal wrapper caching __getitem__ of generic types with a fallback to
original function for non-hashable arguments.
"""
def decorator(func):
cached = functools.lru_cache(typed=typed)(func)
_cleanups.append(cached.cache_clear)
@functools.wraps(func)
def inner(*args, **kwds):
try:
return cached(*args, **kwds)
except TypeError:
pass # All real errors (not unhashable args) are raised below.
return func(*args, **kwds)
return inner
if func is not None:
return decorator(func)
return decorator
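# Illustrative sketch (added for clarity; not part of the upstream typing module):
# the observable effect of _tp_cache. Hashable subscriptions are memoized, so
# repeated subscription returns the identical alias object; unhashable arguments
# silently bypass the cache instead of raising. ``List`` and ``Literal`` are
# defined later in this file, so call this only after import completes.
def _example_tp_cache():
    assert List[int] is List[int]          # cached: the very same alias object
    Literal[{"answer": 42}]                # unhashable value: uncached, but still works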
def _eval_type(t, globalns, localns, recursive_guard=frozenset()):
"""Evaluate all forward references in the given type t.
For use of globalns and localns see the docstring for get_type_hints().
    recursive_guard is used to prevent infinite recursion
    with a recursive ForwardRef.
"""
if isinstance(t, ForwardRef):
return t._evaluate(globalns, localns, recursive_guard)
if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)
if ev_args == t.__args__:
return t
if isinstance(t, GenericAlias):
return GenericAlias(t.__origin__, ev_args)
if isinstance(t, types.UnionType):
return functools.reduce(operator.or_, ev_args)
else:
return t.copy_with(ev_args)
return t
class _Final:
"""Mixin to prohibit subclassing"""
__slots__ = ('__weakref__',)
def __init_subclass__(cls, /, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
# Internal indicator of special typing constructs.
# See __doc__ instance attribute for specific docs.
class _SpecialForm(_Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return 'typing.' + self._name
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@_tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
class _LiteralSpecialForm(_SpecialForm, _root=True):
def __getitem__(self, parameters):
if not isinstance(parameters, tuple):
parameters = (parameters,)
return self._getitem(self, *parameters)
@_SpecialForm
def Any(self, parameters):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
    - Any is assumed to have all methods.
    - All values are assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
or class checks.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def NoReturn(self, parameters):
"""Special type indicating functions that never return.
Example::
from typing import NoReturn
def stop() -> NoReturn:
raise Exception('no way')
This type is invalid in other positions, e.g., ``List[NoReturn]``
will fail in static type checkers.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def ClassVar(self, parameters):
"""Special type construct to mark class variables.
An annotation wrapped in ClassVar indicates that a given
attribute is intended to be used as a class variable and
should not be set on instances of that class. Usage::
class Starship:
stats: ClassVar[Dict[str, int]] = {} # class variable
damage: int = 10 # instance variable
ClassVar accepts only types and cannot be further subscribed.
Note that ClassVar is not a class itself, and should not
be used with isinstance() or issubclass().
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
@_SpecialForm
def Final(self, parameters):
"""Special typing construct to indicate final names to type checkers.
A final name cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
@_SpecialForm
def Union(self, parameters):
"""Union type; Union[X, Y] means either X or Y.
To define a union, use e.g. Union[int, str]. Details:
- The arguments must be types and there must be at least one.
- None as an argument is a special case and is replaced by
type(None).
- Unions of unions are flattened, e.g.::
Union[Union[int, str], float] == Union[int, str, float]
- Unions of a single argument vanish, e.g.::
Union[int] == int # The constructor actually returns int
- Redundant arguments are skipped, e.g.::
Union[int, str, int] == Union[int, str]
- When comparing unions, the argument order is ignored, e.g.::
Union[int, str] == Union[str, int]
- You cannot subclass or instantiate a union.
- You can use Optional[X] as a shorthand for Union[X, None].
"""
if parameters == ():
raise TypeError("Cannot take a Union of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
msg = "Union[arg, ...]: each arg must be a type."
parameters = tuple(_type_check(p, msg) for p in parameters)
parameters = _remove_dups_flatten(parameters)
if len(parameters) == 1:
return parameters[0]
if len(parameters) == 2 and type(None) in parameters:
return _UnionGenericAlias(self, parameters, name="Optional")
return _UnionGenericAlias(self, parameters)
@_SpecialForm
def Optional(self, parameters):
"""Optional type.
Optional[X] is equivalent to Union[X, None].
"""
arg = _type_check(parameters, f"{self} requires a single type.")
return Union[arg, type(None)]
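# Illustrative sketch (added for clarity; not part of the upstream typing module):
# the Union simplification rules documented above, checked at runtime, plus the
# Optional shorthand. Call only after the module has finished loading.
def _example_union_and_optional():
    assert Union[int] is int                                         # single argument collapses
    assert Union[int, str, int] == Union[int, str]                   # duplicates are removed
    assert Union[int, str] == Union[str, int]                        # argument order is ignored
    assert Union[Union[int, str], float] == Union[int, str, float]   # nested unions flatten
    assert Optional[int] == Union[int, None] == Union[int, type(None)]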
@_LiteralSpecialForm
@_tp_cache(typed=True)
def Literal(self, *parameters):
"""Special typing form to define literal types (a.k.a. value types).
This form can be used to indicate to type checkers that the corresponding
variable or function parameter has a value equivalent to the provided
literal (or one of several literals):
def validate_simple(data: Any) -> Literal[True]: # always returns True
...
MODE = Literal['r', 'rb', 'w', 'wb']
def open_helper(file: str, mode: MODE) -> str:
...
open_helper('/some/path', 'r') # Passes type check
open_helper('/other/path', 'typo') # Error in type checker
Literal[...] cannot be subclassed. At runtime, an arbitrary value
is allowed as type argument to Literal[...], but type checkers may
impose restrictions.
"""
# There is no '_type_check' call because arguments to Literal[...] are
# values, not types.
parameters = _flatten_literal_params(parameters)
try:
parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters))))
except TypeError: # unhashable parameters
pass
return _LiteralGenericAlias(self, parameters)
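# Illustrative sketch (added for clarity; not part of the upstream typing module):
# runtime behaviour of Literal as described above -- de-duplication, equality
# keyed on (value, type) pairs regardless of order, and flattening of nested
# Literals.
def _example_literal():
    assert Literal[1, 2, 1] == Literal[1, 2]                          # duplicates removed
    assert Literal[1, 2] == Literal[2, 1]                             # order is ignored
    assert Literal[0] != Literal[False]                               # 0 and False differ by type
    assert Literal[Literal['r', 'w'], 'x'] == Literal['r', 'w', 'x']  # nesting flattens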
@_SpecialForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1])
return _ConcatenateGenericAlias(self, parameters)
@_SpecialForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
class ForwardRef(_Final, _root=True):
"""Internal wrapper to hold a forward reference."""
__slots__ = ('__forward_arg__', '__forward_code__',
'__forward_evaluated__', '__forward_value__',
'__forward_is_argument__', '__forward_is_class__',
'__forward_module__')
def __init__(self, arg, is_argument=True, module=None, *, is_class=False):
if not isinstance(arg, str):
raise TypeError(f"Forward reference must be a string -- got {arg!r}")
try:
code = compile(arg, '<string>', 'eval')
except SyntaxError:
raise SyntaxError(f"Forward reference must be an expression -- got {arg!r}")
self.__forward_arg__ = arg
self.__forward_code__ = code
self.__forward_evaluated__ = False
self.__forward_value__ = None
self.__forward_is_argument__ = is_argument
self.__forward_is_class__ = is_class
self.__forward_module__ = module
def _evaluate(self, globalns, localns, recursive_guard):
if self.__forward_arg__ in recursive_guard:
return self
if not self.__forward_evaluated__ or localns is not globalns:
if globalns is None and localns is None:
globalns = localns = {}
elif globalns is None:
globalns = localns
elif localns is None:
localns = globalns
if self.__forward_module__ is not None:
globalns = getattr(
sys.modules.get(self.__forward_module__, None), '__dict__', globalns
)
type_ = _type_check(
eval(self.__forward_code__, globalns, localns),
"Forward references must evaluate to types.",
is_argument=self.__forward_is_argument__,
allow_special_forms=self.__forward_is_class__,
)
self.__forward_value__ = _eval_type(
type_, globalns, localns, recursive_guard | {self.__forward_arg__}
)
self.__forward_evaluated__ = True
return self.__forward_value__
def __eq__(self, other):
if not isinstance(other, ForwardRef):
return NotImplemented
if self.__forward_evaluated__ and other.__forward_evaluated__:
return (self.__forward_arg__ == other.__forward_arg__ and
self.__forward_value__ == other.__forward_value__)
return self.__forward_arg__ == other.__forward_arg__
def __hash__(self):
return hash(self.__forward_arg__)
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
def __repr__(self):
return f'ForwardRef({self.__forward_arg__!r})'
class _TypeVarLike:
"""Mixin for TypeVar-like types (TypeVar and ParamSpec)."""
def __init__(self, bound, covariant, contravariant):
"""Used to setup TypeVars and ParamSpec's bound, covariant and
contravariant attributes.
"""
if covariant and contravariant:
raise ValueError("Bivariant types are not supported.")
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if bound:
self.__bound__ = _type_check(bound, "Bound must be a type.")
else:
self.__bound__ = None
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __reduce__(self):
return self.__name__
class TypeVar(_Final, _Immutable, _TypeVarLike, _root=True):
"""Type variable.
Usage::
T = TypeVar('T') # Can be anything
A = TypeVar('A', str, bytes) # Must be str or bytes
Type variables exist primarily for the benefit of static type
checkers. They serve as the parameters for generic types as well
as for generic function definitions. See class Generic for more
information on generic types. Generic functions work as follows:
def repeat(x: T, n: int) -> List[T]:
'''Return a list containing n references to x.'''
return [x]*n
def longest(x: A, y: A) -> A:
'''Return the longest of two strings.'''
return x if len(x) >= len(y) else y
The latter example's signature is essentially the overloading
of (str, str) -> str and (bytes, bytes) -> bytes. Also note
that if the arguments are instances of some subclass of str,
the return type is still plain str.
At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError.
Type variables defined with covariant=True or contravariant=True
can be used to declare covariant or contravariant generic types.
See PEP 484 for more details. By default generic types are invariant
in all type variables.
Type variables can be introspected. e.g.:
T.__name__ == 'T'
T.__constraints__ == ()
T.__covariant__ == False
      T.__contravariant__ == False
A.__constraints__ == (str, bytes)
Note that only type variables defined in global scope can be pickled.
"""
def __init__(self, name, *constraints, bound=None,
covariant=False, contravariant=False):
self.__name__ = name
super().__init__(bound, covariant, contravariant)
if constraints and bound is not None:
raise TypeError("Constraints cannot be combined with bound=...")
if constraints and len(constraints) == 1:
raise TypeError("A single constraint is not allowed")
msg = "TypeVar(name, constraint, ...): constraints must be types."
self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
class ParamSpecArgs(_Final, _Immutable, _root=True):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
class ParamSpecKwargs(_Final, _Immutable, _root=True):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
class ParamSpec(_Final, _Immutable, _TypeVarLike, _root=True):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
or as the first argument to ``Callable``, or as parameters for user-defined
Generics. See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
       P.__name__ == 'P'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
self.__name__ = name
super().__init__(bound, covariant, contravariant)
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
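# Illustrative sketch (added for clarity; not part of the upstream typing module):
# the runtime introspection promised by ParamSpecArgs and ParamSpecKwargs above --
# ``P.args`` and ``P.kwargs`` keep a reference back to their ParamSpec.
def _example_paramspec_components():
    P = ParamSpec('P')
    assert P.args.__origin__ is P
    assert P.kwargs.__origin__ is P
    assert repr(P.args) == 'P.args' and repr(P.kwargs) == 'P.kwargs'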
def _is_dunder(attr):
return attr.startswith('__') and attr.endswith('__')
class _BaseGenericAlias(_Final, _root=True):
"""The central part of internal API.
This represents a generic version of type 'origin' with type arguments 'params'.
    There are two kinds of these aliases: user-defined and special. The special ones
    are wrappers around builtin collections and ABCs in collections.abc. These must
    have 'name' always set. If 'inst' is False, then the alias can't be instantiated;
    this is used by e.g. typing.List and typing.Dict.
"""
def __init__(self, origin, *, inst=True, name=None):
self._inst = inst
self._name = name
self.__origin__ = origin
self.__slots__ = None # This is not documented.
def __call__(self, *args, **kwargs):
if not self._inst:
raise TypeError(f"Type {self._name} cannot be instantiated; "
f"use {self.__origin__.__name__}() instead")
result = self.__origin__(*args, **kwargs)
try:
result.__orig_class__ = self
except AttributeError:
pass
return result
def __mro_entries__(self, bases):
res = []
if self.__origin__ not in bases:
res.append(self.__origin__)
i = bases.index(self)
for b in bases[i+1:]:
if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):
break
else:
res.append(Generic)
return tuple(res)
def __getattr__(self, attr):
if attr in {'__name__', '__qualname__'}:
return self._name or self.__origin__.__name__
# We are careful for copy and pickle.
# Also for simplicity we just don't relay all dunder names
if '__origin__' in self.__dict__ and not _is_dunder(attr):
return getattr(self.__origin__, attr)
raise AttributeError(attr)
def __setattr__(self, attr, val):
if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams',
'_typevar_types', '_paramspec_tvars'}:
super().__setattr__(attr, val)
else:
setattr(self.__origin__, attr, val)
def __instancecheck__(self, obj):
return self.__subclasscheck__(type(obj))
def __subclasscheck__(self, cls):
raise TypeError("Subscripted generics cannot be used with"
" class and instance checks")
def __dir__(self):
return list(set(super().__dir__()
+ [attr for attr in dir(self.__origin__) if not _is_dunder(attr)]))
# Special typing constructs Union, Optional, Generic, Callable and Tuple
# use three special attributes for internal bookkeeping of generic types:
# * __parameters__ is a tuple of unique free type parameters of a generic
# type, for example, Dict[T, T].__parameters__ == (T,);
# * __origin__ keeps a reference to a type that was subscripted,
# e.g., Union[T, int].__origin__ == Union, or the non-generic version of
# the type.
# * __args__ is a tuple of all arguments used in subscripting,
# e.g., Dict[T, int].__args__ == (T, int).
class _GenericAlias(_BaseGenericAlias, _root=True):
# The type of parameterized generics.
#
# That is, for example, `type(List[int])` is `_GenericAlias`.
#
# Objects which are instances of this class include:
# * Parameterized container types, e.g. `Tuple[int]`, `List[int]`.
# * Note that native container types, e.g. `tuple`, `list`, use
# `types.GenericAlias` instead.
# * Parameterized classes:
# T = TypeVar('T')
# class C(Generic[T]): pass
# # C[int] is a _GenericAlias
# * `Callable` aliases, generic `Callable` aliases, and
# parameterized `Callable` aliases:
# T = TypeVar('T')
# # _CallableGenericAlias inherits from _GenericAlias.
# A = Callable[[], None] # _CallableGenericAlias
# B = Callable[[T], None] # _CallableGenericAlias
# C = B[int] # _CallableGenericAlias
# * Parameterized `Final`, `ClassVar` and `TypeGuard`:
# # All _GenericAlias
# Final[int]
# ClassVar[float]
    #     TypeGuard[bool]
def __init__(self, origin, args, *, inst=True, name=None,
_typevar_types=TypeVar,
_paramspec_tvars=False):
super().__init__(origin, inst=inst, name=name)
if not isinstance(args, tuple):
args = (args,)
self.__args__ = tuple(... if a is _TypingEllipsis else
() if a is _TypingEmpty else
a for a in args)
self.__parameters__ = _collect_type_vars(args, typevar_types=_typevar_types)
self._typevar_types = _typevar_types
self._paramspec_tvars = _paramspec_tvars
if not name:
self.__module__ = origin.__module__
def __eq__(self, other):
if not isinstance(other, _GenericAlias):
return NotImplemented
return (self.__origin__ == other.__origin__
and self.__args__ == other.__args__)
def __hash__(self):
return hash((self.__origin__, self.__args__))
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
@_tp_cache
def __getitem__(self, args):
# Parameterizes an already-parameterized object.
#
# For example, we arrive here doing something like:
# T1 = TypeVar('T1')
# T2 = TypeVar('T2')
# T3 = TypeVar('T3')
# class A(Generic[T1]): pass
# B = A[T2] # B is a _GenericAlias
# C = B[T3] # Invokes _GenericAlias.__getitem__
#
# We also arrive here when parameterizing a generic `Callable` alias:
# T = TypeVar('T')
# C = Callable[[T], None]
# C[int] # Invokes _GenericAlias.__getitem__
if self.__origin__ in (Generic, Protocol):
# Can't subscript Generic[...] or Protocol[...].
raise TypeError(f"Cannot subscript already-subscripted {self}")
# Preprocess `args`.
if not isinstance(args, tuple):
args = (args,)
args = tuple(_type_convert(p) for p in args)
if (self._paramspec_tvars
and any(isinstance(t, ParamSpec) for t in self.__parameters__)):
args = _prepare_paramspec_params(self, args)
else:
_check_generic(self, args, len(self.__parameters__))
new_args = self._determine_new_args(args)
r = self.copy_with(new_args)
return r
def _determine_new_args(self, args):
# Determines new __args__ for __getitem__.
#
# For example, suppose we had:
# T1 = TypeVar('T1')
# T2 = TypeVar('T2')
# class A(Generic[T1, T2]): pass
# T3 = TypeVar('T3')
# B = A[int, T3]
# C = B[str]
# `B.__args__` is `(int, T3)`, so `C.__args__` should be `(int, str)`.
# Unfortunately, this is harder than it looks, because if `T3` is
# anything more exotic than a plain `TypeVar`, we need to consider
# edge cases.
# In the example above, this would be {T3: str}
new_arg_by_param = dict(zip(self.__parameters__, args))
new_args = []
for old_arg in self.__args__:
if isinstance(old_arg, ParamSpec):
new_arg = new_arg_by_param[old_arg]
if not _is_param_expr(new_arg):
raise TypeError(f"Expected a list of types, an ellipsis, "
f"ParamSpec, or Concatenate. Got {new_arg}")
elif isinstance(old_arg, self._typevar_types):
new_arg = new_arg_by_param[old_arg]
elif isinstance(old_arg, (_GenericAlias, GenericAlias, types.UnionType)):
subparams = old_arg.__parameters__
if not subparams:
new_arg = old_arg
else:
subargs = tuple(new_arg_by_param[x] for x in subparams)
new_arg = old_arg[subargs]
else:
new_arg = old_arg
if self.__origin__ == collections.abc.Callable and isinstance(new_arg, tuple):
# Consider the following `Callable`.
# C = Callable[[int], str]
# Here, `C.__args__` should be (int, str) - NOT ([int], str).
# That means that if we had something like...
# P = ParamSpec('P')
# T = TypeVar('T')
# C = Callable[P, T]
# D = C[[int, str], float]
# ...we need to be careful; `new_args` should end up as
# `(int, str, float)` rather than `([int, str], float)`.
new_args.extend(new_arg)
else:
new_args.append(new_arg)
return tuple(new_args)
def copy_with(self, args):
return self.__class__(self.__origin__, args, name=self._name, inst=self._inst)
def __repr__(self):
if self._name:
name = 'typing.' + self._name
else:
name = _type_repr(self.__origin__)
args = ", ".join([_type_repr(a) for a in self.__args__])
return f'{name}[{args}]'
def __reduce__(self):
if self._name:
origin = globals()[self._name]
else:
origin = self.__origin__
args = tuple(self.__args__)
if len(args) == 1 and not isinstance(args[0], tuple):
args, = args
return operator.getitem, (origin, args)
def __mro_entries__(self, bases):
if isinstance(self.__origin__, _SpecialForm):
raise TypeError(f"Cannot subclass {self!r}")
if self._name: # generic version of an ABC or built-in class
return super().__mro_entries__(bases)
if self.__origin__ is Generic:
if Protocol in bases:
return ()
i = bases.index(self)
for b in bases[i+1:]:
if isinstance(b, _BaseGenericAlias) and b is not self:
return ()
return (self.__origin__,)
# _nparams is the number of accepted parameters, e.g. 0 for Hashable,
# 1 for List and 2 for Dict. It may be -1 if variable number of
# parameters are accepted (needs custom __getitem__).
class _SpecialGenericAlias(_BaseGenericAlias, _root=True):
def __init__(self, origin, nparams, *, inst=True, name=None):
if name is None:
name = origin.__name__
super().__init__(origin, inst=inst, name=name)
self._nparams = nparams
if origin.__module__ == 'builtins':
self.__doc__ = f'A generic version of {origin.__qualname__}.'
else:
self.__doc__ = f'A generic version of {origin.__module__}.{origin.__qualname__}.'
@_tp_cache
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
_check_generic(self, params, self._nparams)
return self.copy_with(params)
def copy_with(self, params):
return _GenericAlias(self.__origin__, params,
name=self._name, inst=self._inst)
def __repr__(self):
return 'typing.' + self._name
def __subclasscheck__(self, cls):
if isinstance(cls, _SpecialGenericAlias):
return issubclass(cls.__origin__, self.__origin__)
if not isinstance(cls, _GenericAlias):
return issubclass(cls, self.__origin__)
return super().__subclasscheck__(cls)
def __reduce__(self):
return self._name
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
class _CallableGenericAlias(_GenericAlias, _root=True):
def __repr__(self):
assert self._name == 'Callable'
args = self.__args__
if len(args) == 2 and _is_param_expr(args[0]):
return super().__repr__()
return (f'typing.Callable'
                f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], '
f'{_type_repr(args[-1])}]')
def __reduce__(self):
args = self.__args__
if not (len(args) == 2 and _is_param_expr(args[0])):
args = list(args[:-1]), args[-1]
return operator.getitem, (Callable, args)
class _CallableType(_SpecialGenericAlias, _root=True):
def copy_with(self, params):
return _CallableGenericAlias(self.__origin__, params,
name=self._name, inst=self._inst,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
def __getitem__(self, params):
if not isinstance(params, tuple) or len(params) != 2:
raise TypeError("Callable must be used as "
"Callable[[arg, ...], result].")
args, result = params
# This relaxes what args can be on purpose to allow things like
# PEP 612 ParamSpec. Responsibility for whether a user is using
# Callable[...] properly is deferred to static type checkers.
if isinstance(args, list):
params = (tuple(args), result)
else:
params = (args, result)
return self.__getitem_inner__(params)
@_tp_cache
def __getitem_inner__(self, params):
args, result = params
msg = "Callable[args, result]: result must be a type."
result = _type_check(result, msg)
if args is Ellipsis:
return self.copy_with((_TypingEllipsis, result))
if not isinstance(args, tuple):
args = (args,)
args = tuple(_type_convert(arg) for arg in args)
params = args + (result,)
return self.copy_with(params)
class _TupleType(_SpecialGenericAlias, _root=True):
@_tp_cache
def __getitem__(self, params):
if params == ():
return self.copy_with((_TypingEmpty,))
if not isinstance(params, tuple):
params = (params,)
if len(params) == 2 and params[1] is ...:
msg = "Tuple[t, ...]: t must be a type."
p = _type_check(params[0], msg)
return self.copy_with((p, _TypingEllipsis))
msg = "Tuple[t0, t1, ...]: each t must be a type."
params = tuple(_type_check(p, msg) for p in params)
return self.copy_with(params)
class _UnionGenericAlias(_GenericAlias, _root=True):
def copy_with(self, params):
return Union[params]
def __eq__(self, other):
if not isinstance(other, (_UnionGenericAlias, types.UnionType)):
return NotImplemented
return set(self.__args__) == set(other.__args__)
def __hash__(self):
return hash(frozenset(self.__args__))
def __repr__(self):
args = self.__args__
if len(args) == 2:
if args[0] is type(None):
return f'typing.Optional[{_type_repr(args[1])}]'
elif args[1] is type(None):
return f'typing.Optional[{_type_repr(args[0])}]'
return super().__repr__()
def __instancecheck__(self, obj):
return self.__subclasscheck__(type(obj))
def __subclasscheck__(self, cls):
for arg in self.__args__:
if issubclass(cls, arg):
return True
def __reduce__(self):
func, (origin, args) = super().__reduce__()
return func, (Union, args)
def _value_and_type_iter(parameters):
return ((p, type(p)) for p in parameters)
class _LiteralGenericAlias(_GenericAlias, _root=True):
def __eq__(self, other):
if not isinstance(other, _LiteralGenericAlias):
return NotImplemented
return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__))
def __hash__(self):
return hash(frozenset(_value_and_type_iter(self.__args__)))
class _ConcatenateGenericAlias(_GenericAlias, _root=True):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
def copy_with(self, params):
if isinstance(params[-1], (list, tuple)):
return (*params[:-1], *params[-1])
if isinstance(params[-1], _ConcatenateGenericAlias):
params = (*params[:-1], *params[-1].__args__)
elif not isinstance(params[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
return super().copy_with(params)
class Generic:
"""Abstract base class for generic types.
A generic type is typically declared by inheriting from
this class parameterized with one or more type variables.
For example, a generic mapping type might be defined as::
class Mapping(Generic[KT, VT]):
def __getitem__(self, key: KT) -> VT:
...
# Etc.
This class can then be used as follows::
def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
try:
return mapping[key]
except KeyError:
return default
"""
__slots__ = ()
_is_protocol = False
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not Tuple:
raise TypeError(
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
params = tuple(_type_convert(p) for p in params)
if cls in (Generic, Protocol):
# Generic and Protocol can only be subscripted with unique type variables.
if not all(isinstance(p, (TypeVar, ParamSpec)) for p in params):
raise TypeError(
f"Parameters to {cls.__name__}[...] must all be type variables "
f"or parameter specification variables.")
if len(set(params)) != len(params):
raise TypeError(
f"Parameters to {cls.__name__}[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
if any(isinstance(t, ParamSpec) for t in cls.__parameters__):
params = _prepare_paramspec_params(cls, params)
else:
_check_generic(cls, params, len(cls.__parameters__))
return _GenericAlias(cls, params,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
tvars = []
if '__orig_bases__' in cls.__dict__:
error = Generic in cls.__orig_bases__
else:
error = Generic in cls.__bases__ and cls.__name__ != 'Protocol'
if error:
raise TypeError("Cannot inherit from plain Generic")
if '__orig_bases__' in cls.__dict__:
tvars = _collect_type_vars(cls.__orig_bases__, (TypeVar, ParamSpec))
# Look for Generic[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, _GenericAlias) and
base.__origin__ is Generic):
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...] multiple types.")
gvars = base.__parameters__
if gvars is not None:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in Generic[{s_args}]")
tvars = gvars
cls.__parameters__ = tuple(tvars)
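# Illustrative sketch (added for clarity; not part of the upstream typing module):
# the bookkeeping attributes (__parameters__, __origin__, __args__) on a
# user-defined generic class, as described in the comments above _GenericAlias.
def _example_generic_subscription():
    T1 = TypeVar('T1')
    class Box(Generic[T1]):
        pass
    alias = Box[int]
    assert Box.__parameters__ == (T1,)     # free type variables of the class
    assert alias.__origin__ is Box         # the class that was subscripted
    assert alias.__args__ == (int,)        # the substituted arguments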
class _TypingEmpty:
"""Internal placeholder for () or []. Used by TupleMeta and CallableMeta
to allow empty list/tuple in specific places, without allowing them
to sneak in where prohibited.
"""
class _TypingEllipsis:
"""Internal placeholder for ... (ellipsis)."""
_TYPING_INTERNALS = ['__parameters__', '__orig_bases__', '__orig_class__',
'_is_protocol', '_is_runtime_protocol']
_SPECIAL_NAMES = ['__abstractmethods__', '__annotations__', '__dict__', '__doc__',
'__init__', '__module__', '__new__', '__slots__',
'__subclasshook__', '__weakref__', '__class_getitem__']
# These special attributes will not be collected as protocol members.
EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS + _SPECIAL_NAMES + ['_MutableMapping__marker']
def _get_protocol_attrs(cls):
"""Collect protocol members from a protocol class objects.
This includes names actually defined in the class dictionary, as well
as names that appear in annotations. Special names (above) are skipped.
"""
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if not attr.startswith('_abc_') and attr not in EXCLUDED_ATTRIBUTES:
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
# PEP 544 prohibits using issubclass() with protocols that have non-method members.
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
def _no_init_or_replace_init(self, *args, **kwargs):
cls = type(self)
if cls._is_protocol:
raise TypeError('Protocols cannot be instantiated')
# Already using a custom `__init__`. No need to calculate correct
# `__init__` to call. This can lead to RecursionError. See bpo-45121.
if cls.__init__ is not _no_init_or_replace_init:
return
# Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`.
# The first instantiation of the subclass will call `_no_init_or_replace_init` which
# searches for a proper new `__init__` in the MRO. The new `__init__`
# replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent
# instantiation of the protocol subclass will thus use the new
# `__init__` and no longer call `_no_init_or_replace_init`.
for base in cls.__mro__:
init = base.__dict__.get('__init__', _no_init_or_replace_init)
if init is not _no_init_or_replace_init:
cls.__init__ = init
break
else:
# should not happen
cls.__init__ = object.__init__
cls.__init__(self, *args, **kwargs)
def _caller(depth=1, default='__main__'):
try:
return sys._getframe(depth + 1).f_globals.get('__name__', default)
except (AttributeError, ValueError): # For platforms without _getframe()
return None
def _allow_reckless_class_checks(depth=3):
"""Allow instance and class checks for special stdlib modules.
The abc and functools modules indiscriminately call isinstance() and
issubclass() on the whole MRO of a user class, which may contain protocols.
"""
return _caller(depth) in {'abc', 'functools', None}
_PROTO_ALLOWLIST = {
'collections.abc': [
'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
],
'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
}
class _ProtocolMeta(ABCMeta):
# This metaclass is really unfortunate and exists only because of
# the lack of __instancehook__.
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if (
getattr(cls, '_is_protocol', False) and
not getattr(cls, '_is_runtime_protocol', False) and
not _allow_reckless_class_checks(depth=2)
):
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
if ((not getattr(cls, '_is_protocol', False) or
_is_callable_members_only(cls)) and
issubclass(instance.__class__, cls)):
return True
if cls._is_protocol:
if all(hasattr(instance, attr) and
# All *methods* can be blocked by setting them to None.
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(cls)):
return True
return super().__instancecheck__(instance)
class Protocol(Generic, metaclass=_ProtocolMeta):
"""Base class for protocol classes.
Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
@typing.runtime_checkable act as simple-minded runtime protocols that check
only the presence of given attributes, ignoring their type signatures.
    Protocol classes can be generic; they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
_is_runtime_protocol = False
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', False):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', False):
return NotImplemented
# First, perform various sanity checks.
if not getattr(cls, '_is_runtime_protocol', False):
if _allow_reckless_class_checks():
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
if not _is_callable_members_only(cls):
if _allow_reckless_class_checks():
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error message as for issubclass(1, int).
raise TypeError('issubclass() arg 1 must be a class')
# Second, perform the actual structural compatibility check.
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
                    # Check if the member appears in the class dictionary...
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
# ...or in annotations, if it is a sub-protocol.
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, collections.abc.Mapping) and
attr in annotations and
issubclass(other, Generic) and other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols...
if not cls._is_protocol:
return
# ... otherwise check consistency of bases, and prohibit instantiation.
for base in cls.__bases__:
if not (base in (object, Generic) or
base.__module__ in _PROTO_ALLOWLIST and
base.__name__ in _PROTO_ALLOWLIST[base.__module__] or
issubclass(base, Generic) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
' protocols, got %r' % base)
cls.__init__ = _no_init_or_replace_init
class _AnnotatedAlias(_GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
    instantiating it is the same as instantiating the underlying type, and binding
    it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return "typing.Annotated[{}, {}]".format(
_type_repr(self.__origin__),
", ".join(repr(a) for a in self.__metadata__)
)
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__,) + self.__metadata__
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
return (self.__origin__ == other.__origin__
and self.__metadata__ == other.__metadata__)
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
def __getattr__(self, attr):
if attr in {'__name__', '__qualname__'}:
return 'Annotated'
return super().__getattr__(attr)
class Annotated:
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise TypeError("Type Annotated cannot be instantiated.")
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be used "
"with at least two arguments (a type and an "
"annotation).")
msg = "Annotated[t, ...]: t must be a type."
origin = _type_check(params[0], msg, allow_special_forms=True)
metadata = tuple(params[1:])
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
"Cannot subclass {}.Annotated".format(cls.__module__)
)
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol.
Such protocol can be used with isinstance() and issubclass().
Raise TypeError if applied to a non-protocol class.
This allows a simple-minded structural check very similar to
one trick ponies in collections.abc such as Iterable.
For example::
@runtime_checkable
class Closable(Protocol):
def close(self): ...
assert isinstance(open('/some/file'), Closable)
Warning: this will check only the presence of the required methods,
not their type signatures!
"""
if not issubclass(cls, Generic) or not cls._is_protocol:
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
' got %r' % cls)
cls._is_runtime_protocol = True
return cls
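# Illustrative sketch (added for clarity; not part of the upstream typing module):
# a self-contained @runtime_checkable protocol. isinstance() only verifies that
# the required method exists, never its signature, as the docstring above warns.
def _example_runtime_checkable():
    @runtime_checkable
    class SupportsClose(Protocol):
        def close(self): ...
    class Resource:
        def close(self):
            return None
    assert isinstance(Resource(), SupportsClose)
    assert not isinstance(object(), SupportsClose)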
def cast(typ, val):
"""Cast a value to a type.
This returns the value unchanged. To the type checker this
signals that the return value has the designated type, but at
runtime we intentionally don't check anything (we want this
to be as fast as possible).
"""
return val
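# Illustrative sketch (added for clarity; not part of the upstream typing module):
# cast() is purely a hint for type checkers and returns its argument unchanged.
def _example_cast():
    raw = ["a", "b"]
    assert cast(List[str], raw) is raw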
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
try:
code = func.__code__
except AttributeError:
# Some built-in functions don't have __code__, __defaults__, etc.
return {}
pos_count = code.co_argcount
arg_names = code.co_varnames
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
_allowed_types = (types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.ModuleType,
WrapperDescriptorType, MethodWrapperType, MethodDescriptorType)
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used. For classes, the search
order is globals first then locals.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if getattr(obj, '__no_type_check__', None):
return {}
# Classes require a special treatment.
if isinstance(obj, type):
hints = {}
for base in reversed(obj.__mro__):
if globalns is None:
base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
else:
base_globals = globalns
ann = base.__dict__.get('__annotations__', {})
if isinstance(ann, types.GetSetDescriptorType):
ann = {}
base_locals = dict(vars(base)) if localns is None else localns
if localns is None and globalns is None:
# This is surprising, but required. Before Python 3.10,
# get_type_hints only evaluated the globalns of
# a class. To maintain backwards compatibility, we reverse
# the globalns and localns order so that eval() looks into
# *base_globals* first rather than *base_locals*.
# This only affects ForwardRefs.
base_globals, base_locals = base_locals, base_globals
for name, value in ann.items():
if value is None:
value = type(None)
if isinstance(value, str):
value = ForwardRef(value, is_argument=False, is_class=True)
value = _eval_type(value, base_globals, base_locals)
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
if globalns is None:
if isinstance(obj, types.ModuleType):
globalns = obj.__dict__
else:
nsobj = obj
# Find globalns for the unwrapped object.
while hasattr(nsobj, '__wrapped__'):
nsobj = nsobj.__wrapped__
globalns = getattr(nsobj, '__globals__', {})
if localns is None:
localns = globalns
elif localns is None:
localns = globalns
hints = getattr(obj, '__annotations__', None)
if hints is None:
# Return empty annotations for something that _could_ have them.
if isinstance(obj, _allowed_types):
return {}
else:
raise TypeError('{!r} is not a module, class, method, '
'or function.'.format(obj))
defaults = _get_defaults(obj)
hints = dict(hints)
for name, value in hints.items():
if value is None:
value = type(None)
if isinstance(value, str):
# class-level forward refs were handled above, this must be either
# a module-level annotation or a function argument annotation
value = ForwardRef(
value,
is_argument=not isinstance(obj, types.ModuleType),
is_class=False,
)
value = _eval_type(value, globalns, localns)
if name in defaults and defaults[name] is None:
value = Optional[value]
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
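# Illustrative sketch (added for clarity; not part of the upstream typing module):
# string annotations are evaluated, and a None default turns the annotation into
# Optional[...], as described in the docstring above.
def _example_get_type_hints():
    def greet(name: "str", times: int = None) -> "str":
        return name * (times or 1)
    hints = get_type_hints(greet)
    assert hints["name"] is str
    assert hints["times"] == Optional[int]   # None default adds Optional
    assert hints["return"] is str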
def _strip_annotations(t):
"""Strips the annotations from a given type.
"""
if isinstance(t, _AnnotatedAlias):
return _strip_annotations(t.__origin__)
if isinstance(t, _GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return t.copy_with(stripped_args)
if isinstance(t, GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return GenericAlias(t.__origin__, stripped_args)
if isinstance(t, types.UnionType):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return functools.reduce(operator.or_, stripped_args)
return t
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (_BaseGenericAlias, GenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is Generic:
return Generic
if isinstance(tp, types.UnionType):
return types.UnionType
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (_GenericAlias, GenericAlias)):
res = tp.__args__
if (tp.__origin__ is collections.abc.Callable
and not (len(res) == 2 and _is_param_expr(res[0]))):
res = (list(res[:-1]), res[-1])
return res
if isinstance(tp, types.UnionType):
return tp.__args__
return ()
def is_typeddict(tp):
"""Check if an annotation is a TypedDict class
For example::
class Film(TypedDict):
title: str
year: int
is_typeddict(Film) # => True
is_typeddict(Union[list, str]) # => False
"""
return isinstance(tp, _TypedDictMeta)
def no_type_check(arg):
"""Decorator to indicate that annotations are not type hints.
The argument must be a class or function; if it is a class, it
applies recursively to all methods and classes defined in that class
(but not to methods defined in its superclasses or subclasses).
This mutates the function(s) or class(es) in place.
"""
if isinstance(arg, type):
arg_attrs = arg.__dict__.copy()
for attr, val in arg.__dict__.items():
if val in arg.__bases__ + (arg,):
arg_attrs.pop(attr)
for obj in arg_attrs.values():
if isinstance(obj, types.FunctionType):
obj.__no_type_check__ = True
if isinstance(obj, type):
no_type_check(obj)
try:
arg.__no_type_check__ = True
except TypeError: # built-in classes
pass
return arg
def no_type_check_decorator(decorator):
"""Decorator to give another decorator the @no_type_check effect.
This wraps the decorator with something that wraps the decorated
function in @no_type_check.
"""
@functools.wraps(decorator)
def wrapped_decorator(*args, **kwds):
func = decorator(*args, **kwds)
func = no_type_check(func)
return func
return wrapped_decorator
def _overload_dummy(*args, **kwds):
"""Helper for @overload to raise when called."""
raise NotImplementedError(
"You should not call an overloaded function. "
"A series of @overload-decorated functions "
"outside a stub module should always be followed "
"by an implementation that is not @overload-ed.")
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
"""
return _overload_dummy
def final(f):
"""A decorator to indicate final methods and final classes.
Use this decorator to indicate to type checkers that the decorated
method cannot be overridden, and decorated class cannot be subclassed.
For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties. The decorator
sets the ``__final__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
"""
try:
f.__final__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return f
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = TypeVar('T') # Any type.
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
T_co = TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant.
# Internal type variable used for Type[].
CT_co = TypeVar('CT_co', covariant=True, bound=type)
# A useful type variable with constraints. This represents string types.
# (This one *is* for export!)
AnyStr = TypeVar('AnyStr', bytes, str)
# Various ABCs mimicking those in collections.abc.
_alias = _SpecialGenericAlias
Hashable = _alias(collections.abc.Hashable, 0) # Not generic.
Awaitable = _alias(collections.abc.Awaitable, 1)
Coroutine = _alias(collections.abc.Coroutine, 3)
AsyncIterable = _alias(collections.abc.AsyncIterable, 1)
AsyncIterator = _alias(collections.abc.AsyncIterator, 1)
Iterable = _alias(collections.abc.Iterable, 1)
Iterator = _alias(collections.abc.Iterator, 1)
Reversible = _alias(collections.abc.Reversible, 1)
Sized = _alias(collections.abc.Sized, 0) # Not generic.
Container = _alias(collections.abc.Container, 1)
Collection = _alias(collections.abc.Collection, 1)
Callable = _CallableType(collections.abc.Callable, 2)
Callable.__doc__ = \
"""Callable type; Callable[[int], str] is a function of (int) -> str.
The subscription syntax must always be used with exactly two
values: the argument list and the return type. The argument list
must be a list of types or ellipsis; the return type must be a single type.
    There is no syntax to indicate optional or keyword arguments;
such function types are rarely used as callback types.
"""
AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet')
MutableSet = _alias(collections.abc.MutableSet, 1)
# NOTE: Mapping is only covariant in the value type.
Mapping = _alias(collections.abc.Mapping, 2)
MutableMapping = _alias(collections.abc.MutableMapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
MutableSequence = _alias(collections.abc.MutableSequence, 1)
ByteString = _alias(collections.abc.ByteString, 0) # Not generic
# Tuple accepts variable number of parameters.
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
"""
List = _alias(list, 1, inst=False, name='List')
Deque = _alias(collections.deque, 1, name='Deque')
Set = _alias(set, 1, inst=False, name='Set')
FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet')
MappingView = _alias(collections.abc.MappingView, 1)
KeysView = _alias(collections.abc.KeysView, 1)
ItemsView = _alias(collections.abc.ItemsView, 2)
ValuesView = _alias(collections.abc.ValuesView, 1)
ContextManager = _alias(contextlib.AbstractContextManager, 1, name='ContextManager')
AsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, 1, name='AsyncContextManager')
Dict = _alias(dict, 2, inst=False, name='Dict')
DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict')
OrderedDict = _alias(collections.OrderedDict, 2)
Counter = _alias(collections.Counter, 1)
ChainMap = _alias(collections.ChainMap, 2)
Generator = _alias(collections.abc.Generator, 3)
AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)
Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
@runtime_checkable
class SupportsInt(Protocol):
"""An ABC with one abstract method __int__."""
__slots__ = ()
@abstractmethod
def __int__(self) -> int:
pass
@runtime_checkable
class SupportsFloat(Protocol):
"""An ABC with one abstract method __float__."""
__slots__ = ()
@abstractmethod
def __float__(self) -> float:
pass
@runtime_checkable
class SupportsComplex(Protocol):
"""An ABC with one abstract method __complex__."""
__slots__ = ()
@abstractmethod
def __complex__(self) -> complex:
pass
@runtime_checkable
class SupportsBytes(Protocol):
"""An ABC with one abstract method __bytes__."""
__slots__ = ()
@abstractmethod
def __bytes__(self) -> bytes:
pass
@runtime_checkable
class SupportsIndex(Protocol):
"""An ABC with one abstract method __index__."""
__slots__ = ()
@abstractmethod
def __index__(self) -> int:
pass
@runtime_checkable
class SupportsAbs(Protocol[T_co]):
"""An ABC with one abstract method __abs__ that is covariant in its return type."""
__slots__ = ()
@abstractmethod
def __abs__(self) -> T_co:
pass
@runtime_checkable
class SupportsRound(Protocol[T_co]):
"""An ABC with one abstract method __round__ that is covariant in its return type."""
__slots__ = ()
@abstractmethod
def __round__(self, ndigits: int = 0) -> T_co:
pass
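# Illustrative note (not part of the module): because the Supports* protocols
# above are decorated with @runtime_checkable, isinstance() may be used with
# them at runtime; the check only verifies that the single special method exists.
#
#     isinstance(3.14, SupportsInt)    # True  -- float defines __int__
#     isinstance("abc", SupportsInt)   # False -- str does not define __int__
#     isinstance(10, SupportsIndex)    # True  -- int defines __index__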
def _make_nmtuple(name, types, module, defaults = ()):
fields = [n for n, t in types]
types = {n: _type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = types
return nm_tpl
# attributes prohibited to set in NamedTuple class syntax
_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__',
'_fields', '_field_defaults',
'_make', '_replace', '_asdict', '_source'})
_special = frozenset({'__module__', '__name__', '__annotations__'})
class NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
assert bases[0] is _NamedTuple
types = ns.get('__annotations__', {})
default_names = []
for field_name in types:
if field_name in ns:
default_names.append(field_name)
elif default_names:
                raise TypeError(f"Non-default namedtuple field {field_name} "
                                f"cannot follow default field"
                                f"{'s' if len(default_names) > 1 else ''} "
                                f"{', '.join(default_names)}")
nm_tpl = _make_nmtuple(typename, types.items(),
defaults=[ns[n] for n in default_names],
module=ns['__module__'])
# update from user namespace without overriding special namedtuple attributes
for key in ns:
if key in _prohibited:
raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
elif key not in _special and key not in nm_tpl._fields:
setattr(nm_tpl, key, ns[key])
return nm_tpl
def NamedTuple(typename, fields=None, /, **kwargs):
"""Typed version of namedtuple.
Usage in Python versions >= 3.6::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has an extra __annotations__ attribute, giving a
dict that maps field names to types. (The field names are also in
the _fields attribute, which is part of the namedtuple API.)
Alternative equivalent keyword syntax is also accepted::
Employee = NamedTuple('Employee', name=str, id=int)
In Python versions <= 3.5 use::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
"""
if fields is None:
fields = kwargs.items()
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
return _make_nmtuple(typename, fields, module=_caller())
_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {})
def _namedtuple_mro_entries(bases):
if len(bases) > 1:
raise TypeError("Multiple inheritance with NamedTuple is not supported")
assert bases[0] is NamedTuple
return (_NamedTuple,)
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
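# Illustrative usage sketch (not part of the module); Point is a hypothetical class.
#
#     class Point(NamedTuple):
#         x: int
#         y: int = 0
#
#     Point(1)               # Point(x=1, y=0) -- default applied
#     Point._fields          # ('x', 'y')
#     Point.__annotations__  # {'x': <class 'int'>, 'y': <class 'int'>}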
class _TypedDictMeta(type):
def __new__(cls, name, bases, ns, total=True):
"""Create new typed dict class object.
This method is called when TypedDict is subclassed,
or when TypedDict is instantiated. This way
TypedDict supports all three syntax forms described in its docstring.
Subclasses and instances of TypedDict return actual dictionaries.
"""
for base in bases:
if type(base) is not _TypedDictMeta:
raise TypeError('cannot inherit from both a TypedDict type '
'and a non-TypedDict base class')
tp_dict = type.__new__(_TypedDictMeta, name, (dict,), ns)
annotations = {}
own_annotations = ns.get('__annotations__', {})
own_annotation_keys = set(own_annotations.keys())
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
own_annotations = {
n: _type_check(tp, msg, module=tp_dict.__module__)
for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
for base in bases:
annotations.update(base.__dict__.get('__annotations__', {}))
required_keys.update(base.__dict__.get('__required_keys__', ()))
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
annotations.update(own_annotations)
if total:
required_keys.update(own_annotation_keys)
else:
optional_keys.update(own_annotation_keys)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
__call__ = dict # static method
def __subclasscheck__(cls, other):
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
__instancecheck__ = __subclasscheck__
def TypedDict(typename, fields=None, /, *, total=True, **kwargs):
"""A simple typed namespace. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, where each key is
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
By default, all keys must be present in a TypedDict. It is possible
to override this by specifying totality.
Usage::
class point2D(TypedDict, total=False):
x: int
y: int
    This means that a point2D TypedDict can have any of the keys omitted. A type
checker is only expected to support a literal False or True as the value of
the total argument. True is the default, and makes all items defined in the
class body be required.
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
module = _caller()
if module is not None:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = module
return _TypedDictMeta(typename, (), ns, total=total)
_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
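# Illustrative usage sketch (not part of the module); Movie is a hypothetical class.
#
#     class Movie(TypedDict, total=False):
#         title: str
#         year: int
#
#     Movie.__optional_keys__               # frozenset containing 'title' and 'year'
#     m: Movie = {'title': 'Blade Runner'}  # OK -- total=False makes keys optional
#     isinstance(m, Movie)                  # raises TypeError: no runtime checks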
class NewType:
"""NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy callable that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
"""
__call__ = _idfunc
def __init__(self, name, tp):
self.__qualname__ = name
if '.' in name:
name = name.rpartition('.')[-1]
self.__name__ = name
self.__supertype__ = tp
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
def __repr__(self):
return f'{self.__module__}.{self.__qualname__}'
def __reduce__(self):
return self.__qualname__
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
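# Illustrative note (not part of the module): at runtime a NewType is an identity
# callable, so the wrapped value comes back unchanged.
#
#     UserId = NewType('UserId', int)
#     UserId(5) == 5            # True
#     type(UserId(5)) is int    # True
#     UserId.__supertype__      # <class 'int'>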
# Python-version-specific alias (Python 2: unicode; Python 3: str)
Text = str
# Constant that's True when type checking, but False here.
TYPE_CHECKING = False
class IO(Generic[AnyStr]):
"""Generic base class for TextIO and BinaryIO.
This is an abstract, generic version of the return of open().
NOTE: This does not distinguish between the different possible
classes (text vs. binary, read vs. write vs. read/write,
append-only, unbuffered). The TextIO and BinaryIO subclasses
below capture the distinctions between text vs. binary, which is
pervasive in the interface; however we currently do not offer a
way to track the other distinctions in the type system.
"""
__slots__ = ()
@property
@abstractmethod
def mode(self) -> str:
pass
@property
@abstractmethod
def name(self) -> str:
pass
@abstractmethod
def close(self) -> None:
pass
@property
@abstractmethod
def closed(self) -> bool:
pass
@abstractmethod
def fileno(self) -> int:
pass
@abstractmethod
def flush(self) -> None:
pass
@abstractmethod
def isatty(self) -> bool:
pass
@abstractmethod
def read(self, n: int = -1) -> AnyStr:
pass
@abstractmethod
def readable(self) -> bool:
pass
@abstractmethod
def readline(self, limit: int = -1) -> AnyStr:
pass
@abstractmethod
def readlines(self, hint: int = -1) -> List[AnyStr]:
pass
@abstractmethod
def seek(self, offset: int, whence: int = 0) -> int:
pass
@abstractmethod
def seekable(self) -> bool:
pass
@abstractmethod
def tell(self) -> int:
pass
@abstractmethod
def truncate(self, size: int = None) -> int:
pass
@abstractmethod
def writable(self) -> bool:
pass
@abstractmethod
def write(self, s: AnyStr) -> int:
pass
@abstractmethod
def writelines(self, lines: List[AnyStr]) -> None:
pass
@abstractmethod
def __enter__(self) -> 'IO[AnyStr]':
pass
@abstractmethod
def __exit__(self, type, value, traceback) -> None:
pass
class BinaryIO(IO[bytes]):
"""Typed version of the return of open() in binary mode."""
__slots__ = ()
@abstractmethod
def write(self, s: Union[bytes, bytearray]) -> int:
pass
@abstractmethod
def __enter__(self) -> 'BinaryIO':
pass
class TextIO(IO[str]):
"""Typed version of the return of open() in text mode."""
__slots__ = ()
@property
@abstractmethod
def buffer(self) -> BinaryIO:
pass
@property
@abstractmethod
def encoding(self) -> str:
pass
@property
@abstractmethod
def errors(self) -> Optional[str]:
pass
@property
@abstractmethod
def line_buffering(self) -> bool:
pass
@property
@abstractmethod
def newlines(self) -> Any:
pass
@abstractmethod
def __enter__(self) -> 'TextIO':
pass
class _DeprecatedType(type):
def __getattribute__(cls, name):
if name not in ("__dict__", "__module__") and name in cls.__dict__:
warnings.warn(
f"{cls.__name__} is deprecated, import directly "
f"from typing instead. {cls.__name__} will be removed "
"in Python 3.12.",
DeprecationWarning,
stacklevel=2,
)
return super().__getattribute__(name)
class io(metaclass=_DeprecatedType):
"""Wrapper namespace for IO generic classes."""
__all__ = ['IO', 'TextIO', 'BinaryIO']
IO = IO
TextIO = TextIO
BinaryIO = BinaryIO
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
Pattern = _alias(stdlib_re.Pattern, 1)
Match = _alias(stdlib_re.Match, 1)
class re(metaclass=_DeprecatedType):
"""Wrapper namespace for re type aliases."""
__all__ = ['Pattern', 'Match']
Pattern = Pattern
Match = Match
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
def reveal_type(obj: T, /) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr)
return obj
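# Illustrative usage sketch (not part of the module):
#
#     x = reveal_type(1 + 1)    # prints "Runtime type is 'int'" to stderr
#     x                         # 2 -- the argument is returned unchanged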
| """
The typing module: Support for gradual typing as defined by PEP 484.
At a large scale, the structure of the module is as follows:
* Imports and exports; all public names should be explicitly added to __all__.
* Internal helper functions: these should never be used in code outside this module.
* _SpecialForm and its instances (special forms):
Any, NoReturn, ClassVar, Union, Optional, Concatenate
* Classes whose instances can be type arguments in addition to types:
ForwardRef, TypeVar and ParamSpec
* The core of the internal generics API: _GenericAlias and _VariadicGenericAlias; the latter is
currently only used by Tuple and Callable. All subscripted types like X[int], Union[int, str],
etc., are instances of either of these classes.
* The public counterpart of the generics API consists of two classes: Generic and Protocol.
* Public helper functions: get_type_hints, overload, cast, no_type_check,
no_type_check_decorator.
* Generic aliases for collections.abc ABCs and few additional protocols.
* Special types: NewType, NamedTuple, TypedDict.
* Wrapper submodules for re and io related types.
"""
from abc import abstractmethod, ABCMeta
import collections
import collections.abc
import contextlib
import functools
import operator
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
import warnings
from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias
try:
from _typing import _idfunc
except ImportError:
def _idfunc(_, x):
return x
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'Annotated',
'Any',
'Callable',
'ClassVar',
'Concatenate',
'Final',
'ForwardRef',
'Generic',
'Literal',
'Optional',
'ParamSpec',
'Protocol',
'Tuple',
'Type',
'TypeVar',
'Union',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'ByteString',
'Container',
'ContextManager',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'Collection',
'AsyncGenerator',
'AsyncContextManager',
# Structural checks, a.k.a. protocols.
'Reversible',
'SupportsAbs',
'SupportsBytes',
'SupportsComplex',
'SupportsFloat',
'SupportsIndex',
'SupportsInt',
'SupportsRound',
# Concrete collection types.
'ChainMap',
'Counter',
'Deque',
'Dict',
'DefaultDict',
'List',
'OrderedDict',
'Set',
'FrozenSet',
'NamedTuple', # Not really a type.
'TypedDict', # Not really a type.
'Generator',
# Other concrete types.
'BinaryIO',
'IO',
'Match',
'Pattern',
'TextIO',
# One-off things.
'AnyStr',
'cast',
'final',
'get_args',
'get_origin',
'get_type_hints',
'is_typeddict',
'NewType',
'no_type_check',
'no_type_check_decorator',
'NoReturn',
'overload',
'ParamSpecArgs',
'ParamSpecKwargs',
'reveal_type',
'runtime_checkable',
'Text',
'TYPE_CHECKING',
'TypeAlias',
'TypeGuard',
]
# The pseudo-submodules 're' and 'io' are part of the public
# namespace, but excluded from __all__ because they might stomp on
# legitimate imports of those modules.
def _type_convert(arg, module=None, *, allow_special_forms=False):
"""For converting None to type(None), and strings to ForwardRef."""
if arg is None:
return type(None)
if isinstance(arg, str):
return ForwardRef(arg, module=module, is_class=allow_special_forms)
return arg
def _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False):
"""Check that the argument is a type, and return it (internal helper).
As a special case, accept None and return type(None) instead. Also wrap strings
into ForwardRef instances. Consider several corner cases, for example plain
special forms like Union are not valid, while Union[int, str] is OK, etc.
The msg argument is a human-readable error message, e.g::
"Union[arg, ...]: arg should be a type."
We append the repr() of the actual value (truncated to 100 chars).
"""
invalid_generic_forms = (Generic, Protocol)
if not allow_special_forms:
invalid_generic_forms += (ClassVar,)
if is_argument:
invalid_generic_forms += (Final,)
arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms)
if (isinstance(arg, _GenericAlias) and
arg.__origin__ in invalid_generic_forms):
raise TypeError(f"{arg} is not valid as type argument")
if arg in (Any, NoReturn, ClassVar, Final, TypeAlias):
return arg
if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
raise TypeError(f"Plain {arg} is not valid as type argument")
if isinstance(arg, (type, TypeVar, ForwardRef, types.UnionType, ParamSpec)):
return arg
if not callable(arg):
raise TypeError(f"{msg} Got {arg!r:.100}.")
return arg
def _is_param_expr(arg):
return arg is ... or isinstance(arg,
(tuple, list, ParamSpec, _ConcatenateGenericAlias))
def _type_repr(obj):
"""Return the repr() of an object, special-casing types (internal helper).
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, types.GenericAlias):
return repr(obj)
if isinstance(obj, type):
if obj.__module__ == 'builtins':
return obj.__qualname__
return f'{obj.__module__}.{obj.__qualname__}'
if obj is ...:
        return '...'
if isinstance(obj, types.FunctionType):
return obj.__name__
return repr(obj)
def _collect_type_vars(types_, typevar_types=None):
    """Collect all type variables contained
in types in order of first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
"""
if typevar_types is None:
typevar_types = TypeVar
tvars = []
for t in types_:
if isinstance(t, typevar_types) and t not in tvars:
tvars.append(t)
if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
def _check_generic(cls, parameters, elen):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
alen = len(parameters)
if alen != elen:
raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
f" actual {alen}, expected {elen}")
def _prepare_paramspec_params(cls, params):
"""Prepares the parameters for a Generic containing ParamSpec
variables (internal helper).
"""
# Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
if (len(cls.__parameters__) == 1
and params and not _is_param_expr(params[0])):
assert isinstance(cls.__parameters__[0], ParamSpec)
return (params,)
else:
_check_generic(cls, params, len(cls.__parameters__))
_params = []
# Convert lists to tuples to help other libraries cache the results.
for p, tvar in zip(params, cls.__parameters__):
if isinstance(tvar, ParamSpec) and isinstance(p, list):
p = tuple(p)
_params.append(p)
return tuple(_params)
def _deduplicate(params):
# Weed out strict duplicates, preserving the first of each occurrence.
all_params = set(params)
if len(all_params) < len(params):
new_params = []
for t in params:
if t in all_params:
new_params.append(t)
all_params.remove(t)
params = new_params
assert not all_params, all_params
return params
def _remove_dups_flatten(parameters):
"""An internal helper for Union creation and substitution: flatten Unions
among parameters, then remove duplicates.
"""
# Flatten out Union[Union[...], ...].
params = []
for p in parameters:
if isinstance(p, (_UnionGenericAlias, types.UnionType)):
params.extend(p.__args__)
else:
params.append(p)
return tuple(_deduplicate(params))
def _flatten_literal_params(parameters):
"""An internal helper for Literal creation: flatten Literals among parameters"""
params = []
for p in parameters:
if isinstance(p, _LiteralGenericAlias):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
_cleanups = []
def _tp_cache(func=None, /, *, typed=False):
"""Internal wrapper caching __getitem__ of generic types with a fallback to
original function for non-hashable arguments.
"""
def decorator(func):
cached = functools.lru_cache(typed=typed)(func)
_cleanups.append(cached.cache_clear)
@functools.wraps(func)
def inner(*args, **kwds):
try:
return cached(*args, **kwds)
except TypeError:
pass # All real errors (not unhashable args) are raised below.
return func(*args, **kwds)
return inner
if func is not None:
return decorator(func)
return decorator
def _eval_type(t, globalns, localns, recursive_guard=frozenset()):
"""Evaluate all forward references in the given type t.
For use of globalns and localns see the docstring for get_type_hints().
    recursive_guard is used to prevent infinite recursion
with recursive ForwardRef.
"""
if isinstance(t, ForwardRef):
return t._evaluate(globalns, localns, recursive_guard)
if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)
if ev_args == t.__args__:
return t
if isinstance(t, GenericAlias):
return GenericAlias(t.__origin__, ev_args)
if isinstance(t, types.UnionType):
return functools.reduce(operator.or_, ev_args)
else:
return t.copy_with(ev_args)
return t
class _Final:
"""Mixin to prohibit subclassing"""
__slots__ = ('__weakref__',)
def __init_subclass__(cls, /, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
# Internal indicator of special typing constructs.
# See __doc__ instance attribute for specific docs.
class _SpecialForm(_Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return 'typing.' + self._name
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@_tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
class _LiteralSpecialForm(_SpecialForm, _root=True):
def __getitem__(self, parameters):
if not isinstance(parameters, tuple):
parameters = (parameters,)
return self._getitem(self, *parameters)
@_SpecialForm
def Any(self, parameters):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
or class checks.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def NoReturn(self, parameters):
"""Special type indicating functions that never return.
Example::
from typing import NoReturn
def stop() -> NoReturn:
raise Exception('no way')
This type is invalid in other positions, e.g., ``List[NoReturn]``
will fail in static type checkers.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def ClassVar(self, parameters):
"""Special type construct to mark class variables.
An annotation wrapped in ClassVar indicates that a given
attribute is intended to be used as a class variable and
should not be set on instances of that class. Usage::
class Starship:
stats: ClassVar[Dict[str, int]] = {} # class variable
damage: int = 10 # instance variable
ClassVar accepts only types and cannot be further subscribed.
Note that ClassVar is not a class itself, and should not
be used with isinstance() or issubclass().
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
@_SpecialForm
def Final(self, parameters):
"""Special typing construct to indicate final names to type checkers.
A final name cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
@_SpecialForm
def Union(self, parameters):
"""Union type; Union[X, Y] means either X or Y.
To define a union, use e.g. Union[int, str]. Details:
- The arguments must be types and there must be at least one.
- None as an argument is a special case and is replaced by
type(None).
- Unions of unions are flattened, e.g.::
Union[Union[int, str], float] == Union[int, str, float]
- Unions of a single argument vanish, e.g.::
Union[int] == int # The constructor actually returns int
- Redundant arguments are skipped, e.g.::
Union[int, str, int] == Union[int, str]
- When comparing unions, the argument order is ignored, e.g.::
Union[int, str] == Union[str, int]
- You cannot subclass or instantiate a union.
- You can use Optional[X] as a shorthand for Union[X, None].
"""
if parameters == ():
raise TypeError("Cannot take a Union of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
msg = "Union[arg, ...]: each arg must be a type."
parameters = tuple(_type_check(p, msg) for p in parameters)
parameters = _remove_dups_flatten(parameters)
if len(parameters) == 1:
return parameters[0]
if len(parameters) == 2 and type(None) in parameters:
return _UnionGenericAlias(self, parameters, name="Optional")
return _UnionGenericAlias(self, parameters)
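# Illustrative note (not part of the module): Union arguments are normalized at
# subscription time, as described in the docstring above.
#
#     Union[int]               # int -- a one-argument Union collapses
#     Union[int, str, int]     # typing.Union[int, str] -- duplicates removed
#     Union[int, None]         # typing.Optional[int]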
@_SpecialForm
def Optional(self, parameters):
"""Optional type.
Optional[X] is equivalent to Union[X, None].
"""
arg = _type_check(parameters, f"{self} requires a single type.")
return Union[arg, type(None)]
@_LiteralSpecialForm
@_tp_cache(typed=True)
def Literal(self, *parameters):
"""Special typing form to define literal types (a.k.a. value types).
This form can be used to indicate to type checkers that the corresponding
variable or function parameter has a value equivalent to the provided
literal (or one of several literals):
def validate_simple(data: Any) -> Literal[True]: # always returns True
...
MODE = Literal['r', 'rb', 'w', 'wb']
def open_helper(file: str, mode: MODE) -> str:
...
open_helper('/some/path', 'r') # Passes type check
open_helper('/other/path', 'typo') # Error in type checker
Literal[...] cannot be subclassed. At runtime, an arbitrary value
is allowed as type argument to Literal[...], but type checkers may
impose restrictions.
"""
# There is no '_type_check' call because arguments to Literal[...] are
# values, not types.
parameters = _flatten_literal_params(parameters)
try:
parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters))))
except TypeError: # unhashable parameters
pass
return _LiteralGenericAlias(self, parameters)
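# Illustrative usage sketch (not part of the module); Mode is a hypothetical alias.
#
#     Mode = Literal['r', 'w']
#     get_args(Mode)           # ('r', 'w')
#     Literal[1, 1, 2]         # typing.Literal[1, 2] -- hashable values deduplicated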
@_SpecialForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1])
return _ConcatenateGenericAlias(self, parameters)
@_SpecialForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
    Strict type narrowing is not enforced -- in a guard declared as
    ``def foo(arg: TypeA) -> TypeGuard[TypeB]``, ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
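# Illustrative usage sketch (not part of the module), following the PEP 647 pattern;
# the function name below is hypothetical.
#
#     def is_str_list(val: list[object]) -> TypeGuard[list[str]]:
#         return all(isinstance(x, str) for x in val)
#
#     # Inside "if is_str_list(items): ...", a static checker narrows the type
#     # of items from list[object] to list[str].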
class ForwardRef(_Final, _root=True):
"""Internal wrapper to hold a forward reference."""
__slots__ = ('__forward_arg__', '__forward_code__',
'__forward_evaluated__', '__forward_value__',
'__forward_is_argument__', '__forward_is_class__',
'__forward_module__')
def __init__(self, arg, is_argument=True, module=None, *, is_class=False):
if not isinstance(arg, str):
raise TypeError(f"Forward reference must be a string -- got {arg!r}")
try:
code = compile(arg, '<string>', 'eval')
except SyntaxError:
raise SyntaxError(f"Forward reference must be an expression -- got {arg!r}")
self.__forward_arg__ = arg
self.__forward_code__ = code
self.__forward_evaluated__ = False
self.__forward_value__ = None
self.__forward_is_argument__ = is_argument
self.__forward_is_class__ = is_class
self.__forward_module__ = module
def _evaluate(self, globalns, localns, recursive_guard):
if self.__forward_arg__ in recursive_guard:
return self
if not self.__forward_evaluated__ or localns is not globalns:
if globalns is None and localns is None:
globalns = localns = {}
elif globalns is None:
globalns = localns
elif localns is None:
localns = globalns
if self.__forward_module__ is not None:
globalns = getattr(
sys.modules.get(self.__forward_module__, None), '__dict__', globalns
)
type_ = _type_check(
eval(self.__forward_code__, globalns, localns),
"Forward references must evaluate to types.",
is_argument=self.__forward_is_argument__,
allow_special_forms=self.__forward_is_class__,
)
self.__forward_value__ = _eval_type(
type_, globalns, localns, recursive_guard | {self.__forward_arg__}
)
self.__forward_evaluated__ = True
return self.__forward_value__
def __eq__(self, other):
if not isinstance(other, ForwardRef):
return NotImplemented
if self.__forward_evaluated__ and other.__forward_evaluated__:
return (self.__forward_arg__ == other.__forward_arg__ and
self.__forward_value__ == other.__forward_value__)
return self.__forward_arg__ == other.__forward_arg__
def __hash__(self):
return hash(self.__forward_arg__)
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
def __repr__(self):
return f'ForwardRef({self.__forward_arg__!r})'
class _TypeVarLike:
"""Mixin for TypeVar-like types (TypeVar and ParamSpec)."""
    def __init__(self, bound, covariant, contravariant):
        """Used to set up TypeVars and ParamSpec's bound, covariant and
contravariant attributes.
"""
if covariant and contravariant:
raise ValueError("Bivariant types are not supported.")
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if bound:
self.__bound__ = _type_check(bound, "Bound must be a type.")
else:
self.__bound__ = None
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __reduce__(self):
return self.__name__
class TypeVar(_Final, _Immutable, _TypeVarLike, _root=True):
"""Type variable.
Usage::
T = TypeVar('T') # Can be anything
A = TypeVar('A', str, bytes) # Must be str or bytes
Type variables exist primarily for the benefit of static type
checkers. They serve as the parameters for generic types as well
as for generic function definitions. See class Generic for more
information on generic types. Generic functions work as follows:
def repeat(x: T, n: int) -> List[T]:
'''Return a list containing n references to x.'''
return [x]*n
def longest(x: A, y: A) -> A:
'''Return the longest of two strings.'''
return x if len(x) >= len(y) else y
The latter example's signature is essentially the overloading
of (str, str) -> str and (bytes, bytes) -> bytes. Also note
that if the arguments are instances of some subclass of str,
the return type is still plain str.
At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError.
Type variables defined with covariant=True or contravariant=True
can be used to declare covariant or contravariant generic types.
See PEP 484 for more details. By default generic types are invariant
in all type variables.
Type variables can be introspected. e.g.:
T.__name__ == 'T'
T.__constraints__ == ()
T.__covariant__ == False
      T.__contravariant__ == False
A.__constraints__ == (str, bytes)
Note that only type variables defined in global scope can be pickled.
"""
def __init__(self, name, *constraints, bound=None,
covariant=False, contravariant=False):
self.__name__ = name
super().__init__(bound, covariant, contravariant)
if constraints and bound is not None:
raise TypeError("Constraints cannot be combined with bound=...")
if constraints and len(constraints) == 1:
raise TypeError("A single constraint is not allowed")
msg = "TypeVar(name, constraint, ...): constraints must be types."
self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
class ParamSpecArgs(_Final, _Immutable, _root=True):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
class ParamSpecKwargs(_Final, _Immutable, _root=True):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
class ParamSpec(_Final, _Immutable, _TypeVarLike, _root=True):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
or as the first argument to ``Callable``, or as parameters for user-defined
Generics. See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
      P.__name__ == 'P'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
self.__name__ = name
super().__init__(bound, covariant, contravariant)
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
def _is_dunder(attr):
return attr.startswith('__') and attr.endswith('__')
class _BaseGenericAlias(_Final, _root=True):
"""The central part of internal API.
This represents a generic version of type 'origin' with type arguments 'params'.
    There are two kinds of these aliases: user defined and special. The special ones
    are wrappers around builtin collections and ABCs in collections.abc. These must
    have 'name' always set. If 'inst' is False, then the alias can't be instantiated;
    this is used by e.g. typing.List and typing.Dict.
"""
def __init__(self, origin, *, inst=True, name=None):
self._inst = inst
self._name = name
self.__origin__ = origin
self.__slots__ = None # This is not documented.
def __call__(self, *args, **kwargs):
if not self._inst:
raise TypeError(f"Type {self._name} cannot be instantiated; "
f"use {self.__origin__.__name__}() instead")
result = self.__origin__(*args, **kwargs)
try:
result.__orig_class__ = self
except AttributeError:
pass
return result
def __mro_entries__(self, bases):
res = []
if self.__origin__ not in bases:
res.append(self.__origin__)
i = bases.index(self)
for b in bases[i+1:]:
if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):
break
else:
res.append(Generic)
return tuple(res)
def __getattr__(self, attr):
if attr in {'__name__', '__qualname__'}:
return self._name or self.__origin__.__name__
# We are careful for copy and pickle.
# Also for simplicity we just don't relay all dunder names
if '__origin__' in self.__dict__ and not _is_dunder(attr):
return getattr(self.__origin__, attr)
raise AttributeError(attr)
def __setattr__(self, attr, val):
if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams',
'_typevar_types', '_paramspec_tvars'}:
super().__setattr__(attr, val)
else:
setattr(self.__origin__, attr, val)
def __instancecheck__(self, obj):
return self.__subclasscheck__(type(obj))
def __subclasscheck__(self, cls):
raise TypeError("Subscripted generics cannot be used with"
" class and instance checks")
def __dir__(self):
return list(set(super().__dir__()
+ [attr for attr in dir(self.__origin__) if not _is_dunder(attr)]))
# Special typing constructs Union, Optional, Generic, Callable and Tuple
# use three special attributes for internal bookkeeping of generic types:
# * __parameters__ is a tuple of unique free type parameters of a generic
# type, for example, Dict[T, T].__parameters__ == (T,);
# * __origin__ keeps a reference to a type that was subscripted,
# e.g., Union[T, int].__origin__ == Union, or the non-generic version of
# the type.
# * __args__ is a tuple of all arguments used in subscripting,
# e.g., Dict[T, int].__args__ == (T, int).
class _GenericAlias(_BaseGenericAlias, _root=True):
# The type of parameterized generics.
#
# That is, for example, `type(List[int])` is `_GenericAlias`.
#
# Objects which are instances of this class include:
# * Parameterized container types, e.g. `Tuple[int]`, `List[int]`.
# * Note that native container types, e.g. `tuple`, `list`, use
# `types.GenericAlias` instead.
# * Parameterized classes:
# T = TypeVar('T')
# class C(Generic[T]): pass
# # C[int] is a _GenericAlias
# * `Callable` aliases, generic `Callable` aliases, and
# parameterized `Callable` aliases:
# T = TypeVar('T')
# # _CallableGenericAlias inherits from _GenericAlias.
# A = Callable[[], None] # _CallableGenericAlias
# B = Callable[[T], None] # _CallableGenericAlias
# C = B[int] # _CallableGenericAlias
# * Parameterized `Final`, `ClassVar` and `TypeGuard`:
# # All _GenericAlias
# Final[int]
# ClassVar[float]
    #      TypeGuard[bool]
def __init__(self, origin, args, *, inst=True, name=None,
_typevar_types=TypeVar,
_paramspec_tvars=False):
super().__init__(origin, inst=inst, name=name)
if not isinstance(args, tuple):
args = (args,)
self.__args__ = tuple(... if a is _TypingEllipsis else
() if a is _TypingEmpty else
a for a in args)
self.__parameters__ = _collect_type_vars(args, typevar_types=_typevar_types)
self._typevar_types = _typevar_types
self._paramspec_tvars = _paramspec_tvars
if not name:
self.__module__ = origin.__module__
def __eq__(self, other):
if not isinstance(other, _GenericAlias):
return NotImplemented
return (self.__origin__ == other.__origin__
and self.__args__ == other.__args__)
def __hash__(self):
return hash((self.__origin__, self.__args__))
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
@_tp_cache
def __getitem__(self, args):
# Parameterizes an already-parameterized object.
#
# For example, we arrive here doing something like:
# T1 = TypeVar('T1')
# T2 = TypeVar('T2')
# T3 = TypeVar('T3')
# class A(Generic[T1]): pass
# B = A[T2] # B is a _GenericAlias
# C = B[T3] # Invokes _GenericAlias.__getitem__
#
# We also arrive here when parameterizing a generic `Callable` alias:
# T = TypeVar('T')
# C = Callable[[T], None]
# C[int] # Invokes _GenericAlias.__getitem__
if self.__origin__ in (Generic, Protocol):
# Can't subscript Generic[...] or Protocol[...].
raise TypeError(f"Cannot subscript already-subscripted {self}")
# Preprocess `args`.
if not isinstance(args, tuple):
args = (args,)
args = tuple(_type_convert(p) for p in args)
if (self._paramspec_tvars
and any(isinstance(t, ParamSpec) for t in self.__parameters__)):
args = _prepare_paramspec_params(self, args)
else:
_check_generic(self, args, len(self.__parameters__))
new_args = self._determine_new_args(args)
r = self.copy_with(new_args)
return r
def _determine_new_args(self, args):
# Determines new __args__ for __getitem__.
#
# For example, suppose we had:
# T1 = TypeVar('T1')
# T2 = TypeVar('T2')
# class A(Generic[T1, T2]): pass
# T3 = TypeVar('T3')
# B = A[int, T3]
# C = B[str]
# `B.__args__` is `(int, T3)`, so `C.__args__` should be `(int, str)`.
# Unfortunately, this is harder than it looks, because if `T3` is
# anything more exotic than a plain `TypeVar`, we need to consider
# edge cases.
# In the example above, this would be {T3: str}
new_arg_by_param = dict(zip(self.__parameters__, args))
new_args = []
for old_arg in self.__args__:
if isinstance(old_arg, ParamSpec):
new_arg = new_arg_by_param[old_arg]
if not _is_param_expr(new_arg):
raise TypeError(f"Expected a list of types, an ellipsis, "
f"ParamSpec, or Concatenate. Got {new_arg}")
elif isinstance(old_arg, self._typevar_types):
new_arg = new_arg_by_param[old_arg]
elif isinstance(old_arg, (_GenericAlias, GenericAlias, types.UnionType)):
subparams = old_arg.__parameters__
if not subparams:
new_arg = old_arg
else:
subargs = tuple(new_arg_by_param[x] for x in subparams)
new_arg = old_arg[subargs]
else:
new_arg = old_arg
if self.__origin__ == collections.abc.Callable and isinstance(new_arg, tuple):
# Consider the following `Callable`.
# C = Callable[[int], str]
# Here, `C.__args__` should be (int, str) - NOT ([int], str).
# That means that if we had something like...
# P = ParamSpec('P')
# T = TypeVar('T')
# C = Callable[P, T]
# D = C[[int, str], float]
# ...we need to be careful; `new_args` should end up as
# `(int, str, float)` rather than `([int, str], float)`.
new_args.extend(new_arg)
else:
new_args.append(new_arg)
return tuple(new_args)
def copy_with(self, args):
return self.__class__(self.__origin__, args, name=self._name, inst=self._inst)
def __repr__(self):
if self._name:
name = 'typing.' + self._name
else:
name = _type_repr(self.__origin__)
args = ", ".join([_type_repr(a) for a in self.__args__])
return f'{name}[{args}]'
def __reduce__(self):
if self._name:
origin = globals()[self._name]
else:
origin = self.__origin__
args = tuple(self.__args__)
if len(args) == 1 and not isinstance(args[0], tuple):
args, = args
return operator.getitem, (origin, args)
def __mro_entries__(self, bases):
if isinstance(self.__origin__, _SpecialForm):
raise TypeError(f"Cannot subclass {self!r}")
if self._name: # generic version of an ABC or built-in class
return super().__mro_entries__(bases)
if self.__origin__ is Generic:
if Protocol in bases:
return ()
i = bases.index(self)
for b in bases[i+1:]:
if isinstance(b, _BaseGenericAlias) and b is not self:
return ()
return (self.__origin__,)
# _nparams is the number of accepted parameters, e.g. 0 for Hashable,
# 1 for List and 2 for Dict. It may be -1 if variable number of
# parameters are accepted (needs custom __getitem__).
class _SpecialGenericAlias(_BaseGenericAlias, _root=True):
def __init__(self, origin, nparams, *, inst=True, name=None):
if name is None:
name = origin.__name__
super().__init__(origin, inst=inst, name=name)
self._nparams = nparams
if origin.__module__ == 'builtins':
self.__doc__ = f'A generic version of {origin.__qualname__}.'
else:
self.__doc__ = f'A generic version of {origin.__module__}.{origin.__qualname__}.'
@_tp_cache
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
_check_generic(self, params, self._nparams)
return self.copy_with(params)
def copy_with(self, params):
return _GenericAlias(self.__origin__, params,
name=self._name, inst=self._inst)
def __repr__(self):
return 'typing.' + self._name
def __subclasscheck__(self, cls):
if isinstance(cls, _SpecialGenericAlias):
return issubclass(cls.__origin__, self.__origin__)
if not isinstance(cls, _GenericAlias):
return issubclass(cls, self.__origin__)
return super().__subclasscheck__(cls)
def __reduce__(self):
return self._name
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
class _CallableGenericAlias(_GenericAlias, _root=True):
def __repr__(self):
assert self._name == 'Callable'
args = self.__args__
if len(args) == 2 and _is_param_expr(args[0]):
return super().__repr__()
return (f'typing.Callable'
f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], '
f'{_type_repr(args[-1])}]')
def __reduce__(self):
args = self.__args__
if not (len(args) == 2 and _is_param_expr(args[0])):
args = list(args[:-1]), args[-1]
return operator.getitem, (Callable, args)
class _CallableType(_SpecialGenericAlias, _root=True):
def copy_with(self, params):
return _CallableGenericAlias(self.__origin__, params,
name=self._name, inst=self._inst,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
def __getitem__(self, params):
if not isinstance(params, tuple) or len(params) != 2:
raise TypeError("Callable must be used as "
"Callable[[arg, ...], result].")
args, result = params
# This relaxes what args can be on purpose to allow things like
# PEP 612 ParamSpec. Responsibility for whether a user is using
# Callable[...] properly is deferred to static type checkers.
if isinstance(args, list):
params = (tuple(args), result)
else:
params = (args, result)
return self.__getitem_inner__(params)
@_tp_cache
def __getitem_inner__(self, params):
args, result = params
msg = "Callable[args, result]: result must be a type."
result = _type_check(result, msg)
if args is Ellipsis:
return self.copy_with((_TypingEllipsis, result))
if not isinstance(args, tuple):
args = (args,)
args = tuple(_type_convert(arg) for arg in args)
params = args + (result,)
return self.copy_with(params)
class _TupleType(_SpecialGenericAlias, _root=True):
@_tp_cache
def __getitem__(self, params):
if params == ():
return self.copy_with((_TypingEmpty,))
if not isinstance(params, tuple):
params = (params,)
if len(params) == 2 and params[1] is ...:
msg = "Tuple[t, ...]: t must be a type."
p = _type_check(params[0], msg)
return self.copy_with((p, _TypingEllipsis))
msg = "Tuple[t0, t1, ...]: each t must be a type."
params = tuple(_type_check(p, msg) for p in params)
return self.copy_with(params)
class _UnionGenericAlias(_GenericAlias, _root=True):
def copy_with(self, params):
return Union[params]
def __eq__(self, other):
if not isinstance(other, (_UnionGenericAlias, types.UnionType)):
return NotImplemented
return set(self.__args__) == set(other.__args__)
def __hash__(self):
return hash(frozenset(self.__args__))
def __repr__(self):
args = self.__args__
if len(args) == 2:
if args[0] is type(None):
return f'typing.Optional[{_type_repr(args[1])}]'
elif args[1] is type(None):
return f'typing.Optional[{_type_repr(args[0])}]'
return super().__repr__()
def __instancecheck__(self, obj):
return self.__subclasscheck__(type(obj))
def __subclasscheck__(self, cls):
for arg in self.__args__:
if issubclass(cls, arg):
return True
def __reduce__(self):
func, (origin, args) = super().__reduce__()
return func, (Union, args)
def _value_and_type_iter(parameters):
return ((p, type(p)) for p in parameters)
class _LiteralGenericAlias(_GenericAlias, _root=True):
def __eq__(self, other):
if not isinstance(other, _LiteralGenericAlias):
return NotImplemented
return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__))
def __hash__(self):
return hash(frozenset(_value_and_type_iter(self.__args__)))
class _ConcatenateGenericAlias(_GenericAlias, _root=True):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
def copy_with(self, params):
if isinstance(params[-1], (list, tuple)):
return (*params[:-1], *params[-1])
if isinstance(params[-1], _ConcatenateGenericAlias):
params = (*params[:-1], *params[-1].__args__)
elif not isinstance(params[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
return super().copy_with(params)
class Generic:
"""Abstract base class for generic types.
A generic type is typically declared by inheriting from
this class parameterized with one or more type variables.
For example, a generic mapping type might be defined as::
class Mapping(Generic[KT, VT]):
def __getitem__(self, key: KT) -> VT:
...
# Etc.
This class can then be used as follows::
def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
try:
return mapping[key]
except KeyError:
return default
"""
__slots__ = ()
_is_protocol = False
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not Tuple:
raise TypeError(
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
params = tuple(_type_convert(p) for p in params)
if cls in (Generic, Protocol):
# Generic and Protocol can only be subscripted with unique type variables.
if not all(isinstance(p, (TypeVar, ParamSpec)) for p in params):
raise TypeError(
f"Parameters to {cls.__name__}[...] must all be type variables "
f"or parameter specification variables.")
if len(set(params)) != len(params):
raise TypeError(
f"Parameters to {cls.__name__}[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
if any(isinstance(t, ParamSpec) for t in cls.__parameters__):
params = _prepare_paramspec_params(cls, params)
else:
_check_generic(cls, params, len(cls.__parameters__))
return _GenericAlias(cls, params,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
tvars = []
if '__orig_bases__' in cls.__dict__:
error = Generic in cls.__orig_bases__
else:
error = Generic in cls.__bases__ and cls.__name__ != 'Protocol'
if error:
raise TypeError("Cannot inherit from plain Generic")
if '__orig_bases__' in cls.__dict__:
tvars = _collect_type_vars(cls.__orig_bases__, (TypeVar, ParamSpec))
# Look for Generic[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, _GenericAlias) and
base.__origin__ is Generic):
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...] multiple types.")
gvars = base.__parameters__
if gvars is not None:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in Generic[{s_args}]")
tvars = gvars
cls.__parameters__ = tuple(tvars)
class _TypingEmpty:
"""Internal placeholder for () or []. Used by TupleMeta and CallableMeta
to allow empty list/tuple in specific places, without allowing them
to sneak in where prohibited.
"""
class _TypingEllipsis:
"""Internal placeholder for ... (ellipsis)."""
_TYPING_INTERNALS = ['__parameters__', '__orig_bases__', '__orig_class__',
'_is_protocol', '_is_runtime_protocol']
_SPECIAL_NAMES = ['__abstractmethods__', '__annotations__', '__dict__', '__doc__',
'__init__', '__module__', '__new__', '__slots__',
'__subclasshook__', '__weakref__', '__class_getitem__']
# These special attributes will be not collected as protocol members.
EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS + _SPECIAL_NAMES + ['_MutableMapping__marker']
def _get_protocol_attrs(cls):
"""Collect protocol members from a protocol class objects.
This includes names actually defined in the class dictionary, as well
as names that appear in annotations. Special names (above) are skipped.
"""
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if not attr.startswith('_abc_') and attr not in EXCLUDED_ATTRIBUTES:
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
# PEP 544 prohibits using issubclass() with protocols that have non-method members.
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
def _no_init_or_replace_init(self, *args, **kwargs):
cls = type(self)
if cls._is_protocol:
raise TypeError('Protocols cannot be instantiated')
# Already using a custom `__init__`. No need to calculate correct
# `__init__` to call. This can lead to RecursionError. See bpo-45121.
if cls.__init__ is not _no_init_or_replace_init:
return
# Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`.
# The first instantiation of the subclass will call `_no_init_or_replace_init` which
# searches for a proper new `__init__` in the MRO. The new `__init__`
# replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent
# instantiation of the protocol subclass will thus use the new
# `__init__` and no longer call `_no_init_or_replace_init`.
for base in cls.__mro__:
init = base.__dict__.get('__init__', _no_init_or_replace_init)
if init is not _no_init_or_replace_init:
cls.__init__ = init
break
else:
# should not happen
cls.__init__ = object.__init__
cls.__init__(self, *args, **kwargs)
def _caller(depth=1, default='__main__'):
try:
return sys._getframe(depth + 1).f_globals.get('__name__', default)
except (AttributeError, ValueError): # For platforms without _getframe()
return None
def _allow_reckless_class_checks(depth=3):
"""Allow instance and class checks for special stdlib modules.
The abc and functools modules indiscriminately call isinstance() and
issubclass() on the whole MRO of a user class, which may contain protocols.
"""
return _caller(depth) in {'abc', 'functools', None}
_PROTO_ALLOWLIST = {
'collections.abc': [
'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
],
'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
}
class _ProtocolMeta(ABCMeta):
# This metaclass is really unfortunate and exists only because of
# the lack of __instancehook__.
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if (
getattr(cls, '_is_protocol', False) and
not getattr(cls, '_is_runtime_protocol', False) and
not _allow_reckless_class_checks(depth=2)
):
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
if ((not getattr(cls, '_is_protocol', False) or
_is_callable_members_only(cls)) and
issubclass(instance.__class__, cls)):
return True
if cls._is_protocol:
if all(hasattr(instance, attr) and
# All *methods* can be blocked by setting them to None.
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(cls)):
return True
return super().__instancecheck__(instance)
class Protocol(Generic, metaclass=_ProtocolMeta):
"""Base class for protocol classes.
Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
@typing.runtime_checkable act as simple-minded runtime protocols that check
only the presence of given attributes, ignoring their type signatures.
Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
_is_runtime_protocol = False
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', False):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', False):
return NotImplemented
# First, perform various sanity checks.
if not getattr(cls, '_is_runtime_protocol', False):
if _allow_reckless_class_checks():
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
if not _is_callable_members_only(cls):
if _allow_reckless_class_checks():
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error message as for issubclass(1, int).
raise TypeError('issubclass() arg 1 must be a class')
# Second, perform the actual structural compatibility check.
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
                    # Check if the member appears in the class dictionary...
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
# ...or in annotations, if it is a sub-protocol.
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, collections.abc.Mapping) and
attr in annotations and
issubclass(other, Generic) and other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols...
if not cls._is_protocol:
return
# ... otherwise check consistency of bases, and prohibit instantiation.
for base in cls.__bases__:
if not (base in (object, Generic) or
base.__module__ in _PROTO_ALLOWLIST and
base.__name__ in _PROTO_ALLOWLIST[base.__module__] or
issubclass(base, Generic) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
' protocols, got %r' % base)
cls.__init__ = _no_init_or_replace_init
class _AnnotatedAlias(_GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
    instantiating is the same as instantiating the underlying type, and binding
    it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return "typing.Annotated[{}, {}]".format(
_type_repr(self.__origin__),
", ".join(repr(a) for a in self.__metadata__)
)
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__,) + self.__metadata__
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
return (self.__origin__ == other.__origin__
and self.__metadata__ == other.__metadata__)
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
def __getattr__(self, attr):
if attr in {'__name__', '__qualname__'}:
return 'Annotated'
return super().__getattr__(attr)
class Annotated:
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise TypeError("Type Annotated cannot be instantiated.")
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be used "
"with at least two arguments (a type and an "
"annotation).")
msg = "Annotated[t, ...]: t must be a type."
origin = _type_check(params[0], msg, allow_special_forms=True)
metadata = tuple(params[1:])
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
"Cannot subclass {}.Annotated".format(cls.__module__)
)
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol.
Such protocol can be used with isinstance() and issubclass().
Raise TypeError if applied to a non-protocol class.
This allows a simple-minded structural check very similar to
one trick ponies in collections.abc such as Iterable.
For example::
@runtime_checkable
class Closable(Protocol):
def close(self): ...
assert isinstance(open('/some/file'), Closable)
Warning: this will check only the presence of the required methods,
not their type signatures!
"""
if not issubclass(cls, Generic) or not cls._is_protocol:
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
' got %r' % cls)
cls._is_runtime_protocol = True
return cls
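# Illustrative sketch, not part of the original module: a never-called helper
# (its name is made up) showing that a runtime_checkable protocol only tests
# for the *presence* of the required methods, not their signatures.
def _example_runtime_checkable_is_structural():
    @runtime_checkable
    class _Closable(Protocol):
        def close(self) -> None: ...
    class _Wrong:
        # Different signature and return type, yet still "Closable" at runtime.
        def close(self, force, retries):
            return "closed"
    return isinstance(_Wrong(), _Closable)  # True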
def cast(typ, val):
"""Cast a value to a type.
This returns the value unchanged. To the type checker this
signals that the return value has the designated type, but at
runtime we intentionally don't check anything (we want this
to be as fast as possible).
"""
return val
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
try:
code = func.__code__
except AttributeError:
# Some built-in functions don't have __code__, __defaults__, etc.
return {}
pos_count = code.co_argcount
arg_names = code.co_varnames
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
_allowed_types = (types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.ModuleType,
WrapperDescriptorType, MethodWrapperType, MethodDescriptorType)
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used. For classes, the search
order is globals first then locals.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if getattr(obj, '__no_type_check__', None):
return {}
# Classes require a special treatment.
if isinstance(obj, type):
hints = {}
for base in reversed(obj.__mro__):
if globalns is None:
base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
else:
base_globals = globalns
ann = base.__dict__.get('__annotations__', {})
if isinstance(ann, types.GetSetDescriptorType):
ann = {}
base_locals = dict(vars(base)) if localns is None else localns
if localns is None and globalns is None:
# This is surprising, but required. Before Python 3.10,
# get_type_hints only evaluated the globalns of
# a class. To maintain backwards compatibility, we reverse
# the globalns and localns order so that eval() looks into
# *base_globals* first rather than *base_locals*.
# This only affects ForwardRefs.
base_globals, base_locals = base_locals, base_globals
for name, value in ann.items():
if value is None:
value = type(None)
if isinstance(value, str):
value = ForwardRef(value, is_argument=False, is_class=True)
value = _eval_type(value, base_globals, base_locals)
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
if globalns is None:
if isinstance(obj, types.ModuleType):
globalns = obj.__dict__
else:
nsobj = obj
# Find globalns for the unwrapped object.
while hasattr(nsobj, '__wrapped__'):
nsobj = nsobj.__wrapped__
globalns = getattr(nsobj, '__globals__', {})
if localns is None:
localns = globalns
elif localns is None:
localns = globalns
hints = getattr(obj, '__annotations__', None)
if hints is None:
# Return empty annotations for something that _could_ have them.
if isinstance(obj, _allowed_types):
return {}
else:
raise TypeError('{!r} is not a module, class, method, '
'or function.'.format(obj))
defaults = _get_defaults(obj)
hints = dict(hints)
for name, value in hints.items():
if value is None:
value = type(None)
if isinstance(value, str):
# class-level forward refs were handled above, this must be either
# a module-level annotation or a function argument annotation
value = ForwardRef(
value,
is_argument=not isinstance(obj, types.ModuleType),
is_class=False,
)
value = _eval_type(value, globalns, localns)
if name in defaults and defaults[name] is None:
value = Optional[value]
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
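# Illustrative sketch, not part of the original module (the helper name is made
# up): when called, it shows get_type_hints() resolving a string forward
# reference and wrapping a None-defaulted argument in Optional.
def _example_get_type_hints():
    def greet(name: "str", times: int = None) -> str:
        return name * (times or 1)
    # Expected: {'name': str, 'times': Optional[int], 'return': str}
    return get_type_hints(greet)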
def _strip_annotations(t):
"""Strips the annotations from a given type.
"""
if isinstance(t, _AnnotatedAlias):
return _strip_annotations(t.__origin__)
if isinstance(t, _GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return t.copy_with(stripped_args)
if isinstance(t, GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return GenericAlias(t.__origin__, stripped_args)
if isinstance(t, types.UnionType):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return functools.reduce(operator.or_, stripped_args)
return t
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (_BaseGenericAlias, GenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is Generic:
return Generic
if isinstance(tp, types.UnionType):
return types.UnionType
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (_GenericAlias, GenericAlias)):
res = tp.__args__
if (tp.__origin__ is collections.abc.Callable
and not (len(res) == 2 and _is_param_expr(res[0]))):
res = (list(res[:-1]), res[-1])
return res
if isinstance(tp, types.UnionType):
return tp.__args__
return ()
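# Illustrative sketch, not part of the original module: a never-called helper
# (made-up name) exercising get_origin()/get_args(). Dict is defined further
# down in this module, so this helper should only be called after the module
# has been fully imported.
def _example_get_origin_and_args():
    tp = Dict[str, int]
    # Expected: (dict, (str, int), typing.Union, (int, str))
    return get_origin(tp), get_args(tp), get_origin(Union[int, str]), get_args(Union[int, str])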
def is_typeddict(tp):
"""Check if an annotation is a TypedDict class
For example::
class Film(TypedDict):
title: str
year: int
is_typeddict(Film) # => True
is_typeddict(Union[list, str]) # => False
"""
return isinstance(tp, _TypedDictMeta)
def no_type_check(arg):
"""Decorator to indicate that annotations are not type hints.
The argument must be a class or function; if it is a class, it
applies recursively to all methods and classes defined in that class
(but not to methods defined in its superclasses or subclasses).
This mutates the function(s) or class(es) in place.
"""
if isinstance(arg, type):
arg_attrs = arg.__dict__.copy()
for attr, val in arg.__dict__.items():
if val in arg.__bases__ + (arg,):
arg_attrs.pop(attr)
for obj in arg_attrs.values():
if isinstance(obj, types.FunctionType):
obj.__no_type_check__ = True
if isinstance(obj, type):
no_type_check(obj)
try:
arg.__no_type_check__ = True
except TypeError: # built-in classes
pass
return arg
def no_type_check_decorator(decorator):
"""Decorator to give another decorator the @no_type_check effect.
This wraps the decorator with something that wraps the decorated
function in @no_type_check.
"""
@functools.wraps(decorator)
def wrapped_decorator(*args, **kwds):
func = decorator(*args, **kwds)
func = no_type_check(func)
return func
return wrapped_decorator
def _overload_dummy(*args, **kwds):
"""Helper for @overload to raise when called."""
raise NotImplementedError(
"You should not call an overloaded function. "
"A series of @overload-decorated functions "
"outside a stub module should always be followed "
"by an implementation that is not @overload-ed.")
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
"""
return _overload_dummy
def final(f):
"""A decorator to indicate final methods and final classes.
Use this decorator to indicate to type checkers that the decorated
method cannot be overridden, and decorated class cannot be subclassed.
For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties. The decorator
sets the ``__final__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
"""
try:
f.__final__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return f
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = TypeVar('T') # Any type.
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
T_co = TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant.
# Internal type variable used for Type[].
CT_co = TypeVar('CT_co', covariant=True, bound=type)
# A useful type variable with constraints. This represents string types.
# (This one *is* for export!)
AnyStr = TypeVar('AnyStr', bytes, str)
# Various ABCs mimicking those in collections.abc.
_alias = _SpecialGenericAlias
Hashable = _alias(collections.abc.Hashable, 0) # Not generic.
Awaitable = _alias(collections.abc.Awaitable, 1)
Coroutine = _alias(collections.abc.Coroutine, 3)
AsyncIterable = _alias(collections.abc.AsyncIterable, 1)
AsyncIterator = _alias(collections.abc.AsyncIterator, 1)
Iterable = _alias(collections.abc.Iterable, 1)
Iterator = _alias(collections.abc.Iterator, 1)
Reversible = _alias(collections.abc.Reversible, 1)
Sized = _alias(collections.abc.Sized, 0) # Not generic.
Container = _alias(collections.abc.Container, 1)
Collection = _alias(collections.abc.Collection, 1)
Callable = _CallableType(collections.abc.Callable, 2)
Callable.__doc__ = \
"""Callable type; Callable[[int], str] is a function of (int) -> str.
The subscription syntax must always be used with exactly two
values: the argument list and the return type. The argument list
must be a list of types or ellipsis; the return type must be a single type.
There is no syntax to indicate optional or keyword arguments,
such function types are rarely used as callback types.
"""
AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet')
MutableSet = _alias(collections.abc.MutableSet, 1)
# NOTE: Mapping is only covariant in the value type.
Mapping = _alias(collections.abc.Mapping, 2)
MutableMapping = _alias(collections.abc.MutableMapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
MutableSequence = _alias(collections.abc.MutableSequence, 1)
ByteString = _alias(collections.abc.ByteString, 0) # Not generic
# Tuple accepts variable number of parameters.
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
"""
List = _alias(list, 1, inst=False, name='List')
Deque = _alias(collections.deque, 1, name='Deque')
Set = _alias(set, 1, inst=False, name='Set')
FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet')
MappingView = _alias(collections.abc.MappingView, 1)
KeysView = _alias(collections.abc.KeysView, 1)
ItemsView = _alias(collections.abc.ItemsView, 2)
ValuesView = _alias(collections.abc.ValuesView, 1)
ContextManager = _alias(contextlib.AbstractContextManager, 1, name='ContextManager')
AsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, 1, name='AsyncContextManager')
Dict = _alias(dict, 2, inst=False, name='Dict')
DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict')
OrderedDict = _alias(collections.OrderedDict, 2)
Counter = _alias(collections.Counter, 1)
ChainMap = _alias(collections.ChainMap, 2)
Generator = _alias(collections.abc.Generator, 3)
AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)
Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
@runtime_checkable
class SupportsInt(Protocol):
"""An ABC with one abstract method __int__."""
__slots__ = ()
@abstractmethod
def __int__(self) -> int:
pass
@runtime_checkable
class SupportsFloat(Protocol):
"""An ABC with one abstract method __float__."""
__slots__ = ()
@abstractmethod
def __float__(self) -> float:
pass
@runtime_checkable
class SupportsComplex(Protocol):
"""An ABC with one abstract method __complex__."""
__slots__ = ()
@abstractmethod
def __complex__(self) -> complex:
pass
@runtime_checkable
class SupportsBytes(Protocol):
"""An ABC with one abstract method __bytes__."""
__slots__ = ()
@abstractmethod
def __bytes__(self) -> bytes:
pass
@runtime_checkable
class SupportsIndex(Protocol):
"""An ABC with one abstract method __index__."""
__slots__ = ()
@abstractmethod
def __index__(self) -> int:
pass
@runtime_checkable
class SupportsAbs(Protocol[T_co]):
"""An ABC with one abstract method __abs__ that is covariant in its return type."""
__slots__ = ()
@abstractmethod
def __abs__(self) -> T_co:
pass
@runtime_checkable
class SupportsRound(Protocol[T_co]):
"""An ABC with one abstract method __round__ that is covariant in its return type."""
__slots__ = ()
@abstractmethod
def __round__(self, ndigits: int = 0) -> T_co:
pass
def _make_nmtuple(name, types, module, defaults = ()):
fields = [n for n, t in types]
types = {n: _type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = types
return nm_tpl
# attributes prohibited to set in NamedTuple class syntax
_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__',
'_fields', '_field_defaults',
'_make', '_replace', '_asdict', '_source'})
_special = frozenset({'__module__', '__name__', '__annotations__'})
class NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
assert bases[0] is _NamedTuple
types = ns.get('__annotations__', {})
default_names = []
for field_name in types:
if field_name in ns:
default_names.append(field_name)
elif default_names:
raise TypeError(f"Non-default namedtuple field {field_name} "
f"cannot follow default field"
f"{'s' if len(default_names) > 1 else ''} "
f"{', '.join(default_names)}")
nm_tpl = _make_nmtuple(typename, types.items(),
defaults=[ns[n] for n in default_names],
module=ns['__module__'])
# update from user namespace without overriding special namedtuple attributes
for key in ns:
if key in _prohibited:
raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
elif key not in _special and key not in nm_tpl._fields:
setattr(nm_tpl, key, ns[key])
return nm_tpl
def NamedTuple(typename, fields=None, /, **kwargs):
"""Typed version of namedtuple.
Usage in Python versions >= 3.6::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has an extra __annotations__ attribute, giving a
dict that maps field names to types. (The field names are also in
the _fields attribute, which is part of the namedtuple API.)
Alternative equivalent keyword syntax is also accepted::
Employee = NamedTuple('Employee', name=str, id=int)
In Python versions <= 3.5 use::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
"""
if fields is None:
fields = kwargs.items()
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
return _make_nmtuple(typename, fields, module=_caller())
_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {})
def _namedtuple_mro_entries(bases):
if len(bases) > 1:
raise TypeError("Multiple inheritance with NamedTuple is not supported")
assert bases[0] is NamedTuple
return (_NamedTuple,)
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
class _TypedDictMeta(type):
def __new__(cls, name, bases, ns, total=True):
"""Create new typed dict class object.
This method is called when TypedDict is subclassed,
or when TypedDict is instantiated. This way
TypedDict supports all three syntax forms described in its docstring.
Subclasses and instances of TypedDict return actual dictionaries.
"""
for base in bases:
if type(base) is not _TypedDictMeta:
raise TypeError('cannot inherit from both a TypedDict type '
'and a non-TypedDict base class')
tp_dict = type.__new__(_TypedDictMeta, name, (dict,), ns)
annotations = {}
own_annotations = ns.get('__annotations__', {})
own_annotation_keys = set(own_annotations.keys())
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
own_annotations = {
n: _type_check(tp, msg, module=tp_dict.__module__)
for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
for base in bases:
annotations.update(base.__dict__.get('__annotations__', {}))
required_keys.update(base.__dict__.get('__required_keys__', ()))
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
annotations.update(own_annotations)
if total:
required_keys.update(own_annotation_keys)
else:
optional_keys.update(own_annotation_keys)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
__call__ = dict # static method
def __subclasscheck__(cls, other):
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
__instancecheck__ = __subclasscheck__
def TypedDict(typename, fields=None, /, *, total=True, **kwargs):
"""A simple typed namespace. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, where each key is
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
By default, all keys must be present in a TypedDict. It is possible
to override this by specifying totality.
Usage::
class point2D(TypedDict, total=False):
x: int
y: int
    This means that a point2D TypedDict can have any of the keys omitted. A type
checker is only expected to support a literal False or True as the value of
the total argument. True is the default, and makes all items defined in the
class body be required.
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
module = _caller()
if module is not None:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = module
return _TypedDictMeta(typename, (), ns, total=total)
_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
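# Illustrative sketch, not part of the original module (made-up helper name):
# with total=False every key declared in the class body becomes optional.
def _example_typeddict_totality():
    class _Point(TypedDict, total=False):
        x: int
    # Expected: (frozenset(), frozenset({'x'}))
    return _Point.__required_keys__, _Point.__optional_keys__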
class NewType:
"""NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy callable that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
"""
__call__ = _idfunc
def __init__(self, name, tp):
self.__qualname__ = name
if '.' in name:
name = name.rpartition('.')[-1]
self.__name__ = name
self.__supertype__ = tp
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
def __repr__(self):
return f'{self.__module__}.{self.__qualname__}'
def __reduce__(self):
return self.__qualname__
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
# Python-version-specific alias (Python 2: unicode; Python 3: str)
Text = str
# Constant that's True when type checking, but False here.
TYPE_CHECKING = False
class IO(Generic[AnyStr]):
"""Generic base class for TextIO and BinaryIO.
This is an abstract, generic version of the return of open().
NOTE: This does not distinguish between the different possible
classes (text vs. binary, read vs. write vs. read/write,
append-only, unbuffered). The TextIO and BinaryIO subclasses
below capture the distinctions between text vs. binary, which is
pervasive in the interface; however we currently do not offer a
way to track the other distinctions in the type system.
"""
__slots__ = ()
@property
@abstractmethod
def mode(self) -> str:
pass
@property
@abstractmethod
def name(self) -> str:
pass
@abstractmethod
def close(self) -> None:
pass
@property
@abstractmethod
def closed(self) -> bool:
pass
@abstractmethod
def fileno(self) -> int:
pass
@abstractmethod
def flush(self) -> None:
pass
@abstractmethod
def isatty(self) -> bool:
pass
@abstractmethod
def read(self, n: int = -1) -> AnyStr:
pass
@abstractmethod
def readable(self) -> bool:
pass
@abstractmethod
def readline(self, limit: int = -1) -> AnyStr:
pass
@abstractmethod
def readlines(self, hint: int = -1) -> List[AnyStr]:
pass
@abstractmethod
def seek(self, offset: int, whence: int = 0) -> int:
pass
@abstractmethod
def seekable(self) -> bool:
pass
@abstractmethod
def tell(self) -> int:
pass
@abstractmethod
def truncate(self, size: int = None) -> int:
pass
@abstractmethod
def writable(self) -> bool:
pass
@abstractmethod
def write(self, s: AnyStr) -> int:
pass
@abstractmethod
def writelines(self, lines: List[AnyStr]) -> None:
pass
@abstractmethod
def __enter__(self) -> 'IO[AnyStr]':
pass
@abstractmethod
def __exit__(self, type, value, traceback) -> None:
pass
class BinaryIO(IO[bytes]):
"""Typed version of the return of open() in binary mode."""
__slots__ = ()
@abstractmethod
def write(self, s: Union[bytes, bytearray]) -> int:
pass
@abstractmethod
def __enter__(self) -> 'BinaryIO':
pass
class TextIO(IO[str]):
"""Typed version of the return of open() in text mode."""
__slots__ = ()
@property
@abstractmethod
def buffer(self) -> BinaryIO:
pass
@property
@abstractmethod
def encoding(self) -> str:
pass
@property
@abstractmethod
def errors(self) -> Optional[str]:
pass
@property
@abstractmethod
def line_buffering(self) -> bool:
pass
@property
@abstractmethod
def newlines(self) -> Any:
pass
@abstractmethod
def __enter__(self) -> 'TextIO':
pass
class _DeprecatedType(type):
def __getattribute__(cls, name):
if name not in ("__dict__", "__module__") and name in cls.__dict__:
warnings.warn(
f"{cls.__name__} is deprecated, import directly "
f"from typing instead. {cls.__name__} will be removed "
"in Python 3.12.",
DeprecationWarning,
stacklevel=2,
)
return super().__getattribute__(name)
class io(metaclass=_DeprecatedType):
"""Wrapper namespace for IO generic classes."""
__all__ = ['IO', 'TextIO', 'BinaryIO']
IO = IO
TextIO = TextIO
BinaryIO = BinaryIO
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
Pattern = _alias(stdlib_re.Pattern, 1)
Match = _alias(stdlib_re.Match, 1)
class re(metaclass=_DeprecatedType):
"""Wrapper namespace for re type aliases."""
__all__ = ['Pattern', 'Match']
Pattern = Pattern
Match = Match
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
def reveal_type(obj: T, /) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr)
return obj
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import os
import re
from contextlib import contextmanager
from pathlib import Path
from git import Repo
# This script is intended to be run from the root of the repo but you can adapt this constant if you need to.
PATH_TO_TRANFORMERS = "."
@contextmanager
def checkout_commit(repo, commit_id):
"""
Context manager that checks out a commit in the repo.
"""
current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
try:
repo.git.checkout(commit_id)
yield
finally:
repo.git.checkout(current_head)
def clean_code(content):
"""
    Remove docstrings, empty lines, and comments from `content`.
"""
# fmt: off
# Remove docstrings by splitting on triple " then triple ':
splits = content.split('\"\"\"')
content = "".join(splits[::2])
splits = content.split("\'\'\'")
# fmt: on
content = "".join(splits[::2])
# Remove empty lines and comments
lines_to_keep = []
for line in content.split("\n"):
# remove anything that is after a # sign.
line = re.sub("#.*$", "", line)
if len(line) == 0 or line.isspace():
continue
lines_to_keep.append(line)
return "\n".join(lines_to_keep)
def diff_is_docstring_only(repo, branching_point, filename):
"""
Check if the diff is only in docstrings in a filename.
"""
with checkout_commit(repo, branching_point):
with open(filename, "r", encoding="utf-8") as f:
old_content = f.read()
with open(filename, "r", encoding="utf-8") as f:
new_content = f.read()
old_content_clean = clean_code(old_content)
new_content_clean = clean_code(new_content)
return old_content_clean == new_content_clean
def get_modified_python_files(diff_with_last_commit=False):
"""
Return a list of python files that have been modified between:
- the current head and the main branch if `diff_with_last_commit=False` (default)
- the current head and its parent commit otherwise.
"""
repo = Repo(PATH_TO_TRANFORMERS)
if not diff_with_last_commit:
print(f"main is at {repo.refs.main.commit}")
print(f"Current head is at {repo.head.commit}")
branching_commits = repo.merge_base(repo.refs.main, repo.head)
for commit in branching_commits:
print(f"Branching commit: {commit}")
return get_diff(repo, repo.head.commit, branching_commits)
else:
print(f"main is at {repo.head.commit}")
parent_commits = repo.head.commit.parents
for commit in parent_commits:
print(f"Parent commit: {commit}")
return get_diff(repo, repo.head.commit, parent_commits)
def get_diff(repo, base_commit, commits):
"""
    Gets the diff between one or several commits and the head of the repository.
"""
print("\n### DIFF ###\n")
code_diff = []
for commit in commits:
for diff_obj in commit.diff(base_commit):
# We always add new python files
if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"):
code_diff.append(diff_obj.b_path)
# We check that deleted python files won't break corresponding tests.
elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"):
code_diff.append(diff_obj.a_path)
# Now for modified files
elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"):
# In case of renames, we'll look at the tests using both the old and new name.
if diff_obj.a_path != diff_obj.b_path:
code_diff.extend([diff_obj.a_path, diff_obj.b_path])
else:
# Otherwise, we check modifications are in code and not docstrings.
if diff_is_docstring_only(repo, commit, diff_obj.b_path):
print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.")
else:
code_diff.append(diff_obj.a_path)
return code_diff
def get_module_dependencies(module_fname):
"""
Get the dependencies of a module.
"""
with open(os.path.join(PATH_TO_TRANFORMERS, module_fname), "r", encoding="utf-8") as f:
content = f.read()
module_parts = module_fname.split(os.path.sep)
imported_modules = []
# Let's start with relative imports
relative_imports = re.findall(r"from\s+(\.+\S+)\s+import\s+([^\n]+)\n", content)
relative_imports = [mod for mod, imp in relative_imports if "# tests_ignore" not in imp]
for imp in relative_imports:
level = 0
while imp.startswith("."):
imp = imp[1:]
level += 1
if len(imp) > 0:
dep_parts = module_parts[: len(module_parts) - level] + imp.split(".")
else:
dep_parts = module_parts[: len(module_parts) - level] + ["__init__.py"]
imported_module = os.path.sep.join(dep_parts)
        # We ignore the main init import: it is only there for __version__ and
        # would add everything as a dependency.
if not imported_module.endswith("transformers/__init__.py"):
imported_modules.append(imported_module)
# Let's continue with direct imports
    # Imports from the transformers module are ignored for the same reason we ignored the
# main init before.
direct_imports = re.findall(r"from\s+transformers\.(\S+)\s+import\s+([^\n]+)\n", content)
direct_imports = [mod for mod, imp in direct_imports if "# tests_ignore" not in imp]
for imp in direct_imports:
import_parts = imp.split(".")
dep_parts = ["src", "transformers"] + import_parts
imported_modules.append(os.path.sep.join(dep_parts))
# Now let's just check that we have proper module files, or append an init for submodules
dependencies = []
for imported_module in imported_modules:
if os.path.isfile(os.path.join(PATH_TO_TRANFORMERS, f"{imported_module}.py")):
dependencies.append(f"{imported_module}.py")
elif os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, imported_module)) and os.path.isfile(
os.path.sep.join([PATH_TO_TRANFORMERS, imported_module, "__init__.py"])
):
dependencies.append(os.path.sep.join([imported_module, "__init__.py"]))
return dependencies
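# Illustrative sketch, not part of the original script (made-up helper name):
# the path arithmetic used above for a relative import such as
# `from ...utils import logging` inside src/transformers/models/bert/modeling_bert.py.
def _example_relative_import_resolution():
    module_parts = ["src", "transformers", "models", "bert", "modeling_bert.py"]
    level, imp = 3, "utils"  # three leading dots walk three levels up
    dep_parts = module_parts[: len(module_parts) - level] + imp.split(".")
    # Expected on POSIX: "src/transformers/utils" (later resolved to its __init__.py)
    return os.path.sep.join(dep_parts)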
def get_test_dependencies(test_fname):
"""
Get the dependencies of a test file.
"""
with open(os.path.join(PATH_TO_TRANFORMERS, test_fname), "r", encoding="utf-8") as f:
content = f.read()
# Tests only have relative imports for other test files
# TODO Sylvain: handle relative imports cleanly
relative_imports = re.findall(r"from\s+(\.\S+)\s+import\s+([^\n]+)\n", content)
relative_imports = [test for test, imp in relative_imports if "# tests_ignore" not in imp]
    # Removes the two leading dots ('..') for parent imports, and creates a path from the root dir with
# `tests` as a prefix.
parent_imports = [imp.strip(".") for imp in relative_imports if ".." in imp]
    parent_imports = [os.path.join("tests", f"{test.replace('.', os.path.sep)}.py") for test in parent_imports]
    # Removes the single leading '.' for current dir imports, and creates a path from the root dir with
# tests/{module_name} as a prefix.
current_dir_imports = [imp.strip(".") for imp in relative_imports if ".." not in imp]
directory = os.path.sep.join(test_fname.split(os.path.sep)[:-1])
current_dir_imports = [
os.path.join(directory, f"{test.replace(".", os.path.sep)}.py") for test in current_dir_imports
]
return [f for f in [*parent_imports, *current_dir_imports] if os.path.isfile(f)]
def create_reverse_dependency_map():
"""
Create the dependency map from module/test filename to the list of modules/tests that depend on it (even
recursively).
"""
modules = [
str(f.relative_to(PATH_TO_TRANFORMERS))
for f in (Path(PATH_TO_TRANFORMERS) / "src/transformers").glob("**/*.py")
]
# We grab all the dependencies of each module.
direct_deps = {m: get_module_dependencies(m) for m in modules}
# We add all the dependencies of each test file
tests = [str(f.relative_to(PATH_TO_TRANFORMERS)) for f in (Path(PATH_TO_TRANFORMERS) / "tests").glob("**/*.py")]
direct_deps.update({t: get_test_dependencies(t) for t in tests})
all_files = modules + tests
# This recurses the dependencies
something_changed = True
while something_changed:
something_changed = False
for m in all_files:
for d in direct_deps[m]:
if d not in direct_deps:
raise ValueError(f"KeyError:{d}. From {m}")
for dep in direct_deps[d]:
if dep not in direct_deps[m]:
direct_deps[m].append(dep)
something_changed = True
# Finally we can build the reverse map.
reverse_map = collections.defaultdict(list)
for m in all_files:
if m.endswith("__init__.py"):
reverse_map[m].extend(direct_deps[m])
for d in direct_deps[m]:
reverse_map[d].append(m)
return reverse_map
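# Illustrative sketch, not part of the original script (made-up helper name):
# a toy version of the fixpoint loop above, showing how indirect dependencies
# are folded in until nothing changes.
def _example_transitive_dependencies():
    deps = {"a": [], "b": ["a"], "c": ["b"]}
    something_changed = True
    while something_changed:
        something_changed = False
        for mod in deps:
            for direct in list(deps[mod]):
                for indirect in deps[direct]:
                    if indirect not in deps[mod]:
                        deps[mod].append(indirect)
                        something_changed = True
    return deps["c"]  # ["b", "a"]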
# Any module file whose test name can't be inferred automatically from its name should go here. A better
# approach is to (re-)name the test file accordingly, and the second-best option is to add the correspondence to this map.
SPECIAL_MODULE_TO_TEST_MAP = {
"commands/add_new_model_like.py": "utils/test_add_new_model_like.py",
"configuration_utils.py": "test_configuration_common.py",
"convert_graph_to_onnx.py": "onnx/test_onnx.py",
"data/data_collator.py": "trainer/test_data_collator.py",
"deepspeed.py": "deepspeed/",
"feature_extraction_sequence_utils.py": "test_sequence_feature_extraction_common.py",
"feature_extraction_utils.py": "test_feature_extraction_common.py",
"file_utils.py": ["utils/test_file_utils.py", "utils/test_model_output.py"],
"utils/generic.py": ["utils/test_file_utils.py", "utils/test_model_output.py"],
"utils/hub.py": "utils/test_file_utils.py",
"modelcard.py": "utils/test_model_card.py",
"modeling_flax_utils.py": "test_modeling_flax_common.py",
"modeling_tf_utils.py": ["test_modeling_tf_common.py", "utils/test_modeling_tf_core.py"],
"modeling_utils.py": ["test_modeling_common.py", "utils/test_offline.py"],
"models/auto/modeling_auto.py": [
"models/auto/test_modeling_auto.py",
"models/auto/test_modeling_tf_pytorch.py",
"models/bort/test_modeling_bort.py",
"models/dit/test_modeling_dit.py",
],
"models/auto/modeling_flax_auto.py": "models/auto/test_modeling_flax_auto.py",
"models/auto/modeling_tf_auto.py": [
"models/auto/test_modeling_tf_auto.py",
"models/auto/test_modeling_tf_pytorch.py",
"models/bort/test_modeling_tf_bort.py",
],
"models/gpt2/modeling_gpt2.py": [
"models/gpt2/test_modeling_gpt2.py",
"models/megatron_gpt2/test_modeling_megatron_gpt2.py",
],
"optimization.py": "optimization/test_optimization.py",
"optimization_tf.py": "optimization/test_optimization_tf.py",
"pipelines/base.py": "pipelines/test_pipelines_*.py",
"pipelines/text2text_generation.py": [
"pipelines/test_pipelines_text2text_generation.py",
"pipelines/test_pipelines_summarization.py",
"pipelines/test_pipelines_translation.py",
],
"pipelines/zero_shot_classification.py": "pipelines/test_pipelines_zero_shot.py",
"testing_utils.py": "utils/test_skip_decorators.py",
"tokenization_utils.py": ["test_tokenization_common.py", "tokenization/test_tokenization_utils.py"],
"tokenization_utils_base.py": ["test_tokenization_common.py", "tokenization/test_tokenization_utils.py"],
"tokenization_utils_fast.py": [
"test_tokenization_common.py",
"tokenization/test_tokenization_utils.py",
"tokenization/test_tokenization_fast.py",
],
"trainer.py": [
"trainer/test_trainer.py",
"extended/test_trainer_ext.py",
"trainer/test_trainer_distributed.py",
"trainer/test_trainer_tpu.py",
],
"train_pt_utils.py": "trainer/test_trainer_utils.py",
"utils/versions.py": "utils/test_versions_utils.py",
}
def module_to_test_file(module_fname):
"""
Returns the name of the file(s) where `module_fname` is tested.
"""
splits = module_fname.split(os.path.sep)
# Special map has priority
short_name = os.path.sep.join(splits[2:])
if short_name in SPECIAL_MODULE_TO_TEST_MAP:
test_file = SPECIAL_MODULE_TO_TEST_MAP[short_name]
if isinstance(test_file, str):
return f"tests/{test_file}"
return [f"tests/{f}" for f in test_file]
module_name = splits[-1]
# Fast tokenizers are tested in the same file as the slow ones.
if module_name.endswith("_fast.py"):
module_name = module_name.replace("_fast.py", ".py")
# Special case for pipelines submodules
if len(splits) >= 2 and splits[-2] == "pipelines":
default_test_file = f"tests/pipelines/test_pipelines_{module_name}"
# Special case for benchmarks submodules
elif len(splits) >= 2 and splits[-2] == "benchmark":
return ["tests/benchmark/test_benchmark.py", "tests/benchmark/test_benchmark_tf.py"]
# Special case for commands submodules
elif len(splits) >= 2 and splits[-2] == "commands":
return "tests/utils/test_cli.py"
# Special case for onnx submodules
elif len(splits) >= 2 and splits[-2] == "onnx":
return ["tests/onnx/test_onnx.py", "tests/onnx/test_onnx_v2.py"]
# Special case for utils (not the one in src/transformers, the ones at the root of the repo).
elif len(splits) > 0 and splits[0] == "utils":
default_test_file = f"tests/utils/test_utils_{module_name}"
elif len(splits) > 4 and splits[2] == "models":
default_test_file = f"tests/models/{splits[3]}/test_{module_name}"
elif len(splits) > 2 and splits[2].startswith("generation"):
default_test_file = f"tests/generation/test_{module_name}"
elif len(splits) > 2 and splits[2].startswith("trainer"):
default_test_file = f"tests/trainer/test_{module_name}"
else:
default_test_file = f"tests/utils/test_{module_name}"
if os.path.isfile(default_test_file):
return default_test_file
# Processing -> processor
if "processing" in default_test_file:
test_file = default_test_file.replace("processing", "processor")
if os.path.isfile(test_file):
return test_file
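# Illustrative sketch, not part of the original script (made-up helper name):
# the default mapping rule for model files, assuming POSIX paths and that the
# script is run from the root of a transformers checkout so the target test
# file actually exists.
def _example_module_to_test_file():
    # Expected: "tests/models/bert/test_modeling_bert.py"
    return module_to_test_file("src/transformers/models/bert/modeling_bert.py")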
# This list contains the test files we expect never to be launched from a change in a module/util. Those are
# launched separately.
EXPECTED_TEST_FILES_NEVER_TOUCHED = [
"tests/utils/test_doc_samples.py", # Doc tests
"tests/pipelines/test_pipelines_common.py", # Actually checked by the pipeline based file
"tests/sagemaker/test_single_node_gpu.py", # SageMaker test
"tests/sagemaker/test_multi_node_model_parallel.py", # SageMaker test
"tests/sagemaker/test_multi_node_data_parallel.py", # SageMaker test
]
def _print_list(l):
return "\n".join([f"- {f}" for f in l])
def sanity_check():
"""
Checks that all test files can be touched by a modification in at least one module/utils. This test ensures that
newly-added test files are properly mapped to some module or utils, so they can be run by the CI.
"""
# Grab all module and utils
all_files = [
str(p.relative_to(PATH_TO_TRANFORMERS))
for p in (Path(PATH_TO_TRANFORMERS) / "src/transformers").glob("**/*.py")
]
all_files += [
str(p.relative_to(PATH_TO_TRANFORMERS)) for p in (Path(PATH_TO_TRANFORMERS) / "utils").glob("**/*.py")
]
# Compute all the test files we get from those.
test_files_found = []
for f in all_files:
test_f = module_to_test_file(f)
if test_f is not None:
if isinstance(test_f, str):
test_files_found.append(test_f)
else:
test_files_found.extend(test_f)
# Some of the test files might actually be subfolders so we grab the tests inside.
test_files = []
for test_f in test_files_found:
if os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, test_f)):
test_files.extend(
[
str(p.relative_to(PATH_TO_TRANFORMERS))
for p in (Path(PATH_TO_TRANFORMERS) / test_f).glob("**/test*.py")
]
)
else:
test_files.append(test_f)
# Compare to existing test files
existing_test_files = [
str(p.relative_to(PATH_TO_TRANFORMERS)) for p in (Path(PATH_TO_TRANFORMERS) / "tests").glob("**/test*.py")
]
not_touched_test_files = [f for f in existing_test_files if f not in test_files]
should_be_tested = set(not_touched_test_files) - set(EXPECTED_TEST_FILES_NEVER_TOUCHED)
if len(should_be_tested) > 0:
raise ValueError(
"The following test files are not currently associated with any module or utils files, which means they "
f"will never get run by the CI:\n{_print_list(should_be_tested)}\n. Make sure the names of these test "
"files match the name of the module or utils they are testing, or adapt the constant "
"`SPECIAL_MODULE_TO_TEST_MAP` in `utils/tests_fetcher.py` to add them. If your test file is triggered "
"separately and is not supposed to be run by the regular CI, add it to the "
"`EXPECTED_TEST_FILES_NEVER_TOUCHED` constant instead."
)
def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None):
modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit)
print(f"\n### MODIFIED FILES ###\n{_print_list(modified_files)}")
# Create the map that will give us all impacted modules.
impacted_modules_map = create_reverse_dependency_map()
impacted_files = modified_files.copy()
for f in modified_files:
if f in impacted_modules_map:
impacted_files.extend(impacted_modules_map[f])
# Remove duplicates
impacted_files = sorted(list(set(impacted_files)))
print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}")
# Grab the corresponding test files:
if "setup.py" in impacted_files:
test_files_to_run = ["tests"]
else:
# Grab the corresponding test files:
test_files_to_run = []
for f in impacted_files:
# Modified test files are always added
if f.startswith("tests/"):
test_files_to_run.append(f)
# Example files are tested separately
elif f.startswith("examples/pytorch"):
test_files_to_run.append("examples/pytorch/test_pytorch_examples.py")
test_files_to_run.append("examples/pytorch/test_accelerate_examples.py")
elif f.startswith("examples/flax"):
test_files_to_run.append("examples/flax/test_flax_examples.py")
else:
new_tests = module_to_test_file(f)
if new_tests is not None:
if isinstance(new_tests, str):
test_files_to_run.append(new_tests)
else:
test_files_to_run.extend(new_tests)
# Remove duplicates
test_files_to_run = sorted(list(set(test_files_to_run)))
# Make sure we did not end up with a test file that was removed
test_files_to_run = [f for f in test_files_to_run if os.path.isfile(f) or os.path.isdir(f)]
if filters is not None:
filtered_files = []
for filter in filters:
filtered_files.extend([f for f in test_files_to_run if f.startswith(filter)])
test_files_to_run = filtered_files
print(f"\n### TEST TO RUN ###\n{_print_list(test_files_to_run)}")
if len(test_files_to_run) > 0:
with open(output_file, "w", encoding="utf-8") as f:
f.write(" ".join(test_files_to_run))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--sanity_check", action="store_true", help="Only test that all tests and modules are accounted for."
)
parser.add_argument(
"--output_file", type=str, default="test_list.txt", help="Where to store the list of tests to run"
)
parser.add_argument(
"--diff_with_last_commit",
action="store_true",
help="To fetch the tests between the current commit and the last commit",
)
parser.add_argument(
"--filters",
type=str,
nargs="*",
default=["tests"],
help="Only keep the test files matching one of those filters.",
)
args = parser.parse_args()
if args.sanity_check:
sanity_check()
else:
repo = Repo(PATH_TO_TRANFORMERS)
diff_with_last_commit = args.diff_with_last_commit
if not diff_with_last_commit and not repo.head.is_detached and repo.head.ref == repo.refs.main:
print("main branch detected, fetching tests against last commit.")
diff_with_last_commit = True
try:
infer_tests_to_run(args.output_file, diff_with_last_commit=diff_with_last_commit, filters=args.filters)
except Exception as e:
print(f"\nError when trying to grab the relevant tests: {e}\n\nRunning all tests.")
with open(args.output_file, "w", encoding="utf-8") as f:
if args.filters is None:
f.write("./tests/")
else:
f.write(" ".join(args.filters))
| # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import os
import re
from contextlib import contextmanager
from pathlib import Path
from git import Repo
# This script is intended to be run from the root of the repo but you can adapt this constant if you need to.
PATH_TO_TRANFORMERS = "."
@contextmanager
def checkout_commit(repo, commit_id):
"""
Context manager that checks out a commit in the repo.
"""
current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
try:
repo.git.checkout(commit_id)
yield
finally:
repo.git.checkout(current_head)
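# Illustrative sketch (added for clarity, not part of the original utility): a
# hypothetical helper showing how `checkout_commit` can be used to read a file
# as it looked at an older commit. The default arguments are placeholders.
def _example_read_file_at_commit(repo_path=".", commit_id="HEAD~1", filename="setup.py"):
    repo = Repo(repo_path)
    with checkout_commit(repo, commit_id):
        with open(filename, "r", encoding="utf-8") as f:
            return f.read()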
def clean_code(content):
"""
    Remove docstrings, empty lines and comments from `content`.
"""
# fmt: off
# Remove docstrings by splitting on triple " then triple ':
splits = content.split('\"\"\"')
content = "".join(splits[::2])
splits = content.split("\'\'\'")
# fmt: on
content = "".join(splits[::2])
# Remove empty lines and comments
lines_to_keep = []
for line in content.split("\n"):
# remove anything that is after a # sign.
line = re.sub("#.*$", "", line)
if len(line) == 0 or line.isspace():
continue
lines_to_keep.append(line)
return "\n".join(lines_to_keep)
def diff_is_docstring_only(repo, branching_point, filename):
"""
Check if the diff is only in docstrings in a filename.
"""
with checkout_commit(repo, branching_point):
with open(filename, "r", encoding="utf-8") as f:
old_content = f.read()
with open(filename, "r", encoding="utf-8") as f:
new_content = f.read()
old_content_clean = clean_code(old_content)
new_content_clean = clean_code(new_content)
return old_content_clean == new_content_clean
def get_modified_python_files(diff_with_last_commit=False):
"""
Return a list of python files that have been modified between:
- the current head and the main branch if `diff_with_last_commit=False` (default)
- the current head and its parent commit otherwise.
"""
repo = Repo(PATH_TO_TRANFORMERS)
if not diff_with_last_commit:
print(f"main is at {repo.refs.main.commit}")
print(f"Current head is at {repo.head.commit}")
branching_commits = repo.merge_base(repo.refs.main, repo.head)
for commit in branching_commits:
print(f"Branching commit: {commit}")
return get_diff(repo, repo.head.commit, branching_commits)
else:
print(f"main is at {repo.head.commit}")
parent_commits = repo.head.commit.parents
for commit in parent_commits:
print(f"Parent commit: {commit}")
return get_diff(repo, repo.head.commit, parent_commits)
def get_diff(repo, base_commit, commits):
"""
    Gets the diff between one or several commits and the head of the repository.
"""
print("\n### DIFF ###\n")
code_diff = []
for commit in commits:
for diff_obj in commit.diff(base_commit):
# We always add new python files
if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"):
code_diff.append(diff_obj.b_path)
# We check that deleted python files won't break corresponding tests.
elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"):
code_diff.append(diff_obj.a_path)
# Now for modified files
elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"):
# In case of renames, we'll look at the tests using both the old and new name.
if diff_obj.a_path != diff_obj.b_path:
code_diff.extend([diff_obj.a_path, diff_obj.b_path])
else:
# Otherwise, we check modifications are in code and not docstrings.
if diff_is_docstring_only(repo, commit, diff_obj.b_path):
print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.")
else:
code_diff.append(diff_obj.a_path)
return code_diff
def get_module_dependencies(module_fname):
"""
Get the dependencies of a module.
"""
with open(os.path.join(PATH_TO_TRANFORMERS, module_fname), "r", encoding="utf-8") as f:
content = f.read()
module_parts = module_fname.split(os.path.sep)
imported_modules = []
# Let's start with relative imports
relative_imports = re.findall(r"from\s+(\.+\S+)\s+import\s+([^\n]+)\n", content)
relative_imports = [mod for mod, imp in relative_imports if "# tests_ignore" not in imp]
for imp in relative_imports:
level = 0
while imp.startswith("."):
imp = imp[1:]
level += 1
if len(imp) > 0:
dep_parts = module_parts[: len(module_parts) - level] + imp.split(".")
else:
dep_parts = module_parts[: len(module_parts) - level] + ["__init__.py"]
imported_module = os.path.sep.join(dep_parts)
        # We ignore the main init import as it is only there for the __version__
        # and it would add everything as a dependency.
if not imported_module.endswith("transformers/__init__.py"):
imported_modules.append(imported_module)
# Let's continue with direct imports
# The import from the transformers module are ignored for the same reason we ignored the
# main init before.
direct_imports = re.findall(r"from\s+transformers\.(\S+)\s+import\s+([^\n]+)\n", content)
direct_imports = [mod for mod, imp in direct_imports if "# tests_ignore" not in imp]
for imp in direct_imports:
import_parts = imp.split(".")
dep_parts = ["src", "transformers"] + import_parts
imported_modules.append(os.path.sep.join(dep_parts))
# Now let's just check that we have proper module files, or append an init for submodules
dependencies = []
for imported_module in imported_modules:
if os.path.isfile(os.path.join(PATH_TO_TRANFORMERS, f"{imported_module}.py")):
dependencies.append(f"{imported_module}.py")
elif os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, imported_module)) and os.path.isfile(
os.path.sep.join([PATH_TO_TRANFORMERS, imported_module, "__init__.py"])
):
dependencies.append(os.path.sep.join([imported_module, "__init__.py"]))
return dependencies
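# Illustrative sketch (not part of the original file): a hypothetical call.
# The path below is an example of the expected input; the function returns the
# repo-relative paths (or a package's __init__.py) of everything the module
# imports through relative or `from transformers.` imports.
def _example_module_dependencies():
    return get_module_dependencies("src/transformers/trainer.py")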
def get_test_dependencies(test_fname):
"""
Get the dependencies of a test file.
"""
with open(os.path.join(PATH_TO_TRANFORMERS, test_fname), "r", encoding="utf-8") as f:
content = f.read()
# Tests only have relative imports for other test files
# TODO Sylvain: handle relative imports cleanly
relative_imports = re.findall(r"from\s+(\.\S+)\s+import\s+([^\n]+)\n", content)
relative_imports = [test for test, imp in relative_imports if "# tests_ignore" not in imp]
    # Removes the double leading '..' for parent imports, and creates a path from the root dir with
# `tests` as a prefix.
parent_imports = [imp.strip(".") for imp in relative_imports if ".." in imp]
parent_imports = [os.path.join("tests", f"{test.replace('.', os.path.sep)}.py") for test in parent_imports]
    # Removes the single leading '.' for current dir imports, and creates a path from the root dir with
# tests/{module_name} as a prefix.
current_dir_imports = [imp.strip(".") for imp in relative_imports if ".." not in imp]
directory = os.path.sep.join(test_fname.split(os.path.sep)[:-1])
current_dir_imports = [
os.path.join(directory, f"{test.replace('.', os.path.sep)}.py") for test in current_dir_imports
]
return [f for f in [*parent_imports, *current_dir_imports] if os.path.isfile(f)]
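# Illustrative sketch (not part of the original file): a hypothetical call.
# Any relative import containing ".." in the given test file is mapped to
# "tests/<name>.py", same-directory imports to "<test dir>/<name>.py", and
# only paths that exist on disk are returned.
def _example_test_dependencies():
    return get_test_dependencies("tests/models/bert/test_modeling_bert.py")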
def create_reverse_dependency_map():
"""
Create the dependency map from module/test filename to the list of modules/tests that depend on it (even
recursively).
"""
modules = [
str(f.relative_to(PATH_TO_TRANFORMERS))
for f in (Path(PATH_TO_TRANFORMERS) / "src/transformers").glob("**/*.py")
]
# We grab all the dependencies of each module.
direct_deps = {m: get_module_dependencies(m) for m in modules}
# We add all the dependencies of each test file
tests = [str(f.relative_to(PATH_TO_TRANFORMERS)) for f in (Path(PATH_TO_TRANFORMERS) / "tests").glob("**/*.py")]
direct_deps.update({t: get_test_dependencies(t) for t in tests})
all_files = modules + tests
# This recurses the dependencies
something_changed = True
while something_changed:
something_changed = False
for m in all_files:
for d in direct_deps[m]:
if d not in direct_deps:
raise ValueError(f"KeyError:{d}. From {m}")
for dep in direct_deps[d]:
if dep not in direct_deps[m]:
direct_deps[m].append(dep)
something_changed = True
# Finally we can build the reverse map.
reverse_map = collections.defaultdict(list)
for m in all_files:
if m.endswith("__init__.py"):
reverse_map[m].extend(direct_deps[m])
for d in direct_deps[m]:
reverse_map[d].append(m)
return reverse_map
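# Illustrative sketch (not part of the original file): the reverse map built
# above is meant to be queried with a modified file to get every module and
# test file that depends on it, directly or transitively. The key below is
# only an example.
def _example_impacted_by(reverse_map):
    return reverse_map.get("src/transformers/modeling_utils.py", [])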
# Any module file whose test file can't be inferred automatically from its name should go here. The best
# approach is to (re-)name the test file accordingly, and the second best is to add the correspondence here.
SPECIAL_MODULE_TO_TEST_MAP = {
"commands/add_new_model_like.py": "utils/test_add_new_model_like.py",
"configuration_utils.py": "test_configuration_common.py",
"convert_graph_to_onnx.py": "onnx/test_onnx.py",
"data/data_collator.py": "trainer/test_data_collator.py",
"deepspeed.py": "deepspeed/",
"feature_extraction_sequence_utils.py": "test_sequence_feature_extraction_common.py",
"feature_extraction_utils.py": "test_feature_extraction_common.py",
"file_utils.py": ["utils/test_file_utils.py", "utils/test_model_output.py"],
"utils/generic.py": ["utils/test_file_utils.py", "utils/test_model_output.py"],
"utils/hub.py": "utils/test_file_utils.py",
"modelcard.py": "utils/test_model_card.py",
"modeling_flax_utils.py": "test_modeling_flax_common.py",
"modeling_tf_utils.py": ["test_modeling_tf_common.py", "utils/test_modeling_tf_core.py"],
"modeling_utils.py": ["test_modeling_common.py", "utils/test_offline.py"],
"models/auto/modeling_auto.py": [
"models/auto/test_modeling_auto.py",
"models/auto/test_modeling_tf_pytorch.py",
"models/bort/test_modeling_bort.py",
"models/dit/test_modeling_dit.py",
],
"models/auto/modeling_flax_auto.py": "models/auto/test_modeling_flax_auto.py",
"models/auto/modeling_tf_auto.py": [
"models/auto/test_modeling_tf_auto.py",
"models/auto/test_modeling_tf_pytorch.py",
"models/bort/test_modeling_tf_bort.py",
],
"models/gpt2/modeling_gpt2.py": [
"models/gpt2/test_modeling_gpt2.py",
"models/megatron_gpt2/test_modeling_megatron_gpt2.py",
],
"optimization.py": "optimization/test_optimization.py",
"optimization_tf.py": "optimization/test_optimization_tf.py",
"pipelines/base.py": "pipelines/test_pipelines_*.py",
"pipelines/text2text_generation.py": [
"pipelines/test_pipelines_text2text_generation.py",
"pipelines/test_pipelines_summarization.py",
"pipelines/test_pipelines_translation.py",
],
"pipelines/zero_shot_classification.py": "pipelines/test_pipelines_zero_shot.py",
"testing_utils.py": "utils/test_skip_decorators.py",
"tokenization_utils.py": ["test_tokenization_common.py", "tokenization/test_tokenization_utils.py"],
"tokenization_utils_base.py": ["test_tokenization_common.py", "tokenization/test_tokenization_utils.py"],
"tokenization_utils_fast.py": [
"test_tokenization_common.py",
"tokenization/test_tokenization_utils.py",
"tokenization/test_tokenization_fast.py",
],
"trainer.py": [
"trainer/test_trainer.py",
"extended/test_trainer_ext.py",
"trainer/test_trainer_distributed.py",
"trainer/test_trainer_tpu.py",
    ],
    "trainer_pt_utils.py": "trainer/test_trainer_utils.py",
"utils/versions.py": "utils/test_versions_utils.py",
}
def module_to_test_file(module_fname):
"""
Returns the name of the file(s) where `module_fname` is tested.
"""
splits = module_fname.split(os.path.sep)
# Special map has priority
short_name = os.path.sep.join(splits[2:])
if short_name in SPECIAL_MODULE_TO_TEST_MAP:
test_file = SPECIAL_MODULE_TO_TEST_MAP[short_name]
if isinstance(test_file, str):
return f"tests/{test_file}"
return [f"tests/{f}" for f in test_file]
module_name = splits[-1]
# Fast tokenizers are tested in the same file as the slow ones.
if module_name.endswith("_fast.py"):
module_name = module_name.replace("_fast.py", ".py")
# Special case for pipelines submodules
if len(splits) >= 2 and splits[-2] == "pipelines":
default_test_file = f"tests/pipelines/test_pipelines_{module_name}"
# Special case for benchmarks submodules
elif len(splits) >= 2 and splits[-2] == "benchmark":
return ["tests/benchmark/test_benchmark.py", "tests/benchmark/test_benchmark_tf.py"]
# Special case for commands submodules
elif len(splits) >= 2 and splits[-2] == "commands":
return "tests/utils/test_cli.py"
# Special case for onnx submodules
elif len(splits) >= 2 and splits[-2] == "onnx":
return ["tests/onnx/test_onnx.py", "tests/onnx/test_onnx_v2.py"]
    # Special case for utils (not the ones in src/transformers, but the ones at the root of the repo).
elif len(splits) > 0 and splits[0] == "utils":
default_test_file = f"tests/utils/test_utils_{module_name}"
elif len(splits) > 4 and splits[2] == "models":
default_test_file = f"tests/models/{splits[3]}/test_{module_name}"
elif len(splits) > 2 and splits[2].startswith("generation"):
default_test_file = f"tests/generation/test_{module_name}"
elif len(splits) > 2 and splits[2].startswith("trainer"):
default_test_file = f"tests/trainer/test_{module_name}"
else:
default_test_file = f"tests/utils/test_{module_name}"
if os.path.isfile(default_test_file):
return default_test_file
# Processing -> processor
if "processing" in default_test_file:
test_file = default_test_file.replace("processing", "processor")
if os.path.isfile(test_file):
return test_file
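# Illustrative sketch (not part of the original file): the naming convention
# the function relies on. For a hypothetical model file such as
# "src/transformers/models/bert/modeling_bert.py", the default candidate is
# "tests/models/bert/test_modeling_bert.py", returned only if that test file
# exists on disk.
def _example_default_test_file():
    return module_to_test_file("src/transformers/models/bert/modeling_bert.py")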
# This list contains the list of test files we expect never to be launched from a change in a module/util. Those are
# launched separately.
EXPECTED_TEST_FILES_NEVER_TOUCHED = [
"tests/utils/test_doc_samples.py", # Doc tests
"tests/pipelines/test_pipelines_common.py", # Actually checked by the pipeline based file
"tests/sagemaker/test_single_node_gpu.py", # SageMaker test
"tests/sagemaker/test_multi_node_model_parallel.py", # SageMaker test
"tests/sagemaker/test_multi_node_data_parallel.py", # SageMaker test
]
def _print_list(l):
return "\n".join([f"- {f}" for f in l])
def sanity_check():
"""
Checks that all test files can be touched by a modification in at least one module/utils. This test ensures that
newly-added test files are properly mapped to some module or utils, so they can be run by the CI.
"""
# Grab all module and utils
all_files = [
str(p.relative_to(PATH_TO_TRANFORMERS))
for p in (Path(PATH_TO_TRANFORMERS) / "src/transformers").glob("**/*.py")
]
all_files += [
str(p.relative_to(PATH_TO_TRANFORMERS)) for p in (Path(PATH_TO_TRANFORMERS) / "utils").glob("**/*.py")
]
# Compute all the test files we get from those.
test_files_found = []
for f in all_files:
test_f = module_to_test_file(f)
if test_f is not None:
if isinstance(test_f, str):
test_files_found.append(test_f)
else:
test_files_found.extend(test_f)
# Some of the test files might actually be subfolders so we grab the tests inside.
test_files = []
for test_f in test_files_found:
if os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, test_f)):
test_files.extend(
[
str(p.relative_to(PATH_TO_TRANFORMERS))
for p in (Path(PATH_TO_TRANFORMERS) / test_f).glob("**/test*.py")
]
)
else:
test_files.append(test_f)
# Compare to existing test files
existing_test_files = [
str(p.relative_to(PATH_TO_TRANFORMERS)) for p in (Path(PATH_TO_TRANFORMERS) / "tests").glob("**/test*.py")
]
not_touched_test_files = [f for f in existing_test_files if f not in test_files]
should_be_tested = set(not_touched_test_files) - set(EXPECTED_TEST_FILES_NEVER_TOUCHED)
if len(should_be_tested) > 0:
raise ValueError(
"The following test files are not currently associated with any module or utils files, which means they "
f"will never get run by the CI:\n{_print_list(should_be_tested)}\n. Make sure the names of these test "
"files match the name of the module or utils they are testing, or adapt the constant "
"`SPECIAL_MODULE_TO_TEST_MAP` in `utils/tests_fetcher.py` to add them. If your test file is triggered "
"separately and is not supposed to be run by the regular CI, add it to the "
"`EXPECTED_TEST_FILES_NEVER_TOUCHED` constant instead."
)
def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None):
modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit)
print(f"\n### MODIFIED FILES ###\n{_print_list(modified_files)}")
# Create the map that will give us all impacted modules.
impacted_modules_map = create_reverse_dependency_map()
impacted_files = modified_files.copy()
for f in modified_files:
if f in impacted_modules_map:
impacted_files.extend(impacted_modules_map[f])
# Remove duplicates
impacted_files = sorted(list(set(impacted_files)))
print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}")
# Grab the corresponding test files:
if "setup.py" in impacted_files:
test_files_to_run = ["tests"]
else:
# Grab the corresponding test files:
test_files_to_run = []
for f in impacted_files:
# Modified test files are always added
if f.startswith("tests/"):
test_files_to_run.append(f)
# Example files are tested separately
elif f.startswith("examples/pytorch"):
test_files_to_run.append("examples/pytorch/test_pytorch_examples.py")
test_files_to_run.append("examples/pytorch/test_accelerate_examples.py")
elif f.startswith("examples/flax"):
test_files_to_run.append("examples/flax/test_flax_examples.py")
else:
new_tests = module_to_test_file(f)
if new_tests is not None:
if isinstance(new_tests, str):
test_files_to_run.append(new_tests)
else:
test_files_to_run.extend(new_tests)
# Remove duplicates
test_files_to_run = sorted(list(set(test_files_to_run)))
# Make sure we did not end up with a test file that was removed
test_files_to_run = [f for f in test_files_to_run if os.path.isfile(f) or os.path.isdir(f)]
if filters is not None:
filtered_files = []
for filter in filters:
filtered_files.extend([f for f in test_files_to_run if f.startswith(filter)])
test_files_to_run = filtered_files
print(f"\n### TEST TO RUN ###\n{_print_list(test_files_to_run)}")
if len(test_files_to_run) > 0:
with open(output_file, "w", encoding="utf-8") as f:
f.write(" ".join(test_files_to_run))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--sanity_check", action="store_true", help="Only test that all tests and modules are accounted for."
)
parser.add_argument(
"--output_file", type=str, default="test_list.txt", help="Where to store the list of tests to run"
)
parser.add_argument(
"--diff_with_last_commit",
action="store_true",
help="To fetch the tests between the current commit and the last commit",
)
parser.add_argument(
"--filters",
type=str,
nargs="*",
default=["tests"],
help="Only keep the test files matching one of those filters.",
)
args = parser.parse_args()
if args.sanity_check:
sanity_check()
else:
repo = Repo(PATH_TO_TRANFORMERS)
diff_with_last_commit = args.diff_with_last_commit
if not diff_with_last_commit and not repo.head.is_detached and repo.head.ref == repo.refs.main:
print("main branch detected, fetching tests against last commit.")
diff_with_last_commit = True
try:
infer_tests_to_run(args.output_file, diff_with_last_commit=diff_with_last_commit, filters=args.filters)
except Exception as e:
print(f"\nError when trying to grab the relevant tests: {e}\n\nRunning all tests.")
with open(args.output_file, "w", encoding="utf-8") as f:
if args.filters is None:
f.write("./tests/")
else:
f.write(" ".join(args.filters))
|
import random
import string
from typing import Callable, List, Optional
import discord
from redbot.core import Config
from redbot.core.commands import Context
from redbot.core.bot import Red
from redbot.core.utils.menus import start_adding_reactions
from redbot.core.utils.predicates import MessagePredicate, ReactionPredicate
# from redbot.core.utils.chat_formatting import text_to_file
from .constants import EMBED_COLOR
from .emojis import emojis
from .errors import CancellationError
# Credits to Star List
club_thumb = "https://www.starlist.pro/assets/club/{}.png"
class Club:
"""Represents a Brawlcord club."""
def __init__(self, data: dict):
self.id: str = data["id"]
self.name: str = data["name"]
self.description: str = data["description"]
self.required_trophies: int = data["required_trophies"]
self.location: str = data["location"]
self.icon_num: int = data["icon_num"]
self.ctype: str = data["ctype"]
self.president: discord.User = data["president"]
self.vice_presidents: List[discord.User] = data.get("vice_presidents", [])
self.seniors: List[discord.User] = data.get("seniors", [])
self.members: List[discord.User] = data.get("members", [])
self.all_members = [self.president] + self.vice_presidents + self.seniors + self.members
@classmethod
async def create_club(cls, config: Config, ctx: Context):
"""Interactive club creation process.
        This function creates the club, adds it to both the user and the global databases
and returns the club object. It also adjusts the `club_id_length` if required.
All errors must be handled in the caller function.
"""
async def get_input(timeout=30):
pred = await ctx.bot.wait_for(
"message", check=MessagePredicate.same_context(ctx), timeout=timeout
)
if pred.content.strip().lower() == "cancel":
raise CancellationError
return pred.content.strip()
data = {}
await ctx.send(
":tada: Let's create your club! First, what name do you want the club to have?"
" Note that it cannot be changed later!"
)
data["name"] = await get_input()
        await ctx.send(
            "Set the name! Now, what do you want to set as the club description?"
        )
data["description"] = await get_input(60)
await ctx.send(
"Set the description! What should be the required trophies?"
" Enter a number. (without commas)"
)
data["required_trophies"] = int(await get_input())
        await ctx.send(
            "Set required trophies! Select an icon for the club!"
            " Enter the number corresponding to the icon of your choice."
        )
data["icon_num"] = int(await get_input(60))
await ctx.send(
"Set club icon! Now, enter a location for your club!"
)
data["location"] = await get_input()
await ctx.send(
"Set the location. Lastly, what kind of club do you want to create?"
" Enter one of `open`, `closed`, or `invite`."
)
club_type = await get_input()
if club_type.strip().lower() not in ["open", "closed", "invite"]:
# We raise `NameError` instead of `ValueError` to keep
# it separate from the above `int` conversions.
raise NameError
else:
data["ctype"] = club_type
data["president"] = ctx.author
        await ctx.send("All set! Club created! :tada:")
default_length = await config.club_id_length()
async with config.clubs() as clubs:
# First we get all club IDs we've used so far to get an ID for our new club.
ids = [c["id"] for c in clubs]
data["id"], new_length = cls.get_club_id(ids, default_length)
club = cls(data)
clubs.append(club.to_json())
await config.user(ctx.author).club.set(club.id)
if default_length != new_length:
await config.club_id_length.set(new_length)
return club
    def to_json(self) -> dict:
        """Returns a dictionary representing the `Club` object."""
return {
"id": self.id,
"name": self.name,
"description": self.description,
"required_trophies": self.required_trophies,
"location": self.location,
"icon_num": self.icon_num,
"ctype": self.ctype,
"president_id": self.president.id,
"vice_president_ids": [vp.id for vp in self.vice_presidents],
"senior_ids": [s.id for s in self.seniors],
"member_ids": [m.id for m in self.members]
}
@classmethod
async def from_json(cls, data: dict, bot: Red):
"""Return a `Club` object from dictionary representation of the club."""
data["president"] = await cls.get_user(data["president_id"], bot)
vice_presidents = []
for vp_id in data["vice_president_ids"]:
vp = await cls.get_user(vp_id, bot)
if vp is not None:
vice_presidents.append(vp)
data["vice_presidents"] = vice_presidents
seniors = []
for s_id in data["senior_ids"]:
sen = await cls.get_user(s_id, bot)
if sen is not None:
seniors.append(sen)
data["seniors"] = seniors
members = []
for m_id in data["member_ids"]:
mem = await cls.get_user(m_id, bot)
if mem is not None:
members.append(mem)
data["members"] = members
data.pop("president_id")
data.pop("vice_president_ids")
data.pop("senior_ids")
data.pop("member_ids")
return cls(data)
@staticmethod
async def get_user(user_id: int, bot: Red) -> Optional[discord.User]:
"""Returns `discord.User` object from the given ID.
Returns `None` if user can't be found.
"""
user = bot.get_user(user_id)
if user is None:
try:
user = await bot.fetch_user(user_id)
except Exception:
pass
return user
@staticmethod
async def show_club(
data: dict, bot: Red, config: Config, get_league: Callable
    ) -> List[discord.Embed]:
        """Returns a list of formatted `discord.Embed` objects to display the club.
        Each embed shows the club information along with one page of the members
        list (ten members per page).
        """
if isinstance(data, Club):
club = data
else:
club: Club = await Club.from_json(data, bot)
embeds = []
pages = await club.members_list(config, get_league)
total_pages = len(pages)
total_trophies = await club.total_trophies(config)
if club.icon_num not in range(1, 31):
icon_url = "https://www.starlist.pro/assets/icon/Club.png"
else:
icon_url = club_thumb.format(club.icon_num - 1)
for idx, page in enumerate(pages):
# if not page.strip():
# continue
embed = discord.Embed(color=EMBED_COLOR, description=club.description)
            # Star List's club indexing starts at 0, ours at 1.
# It goes all the way up till 29.
embed.set_author(name=club.name, icon_url=icon_url)
embed.set_footer(text=f"Club ID: {club.id} | Page {idx+1}/{total_pages}")
embed.add_field(
name="Total Trophies",
value=f"{emojis["trophies"]} {total_trophies:,}"
)
embed.add_field(name="President", value=club.president.name)
embed.add_field(
name="Required Trophies", value=f"{emojis["trophies"]} {club.required_trophies:,}"
)
embed.add_field(name="Total Members", value=f"{len(club.all_members)}/100")
embed.add_field(name="Type", value=club.ctype.title())
embed.add_field(name="Location", value=club.location)
embed.add_field(name="\u200b\n", value=page.strip(), inline=False)
embeds.append(embed)
# if whole:
# club_file = text_to_file(whole, "club_data.txt")
# else:
# club_file = None
return embeds
async def total_trophies(self, config: Config) -> int:
"""Returns total club trophies."""
total = 0
for member in self.all_members:
try:
brawlers = await config.user(member).brawlers()
total += self.get_user_trophies(brawlers)
except Exception:
continue
return total
@staticmethod
def get_user_trophies(brawlers: dict) -> int:
"""Returns total trophies of the user."""
return sum([brawlers[brawler]["trophies"] for brawler in brawlers])
    async def members_list(self, config: Config, get_league: Callable) -> List[str]:
        """Returns a list of pages (strings) for the members list.
        Members are sorted by trophies in descending order, ten per page,
        for up to five pages. Empty pages are dropped.
        """
mapping = {}
for member in self.all_members:
try:
brawlers = await config.user(member).brawlers()
mapping[member] = self.get_user_trophies(brawlers)
except Exception:
pass
# Sort mapping to get users with most trophies at the top.
mapping = {k: v for k, v in sorted(mapping.items(), key=lambda x: x[1], reverse=True)}
# total_num = len(mapping)
first_ten_txt = ""
second_ten_txt = ""
third_ten_txt = ""
fourth_ten_txt = ""
fifth_ten_txt = ""
# whole_txt = ""
for idx, user in enumerate(mapping):
pos = "Member"
if user.id == self.president.id:
pos = "**President**"
elif user.id in [vp.id for vp in self.vice_presidents]:
pos = "**Vice President**"
elif user.id in [s.id for s in self.seniors]:
pos = "**Senior**"
_, emoji = await get_league(mapping[user])
txt = f"\n`{(idx+1):02d}.` {user} {emoji}{mapping[user]} ({pos})"
if idx in range(0, 10):
first_ten_txt += txt
if idx in range(10, 20):
second_ten_txt += txt
if idx in range(20, 30):
third_ten_txt += txt
if idx in range(30, 40):
fourth_ten_txt += txt
if idx in range(40, 50):
fifth_ten_txt += txt
pages = [
page for page in
[first_ten_txt, second_ten_txt, third_ten_txt, fourth_ten_txt, fifth_ten_txt]
if page.strip()
]
return pages
@staticmethod
def get_club_id(used_ids: list, default_length: int) -> (str, int):
"""Returns a unique id for the club and the default length we should use."""
def gen_id(length=default_length):
id = "".join(
[random.choice(string.ascii_uppercase + string.digits) for _ in range(length)]
)
if id not in used_ids:
return id
else:
return False
id = gen_id()
if id is False:
# If id is not unique, try generating id of default length 3 more times.
# Increase length by one if still not unique.
for _ in range(3):
id = gen_id()
if id is False:
continue
else:
                    return id, default_length
default_length += 1
id = gen_id(default_length)
return id, default_length
@classmethod
async def club_from_id(cls, id: str, config: Config, bot: Red):
"""Returns `Club` instance representing club with given id.
Returns `None` if club with given id doesn't exist.
"""
clubs = await config.clubs()
for club in clubs:
if club["id"] == id:
return await cls.from_json(club, bot)
async def remove_user(self, user: discord.User, config: Config):
"""Removes user from club lists."""
def choose_new_pres(pool: list):
try:
new_pres = random.choice(pool)
# Remove it from pool.
pool.remove(new_pres)
# Set it as new president.
self.president = new_pres
return True
except IndexError:
return False
if user in self.all_members:
self.all_members.remove(user)
if user.id == self.president.id:
if not choose_new_pres(self.vice_presidents):
if not choose_new_pres(self.seniors):
if not choose_new_pres(self.members):
# Empty club, remove it from database.
async with config.clubs() as clubs:
where = next(i for i, d in enumerate(clubs) if d.get('id') == self.id)
del clubs[where]
return True
else:
if user in self.vice_presidents:
self.vice_presidents.remove(user)
elif user in self.seniors:
self.seniors.remove(user)
elif user in self.members:
self.members.remove(user)
await self.update_club(config)
async def add_user(self, user: discord.User, config: Config):
"""Adds users to the club list."""
if self.ctype in ["closed", "invite"]:
raise ValueError("Club type is `closed` or `invite-only`.")
self.members.append(user)
await self.update_club(config)
async def promote_user(self, user: discord.User, ctx: Context, config: Config):
"""Promotes a user.
Raises ValueError if not allowed.
"""
if user.id == self.president.id:
raise ValueError(f"{user.name} is the club President!")
if ctx.author.id == self.president.id:
if user in self.vice_presidents:
msg = await ctx.send(
f"Promoting {user.name} will demote you and make them the President."
" Are you sure you want to continue?"
)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
await ctx.bot.wait_for("reaction_add", check=pred)
if pred.result is True:
self.president = user
self.vice_presidents.remove(user)
self.vice_presidents.append(ctx.author)
await ctx.send(f"Promoted {user.name} to President!")
else:
return await ctx.send("Cancelled promotion.")
elif user in self.seniors:
self.seniors.remove(user)
self.vice_presidents.append(user)
await ctx.send(f"Promoted {user.name} to Vice President!")
elif user in self.members:
self.members.remove(user)
self.seniors.append(user)
await ctx.send(f"Promoted {user.name} to Senior!")
if ctx.author in self.vice_presidents:
if user in self.vice_presidents:
raise ValueError(f"{user.name} is equal to you in hierarchy!")
elif user in self.seniors:
                raise ValueError("Only the club President can promote a Senior to Vice President!")
elif user in self.members:
self.members.remove(user)
self.seniors.append(user)
await ctx.send(f"Promoted {user.name} to Senior!")
await self.update_club(config)
async def demote_user(self, user: discord.User, ctx: Context, config: Config):
"""Demotes a user.
Raises ValueError if not allowed.
"""
if user.id == self.president.id:
raise ValueError(f"{user.name} is the club President!")
if ctx.author.id == self.president.id:
if user in self.vice_presidents:
                self.vice_presidents.remove(user)
                # Keep the demoted user in the club at their new rank.
                self.seniors.append(user)
                await ctx.send(f"Demoted {user.name} to Senior!")
            elif user in self.seniors:
                self.seniors.remove(user)
                self.members.append(user)
                await ctx.send(f"Demoted {user.name} to Member!")
elif user in self.members:
raise ValueError(
f"{user.name} is already a Member."
" Use `club kick` command to kick member out of the club."
)
if ctx.author in self.vice_presidents:
if user in self.vice_presidents:
raise ValueError(f"{user.name} is equal to you in hierarchy!")
elif user in self.seniors:
                self.seniors.remove(user)
                self.members.append(user)
                await ctx.send(f"Demoted {user.name} to Member!")
elif user in self.members:
raise ValueError(
f"{user.name} is already a Member."
" Use `club kick` command to kick member out of the club."
)
await self.update_club(config)
async def update_club(self, config: Config):
"""Updates club in the bot database."""
async with config.clubs() as clubs:
where = next(i for i, d in enumerate(clubs) if d.get('id') == self.id)
clubs[where] = self.to_json()
return True
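# Illustrative sketch (added for clarity, not part of the original cog): how a
# club is round-tripped through its dict form. `bot` is assumed to be a ready
# `Red` instance; nothing here writes to the Config database.
async def _example_club_roundtrip(club: Club, bot: Red):
    data = club.to_json()  # plain dict, as stored in Config
    return await Club.from_json(data, bot)  # rebuilt with resolved discord.User objects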
| import random
import string
from typing import Callable, List, Optional
import discord
from redbot.core import Config
from redbot.core.commands import Context
from redbot.core.bot import Red
from redbot.core.utils.menus import start_adding_reactions
from redbot.core.utils.predicates import MessagePredicate, ReactionPredicate
# from redbot.core.utils.chat_formatting import text_to_file
from .constants import EMBED_COLOR
from .emojis import emojis
from .errors import CancellationError
# Credits to Star List
club_thumb = "https://www.starlist.pro/assets/club/{}.png"
class Club:
"""Represents a Brawlcord club."""
def __init__(self, data: dict):
self.id: str = data["id"]
self.name: str = data["name"]
self.description: str = data["description"]
self.required_trophies: int = data["required_trophies"]
self.location: str = data["location"]
self.icon_num: int = data["icon_num"]
self.ctype: str = data["ctype"]
self.president: discord.User = data["president"]
self.vice_presidents: List[discord.User] = data.get("vice_presidents", [])
self.seniors: List[discord.User] = data.get("seniors", [])
self.members: List[discord.User] = data.get("members", [])
self.all_members = [self.president] + self.vice_presidents + self.seniors + self.members
@classmethod
async def create_club(cls, config: Config, ctx: Context):
"""Interactive club creation process.
        This function creates the club, adds it to both the user and the global databases
and returns the club object. It also adjusts the `club_id_length` if required.
All errors must be handled in the caller function.
"""
async def get_input(timeout=30):
pred = await ctx.bot.wait_for(
"message", check=MessagePredicate.same_context(ctx), timeout=timeout
)
if pred.content.strip().lower() == "cancel":
raise CancellationError
return pred.content.strip()
data = {}
await ctx.send(
":tada: Let's create your club! First, what name do you want the club to have?"
" Note that it cannot be changed later!"
)
data["name"] = await get_input()
        await ctx.send(
            "Set the name! Now, what do you want to set as the club description?"
        )
data["description"] = await get_input(60)
await ctx.send(
"Set the description! What should be the required trophies?"
" Enter a number. (without commas)"
)
data["required_trophies"] = int(await get_input())
        await ctx.send(
            "Set required trophies! Select an icon for the club!"
            " Enter the number corresponding to the icon of your choice."
        )
data["icon_num"] = int(await get_input(60))
await ctx.send(
"Set club icon! Now, enter a location for your club!"
)
data["location"] = await get_input()
await ctx.send(
"Set the location. Lastly, what kind of club do you want to create?"
" Enter one of `open`, `closed`, or `invite`."
)
club_type = await get_input()
if club_type.strip().lower() not in ["open", "closed", "invite"]:
# We raise `NameError` instead of `ValueError` to keep
# it separate from the above `int` conversions.
raise NameError
else:
data["ctype"] = club_type
data["president"] = ctx.author
        await ctx.send("All set! Club created! :tada:")
default_length = await config.club_id_length()
async with config.clubs() as clubs:
# First we get all club IDs we've used so far to get an ID for our new club.
ids = [c["id"] for c in clubs]
data["id"], new_length = cls.get_club_id(ids, default_length)
club = cls(data)
clubs.append(club.to_json())
await config.user(ctx.author).club.set(club.id)
if default_length != new_length:
await config.club_id_length.set(new_length)
return club
    def to_json(self) -> dict:
        """Returns a dictionary representing the `Club` object."""
return {
"id": self.id,
"name": self.name,
"description": self.description,
"required_trophies": self.required_trophies,
"location": self.location,
"icon_num": self.icon_num,
"ctype": self.ctype,
"president_id": self.president.id,
"vice_president_ids": [vp.id for vp in self.vice_presidents],
"senior_ids": [s.id for s in self.seniors],
"member_ids": [m.id for m in self.members]
}
@classmethod
async def from_json(cls, data: dict, bot: Red):
"""Return a `Club` object from dictionary representation of the club."""
data["president"] = await cls.get_user(data["president_id"], bot)
vice_presidents = []
for vp_id in data["vice_president_ids"]:
vp = await cls.get_user(vp_id, bot)
if vp is not None:
vice_presidents.append(vp)
data["vice_presidents"] = vice_presidents
seniors = []
for s_id in data["senior_ids"]:
sen = await cls.get_user(s_id, bot)
if sen is not None:
seniors.append(sen)
data["seniors"] = seniors
members = []
for m_id in data["member_ids"]:
mem = await cls.get_user(m_id, bot)
if mem is not None:
members.append(mem)
data["members"] = members
data.pop("president_id")
data.pop("vice_president_ids")
data.pop("senior_ids")
data.pop("member_ids")
return cls(data)
@staticmethod
async def get_user(user_id: int, bot: Red) -> Optional[discord.User]:
"""Returns `discord.User` object from the given ID.
Returns `None` if user can't be found.
"""
user = bot.get_user(user_id)
if user is None:
try:
user = await bot.fetch_user(user_id)
except Exception:
pass
return user
@staticmethod
async def show_club(
data: dict, bot: Red, config: Config, get_league: Callable
    ) -> List[discord.Embed]:
        """Returns a list of formatted `discord.Embed` objects to display the club.
        Each embed shows the club information along with one page of the members
        list (ten members per page).
        """
if isinstance(data, Club):
club = data
else:
club: Club = await Club.from_json(data, bot)
embeds = []
pages = await club.members_list(config, get_league)
total_pages = len(pages)
total_trophies = await club.total_trophies(config)
if club.icon_num not in range(1, 31):
icon_url = "https://www.starlist.pro/assets/icon/Club.png"
else:
icon_url = club_thumb.format(club.icon_num - 1)
for idx, page in enumerate(pages):
# if not page.strip():
# continue
embed = discord.Embed(color=EMBED_COLOR, description=club.description)
            # Star List's club indexing starts at 0, ours at 1.
# It goes all the way up till 29.
embed.set_author(name=club.name, icon_url=icon_url)
embed.set_footer(text=f"Club ID: {club.id} | Page {idx+1}/{total_pages}")
embed.add_field(
name="Total Trophies",
value=f"{emojis['trophies']} {total_trophies:,}"
)
embed.add_field(name="President", value=club.president.name)
embed.add_field(
name="Required Trophies", value=f"{emojis['trophies']} {club.required_trophies:,}"
)
embed.add_field(name="Total Members", value=f"{len(club.all_members)}/100")
embed.add_field(name="Type", value=club.ctype.title())
embed.add_field(name="Location", value=club.location)
embed.add_field(name="\u200b\n", value=page.strip(), inline=False)
embeds.append(embed)
# if whole:
# club_file = text_to_file(whole, "club_data.txt")
# else:
# club_file = None
return embeds
async def total_trophies(self, config: Config) -> int:
"""Returns total club trophies."""
total = 0
for member in self.all_members:
try:
brawlers = await config.user(member).brawlers()
total += self.get_user_trophies(brawlers)
except Exception:
continue
return total
@staticmethod
def get_user_trophies(brawlers: dict) -> int:
"""Returns total trophies of the user."""
return sum([brawlers[brawler]["trophies"] for brawler in brawlers])
    async def members_list(self, config: Config, get_league: Callable) -> List[str]:
        """Returns a list of pages (strings) for the members list.
        Members are sorted by trophies in descending order, ten per page,
        for up to five pages. Empty pages are dropped.
        """
mapping = {}
for member in self.all_members:
try:
brawlers = await config.user(member).brawlers()
mapping[member] = self.get_user_trophies(brawlers)
except Exception:
pass
# Sort mapping to get users with most trophies at the top.
mapping = {k: v for k, v in sorted(mapping.items(), key=lambda x: x[1], reverse=True)}
# total_num = len(mapping)
first_ten_txt = ""
second_ten_txt = ""
third_ten_txt = ""
fourth_ten_txt = ""
fifth_ten_txt = ""
# whole_txt = ""
for idx, user in enumerate(mapping):
pos = "Member"
if user.id == self.president.id:
pos = "**President**"
elif user.id in [vp.id for vp in self.vice_presidents]:
pos = "**Vice President**"
elif user.id in [s.id for s in self.seniors]:
pos = "**Senior**"
_, emoji = await get_league(mapping[user])
txt = f"\n`{(idx+1):02d}.` {user} {emoji}{mapping[user]} ({pos})"
if idx in range(0, 10):
first_ten_txt += txt
if idx in range(10, 20):
second_ten_txt += txt
if idx in range(20, 30):
third_ten_txt += txt
if idx in range(30, 40):
fourth_ten_txt += txt
if idx in range(40, 50):
fifth_ten_txt += txt
pages = [
page for page in
[first_ten_txt, second_ten_txt, third_ten_txt, fourth_ten_txt, fifth_ten_txt]
if page.strip()
]
return pages
@staticmethod
def get_club_id(used_ids: list, default_length: int) -> (str, int):
"""Returns a unique id for the club and the default length we should use."""
def gen_id(length=default_length):
id = "".join(
[random.choice(string.ascii_uppercase + string.digits) for _ in range(length)]
)
if id not in used_ids:
return id
else:
return False
id = gen_id()
if id is False:
# If id is not unique, try generating id of default length 3 more times.
# Increase length by one if still not unique.
for _ in range(3):
id = gen_id()
if id is False:
continue
else:
                    return id, default_length
default_length += 1
id = gen_id(default_length)
return id, default_length
@classmethod
async def club_from_id(cls, id: str, config: Config, bot: Red):
"""Returns `Club` instance representing club with given id.
Returns `None` if club with given id doesn't exist.
"""
clubs = await config.clubs()
for club in clubs:
if club["id"] == id:
return await cls.from_json(club, bot)
async def remove_user(self, user: discord.User, config: Config):
"""Removes user from club lists."""
def choose_new_pres(pool: list):
try:
new_pres = random.choice(pool)
# Remove it from pool.
pool.remove(new_pres)
# Set it as new president.
self.president = new_pres
return True
except IndexError:
return False
if user in self.all_members:
self.all_members.remove(user)
if user.id == self.president.id:
if not choose_new_pres(self.vice_presidents):
if not choose_new_pres(self.seniors):
if not choose_new_pres(self.members):
# Empty club, remove it from database.
async with config.clubs() as clubs:
where = next(i for i, d in enumerate(clubs) if d.get('id') == self.id)
del clubs[where]
return True
else:
if user in self.vice_presidents:
self.vice_presidents.remove(user)
elif user in self.seniors:
self.seniors.remove(user)
elif user in self.members:
self.members.remove(user)
await self.update_club(config)
async def add_user(self, user: discord.User, config: Config):
"""Adds users to the club list."""
if self.ctype in ["closed", "invite"]:
raise ValueError("Club type is `closed` or `invite-only`.")
self.members.append(user)
await self.update_club(config)
async def promote_user(self, user: discord.User, ctx: Context, config: Config):
"""Promotes a user.
Raises ValueError if not allowed.
"""
if user.id == self.president.id:
raise ValueError(f"{user.name} is the club President!")
if ctx.author.id == self.president.id:
if user in self.vice_presidents:
msg = await ctx.send(
f"Promoting {user.name} will demote you and make them the President."
" Are you sure you want to continue?"
)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
await ctx.bot.wait_for("reaction_add", check=pred)
if pred.result is True:
self.president = user
self.vice_presidents.remove(user)
self.vice_presidents.append(ctx.author)
await ctx.send(f"Promoted {user.name} to President!")
else:
return await ctx.send("Cancelled promotion.")
elif user in self.seniors:
self.seniors.remove(user)
self.vice_presidents.append(user)
await ctx.send(f"Promoted {user.name} to Vice President!")
elif user in self.members:
self.members.remove(user)
self.seniors.append(user)
await ctx.send(f"Promoted {user.name} to Senior!")
if ctx.author in self.vice_presidents:
if user in self.vice_presidents:
raise ValueError(f"{user.name} is equal to you in hierarchy!")
elif user in self.seniors:
                raise ValueError("Only the club President can promote a Senior to Vice President!")
elif user in self.members:
self.members.remove(user)
self.seniors.append(user)
await ctx.send(f"Promoted {user.name} to Senior!")
await self.update_club(config)
async def demote_user(self, user: discord.User, ctx: Context, config: Config):
"""Demotes a user.
Raises ValueError if not allowed.
"""
if user.id == self.president.id:
raise ValueError(f"{user.name} is the club President!")
if ctx.author.id == self.president.id:
if user in self.vice_presidents:
                self.vice_presidents.remove(user)
                # Keep the demoted user in the club at their new rank.
                self.seniors.append(user)
                await ctx.send(f"Demoted {user.name} to Senior!")
            elif user in self.seniors:
                self.seniors.remove(user)
                self.members.append(user)
                await ctx.send(f"Demoted {user.name} to Member!")
elif user in self.members:
raise ValueError(
f"{user.name} is already a Member."
" Use `club kick` command to kick member out of the club."
)
if ctx.author in self.vice_presidents:
if user in self.vice_presidents:
raise ValueError(f"{user.name} is equal to you in hierarchy!")
elif user in self.seniors:
                self.seniors.remove(user)
                self.members.append(user)
                await ctx.send(f"Demoted {user.name} to Member!")
elif user in self.members:
raise ValueError(
f"{user.name} is already a Member."
" Use `club kick` command to kick member out of the club."
)
await self.update_club(config)
async def update_club(self, config: Config):
"""Updates club in the bot database."""
async with config.clubs() as clubs:
where = next(i for i, d in enumerate(clubs) if d.get('id') == self.id)
clubs[where] = self.to_json()
return True
|
# -*- coding: utf-8 -*-
"""
------------------------------------
@Project : uiTest
@Time : 2021/3/9 14:17
@Auth : wrc
@Email : wrcyyy@126.com
@File : base.py
@IDE : PyCharm
------------------------------------
"""
import logging
import os
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from utils.fileoperate import FileOperate
class BaseOperation:
def __init__(self, driver):
self.__config_info = FileOperate.read_yaml(
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yml'))
self.__driver = driver
self.__timeout = 10
self.__poll_frequency = 0.5
def clear_input_box(self, locator: tuple):
"""
        Clear the contents of the input box
:param locator:
:return:
"""
element = self.find_elem(locator)
try:
element.clear()
logging.info(f'输入框内容已清空')
except Exception as e:
logging.error(f'清除输入框失败!{locator},{e}')
def open_url(self, url: str):
try:
self.__driver.get(url)
logging.info(f'成功打开:{url}')
except Exception as e:
logging.error(f'打开:{url}失败,错误信息:{e}')
def find_elem(self, locator):
"""
        Locate a single element
:param locator:
:return:
"""
if isinstance(locator, tuple):
logging.info(f'正在使用{locator[0]}定位元素:{locator[1]}')
try:
elem = WebDriverWait(self.__driver, self.__timeout, self.__poll_frequency).until(
lambda driver: driver.find_element(*locator))
return elem
except Exception as e:
logging.error(f"元素定位失败!{locator},{e}")
return None
else:
logging.error('locator参数类型错误,示例:(By.xpath,"****")')
def find_elements(self, locator):
"""
        Locate a group of elements
:param locator:
:return:
"""
if isinstance(locator, tuple):
logging.info(f'正在使用{locator[0]}定位元素:{locator[1]}')
try:
elements = WebDriverWait(self.__driver, self.__timeout, self.__poll_frequency).until(
lambda driver: driver.find_elements(*locator))
return elements
except Exception as e:
logging.error(f"元素定位失败!{locator},{e}")
return None
else:
logging.error('locator参数类型错误,示例:("css_selector","****")')
def get_text(self, locator: tuple):
"""
        Get the text of an element
:param locator:
:return:
"""
element = self.find_elem(locator)
try:
logging.info(f'获取元素text成功:{element.text}')
return element.text
except Exception as e:
logging.error(f'获取元素text失败!{locator},{e}')
return None
def get_placeholder_info(self, locator: tuple):
element = self.find_elem(locator)
try:
logging.info(f'获取placeholder成功:{element.get_attribute('placeholder')}')
return element.get_attribute("placeholder")
except Exception as e:
logging.error(f'获取placeholder失败,{locator},{e}')
return None
def open_login_page(self):
"""
        Open the login url from the config file
:return:
"""
self.open_url(self.__config_info['test_server_info']['url'])
def send_key(self, locator: tuple, info: str):
"""
        Send input to a page element
        :param locator: element locator info, e.g. ("css_selector","#username")
        :param info: the string to type
:return:
"""
element = self.find_elem(locator)
try:
element.send_keys(info)
logging.info(f'向{locator}输入{info}成功')
except Exception as e:
logging.error(f'向{locator}输入{info}失败,{e}')
def click(self, locator: tuple):
"""
        Click an element
        :param locator: element locator info, e.g. ("css_selector","#username")
:return:
"""
element = self.find_elem(locator)
try:
element.click()
logging.info(f'点击元素成功:{locator}')
except Exception as e:
logging.error(f'点击元素失败!{locator},{e}')
def save_screenshot(self, file_path: str):
"""
        Save a screenshot
:param file_path:
:return:
"""
try:
self.__driver.save_screenshot(file_path)
logging.info(f'截图已保存至:{file_path}')
except Exception as e:
logging.error(f'截图保存失败!{file_path},{e}')
def switch_frame(self, locator: tuple):
"""
        Switch to a frame
:param locator:
:return:
"""
element = self.find_elem(locator)
try:
self.__driver.switch_to.frame(element)
logging.info(f'切换Frame成功{locator}')
except Exception as e:
logging.error(f'切换Frame失败!{locator},{e}')
def switch_handler(self, index: int):
"""
        Switch to a window
        :param index: window index, starting from 0
:return:
"""
all_handlers = self.__driver.window_handles
try:
            self.__driver.switch_to.window(all_handlers[index])
except Exception as e:
logging.error(f'切换至窗口{index}失败,{e}')
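# Illustrative sketch (added for clarity, not part of the original page object):
# a hypothetical flow driving the wrapper. The Chrome driver and the locators
# below are placeholder assumptions, not taken from the project's config.yml.
def _example_login_flow():
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    driver = webdriver.Chrome()
    page = BaseOperation(driver)
    page.open_login_page()  # opens the url configured in config.yml
    page.send_key((By.CSS_SELECTOR, "#username"), "demo_user")
    page.click((By.CSS_SELECTOR, "#login-btn"))
    driver.quit()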
| # -*- coding: utf-8 -*-
"""
------------------------------------
@Project : uiTest
@Time : 2021/3/9 14:17
@Auth : wrc
@Email : wrcyyy@126.com
@File : base.py
@IDE : PyCharm
------------------------------------
"""
import logging
import os
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from utils.fileoperate import FileOperate
class BaseOperation:
def __init__(self, driver):
self.__config_info = FileOperate.read_yaml(
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yml'))
self.__driver = driver
self.__timeout = 10
self.__poll_frequency = 0.5
def clear_input_box(self, locator: tuple):
"""
        Clear the contents of the input box
:param locator:
:return:
"""
element = self.find_elem(locator)
try:
element.clear()
logging.info(f'输入框内容已清空')
except Exception as e:
logging.error(f'清除输入框失败!{locator},{e}')
def open_url(self, url: str):
try:
self.__driver.get(url)
logging.info(f'成功打开:{url}')
except Exception as e:
logging.error(f'打开:{url}失败,错误信息:{e}')
def find_elem(self, locator):
"""
        Locate a single element
:param locator:
:return:
"""
if isinstance(locator, tuple):
logging.info(f'正在使用{locator[0]}定位元素:{locator[1]}')
try:
elem = WebDriverWait(self.__driver, self.__timeout, self.__poll_frequency).until(
lambda driver: driver.find_element(*locator))
return elem
except Exception as e:
logging.error(f"元素定位失败!{locator},{e}")
return None
else:
logging.error('locator参数类型错误,示例:(By.xpath,"****")')
def find_elements(self, locator):
"""
        Locate a group of elements
:param locator:
:return:
"""
if isinstance(locator, tuple):
logging.info(f'正在使用{locator[0]}定位元素:{locator[1]}')
try:
elements = WebDriverWait(self.__driver, self.__timeout, self.__poll_frequency).until(
lambda driver: driver.find_elements(*locator))
return elements
except Exception as e:
logging.error(f"元素定位失败!{locator},{e}")
return None
else:
logging.error('locator参数类型错误,示例:("css_selector","****")')
def get_text(self, locator: tuple):
"""
        Get the text of an element
:param locator:
:return:
"""
element = self.find_elem(locator)
try:
logging.info(f'获取元素text成功:{element.text}')
return element.text
except Exception as e:
logging.error(f'获取元素text失败!{locator},{e}')
return None
def get_placeholder_info(self, locator: tuple):
element = self.find_elem(locator)
try:
logging.info(f'获取placeholder成功:{element.get_attribute("placeholder")}')
return element.get_attribute("placeholder")
except Exception as e:
logging.error(f'获取placeholder失败,{locator},{e}')
return None
def open_login_page(self):
"""
        Open the login url from the config file
:return:
"""
self.open_url(self.__config_info['test_server_info']['url'])
def send_key(self, locator: tuple, info: str):
"""
        Send input to a page element
        :param locator: element locator info, e.g. ("css_selector","#username")
        :param info: the string to type
:return:
"""
element = self.find_elem(locator)
try:
element.send_keys(info)
logging.info(f'向{locator}输入{info}成功')
except Exception as e:
logging.error(f'向{locator}输入{info}失败,{e}')
def click(self, locator: tuple):
"""
        Click an element
        :param locator: element locator info, e.g. ("css_selector","#username")
:return:
"""
element = self.find_elem(locator)
try:
element.click()
logging.info(f'点击元素成功:{locator}')
except Exception as e:
logging.error(f'点击元素失败!{locator},{e}')
def save_screenshot(self, file_path: str):
"""
        Save a screenshot
:param file_path:
:return:
"""
try:
self.__driver.save_screenshot(file_path)
logging.info(f'截图已保存至:{file_path}')
except Exception as e:
logging.error(f'截图保存失败!{file_path},{e}')
def switch_frame(self, locator: tuple):
"""
        Switch to a frame
:param locator:
:return:
"""
element = self.find_elem(locator)
try:
self.__driver.switch_to.frame(element)
logging.info(f'切换Frame成功{locator}')
except Exception as e:
logging.error(f'切换Frame失败!{locator},{e}')
def switch_handler(self, index: int):
"""
        Switch to a window
        :param index: window index, starting from 0
:return:
"""
all_handlers = self.__driver.window_handles
try:
            self.__driver.switch_to.window(all_handlers[index])
except Exception as e:
logging.error(f'切换至窗口{index}失败,{e}')
|
# -*- coding: utf-8 -*-
'''
Author: TJUZQC
Date: 2020-10-26 10:26:51
LastEditors: TJUZQC
LastEditTime: 2020-11-20 19:23:55
Description: None
'''
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import yaml
from torch import optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from evaluation import eval_net
from models import ChooseModel, init_weights
from utils.dataset import BasicDataset
conf = yaml.load(open(os.path.join(
sys.path[0], 'config', 'config.yaml')), Loader=yaml.FullLoader)
dir_img = conf['DATASET']['IMGS_DIR']
dir_mask = conf['DATASET']['MASKS_DIR']
dir_checkpoint = conf['MODEL']['CHECKPOINT_DIR']
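# Illustrative sketch (not part of the original script): the shape of the
# config.yaml keys read above. The paths are placeholders, not real data.
_EXAMPLE_CONFIG = {
    'DATASET': {'IMGS_DIR': 'data/imgs/', 'MASKS_DIR': 'data/masks/'},
    'MODEL': {'CHECKPOINT_DIR': 'checkpoints/'},
}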
def train_net(net,
device,
epochs=5,
batch_size=16,
lr=0.001,
val_percent=0.1,
save_cp=True,
img_scale=0.5,
use_apex=False,
optimizer='adam',
classes=2,
lr_scheduler='steplr',
lr_scheduler_cfgs: dict = {'step_size': 10}):
dataset = BasicDataset(dir_img, dir_mask, img_scale,
train=True, classes=classes)
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
train, val = random_split(dataset, [n_train, n_val])
train_loader = DataLoader(
train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
val_loader = DataLoader(val, batch_size=batch_size,
shuffle=False, num_workers=8, pin_memory=True)
writer = SummaryWriter(
comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
global_step = 0
logging.info(f'''Starting training:
Epochs: {epochs}
Batch size: {batch_size}
Learning rate: {lr}
Training size: {n_train}
Validation size: {n_val}
Checkpoints: {save_cp}
Device: {device.type}
Images scaling: {img_scale}
Use apex: {use_apex}
''')
optimizers = {
'adadelta': optim.Adadelta,
'adagrad': optim.Adagrad,
'adam': optim.Adam,
'adamw': optim.AdamW,
'sparseadam': optim.SparseAdam,
'adamax': optim.Adamax,
'asgd': optim.ASGD,
'lbfgs': optim.LBFGS,
'rmsprop': optim.RMSprop,
'rprop': optim.Rprop,
'sgd': optim.SGD,
}
optimizer = optimizers.get(optimizer, None)(
net.parameters(), lr=lr, weight_decay=1e-8)
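    # Note: optimizers.get(name, None) returns None for an unrecognized optimizer name, so the
    # call above fails with "TypeError: 'NoneType' object is not callable"; the lr_scheduler
    # lookup below behaves the same way.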
lr_scheduler_getter = {
'lambdalr': torch.optim.lr_scheduler.LambdaLR,
'multiplicativelr': torch.optim.lr_scheduler.MultiplicativeLR,
'steplr': torch.optim.lr_scheduler.StepLR,
'multisteplr': torch.optim.lr_scheduler.MultiStepLR,
'exponentiallr': torch.optim.lr_scheduler.ExponentialLR,
'cosineannealinglr': torch.optim.lr_scheduler.CosineAnnealingLR,
'reducelronplateau': torch.optim.lr_scheduler.ReduceLROnPlateau,
'cycliclr': torch.optim.lr_scheduler.CyclicLR,
'onecyclelr': torch.optim.lr_scheduler.OneCycleLR,
}
lr_scheduler = lr_scheduler_getter.get(
lr_scheduler.lower(), None)(optimizer, **lr_scheduler_cfgs)
if use_apex:
try:
from apex import amp
net, optimizer = amp.initialize(net, optimizer, opt_level="O1")
except ImportError as e:
print(e)
use_apex = False
if net.n_classes > 1:
criterion = nn.CrossEntropyLoss()
else:
criterion = nn.BCEWithLogitsLoss()
for epoch in range(epochs):
net.train()
epoch_loss = 0
with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
for batch in train_loader:
imgs = batch['image']
true_masks = batch['mask']
assert imgs.shape[1] == net.n_channels, \
f'Network has been defined with {net.n_channels} input channels, ' \
f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
'the images are loaded correctly.'
imgs = imgs.to(device=device, dtype=torch.float32)
mask_type = torch.float32 if net.n_classes == 1 else torch.long
true_masks = true_masks.to(device=device, dtype=mask_type)
if net.n_classes > 1:
b, c, w, h = true_masks.shape
true_masks = true_masks.view(b, w, h)
masks_pred = net(imgs)
loss = criterion(masks_pred, true_masks)
epoch_loss += loss.item()
writer.add_scalar('Loss/train', loss.item(), global_step)
pbar.set_postfix(**{'loss (batch)': loss.item()})
optimizer.zero_grad()
if not use_apex:
loss.backward()
else:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
pbar.update(imgs.shape[0])
global_step += 1
dataset_len = len(dataset)
a1 = dataset_len // 10
a2 = dataset_len / 10
b1 = global_step % a1
b2 = global_step % a2
if global_step % (len(dataset) // (10 * batch_size)) == 0:
dice_coeff, pA, oA, precision, recall, f1score = eval_net(
net, val_loader, device, n_val)
if net.n_classes > 1:
logging.info(
'Validation cross entropy: {}'.format(dice_coeff))
writer.add_scalar('Loss/test', dice_coeff, global_step)
else:
logging.info(
'Validation Dice Coeff: {}'.format(dice_coeff))
writer.add_scalar('Dice/test', dice_coeff, global_step)
logging.info(
'Validation Pixel Accuracy: {}'.format(pA))
writer.add_scalar('pA/test', pA, global_step)
logging.info(
'Validation Overall Accuracy: {}'.format(oA))
writer.add_scalar('oA/test', oA, global_step)
logging.info(
'Validation Precision: {}'.format(precision))
writer.add_scalar('precision/test',
precision, global_step)
logging.info(
'Validation Recall: {}'.format(recall))
writer.add_scalar('recall/test', recall, global_step)
logging.info(
'Validation F1-score: {}'.format(f1score))
writer.add_scalar(
'F1-score/test', f1score, global_step)
writer.add_images('images', imgs, global_step)
if net.n_classes == 1:
writer.add_images(
'masks/true', true_masks, global_step)
writer.add_images(
'masks/pred', torch.sigmoid(masks_pred) > 0.5, global_step)
lr_scheduler.step()
if save_cp:
try:
os.mkdir(dir_checkpoint)
logging.info('Created checkpoint directory')
except OSError:
pass
torch.save(net.state_dict(),
os.path.join(dir_checkpoint, f'CP_epoch{epoch + 1}_loss_{str(loss.item())}.pth'))
logging.info(
f'Checkpoint {epoch + 1} saved ! loss (batch) = ' + str(loss.item()))
writer.close()
def get_args():
parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--network', metavar='NETWORK', type=str,
default=conf['MODEL']['MODEL_NAME'], help='network type', dest='network')
parser.add_argument('-e', '--epochs', metavar='E', type=int, default=conf['NUM_EPOCHS'],
help='Number of epochs', dest='epochs')
parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=conf['BATCH_SIZE'],
help='Batch size', dest='batchsize')
parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=conf['LR'],
help='Learning rate', dest='lr')
parser.add_argument('-f', '--load', dest='load', type=str, default=conf['MODEL']['PRETRAINED_MODEL_DIR'],
help='Load model from a .pth file')
parser.add_argument('-s', '--scale', dest='scale', type=float, default=conf['SCALE'],
help='Downscaling factor of the images')
parser.add_argument('-v', '--validation', dest='val', type=float, default=conf['VALIDATION'],
help='Percent of the data that is used as validation (0-100)')
parser.add_argument('-t', '--init-type', dest='init_type', type=str, default=conf['INIT_TYPE'],
help='Init weights type')
parser.add_argument('-a', '--use-apex', dest='use_apex', type=str, default=conf['APEX'],
help='Automatic Mixed Precision')
parser.add_argument('-o', '--optimizer', dest='optimizer',
type=str, default=conf['OPTIMIZER'], help='Optimizer type')
parser.add_argument('-ls', '--lr-scheduler', dest='lr_scheduler',
type=str, default=conf['LR_SCHEDULER'], help='lr scheduler type')
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: %(message)s')
args = get_args()
device = torch.device('cuda' if torch.cuda.is_available(
) and conf['DEVICE'].lower() == 'cuda' else 'cpu')
logging.info(f'Using device {device}')
network = args.network.lower()
# Change here to adapt to your data
# n_channels=3 for RGB images
# n_classes is the number of probabilities you want to get per pixel
# - For 1 class and background, use n_classes=1
# - For 2 classes, use n_classes=1
# - For N > 2 classes, use n_classes=N
net = ChooseModel(network)(
n_channels=3, n_classes=conf['DATASET']['NUM_CLASSES'])
assert net is not None, f'check your argument --network'
logging.info(f'Network:\n'
f'\t{net.n_channels} input channels\n'
f'\t{net.n_classes} output channels (classes)\n'
f'\t{'Bilinear' if net.bilinear else 'Dilated conv'} upscaling\n'
f'\tApex is {'using' if args.use_apex == 'True' else 'not using'}')
init_weights(net, args.init_type)
if args.load:
net.load_state_dict(
torch.load(args.load, map_location=device)
)
logging.info(f'Model loaded from {args.load}')
net.to(device=device)
# faster convolutions, but more memory
# cudnn.benchmark = True
try:
train_net(net=net,
epochs=args.epochs,
batch_size=args.batchsize,
lr=args.lr,
device=device,
img_scale=args.scale,
val_percent=args.val / 100,
use_apex=(args.use_apex == "True"),
optimizer=args.optimizer.lower(),
classes=conf['DATASET']['NUM_CLASSES'],
lr_scheduler=args.lr_scheduler,
lr_scheduler_cfgs=conf['LR_SCHEDULER_CFGS'])
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
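# Example invocation (a sketch only; flag defaults come from config/config.yaml and the
# values shown here, including the network name "unet", are illustrative assumptions):
#   python train.py -n unet -e 50 -b 8 -l 1e-4 -s 0.5 -v 10 -o adam -ls steplr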
| # -*- coding: utf-8 -*-
'''
Author: TJUZQC
Date: 2020-10-26 10:26:51
LastEditors: TJUZQC
LastEditTime: 2020-11-20 19:23:55
Description: None
'''
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import yaml
from torch import optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from evaluation import eval_net
from models import ChooseModel, init_weights
from utils.dataset import BasicDataset
conf = yaml.load(open(os.path.join(
sys.path[0], 'config', 'config.yaml')), Loader=yaml.FullLoader)
dir_img = conf['DATASET']['IMGS_DIR']
dir_mask = conf['DATASET']['MASKS_DIR']
dir_checkpoint = conf['MODEL']['CHECKPOINT_DIR']
def train_net(net,
device,
epochs=5,
batch_size=16,
lr=0.001,
val_percent=0.1,
save_cp=True,
img_scale=0.5,
use_apex=False,
optimizer='adam',
classes=2,
lr_scheduler='steplr',
lr_scheduler_cfgs: dict = {'step_size': 10}):
dataset = BasicDataset(dir_img, dir_mask, img_scale,
train=True, classes=classes)
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
train, val = random_split(dataset, [n_train, n_val])
train_loader = DataLoader(
train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
val_loader = DataLoader(val, batch_size=batch_size,
shuffle=False, num_workers=8, pin_memory=True)
writer = SummaryWriter(
comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
global_step = 0
logging.info(f'''Starting training:
Epochs: {epochs}
Batch size: {batch_size}
Learning rate: {lr}
Training size: {n_train}
Validation size: {n_val}
Checkpoints: {save_cp}
Device: {device.type}
Images scaling: {img_scale}
Use apex: {use_apex}
''')
optimizers = {
'adadelta': optim.Adadelta,
'adagrad': optim.Adagrad,
'adam': optim.Adam,
'adamw': optim.AdamW,
'sparseadam': optim.SparseAdam,
'adamax': optim.Adamax,
'asgd': optim.ASGD,
'lbfgs': optim.LBFGS,
'rmsprop': optim.RMSprop,
'rprop': optim.Rprop,
'sgd': optim.SGD,
}
optimizer = optimizers.get(optimizer, None)(
net.parameters(), lr=lr, weight_decay=1e-8)
lr_scheduler_getter = {
'lambdalr': torch.optim.lr_scheduler.LambdaLR,
'multiplicativelr': torch.optim.lr_scheduler.MultiplicativeLR,
'steplr': torch.optim.lr_scheduler.StepLR,
'multisteplr': torch.optim.lr_scheduler.MultiStepLR,
'exponentiallr': torch.optim.lr_scheduler.ExponentialLR,
'cosineannealinglr': torch.optim.lr_scheduler.CosineAnnealingLR,
'reducelronplateau': torch.optim.lr_scheduler.ReduceLROnPlateau,
'cycliclr': torch.optim.lr_scheduler.CyclicLR,
'onecyclelr': torch.optim.lr_scheduler.OneCycleLR,
}
lr_scheduler = lr_scheduler_getter.get(
lr_scheduler.lower(), None)(optimizer, **lr_scheduler_cfgs)
if use_apex:
try:
from apex import amp
net, optimizer = amp.initialize(net, optimizer, opt_level="O1")
except ImportError as e:
print(e)
use_apex = False
if net.n_classes > 1:
criterion = nn.CrossEntropyLoss()
else:
criterion = nn.BCEWithLogitsLoss()
for epoch in range(epochs):
net.train()
epoch_loss = 0
with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
for batch in train_loader:
imgs = batch['image']
true_masks = batch['mask']
assert imgs.shape[1] == net.n_channels, \
f'Network has been defined with {net.n_channels} input channels, ' \
f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
'the images are loaded correctly.'
imgs = imgs.to(device=device, dtype=torch.float32)
mask_type = torch.float32 if net.n_classes == 1 else torch.long
true_masks = true_masks.to(device=device, dtype=mask_type)
if net.n_classes > 1:
b, c, w, h = true_masks.shape
true_masks = true_masks.view(b, w, h)
masks_pred = net(imgs)
loss = criterion(masks_pred, true_masks)
epoch_loss += loss.item()
writer.add_scalar('Loss/train', loss.item(), global_step)
pbar.set_postfix(**{'loss (batch)': loss.item()})
optimizer.zero_grad()
if not use_apex:
loss.backward()
else:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
pbar.update(imgs.shape[0])
global_step += 1
dataset_len = len(dataset)
a1 = dataset_len // 10
a2 = dataset_len / 10
b1 = global_step % a1
b2 = global_step % a2
if global_step % (len(dataset) // (10 * batch_size)) == 0:
dice_coeff, pA, oA, precision, recall, f1score = eval_net(
net, val_loader, device, n_val)
if net.n_classes > 1:
logging.info(
'Validation cross entropy: {}'.format(dice_coeff))
writer.add_scalar('Loss/test', dice_coeff, global_step)
else:
logging.info(
'Validation Dice Coeff: {}'.format(dice_coeff))
writer.add_scalar('Dice/test', dice_coeff, global_step)
logging.info(
'Validation Pixel Accuracy: {}'.format(pA))
writer.add_scalar('pA/test', pA, global_step)
logging.info(
'Validation Overall Accuracy: {}'.format(oA))
writer.add_scalar('oA/test', oA, global_step)
logging.info(
'Validation Precision: {}'.format(precision))
writer.add_scalar('precision/test',
precision, global_step)
logging.info(
'Validation Recall: {}'.format(recall))
writer.add_scalar('recall/test', recall, global_step)
logging.info(
'Validation F1-score: {}'.format(f1score))
writer.add_scalar(
'F1-score/test', f1score, global_step)
writer.add_images('images', imgs, global_step)
if net.n_classes == 1:
writer.add_images(
'masks/true', true_masks, global_step)
writer.add_images(
'masks/pred', torch.sigmoid(masks_pred) > 0.5, global_step)
lr_scheduler.step()
if save_cp:
try:
os.mkdir(dir_checkpoint)
logging.info('Created checkpoint directory')
except OSError:
pass
torch.save(net.state_dict(),
os.path.join(dir_checkpoint, f'CP_epoch{epoch + 1}_loss_{str(loss.item())}.pth'))
logging.info(
f'Checkpoint {epoch + 1} saved ! loss (batch) = ' + str(loss.item()))
writer.close()
def get_args():
parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--network', metavar='NETWORK', type=str,
default=conf['MODEL']['MODEL_NAME'], help='network type', dest='network')
parser.add_argument('-e', '--epochs', metavar='E', type=int, default=conf['NUM_EPOCHS'],
help='Number of epochs', dest='epochs')
parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=conf['BATCH_SIZE'],
help='Batch size', dest='batchsize')
parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=conf['LR'],
help='Learning rate', dest='lr')
parser.add_argument('-f', '--load', dest='load', type=str, default=conf['MODEL']['PRETRAINED_MODEL_DIR'],
help='Load model from a .pth file')
parser.add_argument('-s', '--scale', dest='scale', type=float, default=conf['SCALE'],
help='Downscaling factor of the images')
parser.add_argument('-v', '--validation', dest='val', type=float, default=conf['VALIDATION'],
help='Percent of the data that is used as validation (0-100)')
parser.add_argument('-t', '--init-type', dest='init_type', type=str, default=conf['INIT_TYPE'],
help='Init weights type')
parser.add_argument('-a', '--use-apex', dest='use_apex', type=str, default=conf['APEX'],
help='Automatic Mixed Precision')
parser.add_argument('-o', '--optimizer', dest='optimizer',
type=str, default=conf['OPTIMIZER'], help='Optimizer type')
parser.add_argument('-ls', '--lr-scheduler', dest='lr_scheduler',
type=str, default=conf['LR_SCHEDULER'], help='lr scheduler type')
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: %(message)s')
args = get_args()
device = torch.device('cuda' if torch.cuda.is_available(
) and conf['DEVICE'].lower() == 'cuda' else 'cpu')
logging.info(f'Using device {device}')
network = args.network.lower()
# Change here to adapt to your data
# n_channels=3 for RGB images
# n_classes is the number of probabilities you want to get per pixel
# - For 1 class and background, use n_classes=1
# - For 2 classes, use n_classes=1
# - For N > 2 classes, use n_classes=N
net = ChooseModel(network)(
n_channels=3, n_classes=conf['DATASET']['NUM_CLASSES'])
assert net is not None, f'check your argument --network'
logging.info(f'Network:\n'
f'\t{net.n_channels} input channels\n'
f'\t{net.n_classes} output channels (classes)\n'
f'\t{"Bilinear" if net.bilinear else "Dilated conv"} upscaling\n'
f'\tApex is {"using" if args.use_apex == "True" else "not using"}')
init_weights(net, args.init_type)
if args.load:
net.load_state_dict(
torch.load(args.load, map_location=device)
)
logging.info(f'Model loaded from {args.load}')
net.to(device=device)
# faster convolutions, but more memory
# cudnn.benchmark = True
try:
train_net(net=net,
epochs=args.epochs,
batch_size=args.batchsize,
lr=args.lr,
device=device,
img_scale=args.scale,
val_percent=args.val / 100,
use_apex=(args.use_apex == "True"),
optimizer=args.optimizer.lower(),
classes=conf['DATASET']['NUM_CLASSES'],
lr_scheduler=args.lr_scheduler,
lr_scheduler_cfgs=conf['LR_SCHEDULER_CFGS'])
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
import json
import logging
from typing import Any, Callable, List
import paho.mqtt.client as mqtt
from paho.mqtt.client import MQTTMessage, SubscribeOptions
from paho.mqtt.properties import Properties
from paho.mqtt.reasoncodes import PacketTypes, ReasonCodes
class MqttClient:
def __init__(
self,
id: str,
protocol: int = mqtt.MQTTv5,
enable_logging: bool = False,
) -> None:
self.logger = logging.getLogger(id)
self._client = mqtt.Client(client_id=id, protocol=protocol)
if enable_logging:
self._client.enable_logger()
self._client.on_connect = self._on_connect
self._client.on_disconnect = self._on_disconnect
self._client.on_subscribe = self._on_subscribe
self._client.on_unsubscribe = self._on_unsubscribe
self._client.on_message = self._on_message
self._client.on_publish = self._on_publish
def connect(self, host: str, port: int = 1883, properties: Properties = None):
rc: int = self._client.connect(
host=host, port=port, clean_start=True, properties=properties
)
self.logger.debug(
f"Sending CONNECT host='{host}' port='{port}' properties='{properties}' rc='{mqtt.error_string(rc)}'"
)
def disconnect(self, reasoncode: ReasonCodes = None, properties: Properties = None):
rc: int = self._client.disconnect(reasoncode=reasoncode, properties=properties)
self.logger.debug(
f"Sending DISCONNECT reasoncode='{reasoncode}' properties='{properties}' rc='{ReasonCodes(PacketTypes.DISCONNECT, identifier=rc)}'"
)
def subscribe(
self,
topic: str,
qos: int = 0,
options: SubscribeOptions = None,
properties: Properties = None,
) -> None:
rc, mid = self._client.subscribe(
topic=topic, qos=qos, options=options, properties=properties
)
self.logger.debug(
f"Sending SUBSCRIBE topic='{topic}' with qos='{qos}' options='{options}' properties='{properties}' mid='{mid}' rc='{mqtt.error_string(rc)}'"
)
def unsubscribe(self, topic: str, properties: Properties = None) -> None:
rc, mid = self._client.unsubscribe(topic, properties=properties)
self.logger.debug(
f"Sending UNSUBSCRIBE topic='{topic}' properties='{properties}' mid='{mid}' rc='{mqtt.error_string(rc)}'"
)
def publish(
self,
topic: str,
payload: dict,
qos: int = 0,
retain: bool = False,
properties: Properties = None,
) -> None:
info: mqtt.MQTTMessageInfo = self._client.publish(
topic=topic,
payload=json.dumps(payload),
qos=qos,
retain=retain,
properties=properties,
)
self.logger.debug(
f"Sending PUBLICH topic='{topic}' payload='{payload}' with qos='{qos}' retain='{retain}' properties='{properties}' mid='{info.mid}' rc='{mqtt.error_string(info.rc)}'"
)
def loop_start(self) -> None:
self._client.loop_start()
def loop_stop(self) -> None:
self._client.loop_stop()
def loop_forever(self) -> None:
self._client.loop_forever()
def register_callback(self, topic: str, callback: Callable):
self._client.message_callback_add(sub=topic, callback=callback)
self.logger.info(
f"Register CALLBACK topic='{topic}' callback='{callback.__name__}'"
)
def unregister_callback(self, topic: str):
self._client.message_callback_remove(sub=topic)
self.logger.info(f"Unregister CALLBACK topic='{topic}'")
def _on_message(
self, client: mqtt.Client, userdata: Any, message: MQTTMessage
) -> None:
self.logger.info(
f"Received ON_MESSAGE client_id='{client._client_id.decode("utf-8")}' userdata='{userdata}" topic="{message.topic}' payload='{message.payload}" qos="{message.qos}' retain='{message.retain}" mid="{message.info.mid}' rc='{mqtt.error_string(message.info.rc)}'"
)
def _on_connect(
self,
client: mqtt.Client,
userdata: Any,
flags: dict,
rc: ReasonCodes,
properties: mqtt.Properties,
) -> None:
self.logger.info(
f"Received ON_CONNECT client_id='{client._client_id.decode("utf-8")}' rc='{mqtt.connack_string(rc)}" userdata="{userdata}' flags='{flags}" properties="{properties}'"
)
def _on_disconnect(
self,
client: mqtt.Client,
userdata: Any,
rc: int,
) -> None:
self.logger.info(
f"Received ON_DISCONNECT client_id='{client._client_id.decode("utf-8")}' rc='{ReasonCodes(PacketTypes.DISCONNECT, identifier=rc)}" userdata="{userdata}'"
)
def _on_subscribe(
self,
client: mqtt.Client,
userdata: Any,
mid: int,
rc: List[ReasonCodes],
properties: List[Properties],
) -> None:
self.logger.info(
f"Received ON_SUBSCRIBE client_id='{client._client_id.decode("utf-8")}' mid='{mid}" qos="{[qos.getName() for qos in rc]}' userdata='{userdata}" properties="{properties}'"
)
def _on_unsubscribe(
self,
client: mqtt.Client,
userdata: Any,
mid: int,
properties: List[Properties],
rc: List[ReasonCodes],
) -> None:
self.logger.info(
f"Received ON_UNSUBSCRIBE client_id='{client._client_id.decode("utf-8")}' mid='{mid}" rc="{[qos.getName() for qos in rc]}' userdata='{userdata}" properties="{properties}'"
)
def _on_publish(self, client: mqtt.Client, userdata: Any, mid: int) -> None:
self.logger.info(
f"Received ON_PUBLICH client_id='{client._client_id.decode("utf-8")}' mid='{mid}" userdata="{userdata}'"
)
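# Minimal usage sketch. The broker address, topic, and payload below are assumptions for
# illustration; MQTTv5 properties are left at the defaults used by the methods above.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    demo = MqttClient(id="demo-client", enable_logging=True)
    demo.connect(host="localhost", port=1883)  # assumes a broker is reachable locally
    demo.subscribe(topic="demo/topic", qos=1)
    demo.publish(topic="demo/topic", payload={"hello": "world"})
    demo.loop_forever()  # blocks until interrupted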
| import json
import logging
from typing import Any, Callable, List
import paho.mqtt.client as mqtt
from paho.mqtt.client import MQTTMessage, SubscribeOptions
from paho.mqtt.properties import Properties
from paho.mqtt.reasoncodes import PacketTypes, ReasonCodes
class MqttClient:
def __init__(
self,
id: str,
protocol: int = mqtt.MQTTv5,
enable_logging: bool = False,
) -> None:
self.logger = logging.getLogger(id)
self._client = mqtt.Client(client_id=id, protocol=protocol)
if enable_logging:
self._client.enable_logger()
self._client.on_connect = self._on_connect
self._client.on_disconnect = self._on_disconnect
self._client.on_subscribe = self._on_subscribe
self._client.on_unsubscribe = self._on_unsubscribe
self._client.on_message = self._on_message
self._client.on_publish = self._on_publish
def connect(self, host: str, port: int = 1883, properties: Properties = None):
rc: int = self._client.connect(
host=host, port=port, clean_start=True, properties=properties
)
self.logger.debug(
f"Sending CONNECT host='{host}' port='{port}' properties='{properties}' rc='{mqtt.error_string(rc)}'"
)
def disconnect(self, reasoncode: ReasonCodes = None, properties: Properties = None):
rc: int = self._client.disconnect(reasoncode=reasoncode, properties=properties)
self.logger.debug(
f"Sending DISCONNECT reasoncode='{reasoncode}' properties='{properties}' rc='{ReasonCodes(PacketTypes.DISCONNECT, identifier=rc)}'"
)
def subscribe(
self,
topic: str,
qos: int = 0,
options: SubscribeOptions = None,
properties: Properties = None,
) -> None:
rc, mid = self._client.subscribe(
topic=topic, qos=qos, options=options, properties=properties
)
self.logger.debug(
f"Sending SUBSCRIBE topic='{topic}' with qos='{qos}' options='{options}' properties='{properties}' mid='{mid}' rc='{mqtt.error_string(rc)}'"
)
def unsubscribe(self, topic: str, properties: Properties = None) -> None:
rc, mid = self._client.unsubscribe(topic, properties=properties)
self.logger.debug(
f"Sending UNSUBSCRIBE topic='{topic}' properties='{properties}' mid='{mid}' rc='{mqtt.error_string(rc)}'"
)
def publish(
self,
topic: str,
payload: dict,
qos: int = 0,
retain: bool = False,
properties: Properties = None,
) -> None:
info: mqtt.MQTTMessageInfo = self._client.publish(
topic=topic,
payload=json.dumps(payload),
qos=qos,
retain=retain,
properties=properties,
)
self.logger.debug(
f"Sending PUBLICH topic='{topic}' payload='{payload}' with qos='{qos}' retain='{retain}' properties='{properties}' mid='{info.mid}' rc='{mqtt.error_string(info.rc)}'"
)
def loop_start(self) -> None:
self._client.loop_start()
def loop_stop(self) -> None:
self._client.loop_stop()
def loop_forever(self) -> None:
self._client.loop_forever()
def register_callback(self, topic: str, callback: Callable):
self._client.message_callback_add(sub=topic, callback=callback)
self.logger.info(
f"Register CALLBACK topic='{topic}' callback='{callback.__name__}'"
)
def unregister_callback(self, topic: str):
self._client.message_callback_remove(sub=topic)
self.logger.info(f"Unregister CALLBACK topic='{topic}'")
def _on_message(
self, client: mqtt.Client, userdata: Any, message: MQTTMessage
) -> None:
self.logger.info(
f"Received ON_MESSAGE client_id='{client._client_id.decode('utf-8')}' userdata='{userdata}' topic='{message.topic}' payload='{message.payload}' qos='{message.qos}' retain='{message.retain}' mid='{message.info.mid}' rc='{mqtt.error_string(message.info.rc)}'"
)
def _on_connect(
self,
client: mqtt.Client,
userdata: Any,
flags: dict,
rc: ReasonCodes,
properties: mqtt.Properties,
) -> None:
self.logger.info(
f"Received ON_CONNECT client_id='{client._client_id.decode('utf-8')}' rc='{mqtt.connack_string(rc)}' userdata='{userdata}' flags='{flags}' properties='{properties}'"
)
def _on_disconnect(
self,
client: mqtt.Client,
userdata: Any,
rc: int,
) -> None:
self.logger.info(
f"Received ON_DISCONNECT client_id='{client._client_id.decode('utf-8')}' rc='{ReasonCodes(PacketTypes.DISCONNECT, identifier=rc)}' userdata='{userdata}'"
)
def _on_subscribe(
self,
client: mqtt.Client,
userdata: Any,
mid: int,
rc: List[ReasonCodes],
properties: List[Properties],
) -> None:
self.logger.info(
f"Received ON_SUBSCRIBE client_id='{client._client_id.decode('utf-8')}' mid='{mid}' qos='{[qos.getName() for qos in rc]}' userdata='{userdata}' properties='{properties}'"
)
def _on_unsubscribe(
self,
client: mqtt.Client,
userdata: Any,
mid: int,
properties: List[Properties],
rc: List[ReasonCodes],
) -> None:
self.logger.info(
f"Received ON_UNSUBSCRIBE client_id='{client._client_id.decode('utf-8')}' mid='{mid}' rc='{[qos.getName() for qos in rc]}' userdata='{userdata}' properties='{properties}'"
)
def _on_publish(self, client: mqtt.Client, userdata: Any, mid: int) -> None:
self.logger.info(
f"Received ON_PUBLICH client_id='{client._client_id.decode('utf-8')}' mid='{mid}' userdata='{userdata}'"
)
|
# Copyright (C) databricks-cicd 2021 man40 (man40dev@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from os import path as op
from configparser import ConfigParser
from textwrap import indent
_log = logging.getLogger(__name__)
class ConfBase:
def __repr__(self):
return '\n'.join(f'{k}: {self._indent(v)}' for k, v in self.__dict__.items() if not k.startswith('_'))
@staticmethod
def _parse_int(value) -> int:
return eval(value) if isinstance(value, str) else int(value)
@staticmethod
def _parse_list(value) -> list:
return [] if value is None else [v for v in value.split('\n') if v]
@staticmethod
def _indent(obj):
if isinstance(obj, ConfBase):
return f'\n{indent(str(obj), ' ')}'
return obj
class Conf(ConfBase):
def __init__(self, cmd_args: dict, config_file: str):
default_config_file = op.join(op.dirname(__file__), 'default.ini')
parser = ConfigParser()
parser.read(default_config_file)
override_config_file = config_file
if override_config_file:
assert op.isfile(override_config_file), f'Config file was not found in: {override_config_file}'
parser.read(override_config_file)
parser.read_dict(cmd_args)
self._section = 'global'
self.workspace_host = parser[self._section].get('workspace_host')
self.deploying_user_name = parser[self._section].get('deploying_user_name')
self.deploying_user_id = None
self.local_path = parser[self._section].get('local_path')
self.dry_run = parser[self._section].getboolean('dry_run')
self.name_prefix = parser[self._section].get('name_prefix')
self.deploy_safety_limit = self._parse_int(parser[self._section].get('deploy_safety_limit'))
self.rate_limit_timeout = self._parse_int(parser[self._section].get('rate_limit_timeout'))
self.rate_limit_attempts = self._parse_int(parser[self._section].get('rate_limit_attempts'))
self.workspace = ConfWorkspace(parser)
self.instance_pools = ConfInstancePools(parser)
self.clusters = ConfClusters(parser)
self.jobs = ConfJobs(parser)
self.dbfs = ConfDBFS(parser)
class ConfWorkspace(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'workspace'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.target_path = parser[self._section].get('target_path')
assert self.target_path != '/', 'Cannot deploy in the workspace root folder!'
class ConfInstancePools(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'instance_pools'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.ignore_attributes = self._parse_list(parser[self._section].get('ignore_attributes'))
self.strip_attributes = self._parse_list(parser[self._section].get('strip_attributes'))
class ConfClusters(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'clusters'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.ignore_attributes = self._parse_list(parser[self._section].get('ignore_attributes'))
self.ignore_attributes_with_instance_pool = self._parse_list(
parser[self._section].get('ignore_attributes_with_instance_pool'))
self.strip_attributes = self._parse_list(parser[self._section].get('strip_attributes'))
class ConfJobs(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'jobs'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.strip_attributes = self._parse_list(parser[self._section].get('strip_attributes'))
class ConfDBFS(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'dbfs'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.compare_contents = parser[self._section].getboolean('compare_contents')
self.target_path = parser[self._section].get('target_path')
self.transfer_block_size = eval(parser[self._section].get('transfer_block_size'))
assert self.target_path != '/', 'Cannot deploy in the dbfs root folder!'
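# Usage sketch (commented out because it needs the packaged default.ini on disk).
# cmd_args mirrors ConfigParser sections, so overrides arrive as {'section': {option: value}};
# 'my_overrides.ini' is a hypothetical override file:
# conf = Conf(cmd_args={'global': {'dry_run': 'true'}}, config_file='my_overrides.ini')
# print(conf)  # ConfBase.__repr__ renders nested sections with indentation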
| # Copyright (C) databricks-cicd 2021 man40 (man40dev@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from os import path as op
from configparser import ConfigParser
from textwrap import indent
_log = logging.getLogger(__name__)
class ConfBase:
def __repr__(self):
return '\n'.join(f'{k}: {self._indent(v)}' for k, v in self.__dict__.items() if not k.startswith('_'))
@staticmethod
def _parse_int(value) -> int:
return eval(value) if isinstance(value, str) else int(value)
@staticmethod
def _parse_list(value) -> list:
return [] if value is None else [v for v in value.split('\n') if v]
@staticmethod
def _indent(obj):
if isinstance(obj, ConfBase):
return f'\n{indent(str(obj), " ")}'
return obj
class Conf(ConfBase):
def __init__(self, cmd_args: dict, config_file: str):
default_config_file = op.join(op.dirname(__file__), 'default.ini')
parser = ConfigParser()
parser.read(default_config_file)
override_config_file = config_file
if override_config_file:
assert op.isfile(override_config_file), f'Config file was not found in: {override_config_file}'
parser.read(override_config_file)
parser.read_dict(cmd_args)
self._section = 'global'
self.workspace_host = parser[self._section].get('workspace_host')
self.deploying_user_name = parser[self._section].get('deploying_user_name')
self.deploying_user_id = None
self.local_path = parser[self._section].get('local_path')
self.dry_run = parser[self._section].getboolean('dry_run')
self.name_prefix = parser[self._section].get('name_prefix')
self.deploy_safety_limit = self._parse_int(parser[self._section].get('deploy_safety_limit'))
self.rate_limit_timeout = self._parse_int(parser[self._section].get('rate_limit_timeout'))
self.rate_limit_attempts = self._parse_int(parser[self._section].get('rate_limit_attempts'))
self.workspace = ConfWorkspace(parser)
self.instance_pools = ConfInstancePools(parser)
self.clusters = ConfClusters(parser)
self.jobs = ConfJobs(parser)
self.dbfs = ConfDBFS(parser)
class ConfWorkspace(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'workspace'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.target_path = parser[self._section].get('target_path')
assert self.target_path != '/', 'Cannot deploy in the workspace root folder!'
class ConfInstancePools(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'instance_pools'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.ignore_attributes = self._parse_list(parser[self._section].get('ignore_attributes'))
self.strip_attributes = self._parse_list(parser[self._section].get('strip_attributes'))
class ConfClusters(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'clusters'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.ignore_attributes = self._parse_list(parser[self._section].get('ignore_attributes'))
self.ignore_attributes_with_instance_pool = self._parse_list(
parser[self._section].get('ignore_attributes_with_instance_pool'))
self.strip_attributes = self._parse_list(parser[self._section].get('strip_attributes'))
class ConfJobs(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'jobs'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.strip_attributes = self._parse_list(parser[self._section].get('strip_attributes'))
class ConfDBFS(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'dbfs'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.compare_contents = parser[self._section].getboolean('compare_contents')
self.target_path = parser[self._section].get('target_path')
self.transfer_block_size = eval(parser[self._section].get('transfer_block_size'))
assert self.target_path != '/', 'Cannot deploy in the dbfs root folder!'
|
import json
import time
from typing import Callable, Optional, List, Any, Dict
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import chia.server.ws_connection as ws
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import PoolDifficulty
from chia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.util.api_decorators import api_request, peer_required
from chia.util.ints import uint32, uint64
class FarmerAPI:
farmer: Farmer
def __init__(self, farmer) -> None:
self.farmer = farmer
def _set_state_changed_callback(self, callback: Callable):
self.farmer.state_changed_callback = callback
@api_request
@peer_required
async def new_proof_of_space(
self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
"""
This is a response from the harvester, for a NewChallenge. Here we check if the proof
of space is sufficiently good, and if so, we ask for the whole proof.
"""
if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(
int(time.time()))
max_pos_per_sp = 5
if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
# This will likely never happen for any farmer with less than 10% of global space
# It's meant to make testnets more stable
self.farmer.log.info(
f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
f"{new_proof_of_space.sp_hash}"
)
return None
if new_proof_of_space.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
)
return None
sps = self.farmer.sps[new_proof_of_space.sp_hash]
for sp in sps:
computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
self.farmer.constants,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
)
if computed_quality_string is None:
self.farmer.log.error(
f"Invalid proof of space {new_proof_of_space.proof}")
return None
self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
required_iters: uint64 = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
sp.difficulty,
new_proof_of_space.sp_hash,
)
# If the iters are good enough to make a block, proceed with the block making flow
if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
# Proceed at getting the signatures for this PoSpace
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[sp.challenge_chain_sp, sp.reward_chain_sp],
)
if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = [
]
self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
(
new_proof_of_space.plot_identifier,
new_proof_of_space.proof,
)
)
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(
int(time.time()))
self.farmer.quality_str_to_identifiers[computed_quality_string] = (
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
peer.peer_node_id,
)
self.farmer.cache_add_time[computed_quality_string] = uint64(
int(time.time()))
await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))
p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
if p2_singleton_puzzle_hash is not None:
# Otherwise, send the proof of space to the pool
# When we win a block, we also send the partial to the pool
if p2_singleton_puzzle_hash not in self.farmer.pool_state:
self.farmer.log.info(
f"Did not find pool info for {p2_singleton_puzzle_hash}")
return
pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
return
if pool_state_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
return
required_iters = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
pool_state_dict["current_difficulty"],
new_proof_of_space.sp_hash,
)
if required_iters >= calculate_sp_interval_iters(
self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
):
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict["current_difficulty"]}"
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
if authentication_token_timeout is None:
self.farmer.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
return
# Submit partial to pool
is_eos = new_proof_of_space.signage_point_index == 0
payload = PostPartialPayload(
pool_state_dict["pool_config"].launcher_id,
get_current_authentication_token(
authentication_token_timeout),
new_proof_of_space.proof,
new_proof_of_space.sp_hash,
is_eos,
peer.peer_node_id,
)
# The plot key is 2/2 so we need the harvester's half of the signature
m_to_sign = payload.get_hash()
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[m_to_sign],
)
response: Any = await peer.request_signatures(request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(
f"Invalid response from harvester: {response}")
return
assert len(response.message_signatures) == 1
plot_signature: Optional[G2Element] = None
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, True)
assert agg_pk == new_proof_of_space.proof.plot_public_key
sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
taproot_sig: G2Element = AugSchemeMPL.sign(
taproot_sk, m_to_sign, agg_pk)
plot_signature = AugSchemeMPL.aggregate(
[sig_farmer, response.message_signatures[0][1], taproot_sig]
)
assert AugSchemeMPL.verify(
agg_pk, m_to_sign, plot_signature)
authentication_pk = pool_state_dict["pool_config"].authentication_public_key
if bytes(authentication_pk) is None:
self.farmer.log.error(
f"No authentication sk for {authentication_pk}")
return
authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(
authentication_pk)]
authentication_signature = AugSchemeMPL.sign(
authentication_sk, m_to_sign)
assert plot_signature is not None
agg_sig: G2Element = AugSchemeMPL.aggregate(
[plot_signature, authentication_signature])
post_partial_request: PostPartialRequest = PostPartialRequest(
payload, agg_sig)
post_partial_body = json.dumps(
post_partial_request.to_json_dict())
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append(
(time.time(), pool_state_dict["current_difficulty"]))
headers = {
"content-type": "application/json;",
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_url}/partial",
data=post_partial_body,
headers=headers,
ssl=ssl_context_for_root(get_mozilla_ca_crt()),
) as resp:
if resp.ok:
pool_response: Dict = json.loads(await resp.text())
self.farmer.log.info(
f"Pool response: {pool_response}")
if "error_code" in pool_response:
self.farmer.log.error(
f"Error in pooling: "
f"{pool_response["error_code"], pool_response["error_message"]}"
)
pool_state_dict["pool_errors_24h"].append(
pool_response)
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
"get our current difficulty."
)
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append(
(time.time(), new_difficulty))
pool_state_dict["current_difficulty"] = new_difficulty
else:
self.farmer.log.error(
f"Error sending partial to {pool_url}, {resp.status}")
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
return
return
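    # Pool-partial path above, in short: build a PostPartialPayload, have the harvester sign its
    # hash, combine harvester + farmer (+ taproot) signatures into the plot signature, aggregate
    # that with the authentication signature, POST the PostPartialRequest to {pool_url}/partial,
    # and fold the pool's response (errors or new difficulty) back into pool_state_dict.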
@api_request
async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
"""
There are two cases: receiving signatures for sps, or receiving signatures for the block.
"""
if response.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Do not have challenge hash {response.challenge_hash}")
return None
is_sp_signatures: bool = False
sps = self.farmer.sps[response.sp_hash]
signage_point_index = sps[0].signage_point_index
found_sp_hash_debug = False
for sp_candidate in sps:
if response.sp_hash == response.message_signatures[0][0]:
found_sp_hash_debug = True
if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
is_sp_signatures = True
if found_sp_hash_debug:
assert is_sp_signatures
pospace = None
for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
if plot_identifier == response.plot_identifier:
pospace = candidate_pospace
assert pospace is not None
include_taproot: bool = pospace.pool_contract_puzzle_hash is not None
computed_quality_string = pospace.verify_and_get_quality_string(
self.farmer.constants, response.challenge_hash, response.sp_hash
)
if computed_quality_string is None:
self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
return None
if is_sp_signatures:
(
challenge_chain_sp,
challenge_chain_sp_harv_sig,
) = response.message_signatures[0]
reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(
taproot_sk, challenge_chain_sp, agg_pk)
taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(
taproot_sk, reward_chain_sp, agg_pk)
else:
taproot_share_cc_sp = G2Element()
taproot_share_rc_sp = G2Element()
farmer_share_cc_sp = AugSchemeMPL.sign(
sk, challenge_chain_sp, agg_pk)
agg_sig_cc_sp = AugSchemeMPL.aggregate(
[challenge_chain_sp_harv_sig,
farmer_share_cc_sp, taproot_share_cc_sp]
)
assert AugSchemeMPL.verify(
agg_pk, challenge_chain_sp, agg_sig_cc_sp)
# This means it passes the sp filter
farmer_share_rc_sp = AugSchemeMPL.sign(
sk, reward_chain_sp, agg_pk)
agg_sig_rc_sp = AugSchemeMPL.aggregate(
[reward_chain_sp_harv_sig,
farmer_share_rc_sp, taproot_share_rc_sp]
)
assert AugSchemeMPL.verify(
agg_pk, reward_chain_sp, agg_sig_rc_sp)
if pospace.pool_public_key is not None:
assert pospace.pool_contract_puzzle_hash is None
pool_pk = bytes(pospace.pool_public_key)
if pool_pk not in self.farmer.pool_sks_map:
self.farmer.log.error(
f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
)
return None
pool_target: Optional[PoolTarget] = PoolTarget(
self.farmer.pool_target, uint32(0))
assert pool_target is not None
pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
self.farmer.pool_sks_map[pool_pk], bytes(
pool_target)
)
else:
assert pospace.pool_contract_puzzle_hash is not None
pool_target = None
pool_target_signature = None
request = farmer_protocol.DeclareProofOfSpace(
response.challenge_hash,
challenge_chain_sp,
signage_point_index,
reward_chain_sp,
pospace,
agg_sig_cc_sp,
agg_sig_rc_sp,
self.farmer.farmer_target,
pool_target,
pool_target_signature,
)
self.farmer.state_changed(
"proof", {"proof": request, "passed_filter": True})
msg = make_msg(
ProtocolMessageTypes.declare_proof_of_space, request)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
return None
else:
# This is a response with block signatures
for sk in self.farmer.get_private_keys():
(
foliage_block_data_hash,
foliage_sig_harvester,
) = response.message_signatures[0]
(
foliage_transaction_block_hash,
foliage_transaction_block_sig_harvester,
) = response.message_signatures[1]
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
foliage_sig_taproot: G2Element = AugSchemeMPL.sign(
taproot_sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
taproot_sk, foliage_transaction_block_hash, agg_pk
)
else:
foliage_sig_taproot = G2Element()
foliage_transaction_block_sig_taproot = G2Element()
foliage_sig_farmer = AugSchemeMPL.sign(
sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(
sk, foliage_transaction_block_hash, agg_pk)
foliage_agg_sig = AugSchemeMPL.aggregate(
[foliage_sig_harvester, foliage_sig_farmer,
foliage_sig_taproot]
)
foliage_block_agg_sig = AugSchemeMPL.aggregate(
[
foliage_transaction_block_sig_harvester,
foliage_transaction_block_sig_farmer,
foliage_transaction_block_sig_taproot,
]
)
assert AugSchemeMPL.verify(
agg_pk, foliage_block_data_hash, foliage_agg_sig)
assert AugSchemeMPL.verify(
agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)
request_to_nodes = farmer_protocol.SignedValues(
computed_quality_string,
foliage_agg_sig,
foliage_block_agg_sig,
)
msg = make_msg(
ProtocolMessageTypes.signed_values, request_to_nodes)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
"""
FARMER PROTOCOL (FARMER <-> FULL NODE)
"""
@api_request
async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
pool_difficulties: List[PoolDifficulty] = []
for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
if pool_dict["pool_config"].pool_url == "":
# Self pooling
continue
if pool_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this signage point, pool: "
f"{pool_dict["pool_config"].pool_url} "
)
continue
pool_difficulties.append(
PoolDifficulty(
pool_dict["current_difficulty"],
self.farmer.constants.POOL_SUB_SLOT_ITERS,
p2_singleton_puzzle_hash,
)
)
message = harvester_protocol.NewSignagePointHarvester(
new_signage_point.challenge_hash,
new_signage_point.difficulty,
new_signage_point.sub_slot_iters,
new_signage_point.signage_point_index,
new_signage_point.challenge_chain_sp,
pool_difficulties,
)
msg = make_msg(
ProtocolMessageTypes.new_signage_point_harvester, message)
await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
if new_signage_point.challenge_chain_sp not in self.farmer.sps:
self.farmer.sps[new_signage_point.challenge_chain_sp] = []
if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
self.farmer.log.debug(
f"Duplicate signage point {new_signage_point.signage_point_index}")
return
self.farmer.sps[new_signage_point.challenge_chain_sp].append(
new_signage_point)
self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(
int(time.time()))
tStart = time.time()
self.farmer.lastChannageTime = int(round(tStart * 1000))
self.farmer.state_changed("new_signage_point", {
"sp_hash": new_signage_point.challenge_chain_sp})
@api_request
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
self.farmer.log.error(
f"Do not have quality string {full_node_request.quality_string}")
return None
(plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
full_node_request.quality_string
]
request = harvester_protocol.RequestSignatures(
plot_identifier,
challenge_hash,
sp_hash,
[full_node_request.foliage_block_data_hash,
full_node_request.foliage_transaction_block_hash],
)
msg = make_msg(ProtocolMessageTypes.request_signatures, request)
await self.farmer.server.send_to_specific([msg], node_id)
@api_request
async def farming_info(self, request: farmer_protocol.FarmingInfo):
timeConsuming = 999
tEnd = time.time()
timeConsuming = int(round(tEnd * 1000)) - self.farmer.lastChannageTime
self.farmer.state_changed(
"new_farming_info",
{
"farming_info": {
"challenge_hash": request.challenge_hash,
"signage_point": request.sp_hash,
"passed_filter": request.passed,
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
"timeconsuming": timeConsuming,
}
},
)
@api_request
async def respond_plots(self, _: harvester_protocol.RespondPlots):
self.farmer.log.warning("Respond plots came too late")
| import json
import time
from typing import Callable, Optional, List, Any, Dict
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import chia.server.ws_connection as ws
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import PoolDifficulty
from chia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.util.api_decorators import api_request, peer_required
from chia.util.ints import uint32, uint64
class FarmerAPI:
farmer: Farmer
def __init__(self, farmer) -> None:
self.farmer = farmer
def _set_state_changed_callback(self, callback: Callable):
self.farmer.state_changed_callback = callback
@api_request
@peer_required
async def new_proof_of_space(
self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
"""
This is a response from the harvester, for a NewChallenge. Here we check if the proof
of space is sufficiently good, and if so, we ask for the whole proof.
"""
if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(
int(time.time()))
max_pos_per_sp = 5
if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
# This will likely never happen for any farmer with less than 10% of global space
# It's meant to make testnets more stable
self.farmer.log.info(
f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
f"{new_proof_of_space.sp_hash}"
)
return None
if new_proof_of_space.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
)
return None
sps = self.farmer.sps[new_proof_of_space.sp_hash]
for sp in sps:
computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
self.farmer.constants,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
)
if computed_quality_string is None:
self.farmer.log.error(
f"Invalid proof of space {new_proof_of_space.proof}")
return None
self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
required_iters: uint64 = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
sp.difficulty,
new_proof_of_space.sp_hash,
)
# If the iters are good enough to make a block, proceed with the block making flow
if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
# Proceed at getting the signatures for this PoSpace
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[sp.challenge_chain_sp, sp.reward_chain_sp],
)
if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = [
]
self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
(
new_proof_of_space.plot_identifier,
new_proof_of_space.proof,
)
)
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(
int(time.time()))
self.farmer.quality_str_to_identifiers[computed_quality_string] = (
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
peer.peer_node_id,
)
self.farmer.cache_add_time[computed_quality_string] = uint64(
int(time.time()))
await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))
p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
if p2_singleton_puzzle_hash is not None:
# Otherwise, send the proof of space to the pool
# When we win a block, we also send the partial to the pool
if p2_singleton_puzzle_hash not in self.farmer.pool_state:
self.farmer.log.info(
f"Did not find pool info for {p2_singleton_puzzle_hash}")
return
pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
return
if pool_state_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
return
required_iters = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
pool_state_dict["current_difficulty"],
new_proof_of_space.sp_hash,
)
if required_iters >= calculate_sp_interval_iters(
self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
):
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
if authentication_token_timeout is None:
self.farmer.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
return
# Submit partial to pool
is_eos = new_proof_of_space.signage_point_index == 0
payload = PostPartialPayload(
pool_state_dict["pool_config"].launcher_id,
get_current_authentication_token(
authentication_token_timeout),
new_proof_of_space.proof,
new_proof_of_space.sp_hash,
is_eos,
peer.peer_node_id,
)
# The plot key is 2/2 so we need the harvester's half of the signature
m_to_sign = payload.get_hash()
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[m_to_sign],
)
response: Any = await peer.request_signatures(request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(
f"Invalid response from harvester: {response}")
return
assert len(response.message_signatures) == 1
plot_signature: Optional[G2Element] = None
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, True)
assert agg_pk == new_proof_of_space.proof.plot_public_key
sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
taproot_sig: G2Element = AugSchemeMPL.sign(
taproot_sk, m_to_sign, agg_pk)
plot_signature = AugSchemeMPL.aggregate(
[sig_farmer, response.message_signatures[0][1], taproot_sig]
)
assert AugSchemeMPL.verify(
agg_pk, m_to_sign, plot_signature)
authentication_pk = pool_state_dict["pool_config"].authentication_public_key
                # bytes() of a key is never None; look the signing key up instead of
                # checking the raw bytes, so a missing key is handled gracefully.
                authentication_sk: Optional[PrivateKey] = self.farmer.authentication_keys.get(
                    bytes(authentication_pk))
                if authentication_sk is None:
                    self.farmer.log.error(
                        f"No authentication sk for {authentication_pk}")
                    return
authentication_signature = AugSchemeMPL.sign(
authentication_sk, m_to_sign)
assert plot_signature is not None
agg_sig: G2Element = AugSchemeMPL.aggregate(
[plot_signature, authentication_signature])
post_partial_request: PostPartialRequest = PostPartialRequest(
payload, agg_sig)
post_partial_body = json.dumps(
post_partial_request.to_json_dict())
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append(
(time.time(), pool_state_dict["current_difficulty"]))
headers = {
"content-type": "application/json;",
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_url}/partial",
data=post_partial_body,
headers=headers,
ssl=ssl_context_for_root(get_mozilla_ca_crt()),
) as resp:
if resp.ok:
pool_response: Dict = json.loads(await resp.text())
self.farmer.log.info(
f"Pool response: {pool_response}")
if "error_code" in pool_response:
self.farmer.log.error(
f"Error in pooling: "
f"{pool_response['error_code'], pool_response['error_message']}"
)
pool_state_dict["pool_errors_24h"].append(
pool_response)
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
"get our current difficulty."
)
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append(
(time.time(), new_difficulty))
pool_state_dict["current_difficulty"] = new_difficulty
else:
self.farmer.log.error(
f"Error sending partial to {pool_url}, {resp.status}")
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
return
return
@api_request
async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
"""
There are two cases: receiving signatures for sps, or receiving signatures for the block.
"""
if response.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Do not have challenge hash {response.challenge_hash}")
return None
is_sp_signatures: bool = False
sps = self.farmer.sps[response.sp_hash]
signage_point_index = sps[0].signage_point_index
found_sp_hash_debug = False
for sp_candidate in sps:
if response.sp_hash == response.message_signatures[0][0]:
found_sp_hash_debug = True
if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
is_sp_signatures = True
if found_sp_hash_debug:
assert is_sp_signatures
pospace = None
for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
if plot_identifier == response.plot_identifier:
pospace = candidate_pospace
assert pospace is not None
include_taproot: bool = pospace.pool_contract_puzzle_hash is not None
computed_quality_string = pospace.verify_and_get_quality_string(
self.farmer.constants, response.challenge_hash, response.sp_hash
)
if computed_quality_string is None:
self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
return None
if is_sp_signatures:
(
challenge_chain_sp,
challenge_chain_sp_harv_sig,
) = response.message_signatures[0]
reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(
taproot_sk, challenge_chain_sp, agg_pk)
taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(
taproot_sk, reward_chain_sp, agg_pk)
else:
taproot_share_cc_sp = G2Element()
taproot_share_rc_sp = G2Element()
farmer_share_cc_sp = AugSchemeMPL.sign(
sk, challenge_chain_sp, agg_pk)
agg_sig_cc_sp = AugSchemeMPL.aggregate(
[challenge_chain_sp_harv_sig,
farmer_share_cc_sp, taproot_share_cc_sp]
)
assert AugSchemeMPL.verify(
agg_pk, challenge_chain_sp, agg_sig_cc_sp)
# This means it passes the sp filter
farmer_share_rc_sp = AugSchemeMPL.sign(
sk, reward_chain_sp, agg_pk)
agg_sig_rc_sp = AugSchemeMPL.aggregate(
[reward_chain_sp_harv_sig,
farmer_share_rc_sp, taproot_share_rc_sp]
)
assert AugSchemeMPL.verify(
agg_pk, reward_chain_sp, agg_sig_rc_sp)
if pospace.pool_public_key is not None:
assert pospace.pool_contract_puzzle_hash is None
pool_pk = bytes(pospace.pool_public_key)
if pool_pk not in self.farmer.pool_sks_map:
self.farmer.log.error(
f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
)
return None
pool_target: Optional[PoolTarget] = PoolTarget(
self.farmer.pool_target, uint32(0))
assert pool_target is not None
pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
self.farmer.pool_sks_map[pool_pk], bytes(
pool_target)
)
else:
assert pospace.pool_contract_puzzle_hash is not None
pool_target = None
pool_target_signature = None
request = farmer_protocol.DeclareProofOfSpace(
response.challenge_hash,
challenge_chain_sp,
signage_point_index,
reward_chain_sp,
pospace,
agg_sig_cc_sp,
agg_sig_rc_sp,
self.farmer.farmer_target,
pool_target,
pool_target_signature,
)
self.farmer.state_changed(
"proof", {"proof": request, "passed_filter": True})
msg = make_msg(
ProtocolMessageTypes.declare_proof_of_space, request)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
return None
else:
# This is a response with block signatures
for sk in self.farmer.get_private_keys():
(
foliage_block_data_hash,
foliage_sig_harvester,
) = response.message_signatures[0]
(
foliage_transaction_block_hash,
foliage_transaction_block_sig_harvester,
) = response.message_signatures[1]
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
foliage_sig_taproot: G2Element = AugSchemeMPL.sign(
taproot_sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
taproot_sk, foliage_transaction_block_hash, agg_pk
)
else:
foliage_sig_taproot = G2Element()
foliage_transaction_block_sig_taproot = G2Element()
foliage_sig_farmer = AugSchemeMPL.sign(
sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(
sk, foliage_transaction_block_hash, agg_pk)
foliage_agg_sig = AugSchemeMPL.aggregate(
[foliage_sig_harvester, foliage_sig_farmer,
foliage_sig_taproot]
)
foliage_block_agg_sig = AugSchemeMPL.aggregate(
[
foliage_transaction_block_sig_harvester,
foliage_transaction_block_sig_farmer,
foliage_transaction_block_sig_taproot,
]
)
assert AugSchemeMPL.verify(
agg_pk, foliage_block_data_hash, foliage_agg_sig)
assert AugSchemeMPL.verify(
agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)
request_to_nodes = farmer_protocol.SignedValues(
computed_quality_string,
foliage_agg_sig,
foliage_block_agg_sig,
)
msg = make_msg(
ProtocolMessageTypes.signed_values, request_to_nodes)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
"""
FARMER PROTOCOL (FARMER <-> FULL NODE)
"""
@api_request
async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
pool_difficulties: List[PoolDifficulty] = []
for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
if pool_dict["pool_config"].pool_url == "":
# Self pooling
continue
if pool_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this signage point, pool: "
f"{pool_dict['pool_config'].pool_url} "
)
continue
pool_difficulties.append(
PoolDifficulty(
pool_dict["current_difficulty"],
self.farmer.constants.POOL_SUB_SLOT_ITERS,
p2_singleton_puzzle_hash,
)
)
message = harvester_protocol.NewSignagePointHarvester(
new_signage_point.challenge_hash,
new_signage_point.difficulty,
new_signage_point.sub_slot_iters,
new_signage_point.signage_point_index,
new_signage_point.challenge_chain_sp,
pool_difficulties,
)
msg = make_msg(
ProtocolMessageTypes.new_signage_point_harvester, message)
await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
if new_signage_point.challenge_chain_sp not in self.farmer.sps:
self.farmer.sps[new_signage_point.challenge_chain_sp] = []
if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
self.farmer.log.debug(
f"Duplicate signage point {new_signage_point.signage_point_index}")
return
self.farmer.sps[new_signage_point.challenge_chain_sp].append(
new_signage_point)
self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(
int(time.time()))
tStart = time.time()
self.farmer.lastChannageTime = int(round(tStart * 1000))
self.farmer.state_changed("new_signage_point", {
"sp_hash": new_signage_point.challenge_chain_sp})
@api_request
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
self.farmer.log.error(
f"Do not have quality string {full_node_request.quality_string}")
return None
(plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
full_node_request.quality_string
]
request = harvester_protocol.RequestSignatures(
plot_identifier,
challenge_hash,
sp_hash,
[full_node_request.foliage_block_data_hash,
full_node_request.foliage_transaction_block_hash],
)
msg = make_msg(ProtocolMessageTypes.request_signatures, request)
await self.farmer.server.send_to_specific([msg], node_id)
@api_request
async def farming_info(self, request: farmer_protocol.FarmingInfo):
        # Milliseconds since the last signage point; falls back to 0 if none has been seen yet
        tEnd = time.time()
        last_sp_ms = getattr(self.farmer, "lastChannageTime", int(round(tEnd * 1000)))
        timeConsuming = int(round(tEnd * 1000)) - last_sp_ms
self.farmer.state_changed(
"new_farming_info",
{
"farming_info": {
"challenge_hash": request.challenge_hash,
"signage_point": request.sp_hash,
"passed_filter": request.passed,
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
"timeconsuming": timeConsuming,
}
},
)
@api_request
async def respond_plots(self, _: harvester_protocol.RespondPlots):
self.farmer.log.warning("Respond plots came too late")
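# Illustrative sketch, not part of the Chia farmer code above: the partial
# submission flow relies on the plot key being a 2/2 aggregate, where the
# harvester signs the payload hash with its half and the farmer adds its half
# before the shares are combined. The self-contained example below shows that
# idea with plain blspy AugScheme keys; the seeds and the payload hash are made
# up, and Chia's real plot keys also mix in a taproot component via
# ProofOfSpace.generate_plot_public_key, which is omitted here.
def _demo_two_of_two_aggregate_signature() -> bool:
    from blspy import AugSchemeMPL
    harvester_sk = AugSchemeMPL.key_gen(bytes([1] * 32))
    farmer_sk = AugSchemeMPL.key_gen(bytes([2] * 32))
    payload_hash = bytes([7] * 32)  # stand-in for PostPartialPayload.get_hash()
    harvester_share = AugSchemeMPL.sign(harvester_sk, payload_hash)
    farmer_share = AugSchemeMPL.sign(farmer_sk, payload_hash)
    agg_sig = AugSchemeMPL.aggregate([harvester_share, farmer_share])
    return AugSchemeMPL.aggregate_verify(
        [harvester_sk.get_g1(), farmer_sk.get_g1()],
        [payload_hash, payload_hash],
        agg_sig,
    )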
|
#!/usr/bin/env python3
"""
@author T. Paysan-Lafosse
@brief This script searches unintegrated proteins for a given organism or taxid
in InterPro signatures, if they are not found in signatures, they are clustered based on UniRef clusters
@arguments [-u USER]: database user
[-p PASSWORD]: database password for the user
[-s SCHEMA]: database schema to use
[-o ORGANISM or -t TAXID]: organism (scientific name) or taxid to look for
[-f FOLDER]: output folder
"""
import argparse
import os
import sys
from pathlib import Path
import requests
from utils import proteome
class protein_pipeline(proteome):
def __init__(self):
super().__init__()
self.uniref50 = dict()
self.clusters = dict()
def get_integrated(self, protein_list):
"""
Search integrated proteins
Args:
protein_list: list containing proteins to search for
        Returns:
            list_integrated: set of protein accessions integrated in InterPro entries
"""
print("Searching for integrated proteins")
uniprot_chunks = list(self.chunks(protein_list, 1000))
list_integrated = set()
for chunk in uniprot_chunks:
protein_list_quote = [f"'{row}'" for row in chunk]
request = f"SELECT P.PROTEIN_AC \
FROM INTERPRO.MV_ENTRY2PROTEIN E2P \
JOIN INTERPRO.PROTEIN P ON E2P.PROTEIN_AC=P.PROTEIN_AC \
WHERE E2P.PROTEIN_AC IN ({','.join(protein_list_quote)})"
self.cursor.execute(request)
list_integrated.update(set([row[0] for row in self.cursor]))
return list_integrated
def get_count_signature_taxid(self, list_signatures):
"""
Search for protein counts for a list of InterPro signatures
Args:
list_signatures: list of InterPro signatures
        Returns:
            count_prot_signatures: dictionary with signature as key and protein_count as value
"""
count_prot_signatures = dict()
signature_chunks = list(self.chunks(list(list_signatures), 1000))
for chunk in signature_chunks:
signature_list_quote = [f"'{row}'" for row in chunk]
request = f"SELECT M2P.METHOD_AC,COUNT(P.PROTEIN_AC) \
FROM INTERPRO.PROTEIN P \
JOIN INTERPRO.MV_METHOD2PROTEIN M2P ON P.PROTEIN_AC = M2P.PROTEIN_AC \
JOIN INTERPRO.ETAXI ET ON P.TAX_ID = ET.TAX_ID \
WHERE ET.TAX_ID=:1 AND M2P.METHOD_AC IN ({','.join(signature_list_quote)}) \
GROUP BY M2P.METHOD_AC"
self.cursor.execute(request, (self.tax_id,))
count_prot_signatures.update({row[0]: row[1] for row in self.cursor})
return count_prot_signatures
def get_accession_in_signature(self, folder, protein_list):
"""
Search for proteins found in InterPro signatures but not integrated
Write the results in a csv file with each row corresponding to a protein/signature pair (protein,dbcode,organism,signature,total_prot_count,count_proteome,comment)
Args:
folder: output directory
protein_list: list containing proteins to search for
        Returns:
            nbprot_in_signature: number of protein/signature matches found in unintegrated signatures
"""
print("Searching for unintegrated proteins in signature")
uniprot_chunks = list(self.chunks(list(protein_list), 1000))
list_signatures = set()
list_proteins_with_signature = dict()
nbprot_in_signature = 0
for chunk in uniprot_chunks:
# if comments needed in future: C.VALUE, LISTAGG(MC.VALUE, '; ') WITHIN GROUP (ORDER BY MC.VALUE) COMMENTS
protein_list_quote = [f"'{row}'" for row in chunk]
request = f"SELECT P.PROTEIN_AC, P.DBCODE, ET.SCIENTIFIC_NAME, M2P.METHOD_AC, MM.PROTEIN_COUNT, \
( SELECT COUNT(*) FROM INTERPRO.MATCH M \
INNER JOIN INTERPRO.PROTEIN P ON M.PROTEIN_AC = P.PROTEIN_AC \
WHERE P.DBCODE = 'S' and M.METHOD_AC = M2P.METHOD_AC ) as SWISS_COUNT \
FROM INTERPRO.PROTEIN P \
JOIN INTERPRO.ETAXI ET ON P.TAX_ID = ET.TAX_ID \
JOIN INTERPRO.MV_METHOD2PROTEIN M2P ON P.PROTEIN_AC = M2P.PROTEIN_AC \
JOIN INTERPRO.MV_METHOD_MATCH MM ON MM.METHOD_AC = M2P.METHOD_AC \
LEFT JOIN INTERPRO.METHOD_COMMENT MC ON MC.METHOD_AC = M2P.METHOD_AC \
WHERE P.PROTEIN_AC IN ({','.join(protein_list_quote)})\
AND M2P.METHOD_AC not like '%:SF%' \
AND MC.VALUE IS NULL \
GROUP BY P.PROTEIN_AC, P.DBCODE, ET.SCIENTIFIC_NAME, M2P.METHOD_AC, MM.PROTEIN_COUNT"
# print(request)
self.cursor.execute(request)
results = self.cursor.fetchall()
nbprot_in_signature += len(results)
for row in results:
protein = row[0]
signature = row[3]
list_signatures.add(signature)
if signature not in list_proteins_with_signature:
list_proteins_with_signature[signature] = [
protein,
row[1],
row[2],
row[4],
row[5],
]
else:
pass
# `try:
# list_proteins_with_signature[protein][signature] = [
# row[1],
# row[2],
# row[4],
# row[5],
# ]
# except KeyError:
# list_proteins_with_signature[protein] = dict()
# list_proteins_with_signature[protein][signature] = [
# row[1],
# row[2],
# row[4],
# row[5],
# ]`
# count_prot_signatures = self.get_count_signature_taxid(list_signatures)
unintegrated_file = os.path.join(
folder, f"unintegrated_prot_in_signatures_{self.tax_id}.csv"
)
with open(unintegrated_file, "w") as outf:
outf.write("protein,dbcode,organism,signature,total_prot_count,count_swiss_prot\n")
# outf.write(
# "protein,dbcode,organism,signature,total_prot_count,count_swiss_prot,count_proteome\n"
# )
# for protein, signatures in list_proteins_with_signature.items():
# for signature, values in signatures.items():
# if values[3] != 0:
# outf.write(
# f"{protein},{values[0]},{values[1]},{signature},{values[2]},{values[3]}\n"
# )
for signature, proteins in list_proteins_with_signature.items():
if proteins[4] != 0:
outf.write(
f"{proteins[0]},{proteins[1]},{proteins[2]},{signature},{proteins[3]},{proteins[4]}\n"
)
# outf.write(
# f"{protein},{values[0]},{values[1]},{signature},{values[2]},{values[3]},{count_prot_signatures[signature]}\n"
# )
# return list_proteins_with_signature.keys()
return nbprot_in_signature
def search_uniprotid_in_uniref(self, uniprotid):
"""
        Search whether the uniprotid is already referenced in the uniref50 dictionary to avoid querying UniProt multiple times
Args:
uniprotid: UniProt accession to search for
        Returns:
            uniref: UniRef cluster identifier if found
            False: uniprotid not found in any cached cluster
"""
for uniref, accessions in self.uniref50.items():
if uniprotid in accessions:
return uniref
return False
def get_cluster(self, protein_list):
"""
        Search UniRef50 clustering information in UniProt for each accession in protein_list
        Args:
            protein_list: iterable of UniProt accessions to cluster
"""
print("Clustering UniProt accessions unintegrated with no signature using Uniref50")
for uniprotid in protein_list:
uniref_cluster = self.search_uniprotid_in_uniref(uniprotid)
if uniref_cluster:
self.clusters.setdefault(uniref_cluster, []).append(uniprotid)
else:
url = f"https://www.uniprot.org/uniref/?query={uniprotid}&fil=identity:0.5&columns=id,members&format=tab"
                response = requests.get(url)
                if response.status_code != 200:
                    print(f"FAILURE::{url}")
                    continue
                data = response.text
                uniref_all = data.split("\n")[1:]
for uniref_info in uniref_all:
if uniref_info:
name, accessions = uniref_info.split("\t")
accessions = accessions.split("; ")
if name not in self.uniref50:
self.uniref50[name] = accessions
self.clusters.setdefault(name, []).append(uniprotid)
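# Hedged sketch, for illustration only: the `chunks` helper used by
# protein_pipeline comes from utils.proteome, which is not shown here. It is
# assumed to behave like the generator below, slicing a sequence into groups of
# at most `size` items so that each Oracle IN (...) list stays under the
# 1000-expression limit that the queries above rely on.
def _chunks_sketch(items, size=1000):
    items = list(items)
    for start in range(0, len(items), size):
        yield items[start:start + size]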
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--user", help="username for database connection", required=True)
parser.add_argument("-p", "--password", help="password for database connection", required=True)
parser.add_argument("-s", "--schema", help="database schema to connect to", required=True)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-o", "--organism", help="Scientific name of the organism to get the conservation score for"
)
group.add_argument(
"-t", "--taxid", help="Taxid of the organism to get the conservation score for"
)
parser.add_argument(
"-f", "--folder", help="folder directory to write output files", required=True
)
args = parser.parse_args()
# initialising
protein_pip = protein_pipeline()
protein_pip.getConnection(args.user, args.password, args.schema)
# create output directory if it doesn't exist
Path(args.folder).mkdir(parents=True, exist_ok=True)
# initialise tax_id value
if args.organism:
print(f"Searching taxid for {args.organism}")
protein_pip.search_taxid(args.organism)
elif args.taxid:
protein_pip.tax_id = args.taxid
else:
print("Error no organism or taxid provided")
sys.exit(1)
# search the proteome
print(f"Searching list of proteins for {protein_pip.tax_id}")
protein_list = protein_pip.get_proteins()
# search for integrated proteins
list_integrated = protein_pip.get_integrated(protein_list)
print(f"UniProt accessions integrated: {len(list_integrated)}")
# list of unintegrated proteins
unintegrated_subset = set(protein_list).difference(list_integrated)
print(f"UniProt accessions unintegrated: {len(unintegrated_subset)}")
# search for proteins in unintegrated InterPro signatures
list_in_signature = protein_pip.get_accession_in_signature(args.folder, unintegrated_subset)
# list_in_signature = set(
# protein_pip.get_accession_in_signature(args.folder, unintegrated_subset)
# )
print(f"UniProt accession unintegrated matching signature: {list_in_signature}")
# list of unintegrated proteins not found in InterPro signatures
# list_not_in_signature = unintegrated_subset.difference(list_in_signature)
list_not_in_signature = len(unintegrated_subset) - list_in_signature
print(f"UniProt accession unintegrated with no signature: {list_not_in_signature}")
# close database connection
protein_pip.connection.close()
# # clustering unintegrated proteins
# protein_pip.get_cluster(list_not_in_signature)
# print(f"{len(protein_pip.clusters)} clusters found")
# # write clustering results in file
# cluster_file = os.path.join(args.folder, f"clusters_proteome_taxid_{protein_pip.tax_id}.csv")
# with open(cluster_file, "w") as f:
# f.write("cluster_id,accessions\n")
# for cluster, accessions in protein_pip.clusters.items():
# f.write(f"{cluster},{"; ".join(accessions)}\n")
# uniref50_cluster_file = os.path.join(
# args.folder, f"all_clusters_taxid_{protein_pip.tax_id}.csv"
# )
# with open(uniref50_cluster_file, "w") as f:
# f.write("cluster_id,count proteome matches,accessions\n")
# for cluster, accessions in protein_pip.uniref50.items():
# f.write(f"{cluster},{len(protein_pip.clusters[cluster])},{"; ".join(accessions)}\n")
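# Example invocation (hedged: the script filename and all argument values below
# are placeholders; -t takes a taxid such as 9606 for human, -o a scientific
# name, and the two are mutually exclusive as defined by the argument parser):
#   python unintegrated_proteins.py -u DB_USER -p DB_PASSWORD -s DB_SCHEMA -t 9606 -f ./output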
| #!/usr/bin/env python3
"""
@author T. Paysan-Lafosse
@brief This script searches unintegrated proteins for a given organism or taxid
in InterPro signatures, if they are not found in signatures, they are clustered based on UniRef clusters
@arguments [-u USER]: database user
[-p PASSWORD]: database password for the user
[-s SCHEMA]: database schema to use
[-o ORGANISM or -t TAXID]: organism (scientific name) or taxid to look for
[-f FOLDER]: output folder
"""
import argparse
import os
import sys
from pathlib import Path
import requests
from utils import proteome
class protein_pipeline(proteome):
def __init__(self):
super().__init__()
self.uniref50 = dict()
self.clusters = dict()
def get_integrated(self, protein_list):
"""
Search integrated proteins
Args:
protein_list: list containing proteins to search for
Yields:
list_integrated: list of proteins integrated in InterPro entries
"""
print("Searching for integrated proteins")
uniprot_chunks = list(self.chunks(protein_list, 1000))
list_integrated = set()
for chunk in uniprot_chunks:
protein_list_quote = [f"'{row}'" for row in chunk]
request = f"SELECT P.PROTEIN_AC \
FROM INTERPRO.MV_ENTRY2PROTEIN E2P \
JOIN INTERPRO.PROTEIN P ON E2P.PROTEIN_AC=P.PROTEIN_AC \
WHERE E2P.PROTEIN_AC IN ({','.join(protein_list_quote)})"
self.cursor.execute(request)
list_integrated.update(set([row[0] for row in self.cursor]))
return list_integrated
def get_count_signature_taxid(self, list_signatures):
"""
Search for protein counts for a list of InterPro signatures
Args:
list_signatures: list of InterPro signatures
Yields:
count_prot_signatures: dictionnary with signature as key and protein_count as value
"""
count_prot_signatures = dict()
signature_chunks = list(self.chunks(list(list_signatures), 1000))
for chunk in signature_chunks:
signature_list_quote = [f"'{row}'" for row in chunk]
request = f"SELECT M2P.METHOD_AC,COUNT(P.PROTEIN_AC) \
FROM INTERPRO.PROTEIN P \
JOIN INTERPRO.MV_METHOD2PROTEIN M2P ON P.PROTEIN_AC = M2P.PROTEIN_AC \
JOIN INTERPRO.ETAXI ET ON P.TAX_ID = ET.TAX_ID \
WHERE ET.TAX_ID=:1 AND M2P.METHOD_AC IN ({','.join(signature_list_quote)}) \
GROUP BY M2P.METHOD_AC"
self.cursor.execute(request, (self.tax_id,))
count_prot_signatures.update({row[0]: row[1] for row in self.cursor})
return count_prot_signatures
def get_accession_in_signature(self, folder, protein_list):
"""
Search for proteins found in InterPro signatures but not integrated
Write the results in a csv file with each row corresponding to a protein/signature pair (protein,dbcode,organism,signature,total_prot_count,count_proteome,comment)
Args:
folder: output directory
protein_list: list containing proteins to search for
Yields:
list of proteins found in unintegrated signatures
"""
print("Searching for unintegrated proteins in signature")
uniprot_chunks = list(self.chunks(list(protein_list), 1000))
list_signatures = set()
list_proteins_with_signature = dict()
nbprot_in_signature = 0
for chunk in uniprot_chunks:
# if comments needed in future: C.VALUE, LISTAGG(MC.VALUE, '; ') WITHIN GROUP (ORDER BY MC.VALUE) COMMENTS
protein_list_quote = [f"'{row}'" for row in chunk]
request = f"SELECT P.PROTEIN_AC, P.DBCODE, ET.SCIENTIFIC_NAME, M2P.METHOD_AC, MM.PROTEIN_COUNT, \
( SELECT COUNT(*) FROM INTERPRO.MATCH M \
INNER JOIN INTERPRO.PROTEIN P ON M.PROTEIN_AC = P.PROTEIN_AC \
WHERE P.DBCODE = 'S' and M.METHOD_AC = M2P.METHOD_AC ) as SWISS_COUNT \
FROM INTERPRO.PROTEIN P \
JOIN INTERPRO.ETAXI ET ON P.TAX_ID = ET.TAX_ID \
JOIN INTERPRO.MV_METHOD2PROTEIN M2P ON P.PROTEIN_AC = M2P.PROTEIN_AC \
JOIN INTERPRO.MV_METHOD_MATCH MM ON MM.METHOD_AC = M2P.METHOD_AC \
LEFT JOIN INTERPRO.METHOD_COMMENT MC ON MC.METHOD_AC = M2P.METHOD_AC \
WHERE P.PROTEIN_AC IN ({','.join(protein_list_quote)})\
AND M2P.METHOD_AC not like '%:SF%' \
AND MC.VALUE IS NULL \
GROUP BY P.PROTEIN_AC, P.DBCODE, ET.SCIENTIFIC_NAME, M2P.METHOD_AC, MM.PROTEIN_COUNT"
# print(request)
self.cursor.execute(request)
results = self.cursor.fetchall()
nbprot_in_signature += len(results)
for row in results:
protein = row[0]
signature = row[3]
list_signatures.add(signature)
if signature not in list_proteins_with_signature:
list_proteins_with_signature[signature] = [
protein,
row[1],
row[2],
row[4],
row[5],
]
else:
pass
# `try:
# list_proteins_with_signature[protein][signature] = [
# row[1],
# row[2],
# row[4],
# row[5],
# ]
# except KeyError:
# list_proteins_with_signature[protein] = dict()
# list_proteins_with_signature[protein][signature] = [
# row[1],
# row[2],
# row[4],
# row[5],
# ]`
# count_prot_signatures = self.get_count_signature_taxid(list_signatures)
unintegrated_file = os.path.join(
folder, f"unintegrated_prot_in_signatures_{self.tax_id}.csv"
)
with open(unintegrated_file, "w") as outf:
outf.write("protein,dbcode,organism,signature,total_prot_count,count_swiss_prot\n")
# outf.write(
# "protein,dbcode,organism,signature,total_prot_count,count_swiss_prot,count_proteome\n"
# )
# for protein, signatures in list_proteins_with_signature.items():
# for signature, values in signatures.items():
# if values[3] != 0:
# outf.write(
# f"{protein},{values[0]},{values[1]},{signature},{values[2]},{values[3]}\n"
# )
for signature, proteins in list_proteins_with_signature.items():
if proteins[4] != 0:
outf.write(
f"{proteins[0]},{proteins[1]},{proteins[2]},{signature},{proteins[3]},{proteins[4]}\n"
)
# outf.write(
# f"{protein},{values[0]},{values[1]},{signature},{values[2]},{values[3]},{count_prot_signatures[signature]}\n"
# )
# return list_proteins_with_signature.keys()
return nbprot_in_signature
def search_uniprotid_in_uniref(self, uniprotid):
"""
Search if the uniprotid is already referenced in the uniref50 dictionnary to avoid querying UniProt multiple times
Args:
uniprotid: UniProt accession to search for
Yields:
uniref: UniRef cluster found
False: uniprotid not found
"""
for uniref, accessions in self.uniref50.items():
if uniprotid in accessions:
return uniref
return False
def get_cluster(self, protein_list):
"""
Search clustering information in UniRef from UniProt for a given UniProt accession
Args:
None
"""
print("Clustering UniProt accessions unintegrated with no signature using Uniref50")
for uniprotid in protein_list:
uniref_cluster = self.search_uniprotid_in_uniref(uniprotid)
if uniref_cluster:
self.clusters.setdefault(uniref_cluster, []).append(uniprotid)
else:
url = f"https://www.uniprot.org/uniref/?query={uniprotid}&fil=identity:0.5&columns=id,members&format=tab"
response = requests.get(url)
data = response.text
if response.status_code != 200:
print(f"FAILURE::{url}")
uniref_all = data.split("\n")[1:]
for uniref_info in uniref_all:
if uniref_info:
name, accessions = uniref_info.split("\t")
accessions = accessions.split("; ")
if name not in self.uniref50:
self.uniref50[name] = accessions
self.clusters.setdefault(name, []).append(uniprotid)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--user", help="username for database connection", required=True)
parser.add_argument("-p", "--password", help="password for database connection", required=True)
parser.add_argument("-s", "--schema", help="database schema to connect to", required=True)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-o", "--organism", help="Scientific name of the organism to get the conservation score for"
)
group.add_argument(
"-t", "--taxid", help="Taxid of the organism to get the conservation score for"
)
parser.add_argument(
"-f", "--folder", help="folder directory to write output files", required=True
)
args = parser.parse_args()
# initialising
protein_pip = protein_pipeline()
protein_pip.getConnection(args.user, args.password, args.schema)
# create output directory if it doesn't exist
Path(args.folder).mkdir(parents=True, exist_ok=True)
# initialise tax_id value
if args.organism:
print(f"Searching taxid for {args.organism}")
protein_pip.search_taxid(args.organism)
elif args.taxid:
protein_pip.tax_id = args.taxid
else:
print("Error no organism or taxid provided")
sys.exit(1)
# search the proteome
print(f"Searching list of proteins for {protein_pip.tax_id}")
protein_list = protein_pip.get_proteins()
# search for integrated proteins
list_integrated = protein_pip.get_integrated(protein_list)
print(f"UniProt accessions integrated: {len(list_integrated)}")
# list of unintegrated proteins
unintegrated_subset = set(protein_list).difference(list_integrated)
print(f"UniProt accessions unintegrated: {len(unintegrated_subset)}")
# search for proteins in unintegrated InterPro signatures
list_in_signature = protein_pip.get_accession_in_signature(args.folder, unintegrated_subset)
# list_in_signature = set(
# protein_pip.get_accession_in_signature(args.folder, unintegrated_subset)
# )
print(f"UniProt accession unintegrated matching signature: {list_in_signature}")
# list of unintegrated proteins not found in InterPro signatures
# list_not_in_signature = unintegrated_subset.difference(list_in_signature)
list_not_in_signature = len(unintegrated_subset) - list_in_signature
print(f"UniProt accession unintegrated with no signature: {list_not_in_signature}")
# close database connection
protein_pip.connection.close()
# # clustering unintegrated proteins
# protein_pip.get_cluster(list_not_in_signature)
# print(f"{len(protein_pip.clusters)} clusters found")
# # write clustering results in file
# cluster_file = os.path.join(args.folder, f"clusters_proteome_taxid_{protein_pip.tax_id}.csv")
# with open(cluster_file, "w") as f:
# f.write("cluster_id,accessions\n")
# for cluster, accessions in protein_pip.clusters.items():
# f.write(f"{cluster},{'; '.join(accessions)}\n")
# uniref50_cluster_file = os.path.join(
# args.folder, f"all_clusters_taxid_{protein_pip.tax_id}.csv"
# )
# with open(uniref50_cluster_file, "w") as f:
# f.write("cluster_id,count proteome matches,accessions\n")
# for cluster, accessions in protein_pip.uniref50.items():
# f.write(f"{cluster},{len(protein_pip.clusters[cluster])},{'; '.join(accessions)}\n")
|
import json
import inspect
import requests
import os
import boto3
from urllib import parse
ASYNC = 'async'
token = None
slash_handlers = []
event_handlers = []
def slash(command, conditional=lambda text: True):
def fn(f):
slash_handlers.append([conditional, command, f, None])
return f
return fn
def slash_async(command, conditional=lambda text: True):
def fn(f):
slash_handlers.append([conditional, command, f, ASYNC])
return f
return fn
def event(conditional):
def fn(f):
event_handlers.append([conditional, f])
return f
return fn
def _lambda_response(body):
return {'statusCode': '200',
'isBase64Encoded': False,
'headers': {'Content-Type': 'application/json'},
'body': json.dumps(body)}
def response(body, in_channel=True):
if not isinstance(body, dict):
body = {'text': body}
if in_channel:
body["response_type"] = 'in_channel'
else:
body["response_type"] = 'ephemeral'
return body
def asynchronous(command, response_url, data, _file_):
name = os.path.basename(_file_).replace(' ', '-').replace('_', '-').split('.py')[0] # copied from: cli_aws.lambda_name()
val = {'body': json.dumps({'type': ASYNC,
'data': data,
'response_url': response_url,
'command': command,
'token': token})}
boto3.client('lambda').invoke(FunctionName=name,
InvocationType='Event',
Payload=bytes(json.dumps(val), 'utf-8'))
def main(event, context, log_unmatched_events=False):
if not token:
return print('error: must assign slackbot.token = "your verification token from the app page"')
if token == 'SKIP':
print('warning: you should set slackbot.token to the verification token from your slack app page')
if 'body' not in event:
return print(f'error: no body in event {event}')
try:
body = json.loads(event['body'])
        if body['token'] != token and token != 'SKIP':
            return print(f"error: token mismatch {body['token']} {token}")
    # Not a JSON body: fall back to the form-encoded slash-command payload
    except (ValueError, KeyError, TypeError):
body = parse.parse_qs(event['body'])
        if body['token'][0] != token and token != 'SKIP':
            return print(f"error: token mismatch {body['token'][0]} {token}")
if 'command' in body:
for conditional, command, handler, kind in slash_handlers:
text = body.get("text", [''])[0]
if body['command'][0] == command and conditional(text):
if kind == ASYNC:
asynchronous(command, body['response_url'][0], text, inspect.getfile(handler))
return _lambda_response(response('one moment please...'))
else:
return _lambda_response(handler(text))
else:
if "challenge" in body:
return _lambda_response({'challenge': body['challenge']})
elif body['type'] == 'event_callback':
for conditional, handler in event_handlers:
if conditional(body['event']):
handler(body['event'])
return _lambda_response('')
elif body['type'] == ASYNC:
for conditional, command, handler, kind in slash_handlers:
text = body['data']
if body['command'] == command and kind == ASYNC and conditional(text):
resp = requests.post(body['response_url'], data=json.dumps(handler(text)))
assert str(resp.status_code)[0] == '2', [resp, resp.text]
return _lambda_response('')
if log_unmatched_events:
print(f'nothing matched: {body}')
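# Usage sketch, illustrative only and never executed: shows how a project might
# register handlers with the decorators defined above. The '/echo' and
# '/slow-echo' commands, the app_mention condition and the handler bodies are
# made-up examples rather than part of this module's API surface.
def _example_usage():
    @slash('/echo')
    def echo(text):
        return response(f'you said: {text}')
    @slash_async('/slow-echo')
    def slow_echo(text):
        return response(f'eventually you said: {text}')
    @event(lambda e: e.get('type') == 'app_mention')
    def on_mention(e):
        print('mentioned in channel:', e.get('channel'))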
| import json
import inspect
import requests
import os
import boto3
from urllib import parse
ASYNC = 'async'
token = None
slash_handlers = []
event_handlers = []
def slash(command, conditional=lambda text: True):
def fn(f):
slash_handlers.append([conditional, command, f, None])
return f
return fn
def slash_async(command, conditional=lambda text: True):
def fn(f):
slash_handlers.append([conditional, command, f, ASYNC])
return f
return fn
def event(conditional):
def fn(f):
event_handlers.append([conditional, f])
return f
return fn
def _lambda_response(body):
return {'statusCode': '200',
'isBase64Encoded': False,
'headers': {'Content-Type': 'application/json'},
'body': json.dumps(body)}
def response(body, in_channel=True):
if not isinstance(body, dict):
body = {'text': body}
if in_channel:
body["response_type"] = 'in_channel'
else:
body["response_type"] = 'ephemeral'
return body
def asynchronous(command, response_url, data, _file_):
name = os.path.basename(_file_).replace(' ', '-').replace('_', '-').split('.py')[0] # copied from: cli_aws.lambda_name()
val = {'body': json.dumps({'type': ASYNC,
'data': data,
'response_url': response_url,
'command': command,
'token': token})}
boto3.client('lambda').invoke(FunctionName=name,
InvocationType='Event',
Payload=bytes(json.dumps(val), 'utf-8'))
def main(event, context, log_unmatched_events=False):
if not token:
return print('error: must assign slackbot.token = "your verification token from the app page"')
if token == 'SKIP':
print('warning: you should set slackbot.token to the verification token from your slack app page')
if 'body' not in event:
return print(f'error: no body in event {event}')
try:
body = json.loads(event['body'])
if body['token'] != token or token == 'SKIP':
return print(f'error: token mismatch {body["token"]} {token}')
except:
body = parse.parse_qs(event['body'])
if body['token'][0] != token or token == 'SKIP':
return print(f'error: token mismatch {body["token"][0]} {token}')
if 'command' in body:
for conditional, command, handler, kind in slash_handlers:
text = body.get("text", [''])[0]
if body['command'][0] == command and conditional(text):
if kind == ASYNC:
asynchronous(command, body['response_url'][0], text, inspect.getfile(handler))
return _lambda_response(response('one moment please...'))
else:
return _lambda_response(handler(text))
else:
if "challenge" in body:
return _lambda_response({'challenge': body['challenge']})
elif body['type'] == 'event_callback':
for conditional, handler in event_handlers:
if conditional(body['event']):
handler(body['event'])
return _lambda_response('')
elif body['type'] == ASYNC:
for conditional, command, handler, kind in slash_handlers:
text = body['data']
if body['command'] == command and kind == ASYNC and conditional(text):
resp = requests.post(body['response_url'], data=json.dumps(handler(text)))
assert str(resp.status_code)[0] == '2', [resp, resp.text]
return _lambda_response('')
if log_unmatched_events:
print(f'nothing matched: {body}')
|
from Utilities import MENU, resources
from art import logo
print(logo)
shop_open_and_ingredients_available = True
pay = 0
Water = resources["water"]
Milk = resources["milk"]
Coffee = resources["coffee"]
espresso_water = MENU["espresso"]["ingredients"]["water"]
espresso_coffee = MENU["espresso"]["ingredients"]["coffee"]
latte_water = MENU["latte"]["ingredients"]["water"]
latte_coffee = MENU["latte"]["ingredients"]["coffee"]
latte_milk = MENU["latte"]["ingredients"]["milk"]
cappuccino_water = MENU["cappuccino"]["ingredients"]["water"]
cappuccino_coffee = MENU["cappuccino"]["ingredients"]["coffee"]
cappuccino_milk = MENU["cappuccino"]["ingredients"]["milk"]
def report():
print(f"Water left : {Water}")
print(f"Milk left : {Milk}")
print(f"Coffee left : {Coffee}")
print(f"Total money collected: {pay}")
# Shut Down Machine when OFF is called
def make_coffee():
global Water, Coffee, Milk, shop_open_and_ingredients_available, pay
choice = input("What would you like to have? (espresso Rs.25/latte Rs.35/cappuccino Rs.50): ")
if "report" in choice:
report()
elif "off" in choice:
shop_open_and_ingredients_available = False
print("SYSTEM IS CLOSED FOR REPAIR.")
elif "espresso" in choice:
money = int(input("Enter the money for the drink of your choice"))
if money < MENU['espresso']['cost']:
print(f"Money insufficient. Here's your refund of RS.{money}")
elif Water >= espresso_water and Coffee >= espresso_coffee:
print("Here is your Espresso. Thank You!")
            print(f"Here's your change of RS.{money - MENU['espresso']['cost']}")
Water -= espresso_water
Coffee -= espresso_coffee
pay += 25
        elif Water < espresso_water and Coffee >= espresso_coffee:
            print("Sorry, Water is over")
        elif Water >= espresso_water and Coffee < espresso_coffee:
            print("Sorry, Coffee is over")
        elif Water < espresso_water and Coffee < espresso_coffee:
            print("Water and Coffee are over")
elif Water < espresso_water:
print("Sorry, Water Shortage")
elif Coffee < espresso_coffee:
print("Sorry, Coffee Shortage")
else:
print("Sorry, We are currently facing some technical issues")
elif "latte" in choice:
money = int(input("Enter the money for the drink of your choice"))
if money < MENU['latte']['cost']:
print(f"Money insufficient. Here's your refund of RS.{money}")
        elif Water >= latte_water and Coffee >= latte_coffee and Milk >= latte_milk:
print("Here is your Latte. Thank You!")
            print(f"Here's your change of RS.{money - MENU['latte']['cost']}")
Water -= latte_water
Coffee -= latte_coffee
Milk -= latte_milk
pay += 35
elif Water < latte_water and Coffee > latte_coffee and Milk > latte_milk:
print("Sorry, Water is over")
elif Water > latte_water and Coffee < latte_coffee and Milk > latte_milk:
print("Sorry, Coffee is over")
elif Water > latte_water and Coffee > latte_coffee and Milk < latte_milk:
print("Sorry, Milk is over")
elif Water < latte_water and Coffee < latte_coffee and Milk < latte_milk:
print("Water, Coffee and Milk are over")
elif Water < latte_water:
print("Sorry, Water Shortage")
elif Coffee < latte_coffee:
print("Sorry, Coffee Shortage")
elif Milk < latte_milk:
print("Sorry, Milk shortage")
else:
print("Sorry, We are currently facing some technical issues")
elif "cappuccino" in choice:
money = int(input("Enter the money for the drink of your choice"))
if money < MENU['cappuccino']['cost']:
print(f"Money insufficient. Here's your refund of RS.{money}")
        elif Water >= cappuccino_water and Coffee >= cappuccino_coffee and Milk >= cappuccino_milk:
print("Here is your cappuccino. Thank You!")
            print(f"Here's your change of RS.{money - MENU['cappuccino']['cost']}")
Water -= cappuccino_water
Coffee -= cappuccino_coffee
Milk -= cappuccino_milk
pay += 50
elif Water < cappuccino_water and Coffee > cappuccino_coffee and Milk > cappuccino_milk:
print("Sorry, Water is over")
elif Water > cappuccino_water and Coffee < cappuccino_coffee and Milk > cappuccino_milk:
print("Sorry, Coffee is over")
elif Water > cappuccino_water and Coffee > cappuccino_coffee and Milk < cappuccino_milk:
print("Sorry, Milk is over")
elif Water < cappuccino_water and Coffee < cappuccino_coffee and Milk < cappuccino_milk:
print("Water, Coffee and Milk are over")
elif Water < cappuccino_water:
print("Sorry, Water Shortage")
elif Coffee < cappuccino_coffee:
print("Sorry, Coffee Shortage")
elif Milk < cappuccino_milk:
print("Sorry, Milk shortage")
else:
print("Sorry, We are currently facing some technical issues")
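# Hedged sketch: the Utilities module imported above is not shown here. MENU and
# resources are assumed to follow the layout below (the classic coffee-machine
# exercise), which is what the ["ingredients"]/["cost"] lookups rely on; the
# quantities are illustrative, only the prices (Rs.25/35/50) come from the
# prompt text above.
_EXAMPLE_MENU = {
    "espresso": {"ingredients": {"water": 50, "coffee": 18}, "cost": 25},
    "latte": {"ingredients": {"water": 200, "milk": 150, "coffee": 24}, "cost": 35},
    "cappuccino": {"ingredients": {"water": 250, "milk": 100, "coffee": 24}, "cost": 50},
}
_EXAMPLE_RESOURCES = {"water": 300, "milk": 200, "coffee": 100}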
while shop_open_and_ingredients_available:
make_coffee()
| from Utilities import MENU, resources
from art import logo
print(logo)
shop_open_and_ingredients_available = True
pay = 0
Water = resources["water"]
Milk = resources["milk"]
Coffee = resources["coffee"]
espresso_water = MENU["espresso"]["ingredients"]["water"]
espresso_coffee = MENU["espresso"]["ingredients"]["coffee"]
latte_water = MENU["latte"]["ingredients"]["water"]
latte_coffee = MENU["latte"]["ingredients"]["coffee"]
latte_milk = MENU["latte"]["ingredients"]["milk"]
cappuccino_water = MENU["cappuccino"]["ingredients"]["water"]
cappuccino_coffee = MENU["cappuccino"]["ingredients"]["coffee"]
cappuccino_milk = MENU["cappuccino"]["ingredients"]["milk"]
def report():
print(f"Water left : {Water}")
print(f"Milk left : {Milk}")
print(f"Coffee left : {Coffee}")
print(f"Total money collected: {pay}")
# Shut Down Machine when OFF is called
def make_coffee():
global Water, Coffee, Milk, shop_open_and_ingredients_available, pay
choice = input("What would you like to have? (espresso Rs.25/latte Rs.35/cappuccino Rs.50): ")
if "report" in choice:
report()
elif "off" in choice:
shop_open_and_ingredients_available = False
print("SYSTEM IS CLOSED FOR REPAIR.")
elif "espresso" in choice:
money = int(input("Enter the money for the drink of your choice"))
if money < MENU['espresso']['cost']:
print(f"Money insufficient. Here's your refund of RS.{money}")
elif Water >= espresso_water and Coffee >= espresso_coffee:
print("Here is your Espresso. Thank You!")
print(f"Here's your change of RS.{money - MENU['espresso']['cost']}")
Water -= espresso_water
Coffee -= espresso_coffee
pay += 25
elif Water < espresso_water and espresso_coffee:
print("Sorry, Water is over")
elif Water > espresso_water and espresso_coffee:
print("Sorry, Coffee is over")
elif Water < espresso_water and espresso_coffee:
print("Water and Coffee are over")
elif Water < espresso_water:
print("Sorry, Water Shortage")
elif Coffee < espresso_coffee:
print("Sorry, Coffee Shortage")
else:
print("Sorry, We are currently facing some technical issues")
elif "latte" in choice:
money = int(input("Enter the money for the drink of your choice"))
if money < MENU['latte']['cost']:
print(f"Money insufficient. Here's your refund of RS.{money}")
if Water >= latte_water and Coffee >= latte_coffee and Milk >= latte_milk:
print("Here is your Latte. Thank You!")
print(f"Here's your change of RS.{money - MENU['latte']['cost']}")
Water -= latte_water
Coffee -= latte_coffee
Milk -= latte_milk
pay += 35
elif Water < latte_water and Coffee > latte_coffee and Milk > latte_milk:
print("Sorry, Water is over")
elif Water > latte_water and Coffee < latte_coffee and Milk > latte_milk:
print("Sorry, Coffee is over")
elif Water > latte_water and Coffee > latte_coffee and Milk < latte_milk:
print("Sorry, Milk is over")
elif Water < latte_water and Coffee < latte_coffee and Milk < latte_milk:
print("Water, Coffee and Milk are over")
elif Water < latte_water:
print("Sorry, Water Shortage")
elif Coffee < latte_coffee:
print("Sorry, Coffee Shortage")
elif Milk < latte_milk:
print("Sorry, Milk shortage")
else:
print("Sorry, We are currently facing some technical issues")
elif "cappuccino" in choice:
money = int(input("Enter the money for the drink of your choice"))
if money < MENU['cappuccino']['cost']:
print(f"Money insufficient. Here's your refund of RS.{money}")
if Water >= cappuccino_water and Coffee >= cappuccino_coffee and Milk >= cappuccino_milk:
print("Here is your cappuccino. Thank You!")
print(f"Here's your change of RS.{money - MENU['cappuccino']['cost']}")
Water -= cappuccino_water
Coffee -= cappuccino_coffee
Milk -= cappuccino_milk
pay += 50
elif Water < cappuccino_water and Coffee > cappuccino_coffee and Milk > cappuccino_milk:
print("Sorry, Water is over")
elif Water > cappuccino_water and Coffee < cappuccino_coffee and Milk > cappuccino_milk:
print("Sorry, Coffee is over")
elif Water > cappuccino_water and Coffee > cappuccino_coffee and Milk < cappuccino_milk:
print("Sorry, Milk is over")
elif Water < cappuccino_water and Coffee < cappuccino_coffee and Milk < cappuccino_milk:
print("Water, Coffee and Milk are over")
elif Water < cappuccino_water:
print("Sorry, Water Shortage")
elif Coffee < cappuccino_coffee:
print("Sorry, Coffee Shortage")
elif Milk < cappuccino_milk:
print("Sorry, Milk shortage")
else:
print("Sorry, We are currently facing some technical issues")
while shop_open_and_ingredients_available:
make_coffee()
|
import sys
import selectors
import json
import io
import struct
request_search = {
"morpheus": "Follow the white rabbit. \U0001f430",
"ring": "In the caves beneath the Misty Mountains. \U0001f48d",
"\U0001f436": "\U0001f43e Playing ball! \U0001f3d0",
}
class Message:
def __init__(self, selector, sock, addr):
self.selector = selector
self.sock = sock
self.addr = addr
self._recv_buffer = b""
self._send_buffer = b""
self._jsonheader_len = None
self.jsonheader = None
self.request = None
self.response_created = False
def _set_selector_events_mask(self, mode):
"""Set selector to listen for events: mode is 'r', 'w', or 'rw'."""
if mode == "r":
events = selectors.EVENT_READ
elif mode == "w":
events = selectors.EVENT_WRITE
elif mode == "rw":
events = selectors.EVENT_READ | selectors.EVENT_WRITE
else:
raise ValueError(f"Invalid events mask mode {repr(mode)}.")
self.selector.modify(self.sock, events, data=self)
def _read(self):
try:
# Should be ready to read
data = self.sock.recv(4096)
except BlockingIOError:
# Resource temporarily unavailable (errno EWOULDBLOCK)
pass
else:
if data:
self._recv_buffer += data
else:
raise RuntimeError("Peer closed.")
def _write(self):
if self._send_buffer:
print("sending", repr(self._send_buffer), "to", self.addr)
try:
# Should be ready to write
sent = self.sock.send(self._send_buffer)
except BlockingIOError:
# Resource temporarily unavailable (errno EWOULDBLOCK)
pass
else:
self._send_buffer = self._send_buffer[sent:]
# Close when the buffer is drained. The response has been sent.
if sent and not self._send_buffer:
self.close()
def _json_encode(self, obj, encoding):
return json.dumps(obj, ensure_ascii=False).encode(encoding)
def _json_decode(self, json_bytes, encoding):
tiow = io.TextIOWrapper(
io.BytesIO(json_bytes), encoding=encoding, newline=""
)
obj = json.load(tiow)
tiow.close()
return obj
def _create_message(
self, *, content_bytes, content_type, content_encoding
):
jsonheader = {
"byteorder": sys.byteorder,
"content-type": content_type,
"content-encoding": content_encoding,
"content-length": len(content_bytes),
}
jsonheader_bytes = self._json_encode(jsonheader, "utf-8")
message_hdr = struct.pack(">H", len(jsonheader_bytes))
message = message_hdr + jsonheader_bytes + content_bytes
return message
def _create_response_json_content(self):
action = self.request.get("action")
if action == "search":
query = self.request.get("value")
answer = request_search.get(query) or f'No match for "{query}".'
content = {"result": answer}
else:
content = {"result": f'Error: invalid action "{action}".'}
content_encoding = "utf-8"
response = {
"content_bytes": self._json_encode(content, content_encoding),
"content_type": "text/json",
"content_encoding": content_encoding,
}
return response
def _create_response_binary_content(self):
response = {
"content_bytes": b"First 10 bytes of request: "
+ self.request[:10],
"content_type": "binary/custom-server-binary-type",
"content_encoding": "binary",
}
return response
def process_events(self, mask):
if mask & selectors.EVENT_READ:
self.read()
if mask & selectors.EVENT_WRITE:
self.write()
def read(self):
self._read()
if self._jsonheader_len is None:
self.process_protoheader()
if self._jsonheader_len is not None:
if self.jsonheader is None:
self.process_jsonheader()
if self.jsonheader:
if self.request is None:
self.process_request()
def write(self):
if self.request:
if not self.response_created:
self.create_response()
self._write()
def close(self):
print("closing connection to", self.addr)
try:
self.selector.unregister(self.sock)
except Exception as e:
print(
"error: selector.unregister() exception for",
f"{self.addr}: {repr(e)}",
)
try:
self.sock.close()
except OSError as e:
print(
"error: socket.close() exception for",
f"{self.addr}: {repr(e)}",
)
finally:
# Delete reference to socket object for garbage collection
self.sock = None
def process_protoheader(self):
hdrlen = 2
if len(self._recv_buffer) >= hdrlen:
self._jsonheader_len = struct.unpack(
">H", self._recv_buffer[:hdrlen]
)[0]
self._recv_buffer = self._recv_buffer[hdrlen:]
def process_jsonheader(self):
hdrlen = self._jsonheader_len
if len(self._recv_buffer) >= hdrlen:
self.jsonheader = self._json_decode(
self._recv_buffer[:hdrlen], "utf-8"
)
self._recv_buffer = self._recv_buffer[hdrlen:]
for reqhdr in (
"byteorder",
"content-length",
"content-type",
"content-encoding",
):
if reqhdr not in self.jsonheader:
raise ValueError(f'Missing required header "{reqhdr}".')
def process_request(self):
content_len = self.jsonheader["content-length"]
if not len(self._recv_buffer) >= content_len:
return
data = self._recv_buffer[:content_len]
self._recv_buffer = self._recv_buffer[content_len:]
if self.jsonheader["content-type"] == "text/json":
encoding = self.jsonheader["content-encoding"]
self.request = self._json_decode(data, encoding)
print("received request", repr(self.request), "from", self.addr)
else:
# Binary or unknown content-type
self.request = data
print(
                f'received {self.jsonheader["content-type"]} request from',
self.addr,
)
# Set selector to listen for write events, we're done reading.
self._set_selector_events_mask("w")
def create_response(self):
if self.jsonheader["content-type"] == "text/json":
response = self._create_response_json_content()
else:
# Binary or unknown content-type
response = self._create_response_binary_content()
message = self._create_message(**response)
self.response_created = True
self._send_buffer += message
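# Standalone sketch of the wire format used by the Message class above: each
# message is a 2-byte big-endian JSON-header length, followed by the UTF-8 JSON
# header, followed by the content bytes. The round trip below mirrors what
# _create_message() builds and what process_protoheader()/process_jsonheader()/
# process_request() parse; the request content is a made-up example.
def _demo_frame_roundtrip():
    content = json.dumps({"action": "search", "value": "ring"}).encode("utf-8")
    jsonheader = {
        "byteorder": sys.byteorder,
        "content-type": "text/json",
        "content-encoding": "utf-8",
        "content-length": len(content),
    }
    jsonheader_bytes = json.dumps(jsonheader).encode("utf-8")
    wire = struct.pack(">H", len(jsonheader_bytes)) + jsonheader_bytes + content
    # Parse the frame back the same way the Message class does.
    hdr_len = struct.unpack(">H", wire[:2])[0]
    parsed_header = json.loads(wire[2:2 + hdr_len].decode("utf-8"))
    parsed_content = wire[2 + hdr_len:2 + hdr_len + parsed_header["content-length"]]
    assert json.loads(parsed_content.decode("utf-8"))["value"] == "ring"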
| import sys
import selectors
import json
import io
import struct
request_search = {
"morpheus": "Follow the white rabbit. \U0001f430",
"ring": "In the caves beneath the Misty Mountains. \U0001f48d",
"\U0001f436": "\U0001f43e Playing ball! \U0001f3d0",
}
class Message:
def __init__(self, selector, sock, addr):
self.selector = selector
self.sock = sock
self.addr = addr
self._recv_buffer = b""
self._send_buffer = b""
self._jsonheader_len = None
self.jsonheader = None
self.request = None
self.response_created = False
def _set_selector_events_mask(self, mode):
"""Set selector to listen for events: mode is 'r', 'w', or 'rw'."""
if mode == "r":
events = selectors.EVENT_READ
elif mode == "w":
events = selectors.EVENT_WRITE
elif mode == "rw":
events = selectors.EVENT_READ | selectors.EVENT_WRITE
else:
raise ValueError(f"Invalid events mask mode {repr(mode)}.")
self.selector.modify(self.sock, events, data=self)
def _read(self):
try:
# Should be ready to read
data = self.sock.recv(4096)
except BlockingIOError:
# Resource temporarily unavailable (errno EWOULDBLOCK)
pass
else:
if data:
self._recv_buffer += data
else:
raise RuntimeError("Peer closed.")
def _write(self):
if self._send_buffer:
print("sending", repr(self._send_buffer), "to", self.addr)
try:
# Should be ready to write
sent = self.sock.send(self._send_buffer)
except BlockingIOError:
# Resource temporarily unavailable (errno EWOULDBLOCK)
pass
else:
self._send_buffer = self._send_buffer[sent:]
# Close when the buffer is drained. The response has been sent.
if sent and not self._send_buffer:
self.close()
def _json_encode(self, obj, encoding):
return json.dumps(obj, ensure_ascii=False).encode(encoding)
def _json_decode(self, json_bytes, encoding):
tiow = io.TextIOWrapper(
io.BytesIO(json_bytes), encoding=encoding, newline=""
)
obj = json.load(tiow)
tiow.close()
return obj
def _create_message(
self, *, content_bytes, content_type, content_encoding
):
jsonheader = {
"byteorder": sys.byteorder,
"content-type": content_type,
"content-encoding": content_encoding,
"content-length": len(content_bytes),
}
jsonheader_bytes = self._json_encode(jsonheader, "utf-8")
message_hdr = struct.pack(">H", len(jsonheader_bytes))
message = message_hdr + jsonheader_bytes + content_bytes
return message
def _create_response_json_content(self):
action = self.request.get("action")
if action == "search":
query = self.request.get("value")
answer = request_search.get(query) or f'No match for "{query}".'
content = {"result": answer}
else:
content = {"result": f'Error: invalid action "{action}".'}
content_encoding = "utf-8"
response = {
"content_bytes": self._json_encode(content, content_encoding),
"content_type": "text/json",
"content_encoding": content_encoding,
}
return response
def _create_response_binary_content(self):
response = {
"content_bytes": b"First 10 bytes of request: "
+ self.request[:10],
"content_type": "binary/custom-server-binary-type",
"content_encoding": "binary",
}
return response
def process_events(self, mask):
if mask & selectors.EVENT_READ:
self.read()
if mask & selectors.EVENT_WRITE:
self.write()
def read(self):
self._read()
if self._jsonheader_len is None:
self.process_protoheader()
if self._jsonheader_len is not None:
if self.jsonheader is None:
self.process_jsonheader()
if self.jsonheader:
if self.request is None:
self.process_request()
def write(self):
if self.request:
if not self.response_created:
self.create_response()
self._write()
def close(self):
print("closing connection to", self.addr)
try:
self.selector.unregister(self.sock)
except Exception as e:
print(
"error: selector.unregister() exception for",
f"{self.addr}: {repr(e)}",
)
try:
self.sock.close()
except OSError as e:
print(
"error: socket.close() exception for",
f"{self.addr}: {repr(e)}",
)
finally:
# Delete reference to socket object for garbage collection
self.sock = None
def process_protoheader(self):
hdrlen = 2
if len(self._recv_buffer) >= hdrlen:
self._jsonheader_len = struct.unpack(
">H", self._recv_buffer[:hdrlen]
)[0]
self._recv_buffer = self._recv_buffer[hdrlen:]
def process_jsonheader(self):
hdrlen = self._jsonheader_len
if len(self._recv_buffer) >= hdrlen:
self.jsonheader = self._json_decode(
self._recv_buffer[:hdrlen], "utf-8"
)
self._recv_buffer = self._recv_buffer[hdrlen:]
for reqhdr in (
"byteorder",
"content-length",
"content-type",
"content-encoding",
):
if reqhdr not in self.jsonheader:
raise ValueError(f'Missing required header "{reqhdr}".')
def process_request(self):
content_len = self.jsonheader["content-length"]
if not len(self._recv_buffer) >= content_len:
return
data = self._recv_buffer[:content_len]
self._recv_buffer = self._recv_buffer[content_len:]
if self.jsonheader["content-type"] == "text/json":
encoding = self.jsonheader["content-encoding"]
self.request = self._json_decode(data, encoding)
print("received request", repr(self.request), "from", self.addr)
else:
# Binary or unknown content-type
self.request = data
print(
f'received {self.jsonheader["content-type"]} request from',
self.addr,
)
# Set selector to listen for write events, we're done reading.
self._set_selector_events_mask("w")
def create_response(self):
if self.jsonheader["content-type"] == "text/json":
response = self._create_response_json_content()
else:
# Binary or unknown content-type
response = self._create_response_binary_content()
message = self._create_message(**response)
self.response_created = True
self._send_buffer += message
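# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# A minimal selector-driven accept loop showing how Message objects are meant to
# be created and fed events. The host/port values and the __main__ guard are
# assumptions for illustration only.
if __name__ == "__main__":
    import socket

    sel = selectors.DefaultSelector()
    lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    lsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    lsock.bind(("127.0.0.1", 65432))
    lsock.listen()
    lsock.setblocking(False)
    sel.register(lsock, selectors.EVENT_READ, data=None)
    try:
        while True:
            for key, mask in sel.select(timeout=None):
                if key.data is None:
                    # New incoming connection: wrap it in a Message handler
                    conn, addr = key.fileobj.accept()
                    conn.setblocking(False)
                    sel.register(conn, selectors.EVENT_READ, data=Message(sel, conn, addr))
                else:
                    # Existing connection: let the Message drive read/write
                    message = key.data
                    try:
                        message.process_events(mask)
                    except Exception:
                        message.close()
    finally:
        sel.close()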
|
""" Entry point for evaluating/rendering a trained policy. """
import argparse
import json
import os
import numpy as np
import time
import datetime
from rltime.general.config import load_config
from rltime.general.utils import deep_dictionary_update
from rltime.general.type_registry import get_registered_type
from rltime.env_wrappers.common import make_env_creator, EpisodeRecorder
from rltime.env_wrappers.vec_env.sub_proc import make_sub_proc_vec_env
from rltime.general.loggers import DirectoryLogger
def create_policy_from_config(config, action_space, observation_space):
"""Creates a policy from the given config and spaces
This does not load the weights just creates the policy
"""
if not isinstance(config, dict):
config = load_config(config)
train_cls = get_registered_type(
"trainers", config['training'].get("type", None))
assert(hasattr(train_cls, "create_policy")), \
f"Config training class {type(train_cls)} does not have a " \
"'create_policy' method"
model_config = config.get("model")
return train_cls.create_policy(
model_config=model_config, action_space=action_space,
observation_space=observation_space, **config.get("policy_args", {}))
def eval_policy(path, num_envs, episode_count, record=False, record_fps=60,
render=False, render_fps=None, eps=0.001, conf_update=None):
"""Evaluates training result at 'path', loading the last checkpoint
The result is logged to a new line in file 'eval.json' in <path>
Args:
path: The path containing the training result output to evaluate
num_envs: Amount of vectorized (sub-process) ENVs to evaluate in
parallel
episode_count: The amount of episodes to evaluate total
record: Whether to record episodes to MP4 (under 'recordings'
sub-directory in <path>)
record_fps: If <record>, the FPS to record at (These are raw ENV frames
before any frame-skipping, so atari would usually be 60)
render: Whether to render the ENVs in a window in real-time (Tiled if
num_envs>1)
render_fps: Frames-Per-Second to sync the rendering to (Valid only for
render=True), the default (None) renders at max policy speed. These
are acting steps, so after frame-skipping if active
eps: Epsilon to use for random action selection
Note: We count the first 'episode_count' episodes that started and not
ended, as 'ended' is unfair to longer episodes in case of vectorized
evaluation. For Example: Take a policy that achieves 100 reward in 100
seconds 50% of the time and 0 reward in <1 second 50% of the time.
So we'd expect if we evaluate 20 episodes to get around ~50 average
    reward (which we would if running 20 episodes serially on a single ENV).
But if we run 16 ENVs in parallel we will likely get near-0 mean reward
if we count the first 20 episodes that finished (Since half of the 16
ENVs immediately end with reward 0 then restart, then half of those
immediately end with 0 and so on, so we quickly get ~(8+4+2+1) 0-reward
runs and don't count the ones which are running long and going to reach
100 reward), while if we take the first 20 episodes that started (and
ignore any that started after) we will get the expected result
"""
print("Evaluating:", path)
assert(num_envs <= episode_count), \
"num_envs can't be higher than the requested episode_count"
logger = DirectoryLogger(path, use_logging=False, tensorboard=False)
# Load the config from the result path
config = logger.get_config()
if conf_update:
config = dict(config) # Avoid changing the passed config
deep_dictionary_update(config, conf_update)
    # Make the env-creation function based on the config settings
env_args = config.get("env_args", {})
if record:
# If requested, add also an episode-recorder to the ENV stack
recorder = {
"type": EpisodeRecorder,
"args": {
"path": os.path.join(path, "recordings"),
"fps": record_fps
}
}
env_args['wrappers'] = [recorder] + env_args.get('wrappers', [])
env_creator = make_env_creator(config.get("env"), **env_args)
# Create a vectorized ENV
env = make_sub_proc_vec_env(env_creator, num_envs)
# Create the policy based on the config
policy = create_policy_from_config(
config, env.action_space, env.observation_space)
# Load the last checkpoint
training_step, cp_data = logger.get_checkpoint()
# Load the weights from the checkpoint to the policy
policy.load_state(cp_data['policy_state'])
print("Loaded checkpoint from step:", training_step)
# The initial policy input state
state = policy.make_input_state(env.reset(), np.array([True] * num_envs))
episodes_started = num_envs
rewards = []
lengths = []
# This signifies the ENV started the episode in time and should be counted
masks = [True] * num_envs
# TODO(frederik): Mention mode and difficulty
print(f"Running '{config.get("env")}' for {episode_count} episodes"
f" on {num_envs} ENVs")
while len(rewards) < episode_count:
step_start = time.time()
# Select the next action for each env
preds = policy.actor_predict(state, timesteps=1)
actions = preds['actions']
if eps:
# Remap to random actions with eps probability
for i in range(num_envs):
if np.random.rand() < eps:
actions[i] = env.action_space.sample()
# Send the action and get the transition data
obs, _, dones, info = env.step(actions)
        # Check whether any env has finished an episode
for i, env_info in enumerate(info):
# We use the 'real' done/reward from the EpisodeTracker wrapper
if env_info['episode_info']['done']:
if masks[i]:
# Only count the first 'episode_count' that started
reward = env_info['episode_info']['reward']
length = env_info['episode_info']['length']
rewards.append(reward)
lengths.append(length)
print(f"Episode {len(rewards)}/{episode_count} "
f"finished with reward: {reward}")
episodes_started += 1
if episodes_started > episode_count:
masks[i] = False
# Render to screen if requested
if render:
if render_fps:
diff = 1./render_fps - (time.time() - step_start)
if diff > 0:
time.sleep(diff)
env.render()
# Generate the next policy input state
state = policy.make_input_state(obs, dones)
env.close()
# Log the result
result = {
"step": training_step,
"date": datetime.datetime.now(),
"episodes": episode_count,
"envs": num_envs,
**{
key: {
"mean": np.mean(vals),
"min": np.min(vals),
"max": np.max(vals),
"median": np.median(vals),
"std": np.std(vals),
} for key, vals in [("reward", rewards), ("length", lengths)]
}
}
print("Result:")
logger.log_result("eval", result, None)
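# Hedged usage sketch (editorial addition): evaluating a finished run
# programmatically. The result path below is hypothetical and must contain the
# config and checkpoints written by DirectoryLogger during training.
#
#     eval_policy("results/pong_ppo", num_envs=4, episode_count=20,
#                 render=False, eps=0.001)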
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'path', type=str,
help="The path to the training directory result to evaluate")
parser.add_argument(
'--num-envs', type=int, default=1,
help="Amount of ENVs to run in parallel")
parser.add_argument(
'--episodes', type=int, default=5,
help="Amount of episodes to run")
parser.add_argument(
'--record', action='store_true',
help="Whether to record episode to MP4 (To a sub-directory in the "
"result path). Warning: If used with --num-envs>1 the last "
"videos will be truncated")
parser.add_argument(
'--record-fps', type=int, default=60,
help="FPS to record at if --record (Typically 60FPS for atari)")
parser.add_argument(
'--render', action='store_true',
help="Whether to render the episodes in real-time")
parser.add_argument(
'--render-fps', type=int, default=0,
help="FPS to sync to if using --render (Set to 0 for full speed), "
"note this is after ENV frame-skipping so if you want 60FPS with "
"frame-skip of 4 use 15 here")
parser.add_argument(
'--eps', type=float, default=0.001,
help="Epsilon value to use for random action selection during "
"evaluation")
parser.add_argument(
'--conf-update', type=str,
help="Optional JSON dictionary string to deep-update the config with")
return parser.parse_args()
def main():
args = parse_args()
conf_update = None if not args.conf_update \
else json.loads(args.conf_update)
eval_policy(
args.path, num_envs=args.num_envs, episode_count=args.episodes,
record=args.record, record_fps=args.record_fps,
render=args.render, render_fps=args.render_fps, eps=args.eps, conf_update=conf_update)
if __name__ == '__main__':
main()
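# Example invocations (editorial addition; the script name and result path are
# hypothetical, assuming this file is saved as eval.py):
#   python eval.py results/pong_ppo --episodes 20 --num-envs 4
#   python eval.py results/pong_ppo --render --render-fps 15 --eps 0
#   python eval.py results/pong_ppo --record --record-fps 60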
| """ Entry point for evaluating/rendering a trained policy. """
import argparse
import json
import os
import numpy as np
import time
import datetime
from rltime.general.config import load_config
from rltime.general.utils import deep_dictionary_update
from rltime.general.type_registry import get_registered_type
from rltime.env_wrappers.common import make_env_creator, EpisodeRecorder
from rltime.env_wrappers.vec_env.sub_proc import make_sub_proc_vec_env
from rltime.general.loggers import DirectoryLogger
def create_policy_from_config(config, action_space, observation_space):
"""Creates a policy from the given config and spaces
This does not load the weights just creates the policy
"""
if not isinstance(config, dict):
config = load_config(config)
train_cls = get_registered_type(
"trainers", config['training'].get("type", None))
assert(hasattr(train_cls, "create_policy")), \
f"Config training class {type(train_cls)} does not have a " \
"'create_policy' method"
model_config = config.get("model")
return train_cls.create_policy(
model_config=model_config, action_space=action_space,
observation_space=observation_space, **config.get("policy_args", {}))
def eval_policy(path, num_envs, episode_count, record=False, record_fps=60,
render=False, render_fps=None, eps=0.001, conf_update=None):
"""Evaluates training result at 'path', loading the last checkpoint
The result is logged to a new line in file 'eval.json' in <path>
Args:
path: The path containing the training result output to evaluate
num_envs: Amount of vectorized (sub-process) ENVs to evaluate in
parallel
episode_count: The amount of episodes to evaluate total
record: Whether to record episodes to MP4 (under 'recordings'
sub-directory in <path>)
record_fps: If <record>, the FPS to record at (These are raw ENV frames
before any frame-skipping, so atari would usually be 60)
render: Whether to render the ENVs in a window in real-time (Tiled if
num_envs>1)
render_fps: Frames-Per-Second to sync the rendering to (Valid only for
render=True), the default (None) renders at max policy speed. These
are acting steps, so after frame-skipping if active
eps: Epsilon to use for random action selection
Note: We count the first 'episode_count' episodes that started and not
ended, as 'ended' is unfair to longer episodes in case of vectorized
evaluation. For Example: Take a policy that achieves 100 reward in 100
seconds 50% of the time and 0 reward in <1 second 50% of the time.
So we'd expect if we evaluate 20 episodes to get around ~50 average
    reward (which we would if running 20 episodes serially on a single ENV).
But if we run 16 ENVs in parallel we will likely get near-0 mean reward
if we count the first 20 episodes that finished (Since half of the 16
ENVs immediately end with reward 0 then restart, then half of those
immediately end with 0 and so on, so we quickly get ~(8+4+2+1) 0-reward
runs and don't count the ones which are running long and going to reach
100 reward), while if we take the first 20 episodes that started (and
ignore any that started after) we will get the expected result
"""
print("Evaluating:", path)
assert(num_envs <= episode_count), \
"num_envs can't be higher than the requested episode_count"
logger = DirectoryLogger(path, use_logging=False, tensorboard=False)
# Load the config from the result path
config = logger.get_config()
if conf_update:
config = dict(config) # Avoid changing the passed config
deep_dictionary_update(config, conf_update)
    # Make the env-creation function based on the config settings
env_args = config.get("env_args", {})
if record:
# If requested, add also an episode-recorder to the ENV stack
recorder = {
"type": EpisodeRecorder,
"args": {
"path": os.path.join(path, "recordings"),
"fps": record_fps
}
}
env_args['wrappers'] = [recorder] + env_args.get('wrappers', [])
env_creator = make_env_creator(config.get("env"), **env_args)
# Create a vectorized ENV
env = make_sub_proc_vec_env(env_creator, num_envs)
# Create the policy based on the config
policy = create_policy_from_config(
config, env.action_space, env.observation_space)
# Load the last checkpoint
training_step, cp_data = logger.get_checkpoint()
# Load the weights from the checkpoint to the policy
policy.load_state(cp_data['policy_state'])
print("Loaded checkpoint from step:", training_step)
# The initial policy input state
state = policy.make_input_state(env.reset(), np.array([True] * num_envs))
episodes_started = num_envs
rewards = []
lengths = []
# This signifies the ENV started the episode in time and should be counted
masks = [True] * num_envs
# TODO(frederik): Mention mode and difficulty
print(f"Running '{config.get('env')}' for {episode_count} episodes"
f" on {num_envs} ENVs")
while len(rewards) < episode_count:
step_start = time.time()
# Select the next action for each env
preds = policy.actor_predict(state, timesteps=1)
actions = preds['actions']
if eps:
# Remap to random actions with eps probability
for i in range(num_envs):
if np.random.rand() < eps:
actions[i] = env.action_space.sample()
# Send the action and get the transition data
obs, _, dones, info = env.step(actions)
        # Check whether any env has finished an episode
for i, env_info in enumerate(info):
# We use the 'real' done/reward from the EpisodeTracker wrapper
if env_info['episode_info']['done']:
if masks[i]:
# Only count the first 'episode_count' that started
reward = env_info['episode_info']['reward']
length = env_info['episode_info']['length']
rewards.append(reward)
lengths.append(length)
print(f"Episode {len(rewards)}/{episode_count} "
f"finished with reward: {reward}")
episodes_started += 1
if episodes_started > episode_count:
masks[i] = False
# Render to screen if requested
if render:
if render_fps:
diff = 1./render_fps - (time.time() - step_start)
if diff > 0:
time.sleep(diff)
env.render()
# Generate the next policy input state
state = policy.make_input_state(obs, dones)
env.close()
# Log the result
result = {
"step": training_step,
"date": datetime.datetime.now(),
"episodes": episode_count,
"envs": num_envs,
**{
key: {
"mean": np.mean(vals),
"min": np.min(vals),
"max": np.max(vals),
"median": np.median(vals),
"std": np.std(vals),
} for key, vals in [("reward", rewards), ("length", lengths)]
}
}
print("Result:")
logger.log_result("eval", result, None)
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'path', type=str,
help="The path to the training directory result to evaluate")
parser.add_argument(
'--num-envs', type=int, default=1,
help="Amount of ENVs to run in parallel")
parser.add_argument(
'--episodes', type=int, default=5,
help="Amount of episodes to run")
parser.add_argument(
'--record', action='store_true',
help="Whether to record episode to MP4 (To a sub-directory in the "
"result path). Warning: If used with --num-envs>1 the last "
"videos will be truncated")
parser.add_argument(
'--record-fps', type=int, default=60,
help="FPS to record at if --record (Typically 60FPS for atari)")
parser.add_argument(
'--render', action='store_true',
help="Whether to render the episodes in real-time")
parser.add_argument(
'--render-fps', type=int, default=0,
help="FPS to sync to if using --render (Set to 0 for full speed), "
"note this is after ENV frame-skipping so if you want 60FPS with "
"frame-skip of 4 use 15 here")
parser.add_argument(
'--eps', type=float, default=0.001,
help="Epsilon value to use for random action selection during "
"evaluation")
parser.add_argument(
'--conf-update', type=str,
help="Optional JSON dictionary string to deep-update the config with")
return parser.parse_args()
def main():
args = parse_args()
conf_update = None if not args.conf_update \
else json.loads(args.conf_update)
eval_policy(
args.path, num_envs=args.num_envs, episode_count=args.episodes,
record=args.record, record_fps=args.record_fps,
render=args.render, render_fps=args.render_fps, eps=args.eps, conf_update=conf_update)
if __name__ == '__main__':
main()
|
import os
import logging
import asyncio
import sqlite3
import platform
from binascii import hexlify
from collections import defaultdict
from dataclasses import dataclass
from contextvars import ContextVar
from typing import Tuple, List, Union, Callable, Any, Awaitable, Iterable, Dict, Optional
from datetime import date
from prometheus_client import Gauge, Counter, Histogram
from lbry.utils import LockWithMetrics
from .bip32 import PubKey
from .transaction import Transaction, Output, OutputScript, TXRefImmutable, Input
from .constants import TXO_TYPES, CLAIM_TYPES
from .util import date_to_julian_day
from concurrent.futures.thread import ThreadPoolExecutor # pylint: disable=wrong-import-order
if platform.system() == 'Windows' or 'ANDROID_ARGUMENT' in os.environ or 'KIVY_BUILD' in os.environ:
from concurrent.futures.thread import ThreadPoolExecutor as ReaderExecutorClass # pylint: disable=reimported
else:
from concurrent.futures.process import ProcessPoolExecutor as ReaderExecutorClass
log = logging.getLogger(__name__)
sqlite3.enable_callback_tracebacks(True)
HISTOGRAM_BUCKETS = (
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
)
@dataclass
class ReaderProcessState:
cursor: sqlite3.Cursor
reader_context: Optional[ContextVar[ReaderProcessState]] = ContextVar('reader_context')
def initializer(path):
db = sqlite3.connect(path)
db.row_factory = dict_row_factory
db.executescript("pragma journal_mode=WAL;")
reader = ReaderProcessState(db.cursor())
reader_context.set(reader)
def run_read_only_fetchall(sql, params):
cursor = reader_context.get().cursor
try:
return cursor.execute(sql, params).fetchall()
except (Exception, OSError) as e:
log.exception('Error running transaction:', exc_info=e)
raise
def run_read_only_fetchone(sql, params):
cursor = reader_context.get().cursor
try:
return cursor.execute(sql, params).fetchone()
except (Exception, OSError) as e:
log.exception('Error running transaction:', exc_info=e)
raise
class AIOSQLite:
reader_executor: ReaderExecutorClass
waiting_writes_metric = Gauge(
"waiting_writes_count", "Number of waiting db writes", namespace="daemon_database"
)
waiting_reads_metric = Gauge(
"waiting_reads_count", "Number of waiting db writes", namespace="daemon_database"
)
write_count_metric = Counter(
"write_count", "Number of database writes", namespace="daemon_database"
)
read_count_metric = Counter(
"read_count", "Number of database reads", namespace="daemon_database"
)
acquire_write_lock_metric = Histogram(
        'write_lock_acquired', 'Time to acquire the write lock', namespace="daemon_database", buckets=HISTOGRAM_BUCKETS
)
held_write_lock_metric = Histogram(
        'write_lock_held', 'Length of time the write lock is held for', namespace="daemon_database",
buckets=HISTOGRAM_BUCKETS
)
def __init__(self):
# has to be single threaded as there is no mapping of thread:connection
self.writer_executor = ThreadPoolExecutor(max_workers=1)
self.writer_connection: Optional[sqlite3.Connection] = None
self._closing = False
self.query_count = 0
self.write_lock = LockWithMetrics(self.acquire_write_lock_metric, self.held_write_lock_metric)
self.writers = 0
self.read_ready = asyncio.Event()
self.urgent_read_done = asyncio.Event()
@classmethod
async def connect(cls, path: Union[bytes, str], *args, **kwargs):
sqlite3.enable_callback_tracebacks(True)
db = cls()
def _connect_writer():
db.writer_connection = sqlite3.connect(path, *args, **kwargs)
readers = max(os.cpu_count() - 2, 2)
db.reader_executor = ReaderExecutorClass(
max_workers=readers, initializer=initializer, initargs=(path, )
)
await asyncio.get_event_loop().run_in_executor(db.writer_executor, _connect_writer)
db.read_ready.set()
db.urgent_read_done.set()
return db
async def close(self):
if self._closing:
return
self._closing = True
def __checkpoint_and_close(conn: sqlite3.Connection):
conn.execute("PRAGMA WAL_CHECKPOINT(FULL);")
log.info("DB checkpoint finished.")
conn.close()
await asyncio.get_event_loop().run_in_executor(
self.writer_executor, __checkpoint_and_close, self.writer_connection)
self.writer_executor.shutdown(wait=True)
self.reader_executor.shutdown(wait=True)
self.read_ready.clear()
self.writer_connection = None
def executemany(self, sql: str, params: Iterable):
params = params if params is not None else []
# this fetchall is needed to prevent SQLITE_MISUSE
return self.run(lambda conn: conn.executemany(sql, params).fetchall())
def executescript(self, script: str) -> Awaitable:
return self.run(lambda conn: conn.executescript(script))
async def _execute_fetch(self, sql: str, parameters: Iterable = None,
read_only=False, fetch_all: bool = False) -> List[dict]:
read_only_fn = run_read_only_fetchall if fetch_all else run_read_only_fetchone
parameters = parameters if parameters is not None else []
still_waiting = False
urgent_read = False
if read_only:
self.waiting_reads_metric.inc()
self.read_count_metric.inc()
try:
while self.writers and not self._closing: # more writes can come in while we are waiting for the first
if not urgent_read and still_waiting and self.urgent_read_done.is_set():
# throttle the writes if they pile up
self.urgent_read_done.clear()
urgent_read = True
# wait until the running writes have finished
await self.read_ready.wait()
still_waiting = True
if self._closing:
raise asyncio.CancelledError()
return await asyncio.get_event_loop().run_in_executor(
self.reader_executor, read_only_fn, sql, parameters
)
finally:
if urgent_read:
# unthrottle the writers if they had to be throttled
self.urgent_read_done.set()
self.waiting_reads_metric.dec()
if fetch_all:
return await self.run(lambda conn: conn.execute(sql, parameters).fetchall())
return await self.run(lambda conn: conn.execute(sql, parameters).fetchone())
async def execute_fetchall(self, sql: str, parameters: Iterable = None,
read_only=False) -> List[dict]:
return await self._execute_fetch(sql, parameters, read_only, fetch_all=True)
async def execute_fetchone(self, sql: str, parameters: Iterable = None,
read_only=False) -> List[dict]:
return await self._execute_fetch(sql, parameters, read_only, fetch_all=False)
def execute(self, sql: str, parameters: Iterable = None) -> Awaitable[sqlite3.Cursor]:
parameters = parameters if parameters is not None else []
return self.run(lambda conn: conn.execute(sql, parameters))
async def run(self, fun, *args, **kwargs):
self.write_count_metric.inc()
self.waiting_writes_metric.inc()
# it's possible many writes are coming in one after the other, these can
# block reader calls for a long time
# if the reader waits for the writers to finish and then has to wait for
# yet more, it will clear the urgent_read_done event to block more writers
# piling on
try:
await self.urgent_read_done.wait()
except Exception as e:
self.waiting_writes_metric.dec()
raise e
self.writers += 1
# block readers
self.read_ready.clear()
try:
async with self.write_lock:
if self._closing:
raise asyncio.CancelledError()
return await asyncio.get_event_loop().run_in_executor(
self.writer_executor, lambda: self.__run_transaction(fun, *args, **kwargs)
)
finally:
self.writers -= 1
self.waiting_writes_metric.dec()
if not self.writers:
# unblock the readers once the last enqueued writer finishes
self.read_ready.set()
def __run_transaction(self, fun: Callable[[sqlite3.Connection, Any, Any], Any], *args, **kwargs):
self.writer_connection.execute('begin')
try:
self.query_count += 1
result = fun(self.writer_connection, *args, **kwargs) # type: ignore
self.writer_connection.commit()
return result
except (Exception, OSError) as e:
log.exception('Error running transaction:', exc_info=e)
self.writer_connection.rollback()
log.warning("rolled back")
raise
async def run_with_foreign_keys_disabled(self, fun, *args, **kwargs):
self.write_count_metric.inc()
self.waiting_writes_metric.inc()
try:
await self.urgent_read_done.wait()
except Exception as e:
self.waiting_writes_metric.dec()
raise e
self.writers += 1
self.read_ready.clear()
try:
async with self.write_lock:
if self._closing:
raise asyncio.CancelledError()
return await asyncio.get_event_loop().run_in_executor(
self.writer_executor, self.__run_transaction_with_foreign_keys_disabled, fun, args, kwargs
)
finally:
self.writers -= 1
self.waiting_writes_metric.dec()
if not self.writers:
self.read_ready.set()
def __run_transaction_with_foreign_keys_disabled(self,
fun: Callable[[sqlite3.Connection, Any, Any], Any],
args, kwargs):
foreign_keys_enabled, = self.writer_connection.execute("pragma foreign_keys").fetchone()
if not foreign_keys_enabled:
raise sqlite3.IntegrityError("foreign keys are disabled, use `AIOSQLite.run` instead")
try:
self.writer_connection.execute('pragma foreign_keys=off').fetchone()
return self.__run_transaction(fun, *args, **kwargs)
finally:
self.writer_connection.execute('pragma foreign_keys=on').fetchone()
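# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Shows the intended AIOSQLite flow: writes go through the single writer thread,
# reads marked read_only=True go to the reader executor pool. The database path
# is an assumption; call asyncio.run(_example_aiosqlite_usage()) to try it.
async def _example_aiosqlite_usage(path="/tmp/aiosqlite_example.db"):
    db = await AIOSQLite.connect(path, isolation_level=None)
    await db.executescript("create table if not exists kv (k text primary key, v text);")
    await db.execute("insert or replace into kv values (?, ?)", ("greeting", "hello"))
    rows = await db.execute_fetchall("select k, v from kv", read_only=True)
    log.info("read back rows: %s", rows)
    await db.close()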
def constraints_to_sql(constraints, joiner=' AND ', prepend_key=''):
sql, values = [], {}
for key, constraint in constraints.items():
tag = '0'
if '#' in key:
key, tag = key[:key.index('#')], key[key.index('#')+1:]
col, op, key = key, '=', key.replace('.', '_')
if not key:
sql.append(constraint)
continue
if key.startswith('$$'):
col, key = col[2:], key[1:]
elif key.startswith('$'):
values[key] = constraint
continue
if key.endswith('__not'):
col, op = col[:-len('__not')], '!='
elif key.endswith('__is_null'):
col = col[:-len('__is_null')]
sql.append(f'{col} IS NULL')
continue
if key.endswith('__is_not_null'):
col = col[:-len('__is_not_null')]
sql.append(f'{col} IS NOT NULL')
continue
if key.endswith('__lt'):
col, op = col[:-len('__lt')], '<'
elif key.endswith('__lte'):
col, op = col[:-len('__lte')], '<='
elif key.endswith('__gt'):
col, op = col[:-len('__gt')], '>'
elif key.endswith('__gte'):
col, op = col[:-len('__gte')], '>='
elif key.endswith('__like'):
col, op = col[:-len('__like')], 'LIKE'
elif key.endswith('__not_like'):
col, op = col[:-len('__not_like')], 'NOT LIKE'
elif key.endswith('__in') or key.endswith('__not_in'):
if key.endswith('__in'):
col, op, one_val_op = col[:-len('__in')], 'IN', '='
else:
col, op, one_val_op = col[:-len('__not_in')], 'NOT IN', '!='
if constraint:
if isinstance(constraint, (list, set, tuple)):
if len(constraint) == 1:
values[f'{key}{tag}'] = next(iter(constraint))
sql.append(f'{col} {one_val_op} :{key}{tag}')
else:
keys = []
for i, val in enumerate(constraint):
keys.append(f':{key}{tag}_{i}')
values[f'{key}{tag}_{i}'] = val
sql.append(f'{col} {op} ({', '.join(keys)})')
elif isinstance(constraint, str):
sql.append(f'{col} {op} ({constraint})')
else:
raise ValueError(f"{col} requires a list, set or string as constraint value.")
continue
elif key.endswith('__any') or key.endswith('__or'):
where, subvalues = constraints_to_sql(constraint, ' OR ', key+tag+'_')
sql.append(f'({where})')
values.update(subvalues)
continue
if key.endswith('__and'):
where, subvalues = constraints_to_sql(constraint, ' AND ', key+tag+'_')
sql.append(f'({where})')
values.update(subvalues)
continue
sql.append(f'{col} {op} :{prepend_key}{key}{tag}')
values[prepend_key+key+tag] = constraint
return joiner.join(sql) if sql else '', values
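# Example of the constraint mini-language above (editorial addition; placeholder
# names are derived from the code and shown for illustration):
#   constraints_to_sql({'height__gte': 100, 'txid__in': ['a', 'b']})
# returns roughly:
#   ('height >= :height__gte0 AND txid IN (:txid__in0_0, :txid__in0_1)',
#    {'height__gte0': 100, 'txid__in0_0': 'a', 'txid__in0_1': 'b'})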
def query(select, **constraints) -> Tuple[str, Dict[str, Any]]:
sql = [select]
limit = constraints.pop('limit', None)
offset = constraints.pop('offset', None)
order_by = constraints.pop('order_by', None)
group_by = constraints.pop('group_by', None)
accounts = constraints.pop('accounts', [])
if accounts:
constraints['account__in'] = [a.public_key.address for a in accounts]
where, values = constraints_to_sql(constraints)
if where:
sql.append('WHERE')
sql.append(where)
if group_by is not None:
sql.append(f'GROUP BY {group_by}')
if order_by:
sql.append('ORDER BY')
if isinstance(order_by, str):
sql.append(order_by)
elif isinstance(order_by, list):
sql.append(', '.join(order_by))
else:
raise ValueError("order_by must be string or list")
if limit is not None:
sql.append(f'LIMIT {limit}')
if offset is not None:
sql.append(f'OFFSET {offset}')
return ' '.join(sql), values
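# Example (editorial addition): combining a SELECT with keyword constraints.
#   query("SELECT txid, height FROM tx", height__gte=100,
#         order_by="height DESC", limit=10)
# produces roughly:
#   ("SELECT txid, height FROM tx WHERE height >= :height__gte0 "
#    "ORDER BY height DESC LIMIT 10", {'height__gte0': 100})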
def interpolate(sql, values):
for k in sorted(values.keys(), reverse=True):
value = values[k]
if isinstance(value, bytes):
value = f"X'{hexlify(value).decode()}'"
elif isinstance(value, str):
value = f"'{value}'"
else:
value = str(value)
sql = sql.replace(f":{k}", value)
return sql
def constrain_single_or_list(constraints, column, value, convert=lambda x: x, negate=False):
if value is not None:
if isinstance(value, list):
value = [convert(v) for v in value]
if len(value) == 1:
if negate:
constraints[f"{column}__or"] = {
f"{column}__is_null": True,
f"{column}__not": value[0]
}
else:
constraints[column] = value[0]
elif len(value) > 1:
if negate:
constraints[f"{column}__or"] = {
f"{column}__is_null": True,
f"{column}__not_in": value
}
else:
constraints[f"{column}__in"] = value
elif negate:
constraints[f"{column}__or"] = {
f"{column}__is_null": True,
f"{column}__not": convert(value)
}
else:
constraints[column] = convert(value)
return constraints
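# Example (editorial addition): how single values and lists collapse into the
# appropriate constraint key.
#   constrain_single_or_list({}, 'claim_id', 'abc')          -> {'claim_id': 'abc'}
#   constrain_single_or_list({}, 'claim_id', ['abc', 'def']) -> {'claim_id__in': ['abc', 'def']}
#   constrain_single_or_list({}, 'claim_id', ['abc'], negate=True)
#       -> {'claim_id__or': {'claim_id__is_null': True, 'claim_id__not': 'abc'}}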
class SQLiteMixin:
SCHEMA_VERSION: Optional[str] = None
CREATE_TABLES_QUERY: str
MAX_QUERY_VARIABLES = 900
CREATE_VERSION_TABLE = """
create table if not exists version (
version text
);
"""
def __init__(self, path):
self._db_path = path
self.db: AIOSQLite = None
self.ledger = None
async def open(self):
log.info("connecting to database: %s", self._db_path)
self.db = await AIOSQLite.connect(self._db_path, isolation_level=None)
if self.SCHEMA_VERSION:
tables = [t[0] for t in await self.db.execute_fetchall(
"SELECT name FROM sqlite_master WHERE type='table';"
)]
if tables:
if 'version' in tables:
version = await self.db.execute_fetchone("SELECT version FROM version LIMIT 1;")
if version == (self.SCHEMA_VERSION,):
return
await self.db.executescript('\n'.join(
f"DROP TABLE {table};" for table in tables
) + '\n' + 'PRAGMA WAL_CHECKPOINT(FULL);' + '\n' + 'VACUUM;')
await self.db.execute(self.CREATE_VERSION_TABLE)
await self.db.execute("INSERT INTO version VALUES (?)", (self.SCHEMA_VERSION,))
await self.db.executescript(self.CREATE_TABLES_QUERY)
async def close(self):
await self.db.close()
@staticmethod
def _insert_sql(table: str, data: dict, ignore_duplicate: bool = False,
replace: bool = False) -> Tuple[str, List]:
columns, values = [], []
for column, value in data.items():
columns.append(column)
values.append(value)
policy = ""
if ignore_duplicate:
policy = " OR IGNORE"
if replace:
policy = " OR REPLACE"
sql = "INSERT{} INTO {} ({}) VALUES ({})".format(
policy, table, ', '.join(columns), ', '.join(['?'] * len(values))
)
return sql, values
@staticmethod
def _update_sql(table: str, data: dict, where: str,
constraints: Union[list, tuple]) -> Tuple[str, list]:
columns, values = [], []
for column, value in data.items():
columns.append(f"{column} = ?")
values.append(value)
values.extend(constraints)
sql = "UPDATE {} SET {} WHERE {}".format(
table, ', '.join(columns), where
)
return sql, values
def dict_row_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
SQLITE_MAX_INTEGER = 9223372036854775807
def _get_spendable_utxos(transaction: sqlite3.Connection, accounts: List, decoded_transactions: Dict[str, Transaction],
result: Dict[Tuple[bytes, int, bool], List[int]], reserved: List[Transaction],
amount_to_reserve: int, reserved_amount: int, floor: int, ceiling: int,
fee_per_byte: int) -> int:
accounts_fmt = ",".join(["?"] * len(accounts))
txo_query = f"""
SELECT tx.txid, txo.txoid, tx.raw, tx.height, txo.position as nout, tx.is_verified, txo.amount FROM txo
INNER JOIN account_address USING (address)
LEFT JOIN txi USING (txoid)
INNER JOIN tx USING (txid)
WHERE txo.txo_type=0 AND txi.txoid IS NULL AND tx.txid IS NOT NULL AND NOT txo.is_reserved
AND txo.amount >= ? AND txo.amount < ?
"""
if accounts:
txo_query += f"""
AND account_address.account {'= ?' if len(accounts_fmt) == 1 else 'IN (' + accounts_fmt + ')'}
"""
txo_query += """
ORDER BY txo.amount ASC, tx.height DESC
"""
# prefer confirmed, but save unconfirmed utxos from this selection in case they are needed
unconfirmed = []
for row in transaction.execute(txo_query, (floor, ceiling, *accounts)):
(txid, txoid, raw, height, nout, verified, amount) = row.values()
            # verified or unverified transactions were found - reset the gap count
# multiple txos can come from the same tx, only decode it once and cache
if txid not in decoded_transactions:
# cache the decoded transaction
decoded_transactions[txid] = Transaction(raw)
decoded_tx = decoded_transactions[txid]
# save the unconfirmed txo for possible use later, if still needed
if verified:
# add the txo to the reservation, minus the fee for including it
reserved_amount += amount
reserved_amount -= Input.spend(decoded_tx.outputs[nout]).size * fee_per_byte
# mark it as reserved
result[(raw, height, verified)].append(nout)
reserved.append(txoid)
# if we've reserved enough, return
if reserved_amount >= amount_to_reserve:
return reserved_amount
else:
unconfirmed.append((txid, txoid, raw, height, nout, verified, amount))
    # items are popped from the end, so reverse the list to consume them in the order they were seen
unconfirmed.reverse()
# add available unconfirmed txos if any were previously found
while unconfirmed and reserved_amount < amount_to_reserve:
(txid, txoid, raw, height, nout, verified, amount) = unconfirmed.pop()
# it's already decoded
decoded_tx = decoded_transactions[txid]
# add to the reserved amount
reserved_amount += amount
reserved_amount -= Input.spend(decoded_tx.outputs[nout]).size * fee_per_byte
result[(raw, height, verified)].append(nout)
reserved.append(txoid)
return reserved_amount
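# Editorial note on the selection loop below: it scans txos in widening amount
# windows starting at [floor, floor * base_multiplier). After each pass the
# floor advances to the previous ceiling; if a pass reserves nothing, the
# multiplier is squared (100 -> 10,000 -> ...) so the next window is much
# wider, and after five consecutive empty passes (or once the ceiling would
# exceed SQLITE_MAX_INTEGER) the search gives up.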
def get_and_reserve_spendable_utxos(transaction: sqlite3.Connection, accounts: List, amount_to_reserve: int, floor: int,
fee_per_byte: int, set_reserved: bool, return_insufficient_funds: bool,
base_multiplier: int = 100):
txs = defaultdict(list)
decoded_transactions = {}
reserved = []
reserved_dewies = 0
multiplier = base_multiplier
gap_count = 0
while reserved_dewies < amount_to_reserve and gap_count < 5 and floor * multiplier < SQLITE_MAX_INTEGER:
previous_reserved_dewies = reserved_dewies
reserved_dewies = _get_spendable_utxos(
transaction, accounts, decoded_transactions, txs, reserved, amount_to_reserve, reserved_dewies,
floor, floor * multiplier, fee_per_byte
)
floor *= multiplier
if previous_reserved_dewies == reserved_dewies:
gap_count += 1
multiplier **= 2
else:
gap_count = 0
multiplier = base_multiplier
# reserve the accumulated txos if enough were found
if reserved_dewies >= amount_to_reserve:
if set_reserved:
transaction.executemany("UPDATE txo SET is_reserved = ? WHERE txoid = ?",
[(True, txoid) for txoid in reserved]).fetchall()
return txs
# return_insufficient_funds and set_reserved are used for testing
return txs if return_insufficient_funds else {}
class Database(SQLiteMixin):
SCHEMA_VERSION = "1.5"
PRAGMAS = """
pragma journal_mode=WAL;
"""
CREATE_ACCOUNT_TABLE = """
create table if not exists account_address (
account text not null,
address text not null,
chain integer not null,
pubkey blob not null,
chain_code blob not null,
n integer not null,
depth integer not null,
primary key (account, address)
);
create index if not exists address_account_idx on account_address (address, account);
"""
CREATE_PUBKEY_ADDRESS_TABLE = """
create table if not exists pubkey_address (
address text primary key,
history text,
used_times integer not null default 0
);
"""
CREATE_TX_TABLE = """
create table if not exists tx (
txid text primary key,
raw blob not null,
height integer not null,
position integer not null,
is_verified boolean not null default 0,
purchased_claim_id text,
day integer
);
create index if not exists tx_purchased_claim_id_idx on tx (purchased_claim_id);
"""
CREATE_TXO_TABLE = """
create table if not exists txo (
txid text references tx,
txoid text primary key,
address text references pubkey_address,
position integer not null,
amount integer not null,
script blob not null,
is_reserved boolean not null default 0,
txo_type integer not null default 0,
claim_id text,
claim_name text,
channel_id text,
reposted_claim_id text
);
create index if not exists txo_txid_idx on txo (txid);
create index if not exists txo_address_idx on txo (address);
create index if not exists txo_claim_id_idx on txo (claim_id, txo_type);
create index if not exists txo_claim_name_idx on txo (claim_name);
create index if not exists txo_txo_type_idx on txo (txo_type);
create index if not exists txo_channel_id_idx on txo (channel_id);
create index if not exists txo_reposted_claim_idx on txo (reposted_claim_id);
"""
CREATE_TXI_TABLE = """
create table if not exists txi (
txid text references tx,
txoid text references txo primary key,
address text references pubkey_address,
position integer not null
);
create index if not exists txi_address_idx on txi (address);
create index if not exists first_input_idx on txi (txid, address) where position=0;
"""
CREATE_TABLES_QUERY = (
PRAGMAS +
CREATE_ACCOUNT_TABLE +
CREATE_PUBKEY_ADDRESS_TABLE +
CREATE_TX_TABLE +
CREATE_TXO_TABLE +
CREATE_TXI_TABLE
)
async def open(self):
await super().open()
self.db.writer_connection.row_factory = dict_row_factory
def txo_to_row(self, tx, txo):
row = {
'txid': tx.id,
'txoid': txo.id,
'address': txo.get_address(self.ledger),
'position': txo.position,
'amount': txo.amount,
'script': sqlite3.Binary(txo.script.source)
}
if txo.is_claim:
if txo.can_decode_claim:
claim = txo.claim
row['txo_type'] = TXO_TYPES.get(claim.claim_type, TXO_TYPES['stream'])
if claim.is_repost:
row['reposted_claim_id'] = claim.repost.reference.claim_id
if claim.is_signed:
row['channel_id'] = claim.signing_channel_id
else:
row['txo_type'] = TXO_TYPES['stream']
elif txo.is_support:
row['txo_type'] = TXO_TYPES['support']
elif txo.purchase is not None:
row['txo_type'] = TXO_TYPES['purchase']
row['claim_id'] = txo.purchased_claim_id
if txo.script.is_claim_involved:
row['claim_id'] = txo.claim_id
row['claim_name'] = txo.claim_name
return row
def tx_to_row(self, tx):
row = {
'txid': tx.id,
'raw': sqlite3.Binary(tx.raw),
'height': tx.height,
'position': tx.position,
'is_verified': tx.is_verified,
'day': tx.get_julian_day(self.ledger),
}
txos = tx.outputs
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
txos[0].purchase = txos[1]
row['purchased_claim_id'] = txos[1].purchase_data.claim_id
return row
async def insert_transaction(self, tx):
await self.db.execute_fetchall(*self._insert_sql('tx', self.tx_to_row(tx)))
async def update_transaction(self, tx):
await self.db.execute_fetchall(*self._update_sql("tx", {
'height': tx.height, 'position': tx.position, 'is_verified': tx.is_verified
}, 'txid = ?', (tx.id,)))
def _transaction_io(self, conn: sqlite3.Connection, tx: Transaction, address, txhash):
conn.execute(*self._insert_sql('tx', self.tx_to_row(tx), replace=True)).fetchall()
is_my_input = False
for txi in tx.inputs:
if txi.txo_ref.txo is not None:
txo = txi.txo_ref.txo
if txo.has_address and txo.get_address(self.ledger) == address:
is_my_input = True
conn.execute(*self._insert_sql("txi", {
'txid': tx.id,
'txoid': txo.id,
'address': address,
'position': txi.position
}, ignore_duplicate=True)).fetchall()
for txo in tx.outputs:
if txo.script.is_pay_pubkey_hash and (txo.pubkey_hash == txhash or is_my_input):
conn.execute(*self._insert_sql(
"txo", self.txo_to_row(tx, txo), ignore_duplicate=True
)).fetchall()
elif txo.script.is_pay_script_hash:
# TODO: implement script hash payments
log.warning('Database.save_transaction_io: pay script hash is not implemented!')
def save_transaction_io(self, tx: Transaction, address, txhash, history):
return self.save_transaction_io_batch([tx], address, txhash, history)
def save_transaction_io_batch(self, txs: Iterable[Transaction], address, txhash, history):
history_count = history.count(':') // 2
def __many(conn):
for tx in txs:
self._transaction_io(conn, tx, address, txhash)
conn.execute(
"UPDATE pubkey_address SET history = ?, used_times = ? WHERE address = ?",
(history, history_count, address)
).fetchall()
return self.db.run(__many)
async def reserve_outputs(self, txos, is_reserved=True):
txoids = [(is_reserved, txo.id) for txo in txos]
await self.db.executemany("UPDATE txo SET is_reserved = ? WHERE txoid = ?", txoids)
async def release_outputs(self, txos):
await self.reserve_outputs(txos, is_reserved=False)
async def rewind_blockchain(self, above_height): # pylint: disable=no-self-use
# TODO:
# 1. delete transactions above_height
# 2. update address histories removing deleted TXs
return True
async def get_spendable_utxos(self, ledger, reserve_amount, accounts: Optional[Iterable], min_amount: int = 1,
fee_per_byte: int = 50, set_reserved: bool = True,
return_insufficient_funds: bool = False) -> List:
to_spend = await self.db.run(
get_and_reserve_spendable_utxos, tuple(account.id for account in accounts), reserve_amount, min_amount,
fee_per_byte, set_reserved, return_insufficient_funds
)
txos = []
for (raw, height, verified), positions in to_spend.items():
tx = Transaction(raw, height=height, is_verified=verified)
for nout in positions:
txos.append(tx.outputs[nout].get_estimator(ledger))
return txos
async def select_transactions(self, cols, accounts=None, read_only=False, **constraints):
if not {'txid', 'txid__in'}.intersection(constraints):
assert accounts, "'accounts' argument required when no 'txid' constraint is present"
where, values = constraints_to_sql({
'$$account_address.account__in': [a.public_key.address for a in accounts]
})
constraints['txid__in'] = f"""
SELECT txo.txid FROM txo JOIN account_address USING (address) WHERE {where}
UNION
SELECT txi.txid FROM txi JOIN account_address USING (address) WHERE {where}
"""
constraints.update(values)
return await self.db.execute_fetchall(
*query(f"SELECT {cols} FROM tx", **constraints), read_only=read_only
)
TXO_NOT_MINE = Output(None, None, is_my_output=False)
async def get_transactions(self, wallet=None, **constraints):
include_is_spent = constraints.pop('include_is_spent', False)
include_is_my_input = constraints.pop('include_is_my_input', False)
include_is_my_output = constraints.pop('include_is_my_output', False)
tx_rows = await self.select_transactions(
'txid, raw, height, position, is_verified',
order_by=constraints.pop('order_by', ["height=0 DESC", "height DESC", "position DESC"]),
**constraints
)
if not tx_rows:
return []
txids, txs, txi_txoids = [], [], []
for row in tx_rows:
txids.append(row['txid'])
txs.append(Transaction(
raw=row['raw'], height=row['height'], position=row['position'],
is_verified=bool(row['is_verified'])
))
for txi in txs[-1].inputs:
txi_txoids.append(txi.txo_ref.id)
step = self.MAX_QUERY_VARIABLES
annotated_txos = {}
for offset in range(0, len(txids), step):
annotated_txos.update({
txo.id: txo for txo in
(await self.get_txos(
wallet=wallet,
txid__in=txids[offset:offset+step], order_by='txo.txid',
include_is_spent=include_is_spent,
include_is_my_input=include_is_my_input,
include_is_my_output=include_is_my_output,
))
})
referenced_txos = {}
for offset in range(0, len(txi_txoids), step):
referenced_txos.update({
txo.id: txo for txo in
(await self.get_txos(
wallet=wallet,
txoid__in=txi_txoids[offset:offset+step], order_by='txo.txoid',
include_is_my_output=include_is_my_output,
))
})
for tx in txs:
for txi in tx.inputs:
txo = referenced_txos.get(txi.txo_ref.id)
if txo:
txi.txo_ref = txo.ref
for txo in tx.outputs:
_txo = annotated_txos.get(txo.id)
if _txo:
txo.update_annotations(_txo)
else:
txo.update_annotations(self.TXO_NOT_MINE)
for tx in txs:
txos = tx.outputs
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
txos[0].purchase = txos[1]
return txs
async def get_transaction_count(self, **constraints):
constraints.pop('wallet', None)
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
count = await self.select_transactions('COUNT(*) as total', **constraints)
return count[0]['total'] or 0
async def get_transaction(self, **constraints):
txs = await self.get_transactions(limit=1, **constraints)
if txs:
return txs[0]
async def select_txos(
self, cols, accounts=None, is_my_input=None, is_my_output=True,
is_my_input_or_output=None, exclude_internal_transfers=False,
include_is_spent=False, include_is_my_input=False,
is_spent=None, read_only=False, **constraints):
for rename_col in ('txid', 'txoid'):
for rename_constraint in (rename_col, rename_col+'__in', rename_col+'__not_in'):
if rename_constraint in constraints:
constraints['txo.'+rename_constraint] = constraints.pop(rename_constraint)
if accounts:
account_in_sql, values = constraints_to_sql({
'$$account__in': [a.public_key.address for a in accounts]
})
my_addresses = f"SELECT address FROM account_address WHERE {account_in_sql}"
constraints.update(values)
if is_my_input_or_output:
include_is_my_input = True
constraints['received_or_sent__or'] = {
'txo.address__in': my_addresses,
'sent__and': {
'txi.address__is_not_null': True,
'txi.address__in': my_addresses
}
}
else:
if is_my_output:
constraints['txo.address__in'] = my_addresses
elif is_my_output is False:
constraints['txo.address__not_in'] = my_addresses
if is_my_input:
include_is_my_input = True
constraints['txi.address__is_not_null'] = True
constraints['txi.address__in'] = my_addresses
elif is_my_input is False:
include_is_my_input = True
constraints['is_my_input_false__or'] = {
'txi.address__is_null': True,
'txi.address__not_in': my_addresses
}
if exclude_internal_transfers:
include_is_my_input = True
constraints['exclude_internal_payments__or'] = {
'txo.txo_type__not': TXO_TYPES['other'],
'txo.address__not_in': my_addresses,
'txi.address__is_null': True,
'txi.address__not_in': my_addresses,
}
sql = [f"SELECT {cols} FROM txo JOIN tx ON (tx.txid=txo.txid)"]
if is_spent:
constraints['spent.txoid__is_not_null'] = True
elif is_spent is False:
constraints['is_reserved'] = False
constraints['spent.txoid__is_null'] = True
if include_is_spent or is_spent is not None:
sql.append("LEFT JOIN txi AS spent ON (spent.txoid=txo.txoid)")
if include_is_my_input:
sql.append("LEFT JOIN txi ON (txi.position=0 AND txi.txid=txo.txid)")
return await self.db.execute_fetchall(*query(' '.join(sql), **constraints), read_only=read_only)
async def get_txos(self, wallet=None, no_tx=False, read_only=False, **constraints):
include_is_spent = constraints.get('include_is_spent', False)
include_is_my_input = constraints.get('include_is_my_input', False)
include_is_my_output = constraints.pop('include_is_my_output', False)
include_received_tips = constraints.pop('include_received_tips', False)
select_columns = [
"tx.txid, raw, tx.height, tx.position as tx_position, tx.is_verified, "
"txo_type, txo.position as txo_position, amount, script"
]
my_accounts = {a.public_key.address for a in wallet.accounts} if wallet else set()
my_accounts_sql = ""
if include_is_my_output or include_is_my_input:
my_accounts_sql, values = constraints_to_sql({'$$account__in#_wallet': my_accounts})
constraints.update(values)
if include_is_my_output and my_accounts:
if constraints.get('is_my_output', None) in (True, False):
select_columns.append(f"{1 if constraints["is_my_output"] else 0} AS is_my_output")
else:
select_columns.append(f"""(
txo.address IN (SELECT address FROM account_address WHERE {my_accounts_sql})
) AS is_my_output""")
if include_is_my_input and my_accounts:
if constraints.get('is_my_input', None) in (True, False):
select_columns.append(f"{1 if constraints["is_my_input"] else 0} AS is_my_input")
else:
select_columns.append(f"""(
txi.address IS NOT NULL AND
txi.address IN (SELECT address FROM account_address WHERE {my_accounts_sql})
) AS is_my_input""")
if include_is_spent:
select_columns.append("spent.txoid IS NOT NULL AS is_spent")
if include_received_tips:
select_columns.append(f"""(
SELECT COALESCE(SUM(support.amount), 0) FROM txo AS support WHERE
support.claim_id = txo.claim_id AND
support.txo_type = {TXO_TYPES['support']} AND
support.address IN (SELECT address FROM account_address WHERE {my_accounts_sql}) AND
support.txoid NOT IN (SELECT txoid FROM txi)
) AS received_tips""")
if 'order_by' not in constraints or constraints['order_by'] == 'height':
constraints['order_by'] = [
"tx.height=0 DESC", "tx.height DESC", "tx.position DESC", "txo.position"
]
elif constraints.get('order_by', None) == 'none':
del constraints['order_by']
rows = await self.select_txos(', '.join(select_columns), read_only=read_only, **constraints)
txos = []
txs = {}
for row in rows:
if no_tx:
txo = Output(
amount=row['amount'],
script=OutputScript(row['script']),
tx_ref=TXRefImmutable.from_id(row['txid'], row['height']),
position=row['txo_position']
)
else:
if row['txid'] not in txs:
txs[row['txid']] = Transaction(
row['raw'], height=row['height'], position=row['tx_position'],
is_verified=bool(row['is_verified'])
)
txo = txs[row['txid']].outputs[row['txo_position']]
if include_is_spent:
txo.is_spent = bool(row['is_spent'])
if include_is_my_input:
txo.is_my_input = bool(row['is_my_input'])
if include_is_my_output:
txo.is_my_output = bool(row['is_my_output'])
if include_is_my_input and include_is_my_output:
if txo.is_my_input and txo.is_my_output and row['txo_type'] == TXO_TYPES['other']:
txo.is_internal_transfer = True
else:
txo.is_internal_transfer = False
if include_received_tips:
txo.received_tips = row['received_tips']
txos.append(txo)
channel_ids = set()
for txo in txos:
if txo.is_claim and txo.can_decode_claim:
if txo.claim.is_signed:
channel_ids.add(txo.claim.signing_channel_id)
if txo.claim.is_channel and wallet:
for account in wallet.accounts:
private_key = await account.get_channel_private_key(
txo.claim.channel.public_key_bytes
)
if private_key:
txo.private_key = private_key
break
if channel_ids:
channels = {
txo.claim_id: txo for txo in
(await self.get_channels(
wallet=wallet,
claim_id__in=channel_ids,
read_only=read_only
))
}
for txo in txos:
if txo.is_claim and txo.can_decode_claim:
txo.channel = channels.get(txo.claim.signing_channel_id, None)
return txos
@staticmethod
def _clean_txo_constraints_for_aggregation(constraints):
constraints.pop('include_is_spent', None)
constraints.pop('include_is_my_input', None)
constraints.pop('include_is_my_output', None)
constraints.pop('include_received_tips', None)
constraints.pop('wallet', None)
constraints.pop('resolve', None)
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
async def get_txo_count(self, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
count = await self.select_txos('COUNT(*) AS total', **constraints)
return count[0]['total'] or 0
async def get_txo_sum(self, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
result = await self.select_txos('SUM(amount) AS total', **constraints)
return result[0]['total'] or 0
async def get_txo_plot(self, start_day=None, days_back=0, end_day=None, days_after=None, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
if start_day is None:
constraints['day__gte'] = self.ledger.headers.estimated_julian_day(
self.ledger.headers.height
) - days_back
else:
constraints['day__gte'] = date_to_julian_day(
date.fromisoformat(start_day)
)
if end_day is not None:
constraints['day__lte'] = date_to_julian_day(
date.fromisoformat(end_day)
)
elif days_after is not None:
constraints['day__lte'] = constraints['day__gte'] + days_after
return await self.select_txos(
"DATE(day) AS day, SUM(amount) AS total",
group_by='day', order_by='day', **constraints
)
def get_utxos(self, read_only=False, **constraints):
return self.get_txos(is_spent=False, read_only=read_only, **constraints)
def get_utxo_count(self, **constraints):
return self.get_txo_count(is_spent=False, **constraints)
async def get_balance(self, wallet=None, accounts=None, read_only=False, **constraints):
assert wallet or accounts, \
"'wallet' or 'accounts' constraints required to calculate balance"
constraints['accounts'] = accounts or wallet.accounts
balance = await self.select_txos(
'SUM(amount) as total', is_spent=False, read_only=read_only, **constraints
)
return balance[0]['total'] or 0
async def select_addresses(self, cols, read_only=False, **constraints):
return await self.db.execute_fetchall(*query(
f"SELECT {cols} FROM pubkey_address JOIN account_address USING (address)",
**constraints
), read_only=read_only)
async def get_addresses(self, cols=None, read_only=False, **constraints):
cols = cols or (
'address', 'account', 'chain', 'history', 'used_times',
'pubkey', 'chain_code', 'n', 'depth'
)
addresses = await self.select_addresses(', '.join(cols), read_only=read_only, **constraints)
if 'pubkey' in cols:
for address in addresses:
address['pubkey'] = PubKey(
self.ledger, address.pop('pubkey'), address.pop('chain_code'),
address.pop('n'), address.pop('depth')
)
return addresses
async def get_address_count(self, cols=None, read_only=False, **constraints):
count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints)
return count[0]['total'] or 0
async def get_address(self, read_only=False, **constraints):
addresses = await self.get_addresses(read_only=read_only, limit=1, **constraints)
if addresses:
return addresses[0]
async def add_keys(self, account, chain, pubkeys):
await self.db.executemany(
"insert or ignore into account_address "
"(account, address, chain, pubkey, chain_code, n, depth) values "
"(?, ?, ?, ?, ?, ?, ?)", ((
account.id, k.address, chain,
sqlite3.Binary(k.pubkey_bytes),
sqlite3.Binary(k.chain_code),
k.n, k.depth
) for k in pubkeys)
)
await self.db.executemany(
"insert or ignore into pubkey_address (address) values (?)",
((pubkey.address,) for pubkey in pubkeys)
)
async def _set_address_history(self, address, history):
await self.db.execute_fetchall(
"UPDATE pubkey_address SET history = ?, used_times = ? WHERE address = ?",
(history, history.count(':')//2, address)
)
async def set_address_history(self, address, history):
await self._set_address_history(address, history)
@staticmethod
def constrain_purchases(constraints):
accounts = constraints.pop('accounts', None)
assert accounts, "'accounts' argument required to find purchases"
if not {'purchased_claim_id', 'purchased_claim_id__in'}.intersection(constraints):
constraints['purchased_claim_id__is_not_null'] = True
constraints.update({
f'$account{i}': a.public_key.address for i, a in enumerate(accounts)
})
account_values = ', '.join([f':$account{i}' for i in range(len(accounts))])
constraints['txid__in'] = f"""
SELECT txid FROM txi JOIN account_address USING (address)
WHERE account_address.account IN ({account_values})
"""
async def get_purchases(self, **constraints):
self.constrain_purchases(constraints)
return [tx.outputs[0] for tx in await self.get_transactions(**constraints)]
def get_purchase_count(self, **constraints):
self.constrain_purchases(constraints)
return self.get_transaction_count(**constraints)
@staticmethod
def constrain_claims(constraints):
if {'txo_type', 'txo_type__in'}.intersection(constraints):
return
claim_types = constraints.pop('claim_type', None)
if claim_types:
constrain_single_or_list(
constraints, 'txo_type', claim_types, lambda x: TXO_TYPES[x]
)
else:
constraints['txo_type__in'] = CLAIM_TYPES
async def get_claims(self, read_only=False, **constraints) -> List[Output]:
self.constrain_claims(constraints)
return await self.get_utxos(read_only=read_only, **constraints)
def get_claim_count(self, **constraints):
self.constrain_claims(constraints)
return self.get_utxo_count(**constraints)
@staticmethod
def constrain_streams(constraints):
constraints['txo_type'] = TXO_TYPES['stream']
def get_streams(self, read_only=False, **constraints):
self.constrain_streams(constraints)
return self.get_claims(read_only=read_only, **constraints)
def get_stream_count(self, **constraints):
self.constrain_streams(constraints)
return self.get_claim_count(**constraints)
@staticmethod
def constrain_channels(constraints):
constraints['txo_type'] = TXO_TYPES['channel']
def get_channels(self, **constraints):
self.constrain_channels(constraints)
return self.get_claims(**constraints)
def get_channel_count(self, **constraints):
self.constrain_channels(constraints)
return self.get_claim_count(**constraints)
@staticmethod
def constrain_supports(constraints):
constraints['txo_type'] = TXO_TYPES['support']
def get_supports(self, **constraints):
self.constrain_supports(constraints)
return self.get_utxos(**constraints)
def get_support_count(self, **constraints):
self.constrain_supports(constraints)
return self.get_utxo_count(**constraints)
@staticmethod
def constrain_collections(constraints):
constraints['txo_type'] = TXO_TYPES['collection']
def get_collections(self, **constraints):
self.constrain_collections(constraints)
return self.get_utxos(**constraints)
def get_collection_count(self, **constraints):
self.constrain_collections(constraints)
return self.get_utxo_count(**constraints)
async def release_all_outputs(self, account=None):
if account is None:
await self.db.execute_fetchall("UPDATE txo SET is_reserved = 0 WHERE is_reserved = 1")
else:
await self.db.execute_fetchall(
"UPDATE txo SET is_reserved = 0 WHERE"
" is_reserved = 1 AND txo.address IN ("
" SELECT address from account_address WHERE account = ?"
" )", (account.public_key.address, )
)
def get_supports_summary(self, read_only=False, **constraints):
return self.get_txos(
txo_type=TXO_TYPES['support'],
is_spent=False, is_my_output=True,
include_is_my_input=True,
no_tx=True, read_only=read_only,
**constraints
)
| import os
import logging
import asyncio
import sqlite3
import platform
from binascii import hexlify
from collections import defaultdict
from dataclasses import dataclass
from contextvars import ContextVar
from typing import Tuple, List, Union, Callable, Any, Awaitable, Iterable, Dict, Optional
from datetime import date
from prometheus_client import Gauge, Counter, Histogram
from lbry.utils import LockWithMetrics
from .bip32 import PubKey
from .transaction import Transaction, Output, OutputScript, TXRefImmutable, Input
from .constants import TXO_TYPES, CLAIM_TYPES
from .util import date_to_julian_day
from concurrent.futures.thread import ThreadPoolExecutor # pylint: disable=wrong-import-order
if platform.system() == 'Windows' or 'ANDROID_ARGUMENT' in os.environ or 'KIVY_BUILD' in os.environ:
from concurrent.futures.thread import ThreadPoolExecutor as ReaderExecutorClass # pylint: disable=reimported
else:
from concurrent.futures.process import ProcessPoolExecutor as ReaderExecutorClass
log = logging.getLogger(__name__)
sqlite3.enable_callback_tracebacks(True)
HISTOGRAM_BUCKETS = (
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
)
@dataclass
class ReaderProcessState:
cursor: sqlite3.Cursor
reader_context: Optional[ContextVar[ReaderProcessState]] = ContextVar('reader_context')
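# Each worker in the read-only executor pool runs initializer() once, opening its own SQLite connection and stashing its cursor in this context variable.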
def initializer(path):
db = sqlite3.connect(path)
db.row_factory = dict_row_factory
db.executescript("pragma journal_mode=WAL;")
reader = ReaderProcessState(db.cursor())
reader_context.set(reader)
def run_read_only_fetchall(sql, params):
cursor = reader_context.get().cursor
try:
return cursor.execute(sql, params).fetchall()
except (Exception, OSError) as e:
log.exception('Error running read-only query:', exc_info=e)
raise
def run_read_only_fetchone(sql, params):
cursor = reader_context.get().cursor
try:
return cursor.execute(sql, params).fetchone()
except (Exception, OSError) as e:
log.exception('Error running read-only query:', exc_info=e)
raise
class AIOSQLite:
reader_executor: ReaderExecutorClass
waiting_writes_metric = Gauge(
"waiting_writes_count", "Number of waiting db writes", namespace="daemon_database"
)
waiting_reads_metric = Gauge(
"waiting_reads_count", "Number of waiting db writes", namespace="daemon_database"
)
write_count_metric = Counter(
"write_count", "Number of database writes", namespace="daemon_database"
)
read_count_metric = Counter(
"read_count", "Number of database reads", namespace="daemon_database"
)
acquire_write_lock_metric = Histogram(
'write_lock_acquired', 'Time to acquire the write lock', namespace="daemon_database", buckets=HISTOGRAM_BUCKETS
)
held_write_lock_metric = Histogram(
'write_lock_held', 'Length of time the write lock is held for', namespace="daemon_database",
buckets=HISTOGRAM_BUCKETS
)
def __init__(self):
# has to be single threaded as there is no mapping of thread:connection
self.writer_executor = ThreadPoolExecutor(max_workers=1)
self.writer_connection: Optional[sqlite3.Connection] = None
self._closing = False
self.query_count = 0
self.write_lock = LockWithMetrics(self.acquire_write_lock_metric, self.held_write_lock_metric)
self.writers = 0
self.read_ready = asyncio.Event()
self.urgent_read_done = asyncio.Event()
@classmethod
async def connect(cls, path: Union[bytes, str], *args, **kwargs):
sqlite3.enable_callback_tracebacks(True)
db = cls()
def _connect_writer():
db.writer_connection = sqlite3.connect(path, *args, **kwargs)
readers = max(os.cpu_count() - 2, 2)
db.reader_executor = ReaderExecutorClass(
max_workers=readers, initializer=initializer, initargs=(path, )
)
await asyncio.get_event_loop().run_in_executor(db.writer_executor, _connect_writer)
db.read_ready.set()
db.urgent_read_done.set()
return db
async def close(self):
if self._closing:
return
self._closing = True
def __checkpoint_and_close(conn: sqlite3.Connection):
conn.execute("PRAGMA WAL_CHECKPOINT(FULL);")
log.info("DB checkpoint finished.")
conn.close()
await asyncio.get_event_loop().run_in_executor(
self.writer_executor, __checkpoint_and_close, self.writer_connection)
self.writer_executor.shutdown(wait=True)
self.reader_executor.shutdown(wait=True)
self.read_ready.clear()
self.writer_connection = None
def executemany(self, sql: str, params: Iterable):
params = params if params is not None else []
# this fetchall is needed to prevent SQLITE_MISUSE
return self.run(lambda conn: conn.executemany(sql, params).fetchall())
def executescript(self, script: str) -> Awaitable:
return self.run(lambda conn: conn.executescript(script))
async def _execute_fetch(self, sql: str, parameters: Iterable = None,
read_only=False, fetch_all: bool = False) -> List[dict]:
read_only_fn = run_read_only_fetchall if fetch_all else run_read_only_fetchone
parameters = parameters if parameters is not None else []
still_waiting = False
urgent_read = False
if read_only:
self.waiting_reads_metric.inc()
self.read_count_metric.inc()
try:
while self.writers and not self._closing: # more writes can come in while we are waiting for the first
if not urgent_read and still_waiting and self.urgent_read_done.is_set():
# throttle the writes if they pile up
self.urgent_read_done.clear()
urgent_read = True
# wait until the running writes have finished
await self.read_ready.wait()
still_waiting = True
if self._closing:
raise asyncio.CancelledError()
return await asyncio.get_event_loop().run_in_executor(
self.reader_executor, read_only_fn, sql, parameters
)
finally:
if urgent_read:
# unthrottle the writers if they had to be throttled
self.urgent_read_done.set()
self.waiting_reads_metric.dec()
if fetch_all:
return await self.run(lambda conn: conn.execute(sql, parameters).fetchall())
return await self.run(lambda conn: conn.execute(sql, parameters).fetchone())
async def execute_fetchall(self, sql: str, parameters: Iterable = None,
read_only=False) -> List[dict]:
return await self._execute_fetch(sql, parameters, read_only, fetch_all=True)
async def execute_fetchone(self, sql: str, parameters: Iterable = None,
read_only=False) -> List[dict]:
return await self._execute_fetch(sql, parameters, read_only, fetch_all=False)
def execute(self, sql: str, parameters: Iterable = None) -> Awaitable[sqlite3.Cursor]:
parameters = parameters if parameters is not None else []
return self.run(lambda conn: conn.execute(sql, parameters))
async def run(self, fun, *args, **kwargs):
self.write_count_metric.inc()
self.waiting_writes_metric.inc()
# it's possible many writes are coming in one after the other, these can
# block reader calls for a long time
# if the reader waits for the writers to finish and then has to wait for
# yet more, it will clear the urgent_read_done event to block more writers
# piling on
try:
await self.urgent_read_done.wait()
except Exception as e:
self.waiting_writes_metric.dec()
raise e
self.writers += 1
# block readers
self.read_ready.clear()
try:
async with self.write_lock:
if self._closing:
raise asyncio.CancelledError()
return await asyncio.get_event_loop().run_in_executor(
self.writer_executor, lambda: self.__run_transaction(fun, *args, **kwargs)
)
finally:
self.writers -= 1
self.waiting_writes_metric.dec()
if not self.writers:
# unblock the readers once the last enqueued writer finishes
self.read_ready.set()
def __run_transaction(self, fun: Callable[[sqlite3.Connection, Any, Any], Any], *args, **kwargs):
self.writer_connection.execute('begin')
try:
self.query_count += 1
result = fun(self.writer_connection, *args, **kwargs) # type: ignore
self.writer_connection.commit()
return result
except (Exception, OSError) as e:
log.exception('Error running transaction:', exc_info=e)
self.writer_connection.rollback()
log.warning("rolled back")
raise
async def run_with_foreign_keys_disabled(self, fun, *args, **kwargs):
self.write_count_metric.inc()
self.waiting_writes_metric.inc()
try:
await self.urgent_read_done.wait()
except Exception as e:
self.waiting_writes_metric.dec()
raise e
self.writers += 1
self.read_ready.clear()
try:
async with self.write_lock:
if self._closing:
raise asyncio.CancelledError()
return await asyncio.get_event_loop().run_in_executor(
self.writer_executor, self.__run_transaction_with_foreign_keys_disabled, fun, args, kwargs
)
finally:
self.writers -= 1
self.waiting_writes_metric.dec()
if not self.writers:
self.read_ready.set()
def __run_transaction_with_foreign_keys_disabled(self,
fun: Callable[[sqlite3.Connection, Any, Any], Any],
args, kwargs):
foreign_keys_enabled, = self.writer_connection.execute("pragma foreign_keys").fetchone()
if not foreign_keys_enabled:
raise sqlite3.IntegrityError("foreign keys are disabled, use `AIOSQLite.run` instead")
try:
self.writer_connection.execute('pragma foreign_keys=off').fetchone()
return self.__run_transaction(fun, *args, **kwargs)
finally:
self.writer_connection.execute('pragma foreign_keys=on').fetchone()
def constraints_to_sql(constraints, joiner=' AND ', prepend_key=''):
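# Translates keyword constraints into a parameterized WHERE clause. Keys use Django-style suffixes
# (__not, __lt, __lte, __gt, __gte, __like, __not_like, __in, __not_in, __is_null, __is_not_null, __any/__or, __and);
# a '$name' key passes a bare named value, '$$name' keeps the raw (possibly dotted) column name, and a '#tag' suffix disambiguates repeated keys.
# e.g. {'height__lte': 5, 'txid__in': ['a', 'b']} -> "height <= :height__lte0 AND txid IN (:txid__in0_0, :txid__in0_1)"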
sql, values = [], {}
for key, constraint in constraints.items():
tag = '0'
if '#' in key:
key, tag = key[:key.index('#')], key[key.index('#')+1:]
col, op, key = key, '=', key.replace('.', '_')
if not key:
sql.append(constraint)
continue
if key.startswith('$$'):
col, key = col[2:], key[1:]
elif key.startswith('$'):
values[key] = constraint
continue
if key.endswith('__not'):
col, op = col[:-len('__not')], '!='
elif key.endswith('__is_null'):
col = col[:-len('__is_null')]
sql.append(f'{col} IS NULL')
continue
if key.endswith('__is_not_null'):
col = col[:-len('__is_not_null')]
sql.append(f'{col} IS NOT NULL')
continue
if key.endswith('__lt'):
col, op = col[:-len('__lt')], '<'
elif key.endswith('__lte'):
col, op = col[:-len('__lte')], '<='
elif key.endswith('__gt'):
col, op = col[:-len('__gt')], '>'
elif key.endswith('__gte'):
col, op = col[:-len('__gte')], '>='
elif key.endswith('__like'):
col, op = col[:-len('__like')], 'LIKE'
elif key.endswith('__not_like'):
col, op = col[:-len('__not_like')], 'NOT LIKE'
elif key.endswith('__in') or key.endswith('__not_in'):
if key.endswith('__in'):
col, op, one_val_op = col[:-len('__in')], 'IN', '='
else:
col, op, one_val_op = col[:-len('__not_in')], 'NOT IN', '!='
if constraint:
if isinstance(constraint, (list, set, tuple)):
if len(constraint) == 1:
values[f'{key}{tag}'] = next(iter(constraint))
sql.append(f'{col} {one_val_op} :{key}{tag}')
else:
keys = []
for i, val in enumerate(constraint):
keys.append(f':{key}{tag}_{i}')
values[f'{key}{tag}_{i}'] = val
sql.append(f'{col} {op} ({", ".join(keys)})')
elif isinstance(constraint, str):
sql.append(f'{col} {op} ({constraint})')
else:
raise ValueError(f"{col} requires a list, set or string as constraint value.")
continue
elif key.endswith('__any') or key.endswith('__or'):
where, subvalues = constraints_to_sql(constraint, ' OR ', key+tag+'_')
sql.append(f'({where})')
values.update(subvalues)
continue
if key.endswith('__and'):
where, subvalues = constraints_to_sql(constraint, ' AND ', key+tag+'_')
sql.append(f'({where})')
values.update(subvalues)
continue
sql.append(f'{col} {op} :{prepend_key}{key}{tag}')
values[prepend_key+key+tag] = constraint
return joiner.join(sql) if sql else '', values
def query(select, **constraints) -> Tuple[str, Dict[str, Any]]:
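# Assembles a full SELECT statement: the special keys limit, offset, order_by, group_by and accounts shape the query,
# everything else becomes the WHERE clause via constraints_to_sql().
# e.g. query("SELECT * FROM tx", height__gte=100, limit=10) -> ("SELECT * FROM tx WHERE height >= :height__gte0 LIMIT 10", {'height__gte0': 100})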
sql = [select]
limit = constraints.pop('limit', None)
offset = constraints.pop('offset', None)
order_by = constraints.pop('order_by', None)
group_by = constraints.pop('group_by', None)
accounts = constraints.pop('accounts', [])
if accounts:
constraints['account__in'] = [a.public_key.address for a in accounts]
where, values = constraints_to_sql(constraints)
if where:
sql.append('WHERE')
sql.append(where)
if group_by is not None:
sql.append(f'GROUP BY {group_by}')
if order_by:
sql.append('ORDER BY')
if isinstance(order_by, str):
sql.append(order_by)
elif isinstance(order_by, list):
sql.append(', '.join(order_by))
else:
raise ValueError("order_by must be string or list")
if limit is not None:
sql.append(f'LIMIT {limit}')
if offset is not None:
sql.append(f'OFFSET {offset}')
return ' '.join(sql), values
def interpolate(sql, values):
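# Debug/display helper that substitutes named parameters directly into the SQL text.
# Keys are processed in reverse-sorted order so e.g. ':txid10' is replaced before ':txid1'.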
for k in sorted(values.keys(), reverse=True):
value = values[k]
if isinstance(value, bytes):
value = f"X'{hexlify(value).decode()}'"
elif isinstance(value, str):
value = f"'{value}'"
else:
value = str(value)
sql = sql.replace(f":{k}", value)
return sql
def constrain_single_or_list(constraints, column, value, convert=lambda x: x, negate=False):
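# Normalizes a value that may be a scalar or a list into the appropriate equality / IN constraint,
# optionally negated (NULL values are treated as matching when negating).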
if value is not None:
if isinstance(value, list):
value = [convert(v) for v in value]
if len(value) == 1:
if negate:
constraints[f"{column}__or"] = {
f"{column}__is_null": True,
f"{column}__not": value[0]
}
else:
constraints[column] = value[0]
elif len(value) > 1:
if negate:
constraints[f"{column}__or"] = {
f"{column}__is_null": True,
f"{column}__not_in": value
}
else:
constraints[f"{column}__in"] = value
elif negate:
constraints[f"{column}__or"] = {
f"{column}__is_null": True,
f"{column}__not": convert(value)
}
else:
constraints[column] = convert(value)
return constraints
class SQLiteMixin:
SCHEMA_VERSION: Optional[str] = None
CREATE_TABLES_QUERY: str
MAX_QUERY_VARIABLES = 900
CREATE_VERSION_TABLE = """
create table if not exists version (
version text
);
"""
def __init__(self, path):
self._db_path = path
self.db: AIOSQLite = None
self.ledger = None
async def open(self):
log.info("connecting to database: %s", self._db_path)
self.db = await AIOSQLite.connect(self._db_path, isolation_level=None)
if self.SCHEMA_VERSION:
tables = [t[0] for t in await self.db.execute_fetchall(
"SELECT name FROM sqlite_master WHERE type='table';"
)]
if tables:
if 'version' in tables:
version = await self.db.execute_fetchone("SELECT version FROM version LIMIT 1;")
if version == (self.SCHEMA_VERSION,):
return
await self.db.executescript('\n'.join(
f"DROP TABLE {table};" for table in tables
) + '\n' + 'PRAGMA WAL_CHECKPOINT(FULL);' + '\n' + 'VACUUM;')
await self.db.execute(self.CREATE_VERSION_TABLE)
await self.db.execute("INSERT INTO version VALUES (?)", (self.SCHEMA_VERSION,))
await self.db.executescript(self.CREATE_TABLES_QUERY)
async def close(self):
await self.db.close()
@staticmethod
def _insert_sql(table: str, data: dict, ignore_duplicate: bool = False,
replace: bool = False) -> Tuple[str, List]:
columns, values = [], []
for column, value in data.items():
columns.append(column)
values.append(value)
policy = ""
if ignore_duplicate:
policy = " OR IGNORE"
if replace:
policy = " OR REPLACE"
sql = "INSERT{} INTO {} ({}) VALUES ({})".format(
policy, table, ', '.join(columns), ', '.join(['?'] * len(values))
)
return sql, values
@staticmethod
def _update_sql(table: str, data: dict, where: str,
constraints: Union[list, tuple]) -> Tuple[str, list]:
columns, values = [], []
for column, value in data.items():
columns.append(f"{column} = ?")
values.append(value)
values.extend(constraints)
sql = "UPDATE {} SET {} WHERE {}".format(
table, ', '.join(columns), where
)
return sql, values
def dict_row_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
SQLITE_MAX_INTEGER = 9223372036854775807
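# 2**63 - 1, the largest signed 64-bit value SQLite stores in an INTEGER column; caps the ceiling of the spendable-utxo search below.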
def _get_spendable_utxos(transaction: sqlite3.Connection, accounts: List, decoded_transactions: Dict[str, Transaction],
result: Dict[Tuple[bytes, int, bool], List[int]], reserved: List[Transaction],
amount_to_reserve: int, reserved_amount: int, floor: int, ceiling: int,
fee_per_byte: int) -> int:
accounts_fmt = ",".join(["?"] * len(accounts))
txo_query = f"""
SELECT tx.txid, txo.txoid, tx.raw, tx.height, txo.position as nout, tx.is_verified, txo.amount FROM txo
INNER JOIN account_address USING (address)
LEFT JOIN txi USING (txoid)
INNER JOIN tx USING (txid)
WHERE txo.txo_type=0 AND txi.txoid IS NULL AND tx.txid IS NOT NULL AND NOT txo.is_reserved
AND txo.amount >= ? AND txo.amount < ?
"""
if accounts:
txo_query += f"""
AND account_address.account {'= ?' if len(accounts_fmt) == 1 else 'IN (' + accounts_fmt + ')'}
"""
txo_query += """
ORDER BY txo.amount ASC, tx.height DESC
"""
# prefer confirmed, but save unconfirmed utxos from this selection in case they are needed
unconfirmed = []
for row in transaction.execute(txo_query, (floor, ceiling, *accounts)):
(txid, txoid, raw, height, nout, verified, amount) = row.values()
# verified or non-verified transactions were found - reset the gap count
# multiple txos can come from the same tx, only decode it once and cache
if txid not in decoded_transactions:
# cache the decoded transaction
decoded_transactions[txid] = Transaction(raw)
decoded_tx = decoded_transactions[txid]
# save the unconfirmed txo for possible use later, if still needed
if verified:
# add the txo to the reservation, minus the fee for including it
reserved_amount += amount
reserved_amount -= Input.spend(decoded_tx.outputs[nout]).size * fee_per_byte
# mark it as reserved
result[(raw, height, verified)].append(nout)
reserved.append(txoid)
# if we've reserved enough, return
if reserved_amount >= amount_to_reserve:
return reserved_amount
else:
unconfirmed.append((txid, txoid, raw, height, nout, verified, amount))
# we're popping the items, so to get them in the order they were seen they are reversed
unconfirmed.reverse()
# add available unconfirmed txos if any were previously found
while unconfirmed and reserved_amount < amount_to_reserve:
(txid, txoid, raw, height, nout, verified, amount) = unconfirmed.pop()
# it's already decoded
decoded_tx = decoded_transactions[txid]
# add to the reserved amount
reserved_amount += amount
reserved_amount -= Input.spend(decoded_tx.outputs[nout]).size * fee_per_byte
result[(raw, height, verified)].append(nout)
reserved.append(txoid)
return reserved_amount
def get_and_reserve_spendable_utxos(transaction: sqlite3.Connection, accounts: List, amount_to_reserve: int, floor: int,
fee_per_byte: int, set_reserved: bool, return_insufficient_funds: bool,
base_multiplier: int = 100):
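# Searches for spendable txos in progressively wider amount windows [floor, floor * multiplier); when a window yields
# no new funds the multiplier is squared, and the search stops after 5 consecutive empty windows, once enough is
# reserved, or once the ceiling would exceed SQLite's integer range.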
txs = defaultdict(list)
decoded_transactions = {}
reserved = []
reserved_dewies = 0
multiplier = base_multiplier
gap_count = 0
while reserved_dewies < amount_to_reserve and gap_count < 5 and floor * multiplier < SQLITE_MAX_INTEGER:
previous_reserved_dewies = reserved_dewies
reserved_dewies = _get_spendable_utxos(
transaction, accounts, decoded_transactions, txs, reserved, amount_to_reserve, reserved_dewies,
floor, floor * multiplier, fee_per_byte
)
floor *= multiplier
if previous_reserved_dewies == reserved_dewies:
gap_count += 1
multiplier **= 2
else:
gap_count = 0
multiplier = base_multiplier
# reserve the accumulated txos if enough were found
if reserved_dewies >= amount_to_reserve:
if set_reserved:
transaction.executemany("UPDATE txo SET is_reserved = ? WHERE txoid = ?",
[(True, txoid) for txoid in reserved]).fetchall()
return txs
# return_insufficient_funds and set_reserved are used for testing
return txs if return_insufficient_funds else {}
class Database(SQLiteMixin):
SCHEMA_VERSION = "1.5"
PRAGMAS = """
pragma journal_mode=WAL;
"""
CREATE_ACCOUNT_TABLE = """
create table if not exists account_address (
account text not null,
address text not null,
chain integer not null,
pubkey blob not null,
chain_code blob not null,
n integer not null,
depth integer not null,
primary key (account, address)
);
create index if not exists address_account_idx on account_address (address, account);
"""
CREATE_PUBKEY_ADDRESS_TABLE = """
create table if not exists pubkey_address (
address text primary key,
history text,
used_times integer not null default 0
);
"""
CREATE_TX_TABLE = """
create table if not exists tx (
txid text primary key,
raw blob not null,
height integer not null,
position integer not null,
is_verified boolean not null default 0,
purchased_claim_id text,
day integer
);
create index if not exists tx_purchased_claim_id_idx on tx (purchased_claim_id);
"""
CREATE_TXO_TABLE = """
create table if not exists txo (
txid text references tx,
txoid text primary key,
address text references pubkey_address,
position integer not null,
amount integer not null,
script blob not null,
is_reserved boolean not null default 0,
txo_type integer not null default 0,
claim_id text,
claim_name text,
channel_id text,
reposted_claim_id text
);
create index if not exists txo_txid_idx on txo (txid);
create index if not exists txo_address_idx on txo (address);
create index if not exists txo_claim_id_idx on txo (claim_id, txo_type);
create index if not exists txo_claim_name_idx on txo (claim_name);
create index if not exists txo_txo_type_idx on txo (txo_type);
create index if not exists txo_channel_id_idx on txo (channel_id);
create index if not exists txo_reposted_claim_idx on txo (reposted_claim_id);
"""
CREATE_TXI_TABLE = """
create table if not exists txi (
txid text references tx,
txoid text references txo primary key,
address text references pubkey_address,
position integer not null
);
create index if not exists txi_address_idx on txi (address);
create index if not exists first_input_idx on txi (txid, address) where position=0;
"""
CREATE_TABLES_QUERY = (
PRAGMAS +
CREATE_ACCOUNT_TABLE +
CREATE_PUBKEY_ADDRESS_TABLE +
CREATE_TX_TABLE +
CREATE_TXO_TABLE +
CREATE_TXI_TABLE
)
async def open(self):
await super().open()
self.db.writer_connection.row_factory = dict_row_factory
def txo_to_row(self, tx, txo):
row = {
'txid': tx.id,
'txoid': txo.id,
'address': txo.get_address(self.ledger),
'position': txo.position,
'amount': txo.amount,
'script': sqlite3.Binary(txo.script.source)
}
if txo.is_claim:
if txo.can_decode_claim:
claim = txo.claim
row['txo_type'] = TXO_TYPES.get(claim.claim_type, TXO_TYPES['stream'])
if claim.is_repost:
row['reposted_claim_id'] = claim.repost.reference.claim_id
if claim.is_signed:
row['channel_id'] = claim.signing_channel_id
else:
row['txo_type'] = TXO_TYPES['stream']
elif txo.is_support:
row['txo_type'] = TXO_TYPES['support']
elif txo.purchase is not None:
row['txo_type'] = TXO_TYPES['purchase']
row['claim_id'] = txo.purchased_claim_id
if txo.script.is_claim_involved:
row['claim_id'] = txo.claim_id
row['claim_name'] = txo.claim_name
return row
def tx_to_row(self, tx):
row = {
'txid': tx.id,
'raw': sqlite3.Binary(tx.raw),
'height': tx.height,
'position': tx.position,
'is_verified': tx.is_verified,
'day': tx.get_julian_day(self.ledger),
}
txos = tx.outputs
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
txos[0].purchase = txos[1]
row['purchased_claim_id'] = txos[1].purchase_data.claim_id
return row
async def insert_transaction(self, tx):
await self.db.execute_fetchall(*self._insert_sql('tx', self.tx_to_row(tx)))
async def update_transaction(self, tx):
await self.db.execute_fetchall(*self._update_sql("tx", {
'height': tx.height, 'position': tx.position, 'is_verified': tx.is_verified
}, 'txid = ?', (tx.id,)))
def _transaction_io(self, conn: sqlite3.Connection, tx: Transaction, address, txhash):
conn.execute(*self._insert_sql('tx', self.tx_to_row(tx), replace=True)).fetchall()
is_my_input = False
for txi in tx.inputs:
if txi.txo_ref.txo is not None:
txo = txi.txo_ref.txo
if txo.has_address and txo.get_address(self.ledger) == address:
is_my_input = True
conn.execute(*self._insert_sql("txi", {
'txid': tx.id,
'txoid': txo.id,
'address': address,
'position': txi.position
}, ignore_duplicate=True)).fetchall()
for txo in tx.outputs:
if txo.script.is_pay_pubkey_hash and (txo.pubkey_hash == txhash or is_my_input):
conn.execute(*self._insert_sql(
"txo", self.txo_to_row(tx, txo), ignore_duplicate=True
)).fetchall()
elif txo.script.is_pay_script_hash:
# TODO: implement script hash payments
log.warning('Database.save_transaction_io: pay script hash is not implemented!')
def save_transaction_io(self, tx: Transaction, address, txhash, history):
return self.save_transaction_io_batch([tx], address, txhash, history)
def save_transaction_io_batch(self, txs: Iterable[Transaction], address, txhash, history):
history_count = history.count(':') // 2
def __many(conn):
for tx in txs:
self._transaction_io(conn, tx, address, txhash)
conn.execute(
"UPDATE pubkey_address SET history = ?, used_times = ? WHERE address = ?",
(history, history_count, address)
).fetchall()
return self.db.run(__many)
async def reserve_outputs(self, txos, is_reserved=True):
txoids = [(is_reserved, txo.id) for txo in txos]
await self.db.executemany("UPDATE txo SET is_reserved = ? WHERE txoid = ?", txoids)
async def release_outputs(self, txos):
await self.reserve_outputs(txos, is_reserved=False)
async def rewind_blockchain(self, above_height): # pylint: disable=no-self-use
# TODO:
# 1. delete transactions above_height
# 2. update address histories removing deleted TXs
return True
async def get_spendable_utxos(self, ledger, reserve_amount, accounts: Optional[Iterable], min_amount: int = 1,
fee_per_byte: int = 50, set_reserved: bool = True,
return_insufficient_funds: bool = False) -> List:
to_spend = await self.db.run(
get_and_reserve_spendable_utxos, tuple(account.id for account in accounts), reserve_amount, min_amount,
fee_per_byte, set_reserved, return_insufficient_funds
)
txos = []
for (raw, height, verified), positions in to_spend.items():
tx = Transaction(raw, height=height, is_verified=verified)
for nout in positions:
txos.append(tx.outputs[nout].get_estimator(ledger))
return txos
async def select_transactions(self, cols, accounts=None, read_only=False, **constraints):
if not {'txid', 'txid__in'}.intersection(constraints):
assert accounts, "'accounts' argument required when no 'txid' constraint is present"
where, values = constraints_to_sql({
'$$account_address.account__in': [a.public_key.address for a in accounts]
})
constraints['txid__in'] = f"""
SELECT txo.txid FROM txo JOIN account_address USING (address) WHERE {where}
UNION
SELECT txi.txid FROM txi JOIN account_address USING (address) WHERE {where}
"""
constraints.update(values)
return await self.db.execute_fetchall(
*query(f"SELECT {cols} FROM tx", **constraints), read_only=read_only
)
TXO_NOT_MINE = Output(None, None, is_my_output=False)
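# Sentinel output used to annotate txos that belong to none of the wallet's accounts.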
async def get_transactions(self, wallet=None, **constraints):
include_is_spent = constraints.pop('include_is_spent', False)
include_is_my_input = constraints.pop('include_is_my_input', False)
include_is_my_output = constraints.pop('include_is_my_output', False)
tx_rows = await self.select_transactions(
'txid, raw, height, position, is_verified',
order_by=constraints.pop('order_by', ["height=0 DESC", "height DESC", "position DESC"]),
**constraints
)
if not tx_rows:
return []
txids, txs, txi_txoids = [], [], []
for row in tx_rows:
txids.append(row['txid'])
txs.append(Transaction(
raw=row['raw'], height=row['height'], position=row['position'],
is_verified=bool(row['is_verified'])
))
for txi in txs[-1].inputs:
txi_txoids.append(txi.txo_ref.id)
step = self.MAX_QUERY_VARIABLES
annotated_txos = {}
for offset in range(0, len(txids), step):
annotated_txos.update({
txo.id: txo for txo in
(await self.get_txos(
wallet=wallet,
txid__in=txids[offset:offset+step], order_by='txo.txid',
include_is_spent=include_is_spent,
include_is_my_input=include_is_my_input,
include_is_my_output=include_is_my_output,
))
})
referenced_txos = {}
for offset in range(0, len(txi_txoids), step):
referenced_txos.update({
txo.id: txo for txo in
(await self.get_txos(
wallet=wallet,
txoid__in=txi_txoids[offset:offset+step], order_by='txo.txoid',
include_is_my_output=include_is_my_output,
))
})
for tx in txs:
for txi in tx.inputs:
txo = referenced_txos.get(txi.txo_ref.id)
if txo:
txi.txo_ref = txo.ref
for txo in tx.outputs:
_txo = annotated_txos.get(txo.id)
if _txo:
txo.update_annotations(_txo)
else:
txo.update_annotations(self.TXO_NOT_MINE)
for tx in txs:
txos = tx.outputs
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
txos[0].purchase = txos[1]
return txs
async def get_transaction_count(self, **constraints):
constraints.pop('wallet', None)
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
count = await self.select_transactions('COUNT(*) as total', **constraints)
return count[0]['total'] or 0
async def get_transaction(self, **constraints):
txs = await self.get_transactions(limit=1, **constraints)
if txs:
return txs[0]
async def select_txos(
self, cols, accounts=None, is_my_input=None, is_my_output=True,
is_my_input_or_output=None, exclude_internal_transfers=False,
include_is_spent=False, include_is_my_input=False,
is_spent=None, read_only=False, **constraints):
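# txid/txoid constraints are renamed below to 'txo.'-qualified forms so they remain unambiguous after the JOIN with tx.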
for rename_col in ('txid', 'txoid'):
for rename_constraint in (rename_col, rename_col+'__in', rename_col+'__not_in'):
if rename_constraint in constraints:
constraints['txo.'+rename_constraint] = constraints.pop(rename_constraint)
if accounts:
account_in_sql, values = constraints_to_sql({
'$$account__in': [a.public_key.address for a in accounts]
})
my_addresses = f"SELECT address FROM account_address WHERE {account_in_sql}"
constraints.update(values)
if is_my_input_or_output:
include_is_my_input = True
constraints['received_or_sent__or'] = {
'txo.address__in': my_addresses,
'sent__and': {
'txi.address__is_not_null': True,
'txi.address__in': my_addresses
}
}
else:
if is_my_output:
constraints['txo.address__in'] = my_addresses
elif is_my_output is False:
constraints['txo.address__not_in'] = my_addresses
if is_my_input:
include_is_my_input = True
constraints['txi.address__is_not_null'] = True
constraints['txi.address__in'] = my_addresses
elif is_my_input is False:
include_is_my_input = True
constraints['is_my_input_false__or'] = {
'txi.address__is_null': True,
'txi.address__not_in': my_addresses
}
if exclude_internal_transfers:
include_is_my_input = True
constraints['exclude_internal_payments__or'] = {
'txo.txo_type__not': TXO_TYPES['other'],
'txo.address__not_in': my_addresses,
'txi.address__is_null': True,
'txi.address__not_in': my_addresses,
}
sql = [f"SELECT {cols} FROM txo JOIN tx ON (tx.txid=txo.txid)"]
if is_spent:
constraints['spent.txoid__is_not_null'] = True
elif is_spent is False:
constraints['is_reserved'] = False
constraints['spent.txoid__is_null'] = True
if include_is_spent or is_spent is not None:
sql.append("LEFT JOIN txi AS spent ON (spent.txoid=txo.txoid)")
if include_is_my_input:
sql.append("LEFT JOIN txi ON (txi.position=0 AND txi.txid=txo.txid)")
return await self.db.execute_fetchall(*query(' '.join(sql), **constraints), read_only=read_only)
async def get_txos(self, wallet=None, no_tx=False, read_only=False, **constraints):
include_is_spent = constraints.get('include_is_spent', False)
include_is_my_input = constraints.get('include_is_my_input', False)
include_is_my_output = constraints.pop('include_is_my_output', False)
include_received_tips = constraints.pop('include_received_tips', False)
select_columns = [
"tx.txid, raw, tx.height, tx.position as tx_position, tx.is_verified, "
"txo_type, txo.position as txo_position, amount, script"
]
my_accounts = {a.public_key.address for a in wallet.accounts} if wallet else set()
my_accounts_sql = ""
if include_is_my_output or include_is_my_input:
my_accounts_sql, values = constraints_to_sql({'$$account__in#_wallet': my_accounts})
constraints.update(values)
if include_is_my_output and my_accounts:
if constraints.get('is_my_output', None) in (True, False):
select_columns.append(f"{1 if constraints['is_my_output'] else 0} AS is_my_output")
else:
select_columns.append(f"""(
txo.address IN (SELECT address FROM account_address WHERE {my_accounts_sql})
) AS is_my_output""")
if include_is_my_input and my_accounts:
if constraints.get('is_my_input', None) in (True, False):
select_columns.append(f"{1 if constraints['is_my_input'] else 0} AS is_my_input")
else:
select_columns.append(f"""(
txi.address IS NOT NULL AND
txi.address IN (SELECT address FROM account_address WHERE {my_accounts_sql})
) AS is_my_input""")
if include_is_spent:
select_columns.append("spent.txoid IS NOT NULL AS is_spent")
if include_received_tips:
select_columns.append(f"""(
SELECT COALESCE(SUM(support.amount), 0) FROM txo AS support WHERE
support.claim_id = txo.claim_id AND
support.txo_type = {TXO_TYPES['support']} AND
support.address IN (SELECT address FROM account_address WHERE {my_accounts_sql}) AND
support.txoid NOT IN (SELECT txoid FROM txi)
) AS received_tips""")
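# Default ordering puts unconfirmed txos (height == 0) first, then newest confirmed transactions, preserving output order within a tx.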
if 'order_by' not in constraints or constraints['order_by'] == 'height':
constraints['order_by'] = [
"tx.height=0 DESC", "tx.height DESC", "tx.position DESC", "txo.position"
]
elif constraints.get('order_by', None) == 'none':
del constraints['order_by']
rows = await self.select_txos(', '.join(select_columns), read_only=read_only, **constraints)
txos = []
txs = {}
for row in rows:
if no_tx:
txo = Output(
amount=row['amount'],
script=OutputScript(row['script']),
tx_ref=TXRefImmutable.from_id(row['txid'], row['height']),
position=row['txo_position']
)
else:
if row['txid'] not in txs:
txs[row['txid']] = Transaction(
row['raw'], height=row['height'], position=row['tx_position'],
is_verified=bool(row['is_verified'])
)
txo = txs[row['txid']].outputs[row['txo_position']]
if include_is_spent:
txo.is_spent = bool(row['is_spent'])
if include_is_my_input:
txo.is_my_input = bool(row['is_my_input'])
if include_is_my_output:
txo.is_my_output = bool(row['is_my_output'])
if include_is_my_input and include_is_my_output:
if txo.is_my_input and txo.is_my_output and row['txo_type'] == TXO_TYPES['other']:
txo.is_internal_transfer = True
else:
txo.is_internal_transfer = False
if include_received_tips:
txo.received_tips = row['received_tips']
txos.append(txo)
channel_ids = set()
for txo in txos:
if txo.is_claim and txo.can_decode_claim:
if txo.claim.is_signed:
channel_ids.add(txo.claim.signing_channel_id)
if txo.claim.is_channel and wallet:
for account in wallet.accounts:
private_key = await account.get_channel_private_key(
txo.claim.channel.public_key_bytes
)
if private_key:
txo.private_key = private_key
break
if channel_ids:
channels = {
txo.claim_id: txo for txo in
(await self.get_channels(
wallet=wallet,
claim_id__in=channel_ids,
read_only=read_only
))
}
for txo in txos:
if txo.is_claim and txo.can_decode_claim:
txo.channel = channels.get(txo.claim.signing_channel_id, None)
return txos
@staticmethod
def _clean_txo_constraints_for_aggregation(constraints):
constraints.pop('include_is_spent', None)
constraints.pop('include_is_my_input', None)
constraints.pop('include_is_my_output', None)
constraints.pop('include_received_tips', None)
constraints.pop('wallet', None)
constraints.pop('resolve', None)
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
async def get_txo_count(self, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
count = await self.select_txos('COUNT(*) AS total', **constraints)
return count[0]['total'] or 0
async def get_txo_sum(self, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
result = await self.select_txos('SUM(amount) AS total', **constraints)
return result[0]['total'] or 0
async def get_txo_plot(self, start_day=None, days_back=0, end_day=None, days_after=None, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
if start_day is None:
constraints['day__gte'] = self.ledger.headers.estimated_julian_day(
self.ledger.headers.height
) - days_back
else:
constraints['day__gte'] = date_to_julian_day(
date.fromisoformat(start_day)
)
if end_day is not None:
constraints['day__lte'] = date_to_julian_day(
date.fromisoformat(end_day)
)
elif days_after is not None:
constraints['day__lte'] = constraints['day__gte'] + days_after
return await self.select_txos(
"DATE(day) AS day, SUM(amount) AS total",
group_by='day', order_by='day', **constraints
)
def get_utxos(self, read_only=False, **constraints):
return self.get_txos(is_spent=False, read_only=read_only, **constraints)
def get_utxo_count(self, **constraints):
return self.get_txo_count(is_spent=False, **constraints)
async def get_balance(self, wallet=None, accounts=None, read_only=False, **constraints):
assert wallet or accounts, \
"'wallet' or 'accounts' constraints required to calculate balance"
constraints['accounts'] = accounts or wallet.accounts
balance = await self.select_txos(
'SUM(amount) as total', is_spent=False, read_only=read_only, **constraints
)
return balance[0]['total'] or 0
async def select_addresses(self, cols, read_only=False, **constraints):
return await self.db.execute_fetchall(*query(
f"SELECT {cols} FROM pubkey_address JOIN account_address USING (address)",
**constraints
), read_only=read_only)
async def get_addresses(self, cols=None, read_only=False, **constraints):
cols = cols or (
'address', 'account', 'chain', 'history', 'used_times',
'pubkey', 'chain_code', 'n', 'depth'
)
addresses = await self.select_addresses(', '.join(cols), read_only=read_only, **constraints)
if 'pubkey' in cols:
for address in addresses:
address['pubkey'] = PubKey(
self.ledger, address.pop('pubkey'), address.pop('chain_code'),
address.pop('n'), address.pop('depth')
)
return addresses
async def get_address_count(self, cols=None, read_only=False, **constraints):
count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints)
return count[0]['total'] or 0
async def get_address(self, read_only=False, **constraints):
addresses = await self.get_addresses(read_only=read_only, limit=1, **constraints)
if addresses:
return addresses[0]
async def add_keys(self, account, chain, pubkeys):
await self.db.executemany(
"insert or ignore into account_address "
"(account, address, chain, pubkey, chain_code, n, depth) values "
"(?, ?, ?, ?, ?, ?, ?)", ((
account.id, k.address, chain,
sqlite3.Binary(k.pubkey_bytes),
sqlite3.Binary(k.chain_code),
k.n, k.depth
) for k in pubkeys)
)
await self.db.executemany(
"insert or ignore into pubkey_address (address) values (?)",
((pubkey.address,) for pubkey in pubkeys)
)
async def _set_address_history(self, address, history):
await self.db.execute_fetchall(
"UPDATE pubkey_address SET history = ?, used_times = ? WHERE address = ?",
(history, history.count(':')//2, address)
)
async def set_address_history(self, address, history):
await self._set_address_history(address, history)
@staticmethod
def constrain_purchases(constraints):
accounts = constraints.pop('accounts', None)
assert accounts, "'accounts' argument required to find purchases"
if not {'purchased_claim_id', 'purchased_claim_id__in'}.intersection(constraints):
constraints['purchased_claim_id__is_not_null'] = True
constraints.update({
f'$account{i}': a.public_key.address for i, a in enumerate(accounts)
})
account_values = ', '.join([f':$account{i}' for i in range(len(accounts))])
constraints['txid__in'] = f"""
SELECT txid FROM txi JOIN account_address USING (address)
WHERE account_address.account IN ({account_values})
"""
async def get_purchases(self, **constraints):
self.constrain_purchases(constraints)
return [tx.outputs[0] for tx in await self.get_transactions(**constraints)]
def get_purchase_count(self, **constraints):
self.constrain_purchases(constraints)
return self.get_transaction_count(**constraints)
@staticmethod
def constrain_claims(constraints):
if {'txo_type', 'txo_type__in'}.intersection(constraints):
return
claim_types = constraints.pop('claim_type', None)
if claim_types:
constrain_single_or_list(
constraints, 'txo_type', claim_types, lambda x: TXO_TYPES[x]
)
else:
constraints['txo_type__in'] = CLAIM_TYPES
async def get_claims(self, read_only=False, **constraints) -> List[Output]:
self.constrain_claims(constraints)
return await self.get_utxos(read_only=read_only, **constraints)
def get_claim_count(self, **constraints):
self.constrain_claims(constraints)
return self.get_utxo_count(**constraints)
@staticmethod
def constrain_streams(constraints):
constraints['txo_type'] = TXO_TYPES['stream']
def get_streams(self, read_only=False, **constraints):
self.constrain_streams(constraints)
return self.get_claims(read_only=read_only, **constraints)
def get_stream_count(self, **constraints):
self.constrain_streams(constraints)
return self.get_claim_count(**constraints)
@staticmethod
def constrain_channels(constraints):
constraints['txo_type'] = TXO_TYPES['channel']
def get_channels(self, **constraints):
self.constrain_channels(constraints)
return self.get_claims(**constraints)
def get_channel_count(self, **constraints):
self.constrain_channels(constraints)
return self.get_claim_count(**constraints)
@staticmethod
def constrain_supports(constraints):
constraints['txo_type'] = TXO_TYPES['support']
def get_supports(self, **constraints):
self.constrain_supports(constraints)
return self.get_utxos(**constraints)
def get_support_count(self, **constraints):
self.constrain_supports(constraints)
return self.get_utxo_count(**constraints)
@staticmethod
def constrain_collections(constraints):
constraints['txo_type'] = TXO_TYPES['collection']
def get_collections(self, **constraints):
self.constrain_collections(constraints)
return self.get_utxos(**constraints)
def get_collection_count(self, **constraints):
self.constrain_collections(constraints)
return self.get_utxo_count(**constraints)
async def release_all_outputs(self, account=None):
if account is None:
await self.db.execute_fetchall("UPDATE txo SET is_reserved = 0 WHERE is_reserved = 1")
else:
await self.db.execute_fetchall(
"UPDATE txo SET is_reserved = 0 WHERE"
" is_reserved = 1 AND txo.address IN ("
" SELECT address from account_address WHERE account = ?"
" )", (account.public_key.address, )
)
def get_supports_summary(self, read_only=False, **constraints):
return self.get_txos(
txo_type=TXO_TYPES['support'],
is_spent=False, is_my_output=True,
include_is_my_input=True,
no_tx=True, read_only=read_only,
**constraints
)
|
"""
Provides logging utilities.
"""
import argparse
import difflib
import os
from dataclasses import dataclass
import sys
from types import TracebackType
from typing import Any, Optional, Type, cast
import fora
@dataclass
class State:
"""Global state for logging."""
indentation_level: int = 0
"""The current global indentation level."""
state: State = State()
"""The global logger state."""
def use_color() -> bool:
"""Returns true if color should be used."""
if not isinstance(cast(Any, fora.args), argparse.Namespace):
return os.getenv("NO_COLOR") is None
return not fora.args.no_color
def col(color_code: str) -> str:
"""Returns the given argument only if color is enabled."""
return color_code if use_color() else ""
class IndentationContext:
"""A context manager to modify the indentation level."""
def __enter__(self) -> None:
state.indentation_level += 1
def __exit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], traceback: Optional[TracebackType]) -> None:
_ = (exc_type, exc, traceback)
state.indentation_level -= 1
def ellipsis(s: str, width: int) -> str:
"""
Shrinks the given string to width (including an ellipsis character).
Parameters
----------
s
The string.
width
The maximum width.
Returns
-------
str
A modified string with at most `width` characters.
"""
if len(s) > width:
s = s[:width - 1] + "…"
return s
def indent() -> IndentationContext:
"""Retruns a context manager that increases the indentation level."""
return IndentationContext()
def indent_prefix() -> str:
"""Returns the indentation prefix for the current indentation level."""
if not use_color():
return " " * state.indentation_level
ret = ""
for i in range(state.indentation_level):
if i % 2 == 0:
ret += "[90m│[m "
else:
ret += "[90m╵[m "
return ret
def debug(msg: str) -> None:
"""Prints the given message only in debug mode."""
if not fora.args.debug:
return
print(f" [1;34mDEBUG[m: {msg}", file=sys.stderr)
def debug_args(msg: str, args: dict[str, Any]) -> None:
"""Prints all given arguments when in debug mode."""
if not fora.args.debug:
return
str_args = ""
args = {k: v for k,v in args.items() if k != "self"}
if len(args) > 0:
str_args = " " + ", ".join(f"{k}={v}" for k,v in args.items())
print(f" [1;34mDEBUG[m: {msg}{str_args}", file=sys.stderr)
def print_indented(msg: str, **kwargs: Any) -> None:
"""Same as print(), but prefixes the message with the indentation prefix."""
print(f"{indent_prefix()}{msg}", **kwargs)
def connection_init(connector: Any) -> None:
"""Prints connection initialization information."""
print_indented(f"{col("[1;34m")}host{col("[m")} {connector.host.name} via {col("[1;33m")}{connector.host.url}{col("[m")}", flush=True)
def connection_failed(error_msg: str) -> None:
"""Signals that an error has occurred while establishing the connection."""
print(col("[1;31m") + "ERR" + col("[m"))
print_indented(f" {col("[90m")}└{col("[m")} " + f"{col("[31m")}{error_msg}{col("[m")}")
def connection_established() -> None:
"""Signals that the connection has been successfully established."""
#print(col("[1;32m") + "OK" + col("[m"))
def run_script(script: str, name: Optional[str] = None) -> None:
"""Prints the script file and name that is being executed next."""
if name is not None:
print_indented(f"{col("[33;1m")}script{col("[m")} {script} {col("[90m")}({name}){col("[m")}")
else:
print_indented(f"{col("[33;1m")}script{col("[m")} {script}")
def print_operation_title(op: Any, title_color: str, end: str = "\n") -> None:
"""Prints the operation title and description."""
name_if_given = (" " + col('[90m') + f"({op.name})" + col('[m')) if op.name is not None else ""
dry_run_info = f" {col("[90m")}(dry){col("[m")}" if fora.args.dry else ""
print_indented(f"{title_color}{op.op_name}{col("[m")}{dry_run_info} {op.description}{name_if_given}", end=end, flush=True)
def print_operation_early(op: Any) -> None:
"""Prints the operation title and description before the final status is known."""
title_color = col("[1;33m")
# Only overwrite status later if debugging is not enabled.
print_operation_title(op, title_color, end=" (early status)\n" if fora.args.debug else "")
def decode_escape(data: bytes, encoding: str = 'utf-8') -> str:
"""
Tries to decode the given data with the given encoding, but replaces all non-decodeable
and non-printable characters with backslash escape sequences.
Example:
```python
>>> decode_escape(b'It is Wednesday\\nmy dudes\\r\\n🐸\\xff\\0')
'It is Wednesday\\\\nmy dudes\\\\r\\\\n🐸\\\\xff\\\\0'
```
Parameters
----------
content
The content that should be decoded and escaped.
encoding
The encoding that should be tried. To preserve utf-8 symbols, use 'utf-8',
to replace any non-ascii character with an escape sequence use 'ascii'.
Returns
-------
str
The decoded and escaped string.
"""
def escape_char(c: str) -> str:
special = {'\x00': '\\0', '\n': '\\n', '\r': '\\r', '\t': '\\t'}
if c in special:
return special[c]
num = ord(c)
if not c.isprintable() and num <= 0xff:
return f"\\x{num:02x}"
return c
return ''.join([escape_char(c) for c in data.decode(encoding, 'backslashreplace')])
def diff(filename: str, old: Optional[bytes], new: Optional[bytes], color: bool = True) -> list[str]:
"""
Creates a diff between the old and new content of the given filename,
that can be printed to the console. This function returns the diff
output as an array of lines. The lines in the output array are not
terminated by newlines.
If color is True, the diff is colored using ANSI escape sequences.
If you want to provide an alternative diffing function, beware that
the input can theoretically contain any bytes and therefore should
be decoded as utf-8 if possible, but non-decodeable
or non-printable characters should be replaced with human-readable
variants such as `\\x00`, `^@` or similar representations.
Your diffing function should still be able to work on the raw bytes
representation. After you acquire the diff and before you apply colors,
your output should be made printable with a function such as `fora.logger.decode_escape`:
```python
# First decode and escape
line = logger.decode_escape(byteline)
# Add coloring afterwards so ANSI escape sequences are not escaped
```
Parameters
----------
filename
The filename of the file that is being diffed.
old
The old content, or None if the file didn't exist before.
new
The new content, or None if the file was deleted.
color
Whether the output should be colored (with ANSI color sequences).
Returns
-------
list[str]
The lines of the diff output. The individual lines will not have a terminating newline.
"""
bdiff = list(difflib.diff_bytes(difflib.unified_diff,
a=[] if old is None else old.split(b'\n'),
b=[] if new is None else new.split(b'\n'),
lineterm=b''))
# Strip file name header and decode diff to be human readable.
difflines = map(decode_escape, bdiff[2:])
# Create custom file name header
action = 'created' if old is None else 'deleted' if new is None else 'modified'
title = f"{action}: {filename}"
N = len(title)
header = ['─' * N, title, '─' * N]
# Apply coloring if desired
if color:
def apply_color(line: str) -> str:
linecolor = {
'+': '[32m',
'-': '[31m',
'@': '[34m',
}
return linecolor.get(line[0], '[90m') + line + '[m'
# Apply color to diff
difflines = map(apply_color, difflines)
# Apply color to header
header = list(map(lambda line: f"[33m{line}[m", header))
return header + list(difflines)
# TODO: move functions to operation api. cleaner and has type access.
def _operation_state_infos(result: Any) -> list[str]:
def to_str(v: Any) -> str:
return v.hex() if isinstance(v, bytes) else str(v)
# Print "key: value" pairs with changes
state_infos: list[str] = []
for k,final_v in result.final.items():
if final_v is None:
continue
initial_v = result.initial[k]
str_initial_v = to_str(initial_v)
str_final_v = to_str(final_v)
# Add ellipsis on long strings, if we are not in verbose mode
if fora.args.verbose == 0:
k = ellipsis(k, 12)
str_initial_v = ellipsis(to_str(initial_v), 9)
str_final_v = ellipsis(to_str(final_v), 9+3+9 if initial_v is None else 9)
if initial_v == final_v:
if fora.args.verbose >= 1:
# TODO = instead of : for better readability
entry_str = f"{col("[90m")}{k}: {str_initial_v}{col("[m")}"
state_infos.append(entry_str)
else:
if initial_v is None:
entry_str = f"{col("[33m")}{k}: {col("[32m")}{str_final_v}{col("[m")}"
else:
entry_str = f"{col("[33m")}{k}: {col("[31m")}{str_initial_v}{col("[33m")} → {col("[32m")}{str_final_v}{col("[m")}"
state_infos.append(entry_str)
return state_infos
def print_operation(op: Any, result: Any) -> None:
"""Prints the operation summary after it has finished execution."""
if result.success:
title_color = col("[1;32m") if result.changed else col("[1;90m")
else:
title_color = col("[1;31m")
# Print title and name, overwriting the transitive status
print("\r", end="")
print_operation_title(op, title_color)
if not result.success:
print_indented(f" {col("[90m")}└{col("[m")} " + f"{col("[31m")}{result.failure_message}{col("[m")}")
return
if not fora.args.changes:
return
# Cache number of upcoming diffs to determine what box character to print
n_diffs = len(op.diffs) if fora.args.diff else 0
box_char = '└' if n_diffs == 0 else '├'
# Print "key: value" pairs with changes
state_infos = _operation_state_infos(result)
if len(state_infos) > 0:
print_indented(f"{col("[90m")}{box_char}{col("[m")} " + f"{col("[90m")},{col("[m")} ".join(state_infos))
if fora.args.diff:
diff_lines = []
# Generate diffs
for file, old, new in op.diffs:
diff_lines.extend(diff(file, old, new))
# Print diffs with block character line
if len(diff_lines) > 0:
for l in diff_lines[:-1]:
print_indented(f"{col("[90m")}│ {col("[m")}" + l)
print_indented(f"{col("[90m")}└ {col("[m")}" + diff_lines[-1])
| """
Provides logging utilities.
"""
import argparse
import difflib
import os
from dataclasses import dataclass
import sys
from types import TracebackType
from typing import Any, Optional, Type, cast
import fora
@dataclass
class State:
"""Global state for logging."""
indentation_level: int = 0
"""The current global indentation level."""
state: State = State()
"""The global logger state."""
def use_color() -> bool:
"""Returns true if color should be used."""
if not isinstance(cast(Any, fora.args), argparse.Namespace):
return os.getenv("NO_COLOR") is None
return not fora.args.no_color
def col(color_code: str) -> str:
"""Returns the given argument only if color is enabled."""
return color_code if use_color() else ""
class IndentationContext:
"""A context manager to modify the indentation level."""
def __enter__(self) -> None:
state.indentation_level += 1
def __exit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], traceback: Optional[TracebackType]) -> None:
_ = (exc_type, exc, traceback)
state.indentation_level -= 1
def ellipsis(s: str, width: int) -> str:
"""
Shrinks the given string to width (including an ellipsis character).
Parameters
----------
s
The string.
width
The maximum width.
Returns
-------
str
A modified string with at most `width` characters.
"""
if len(s) > width:
s = s[:width - 1] + "…"
return s
def indent() -> IndentationContext:
"""Retruns a context manager that increases the indentation level."""
return IndentationContext()
def indent_prefix() -> str:
"""Returns the indentation prefix for the current indentation level."""
if not use_color():
return " " * state.indentation_level
ret = ""
for i in range(state.indentation_level):
if i % 2 == 0:
ret += "[90m│[m "
else:
ret += "[90m╵[m "
return ret
def debug(msg: str) -> None:
"""Prints the given message only in debug mode."""
if not fora.args.debug:
return
print(f" [1;34mDEBUG[m: {msg}", file=sys.stderr)
def debug_args(msg: str, args: dict[str, Any]) -> None:
"""Prints all given arguments when in debug mode."""
if not fora.args.debug:
return
str_args = ""
args = {k: v for k,v in args.items() if k != "self"}
if len(args) > 0:
str_args = " " + ", ".join(f"{k}={v}" for k,v in args.items())
print(f" [1;34mDEBUG[m: {msg}{str_args}", file=sys.stderr)
def print_indented(msg: str, **kwargs: Any) -> None:
"""Same as print(), but prefixes the message with the indentation prefix."""
print(f"{indent_prefix()}{msg}", **kwargs)
def connection_init(connector: Any) -> None:
"""Prints connection initialization information."""
print_indented(f"{col('[1;34m')}host{col('[m')} {connector.host.name} via {col('[1;33m')}{connector.host.url}{col('[m')}", flush=True)
def connection_failed(error_msg: str) -> None:
"""Signals that an error has occurred while establishing the connection."""
print(col("[1;31m") + "ERR" + col("[m"))
print_indented(f" {col('[90m')}└{col('[m')} " + f"{col('[31m')}{error_msg}{col('[m')}")
def connection_established() -> None:
"""Signals that the connection has been successfully established."""
#print(col("[1;32m") + "OK" + col("[m"))
def run_script(script: str, name: Optional[str] = None) -> None:
"""Prints the script file and name that is being executed next."""
if name is not None:
print_indented(f"{col('[33;1m')}script{col('[m')} {script} {col('[90m')}({name}){col('[m')}")
else:
print_indented(f"{col('[33;1m')}script{col('[m')} {script}")
def print_operation_title(op: Any, title_color: str, end: str = "\n") -> None:
"""Prints the operation title and description."""
name_if_given = (" " + col('[90m') + f"({op.name})" + col('[m')) if op.name is not None else ""
dry_run_info = f" {col('[90m')}(dry){col('[m')}" if fora.args.dry else ""
print_indented(f"{title_color}{op.op_name}{col('[m')}{dry_run_info} {op.description}{name_if_given}", end=end, flush=True)
def print_operation_early(op: Any) -> None:
"""Prints the operation title and description before the final status is known."""
title_color = col("[1;33m")
# Only overwrite status later if debugging is not enabled.
print_operation_title(op, title_color, end=" (early status)\n" if fora.args.debug else "")
def decode_escape(data: bytes, encoding: str = 'utf-8') -> str:
"""
Tries to decode the given data with the given encoding, but replaces all non-decodeable
and non-printable characters with backslash escape sequences.
Example:
```python
>>> decode_escape(b'It is Wednesday\\nmy dudes\\r\\n🐸\\xff\\0')
'It is Wednesday\\\\nmy dudes\\\\r\\\\n🐸\\\\xff\\\\0'
```
Parameters
----------
data
The data that should be decoded and escaped.
encoding
The encoding that should be tried. To preserve utf-8 symbols, use 'utf-8',
to replace any non-ascii character with an escape sequence use 'ascii'.
Returns
-------
str
The decoded and escaped string.
"""
def escape_char(c: str) -> str:
special = {'\x00': '\\0', '\n': '\\n', '\r': '\\r', '\t': '\\t'}
if c in special:
return special[c]
num = ord(c)
if not c.isprintable() and num <= 0xff:
return f"\\x{num:02x}"
return c
return ''.join([escape_char(c) for c in data.decode(encoding, 'backslashreplace')])
def diff(filename: str, old: Optional[bytes], new: Optional[bytes], color: bool = True) -> list[str]:
"""
Creates a diff between the old and new content of the given filename,
that can be printed to the console. This function returns the diff
output as an array of lines. The lines in the output array are not
terminated by newlines.
If color is True, the diff is colored using ANSI escape sequences.
If you want to provide an alternative diffing function, beware that
the input can theoretically contain any bytes and therefore should
be decoded as utf-8 if possible, but non-decodeable
or non-printable characters should be replaced with human readable
variants such as `\\x00`, `^@` or similar representations.
Your diffing function should still be able to work on the raw bytes
representation. After you acquire the diff and before you apply colors,
your output should be made printable with a function such as `fora.logger.decode_escape`:
```python
# First decode and escape
line = logger.decode_escape(byteline)
# Add coloring afterwards so ANSI escape sequences are not escaped
```
Parameters
----------
filename
The filename of the file that is being diffed.
old
The old content, or None if the file didn't exist before.
new
The new content, or None if the file was deleted.
color
Whether the output should be colored (with ANSI color sequences).
Returns
-------
list[str]
The lines of the diff output. The individual lines will not have a terminating newline.
"""
bdiff = list(difflib.diff_bytes(difflib.unified_diff,
a=[] if old is None else old.split(b'\n'),
b=[] if new is None else new.split(b'\n'),
lineterm=b''))
# Strip file name header and decode diff to be human readable.
difflines = map(decode_escape, bdiff[2:])
# Create custom file name header
action = 'created' if old is None else 'deleted' if new is None else 'modified'
title = f"{action}: {filename}"
N = len(title)
header = ['─' * N, title, '─' * N]
# Apply coloring if desired
if color:
def apply_color(line: str) -> str:
linecolor = {
'+': '[32m',
'-': '[31m',
'@': '[34m',
}
return linecolor.get(line[0], '[90m') + line + '[m'
# Apply color to diff
difflines = map(apply_color, difflines)
# Apply color to header
header = list(map(lambda line: f"[33m{line}[m", header))
return header + list(difflines)
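# A minimal sketch (not part of this module) of the decode-then-color order that
# the diff() docstring above describes for alternative diffing functions.
# `my_byte_diff` and `apply_my_color` are hypothetical helpers:
#
#     def my_diff(filename: str, old: bytes, new: bytes) -> list[str]:
#         raw = my_byte_diff(old, new)                         # work on raw bytes first
#         printable = [decode_escape(line) for line in raw]    # then escape to printable text
#         return [apply_my_color(line) for line in printable]  # ANSI coloring last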
# TODO: move functions to operation api. cleaner and has type access.
def _operation_state_infos(result: Any) -> list[str]:
def to_str(v: Any) -> str:
return v.hex() if isinstance(v, bytes) else str(v)
# Print "key: value" pairs with changes
state_infos: list[str] = []
for k,final_v in result.final.items():
if final_v is None:
continue
initial_v = result.initial[k]
str_initial_v = to_str(initial_v)
str_final_v = to_str(final_v)
# Add ellipsis on long strings, if we are not in verbose mode
if fora.args.verbose == 0:
k = ellipsis(k, 12)
str_initial_v = ellipsis(to_str(initial_v), 9)
str_final_v = ellipsis(to_str(final_v), 9+3+9 if initial_v is None else 9)
if initial_v == final_v:
if fora.args.verbose >= 1:
# TODO = instead of : for better readability
entry_str = f"{col('[90m')}{k}: {str_initial_v}{col('[m')}"
state_infos.append(entry_str)
else:
if initial_v is None:
entry_str = f"{col('[33m')}{k}: {col('[32m')}{str_final_v}{col('[m')}"
else:
entry_str = f"{col('[33m')}{k}: {col('[31m')}{str_initial_v}{col('[33m')} → {col('[32m')}{str_final_v}{col('[m')}"
state_infos.append(entry_str)
return state_infos
def print_operation(op: Any, result: Any) -> None:
"""Prints the operation summary after it has finished execution."""
if result.success:
title_color = col("[1;32m") if result.changed else col("[1;90m")
else:
title_color = col("[1;31m")
# Print title and name, overwriting the transitive status
print("\r", end="")
print_operation_title(op, title_color)
if not result.success:
print_indented(f" {col('[90m')}└{col('[m')} " + f"{col('[31m')}{result.failure_message}{col('[m')}")
return
if not fora.args.changes:
return
# Cache number of upcoming diffs to determine what box character to print
n_diffs = len(op.diffs) if fora.args.diff else 0
box_char = '└' if n_diffs == 0 else '├'
# Print "key: value" pairs with changes
state_infos = _operation_state_infos(result)
if len(state_infos) > 0:
print_indented(f"{col('[90m')}{box_char}{col('[m')} " + f"{col('[90m')},{col('[m')} ".join(state_infos))
if fora.args.diff:
diff_lines = []
# Generate diffs
for file, old, new in op.diffs:
diff_lines.extend(diff(file, old, new))
# Print diffs with block character line
if len(diff_lines) > 0:
for l in diff_lines[:-1]:
print_indented(f"{col('[90m')}│ {col('[m')}" + l)
print_indented(f"{col('[90m')}└ {col('[m')}" + diff_lines[-1])
|
# Lesson 19 - Dictionaries. This is how we work with dictionaries
pessoas = {'nome': 'Gustavo', 'sexo': 'M', 'idade': 22}
print(pessoas['nome'])
print(pessoas['idade'])
print(pessoas['sexo'])
print(f'{pessoas['nome']} tem {pessoas['idade']} anos') # Use double quotes for the key lookup [" "]
print(pessoas.keys()) # nome / sexo / idade
print(pessoas.values()) # Gustavo / M / 22
print(pessoas.items()) # Composition of elements: a list with three tuples
| # Lesson 19 - Dictionaries. This is how we work with dictionaries
pessoas = {'nome': 'Gustavo', 'sexo': 'M', 'idade': 22}
print(pessoas['nome'])
print(pessoas['idade'])
print(pessoas['sexo'])
print(f'{pessoas["nome"]} tem {pessoas["idade"]} anos') # Utilizar aspas duplas para a localização [" "]
print(pessoas.keys()) # nome / sexo / idade
print(pessoas.values()) # Gustavo / M / 22
print(pessoas.items()) # Composition of elements: a list with three tuples
|
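# A small follow-up example (not from the lesson above): .items() yields
# (key, value) tuples, so it can be unpacked directly in a for loop.
pessoas = {'nome': 'Gustavo', 'sexo': 'M', 'idade': 22}
for chave, valor in pessoas.items():
    print(f'{chave} = {valor}')  # nome = Gustavo / sexo = M / idade = 22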
# flake8: noqa
import csv
def model_name(table_name):
if table_name in ["vtm", "vpi", "vmp", "vmpp", "amp", "ampp", "gtin"]:
return table_name.upper()
else:
return "".join(tok.title() for tok in table_name.split("_"))
def quote(s):
assert '"' not in s
return '"' + s + '"'
with open("schema.csv") as f:
lines = list(csv.DictReader(f))
print("from django.db import models")
table = None
for line in lines:
if line["table"] == "ccontent":
continue
if line["table"] != table:
table = line["table"]
print()
print()
print(f"class {model_name(table)}(models.Model):")
print("# class Meta:")
print('# verbose_name = "TODO"')
print()
if line["type"] == "retired":
continue
options = []
if line["primary_key"] == "True":
options.append(("primary_key", "True"))
if line["db_column"]:
options.append(("db_column", quote(line["db_column"])))
if line["type"] in ["ForeignKey", "OneToOneField"]:
options.append(("to", quote(model_name(line["to"]))))
options.append(("on_delete", "models.CASCADE"))
if "prevcd" in line["db_column"] or "uomcd" in line["db_column"]:
options.append(("related_name", quote("+")))
elif line["type"] == "CharField":
options.append(("max_length", line["max_length"]))
elif line["type"] == "DecimalField":
options.append(("max_digits", line["max_digits"]))
options.append(("decimal_places", line["decimal_places"]))
if line["optional"] == "Y":
if line["type"] != "BooleanField" and line["primary_key"] != "True":
options.append(("null", "True"))
options.append(("help_text", quote(line["descr"])))
print(f' {line['field']} = models.{line['type']}(')
for k, v in options:
print(f" {k}={v},")
print(" )")
| # flake8: noqa
import csv
def model_name(table_name):
if table_name in ["vtm", "vpi", "vmp", "vmpp", "amp", "ampp", "gtin"]:
return table_name.upper()
else:
return "".join(tok.title() for tok in table_name.split("_"))
def quote(s):
assert '"' not in s
return '"' + s + '"'
with open("schema.csv") as f:
lines = list(csv.DictReader(f))
print("from django.db import models")
table = None
for line in lines:
if line["table"] == "ccontent":
continue
if line["table"] != table:
table = line["table"]
print()
print()
print(f"class {model_name(table)}(models.Model):")
print("# class Meta:")
print('# verbose_name = "TODO"')
print()
if line["type"] == "retired":
continue
options = []
if line["primary_key"] == "True":
options.append(("primary_key", "True"))
if line["db_column"]:
options.append(("db_column", quote(line["db_column"])))
if line["type"] in ["ForeignKey", "OneToOneField"]:
options.append(("to", quote(model_name(line["to"]))))
options.append(("on_delete", "models.CASCADE"))
if "prevcd" in line["db_column"] or "uomcd" in line["db_column"]:
options.append(("related_name", quote("+")))
elif line["type"] == "CharField":
options.append(("max_length", line["max_length"]))
elif line["type"] == "DecimalField":
options.append(("max_digits", line["max_digits"]))
options.append(("decimal_places", line["decimal_places"]))
if line["optional"] == "Y":
if line["type"] != "BooleanField" and line["primary_key"] != "True":
options.append(("null", "True"))
options.append(("help_text", quote(line["descr"])))
print(f' {line["field"]} = models.{line["type"]}(')
for k, v in options:
print(f" {k}={v},")
print(" )")
|
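# Illustration of the generator above (hypothetical schema.csv row, not real data):
# a row such as
#     table=amp, field=nm, type=CharField, db_column=nm, max_length=255,
#     optional=Y, descr=Drug name, primary_key=(empty)
# would be printed as
#     nm = models.CharField(
#         db_column="nm",
#         max_length=255,
#         null=True,
#         help_text="Drug name",
#     )
# preceded by a `class AMP(models.Model):` header whenever the table changes.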
import os
from nonebot.adapters.cqhttp import MessageSegment, Message
import nonebot
import random
from .update_game_info import update_info
from .util import generate_img, init_star_rst, BaseData, set_list, get_star, init_up_char
from .config import GENSHIN_FIVE_P, GENSHIN_FOUR_P, GENSHIN_G_FIVE_P, GENSHIN_G_FOUR_P, GENSHIN_THREE_P, I72_ADD, \
DRAW_PATH, GENSHIN_FLAG
from dataclasses import dataclass
from .init_card_pool import init_game_pool
from .announcement import GenshinAnnouncement
try:
import ujson as json
except ModuleNotFoundError:
import json
driver: nonebot.Driver = nonebot.get_driver()
announcement = GenshinAnnouncement()
genshin_five = {}
genshin_count = {}
genshin_pl_count = {}
ALL_CHAR = []
ALL_ARMS = []
UP_CHAR = []
UP_ARMS = []
_CURRENT_CHAR_POOL_TITLE = ''
_CURRENT_ARMS_POOL_TITLE = ''
POOL_IMG = ''
@dataclass
class GenshinChar(BaseData):
pass
async def genshin_draw(user_id: int, count: int, pool_name: str):
# 0 1 2
cnlist = ['★★★★★', '★★★★', '★★★']
char_list, five_list, five_index_list, char_dict, star_list = _format_card_information(count, user_id, pool_name)
temp = ''
title = ''
up_type = []
up_list = []
if pool_name == 'char' and _CURRENT_CHAR_POOL_TITLE:
up_type = UP_CHAR
title = _CURRENT_CHAR_POOL_TITLE
elif pool_name == 'arms' and _CURRENT_ARMS_POOL_TITLE:
up_type = UP_ARMS
title = _CURRENT_ARMS_POOL_TITLE
tmp = ''
if up_type:
for x in up_type:
for operator in x.operators:
up_list.append(operator)
if x.star == 5:
tmp += f'五星UP:{' '.join(x.operators)} \n'
elif x.star == 4:
tmp += f'四星UP:{' '.join(x.operators)}'
rst = init_star_rst(star_list, cnlist, five_list, five_index_list, up_list)
pool_info = f'当前up池:{title}\n{tmp}' if title else ''
if count > 90:
char_list = set_list(char_list)
return pool_info + '\n' + MessageSegment.image("base64://" + await generate_img(char_list, 'genshin', star_list)) + '\n' + rst[:-1] + \
temp[:-1] + f'\n距离保底发还剩 {90 - genshin_count[user_id] if genshin_count.get(user_id) else '^'} 抽' \
+ "\n【五星:0.6%,四星:5.1%\n第72抽开始五星概率每抽加0.585%】"
async def update_genshin_info():
global ALL_CHAR, ALL_ARMS
url = 'https://wiki.biligame.com/ys/角色筛选'
data, code = await update_info(url, 'genshin')
if code == 200:
ALL_CHAR = init_game_pool('genshin', data, GenshinChar)
url = 'https://wiki.biligame.com/ys/武器图鉴'
data, code = await update_info(url, 'genshin_arms', ['头像', '名称', '类型', '稀有度.alt',
'获取途径', '初始基础属性1', '初始基础属性2',
'攻击力(MAX)', '副属性(MAX)', '技能'])
if code == 200:
ALL_ARMS = init_game_pool('genshin_arms', data, GenshinChar)
await _genshin_init_up_char()
async def init_genshin_data():
global ALL_CHAR, ALL_ARMS
if GENSHIN_FLAG:
if not os.path.exists(DRAW_PATH + 'genshin.json') or not os.path.exists(DRAW_PATH + 'genshin_arms.json'):
await update_genshin_info()
else:
with open(DRAW_PATH + 'genshin.json', 'r', encoding='utf8') as f:
genshin_dict = json.load(f)
with open(DRAW_PATH + 'genshin_arms.json', 'r', encoding='utf8') as f:
genshin_ARMS_dict = json.load(f)
ALL_CHAR = init_game_pool('genshin', genshin_dict, GenshinChar)
ALL_ARMS = init_game_pool('genshin_arms', genshin_ARMS_dict, GenshinChar)
await _genshin_init_up_char()
# Draw from the card pool
def _get_genshin_card(mode: int = 1, pool_name: str = '', add: float = 0.0):
global ALL_ARMS, ALL_CHAR, UP_ARMS, UP_CHAR, _CURRENT_ARMS_POOL_TITLE, _CURRENT_CHAR_POOL_TITLE
if mode == 1:
star = get_star([5, 4, 3], [GENSHIN_FIVE_P + add, GENSHIN_FOUR_P, GENSHIN_THREE_P])
elif mode == 2:
star = get_star([5, 4], [GENSHIN_G_FIVE_P + add, GENSHIN_G_FOUR_P])
else:
star = 5
if pool_name == 'char':
data_lst = UP_CHAR
flag = _CURRENT_CHAR_POOL_TITLE
itype_all_lst = ALL_CHAR + [x for x in ALL_ARMS if x.star == star and x.star < 5]
elif pool_name == 'arms':
data_lst = UP_ARMS
flag = _CURRENT_ARMS_POOL_TITLE
itype_all_lst = ALL_ARMS + [x for x in ALL_CHAR if x.star == star and x.star < 5]
else:
data_lst = ''
flag = ''
itype_all_lst = ''
all_lst = ALL_ARMS + ALL_CHAR
# Check whether this is a rate-up (UP) pull
if flag and star > 3 and pool_name:
# Get the list of rate-up characters
up_char_lst = [x.operators for x in data_lst if x.star == star][0]
# Won the rate-up: pick a rate-up character
if random.random() < 0.5:
up_char_name = random.choice(up_char_lst)
acquire_char = [x for x in all_lst if x.name == up_char_name][0]
else:
# Lost the rate-up: pick a non-UP item
all_char_lst = [x for x in itype_all_lst if x.star == star and x.name not in up_char_lst and not x.limited]
acquire_char = random.choice(all_char_lst)
else:
chars = [x for x in all_lst if x.star == star and not x.limited]
acquire_char = random.choice(chars)
return acquire_char, 5 - star
def _format_card_information(_count: int, user_id, pool_name):
char_list = []
star_list = [0, 0, 0]
five_index_list = []
five_list = []
five_dict = {}
add = 0.0
if genshin_count.get(user_id) and _count <= 90:
f_count = genshin_count[user_id]
else:
f_count = 0
if genshin_pl_count.get(user_id) and _count <= 90:
count = genshin_pl_count[user_id]
else:
count = 0
for i in range(_count):
count += 1
f_count += 1
# Ten-pull guarantee (at least a 4-star)
if count == 10 and f_count != 90:
if f_count >= 72:
add += I72_ADD
char, code = _get_genshin_card(2, pool_name, add=add)
count = 0
# Hard pity (guaranteed 5-star)
elif f_count == 90:
char, code = _get_genshin_card(3, pool_name)
else:
if f_count >= 72:
add += I72_ADD
char, code = _get_genshin_card(pool_name=pool_name, add=add)
if code == 1:
count = 0
star_list[code] += 1
if code == 0:
if _count <= 90:
genshin_five[user_id] = f_count
add = 0.0
f_count = 0
five_list.append(char.name)
five_index_list.append(i)
try:
five_dict[char.name] += 1
except KeyError:
five_dict[char.name] = 1
char_list.append(char)
if _count <= 90:
genshin_count[user_id] = f_count
genshin_pl_count[user_id] = count
return char_list, five_list, five_index_list, five_dict, star_list
def reset_count(user_id: int):
genshin_count[user_id] = 0
genshin_pl_count[user_id] = 0
# Fetch the current rate-up pool and probabilities
async def _genshin_init_up_char():
global _CURRENT_CHAR_POOL_TITLE, _CURRENT_ARMS_POOL_TITLE, UP_CHAR, UP_ARMS, POOL_IMG
_CURRENT_CHAR_POOL_TITLE, _CURRENT_ARMS_POOL_TITLE, POOL_IMG, UP_CHAR, UP_ARMS = await init_up_char(announcement)
async def reload_genshin_pool():
await _genshin_init_up_char()
return Message(f'当前UP池子:{_CURRENT_CHAR_POOL_TITLE} & {_CURRENT_ARMS_POOL_TITLE} {POOL_IMG}')
| import os
from nonebot.adapters.cqhttp import MessageSegment, Message
import nonebot
import random
from .update_game_info import update_info
from .util import generate_img, init_star_rst, BaseData, set_list, get_star, init_up_char
from .config import GENSHIN_FIVE_P, GENSHIN_FOUR_P, GENSHIN_G_FIVE_P, GENSHIN_G_FOUR_P, GENSHIN_THREE_P, I72_ADD, \
DRAW_PATH, GENSHIN_FLAG
from dataclasses import dataclass
from .init_card_pool import init_game_pool
from .announcement import GenshinAnnouncement
try:
import ujson as json
except ModuleNotFoundError:
import json
driver: nonebot.Driver = nonebot.get_driver()
announcement = GenshinAnnouncement()
genshin_five = {}
genshin_count = {}
genshin_pl_count = {}
ALL_CHAR = []
ALL_ARMS = []
UP_CHAR = []
UP_ARMS = []
_CURRENT_CHAR_POOL_TITLE = ''
_CURRENT_ARMS_POOL_TITLE = ''
POOL_IMG = ''
@dataclass
class GenshinChar(BaseData):
pass
async def genshin_draw(user_id: int, count: int, pool_name: str):
# 0 1 2
cnlist = ['★★★★★', '★★★★', '★★★']
char_list, five_list, five_index_list, char_dict, star_list = _format_card_information(count, user_id, pool_name)
temp = ''
title = ''
up_type = []
up_list = []
if pool_name == 'char' and _CURRENT_CHAR_POOL_TITLE:
up_type = UP_CHAR
title = _CURRENT_CHAR_POOL_TITLE
elif pool_name == 'arms' and _CURRENT_ARMS_POOL_TITLE:
up_type = UP_ARMS
title = _CURRENT_ARMS_POOL_TITLE
tmp = ''
if up_type:
for x in up_type:
for operator in x.operators:
up_list.append(operator)
if x.star == 5:
tmp += f'五星UP:{" ".join(x.operators)} \n'
elif x.star == 4:
tmp += f'四星UP:{" ".join(x.operators)}'
rst = init_star_rst(star_list, cnlist, five_list, five_index_list, up_list)
pool_info = f'当前up池:{title}\n{tmp}' if title else ''
if count > 90:
char_list = set_list(char_list)
return pool_info + '\n' + MessageSegment.image("base64://" + await generate_img(char_list, 'genshin', star_list)) + '\n' + rst[:-1] + \
temp[:-1] + f'\n距离保底发还剩 {90 - genshin_count[user_id] if genshin_count.get(user_id) else "^"} 抽' \
+ "\n【五星:0.6%,四星:5.1%\n第72抽开始五星概率每抽加0.585%】"
async def update_genshin_info():
global ALL_CHAR, ALL_ARMS
url = 'https://wiki.biligame.com/ys/角色筛选'
data, code = await update_info(url, 'genshin')
if code == 200:
ALL_CHAR = init_game_pool('genshin', data, GenshinChar)
url = 'https://wiki.biligame.com/ys/武器图鉴'
data, code = await update_info(url, 'genshin_arms', ['头像', '名称', '类型', '稀有度.alt',
'获取途径', '初始基础属性1', '初始基础属性2',
'攻击力(MAX)', '副属性(MAX)', '技能'])
if code == 200:
ALL_ARMS = init_game_pool('genshin_arms', data, GenshinChar)
await _genshin_init_up_char()
async def init_genshin_data():
global ALL_CHAR, ALL_ARMS
if GENSHIN_FLAG:
if not os.path.exists(DRAW_PATH + 'genshin.json') or not os.path.exists(DRAW_PATH + 'genshin_arms.json'):
await update_genshin_info()
else:
with open(DRAW_PATH + 'genshin.json', 'r', encoding='utf8') as f:
genshin_dict = json.load(f)
with open(DRAW_PATH + 'genshin_arms.json', 'r', encoding='utf8') as f:
genshin_ARMS_dict = json.load(f)
ALL_CHAR = init_game_pool('genshin', genshin_dict, GenshinChar)
ALL_ARMS = init_game_pool('genshin_arms', genshin_ARMS_dict, GenshinChar)
await _genshin_init_up_char()
# Draw from the card pool
def _get_genshin_card(mode: int = 1, pool_name: str = '', add: float = 0.0):
global ALL_ARMS, ALL_CHAR, UP_ARMS, UP_CHAR, _CURRENT_ARMS_POOL_TITLE, _CURRENT_CHAR_POOL_TITLE
if mode == 1:
star = get_star([5, 4, 3], [GENSHIN_FIVE_P + add, GENSHIN_FOUR_P, GENSHIN_THREE_P])
elif mode == 2:
star = get_star([5, 4], [GENSHIN_G_FIVE_P + add, GENSHIN_G_FOUR_P])
else:
star = 5
if pool_name == 'char':
data_lst = UP_CHAR
flag = _CURRENT_CHAR_POOL_TITLE
itype_all_lst = ALL_CHAR + [x for x in ALL_ARMS if x.star == star and x.star < 5]
elif pool_name == 'arms':
data_lst = UP_ARMS
flag = _CURRENT_ARMS_POOL_TITLE
itype_all_lst = ALL_ARMS + [x for x in ALL_CHAR if x.star == star and x.star < 5]
else:
data_lst = ''
flag = ''
itype_all_lst = ''
all_lst = ALL_ARMS + ALL_CHAR
# Check whether this is a rate-up (UP) pull
if flag and star > 3 and pool_name:
# Get the list of rate-up characters
up_char_lst = [x.operators for x in data_lst if x.star == star][0]
# Won the rate-up: pick a rate-up character
if random.random() < 0.5:
up_char_name = random.choice(up_char_lst)
acquire_char = [x for x in all_lst if x.name == up_char_name][0]
else:
# Lost the rate-up: pick a non-UP item
all_char_lst = [x for x in itype_all_lst if x.star == star and x.name not in up_char_lst and not x.limited]
acquire_char = random.choice(all_char_lst)
else:
chars = [x for x in all_lst if x.star == star and not x.limited]
acquire_char = random.choice(chars)
return acquire_char, 5 - star
def _format_card_information(_count: int, user_id, pool_name):
char_list = []
star_list = [0, 0, 0]
five_index_list = []
five_list = []
five_dict = {}
add = 0.0
if genshin_count.get(user_id) and _count <= 90:
f_count = genshin_count[user_id]
else:
f_count = 0
if genshin_pl_count.get(user_id) and _count <= 90:
count = genshin_pl_count[user_id]
else:
count = 0
for i in range(_count):
count += 1
f_count += 1
# Ten-pull guarantee (at least a 4-star)
if count == 10 and f_count != 90:
if f_count >= 72:
add += I72_ADD
char, code = _get_genshin_card(2, pool_name, add=add)
count = 0
# Hard pity (guaranteed 5-star)
elif f_count == 90:
char, code = _get_genshin_card(3, pool_name)
else:
if f_count >= 72:
add += I72_ADD
char, code = _get_genshin_card(pool_name=pool_name, add=add)
if code == 1:
count = 0
star_list[code] += 1
if code == 0:
if _count <= 90:
genshin_five[user_id] = f_count
add = 0.0
f_count = 0
five_list.append(char.name)
five_index_list.append(i)
try:
five_dict[char.name] += 1
except KeyError:
five_dict[char.name] = 1
char_list.append(char)
if _count <= 90:
genshin_count[user_id] = f_count
genshin_pl_count[user_id] = count
return char_list, five_list, five_index_list, five_dict, star_list
def reset_count(user_id: int):
genshin_count[user_id] = 0
genshin_pl_count[user_id] = 0
# Fetch the current rate-up pool and probabilities
async def _genshin_init_up_char():
global _CURRENT_CHAR_POOL_TITLE, _CURRENT_ARMS_POOL_TITLE, UP_CHAR, UP_ARMS, POOL_IMG
_CURRENT_CHAR_POOL_TITLE, _CURRENT_ARMS_POOL_TITLE, POOL_IMG, UP_CHAR, UP_ARMS = await init_up_char(announcement)
async def reload_genshin_pool():
await _genshin_init_up_char()
return Message(f'当前UP池子:{_CURRENT_CHAR_POOL_TITLE} & {_CURRENT_ARMS_POOL_TITLE} {POOL_IMG}')
|
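# A rough sketch of the soft-pity ramp implemented above. Assumptions only: the
# user-facing message suggests GENSHIN_FIVE_P == 0.006 and I72_ADD == 0.00585,
# `add` grows by I72_ADD on every pull from the 72nd pull since the last 5-star,
# and the `f_count == 90` branch forces a 5-star at pull 90.
def _approx_five_star_chance(pulls_since_last_five_star: int) -> float:
    base, ramp = 0.006, 0.00585  # assumed values, see note above
    if pulls_since_last_five_star >= 90:
        return 1.0
    extra = max(0, pulls_since_last_five_star - 71) * ramp
    return min(1.0, base + extra)
# _approx_five_star_chance(71) == 0.006, _approx_five_star_chance(72) == 0.01185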
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""RAMSES RF - Protocol/Transport layer.
Helper functions.
"""
import ctypes
import sys
import time
from datetime import datetime as dt
from typing import Optional, Union
from .const import DEVICE_TYPES, NON_DEVICE_ID, NUL_DEVICE_ID
class FILETIME(ctypes.Structure):
"""Data structure for GetSystemTimePreciseAsFileTime()."""
_fields_ = [("dwLowDateTime", ctypes.c_uint), ("dwHighDateTime", ctypes.c_uint)]
def dt_now() -> dt:
"""Return the current datetime as a local/naive datetime object.
This is slower, but potentially more accurate, than dt.now(), and is used mainly for
packet timestamps.
"""
return dt.fromtimestamp(timestamp())
def dt_str() -> str:
"""Return the current datetime as a isoformat string."""
return dt_now().isoformat(timespec="microseconds")
def timestamp() -> float:
"""Return the number of seconds since the Unix epoch.
Return an accurate value, even for Windows-based systems.
""" # see: https://www.python.org/dev/peps/pep-0564/
if sys.platform != "win32":
return time.time_ns() / 1e9 # since 1970-01-01T00:00:00Z, time.gmtime(0)
file_time = FILETIME()
ctypes.windll.kernel32.GetSystemTimePreciseAsFileTime(ctypes.byref(file_time))
_time = (file_time.dwLowDateTime + (file_time.dwHighDateTime << 32)) / 1e7
return _time - 134774 * 24 * 60 * 60 # otherwise, is since 1601-01-01T00:00:00Z
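# Sanity check (a sketch, not part of this module): 134774 is the number of days
# between the Windows FILETIME epoch (1601-01-01) and the Unix epoch (1970-01-01):
#
#     >>> from datetime import date
#     >>> (date(1970, 1, 1) - date(1601, 1, 1)).days
#     134774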
def _precision_v_cost():
import math
#
LOOPS = 10 ** 6
#
print("time.time_ns(): %s" % time.time_ns())
print("time.time(): %s\r\n" % time.time())
#
starts = time.time_ns()
min_dt = [abs(time.time_ns() - time.time_ns()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta time_ns(): %s ns" % min_dt)
print("duration time_ns(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_dt = [abs(time.time() - time.time()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta time(): %s ns" % math.ceil(min_dt * 1e9))
print("duration time(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_dt = [abs(timestamp() - timestamp()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta timestamp(): %s ns" % math.ceil(min_dt * 1e9))
print("duration timestamp(): %s ns\r\n" % (time.time_ns() - starts))
#
LOOPS = 10 ** 4
#
starts = time.time_ns()
min_td = [abs(dt.now() - dt.now()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt.now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt.now(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_td = [abs(dt_now() - dt_now()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_td = [
abs(
(dt_now if sys.platform == "win32" else dt.now)()
- (dt_now if sys.platform == "win32" else dt.now)()
)
for _ in range(LOOPS)
]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
#
dt_nov = dt_now if sys.platform == "win32" else dt.now
starts = time.time_ns()
min_td = [abs(dt_nov() - dt_nov()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
def double(val, factor=1) -> Optional[float]:
"""Return a double, used by 31DA."""
if val == "7FFF":
return
result = int(val, 16)
assert result < 32767
return result if factor == 1 else result / factor
def flag8(byte, lsb=False) -> list:
"""Split a byte (as a str) into a list of 8 bits, MSB first by default."""
if lsb is True:
return [(bytes.fromhex(byte)[0] & (1 << x)) >> x for x in range(8)]
return [(bytes.fromhex(byte)[0] & (1 << x)) >> x for x in reversed(range(8))]
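# Example (a sketch, not part of this module): 0xA0 == 0b10100000, so
#
#     >>> flag8("A0")            # MSB first (default)
#     [1, 0, 1, 0, 0, 0, 0, 0]
#     >>> flag8("A0", lsb=True)  # LSB first
#     [0, 0, 0, 0, 0, 1, 0, 1]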
def percent(value: str) -> Optional[float]: # a percentage 0-100% (0.0 to 1.0)
"""Return a percentage, 0-100% with resolution of 0.5%."""
assert len(value) == 2, f"percent({value}): len is not 2"
if value in {"EF", "FE", "FF"}: # TODO: diff b/w FE (seen with 3150) & FF
return
assert int(value, 16) <= 200, f"max value should be 0xC8, not 0x{value}"
return int(value, 16) / 200
def bool_from_hex(value: str) -> Optional[bool]: # either 00 or C8
"""Return a boolean."""
assert value in {"00", "C8", "FF"}, value
return {"00": False, "C8": True}.get(value)
def date_from_hex(value: str) -> Optional[str]: # YY-MM-DD
"""Return a date string in the format YY-MM-DD."""
assert len(value) == 8, "len is not 8"
if value == "FFFFFFFF":
return
return dt(
year=int(value[4:8], 16),
month=int(value[2:4], 16),
day=int(value[:2], 16) & 0b11111, # 1st 3 bits: DayOfWeek
).strftime("%Y-%m-%d")
def dtm_from_hex(value: str) -> str: # from parsers
"""Convert a hex string to an (naive, local) isoformat string."""
# 00141B0A07E3 (...HH:MM:00) for system_mode, zone_mode (schedules?)
# 0400041C0A07E3 (...HH:MM:SS) for sync_datetime
if value == "FF" * 6:
return None
if len(value) == 12:
value = f"00{value}"
# assert len(value) == 14
return dt(
year=int(value[10:14], 16),
month=int(value[8:10], 16),
day=int(value[6:8], 16),
hour=int(value[4:6], 16) & 0b11111, # 1st 3 bits: DayOfWeek
minute=int(value[2:4], 16),
second=int(value[:2], 16) & 0b1111111, # 1st bit: used for DST
).isoformat(timespec="seconds")
def dtm_to_hex(dtm: Union[str, dt]) -> str:
"""Convert a datetime (isoformat string, or datetime obj) to a hex string."""
def _dtm_to_hex(tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, *args):
return f"{tm_min:02X}{tm_hour:02X}{tm_mday:02X}{tm_mon:02X}{tm_year:04X}"
if dtm is None:
return "FF" * 6
if isinstance(dtm, str):
try:
dtm = dt.fromisoformat(dtm)
except ValueError:
raise ValueError("Invalid datetime isoformat string")
elif not isinstance(dtm, dt):
raise TypeError("Invalid datetime object")
# if dtm < dt.now() + td(minutes=1):
# raise ValueError("Invalid datetime")
return _dtm_to_hex(*dtm.timetuple())
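# Round-trip sketch (not part of this module), using the sample payload quoted in
# the dtm_from_hex() comments above:
#
#     >>> dtm_from_hex("00141B0A07E3")
#     '2019-10-27T20:00:00'
#     >>> dtm_to_hex("2019-10-27T20:00:00")
#     '00141B0A07E3'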
def dts_from_hex(value: str) -> Optional[str]:
"""YY-MM-DD HH:MM:SS."""
if value == "00000000007F":
return None
_seqx = int(value, 16)
return dt(
year=(_seqx & 0b1111111 << 24) >> 24,
month=(_seqx & 0b1111 << 36) >> 36,
day=(_seqx & 0b11111 << 31) >> 31,
hour=(_seqx & 0b11111 << 19) >> 19,
minute=(_seqx & 0b111111 << 13) >> 13,
second=(_seqx & 0b111111 << 7) >> 7,
).strftime("%Y-%m-%dT%H:%M:%S")
def dts_to_hex(dtm: Union[str, dt]) -> str: # TODO: WIP
"""YY-MM-DD HH:MM:SS."""
if dtm is None:
return "00000000007F"
if isinstance(dtm, str):
try:
dtm = dt.fromisoformat(dtm) # TODO: YY-MM-DD, not YYYY-MM-DD
except ValueError:
raise ValueError("Invalid datetime isoformat string")
elif not isinstance(dtm, dt):
raise TypeError("Invalid datetime object")
(tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, *args) = dtm.timetuple()
val = sum(
(
tm_year % 100 << 24,
tm_mon << 36,
tm_mday << 31,
tm_hour << 19,
tm_min << 13,
tm_sec << 7,
)
)
return f"{val:012X}"
def str_from_hex(value: str) -> Optional[str]: # printable ASCII characters
"""Return a string of printable ASCII characters."""
# result = bytearray.fromhex(value).split(b"\x7F")[0] # TODO: needs checking
result = bytearray([x for x in bytearray.fromhex(value) if 31 < x < 127])
return result.decode("ascii").strip() if result else None
def str_to_hex(value: str) -> str:
"""Convert a string to a variable-length ASCII hex string."""
return "".join(f"{ord(x):02X}" for x in value)
# return value.encode().hex()
def temp_from_hex(value: str) -> Union[float, bool, None]:
"""Convert a 2's complement 4-byte hex string to an float."""
assert len(value) == 4, f"temp_from_hex({value}): should be 4 bytes long"
if value == "31FF": # means: N/A (== 127.99, 2s complement), signed?
return
if value == "7EFF": # possibly only for setpoints? unsigned?
return False
if value == "7FFF": # also: FFFF?, means: N/A (== 327.67)
return
temp = int(value, 16)
return (temp if temp < 2 ** 15 else temp - 2 ** 16) / 100
def temp_to_hex(value: float) -> str:
"""Convert a float to a 2's complement 4-byte hex string."""
assert (
not value or -(2 ** 7) <= value < 2 ** 7
), f"temp_to_hex({value}): is out of 2's complement range"
if value is None:
return "7FFF" # or: "31FF"?
if value is False:
return "7EFF"
temp = int(value * 100)
return f"{temp if temp >= 0 else temp + 2 ** 16:04X}"
def valve_demand(value: str) -> dict:
# a damper restricts flow, a valve permits flow
demand = int(value, 16)
if demand & 0xF0 == 0xF0:
VALVE_STATE = {
"F0": "open_circuit",
"F1": "short_circuit",
"FD": "valve_stuck", # damper/valve stuck
"FE": "actuator_stuck",
} # VALVE_STATE.get(value, "malfunction")
return {
"heat_demand": None,
"fault": VALVE_STATE.get(value, "malfunction"),
}
assert demand <= 200
return {"heat_demand": demand / 200}
def hex_id_to_dec(device_hex: str, friendly_id=False) -> str:
"""Convert (say) '06368E' to '01:145038' (or 'CTL:145038')."""
if device_hex == "FFFFFE": # aka '63:262142'
return "NUL:262142" if friendly_id else NUL_DEVICE_ID
if not device_hex.strip(): # aka '--:------'
return f"{"":10}" if friendly_id else NON_DEVICE_ID
_tmp = int(device_hex, 16)
dev_type = f"{(_tmp & 0xFC0000) >> 18:02d}"
if friendly_id:
dev_type = DEVICE_TYPES.get(dev_type, f"{dev_type:<3}")
return f"{dev_type}:{_tmp & 0x03FFFF:06d}"
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""RAMSES RF - Protocol/Transport layer.
Helper functions.
"""
import ctypes
import sys
import time
from datetime import datetime as dt
from typing import Optional, Union
from .const import DEVICE_TYPES, NON_DEVICE_ID, NUL_DEVICE_ID
class FILETIME(ctypes.Structure):
"""Data structure for GetSystemTimePreciseAsFileTime()."""
_fields_ = [("dwLowDateTime", ctypes.c_uint), ("dwHighDateTime", ctypes.c_uint)]
def dt_now() -> dt:
"""Return the current datetime as a local/naive datetime object.
This is slower, but potentially more accurate, than dt.now(), and is used mainly for
packet timestamps.
"""
return dt.fromtimestamp(timestamp())
def dt_str() -> str:
"""Return the current datetime as a isoformat string."""
return dt_now().isoformat(timespec="microseconds")
def timestamp() -> float:
"""Return the number of seconds since the Unix epoch.
Return an accurate value, even for Windows-based systems.
""" # see: https://www.python.org/dev/peps/pep-0564/
if sys.platform != "win32":
return time.time_ns() / 1e9 # since 1970-01-01T00:00:00Z, time.gmtime(0)
file_time = FILETIME()
ctypes.windll.kernel32.GetSystemTimePreciseAsFileTime(ctypes.byref(file_time))
_time = (file_time.dwLowDateTime + (file_time.dwHighDateTime << 32)) / 1e7
return _time - 134774 * 24 * 60 * 60 # otherwise, is since 1601-01-01T00:00:00Z
def _precision_v_cost():
import math
#
LOOPS = 10 ** 6
#
print("time.time_ns(): %s" % time.time_ns())
print("time.time(): %s\r\n" % time.time())
#
starts = time.time_ns()
min_dt = [abs(time.time_ns() - time.time_ns()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta time_ns(): %s ns" % min_dt)
print("duration time_ns(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_dt = [abs(time.time() - time.time()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta time(): %s ns" % math.ceil(min_dt * 1e9))
print("duration time(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_dt = [abs(timestamp() - timestamp()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta timestamp(): %s ns" % math.ceil(min_dt * 1e9))
print("duration timestamp(): %s ns\r\n" % (time.time_ns() - starts))
#
LOOPS = 10 ** 4
#
starts = time.time_ns()
min_td = [abs(dt.now() - dt.now()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt.now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt.now(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_td = [abs(dt_now() - dt_now()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_td = [
abs(
(dt_now if sys.platform == "win32" else dt.now)()
- (dt_now if sys.platform == "win32" else dt.now)()
)
for _ in range(LOOPS)
]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
#
dt_nov = dt_now if sys.platform == "win32" else dt.now
starts = time.time_ns()
min_td = [abs(dt_nov() - dt_nov()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
def double(val, factor=1) -> Optional[float]:
"""Return a double, used by 31DA."""
if val == "7FFF":
return
result = int(val, 16)
assert result < 32767
return result if factor == 1 else result / factor
def flag8(byte, lsb=False) -> list:
"""Split a byte (as a str) into a list of 8 bits, MSB first by default."""
if lsb is True:
return [(bytes.fromhex(byte)[0] & (1 << x)) >> x for x in range(8)]
return [(bytes.fromhex(byte)[0] & (1 << x)) >> x for x in reversed(range(8))]
def percent(value: str) -> Optional[float]: # a percentage 0-100% (0.0 to 1.0)
"""Return a percentage, 0-100% with resolution of 0.5%."""
assert len(value) == 2, f"percent({value}): len is not 2"
if value in {"EF", "FE", "FF"}: # TODO: diff b/w FE (seen with 3150) & FF
return
assert int(value, 16) <= 200, f"max value should be 0xC8, not 0x{value}"
return int(value, 16) / 200
def bool_from_hex(value: str) -> Optional[bool]: # either 00 or C8
"""Return a boolean."""
assert value in {"00", "C8", "FF"}, value
return {"00": False, "C8": True}.get(value)
def date_from_hex(value: str) -> Optional[str]: # YY-MM-DD
"""Return a date string in the format YY-MM-DD."""
assert len(value) == 8, "len is not 8"
if value == "FFFFFFFF":
return
return dt(
year=int(value[4:8], 16),
month=int(value[2:4], 16),
day=int(value[:2], 16) & 0b11111, # 1st 3 bits: DayOfWeek
).strftime("%Y-%m-%d")
def dtm_from_hex(value: str) -> str: # from parsers
"""Convert a hex string to an (naive, local) isoformat string."""
# 00141B0A07E3 (...HH:MM:00) for system_mode, zone_mode (schedules?)
# 0400041C0A07E3 (...HH:MM:SS) for sync_datetime
if value == "FF" * 6:
return None
if len(value) == 12:
value = f"00{value}"
# assert len(value) == 14
return dt(
year=int(value[10:14], 16),
month=int(value[8:10], 16),
day=int(value[6:8], 16),
hour=int(value[4:6], 16) & 0b11111, # 1st 3 bits: DayOfWeek
minute=int(value[2:4], 16),
second=int(value[:2], 16) & 0b1111111, # 1st bit: used for DST
).isoformat(timespec="seconds")
def dtm_to_hex(dtm: Union[str, dt]) -> str:
"""Convert a datetime (isoformat string, or datetime obj) to a hex string."""
def _dtm_to_hex(tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, *args):
return f"{tm_min:02X}{tm_hour:02X}{tm_mday:02X}{tm_mon:02X}{tm_year:04X}"
if dtm is None:
return "FF" * 6
if isinstance(dtm, str):
try:
dtm = dt.fromisoformat(dtm)
except ValueError:
raise ValueError("Invalid datetime isoformat string")
elif not isinstance(dtm, dt):
raise TypeError("Invalid datetime object")
# if dtm < dt.now() + td(minutes=1):
# raise ValueError("Invalid datetime")
return _dtm_to_hex(*dtm.timetuple())
def dts_from_hex(value: str) -> Optional[str]:
"""YY-MM-DD HH:MM:SS."""
if value == "00000000007F":
return None
_seqx = int(value, 16)
return dt(
year=(_seqx & 0b1111111 << 24) >> 24,
month=(_seqx & 0b1111 << 36) >> 36,
day=(_seqx & 0b11111 << 31) >> 31,
hour=(_seqx & 0b11111 << 19) >> 19,
minute=(_seqx & 0b111111 << 13) >> 13,
second=(_seqx & 0b111111 << 7) >> 7,
).strftime("%Y-%m-%dT%H:%M:%S")
def dts_to_hex(dtm: Union[str, dt]) -> str: # TODO: WIP
"""YY-MM-DD HH:MM:SS."""
if dtm is None:
return "00000000007F"
if isinstance(dtm, str):
try:
dtm = dt.fromisoformat(dtm) # TODO: YY-MM-DD, not YYYY-MM-DD
except ValueError:
raise ValueError("Invalid datetime isoformat string")
elif not isinstance(dtm, dt):
raise TypeError("Invalid datetime object")
(tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, *args) = dtm.timetuple()
val = sum(
(
tm_year % 100 << 24,
tm_mon << 36,
tm_mday << 31,
tm_hour << 19,
tm_min << 13,
tm_sec << 7,
)
)
return f"{val:012X}"
def str_from_hex(value: str) -> Optional[str]: # printable ASCII characters
"""Return a string of printable ASCII characters."""
# result = bytearray.fromhex(value).split(b"\x7F")[0] # TODO: needs checking
result = bytearray([x for x in bytearray.fromhex(value) if 31 < x < 127])
return result.decode("ascii").strip() if result else None
def str_to_hex(value: str) -> str:
"""Convert a string to a variable-length ASCII hex string."""
return "".join(f"{ord(x):02X}" for x in value)
# return value.encode().hex()
def temp_from_hex(value: str) -> Union[float, bool, None]:
"""Convert a 2's complement 4-byte hex string to an float."""
assert len(value) == 4, f"temp_from_hex({value}): should be 4 bytes long"
if value == "31FF": # means: N/A (== 127.99, 2s complement), signed?
return
if value == "7EFF": # possibly only for setpoints? unsigned?
return False
if value == "7FFF": # also: FFFF?, means: N/A (== 327.67)
return
temp = int(value, 16)
return (temp if temp < 2 ** 15 else temp - 2 ** 16) / 100
def temp_to_hex(value: float) -> str:
"""Convert a float to a 2's complement 4-byte hex string."""
assert (
not value or -(2 ** 7) <= value < 2 ** 7
), f"temp_to_hex({value}): is out of 2's complement range"
if value is None:
return "7FFF" # or: "31FF"?
if value is False:
return "7EFF"
temp = int(value * 100)
return f"{temp if temp >= 0 else temp + 2 ** 16:04X}"
def valve_demand(value: str) -> dict:
# a damper restricts flow, a valve permits flow
demand = int(value, 16)
if demand & 0xF0 == 0xF0:
VALVE_STATE = {
"F0": "open_circuit",
"F1": "short_circuit",
"FD": "valve_stuck", # damper/valve stuck
"FE": "actuator_stuck",
} # VALVE_STATE.get(value, "malfunction")
return {
"heat_demand": None,
"fault": VALVE_STATE.get(value, "malfunction"),
}
assert demand <= 200
return {"heat_demand": demand / 200}
def hex_id_to_dec(device_hex: str, friendly_id=False) -> str:
"""Convert (say) '06368E' to '01:145038' (or 'CTL:145038')."""
if device_hex == "FFFFFE": # aka '63:262142'
return "NUL:262142" if friendly_id else NUL_DEVICE_ID
if not device_hex.strip(): # aka '--:------'
return f"{'':10}" if friendly_id else NON_DEVICE_ID
_tmp = int(device_hex, 16)
dev_type = f"{(_tmp & 0xFC0000) >> 18:02d}"
if friendly_id:
dev_type = DEVICE_TYPES.get(dev_type, f"{dev_type:<3}")
return f"{dev_type}:{_tmp & 0x03FFFF:06d}"
|
# %%
import gc
import itertools
import math
import typing as ty
from copy import deepcopy
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim.swa_utils as swa_utils
import zero
from torch import Tensor
import wandb
import lib
import lib.node as node
# %%
class NODE(nn.Module):
def __init__(
self,
*,
d_in: int,
num_layers: int,
layer_dim: int,
depth: int,
tree_dim: int,
choice_function: str,
bin_function: str,
d_out: int,
categories: ty.Optional[ty.List[int]],
d_embedding: int,
) -> None:
super().__init__()
if categories is not None:
d_in += len(categories) * d_embedding
category_offsets = torch.tensor([0] + categories[:-1]).cumsum(0)
self.register_buffer('category_offsets', category_offsets)
self.category_embeddings = nn.Embedding(sum(categories), d_embedding)
nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))
print(f'{self.category_embeddings.weight.shape=}')
self.d_out = d_out
self.block = node.DenseBlock(
input_dim=d_in,
num_layers=num_layers,
layer_dim=layer_dim,
depth=depth,
tree_dim=tree_dim,
bin_function=getattr(node, bin_function),
choice_function=getattr(node, choice_function),
flatten_output=False,
)
def forward(self, x_num: Tensor, x_cat: Tensor) -> Tensor:
if x_cat is not None:
x_cat = self.category_embeddings(x_cat + self.category_offsets[None])
x = torch.cat([x_num, x_cat.view(x_cat.size(0), -1)], dim=-1)
else:
x = x_num
x = self.block(x)
x = x[..., : self.d_out].mean(dim=-2)
x = x.squeeze(-1)
return x
# %%
args, output = lib.load_config()
assert 'weight_decay' not in args, 'NODE architecture performs badly with weight decay'
if 'swa' in args:
assert args['swa']['n_checkpoints'] > 1
# %%
zero.set_randomness(args['seed'])
dataset_dir = lib.get_path(args['data']['path'])
stats: ty.Dict[str, ty.Any] = {
'dataset': dataset_dir.name,
'algorithm': Path(__file__).stem,
**lib.load_json(output / 'stats.json'),
}
D = lib.Dataset.from_dir(dataset_dir)
X = D.build_X(
normalization=args['data'].get('normalization'),
num_nan_policy='mean',
cat_nan_policy='new',
cat_policy=args['data'].get('cat_policy', 'indices'),
cat_min_frequency=args['data'].get('cat_min_frequency', 0.0),
seed=args['seed'],
)
if not isinstance(X, tuple):
X = (X, None)
zero.set_randomness(args['seed'])
Y, y_info = D.build_y(args['data'].get('y_policy'))
lib.dump_pickle(y_info, output / 'y_info.pickle')
X = tuple(None if x is None else lib.to_tensors(x) for x in X)
Y = lib.to_tensors(Y)
device = lib.get_device()
if device.type != 'cpu':
X = tuple(None if x is None else {k: v.to(device) for k, v in x.items()} for x in X)
Y_device = {k: v.to(device) for k, v in Y.items()}
else:
Y_device = Y
X_num, X_cat = X
if not D.is_multiclass:
Y_device = {k: v.float() for k, v in Y_device.items()}
train_size = D.size(lib.TRAIN)
batch_size, epoch_size = (
stats['batch_size'],
stats['epoch_size'],
) = lib.get_epoch_parameters(train_size, args['training'].get('batch_size', 'v3'))
eval_batch_size = args['training']['eval_batch_size']
chunk_size = None
stats['chunk_sizes'] = {}
stats['eval_batch_sizes'] = {}
loss_fn = (
F.binary_cross_entropy_with_logits
if D.is_binclass
else F.cross_entropy
if D.is_multiclass
else F.mse_loss
)
args['model'].setdefault('d_embedding', None)
model = NODE(
d_in=0 if X_num is None else X_num['train'].shape[1],
d_out=D.info['n_classes'] if D.is_multiclass else 1,
categories=lib.get_categories(X_cat),
**args['model'],
).to(device)
if torch.cuda.device_count() > 1: # type: ignore[code]
print('Using nn.DataParallel')
model = nn.DataParallel(model)
stats['n_parameters'] = lib.get_n_parameters(model)
optimizer = lib.make_optimizer(
args['training']['optimizer'],
model.parameters(),
args['training']['lr'],
args['training']['weight_decay'],
)
stream = zero.Stream(lib.IndexLoader(train_size, batch_size, True, device))
progress = zero.ProgressTracker(args['training']['patience'])
training_log = {lib.TRAIN: [], lib.VAL: [], lib.TEST: []}
stage = 0
lr_n_decays = 0
timer = zero.Timer()
swa_stage_first_epoch = None
def print_epoch_info():
print(
f'\n>>> Epoch {stream.epoch} | Stage {stage} | {lib.format_seconds(timer())} | {output}'
)
details = {'lr': lib.get_lr(optimizer), 'chunk_size': chunk_size}
details.update((x, stats[x]) for x in ['batch_size', 'epoch_size', 'n_parameters'])
print(' | '.join(f'{k} = {v}' for k, v in details.items()))
def get_checkpoint_path(suffix):
return output / f'checkpoint_{suffix}.pt'
def step(batch_idx):
logits = model(
X_num[lib.TRAIN][batch_idx],
None if X_cat is None else X_cat[lib.TRAIN][batch_idx],
)
targets = Y_device[lib.TRAIN][batch_idx] # type: ignore[code]
if not D.is_multiclass:
targets = targets.to(logits.dtype)
return logits, targets
def _predict(part):
result = []
for idx in lib.IndexLoader(
D.size(part),
args['training']['eval_batch_size'],
False,
device,
):
result.append(
model(
None if X_num is None else X_num[part][idx],
None if X_cat is None else X_cat[part][idx],
)
)
return torch.cat(result).cpu()
@torch.no_grad()
def predict(m, part):
global eval_batch_size
m.eval()
random_state = zero.get_random_state()
while eval_batch_size:
try:
zero.set_random_state(random_state)
return _predict(part)
except RuntimeError as err:
if not lib.is_oom_exception(err):
raise
zero.free_memory()
gc.collect()
eval_batch_size //= 2
print('New eval batch size:', eval_batch_size)
stats['eval_batch_sizes'][stream.epoch] = eval_batch_size
raise RuntimeError('Not enough memory even for eval_batch_size=1')
@torch.no_grad()
def evaluate(m, parts):
metrics = {}
predictions = {}
for part in parts:
predictions[part] = predict(m, part).numpy()
metrics[part] = lib.calculate_metrics(
D.info['task_type'],
Y[part].numpy(), # type: ignore[code]
predictions[part], # type: ignore[code]
'logits',
y_info,
)
for part, part_metrics in metrics.items():
print(f'[{part:<5}]', lib.make_summary(part_metrics))
return metrics, predictions
STATE_VARIABLES = [
'progress',
'stats',
'timer',
'training_log',
'stage',
'swa_stage_first_epoch',
'lr_n_decays',
'chunk_size',
'eval_batch_size',
]
def save_checkpoint(suffix):
model_artifact = wandb.Artifact('node-artifact', type='model')
torch.save(
{
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'stream': stream.state_dict(),
'random_state': zero.get_random_state(),
**{x: globals()[x] for x in STATE_VARIABLES},
},
get_checkpoint_path(suffix),
)
lib.dump_stats(stats, output, suffix == 'final')
lib.backup_output(output)
model_artifact.add_file(get_checkpoint_path(suffix))
wandb.run.log_artifact(model_artifact)
for stage in list(range(args.get('swa', {}).get('n_checkpoints', 1)))[::-1]:
if get_checkpoint_path(stage).exists():
print(f'Loading checkpoint {get_checkpoint_path(stage).name}')
c = torch.load(get_checkpoint_path(stage))
model.load_state_dict(c['model'])
optimizer.load_state_dict(c['optimizer'])
stream.load_state_dict(c['stream'])
globals().update({x: c[x] for x in STATE_VARIABLES})
stats.setdefault('old_stats', []).append(deepcopy(stats))
stats.setdefault('continuations', []).append(stream.epoch)
zero.set_random_state(c['random_state'])
break
# %%
timer.run()
with torch.no_grad():
# NODE-specific initialization
if stream.epoch == 0:
model.eval()
size = 2048
while True:
try:
zero.set_randomness(args['seed'])
x = step(torch.randperm(train_size)[:size])
del x
except RuntimeError as err:
if not lib.is_oom_exception(err):
raise
size //= 2
else:
break
wandb.init(project="RTDL", config=args)
for epoch in stream.epochs(args['training']['n_epochs']):
print_epoch_info()
epoch_losses = []
for batch_idx in epoch:
loss, new_chunk_size = lib.learn_with_auto_virtual_batch(
model, optimizer, loss_fn, step, batch_idx, batch_size, chunk_size
)
wandb.log({"Training Loss": loss})
epoch_losses.append(loss.detach())
if new_chunk_size and new_chunk_size < (chunk_size or batch_size):
chunk_size = new_chunk_size
print('New chunk size:', chunk_size)
stats['chunk_sizes'][stream.iteration] = chunk_size
zero.free_memory()
gc.collect()
epoch_losses = torch.stack(epoch_losses).tolist()
training_log[lib.TRAIN].extend(epoch_losses)
print(f'[{lib.TRAIN}] loss = {round(sum(epoch_losses) / len(epoch_losses), 3)}')
metrics, predictions = evaluate(model, [lib.VAL, lib.TEST])
wandb.log({"score": metrics[lib.VAL]['score']})
for k, v in metrics.items():
training_log[k].append(v)
wandb.log({k:v})
progress.update(metrics[lib.VAL]['score'])
if progress.success:
print('New best epoch!')
stats[f'best_epoch_{stage}'] = stream.epoch
stats[f'metrics_{stage}'] = metrics
save_checkpoint(stage)
for k, v in predictions.items():
np.save(output / f'p_{stage}_{k}.npy', v)
wandb.log({f"predictions_{k}": v})
elif progress.fail:
if stage == 0 and lr_n_decays < args['training']['lr_n_decays']:
print('Reducing lr...')
stats[f'lr_decay_{lr_n_decays}'] = stream.epoch
lib.set_lr(optimizer, lib.get_lr(optimizer) * args['training']['lr_decay'])
lr_n_decays += 1
progress.forget_bad_updates()
else:
print(f'Finishing stage {stage}...')
stats[f'time_{stage}'] = lib.format_seconds(timer())
if 'swa' not in args or stage + 1 == args['swa']['n_checkpoints']:
break
best_stage_checkpoint = torch.load(get_checkpoint_path(stage))
model.load_state_dict(best_stage_checkpoint['model'])
optimizer.load_state_dict(best_stage_checkpoint['optimizer'])
progress = zero.ProgressTracker(args['swa']['patience'])
lib.set_lr(optimizer, args['training']['lr'] * args['swa']['lr_factor'])
swa_stage_first_epoch = stream.epoch + 1
stage += 1
if stream.epoch == swa_stage_first_epoch:
lib.set_lr(optimizer, args['training']['lr'])
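# A rough sketch (assumption only, not lib's actual implementation) of the
# "virtual batch" idea behind lib.learn_with_auto_virtual_batch used above: when
# the full batch does not fit in memory, split it into chunks, accumulate
# gradients over the chunks, and take a single optimizer step.
#
#     def step_with_chunks(model, optimizer, loss_fn, x, y, chunk_size):
#         optimizer.zero_grad()
#         n = len(x)
#         for start in range(0, n, chunk_size):
#             xb, yb = x[start:start + chunk_size], y[start:start + chunk_size]
#             loss = loss_fn(model(xb), yb) * len(xb) / n  # weight each chunk by its size
#             loss.backward()                              # gradients accumulate across chunks
#         optimizer.step()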
# %%
def load_best_model(stage):
model.load_state_dict(torch.load(get_checkpoint_path(stage))['model'])
if 'swa' in args:
print('\nRunning SWA...')
swa_model = swa_utils.AveragedModel(model)
swa_progress = zero.ProgressTracker(None)
best_swa_model = None
for stage in range(args['swa']['n_checkpoints']):
load_best_model(stage)
swa_model.update_parameters(model)
if stage > 0 and args['swa']['update_bn_n_epochs']:
zero.set_randomness(args['seed'])
with torch.no_grad():
swa_utils.update_bn(
itertools.chain.from_iterable(
zero.iter_batches(
X[lib.TRAIN], chunk_size or batch_size, shuffle=True
)
for _ in range(args['swa']['update_bn_n_epochs'])
),
swa_model,
device,
)
swa_progress.update(
evaluate(swa_model if stage > 0 else model, [lib.VAL])[0][lib.VAL]['score']
)
if swa_progress.success:
print('New best SWA checkpoint!')
stats['n_swa_checkpoints'] = stage + 1
if stage > 0:
best_swa_model = deepcopy(swa_model)
if best_swa_model is None:
load_best_model(0)
else:
lib.load_swa_state_dict(model, best_swa_model)
else:
load_best_model(0)
print('\nRunning the final evaluation...')
stats['metrics'], predictions = evaluate(model, lib.PARTS)
for k, v in predictions.items():
np.save(output / f'p_{k}.npy', v)
wandb.run.summary[f"final_prediction_{k}"] = v
stats['time_final'] = lib.format_seconds(timer())
save_checkpoint('final')
print(f'Done! Time elapsed: {stats['time_final']}')
print(
'\n!!! WARNING !!! The metrics for a single model are stored under the "metrics_0" key.\n'
)
| # %%
import gc
import itertools
import math
import typing as ty
from copy import deepcopy
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim.swa_utils as swa_utils
import zero
from torch import Tensor
import wandb
import lib
import lib.node as node
# %%
class NODE(nn.Module):
def __init__(
self,
*,
d_in: int,
num_layers: int,
layer_dim: int,
depth: int,
tree_dim: int,
choice_function: str,
bin_function: str,
d_out: int,
categories: ty.Optional[ty.List[int]],
d_embedding: int,
) -> None:
super().__init__()
if categories is not None:
d_in += len(categories) * d_embedding
category_offsets = torch.tensor([0] + categories[:-1]).cumsum(0)
self.register_buffer('category_offsets', category_offsets)
self.category_embeddings = nn.Embedding(sum(categories), d_embedding)
nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))
print(f'{self.category_embeddings.weight.shape=}')
self.d_out = d_out
self.block = node.DenseBlock(
input_dim=d_in,
num_layers=num_layers,
layer_dim=layer_dim,
depth=depth,
tree_dim=tree_dim,
bin_function=getattr(node, bin_function),
choice_function=getattr(node, choice_function),
flatten_output=False,
)
def forward(self, x_num: Tensor, x_cat: Tensor) -> Tensor:
if x_cat is not None:
x_cat = self.category_embeddings(x_cat + self.category_offsets[None])
x = torch.cat([x_num, x_cat.view(x_cat.size(0), -1)], dim=-1)
else:
x = x_num
x = self.block(x)
x = x[..., : self.d_out].mean(dim=-2)
x = x.squeeze(-1)
return x
# %%
args, output = lib.load_config()
assert 'weight_decay' not in args, 'NODE architecture performs badly with weight decay'
if 'swa' in args:
assert args['swa']['n_checkpoints'] > 1
# %%
zero.set_randomness(args['seed'])
dataset_dir = lib.get_path(args['data']['path'])
stats: ty.Dict[str, ty.Any] = {
'dataset': dataset_dir.name,
'algorithm': Path(__file__).stem,
**lib.load_json(output / 'stats.json'),
}
D = lib.Dataset.from_dir(dataset_dir)
X = D.build_X(
normalization=args['data'].get('normalization'),
num_nan_policy='mean',
cat_nan_policy='new',
cat_policy=args['data'].get('cat_policy', 'indices'),
cat_min_frequency=args['data'].get('cat_min_frequency', 0.0),
seed=args['seed'],
)
if not isinstance(X, tuple):
X = (X, None)
zero.set_randomness(args['seed'])
Y, y_info = D.build_y(args['data'].get('y_policy'))
lib.dump_pickle(y_info, output / 'y_info.pickle')
X = tuple(None if x is None else lib.to_tensors(x) for x in X)
Y = lib.to_tensors(Y)
device = lib.get_device()
if device.type != 'cpu':
X = tuple(None if x is None else {k: v.to(device) for k, v in x.items()} for x in X)
Y_device = {k: v.to(device) for k, v in Y.items()}
else:
Y_device = Y
X_num, X_cat = X
if not D.is_multiclass:
Y_device = {k: v.float() for k, v in Y_device.items()}
train_size = D.size(lib.TRAIN)
batch_size, epoch_size = (
stats['batch_size'],
stats['epoch_size'],
) = lib.get_epoch_parameters(train_size, args['training'].get('batch_size', 'v3'))
eval_batch_size = args['training']['eval_batch_size']
chunk_size = None
stats['chunk_sizes'] = {}
stats['eval_batch_sizes'] = {}
loss_fn = (
F.binary_cross_entropy_with_logits
if D.is_binclass
else F.cross_entropy
if D.is_multiclass
else F.mse_loss
)
args['model'].setdefault('d_embedding', None)
model = NODE(
d_in=0 if X_num is None else X_num['train'].shape[1],
d_out=D.info['n_classes'] if D.is_multiclass else 1,
categories=lib.get_categories(X_cat),
**args['model'],
).to(device)
if torch.cuda.device_count() > 1: # type: ignore[code]
print('Using nn.DataParallel')
model = nn.DataParallel(model)
stats['n_parameters'] = lib.get_n_parameters(model)
optimizer = lib.make_optimizer(
args['training']['optimizer'],
model.parameters(),
args['training']['lr'],
args['training']['weight_decay'],
)
stream = zero.Stream(lib.IndexLoader(train_size, batch_size, True, device))
progress = zero.ProgressTracker(args['training']['patience'])
training_log = {lib.TRAIN: [], lib.VAL: [], lib.TEST: []}
stage = 0
lr_n_decays = 0
timer = zero.Timer()
swa_stage_first_epoch = None
def print_epoch_info():
print(
f'\n>>> Epoch {stream.epoch} | Stage {stage} | {lib.format_seconds(timer())} | {output}'
)
details = {'lr': lib.get_lr(optimizer), 'chunk_size': chunk_size}
details.update((x, stats[x]) for x in ['batch_size', 'epoch_size', 'n_parameters'])
print(' | '.join(f'{k} = {v}' for k, v in details.items()))
def get_checkpoint_path(suffix):
return output / f'checkpoint_{suffix}.pt'
def step(batch_idx):
logits = model(
X_num[lib.TRAIN][batch_idx],
None if X_cat is None else X_cat[lib.TRAIN][batch_idx],
)
targets = Y_device[lib.TRAIN][batch_idx] # type: ignore[code]
if not D.is_multiclass:
targets = targets.to(logits.dtype)
return logits, targets
def _predict(part):
result = []
for idx in lib.IndexLoader(
D.size(part),
args['training']['eval_batch_size'],
False,
device,
):
result.append(
model(
None if X_num is None else X_num[part][idx],
None if X_cat is None else X_cat[part][idx],
)
)
return torch.cat(result).cpu()
@torch.no_grad()
def predict(m, part):
global eval_batch_size
m.eval()
random_state = zero.get_random_state()
while eval_batch_size:
try:
zero.set_random_state(random_state)
return _predict(part)
except RuntimeError as err:
if not lib.is_oom_exception(err):
raise
zero.free_memory()
gc.collect()
eval_batch_size //= 2
print('New eval batch size:', eval_batch_size)
stats['eval_batch_sizes'][stream.epoch] = eval_batch_size
raise RuntimeError('Not enough memory even for eval_batch_size=1')
@torch.no_grad()
def evaluate(m, parts):
metrics = {}
predictions = {}
for part in parts:
predictions[part] = predict(m, part).numpy()
metrics[part] = lib.calculate_metrics(
D.info['task_type'],
Y[part].numpy(), # type: ignore[code]
predictions[part], # type: ignore[code]
'logits',
y_info,
)
for part, part_metrics in metrics.items():
print(f'[{part:<5}]', lib.make_summary(part_metrics))
return metrics, predictions
STATE_VARIABLES = [
'progress',
'stats',
'timer',
'training_log',
'stage',
'swa_stage_first_epoch',
'lr_n_decays',
'chunk_size',
'eval_batch_size',
]
def save_checkpoint(suffix):
model_artifact = wandb.Artifact('node-artifact', type='model')
torch.save(
{
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'stream': stream.state_dict(),
'random_state': zero.get_random_state(),
**{x: globals()[x] for x in STATE_VARIABLES},
},
get_checkpoint_path(suffix),
)
lib.dump_stats(stats, output, suffix == 'final')
lib.backup_output(output)
model_artifact.add_file(get_checkpoint_path(suffix))
wandb.run.log_artifact(model_artifact)
for stage in list(range(args.get('swa', {}).get('n_checkpoints', 1)))[::-1]:
if get_checkpoint_path(stage).exists():
print(f'Loading checkpoint {get_checkpoint_path(stage).name}')
c = torch.load(get_checkpoint_path(stage))
model.load_state_dict(c['model'])
optimizer.load_state_dict(c['optimizer'])
stream.load_state_dict(c['stream'])
globals().update({x: c[x] for x in STATE_VARIABLES})
stats.setdefault('old_stats', []).append(deepcopy(stats))
stats.setdefault('continuations', []).append(stream.epoch)
zero.set_random_state(c['random_state'])
break
# %%
timer.run()
with torch.no_grad():
# NODE-specific initialization
if stream.epoch == 0:
model.eval()
size = 2048
while True:
try:
zero.set_randomness(args['seed'])
x = step(torch.randperm(train_size)[:size])
del x
except RuntimeError as err:
if not lib.is_oom_exception(err):
raise
size //= 2
else:
break
wandb.init(project="RTDL", config=args)
for epoch in stream.epochs(args['training']['n_epochs']):
print_epoch_info()
epoch_losses = []
for batch_idx in epoch:
loss, new_chunk_size = lib.learn_with_auto_virtual_batch(
model, optimizer, loss_fn, step, batch_idx, batch_size, chunk_size
)
wandb.log({"Training Loss": loss})
epoch_losses.append(loss.detach())
if new_chunk_size and new_chunk_size < (chunk_size or batch_size):
chunk_size = new_chunk_size
print('New chunk size:', chunk_size)
stats['chunk_sizes'][stream.iteration] = chunk_size
zero.free_memory()
gc.collect()
epoch_losses = torch.stack(epoch_losses).tolist()
training_log[lib.TRAIN].extend(epoch_losses)
print(f'[{lib.TRAIN}] loss = {round(sum(epoch_losses) / len(epoch_losses), 3)}')
metrics, predictions = evaluate(model, [lib.VAL, lib.TEST])
wandb.log({"score": metrics[lib.VAL]['score']})
for k, v in metrics.items():
training_log[k].append(v)
wandb.log({k:v})
progress.update(metrics[lib.VAL]['score'])
if progress.success:
print('New best epoch!')
stats[f'best_epoch_{stage}'] = stream.epoch
stats[f'metrics_{stage}'] = metrics
save_checkpoint(stage)
for k, v in predictions.items():
np.save(output / f'p_{stage}_{k}.npy', v)
wandb.log({f"predictions_{k}": v})
elif progress.fail:
if stage == 0 and lr_n_decays < args['training']['lr_n_decays']:
print('Reducing lr...')
stats[f'lr_decay_{lr_n_decays}'] = stream.epoch
lib.set_lr(optimizer, lib.get_lr(optimizer) * args['training']['lr_decay'])
lr_n_decays += 1
progress.forget_bad_updates()
else:
print(f'Finishing stage {stage}...')
stats[f'time_{stage}'] = lib.format_seconds(timer())
if 'swa' not in args or stage + 1 == args['swa']['n_checkpoints']:
break
best_stage_checkpoint = torch.load(get_checkpoint_path(stage))
model.load_state_dict(best_stage_checkpoint['model'])
optimizer.load_state_dict(best_stage_checkpoint['optimizer'])
progress = zero.ProgressTracker(args['swa']['patience'])
lib.set_lr(optimizer, args['training']['lr'] * args['swa']['lr_factor'])
swa_stage_first_epoch = stream.epoch + 1
stage += 1
if stream.epoch == swa_stage_first_epoch:
lib.set_lr(optimizer, args['training']['lr'])
# %%
def load_best_model(stage):
model.load_state_dict(torch.load(get_checkpoint_path(stage))['model'])
if 'swa' in args:
print('\nRunning SWA...')
swa_model = swa_utils.AveragedModel(model)
swa_progress = zero.ProgressTracker(None)
best_swa_model = None
for stage in range(args['swa']['n_checkpoints']):
load_best_model(stage)
swa_model.update_parameters(model)
if stage > 0 and args['swa']['update_bn_n_epochs']:
zero.set_randomness(args['seed'])
with torch.no_grad():
swa_utils.update_bn(
itertools.chain.from_iterable(
zero.iter_batches(
X[lib.TRAIN], chunk_size or batch_size, shuffle=True
)
for _ in range(args['swa']['update_bn_n_epochs'])
),
swa_model,
device,
)
swa_progress.update(
evaluate(swa_model if stage > 0 else model, [lib.VAL])[0][lib.VAL]['score']
)
if swa_progress.success:
print('New best SWA checkpoint!')
stats['n_swa_checkpoints'] = stage + 1
if stage > 0:
best_swa_model = deepcopy(swa_model)
if best_swa_model is None:
load_best_model(0)
else:
lib.load_swa_state_dict(model, best_swa_model)
else:
load_best_model(0)
print('\nRunning the final evaluation...')
stats['metrics'], predictions = evaluate(model, lib.PARTS)
for k, v in predictions.items():
np.save(output / f'p_{k}.npy', v)
wandb.run.summary[f"final_prediction_{k}"] = v
stats['time_final'] = lib.format_seconds(timer())
save_checkpoint('final')
print(f'Done! Time elapsed: {stats["time_final"]}')
print(
'\n!!! WARNING !!! The metrics for a single model are stored under the "metrics_0" key.\n'
)
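# --- Hedged illustration (not part of the original script): the SWA pattern the
# loop above relies on. Checkpoint weights are averaged with AveragedModel and
# BatchNorm statistics are refreshed with update_bn. The model, data, and loop
# counts here are placeholders for illustration only.
if __name__ == '__main__':
    import torch
    import torch.nn as nn
    from torch.optim import swa_utils as _swa
    base = nn.Sequential(nn.Linear(8, 16), nn.BatchNorm1d(16), nn.ReLU(), nn.Linear(16, 1))
    averaged = _swa.AveragedModel(base)
    loader = [torch.randn(32, 8) for _ in range(4)]   # stand-in for a real DataLoader
    for _ in range(3):                                # pretend: three "best" checkpoints
        averaged.update_parameters(base)              # running average of the weights
    _swa.update_bn(loader, averaged)                  # recompute BatchNorm running stats
    print(averaged(torch.randn(2, 8)).shape)          # torch.Size([2, 1])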
|
import argparse
import torch
import os
from transformers import AutoTokenizer, AutoModelForMaskedLM
from utils import load_data, write_to_file
from metric import compute_metrics
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", default="model/chinese_bert", type=str)
parser.add_argument("--save_path", default="./", type=str)
parser.add_argument("--test_file", default="data/sighan/test.json", type=str)
args = parser.parse_args()
assert os.path.exists(args.save_path)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
model = AutoModelForMaskedLM.from_pretrained(args.model_name_or_path)
checkpoint = torch.load(os.path.join(args.save_path, "model.tar"), map_location=device)
model.load_state_dict(checkpoint["model_state_dict"])
model = model.to(device)
src, trg = load_data(file_path=args.test_file, mode="test")
results = []
for s, t in zip(src, trg):
inputs = tokenizer(t, return_tensors="pt")
inputs = inputs.to(device)
outputs = model(**inputs)
logits = outputs.logits[0][1:-1] #filter [CLS] & [SEP]
predict = tokenizer.convert_ids_to_tokens(logits.argmax(-1).tolist())
s_tok = tokenizer.tokenize(s)
t_tok = tokenizer.tokenize(t)
assert len(s_tok) == len(t_tok) == len(predict)
results.append([s_tok, t_tok, predict])
metrics = compute_metrics(results)
print(f"{", ".join([f"{key}={value:.4f}' for key, value in metrics.items()])}")
write_to_file(file_path=os.path.join(args.save_path, "result_test.json"), results=results)
print(f"write to {os.path.join(args.save_path, "result_test.json")}")
| import argparse
import torch
import os
from transformers import AutoTokenizer, AutoModelForMaskedLM
from utils import load_data, write_to_file
from metric import compute_metrics
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", default="model/chinese_bert", type=str)
parser.add_argument("--save_path", default="./", type=str)
parser.add_argument("--test_file", default="data/sighan/test.json", type=str)
args = parser.parse_args()
assert os.path.exists(args.save_path)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
model = AutoModelForMaskedLM.from_pretrained(args.model_name_or_path)
checkpoint = torch.load(os.path.join(args.save_path, "model.tar"), map_location=device)
model.load_state_dict(checkpoint["model_state_dict"])
model = model.to(device)
src, trg = load_data(file_path=args.test_file, mode="test")
results = []
for s, t in zip(src, trg):
inputs = tokenizer(t, return_tensors="pt")
inputs = inputs.to(device)
outputs = model(**inputs)
logits = outputs.logits[0][1:-1] #filter [CLS] & [SEP]
predict = tokenizer.convert_ids_to_tokens(logits.argmax(-1).tolist())
s_tok = tokenizer.tokenize(s)
t_tok = tokenizer.tokenize(t)
assert len(s_tok) == len(t_tok) == len(predict)
results.append([s_tok, t_tok, predict])
metrics = compute_metrics(results)
print(f"{', '.join([f'{key}={value:.4f}' for key, value in metrics.items()])}")
write_to_file(file_path=os.path.join(args.save_path, "result_test.json"), results=results)
print(f"write to {os.path.join(args.save_path, 'result_test.json')}")
|
from datetime import datetime
import os
from bson.objectid import ObjectId
from girder import logger
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.setting import Setting
from girder.models.user import User
from girder.settings import SettingKey
from girder.utility.mail_utils import renderTemplate, sendMail
from dive_utils import asbool, fromMeta
from dive_utils.constants import (
AssetstoreSourceMarker,
AssetstoreSourcePathMarker,
DatasetMarker,
DefaultVideoFPS,
FPSMarker,
ImageSequenceType,
TypeMarker,
VideoType,
imageRegex,
videoRegex,
)
def send_new_user_email(event):
try:
info = event.info
email = info.get('email')
brandName = Setting().get(SettingKey.BRAND_NAME)
rendered = renderTemplate('welcome.mako')
sendMail(f'Welcome to {brandName}', rendered, [email])
except Exception:
logger.exception("Failed to send new user email")
def process_assetstore_import(event, meta: dict):
"""
Function for appending the appropriate metadata to no-copy import data
"""
info = event.info
objectType = info.get("type")
importPath = info.get("importPath")
now = datetime.now()
if not importPath or not objectType or objectType != "item":
return
dataset_type = None
item = Item().findOne({"_id": info["id"]})
item['meta'].update(
{
**meta,
AssetstoreSourcePathMarker: importPath,
}
)
# TODO figure out what's going on here?
if imageRegex.search(importPath):
dataset_type = ImageSequenceType
elif videoRegex.search(importPath):
        # Look for existing video dataset directory
parentFolder = Folder().findOne({"_id": item["folderId"]})
userId = parentFolder['creatorId'] or parentFolder['baseParentId']
user = User().findOne({'_id': ObjectId(userId)})
        foldername = f'Video {item["name"]}'
dest = Folder().createFolder(parentFolder, foldername, creator=user, reuseExisting=True)
if dest['created'] < now:
# Remove the old item, replace it with the new one.
oldItem = Item().findOne({'folderId': dest['_id'], 'name': item['name']})
if oldItem is not None:
Item().remove(oldItem)
Item().move(item, dest)
dataset_type = VideoType
if dataset_type is not None:
# Update metadata of parent folder
# FPS is hardcoded for now
Item().save(item)
folder = Folder().findOne({"_id": item["folderId"]})
root, _ = os.path.split(importPath)
if not asbool(fromMeta(folder, DatasetMarker)):
folder["meta"].update(
{
TypeMarker: dataset_type,
FPSMarker: DefaultVideoFPS,
DatasetMarker: True,
AssetstoreSourcePathMarker: root,
**meta,
}
)
Folder().save(folder)
def process_fs_import(event):
return process_assetstore_import(event, {AssetstoreSourceMarker: 'filesystem'})
def process_s3_import(event):
return process_assetstore_import(event, {AssetstoreSourceMarker: 's3'})
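# --- Hedged illustration (not part of the plugin module above): the kind of
# extension-based dispatch the import handler performs. imageRegex and videoRegex
# come from dive_utils.constants and are not shown here; the patterns below are
# stand-ins for illustration only.
if __name__ == '__main__':
    import re as _re
    _image_re = _re.compile(r'\.(png|jpe?g|tiff?)$', _re.IGNORECASE)    # assumed pattern
    _video_re = _re.compile(r'\.(mp4|avi|mov|mpe?g)$', _re.IGNORECASE)  # assumed pattern
    for name in ('frame_0001.png', 'clip.mp4', 'notes.txt'):
        kind = ('image-sequence' if _image_re.search(name)
                else 'video' if _video_re.search(name) else None)
        print(name, '->', kind)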
| from datetime import datetime
import os
from bson.objectid import ObjectId
from girder import logger
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.setting import Setting
from girder.models.user import User
from girder.settings import SettingKey
from girder.utility.mail_utils import renderTemplate, sendMail
from dive_utils import asbool, fromMeta
from dive_utils.constants import (
AssetstoreSourceMarker,
AssetstoreSourcePathMarker,
DatasetMarker,
DefaultVideoFPS,
FPSMarker,
ImageSequenceType,
TypeMarker,
VideoType,
imageRegex,
videoRegex,
)
def send_new_user_email(event):
try:
info = event.info
email = info.get('email')
brandName = Setting().get(SettingKey.BRAND_NAME)
rendered = renderTemplate('welcome.mako')
sendMail(f'Welcome to {brandName}', rendered, [email])
except Exception:
logger.exception("Failed to send new user email")
def process_assetstore_import(event, meta: dict):
"""
Function for appending the appropriate metadata to no-copy import data
"""
info = event.info
objectType = info.get("type")
importPath = info.get("importPath")
now = datetime.now()
if not importPath or not objectType or objectType != "item":
return
dataset_type = None
item = Item().findOne({"_id": info["id"]})
item['meta'].update(
{
**meta,
AssetstoreSourcePathMarker: importPath,
}
)
# TODO figure out what's going on here?
if imageRegex.search(importPath):
dataset_type = ImageSequenceType
elif videoRegex.search(importPath):
        # Look for existing video dataset directory
parentFolder = Folder().findOne({"_id": item["folderId"]})
userId = parentFolder['creatorId'] or parentFolder['baseParentId']
user = User().findOne({'_id': ObjectId(userId)})
foldername = f'Video {item["name"]}'
dest = Folder().createFolder(parentFolder, foldername, creator=user, reuseExisting=True)
if dest['created'] < now:
# Remove the old item, replace it with the new one.
oldItem = Item().findOne({'folderId': dest['_id'], 'name': item['name']})
if oldItem is not None:
Item().remove(oldItem)
Item().move(item, dest)
dataset_type = VideoType
if dataset_type is not None:
# Update metadata of parent folder
# FPS is hardcoded for now
Item().save(item)
folder = Folder().findOne({"_id": item["folderId"]})
root, _ = os.path.split(importPath)
if not asbool(fromMeta(folder, DatasetMarker)):
folder["meta"].update(
{
TypeMarker: dataset_type,
FPSMarker: DefaultVideoFPS,
DatasetMarker: True,
AssetstoreSourcePathMarker: root,
**meta,
}
)
Folder().save(folder)
def process_fs_import(event):
return process_assetstore_import(event, {AssetstoreSourceMarker: 'filesystem'})
def process_s3_import(event):
return process_assetstore_import(event, {AssetstoreSourceMarker: 's3'})
|
import os
from pathlib import Path, PurePosixPath
from typing import Any, Dict, Iterable, Optional, Union
from ..client import Client, register_client_class
from ..cloudpath import implementation_registry
from .s3path import S3Path
try:
from boto3.session import Session
from boto3.s3.transfer import TransferConfig
from botocore.config import Config
from botocore.exceptions import ClientError
import botocore.session
except ModuleNotFoundError:
implementation_registry["s3"].dependencies_loaded = False
@register_client_class("s3")
class S3Client(Client):
"""Client class for AWS S3 which handles authentication with AWS for [`S3Path`](../s3path/)
instances. See documentation for the [`__init__` method][cloudpathlib.s3.s3client.S3Client.__init__]
for detailed authentication options."""
def __init__(
self,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
no_sign_request: Optional[bool] = False,
botocore_session: Optional["botocore.session.Session"] = None,
profile_name: Optional[str] = None,
boto3_session: Optional["Session"] = None,
local_cache_dir: Optional[Union[str, os.PathLike]] = None,
endpoint_url: Optional[str] = None,
boto3_transfer_config: Optional["TransferConfig"] = None,
):
"""Class constructor. Sets up a boto3 [`Session`](
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html).
Directly supports the same authentication interface, as well as the same environment
variables supported by boto3. See [boto3 Session documentation](
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/session.html).
If no authentication arguments or environment variables are provided, then the client will
be instantiated as anonymous, which will only have access to public buckets.
Args:
aws_access_key_id (Optional[str]): AWS access key ID.
aws_secret_access_key (Optional[str]): AWS secret access key.
aws_session_token (Optional[str]): Session key for your AWS account. This is only
                needed when you are using temporary credentials.
no_sign_request: (Optional[bool]): If `True`, credentials are not looked for and we use unsigned
requests to fetch resources. This will only allow access to public resources. This is equivalent
to `--no-sign-request` in the AWS CLI (https://docs.aws.amazon.com/cli/latest/reference/).
botocore_session (Optional[botocore.session.Session]): An already instantiated botocore
Session.
profile_name (Optional[str]): Profile name of a profile in a shared credentials file.
boto3_session (Optional[Session]): An already instantiated boto3 Session.
local_cache_dir (Optional[Union[str, os.PathLike]]): Path to directory to use as cache
for downloaded files. If None, will use a temporary directory.
            endpoint_url (Optional[str]): S3 server endpoint URL to use for the constructed boto3 S3 resource and client.
                Set it to access a custom-deployed, S3-compatible object store such as MinIO, Ceph, or any other compatible store.
            boto3_transfer_config (Optional[TransferConfig]): Instantiated TransferConfig for managing s3 transfers.
(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/customizations/s3.html#boto3.s3.transfer.TransferConfig)
"""
endpoint_url = endpoint_url or os.getenv("AWS_ENDPOINT_URL")
if boto3_session is not None:
self.sess = boto3_session
else:
self.sess = Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
botocore_session=botocore_session,
profile_name=profile_name,
)
if no_sign_request:
self.s3 = self.sess.resource(
"s3",
endpoint_url=endpoint_url,
config=Config(signature_version=botocore.session.UNSIGNED),
)
self.client = self.sess.client(
"s3",
endpoint_url=endpoint_url,
config=Config(signature_version=botocore.session.UNSIGNED),
)
else:
self.s3 = self.sess.resource("s3", endpoint_url=endpoint_url)
self.client = self.sess.client("s3", endpoint_url=endpoint_url)
self.boto3_transfer_config = boto3_transfer_config
super().__init__(local_cache_dir=local_cache_dir)
def _get_metadata(self, cloud_path: S3Path) -> Dict[str, Any]:
data = self.s3.ObjectSummary(cloud_path.bucket, cloud_path.key).get()
return {
"last_modified": data["LastModified"],
"size": data["ContentLength"],
"etag": data["ETag"],
"mime": data["ContentType"],
"extra": data["Metadata"],
}
def _download_file(self, cloud_path: S3Path, local_path: Union[str, os.PathLike]) -> Path:
local_path = Path(local_path)
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.download_file(str(local_path), Config=self.boto3_transfer_config)
return local_path
def _is_file_or_dir(self, cloud_path: S3Path) -> Optional[str]:
# short-circuit the root-level bucket
if not cloud_path.key:
return "dir"
# get first item by listing at least one key
s3_obj = self._s3_file_query(cloud_path)
if s3_obj is None:
return None
# since S3 only returns files when filtering objects:
# if the first item key is equal to the path key, this is a file
if s3_obj.key == cloud_path.key:
# "fake" directories on S3 can be created in the console UI
# these are 0-size keys that end in `/`
# Ref: https://github.com/boto/boto3/issues/377
if s3_obj.key.endswith("/") and s3_obj.content_length == 0:
return "dir"
else:
return "file"
else:
return "dir"
def _exists(self, cloud_path: S3Path) -> bool:
return self._s3_file_query(cloud_path) is not None
def _s3_file_query(self, cloud_path: S3Path):
"""Boto3 query used for quick checks of existence and if path is file/dir"""
# first check if this is an object that we can access directly
try:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.load()
return obj
# else, confirm it is a dir by filtering to the first item under the prefix
except ClientError:
return next(
(
obj
for obj in (
self.s3.Bucket(cloud_path.bucket)
.objects.filter(Prefix=cloud_path.key)
.limit(1)
)
),
None,
)
def _list_dir(self, cloud_path: S3Path, recursive=False) -> Iterable[S3Path]:
bucket = self.s3.Bucket(cloud_path.bucket)
prefix = cloud_path.key
if prefix and not prefix.endswith("/"):
prefix += "/"
yielded_dirs = set()
if recursive:
for o in bucket.objects.filter(Prefix=prefix):
# get directory from this path
for parent in PurePosixPath(o.key[len(prefix) :]).parents:
# if we haven't surfaced their directory already
if parent not in yielded_dirs and str(parent) != ".":
yield self.CloudPath(f"s3://{cloud_path.bucket}/{prefix}{parent}")
yielded_dirs.add(parent)
yield self.CloudPath(f"s3://{o.bucket_name}/{o.key}")
else:
# non recursive is best done with old client API rather than resource
paginator = self.client.get_paginator("list_objects")
for result in paginator.paginate(
Bucket=cloud_path.bucket, Prefix=prefix, Delimiter="/"
):
# sub directory names
for result_prefix in result.get("CommonPrefixes", []):
yield self.CloudPath(f"s3://{cloud_path.bucket}/{result_prefix.get("Prefix")}")
# files in the directory
for result_key in result.get("Contents", []):
if result_key.get('Size') > 0:
yield self.CloudPath(f"s3://{cloud_path.bucket}/{result_key.get("Key")}")
def _move_file(self, src: S3Path, dst: S3Path, remove_src: bool = True) -> S3Path:
# just a touch, so "REPLACE" metadata
if src == dst:
o = self.s3.Object(src.bucket, src.key)
o.copy_from(
CopySource={"Bucket": src.bucket, "Key": src.key},
Metadata=self._get_metadata(src).get("extra", {}),
MetadataDirective="REPLACE",
)
else:
target = self.s3.Object(dst.bucket, dst.key)
target.copy({"Bucket": src.bucket, "Key": src.key})
if remove_src:
self._remove(src)
return dst
def _remove(self, cloud_path: S3Path) -> None:
try:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
# will throw if not a file
obj.load()
resp = obj.delete()
assert resp.get("ResponseMetadata").get("HTTPStatusCode") == 204
except ClientError:
            # try to delete as a directory instead
bucket = self.s3.Bucket(cloud_path.bucket)
prefix = cloud_path.key
if prefix and not prefix.endswith("/"):
prefix += "/"
resp = bucket.objects.filter(Prefix=prefix).delete()
# ensure directory deleted; if cloud_path did not exist at all
# resp will be [], so no need to check success
if resp:
assert resp[0].get("ResponseMetadata").get("HTTPStatusCode") == 200
def _upload_file(self, local_path: Union[str, os.PathLike], cloud_path: S3Path) -> S3Path:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.upload_file(str(local_path), Config=self.boto3_transfer_config)
return cloud_path
S3Client.S3Path = S3Client.CloudPath # type: ignore
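# --- Hedged usage sketch (not part of the module above). Shows how the client is
# typically constructed; the bucket name below is a placeholder, and actually
# listing or downloading from it would require network access.
if __name__ == '__main__':
    client = S3Client()                                    # or S3Client(no_sign_request=True) for public buckets
    path = client.CloudPath('s3://example-placeholder-bucket/data.csv')
    print(type(path).__name__, path.bucket, path.key)      # S3Path example-placeholder-bucket data.csv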
| import os
from pathlib import Path, PurePosixPath
from typing import Any, Dict, Iterable, Optional, Union
from ..client import Client, register_client_class
from ..cloudpath import implementation_registry
from .s3path import S3Path
try:
from boto3.session import Session
from boto3.s3.transfer import TransferConfig
from botocore.config import Config
from botocore.exceptions import ClientError
import botocore.session
except ModuleNotFoundError:
implementation_registry["s3"].dependencies_loaded = False
@register_client_class("s3")
class S3Client(Client):
"""Client class for AWS S3 which handles authentication with AWS for [`S3Path`](../s3path/)
instances. See documentation for the [`__init__` method][cloudpathlib.s3.s3client.S3Client.__init__]
for detailed authentication options."""
def __init__(
self,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
no_sign_request: Optional[bool] = False,
botocore_session: Optional["botocore.session.Session"] = None,
profile_name: Optional[str] = None,
boto3_session: Optional["Session"] = None,
local_cache_dir: Optional[Union[str, os.PathLike]] = None,
endpoint_url: Optional[str] = None,
boto3_transfer_config: Optional["TransferConfig"] = None,
):
"""Class constructor. Sets up a boto3 [`Session`](
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html).
Directly supports the same authentication interface, as well as the same environment
variables supported by boto3. See [boto3 Session documentation](
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/session.html).
If no authentication arguments or environment variables are provided, then the client will
be instantiated as anonymous, which will only have access to public buckets.
Args:
aws_access_key_id (Optional[str]): AWS access key ID.
aws_secret_access_key (Optional[str]): AWS secret access key.
aws_session_token (Optional[str]): Session key for your AWS account. This is only
                needed when you are using temporary credentials.
no_sign_request: (Optional[bool]): If `True`, credentials are not looked for and we use unsigned
requests to fetch resources. This will only allow access to public resources. This is equivalent
to `--no-sign-request` in the AWS CLI (https://docs.aws.amazon.com/cli/latest/reference/).
botocore_session (Optional[botocore.session.Session]): An already instantiated botocore
Session.
profile_name (Optional[str]): Profile name of a profile in a shared credentials file.
boto3_session (Optional[Session]): An already instantiated boto3 Session.
local_cache_dir (Optional[Union[str, os.PathLike]]): Path to directory to use as cache
for downloaded files. If None, will use a temporary directory.
            endpoint_url (Optional[str]): S3 server endpoint URL to use for the constructed boto3 S3 resource and client.
                Set it to access a custom-deployed, S3-compatible object store such as MinIO, Ceph, or any other compatible store.
            boto3_transfer_config (Optional[TransferConfig]): Instantiated TransferConfig for managing s3 transfers.
(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/customizations/s3.html#boto3.s3.transfer.TransferConfig)
"""
endpoint_url = endpoint_url or os.getenv("AWS_ENDPOINT_URL")
if boto3_session is not None:
self.sess = boto3_session
else:
self.sess = Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
botocore_session=botocore_session,
profile_name=profile_name,
)
if no_sign_request:
self.s3 = self.sess.resource(
"s3",
endpoint_url=endpoint_url,
config=Config(signature_version=botocore.session.UNSIGNED),
)
self.client = self.sess.client(
"s3",
endpoint_url=endpoint_url,
config=Config(signature_version=botocore.session.UNSIGNED),
)
else:
self.s3 = self.sess.resource("s3", endpoint_url=endpoint_url)
self.client = self.sess.client("s3", endpoint_url=endpoint_url)
self.boto3_transfer_config = boto3_transfer_config
super().__init__(local_cache_dir=local_cache_dir)
def _get_metadata(self, cloud_path: S3Path) -> Dict[str, Any]:
data = self.s3.ObjectSummary(cloud_path.bucket, cloud_path.key).get()
return {
"last_modified": data["LastModified"],
"size": data["ContentLength"],
"etag": data["ETag"],
"mime": data["ContentType"],
"extra": data["Metadata"],
}
def _download_file(self, cloud_path: S3Path, local_path: Union[str, os.PathLike]) -> Path:
local_path = Path(local_path)
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.download_file(str(local_path), Config=self.boto3_transfer_config)
return local_path
def _is_file_or_dir(self, cloud_path: S3Path) -> Optional[str]:
# short-circuit the root-level bucket
if not cloud_path.key:
return "dir"
# get first item by listing at least one key
s3_obj = self._s3_file_query(cloud_path)
if s3_obj is None:
return None
# since S3 only returns files when filtering objects:
# if the first item key is equal to the path key, this is a file
if s3_obj.key == cloud_path.key:
# "fake" directories on S3 can be created in the console UI
# these are 0-size keys that end in `/`
# Ref: https://github.com/boto/boto3/issues/377
if s3_obj.key.endswith("/") and s3_obj.content_length == 0:
return "dir"
else:
return "file"
else:
return "dir"
def _exists(self, cloud_path: S3Path) -> bool:
return self._s3_file_query(cloud_path) is not None
def _s3_file_query(self, cloud_path: S3Path):
"""Boto3 query used for quick checks of existence and if path is file/dir"""
# first check if this is an object that we can access directly
try:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.load()
return obj
# else, confirm it is a dir by filtering to the first item under the prefix
except ClientError:
return next(
(
obj
for obj in (
self.s3.Bucket(cloud_path.bucket)
.objects.filter(Prefix=cloud_path.key)
.limit(1)
)
),
None,
)
def _list_dir(self, cloud_path: S3Path, recursive=False) -> Iterable[S3Path]:
bucket = self.s3.Bucket(cloud_path.bucket)
prefix = cloud_path.key
if prefix and not prefix.endswith("/"):
prefix += "/"
yielded_dirs = set()
if recursive:
for o in bucket.objects.filter(Prefix=prefix):
# get directory from this path
for parent in PurePosixPath(o.key[len(prefix) :]).parents:
# if we haven't surfaced their directory already
if parent not in yielded_dirs and str(parent) != ".":
yield self.CloudPath(f"s3://{cloud_path.bucket}/{prefix}{parent}")
yielded_dirs.add(parent)
yield self.CloudPath(f"s3://{o.bucket_name}/{o.key}")
else:
# non recursive is best done with old client API rather than resource
paginator = self.client.get_paginator("list_objects")
for result in paginator.paginate(
Bucket=cloud_path.bucket, Prefix=prefix, Delimiter="/"
):
# sub directory names
for result_prefix in result.get("CommonPrefixes", []):
yield self.CloudPath(f"s3://{cloud_path.bucket}/{result_prefix.get('Prefix')}")
# files in the directory
for result_key in result.get("Contents", []):
if result_key.get('Size') > 0:
yield self.CloudPath(f"s3://{cloud_path.bucket}/{result_key.get('Key')}")
def _move_file(self, src: S3Path, dst: S3Path, remove_src: bool = True) -> S3Path:
# just a touch, so "REPLACE" metadata
if src == dst:
o = self.s3.Object(src.bucket, src.key)
o.copy_from(
CopySource={"Bucket": src.bucket, "Key": src.key},
Metadata=self._get_metadata(src).get("extra", {}),
MetadataDirective="REPLACE",
)
else:
target = self.s3.Object(dst.bucket, dst.key)
target.copy({"Bucket": src.bucket, "Key": src.key})
if remove_src:
self._remove(src)
return dst
def _remove(self, cloud_path: S3Path) -> None:
try:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
# will throw if not a file
obj.load()
resp = obj.delete()
assert resp.get("ResponseMetadata").get("HTTPStatusCode") == 204
except ClientError:
            # try to delete as a directory instead
bucket = self.s3.Bucket(cloud_path.bucket)
prefix = cloud_path.key
if prefix and not prefix.endswith("/"):
prefix += "/"
resp = bucket.objects.filter(Prefix=prefix).delete()
# ensure directory deleted; if cloud_path did not exist at all
# resp will be [], so no need to check success
if resp:
assert resp[0].get("ResponseMetadata").get("HTTPStatusCode") == 200
def _upload_file(self, local_path: Union[str, os.PathLike], cloud_path: S3Path) -> S3Path:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.upload_file(str(local_path), Config=self.boto3_transfer_config)
return cloud_path
S3Client.S3Path = S3Client.CloudPath # type: ignore
|
import json
import re
import requests
from django.conf import settings
from cathie.exceptions import CatsAnswerCodeException
from cathie import authorization
def cats_check_status():
pass
@authorization.check_authorization_for_cats
def cats_submit_solution(source_text: str, problem_id: int, de_id: int, source=None):
    # TODO: handle re-submitting a solution
url = f'{settings.CATS_URL}main.pl?f=api_submit_problem;json=1;'
url += f'sid={authorization.cats_sid()}'
data = {
'de_id': de_id,
'source_text': source_text,
'problem_id': problem_id
}
r = requests.post(url, data=data)
if r.status_code != 200:
raise CatsAnswerCodeException(r)
r_content = json.loads(r.content.decode('utf-8'))
req_ids = None
if r_content.get('href_run_details'):
req_ids = re.search(r'(?<=rid=)\d+', r_content['href_run_details']).group()
if req_ids.isdigit():
req_ids = int(req_ids)
return req_ids, r_content
def cats_submit_problem():
pass
@authorization.check_authorization_for_cats
def cats_check_solution_status(req_ids: int):
url = f'{settings.CATS_URL}main.pl?f=api_get_request_state;req_ids={req_ids};json=1;'
url += f'sid={authorization.cats_sid()}'
r = requests.get(url)
if r.status_code != 200:
raise CatsAnswerCodeException(r)
data = r.json()
if data:
return data[0]['verdict'], data
@authorization.check_authorization_for_cats
def cats_get_problems_from_contest(contest_id):
url = f'{settings.CATS_URL}?f=problems;json=1;cid={contest_id};'
url += f'sid={authorization.cats_sid()}'
answer = requests.get(url)
if answer.status_code != 200:
raise CatsAnswerCodeException(answer)
data = json.loads(answer.content.decode('utf-8'))
# course_problems = CatsProblemSerializer(data=data.problems, many=True)
return data['problems']
def cats_get_problem_description_by_url(description_url):
    url = f'{settings.CATS_URL}{description_url.lstrip("./")}'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/89.0.4356.6 Safari/537.36'
}
request = requests.request(method='get', url=url, headers=headers)
if request.status_code != 200:
raise CatsAnswerCodeException(request)
data = request.content.decode('utf-8')
return data
# def cats_get_problem_by_id(cats_id, user):
# pass
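# --- Hedged illustration (not part of the module above): how the lookbehind in
# cats_submit_solution extracts the request id. The href below is a made-up
# sample, not a real CATS response.
if __name__ == '__main__':
    sample_href = 'main.pl?f=run_details;rid=123456'   # hypothetical href_run_details value
    rid = re.search(r'(?<=rid=)\d+', sample_href).group()
    print(int(rid))                                    # 123456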
| import json
import re
import requests
from django.conf import settings
from cathie.exceptions import CatsAnswerCodeException
from cathie import authorization
def cats_check_status():
pass
@authorization.check_authorization_for_cats
def cats_submit_solution(source_text: str, problem_id: int, de_id: int, source=None):
    # TODO: handle re-submitting a solution
url = f'{settings.CATS_URL}main.pl?f=api_submit_problem;json=1;'
url += f'sid={authorization.cats_sid()}'
data = {
'de_id': de_id,
'source_text': source_text,
'problem_id': problem_id
}
r = requests.post(url, data=data)
if r.status_code != 200:
raise CatsAnswerCodeException(r)
r_content = json.loads(r.content.decode('utf-8'))
req_ids = None
if r_content.get('href_run_details'):
req_ids = re.search(r'(?<=rid=)\d+', r_content['href_run_details']).group()
if req_ids.isdigit():
req_ids = int(req_ids)
return req_ids, r_content
def cats_submit_problem():
pass
@authorization.check_authorization_for_cats
def cats_check_solution_status(req_ids: int):
url = f'{settings.CATS_URL}main.pl?f=api_get_request_state;req_ids={req_ids};json=1;'
url += f'sid={authorization.cats_sid()}'
r = requests.get(url)
if r.status_code != 200:
raise CatsAnswerCodeException(r)
data = r.json()
if data:
return data[0]['verdict'], data
@authorization.check_authorization_for_cats
def cats_get_problems_from_contest(contest_id):
url = f'{settings.CATS_URL}?f=problems;json=1;cid={contest_id};'
url += f'sid={authorization.cats_sid()}'
answer = requests.get(url)
if answer.status_code != 200:
raise CatsAnswerCodeException(answer)
data = json.loads(answer.content.decode('utf-8'))
# course_problems = CatsProblemSerializer(data=data.problems, many=True)
return data['problems']
def cats_get_problem_description_by_url(description_url):
url = f'{settings.CATS_URL}{description_url.lstrip("./")}'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/89.0.4356.6 Safari/537.36'
}
request = requests.request(method='get', url=url, headers=headers)
if request.status_code != 200:
raise CatsAnswerCodeException(request)
data = request.content.decode('utf-8')
return data
# def cats_get_problem_by_id(cats_id, user):
# pass
|
import validators
import requests
import base64
from app import app
def intelixlookup(ioc):
#Get a token
token = get_token()
# use Validators to redirect the IOC to the correct Intelix endpoint
if validators.ipv4(ioc):
u = f"https://de.api.labs.sophos.com/lookup/ips/v1/{ioc}"
elif validators.md5(ioc):
u = f"https://de.api.labs.sophos.com/lookup/urls/v1/{ioc}"
elif validators.sha256(ioc):
u = f"https://de.api.labs.sophos.com/lookup/files/v1/{ioc}"
h = {"Authorization": f"{token}"}
r = requests.get(u, headers=h)
j = r.json()
response = {}
    # File responses
if validators.sha256(ioc) or validators.md5(ioc):
if 'reputationScore' in j:
response['reputationScore'] = j['reputationScore']
if j['reputationScore'] <= 19:
response['fileReputation'] = 'Malware'
elif j['reputationScore'] <= 29:
response['fileReputation'] = 'PUA (potentially unwanted application)'
elif j['reputationScore'] <= 69:
response['fileReputation'] = 'Unknown/suspicious'
elif j['reputationScore'] <= 100:
response['fileReputation'] = 'Known good'
if 'detectionName' in j:
response['detectionName'] = j['detectionName']
response['type'] = 'File Hash'
    # IP responses
if validators.ipv4(ioc):
if 'category' in j:
response['category'] = j['category']
else:
response['category'] = 'Unknown IP Address'
if 'ttl' in j:
response['ttl'] = j['ttl']
response['type'] = 'IP Address'
    # Generic consistent responses
if 'correlationId' in j:
response['correlationId'] = j['correlationId']
if 'requestId' in j:
response['requestId'] = j['requestId']
    # Generic error handling based on responses
# https://api.labs.sophos.com/doc/lookup/ips.html
# https://api.labs.sophos.com/doc/lookup/files.html
if 'error' in j:
response['error'] = j['error']
if 'message' in j:
response['message'] = j['message']
# Return a dict, flask will return this as JSON to the browser
return response
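# --- Hedged usage sketch (not part of the module above): one way the lookup could
# be exposed so that "flask will return this as JSON to the browser"; the route
# and wiring below are assumptions for illustration, not the app's actual views.
# from flask import jsonify
#
# @app.route('/lookup/<ioc>')
# def lookup(ioc):
#     return jsonify(intelixlookup(ioc))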
def get_token():
# This is lazy, the token should be stored for quicker request times.
creds = f"{app.config["INTELIX_CLIENT_ID"]}:{app.config["INTELIX_CLIENT_SECRET"]}"
t = base64.b64encode(creds.encode("UTF-8")).decode("ascii")
d = {'grant_type': 'client_credentials'}
h = {'Authorization': f"Basic {t}",
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.post('https://api.labs.sophos.com/oauth2/token', headers=h, data=d)
r = r.json()
return r['access_token'] | import validators
import requests
import base64
from app import app
def intelixlookup(ioc):
#Get a token
token = get_token()
# use Validators to redirect the IOC to the correct Intelix endpoint
if validators.ipv4(ioc):
u = f"https://de.api.labs.sophos.com/lookup/ips/v1/{ioc}"
elif validators.md5(ioc):
u = f"https://de.api.labs.sophos.com/lookup/urls/v1/{ioc}"
elif validators.sha256(ioc):
u = f"https://de.api.labs.sophos.com/lookup/files/v1/{ioc}"
h = {"Authorization": f"{token}"}
r = requests.get(u, headers=h)
j = r.json()
response = {}
    # File responses
if validators.sha256(ioc) or validators.md5(ioc):
if 'reputationScore' in j:
response['reputationScore'] = j['reputationScore']
if j['reputationScore'] <= 19:
response['fileReputation'] = 'Malware'
elif j['reputationScore'] <= 29:
response['fileReputation'] = 'PUA (potentially unwanted application)'
elif j['reputationScore'] <= 69:
response['fileReputation'] = 'Unknown/suspicious'
elif j['reputationScore'] <= 100:
response['fileReputation'] = 'Known good'
if 'detectionName' in j:
response['detectionName'] = j['detectionName']
response['type'] = 'File Hash'
    # IP responses
if validators.ipv4(ioc):
if 'category' in j:
response['category'] = j['category']
else:
response['category'] = 'Unknown IP Address'
if 'ttl' in j:
response['ttl'] = j['ttl']
response['type'] = 'IP Address'
    # Generic consistent responses
if 'correlationId' in j:
response['correlationId'] = j['correlationId']
if 'requestId' in j:
response['requestId'] = j['requestId']
    # Generic error handling based on responses
# https://api.labs.sophos.com/doc/lookup/ips.html
# https://api.labs.sophos.com/doc/lookup/files.html
if 'error' in j:
response['error'] = j['error']
if 'message' in j:
response['message'] = j['message']
# Return a dict, flask will return this as JSON to the browser
return response
def get_token():
# This is lazy, the token should be stored for quicker request times.
creds = f"{app.config['INTELIX_CLIENT_ID']}:{app.config['INTELIX_CLIENT_SECRET']}"
t = base64.b64encode(creds.encode("UTF-8")).decode("ascii")
d = {'grant_type': 'client_credentials'}
h = {'Authorization': f"Basic {t}",
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.post('https://api.labs.sophos.com/oauth2/token', headers=h, data=d)
r = r.json()
return r['access_token'] |
import requests
from flask import current_app
from notifications_utils.statsd_decorators import statsd
from app import notify_celery
from app.dao.broadcast_message_dao import dao_get_broadcast_event_by_id
@notify_celery.task(name="send-broadcast-event")
@statsd(namespace="tasks")
def send_broadcast_event(broadcast_event_id, provider='stub-1'):
broadcast_event = dao_get_broadcast_event_by_id(broadcast_event_id)
current_app.logger.info(
f'sending broadcast_event {broadcast_event.reference} '
f'msgType {broadcast_event.message_type} to {provider}'
)
payload = broadcast_event.serialize()
resp = requests.post(
        f'{current_app.config["CBC_PROXY_URL"]}/broadcasts/events/{provider}',
json=payload
)
resp.raise_for_status()
current_app.logger.info(
f'broadcast_event {broadcast_event.reference} '
f'msgType {broadcast_event.message_type} sent to {provider}'
)
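# --- Hedged usage sketch (not part of the module above): the task is normally
# queued by the API layer rather than called inline; the id below is a placeholder.
# send_broadcast_event.apply_async(
#     kwargs={'broadcast_event_id': '11111111-1111-1111-1111-111111111111',
#             'provider': 'stub-1'},
# )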
| import requests
from flask import current_app
from notifications_utils.statsd_decorators import statsd
from app import notify_celery
from app.dao.broadcast_message_dao import dao_get_broadcast_event_by_id
@notify_celery.task(name="send-broadcast-event")
@statsd(namespace="tasks")
def send_broadcast_event(broadcast_event_id, provider='stub-1'):
broadcast_event = dao_get_broadcast_event_by_id(broadcast_event_id)
current_app.logger.info(
f'sending broadcast_event {broadcast_event.reference} '
f'msgType {broadcast_event.message_type} to {provider}'
)
payload = broadcast_event.serialize()
resp = requests.post(
f'{current_app.config["CBC_PROXY_URL"]}/broadcasts/events/{provider}',
json=payload
)
resp.raise_for_status()
current_app.logger.info(
f'broadcast_event {broadcast_event.reference} '
f'msgType {broadcast_event.message_type} sent to {provider}'
)
|
import pytest
import os
import sys
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
from pathlib import Path
from dselib.context import DSEContext
from dselib.ntpx import NTPX
def context(varfile=None):
"""returns the DSE context object for this script."""
try:
myself = __file__
except NameError:
myself = sys.argv[0]
return DSEContext(myself, varfile)
me = context('test_ntpx')
def test_1():
a = NTPX('.')
b = NTPX('./fu')
c = NTPX('./fu.bar')
d = NTPX('fu.bar')
e = NTPX('/fu.bar')
def check_it(input, ntobj, cwd):
# logger.info(f"{cwd=}::{str(cwd)=}::{os.sep=}::{str(cwd.parent)[2 if os.name == "nt" else 0:]}")
suffix = lambda x: '' if str(x.parent)[2 if os.name == 'nt' else 0:] == os.sep else os.sep
path = lambda x: str(x.parent)[2 if os.name == 'nt' else 0:] + suffix(x)
# logger.info(f"{cwd=}::{suffix(cwd)=}::justpath={path(cwd)=}")
# the path_suffix is os.sep unless we are already at the root directory
# logger.info(f"{os.path.split(cwd)=}::{str(cwd.parent)=}")
path_suffix = '' if str(cwd.parent)[2 if os.name == 'nt' else 0:] == os.sep else os.sep
assert ntobj.format('dpnx') == str(cwd)
assert ntobj.format('d') == cwd.drive
assert ntobj.format('p') == path(cwd) #str(cwd.parent)[2 if os.name == 'nt' else 0:] + path_suffix
assert ntobj.format('n') == cwd.stem
assert ntobj.format('x') == cwd.suffix
assert ntobj.drive == cwd.drive
assert ntobj.path == path(cwd) #str(cwd.parent)[2 if os.name == 'nt' else 0:] + path_suffix
assert ntobj.name == cwd.stem
assert ntobj.ext == cwd.suffix
# logger.info(f"ntobj.all::{ntobj.all()[:5]}")
# logger.info(f"otherexpr::{(str(cwd), cwd.drive, path(cwd), cwd.stem, cwd.suffix)}")
# assert ntobj.all()[:5] == (str(cwd), cwd.drive, path(cwd), cwd.stem, cwd.suffix)
assert ntobj.all()[:5] == (str(cwd), cwd.drive, path(cwd), cwd.stem, cwd.suffix)
assert ntobj.full == cwd
# assert ntobj == str(cwd) # C:\Users\user\dse\test == C:\\Users\\user\\dse\\test
logger.info(f"NTPX('{input}") has passed. fully qualified is {ntobj.full}. formatted is {ntobj.format("dpnx")}")
check_it('.', a, Path('.').resolve())
check_it('./fu', b, Path('./fu').resolve())
check_it('./fu.bar', c, Path('fu.bar').resolve())
check_it('fu.bar', d, Path('fu.bar').resolve())
check_it('/fu.bar', e, Path('/fu.bar').resolve())
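# --- Hedged illustration (not part of the test above): the pathlib pieces the
# assertions compare NTPX against. Output depends on platform and current
# working directory.
if __name__ == '__main__':
    p = Path('./fu.bar').resolve()
    print(repr(p.drive), repr(str(p.parent)), repr(p.stem), repr(p.suffix))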
| import pytest
import os
import sys
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
from pathlib import Path
from dselib.context import DSEContext
from dselib.ntpx import NTPX
def context(varfile=None):
"""returns the DSE context object for this script."""
try:
myself = __file__
except NameError:
myself = sys.argv[0]
return DSEContext(myself, varfile)
me = context('test_ntpx')
def test_1():
a = NTPX('.')
b = NTPX('./fu')
c = NTPX('./fu.bar')
d = NTPX('fu.bar')
e = NTPX('/fu.bar')
def check_it(input, ntobj, cwd):
# logger.info(f"{cwd=}::{str(cwd)=}::{os.sep=}::{str(cwd.parent)[2 if os.name == 'nt' else 0:]}")
suffix = lambda x: '' if str(x.parent)[2 if os.name == 'nt' else 0:] == os.sep else os.sep
path = lambda x: str(x.parent)[2 if os.name == 'nt' else 0:] + suffix(x)
# logger.info(f"{cwd=}::{suffix(cwd)=}::justpath={path(cwd)=}")
# the path_suffix is os.sep unless we are already at the root directory
# logger.info(f"{os.path.split(cwd)=}::{str(cwd.parent)=}")
path_suffix = '' if str(cwd.parent)[2 if os.name == 'nt' else 0:] == os.sep else os.sep
assert ntobj.format('dpnx') == str(cwd)
assert ntobj.format('d') == cwd.drive
assert ntobj.format('p') == path(cwd) #str(cwd.parent)[2 if os.name == 'nt' else 0:] + path_suffix
assert ntobj.format('n') == cwd.stem
assert ntobj.format('x') == cwd.suffix
assert ntobj.drive == cwd.drive
assert ntobj.path == path(cwd) #str(cwd.parent)[2 if os.name == 'nt' else 0:] + path_suffix
assert ntobj.name == cwd.stem
assert ntobj.ext == cwd.suffix
# logger.info(f"ntobj.all::{ntobj.all()[:5]}")
# logger.info(f"otherexpr::{(str(cwd), cwd.drive, path(cwd), cwd.stem, cwd.suffix)}")
# assert ntobj.all()[:5] == (str(cwd), cwd.drive, path(cwd), cwd.stem, cwd.suffix)
assert ntobj.all()[:5] == (str(cwd), cwd.drive, path(cwd), cwd.stem, cwd.suffix)
assert ntobj.full == cwd
# assert ntobj == str(cwd) # C:\Users\user\dse\test == C:\\Users\\user\\dse\\test
logger.info(f"NTPX('{input}') has passed. fully qualified is {ntobj.full}. formatted is {ntobj.format('dpnx')}")
check_it('.', a, Path('.').resolve())
check_it('./fu', b, Path('./fu').resolve())
check_it('./fu.bar', c, Path('fu.bar').resolve())
check_it('fu.bar', d, Path('fu.bar').resolve())
check_it('/fu.bar', e, Path('/fu.bar').resolve())
|
import json
import os
import re
import sys
import time
import unittest
import click
import vmraid
import requests
from .test_runner import (SLOW_TEST_THRESHOLD, make_test_records, set_test_email_config)
click_ctx = click.get_current_context(True)
if click_ctx:
click_ctx.color = True
class ParallelTestRunner():
def __init__(self, app, site, build_number=1, total_builds=1, with_coverage=False):
self.app = app
self.site = site
self.with_coverage = with_coverage
self.build_number = vmraid.utils.cint(build_number) or 1
self.total_builds = vmraid.utils.cint(total_builds)
self.setup_test_site()
self.run_tests()
def setup_test_site(self):
vmraid.init(site=self.site)
if not vmraid.db:
vmraid.connect()
vmraid.flags.in_test = True
vmraid.clear_cache()
vmraid.utils.scheduler.disable_scheduler()
set_test_email_config()
self.before_test_setup()
def before_test_setup(self):
start_time = time.time()
for fn in vmraid.get_hooks("before_tests", app_name=self.app):
vmraid.get_attr(fn)()
test_module = vmraid.get_module(f'{self.app}.tests')
if hasattr(test_module, "global_test_dependencies"):
for doctype in test_module.global_test_dependencies:
make_test_records(doctype)
elapsed = time.time() - start_time
elapsed = click.style(f' ({elapsed:.03}s)', fg='red')
click.echo(f'Before Test {elapsed}')
def run_tests(self):
self.test_result = ParallelTestResult(stream=sys.stderr, descriptions=True, verbosity=2)
self.start_coverage()
for test_file_info in self.get_test_file_list():
self.run_tests_for_file(test_file_info)
self.save_coverage()
self.print_result()
def run_tests_for_file(self, file_info):
if not file_info: return
vmraid.set_user('Administrator')
path, filename = file_info
module = self.get_module(path, filename)
self.create_test_dependency_records(module, path, filename)
test_suite = unittest.TestSuite()
module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
test_suite.addTest(module_test_cases)
test_suite(self.test_result)
def create_test_dependency_records(self, module, path, filename):
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype)
if os.path.basename(os.path.dirname(path)) == "doctype":
# test_data_migration_connector.py > data_migration_connector.json
test_record_filename = re.sub('^test_', '', filename).replace(".py", ".json")
test_record_file_path = os.path.join(path, test_record_filename)
if os.path.exists(test_record_file_path):
with open(test_record_file_path, 'r') as f:
doc = json.loads(f.read())
doctype = doc["name"]
make_test_records(doctype)
def get_module(self, path, filename):
app_path = vmraid.get_pymodule_path(self.app)
relative_path = os.path.relpath(path, app_path)
if relative_path == '.':
module_name = self.app
else:
relative_path = relative_path.replace('/', '.')
module_name = os.path.splitext(filename)[0]
module_name = f'{self.app}.{relative_path}.{module_name}'
return vmraid.get_module(module_name)
def print_result(self):
self.test_result.printErrors()
click.echo(self.test_result)
if self.test_result.failures or self.test_result.errors:
if os.environ.get('CI'):
sys.exit(1)
def start_coverage(self):
if self.with_coverage:
from coverage import Coverage
from vmraid.utils import get_bench_path
# Generate coverage report only for app that is being tested
source_path = os.path.join(get_bench_path(), 'apps', self.app)
omit=['*.html', '*.js', '*.xml', '*.css', '*.less', '*.scss',
'*.vue', '*/doctype/*/*_dashboard.py', '*/patches/*']
if self.app == 'vmraid':
omit.append('*/commands/*')
self.coverage = Coverage(source=[source_path], omit=omit)
self.coverage.start()
def save_coverage(self):
if not self.with_coverage:
return
self.coverage.stop()
self.coverage.save()
def get_test_file_list(self):
test_list = get_all_tests(self.app)
split_size = vmraid.utils.ceil(len(test_list) / self.total_builds)
        # [1,2,3,4,5,6] to [[1,2], [3,4], [5,6]] if split_size is 2
test_chunks = [test_list[x:x+split_size] for x in range(0, len(test_list), split_size)]
return test_chunks[self.build_number - 1]
class ParallelTestResult(unittest.TextTestResult):
def startTest(self, test):
self._started_at = time.time()
super(unittest.TextTestResult, self).startTest(test)
test_class = unittest.util.strclass(test.__class__)
if not hasattr(self, 'current_test_class') or self.current_test_class != test_class:
click.echo(f"\n{unittest.util.strclass(test.__class__)}")
self.current_test_class = test_class
def getTestMethodName(self, test):
return test._testMethodName if hasattr(test, '_testMethodName') else str(test)
def addSuccess(self, test):
super(unittest.TextTestResult, self).addSuccess(test)
elapsed = time.time() - self._started_at
threshold_passed = elapsed >= SLOW_TEST_THRESHOLD
elapsed = click.style(f' ({elapsed:.03}s)', fg='red') if threshold_passed else ''
click.echo(f" {click.style(" ✔ ", fg="green")} {self.getTestMethodName(test)}{elapsed}")
def addError(self, test, err):
super(unittest.TextTestResult, self).addError(test, err)
click.echo(f" {click.style(" ✖ ", fg="red")} {self.getTestMethodName(test)}")
def addFailure(self, test, err):
super(unittest.TextTestResult, self).addFailure(test, err)
click.echo(f" {click.style(" ✖ ", fg="red")} {self.getTestMethodName(test)}")
def addSkip(self, test, reason):
super(unittest.TextTestResult, self).addSkip(test, reason)
click.echo(f" {click.style(" = ", fg="white")} {self.getTestMethodName(test)}")
def addExpectedFailure(self, test, err):
super(unittest.TextTestResult, self).addExpectedFailure(test, err)
click.echo(f" {click.style(" ✖ ", fg="red")} {self.getTestMethodName(test)}")
def addUnexpectedSuccess(self, test):
super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
click.echo(f" {click.style(" ✔ ", fg="green")} {self.getTestMethodName(test)}")
def printErrors(self):
click.echo('\n')
self.printErrorList(' ERROR ', self.errors, 'red')
self.printErrorList(' FAIL ', self.failures, 'red')
def printErrorList(self, flavour, errors, color):
for test, err in errors:
click.echo(self.separator1)
click.echo(f"{click.style(flavour, bg=color)} {self.getDescription(test)}")
click.echo(self.separator2)
click.echo(err)
def __str__(self):
return f"Tests: {self.testsRun}, Failing: {len(self.failures)}, Errors: {len(self.errors)}"
def get_all_tests(app):
test_file_list = []
for path, folders, files in os.walk(vmraid.get_pymodule_path(app)):
for dontwalk in ('locals', '.git', 'public', '__pycache__'):
if dontwalk in folders:
folders.remove(dontwalk)
# for predictability
folders.sort()
files.sort()
if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
# in /doctype/doctype/boilerplate/
continue
for filename in files:
if filename.startswith("test_") and filename.endswith(".py") \
and filename != 'test_runner.py':
test_file_list.append([path, filename])
return test_file_list
class ParallelTestWithOrchestrator(ParallelTestRunner):
'''
    This can be used to balance out test time across multiple instances.
    It depends on an external orchestrator that returns the next test to run.
orchestrator endpoints
- register-instance (<build_id>, <instance_id>, test_spec_list)
- get-next-test-spec (<build_id>, <instance_id>)
- test-completed (<build_id>, <instance_id>)
'''
def __init__(self, app, site, with_coverage=False):
self.orchestrator_url = os.environ.get('ORCHESTRATOR_URL')
if not self.orchestrator_url:
click.echo('ORCHESTRATOR_URL environment variable not found!')
click.echo('Pass public URL after hosting https://github.com/vmraid/test-orchestrator')
sys.exit(1)
self.ci_build_id = os.environ.get('CI_BUILD_ID')
self.ci_instance_id = os.environ.get('CI_INSTANCE_ID') or vmraid.generate_hash(length=10)
if not self.ci_build_id:
click.echo('CI_BUILD_ID environment variable not found!')
sys.exit(1)
ParallelTestRunner.__init__(self, app, site, with_coverage=with_coverage)
def run_tests(self):
self.test_status = 'ongoing'
self.register_instance()
super().run_tests()
def get_test_file_list(self):
while self.test_status == 'ongoing':
yield self.get_next_test()
def register_instance(self):
test_spec_list = get_all_tests(self.app)
response_data = self.call_orchestrator('register-instance', data={
'test_spec_list': test_spec_list
})
self.is_master = response_data.get('is_master')
def get_next_test(self):
response_data = self.call_orchestrator('get-next-test-spec')
self.test_status = response_data.get('status')
return response_data.get('next_test')
def print_result(self):
self.call_orchestrator('test-completed')
return super().print_result()
def call_orchestrator(self, endpoint, data={}):
# add repo token header
# build id in header
headers = {
'CI-BUILD-ID': self.ci_build_id,
'CI-INSTANCE-ID': self.ci_instance_id,
'REPO-TOKEN': '2948288382838DE'
}
url = f'{self.orchestrator_url}/{endpoint}'
res = requests.get(url, json=data, headers=headers)
res.raise_for_status()
response_data = {}
if 'application/json' in res.headers.get('content-type'):
response_data = res.json()
return response_data
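# --- Hedged illustration (not part of the module above): the build-splitting done
# in ParallelTestRunner.get_test_file_list, using plain Python instead of
# vmraid.utils helpers. Values are made up.
if __name__ == '__main__':
    from math import ceil
    tests = [1, 2, 3, 4, 5, 6]
    total_builds = 3
    split_size = ceil(len(tests) / total_builds)
    chunks = [tests[i:i + split_size] for i in range(0, len(tests), split_size)]
    print(chunks)   # [[1, 2], [3, 4], [5, 6]] -> build N runs chunks[N - 1]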
| import json
import os
import re
import sys
import time
import unittest
import click
import vmraid
import requests
from .test_runner import (SLOW_TEST_THRESHOLD, make_test_records, set_test_email_config)
click_ctx = click.get_current_context(True)
if click_ctx:
click_ctx.color = True
class ParallelTestRunner():
def __init__(self, app, site, build_number=1, total_builds=1, with_coverage=False):
self.app = app
self.site = site
self.with_coverage = with_coverage
self.build_number = vmraid.utils.cint(build_number) or 1
self.total_builds = vmraid.utils.cint(total_builds)
self.setup_test_site()
self.run_tests()
def setup_test_site(self):
vmraid.init(site=self.site)
if not vmraid.db:
vmraid.connect()
vmraid.flags.in_test = True
vmraid.clear_cache()
vmraid.utils.scheduler.disable_scheduler()
set_test_email_config()
self.before_test_setup()
def before_test_setup(self):
start_time = time.time()
for fn in vmraid.get_hooks("before_tests", app_name=self.app):
vmraid.get_attr(fn)()
test_module = vmraid.get_module(f'{self.app}.tests')
if hasattr(test_module, "global_test_dependencies"):
for doctype in test_module.global_test_dependencies:
make_test_records(doctype)
elapsed = time.time() - start_time
elapsed = click.style(f' ({elapsed:.03}s)', fg='red')
click.echo(f'Before Test {elapsed}')
def run_tests(self):
self.test_result = ParallelTestResult(stream=sys.stderr, descriptions=True, verbosity=2)
self.start_coverage()
for test_file_info in self.get_test_file_list():
self.run_tests_for_file(test_file_info)
self.save_coverage()
self.print_result()
def run_tests_for_file(self, file_info):
if not file_info: return
vmraid.set_user('Administrator')
path, filename = file_info
module = self.get_module(path, filename)
self.create_test_dependency_records(module, path, filename)
test_suite = unittest.TestSuite()
module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
test_suite.addTest(module_test_cases)
test_suite(self.test_result)
def create_test_dependency_records(self, module, path, filename):
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype)
if os.path.basename(os.path.dirname(path)) == "doctype":
# test_data_migration_connector.py > data_migration_connector.json
test_record_filename = re.sub('^test_', '', filename).replace(".py", ".json")
test_record_file_path = os.path.join(path, test_record_filename)
if os.path.exists(test_record_file_path):
with open(test_record_file_path, 'r') as f:
doc = json.loads(f.read())
doctype = doc["name"]
make_test_records(doctype)
def get_module(self, path, filename):
app_path = vmraid.get_pymodule_path(self.app)
relative_path = os.path.relpath(path, app_path)
if relative_path == '.':
module_name = self.app
else:
relative_path = relative_path.replace('/', '.')
module_name = os.path.splitext(filename)[0]
module_name = f'{self.app}.{relative_path}.{module_name}'
return vmraid.get_module(module_name)
def print_result(self):
self.test_result.printErrors()
click.echo(self.test_result)
if self.test_result.failures or self.test_result.errors:
if os.environ.get('CI'):
sys.exit(1)
def start_coverage(self):
if self.with_coverage:
from coverage import Coverage
from vmraid.utils import get_bench_path
# Generate coverage report only for app that is being tested
source_path = os.path.join(get_bench_path(), 'apps', self.app)
omit = ['*.html', '*.js', '*.xml', '*.css', '*.less', '*.scss',
'*.vue', '*/doctype/*/*_dashboard.py', '*/patches/*']
if self.app == 'vmraid':
omit.append('*/commands/*')
self.coverage = Coverage(source=[source_path], omit=omit)
self.coverage.start()
def save_coverage(self):
if not self.with_coverage:
return
self.coverage.stop()
self.coverage.save()
def get_test_file_list(self):
test_list = get_all_tests(self.app)
split_size = vmraid.utils.ceil(len(test_list) / self.total_builds)
# [1,2,3,4,5,6] to [[1,2], [3,4], [5,6]] if split_size is 2
test_chunks = [test_list[x:x+split_size] for x in range(0, len(test_list), split_size)]
return test_chunks[self.build_number - 1]
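# A minimal, hypothetical sketch of the chunking done by get_test_file_list above;
# the list and build count below are made up purely to show the slicing.
_example_tests = [1, 2, 3, 4, 5, 6]                      # stand-in for get_all_tests() output
_example_total_builds = 3
_example_split = -(-len(_example_tests) // _example_total_builds)  # ceil division, like vmraid.utils.ceil
_example_chunks = [_example_tests[i:i + _example_split] for i in range(0, len(_example_tests), _example_split)]
# _example_chunks == [[1, 2], [3, 4], [5, 6]]; build_number N runs _example_chunks[N - 1]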
class ParallelTestResult(unittest.TextTestResult):
def startTest(self, test):
self._started_at = time.time()
super(unittest.TextTestResult, self).startTest(test)
test_class = unittest.util.strclass(test.__class__)
if not hasattr(self, 'current_test_class') or self.current_test_class != test_class:
click.echo(f"\n{unittest.util.strclass(test.__class__)}")
self.current_test_class = test_class
def getTestMethodName(self, test):
return test._testMethodName if hasattr(test, '_testMethodName') else str(test)
def addSuccess(self, test):
super(unittest.TextTestResult, self).addSuccess(test)
elapsed = time.time() - self._started_at
threshold_passed = elapsed >= SLOW_TEST_THRESHOLD
elapsed = click.style(f' ({elapsed:.03}s)', fg='red') if threshold_passed else ''
click.echo(f" {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}{elapsed}")
def addError(self, test, err):
super(unittest.TextTestResult, self).addError(test, err)
click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
def addFailure(self, test, err):
super(unittest.TextTestResult, self).addFailure(test, err)
click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
def addSkip(self, test, reason):
super(unittest.TextTestResult, self).addSkip(test, reason)
click.echo(f" {click.style(' = ', fg='white')} {self.getTestMethodName(test)}")
def addExpectedFailure(self, test, err):
super(unittest.TextTestResult, self).addExpectedFailure(test, err)
click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
def addUnexpectedSuccess(self, test):
super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
click.echo(f" {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}")
def printErrors(self):
click.echo('\n')
self.printErrorList(' ERROR ', self.errors, 'red')
self.printErrorList(' FAIL ', self.failures, 'red')
def printErrorList(self, flavour, errors, color):
for test, err in errors:
click.echo(self.separator1)
click.echo(f"{click.style(flavour, bg=color)} {self.getDescription(test)}")
click.echo(self.separator2)
click.echo(err)
def __str__(self):
return f"Tests: {self.testsRun}, Failing: {len(self.failures)}, Errors: {len(self.errors)}"
def get_all_tests(app):
test_file_list = []
for path, folders, files in os.walk(vmraid.get_pymodule_path(app)):
for dontwalk in ('locals', '.git', 'public', '__pycache__'):
if dontwalk in folders:
folders.remove(dontwalk)
# for predictability
folders.sort()
files.sort()
if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
# in /doctype/doctype/boilerplate/
continue
for filename in files:
if filename.startswith("test_") and filename.endswith(".py") \
and filename != 'test_runner.py':
test_file_list.append([path, filename])
return test_file_list
class ParallelTestWithOrchestrator(ParallelTestRunner):
'''
This can be used to balance-out test time across multiple instances
This is dependent on external orchestrator which returns next test to run
orchestrator endpoints
- register-instance (<build_id>, <instance_id>, test_spec_list)
- get-next-test-spec (<build_id>, <instance_id>)
- test-completed (<build_id>, <instance_id>)
'''
def __init__(self, app, site, with_coverage=False):
self.orchestrator_url = os.environ.get('ORCHESTRATOR_URL')
if not self.orchestrator_url:
click.echo('ORCHESTRATOR_URL environment variable not found!')
click.echo('Set it to the public URL of a hosted https://github.com/vmraid/test-orchestrator instance')
sys.exit(1)
self.ci_build_id = os.environ.get('CI_BUILD_ID')
self.ci_instance_id = os.environ.get('CI_INSTANCE_ID') or vmraid.generate_hash(length=10)
if not self.ci_build_id:
click.echo('CI_BUILD_ID environment variable not found!')
sys.exit(1)
ParallelTestRunner.__init__(self, app, site, with_coverage=with_coverage)
def run_tests(self):
self.test_status = 'ongoing'
self.register_instance()
super().run_tests()
def get_test_file_list(self):
while self.test_status == 'ongoing':
yield self.get_next_test()
def register_instance(self):
test_spec_list = get_all_tests(self.app)
response_data = self.call_orchestrator('register-instance', data={
'test_spec_list': test_spec_list
})
self.is_master = response_data.get('is_master')
def get_next_test(self):
response_data = self.call_orchestrator('get-next-test-spec')
self.test_status = response_data.get('status')
return response_data.get('next_test')
def print_result(self):
self.call_orchestrator('test-completed')
return super().print_result()
def call_orchestrator(self, endpoint, data={}):
# add repo token header
# build id in header
headers = {
'CI-BUILD-ID': self.ci_build_id,
'CI-INSTANCE-ID': self.ci_instance_id,
'REPO-TOKEN': '2948288382838DE'
}
url = f'{self.orchestrator_url}/{endpoint}'
res = requests.get(url, json=data, headers=headers)
res.raise_for_status()
response_data = {}
if 'application/json' in (res.headers.get('content-type') or ''):
response_data = res.json()
return response_data
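# A hedged sketch (not part of the original module) of the round trip this class
# performs against the assumed test-orchestrator endpoints listed in the docstring
# above; it simply mirrors what run_tests/get_test_file_list already do.
def _example_orchestrator_flow(runner):
    runner.test_status = 'ongoing'
    runner.register_instance()                  # register-instance with the full test_spec_list
    while runner.test_status == 'ongoing':
        spec = runner.get_next_test()           # get-next-test-spec -> status + next [path, filename]
        if spec:
            runner.run_tests_for_file(spec)
    runner.call_orchestrator('test-completed')  # tell the orchestrator this instance is done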
|
import rlp
from typing import (
Iterable,
Optional,
Tuple,
)
from eth_utils.toolz import (
curry,
)
from eth_utils import (
to_tuple,
ValidationError,
)
from eth.rlp.blocks import (
BaseBlock,
)
@to_tuple
def diff_rlp_object(left: BaseBlock,
right: BaseBlock) -> Optional[Iterable[Tuple[str, str, str]]]:
if left != right:
rlp_type = type(left)
for field_name, field_type in rlp_type._meta.fields:
left_value = getattr(left, field_name)
right_value = getattr(right, field_name)
if isinstance(field_type, type) and issubclass(field_type, rlp.Serializable):
sub_diff = diff_rlp_object(left_value, right_value)
for sub_field_name, sub_left_value, sub_right_value in sub_diff:
yield (
f"{field_name}.{sub_field_name}",
sub_left_value,
sub_right_value,
)
elif isinstance(field_type, (rlp.sedes.List, rlp.sedes.CountableList)):
if tuple(left_value) != tuple(right_value):
yield (
field_name,
left_value,
right_value,
)
elif left_value != right_value:
yield (
field_name,
left_value,
right_value,
)
else:
continue
def _humanized_diff_elements(
diff: Iterable[Tuple[str, str, str]],
obj_a_name: str,
obj_b_name: str) -> Iterable[str]:
longest_obj_name = max(len(obj_a_name), len(obj_b_name))
for field_name, a_val, b_val in diff:
if isinstance(a_val, int) and isinstance(b_val, int):
element_diff = b_val - a_val
if element_diff > 0:
element_diff_display = f" (+{element_diff})"
else:
element_diff_display = f" ({element_diff})"
else:
element_diff_display = ""
yield (
f"{field_name}:\n"
f" ({obj_a_name.ljust(longest_obj_name, " ")}) : {a_val}\n"
f" ({obj_b_name.ljust(longest_obj_name, " ")}) : {b_val}{element_diff_display}"
)
@curry
def validate_rlp_equal(obj_a: BaseBlock,
obj_b: BaseBlock,
obj_a_name: str = None,
obj_b_name: str = None) -> None:
if obj_a == obj_b:
return
if obj_a_name is None:
obj_a_name = obj_a.__class__.__name__ + '_a'
if obj_b_name is None:
obj_b_name = obj_b.__class__.__name__ + '_b'
diff = diff_rlp_object(obj_a, obj_b)
if len(diff) == 0:
raise TypeError(
f"{obj_a_name} ({obj_a!r}) != "
f"{obj_b_name} ({obj_b!r}) but got an empty diff"
)
err_fields = "\n - ".join(_humanized_diff_elements(diff, obj_a_name, obj_b_name))
error_message = (
f"Mismatch between {obj_a_name} and {obj_b_name} "
f"on {len(diff)} fields:\n - {err_fields}"
)
raise ValidationError(error_message)
validate_imported_block_unchanged = validate_rlp_equal(
obj_a_name="locally executed block",
obj_b_name="proposed block",
)
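# Hedged usage note: validate_rlp_equal is curried, so pre-filling the display
# names (as done just above) leaves a two-argument validator. A hypothetical call:
#     validate_imported_block_unchanged(executed_block, proposed_block)
# returns None when the two blocks are equal and otherwise raises ValidationError
# carrying the per-field differences produced by diff_rlp_object.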
| import rlp
from typing import (
Iterable,
Optional,
Tuple,
)
from eth_utils.toolz import (
curry,
)
from eth_utils import (
to_tuple,
ValidationError,
)
from eth.rlp.blocks import (
BaseBlock,
)
@to_tuple
def diff_rlp_object(left: BaseBlock,
right: BaseBlock) -> Optional[Iterable[Tuple[str, str, str]]]:
if left != right:
rlp_type = type(left)
for field_name, field_type in rlp_type._meta.fields:
left_value = getattr(left, field_name)
right_value = getattr(right, field_name)
if isinstance(field_type, type) and issubclass(field_type, rlp.Serializable):
sub_diff = diff_rlp_object(left_value, right_value)
for sub_field_name, sub_left_value, sub_right_value in sub_diff:
yield (
f"{field_name}.{sub_field_name}",
sub_left_value,
sub_right_value,
)
elif isinstance(field_type, (rlp.sedes.List, rlp.sedes.CountableList)):
if tuple(left_value) != tuple(right_value):
yield (
field_name,
left_value,
right_value,
)
elif left_value != right_value:
yield (
field_name,
left_value,
right_value,
)
else:
continue
def _humanized_diff_elements(
diff: Iterable[Tuple[str, str, str]],
obj_a_name: str,
obj_b_name: str) -> Iterable[str]:
longest_obj_name = max(len(obj_a_name), len(obj_b_name))
for field_name, a_val, b_val in diff:
if isinstance(a_val, int) and isinstance(b_val, int):
element_diff = b_val - a_val
if element_diff > 0:
element_diff_display = f" (+{element_diff})"
else:
element_diff_display = f" ({element_diff})"
else:
element_diff_display = ""
yield (
f"{field_name}:\n"
f" ({obj_a_name.ljust(longest_obj_name, ' ')}) : {a_val}\n"
f" ({obj_b_name.ljust(longest_obj_name, ' ')}) : {b_val}{element_diff_display}"
)
@curry
def validate_rlp_equal(obj_a: BaseBlock,
obj_b: BaseBlock,
obj_a_name: str = None,
obj_b_name: str = None) -> None:
if obj_a == obj_b:
return
if obj_a_name is None:
obj_a_name = obj_a.__class__.__name__ + '_a'
if obj_b_name is None:
obj_b_name = obj_b.__class__.__name__ + '_b'
diff = diff_rlp_object(obj_a, obj_b)
if len(diff) == 0:
raise TypeError(
f"{obj_a_name} ({obj_a!r}) != "
f"{obj_b_name} ({obj_b!r}) but got an empty diff"
)
err_fields = "\n - ".join(_humanized_diff_elements(diff, obj_a_name, obj_b_name))
error_message = (
f"Mismatch between {obj_a_name} and {obj_b_name} "
f"on {len(diff)} fields:\n - {err_fields}"
)
raise ValidationError(error_message)
validate_imported_block_unchanged = validate_rlp_equal(
obj_a_name="locally executed block",
obj_b_name="proposed block",
)
|
from dataTool import ReadLabels, ReadXYZ, VisualizePointCloudClassesAsync, modelPath, DataTool
from imports import *
import math
import numpy as np
from time import time
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.utils import Sequence
from tensorflow.keras.layers import Input, BatchNormalization, Dense, Dropout, InputLayer
from sklearn.neighbors import KDTree
from sklearn.metrics import confusion_matrix
from PIL import Image, ImageEnhance, ImageOps
import random
# from notify_run import Notify
class Const:
@staticmethod
def IsWindowsMachine():
if os.path.isdir("C:/Program Files"):
return True
else:
return False
if os.path.isdir("C:/Program Files"):
batchSize = 8
else:
batchSize = 16 #25
#Placeholders
classCount = Label.Semantic3D.Count-1
classNames = Label.Semantic3D.Names
testFiles = []
excludeFiles = []
Paths = Paths.Semantic3D
epochs = 100
pointComponents = 3
featureComponents = 3 #rgb
classCount = 0
npoints = 8192
blocksize = 8
test_step = 0.5
name = ""
#Algorithm configuration
noFeature = False
Fusion = False
Scale = False
Rotate = False
Mirror = False
Jitter = False
FtrAugment = False
logsPath = "./logs"
### MODEL CONFIG
pl = 64
### MODEL CONFIG
def BuildSpecDict(self):
return {"noFeature" : self.noFeature,
"Fusion" : self.Fusion,
"Scale" : self.Scale,
"Rotate" : self.Rotate,
"Mirror" : self.Mirror,
"Jitter" : self.Jitter,
"FtrAugment" : False if self.noFeature else self.FtrAugment,
}
def Name(self, UID = ""):
modelName = self.name
modelName += f"({len(self.TrainFiles())}&{len(self.TestFiles())})"
for spec, value in self.BuildSpecDict().items():
if(value == True):
modelName += f"({spec})"
if(UID != ""):
modelName += f"_{UID}"
return modelName
@staticmethod
def RemoveUID(name : str):
return name.replace(f"_{Const.ParseModelUID(name)}", "")
@staticmethod
def UID():
import uuid
return uuid.uuid4().hex
@staticmethod
def ParseModelConfig(file):
config = Paths.FileName(file).split("_")[0].replace("("," ").replace(")","").replace("vox ","").split(" ")
const = None
if(config[0] == NPM3D.name):
const = NPM3D()
if(config[0] == Semantic3D.name):
const = Semantic3D()
for conf in config[1:]:
if conf == "noFeature" or conf == "NOCOL":
const.noFeature = True
elif conf == "Fusion":
const.Fusion = True
elif conf == "Scale":
const.Scale = True
elif conf == "Rotate":
const.Rotate = True
elif conf == "Mirror":
const.Mirror = True
elif conf == "Jitter":
const.Jitter = True
elif conf == "FtrAugment":
const.FtrAugment = True
return const
@staticmethod
def ParseModelUID(file):
parts = Paths.FileName(file).split("_")
if(len(parts) >= 2):
return parts[1]
else:
return None
@staticmethod
def ParseModelName(file, withUID = True):
parts = Paths.FileName(file, withoutExt = False).split("_")
name = parts[0]
if(withUID and len(parts) > 1):
name += "_"+parts[1]
return name
def TestFiles(self):
return Paths.JoinPaths(self.Paths.processedTrain, self.testFiles)
def TrainFiles(self):
return Paths.GetFiles(self.Paths.processedTrain, excludeFiles = self.TestFiles()+self.excludeFiles)
class Semantic3D(Const):
pointComponents = 3
featureComponents = 3 #rgb
classCount = Label.Semantic3D.Count-1
classNames = Label.Semantic3D.Names
test_step = 0.8
name = "Sem3D"
Paths = Paths.Semantic3D
testFiles = [
"untermaederbrunnen_station3_xyz_intensity_rgb_voxels.npy",
"domfountain_station1_xyz_intensity_rgb_voxels.npy",
]
excludeFiles = []
fileNames = {"birdfountain_station1_xyz_intensity_rgb" : "birdfountain1",
"castleblatten_station1_intensity_rgb" : "castleblatten1",
"castleblatten_station5_xyz_intensity_rgb" : "castleblatten5",
"marketplacefeldkirch_station1_intensity_rgb" : "marketsquarefeldkirch1",
"marketplacefeldkirch_station4_intensity_rgb" : "marketsquarefeldkirch4",
"marketplacefeldkirch_station7_intensity_rgb" : "marketsquarefeldkirch7",
"sg27_station3_intensity_rgb" : "sg27_3",
"sg27_station6_intensity_rgb" : "sg27_6",
"sg27_station8_intensity_rgb" : "sg27_8",
"sg27_station10_intensity_rgb" : "sg27_10",
"sg28_station2_intensity_rgb" : "sg28_2",
"sg28_station5_xyz_intensity_rgb" : "sg28_5",
"stgallencathedral_station1_intensity_rgb" : "stgallencathedral1",
"stgallencathedral_station3_intensity_rgb" : "stgallencathedral3",
"stgallencathedral_station6_intensity_rgb" : "stgallencathedral6",
"MarketplaceFeldkirch_Station4_rgb_intensity-reduced" : "marketsquarefeldkirch4-reduced",
"sg27_station10_rgb_intensity-reduced" : "sg27_10-reduced",
"sg28_Station2_rgb_intensity-reduced" : "sg28_2-reduced",
"StGallenCathedral_station6_rgb_intensity-reduced" : "stgallencathedral6-reduced",
}
class Curbs(Const):
pointComponents = 3
featureComponents = 3
classCount = 2
classNames = Label.Curbs.Names
test_step = 0.5
name = "Curbs"
Paths = Paths.Curbs
if os.path.isdir("C:/Program Files"):
batchSize = 8
else:
batchSize = 25
testFiles = [
"park_extracted.npy",
"Jelskio_str_trimmed.npy",
]
excludeFiles = [
"powerlines_dataset"
]
def FilterCurbAndLineFiles(self, files):
return [file for file in files if not file.endswith("_curbs.npy") and not file.endswith("_lines.npy")]
def TestFiles(self):
return self.FilterCurbAndLineFiles(super(Curbs, self).TestFiles())
def TrainFiles(self):
return self.FilterCurbAndLineFiles(super(Curbs, self).TrainFiles())
class NPM3D(Const):
pointComponents = 3
featureComponents = 1
classCount = Label.NPM3D.Count-1
classNames = Label.NPM3D.Names
test_step = 0.5
name = "NPM3D"
Paths = Paths.NPM3D
testFiles = [
# "Lille1_1_0.npy",
# "Lille1_1_1.npy",
# "Lille1_1_2.npy",
# "Lille1_1_3.npy",
# "Lille1_1_4.npy",
# "Lille1_1_5.npy",
# "Lille1_1_6.npy",
# "Lille1_1_7.npy",
# "Lille1_1_8.npy",
# "Lille1_2_0.npy",
# "Lille1_2_1.npy",
"Lille2_0.npy",
"Lille2_1.npy",
"Lille2_2.npy",
"Lille2_8.npy",
"Lille2_9.npy",
# "Paris_0.npy",
# "Paris_1.npy",
]
excludeFiles = [
# "Lille1_1_7.npy",
# "Lille1_2_2.npy",
"Lille2_10.npy",
# "Paris_2.npy",
]
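# A minimal, hypothetical sketch of picking a dataset configuration and deriving
# the run name used for logs and checkpoints; the flags toggled here are illustrative.
# cfg = Semantic3D()
# cfg.Rotate = True
# cfg.Mirror = True
# cfg.Name(Const.UID())   # e.g. "Sem3D(<train count>&2)(Rotate)(Mirror)_<uid>"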
class WeightsMul(tf.keras.layers.Layer):
def __init__(self, shape, lowBound, highBound, **kwargs):
super(WeightsMul, self).__init__(**kwargs)
self.shape = shape
self.lowBound = lowBound
self.highBound = highBound
def build(self, input_shape):
init = tf.random_uniform_initializer(self.lowBound, self.highBound)
self.vars = self.add_weight(shape=(self.shape),
initializer = init,
trainable = True, dtype=tf.float32)
def call(self, inputs):
return tf.matmul(inputs, self.vars)
def get_config(self):
config = super(WeightsMul, self).get_config()
config.update({'shape': self.shape, 'lowBound': self.lowBound, 'highBound': self.highBound})
return config
class GatherNDLayer(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(GatherNDLayer, self).__init__(**kwargs)
def call(self, array, indices):
return tf.gather_nd(array, indices, batch_dims=1)
def get_config(self):
config = super(GatherNDLayer, self).get_config()
return config
class SubstractCenters(tf.keras.layers.Layer):
def __init__(self, dim, n_centers, **kwargs):
super(SubstractCenters, self).__init__(**kwargs)
self.dim = dim
self.n_centers = n_centers
def build(self, input_shape):
center_data = np.zeros((self.dim, self.n_centers))
for i in range(self.n_centers):
coord = np.random.rand(self.dim)*2 - 1
while (coord**2).sum() > 1:
coord = np.random.rand(self.dim)*2 - 1
center_data[:,i] = coord
self.centers = self.add_weight(shape = (center_data.shape),
initializer = tf.constant_initializer(center_data),
trainable = True, dtype=tf.float32)
def call(self, points):
return points - self.centers
def get_config(self):
config = super(SubstractCenters, self).get_config()
config.update({'dim': self.dim, 'n_centers': self.n_centers})
return config
class UnitBallNormalize(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(UnitBallNormalize, self).__init__(**kwargs)
def call(self, points):
maxi = tf.sqrt(tf.reduce_max(tf.reduce_sum(tf.square(tf.stop_gradient(points)), axis = 3), axis = 2))
maxi = tf.where(tf.equal(maxi, 0.0), tf.constant(1.0), maxi)
points = points / tf.expand_dims(tf.expand_dims(maxi, 2), 3)
return points
def get_config(self):
config = super(UnitBallNormalize, self).get_config()
return config
def PtConv(fts, points, K, next_pts, in_features, out_features, n_centers = 16):
next_pts_ = None
if isinstance(next_pts, int) and points.get_shape()[1] != next_pts:
# convolution with reduction
indices, next_pts_ = KDTreeSampleLayer(K, next_pts)(points)
elif (next_pts is None) or (isinstance(next_pts, int) and points.get_shape()[1] == next_pts):
# convolution without reduction
indices = KDTreeLayer(K)(points, points)
next_pts_ = points
else:
# convolution with up sampling or projection on given points
indices = KDTreeLayer(K)(points, next_pts)
next_pts_ = next_pts
if next_pts is None or isinstance(next_pts, int):
next_pts = next_pts_
# get the features and point coordinates associated with the indices
pts = GatherNDLayer()(points, indices)
if fts is None:
features = tf.expand_dims(tf.ones_like(pts[:,:,:,0]), 3)
else:
features = GatherNDLayer()(fts, indices)
# center the neighborhoods
pts = pts - tf.expand_dims(next_pts,2)
# normalize to unit ball, or not
pts = UnitBallNormalize()(pts)
# compute the distances
dists = SubstractCenters(3, n_centers)(tf.expand_dims(pts, 4))
dShape = dists.shape
dists = tf.reshape(dists, (-1, dShape[1], dShape[2], dShape[3]*dShape[4]))
dists = DenseInitialized(2*n_centers, activation="relu")(dists)
dists = DenseInitialized(n_centers, activation="relu")(dists)
dists = DenseInitialized(n_centers, activation="relu")(dists)
# compute features
fs = features.shape # [batch, points, n_centers, in_features]
ds = dists.shape
features = tf.transpose(features,[0, 1, 3, 2])
features = tf.reshape(features, (-1, features.shape[2], features.shape[3])) #features.shape[0]*features.shape[1]
dists = tf.reshape(dists, (-1, dists.shape[2], dists.shape[3])) #dists.shape[0]*dists.shape[1]
features = tf.matmul(features, dists)
features = tf.reshape(features, (-1, ds[1], features.shape[1]*features.shape[2]))
bound = math.sqrt(3.0) * math.sqrt(2.0 / (in_features + out_features))
features = WeightsMul([in_features * n_centers, out_features], -bound, bound)(features)
features = features / fs[2]
# normalization and activation
features = BatchNormalization(epsilon = 1e-05, momentum=0.9)(features)
features = tf.nn.relu(features)
return features, next_pts
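# Hedged shape summary for PtConv above (a ConvPoint-style continuous convolution):
#   fts:      (B, N_in, in_features) or None      points: (B, N_in, 3)
#   next_pts: int    -> subsample to that many support points (KDTreeSampleLayer)
#             None   -> keep the input points as support points
#             tensor -> project onto the given points
#   returns:  features of shape (B, N_out, out_features) plus the (B, N_out, 3)
#             support points to feed into the next layer.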
def LinearInitializer(k):
k = np.sqrt(1.0/float(k))
return tf.random_uniform_initializer(k*-1, k)
def DenseInitialized(out_features, activation = None, name = None):
def DenseInit(x):
return Dense(out_features,
kernel_initializer = tf.initializers.lecun_normal(),
bias_initializer = tf.initializers.lecun_normal(),
activation = activation,
name = name,
)(x)
return DenseInit
def CreateModel(classCount, ftsComp, in_fts = None, in_pts = None, returnFeatures = False, noColor = False, applySoftmax = True):
print("Creating new model...")
if(in_fts is None and in_pts is None):
in_pts = Input(shape=(Const.npoints, Const.pointComponents), dtype=tf.float32) #points
if(noColor):
in_fts = None
else:
in_fts = Input(shape=(Const.npoints, ftsComp), dtype=tf.float32) #features
if(noColor):
in_fts = None
pl = Const.pl
### Down Sample
x0, _ = PtConv(in_fts, in_pts, K = 16, next_pts = None, in_features = ftsComp, out_features = pl)
x1, pts1 = PtConv(x0, in_pts, K = 16, next_pts = 2048, in_features = pl, out_features = pl)
x2, pts2 = PtConv(x1, pts1, K = 16, next_pts = 1024, in_features = pl, out_features = pl)
x3, pts3 = PtConv(x2, pts2, K = 16, next_pts = 256, in_features = pl, out_features = pl)
x4, pts4 = PtConv(x3, pts3, K = 8, next_pts = 64, in_features = pl, out_features = pl*2)
x5, pts5 = PtConv(x4, pts4, K = 8, next_pts = 16, in_features = pl*2, out_features = pl*2)
x6, pts6 = PtConv(x5, pts5, K = 4, next_pts = 8, in_features = pl*2, out_features = pl*2)
## Up Sample
x5d, _ = PtConv(x6, pts6, K = 4, next_pts = pts5, in_features = pl*2, out_features = pl*2)
x5d = tf.concat([x5d, x5], axis = 2)
x4d, _ = PtConv(x5d, pts5, K = 4, next_pts = pts4, in_features = pl*4, out_features = pl*2)
x4d = tf.concat([x4d, x4], axis = 2)
x3d, _ = PtConv(x4d, pts4, K = 4, next_pts = pts3, in_features = pl*4, out_features = pl)
x3d = tf.concat([x3d, x3], axis = 2)
x2d, _ = PtConv(x3d, pts3, K = 8, next_pts = pts2, in_features = pl*2, out_features = pl)
x2d = tf.concat([x2d, x2], axis = 2)
x1d, _ = PtConv(x2d, pts2, K = 8, next_pts = pts1, in_features = pl*2, out_features = pl)
x1d = tf.concat([x1d, x1], axis = 2)
x0d, _ = PtConv(x1d, pts1, K = 8, next_pts = in_pts, in_features = pl*2, out_features = pl)
x0d = tf.concat([x0d, x0], axis = 2)
### Output layer
out_labels = Dropout(rate=0.5)(x0d)
out_labels = tf.reshape(out_labels, (-1, out_labels.shape[2]))
out_labels = DenseInitialized(classCount)(out_labels)
out_labels = tf.reshape(out_labels, (-1, x0d.shape[1], out_labels.shape[1]))
if(applySoftmax):
out_labels = tf.nn.softmax(out_labels)
if(noColor):
inputList = [in_pts]
else:
inputList = [in_fts, in_pts]
if(returnFeatures):
return Model(inputList, [x0d, out_labels], name ="model")
model = Model(inputList, out_labels, name ="model")
model = CompileModel(model, classCount)
# print(model.summary())
return model
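# Hedged example (values are placeholders, not a recommended setup): building the
# segmentation network for an RGB dataset.
# seg_model = CreateModel(classCount=9, ftsComp=3, noColor=False)
# seg_model expects [features (B, npoints, 3), points (B, npoints, 3)] as input and
# returns per-point class probabilities of shape (B, npoints, classCount).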
def ModifyModelOutput(model, classCount):
dropoutLayer = model.layers[len(model.layers)-5] #take output of the drop out layer
out_labels = dropoutLayer.output
out_labels = tf.reshape(out_labels, (-1, out_labels.shape[2]), name = "lbl_reshape_1")
out_labels = DenseInitialized(classCount, name = "lbl_dense")(out_labels)
out_labels = tf.reshape(out_labels, (-1, dropoutLayer.input.shape[1], out_labels.shape[1]), name = "lbl_reshape_2")
out_labels = tf.nn.softmax(out_labels, name = "lbl_softmax")
return Model(model.inputs, out_labels, name ="model")
def ReadModel(modelPath):
if(not modelPath.endswith(".h5")):
modelPath += ".h5"
if(not os.path.exists(modelPath)):
if(os.path.exists(os.path.join("." , "data", modelPath))):
modelPath = os.path.join("." , "data", modelPath)
elif(os.path.exists(os.path.join("." , "data", Const.ParseModelName(modelPath, False)))):
file = os.path.basename(modelPath)
folder = os.path.join("." , "data", Const.ParseModelName(modelPath, False))
modelPath = os.path.join(folder, file)
elif(os.path.exists(os.path.join("." , "data", Const.ParseModelName(modelPath)))):
file = os.path.basename(modelPath)
folder = os.path.join("." , "data", Const.ParseModelName(modelPath))
modelPath = os.path.join(folder, file)
if(not os.path.exists(modelPath)):
raise FileNotFoundError
model = tf.keras.models.load_model(modelPath, compile=False,
custom_objects={'NearestNeighborsLayer': NearestNeighborsLayer,
'SampleNearestNeighborsLayer': SampleNearestNeighborsLayer,
'SubstractCenters': SubstractCenters,
'WeightsMul': WeightsMul,
'GatherNDLayer':GatherNDLayer,
'UnitBallNormalize':UnitBallNormalize,
'KDTreeSampleLayer':KDTreeSampleLayer,
'KDTreeLayer':KDTreeLayer,
})
PrintToLog("{} model loaded".format(modelPath))
return model
def LatestModel(path):
if(Const.ParseModelUID(path) is None):
folders = [os.path.join("." , "data",folder) for folder in os.listdir(os.path.join("." , "data"))
if os.path.isdir(os.path.join("." , "data",folder))
and path == Const.RemoveUID(Const.ParseModelName(folder))
and len(Paths.GetFiles(os.path.join("." , "data",folder), findExtesions=".h5")) > 0]
path = max(folders, key=os.path.getctime)
else:
path = os.path.join("." , "data", Const.ParseModelName(path))
try:
latestModel = max(Paths.GetFiles(path, findExtesions=".h5"), key=os.path.getctime)
except:
print(f"No model found in: {path}")
latestModel = None
return latestModel
import re
def ModelValMIOU(path):
result = re.findall("val\((.+)\)", path)
return float(result[0])
def HighestValMIOUModel(path):
if(not os.path.isdir(path)):
path = os.path.join("." , "data", os.path.basename(path).split("_")[0])
latestModel = max(Paths.GetFiles(path, findExtesions=".h5"), key=ModelValMIOU)
return latestModel
def LoadModel(modelPath, consts):
model = ReadModel(modelPath)
modified = False
if(model.output.shape[2] != consts.classCount):
print("Model output {} classes changed to {}".format(model.output.shape[2], consts.classCount))
modified = True
model = ModifyModelOutput(model, consts.classCount)
model = CompileModel(model, consts.classCount)
# model.summary()
return model, modified
def ReadModelConfig(path):
Model = ReadModel(path)
modelConfig = Const.ParseModelConfig(path)
return Model, modelConfig
def CreateModelCopy(Model, modelConfig, in_pts, in_RGB):
inputFeatures = 1 if modelConfig.noFeature else modelConfig.featureComponents
newModel = CreateModel(modelConfig.classCount, inputFeatures, in_RGB, in_pts, noColor=modelConfig.noFeature, returnFeatures=True, applySoftmax=False)
if(Model != None):
for new_layer, layer in zip(newModel.layers, Model.layers):
new_layer.set_weights(layer.get_weights())
return newModel
def FuseModels(modelPaths, consts):
fusionModel = None
assert(modelPaths is None or len(modelPaths) == 2)
print("Model fusion")
if(not modelPaths is None):
ModelA, modelAConfig = ReadModelConfig(modelPaths[0])
ModelB, modelBConfig = ReadModelConfig(modelPaths[1])
else:
consts.noFeature = False
modelAConfig = consts
consts.noFeature = True
modelBConfig = consts
in_RGB = None
if(not modelAConfig.noFeature or not modelBConfig.noFeature):
in_RGB = Input(shape=(Const.npoints, consts.featureComponents), dtype=tf.float32, name = "In_RGB") #features
in_pts = Input(shape=(Const.npoints, Const.pointComponents), dtype=tf.float32, name = "In_pts") #points
newModelA = CreateModelCopy(ModelA, modelAConfig, in_pts, in_RGB)
newModelB = CreateModelCopy(ModelB, modelBConfig, in_pts, in_RGB)
x = tf.concat((newModelA.output[0], newModelB.output[0]), axis = 2) #fuse features from both models
x1, _ = PtConv(x, in_pts, K = 16, next_pts = Const.npoints, in_features = 2*128, out_features = 96)
x2, _ = PtConv(x1, in_pts, K = 16, next_pts = Const.npoints, in_features = 96, out_features = 48)
x0d = tf.concat([x2, newModelA.output[1], newModelB.output[1]], axis = 2)
out_labels = tf.reshape(x0d, (-1, x0d.shape[2]))
out_labels = Dropout(rate=0.5)(out_labels)
out_labels = DenseInitialized(consts.classCount)(out_labels)
out_labels = tf.reshape(out_labels, (-1, x0d.shape[1], out_labels.shape[1]))
out_labels = tf.nn.softmax(out_labels)
fusionModel = Model([in_pts] if in_RGB is None else [in_RGB, in_pts], out_labels, name ="model")
nontrainableNames = [x.name for x in newModelA.layers] + [x.name for x in newModelB.layers]
# nontrainableNames = [x.name for x in newModelA.layers]
count = 0
for i, layer in enumerate(fusionModel.layers):
if(layer.name in nontrainableNames):
layer.trainable = False
count += 1
PrintToLog(f"{len(fusionModel.layers)-count}/{len(fusionModel.layers)} layers are trainable.")
fusionModel = CompileModel(fusionModel, consts.classCount)
# fusionModel.summary()
return fusionModel
class MIOU(tf.keras.metrics.Metric):
def __init__(self, classCount, name='miou', **kwargs):
super(MIOU, self).__init__(name=name, **kwargs)
self.cm = self.add_weight(name=name, shape = (classCount, classCount), initializer='zeros', dtype = tf.int64)
self.classCount = classCount
def update_state(self, y_true, y_pred, sample_weight=None):
TrueLbl = tf.argmax(tf.reshape(y_true, [-1, self.classCount]), axis= 1)
PredLbl = tf.argmax(tf.reshape(y_pred, [-1, self.classCount]), axis= 1)
confusion_matrix = tf.math.confusion_matrix(TrueLbl, PredLbl, self.classCount)
self.cm.assign_add(tf.cast(confusion_matrix, tf.int64))
def result(self):
union = tf.linalg.diag_part(self.cm)
rowSum = tf.math.reduce_sum(self.cm, axis = 0)
colSum = tf.math.reduce_sum(self.cm, axis = 1)
intersection = (colSum + rowSum - union)
intersection = tf.where(tf.equal(intersection, tf.constant(0, dtype=tf.int64)), tf.constant(1, dtype=tf.int64), intersection)
iou = union / intersection
miou = tf.expand_dims(tf.convert_to_tensor(tf.reduce_sum(iou) / tf.cast(iou.shape[0], dtype=np.float64)), 0)
return tf.concat((tf.expand_dims(miou,1), tf.cast(tf.expand_dims(iou,1), tf.float64)), 0)
def reset_states(self):
# The state of the metric will be reset at the start of each epoch.
self.cm.assign(tf.zeros((self.classCount, self.classCount), dtype=tf.int64))
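# Worked example (made-up numbers) of the confusion-matrix arithmetic above:
# for cm = [[8, 2], [1, 9]] the diagonal [8, 9] is the per-class intersection,
# colSum + rowSum - diag = [11, 12] is the per-class union, so
# iou = [8/11, 9/12] ~= [0.727, 0.750] and miou ~= 0.739. (Note the local names
# `union`/`intersection` are swapped relative to the usual terminology; the
# computed ratio is still the standard IoU.)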
def moving_miou_metric(classCount):
def moving_iou(y_true, y_pred):
TrueLbl = tf.argmax(tf.reshape(y_true, [-1, classCount]), axis= 1)
PredLbl = tf.argmax(tf.reshape(y_pred, [-1, classCount]), axis= 1)
cm = tf.math.confusion_matrix(TrueLbl, PredLbl, classCount)
union = tf.linalg.diag_part(cm)
rowSum = tf.math.reduce_sum(cm, axis = 0)
colSum = tf.math.reduce_sum(cm, axis = 1)
intersection = (colSum + rowSum - union)+1
iou = union / intersection
return tf.reduce_sum(iou) / tf.cast(tf.math.maximum(iou.shape[0], 1), dtype=np.float64)
return moving_iou
class IOU(tf.keras.metrics.Metric):
def __init__(self, classCount, classIndex, name='iou', **kwargs):
super(IOU, self).__init__(name=name, **kwargs)
self.cm = self.add_weight(name=name, shape = (classCount, classCount), initializer='zeros', dtype = tf.int64)
self.classCount = classCount
self.classIndex = classIndex
def update_state(self, y_true, y_pred, sample_weight=None):
TrueLbl = tf.argmax(tf.reshape(y_true, [-1, self.classCount]), axis= 1)
PredLbl = tf.argmax(tf.reshape(y_pred, [-1, self.classCount]), axis= 1)
confusion_matrix = tf.math.confusion_matrix(TrueLbl, PredLbl, self.classCount)
self.cm.assign_add(tf.cast(confusion_matrix, tf.int64))
def result(self):
union = tf.linalg.diag_part(self.cm)
rowSum = tf.math.reduce_sum(self.cm, axis = 0)
colSum = tf.math.reduce_sum(self.cm, axis = 1)
intersection = (colSum + rowSum - union)
intersection = tf.where(tf.equal(intersection, tf.constant(0, dtype=tf.int64)), tf.constant(1, dtype=tf.int64), intersection)
iou = union / intersection
return tf.cast(tf.expand_dims(iou, 1)[self.classIndex], tf.float64)
def reset_states(self):
# The state of the metric will be reset at the start of each epoch.
self.cm.assign(tf.zeros((self.classCount, self.classCount), dtype=tf.int64))
def weighted_categorical_crossentropy(weights):
# weights = [0.9,0.05,0.04,0.01]
def wcce(y_true, y_pred):
Kweights = tf.constant(weights)
y_true = tf.cast(y_true, y_pred.dtype)
return tf.keras.losses.categorical_crossentropy(y_true, y_pred) * tf.math.reduce_sum(y_true * Kweights, axis=-1)
return wcce
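# Hedged usage sketch (weights are placeholders): compiling with the weighted loss
# instead of the plain categorical cross-entropy used in CompileModel below.
# model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
#               loss=weighted_categorical_crossentropy([0.7, 5.0]),
#               metrics=[IOU(2, 1, name="curb")])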
def CompileModel(model, classCount):
model.compile(
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3, epsilon = 1e-8),
loss = tf.keras.losses.CategoricalCrossentropy(),
# loss = weighted_categorical_crossentropy([0.7, 5]),
metrics= [IOU(classCount, 0, name="other"), IOU(classCount, 1, name="curb")] if classCount == 2 else [MIOU(classCount)]
)
return model
class IOUPerClass(tf.keras.callbacks.Callback):
def __init__(self, plot_path, classNames, firstEpoch = 0, metric = "miou"):
self.metric = metric
self.epoch = firstEpoch
self.classCount = len(classNames)
self.classNames = classNames
self.path = plot_path
print(f"IOU logs path: {self.path}")
self.writers = []
self.val_writers = []
ioupath = os.path.join(plot_path, "iou")
os.makedirs(ioupath, exist_ok=True)
for i in range(self.classCount):
path = os.path.join(ioupath, classNames[i])
os.makedirs(path, exist_ok=True)
self.writers.append(tf.summary.create_file_writer(path))
path = os.path.join(ioupath, "val_"+classNames[i])
os.makedirs(path, exist_ok=True)
self.val_writers.append(tf.summary.create_file_writer(path))
# print("Writer path: ", path)
self.InitializeMIOUWriter()
def InitializeMIOUWriter(self):
mioupath = os.path.join(self.path, "miou")
os.makedirs(mioupath, exist_ok=True)
path = os.path.join(mioupath, "miou")
os.makedirs(path, exist_ok=True)
self.miou_writer = tf.summary.create_file_writer(path)
path = os.path.join(mioupath, "val_miou")
os.makedirs(path, exist_ok=True)
self.val_miou_writer = tf.summary.create_file_writer(path)
def WriteLog(self, writer, metric, logs, epoch, tag = "miou"):
value = logs.get(metric)
if(value is None):
print(f"Failed getting {metric} log")
return False
with writer.as_default():
tf.summary.scalar(tag, value[0][0], step=epoch)
writer.flush()
def WriteLogs(self, writers, metric, logs, epoch, tag = "iou"):
metrix = logs.get(metric)
if(metrix is None):
print(f"Failed getting {metric} log")
return False
iou = [i[0] for i in metrix[len(metrix)-self.classCount:]]
for i in range(len(iou)):
with writers[i].as_default():
tf.summary.scalar(tag, iou[i], step=epoch)
writers[i].flush()
def on_epoch_end(self, batch, logs=None):
self.WriteLogs(self.writers, self.metric, logs, self.epoch)
self.WriteLogs(self.val_writers, "val_"+self.metric, logs, self.epoch)
self.WriteLog(self.miou_writer, self.metric, logs, self.epoch)
self.WriteLog(self.val_miou_writer, "val_"+self.metric, logs, self.epoch)
self.epoch += 1
logSaveDir = ""
def WriteToLog(msg):
if(os.path.isdir(logSaveDir)):
logFile = open(logSaveDir+f"/training.log", "a")
logFile.write(msg+"\n")
logFile.close()
def PrintToLog(msg):
print(msg)
WriteToLog(msg)
class ModelSaveCallback(tf.keras.callbacks.Callback):
def __init__(self, saveDir, trainingSteps, metric = "accuracy", modelNamePrefix = "", sendNotifications = False):
super().__init__()
self.saveDir = saveDir
self.metric = metric
self.modelNamePrefix = modelNamePrefix
self.epoch = 0
self.trainingSteps = trainingSteps
self.sendNotifications = sendNotifications
if(self.sendNotifications):
self.notifyDevice = Notify()
os.makedirs(self.saveDir, exist_ok=True)
WriteToLog(f"Training: {modelNamePrefix}")
def on_epoch_end(self, epoch, logs=None):
self.epoch = epoch + 1
if(len(logs) > 0):
miou = logs.get(self.metric)[0]*100
val_metric = "val_"+self.metric
val_miou = logs.get(val_metric)[0]*100
SaveModel(self.saveDir, epoch, self.model, miou, val_miou, self.modelNamePrefix)
message = "Ep: {0}. {1}: {2:.3}%. {3}: {4:.3}%".format(self.epoch, self.metric, miou, val_metric, val_miou)
WriteToLog(message)
f = open("demofile3.txt", "w")
f.write("Woops! I have deleted the content!")
f.close()
if(self.sendNotifications):
try:
self.notifyDevice.send(self.modelNamePrefix + " " + message)
except:
print("notifyDevice error")
# def on_batch_end(self, batch, logs=None):
# progress = batch/self.trainingSteps * 100
# if(progress % 10 == 0):
# try:
# message = "Ep. {0} {1}% done. {2}: {3:.3}%".format(self.epoch+1, int(progress), self.metric, logs.get(self.metric)*100)
# self.notifyDevice.send(message)
# except:
# print("notifyDevice error")
def ParseEpoch(modelPath):
filename = os.path.basename(modelPath)
return int(filename.split("_")[2])
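# Hypothetical example of the checkpoint name convention produced by SaveModel
# further below: "Curbs(10&2)(Rotate)_ab12cd_37_train(0.91)_val(0.88).h5"
# splits on "_" into [name, uid, epoch, ...], so ParseEpoch returns 37.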
def GetValidationData(testFiles, consts, batchesCount = 100, newDataGeneration = False):
print("Gathering validation data...")
print(f"Test files: {testFiles}")
if(newDataGeneration):
PrintToLog("Use TestSequence for validation.")
assert(len(testFiles) == 1)
seq = TestSequence(testFiles[0], consts, test = True)
else:
PrintToLog("Use TrainSequence for validation.")
seq = TrainSequence(testFiles, batchesCount, consts, dataAugmentation = False)
if not consts.noFeature:
ftsList = np.zeros((0, consts.npoints, consts.featureComponents), np.float32)
ptsList = np.zeros((0, consts.npoints, 3), np.float32)
lbsList = np.zeros((0, consts.npoints, consts.classCount), np.uint8)
if(newDataGeneration):
indexes = np.arange(min(batchesCount, len(seq)))
np.random.shuffle(indexes)
else:
indexes = range(batchesCount)
for i in indexes:
if consts.noFeature:
if(newDataGeneration):
ptslbl = seq.__getitem__(i)
else:
pts, lbl = seq.__getitem__(i)
ptslbl = [pts[0], lbl]
ptsList = np.concatenate((ptsList, ptslbl[0]), 0)
lbsList = np.concatenate((lbsList, ptslbl[1]), 0)
else:
if(newDataGeneration):
ftsptslbl = seq.__getitem__(i)
else:
ftspts, lbl = seq.__getitem__(i)
ftsptslbl = [ftspts[0], ftspts[1], lbl]
ftsList = np.concatenate((ftsList, ftsptslbl[0]), 0)
ptsList = np.concatenate((ptsList, ftsptslbl[1]), 0)
lbsList = np.concatenate((lbsList, ftsptslbl[2]), 0)
PrintToLog(f"Generated {len(lbsList)} validation samples.")
if consts.noFeature:
return (ptsList, lbsList)
else:
return ([ftsList, ptsList], lbsList)
def TrainModel(trainFiles, testFiles, consts : Const, modelPath = None, saveDir = Paths.dataPath, classes = None, first_epoch = 0, epochs = None, sendNotifications = False):
global logSaveDir  # let WriteToLog/PrintToLog pick up this run's log directory
model = None
modelName = None
if(modelPath != None):
if(not isinstance(modelPath, list)):
modelName = Const.ParseModelName(modelPath)
if(consts.Name() != Const.RemoveUID(modelName)):
modelName = consts.Name(consts.UID())
logSaveDir = saveDir + f"/{modelName}/"
if(isinstance(modelPath, list)):
model = FuseModels(modelPath, consts)
else:
model, modified = LoadModel(modelPath, consts)
if(not modified):
first_epoch = ParseEpoch(modelPath) +1
else:
if(consts.Fusion):
model = FuseModels(None, consts)
else:
model = CreateModel(consts.classCount, 1 if consts.noFeature else consts.featureComponents, noColor=consts.noFeature)
if(modelName is None or modelName == ""):
modelName = consts.Name(consts.UID())
logSaveDir = saveDir + f"/{modelName}/"
PrintToLog("Train {} on {} files. Test on {} files".format(modelName, len(trainFiles), len(testFiles)))
PrintToLog("Validate on :" + str(testFiles))
trainingSteps = int((1000*16)/consts.batchSize) if not Const.IsWindowsMachine() else int(10)
PrintToLog("Batch size: {}, trainingSteps: {}".format(consts.batchSize, trainingSteps))
logsPath = os.path.join(consts.logsPath, Const.RemoveUID(modelName))
os.makedirs(logsPath, exist_ok=True)
callbacks_list = []
callbacks_list.append(ModelSaveCallback(logSaveDir, trainingSteps, "curb", modelNamePrefix = modelName, sendNotifications=sendNotifications))
# callbacks_list.append(IOUPerClass(logsPath, consts.classNames[1:], first_epoch+1))
# callbacks_list.append(tf.keras.callbacks.TensorBoard(log_dir=logsPath, update_freq="batch", histogram_freq=0, profile_batch = 0)) # tensorboard 2.0.2
seq = TrainSequence(trainFiles, trainingSteps, consts)
validationSteps = int(((150 if not Const.IsWindowsMachine() else 10) * 16)/consts.batchSize)
validationData = None if len(testFiles) == 0 else GetValidationData(testFiles, consts, validationSteps)
if(epochs is None):
epochs = 20 if consts.Fusion else 100
model.fit(seq, validation_data = validationData, epochs = epochs, batch_size = consts.batchSize, workers = consts.batchSize, max_queue_size = 300, callbacks=callbacks_list, initial_epoch = first_epoch)
def EvaluateModels(modelsList, testFiles, consts, x = None, y = None):
if(x is None or y is None):
validationSteps = int(((150 if not Const.IsWindowsMachine() else 10) * 16)/consts.batchSize)
x, y = GetValidationData(testFiles, consts, validationSteps, newDataGeneration = False)
for file in modelsList:
model, _ = LoadModel(file, consts)
metrics = model.evaluate(x, y, batch_size = consts.batchSize, workers = consts.batchSize, max_queue_size = 300)
# print(f"miou: {metrics[2][0][0]*100:.3}")
def SaveModel(saveDir, epoch, model, train_score, val_score=0, modelNamePrefix = ""):
if(modelNamePrefix != ""):
modelNamePrefix += "_"
fileName = saveDir+"/{0}{1}{2}{3}.h5".format(modelNamePrefix, epoch, f"_train({train_score:.3})", f"_val({val_score:.3})" if val_score != 0 else "")
if(not os.path.isdir(saveDir)):
os.mkdir(saveDir)
if(os.path.exists(fileName)):
os.remove(fileName)
model.save(fileName, include_optimizer=False)
def RotatePointCloud(batch_data):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, sinval, 0],
[-sinval, cosval, 0],
[0, 0, 1],])
return np.dot(batch_data, rotation_matrix)
def JitterRGB(features):
features = features.astype(np.uint8)
assert(np.max(features) > 1)
img = Image.fromarray(np.expand_dims(features,0), mode="RGB")
low = 0.4
high = 1.6
#1 is baseline
img = ImageEnhance.Brightness(img).enhance(np.random.uniform(low, high))
img = ImageEnhance.Color(img).enhance(np.random.uniform(low, high))
img = ImageEnhance.Contrast(img).enhance(np.random.uniform(low, high))
img = ImageEnhance.Sharpness(img).enhance(np.random.uniform(low, high))
if(np.random.uniform(low, high) > 1):
img = ImageOps.equalize(img)
if(np.random.uniform(low, high) > 1):
img = ImageOps.autocontrast(img)
new_features = np.array(img).reshape((-1, 3))
return new_features
def JitterReflectance(features, sigma=40): #input [0; 255]
assert(features.shape[1] == 1)
randJitters = np.random.randint(-sigma, sigma, size = features.shape)
features += randJitters
features = np.clip(features, 0, 255)
return features
def JitterPoints(points, sigma=0.01):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
C = 3
assert(points.shape[1] == C)
randJitters = np.random.uniform(-sigma, sigma, size = points.shape)
return points + randJitters
def Mirror(points, axis, min = True):
if(min):
axisValue = np.amin(points[:,axis])
else:
axisValue = np.amax(points[:,axis])
distances = np.abs(points[:, axis] - axisValue)
newpoints = np.array(points, copy=True)
newpoints[:,axis] = newpoints[:,axis] + distances*(-2 if min else 2)
return newpoints
def MirrorPoints(points):
assert(len(points.shape) == 2 and points.shape[1] == 3)
mirrorDirection = random.choice(["xMin", "xMax", "yMin", "yMax", ""])
if(mirrorDirection == "xMin"):
points = Mirror(points, 0, min = True)
elif(mirrorDirection == "xMax"):
points = Mirror(points, 0, min = False)
elif(mirrorDirection == "yMin"):
points = Mirror(points, 1, min = True)
elif(mirrorDirection == "yMax"):
points = Mirror(points, 1, min = False)
return points
def ScalePoints(points, sigma = 0.02):
""" Scale up or down random by small percentage
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, scaled batch of point clouds
"""
assert(points.shape[1]==3)
scale = np.random.uniform(1-sigma, 1+sigma)
scale_matrix = np.array([[scale, 0, 0],
[0, scale, 0],
[0, 0, scale]])
scaled = np.dot(points, scale_matrix)
return scaled
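# A hedged, self-contained sketch chaining the geometric augmentations above in the
# same order TrainSequence.__getitem__ applies them; `points` is an (N, 3) array.
def _example_augment(points):
    points = MirrorPoints(points)
    points = RotatePointCloud(points)
    points = ScalePoints(points, sigma=0.02)
    return JitterPoints(points, sigma=0.01)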
class TrainSequence(Sequence):
def __init__(self, filelist, iteration_number, consts : Const, dataAugmentation = True):
self.filelist = filelist
self.ptsList = [np.load(file) for file in self.filelist]
self.ptsList = sorted(self.ptsList, key=len)
self.ptsListCount = np.cumsum([len(pts) for pts in self.ptsList])
self.cts = consts
self.dataAugmentation = dataAugmentation
self.iterations = iteration_number
def __len__(self):
return int(self.iterations)
def PickRandomPoint(self, lbl):
lblIdx = []
while True:
randClass = random.randint(0, self.cts.classCount-1)
lblIdx = np.where(lbl == randClass)[0]
if(len(lblIdx) >= 2):
break
return lblIdx[random.randint(0, len(lblIdx)-1)]
def __getitem__(self, _):
if not self.cts.noFeature:
ftsList = np.zeros((self.cts.batchSize, self.cts.npoints, self.cts.featureComponents), np.float32)
ptsList = np.zeros((self.cts.batchSize, self.cts.npoints, 3), np.float32)
lbsList = np.zeros((self.cts.batchSize, self.cts.npoints, self.cts.classCount), np.uint8)
for i in range(self.cts.batchSize):
# load the data
ptIdx = random.randint(0, self.ptsListCount[-1])
pts = self.ptsList[np.argmax(self.ptsListCount >= ptIdx)]
# if(self.cts.featureComponents == 1):
# keepPts = (pts[:, 4] != 0)
# else:
# keepPts = (pts[:, 6] != 0)
# pts = pts[keepPts]
# get the features
if(self.cts.featureComponents == 1):
if not self.cts.noFeature:
fts = np.expand_dims(pts[:,3], 1).astype(np.float32)
lbs = pts[:,4].astype(int)
else:
if not self.cts.noFeature:
fts = pts[:,3:6].astype(np.float32)
lbs = pts[:,6].astype(int)
if(np.min(lbs) == 1):
lbs -= 1 #class 0 is filtered out
# get the point coordinates
pts = pts[:, :3]
# pick a random point
pt_id = random.randint(0, pts.shape[0]-1)
pt = pts[pt_id]
# create the mask
mask_x = np.logical_and(pts[:,0]<pt[0]+self.cts.blocksize/2, pts[:,0]>pt[0]-self.cts.blocksize/2)
mask_y = np.logical_and(pts[:,1]<pt[1]+self.cts.blocksize/2, pts[:,1]>pt[1]-self.cts.blocksize/2)
mask = np.logical_and(mask_x, mask_y)
temppts = pts[mask]
templbs = lbs[mask]
if not self.cts.noFeature:
tempfts = fts[mask]
# random selection
choice = np.random.choice(temppts.shape[0], self.cts.npoints, replace=True)
temppts = temppts[choice]
if not self.cts.noFeature:
tempfts = tempfts[choice]
templbs = templbs[choice]
encodedLbs = np.zeros((len(templbs), self.cts.classCount))
encodedLbs[np.arange(len(templbs)),templbs] = 1
templbs = encodedLbs
# if self.dataAugmentation:
# dt = DataTool()
# dt.VisualizePointCloudAsync([temppts], [tempfts/255])
# data augmentation
if self.dataAugmentation:
if(self.cts.Mirror):
temppts = MirrorPoints(temppts)
if(self.cts.Rotate):
temppts = RotatePointCloud(temppts)
if(self.cts.Scale):
temppts = ScalePoints(temppts, sigma = 0.02)
if(self.cts.Jitter):
temppts = JitterPoints(temppts, sigma = 0.01)
if(not self.cts.noFeature and self.cts.FtrAugment):
if(self.cts.featureComponents == 3):
tempfts = JitterRGB(tempfts)
elif(self.cts.featureComponents == 1):
tempfts = JitterReflectance(tempfts)
if(not self.cts.noFeature):
tempfts = tempfts.astype(np.float32)
tempfts = tempfts/255 # - 0.5
# if self.dataAugmentation:
# # visualize data
# dt = DataTool()
# dt.VisualizePointCloud([temppts], [tempfts], windowName = "Augmented")
# linePoints = np.where(templbs[:, 1] == 1)[0]
# DataTool().VisualizePointCloud([np.delete(temppts, linePoints, axis=0), temppts[linePoints]], [[0,0,1], [1,0,0]], windowName="Sampled")
if not self.cts.noFeature:
ftsList[i] = np.expand_dims(tempfts, 0)
ptsList[i] = np.expand_dims(temppts, 0)
lbsList[i] = np.expand_dims(templbs, 0)
if self.cts.noFeature:
return [ptsList], lbsList
else: # works for RGB and fusion models
return [ftsList, ptsList], lbsList
class TestSequence(Sequence):
def __init__(self, filename, consts, splitDataSetToParts = -1, windowsMachineCap = True, test = False):
self.filename = filename
self.batchSize = consts.batchSize
self.npoints = consts.npoints
self.nocolor = consts.noFeature
self.bs = consts.blocksize
self.featureComponents = consts.featureComponents
self.fusion = consts.Fusion
self.test = test
if(self.test):
self.classCount = consts.classCount
self.lbl = []
if(self.filename.endswith(".ply")):
from plyfile import PlyData
plydata = PlyData.read(self.filename)
x = plydata["vertex"].data["x"].astype(np.float32)
y = plydata["vertex"].data["y"].astype(np.float32)
z = plydata["vertex"].data["z"].astype(np.float32)
fts = plydata["vertex"].data["reflectance"].astype(np.float32)
self.xyzrgb = np.concatenate((np.expand_dims(x,1), np.expand_dims(y,1), np.expand_dims(z,1), np.expand_dims(fts, 1)), axis=1)
elif(self.filename.endswith(".npy")):
xyzftsl = np.load(self.filename)
if(xyzftsl.shape[1] == 5):
self.xyzrgb = xyzftsl[:, :4]
if(self.test):
self.lbl = xyzftsl[:, 4] - 1
else: #if(xyzftsl.shape[1] == 7):
self.xyzrgb = xyzftsl[:, :6]
if(self.test):
self.lbl = xyzftsl[:, 6] - 1
elif(self.filename.endswith(".las")):
from dataTool import ReadXYZRGB
xyz, rgb = ReadXYZRGB(self.filename)
self.xyzrgb = np.concatenate((xyz, rgb), 1)
print("Test_step:", consts.test_step)
step = consts.test_step
discretized = ((self.xyzrgb[:,:2]).astype(float)/step).astype(int)
self.allpts = np.unique(discretized, axis=0)
self.allpts = self.allpts.astype(np.float64)*step
if(consts.IsWindowsMachine() and windowsMachineCap):
self.allpts = self.allpts[:115] #small sample for testing
self.splitDataSetToParts = splitDataSetToParts
if(self.splitDataSetToParts != -1):
self.ptIndex = 0
else:
self.pts = self.allpts
self.idxList = np.zeros((len(self.pts), self.npoints), np.int64)
self.sparseCubes = 0
self.sparseCubesPtCount = 0
def LenParts(self):
if(self.splitDataSetToParts != -1):
return math.ceil(len(self.allpts)/self.splitDataSetToParts)
else:
return 1
def NextPart(self):
if(self.splitDataSetToParts <= 0):
return False
if(self.ptIndex >= len(self.allpts)):
return False
self.nextIndex = np.min([self.ptIndex+self.splitDataSetToParts, len(self.allpts)])
self.pts = self.allpts[self.ptIndex : self.nextIndex]
self.ptIndex = self.nextIndex
self.idxList = np.zeros((len(self.pts), self.npoints), np.int64)
return True
def __len__(self):
return math.ceil(len(self.pts)/self.batchSize)
def compute_mask(self, pt, bs):
# build the mask
mask_x = np.logical_and(self.xyzrgb[:,0]<pt[0]+bs/2, self.xyzrgb[:,0]>pt[0]-bs/2)
mask_y = np.logical_and(self.xyzrgb[:,1]<pt[1]+bs/2, self.xyzrgb[:,1]>pt[1]-bs/2)
mask = np.logical_and(mask_x, mask_y)
return mask
def __getitem__(self, index):
size = min(self.batchSize, len(self.pts) - (index * self.batchSize))
if not self.nocolor:
ftsList = np.zeros((size, self.npoints, self.featureComponents), np.float32)
ptsList = np.zeros((size, self.npoints, 3), np.float32)
if(self.test):
lblList = np.zeros((size, self.npoints, self.classCount), np.float32)
for i in range(size):
# get the data
mask = self.compute_mask(self.pts[index*self.batchSize + i], self.bs)
pts = self.xyzrgb[mask]
if(self.test):
lbl = self.lbl[mask]
if(len(pts) < self.npoints):
self.sparseCubes += 1
self.sparseCubesPtCount += len(pts)
# choose right number of points
choice = np.random.choice(pts.shape[0], self.npoints, replace=True)
pts = pts[choice]
if(self.test):
lbl = lbl[choice]
# labels will contain indices in the original point cloud
idx = np.where(mask)[0][choice]
self.idxList[index*self.batchSize + i] = np.expand_dims(idx, 0)
# separate between features and points
if not self.nocolor:
if(self.featureComponents == 1):
fts = np.expand_dims(pts[:,3], 1)
else:
fts = pts[:,3:6]
fts = fts/255 #- 0.5
pts = pts[:, :3].copy()
if not self.nocolor:
ftsList[i] = np.expand_dims(fts, 0)
ptsList[i] = np.expand_dims(pts, 0)
if self.test:
lblList[i, np.arange(len(lbl)), lbl.astype(int)] = 1
add_lbl = []
if self.test:
add_lbl = [lblList]
if self.nocolor:
return [ptsList] + add_lbl
else: #works for RGB
return [ftsList, ptsList] + add_lbl
def GenerateData(modelPath, testFiles, consts, outputFolder, NameIncludeModelInfo = False):
model, _ = LoadModel(modelPath, consts)
if(not NameIncludeModelInfo):
outputFolder = os.path.join(outputFolder, Paths.FileName(modelPath))
os.makedirs(outputFolder, exist_ok=True)
for file in testFiles:
t = time()
baseName = Paths.FileName(file)
if(NameIncludeModelInfo):
baseName = baseName + "_" + Paths.FileName(modelPath)
baseName += ".txt"
newFile = os.path.join(outputFolder, baseName)
if(os.path.exists(newFile)):
print("All ready exists: ",newFile)
continue
else:
open(newFile, "a").close()
print("Generating: ", newFile)
GenerateFile(model, file, consts, newFile)
print("Done in {:02d}:{:02d} min.".format(int((time() - t)/60), int((time() - t)%60)))
def GenerateLargeData(modelPath, voxelFiles, consts, outputFolder, orgFiles = None, replace = False, Upscale = True, NameIncludeModelInfo = False):
from time import time
model, _ = LoadModel(modelPath, consts)
if(not NameIncludeModelInfo):
outputFolder = outputFolder + Paths.FileName(modelPath)
if not Upscale:
outputFolder = outputFolder+"/vox_lbl/"
os.makedirs(outputFolder, exist_ok=True)
if isinstance(voxelFiles, str):
voxelFiles = Paths.GetFiles(voxelFiles)
if isinstance(orgFiles, str):
orgFiles = Paths.GetFiles(orgFiles)
for voxelFile in voxelFiles:
baseName = Paths.FileName(voxelFile).replace("_voxels", "")
if not (orgFiles is None):
orgFile = [f for f in orgFiles if Paths.FileName(f).startswith(baseName)]
if(len(orgFile) != 1):
print("Skip: ", voxelFile)
continue
orgFile = orgFile[0]
else:
orgFile = None
t = time()
if(NameIncludeModelInfo):
baseName = baseName + "_" + Paths.FileName(modelPath)
if Upscale:
newFile = os.path.join(outputFolder, baseName+".labels")
else:
newFile = os.path.join(outputFolder, baseName+".npy")
if(not replace and os.path.exists(newFile)):
print(newFile," already exists.")
continue
flagFile = newFile+".tmp"
if(os.path.exists(flagFile)):
print("Other worker is generating: ", newFile)
continue
else:
open(flagFile, "a").close()
print("Generating: ", newFile)
GenerateLargeFile(model, voxelFile, orgFile, consts, newFile, Upscale = Upscale)
os.remove(flagFile)
print("{} generated in {:02d}:{:02d} min.".format(baseName, int((time() - t)/60), int((time() - t)%60)))
def GenerateFile(model, file, consts, outputFile, saveScores = True):
seq = TestSequence(file, consts)
output = model.predict(seq, workers = consts.batchSize, max_queue_size = 300, verbose = 1)
# for y in range(len(seq)):
# pts = seq.__getitem__(y)
# pts = pts[0]
# pred = model.predict(pts)
# for i in range(len(pred)):
# predPtsIdx = np.where(np.argmax(pred[i], axis = 1) == 1)[0]
# # truePtsIdx = np.where(np.argmax(lbl[i], axis = 1) == 1)[0]
# # print(f"True curb points: {len(truePtsIdx)}. Found curb points: {len(predPtsIdx)}")
# DataTool().VisualizePointCloud([np.delete(pts[i], predPtsIdx, axis=0), pts[i][predPtsIdx]], [[0,0,1], [1,0,0]])
idx = seq.idxList
xyzrgb = seq.xyzrgb[:,:3]
scores = np.zeros((xyzrgb.shape[0], consts.classCount))
for i in range(len(output)):
scores[idx[i]] += output[i]
mask = np.logical_not(scores.sum(1)==0)
scores = scores[mask]
pts_src = xyzrgb[mask]
# create the scores for all points
indexes = nearest_correspondance(pts_src.astype(np.float32), xyzrgb.astype(np.float32), K=1)
scores = scores[indexes]
if saveScores:
scoresFile = outputFile.replace(".txt", "_scores.npy")
np.save(scoresFile, scores)
print(f"Scores saved to: {scoresFile}")
scores = scores.argmax(1) + 1 #because all classes are shifted by one to avoid 0 (unclassified)
print(f"class 1: {len(np.where(scores == 1)[0])}, class 2: {len(np.where(scores == 2)[0])}")
import pandas as pd
print("Save labels: ", scores.shape)
pd.DataFrame(scores, dtype=np.uint8).to_csv(outputFile, sep='\t', header=None, index=None)
def SaveLabelsPnts(labels, outputFile):
import pandas as pd
print("Saving pts lbs...")
if(len(labels.shape) == 1):
pd.DataFrame(labels, dtype=np.uint8).to_csv(outputFile, sep='\t', header=None, index=None)
else:
np.save(outputFile, labels)
print("Pts lbs {} saved!".format(labels.shape))
def UpscaleToOriginal(originalPoints, pts_src, lbl, outputFile = None):
from tqdm import tqdm
# create the scores for all points
step = 10000000 #1000000
fullLbl = np.zeros((0,), np.int8)
print("KDTree magic. Source pts: {}. Queary pts: {}".format(len(pts_src), len(originalPoints)))
for i in tqdm(range(0, math.ceil(len(originalPoints)/step))):
a = i*step
b = a + np.min([len(originalPoints)-a, step])
indexes = nearest_correspondance(pts_src, originalPoints[a:b], K=1)
fullLbl = np.concatenate([fullLbl, lbl[indexes]], 0)
if(not (outputFile is None)):
SaveLabelsPnts(fullLbl, outputFile)
else:
return fullLbl
def GenerateLargeFile(model, voxelFile, originalFile, consts, outputFile, Upscale = True, saveScores = True):
from dataTool import ReadXYZ
from tqdm import tqdm
seq = TestSequence(voxelFile, consts, splitDataSetToParts=16000)
print("All pts: ", len(seq.allpts))
xyzrgb = seq.xyzrgb[:,:3]
scores = np.zeros((xyzrgb.shape[0], consts.classCount))
for _ in tqdm(range(seq.LenParts())):
seq.NextPart()
output = model.predict(seq, workers = consts.batchSize, max_queue_size = 300, verbose = 1)
idx = seq.idxList
for i in range(len(output)):
scores[idx[i]] += output[i]
mask = np.logical_not(scores.sum(1)==0)
scores = scores[mask]
pts_src = xyzrgb[mask].astype(np.float32)
if saveScores:
scoresFile = os.path.splitext(outputFile)[0]+"_scores.npy"
np.save(scoresFile, scores)
print(f"Scores saved to: {scoresFile}")
lbl = scores.argmax(1)
if(Upscale and not (originalFile is None)):
print("Load original file: ", originalFile)
originalPoints = ReadXYZ(originalFile).astype(np.float32)
assert(originalPoints.shape[1] == 3)
UpscaleToOriginal(originalPoints, pts_src, lbl, outputFile)
else:
SaveLabelsPnts(np.concatenate([pts_src, np.expand_dims(lbl, 1)], axis=1), outputFile)
def UpscaleFilesAsync(modelPath, voxelFolder, orgFolder, savePath):
import time
# notifyDevice = Notify()
savePath = savePath + Paths.FileName(modelPath)
print(f"Searching in folder: {savePath+"/vox_lbl/"}")
while True:
found = False
fileNames = Semantic3D.fileNames
for file in Paths.GetFiles(savePath, onlyNames=True, withoutExtension=True, findExtesions=('.labels')):
if(file in fileNames or file in fileNames.values()):
fileNames = {key:val for key, val in fileNames.items() if val != file and key != file}
if(len(fileNames) == 0):
print("Done upscaling files")
# notifyDevice.send("Done upscaling files")
return
for file in Paths.GetFiles(savePath+"/vox_lbl/", onlyNames=True, withoutExtension=True, findExtesions=('.npy')):
ptslbs = os.path.join(savePath+"/vox_lbl/", file+".npy")
# originalFile = os.path.join(orgFolder, file+".npy")
originalFile = os.path.join(orgFolder, file+".hdf5")
outputFile = os.path.join(savePath, file+".labels")
if(not os.path.exists(outputFile)):
found = True
open(outputFile, "a").close()
UpscaleFile(ptslbs, originalFile, outputFile)
if not found:
time.sleep(10) #sleep for 10 second and scan for job again
def UpscaleFile(ptslbsFile, originalFile, outputFile):
from dataTool import ReadLabels, ReadXYZ
print("Upscaling: {}".format(ptslbsFile))
scores = ReadLabels(ptslbsFile, readFormat = ".npy")
scores = np.squeeze(scores, 1)
pts_src = ReadXYZ(ptslbsFile, readFormat = ".npy")
originalPoints = ReadXYZ(originalFile)
UpscaleToOriginal(originalPoints, pts_src, scores, outputFile)
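# nearest_correspondance: K=1 nearest-neighbour lookup used for label transfer.
# Sketch of how it is called elsewhere in this file:
#   idx = nearest_correspondance(labelled_pts.astype(np.float32), query_pts.astype(np.float32), K=1)
#   query_lbl = labels[idx]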
def nearest_correspondance(pts_src, pts_dest, K=1):
# print("KDTree magic. Source pts: {}. Queary pts: {}".format(len(pts_src), len(pts_dest)))
# t = time()
kdt = KDTree(pts_src, leaf_size=20)
_, indexes = kdt.query(pts_dest, k = K)
# print("Done in {}:{} min.".format(int((time() - t)/60), int((time() - t)%60)))
return np.squeeze(indexes, 1)
def TestTestSequence(path, consts):
seq = TestSequence(path, consts)
allPts = np.zeros((len(seq.xyzrgb), 3))
for i in range(len(seq)):
inpt = seq[i]
ftsList = inpt[0]
ptsList = inpt[1]
for j in range(len(ptsList)):
allPts[seq.idxList[i*consts.batchSize + j]] = ptsList[j]
emptyPts = np.logical_not(allPts.sum(1) != 0)
print("sparseCubes: ",seq.sparseCubes)
print("mean sparseCubes pt count: ", seq.sparseCubesPtCount/seq.sparseCubes)
print("Not picked points: {} => {:.2f}%".format(len(emptyPts), len(emptyPts)/len(allPts)))
nonEmptyPts = np.logical_not(emptyPts)
a = seq.xyzrgb[emptyPts]
b = seq.xyzrgb[nonEmptyPts]
dt = DataTool()
dt.VisualizePointCloud([a, b], [[1,0,0], None])
if(os.path.exists("C:/Program Files")):
import open3d as o3d
import time
from dataTool import LoadRenderOptions, SaveRenderOptions, GetPointsIndexInBoundingBox, GetPointsInBoundingBox
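# BoxesIterator: Open3D animation callback used by ShowSequenceBoxes. Each step
# takes the next test block, draws its bounding box, moves up to 8192 randomly
# chosen points of that block into the "processed" cloud and colours them by
# class, so the visualisation gradually reveals how the sliding blocks cover the
# scene. The colour palette is picked from the maximum label value
# (9 -> NPM3D, 8 -> Semantic3D).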
class BoxesIterator:
def __init__(self, boxes, points, colors, labels):
# self.pc = o3d.geometry.PointCloud()
# self.pc.points = o3d.utility.Vector3dVector(points)
self.src_points = points
self.src_colors = colors if np.max(colors) <= 1 else colors/255
self.src_labels = labels
self.dst_points = np.zeros((0, 3), dtype = np.float64)
self.dst_colors = np.zeros((0, 3), dtype = np.float64)
self.boxes = boxes
self.i = 0
# self.kdt = KDTree(points, leaf_size=20)
self.trajectory = None
# if(os.path.exists("./data/camera_trajectory.json")):
# self.trajectory = o3d.io.read_pinhole_camera_trajectory("./data/camera_trajectory.json").parameters
# self.trajectory_i = 0
# self.trajectory_time = time.time()
grey = np.array([128, 128, 128])/255
red = np.array([136, 0, 1])/255
mint = np.array([170, 255, 195])/255
teal = np.array([0, 128, 128])/255
green = np.array([60, 180, 75])/255
verygreen = np.array([0, 255, 0])/255
brown = np.array([170, 110, 40])/255
# white = np.array([255, 255, 255])/255
black = np.array([0, 0, 0])/255
blue = np.array([0, 0, 255])/255
pink = np.array([255, 56, 152])/255
#NPM3D
self.colors = []
if(np.max(self.src_labels) == 9):
self.colors = [grey, red, blue, teal, mint, brown, pink, black, green]
#Semantic3D
elif(np.max(self.src_labels) == 8):
self.colors = [grey, verygreen, green, mint, red, blue, brown, black]
self.pc = o3d.geometry.PointCloud()
self.pc.points = o3d.utility.Vector3dVector(self.src_points)
self.box = o3d.geometry.LineSet()
lines = np.array([[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7],[0, 4], [1, 5], [2, 6], [3, 7]])
self.box.lines = o3d.utility.Vector2iVector(lines)
self.box.colors = o3d.utility.Vector3dVector(np.array([[1,0,0] for _ in range(len(lines))]))
self.initSet = False
def ColorPtsByClass(self, pts, lbl):
pts_colors = np.zeros((len(pts), 3), np.float64)
for i in range(0, len(self.colors)):
indexes = np.where(lbl == i+1)[0]
pts_colors[indexes] = self.colors[i]
return pts_colors
def BoxPts(self, bBox):
box = [[bBox[0], bBox[2], bBox[4]],
[bBox[1], bBox[2], bBox[4]],
[bBox[0], bBox[3], bBox[4]],
[bBox[1], bBox[3], bBox[4]],
[bBox[0], bBox[2], bBox[5]],
[bBox[1], bBox[2], bBox[5]],
[bBox[0], bBox[3], bBox[5]],
[bBox[1], bBox[3], bBox[5]]]
return np.array(box)
def AnimationFunction(self, vis):
# time.sleep(0.2)
if(self.i < len(self.boxes)):
pts = self.src_points[:, :2]
mask_x = np.logical_and(self.boxes[self.i][0]<pts[:,0], pts[:,0]<self.boxes[self.i][1])
mask_y = np.logical_and(self.boxes[self.i][2]<pts[:,1], pts[:,1]<self.boxes[self.i][3])
ptsIdx = np.where(np.logical_and(mask_x, mask_y))[0]
randIdx = np.random.choice(ptsIdx, min(8192, len(ptsIdx)), replace=False)
self.dst_points = np.concatenate((self.dst_points, self.src_points[randIdx]), axis = 0)
self.dst_colors = np.concatenate((self.dst_colors, self.ColorPtsByClass(self.src_points[randIdx], self.src_labels[randIdx])), axis = 0)
self.src_points = np.delete(self.src_points, randIdx, axis = 0)
self.src_labels = np.delete(self.src_labels, randIdx, axis = 0)
self.src_colors = np.delete(self.src_colors, randIdx, axis = 0)
self.pc.points = o3d.utility.Vector3dVector(np.concatenate((self.src_points, self.dst_points), 0))
self.pc.colors = o3d.utility.Vector3dVector(np.concatenate((self.src_colors, self.dst_colors), 0))
self.box.points = o3d.utility.Vector3dVector(self.BoxPts(self.boxes[self.i]))
vis.clear_geometries()
vis.add_geometry(self.pc, False)
vis.add_geometry(self.box, False)
self.i += 1
# print(f"{self.i}/{len(self.boxes)}", end="\r")
else:
print("Iteration over.")
if(not os.path.exists("./data/camera_trajectory.json")):
self.trajectory = None
if(self.trajectory is None):
# vis = LoadRenderOptions(vis, returnVis=True)
if(os.path.exists("./data/camera_trajectory.json")):
self.trajectory = o3d.io.read_pinhole_camera_trajectory("./data/camera_trajectory.json").parameters
self.trajectory_i = 0
self.trajectory_time = time.time()
else:
ctr = vis.get_view_control()
ctr.convert_from_pinhole_camera_parameters(self.trajectory[self.trajectory_i])
if(self.trajectory_i < len(self.trajectory)-1): #and time.time() - self.trajectory_time > 1
print(f"Trajectory: {self.trajectory_i}/{len(self.trajectory)}", end="\r")
self.trajectory_i += 1
self.trajectory_time = time.time()
return False
def ShowSequenceBoxes(ptsFile, lblFile, consts):
from dataTool import DataTool, ReadLabels
consts.test_step = 4
seq = TestSequence(ptsFile, consts, windowsMachineCap=False)
minZ = np.min(seq.xyzrgb[:,2])
maxZ = np.max(seq.xyzrgb[:,2])
boxes = []
for pt in seq.pts:
minX = pt[0] - consts.blocksize/2
maxX = pt[0] + consts.blocksize/2
minY = pt[1] - consts.blocksize/2
maxY = pt[1] + consts.blocksize/2
boxes.append([minX, maxX, minY, maxY, minZ, maxZ])
dt = DataTool()
# dt.VisualizePointCloud([seq.xyzrgb[:,:3]], [seq.xyzrgb[:,3:6]], bBoxes = boxes)
boxesitr = BoxesIterator(boxes, seq.xyzrgb[:,:3], seq.xyzrgb[:,3:], np.squeeze(ReadLabels(lblFile),1))
dt.VisualizePointCloud([seq.xyzrgb[:,:3]], animationFunction=boxesitr.AnimationFunction)
# dt.VisualizePointCloud([seq.xyzrgb[:,:3]])
def RunExperiments():
from dataTool import VisualizePointCloudClassesAsync, VisualizePointCloudClasses, ReadLabels, DataTool, ReadXYZ
# testCloud = "G:/PointCloud DataSets/NPM3D/test_10_classes/ajaccio_2.ply"
# testCloud = consts.Paths.processedTrain+"/Lille1_1_0.npy"
# VisualizePointCloudClassesAsync(testCloud, downSample=False, windowName="Keras")
# VisualizePointCloudClassesAsync(testCloud, "G:/PointCloud DataSets/NPM3D/generatedResults/ajaccio_2.txt", downSample=False, windowName="Keras")
# VisualizePointCloudClassesAsync(testCloud, "G:/PointCloud DataSets/NPM3D/torch_generated_data/results88.2%/ajaccio_2.txt", downSample=False, windowName="Torch")
# TestTestSequence(consts.Paths.processedTrain+"/Lille1_1_0.npy", consts)
# ShowSequenceBoxes(consts.Paths.processedTrain+"/Lille1_1_0.npy", consts)
# # pts = ReadXYZ(consts.Paths.processedTrain+"/Lille2_0.npy")
# true = ReadLabels(consts.Paths.processedTrain+"/Lille2_0.npy")
# # pts = ReadXYZ(consts.Paths.rawTrain+"/untermaederbrunnen_station3_xyz_intensity_rgb.hdf5")
# # true = ReadLabels(consts.Paths.rawTrain+"/untermaederbrunnen_station3_xyz_intensity_rgb.hdf5")
# # pred_file = "G:/PointCloud DataSets/NPM3D/torch_generated_data/results88.2%/Lille2_0.txt"
# pred_file = consts.Paths.generatedTest+"/"+Paths.FileName(modelPath)+"/Lille2_0.txt"
# # pred_file = consts.Paths.generatedTest+"/"+Paths.FileName(modelPath)+"/untermaederbrunnen_station3_xyz_intensity_rgb.labels"
# pred = ReadLabels(pred_file)
# VisualizePointCloudClasses(consts.Paths.processedTrain+"/Lille2_0.npy",
# pred_file,
# downSample=False, windowName="Red error",
# errorPoints = ((true != pred) == (true != 0)),
# delPoints = (true == 0))
# error = np.where(true == 0)[0]
# true = np.delete(true, error, 0)
# pred = np.delete(pred, error, 0)
# from sklearn.metrics import confusion_matrix
# import metrics
# cm = confusion_matrix(true, pred, labels=list(range(consts.classCount)))
# iou = metrics.stats_iou_per_class(cm)
# print("Mean iou:", iou[0])
# print("iou per class:", iou[1])
from dataTool import ReadXYZ, ReadLabels
from sklearn.metrics import confusion_matrix
from metrics import stats_accuracy_per_class, stats_iou_per_class
src_pts = ReadXYZ(r"G:\PointCloud DataSets\semantic3d\rawTrain\bildstein_station3_xyz_intensity_rgb.hdf5")
src_lbl = ReadLabels(r"G:\PointCloud DataSets\semantic3d\rawTrain\bildstein_station3_xyz_intensity_rgb.hdf5")
src_lbl = np.squeeze(src_lbl, 1)
delIndices = np.where(src_lbl == 0)
src_pts = np.delete(src_pts, delIndices, axis=0)
src_lbl = np.delete(src_lbl, delIndices, axis=0)
voxel_pts = ReadXYZ(r"G:\PointCloud DataSets\semantic3d\processedTrain(0.15m)\bildstein_station3_xyz_intensity_rgb_voxels.npy")
voxel_lbl = ReadLabels(r"G:\PointCloud DataSets\semantic3d\processedTrain(0.15m)\bildstein_station3_xyz_intensity_rgb_voxels.npy")
voxel_lbl = np.squeeze(voxel_lbl, 1)
upscaled_lbl = UpscaleToOriginal(src_pts, voxel_pts, voxel_lbl)
cm = confusion_matrix(src_lbl, upscaled_lbl)
avg_acc, avg_class = stats_accuracy_per_class(cm)
avg_iou, avg_iou_class = stats_iou_per_class(cm)
def RenameSemantic3DFiles(folder):
if(len(Paths.GetFiles(folder, findExtesions = ".labels")) == 0):
print("No files found.")
return
for file in Paths.GetFiles(folder, findExtesions = ".labels"):
if(Paths.FileName(file).endswith("(1)")):
os.remove(file)
else:
name = Paths.FileName(file)
newFileName = file.replace(name, Semantic3D.fileNames[name])
os.rename(file, newFileName)
if(os.path.getsize(newFileName) == 0):
print(f"{newFileName} if 0 bytes size")
if(len(Paths.GetFiles(folder, findExtesions = ".labels")) != 15):
print("Wrong number of files.")
else:
print("Done renaming: ", folder)
if __name__ == "__main__":
from NearestNeighbors import NearestNeighborsLayer, SampleNearestNeighborsLayer
from KDTree import KDTreeLayer, KDTreeSampleLayer
modelPath = None
# consts = NPM3D()
# consts = Semantic3D()
consts = Curbs()
consts.noFeature = True
# consts.Fusion = True
# consts.Scale = True
consts.Rotate = True
# consts.Mirror = True
# consts.Jitter = True
# consts.FtrAugment = True
testFiles = consts.TestFiles()
trainFiles = consts.TrainFiles()
modelPath = "Sem3D(vox)(fusion)(FullAugment)_3_train(86.2)_val(79.5).h5"
# modelPath = "Curbs(7&1)(noFeature)(Rotate)_21bdbe6aa82d4e259526ab46577e795a_25_train(75.1)_val(60.7).h5"
# modelPath = ["Sem3D(vox)(RGB)(FullAugment)_55_train(85.7)_val(79.9)", "Sem3D(NOCOL)_50_train(87.4)_val(69.1)"]
# modelPath = ["NPM3D(80&5)(RGB)(NoScale)_28_train(88.3)_val(73.2).h5", "NPM3D(80&5)(NOCOL)(FullAugment)_28_train(87.3)_val(71.5).h5"]
# modelPath = LatestModel("Sem3D(14&1)(noFeature)(Scale)(Rotate)(Mirror)(Jitter)")
# modelPath = LatestModel(consts.Name())
if(isinstance(modelPath,list)):
consts.Fusion = True
if(not consts.Fusion and not Const.IsWindowsMachine()):
tf.config.optimizer.set_jit(True) #Gives more than 10% boost!!!
print("XLA enabled.")
# modelPath = ["Sem3D(14&1)(noFeature)(Scale)(Rotate)(Mirror)(Jitter)_9bbee708a7814063af9d85070452abd8_59_train(85.2)_val(72.8)",
# "Sem3D(14&1)(noFeature)(Rotate)(Mirror)(Jitter)_ff2eb229084247d9a1c63caa519e9890_58_train(84.9)_val(75.5)",
# "Sem3D(14&1)(noFeature)_dffc17f77e924894bbdbdad818ab6994_40_train(85.1)_val(68.8)"]
# EvaluateModels([modelPath], testFiles, consts)
TrainModel(trainFiles, testFiles, consts, modelPath = modelPath)# , epochs = 8) #continue train
# TrainModel(trainFiles, testFiles, consts) #new model
# modelPath = HighestValMIOUModel("NPM3D(80&5)(fusion)(FullAugment)")
#NPM3D
# GenerateData(modelPath, Paths.GetFiles(consts.Paths.rawTest), consts, consts.Paths.generatedTest)
#Semantic3D
# GenerateLargeData(modelPath, Paths.Semantic3D.processedTest, Paths.Semantic3D.rawTest, consts, consts.Paths.generatedTest, Upscale=False)
# UpscaleFilesAsync(modelPath, Paths.Semantic3D.processedTest, Paths.Semantic3D.rawTest, Paths.Semantic3D.generatedTest)
# RenameSemantic3DFiles(Paths.Semantic3D.generatedTest + Paths.FileName(modelPath))
#Curbs
EvaluateModels([modelPath], testFiles, consts)
# GenerateData(modelPath, testFiles, consts, consts.Paths.pointCloudPath+"/generated/")
GenerateLargeData(modelPath, testFiles, consts, consts.Paths.pointCloudPath+"/generated/")

# ============================ second module copy ============================
from dataTool import ReadLabels, ReadXYZ, VisualizePointCloudClassesAsync, modelPath, DataTool
from imports import *
import math
import numpy as np
from time import time
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.utils import Sequence
from tensorflow.keras.layers import Input, BatchNormalization, Dense, Dropout, InputLayer
from sklearn.neighbors import KDTree
from sklearn.metrics import confusion_matrix
from PIL import Image, ImageEnhance, ImageOps
import random
# from notify_run import Notify
class Const:
@staticmethod
def IsWindowsMachine():
if os.path.isdir("C:/Program Files"):
return True
else:
return False
if os.path.isdir("C:/Program Files"):
batchSize = 8
else:
batchSize = 16 #25
#Placeholders
classCount = Label.Semantic3D.Count-1
classNames = Label.Semantic3D.Names
testFiles = []
excludeFiles = []
Paths = Paths.Semantic3D
epochs = 100
pointComponents = 3
featureComponents = 3 #rgb
classCount = 0
npoints = 8192
blocksize = 8
test_step = 0.5
name = ""
#Algorithm configuration
noFeature = False
Fusion = False
Scale = False
Rotate = False
Mirror = False
Jitter = False
FtrAugment = False
logsPath = "./logs"
### MODEL CONFIG
pl = 64
### MODEL CONFIG
def BuildSpecDict(self):
return {"noFeature" : self.noFeature,
"Fusion" : self.Fusion,
"Scale" : self.Scale,
"Rotate" : self.Rotate,
"Mirror" : self.Mirror,
"Jitter" : self.Jitter,
"FtrAugment" : False if self.noFeature else self.FtrAugment,
}
def Name(self, UID = ""):
modelName = self.name
modelName += f"({len(self.TrainFiles())}&{len(self.TestFiles())})"
for spec, value in self.BuildSpecDict().items():
if(value == True):
modelName += f"({spec})"
if(UID != ""):
modelName += f"_{UID}"
return modelName
@staticmethod
def RemoveUID(name : str):
return name.replace(f"_{Const.ParseModelUID(name)}", "")
@staticmethod
def UID():
import uuid
return uuid.uuid4().hex
@staticmethod
def ParseModelConfig(file):
config = Paths.FileName(file).split("_")[0].replace("("," ").replace(")","").replace("vox ","").split(" ")
const = None
if(config[0] == NPM3D.name):
const = NPM3D()
if(config[0] == Semantic3D.name):
const = Semantic3D()
for conf in config[1:]:
if conf == "noFeature" or conf == "NOCOL":
const.noFeature = True
elif conf == "Fusion":
const.Fusion = True
elif conf == "Scale":
const.Scale = True
elif conf == "Rotate":
const.Rotate = True
elif conf == "Mirror":
const.Mirror = True
elif conf == "Jitter":
const.Jitter = True
elif conf == "FtrAugment":
const.FtrAugment = True
return const
@staticmethod
def ParseModelUID(file):
parts = Paths.FileName(file).split("_")
if(len(parts) >= 2):
return parts[1]
else:
return None
@staticmethod
def ParseModelName(file, withUID = True):
parts = Paths.FileName(file, withoutExt = False).split("_")
name = parts[0]
if(withUID and len(parts) > 1):
name += "_"+parts[1]
return name
def TestFiles(self):
return Paths.JoinPaths(self.Paths.processedTrain, self.testFiles)
def TrainFiles(self):
return Paths.GetFiles(self.Paths.processedTrain, excludeFiles = self.TestFiles()+self.excludeFiles)
class Semantic3D(Const):
pointComponents = 3
featureComponents = 3 #rgb
classCount = Label.Semantic3D.Count-1
classNames = Label.Semantic3D.Names
test_step = 0.8
name = "Sem3D"
Paths = Paths.Semantic3D
testFiles = [
"untermaederbrunnen_station3_xyz_intensity_rgb_voxels.npy",
"domfountain_station1_xyz_intensity_rgb_voxels.npy",
]
excludeFiles = []
fileNames = {"birdfountain_station1_xyz_intensity_rgb" : "birdfountain1",
"castleblatten_station1_intensity_rgb" : "castleblatten1",
"castleblatten_station5_xyz_intensity_rgb" : "castleblatten5",
"marketplacefeldkirch_station1_intensity_rgb" : "marketsquarefeldkirch1",
"marketplacefeldkirch_station4_intensity_rgb" : "marketsquarefeldkirch4",
"marketplacefeldkirch_station7_intensity_rgb" : "marketsquarefeldkirch7",
"sg27_station3_intensity_rgb" : "sg27_3",
"sg27_station6_intensity_rgb" : "sg27_6",
"sg27_station8_intensity_rgb" : "sg27_8",
"sg27_station10_intensity_rgb" : "sg27_10",
"sg28_station2_intensity_rgb" : "sg28_2",
"sg28_station5_xyz_intensity_rgb" : "sg28_5",
"stgallencathedral_station1_intensity_rgb" : "stgallencathedral1",
"stgallencathedral_station3_intensity_rgb" : "stgallencathedral3",
"stgallencathedral_station6_intensity_rgb" : "stgallencathedral6",
"MarketplaceFeldkirch_Station4_rgb_intensity-reduced" : "marketsquarefeldkirch4-reduced",
"sg27_station10_rgb_intensity-reduced" : "sg27_10-reduced",
"sg28_Station2_rgb_intensity-reduced" : "sg28_2-reduced",
"StGallenCathedral_station6_rgb_intensity-reduced" : "stgallencathedral6-reduced",
}
class Curbs(Const):
pointComponents = 3
featureComponents = 3
classCount = 2
classNames = Label.Curbs.Names
test_step = 0.5
name = "Curbs"
Paths = Paths.Curbs
if os.path.isdir("C:/Program Files"):
batchSize = 8
else:
batchSize = 25
testFiles = [
"park_extracted.npy",
"Jelskio_str_trimmed.npy",
]
excludeFiles = [
"powerlines_dataset"
]
def FilterCurbAndLineFiles(self, files):
return [file for file in files if not file.endswith("_curbs.npy") and not file.endswith("_lines.npy")]
def TestFiles(self):
return self.FilterCurbAndLineFiles(super(Curbs, self).TestFiles())
def TrainFiles(self):
return self.FilterCurbAndLineFiles(super(Curbs, self).TrainFiles())
class NPM3D(Const):
pointComponents = 3
featureComponents = 1
classCount = Label.NPM3D.Count-1
classNames = Label.NPM3D.Names
test_step = 0.5
name = "NPM3D"
Paths = Paths.NPM3D
testFiles = [
# "Lille1_1_0.npy",
# "Lille1_1_1.npy",
# "Lille1_1_2.npy",
# "Lille1_1_3.npy",
# "Lille1_1_4.npy",
# "Lille1_1_5.npy",
# "Lille1_1_6.npy",
# "Lille1_1_7.npy",
# "Lille1_1_8.npy",
# "Lille1_2_0.npy",
# "Lille1_2_1.npy",
"Lille2_0.npy",
"Lille2_1.npy",
"Lille2_2.npy",
"Lille2_8.npy",
"Lille2_9.npy",
# "Paris_0.npy",
# "Paris_1.npy",
]
excludeFiles = [
# "Lille1_1_7.npy",
# "Lille1_2_2.npy",
"Lille2_10.npy",
# "Paris_2.npy",
]
class WeightsMul(tf.keras.layers.Layer):
def __init__(self, shape, lowBound, highBound, **kwargs):
super(WeightsMul, self).__init__(**kwargs)
self.shape = shape
self.lowBound = lowBound
self.highBound = highBound
def build(self, input_shape):
init = tf.random_uniform_initializer(self.lowBound, self.highBound)
self.vars = self.add_weight(shape=(self.shape),
initializer = init,
trainable = True, dtype=tf.float32)
def call(self, inputs):
return tf.matmul(inputs, self.vars)
def get_config(self):
config = super(WeightsMul, self).get_config()
config.update({'shape': self.shape, 'lowBound': self.lowBound, 'highBound': self.highBound})
return config
class GatherNDLayer(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(GatherNDLayer, self).__init__(**kwargs)
def call(self, array, indices):
return tf.gather_nd(array, indices, batch_dims=1)
def get_config(self):
config = super(GatherNDLayer, self).get_config()
return config
class SubstractCenters(tf.keras.layers.Layer):
def __init__(self, dim, n_centers, **kwargs):
super(SubstractCenters, self).__init__(**kwargs)
self.dim = dim
self.n_centers = n_centers
def build(self, input_shape):
center_data = np.zeros((self.dim, self.n_centers))
for i in range(self.n_centers):
coord = np.random.rand(self.dim)*2 - 1
while (coord**2).sum() > 1:
coord = np.random.rand(self.dim)*2 - 1
center_data[:,i] = coord
self.centers = self.add_weight(shape = (center_data.shape),
initializer = tf.constant_initializer(center_data),
trainable = True, dtype=tf.float32)
def call(self, points):
return points - self.centers
def get_config(self):
config = super(SubstractCenters, self).get_config()
config.update({'dim': self.dim, 'n_centers': self.n_centers})
return config
class UnitBallNormalize(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(UnitBallNormalize, self).__init__(**kwargs)
def call(self, points):
maxi = tf.sqrt(tf.reduce_max(tf.reduce_sum(tf.square(tf.stop_gradient(points)), axis = 3), axis = 2))
maxi = tf.where(tf.equal(maxi, 0.0), tf.constant(1.0), maxi)
points = points / tf.expand_dims(tf.expand_dims(maxi, 2), 3)
return points
def get_config(self):
config = super(UnitBallNormalize, self).get_config()
return config
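# PtConv: continuous convolution over point neighbourhoods (similar in spirit to
# ConvPoint), built from the custom layers above. Outline of the code below:
#   1. pick K neighbours of each output point with a KD-tree layer
#      (KDTreeSampleLayer also subsamples when next_pts is an int),
#   2. gather neighbour coordinates/features, centre them on the output point and
#      normalise each neighbourhood to the unit ball,
#   3. subtract n_centers learned kernel points (SubstractCenters) and run a small
#      MLP on those offsets to get per-kernel weights,
#   4. mix neighbour features with those weights (matmul), apply a learned linear
#      projection (WeightsMul), then BatchNorm + ReLU.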
def PtConv(fts, points, K, next_pts, in_features, out_features, n_centers = 16):
next_pts_ = None
if isinstance(next_pts, int) and points.get_shape()[1] != next_pts:
# convolution with reduction
indices, next_pts_ = KDTreeSampleLayer(K, next_pts)(points)
elif (next_pts is None) or (isinstance(next_pts, int) and points.get_shape()[1] == next_pts):
# convolution without reduction
indices = KDTreeLayer(K)(points, points)
next_pts_ = points
else:
# convolution with up sampling or projection on given points
indices = KDTreeLayer(K)(points, next_pts)
next_pts_ = next_pts
if next_pts is None or isinstance(next_pts, int):
next_pts = next_pts_
# get the features and point coordinates associated with the indices
pts = GatherNDLayer()(points, indices)
if fts is None:
features = tf.expand_dims(tf.ones_like(pts[:,:,:,0]), 3)
else:
features = GatherNDLayer()(fts, indices)
# center the neighborhoods
pts = pts - tf.expand_dims(next_pts,2)
# normalize to unit ball, or not
pts = UnitBallNormalize()(pts)
# compute the distances
dists = SubstractCenters(3, n_centers)(tf.expand_dims(pts, 4))
dShape = dists.shape
dists = tf.reshape(dists, (-1, dShape[1], dShape[2], dShape[3]*dShape[4]))
dists = DenseInitialized(2*n_centers, activation="relu")(dists)
dists = DenseInitialized(n_centers, activation="relu")(dists)
dists = DenseInitialized(n_centers, activation="relu")(dists)
# compute features
fs = features.shape # [batch, points, K, in_features]
ds = dists.shape
features = tf.transpose(features,[0, 1, 3, 2])
features = tf.reshape(features, (-1, features.shape[2], features.shape[3])) #features.shape[0]*features.shape[1]
dists = tf.reshape(dists, (-1, dists.shape[2], dists.shape[3])) #dists.shape[0]*dists.shape[1]
features = tf.matmul(features, dists)
features = tf.reshape(features, (-1, ds[1], features.shape[1]*features.shape[2]))
bound = math.sqrt(3.0) * math.sqrt(2.0 / (in_features + out_features))
features = WeightsMul([in_features * n_centers, out_features], -bound, bound)(features)
features = features / fs[2]
# normalization and activation
features = BatchNormalization(epsilon = 1e-05, momentum=0.9)(features)
features = tf.nn.relu(features)
return features, next_pts
def LinearInitializer(k):
k = np.sqrt(1.0/float(k))
return tf.random_uniform_initializer(k*-1, k)
def DenseInitialized(out_features, activation = None, name = None):
def DenseInit(x):
return Dense(out_features,
kernel_initializer = tf.initializers.lecun_normal(),
bias_initializer = tf.initializers.lecun_normal(),
activation = activation,
name = name,
)(x)
return DenseInit
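# CreateModel: builds an encoder/decoder (U-Net style) stack of PtConv layers.
# The encoder reduces a block from Const.npoints (8192) points down to
# 2048 -> 1024 -> 256 -> 64 -> 16 -> 8 points while widening features, and the
# decoder convolves back onto the previous point sets, concatenating the matching
# encoder features (skip connections) before the final Dropout + Dense + softmax
# that yields per-point class scores.
# Minimal usage sketch (argument values are illustrative only):
#   model = CreateModel(classCount=8, ftsComp=3)                 # RGB input
#   model = CreateModel(classCount=8, ftsComp=1, noColor=True)   # geometry only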
def CreateModel(classCount, ftsComp, in_fts = None, in_pts = None, returnFeatures = False, noColor = False, applySoftmax = True):
print("Creating new model...")
if(in_fts is None and in_pts is None):
in_pts = Input(shape=(Const.npoints, Const.pointComponents), dtype=tf.float32) #points
if(noColor):
in_fts = None
else:
in_fts = Input(shape=(Const.npoints, ftsComp), dtype=tf.float32) #features
if(noColor):
in_fts = None
pl = Const.pl
### Down Sample
x0, _ = PtConv(in_fts, in_pts, K = 16, next_pts = None, in_features = ftsComp, out_features = pl)
x1, pts1 = PtConv(x0, in_pts, K = 16, next_pts = 2048, in_features = pl, out_features = pl)
x2, pts2 = PtConv(x1, pts1, K = 16, next_pts = 1024, in_features = pl, out_features = pl)
x3, pts3 = PtConv(x2, pts2, K = 16, next_pts = 256, in_features = pl, out_features = pl)
x4, pts4 = PtConv(x3, pts3, K = 8, next_pts = 64, in_features = pl, out_features = pl*2)
x5, pts5 = PtConv(x4, pts4, K = 8, next_pts = 16, in_features = pl*2, out_features = pl*2)
x6, pts6 = PtConv(x5, pts5, K = 4, next_pts = 8, in_features = pl*2, out_features = pl*2)
## Up Sample
x5d, _ = PtConv(x6, pts6, K = 4, next_pts = pts5, in_features = pl*2, out_features = pl*2)
x5d = tf.concat([x5d, x5], axis = 2)
x4d, _ = PtConv(x5d, pts5, K = 4, next_pts = pts4, in_features = pl*4, out_features = pl*2)
x4d = tf.concat([x4d, x4], axis = 2)
x3d, _ = PtConv(x4d, pts4, K = 4, next_pts = pts3, in_features = pl*4, out_features = pl)
x3d = tf.concat([x3d, x3], axis = 2)
x2d, _ = PtConv(x3d, pts3, K = 8, next_pts = pts2, in_features = pl*2, out_features = pl)
x2d = tf.concat([x2d, x2], axis = 2)
x1d, _ = PtConv(x2d, pts2, K = 8, next_pts = pts1, in_features = pl*2, out_features = pl)
x1d = tf.concat([x1d, x1], axis = 2)
x0d, _ = PtConv(x1d, pts1, K = 8, next_pts = in_pts, in_features = pl*2, out_features = pl)
x0d = tf.concat([x0d, x0], axis = 2)
### Output layer
out_labels = Dropout(rate=0.5)(x0d)
out_labels = tf.reshape(out_labels, (-1, out_labels.shape[2]))
out_labels = DenseInitialized(classCount)(out_labels)
out_labels = tf.reshape(out_labels, (-1, x0d.shape[1], out_labels.shape[1]))
if(applySoftmax):
out_labels = tf.nn.softmax(out_labels)
if(noColor):
inputList = [in_pts]
else:
inputList = [in_fts, in_pts]
if(returnFeatures):
return Model(inputList, [x0d, out_labels], name ="model")
model = Model(inputList, out_labels, name ="model")
model = CompileModel(model, classCount)
# print(model.summary())
return model
def ModifyModelOutput(model, classCount):
dropoutLayer = model.layers[len(model.layers)-5] #take output of the drop out layer
out_labels = dropoutLayer.output
out_labels = tf.reshape(out_labels, (-1, out_labels.shape[2]), name = "lbl_reshape_1")
out_labels = DenseInitialized(classCount, name = "lbl_dense")(out_labels)
out_labels = tf.reshape(out_labels, (-1, dropoutLayer.input.shape[1], out_labels.shape[1]), name = "lbl_reshape_2")
out_labels = tf.nn.softmax(out_labels, name = "lbl_softmax")
return Model(model.inputs, out_labels, name ="model")
def ReadModel(modelPath):
if(not modelPath.endswith(".h5")):
modelPath += ".h5"
if(not os.path.exists(modelPath)):
if(os.path.exists(os.path.join("." , "data", modelPath))):
modelPath = os.path.join("." , "data", modelPath)
elif(os.path.exists(os.path.join("." , "data", Const.ParseModelName(modelPath, False)))):
file = os.path.basename(modelPath)
folder = os.path.join("." , "data", Const.ParseModelName(modelPath, False))
modelPath = os.path.join(folder, file)
elif(os.path.exists(os.path.join("." , "data", Const.ParseModelName(modelPath)))):
file = os.path.basename(modelPath)
folder = os.path.join("." , "data", Const.ParseModelName(modelPath))
modelPath = os.path.join(folder, file)
if(not os.path.exists(modelPath)):
raise FileNotFoundError
model = tf.keras.models.load_model(modelPath, compile=False,
custom_objects={'NearestNeighborsLayer': NearestNeighborsLayer,
'SampleNearestNeighborsLayer': SampleNearestNeighborsLayer,
'SubstractCenters': SubstractCenters,
'WeightsMul': WeightsMul,
'GatherNDLayer':GatherNDLayer,
'UnitBallNormalize':UnitBallNormalize,
'KDTreeSampleLayer':KDTreeSampleLayer,
'KDTreeLayer':KDTreeLayer,
})
PrintToLog("{} model loaded".format(modelPath))
return model
def LatestModel(path):
if(Const.ParseModelUID(path) is None):
folders = [os.path.join("." , "data",folder) for folder in os.listdir(os.path.join("." , "data"))
if os.path.isdir(os.path.join("." , "data",folder))
and path == Const.RemoveUID(Const.ParseModelName(folder))
and len(Paths.GetFiles(os.path.join("." , "data",folder), findExtesions=".h5")) > 0]
path = max(folders, key=os.path.getctime)
else:
path = os.path.join("." , "data", Const.ParseModelName(path))
try:
latestModel = max(Paths.GetFiles(path, findExtesions=".h5"), key=os.path.getctime)
except:
print(f"No model found in: {path}")
latestModel = None
return latestModel
import re
def ModelValMIOU(path):
result = re.findall(r"val\((.+)\)", path)
return float(result[0])
def HighestValMIOUModel(path):
if(not os.path.isdir(path)):
path = os.path.join("." , "data", os.path.basename(path).split("_")[0])
latestModel = max(Paths.GetFiles(path, findExtesions=".h5"), key=ModelValMIOU)
return latestModel
def LoadModel(modelPath, consts):
model = ReadModel(modelPath)
modified = False
if(model.output.shape[2] != consts.classCount):
print("Model output {} classes changed to {}".format(model.output.shape[2], consts.classCount))
modified = True
model = ModifyModelOutput(model, consts.classCount)
model = CompileModel(model, consts.classCount)
# model.summary()
return model, modified
def ReadModelConfig(path):
Model = ReadModel(path)
modelConfig = Const.ParseModelConfig(path)
return Model, modelConfig
def CreateModelCopy(Model, modelConfig, in_pts, in_RGB):
inputFeatures = 1 if modelConfig.noFeature else modelConfig.featureComponents
newModel = CreateModel(modelConfig.classCount, inputFeatures, in_RGB, in_pts, noColor=modelConfig.noFeature, returnFeatures=True, applySoftmax=False)
if(Model != None):
for new_layer, layer in zip(newModel.layers, Model.layers):
new_layer.set_weights(layer.get_weights())
return newModel
def FuseModels(modelPaths, consts):
fusionModel = None
assert(modelPaths is None or len(modelPaths) == 2)
print("Model fusion")
if(not modelPaths is None):
ModelA, modelAConfig = ReadModelConfig(modelPaths[0])
ModelB, modelBConfig = ReadModelConfig(modelPaths[1])
else:
from copy import copy
ModelA, ModelB = None, None #no pretrained weights to copy in this case
consts.noFeature = False
modelAConfig = copy(consts) #snapshot, so the next flag change does not alias it
consts.noFeature = True
modelBConfig = copy(consts)
in_RGB = None
if(not modelAConfig.noFeature or not modelBConfig.noFeature):
in_RGB = Input(shape=(Const.npoints, consts.featureComponents), dtype=tf.float32, name = "In_RGB") #features
in_pts = Input(shape=(Const.npoints, Const.pointComponents), dtype=tf.float32, name = "In_pts") #points
newModelA = CreateModelCopy(ModelA, modelAConfig, in_pts, in_RGB)
newModelB = CreateModelCopy(ModelB, modelBConfig, in_pts, in_RGB)
x = tf.concat((newModelA.output[0], newModelB.output[0]), axis = 2) #fuse features from both models
x1, _ = PtConv(x, in_pts, K = 16, next_pts = Const.npoints, in_features = 2*128, out_features = 96)
x2, _ = PtConv(x1, in_pts, K = 16, next_pts = Const.npoints, in_features = 96, out_features = 48)
x0d = tf.concat([x2, newModelA.output[1], newModelB.output[1]], axis = 2)
out_labels = tf.reshape(x0d, (-1, x0d.shape[2]))
out_labels = Dropout(rate=0.5)(out_labels)
out_labels = DenseInitialized(consts.classCount)(out_labels)
out_labels = tf.reshape(out_labels, (-1, x0d.shape[1], out_labels.shape[1]))
out_labels = tf.nn.softmax(out_labels)
fusionModel = Model([in_pts] if in_RGB is None else [in_RGB, in_pts], out_labels, name ="model")
nontrainableNames = [x.name for x in newModelA.layers] + [x.name for x in newModelB.layers]
# nontrainableNames = [x.name for x in newModelA.layers]
count = 0
for i, layer in enumerate(fusionModel.layers):
if(layer.name in nontrainableNames):
layer.trainable = False
count += 1
PrintToLog(f"{len(fusionModel.layers)-count}/{len(fusionModel.layers)} layers are trainable.")
fusionModel = CompileModel(fusionModel, consts.classCount)
# fusionModel.summary()
return fusionModel
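# MIOU / IOU below are streaming Keras metrics: update_state() adds each batch's
# confusion matrix to an accumulator, and result() derives IoU per class as
# diag / (row_sum + col_sum - diag) with a guard against empty classes.
# A NumPy reference sketch of that same computation (illustrative only, not used
# anywhere else in this file):
def _iou_from_confusion_matrix_sketch(cm):
    tp = np.diag(cm).astype(np.float64)                     # true positives per class
    denom = cm.sum(axis=0) + cm.sum(axis=1) - np.diag(cm)   # union per class
    denom = np.where(denom == 0, 1, denom)                  # same zero-guard as MIOU.result()
    iou = tp / denom
    return iou.mean(), iou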
class MIOU(tf.keras.metrics.Metric):
def __init__(self, classCount, name='miou', **kwargs):
super(MIOU, self).__init__(name=name, **kwargs)
self.cm = self.add_weight(name=name, shape = (classCount, classCount), initializer='zeros', dtype = tf.int64)
self.classCount = classCount
def update_state(self, y_true, y_pred, sample_weight=None):
TrueLbl = tf.argmax(tf.reshape(y_true, [-1, self.classCount]), axis= 1)
PredLbl = tf.argmax(tf.reshape(y_pred, [-1, self.classCount]), axis= 1)
confusion_matrix = tf.math.confusion_matrix(TrueLbl, PredLbl, self.classCount)
self.cm.assign_add(tf.cast(confusion_matrix, tf.int64))
def result(self):
union = tf.linalg.diag_part(self.cm)
rowSum = tf.math.reduce_sum(self.cm, axis = 0)
colSum = tf.math.reduce_sum(self.cm, axis = 1)
intersection = (colSum + rowSum - union)
intersection = tf.where(tf.equal(intersection, tf.constant(0, dtype=tf.int64)), tf.constant(1, dtype=tf.int64), intersection)
iou = union / intersection
miou = tf.expand_dims(tf.convert_to_tensor(tf.reduce_sum(iou) / tf.cast(iou.shape[0], dtype=np.float64)), 0)
return tf.concat((tf.expand_dims(miou,1), tf.cast(tf.expand_dims(iou,1), tf.float64)), 0)
def reset_states(self):
# The state of the metric will be reset at the start of each epoch.
self.cm.assign(tf.zeros((self.classCount, self.classCount), dtype=tf.int64))
def moving_miou_metric(classCount):
def moving_iou(y_true, y_pred):
TrueLbl = tf.argmax(tf.reshape(y_true, [-1, classCount]), axis= 1)
PredLbl = tf.argmax(tf.reshape(y_pred, [-1, classCount]), axis= 1)
cm = tf.math.confusion_matrix(TrueLbl, PredLbl, classCount)
union = tf.linalg.diag_part(cm)
rowSum = tf.math.reduce_sum(cm, axis = 0)
colSum = tf.math.reduce_sum(cm, axis = 1)
intersection = (colSum + rowSum - union)+1
iou = union / intersection
return tf.reduce_sum(iou) / tf.cast(tf.math.maximum(iou.shape[0], 1), dtype=np.float64)
return moving_iou
class IOU(tf.keras.metrics.Metric):
def __init__(self, classCount, classIndex, name='iou', **kwargs):
super(IOU, self).__init__(name=name, **kwargs)
self.cm = self.add_weight(name=name, shape = (classCount, classCount), initializer='zeros', dtype = tf.int64)
self.classCount = classCount
self.classIndex = classIndex
def update_state(self, y_true, y_pred, sample_weight=None):
TrueLbl = tf.argmax(tf.reshape(y_true, [-1, self.classCount]), axis= 1)
PredLbl = tf.argmax(tf.reshape(y_pred, [-1, self.classCount]), axis= 1)
confusion_matrix = tf.math.confusion_matrix(TrueLbl, PredLbl, self.classCount)
self.cm.assign_add(tf.cast(confusion_matrix, tf.int64))
def result(self):
union = tf.linalg.diag_part(self.cm)
rowSum = tf.math.reduce_sum(self.cm, axis = 0)
colSum = tf.math.reduce_sum(self.cm, axis = 1)
intersection = (colSum + rowSum - union)
intersection = tf.where(tf.equal(intersection, tf.constant(0, dtype=tf.int64)), tf.constant(1, dtype=tf.int64), intersection)
iou = union / intersection
return tf.cast(tf.expand_dims(iou, 1)[self.classIndex], tf.float64)
def reset_states(self):
# The state of the metric will be reset at the start of each epoch.
self.cm.assign(tf.zeros((self.classCount, self.classCount), dtype=tf.int64))
def weighted_categorical_crossentropy(weights):
# weights = [0.9,0.05,0.04,0.01]
def wcce(y_true, y_pred):
Kweights = tf.constant(weights)
y_true = tf.cast(y_true, y_pred.dtype)
return tf.keras.losses.categorical_crossentropy(y_true, y_pred) * tf.math.reduce_sum(y_true * Kweights, axis=-1)
return wcce
def CompileModel(model, classCount):
model.compile(
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3, epsilon = 1e-8),
loss = tf.keras.losses.CategoricalCrossentropy(),
# loss = weighted_categorical_crossentropy([0.7, 5]),
metrics= [IOU(classCount, 0, name="other"), IOU(classCount, 1, name="curb")] if classCount == 2 else [MIOU(classCount)]
)
return model
class IOUPerClass(tf.keras.callbacks.Callback):
def __init__(self, plot_path, classNames, firstEpoch = 0, metric = "miou"):
self.metric = metric
self.epoch = firstEpoch
self.classCount = len(classNames)
self.classNames = classNames
self.path = plot_path
print(f"IOU logs path: {self.path}")
self.writers = []
self.val_writers = []
ioupath = os.path.join(plot_path, "iou")
os.makedirs(ioupath, exist_ok=True)
for i in range(self.classCount):
path = os.path.join(ioupath, classNames[i])
os.makedirs(path, exist_ok=True)
self.writers.append(tf.summary.create_file_writer(path))
path = os.path.join(ioupath, "val_"+classNames[i])
os.makedirs(path, exist_ok=True)
self.val_writers.append(tf.summary.create_file_writer(path))
# print("Writer path: ", path)
self.InitializeMIOUWriter()
def InitializeMIOUWriter(self):
mioupath = os.path.join(self.path, "miou")
os.makedirs(mioupath, exist_ok=True)
path = os.path.join(mioupath, "miou")
os.makedirs(path, exist_ok=True)
self.miou_writer = tf.summary.create_file_writer(path)
path = os.path.join(mioupath, "val_miou")
os.makedirs(path, exist_ok=True)
self.val_miou_writer = tf.summary.create_file_writer(path)
def WriteLog(self, writer, metric, logs, epoch, tag = "miou"):
value = logs.get(metric)
if(value is None):
print(f"Failed getting {metric} log")
return False
with writer.as_default():
tf.summary.scalar(tag, value[0][0], step=epoch)
writer.flush()
def WriteLogs(self, writers, metric, logs, epoch, tag = "iou"):
metrix = logs.get(metric)
if(metrix is None):
print(f"Failed getting {metric} log")
return False
iou = [i[0] for i in metrix[len(metrix)-self.classCount:]]
for i in range(len(iou)):
with writers[i].as_default():
tf.summary.scalar(tag, iou[i], step=epoch)
writers[i].flush()
def on_epoch_end(self, batch, logs=None):
self.WriteLogs(self.writers, self.metric, logs, self.epoch)
self.WriteLogs(self.val_writers, "val_"+self.metric, logs, self.epoch)
self.WriteLog(self.miou_writer, self.metric, logs, self.epoch)
self.WriteLog(self.val_miou_writer, "val_"+self.metric, logs, self.epoch)
self.epoch += 1
logSaveDir = ""
def WriteToLog(msg):
if(os.path.isdir(logSaveDir)):
logFile = open(logSaveDir+f"/training.log", "a")
logFile.write(msg+"\n")
logFile.close()
def PrintToLog(msg):
print(msg)
WriteToLog(msg)
class ModelSaveCallback(tf.keras.callbacks.Callback):
def __init__(self, saveDir, trainingSteps, metric = "accuracy", modelNamePrefix = "", sendNotifications = False):
super().__init__()
self.saveDir = saveDir
self.metric = metric
self.modelNamePrefix = modelNamePrefix
self.epoch = 0
self.trainingSteps = trainingSteps
self.sendNotifications = sendNotifications
if(self.sendNotifications):
self.notifyDevice = Notify()
os.makedirs(self.saveDir, exist_ok=True)
WriteToLog(f"Training: {modelNamePrefix}")
def on_epoch_end(self, epoch, logs=None):
self.epoch = epoch + 1
if(len(logs) > 0):
miou = logs.get(self.metric)[0]*100
val_metric = "val_"+self.metric
val_miou = logs.get(val_metric)[0]*100
SaveModel(self.saveDir, epoch, self.model, miou, val_miou, self.modelNamePrefix)
message = "Ep: {0}. {1}: {2:.3}%. {3}: {4:.3}%".format(self.epoch, self.metric, miou, val_metric, val_miou)
WriteToLog(message)
f = open("demofile3.txt", "w")
f.write("Woops! I have deleted the content!")
f.close()
if(self.sendNotifications):
try:
self.notifyDevice.send(self.modelNamePrefix + " " + message)
except:
print("notifyDevice error")
# def on_batch_end(self, batch, logs=None):
# progress = batch/self.trainingSteps * 100
# if(progress % 10 == 0):
# try:
# message = "Ep. {0} {1}% done. {2}: {3:.3}%".format(self.epoch+1, int(progress), self.metric, logs.get(self.metric)*100)
# self.notifyDevice.send(message)
# except:
# print("notifyDevice error")
def ParseEpoch(modelPath):
filename = os.path.basename(modelPath)
return int(filename.split("_")[2])
def GetValidationData(testFiles, consts, batchesCount = 100, newDataGeneration = False):
print("Gathering validation data...")
print(f"Test files: {testFiles}")
if(newDataGeneration):
PrintToLog("Use TestSequence for validation.")
assert(len(testFiles) == 1)
seq = TestSequence(testFiles[0], consts, test = True)
else:
PrintToLog("Use TrainSequence for validation.")
seq = TrainSequence(testFiles, batchesCount, consts, dataAugmentation = False)
if not consts.noFeature:
ftsList = np.zeros((0, consts.npoints, consts.featureComponents), np.float32)
ptsList = np.zeros((0, consts.npoints, 3), np.float32)
lbsList = np.zeros((0, consts.npoints, consts.classCount), np.uint8)
if(newDataGeneration):
indexes = np.arange(min(batchesCount, len(seq)))
np.random.shuffle(indexes)
else:
indexes = range(batchesCount)
for i in indexes:
if consts.noFeature:
if(newDataGeneration):
ptslbl = seq.__getitem__(i)
else:
pts, lbl = seq.__getitem__(i)
ptslbl = [pts[0], lbl]
ptsList = np.concatenate((ptsList, ptslbl[0]), 0)
lbsList = np.concatenate((lbsList, ptslbl[1]), 0)
else:
if(newDataGeneration):
ftsptslbl = seq.__getitem__(i)
else:
ftspts, lbl = seq.__getitem__(i)
ftsptslbl = [ftspts[0], ftspts[1], lbl]
ftsList = np.concatenate((ftsList, ftsptslbl[0]), 0)
ptsList = np.concatenate((ptsList, ftsptslbl[1]), 0)
lbsList = np.concatenate((lbsList, ftsptslbl[2]), 0)
PrintToLog(f"Generated {len(lbsList)} validation samples.")
if consts.noFeature:
return (ptsList, lbsList)
else:
return ([ftsList, ptsList], lbsList)
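# TrainModel: top-level training entry point. It either builds a new model, loads
# and optionally adapts an existing .h5 checkpoint (LoadModel), or fuses two
# checkpoints (FuseModels), then trains on random blocks from TrainSequence and
# validates on blocks gathered by GetValidationData. ModelSaveCallback saves a
# checkpoint after every epoch, encoding the train and validation scores in the
# file name (this is what LatestModel and HighestValMIOUModel parse later).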
def TrainModel(trainFiles, testFiles, consts : Const, modelPath = None, saveDir = Paths.dataPath, classes = None, first_epoch = 0, epochs = None, sendNotifications = False):
global logSaveDir #WriteToLog/PrintToLog read the module-level logSaveDir assigned below
model = None
modelName = None
if(modelPath != None):
if(not isinstance(modelPath, list)):
modelName = Const.ParseModelName(modelPath)
if(consts.Name() != Const.RemoveUID(modelName)):
modelName = consts.Name(consts.UID())
logSaveDir = saveDir + f"/{modelName}/"
if(isinstance(modelPath, list)):
model = FuseModels(modelPath, consts)
else:
model, modified = LoadModel(modelPath, consts)
if(not modified):
first_epoch = ParseEpoch(modelPath) +1
else:
if(consts.Fusion):
model = FuseModels(None, consts)
else:
model = CreateModel(consts.classCount, 1 if consts.noFeature else consts.featureComponents, noColor=consts.noFeature)
if(modelName is None or modelName == ""):
modelName = consts.Name(consts.UID())
logSaveDir = saveDir + f"/{modelName}/"
PrintToLog("Train {} on {} files. Test on {} files".format(modelName, len(trainFiles), len(testFiles)))
PrintToLog("Validate on :" + str(testFiles))
trainingSteps = int((1000*16)/consts.batchSize) if not Const.IsWindowsMachine() else int(10)
PrintToLog("Batch size: {}, trainingSteps: {}".format(consts.batchSize, trainingSteps))
logsPath = os.path.join(consts.logsPath, Const.RemoveUID(modelName))
os.makedirs(logsPath, exist_ok=True)
callbacks_list = []
callbacks_list.append(ModelSaveCallback(logSaveDir, trainingSteps, "curb", modelNamePrefix = modelName, sendNotifications=sendNotifications))
# callbacks_list.append(IOUPerClass(logsPath, consts.classNames[1:], first_epoch+1))
# callbacks_list.append(tf.keras.callbacks.TensorBoard(log_dir=logsPath, update_freq="batch", histogram_freq=0, profile_batch = 0)) # tensorboard 2.0.2
seq = TrainSequence(trainFiles, trainingSteps, consts)
validationSteps = int(((150 if not Const.IsWindowsMachine() else 10) * 16)/consts.batchSize)
validationData = None if len(testFiles) == 0 else GetValidationData(testFiles, consts, validationSteps)
if(epochs is None):
epochs = 20 if consts.Fusion else 100
model.fit(seq, validation_data = validationData, epochs = epochs, batch_size = consts.batchSize, workers = consts.batchSize, max_queue_size = 300, callbacks=callbacks_list, initial_epoch = first_epoch)
def EvaluateModels(modelsList, testFiles, consts, x = None, y = None):
if(x is None or y is None):
validationSteps = int(((150 if not Const.IsWindowsMachine() else 10) * 16)/consts.batchSize)
x, y = GetValidationData(testFiles, consts, validationSteps, newDataGeneration = False)
for file in modelsList:
model, _ = LoadModel(file, consts)
metrics = model.evaluate(x, y, batch_size = consts.batchSize, workers = consts.batchSize, max_queue_size = 300)
# print(f"miou: {metrics[2][0][0]*100:.3}")
def SaveModel(saveDir, epoch, model, train_score, val_score=0, modelNamePrefix = ""):
if(modelNamePrefix != ""):
modelNamePrefix += "_"
fileName = saveDir+"/{0}{1}{2}{3}.h5".format(modelNamePrefix, epoch, f"_train({train_score:.3})", f"_val({val_score:.3})" if val_score != 0 else "")
if(not os.path.isdir(saveDir)):
os.mkdir(saveDir)
if(os.path.exists(fileName)):
os.remove(fileName)
model.save(fileName, include_optimizer=False)
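# --- Data augmentation helpers ---------------------------------------------
# These are applied per sampled block inside TrainSequence.__getitem__ when the
# corresponding Const flags are set. Sketch of the order used there:
#   pts = MirrorPoints(pts)          # Const.Mirror: mirror about a random x/y edge
#   pts = RotatePointCloud(pts)      # Const.Rotate: random rotation around z (up)
#   pts = ScalePoints(pts, 0.02)     # Const.Scale: uniform scaling of +-2%
#   pts = JitterPoints(pts, 0.01)    # Const.Jitter: per-point positional noise
#   fts = JitterRGB(fts) or JitterReflectance(fts)  # Const.FtrAugment: colour/intensity noise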
def RotatePointCloud(batch_data):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, sinval, 0],
[-sinval, cosval, 0],
[0, 0, 1],])
return np.dot(batch_data, rotation_matrix)
def JitterRGB(features):
features = features.astype(np.uint8)
assert(np.max(features) > 1)
img = Image.fromarray(np.expand_dims(features,0), mode="RGB")
low = 0.4
high = 1.6
#1 is baseline
img = ImageEnhance.Brightness(img).enhance(np.random.uniform(low, high))
img = ImageEnhance.Color(img).enhance(np.random.uniform(low, high))
img = ImageEnhance.Contrast(img).enhance(np.random.uniform(low, high))
img = ImageEnhance.Sharpness(img).enhance(np.random.uniform(low, high))
if(np.random.uniform(low, high) > 1):
img = ImageOps.equalize(img)
if(np.random.uniform(low, high) > 1):
img = ImageOps.autocontrast(img)
new_features = np.array(img).reshape((-1, 3))
return new_features
def JitterReflectance(features, sigma=40): #input [0; 255]
assert(features.shape[1] == 1)
randJitters = np.random.randint(-sigma, sigma, size = features.shape)
features += randJitters
features = np.clip(features, 0, 255)
return features
def JitterPoints(points, sigma=0.01):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
C = 3
assert(points.shape[1] == C)
randJitters = np.random.uniform(-sigma, sigma, size = points.shape)
return points + randJitters
def Mirror(points, axis, min = True):
if(min):
axisValue = np.amin(points[:,axis])
else:
axisValue = np.amax(points[:,axis])
distances = np.abs(points[:, axis] - axisValue)
newpoints = np.array(points, copy=True)
newpoints[:,axis] = newpoints[:,axis] + distances*(-2 if min else 2)
return newpoints
def MirrorPoints(points):
assert(len(points.shape) == 2 and points.shape[1] == 3)
mirrorDirection = random.choice(["xMin", "xMax", "yMin", "yMax", ""])
if(mirrorDirection == "xMin"):
points = Mirror(points, 0, min = True)
elif(mirrorDirection == "xMax"):
points = Mirror(points, 0, min = False)
elif(mirrorDirection == "yMin"):
points = Mirror(points, 1, min = True)
elif(mirrorDirection == "yMax"):
points = Mirror(points, 1, min = False)
return points
def ScalePoints(points, sigma = 0.02):
""" Scale up or down random by small percentage
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, scaled batch of point clouds
"""
assert(points.shape[1]==3)
scale = np.random.uniform(1-sigma, 1+sigma)
scale_matrix = np.array([[scale, 0, 0],
[0, scale, 0],
[0, 0, scale]])
scaled = np.dot(points, scale_matrix)
return scaled
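# TrainSequence: Keras Sequence that generates random training batches. For every
# sample it picks a file (weighted by point count through the cumulative
# ptsListCount), picks a random centre point, cuts out a blocksize x blocksize
# vertical column around it, samples exactly npoints points with replacement,
# one-hot encodes the labels and optionally applies the augmentations above.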
class TrainSequence(Sequence):
def __init__(self, filelist, iteration_number, consts : Const, dataAugmentation = True):
self.filelist = filelist
self.ptsList = [np.load(file) for file in self.filelist]
self.ptsList = sorted(self.ptsList, key=len)
self.ptsListCount = np.cumsum([len(pts) for pts in self.ptsList])
self.cts = consts
self.dataAugmentation = dataAugmentation
self.iterations = iteration_number
def __len__(self):
return int(self.iterations)
def PickRandomPoint(self, lbl):
lblIdx = []
while True:
randClass = random.randint(0, self.cts.classCount-1)
lblIdx = np.where(lbl == randClass)[0]
if(len(lblIdx) >= 2):
break
return lblIdx[random.randint(0, len(lblIdx)-1)]
def __getitem__(self, _):
if not self.cts.noFeature:
ftsList = np.zeros((self.cts.batchSize, self.cts.npoints, self.cts.featureComponents), np.float32)
ptsList = np.zeros((self.cts.batchSize, self.cts.npoints, 3), np.float32)
lbsList = np.zeros((self.cts.batchSize, self.cts.npoints, self.cts.classCount), np.uint8)
for i in range(self.cts.batchSize):
# load the data
ptIdx = random.randint(0, self.ptsListCount[-1])
pts = self.ptsList[np.argmax(self.ptsListCount >= ptIdx)]
# if(self.cts.featureComponents == 1):
# keepPts = (pts[:, 4] != 0)
# else:
# keepPts = (pts[:, 6] != 0)
# pts = pts[keepPts]
# get the features
if(self.cts.featureComponents == 1):
if not self.cts.noFeature:
fts = np.expand_dims(pts[:,3], 1).astype(np.float32)
lbs = pts[:,4].astype(int)
else:
if not self.cts.noFeature:
fts = pts[:,3:6].astype(np.float32)
lbs = pts[:,6].astype(int)
if(np.min(lbs) == 1):
lbs -= 1 #class 0 is filtered out
# get the point coordinates
pts = pts[:, :3]
# pick a random point
pt_id = random.randint(0, pts.shape[0]-1)
pt = pts[pt_id]
# create the mask
mask_x = np.logical_and(pts[:,0]<pt[0]+self.cts.blocksize/2, pts[:,0]>pt[0]-self.cts.blocksize/2)
mask_y = np.logical_and(pts[:,1]<pt[1]+self.cts.blocksize/2, pts[:,1]>pt[1]-self.cts.blocksize/2)
mask = np.logical_and(mask_x, mask_y)
temppts = pts[mask]
templbs = lbs[mask]
if not self.cts.noFeature:
tempfts = fts[mask]
# random selection
choice = np.random.choice(temppts.shape[0], self.cts.npoints, replace=True)
temppts = temppts[choice]
if not self.cts.noFeature:
tempfts = tempfts[choice]
templbs = templbs[choice]
encodedLbs = np.zeros((len(templbs), self.cts.classCount))
encodedLbs[np.arange(len(templbs)),templbs] = 1
templbs = encodedLbs
# if self.dataAugmentation:
# dt = DataTool()
# dt.VisualizePointCloudAsync([temppts], [tempfts/255])
# data augmentation
if self.dataAugmentation:
if(self.cts.Mirror):
temppts = MirrorPoints(temppts)
if(self.cts.Rotate):
temppts = RotatePointCloud(temppts)
if(self.cts.Scale):
temppts = ScalePoints(temppts, sigma = 0.02)
if(self.cts.Jitter):
temppts = JitterPoints(temppts, sigma = 0.01)
if(not self.cts.noFeature and self.cts.FtrAugment):
if(self.cts.featureComponents == 3):
tempfts = JitterRGB(tempfts)
elif(self.cts.featureComponents == 1):
tempfts = JitterReflectance(tempfts)
if(not self.cts.noFeature):
tempfts = tempfts.astype(np.float32)
tempfts = tempfts/255 # - 0.5
# if self.dataAugmentation:
# # visualize data
# dt = DataTool()
# dt.VisualizePointCloud([temppts], [tempfts], windowName = "Augmented")
# linePoints = np.where(templbs[:, 1] == 1)[0]
# DataTool().VisualizePointCloud([np.delete(temppts, linePoints, axis=0), temppts[linePoints]], [[0,0,1], [1,0,0]], windowName="Sampled")
if not self.cts.noFeature:
ftsList[i] = np.expand_dims(tempfts, 0)
ptsList[i] = np.expand_dims(temppts, 0)
lbsList[i] = np.expand_dims(templbs, 0)
if self.cts.noFeature:
return [ptsList], lbsList
else: # works for RGB and fusion models
return [ftsList, ptsList], lbsList
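# TestSequence: Keras Sequence used at inference/validation time. Block centres
# are laid out on a regular xy grid obtained by discretising the cloud with
# consts.test_step, so neighbouring blocks overlap; idxList remembers the original
# index of every sampled point so the per-block predictions can be scattered back
# onto the full cloud (see GenerateFile / GenerateLargeFile). splitDataSetToParts
# lets very large clouds be processed part by part via NextPart()/LenParts().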
class TestSequence(Sequence):
def __init__(self, filename, consts, splitDataSetToParts = -1, windowsMachineCap = True, test = False):
self.filename = filename
self.batchSize = consts.batchSize
self.npoints = consts.npoints
self.nocolor = consts.noFeature
self.bs = consts.blocksize
self.featureComponents = consts.featureComponents
self.fusion = consts.Fusion
self.test = test
if(self.test):
self.classCount = consts.classCount
self.lbl = []
if(self.filename.endswith(".ply")):
from plyfile import PlyData
plydata = PlyData.read(self.filename)
x = plydata["vertex"].data["x"].astype(np.float32)
y = plydata["vertex"].data["y"].astype(np.float32)
z = plydata["vertex"].data["z"].astype(np.float32)
fts = plydata["vertex"].data["reflectance"].astype(np.float32)
self.xyzrgb = np.concatenate((np.expand_dims(x,1), np.expand_dims(y,1), np.expand_dims(z,1), np.expand_dims(fts, 1)), axis=1)
elif(self.filename.endswith(".npy")):
xyzftsl = np.load(self.filename)
if(xyzftsl.shape[1] == 5):
self.xyzrgb = xyzftsl[:, :4]
if(self.test):
self.lbl = xyzftsl[:, 4] - 1
else: #if(xyzftsl.shape[1] == 7):
self.xyzrgb = xyzftsl[:, :6]
if(self.test):
self.lbl = xyzftsl[:, 6] - 1
elif(self.filename.endswith(".las")):
from dataTool import ReadXYZRGB
xyz, rgb = ReadXYZRGB(self.filename)
self.xyzrgb = np.concatenate((xyz, rgb), 1)
print("Test_step:", consts.test_step)
step = consts.test_step
discretized = ((self.xyzrgb[:,:2]).astype(float)/step).astype(int)
self.allpts = np.unique(discretized, axis=0)
self.allpts = self.allpts.astype(np.float64)*step
if(consts.IsWindowsMachine() and windowsMachineCap):
self.allpts = self.allpts[:115] #small sample for testing
self.splitDataSetToParts = splitDataSetToParts
if(self.splitDataSetToParts != -1):
self.ptIndex = 0
else:
self.pts = self.allpts
self.idxList = np.zeros((len(self.pts), self.npoints), np.int64)
self.sparseCubes = 0
self.sparseCubesPtCount = 0
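# Added note (for clarity, not in the original file): the discretization above tiles the cloud
# for inference by flooring XY coordinates onto a test_step grid; with step = 2.0 a point at
# (3.7, 5.1) falls into cell (1, 2), which maps back to block centre (2.0, 4.0). np.unique keeps
# one centre per occupied cell, and __getitem__ later samples npoints inside a blocksize-wide
# column around each centre.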
def LenParts(self):
if(self.splitDataSetToParts != -1):
return math.ceil(len(self.allpts)/self.splitDataSetToParts)
else:
return 1
def NextPart(self):
if(self.splitDataSetToParts <= 0):
return False
if(self.ptIndex >= len(self.allpts)):
return False
self.nextIndex = np.min([self.ptIndex+self.splitDataSetToParts, len(self.allpts)])
self.pts = self.allpts[self.ptIndex : self.nextIndex]
self.ptIndex = self.nextIndex
self.idxList = np.zeros((len(self.pts), self.npoints), np.int64)
return True
def __len__(self):
return math.ceil(len(self.pts)/self.batchSize)
def compute_mask(self, pt, bs):
# build the mask
mask_x = np.logical_and(self.xyzrgb[:,0]<pt[0]+bs/2, self.xyzrgb[:,0]>pt[0]-bs/2)
mask_y = np.logical_and(self.xyzrgb[:,1]<pt[1]+bs/2, self.xyzrgb[:,1]>pt[1]-bs/2)
mask = np.logical_and(mask_x, mask_y)
return mask
def __getitem__(self, index):
size = min(self.batchSize, len(self.pts) - (index * self.batchSize))
if not self.nocolor:
ftsList = np.zeros((size, self.npoints, self.featureComponents), np.float32)
ptsList = np.zeros((size, self.npoints, 3), np.float32)
if(self.test):
lblList = np.zeros((size, self.npoints, self.classCount), np.float32)
for i in range(size):
# get the data
mask = self.compute_mask(self.pts[index*self.batchSize + i], self.bs)
pts = self.xyzrgb[mask]
if(self.test):
lbl = self.lbl[mask]
if(len(pts) < self.npoints):
self.sparseCubes += 1
self.sparseCubesPtCount += len(pts)
# choose right number of points
choice = np.random.choice(pts.shape[0], self.npoints, replace=True)
pts = pts[choice]
if(self.test):
lbl = lbl[choice]
# labels will contain indices in the original point cloud
idx = np.where(mask)[0][choice]
self.idxList[index*self.batchSize + i] = np.expand_dims(idx, 0)
# separate between features and points
if not self.nocolor:
if(self.featureComponents == 1):
fts = np.expand_dims(pts[:,3], 1)
else:
fts = pts[:,3:6]
fts = fts/255 #- 0.5
pts = pts[:, :3].copy()
if not self.nocolor:
ftsList[i] = np.expand_dims(fts, 0)
ptsList[i] = np.expand_dims(pts, 0)
if self.test:
lblList[i, np.arange(len(lbl)), lbl.astype(int)] = 1
add_lbl = []
if self.test:
add_lbl = [lblList]
if self.nocolor:
return [ptsList] + add_lbl
else: #works for RGB
return [ftsList, ptsList] + add_lbl
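# Added note (for clarity, not in the original file): idxList records, for every sampled point,
# its index in the full cloud; GenerateFile later scatter-adds the per-block predictions into a
# (num_points, classCount) score array via scores[idx[i]] += output[i], so points sampled in
# several overlapping blocks accumulate votes before the final argmax.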
def GenerateData(modelPath, testFiles, consts, outputFolder, NameIncludeModelInfo = False):
model, _ = LoadModel(modelPath, consts)
if(not NameIncludeModelInfo):
outputFolder = os.path.join(outputFolder, Paths.FileName(modelPath))
os.makedirs(outputFolder, exist_ok=True)
for file in testFiles:
t = time()
baseName = Paths.FileName(file)
if(NameIncludeModelInfo):
baseName = baseName + "_" + Paths.FileName(modelPath)
baseName += ".txt"
newFile = os.path.join(outputFolder, baseName)
if(os.path.exists(newFile)):
print("All ready exists: ",newFile)
continue
else:
open(newFile, "a").close()
print("Generating: ", newFile)
GenerateFile(model, file, consts, newFile)
print("Done in {:02d}:{:02d} min.".format(int((time() - t)/60), int((time() - t)%60)))
def GenerateLargeData(modelPath, voxelFiles, consts, outputFolder, orgFiles = None, replace = False, Upscale = True, NameIncludeModelInfo = False):
from time import time
model, _ = LoadModel(modelPath, consts)
if(not NameIncludeModelInfo):
outputFolder = outputFolder + Paths.FileName(modelPath)
if not Upscale:
outputFolder = outputFolder+"/vox_lbl/"
os.makedirs(outputFolder, exist_ok=True)
if isinstance(voxelFiles, str):
voxelFiles = Paths.GetFiles(voxelFiles)
if isinstance(orgFiles, str):
orgFiles = Paths.GetFiles(orgFiles)
for voxelFile in voxelFiles:
baseName = Paths.FileName(voxelFile).replace("_voxels", "")
if not (orgFiles is None):
orgFile = [f for f in orgFiles if Paths.FileName(f).startswith(baseName)]
if(len(orgFile) != 1):
print("Skip: ", voxelFile)
continue
orgFile = orgFile[0]
else:
orgFile = None
t = time()
if(NameIncludeModelInfo):
baseName = baseName + "_" + Paths.FileName(modelPath)
if Upscale:
newFile = os.path.join(outputFolder, baseName+".labels")
else:
newFile = os.path.join(outputFolder, baseName+".npy")
if(not replace and os.path.exists(newFile)):
print(newFile," already exists.")
continue
flagFile = newFile+".tmp"
if(os.path.exists(flagFile)):
print("Other worker is generating: ", newFile)
continue
else:
open(flagFile, "a").close()
print("Generating: ", newFile)
GenerateLargeFile(model, voxelFile, orgFile, consts, newFile, Upscale = Upscale)
os.remove(flagFile)
print("{} generated in {:02d}:{:02d} min.".format(baseName, int((time() - t)/60), int((time() - t)%60)))
def GenerateFile(model, file, consts, outputFile, saveScores = True):
seq = TestSequence(file, consts)
output = model.predict(seq, workers = consts.batchSize, max_queue_size = 300, verbose = 1)
# for y in range(len(seq)):
# pts = seq.__getitem__(y)
# pts = pts[0]
# pred = model.predict(pts)
# for i in range(len(pred)):
# predPtsIdx = np.where(np.argmax(pred[i], axis = 1) == 1)[0]
# # truePtsIdx = np.where(np.argmax(lbl[i], axis = 1) == 1)[0]
# # print(f"True curb points: {len(truePtsIdx)}. Found curb points: {len(predPtsIdx)}")
# DataTool().VisualizePointCloud([np.delete(pts[i], predPtsIdx, axis=0), pts[i][predPtsIdx]], [[0,0,1], [1,0,0]])
idx = seq.idxList
xyzrgb = seq.xyzrgb[:,:3]
scores = np.zeros((xyzrgb.shape[0], consts.classCount))
for i in range(len(output)):
scores[idx[i]] += output[i]
mask = np.logical_not(scores.sum(1)==0)
scores = scores[mask]
pts_src = xyzrgb[mask]
# create the scores for all points
indexes = nearest_correspondance(pts_src.astype(np.float32), xyzrgb.astype(np.float32), K=1)
scores = scores[indexes]
if saveScores:
scoresFile = outputFile.replace(".txt", "_scores.npy")
np.save(scoresFile, scores)
print(f"Scores saved to: {scoresFile}")
scores = scores.argmax(1) + 1 #because all classes are shifted to avoid 0 - unclassified
print(f"class 0: {len(np.where(scores == 0)[0])}, class 1: {len(np.where(scores == 1)[0])}")
import pandas as pd
print("Save labels: ", scores.shape)
pd.DataFrame(scores, dtype=np.uint8).to_csv(outputFile, sep='\t', header=None, index=None)
def SaveLabelsPnts(labels, outputFile):
import pandas as pd
print("Saving pts lbs...")
if(len(labels.shape) == 1):
pd.DataFrame(labels, dtype=np.uint8).to_csv(outputFile, sep='\t', header=None, index=None)
else:
np.save(outputFile, labels)
print("Pts lbs {} saved!".format(labels.shape))
def UpscaleToOriginal(originalPoints, pts_src, lbl, outputFile = None):
from tqdm import tqdm
# create the scores for all points
step = 10000000 #1000000
fullLbl = np.zeros((0,), np.int8)
print("KDTree magic. Source pts: {}. Queary pts: {}".format(len(pts_src), len(originalPoints)))
for i in tqdm(range(0, math.ceil(len(originalPoints)/step))):
a = i*step
b = a + np.min([len(originalPoints)-a, step])
indexes = nearest_correspondance(pts_src, originalPoints[a:b], K=1)
fullLbl = np.concatenate([fullLbl, lbl[indexes]], 0)
if(not (outputFile is None)):
SaveLabelsPnts(fullLbl, outputFile)
else:
return fullLbl
def GenerateLargeFile(model, voxelFile, originalFile, consts, outputFile, Upscale = True, saveScores = True):
from dataTool import ReadXYZ
from tqdm import tqdm
seq = TestSequence(voxelFile, consts, splitDataSetToParts=16000)
print("All pts: ", len(seq.allpts))
xyzrgb = seq.xyzrgb[:,:3]
scores = np.zeros((xyzrgb.shape[0], consts.classCount))
for _ in tqdm(range(seq.LenParts())):
seq.NextPart()
output = model.predict(seq, workers = consts.batchSize, max_queue_size = 300, verbose = 1)
idx = seq.idxList
for i in range(len(output)):
scores[idx[i]] += output[i]
mask = np.logical_not(scores.sum(1)==0)
scores = scores[mask]
pts_src = xyzrgb[mask].astype(np.float32)
if saveScores:
scoresFile = os.path.splitext(outputFile)[0]+"_scores.npy"
np.save(scoresFile, scores)
print(f"Scores saved to: {scoresFile}")
lbl = scores.argmax(1)
if(Upscale and not (originalFile is None)):
print("Load original file: ", originalFile)
originalPoints = ReadXYZ(originalFile).astype(np.float32)
assert(originalPoints.shape[1] == 3)
UpscaleToOriginal(originalPoints, pts_src, lbl, outputFile)
else:
SaveLabelsPnts(np.concatenate([pts_src, np.expand_dims(lbl, 1)], axis=1), outputFile)
def UpscaleFilesAsync(modelPath, voxelFolder, orgFolder, savePath):
import time
# notifyDevice = Notify()
savePath = savePath + Paths.FileName(modelPath)
print(f"Searching in folder: {savePath+'/vox_lbl/'}")
while True:
found = False
fileNames = Semantic3D.fileNames
for file in Paths.GetFiles(savePath, onlyNames=True, withoutExtension=True, findExtesions=('.labels')):
if(file in fileNames or file in fileNames.values()):
fileNames = {key:val for key, val in fileNames.items() if val != file and key != file}
if(len(fileNames) == 0):
print("Done upscaling files")
# notifyDevice.send("Done upscaling files")
return
for file in Paths.GetFiles(savePath+"/vox_lbl/", onlyNames=True, withoutExtension=True, findExtesions=('.npy')):
ptslbs = os.path.join(savePath+"/vox_lbl/", file+".npy")
# originalFile = os.path.join(orgFolder, file+".npy")
originalFile = os.path.join(orgFolder, file+".hdf5")
outputFile = os.path.join(savePath, file+".labels")
if(not os.path.exists(outputFile)):
found = True
open(outputFile, "a").close()
UpscaleFile(ptslbs, originalFile, outputFile)
if not found:
time.sleep(10) #sleep for 10 second and scan for job again
def UpscaleFile(ptslbsFile, originalFile, outputFile):
from dataTool import ReadLabels, ReadXYZ
print("Upscaling: {}".format(ptslbsFile))
scores = ReadLabels(ptslbsFile, readFormat = ".npy")
scores = np.squeeze(scores, 1)
pts_src = ReadXYZ(ptslbsFile, readFormat = ".npy")
originalPoints = ReadXYZ(originalFile)
UpscaleToOriginal(originalPoints, pts_src, scores, outputFile)
def nearest_correspondance(pts_src, pts_dest, K=1):
# print("KDTree magic. Source pts: {}. Queary pts: {}".format(len(pts_src), len(pts_dest)))
# t = time()
kdt = KDTree(pts_src, leaf_size=20)
_, indexes = kdt.query(pts_dest, k = K)
# print("Done in {}:{} min.".format(int((time() - t)/60), int((time() - t)%60)))
return np.squeeze(indexes, 1)
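# Added illustration (hypothetical toy arrays, not part of the original code): UpscaleToOriginal
# and GenerateFile use nearest_correspondance to propagate voxel-level labels back to the full
# cloud via a 1-nearest-neighbour KDTree lookup. A minimal sketch of that call:
def _nearest_correspondance_demo():
    src = np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0]], np.float32)  # e.g. voxel centres
    dst = np.array([[0.2, 0.0, 0.0], [9.5, 0.0, 0.0]], np.float32)   # e.g. original points
    lbl = np.array([1, 2], np.int8)                                   # labels of the src points
    idx = nearest_correspondance(src, dst, K=1)                       # -> array([0, 1])
    return lbl[idx]                                                   # -> array([1, 2])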
def TestTestSequence(path, consts):
seq = TestSequence(path, consts)
allPts = np.zeros((len(seq.xyzrgb), 3))
for i in range(len(seq)):
inpt = seq[i]
ftsList = inpt[0]
ptsList = inpt[1]
for j in range(len(ptsList)):
allPts[seq.idxList[i*consts.batchSize + j]] = ptsList[j]
emptyPts = np.logical_not(allPts.sum(1) != 0)
print("sparseCubes: ",seq.sparseCubes)
print("mean sparseCubes pt count: ", seq.sparseCubesPtCount/seq.sparseCubes)
print("Not picked points: {} => {:.2f}%".format(len(emptyPts), len(emptyPts)/len(allPts)))
nonEmptyPts = np.logical_not(emptyPts)
a = seq.xyzrgb[emptyPts]
b = seq.xyzrgb[nonEmptyPts]
dt = DataTool()
dt.VisualizePointCloud([a, b], [[1,0,0], None])
if(os.path.exists("C:/Program Files")):
import open3d as o3d
import time
from dataTool import LoadRenderOptions, SaveRenderOptions, GetPointsIndexInBoundingBox, GetPointsInBoundingBox
class BoxesIterator:
def __init__(self, boxes, points, colors, labels):
# self.pc = o3d.geometry.PointCloud()
# self.pc.points = o3d.utility.Vector3dVector(points)
self.src_points = points
self.src_colors = colors if np.max(colors) <= 1 else colors/255
self.src_labels = labels
self.dst_points = np.zeros((0, 3), dtype = np.float64)
self.dst_colors = np.zeros((0, 3), dtype = np.float64)
self.boxes = boxes
self.i = 0
# self.kdt = KDTree(points, leaf_size=20)
self.trajectory = None
# if(os.path.exists("./data/camera_trajectory.json")):
# self.trajectory = o3d.io.read_pinhole_camera_trajectory("./data/camera_trajectory.json").parameters
# self.trajectory_i = 0
# self.trajectory_time = time.time()
grey = np.array([128, 128, 128])/255
red = np.array([136, 0, 1])/255
mint = np.array([170, 255, 195])/255
teal = np.array([0, 128, 128])/255
green = np.array([60, 180, 75])/255
verygreen = np.array([0, 255, 0])/255
brown = np.array([170, 110, 40])/255
# white = np.array([255, 255, 255])/255
black = np.array([0, 0, 0])/255
blue = np.array([0, 0, 255])/255
pink = np.array([255, 56, 152])/255
#NPM3D
self.colors = []
if(np.max(self.src_labels) == 9):
self.colors = [grey, red, blue, teal, mint, brown, pink, black, green]
#Semantic3D
elif(np.max(self.src_labels) == 8):
self.colors = [grey, verygreen, green, mint, red, blue, brown, black]
self.pc = o3d.geometry.PointCloud()
self.pc.points = o3d.utility.Vector3dVector(self.src_points)
self.box = o3d.geometry.LineSet()
lines = np.array([[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7],[0, 4], [1, 5], [2, 6], [3, 7]])
self.box.lines = o3d.utility.Vector2iVector(lines)
self.box.colors = o3d.utility.Vector3dVector(np.array([[1,0,0] for _ in range(len(lines))]))
self.initSet = False
def ColorPtsByClass(self, pts, lbl):
pts_colors = np.zeros((len(pts), 3), np.float64)
for i in range(0, len(self.colors)):
indexes = np.where(lbl == i+1)[0]
pts_colors[indexes] = self.colors[i]
return pts_colors
def BoxPts(self, bBox):
box = [[bBox[0], bBox[2], bBox[4]],
[bBox[1], bBox[2], bBox[4]],
[bBox[0], bBox[3], bBox[4]],
[bBox[1], bBox[3], bBox[4]],
[bBox[0], bBox[2], bBox[5]],
[bBox[1], bBox[2], bBox[5]],
[bBox[0], bBox[3], bBox[5]],
[bBox[1], bBox[3], bBox[5]]]
return np.array(box)
def AnimationFunction(self, vis):
# time.sleep(0.2)
if(self.i < len(self.boxes)):
pts = self.src_points[:, :2]
mask_x = np.logical_and(self.boxes[self.i][0]<pts[:,0], pts[:,0]<self.boxes[self.i][1])
mask_y = np.logical_and(self.boxes[self.i][2]<pts[:,1], pts[:,1]<self.boxes[self.i][3])
ptsIdx = np.where(np.logical_and(mask_x, mask_y))[0]
randIdx = np.random.choice(ptsIdx, min(8192, len(ptsIdx)), replace=False)
self.dst_points = np.concatenate((self.dst_points, self.src_points[randIdx]), axis = 0)
self.dst_colors = np.concatenate((self.dst_colors, self.ColorPtsByClass(self.src_points[randIdx], self.src_labels[randIdx])), axis = 0)
self.src_points = np.delete(self.src_points, randIdx, axis = 0)
self.src_labels = np.delete(self.src_labels, randIdx, axis = 0)
self.src_colors = np.delete(self.src_colors, randIdx, axis = 0)
self.pc.points = o3d.utility.Vector3dVector(np.concatenate((self.src_points, self.dst_points), 0))
self.pc.colors = o3d.utility.Vector3dVector(np.concatenate((self.src_colors, self.dst_colors), 0))
self.box.points = o3d.utility.Vector3dVector(self.BoxPts(self.boxes[self.i]))
vis.clear_geometries()
vis.add_geometry(self.pc, False)
vis.add_geometry(self.box, False)
self.i += 1
# print(f"{self.i}/{len(self.boxes)}", end="\r")
else:
print("Iteration over.")
if(not os.path.exists("./data/camera_trajectory.json")):
self.trajectory = None
if(self.trajectory is None):
# vis = LoadRenderOptions(vis, returnVis=True)
if(os.path.exists("./data/camera_trajectory.json")):
self.trajectory = o3d.io.read_pinhole_camera_trajectory("./data/camera_trajectory.json").parameters
self.trajectory_i = 0
self.trajectory_time = time.time()
else:
ctr = vis.get_view_control()
ctr.convert_from_pinhole_camera_parameters(self.trajectory[self.trajectory_i])
if(self.trajectory_i < len(self.trajectory)-1): #and time.time() - self.trajectory_time > 1
print(f"Trajectory: {self.trajectory_i}/{len(self.trajectory)}", end="\r")
self.trajectory_i += 1
self.trajectory_time = time.time()
return False
def ShowSequenceBoxes(ptsFile, lblFile, consts):
from dataTool import DataTool, ReadLabels
consts.test_step = 4
seq = TestSequence(ptsFile, consts, windowsMachineCap=False)
minZ = np.min(seq.xyzrgb[:,2])
maxZ = np.max(seq.xyzrgb[:,2])
boxes = []
for pt in seq.pts:
minX = pt[0] - consts.blocksize/2
maxX = pt[0] + consts.blocksize/2
minY = pt[1] - consts.blocksize/2
maxY = pt[1] + consts.blocksize/2
boxes.append([minX, maxX, minY, maxY, minZ, maxZ])
dt = DataTool()
# dt.VisualizePointCloud([seq.xyzrgb[:,:3]], [seq.xyzrgb[:,3:6]], bBoxes = boxes)
boxesitr = BoxesIterator(boxes, seq.xyzrgb[:,:3], seq.xyzrgb[:,3:], np.squeeze(ReadLabels(lblFile),1))
dt.VisualizePointCloud([seq.xyzrgb[:,:3]], animationFunction=boxesitr.AnimationFunction)
# dt.VisualizePointCloud([seq.xyzrgb[:,:3]])
def RunExperiments():
from dataTool import VisualizePointCloudClassesAsync, VisualizePointCloudClasses, ReadLabels, DataTool, ReadXYZ
# testCloud = "G:/PointCloud DataSets/NPM3D/test_10_classes/ajaccio_2.ply"
# testCloud = consts.Paths.processedTrain+"/Lille1_1_0.npy"
# VisualizePointCloudClassesAsync(testCloud, downSample=False, windowName="Keras")
# VisualizePointCloudClassesAsync(testCloud, "G:/PointCloud DataSets/NPM3D/generatedResults/ajaccio_2.txt", downSample=False, windowName="Keras")
# VisualizePointCloudClassesAsync(testCloud, "G:/PointCloud DataSets/NPM3D/torch_generated_data/results88.2%/ajaccio_2.txt", downSample=False, windowName="Torch")
# TestTestSequence(consts.Paths.processedTrain+"/Lille1_1_0.npy", consts)
# ShowSequenceBoxes(consts.Paths.processedTrain+"/Lille1_1_0.npy", consts)
# # pts = ReadXYZ(consts.Paths.processedTrain+"/Lille2_0.npy")
# true = ReadLabels(consts.Paths.processedTrain+"/Lille2_0.npy")
# # pts = ReadXYZ(consts.Paths.rawTrain+"/untermaederbrunnen_station3_xyz_intensity_rgb.hdf5")
# # true = ReadLabels(consts.Paths.rawTrain+"/untermaederbrunnen_station3_xyz_intensity_rgb.hdf5")
# # pred_file = "G:/PointCloud DataSets/NPM3D/torch_generated_data/results88.2%/Lille2_0.txt"
# pred_file = consts.Paths.generatedTest+"/"+Paths.FileName(modelPath)+"/Lille2_0.txt"
# # pred_file = consts.Paths.generatedTest+"/"+Paths.FileName(modelPath)+"/untermaederbrunnen_station3_xyz_intensity_rgb.labels"
# pred = ReadLabels(pred_file)
# VisualizePointCloudClasses(consts.Paths.processedTrain+"/Lille2_0.npy",
# pred_file,
# downSample=False, windowName="Red error",
# errorPoints = ((true != pred) == (true != 0)),
# delPoints = (true == 0))
# error = np.where(true == 0)[0]
# true = np.delete(true, error, 0)
# pred = np.delete(pred, error, 0)
# from sklearn.metrics import confusion_matrix
# import metrics
# cm = confusion_matrix(true, pred, labels=list(range(consts.classCount)))
# iou = metrics.stats_iou_per_class(cm)
# print("Mean iou:", iou[0])
# print("iou per class:", iou[1])
from dataTool import ReadXYZ, ReadLabels
from sklearn.metrics import confusion_matrix
from metrics import stats_accuracy_per_class, stats_iou_per_class
src_pts = ReadXYZ(r"G:\PointCloud DataSets\semantic3d\rawTrain\bildstein_station3_xyz_intensity_rgb.hdf5")
src_lbl = ReadLabels(r"G:\PointCloud DataSets\semantic3d\rawTrain\bildstein_station3_xyz_intensity_rgb.hdf5")
src_lbl = np.squeeze(src_lbl, 1)
delIndices = np.where(src_lbl == 0)
src_pts = np.delete(src_pts, delIndices, axis=0)
src_lbl = np.delete(src_lbl, delIndices, axis=0)
voxel_pts = ReadXYZ(r"G:\PointCloud DataSets\semantic3d\processedTrain(0.15m)\bildstein_station3_xyz_intensity_rgb_voxels.npy")
voxel_lbl = ReadLabels(r"G:\PointCloud DataSets\semantic3d\processedTrain(0.15m)\bildstein_station3_xyz_intensity_rgb_voxels.npy")
voxel_lbl = np.squeeze(voxel_lbl, 1)
upscaled_lbl = UpscaleToOriginal(src_pts, voxel_pts, voxel_lbl)
cm = confusion_matrix(src_lbl, upscaled_lbl)
avg_acc, avg_class = stats_accuracy_per_class(cm)
avg_iou, avg_iou_class = stats_iou_per_class(cm)
def RenameSemantic3DFiles(folder):
if(len(Paths.GetFiles(folder, findExtesions = ".labels")) == 0):
print("No files found.")
return
for file in Paths.GetFiles(folder, findExtesions = ".labels"):
if(Paths.FileName(file).endswith("(1)")):
os.remove(file)
else:
name = Paths.FileName(file)
newFileName = file.replace(name, Semantic3D.fileNames[name])
os.rename(file, newFileName)
if(os.path.getsize(newFileName) == 0):
print(f"{newFileName} if 0 bytes size")
if(len(Paths.GetFiles(folder, findExtesions = ".labels")) != 15):
print("Wrong number of files.")
else:
print("Done renaming: ", folder)
if __name__ == "__main__":
from NearestNeighbors import NearestNeighborsLayer, SampleNearestNeighborsLayer
from KDTree import KDTreeLayer, KDTreeSampleLayer
modelPath = None
# consts = NPM3D()
# consts = Semantic3D()
consts = Curbs()
consts.noFeature = True
# consts.Fusion = True
# consts.Scale = True
consts.Rotate = True
# consts.Mirror = True
# consts.Jitter = True
# consts.FtrAugment = True
testFiles = consts.TestFiles()
trainFiles = consts.TrainFiles()
modelPath = "Sem3D(vox)(fusion)(FullAugment)_3_train(86.2)_val(79.5).h5"
# modelPath = "Curbs(7&1)(noFeature)(Rotate)_21bdbe6aa82d4e259526ab46577e795a_25_train(75.1)_val(60.7).h5"
# modelPath = ["Sem3D(vox)(RGB)(FullAugment)_55_train(85.7)_val(79.9)", "Sem3D(NOCOL)_50_train(87.4)_val(69.1)"]
# modelPath = ["NPM3D(80&5)(RGB)(NoScale)_28_train(88.3)_val(73.2).h5", "NPM3D(80&5)(NOCOL)(FullAugment)_28_train(87.3)_val(71.5).h5"]
# modelPath = LatestModel("Sem3D(14&1)(noFeature)(Scale)(Rotate)(Mirror)(Jitter)")
# modelPath = LatestModel(consts.Name())
if(isinstance(modelPath,list)):
consts.Fusion = True
if(not consts.Fusion and not Const.IsWindowsMachine()):
tf.config.optimizer.set_jit(True) #Gives more than 10% boost!!!
print("XLA enabled.")
# modelPath = ["Sem3D(14&1)(noFeature)(Scale)(Rotate)(Mirror)(Jitter)_9bbee708a7814063af9d85070452abd8_59_train(85.2)_val(72.8)",
# "Sem3D(14&1)(noFeature)(Rotate)(Mirror)(Jitter)_ff2eb229084247d9a1c63caa519e9890_58_train(84.9)_val(75.5)",
# "Sem3D(14&1)(noFeature)_dffc17f77e924894bbdbdad818ab6994_40_train(85.1)_val(68.8)"]
# EvaluateModels([modelPath], testFiles, consts)
TrainModel(trainFiles, testFiles, consts, modelPath = modelPath)# , epochs = 8) #continue train
# TrainModel(trainFiles, testFiles, consts) #new model
# modelPath = HighestValMIOUModel("NPM3D(80&5)(fusion)(FullAugment)")
#NPM3D
# GenerateData(modelPath, Paths.GetFiles(consts.Paths.rawTest), consts, consts.Paths.generatedTest)
#Semantic3D
# GenerateLargeData(modelPath, Paths.Semantic3D.processedTest, Paths.Semantic3D.rawTest, consts, consts.Paths.generatedTest, Upscale=False)
# UpscaleFilesAsync(modelPath, Paths.Semantic3D.processedTest, Paths.Semantic3D.rawTest, Paths.Semantic3D.generatedTest)
# RenameSemantic3DFiles(Paths.Semantic3D.generatedTest + Paths.FileName(modelPath))
#Curbs
EvaluateModels([modelPath], testFiles, consts)
# GenerateData(modelPath, testFiles, consts, consts.Paths.pointCloudPath+"/generated/")
GenerateLargeData(modelPath, testFiles, consts, consts.Paths.pointCloudPath+"/generated/") |
#!python3
import sys
# Used to generate output that my gnuplot will accept
lines = iter(sys.stdin)
next(lines)
lines = map(lambda s: s.split(), lines)
#0 Graph_name vertices edges g_phi h_phi timed_out
#6 spent_time allowed_time read_as_multi CASE best_cut_conductance
#11 best_cut_expansion edges_crossing size1 size2 diff_total
#16 diff_div_nodes vol1 vol2 best_round last_round
#21 walsh_cond walsh_imb colR colG colB
print("#vertices conductance our_cut_conductance name colors imbalance")
#4824 0.00271739 0.00140746 "uk" 150 150 0 0.568823
for line in lines:
line[0] = f'"{line[0].replace('_', ' ')}"'
print(f"{line[1]}\t{line[21]}\t{line[10]}\t{line[0]}\t{line[23]}\t{line[24]}\t{line[25]}\t{line[16]}\t")
| #!python3
import sys
# Used to generate output that my gnuplot will accept
lines = iter(sys.stdin)
next(lines)
lines = map(lambda s: s.split(), lines)
#0 Graph_name vertices edges g_phi h_phi timed_out
#6 spent_time allowed_time read_as_multi CASE best_cut_conductance
#11 best_cut_expansion edges_crossing size1 size2 diff_total
#16 diff_div_nodes vol1 vol2 best_round last_round
#21 walsh_cond walsh_imb colR colG colB
print("#vertices conductance our_cut_conductance name colors imbalance")
#4824 0.00271739 0.00140746 "uk" 150 150 0 0.568823
for line in lines:
line[0] = f'"{line[0].replace("_", " ")}"'
print(f"{line[1]}\t{line[21]}\t{line[10]}\t{line[0]}\t{line[23]}\t{line[24]}\t{line[25]}\t{line[16]}\t")
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from typing import Callable, List
import click
import github
from github.GithubException import GithubException
from googleapiclient import discovery
from googleapiclient.errors import HttpError
# cloud run tags must be lowercase
TAG_PREFIX = "pr-"
def make_tag(pr: str) -> str:
return f"{TAG_PREFIX}{pr}"
def get_pr(tag: str) -> int:
return int(tag.replace(TAG_PREFIX, ""))
_default_options = [
click.option(
"--dry-run",
help="Dry-run mode. No tag changes made",
default=False,
is_flag=True,
),
]
_cloudrun_options = [
click.option("--project-id", required=True, help="Google Cloud Project ID"),
click.option(
"--region", required=True, help="Google Cloud Region", default="us-central1"
),
click.option("--service", required=True, help="Google Cloud Run service name"),
]
_github_options = [
click.option(
"--repo-name", required=True, help="GitHub repo name (user/repo, or org/repo)"
)
]
def add_options(options: List[dict]) -> Callable:
def _add_options(func: Callable) -> Callable:
for option in reversed(options):
func = option(func)
return func
return _add_options
def error(msg: str, context: str = None) -> None:
click.secho(f"Error {context}: ", fg="red", bold=True, nl=False)
click.echo(msg)
sys.exit(1)
def get_service(project_id: str, region: str, service_name: str) -> dict:
"""Get the Cloud Run service object"""
api = discovery.build("run", "v1")
fqname = f"projects/{project_id}/locations/{region}/services/{service_name}"
try:
service = api.projects().locations().services().get(name=fqname).execute()
except HttpError as e:
error(re.search('"(.*)"', str(e)).group(0), context="finding service")
return service
def update_service(project_id: str, region: str, service_name: str, body: dict) -> dict:
"""Update the Cloud Run service."""
api = discovery.build("run", "v1")
fqname = f"projects/{project_id}/locations/{region}/services/{service_name}"
try:
result = (
api.projects()
.locations()
.services()
.replaceService(name=fqname, body=body)
.execute()
)
except HttpError as e:
error(re.search('"(.*)"', str(e)).group(0), context="updating service")
return result
def get_revision_url(service_obj: dict, tag: str) -> str:
"""Get the revision URL for the tag specified on the service"""
for revision in service_obj["status"]["traffic"]:
if revision.get("tag", None) == tag:
return revision["url"]
error(
f"Tag on service {service_obj["metadata"]["name"]} does not exist.",
context=f"finding revision tagged {tag}",
)
def get_revision_tags(service: dict) -> List[str]:
"""Get all tags associated to a service"""
revs = []
for revision in service["status"]["traffic"]:
if revision.get("tag", None):
revs.append(revision)
return revs
@click.group()
def cli() -> None:
"""Tool for setting GitHub Status Checks to Cloud Run Revision URLs"""
pass
@cli.command()
@add_options(_default_options)
@add_options(_cloudrun_options)
@add_options(_github_options)
def cleanup(
dry_run: str, project_id: str, region: str, service: str, repo_name: str
) -> None:
"""Cleanup any revision URLs against closed pull requests"""
service_obj = get_service(project_id, region, service)
revs = get_revision_tags(service_obj)
if not revs:
click.echo("No revision tags found, nothing to clean up")
sys.exit(0)
ghtoken = os.environ.get("GITHUB_TOKEN", None)
if not ghtoken:
raise ValueError("GITHUB_TOKEN not defined.")
try:
repo = github.Github(ghtoken).get_repo(repo_name)
except GithubException as e:
error(e.data["message"], context=f"finding repo {repo_name}")
tags_to_delete = []
for rev in revs:
tag = rev["tag"]
pr = get_pr(tag)
pull_request = repo.get_pull(pr)
if pull_request.state == "closed":
if dry_run:
click.secho("Dry-run: ", fg="blue", bold=True, nl=False)
click.echo(
f"PR {pr} is closed, so would remove tag {tag} on service {service}"
)
else:
tags_to_delete.append(tag)
if tags_to_delete:
# Edit the service by removing the tags from the traffic spec, then replace the service
# with this new configuration.
for tag in tags_to_delete:
for traffic in service_obj["spec"]["traffic"]:
if "tag" in traffic.keys() and tag == traffic["tag"]:
service_obj["spec"]["traffic"].remove(traffic)
click.echo(f"Updating the service to remove tags: {",".join(tags_to_delete)}.")
update_service(project_id, region, service, service_obj)
else:
click.echo("Did not identify any tags to delete.")
@cli.command()
@add_options(_default_options)
@add_options(_cloudrun_options)
@add_options(_github_options)
@click.option("--pull-request", required=True, help="GitHub Pull Request ID", type=int)
@click.option("--commit-sha", required=True, help="GitHub commit (SHORT_SHA)")
# [START cloudrun_deployment_preview_setstatus]
def set(
dry_run: str,
project_id: str,
region: str,
service: str,
repo_name: str,
commit_sha: str,
pull_request: str,
) -> None:
"""Set a status on a GitHub commit to a specific revision URL"""
service_obj = get_service(project_id, region, service)
revision_url = get_revision_url(service_obj, tag=make_tag(pull_request))
ghtoken = os.environ.get("GITHUB_TOKEN", None)
if not ghtoken:
raise ValueError("GITHUB_TOKEN not defined.")
try:
repo = github.Github(ghtoken).get_repo(repo_name)
except GithubException as e:
error(
e.data["message"],
context=f"finding repo {repo_name}. Is it a private repo, and does your token have the correct permissions?",
)
try:
commit = repo.get_commit(sha=commit_sha)
except GithubException as e:
error(e.data["message"], context=f"finding commit {commit_sha}")
# [START_EXCLUDE]
if dry_run:
click.secho("Dry-run: ", fg="blue", bold=True, nl=False)
click.echo(
(
f"Status would have been created on {repo_name}, "
f"commit {commit.sha[:7]}, linking to {revision_url} "
f"on service {service_obj["metadata"]["name"]}"
)
)
return
# [END_EXCLUDE]
commit.create_status(
state="success",
target_url=revision_url,
context="Deployment Preview",
description="Your preview is now available.",
)
click.secho("Success: ", fg="green", bold=True, nl=False)
click.echo(
f"Status created on {repo_name}, commit {commit.sha[:7]}, "
f"linking to {revision_url} on service {service_obj["metadata"]["name"]}"
)
# [END cloudrun_deployment_preview_setstatus]
if __name__ == "__main__":
cli()
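# Example invocation (hypothetical script name and values, added for illustration only;
# requires a GITHUB_TOKEN with permission to create commit statuses):
#   python deployment_status.py set \
#       --project-id my-project --region us-central1 --service my-service \
#       --repo-name my-org/my-repo --pull-request 42 --commit-sha abc1234
#   python deployment_status.py cleanup \
#       --project-id my-project --region us-central1 --service my-service \
#       --repo-name my-org/my-repo --dry-run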
| #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from typing import Callable, List
import click
import github
from github.GithubException import GithubException
from googleapiclient import discovery
from googleapiclient.errors import HttpError
# cloud run tags must be lowercase
TAG_PREFIX = "pr-"
def make_tag(pr: str) -> str:
return f"{TAG_PREFIX}{pr}"
def get_pr(tag: str) -> int:
return int(tag.replace(TAG_PREFIX, ""))
_default_options = [
click.option(
"--dry-run",
help="Dry-run mode. No tag changes made",
default=False,
is_flag=True,
),
]
_cloudrun_options = [
click.option("--project-id", required=True, help="Google Cloud Project ID"),
click.option(
"--region", required=True, help="Google Cloud Region", default="us-central1"
),
click.option("--service", required=True, help="Google Cloud Run service name"),
]
_github_options = [
click.option(
"--repo-name", required=True, help="GitHub repo name (user/repo, or org/repo)"
)
]
def add_options(options: List[dict]) -> Callable:
def _add_options(func: Callable) -> Callable:
for option in reversed(options):
func = option(func)
return func
return _add_options
def error(msg: str, context: str = None) -> None:
click.secho(f"Error {context}: ", fg="red", bold=True, nl=False)
click.echo(msg)
sys.exit(1)
def get_service(project_id: str, region: str, service_name: str) -> dict:
"""Get the Cloud Run service object"""
api = discovery.build("run", "v1")
fqname = f"projects/{project_id}/locations/{region}/services/{service_name}"
try:
service = api.projects().locations().services().get(name=fqname).execute()
except HttpError as e:
error(re.search('"(.*)"', str(e)).group(0), context="finding service")
return service
def update_service(project_id: str, region: str, service_name: str, body: dict) -> dict:
"""Update the Cloud Run service."""
api = discovery.build("run", "v1")
fqname = f"projects/{project_id}/locations/{region}/services/{service_name}"
try:
result = (
api.projects()
.locations()
.services()
.replaceService(name=fqname, body=body)
.execute()
)
except HttpError as e:
error(re.search('"(.*)"', str(e)).group(0), context="updating service")
return result
def get_revision_url(service_obj: dict, tag: str) -> str:
"""Get the revision URL for the tag specified on the service"""
for revision in service_obj["status"]["traffic"]:
if revision.get("tag", None) == tag:
return revision["url"]
error(
f"Tag on service {service_obj['metadata']['name']} does not exist.",
context=f"finding revision tagged {tag}",
)
def get_revision_tags(service: dict) -> List[str]:
"""Get all tags associated to a service"""
revs = []
for revision in service["status"]["traffic"]:
if revision.get("tag", None):
revs.append(revision)
return revs
@click.group()
def cli() -> None:
"""Tool for setting GitHub Status Checks to Cloud Run Revision URLs"""
pass
@cli.command()
@add_options(_default_options)
@add_options(_cloudrun_options)
@add_options(_github_options)
def cleanup(
dry_run: str, project_id: str, region: str, service: str, repo_name: str
) -> None:
"""Cleanup any revision URLs against closed pull requests"""
service_obj = get_service(project_id, region, service)
revs = get_revision_tags(service_obj)
if not revs:
click.echo("No revision tags found, nothing to clean up")
sys.exit(0)
ghtoken = os.environ.get("GITHUB_TOKEN", None)
if not ghtoken:
raise ValueError("GITHUB_TOKEN not defined.")
try:
repo = github.Github(ghtoken).get_repo(repo_name)
except GithubException as e:
error(e.data["message"], context=f"finding repo {repo_name}")
tags_to_delete = []
for rev in revs:
tag = rev["tag"]
pr = get_pr(tag)
pull_request = repo.get_pull(pr)
if pull_request.state == "closed":
if dry_run:
click.secho("Dry-run: ", fg="blue", bold=True, nl=False)
click.echo(
f"PR {pr} is closed, so would remove tag {tag} on service {service}"
)
else:
tags_to_delete.append(tag)
if tags_to_delete:
# Edit the service by removing the tags from the traffic spec, then replace the service
# with this new configuration.
for tag in tags_to_delete:
for traffic in service_obj["spec"]["traffic"]:
if "tag" in traffic.keys() and tag == traffic["tag"]:
service_obj["spec"]["traffic"].remove(traffic)
click.echo(f"Updating the service to remove tags: {','.join(tags_to_delete)}.")
update_service(project_id, region, service, service_obj)
else:
click.echo("Did not identify any tags to delete.")
@cli.command()
@add_options(_default_options)
@add_options(_cloudrun_options)
@add_options(_github_options)
@click.option("--pull-request", required=True, help="GitHub Pull Request ID", type=int)
@click.option("--commit-sha", required=True, help="GitHub commit (SHORT_SHA)")
# [START cloudrun_deployment_preview_setstatus]
def set(
dry_run: str,
project_id: str,
region: str,
service: str,
repo_name: str,
commit_sha: str,
pull_request: str,
) -> None:
"""Set a status on a GitHub commit to a specific revision URL"""
service_obj = get_service(project_id, region, service)
revision_url = get_revision_url(service_obj, tag=make_tag(pull_request))
ghtoken = os.environ.get("GITHUB_TOKEN", None)
if not ghtoken:
raise ValueError("GITHUB_TOKEN not defined.")
try:
repo = github.Github(ghtoken).get_repo(repo_name)
except GithubException as e:
error(
e.data["message"],
context=f"finding repo {repo_name}. Is it a private repo, and does your token have the correct permissions?",
)
try:
commit = repo.get_commit(sha=commit_sha)
except GithubException as e:
error(e.data["message"], context=f"finding commit {commit_sha}")
# [START_EXCLUDE]
if dry_run:
click.secho("Dry-run: ", fg="blue", bold=True, nl=False)
click.echo(
(
f"Status would have been created on {repo_name}, "
f"commit {commit.sha[:7]}, linking to {revision_url} "
f"on service {service_obj['metadata']['name']}"
)
)
return
# [END_EXCLUDE]
commit.create_status(
state="success",
target_url=revision_url,
context="Deployment Preview",
description="Your preview is now available.",
)
click.secho("Success: ", fg="green", bold=True, nl=False)
click.echo(
f"Status created on {repo_name}, commit {commit.sha[:7]}, "
f"linking to {revision_url} on service {service_obj['metadata']['name']}"
)
# [END cloudrun_deployment_preview_setstatus]
if __name__ == "__main__":
cli()
|
"""
Functions to help with calculating batch properties for experiments objects.
"""
from __future__ import annotations
import logging
from dials.array_family import flex
logger = logging.getLogger("dials")
class batch_manager:
def __init__(self, batches, batch_params):
# batch params is a list of dicts with "id" and "range" - used to be
# a 'scope extract' object
self.batch_params = sorted(batch_params, key=lambda b: b["range"][0])
self.batches = batches
self.reduced_batches, self._batch_increments = self._reduce()
def _reduce(self):
reduced_batches = flex.int(self.batches)
batch_increments = []
incr = 0
for batch in self.batch_params:
sel = (reduced_batches >= batch["range"][0]) & (
reduced_batches <= batch["range"][1]
)
reduced_batches.set_selected(
sel, reduced_batches.select(sel) - (batch["range"][0] - incr) + 1
)
batch_increments.append(incr)
incr += batch["range"][1] - batch["range"][0] + 1
assert len(set(reduced_batches)) == len(reduced_batches)
return list(reduced_batches), batch_increments
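# Added worked example (hypothetical numbers, for clarity): with
#   batch_params = [{"id": "a", "range": (1, 3)}, {"id": "b", "range": (101, 102)}]
#   batches      = [1, 2, 3, 101, 102]
# _reduce() packs the two ranges end to end, returning
#   reduced_batches   = [1, 2, 3, 4, 5]
#   _batch_increments = [0, 3]
# so plotting code can place the second experiment's batches directly after the first.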
def batch_plot_shapes_and_annotations(self):
light_grey = "#d3d3d3"
grey = "#808080"
shapes = []
annotations = []
batches = flex.int(self.batches)
text = flex.std_string(batches.size())
for i, batch in enumerate(self.batch_params):
fillcolor = [light_grey, grey][i % 2] # alternate colours
shapes.append(
{
"type": "rect",
# x-reference is assigned to the x-values
"xref": "x",
# y-reference is assigned to the plot paper [0,1]
"yref": "paper",
"x0": self._batch_increments[i],
"y0": 0,
"x1": self._batch_increments[i]
+ (batch["range"][1] - batch["range"][0]),
"y1": 1,
"fillcolor": fillcolor,
"opacity": 0.2,
"line": {"width": 0},
}
)
annotations.append(
{
# x-reference is assigned to the x-values
"xref": "x",
# y-reference is assigned to the plot paper [0,1]
"yref": "paper",
"x": self._batch_increments[i]
+ (batch["range"][1] - batch["range"][0]) / 2,
"y": 1,
"text": f"{batch["id"]}",
"showarrow": False,
"yshift": 20,
# 'arrowhead': 7,
# 'ax': 0,
# 'ay': -40
}
)
sel = (batches >= batch["range"][0]) & (batches <= batch["range"][1])
text.set_selected(
sel,
flex.std_string(
[
f"{batch["id"]}: {j - batch["range"][0] + 1}"
for j in batches.select(sel)
]
),
)
return shapes, annotations, list(text)
def assign_batches_to_reflections(reflections, batch_offsets):
"""Assign a 'batch' column to the reflection table"""
for batch_offset, refl in zip(batch_offsets, reflections):
xdet, ydet, zdet = [flex.double(x) for x in refl["xyzobs.px.value"].parts()]
# compute BATCH values - floor() to get (fortran) image captured within
# +1 because FORTRAN counting; zdet+1=image_index
# +off because image_index+o=batch
refl["batch"] = (flex.floor(zdet).iround() + 1) + batch_offset
return reflections
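# Added worked example (hypothetical values, for clarity): a reflection observed at zdet = 12.7 px
# in an experiment with batch_offset = 100 is assigned batch = floor(12.7) + 1 + 100 = 113, i.e.
# the 1-based (FORTRAN-style) image index 13 shifted into that experiment's batch range.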
def get_batch_ranges(experiments, batch_offsets):
"""Get batch ranges for a list of experiments and offsets"""
batch_ranges = []
assert len(experiments) == len(batch_offsets)
image_ranges = get_image_ranges(experiments)
for batch_offset, image_range in zip(batch_offsets, image_ranges):
batch_ranges.append(
(image_range[0] + batch_offset, image_range[1] + batch_offset)
)
return batch_ranges
def get_image_ranges(experiments):
"""Get image ranges for a list of experiments (including scanless exp.)"""
# Note, if set to 1,1,for scanless experiments then first batch offset in
# _calculate_batch_offsets is zero below, bad!
return [e.scan.get_image_range() if e.scan else (0, 0) for e in experiments]
def calculate_batch_offsets(experiment_list):
"""Take a list of experiments and resolve and return the batch offsets.
First adds an image_range property as not all experiments have scans."""
image_ranges = get_image_ranges(experiment_list)
offsets = _calculate_batch_offsets(image_ranges)
return offsets
def set_batch_offsets(experiment_list, batch_offsets):
"""Set batch offsets in scan objects. Don't need to set anything for
scanless experiments, as these are not used with the batch system."""
for exp, offset in zip(experiment_list, batch_offsets):
if exp.scan:
exp.scan.set_batch_offset(offset)
def _calculate_batch_offsets(image_ranges):
"""Take a list of (modified) experiments and resolve and return the batch
offsets.
This is the number added to the image number to give the
batch number, such that:
- Each experiment has a unique, nonoverlapping, nonconsecutive range
- None are zero
- Image number ranges are kept if at all possible
"""
experiments_to_shift = []
existing_ranges = set()
maximum_batch_number = 0
batch_offsets = [0] * len(image_ranges)
# Handle zeroth shifts and kept ranges
for i, image_range in enumerate(image_ranges):
ilow, ihigh = image_range
# Check assumptions
assert ilow <= ihigh, "Inverted image order!?"
assert ilow >= 0, "Negative image indices are not expected"
# Don't emit zero: Causes problems with C/fortran number conversion
if ilow == 0:
ilow, ihigh = ilow + 1, ihigh + 1
# If we overlap with anything, then process later
if any(ilow < high + 1 and ihigh >= low - 1 for low, high in existing_ranges):
experiments_to_shift.append((i, image_range))
else:
batch_offsets[i] = ilow - image_range[0]
existing_ranges.add((ilow, ihigh))
maximum_batch_number = max(maximum_batch_number, ihigh)
# Now handle all the experiments that overlapped by pushing them higher
for i, image_range in experiments_to_shift:
start_number = _next_epoch(maximum_batch_number)
range_width = image_range[1] - image_range[0] + 1
end_number = start_number + range_width - 1
batch_offsets[i] = start_number - image_range[0]
maximum_batch_number = end_number
return batch_offsets
def _next_epoch(val):
"""Find the next number above the existing value that ends in 1, that is
not consecutive with the current value."""
if val % 100 == 99:
return val + 2
elif val % 100 == 0:
return val + 101
else:
rem = val % 100
return val - rem + 101
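# Added worked example (hypothetical values, for clarity): _next_epoch rounds up to the next value
# ending in 01 that is not consecutive with the current maximum, e.g. _next_epoch(57) -> 101,
# _next_epoch(99) -> 101, _next_epoch(100) -> 201. So an image range (1, 60) that overlaps an
# already-placed (1, 57) block is pushed by _calculate_batch_offsets to start at batch 101,
# giving batch_offset = 101 - 1 = 100.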
| """
Functions to help with calculating batch properties for experiments objects.
"""
from __future__ import annotations
import logging
from dials.array_family import flex
logger = logging.getLogger("dials")
class batch_manager:
def __init__(self, batches, batch_params):
# batch params is a list of dicts with "id" and "range" - used to be
# a 'scope extract' object
self.batch_params = sorted(batch_params, key=lambda b: b["range"][0])
self.batches = batches
self.reduced_batches, self._batch_increments = self._reduce()
def _reduce(self):
reduced_batches = flex.int(self.batches)
batch_increments = []
incr = 0
for batch in self.batch_params:
sel = (reduced_batches >= batch["range"][0]) & (
reduced_batches <= batch["range"][1]
)
reduced_batches.set_selected(
sel, reduced_batches.select(sel) - (batch["range"][0] - incr) + 1
)
batch_increments.append(incr)
incr += batch["range"][1] - batch["range"][0] + 1
assert len(set(reduced_batches)) == len(reduced_batches)
return list(reduced_batches), batch_increments
def batch_plot_shapes_and_annotations(self):
light_grey = "#d3d3d3"
grey = "#808080"
shapes = []
annotations = []
batches = flex.int(self.batches)
text = flex.std_string(batches.size())
for i, batch in enumerate(self.batch_params):
fillcolor = [light_grey, grey][i % 2] # alternate colours
shapes.append(
{
"type": "rect",
# x-reference is assigned to the x-values
"xref": "x",
# y-reference is assigned to the plot paper [0,1]
"yref": "paper",
"x0": self._batch_increments[i],
"y0": 0,
"x1": self._batch_increments[i]
+ (batch["range"][1] - batch["range"][0]),
"y1": 1,
"fillcolor": fillcolor,
"opacity": 0.2,
"line": {"width": 0},
}
)
annotations.append(
{
# x-reference is assigned to the x-values
"xref": "x",
# y-reference is assigned to the plot paper [0,1]
"yref": "paper",
"x": self._batch_increments[i]
+ (batch["range"][1] - batch["range"][0]) / 2,
"y": 1,
"text": f"{batch['id']}",
"showarrow": False,
"yshift": 20,
# 'arrowhead': 7,
# 'ax': 0,
# 'ay': -40
}
)
sel = (batches >= batch["range"][0]) & (batches <= batch["range"][1])
text.set_selected(
sel,
flex.std_string(
[
f"{batch['id']}: {j - batch['range'][0] + 1}"
for j in batches.select(sel)
]
),
)
return shapes, annotations, list(text)
def assign_batches_to_reflections(reflections, batch_offsets):
"""Assign a 'batch' column to the reflection table"""
for batch_offset, refl in zip(batch_offsets, reflections):
xdet, ydet, zdet = [flex.double(x) for x in refl["xyzobs.px.value"].parts()]
# compute BATCH values - floor() to get (fortran) image captured within
# +1 because FORTRAN counting; zdet+1=image_index
# +off because image_index+o=batch
refl["batch"] = (flex.floor(zdet).iround() + 1) + batch_offset
return reflections
def get_batch_ranges(experiments, batch_offsets):
"""Get batch ranges for a list of experiments and offsets"""
batch_ranges = []
assert len(experiments) == len(batch_offsets)
image_ranges = get_image_ranges(experiments)
for batch_offset, image_range in zip(batch_offsets, image_ranges):
batch_ranges.append(
(image_range[0] + batch_offset, image_range[1] + batch_offset)
)
return batch_ranges
def get_image_ranges(experiments):
"""Get image ranges for a list of experiments (including scanless exp.)"""
# Note, if set to 1,1,for scanless experiments then first batch offset in
# _calculate_batch_offsets is zero below, bad!
return [e.scan.get_image_range() if e.scan else (0, 0) for e in experiments]
def calculate_batch_offsets(experiment_list):
"""Take a list of experiments and resolve and return the batch offsets.
First adds an image_range property as not all experiments have scans."""
image_ranges = get_image_ranges(experiment_list)
offsets = _calculate_batch_offsets(image_ranges)
return offsets
def set_batch_offsets(experiment_list, batch_offsets):
"""Set batch offsets in scan objects. Don't need to set anything for
scanless experiments, as these are not used with the batch system."""
for exp, offset in zip(experiment_list, batch_offsets):
if exp.scan:
exp.scan.set_batch_offset(offset)
def _calculate_batch_offsets(image_ranges):
"""Take a list of (modified) experiments and resolve and return the batch
offsets.
This is the number added to the image number to give the
batch number, such that:
- Each experiment has a unique, nonoverlapping, nonconsecutive range
- None are zero
- Image number ranges are kept if at all possible
"""
experiments_to_shift = []
existing_ranges = set()
maximum_batch_number = 0
batch_offsets = [0] * len(image_ranges)
# Handle zeroth shifts and kept ranges
for i, image_range in enumerate(image_ranges):
ilow, ihigh = image_range
# Check assumptions
assert ilow <= ihigh, "Inverted image order!?"
assert ilow >= 0, "Negative image indices are not expected"
# Don't emit zero: Causes problems with C/fortran number conversion
if ilow == 0:
ilow, ihigh = ilow + 1, ihigh + 1
# If we overlap with anything, then process later
if any(ilow < high + 1 and ihigh >= low - 1 for low, high in existing_ranges):
experiments_to_shift.append((i, image_range))
else:
batch_offsets[i] = ilow - image_range[0]
existing_ranges.add((ilow, ihigh))
maximum_batch_number = max(maximum_batch_number, ihigh)
# Now handle all the experiments that overlapped by pushing them higher
for i, image_range in experiments_to_shift:
start_number = _next_epoch(maximum_batch_number)
range_width = image_range[1] - image_range[0] + 1
end_number = start_number + range_width - 1
batch_offsets[i] = start_number - image_range[0]
maximum_batch_number = end_number
return batch_offsets
def _next_epoch(val):
"""Find the next number above the existing value that ends in 1, that is
not consecutive with the current value."""
if val % 100 == 99:
return val + 2
elif val % 100 == 0:
return val + 101
else:
rem = val % 100
return val - rem + 101
|
import time
import e2e.clickhouse as clickhouse
import e2e.kubectl as kubectl
import e2e.yaml_manifest as yaml_manifest
import e2e.settings as settings
import e2e.util as util
from testflows.core import *
from testflows.asserts import error
@TestScenario
@Name("test_ch_001. Insert quorum")
def test_ch_001(self):
util.require_keeper(keeper_type=self.context.keeper_type)
quorum_template = "manifests/chit/tpl-clickhouse-21.8.yaml"
chit_data = yaml_manifest.get_manifest_data(util.get_full_path(quorum_template))
kubectl.launch(f"delete chit {chit_data["metadata"]["name"]}", ns=settings.test_namespace, ok_to_fail=True)
kubectl.create_and_check(
"manifests/chi/test-ch-001-insert-quorum.yaml",
{
"apply_templates": {quorum_template},
"pod_count": 2,
"do_not_delete": 1,
})
chi = yaml_manifest.get_chi_name(util.get_full_path("manifests/chi/test-ch-001-insert-quorum.yaml"))
chi_data = kubectl.get("chi", ns=settings.test_namespace, name=chi)
util.wait_clickhouse_cluster_ready(chi_data)
host0 = "chi-test-ch-001-insert-quorum-default-0-0"
host1 = "chi-test-ch-001-insert-quorum-default-0-1"
create_table = """
create table t1 on cluster default (a Int8, d Date default today())
Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
partition by d order by a
TTL d + interval 5 second
SETTINGS merge_with_ttl_timeout=5""".replace('\r', '').replace('\n', '')
create_mv_table2 = """
create table t2 on cluster default (a Int8)
Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
partition by tuple() order by a""".replace('\r', '').replace('\n', '')
create_mv_table3 = """
create table t3 on cluster default (a Int8)
Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
partition by tuple() order by a""".replace('\r', '').replace('\n', '')
create_mv2 = "create materialized view t_mv2 on cluster default to t2 as select a from t1"
create_mv3 = "create materialized view t_mv3 on cluster default to t3 as select a from t1"
with Given("Tables t1, t2, t3 and MVs t1->t2, t1-t3 are created"):
clickhouse.query(chi, create_table)
clickhouse.query(chi, create_mv_table2)
clickhouse.query(chi, create_mv_table3)
clickhouse.query(chi, create_mv2)
clickhouse.query(chi, create_mv3)
with When("Add a row to an old partition"):
clickhouse.query(chi, "insert into t1(a,d) values(6, today()-1)", host=host0)
with When("Stop fetches for t1 at replica1"):
clickhouse.query(chi, "system stop fetches default.t1", host=host1)
with Then("Wait 10 seconds and the data should be dropped by TTL"):
time.sleep(10)
out = clickhouse.query(chi, "select count() from t1 where a=6", host=host0)
assert out == "0", error()
with When("Resume fetches for t1 at replica1"):
clickhouse.query(chi, "system start fetches default.t1", host=host1)
time.sleep(5)
with Then("Inserts should resume"):
clickhouse.query(chi, "insert into t1(a) values(7)", host=host0)
clickhouse.query(chi, "insert into t1(a) values(1)")
with When("Stop fetches for t2 at replica1"):
clickhouse.query(chi, "system stop fetches default.t2", host=host1)
with Then("Insert should fail since it can not reach the quorum"):
out = clickhouse.query_with_error(chi, "insert into t1(a) values(2)", host=host0)
assert "Timeout while waiting for quorum" in out, error()
# kubectl(f"exec {host0}-0 -n test -- cp /var/lib//clickhouse/data/default/t2/all_1_1_0/a.mrk2 /var/lib//clickhouse/data/default/t2/all_1_1_0/a.bin")
# with Then("Corrupt data part in t2"):
# kubectl(f"exec {host0}-0 -n test -- sed -i \"s/b/c/\" /var/lib/clickhouse/data/default/t2/all_1_1_0/a.bin")
with When("Resume fetches for t2 at replica1"):
clickhouse.query(chi, "system start fetches default.t2", host=host1)
i = 0
while "2" != clickhouse.query(chi, "select active_replicas from system.replicas where database='default' and table='t1'", pod=host0) and i < 10:
with Then("Not ready, wait 5 seconds"):
time.sleep(5)
i += 1
with Then("Inserts should fail with an error regarding not satisfied quorum"):
out = clickhouse.query_with_error(chi, "insert into t1(a) values(3)", host=host0)
assert "Quorum for previous write has not been satisfied yet" in out, error()
with And("Second insert of the same block should pass"):
clickhouse.query(chi, "insert into t1(a) values(3)", host=host0)
with And("Insert of the new block should fail"):
out = clickhouse.query_with_error(chi, "insert into t1(a) values(4)", host=host0)
assert "Quorum for previous write has not been satisfied yet" in out, error()
with And("Second insert of the same block with 'deduplicate_blocks_in_dependent_materialized_views' setting should fail"):
out = clickhouse.query_with_error(
chi,
"set deduplicate_blocks_in_dependent_materialized_views=1; insert into t1(a) values(5)",
host=host0
)
assert "Quorum for previous write has not been satisfied yet" in out, error()
out = clickhouse.query_with_error(
chi, "select t1.a t1_a, t2.a t2_a from t1 left outer join t2 using (a) order by t1_a settings join_use_nulls=1"
)
note(out)
# cat /var/log/clickhouse-server/clickhouse-server.log | grep t2 | grep -E "all_1_1_0|START|STOP"
@TestScenario
@Name("test_ch_002. Row-level security")
def test_ch_002(self):
kubectl.create_and_check(
"manifests/chi/test-ch-002-row-level.yaml",
{
"apply_templates": {"manifests/chit/tpl-clickhouse-21.8.yaml"},
"do_not_delete": 1,
})
chi = "test-ch-002-row-level"
create_table = """create table test (d Date default today(), team LowCardinality(String), user String) Engine = MergeTree() PARTITION BY d ORDER BY d;"""
with When("Create test table"):
clickhouse.query(chi, create_table)
with And("Insert some data"):
clickhouse.query(
chi, "INSERT INTO test(team, user) values('team1', 'user1'),('team2', 'user2'),('team3', 'user3'),('team4', 'user4')"
)
with Then("Make another query for different users. It should be restricted to corresponding team by row-level security"):
for user in ['user1', 'user2', 'user3', 'user4']:
out = clickhouse.query(chi, "select user from test", user=user, pwd=user)
assert out == user, error()
with Then("Make a count() query for different users. It should be restricted to corresponding team by row-level security"):
for user in ['user1', 'user2', 'user3', 'user4']:
out = clickhouse.query(chi, "select count() from test", user=user, pwd=user)
assert out == "1", error()
kubectl.delete_chi(chi)
@TestFeature
@Name("e2e.test_clickhouse")
def test(self):
util.clean_namespace(delete_chi=False)
all_tests = [
test_ch_001,
test_ch_002,
]
run_test = all_tests
# placeholder for selective test running
# run_test = [test_ch_002]
for t in run_test:
Scenario(test=t)()
| import time
import e2e.clickhouse as clickhouse
import e2e.kubectl as kubectl
import e2e.yaml_manifest as yaml_manifest
import e2e.settings as settings
import e2e.util as util
from testflows.core import *
from testflows.asserts import error
@TestScenario
@Name("test_ch_001. Insert quorum")
def test_ch_001(self):
util.require_keeper(keeper_type=self.context.keeper_type)
quorum_template = "manifests/chit/tpl-clickhouse-21.8.yaml"
chit_data = yaml_manifest.get_manifest_data(util.get_full_path(quorum_template))
kubectl.launch(f"delete chit {chit_data['metadata']['name']}", ns=settings.test_namespace, ok_to_fail=True)
kubectl.create_and_check(
"manifests/chi/test-ch-001-insert-quorum.yaml",
{
"apply_templates": {quorum_template},
"pod_count": 2,
"do_not_delete": 1,
})
chi = yaml_manifest.get_chi_name(util.get_full_path("manifests/chi/test-ch-001-insert-quorum.yaml"))
chi_data = kubectl.get("chi", ns=settings.test_namespace, name=chi)
util.wait_clickhouse_cluster_ready(chi_data)
host0 = "chi-test-ch-001-insert-quorum-default-0-0"
host1 = "chi-test-ch-001-insert-quorum-default-0-1"
create_table = """
create table t1 on cluster default (a Int8, d Date default today())
Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
partition by d order by a
TTL d + interval 5 second
SETTINGS merge_with_ttl_timeout=5""".replace('\r', '').replace('\n', '')
create_mv_table2 = """
create table t2 on cluster default (a Int8)
Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
partition by tuple() order by a""".replace('\r', '').replace('\n', '')
create_mv_table3 = """
create table t3 on cluster default (a Int8)
Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
partition by tuple() order by a""".replace('\r', '').replace('\n', '')
create_mv2 = "create materialized view t_mv2 on cluster default to t2 as select a from t1"
create_mv3 = "create materialized view t_mv3 on cluster default to t3 as select a from t1"
with Given("Tables t1, t2, t3 and MVs t1->t2, t1->t3 are created"):
clickhouse.query(chi, create_table)
clickhouse.query(chi, create_mv_table2)
clickhouse.query(chi, create_mv_table3)
clickhouse.query(chi, create_mv2)
clickhouse.query(chi, create_mv3)
with When("Add a row to an old partition"):
clickhouse.query(chi, "insert into t1(a,d) values(6, today()-1)", host=host0)
with When("Stop fetches for t1 at replica1"):
clickhouse.query(chi, "system stop fetches default.t1", host=host1)
with Then("Wait 10 seconds and the data should be dropped by TTL"):
time.sleep(10)
out = clickhouse.query(chi, "select count() from t1 where a=6", host=host0)
assert out == "0", error()
with When("Resume fetches for t1 at replica1"):
clickhouse.query(chi, "system start fetches default.t1", host=host1)
time.sleep(5)
with Then("Inserts should resume"):
clickhouse.query(chi, "insert into t1(a) values(7)", host=host0)
clickhouse.query(chi, "insert into t1(a) values(1)")
with When("Stop fetches for t2 at replica1"):
clickhouse.query(chi, "system stop fetches default.t2", host=host1)
with Then("Insert should fail since it can not reach the quorum"):
out = clickhouse.query_with_error(chi, "insert into t1(a) values(2)", host=host0)
assert "Timeout while waiting for quorum" in out, error()
# kubectl(f"exec {host0}-0 -n test -- cp /var/lib//clickhouse/data/default/t2/all_1_1_0/a.mrk2 /var/lib//clickhouse/data/default/t2/all_1_1_0/a.bin")
# with Then("Corrupt data part in t2"):
# kubectl(f"exec {host0}-0 -n test -- sed -i \"s/b/c/\" /var/lib/clickhouse/data/default/t2/all_1_1_0/a.bin")
with When("Resume fetches for t2 at replica1"):
clickhouse.query(chi, "system start fetches default.t2", host=host1)
i = 0
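# Poll for up to ~50 seconds until both replicas of t1 report as active, so the quorum-related inserts below behave deterministically.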
while "2" != clickhouse.query(chi, "select active_replicas from system.replicas where database='default' and table='t1'", pod=host0) and i < 10:
with Then("Not ready, wait 5 seconds"):
time.sleep(5)
i += 1
with Then("Inserts should fail with an error regarding not satisfied quorum"):
out = clickhouse.query_with_error(chi, "insert into t1(a) values(3)", host=host0)
assert "Quorum for previous write has not been satisfied yet" in out, error()
with And("Second insert of the same block should pass"):
clickhouse.query(chi, "insert into t1(a) values(3)", host=host0)
with And("Insert of the new block should fail"):
out = clickhouse.query_with_error(chi, "insert into t1(a) values(4)", host=host0)
assert "Quorum for previous write has not been satisfied yet" in out, error()
with And("Second insert of the same block with 'deduplicate_blocks_in_dependent_materialized_views' setting should fail"):
out = clickhouse.query_with_error(
chi,
"set deduplicate_blocks_in_dependent_materialized_views=1; insert into t1(a) values(5)",
host=host0
)
assert "Quorum for previous write has not been satisfied yet" in out, error()
out = clickhouse.query_with_error(
chi, "select t1.a t1_a, t2.a t2_a from t1 left outer join t2 using (a) order by t1_a settings join_use_nulls=1"
)
note(out)
# cat /var/log/clickhouse-server/clickhouse-server.log | grep t2 | grep -E "all_1_1_0|START|STOP"
@TestScenario
@Name("test_ch_002. Row-level security")
def test_ch_002(self):
kubectl.create_and_check(
"manifests/chi/test-ch-002-row-level.yaml",
{
"apply_templates": {"manifests/chit/tpl-clickhouse-21.8.yaml"},
"do_not_delete": 1,
})
chi = "test-ch-002-row-level"
create_table = """create table test (d Date default today(), team LowCardinality(String), user String) Engine = MergeTree() PARTITION BY d ORDER BY d;"""
with When("Create test table"):
clickhouse.query(chi, create_table)
with And("Insert some data"):
clickhouse.query(
chi, "INSERT INTO test(team, user) values('team1', 'user1'),('team2', 'user2'),('team3', 'user3'),('team4', 'user4')"
)
with Then("Make a query as different users. It should be restricted to the corresponding team by row-level security"):
for user in ['user1', 'user2', 'user3', 'user4']:
out = clickhouse.query(chi, "select user from test", user=user, pwd=user)
assert out == user, error()
with Then("Make a count() query for different users. It should be restricted to the corresponding team by row-level security"):
for user in ['user1', 'user2', 'user3', 'user4']:
out = clickhouse.query(chi, "select count() from test", user=user, pwd=user)
assert out == "1", error()
kubectl.delete_chi(chi)
@TestFeature
@Name("e2e.test_clickhouse")
def test(self):
util.clean_namespace(delete_chi=False)
all_tests = [
test_ch_001,
test_ch_002,
]
run_test = all_tests
# placeholder for selective test running
# run_test = [test_ch_002]
for t in run_test:
Scenario(test=t)()
|
#! /usr/bin/env python3
import argparse
import os
from pathlib import Path
import subprocess
import sys
import scripts.templates
from scripts.templates import P2020, MPC5777M, CORES, TOP_DIR, PSY_DIR, STUBS_DIR, SRC_DIR, CFG_DIR, Help, AGENT_CONFIG_HJSON_TEMPLATE, CORUNNER_CONFIG_HJSON_TEMPLATE, CORUNNER_KMEMORY_JSON_TEMPLATE, COMPILE_CONFIG_HJSON_TEMPLATE, PSYMODULE_CONFIG_HJSON_TEMPLATE, FLASHLIKE
from scripts.scriptutil import load_db, load_json, dump_json, write_template, psyko
from operator import itemgetter
def corunner_to_list(s):
"""
This function takes the corunner string (a comma-separated list: <core>,<start address of read>) and returns a Python list of the same form (with None as the second element if only a core is given). This allows a start address to be set for each corunner when there are at least two.
Should not be used except as an argument parser type.
"""
pars = s.split(',')
pars[0] = int(pars[0])
assert pars[0] in CORES, \
f"The corunner id must be one of {", ".join(str(c) for c in CORES)}"
l = len(pars)
if l > 2:
raise argparse.ArgumentTypeError("Corunners parameters must be of type <core>[,<start address of read>]")
elif l == 2 and pars[1] != '':
return pars
else:
return [pars[0], None]
def cor_cores(cors):
"""
Takes a list returned by corunner_to_list and returns a list containing only the cores in the same order (to know which cores are used).
"""
return [i[0] for i in cors]
def getopts(argv):
parser = argparse.ArgumentParser(description='Corunners builder')
parser.add_argument("--psyko", "-P", type=Path,
help=Help.PSYKO, required=True)
parser.add_argument("--kdbv", type=Path, required=True)
parser.add_argument("--rtk-dir", "-K", type=Path,
help=Help.RTK_DIR, required=True)
parser.add_argument("--product", "-p", type=str,
help=Help.PRODUCT, required=True,
choices=[P2020,MPC5777M])
parser.add_argument("--corunner", "-C", type=corunner_to_list,
action="append", help=Help.CORUNNER, default=[])
parser.add_argument("--task", "-T", type=str, choices=["H", "G"]+FLASHLIKE,
help=Help.TASK, required=True)
parser.add_argument("--core", "-c", type=int, choices=CORES,
help=Help.CORE, required=True)
parser.add_argument("--local-corunners", action='store_true',
help=Help.LOCAL_CORUNNERS)
parser.add_argument("--build-dir", type=Path, default=TOP_DIR / "build",
help=Help.BUILD_DIR)
parser.add_argument("--mem-conf", type=Path,
help=Help.MEM_CONF)
parser.add_argument("--output", "-o", type=Path,
help=Help.OUTPUT)
args = parser.parse_args(argv[1:])
assert args.core not in cor_cores(args.corunner)
if args.output is None:
args.output = args.build_dir / "program.elf"
return args
def gen_agent_config(output_filename, name, core):
write_template(output_filename, AGENT_CONFIG_HJSON_TEMPLATE, {
"agent_name": name,
"agent_core": core,
})
def gen_corunner_config(conf_filename, identifier, symbol, object_file, kmem_filename):
write_template(conf_filename, CORUNNER_CONFIG_HJSON_TEMPLATE, {
"corunner_id": identifier,
"corunner_symbol": symbol,
"corunner_object": str(object_file)
})
write_template(kmem_filename, CORUNNER_KMEMORY_JSON_TEMPLATE, {
'symbol': symbol,
})
def gen_corunner_source(output_filename, symbol, read=dict()):
cmd = [sys.executable, TOP_DIR / "scripts" / "gen-corunner.py", symbol]
if read:
cmd += ["--read"]
if 'nop' in read:
cmd += ["--nop", str(read['nop'])]
if 'start' in read:
cmd += ["--startaddr", str(read['start'])]
if 'size' in read:
cmd += ["--tablesize", str(read['size'])]
if 'stride' in read:
cmd += ["--stride", str(read['stride'])]
else:
cmd += ["--jump", "2048"]
with subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True) as proc:
with open(output_filename, "w") as fileh:
fileh.write(proc.stdout.read())
def gen_kmem_final(default, config, memreport, kdbv, tasks, corunners=list()):
config_json = load_json(config)
cmd = [sys.executable, TOP_DIR / 'scripts' / 'gen-kmem.py', '--config', config]
del_list = []
config_json['memreport'] = str(memreport)
config_json['default_kmemory'] = str(default)
for el in config_json['elements']:
if el['type'] == 'corunner':
if corunners:
el['names'] = corunners
else:
del_list.append(el)
elif el['type'] == 'task':
el['names'] = tasks
else:
del_list.append(el)
for el in del_list:
config_json['elements'].remove(el)
dump_json(config_json, config)
ret = subprocess.check_call(cmd)
def get_sources(task_name):
c_sources = [
SRC_DIR / "crc.c",
SRC_DIR / "filter.c",
SRC_DIR / "filter2.c",
]
psy_sources = [PSY_DIR / f"task_{task_name}.psy"]
if task_name not in FLASHLIKE:
c_sources += [
STUBS_DIR / f"suite_task_{task_name}.c",
]
psy_sources += [STUBS_DIR / f"for_task_{task_name}.psy"]
return {
"c": c_sources,
"asm": [],
"psy": psy_sources,
}
def main(argv):
args = getopts(argv)
used_cores = cor_cores(args.corunner) + [args.core]
args.corunner.sort(key=itemgetter(0))
def object_of(source_filename, extension = ".o"):
return args.build_dir / (source_filename.name + extension)
sources = get_sources(args.task)
ag_config = args.build_dir / "task.hjson"
app_configs = [
args.build_dir / "app.hjson",
CFG_DIR / f"task_{args.task}.hjson",
ag_config,
]
tasks = [f'task_{args.task}']
part_configs = []
compile_config = args.build_dir / "compile.hjson"
partition_config = args.build_dir / "partition.hjson"
psymodule_config = args.build_dir / "psymodule.hjson"
gen_agent_config(ag_config, f"task_{args.task}", args.core)
mem_configs = []
corunners = []
for corunner, cor_start in args.corunner:
# The read corunner is created only if a start address is provided for this corunner.
use_read = bool(cor_start)
read_args = dict()
co_config = args.build_dir / f"corunner_{corunner}.hjson"
co_kmem = args.build_dir / f"corunner_{corunner}_kmem.json"
co_file = args.build_dir / f"corunner_{corunner}"
if use_read:
read_args['start'] = cor_start
# Assumption: the read size is taken from an environment variable (the original code referenced an undefined `env`).
read_args['size'] = int(os.environ.get(f"CORUNNER_READ_SIZE_{corunner}", "0x2000"), 16)
symbol = f"co_runner_read{corunner}" if read_args else f"co_runner_flash{corunner}"
co_file = co_file.with_suffix('.asm')
sources["asm"].append(co_file)
gen_corunner_source(co_file, symbol, read_args)
app_configs.append(co_config)
mem_configs.append(co_kmem)
corunners.append(symbol)
gen_corunner_config(co_config, corunner, symbol, object_of(co_file), co_kmem)
if args.task not in FLASHLIKE:
stub_config = args.build_dir / "stub.hjson"
gen_agent_config(stub_config, f"sends_to_task_{args.task}", args.core)
app_configs.append(stub_config)
tasks.append(f'sends_to_task_{args.task}')
write_template(compile_config, COMPILE_CONFIG_HJSON_TEMPLATE, {})
write_template(psymodule_config, PSYMODULE_CONFIG_HJSON_TEMPLATE, {})
#==========================================================================
# The functions below are just helpers to call the PsyC compiler psyko,
# with a convenient access to global variables such as the path to the
# compiler and the path to the RTK.
psykonf = {'product': args.product, 'rtk_dir': args.rtk_dir, 'psyko': args.psyko, 'cwd': TOP_DIR}
def psyko_cc(c_source):
generated_object = object_of(c_source)
psyko(psykonf, "cc", c_source, compile_config, "-o", generated_object)
return generated_object
def psyko_as(asm_source):
generated_object = object_of(asm_source)
psyko(psykonf, "as", asm_source, compile_config, "-o", generated_object)
return generated_object
def psyko_module(psy_source):
generated_object = object_of(psy_source, ".psyo")
psyko(psykonf, "module", psy_source, psymodule_config, "-o", generated_object)
return generated_object
def psyko_partition(name, objects, configs):
generated_object = args.build_dir / (name + ".parto")
psyko(psykonf, "partition", "-o", generated_object, '--gendir',
args.build_dir / 'gen' / 'part', *objects, *configs)
return generated_object
def psyko_app(partos, configs):
elf = args.build_dir / "program.elf"
gendir = args.build_dir / "gen" / "app"
psyko(psykonf, "app", "-a", args.build_dir / "program.app", "-b", args.output,
'--gendir', gendir, *partos, *configs)
return gendir
def psyko_memconf(t, files, configs=[], cor_kmems=[]):
"""
This function generates a valid default memconf used to perform the first compilation. It creates a default kmemory for the task and adds the configs for all the corunners.
"""
kmemconf = args.build_dir / ('kmemconf_'+t+'.json')
psyko(psykonf, 'gen-mem-conf', '-t', t, '--gendir', args.build_dir / 'gen' / 'memconf', '-o', kmemconf, *files, *configs)
if cor_kmems:
def_memconf = load_json(kmemconf)
cor_memconf = []
for kmem in cor_kmems:
cor_memconf.append(load_json(kmem))
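# Merge the co-runner kmemory fragments into the generated default memconf; their domains are placed in the largest region.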
max_reg = def_memconf['kmemory']['regions'][0]
if len(def_memconf['kmemory']['regions']) > 1:
for reg in def_memconf['kmemory']['regions'][1:]:
if reg['size'] > max_reg['size']:
max_reg = reg
if 'domains' not in max_reg:
max_reg['domains'] = []
out = cor_memconf[0]['domains'][0]['output_sections'][0]
out['physical_address'] = max_reg['physical_address']
stacks = {obj['id']: obj
for obj in def_memconf['kmemory']['objects']
if obj['id'] in [f"core_{core}_co_runner_stack.c"
for core in used_cores]}
for core in cor_cores(args.corunner):
stack = f"core_{core}_co_runner_stack.c"
for corunner in corunners:
symbol = corunner if corunner[-1] == str(core) else ''
stacks[stack]['groups'] = [f'.stack_{symbol}']
for cor in cor_memconf:
max_reg['domains'] += cor['domains']
def_memconf['kmemory']['groups'] += cor['groups']
def_memconf['kmemory']['objects'] += cor['objects']
dump_json(def_memconf, f=kmemconf)
return kmemconf
#==========================================================================
# Compile all the C, ASM and PsyC sources.
# ASM sources are only present when co-runners are enabled.
parto_objects = []
for c_source in sources["c"]:
parto_objects.append(psyko_cc(c_source))
for asm_source in sources.get("asm", []):
parto_objects.append(psyko_as(asm_source))
for psy_source in sources["psy"]:
parto_objects.append(psyko_module(psy_source))
#==========================================================================
# Generate a single partition and then the executable, so that the sizes of the sections can be measured
parto = psyko_partition("main", parto_objects, part_configs)
mem_configs = [psyko_memconf('app', [parto], app_configs, mem_configs)]
mem_configs.append("--overwrite-memory-configuration")
gendir = psyko_app([parto], app_configs+mem_configs)
assert args.output.is_file(), "first app compilation not successful"
# Finally generate the final memory configs and the executable
if args.mem_conf:
args.output.unlink()
gen_kmem_final(mem_configs[0], args.mem_conf,
gendir / 'applink' / 'memreport_out.ks', args.kdbv, tasks, corunners)
psyko_app([parto], app_configs+mem_configs)
assert args.output.is_file(), "final app compilation not successful"
if __name__ == "__main__":
main(sys.argv)
| #! /usr/bin/env python3
import argparse
import os
from pathlib import Path
import subprocess
import sys
import scripts.templates
from scripts.templates import P2020, MPC5777M, CORES, TOP_DIR, PSY_DIR, STUBS_DIR, SRC_DIR, CFG_DIR, Help, AGENT_CONFIG_HJSON_TEMPLATE, CORUNNER_CONFIG_HJSON_TEMPLATE, CORUNNER_KMEMORY_JSON_TEMPLATE, COMPILE_CONFIG_HJSON_TEMPLATE, PSYMODULE_CONFIG_HJSON_TEMPLATE, FLASHLIKE
from scripts.scriptutil import load_db, load_json, dump_json, write_template, psyko
from operator import itemgetter
def corunner_to_list(s):
"""
This function takes the corunner string (a comma-separated list: <core>,<start address of read>) and returns a Python list of the same form (with None as the second element if only a core is given). This allows a start address to be set for each corunner when there are at least two.
Should not be used except as an argument parser type.
"""
pars = s.split(',')
pars[0] = int(pars[0])
assert pars[0] in CORES, \
f"The corunner id must be one of {', '.join(str(c) for c in CORES)}"
l = len(pars)
if l > 2:
raise argparse.ArgumentTypeError("Corunners parameters must be of type <core>[,<start address of read>]")
elif l == 2 and pars[1] != '':
return pars
else:
return [pars[0], None]
def cor_cores(cors):
"""
Takes a list returned by corunner_to_list and returns a list containing only the cores in the same order (to know which cores are used).
"""
return [i[0] for i in cors]
def getopts(argv):
parser = argparse.ArgumentParser(description='Corunners builder')
parser.add_argument("--psyko", "-P", type=Path,
help=Help.PSYKO, required=True)
parser.add_argument("--kdbv", type=Path, required=True)
parser.add_argument("--rtk-dir", "-K", type=Path,
help=Help.RTK_DIR, required=True)
parser.add_argument("--product", "-p", type=str,
help=Help.PRODUCT, required=True,
choices=[P2020,MPC5777M])
parser.add_argument("--corunner", "-C", type=corunner_to_list,
action="append", help=Help.CORUNNER, default=[])
parser.add_argument("--task", "-T", type=str, choices=["H", "G"]+FLASHLIKE,
help=Help.TASK, required=True)
parser.add_argument("--core", "-c", type=int, choices=CORES,
help=Help.CORE, required=True)
parser.add_argument("--local-corunners", action='store_true',
help=Help.LOCAL_CORUNNERS)
parser.add_argument("--build-dir", type=Path, default=TOP_DIR / "build",
help=Help.BUILD_DIR)
parser.add_argument("--mem-conf", type=Path,
help=Help.MEM_CONF)
parser.add_argument("--output", "-o", type=Path,
help=Help.OUTPUT)
args = parser.parse_args(argv[1:])
assert args.core not in cor_cores(args.corunner)
if args.output is None:
args.output = args.build_dir / "program.elf"
return args
def gen_agent_config(output_filename, name, core):
write_template(output_filename, AGENT_CONFIG_HJSON_TEMPLATE, {
"agent_name": name,
"agent_core": core,
})
def gen_corunner_config(conf_filename, identifier, symbol, object_file, kmem_filename):
write_template(conf_filename, CORUNNER_CONFIG_HJSON_TEMPLATE, {
"corunner_id": identifier,
"corunner_symbol": symbol,
"corunner_object": str(object_file)
})
write_template(kmem_filename, CORUNNER_KMEMORY_JSON_TEMPLATE, {
'symbol': symbol,
})
def gen_corunner_source(output_filename, symbol, read=dict()):
cmd = [sys.executable, TOP_DIR / "scripts" / "gen-corunner.py", symbol]
if read:
cmd += ["--read"]
if 'nop' in read:
cmd += ["--nop", str(read['nop'])]
if 'start' in read:
cmd += ["--startaddr", str(read['start'])]
if 'size' in read:
cmd += ["--tablesize", str(read['size'])]
if 'stride' in read:
cmd += ["--stride", str(read['stride'])]
else:
cmd += ["--jump", "2048"]
with subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True) as proc:
with open(output_filename, "w") as fileh:
fileh.write(proc.stdout.read())
def gen_kmem_final(default, config, memreport, kdbv, tasks, corunners=list()):
config_json = load_json(config)
cmd = [sys.executable, TOP_DIR / 'scripts' / 'gen-kmem.py', '--config', config]
del_list = []
config_json['memreport'] = str(memreport)
config_json['default_kmemory'] = str(default)
for el in config_json['elements']:
if el['type'] == 'corunner':
if corunners:
el['names'] = corunners
else:
del_list.append(el)
elif el['type'] == 'task':
el['names'] = tasks
else:
del_list.append(el)
for el in del_list:
config_json['elements'].remove(el)
dump_json(config_json, config)
ret = subprocess.check_call(cmd)
def get_sources(task_name):
c_sources = [
SRC_DIR / "crc.c",
SRC_DIR / "filter.c",
SRC_DIR / "filter2.c",
]
psy_sources = [PSY_DIR / f"task_{task_name}.psy"]
if task_name not in FLASHLIKE:
c_sources += [
STUBS_DIR / f"suite_task_{task_name}.c",
]
psy_sources += [STUBS_DIR / f"for_task_{task_name}.psy"]
return {
"c": c_sources,
"asm": [],
"psy": psy_sources,
}
def main(argv):
args = getopts(argv)
used_cores = cor_cores(args.corunner) + [args.core]
args.corunner.sort(key=itemgetter(0))
def object_of(source_filename, extension = ".o"):
return args.build_dir / (source_filename.name + extension)
sources = get_sources(args.task)
ag_config = args.build_dir / "task.hjson"
app_configs = [
args.build_dir / "app.hjson",
CFG_DIR / f"task_{args.task}.hjson",
ag_config,
]
tasks = [f'task_{args.task}']
part_configs = []
compile_config = args.build_dir / "compile.hjson"
partition_config = args.build_dir / "partition.hjson"
psymodule_config = args.build_dir / "psymodule.hjson"
gen_agent_config(ag_config, f"task_{args.task}", args.core)
mem_configs = []
corunners = []
for corunner, cor_start in args.corunner:
# The read corunner is created only if a start address is provided for this corunner.
use_read = bool(cor_start)
read_args = dict()
co_config = args.build_dir / f"corunner_{corunner}.hjson"
co_kmem = args.build_dir / f"corunner_{corunner}_kmem.json"
co_file = args.build_dir / f"corunner_{corunner}"
if use_read:
read_args['start'] = cor_start
# Assumption: the read size is taken from an environment variable (the original code referenced an undefined `env`).
read_args['size'] = int(os.environ.get(f"CORUNNER_READ_SIZE_{corunner}", "0x2000"), 16)
symbol = f"co_runner_read{corunner}" if read_args else f"co_runner_flash{corunner}"
co_file = co_file.with_suffix('.asm')
sources["asm"].append(co_file)
gen_corunner_source(co_file, symbol, read_args)
app_configs.append(co_config)
mem_configs.append(co_kmem)
corunners.append(symbol)
gen_corunner_config(co_config, corunner, symbol, object_of(co_file), co_kmem)
if args.task not in FLASHLIKE:
stub_config = args.build_dir / "stub.hjson"
gen_agent_config(stub_config, f"sends_to_task_{args.task}", args.core)
app_configs.append(stub_config)
tasks.append(f'sends_to_task_{args.task}')
write_template(compile_config, COMPILE_CONFIG_HJSON_TEMPLATE, {})
write_template(psymodule_config, PSYMODULE_CONFIG_HJSON_TEMPLATE, {})
#==========================================================================
# The functions below are just helpers to call the PsyC compiler psyko,
# with a convenient access to global variables such as the path to the
# compiler and the path to the RTK.
psykonf = {'product': args.product, 'rtk_dir': args.rtk_dir, 'psyko': args.psyko, 'cwd': TOP_DIR}
def psyko_cc(c_source):
generated_object = object_of(c_source)
psyko(psykonf, "cc", c_source, compile_config, "-o", generated_object)
return generated_object
def psyko_as(asm_source):
generated_object = object_of(asm_source)
psyko(psykonf, "as", asm_source, compile_config, "-o", generated_object)
return generated_object
def psyko_module(psy_source):
generated_object = object_of(psy_source, ".psyo")
psyko(psykonf, "module", psy_source, psymodule_config, "-o", generated_object)
return generated_object
def psyko_partition(name, objects, configs):
generated_object = args.build_dir / (name + ".parto")
psyko(psykonf, "partition", "-o", generated_object, '--gendir',
args.build_dir / 'gen' / 'part', *objects, *configs)
return generated_object
def psyko_app(partos, configs):
elf = args.build_dir / "program.elf"
gendir = args.build_dir / "gen" / "app"
psyko(psykonf, "app", "-a", args.build_dir / "program.app", "-b", args.output,
'--gendir', gendir, *partos, *configs)
return gendir
def psyko_memconf(t, files, configs=[], cor_kmems=[]):
"""
This function generates a valid default memconf used to perform the first compilation. It creates a default kmemory for the task and adds the configs for all the corunners.
"""
kmemconf = args.build_dir / ('kmemconf_'+t+'.json')
psyko(psykonf, 'gen-mem-conf', '-t', t, '--gendir', args.build_dir / 'gen' / 'memconf', '-o', kmemconf, *files, *configs)
if cor_kmems:
def_memconf = load_json(kmemconf)
cor_memconf = []
for kmem in cor_kmems:
cor_memconf.append(load_json(kmem))
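# Merge the co-runner kmemory fragments into the generated default memconf; their domains are placed in the largest region.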
max_reg = def_memconf['kmemory']['regions'][0]
if len(def_memconf['kmemory']['regions']) > 1:
for reg in def_memconf['kmemory']['regions'][1:]:
if reg['size'] > max_reg['size']:
max_reg = reg
if 'domains' not in max_reg:
max_reg['domains'] = []
out = cor_memconf[0]['domains'][0]['output_sections'][0]
out['physical_address'] = max_reg['physical_address']
stacks = {obj['id']: obj
for obj in def_memconf['kmemory']['objects']
if obj['id'] in [f"core_{core}_co_runner_stack.c"
for core in used_cores]}
for core in cor_cores(args.corunner):
stack = f"core_{core}_co_runner_stack.c"
for corunner in corunners:
symbol = corunner if corunner[-1] == str(core) else ''
stacks[stack]['groups'] = [f'.stack_{symbol}']
for cor in cor_memconf:
max_reg['domains'] += cor['domains']
def_memconf['kmemory']['groups'] += cor['groups']
def_memconf['kmemory']['objects'] += cor['objects']
dump_json(def_memconf, f=kmemconf)
return kmemconf
#==========================================================================
# Compile all the C, ASM and PsyC sources.
# ASM sources are only present when co-runners are enabled.
parto_objects = []
for c_source in sources["c"]:
parto_objects.append(psyko_cc(c_source))
for asm_source in sources.get("asm", []):
parto_objects.append(psyko_as(asm_source))
for psy_source in sources["psy"]:
parto_objects.append(psyko_module(psy_source))
#==========================================================================
# Generate a single partition and then the executable, so that the sizes of the sections can be measured
parto = psyko_partition("main", parto_objects, part_configs)
mem_configs = [psyko_memconf('app', [parto], app_configs, mem_configs)]
mem_configs.append("--overwrite-memory-configuration")
gendir = psyko_app([parto], app_configs+mem_configs)
assert args.output.is_file(), "first app compilation not successful"
# Finally generate the final memory configs and the executable
if args.mem_conf:
args.output.unlink()
gen_kmem_final(mem_configs[0], args.mem_conf,
gendir / 'applink' / 'memreport_out.ks', args.kdbv, tasks, corunners)
psyko_app([parto], app_configs+mem_configs)
assert args.output.is_file(), "final app compilation not successful"
if __name__ == "__main__":
main(sys.argv)
|
import logging
import time
from relation_engine_bulk_update.loader import RELoader
WS_OBJ_DELIMITER = ":"
def _timestamp_to_epoch(timestamp):
return int(time.mktime(time.strptime(timestamp, "%Y-%m-%dT%H:%M:%S%z")))
def update_ws_object_collections(ws_client, re_api_url, token, params):
"""Update all workspaces matching the supplied parameters"""
if params.get('showDeleted'):
raise ValueError('This option makes it impossible to determine if the workspace is '
'deleted or not. Use "showOnlyDeleted" to crawl deleted workspaces.')
deleted_ws = params.get('showOnlyDeleted', False)
loader = RELoader([
# vertices
"users",
"workspaces",
"ws_objects",
"ws_object_versions",
"sdk_module_versions",
"sdk_module_method_versions",
# edges
"is_version_of",
"contains",
"is_latest_version_of",
"is_owner_of",
"is_instance_of",
"was_copied_from",
"refers_to",
"was_created_using",
"ws_object_was_created_with_method",
])
for workspace in ws_client.list_workspace_info(params):
ws_id = workspace[0]
public = workspace[6] == 'r'
owner = workspace[2]
logging.info(f"Processing workspace {ws_id}")
loader.add('workspaces', {'_key': str(ws_id), 'name': workspace[1],
'mod_epoch': _timestamp_to_epoch(workspace[3]),
'public': public, 'deleted': deleted_ws})
loader.add('users', {'_key': owner})
loader.add('is_owner_of', {'_from': f'users/{owner}',
'_to': f'workspaces/{ws_id}'})
for obj_info in ws_client.list_objects({'ids': [ws_id], 'showHidden': 1}):
obj_key = WS_OBJ_DELIMITER.join([str(ws_id), str(obj_info[0])])
latest_version = WS_OBJ_DELIMITER.join([obj_key, str(obj_info[4])])
loader.add('ws_objects', {'_key': obj_key,
'workspace_id': ws_id,
'object_id': obj_info[0],
'deleted': False})
loader.add('contains', {'_from': f'workspaces/{ws_id}',
'_to': f'ws_objects/{obj_key}'})
loader.add('is_latest_version_of',
{'_from': f'ws_object_versions/{latest_version}',
'_to': f'ws_objects/{obj_key}'})
_parse_ws_objects(ws_client, loader, obj_info, obj_key)
return loader.save_all_to_re(re_api_url, token)
def _parse_ws_objects(ws_client, loader, obj_info, obj_key):
"""Parse all versions of a specified object"""
objects = [{"ref": f"{obj_info[6]}/{obj_info[0]}/{ver}"}
for ver in list(range(1, obj_info[4]+1))]
for obj_vers in ws_client.get_objects2({'objects': objects, 'no_data': 1})['data']:
info = obj_vers['info']
if obj_vers.get('provenance'):
prov = obj_vers['provenance'][0] # only one provenance item per version in practice
else:
prov = {}
ws_id = info[6]
obj_id = obj_info[0]
ver = info[4]
ver_key = WS_OBJ_DELIMITER.join([str(ws_id), str(obj_id), str(ver)])
loader.add('ws_object_versions', {'_key': ver_key,
'workspace_id': ws_id,
'object_id': obj_id,
'version': ver,
'name': info[1],
'hash': info[8],
'size': info[9],
'epoch': obj_vers['epoch'],
'deleted': False})
loader.add('is_instance_of', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'type_versions/{info[2]}'})
loader.add('is_version_of', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'ws_objects/{obj_key}'})
loader.add('is_owner_of', {'_from': f'users/{obj_vers['creator']}',
'_to': f'ws_object_versions/{ver_key}'})
for ref in obj_vers.get("refs", []):
ref_key = ref.replace("/", WS_OBJ_DELIMITER)
loader.add('refers_to', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'ws_object_versions/{ref_key}'})
if obj_vers.get("copied"):
copy_key = obj_vers["copied"].replace("/", WS_OBJ_DELIMITER)
loader.add('was_copied_from', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'ws_object_versions/{copy_key}'})
for ref in prov.get("input_ws_objects", []):
input_key = ref.replace("/", WS_OBJ_DELIMITER)
loader.add('was_created_using', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'ws_object_versions/{input_key}'})
if prov.get('method_params'):
_proc_creating_method(loader, prov, ver_key)
for action in prov.get("subactions", []):
_proc_module_versions(action, loader, ver_key)
def _proc_module_versions(action, loader, ver_key):
# I think these are the results of test images?
if action['commit'] == 'local-docker-image':
action['commit'] = 'UNDEFINED'
action['ver'] = '0.0.0'
mod_ver_key = f"{action["name"]}:{action["commit"]}"
loader.add('sdk_module_versions', {'_key': mod_ver_key,
'name': action['name'],
'commit': action['commit'],
'ver': action['ver'].split("-")[0],
'code_url': action['code_url'],
})
loader.add('was_created_using', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'sdk_module_versions/{mod_ver_key}'})
def _proc_creating_method(loader, prov, ver_key):
if prov.get("subactions"):
mod_ver = prov["subactions"][0]['commit']
else:
mod_ver = "UNDEFINED"
if mod_ver == 'local-docker-image': # I think these are the results of test images?
mod_ver = "UNDEFINED"
prov['service_ver'] = '0.0.0'
mod_ver_key = f"{prov["service"]}:{mod_ver}"
app_key = mod_ver_key + f".{prov["method"]}"
loader.add('sdk_module_versions', {'_key': mod_ver_key,
'name': prov['service'],
'commit': mod_ver,
'ver': prov.get('service_ver', '0.0.0'),
'code_url': 'UNKNOWN',
})
loader.add('contains', {'_from': f'sdk_module_versions/{mod_ver_key}',
'_to': f'sdk_module_method_versions/{app_key}'})
loader.add('sdk_module_method_versions', {'_key': app_key,
'module_name': prov['service'],
'method_name': prov['method'],
'commit': mod_ver,
'ver': prov.get('service_ver', '0.0.0'),
'code_url': 'UNKNOWN',
})
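# A multi-element method_params list is stored as an index-keyed dict; a single parameter is stored as-is.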
if len(prov['method_params']) > 1:
method_params = {i: x for i, x in enumerate(prov['method_params'])}
else:
method_params = prov['method_params'][0]
loader.add('ws_object_was_created_with_method',
{'_from': f'ws_object_versions/{ver_key}',
'_to': f'sdk_module_method_versions/{app_key}',
'method_params': method_params})
| import logging
import time
from relation_engine_bulk_update.loader import RELoader
WS_OBJ_DELIMITER = ":"
def _timestamp_to_epoch(timestamp):
return int(time.mktime(time.strptime(timestamp, "%Y-%m-%dT%H:%M:%S%z")))
def update_ws_object_collections(ws_client, re_api_url, token, params):
"""Update all workspaces matching the supplied parameters"""
if params.get('showDeleted'):
raise ValueError('This option makes it impossible to determine if the workspace is '
'deleted or not. Use "showOnlyDeleted" to crawl deleted workspaces.')
deleted_ws = params.get('showOnlyDeleted', False)
loader = RELoader([
# vertices
"users",
"workspaces",
"ws_objects",
"ws_object_versions",
"sdk_module_versions",
"sdk_module_method_versions",
# edges
"is_version_of",
"contains",
"is_latest_version_of",
"is_owner_of",
"is_instance_of",
"was_copied_from",
"refers_to",
"was_created_using",
"ws_object_was_created_with_method",
])
for workspace in ws_client.list_workspace_info(params):
ws_id = workspace[0]
public = workspace[6] == 'r'
owner = workspace[2]
logging.info(f"Processing workspace {ws_id}")
loader.add('workspaces', {'_key': str(ws_id), 'name': workspace[1],
'mod_epoch': _timestamp_to_epoch(workspace[3]),
'public': public, 'deleted': deleted_ws})
loader.add('users', {'_key': owner})
loader.add('is_owner_of', {'_from': f'users/{owner}',
'_to': f'workspaces/{ws_id}'})
for obj_info in ws_client.list_objects({'ids': [ws_id], 'showHidden': 1}):
obj_key = WS_OBJ_DELIMITER.join([str(ws_id), str(obj_info[0])])
latest_version = WS_OBJ_DELIMITER.join([obj_key, str(obj_info[4])])
loader.add('ws_objects', {'_key': obj_key,
'workspace_id': ws_id,
'object_id': obj_info[0],
'deleted': False})
loader.add('contains', {'_from': f'workspaces/{ws_id}',
'_to': f'ws_objects/{obj_key}'})
loader.add('is_latest_version_of',
{'_from': f'ws_object_versions/{latest_version}',
'_to': f'ws_objects/{obj_key}'})
_parse_ws_objects(ws_client, loader, obj_info, obj_key)
return loader.save_all_to_re(re_api_url, token)
def _parse_ws_objects(ws_client, loader, obj_info, obj_key):
"""Parse all versions of a specified object"""
objects = [{"ref": f"{obj_info[6]}/{obj_info[0]}/{ver}"}
for ver in list(range(1, obj_info[4]+1))]
for obj_vers in ws_client.get_objects2({'objects': objects, 'no_data': 1})['data']:
info = obj_vers['info']
if obj_vers.get('provenance'):
prov = obj_vers['provenance'][0] # only one provenance item per version in practice
else:
prov = {}
ws_id = info[6]
obj_id = obj_info[0]
ver = info[4]
ver_key = WS_OBJ_DELIMITER.join([str(ws_id), str(obj_id), str(ver)])
loader.add('ws_object_versions', {'_key': ver_key,
'workspace_id': ws_id,
'object_id': obj_id,
'version': ver,
'name': info[1],
'hash': info[8],
'size': info[9],
'epoch': obj_vers['epoch'],
'deleted': False})
loader.add('is_instance_of', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'type_versions/{info[2]}'})
loader.add('is_version_of', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'ws_objects/{obj_key}'})
loader.add('is_owner_of', {'_from': f'users/{obj_vers["creator"]}',
'_to': f'ws_object_versions/{ver_key}'})
for ref in obj_vers.get("refs", []):
ref_key = ref.replace("/", WS_OBJ_DELIMITER)
loader.add('refers_to', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'ws_object_versions/{ref_key}'})
if obj_vers.get("copied"):
copy_key = obj_vers["copied"].replace("/", WS_OBJ_DELIMITER)
loader.add('was_copied_from', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'ws_object_versions/{copy_key}'})
for ref in prov.get("input_ws_objects", []):
input_key = ref.replace("/", WS_OBJ_DELIMITER)
loader.add('was_created_using', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'ws_object_versions/{input_key}'})
if prov.get('method_params'):
_proc_creating_method(loader, prov, ver_key)
for action in prov.get("subactions", []):
_proc_module_versions(action, loader, ver_key)
def _proc_module_versions(action, loader, ver_key):
# I think these are the results of test images?
if action['commit'] == 'local-docker-image':
action['commit'] = 'UNDEFINED'
action['ver'] = '0.0.0'
mod_ver_key = f"{action['name']}:{action['commit']}"
loader.add('sdk_module_versions', {'_key': mod_ver_key,
'name': action['name'],
'commit': action['commit'],
'ver': action['ver'].split("-")[0],
'code_url': action['code_url'],
})
loader.add('was_created_using', {'_from': f'ws_object_versions/{ver_key}',
'_to': f'sdk_module_versions/{mod_ver_key}'})
def _proc_creating_method(loader, prov, ver_key):
if prov.get("subactions"):
mod_ver = prov["subactions"][0]['commit']
else:
mod_ver = "UNDEFINED"
if mod_ver == 'local-docker-image': # I think these are the results of test images?
mod_ver = "UNDEFINED"
prov['service_ver'] = '0.0.0'
mod_ver_key = f"{prov['service']}:{mod_ver}"
app_key = mod_ver_key + f".{prov['method']}"
loader.add('sdk_module_versions', {'_key': mod_ver_key,
'name': prov['service'],
'commit': mod_ver,
'ver': prov.get('service_ver', '0.0.0'),
'code_url': 'UNKNOWN',
})
loader.add('contains', {'_from': f'sdk_module_versions/{mod_ver_key}',
'_to': f'sdk_module_method_versions/{app_key}'})
loader.add('sdk_module_method_versions', {'_key': app_key,
'module_name': prov['service'],
'method_name': prov['method'],
'commit': mod_ver,
'ver': prov.get('service_ver', '0.0.0'),
'code_url': 'UNKNOWN',
})
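# A multi-element method_params list is stored as an index-keyed dict; a single parameter is stored as-is.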
if len(prov['method_params']) > 1:
method_params = {i: x for i, x in enumerate(prov['method_params'])}
else:
method_params = prov['method_params'][0]
loader.add('ws_object_was_created_with_method',
{'_from': f'ws_object_versions/{ver_key}',
'_to': f'sdk_module_method_versions/{app_key}',
'method_params': method_params})
|
import os
import hashlib
import hmac
import time
import orjson
import requests
import extra_math
URL = "https://api.valr.com"
VERSION = "/v1"
previous_signatures = set()
# As copied from VALR API example
def gen_signature(api_key_secret, timestamp, verb, path, body=""):
"""
Signs the request payload using the api key secret
api_key_secret - the api key secret
timestamp - the unix timestamp of this request e.g. int(time.time()*1000)
verb - Http verb - GET, POST, PUT or DELETE
path - path excluding host name, e.g. '/v1/withdraw'
body - http request body as a string, optional
"""
payload = "{}{}{}{}".format(timestamp, verb.upper(), path, body)
message = bytearray(payload, 'utf-8')
signature = hmac.new(bytearray(api_key_secret, 'utf-8'), message, digestmod=hashlib.sha512).hexdigest()
return signature
def gen_headers(method, path, body=""):
server_time_json = requests.get(f"{URL}{VERSION}/public/time").json()
timestamp = server_time_json["epochTime"] * 1000
signature = gen_signature(os.environ["VALR_API_SECRET"], timestamp, method, path, body)
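# VALR signatures are derived from (timestamp, verb, path, body), so repeating an identical request yields an identical signature; bump the timestamp until this process gets a signature it has not used before.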
while signature in previous_signatures:
timestamp += 1
signature = gen_signature(os.environ["VALR_API_SECRET"], timestamp, method, path, body)
previous_signatures.add(signature)
headers = {
"X-VALR-API-KEY": os.environ["VALR_API_KEY"],
"X-VALR-SIGNATURE": signature,
"X-VALR-TIMESTAMP": str(timestamp),
}
if len(body) > 0:
headers["Content-Type"] = "application/json"
return headers
def check_response(resp: requests.Response):
if resp.status_code > 400:
raise Exception(f"Request {resp.request.url} failed with {resp.status_code}.\nBody: {resp.text}")
def market_summary():
response = requests.get(f"{URL}{VERSION}/public/BTCZAR/marketsummary")
check_response(response)
return response.json()
def balances():
path = f"{VERSION}/account/balances"
headers = gen_headers("GET", path)
response = requests.get(f"{URL}{path}", headers=headers)
check_response(response)
return response.json()
def balance(currency) -> float:
b_list = balances()
for b in b_list:
if b['currency'] == currency:
return float(b['available'])
def order_summary(oid: str) -> dict:
path = f"{VERSION}/orders/history/summary/orderid/{oid}"
headers = gen_headers("GET", path)
resp = requests.get(url=f"{URL}{path}", headers=headers)
return orjson.loads(resp.text)
def market_order_req(body):
body_str = orjson.dumps(body).decode("utf-8")
path = f"{VERSION}/orders/market"
headers = gen_headers("POST", path, body_str)
resp = requests.post(url=f"{URL}{path}", data=body_str, headers=headers)
check_response(resp)
try:
order_id = orjson.loads(resp.text)["id"]
except KeyError:
raise Exception(orjson.loads(resp.text)["message"])
time.sleep(1) # allow order to be filled
o_sum = order_summary(order_id)
failure = o_sum["failedReason"]
if len(failure) > 0:
raise Exception(f"Order failed: {failure}")
return float(o_sum["averagePrice"])
def buy_at_market():
amt = balance("ZAR")
if amt == 0:
raise Exception("Trying to buy 0 ZAR of bitcoin?")
print(f"Buying at market for R{amt}")
amt = extra_math.floor_n(amt, 2)
body = {
"side": "BUY",
"quoteAmount": f"{amt:.2f}",
"pair": "BTCZAR",
}
return market_order_req(body)
def sell_at_market():
amt = balance("BTC")
if amt == 0:
raise Exception("Trying to sell 0 bitcoin?")
print(f"Selling {amt} BTC at market")
amt = extra_math.floor_n(amt, 8)
body = {
"side": "SELL",
"baseAmount": f"{amt:.8f}",
"pair": "BTCZAR",
}
return market_order_req(body)
def buy_order(price: int) -> str:
price = int(price)
qty = balance("ZAR") / price
if qty == 0:
raise Exception("Trying to buy 0 bitcoin?")
qty = extra_math.floor_n(qty, 8)
body = {
"side": "BUY",
"quantity": f"{qty:.8f}",
"price": str(price),
"pair": "BTCZAR",
}
body_str = orjson.dumps(body).decode('utf-8')
path = f"{VERSION}/orders/limit"
headers = gen_headers("POST", path, body_str)
resp = requests.post(url=f"{URL}{path}", data=body_str, headers=headers)
check_response(resp)
try:
return resp.json()["id"]
except KeyError:
raise Exception(orjson.loads(resp.text)["message"])
def order_placed(oid: str) -> bool:
return not order_summary(oid).get("failedReason", "")
def lowest_ask() -> float:
path = f"{VERSION}/marketdata/BTCZAR/orderbook"
headers = gen_headers("GET", path)
resp = requests.get(f'{URL}{path}', headers=headers)
asks = orjson.loads(resp.text)["Asks"]
if len(asks) == 0:
print("No ASKS returned from VALR")
raise Exception()
# According to VALR spec, asks[0] should be the lowest ask, but I do not trust that enough to not check
return min([float(ask["price"]) for ask in asks])
def close_order(oid: str):
body = {
"orderId": oid,
"pair": "BTCZAR",
}
body_str = orjson.dumps(body).decode("utf-8")
path = f"{VERSION}/orders/order"
headers = gen_headers("DELETE", path, body_str)
resp = requests.delete(f"{URL}{path}", data=body_str, headers=headers)
check_response(resp)
def get_open_orders():
path = f"{VERSION}/orders/open"
headers = gen_headers("GET", path)
resp = requests.get(f"{URL}{path}", headers=headers)
return orjson.loads(resp.text)
def close_open_buys():
for order in get_open_orders():
if order["side"].upper() == "BUY":
print(f"Found order to close: {order["orderId"]}")
close_order(order["orderId"])
if __name__ == "__main__":
print("MARKET SUMMARY")
print(market_summary())
print()
print("BALANCES")
print(balances())
print()
print("BTC BALANCE")
print(f'{balance('BTC'):.8f}')
print()
print("OPEN ORDERS")
print(get_open_orders())
print()
import sys
if "test-market-sell-buy" in sys.argv:
print("SELLING AT MARKET")
p = sell_at_market()
print(f"sold at {p}")
print("BUYING AT MARKET")
p = buy_at_market()
print(f'bought at {p}')
if "test-order-closing" in sys.argv:
print("CLOSING ORDERS")
close_open_buys()
| import os
import hashlib
import hmac
import time
import orjson
import requests
import extra_math
URL = "https://api.valr.com"
VERSION = "/v1"
previous_signatures = set()
# As copied from VALR API example
def gen_signature(api_key_secret, timestamp, verb, path, body=""):
"""
Signs the request payload using the api key secret
api_key_secret - the api key secret
timestamp - the unix timestamp of this request e.g. int(time.time()*1000)
verb - Http verb - GET, POST, PUT or DELETE
path - path excluding host name, e.g. '/v1/withdraw'
body - http request body as a string, optional
"""
payload = "{}{}{}{}".format(timestamp, verb.upper(), path, body)
message = bytearray(payload, 'utf-8')
signature = hmac.new(bytearray(api_key_secret, 'utf-8'), message, digestmod=hashlib.sha512).hexdigest()
return signature
def gen_headers(method, path, body=""):
server_time_json = requests.get(f"{URL}{VERSION}/public/time").json()
timestamp = server_time_json["epochTime"] * 1000
signature = gen_signature(os.environ["VALR_API_SECRET"], timestamp, method, path, body)
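# VALR signatures are derived from (timestamp, verb, path, body), so repeating an identical request yields an identical signature; bump the timestamp until this process gets a signature it has not used before.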
while signature in previous_signatures:
timestamp += 1
signature = gen_signature(os.environ["VALR_API_SECRET"], timestamp, method, path, body)
previous_signatures.add(signature)
headers = {
"X-VALR-API-KEY": os.environ["VALR_API_KEY"],
"X-VALR-SIGNATURE": signature,
"X-VALR-TIMESTAMP": str(timestamp),
}
if len(body) > 0:
headers["Content-Type"] = "application/json"
return headers
def check_response(resp: requests.Response):
if resp.status_code > 400:
raise Exception(f"Request {resp.request.url} failed with {resp.status_code}.\nBody: {resp.text}")
def market_summary():
response = requests.get(f"{URL}{VERSION}/public/BTCZAR/marketsummary")
check_response(response)
return response.json()
def balances():
path = f"{VERSION}/account/balances"
headers = gen_headers("GET", path)
response = requests.get(f"{URL}{path}", headers=headers)
check_response(response)
return response.json()
def balance(currency) -> float:
b_list = balances()
for b in b_list:
if b['currency'] == currency:
return float(b['available'])
def order_summary(oid: str) -> dict:
path = f"{VERSION}/orders/history/summary/orderid/{oid}"
headers = gen_headers("GET", path)
resp = requests.get(url=f"{URL}{path}", headers=headers)
return orjson.loads(resp.text)
def market_order_req(body):
body_str = orjson.dumps(body).decode("utf-8")
path = f"{VERSION}/orders/market"
headers = gen_headers("POST", path, body_str)
resp = requests.post(url=f"{URL}{path}", data=body_str, headers=headers)
check_response(resp)
try:
order_id = orjson.loads(resp.text)["id"]
except KeyError:
raise Exception(orjson.loads(resp.text)["message"])
time.sleep(1) # allow order to be filled
o_sum = order_summary(order_id)
failure = o_sum["failedReason"]
if len(failure) > 0:
raise Exception(f"Order failed: {failure}")
return float(o_sum["averagePrice"])
def buy_at_market():
amt = balance("ZAR")
if amt == 0:
raise Exception("Trying to buy 0 ZAR of bitcoin?")
print(f"Buying at market for R{amt}")
amt = extra_math.floor_n(amt, 2)
body = {
"side": "BUY",
"quoteAmount": f"{amt:.2f}",
"pair": "BTCZAR",
}
return market_order_req(body)
def sell_at_market():
amt = balance("BTC")
if amt == 0:
raise Exception("Trying to sell 0 bitcoin?")
print(f"Selling {amt} BTC at market")
amt = extra_math.floor_n(amt, 8)
body = {
"side": "SELL",
"baseAmount": f"{amt:.8f}",
"pair": "BTCZAR",
}
return market_order_req(body)
def buy_order(price: int) -> str:
price = int(price)
qty = balance("ZAR") / price
if qty == 0:
raise Exception("Trying to buy 0 bitcoin?")
qty = extra_math.floor_n(qty, 8)
body = {
"side": "BUY",
"quantity": f"{qty:.8f}",
"price": str(price),
"pair": "BTCZAR",
}
body_str = orjson.dumps(body).decode('utf-8')
path = f"{VERSION}/orders/limit"
headers = gen_headers("POST", path, body_str)
resp = requests.post(url=f"{URL}{path}", data=body_str, headers=headers)
check_response(resp)
try:
return resp.json()["id"]
except KeyError:
raise Exception(orjson.loads(resp.text)["message"])
def order_placed(oid: str) -> bool:
return not order_summary(oid).get("failedReason", "")
def lowest_ask() -> float:
path = f"{VERSION}/marketdata/BTCZAR/orderbook"
headers = gen_headers("GET", path)
resp = requests.get(f'{URL}{path}', headers=headers)
asks = orjson.loads(resp.text)["Asks"]
if len(asks) == 0:
print("No ASKS returned from VALR")
raise Exception()
# According to VALR spec, asks[0] should be the lowest ask, but I do not trust that enough to not check
return min([float(ask["price"]) for ask in asks])
def close_order(oid: str):
body = {
"orderId": oid,
"pair": "BTCZAR",
}
body_str = orjson.dumps(body).decode("utf-8")
path = f"{VERSION}/orders/order"
headers = gen_headers("DELETE", path, body_str)
resp = requests.delete(f"{URL}{path}", data=body_str, headers=headers)
check_response(resp)
def get_open_orders():
path = f"{VERSION}/orders/open"
headers = gen_headers("GET", path)
resp = requests.get(f"{URL}{path}", headers=headers)
return orjson.loads(resp.text)
def close_open_buys():
for order in get_open_orders():
if order["side"].upper() == "BUY":
print(f"Found order to close: {order['orderId']}")
close_order(order["orderId"])
if __name__ == "__main__":
print("MARKET SUMMARY")
print(market_summary())
print()
print("BALANCES")
print(balances())
print()
print("BTC BALANCE")
print(f'{balance("BTC"):.8f}')
print()
print("OPEN ORDERS")
print(get_open_orders())
print()
import sys
if "test-market-sell-buy" in sys.argv:
print("SELLING AT MARKET")
p = sell_at_market()
print(f"sold at {p}")
print("BUYING AT MARKET")
p = buy_at_market()
print(f'bought at {p}')
if "test-order-closing" in sys.argv:
print("CLOSING ORDERS")
close_open_buys()
|
import json
from urllib.request import urlopen
import atexit
import datetime
import dateutil
import sys
import tda
API_KEY = 'FON1HLNGRN0KOVR6UDTCF4RPEMPYIXOB@AMER.OAUTHAP'
REDIRECT_URI = 'http://localhost:8080/'
TOKEN_PATH = '../access_token'
def save_to_file(data):
"""
Save to file for testing
"""
with open('../journal/data.json', 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def make_webdriver():
# Import selenium here because it's slow to import
from selenium import webdriver
driver = webdriver.Chrome()
atexit.register(lambda: driver.quit())
return driver
# Create a new client
client = tda.auth.easy_client(
API_KEY,
REDIRECT_URI,
TOKEN_PATH,
make_webdriver)
std = datetime.date(2021,8,1)
etd = datetime.date(2021,11,30)
tx_type = client.Transactions.TransactionType.TRADE
r = client.get_transactions(490673362,
transaction_type=tx_type,
start_date=std, end_date=etd)
#print(json.dumps(r.json()))
with open('../journal/data.json', 'w', encoding='utf-8') as f:
for tx in r.json():
json.dump(tx, f, ensure_ascii=False, indent=4)
# print(json.dumps(tx, indent=1))
# print("\n +++++++++++ \n")
#
# print(f'Keys of dict: {tx.keys()}')
# print(f"Keys of fees: {tx["fees"].keys()}")
# print(f"Keys of transactionItem: {tx["transactionItem"].keys()}")
| import json
from urllib.request import urlopen
import atexit
import datetime
import dateutil
import sys
import tda
API_KEY = 'FON1HLNGRN0KOVR6UDTCF4RPEMPYIXOB@AMER.OAUTHAP'
REDIRECT_URI = 'http://localhost:8080/'
TOKEN_PATH = '../access_token'
def save_to_file(data):
"""
Save to file for testing
"""
with open('../journal/data.json', 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def make_webdriver():
# Import selenium here because it's slow to import
from selenium import webdriver
driver = webdriver.Chrome()
atexit.register(lambda: driver.quit())
return driver
# Create a new client
client = tda.auth.easy_client(
API_KEY,
REDIRECT_URI,
TOKEN_PATH,
make_webdriver)
std = datetime.date(2021,8,1)
etd = datetime.date(2021,11,30)
tx_type = client.Transactions.TransactionType.TRADE
r = client.get_transactions(490673362,
transaction_type=tx_type,
start_date=std, end_date=etd)
#print(json.dumps(r.json()))
with open('../journal/data.json', 'w', encoding='utf-8') as f:
for tx in r.json():
json.dump(tx, f, ensure_ascii=False, indent=4)
# print(json.dumps(tx, indent=1))
# print("\n +++++++++++ \n")
#
# print(f'Keys of dict: {tx.keys()}')
# print(f"Keys of fees: {tx['fees'].keys()}")
# print(f"Keys of transactionItem: {tx['transactionItem'].keys()}")
|
"""APPLY PYTHAGOREAN THEOREM IN LEARNING DATA + SMOOTH VELOCITIES"""
import os
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from helper import img_id, sub_id, TRIALS_PATH
# Apply the Pythagorean theorem to the smoothed learning data to find the sample-to-sample distance:
for file in os.listdir(TRIALS_PATH):
dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))
x = dataset["BPOGX"].diff().fillna(0).to_numpy()
y = dataset["BPOGY"].diff().fillna(0).to_numpy()
sample_2_sample_distance = (x ** 2 + y ** 2) ** 0.5
dataset["Distance"] = np.nan_to_num(sample_2_sample_distance)
dataset["Time"] = dataset["TIME"].diff().fillna(0).to_numpy()
dataset["Velocity_px"] = dataset["Distance"] / dataset["Time"]
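# Convert pixel velocity to degrees of visual angle; the 0.021 deg-per-pixel factor is assumed to be specific to this display and viewing distance.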
dataset["Velocity_deg"] = dataset["Velocity_px"] * 0.021
dataset["Velocity_deg"] = dataset["Velocity_deg"].fillna(0)
dataset = dataset[dataset["Velocity_deg"] != 0]
vel = dataset["Velocity_deg"]
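# Smooth the velocity trace with a Savitzky-Golay filter (13-sample window, 2nd-order polynomial).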
sav_vel = savgol_filter(vel, 13, 2)
dataset["Smoothed_Velocity_deg"] = sav_vel.tolist()
fix_or_sac = dataset["Smoothed_Velocity_deg"] > 120
dataset["Fix_or_Sac"] = np.where(fix_or_sac, "Sac", "Fix")
write_f = dataset[dataset["Smoothed_Velocity_deg"] < 1000]
write_f.to_csv(os.path.join(TRIALS_PATH, file), index=False)
# Plot smoothed velocity vs. unsmoothed velocity
for k, i in itertools.product(sub_id, img_id):
try:
file = (
"Sub_" + str(k) + "_Image_" + i.split(".")[0] + "_Block_4.csv"
) # Block 1,2,3,4
dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20, 11))
fig.suptitle(
f'Subject:{str(k)} , Image:{i.split('.')[0]}, Block: 4', size=30
) # Block 1,2,3,4
time = dataset["Time"].cumsum()
smoothed_velocity1 = dataset["Velocity_deg"]
smoothed_velocity2 = dataset["Smoothed_Velocity_deg"]
ax1.plot(time, smoothed_velocity1)
ax1.set_ylim([0, 1000])
ax1.set_title("Unsmoothed velocity", size=15)
ax2.plot(time, smoothed_velocity2)
ax2.set_ylim([0, 1000])
ax2.set_title("Smoothed velocity", size=15)
# plt.axhline(90, color='red')
# plt.title(f'Subject:{str(k)} , Image:{i.split('.')[0]} , Block: 1')
ax2.axhline(120, color="red")
fig.text(
0.5,
0.04,
"Time (in seconds)",
ha="center",
va="center",
fontsize=22
)
fig.text(
0.08,
0.5,
"Velocity (deg/sec.)",
ha="center",
va="center",
rotation="vertical",
fontsize=22,
)
plt.show()
plt.close()
except OSError:
continue
# Plot to fine-tune the velocity threshold
for k, i in itertools.product(sub_id, img_id):
file = (
"Sub_" + str(k) + "_Image_" + i.split(".")[0] + "_Block_1.csv"
) # Block 1,2,3,4
dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))
time = dataset["Time"].cumsum().fillna(0)
velocity = dataset["Smoothed_Velocity_deg"]
plt.plot(time, velocity)
plt.axhline(100, color="red")
plt.ylim(0, 1000)
plt.title(f"Subject:{str(k)} , Image:{str(i)}")
plt.xlabel("Time (sec)")
plt.ylabel("Velocity values")
plt.show()
plt.close()
| """APPLY PYTHAGOREAN THEOREM IN LEARNING DATA + SMOOTH VELOCITIES"""
import os
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from helper import img_id, sub_id, TRIALS_PATH
# Apply the Pythagorean theorem to the smoothed learning data to find the sample-to-sample distance:
for file in os.listdir(TRIALS_PATH):
dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))
x = dataset["BPOGX"].diff().fillna(0).to_numpy()
y = dataset["BPOGY"].diff().fillna(0).to_numpy()
sample_2_sample_distance = (x ** 2 + y ** 2) ** 0.5
dataset["Distance"] = np.nan_to_num(sample_2_sample_distance)
dataset["Time"] = dataset["TIME"].diff().fillna(0).to_numpy()
dataset["Velocity_px"] = dataset["Distance"] / dataset["Time"]
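# Convert pixel velocity to degrees of visual angle; the 0.021 deg-per-pixel factor is assumed to be specific to this display and viewing distance.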
dataset["Velocity_deg"] = dataset["Velocity_px"] * 0.021
dataset["Velocity_deg"] = dataset["Velocity_deg"].fillna(0)
dataset = dataset[dataset["Velocity_deg"] != 0]
vel = dataset["Velocity_deg"]
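# Smooth the velocity trace with a Savitzky-Golay filter (13-sample window, 2nd-order polynomial).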
sav_vel = savgol_filter(vel, 13, 2)
dataset["Smoothed_Velocity_deg"] = sav_vel.tolist()
fix_or_sac = dataset["Smoothed_Velocity_deg"] > 120
dataset["Fix_or_Sac"] = np.where(fix_or_sac, "Sac", "Fix")
write_f = dataset[dataset["Smoothed_Velocity_deg"] < 1000]
write_f.to_csv(os.path.join(TRIALS_PATH, file), index=False)
# Plot smoothed velocity vs. unsmoothed velocity
for k, i in itertools.product(sub_id, img_id):
try:
file = (
"Sub_" + str(k) + "_Image_" + i.split(".")[0] + "_Block_4.csv"
) # Block 1,2,3,4
dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20, 11))
fig.suptitle(
f'Subject:{str(k)} , Image:{i.split(".")[0]}, Block: 4', size=30
) # Block 1,2,3,4
time = dataset["Time"].cumsum()
smoothed_velocity1 = dataset["Velocity_deg"]
smoothed_velocity2 = dataset["Smoothed_Velocity_deg"]
ax1.plot(time, smoothed_velocity1)
ax1.set_ylim([0, 1000])
ax1.set_title("Unsmoothed velocity", size=15)
ax2.plot(time, smoothed_velocity2)
ax2.set_ylim([0, 1000])
ax2.set_title("Smoothed velocity", size=15)
# plt.axhline(90, color='red')
# plt.title(f'Subject:{str(k)} , Image:{i.split(".")[0]} , Block: 1')
ax2.axhline(120, color="red")
fig.text(
0.5,
0.04,
"Time (in seconds)",
ha="center",
va="center",
fontsize=22
)
fig.text(
0.08,
0.5,
"Velocity (deg/sec.)",
ha="center",
va="center",
rotation="vertical",
fontsize=22,
)
plt.show()
plt.close()
except OSError:
continue
# Plot to fine-tune the velocity threshold
for k, i in itertools.product(sub_id, img_id):
file = (
"Sub_" + str(k) + "_Image_" + i.split(".")[0] + "_Block_1.csv"
) # Block 1,2,3,4
dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))
time = dataset["Time"].cumsum().fillna(0)
velocity = dataset["Smoothed_Velocity_deg"]
plt.plot(time, velocity)
plt.axhline(100, color="red")
plt.ylim(0, 1000)
plt.title(f"Subject:{str(k)} , Image:{str(i)}")
plt.xlabel("Time (sec)")
plt.ylabel("Velocity values")
plt.show()
plt.close()
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.core import mask_target
from mmdet.models.builder import HEADS, build_loss
BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit
@HEADS.register_module()
class FCNMaskHead(BaseModule):
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
num_classes=80,
class_agnostic=False,
upsample_cfg=dict(type='deconv', scale_factor=2),
conv_cfg=None,
norm_cfg=None,
predictor_cfg=dict(type='Conv'),
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(FCNMaskHead, self).__init__(init_cfg)
self.upsample_cfg = upsample_cfg.copy()
if self.upsample_cfg['type'] not in [
None, 'deconv', 'nearest', 'bilinear', 'carafe'
]:
raise ValueError(
f'Invalid upsample method {self.upsample_cfg["type"]}, '
'accepted methods are "deconv", "nearest", "bilinear", '
'"carafe"')
self.num_convs = num_convs
# WARN: roi_feat_size is reserved and not used
self.roi_feat_size = _pair(roi_feat_size)
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = self.upsample_cfg.get('type')
self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.predictor_cfg = predictor_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.convs = ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
upsample_cfg_.update(
in_channels=upsample_in_channels,
out_channels=self.conv_out_channels,
kernel_size=self.scale_factor,
stride=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
elif self.upsample_method == 'carafe':
upsample_cfg_.update(
channels=upsample_in_channels, scale_factor=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
else:
# suppress warnings
align_corners = (None
if self.upsample_method == 'nearest' else False)
upsample_cfg_.update(
scale_factor=self.scale_factor,
mode=self.upsample_method,
align_corners=align_corners)
self.upsample = build_upsample_layer(upsample_cfg_)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = build_conv_layer(self.predictor_cfg,
logits_in_channel, out_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
def init_weights(self):
super(FCNMaskHead, self).init_weights()
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
elif isinstance(m, CARAFEPack):
m.init_weights()
else:
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
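# Illustrative shape walk-through (added for clarity, not from the original source): with the
# default config (num_convs=4, conv_out_channels=256, 'deconv' upsample with scale_factor=2),
# an ROI feature of shape (N, 256, 14, 14) keeps its spatial size through the 3x3 convs,
# becomes (N, 256, 28, 28) after the deconv + ReLU, and conv_logits maps it to
# (N, num_classes, 28, 28) mask logits.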
def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels):
"""
Example:
>>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
>>> N = 7 # N = number of extracted ROIs
>>> C, H, W = 11, 32, 32
>>> # Create example instance of FCN Mask Head.
>>> # There are lots of variations depending on the configuration
>>> self = FCNMaskHead(num_classes=C, num_convs=1)
>>> inputs = torch.rand(N, self.in_channels, H, W)
>>> mask_pred = self.forward(inputs)
>>> sf = self.scale_factor
>>> labels = torch.randint(0, C, size=(N,))
>>> # With the default properties the mask targets should indicate
>>> # a (potentially soft) single-class label
>>> mask_targets = torch.rand(N, H * sf, W * sf)
>>> loss = self.loss(mask_pred, mask_targets, labels)
>>> print('loss = {!r}'.format(loss))
"""
loss = dict()
if mask_pred.size(0) == 0:
loss_mask = mask_pred.sum()
else:
if self.class_agnostic:
loss_mask = self.loss_mask(mask_pred, mask_targets,
torch.zeros_like(labels))
else:
loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
loss['loss_mask'] = loss_mask
return loss
def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape (Tuple): original image height and width, shape (2,)
scale_factor(float | Tensor): If ``rescale is True``, box
coordinates are divided by this scale factor to fit
``ori_shape``.
rescale (bool): If True, the resulting masks will be rescaled to
``ori_shape``.
Returns:
list[list]: encoded masks. The c-th item in the outer list
corresponds to the c-th class. Given the c-th outer list, the
i-th item in that inner list is the mask for the i-th box with
class label c.
Example:
>>> import mmcv
>>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
>>> N = 7 # N = number of extracted ROIs
>>> C, H, W = 11, 32, 32
>>> # Create example instance of FCN Mask Head.
>>> self = FCNMaskHead(num_classes=C, num_convs=0)
>>> inputs = torch.rand(N, self.in_channels, H, W)
>>> mask_pred = self.forward(inputs)
>>> # Each input is associated with some bounding box
>>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
>>> det_labels = torch.randint(0, C, size=(N,))
>>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })
>>> ori_shape = (H * 4, W * 4)
>>> scale_factor = torch.FloatTensor((1, 1))
>>> rescale = False
>>> # Encoded masks are a list for each category.
>>> encoded_masks = self.get_seg_masks(
>>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,
>>> scale_factor, rescale
>>> )
>>> assert len(encoded_masks) == C
>>> assert sum(list(map(len, encoded_masks))) == N
"""
if not isinstance(mask_pred, torch.Tensor):
mask_pred = det_bboxes.new_tensor(mask_pred)
device = mask_pred.device
cls_segms = [[] for _ in range(self.num_classes)
] # BG is not included in num_classes
bboxes = det_bboxes[:, :4]
labels = det_labels
# No need to consider rescale and scale_factor while exporting to ONNX
if torch.onnx.is_in_onnx_export():
img_h, img_w = ori_shape[:2]
else:
if rescale:
img_h, img_w = ori_shape[:2]
else:
if isinstance(scale_factor, float):
img_h = np.round(ori_shape[0] * scale_factor).astype(
np.int32)
img_w = np.round(ori_shape[1] * scale_factor).astype(
np.int32)
else:
w_scale, h_scale = scale_factor[0], scale_factor[1]
img_h = np.round(ori_shape[0] * h_scale.item()).astype(
np.int32)
img_w = np.round(ori_shape[1] * w_scale.item()).astype(
np.int32)
scale_factor = 1.0
if not isinstance(scale_factor, (float, torch.Tensor)):
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = bboxes / scale_factor
# support exporting to ONNX
if torch.onnx.is_in_onnx_export():
threshold = rcnn_test_cfg.mask_thr_binary
if not self.class_agnostic:
box_inds = torch.arange(mask_pred.shape[0])
mask_pred = mask_pred[box_inds, labels][:, None]
masks, _ = _do_paste_mask(
mask_pred, bboxes, img_h, img_w, skip_empty=False)
if threshold >= 0:
masks = (masks >= threshold).to(dtype=torch.bool)
else:
# TensorRT backend does not have data type of uint8
is_trt_backend = os.environ.get(
'ONNX_BACKEND') == 'MMCVTensorRT'
target_dtype = torch.int32 if is_trt_backend else torch.uint8
masks = (masks * 255).to(dtype=target_dtype)
return masks
N = len(mask_pred)
# The actual implementation split the input into chunks,
# and paste them chunk by chunk.
if device.type == 'cpu':
# CPU is most efficient when they are pasted one by one with
# skip_empty=True, so that it performs minimal number of
# operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks,
# but may have memory issue
num_chunks = int(
np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
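# Worked example (illustrative, not from the original source): pasting N = 1000 masks onto an
# 800x1333 image needs roughly N * img_h * img_w * BYTES_PER_FLOAT ≈ 4.3e9 bytes, so with
# GPU_MEM_LIMIT = 1 GB this gives num_chunks = 4, i.e. ~250 masks pasted per chunk.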
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
threshold = rcnn_test_cfg.mask_thr_binary
im_mask = torch.zeros(
N,
img_h,
img_w,
device=device,
dtype=torch.bool if threshold >= 0 else torch.uint8)
if not self.class_agnostic:
mask_pred = mask_pred[range(N), labels][:, None]
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
mask_pred[inds],
bboxes[inds],
img_h,
img_w,
skip_empty=device.type == 'cpu')
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
im_mask[(inds, ) + spatial_inds] = masks_chunk
if torch.jit.is_tracing():
return im_mask.detach().int()
for i in range(N):
cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
return cls_segms
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
"""Paste instance masks according to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
boxes (Tensor): N, 4
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
skip_empty (bool): Only paste masks within the region that
tightly bound all boxes, and returns the results this region only.
An important optimization for CPU.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
If skip_empty == False, the whole image will be pasted. It will
return a mask of shape (N, img_h, img_w) and an empty tuple.
If skip_empty == True, only area around the mask will be pasted.
A mask of shape (N, h', w') and its start and end coordinates
in the original image will be returned.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
if skip_empty:
x0_int, y0_int = torch.clamp(
boxes.min(dim=0).values.floor()[:2] - 1,
min=0).to(dtype=torch.int32)
x1_int = torch.clamp(
boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(
boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
N = masks.shape[0]
img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
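# Note (added for clarity): the two lines above rescale pixel-center coordinates into the
# [-1, 1] range expected by F.grid_sample, e.g. a sample exactly at x0 maps to -1 and one at
# x1 maps to +1; boxes with zero width or height would produce inf, which is zeroed out below.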
# img_x, img_y have shapes (N, w), (N, h)
# IsInf op is not supported with ONNX<=1.7.0
if not torch.onnx.is_in_onnx_export():
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
if skip_empty:
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
| import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.core import mask_target
from mmdet.models.builder import HEADS, build_loss
BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit
@HEADS.register_module()
class FCNMaskHead(BaseModule):
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
num_classes=80,
class_agnostic=False,
upsample_cfg=dict(type='deconv', scale_factor=2),
conv_cfg=None,
norm_cfg=None,
predictor_cfg=dict(type='Conv'),
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(FCNMaskHead, self).__init__(init_cfg)
self.upsample_cfg = upsample_cfg.copy()
if self.upsample_cfg['type'] not in [
None, 'deconv', 'nearest', 'bilinear', 'carafe'
]:
raise ValueError(
f'Invalid upsample method {self.upsample_cfg["type"]}, '
'accepted methods are "deconv", "nearest", "bilinear", '
'"carafe"')
self.num_convs = num_convs
# WARN: roi_feat_size is reserved and not used
self.roi_feat_size = _pair(roi_feat_size)
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = self.upsample_cfg.get('type')
self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.predictor_cfg = predictor_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.convs = ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
upsample_cfg_.update(
in_channels=upsample_in_channels,
out_channels=self.conv_out_channels,
kernel_size=self.scale_factor,
stride=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
elif self.upsample_method == 'carafe':
upsample_cfg_.update(
channels=upsample_in_channels, scale_factor=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
else:
# suppress warnings
align_corners = (None
if self.upsample_method == 'nearest' else False)
upsample_cfg_.update(
scale_factor=self.scale_factor,
mode=self.upsample_method,
align_corners=align_corners)
self.upsample = build_upsample_layer(upsample_cfg_)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = build_conv_layer(self.predictor_cfg,
logits_in_channel, out_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
def init_weights(self):
super(FCNMaskHead, self).init_weights()
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
elif isinstance(m, CARAFEPack):
m.init_weights()
else:
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels):
"""
Example:
>>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
>>> N = 7 # N = number of extracted ROIs
>>> C, H, W = 11, 32, 32
>>> # Create example instance of FCN Mask Head.
>>> # There are lots of variations depending on the configuration
>>> self = FCNMaskHead(num_classes=C, num_convs=1)
>>> inputs = torch.rand(N, self.in_channels, H, W)
>>> mask_pred = self.forward(inputs)
>>> sf = self.scale_factor
>>> labels = torch.randint(0, C, size=(N,))
>>> # With the default properties the mask targets should indicate
>>> # a (potentially soft) single-class label
>>> mask_targets = torch.rand(N, H * sf, W * sf)
>>> loss = self.loss(mask_pred, mask_targets, labels)
>>> print('loss = {!r}'.format(loss))
"""
loss = dict()
if mask_pred.size(0) == 0:
loss_mask = mask_pred.sum()
else:
if self.class_agnostic:
loss_mask = self.loss_mask(mask_pred, mask_targets,
torch.zeros_like(labels))
else:
loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
loss['loss_mask'] = loss_mask
return loss
def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape (Tuple): original image height and width, shape (2,)
scale_factor(float | Tensor): If ``rescale is True``, box
coordinates are divided by this scale factor to fit
``ori_shape``.
rescale (bool): If True, the resulting masks will be rescaled to
``ori_shape``.
Returns:
list[list]: encoded masks. The c-th item in the outer list
corresponds to the c-th class. Given the c-th outer list, the
i-th item in that inner list is the mask for the i-th box with
class label c.
Example:
>>> import mmcv
>>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
>>> N = 7 # N = number of extracted ROIs
>>> C, H, W = 11, 32, 32
>>> # Create example instance of FCN Mask Head.
>>> self = FCNMaskHead(num_classes=C, num_convs=0)
>>> inputs = torch.rand(N, self.in_channels, H, W)
>>> mask_pred = self.forward(inputs)
>>> # Each input is associated with some bounding box
>>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
>>> det_labels = torch.randint(0, C, size=(N,))
>>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })
>>> ori_shape = (H * 4, W * 4)
>>> scale_factor = torch.FloatTensor((1, 1))
>>> rescale = False
>>> # Encoded masks are a list for each category.
>>> encoded_masks = self.get_seg_masks(
>>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,
>>> scale_factor, rescale
>>> )
>>> assert len(encoded_masks) == C
>>> assert sum(list(map(len, encoded_masks))) == N
"""
if not isinstance(mask_pred, torch.Tensor):
mask_pred = det_bboxes.new_tensor(mask_pred)
device = mask_pred.device
cls_segms = [[] for _ in range(self.num_classes)
] # BG is not included in num_classes
bboxes = det_bboxes[:, :4]
labels = det_labels
# No need to consider rescale and scale_factor while exporting to ONNX
if torch.onnx.is_in_onnx_export():
img_h, img_w = ori_shape[:2]
else:
if rescale:
img_h, img_w = ori_shape[:2]
else:
if isinstance(scale_factor, float):
img_h = np.round(ori_shape[0] * scale_factor).astype(
np.int32)
img_w = np.round(ori_shape[1] * scale_factor).astype(
np.int32)
else:
w_scale, h_scale = scale_factor[0], scale_factor[1]
img_h = np.round(ori_shape[0] * h_scale.item()).astype(
np.int32)
img_w = np.round(ori_shape[1] * w_scale.item()).astype(
np.int32)
scale_factor = 1.0
if not isinstance(scale_factor, (float, torch.Tensor)):
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = bboxes / scale_factor
# support exporting to ONNX
if torch.onnx.is_in_onnx_export():
threshold = rcnn_test_cfg.mask_thr_binary
if not self.class_agnostic:
box_inds = torch.arange(mask_pred.shape[0])
mask_pred = mask_pred[box_inds, labels][:, None]
masks, _ = _do_paste_mask(
mask_pred, bboxes, img_h, img_w, skip_empty=False)
if threshold >= 0:
masks = (masks >= threshold).to(dtype=torch.bool)
else:
# TensorRT backend does not have data type of uint8
is_trt_backend = os.environ.get(
'ONNX_BACKEND') == 'MMCVTensorRT'
target_dtype = torch.int32 if is_trt_backend else torch.uint8
masks = (masks * 255).to(dtype=target_dtype)
return masks
N = len(mask_pred)
# The actual implementation split the input into chunks,
# and paste them chunk by chunk.
if device.type == 'cpu':
# CPU is most efficient when they are pasted one by one with
# skip_empty=True, so that it performs minimal number of
# operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks,
# but may have memory issue
num_chunks = int(
np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
threshold = rcnn_test_cfg.mask_thr_binary
im_mask = torch.zeros(
N,
img_h,
img_w,
device=device,
dtype=torch.bool if threshold >= 0 else torch.uint8)
if not self.class_agnostic:
mask_pred = mask_pred[range(N), labels][:, None]
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
mask_pred[inds],
bboxes[inds],
img_h,
img_w,
skip_empty=device.type == 'cpu')
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
im_mask[(inds, ) + spatial_inds] = masks_chunk
if torch.jit.is_tracing():
return im_mask.detach().int()
for i in range(N):
cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
return cls_segms
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
"""Paste instance masks according to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
boxes (Tensor): N, 4
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
skip_empty (bool): Only paste masks within the region that
tightly bound all boxes, and returns the results this region only.
An important optimization for CPU.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
If skip_empty == False, the whole image will be pasted. It will
return a mask of shape (N, img_h, img_w) and an empty tuple.
If skip_empty == True, only area around the mask will be pasted.
A mask of shape (N, h', w') and its start and end coordinates
in the original image will be returned.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
if skip_empty:
x0_int, y0_int = torch.clamp(
boxes.min(dim=0).values.floor()[:2] - 1,
min=0).to(dtype=torch.int32)
x1_int = torch.clamp(
boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(
boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
N = masks.shape[0]
img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
# img_x, img_y have shapes (N, w), (N, h)
# IsInf op is not supported with ONNX<=1.7.0
if not torch.onnx.is_in_onnx_export():
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
if skip_empty:
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
|
# YOLOv5 common modules
import math
from copy import copy
from pathlib import Path
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
from utils.datasets import letterbox
from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import time_synchronized
def autopad(k, p=None): # kernel, padding
# Pad to 'same'
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
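# Example (illustrative): autopad(3) -> 1 and autopad(5) -> 2, i.e. 'same' padding for odd
# kernels; autopad([3, 5]) -> [1, 2] when a per-dimension kernel size is given.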
def DWConv(c1, c2, k=1, s=1, act=True):
# Depthwise convolution
return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
class Conv(nn.Module):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Conv, self).__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def fuseforward(self, x):
return self.act(self.conv(x))
class TransformerLayer(nn.Module):
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
def __init__(self, c, num_heads):
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
class TransformerBlock(nn.Module):
# Vision Transformer https://arxiv.org/abs/2010.11929
def __init__(self, c1, c2, num_heads, num_layers):
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
self.c2 = c2
def forward(self, x):
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
p = x.flatten(2)
p = p.unsqueeze(0)
p = p.transpose(0, 3)
p = p.squeeze(3)
e = self.linear(p)
x = p + e
x = self.tr(x)
x = x.unsqueeze(3)
x = x.transpose(0, 3)
x = x.reshape(b, self.c2, w, h)
return x
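# Illustrative note (added): the flatten/transpose round-trip treats each of the w*h spatial
# positions as a token, so e.g. TransformerBlock(256, 256, num_heads=4, num_layers=2) applied
# to x of shape (b, 256, 20, 20) runs self-attention over 400 tokens and returns a tensor of
# the same (b, 256, 20, 20) shape.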
class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
super(Bottleneck, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_, c2, 3, 1, g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(BottleneckCSP, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class C3(nn.Module):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(C3, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
def forward(self, x):
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
class C3TR(C3):
# C3 module with TransformerBlock()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, c1, c2, k=(5, 9, 13)):
super(SPP, self).__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Focus, self).__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
# self.contract = Contract(gain=2)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
# return self.conv(self.contract(x))
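# Example (illustrative): for x of shape (b, 3, 640, 640) each of the four strided slices has
# shape (b, 3, 320, 320); their concatenation is (b, 12, 320, 320), matching the
# Conv(c1 * 4, c2, ...) layer defined above.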
class Contract(nn.Module):
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain'
s = self.gain
x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40)
class Expand(nn.Module):
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
s = self.gain
x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160)
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, self.d)
class NMS(nn.Module):
# Non-Maximum Suppression (NMS) module
conf = 0.25 # confidence threshold
iou = 0.45 # IoU threshold
classes = None # (optional list) filter by class
max_det = 1000 # maximum number of detections per image
def __init__(self):
super(NMS, self).__init__()
def forward(self, x):
return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)
class AutoShape(nn.Module):
# input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
classes = None # (optional list) filter by class
max_det = 1000 # maximum number of detections per image
def __init__(self, model):
super(AutoShape, self).__init__()
self.model = model.eval()
def autoshape(self):
print('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()
return self
@torch.no_grad()
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=640, width=1280, RGB images example inputs are:
# filename: imgs = 'data/images/zidane.jpg'
# URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') # HWC x(640,1280,3)
# numpy: = np.zeros((640,1280,3)) # HWC
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
t = [time_synchronized()]
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
with amp.autocast(enabled=p.device.type != 'cpu'):
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
# Pre-process
n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
f = f'image{i}' # filename
if isinstance(im, str): # filename or uri
im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
elif isinstance(im, Image.Image): # PIL Image
im, f = np.asarray(im), getattr(im, 'filename', f) or f
files.append(Path(f).with_suffix('.jpg').name)
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input
s = im.shape[:2] # HWC
shape0.append(s) # image shape
g = (size / max(s)) # gain
shape1.append([y * g for y in s])
imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
t.append(time_synchronized())
with amp.autocast(enabled=p.device.type != 'cpu'):
# Inference
y = self.model(x, augment, profile)[0] # forward
t.append(time_synchronized())
# Post-process
y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS
for i in range(n):
scale_coords(shape1, y[i][:, :4], shape0[i])
t.append(time_synchronized())
return Detections(imgs, y, files, t, self.names, x.shape)
class Detections:
# detections class for YOLOv5 inference results
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super(Detections, self).__init__()
d = pred[0].device # device
gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.files = files # image filenames
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred) # number of images (batch size)
self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms)
self.s = shape # inference BCHW shape
def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '
if pred is not None:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
str += f"{n} {self.names[int(c)]}{"s" * (n > 1)}, " # add to string
if show or save or render or crop:
for *box, conf, cls in pred: # xyxy, confidence, class
label = f'{self.names[int(cls)]} {conf:.2f}'
if crop:
save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])
else: # all others
plot_one_box(box, im, label=label, color=colors(cls))
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if pprint:
print(str.rstrip(', '))
if show:
im.show(self.files[i]) # show
if save:
f = self.files[i]
im.save(save_dir / f) # save
print(f"{"Saved" * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
if render:
self.imgs[i] = np.asarray(im)
def print(self):
self.display(pprint=True) # print results
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)
def show(self):
self.display(show=True) # show results
def save(self, save_dir='runs/hub/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
self.display(save=True, save_dir=save_dir) # save results
def crop(self, save_dir='runs/hub/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
self.display(crop=True, save_dir=save_dir) # crop results
print(f'Saved results to {save_dir}\n')
def render(self):
self.display(render=True) # render results
return self.imgs
def pandas(self):
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
new = copy(self) # return copy
ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
return new
def tolist(self):
# return a list of Detections objects, i.e. 'for result in results.tolist():'
x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)]
for d in x:
for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
setattr(d, k, getattr(d, k)[0]) # pop out of list
return x
def __len__(self):
return self.n
class Classify(nn.Module):
# Classification head, i.e. x(b,c1,20,20) to x(b,c2)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
super(Classify, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
self.flat = nn.Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
return self.flat(self.conv(z)) # flatten to x(b,c2) | # YOLOv5 common modules
import math
from copy import copy
from pathlib import Path
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
from utils.datasets import letterbox
from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import time_synchronized
def autopad(k, p=None): # kernel, padding
# Pad to 'same'
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
def DWConv(c1, c2, k=1, s=1, act=True):
# Depthwise convolution
return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
class Conv(nn.Module):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Conv, self).__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def fuseforward(self, x):
return self.act(self.conv(x))
class TransformerLayer(nn.Module):
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
def __init__(self, c, num_heads):
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
class TransformerBlock(nn.Module):
# Vision Transformer https://arxiv.org/abs/2010.11929
def __init__(self, c1, c2, num_heads, num_layers):
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
self.c2 = c2
def forward(self, x):
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
p = x.flatten(2)
p = p.unsqueeze(0)
p = p.transpose(0, 3)
p = p.squeeze(3)
e = self.linear(p)
x = p + e
x = self.tr(x)
x = x.unsqueeze(3)
x = x.transpose(0, 3)
x = x.reshape(b, self.c2, w, h)
return x
class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
super(Bottleneck, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_, c2, 3, 1, g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(BottleneckCSP, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class C3(nn.Module):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(C3, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
def forward(self, x):
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
class C3TR(C3):
# C3 module with TransformerBlock()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, c1, c2, k=(5, 9, 13)):
super(SPP, self).__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Focus, self).__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
# self.contract = Contract(gain=2)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
# return self.conv(self.contract(x))
class Contract(nn.Module):
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain'
s = self.gain
x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40)
class Expand(nn.Module):
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
s = self.gain
x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160)
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, self.d)
class NMS(nn.Module):
# Non-Maximum Suppression (NMS) module
conf = 0.25 # confidence threshold
iou = 0.45 # IoU threshold
classes = None # (optional list) filter by class
max_det = 1000 # maximum number of detections per image
def __init__(self):
super(NMS, self).__init__()
def forward(self, x):
return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)
class AutoShape(nn.Module):
# input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
classes = None # (optional list) filter by class
max_det = 1000 # maximum number of detections per image
def __init__(self, model):
super(AutoShape, self).__init__()
self.model = model.eval()
def autoshape(self):
print('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()
return self
@torch.no_grad()
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=640, width=1280, RGB images example inputs are:
# filename: imgs = 'data/images/zidane.jpg'
# URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') # HWC x(640,1280,3)
# numpy: = np.zeros((640,1280,3)) # HWC
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
t = [time_synchronized()]
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
with amp.autocast(enabled=p.device.type != 'cpu'):
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
# Pre-process
n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
f = f'image{i}' # filename
if isinstance(im, str): # filename or uri
im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
elif isinstance(im, Image.Image): # PIL Image
im, f = np.asarray(im), getattr(im, 'filename', f) or f
files.append(Path(f).with_suffix('.jpg').name)
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input
s = im.shape[:2] # HWC
shape0.append(s) # image shape
g = (size / max(s)) # gain
shape1.append([y * g for y in s])
imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
t.append(time_synchronized())
with amp.autocast(enabled=p.device.type != 'cpu'):
# Inference
y = self.model(x, augment, profile)[0] # forward
t.append(time_synchronized())
# Post-process
y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS
for i in range(n):
scale_coords(shape1, y[i][:, :4], shape0[i])
t.append(time_synchronized())
return Detections(imgs, y, files, t, self.names, x.shape)
class Detections:
# detections class for YOLOv5 inference results
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super(Detections, self).__init__()
d = pred[0].device # device
gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.files = files # image filenames
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred) # number of images (batch size)
self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms)
self.s = shape # inference BCHW shape
def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '
if pred is not None:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
if show or save or render or crop:
for *box, conf, cls in pred: # xyxy, confidence, class
label = f'{self.names[int(cls)]} {conf:.2f}'
if crop:
save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])
else: # all others
plot_one_box(box, im, label=label, color=colors(cls))
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if pprint:
print(str.rstrip(', '))
if show:
im.show(self.files[i]) # show
if save:
f = self.files[i]
im.save(save_dir / f) # save
print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
if render:
self.imgs[i] = np.asarray(im)
def print(self):
self.display(pprint=True) # print results
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)
def show(self):
self.display(show=True) # show results
def save(self, save_dir='runs/hub/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
self.display(save=True, save_dir=save_dir) # save results
def crop(self, save_dir='runs/hub/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
self.display(crop=True, save_dir=save_dir) # crop results
print(f'Saved results to {save_dir}\n')
def render(self):
self.display(render=True) # render results
return self.imgs
def pandas(self):
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
new = copy(self) # return copy
ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
return new
def tolist(self):
# return a list of Detections objects, i.e. 'for result in results.tolist():'
x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)]
for d in x:
for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
setattr(d, k, getattr(d, k)[0]) # pop out of list
return x
def __len__(self):
return self.n
class Classify(nn.Module):
# Classification head, i.e. x(b,c1,20,20) to x(b,c2)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
super(Classify, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
self.flat = nn.Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
return self.flat(self.conv(z)) # flatten to x(b,c2) |
import os
import platform
import subprocess
import sys
from pathlib import Path
from core.management.commands.utils import Utils
from django.apps import apps
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = """Manager responsável por analisar as classes de modelos do projeto Django para gerar os arquivos
do projeto FastAPI correspondente às apps do Django"""
def __init__(self):
super().__init__()
self.path_root = os.getcwd()
self.path_core = os.path.join(self.BASE_DIR, "core")
self.operation_system = platform.system().lower()
self.project = 'fastapi'
self.fastapi_dir = os.path.join(self.BASE_DIR, '..', "fastapi")
self.fastapi_project = os.path.join(self.path_core, "management/commands/snippets/fastapi_project")
self.snippet_dir = "{}/{}".format(self.path_core, "management/commands/snippets/fastapi/")
self.current_app_model = None
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
_django_types = ["SmallAutoField", "AutoField", "BLANK_CHOICE_DASH", "BigAutoField", "BigIntegerField",
"BinaryField", "BooleanField", "CharField", "CommaSeparatedIntegerField", "DateField",
"DateTimeField", "DecimalField", "DurationField", "EmailField", "Empty", "FileField", "Field",
"FieldDoesNotExist", "FilePathField", "FloatField", "GenericIPAddressField", "IPAddressField",
"IntegerField", "FieldFile", "NOT_PROVIDED", "NullBooleanField", "ImageField",
"PositiveIntegerField", "PositiveSmallIntegerField", "SlugField", "SmallIntegerField", "TextField",
"TimeField", "URLField", "UUIDField", "ForeignKey", "OneToOneField"]
_schemas_types = ["int", "int", "BLANK_CHOICE_DASH", "int", "int", "str", "bool", "str", "str", "datetime.date",
"datetime.datetime", "float", "int", "EmailStr", "str", "str", "str", "str", "str", "float",
"str", "str", "int", "str", "str", "bool", "str", "int", "int", "str", "int",
"str", "DateTime", "str", "str", "int", "int",]
_models_types = ["Integer", "Integer", "BLANK_CHOICE_DASH", "Integer", "Integer", "String", "Boolean", "String", "String", "Date",
"Datetime", "Float", "Integer", "String", "String", "String", "String", "String", "String", "Float",
"String", "String", "Integer", "String", "String", "Boolean", "String", "Integer", "Integer", "String", "Integer",
"String", "DateTime", "String", "String", "Integer", "Integer", ]
def add_arguments(self, parser):
parser.add_argument("App", type=str, nargs="?")
parser.add_argument("Model", type=str, nargs="?")
parser.add_argument("--app", action="store_true", dest="app", help="Criar a App e seus models")
parser.add_argument("--app_model", action="store_true", dest="app_model",
help="Criar a App e o Model informado")
# Parâmetro opcionais
parser.add_argument(
'--schemas',
action='store_true',
dest='schemas',
help='Criar apenas os Schemas'
)
parser.add_argument(
'--api',
action='store_true',
dest='api',
help='Criar apenas as rotas da api'
)
parser.add_argument(
'--cruds',
action='store_true',
dest='cruds',
help='Criar apenas os cruds'
)
parser.add_argument(
'--models',
action='store_true',
dest='models',
help='Criar apenas os models'
)
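# Typical invocation (the actual command name comes from this module's file name and is
# only illustrative here):
#   python manage.py fastapi myapp MyModel      # generate schema, model, crud and api for one model
#   python manage.py fastapi myapp --schemas    # generate only the schemas for every model of the app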
def _check_dir(self, path) -> bool:
"""Método responsável por verificar se o diretório já existe."""
return Utils.check_dir(path)
def _check_file(self, path):
"""Método responsável por verificar se o arquivo já existe no caminho informado."""
return Utils.check_file(path)
def __check_content(self, path, text_check):
"""Método responsável por verificar se o texto passado com parâmetro existe no conteúdo do arquivo."""
return Utils.check_content(path, text_check)
def __ignore_base_fields(self, field):
"""Método responsável por remover da análise do models os atributos herdados da classe pai Base
Arguments:
field {String} -- Nome do atributo
Returns:
bool -- True se o atributo for um dos atributos da classe pai, caso contrário False.
"""
try:
__ignore_fields = ["id", "deleted", "created_on", "updated_on" ]
return field in __ignore_fields
except Exception as error:
Utils.show_message(f"Error in __ignore_base_fields: {error}", error=True)
def __get_snippet(self, path=None, file_name=None, state_manager=False):
"""Método para recuperar o valor do arquivo de snippet a ser convertido pela substituição com os valores
baseados em modelos do projeto Django
Arguments:
path {str} - Caminho do arquivo snippet a ser utilizado como padrão para gerar o arquivo resultante.
file_name {str} - Nome do arquivo snippet a ser lido
state_manager {bool} - Booleano para determinar se o snippet a ser lido é de algum dos pacotes
de gerência de estado do projeto Fastapi (deprecated)
Returns:
str -- Texto base a ser utilizado para geração dos arquivos resultantes da conversão
"""
try:
if os.path.isfile(path):
with open(path, encoding="utf-8") as arquivo:
return arquivo.read()
except Exception as e:
Utils.show_message(f"Error in get_snippet {e}", error=True)
sys.exit()
def __init_fastapi(self):
"""Método para iniciar o projeto Fastapi
"""
try:
if not Utils.check_dir(self.fastapi_dir):
Utils.show_message("Criando o projeto Fastapi.")
print(self.fastapi_project)
__cmd_fastapi_create = "cp -R {} {}".format(self.fastapi_project, self.fastapi_dir)
subprocess.call(__cmd_fastapi_create, shell=True)
Utils.show_message("Projeto criado com sucesso.")
except Exception as error:
Utils.show_message(f"Error in __init_Fastapi: {error}", error=True)
def __init_app(self, app_path):
"""Método para iniciar o projeto Fastapi
"""
try:
if not Utils.check_dir(app_path):
Utils.show_message("Criando diretório da app")
os.makedirs(app_path)
Utils.show_message("Diretório criado com sucesso")
except Exception as error:
Utils.show_message(f"Error in __init_Fastapi: {error}", error=True)
def __apply_pep(self, path):
try:
os.system('autopep8 --in-place --aggressive --aggressive {}'.format(path))
os.system('isort {}'.format(path))
except Exception as error:
Utils.show_message(f"Ocorreu o erro : {error}")
pass
def __manage_schema(self):
"""Método responsável por criar/configurar o arquivo de schema para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração do Schema do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/schema.txt"))
# Interpolando os dados
content = content.replace("$ModelClass$", self.model)
model = self.app_instance.get_model(self.model)
fields = model._meta.fields
result = ''
for field in iter(fields):
item = {}
item["app"], item["model"], item["name"] = str(field).split('.')
item["type"] = (str(
str(type(field)).split('.')[-1:])
.replace("[\"", "").replace("\'>\"]", ""))
if item["type"] not in self._django_types:
print('Campo {} desconhecido'.format(field))
continue
if not self.__ignore_base_fields(item['name']):
attribute = self._schemas_types[self._django_types.index(item['type'])]
field_name = item.get('name')
if (getattr(field, 'null', None)):
attribute = f"Optional[{attribute}]"
if (field.get_default() is not None and field.get_default() != ""):
attribute += f" = {field.get_default()}"
if (item.get("type") in ('ForeignKey', 'OneToOneField')):
field_name = field.get_attname_column()[1]
result += f"\t {field_name}: {attribute}\n"
content = content.replace("$fields$", result)
# Verificando se o arquivo forms.py existe
if self._check_file(self.path_schema) is False:
# Criando o arquivo com o conteúdo da interpolação
with open(self.path_schema, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_schema)
return
# Verificando se já existe configuração no forms para o
# Models informado
if self.__check_content(
self.path_schema, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
with open(self.path_schema, 'a') as schema:
schema.write("\n")
schema.write(content)
self.__apply_pep(self.path_schema)
except Exception as error:
Utils.show_message(f"Error in __manage_schema: {error}", error=True)
def __manage_model(self):
"""Método responsável por criar/configurar o arquivo de schema para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração do Model do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/model.txt"))
# Interpolando os dados
content = content.replace("$ModelClass$", self.model)
model = self.app_instance.get_model(self.model)
content = content.replace("$table$", model._meta.db_table)
fields = model._meta.fields
related_fields = model._meta.many_to_many
result = ''
imports = ""
many_to_many = ""
for field in iter(fields):
item = {}
item["app"], item["model"], item["name"] = str(field).split('.')
item["type"] = (str(
str(type(field)).split('.')[-1:])
.replace("[\"", "").replace("\'>\"]", ""))
if item["type"] not in self._django_types:
print('Campo {} desconhecido'.format(field))
continue
if not self.__ignore_base_fields(item['name']):
attribute = self._models_types[self._django_types.index(item['type'])]
field_name = item.get('name')
relationship = None
if (field.max_length):
attribute += f"({field.max_length})"
if (item.get("type") in ('ForeignKey', 'OneToOneField')):
field_name = field.get_attname_column()[1]
__model = field.related_model._meta
attribute = f"ForeignKey('{__model.db_table}.id')"
if __model.app_label != item.get('app'):
imports += f"from {__model.app_label}.models import {__model.object_name}\n"
relationship = f"\t {item.get("name")} = relationship('{__model.object_name}')\n"
attribute = f"{attribute}, nullable={(getattr(field, "null", None))}"
if (field.has_default()):
attribute += f" ,default={field.get_default()}"
if (field.unique):
attribute += f" ,unique={field.unique}"
result += f"\t {field_name} = Column({attribute})\n"
if relationship is not None:
result += relationship
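# For ManyToMany fields, generate a SQLAlchemy association table named
# <app>_<model>_<related_model> holding the two foreign keys, and expose the field on the
# model through relationship(..., secondary=<association table>).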
for field in iter(related_fields):
item = {}
item["app"], item["model"], item["name"] = str(field).split('.')
item["type"] = (str(
str(type(field)).split('.')[-1:])
.replace("[\"", "").replace("\'>\"]", ""))
if (item.get("type") == "ManyToManyField"):
_model_name = field.model._meta.model_name
_app_name = field.model._meta.app_label
_related_model_name = field.related_model._meta.model_name
_related_model_app = field.related_model._meta.app_label
__model = field.related_model._meta
table = f"{item.get("app")}_{_model_name}_{field.related_model._meta.model_name}"
many_to_many += f"{table} = Table('{table}', Base.metadata,"
many_to_many += f"Column('id', Integer, primary_key=True, index=True),"
many_to_many += f"Column('{_model_name}_id', ForeignKey('{_app_name}_{_model_name}.id')),"
many_to_many += f"Column('{_related_model_name}_id', ForeignKey('{_related_model_app}_{_related_model_name}.id')))\n"
result += f"\t {item.get("name")} = relationship('{__model.object_name}', secondary={table})\n"
content = content.replace("$columns$", result)
content = content.replace("$imports$", imports)
content = content.replace("$manyToMany$", many_to_many)
# Verificando se o arquivo forms.py existe
if self._check_file(self.path_model_fastapi) is False:
# Criando o arquivo com o conteúdo da interpolação
with open(self.path_model_fastapi, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_model_fastapi)
return
# Verificando se já existe configuração no forms para o
# Models informado
if self.__check_content(
self.path_model_fastapi, "class {}".format(self.model)):
Utils.show_message("O model informado já possui model configurado.")
return
with open(self.path_model_fastapi, 'a') as schema:
schema.write("\n")
schema.write(content)
self.__apply_pep(self.path_model_fastapi)
except Exception as error:
Utils.show_message(f"Error in __manage_model: {error}", error=True)
def __manage_cruds(self):
"""Método responsável por criar/configurar o arquivo de cruds para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração do Crud do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/cruds.txt"))
# Interpolando os dados
content = content.replace("$ModelClass$", self.model)
content = content.replace("$app$", self.app)
content = content.replace("$model$", self.model_lower)
# Verificando se o arquivo forms.py existe
if self._check_file(self.path_crud) is False:
# Criando o arquivo com o conteúdo da interpolação
with open(self.path_crud, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_crud)
return
# Verificando se já existe configuração no forms para o
# Models informado
if self.__check_content(
self.path_crud, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
with open(self.path_crud, 'a') as crud:
crud.write("\n")
crud.write(content)
self.__apply_pep(self.path_crud)
except Exception as error:
Utils.show_message(f"Error in __manage_crud: {error}", error=True)
def __manage_api(self):
"""Método responsável por criar/configurar o arquivo de cruds para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração das Rotas do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/api.txt"))
# Interpolando os dados
content = content.replace("$ModelClass$", self.model)
content = content.replace("$app$", self.app)
content = content.replace("$model$", self.model_lower)
# Verificando se o arquivo forms.py existe
if self._check_file(self.path_api) is False:
# Criando o arquivo com o conteúdo da interpolação
with open(self.path_api, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_api)
return
# Verificando se já existe configuração no forms para o
# Models informado
if self.__check_content(
self.path_api, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
if self.__check_content(self.path_api,
"router = APIRouter()"):
content = content.replace("router = APIRouter()", "")
with open(self.path_api, 'a') as crud:
crud.write("\n")
crud.write(content)
self.__apply_pep(self.path_api)
except Exception as error:
Utils.show_message(f"Error in __manage_crud: {error}", error=True)
def call_methods(self, options):
"""
Método que identifica qual comando foi solicitado pelo usuário para ser executado, antes de chamar o método,
as entradas informadas pelo usuário são validadas, evitando erros de execução do programa devido à ausência de
parâmetros obrigatórios.
Por uma questão de padrão de projeto as possibilidades de escolha do pacote de gerência
de estados para o projeto Fastapi foram alteradas, agora todo projeto gerado utiliza como pacote de gerência
de estado o pacote o Cubit/Bloc
"""
# Verificando se foram passados parâmetros opcionais
if options['cruds']:
Utils.show_message("Trabalhando apenas os cruds.")
self.__manage_cruds()
return
elif options['api']:
Utils.show_message("Trabalhando apenas a api.")
self.__manage_api()
return
elif options['schemas']:
Utils.show_message("Trabalhando apenas os schemas.")
self.__manage_schema()
return
elif options['models']:
Utils.show_message("Trabalhando apenas os models.")
self.__manage_model()
return
else:
# Chamando o método para tratar os api
self.__manage_api()
# Chamando o método para tratar as schemas
self.__manage_schema()
# Chamando o método para tratar o models
self.__manage_model()
# Chamando o método para tratar as cruds
self.__manage_cruds()
return
def handle(self, *args, **options):
app = options["App"] or None
model = options["Model"] or None
if app is None:
Utils.show_message(
"Você não informou uma APP para ser gerada.",
error=True)
return
if Utils.contain_number(app):
Utils.show_message("O nome da app não pode conter números.")
return
# Removendo os espaços em branco
self.app = app.strip()
# Pegando o diretório absoluto atual do projeto.
self.path_root = os.path.normpath(os.getcwd() + os.sep)
# Criando o path para a APP informada.
self.path_app = os.path.join(self.fastapi_dir, app)
self.path_app_local = os.path.join(self.path_root, app)
# Criando o path para a APP Core.
self.path_core = os.path.join(self.BASE_DIR, "core")
# Criando o path para os models baseado no App informada.
self.path_model = os.path.join(self.path_app_local, "models.py")
# Convertendo os nomes para caracteres minúsculo.
# para serem usado nos locais que necessitem dos nomes
# em minúsculo.
self.app_lower = app.lower()
# Criando o path para os forms baseado na App informada.
self.path_schema = os.path.join(self.path_app, "schemas.py")
self.path_model_fastapi = os.path.join(self.path_app, "models.py")
self.path_crud = os.path.join(self.path_app, "cruds.py")
self.path_api = os.path.join(self.path_app, "api.py")
# Verificando se o diretório do fast informada existe
if self._check_dir(self.fastapi_dir) is False:
self.__init_fastapi()
# Verifica se app esta instalada, pois precisa dela
# para recuperar as instancias dos models
if apps.is_installed(self.app_lower) is False:
Utils.show_message(
"Você deve colocar sua app no INSTALLED_APPS do settings.")
return
if self._check_dir(self.path_app) is False:
self.__init_app(self.path_app)
# Criando uma instancia da app
self.app_instance = apps.get_app_config(self.app_lower)
# Verificando se o usuário passou o nome do model
if options['Model']:
model = options['Model'] or None
if Utils.contain_number(model) is False:
# Removendo os espaços em branco
self.model = model.strip()
# Verificando se existe no models.py o Model informado
if self.__check_content(
self.path_model,
'class {}'.format(self.model)) is False:
Utils.show_message("Model informado não encontrado.")
return
try:
# Verifica se o model está na app informada
# Se o model for abstract ela retornará uma exceção
# LookupError
self.app_instance.get_model(self.model)
Utils.show_message(
"Gerando arquivos para o model {}".format(self.model))
# Convertendo os nomes para caracteres minúsculo.
# para serem usado nos locais que necessitem dos nomes
# em minúsculo.
self.model_lower = model.lower()
self.call_methods(options)
Utils.show_message("Processo concluído.")
except LookupError:
Utils.show_message(
"Esse model é abastrato. "
"Não vão ser gerados os arquivos.")
else:
# recupera todos os models da app
# print(self.app_instance.get_models())
for model in self.app_instance.get_models():
model = model.__name__
# Removendo os espaços em branco
self.model = model.strip()
Utils.show_message(
"Gerando arquivos para o model {}".format(self.model))
# Convertendo os nomes para caracteres minúsculo.
# para serem usado nos locais que necessitem dos nomes
# em minúsculo.
self.model_lower = model.lower()
# Chama os métodos de geração de arquivos
self.call_methods(options)
Utils.show_message(
"Processo concluído para o model {}.".format(
self.model))
Utils.show_message("Processo concluído.")
return
| import os
import platform
import subprocess
import sys
from pathlib import Path
from core.management.commands.utils import Utils
from django.apps import apps
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = """Manager responsável por analisar as classes de modelos do projeto Django para gerar os arquivos
do projeto FastAPI correspondente às apps do Django"""
def __init__(self):
super().__init__()
self.path_root = os.getcwd()
self.path_core = os.path.join(self.BASE_DIR, "core")
self.operation_system = platform.system().lower()
self.project = 'fastapi'
self.fastapi_dir = os.path.join(self.BASE_DIR, '..', "fastapi")
self.fastapi_project = os.path.join(self.path_core, "management/commands/snippets/fastapi_project")
self.snippet_dir = "{}/{}".format(self.path_core, "management/commands/snippets/fastapi/")
self.current_app_model = None
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
_django_types = ["SmallAutoField", "AutoField", "BLANK_CHOICE_DASH", "BigAutoField", "BigIntegerField",
"BinaryField", "BooleanField", "CharField", "CommaSeparatedIntegerField", "DateField",
"DateTimeField", "DecimalField", "DurationField", "EmailField", "Empty", "FileField", "Field",
"FieldDoesNotExist", "FilePathField", "FloatField", "GenericIPAddressField", "IPAddressField",
"IntegerField", "FieldFile", "NOT_PROVIDED", "NullBooleanField", "ImageField",
"PositiveIntegerField", "PositiveSmallIntegerField", "SlugField", "SmallIntegerField", "TextField",
"TimeField", "URLField", "UUIDField", "ForeignKey", "OneToOneField"]
_schemas_types = ["int", "int", "BLANK_CHOICE_DASH", "int", "int", "str", "bool", "str", "str", "datetime.date",
"datetime.datetime", "float", "int", "EmailStr", "str", "str", "str", "str", "str", "float",
"str", "str", "int", "str", "str", "bool", "str", "int", "int", "str", "int",
"str", "DateTime", "str", "str", "int", "int",]
_models_types = ["Integer", "Integer", "BLANK_CHOICE_DASH", "Integer", "Integer", "String", "Boolean", "String", "String", "Date",
"Datetime", "Float", "Integer", "String", "String", "String", "String", "String", "String", "Float",
"String", "String", "Integer", "String", "String", "Boolean", "String", "Integer", "Integer", "String", "Integer",
"String", "DateTime", "String", "String", "Integer", "Integer", ]
def add_arguments(self, parser):
parser.add_argument("App", type=str, nargs="?")
parser.add_argument("Model", type=str, nargs="?")
parser.add_argument("--app", action="store_true", dest="app", help="Criar a App e seus models")
parser.add_argument("--app_model", action="store_true", dest="app_model",
help="Criar a App e o Model informado")
# Parâmetro opcionais
parser.add_argument(
'--schemas',
action='store_true',
dest='schemas',
help='Criar apenas os Schemas'
)
parser.add_argument(
'--api',
action='store_true',
dest='api',
help='Criar apenas as rotas da api'
)
parser.add_argument(
'--cruds',
action='store_true',
dest='cruds',
help='Criar apenas os cruds'
)
parser.add_argument(
'--models',
action='store_true',
dest='models',
help='Criar apenas os models'
)
def _check_dir(self, path) -> bool:
"""Método responsável por verificar se o diretório já existe."""
return Utils.check_dir(path)
def _check_file(self, path):
"""Método responsável por verificar se o arquivo já existe no caminho informado."""
return Utils.check_file(path)
def __check_content(self, path, text_check):
"""Método responsável por verificar se o texto passado com parâmetro existe no conteúdo do arquivo."""
return Utils.check_content(path, text_check)
def __ignore_base_fields(self, field):
"""Método responsável por remover da análise do models os atributos herdados da classe pai Base
Arguments:
field {String} -- Nome do atributo
Returns:
bool -- True se o atributo for um dos atributos da classe pai, caso contrário False.
"""
try:
__ignore_fields = ["id", "deleted", "created_on", "updated_on" ]
return field in __ignore_fields
except Exception as error:
Utils.show_message(f"Error in __ignore_base_fields: {error}", error=True)
def __get_snippet(self, path=None, file_name=None, state_manager=False):
"""Método para recuperar o valor do arquivo de snippet a ser convertido pela substituição com os valores
baseados em modelos do projeto Django
Arguments:
path {str} - Caminho do arquivo snippet a ser utilizado como padrão para gerar o arquivo resultante.
file_name {str} - Nome do arquivo snippet a ser lido
state_manager {bool} - Booleano para determinar se o snippet a ser lido é de algum dos pacotes
de gerência de estado do projeto Fastapi (deprecated)
Returns:
str -- Texto base a ser utilizado para geração dos arquivos resultantes da conversão
"""
try:
if os.path.isfile(path):
with open(path, encoding="utf-8") as arquivo:
return arquivo.read()
except Exception as e:
Utils.show_message(f"Error in get_snippet {e}", error=True)
sys.exit()
def __init_fastapi(self):
"""Método para iniciar o projeto Fastapi
"""
try:
if not Utils.check_dir(self.fastapi_dir):
Utils.show_message("Criando o projeto Fastapi.")
print(self.fastapi_project)
__cmd_fastapi_create = "cp -R {} {}".format(self.fastapi_project, self.fastapi_dir)
subprocess.call(__cmd_fastapi_create, shell=True)
Utils.show_message("Projeto criado com sucesso.")
except Exception as error:
Utils.show_message(f"Error in __init_Fastapi: {error}", error=True)
def __init_app(self, app_path):
"""Método para iniciar o projeto Fastapi
"""
try:
if not Utils.check_dir(app_path):
Utils.show_message("Criando diretório da app")
os.makedirs(app_path)
Utils.show_message("Diretório criado com sucesso")
except Exception as error:
Utils.show_message(f"Error in __init_Fastapi: {error}", error=True)
def __apply_pep(self, path):
try:
os.system('autopep8 --in-place --aggressive --aggressive {}'.format(path))
os.system('isort {}'.format(path))
except Exception as error:
Utils.show_message(f"Ocorreu o erro : {error}")
pass
def __manage_schema(self):
"""Método responsável por criar/configurar o arquivo de schema para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração do Schema do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/schema.txt"))
# Interpolando os dados
content = content.replace("$ModelClass$", self.model)
model = self.app_instance.get_model(self.model)
fields = model._meta.fields
result = ''
for field in iter(fields):
item = {}
item["app"], item["model"], item["name"] = str(field).split('.')
item["type"] = (str(
str(type(field)).split('.')[-1:])
.replace("[\"", "").replace("\'>\"]", ""))
if item["type"] not in self._django_types:
print('Campo {} desconhecido'.format(field))
continue
if not self.__ignore_base_fields(item['name']):
attribute = self._schemas_types[self._django_types.index(item['type'])]
field_name = item.get('name')
if (getattr(field, 'null', None)):
attribute = f"Optional[{attribute}]"
if (field.get_default() is not None and field.get_default() != ""):
attribute += f" = {field.get_default()}"
if (item.get("type") in ('ForeignKey', 'OneToOneField')):
field_name = field.get_attname_column()[1]
result += f"\t {field_name}: {attribute}\n"
content = content.replace("$fields$", result)
# Verificando se o arquivo forms.py existe
if self._check_file(self.path_schema) is False:
# Criando o arquivo com o conteúdo da interpolação
with open(self.path_schema, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_schema)
return
# Verificando se já existe configuração no forms para o
# Models informado
if self.__check_content(
self.path_schema, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
with open(self.path_schema, 'a') as schema:
schema.write("\n")
schema.write(content)
self.__apply_pep(self.path_schema)
except Exception as error:
Utils.show_message(f"Error in __manage_schema: {error}", error=True)
def __manage_model(self):
"""Método responsável por criar/configurar o arquivo de schema para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração do Model do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/model.txt"))
# Interpolando os dados
content = content.replace("$ModelClass$", self.model)
model = self.app_instance.get_model(self.model)
content = content.replace("$table$", model._meta.db_table)
fields = model._meta.fields
related_fields = model._meta.many_to_many
result = ''
imports = ""
many_to_many = ""
for field in iter(fields):
item = {}
item["app"], item["model"], item["name"] = str(field).split('.')
item["type"] = (str(
str(type(field)).split('.')[-1:])
.replace("[\"", "").replace("\'>\"]", ""))
if item["type"] not in self._django_types:
print('Campo {} desconhecido'.format(field))
continue
if not self.__ignore_base_fields(item['name']):
attribute = self._models_types[self._django_types.index(item['type'])]
field_name = item.get('name')
relationship = None
if (field.max_length):
attribute += f"({field.max_length})"
if (item.get("type") in ('ForeignKey', 'OneToOneField')):
field_name = field.get_attname_column()[1]
__model = field.related_model._meta
attribute = f"ForeignKey('{__model.db_table}.id')"
if __model.app_label != item.get('app'):
imports += f"from {__model.app_label}.models import {__model.object_name}\n"
relationship = f"\t {item.get('name')} = relationship('{__model.object_name}')\n"
attribute = f"{attribute}, nullable={(getattr(field, 'null', None))}"
if (field.has_default()):
attribute += f" ,default={field.get_default()}"
if (field.unique):
attribute += f" ,unique={field.unique}"
result += f"\t {field_name} = Column({attribute})\n"
if relationship is not None:
result += relationship
for field in iter(related_fields):
item = {}
item["app"], item["model"], item["name"] = str(field).split('.')
item["type"] = (str(
str(type(field)).split('.')[-1:])
.replace("[\"", "").replace("\'>\"]", ""))
if (item.get("type") == "ManyToManyField"):
_model_name = field.model._meta.model_name
_app_name = field.model._meta.app_label
_related_model_name = field.related_model._meta.model_name
_related_model_app = field.related_model._meta.app_label
__model = field.related_model._meta
table = f"{item.get('app')}_{_model_name}_{field.related_model._meta.model_name}"
many_to_many += f"{table} = Table('{table}', Base.metadata,"
many_to_many += f"Column('id', Integer, primary_key=True, index=True),"
many_to_many += f"Column('{_model_name}_id', ForeignKey('{_app_name}_{_model_name}.id')),"
many_to_many += f"Column('{_related_model_name}_id', ForeignKey('{_related_model_app}_{_related_model_name}.id')))\n"
result += f"\t {item.get('name')} = relationship('{__model.object_name}', secondary={table})\n"
content = content.replace("$columns$", result)
content = content.replace("$imports$", imports)
content = content.replace("$manyToMany$", many_to_many)
# Verificando se o arquivo forms.py existe
if self._check_file(self.path_model_fastapi) is False:
# Criando o arquivo com o conteúdo da interpolação
with open(self.path_model_fastapi, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_model_fastapi)
return
# Verificando se já existe configuração no forms para o
# Models informado
if self.__check_content(
self.path_model_fastapi, "class {}".format(self.model)):
Utils.show_message("O model informado já possui model configurado.")
return
with open(self.path_model_fastapi, 'a') as schema:
schema.write("\n")
schema.write(content)
self.__apply_pep(self.path_model_fastapi)
except Exception as error:
Utils.show_message(f"Error in __manage_model: {error}", error=True)
def __manage_cruds(self):
"""Método responsável por criar/configurar o arquivo de cruds para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração do Crud do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/cruds.txt"))
# Interpolando os dados
content = content.replace("$ModelClass$", self.model)
content = content.replace("$app$", self.app)
content = content.replace("$model$", self.model_lower)
# Verificando se o arquivo forms.py existe
if self._check_file(self.path_crud) is False:
# Criando o arquivo com o conteúdo da interpolação
with open(self.path_crud, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_crud)
return
# Verificando se já existe configuração no forms para o
# Models informado
if self.__check_content(
self.path_crud, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
with open(self.path_crud, 'a') as crud:
crud.write("\n")
crud.write(content)
self.__apply_pep(self.path_crud)
except Exception as error:
Utils.show_message(f"Error in __manage_crud: {error}", error=True)
def __manage_api(self):
"""Método responsável por criar/configurar o arquivo de cruds para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração das Rotas do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/api.txt"))
# Interpolando os dados
content = content.replace("$ModelClass$", self.model)
content = content.replace("$app$", self.app)
content = content.replace("$model$", self.model_lower)
# Verificando se o arquivo forms.py existe
if self._check_file(self.path_api) is False:
# Criando o arquivo com o conteúdo da interpolação
with open(self.path_api, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_api)
return
# Verificando se já existe configuração no forms para o
# Models informado
if self.__check_content(
self.path_api, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
if self.__check_content(self.path_api,
"router = APIRouter()"):
content = content.replace("router = APIRouter()", "")
with open(self.path_api, 'a') as crud:
crud.write("\n")
crud.write(content)
self.__apply_pep(self.path_api)
except Exception as error:
Utils.show_message(f"Error in __manage_crud: {error}", error=True)
def call_methods(self, options):
"""
Método que identifica qual comando foi solicitado pelo usuário para ser executado, antes de chamar o método,
as entradas informadas pelo usuário são validadas, evitando erros de execução do programa devido à ausência de
parâmetros obrigatórios.
Por uma questão de padrão de projeto as possibilidades de escolha do pacote de gerência
de estados para o projeto Fastapi foram alteradas, agora todo projeto gerado utiliza como pacote de gerência
de estado o pacote o Cubit/Bloc
"""
# Verificando se foram passados parâmetros opcionais
if options['cruds']:
Utils.show_message("Trabalhando apenas os cruds.")
self.__manage_cruds()
return
elif options['api']:
Utils.show_message("Trabalhando apenas a api.")
self.__manage_api()
return
elif options['schemas']:
Utils.show_message("Trabalhando apenas os schemas.")
self.__manage_schema()
return
elif options['models']:
Utils.show_message("Trabalhando apenas os models.")
self.__manage_model()
return
else:
# Chamando o método para tratar os api
self.__manage_api()
# Chamando o método para tratar as schemas
self.__manage_schema()
# Chamando o método para tratar o models
self.__manage_model()
# Chamando o método para tratar as cruds
self.__manage_cruds()
return
def handle(self, *args, **options):
app = options["App"] or None
model = options["Model"] or None
if app is None:
Utils.show_message(
"Você não informou uma APP para ser gerada.",
error=True)
return
if Utils.contain_number(app):
Utils.show_message("O nome da app não pode conter números.")
return
# Removendo os espaços em branco
self.app = app.strip()
# Pegando o diretório absoluto atual do projeto.
self.path_root = os.path.normpath(os.getcwd() + os.sep)
# Criando o path para a APP informada.
self.path_app = os.path.join(self.fastapi_dir, app)
self.path_app_local = os.path.join(self.path_root, app)
# Criando o path para a APP Core.
self.path_core = os.path.join(self.BASE_DIR, "core")
# Criando o path para os models baseado no App informada.
self.path_model = os.path.join(self.path_app_local, "models.py")
# Convertendo os nomes para caracteres minúsculo.
# para serem usado nos locais que necessitem dos nomes
# em minúsculo.
self.app_lower = app.lower()
# Criando o path para os forms baseado na App informada.
self.path_schema = os.path.join(self.path_app, "schemas.py")
self.path_model_fastapi = os.path.join(self.path_app, "models.py")
self.path_crud = os.path.join(self.path_app, "cruds.py")
self.path_api = os.path.join(self.path_app, "api.py")
# Verificando se o diretório do fast informada existe
if self._check_dir(self.fastapi_dir) is False:
self.__init_fastapi()
# Verifica se app esta instalada, pois precisa dela
# para recuperar as instancias dos models
if apps.is_installed(self.app_lower) is False:
Utils.show_message(
"Você deve colocar sua app no INSTALLED_APPS do settings.")
return
if self._check_dir(self.path_app) is False:
self.__init_app(self.path_app)
# Criando uma instancia da app
self.app_instance = apps.get_app_config(self.app_lower)
# Verificando se o usuário passou o nome do model
if options['Model']:
model = options['Model'] or None
if Utils.contain_number(model) is False:
# Removendo os espaços em branco
self.model = model.strip()
# Verificando se existe no models.py o Model informado
if self.__check_content(
self.path_model,
'class {}'.format(self.model)) is False:
Utils.show_message("Model informado não encontrado.")
return
try:
# Verifica se o model está na app informada
# Se o model for abstract ela retornará uma exceção
# LookupError
self.app_instance.get_model(self.model)
Utils.show_message(
"Gerando arquivos para o model {}".format(self.model))
# Convertendo os nomes para caracteres minúsculo.
# para serem usado nos locais que necessitem dos nomes
# em minúsculo.
self.model_lower = model.lower()
self.call_methods(options)
Utils.show_message("Processo concluído.")
except LookupError:
Utils.show_message(
"Esse model é abastrato. "
"Não vão ser gerados os arquivos.")
else:
# recupera todos os models da app
# print(self.app_instance.get_models())
for model in self.app_instance.get_models():
model = model.__name__
# Removendo os espaços em branco
self.model = model.strip()
Utils.show_message(
"Gerando arquivos para o model {}".format(self.model))
# Convertendo os nomes para caracteres minúsculo.
# para serem usado nos locais que necessitem dos nomes
# em minúsculo.
self.model_lower = model.lower()
# Chama os métodos de geração de arquivos
self.call_methods(options)
Utils.show_message(
"Processo concluído para o model {}.".format(
self.model))
Utils.show_message("Processo concluído.")
return
|
import os.path
import typing
import subprocess
import base64
from django.conf import settings
from django.core.files.storage import default_storage
from django.http import HttpResponse
from django.urls.base import resolve
from django.views.decorators.csrf import csrf_exempt
from drf_yasg.openapi import Parameter
from drf_yasg.utils import swagger_auto_schema
from rest_framework.filters import BaseFilterBackend
from rest_framework.response import Response
from rest_framework.schemas import coreapi
from rest_framework.views import APIView, Request
from rest_framework import status
from rest_framework.exceptions import ValidationError
from ..drf_auth_override import CsrfExemptSessionAuthentication
from ..utils import xresponse, get_pretty_logger, file_hash, ErrorCode, source_hash, encode_base64
from ..exceptions import ParamError
from ..serializers import ImgSerializer
from ..views import schema_utils
logger = get_pretty_logger('api:views')
class RequestImgFilterBackend(BaseFilterBackend):
def get_schema_fields(self, view):
return [
]
def validate_payload(serializer_class, payload: dict) -> dict:
img_serializer = serializer_class(data=payload)
img_serializer.is_valid(raise_exception=True)
clean_data = img_serializer.validated_data
name = ''.join(clean_data['source'].name.split('.')[:-1]).replace('.', '_').replace(' ', '_')
suffix = ''.join(clean_data['source'].name.split('.')[-1:])
filename = default_storage.save(f'{name}.{suffix}', clean_data['source'])
clean_data['filename'] = filename
clean_data['storage'] = default_storage.location
return clean_data
class ImgProcessAPIView(APIView):
filter_backends = (RequestImgFilterBackend,)
serializer_class = ImgSerializer
authentication_classes = (CsrfExemptSessionAuthentication,)
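# Template-method base view: subclasses only override return_format (the image MIME subtype
# of the result) and process_request (which performs the conversion and returns
# (output_filepath, output_filename)); post() handles payload validation, the
# output=image|url query switch and the HTTP response.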
def process_request(self, clean_data, request):
raise NotImplementedError('not implemented')
@property
def return_format(self):
return ''
@swagger_auto_schema(operation_description="",
manual_parameters=[Parameter('output', in_='query', required=True, type='string')],
request_body=serializer_class,
responses={200: schema_utils.xresponse_ok(),
400: schema_utils.xresponse_nok()})
def post(self, request):
if 'output' not in request.query_params:
output = 'image'
else:
output = str(request.query_params['output']).lower()
supported_output_formats = ['image', 'url']
if output not in supported_output_formats:
return xresponse(
status=status.HTTP_400_BAD_REQUEST,
error_code=ErrorCode.InvalidParams,
msg=f'Unhandled output format. Selected: {output} available: [{', '.join(supported_output_formats)}]'
)
try:
clean_data = validate_payload(self.serializer_class, request.data)
except ParamError as e:
return xresponse(status.HTTP_400_BAD_REQUEST, e.error_code, e.msg)
try:
output_filepath, output_filename = self.process_request(clean_data, request)
if output == 'image':
with open(output_filepath, 'rb') as file:
return HttpResponse(content=file.read(), content_type=f'image/{self.return_format}')
else:
return HttpResponse(
status=status.HTTP_303_SEE_OTHER,
headers={
'Location': request.build_absolute_uri(f'{settings.MEDIA_URL}{output_filename}')
},
)
except Exception as e:
return xresponse(status.HTTP_400_BAD_REQUEST, ErrorCode.NotFound, e)
class Png2Tiff(ImgProcessAPIView):
@property
def return_format(self):
return 'tiff'
def process_request(self, clean_data, request):
# Conversion pipeline, illustrated for an input easy.png:
#   1. convert easy.png -set colorspace RGB -alpha extract easy_alpha.png   (extract the alpha channel)
#   2. convert easy_alpha.png easy_alpha.svg                                (trace the alpha mask to SVG)
#   3. convert the PNG to a temporary TIFF
#   4. clip the TIFF with the SVG path in GIMP (svg-clip-path) to produce the final TIFF
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{"".join(clean_data["filename"].split(".")[:-1])}.tiff"
output_filepath = os.path.join(clean_data['storage'], output_filename)
output_alpha_filepath = os.path.join(clean_data['storage'], f"{"".join(clean_data["filename"].split(".")[:-1])}_alpha.png")
command_extract_alpha = f'convert {input_filepath} -set colorspace RGB -alpha extract {output_alpha_filepath}'
output_svg_filepath = f'{''.join(output_alpha_filepath.split('.')[:-1])}.svg'
command_alpha_svg = f'convert {output_alpha_filepath} {output_svg_filepath}'
output_tiff_tmp_filepath = os.path.join(clean_data['storage'], f"{"".join(clean_data["filename"].split(".")[:-1])}_tmp.tiff")
command_png_to_tiff = f'convert {input_filepath} {output_tiff_tmp_filepath}'
logger.info(f'command: {command_extract_alpha}')
process = subprocess.Popen(
command_extract_alpha.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_alpha_svg}')
process = subprocess.Popen(
command_alpha_svg.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_png_to_tiff}')
process = subprocess.Popen(
command_png_to_tiff.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
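# The GIMP batch call relies on a Script-Fu procedure named svg-clip-path, which is not part
# of stock GIMP; it is assumed to be installed on the host. It clips the temporary TIFF with
# the SVG alpha path and writes the final output file.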
gimp_command = f"gimp -i -b '(svg-clip-path \"{output_tiff_tmp_filepath}\" \"{output_svg_filepath}\" \"{output_filepath}\" )' -b '(gimp-quit 0)'"
logger.info(f'command: {gimp_command}')
process = subprocess.Popen(
gimp_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(20)
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
os.remove(output_alpha_filepath)
os.remove(output_svg_filepath)
os.remove(output_tiff_tmp_filepath)
return output_filepath, output_filename
class Tiff2Png(ImgProcessAPIView):
@property
def return_format(self):
return 'png'
def process_request(self, clean_data, request):
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{"".join(clean_data["filename"].split(".")[:-1])}.png"
output_filepath = os.path.join(clean_data['storage'], output_filename)
command = f'convert {input_filepath} -alpha transparent -clip -alpha opaque {output_filepath}'
process = subprocess.Popen(
command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'command: {command}')
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
return output_filepath, output_filename
class Eps2Png(ImgProcessAPIView):
@property
def return_format(self):
return 'png'
def process_request(self, clean_data, request):
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{"".join(clean_data["filename"].split(".")[:-1])}.png"
output_filepath = os.path.join(clean_data['storage'], output_filename)
command = f'convert {input_filepath} -alpha transparent -clip -alpha opaque {output_filepath}'
process = subprocess.Popen(
command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'command: {command}')
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
return output_filepath, output_filename
class Png2Eps(ImgProcessAPIView):
@property
def return_format(self):
return 'postscript'
def process_request(self, clean_data, request):
# TODO: convert png-alpha to svg
# convert easy.png -set colorspace RGB -alpha extract easy_alpha.png
# convert easy_alpha.png easy_alpha.svg
# convert png to tiff
# gimp tiff with alpha.svg
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{"".join(clean_data["filename"].split(".")[:-1])}.eps"
output_filepath = os.path.join(clean_data['storage'], output_filename)
output_alpha_filepath = os.path.join(clean_data['storage'], f"{"".join(clean_data["filename"].split(".")[:-1])}_alpha.png")
command_extract_alpha = f'convert {input_filepath} -set colorspace RGB -alpha extract {output_alpha_filepath}'
output_svg_filepath = f'{''.join(output_alpha_filepath.split('.')[:-1])}.svg'
command_alpha_svg = f'convert {output_alpha_filepath} {output_svg_filepath}'
output_tiff_tmp_filepath = os.path.join(clean_data['storage'], f"{"".join(clean_data["filename"].split(".")[:-1])}_tmp.tiff")
output_filepath_tiff = os.path.join(clean_data['storage'], f"{"".join(clean_data["filename"].split(".")[:-1])}_final.tiff")
command_png_to_tiff = f'convert {input_filepath} {output_tiff_tmp_filepath}'
command_tiff_to_eps = f'convert {output_filepath_tiff} {output_filepath}'
logger.info(f'command: {command_extract_alpha}')
process = subprocess.Popen(
command_extract_alpha.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_alpha_svg}')
process = subprocess.Popen(
command_alpha_svg.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_png_to_tiff}')
process = subprocess.Popen(
command_png_to_tiff.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
gimp_command = f"gimp -i -b '(svg-clip-path \"{output_tiff_tmp_filepath}\" \"{output_svg_filepath}\" \"{output_filepath_tiff}\" )' -b '(gimp-quit 0)'"
logger.info(f'command: {gimp_command}')
process = subprocess.Popen(
gimp_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(20)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_tiff_to_eps}')
process = subprocess.Popen(
command_tiff_to_eps.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
os.remove(output_alpha_filepath)
os.remove(output_svg_filepath)
os.remove(output_tiff_tmp_filepath)
os.remove(output_filepath_tiff)
return output_filepath, output_filename
| import os.path
import typing
import subprocess
import base64
from django.conf import settings
from django.core.files.storage import default_storage
from django.http import HttpResponse
from django.urls.base import resolve
from django.views.decorators.csrf import csrf_exempt
from drf_yasg.openapi import Parameter
from drf_yasg.utils import swagger_auto_schema
from rest_framework.filters import BaseFilterBackend
from rest_framework.response import Response
from rest_framework.schemas import coreapi
from rest_framework.views import APIView, Request
from rest_framework import status
from rest_framework.exceptions import ValidationError
from ..drf_auth_override import CsrfExemptSessionAuthentication
from ..utils import xresponse, get_pretty_logger, file_hash, ErrorCode, source_hash, encode_base64
from ..exceptions import ParamError
from ..serializers import ImgSerializer
from ..views import schema_utils
logger = get_pretty_logger('api:views')
class RequestImgFilterBackend(BaseFilterBackend):
def get_schema_fields(self, view):
return [
]
def validate_payload(serializer_class, payload: dict) -> dict:
img_serializer = serializer_class(data=payload)
img_serializer.is_valid(raise_exception=True)
clean_data = img_serializer.validated_data
name = ''.join(clean_data['source'].name.split('.')[:-1]).replace('.', '_').replace(' ', '_')
suffix = ''.join(clean_data['source'].name.split('.')[-1:])
filename = default_storage.save(f'{name}.{suffix}', clean_data['source'])
clean_data['filename'] = filename
clean_data['storage'] = default_storage.location
return clean_data
class ImgProcessAPIView(APIView):
filter_backends = (RequestImgFilterBackend,)
serializer_class = ImgSerializer
authentication_classes = (CsrfExemptSessionAuthentication,)
def process_request(self, clean_data, request):
raise NotImplementedError('not implemented')
@property
def return_format(self):
return ''
@swagger_auto_schema(operation_description="",
manual_parameters=[Parameter('output', in_='query', required=True, type='string')],
request_body=serializer_class,
responses={200: schema_utils.xresponse_ok(),
400: schema_utils.xresponse_nok()})
def post(self, request):
if 'output' not in request.query_params:
output = 'image'
else:
output = str(request.query_params['output']).lower()
supported_output_formats = ['image', 'url']
if output not in supported_output_formats:
return xresponse(
status=status.HTTP_400_BAD_REQUEST,
error_code=ErrorCode.InvalidParams,
msg=f'Unhandled output format. Selected: {output} available: [{", ".join(supported_output_formats)}]'
)
try:
clean_data = validate_payload(self.serializer_class, request.data)
except ParamError as e:
return xresponse(status.HTTP_400_BAD_REQUEST, e.error_code, e.msg)
try:
output_filepath, output_filename = self.process_request(clean_data, request)
if output == 'image':
with open(output_filepath, 'rb') as file:
return HttpResponse(content=file.read(), content_type=f'image/{self.return_format}')
else:
return HttpResponse(
status=status.HTTP_303_SEE_OTHER,
headers={
'Location': request.build_absolute_uri(f'{settings.MEDIA_URL}{output_filename}')
},
)
except Exception as e:
return xresponse(status.HTTP_400_BAD_REQUEST, ErrorCode.NotFound, e)
class Png2Tiff(ImgProcessAPIView):
@property
def return_format(self):
return 'tiff'
def process_request(self, clean_data, request):
# convert easy.png -set colorspace RGB -alpha extract easy_alpha.png
# convert easy_alpha.png easy_alpha.svg
# convert png to tiff
# gimp tiff with alpha.svg
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.tiff"
output_filepath = os.path.join(clean_data['storage'], output_filename)
output_alpha_filepath = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_alpha.png")
command_extract_alpha = f'convert {input_filepath} -set colorspace RGB -alpha extract {output_alpha_filepath}'
output_svg_filepath = f'{"".join(output_alpha_filepath.split(".")[:-1])}.svg'
command_alpha_svg = f'convert {output_alpha_filepath} {output_svg_filepath}'
output_tiff_tmp_filepath = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_tmp.tiff")
command_png_to_tiff = f'convert {input_filepath} {output_tiff_tmp_filepath}'
logger.info(f'command: {command_extract_alpha}')
process = subprocess.Popen(
command_extract_alpha.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_alpha_svg}')
process = subprocess.Popen(
command_alpha_svg.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_png_to_tiff}')
process = subprocess.Popen(
command_png_to_tiff.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
gimp_command = f"gimp -i -b '(svg-clip-path \"{output_tiff_tmp_filepath}\" \"{output_svg_filepath}\" \"{output_filepath}\" )' -b '(gimp-quit 0)'"
logger.info(f'command: {gimp_command}')
process = subprocess.Popen(
gimp_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(20)
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
os.remove(output_alpha_filepath)
os.remove(output_svg_filepath)
os.remove(output_tiff_tmp_filepath)
return output_filepath, output_filename
class Tiff2Png(ImgProcessAPIView):
@property
def return_format(self):
return 'png'
def process_request(self, clean_data, request):
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.png"
output_filepath = os.path.join(clean_data['storage'], output_filename)
command = f'convert {input_filepath} -alpha transparent -clip -alpha opaque {output_filepath}'
process = subprocess.Popen(
command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'command: {command}')
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
return output_filepath, output_filename
class Eps2Png(ImgProcessAPIView):
@property
def return_format(self):
return 'png'
def process_request(self, clean_data, request):
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.png"
output_filepath = os.path.join(clean_data['storage'], output_filename)
command = f'convert {input_filepath} -alpha transparent -clip -alpha opaque {output_filepath}'
process = subprocess.Popen(
command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'command: {command}')
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
return output_filepath, output_filename
class Png2Eps(ImgProcessAPIView):
@property
def return_format(self):
return 'postscript'
def process_request(self, clean_data, request):
# TODO: convert png-alpha to svg
# convert easy.png -set colorspace RGB -alpha extract easy_alpha.png
# convert easy_alpha.png easy_alpha.svg
# convert png to tiff
# gimp tiff with alpha.svg
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.eps"
output_filepath = os.path.join(clean_data['storage'], output_filename)
output_alpha_filepath = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_alpha.png")
command_extract_alpha = f'convert {input_filepath} -set colorspace RGB -alpha extract {output_alpha_filepath}'
output_svg_filepath = f'{"".join(output_alpha_filepath.split(".")[:-1])}.svg'
command_alpha_svg = f'convert {output_alpha_filepath} {output_svg_filepath}'
output_tiff_tmp_filepath = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_tmp.tiff")
output_filepath_tiff = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_final.tiff")
command_png_to_tiff = f'convert {input_filepath} {output_tiff_tmp_filepath}'
command_tiff_to_eps = f'convert {output_filepath_tiff} {output_filepath}'
logger.info(f'command: {command_extract_alpha}')
process = subprocess.Popen(
command_extract_alpha.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_alpha_svg}')
process = subprocess.Popen(
command_alpha_svg.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_png_to_tiff}')
process = subprocess.Popen(
command_png_to_tiff.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
gimp_command = f"gimp -i -b '(svg-clip-path \"{output_tiff_tmp_filepath}\" \"{output_svg_filepath}\" \"{output_filepath_tiff}\" )' -b '(gimp-quit 0)'"
logger.info(f'command: {gimp_command}')
process = subprocess.Popen(
gimp_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(20)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_tiff_to_eps}')
process = subprocess.Popen(
command_tiff_to_eps.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
os.remove(output_alpha_filepath)
os.remove(output_svg_filepath)
os.remove(output_tiff_tmp_filepath)
os.remove(output_filepath_tiff)
return output_filepath, output_filename
|
import argparse
import os
import time
from typing import List, Optional
import glob
from multiprocessing import Process
import numpy as np
from astropy.io import fits
from pkg_resources import resource_filename
from pypeit.pypeitsetup import PypeItSetup
from pypeit.core import framematch
from pypeit import pypeit
from pypeit import fluxcalibrate
from pypeit.scripts import show_2dspec, show_1dspec
def get_cfg_lines(spectrograph: str) -> List[str]:
cfg_lines = [
"[rdx]",
f"spectrograph = {spectrograph}",
"[calibrations]",
f"master_dir = Master_{spectrograph.split("_")[-1]}",
"raise_chk_error = False",
"[scienceframe]",
"[[process]]",
"mask_cr = False",
"[baseprocess]",
"use_biasimage = False",
"[reduce]",
"[[extraction]]",
"skip_optimal = True",
"[[findobj]]",
"skip_second_find = True"
]
return cfg_lines
def parse(options: Optional[List[str]] = None) -> argparse.Namespace:
argparser = argparse.ArgumentParser(description="Quicklook for P200 DBSP",
formatter_class=argparse.RawTextHelpFormatter)
argparser.add_argument("fname", type=str, help="file to take a quick look at, or else red/blue\n"
"to just perform rough calibrations")
argparser.add_argument("--no-show", default=False, action="store_true",
help="Set this flag to suppress opening of plots")
return argparser.parse_args() if options is None else argparser.parse_args(options)
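# Example invocations (the entry-point name is illustrative and depends on how this module is
# exposed by the package):
#   dbsp_ql /data/red0042.fits   # quicklook-reduce a single science frame
#   dbsp_ql red                  # no such file: only build calibrations from red*.fits arc/flat frames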
def main(args: argparse.Namespace):
t = time.perf_counter()
# need an arc frame and a flat frame
root = args.fname.rstrip('0123456789.fits')
paths = glob.glob(f'{root}*.fits')
spectrograph = 'p200_dbsp_red' if 'red' in os.path.basename(args.fname) else 'p200_dbsp_blue'
arm = spectrograph.split('_')[-1]
CFG_LINES = get_cfg_lines(spectrograph)
flatimg = ""
arcimg = ""
sciimg = args.fname
calib_only = not os.path.isfile(sciimg)
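# Calibration-only mode: when the argument is not an existing file (e.g. just "red" or "blue"),
# scan the matching *.fits files for a flat and an arc frame and reduce only the calibrations.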
if calib_only:
for path in paths:
with fits.open(path) as hdul:
if not flatimg:
if hdul[0].header['OBJECT'] == 'flat' or hdul[0].header['IMGTYPE'] == 'flat':
flatimg = path
if not arcimg:
if hdul[0].header['OBJECT'] == 'arcs' or hdul[0].header['IMGTYPE'] == 'cal':
arcimg = path
if flatimg and arcimg:
break
if not (flatimg and arcimg):
raise Exception(f"Could not find a flat and an arc frame in the same directory as {root}!")
files = [arcimg, flatimg]
else:
files = [sciimg]
ps = PypeItSetup(files, path="./", spectrograph_name=spectrograph,
cfg_lines = CFG_LINES)
ps.build_fitstbl()
bm = framematch.FrameTypeBitMask()
file_bits = np.zeros(len(files), dtype=bm.minimum_dtype())
if calib_only:
file_bits[0] = bm.turn_on(file_bits[0], ['arc', 'tilt'])
file_bits[1] = bm.turn_on(file_bits[1], ['pixelflat', 'trace', 'illumflat'])
else:
file_bits[0] = bm.turn_on(file_bits[0], 'science')
asrt = np.array([ps.fitstbl['filename'].data.tolist().index(os.path.basename(fname)) for fname in files])
ps.fitstbl.set_frame_types(file_bits[asrt])
ps.fitstbl.set_combination_groups()
ps.fitstbl['setup'] = 'A'
ofiles = ps.fitstbl.write_pypeit(configs='A', cfg_lines=CFG_LINES)
pypeIt = pypeit.PypeIt(ofiles[0], verbosity=0,
reuse_masters=True, overwrite=True,
logname='dbsp_ql.log', show=False, calib_only=calib_only)
if calib_only:
pypeIt.calib_all()
else:
pypeIt.reduce_all()
pypeIt.build_qa()
output_spec2ds = list(filter(lambda f: os.path.isfile(os.path.join('Science', f)), [
pypeIt.spec_output_file(i, True) \
for i in range(len(pypeIt.fitstbl.table)) \
if pypeIt.fitstbl.table[i]['frametype'] in ['science']
]))
output_spec1ds = list(filter(lambda f: os.path.isfile(os.path.join('Science', f)), [
pypeIt.spec_output_file(i) \
for i in range(len(pypeIt.fitstbl.table)) \
if pypeIt.fitstbl.table[i]['frametype'] in ['science']
]))
if output_spec1ds and not calib_only:
sensfiles = [resource_filename("dbsp_drp", f"data/sens_{arm}_archived.fits")]
FxCalib = fluxcalibrate.FluxCalibrate.get_instance(output_spec1ds, sensfiles, par=ps.par['fluxcalib'])
print(f"Time elapsed: {time.perf_counter() - t}s.")
if not calib_only and not args.no_show:
p1 = Process(target = show_spec2d_helper, args=(output_spec2ds[0],))
p1.start()
if output_spec1ds:
with fits.open(output_spec1ds[0]) as hdul:
specs = len(hdul) - 2
parr = [ None ] * specs
for i in range(specs):
parr[i] = Process(target = show_spec1d_helper,
args=(str(i), output_spec1ds[0]))
parr[i].start()
def show_spec2d_helper(file):
return show_2dspec.Show2DSpec.main(show_2dspec.Show2DSpec.parse_args([file]))
def show_spec1d_helper(exten, file):
return show_1dspec.Show1DSpec.main(
show_1dspec.Show1DSpec.parse_args(['--extract', 'BOX', '--exten', exten,
'--flux', file])
)
| import argparse
import os
import time
from typing import List, Optional
import glob
from multiprocessing import Process
import numpy as np
from astropy.io import fits
from pkg_resources import resource_filename
from pypeit.pypeitsetup import PypeItSetup
from pypeit.core import framematch
from pypeit import pypeit
from pypeit import fluxcalibrate
from pypeit.scripts import show_2dspec, show_1dspec
def get_cfg_lines(spectrograph: str) -> List[str]:
cfg_lines = [
"[rdx]",
f"spectrograph = {spectrograph}",
"[calibrations]",
f"master_dir = Master_{spectrograph.split('_')[-1]}",
"raise_chk_error = False",
"[scienceframe]",
"[[process]]",
"mask_cr = False",
"[baseprocess]",
"use_biasimage = False",
"[reduce]",
"[[extraction]]",
"skip_optimal = True",
"[[findobj]]",
"skip_second_find = True"
]
return cfg_lines
def parse(options: Optional[List[str]] = None) -> argparse.Namespace:
argparser = argparse.ArgumentParser(description="Quicklook for P200 DBSP",
formatter_class=argparse.RawTextHelpFormatter)
argparser.add_argument("fname", type=str, help="file to take a quick look at, or else red/blue\n"
"to just perform rough calibrations")
argparser.add_argument("--no-show", default=False, action="store_true",
help="Set this flag to suppress opening of plots")
return argparser.parse_args() if options is None else argparser.parse_args(options)
def main(args: argparse.Namespace):
t = time.perf_counter()
# need an arc frame and a flat frame
root = args.fname.rstrip('0123456789.fits')
paths = glob.glob(f'{root}*.fits')
spectrograph = 'p200_dbsp_red' if 'red' in os.path.basename(args.fname) else 'p200_dbsp_blue'
arm = spectrograph.split('_')[-1]
CFG_LINES = get_cfg_lines(spectrograph)
flatimg = ""
arcimg = ""
sciimg = args.fname
calib_only = not os.path.isfile(sciimg)
if calib_only:
for path in paths:
with fits.open(path) as hdul:
if not flatimg:
if hdul[0].header['OBJECT'] == 'flat' or hdul[0].header['IMGTYPE'] == 'flat':
flatimg = path
if not arcimg:
if hdul[0].header['OBJECT'] == 'arcs' or hdul[0].header['IMGTYPE'] == 'cal':
arcimg = path
if flatimg and arcimg:
break
if not (flatimg and arcimg):
raise Exception(f"Could not find a flat and an arc frame in the same directory as {root}!")
files = [arcimg, flatimg]
else:
files = [sciimg]
ps = PypeItSetup(files, path="./", spectrograph_name=spectrograph,
cfg_lines = CFG_LINES)
ps.build_fitstbl()
bm = framematch.FrameTypeBitMask()
file_bits = np.zeros(len(files), dtype=bm.minimum_dtype())
if calib_only:
file_bits[0] = bm.turn_on(file_bits[0], ['arc', 'tilt'])
file_bits[1] = bm.turn_on(file_bits[1], ['pixelflat', 'trace', 'illumflat'])
else:
file_bits[0] = bm.turn_on(file_bits[0], 'science')
asrt = np.array([ps.fitstbl['filename'].data.tolist().index(os.path.basename(fname)) for fname in files])
ps.fitstbl.set_frame_types(file_bits[asrt])
ps.fitstbl.set_combination_groups()
ps.fitstbl['setup'] = 'A'
ofiles = ps.fitstbl.write_pypeit(configs='A', cfg_lines=CFG_LINES)
pypeIt = pypeit.PypeIt(ofiles[0], verbosity=0,
reuse_masters=True, overwrite=True,
logname='dbsp_ql.log', show=False, calib_only=calib_only)
if calib_only:
pypeIt.calib_all()
else:
pypeIt.reduce_all()
pypeIt.build_qa()
output_spec2ds = list(filter(lambda f: os.path.isfile(os.path.join('Science', f)), [
pypeIt.spec_output_file(i, True) \
for i in range(len(pypeIt.fitstbl.table)) \
if pypeIt.fitstbl.table[i]['frametype'] in ['science']
]))
output_spec1ds = list(filter(lambda f: os.path.isfile(os.path.join('Science', f)), [
pypeIt.spec_output_file(i) \
for i in range(len(pypeIt.fitstbl.table)) \
if pypeIt.fitstbl.table[i]['frametype'] in ['science']
]))
if output_spec1ds and not calib_only:
sensfiles = [resource_filename("dbsp_drp", f"data/sens_{arm}_archived.fits")]
FxCalib = fluxcalibrate.FluxCalibrate.get_instance(output_spec1ds, sensfiles, par=ps.par['fluxcalib'])
print(f"Time elapsed: {time.perf_counter() - t}s.")
if not calib_only and not args.no_show:
p1 = Process(target = show_spec2d_helper, args=(output_spec2ds[0],))
p1.start()
if output_spec1ds:
with fits.open(output_spec1ds[0]) as hdul:
specs = len(hdul) - 2
parr = [ None ] * specs
for i in range(specs):
parr[i] = Process(target = show_spec1d_helper,
args=(str(i), output_spec1ds[0]))
parr[i].start()
def show_spec2d_helper(file):
return show_2dspec.Show2DSpec.main(show_2dspec.Show2DSpec.parse_args([file]))
def show_spec1d_helper(exten, file):
return show_1dspec.Show1DSpec.main(
show_1dspec.Show1DSpec.parse_args(['--extract', 'BOX', '--exten', exten,
'--flux', file])
)
|
# Importing functions.
from random import randint
from time import sleep
from operator import itemgetter
# Declaring the data structures.
jogadores = dict()
colocacao = list()
# Adding the players and their rolls to the dictionary and displaying them on screen.
for r in range(1, 5):
jogadores[f'jogador{r}'] = randint(1, 6)
    print(f'O jogador{r} tirou {jogadores[f"jogador{r}"]} no dado.')
sleep(0.5)
# Printing a separator line to organize the output.
print('=' * 25)
print(f'{"Ranking dos Jogadores":^25}')
print('=' * 25)
# Building and displaying the ranking in order.
colocacao = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
for i, v in enumerate(colocacao):
print(f'{i+1}° lugar - {v[0]} com {v[1]}')
| # Importing functions.
from random import randint
from time import sleep
from operator import itemgetter
# Declaring the data structures.
jogadores = dict()
colocacao = list()
# Adding the players and their rolls to the dictionary and displaying them on screen.
for r in range(1, 5):
jogadores[f'jogador{r}'] = randint(1, 6)
print(f'O jogador{r} tirou {jogadores[f"jogador{r}"]} no dado.')
sleep(0.5)
# Printing a separator line to organize the output.
print('=' * 25)
print(f'{"Ranking dos Jogadores":^25}')
print('=' * 25)
# Building and displaying the ranking in order.
colocacao = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
for i, v in enumerate(colocacao):
print(f'{i+1}° lugar - {v[0]} com {v[1]}')
|
import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import traceback
import aiohttp
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
import inan.server.ws_connection as ws # lgtm [py/import-and-import-from]
from inan.consensus.coinbase import create_puzzlehash_for_pk
from inan.consensus.constants import ConsensusConstants
from inan.pools.pool_config import PoolWalletConfig, load_pool_config
from inan.protocols import farmer_protocol, harvester_protocol
from inan.protocols.pool_protocol import (
ErrorResponse,
get_current_authentication_token,
GetFarmerResponse,
PoolErrorCode,
PostFarmerPayload,
PostFarmerRequest,
PutFarmerPayload,
PutFarmerRequest,
AuthenticationPayload,
)
from inan.protocols.protocol_message_types import ProtocolMessageTypes
from inan.server.outbound_message import NodeType, make_msg
from inan.server.ws_connection import WSInanConnection
from inan.types.blockchain_format.proof_of_space import ProofOfSpace
from inan.types.blockchain_format.sized_bytes import bytes32
from inan.util.bech32m import decode_puzzle_hash
from inan.util.config import load_config, save_config, config_path_for_filename
from inan.util.hash import std_hash
from inan.util.ints import uint8, uint16, uint32, uint64
from inan.util.keychain import Keychain
from inan.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
find_authentication_sk,
find_owner_sk,
)
from inan.wallet.puzzles.singleton_top_layer import SINGLETON_MOD
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
log = logging.getLogger(__name__)
UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300
UPDATE_HARVESTER_CACHE_INTERVAL: int = 60
"""
HARVESTER PROTOCOL (FARMER <-> HARVESTER)
"""
class Farmer:
def __init__(
self,
root_path: Path,
farmer_config: Dict,
pool_config: Dict,
keychain: Keychain,
consensus_constants: ConsensusConstants,
):
self._root_path = root_path
self.config = farmer_config
# Keep track of all sps, keyed on challenge chain signage point hash
self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {}
# Keep track of harvester plot identifier (str), target sp index, and PoSpace for each challenge
self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {}
# Quality string to plot identifier and challenge_hash, for use with harvester.RequestSignatures
self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {}
# number of responses to each signage point
self.number_of_responses: Dict[bytes32, int] = {}
# A dictionary of keys to time added. These keys refer to keys in the above 4 dictionaries. This is used
# to periodically clear the memory
self.cache_add_time: Dict[bytes32, uint64] = {}
self.cache_clear_task: asyncio.Task
self.update_pool_state_task: asyncio.Task
self.constants = consensus_constants
self._shut_down = False
self.server: Any = None
self.keychain = keychain
self.state_changed_callback: Optional[Callable] = None
self.log = log
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [
master_sk_to_pool_sk(sk) for sk in self.all_root_sks
]
if len(self.get_public_keys()) == 0:
error_str = "No keys exist. Please run 'inan keys generate' or open the UI."
raise RuntimeError(error_str)
# This is the farmer configuration
self.farmer_target_encoded = self.config["xgen_target_address"]
self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded)
self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]
# This is the self pooling configuration, which is only used for original self-pooled plots
self.pool_target_encoded = pool_config["xgen_target_address"]
self.pool_target = decode_puzzle_hash(self.pool_target_encoded)
self.pool_sks_map: Dict = {}
for key in self.get_private_keys():
self.pool_sks_map[bytes(key.get_g1())] = key
assert len(self.farmer_target) == 32
assert len(self.pool_target) == 32
if len(self.pool_sks_map) == 0:
error_str = "No keys exist. Please run 'inan keys generate' or open the UI."
raise RuntimeError(error_str)
# The variables below are for use with an actual pool
# From p2_singleton_puzzle_hash to pool state dict
self.pool_state: Dict[bytes32, Dict] = {}
# From public key bytes to PrivateKey
self.authentication_keys: Dict[bytes, PrivateKey] = {}
# Last time we updated pool_state based on the config file
self.last_config_access_time: uint64 = uint64(0)
self.harvester_cache: Dict[str, Dict[str, Tuple[Dict, float]]] = {}
async def _start(self):
self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task())
self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task())
def _close(self):
self._shut_down = True
async def _await_closed(self):
await self.cache_clear_task
await self.update_pool_state_task
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
async def on_connect(self, peer: WSInanConnection):
# Sends a handshake to the harvester
self.state_changed("add_connection", {})
handshake = harvester_protocol.HarvesterHandshake(
self.get_public_keys(),
self.pool_public_keys,
)
if peer.connection_type is NodeType.HARVESTER:
msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
await peer.send_message(msg)
def set_server(self, server):
self.server = server
def state_changed(self, change: str, data: Dict[str, Any]):
if self.state_changed_callback is not None:
self.state_changed_callback(change, data)
def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str):
self.log.error(error_message)
self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
)
def on_disconnect(self, connection: ws.WSInanConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
self.state_changed("close_connection", {})
async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]:
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(f"{pool_config.pool_url}/pool_info") as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /pool_info response: {response}")
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}"
)
return None
async def _pool_get_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey
) -> Optional[Dict]:
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
get_farmer_params = {
"launcher_id": pool_config.launcher_id.hex(),
"authentication_token": authentication_token,
"signature": bytes(signature).hex(),
}
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(f"{pool_config.pool_url}/farmer", params=get_farmer_params) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_post_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
post_farmer_payload: PostFarmerPayload = PostFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash())
post_farmer_request = PostFarmerRequest(post_farmer_payload, signature)
post_farmer_body = json.dumps(post_farmer_request.to_json_dict())
headers = {
"content-type": "application/json;",
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_config.pool_url}/farmer", data=post_farmer_body, headers=headers
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"POST /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in POST /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_put_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
put_farmer_payload: PutFarmerPayload = PutFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash())
put_farmer_request = PutFarmerRequest(put_farmer_payload, signature)
put_farmer_body = json.dumps(put_farmer_request.to_json_dict())
try:
async with aiohttp.ClientSession() as session:
async with session.put(f"{pool_config.pool_url}/farmer", data=put_farmer_body) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"PUT /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}"
)
return None
async def update_pool_state(self):
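        # Reload the pool list from config.yaml and refresh pool_info and farmer info for each configured pool.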
config = load_config(self._root_path, "config.yaml")
pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path)
for pool_config in pool_config_list:
p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash
try:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
if p2_singleton_puzzle_hash not in self.pool_state:
self.authentication_keys[bytes(pool_config.authentication_public_key)] = authentication_sk
self.pool_state[p2_singleton_puzzle_hash] = {
"points_found_since_start": 0,
"points_found_24h": [],
"points_acknowledged_since_start": 0,
"points_acknowledged_24h": [],
"next_farmer_update": 0,
"next_pool_info_update": 0,
"current_points": 0,
"current_difficulty": None,
"pool_errors_24h": [],
"authentication_token_timeout": None,
}
self.log.info(f"Added pool: {pool_config}")
pool_state = self.pool_state[p2_singleton_puzzle_hash]
pool_state["pool_config"] = pool_config
# Skip state update when self pooling
if pool_config.pool_url == "":
continue
enforce_https = config["full_node"]["selected_network"] == "mainnet"
if enforce_https and not pool_config.pool_url.startswith("https://"):
self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}")
continue
# TODO: Improve error handling below, inform about unexpected failures
if time.time() >= pool_state["next_pool_info_update"]:
# Makes a GET request to the pool to get the updated information
pool_info = await self._pool_get_pool_info(pool_config)
if pool_info is not None and "error_code" not in pool_info:
pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"]
pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL
# Only update the first time from GET /pool_info, gets updated from GET /farmer later
if pool_state["current_difficulty"] is None:
pool_state["current_difficulty"] = pool_info["minimum_difficulty"]
if time.time() >= pool_state["next_farmer_update"]:
authentication_token_timeout = pool_state["authentication_token_timeout"]
async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[bool]]:
# Run a GET /farmer to see if the farmer is already known by the pool
response = await self._pool_get_farmer(
pool_config, authentication_token_timeout, authentication_sk
)
farmer_response: Optional[GetFarmerResponse] = None
farmer_known: Optional[bool] = None
if response is not None:
if "error_code" not in response:
farmer_response = GetFarmerResponse.from_json_dict(response)
if farmer_response is not None:
pool_state["current_difficulty"] = farmer_response.current_difficulty
pool_state["current_points"] = farmer_response.current_points
pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL
else:
farmer_known = response["error_code"] != PoolErrorCode.FARMER_NOT_KNOWN.value
self.log.error(
"update_pool_farmer_info failed: "
f"{response["error_code"]}, {response["error_message"]}"
)
return farmer_response, farmer_known
if authentication_token_timeout is not None:
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and farmer_is_known is not None and not farmer_is_known:
# Make the farmer known on the pool with a POST /farmer
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
post_response = await self._pool_post_farmer(
pool_config, authentication_token_timeout, owner_sk
)
if post_response is not None and "error_code" not in post_response:
self.log.info(
f"Welcome message from {pool_config.pool_url}: "
f"{post_response["welcome_message"]}"
)
# Now we should be able to update the local farmer info
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and not farmer_is_known:
self.log.error("Failed to update farmer info after POST /farmer.")
# Update the payout instructions on the pool if required
if (
farmer_info is not None
and pool_config.payout_instructions != farmer_info.payout_instructions
):
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
put_farmer_response_dict = await self._pool_put_farmer(
pool_config, authentication_token_timeout, owner_sk
)
try:
# put_farmer_response: PutFarmerResponse = PutFarmerResponse.from_json_dict(
# put_farmer_response_dict
# )
# if put_farmer_response.payout_instructions:
# self.log.info(
# f"Farmer information successfully updated on the pool {pool_config.pool_url}"
# )
# TODO: Fix Streamable implementation and recover the above.
if put_farmer_response_dict["payout_instructions"]:
self.log.info(
f"Farmer information successfully updated on the pool {pool_config.pool_url}"
)
else:
raise Exception
except Exception:
self.log.error(
f"Failed to update farmer information on the pool {pool_config.pool_url}"
)
else:
self.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}")
def get_public_keys(self):
return [child_sk.get_g1() for child_sk in self._private_keys]
def get_private_keys(self):
return self._private_keys
def get_reward_targets(self, search_for_private_key: bool) -> Dict:
if search_for_private_key:
all_sks = self.keychain.get_all_private_keys()
stop_searching_for_farmer, stop_searching_for_pool = False, False
for i in range(500):
if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
break
for sk, _ in all_sks:
ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1())
if ph == self.farmer_target:
stop_searching_for_farmer = True
if ph == self.pool_target:
stop_searching_for_pool = True
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
"have_farmer_sk": stop_searching_for_farmer,
"have_pool_sk": stop_searching_for_pool,
}
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
}
def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]):
config = load_config(self._root_path, "config.yaml")
if farmer_target_encoded is not None:
self.farmer_target_encoded = farmer_target_encoded
self.farmer_target = decode_puzzle_hash(farmer_target_encoded)
config["farmer"]["xgen_target_address"] = farmer_target_encoded
if pool_target_encoded is not None:
self.pool_target_encoded = pool_target_encoded
self.pool_target = decode_puzzle_hash(pool_target_encoded)
config["pool"]["xgen_target_address"] = pool_target_encoded
save_config(self._root_path, "config.yaml", config)
async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str):
for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items():
if launcher_id == pool_state_dict["pool_config"].launcher_id:
config = load_config(self._root_path, "config.yaml")
new_list = []
for list_element in config["pool"]["pool_list"]:
if bytes.fromhex(list_element["launcher_id"]) == bytes(launcher_id):
list_element["payout_instructions"] = payout_instructions
new_list.append(list_element)
config["pool"]["pool_list"] = new_list
save_config(self._root_path, "config.yaml", config)
# Force a GET /farmer which triggers the PUT /farmer if it detects the changed instructions
pool_state_dict["next_farmer_update"] = 0
return
self.log.warning(f"Launcher id: {launcher_id} not found")
async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]:
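        # Build a signed GET /login URL for the pool whose config matches launcher_id; returns None if not found.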
for pool_state in self.pool_state.values():
pool_config: PoolWalletConfig = pool_state["pool_config"]
if pool_config.launcher_id == launcher_id:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token_timeout = pool_state["authentication_token_timeout"]
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
return (
pool_config.pool_url
+ f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}"
f"&signature={bytes(signature).hex()}"
)
return None
async def update_cached_harvesters(self):
# First remove outdated cache entries
remove_hosts = []
for host, host_cache in self.harvester_cache.items():
remove_peers = []
for peer_id, peer_cache in host_cache.items():
_, last_update = peer_cache
# If the peer cache hasn't been updated for 10x interval, drop it since the harvester doesn't respond
if time.time() - last_update > UPDATE_HARVESTER_CACHE_INTERVAL * 10:
remove_peers.append(peer_id)
for key in remove_peers:
del host_cache[key]
if len(host_cache) == 0:
remove_hosts.append(host)
for key in remove_hosts:
del self.harvester_cache[key]
# Now query each harvester and update caches
for connection in self.server.get_connections():
if connection.connection_type != NodeType.HARVESTER:
continue
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry is None or time.time() - cache_entry[1] > UPDATE_HARVESTER_CACHE_INTERVAL:
response = await connection.request_plots(harvester_protocol.RequestPlots(), timeout=5)
if response is not None:
if isinstance(response, harvester_protocol.RespondPlots):
if connection.peer_host not in self.harvester_cache:
self.harvester_cache[connection.peer_host] = {}
self.harvester_cache[connection.peer_host][connection.peer_node_id.hex()] = (
response.to_json_dict(),
time.time(),
)
else:
self.log.error(
f"Invalid response from harvester:"
f"peer_host {connection.peer_host}, peer_node_id {connection.peer_node_id}"
)
else:
self.log.error(
"Harvester did not respond. You might need to update harvester to the latest version"
)
async def get_cached_harvesters(self, connection: WSInanConnection) -> Optional[Tuple[Dict, float]]:
host_cache = self.harvester_cache.get(connection.peer_host)
if host_cache is None:
return None
return host_cache.get(connection.peer_node_id.hex())
async def get_harvesters(self) -> Dict:
harvesters: List = []
for connection in self.server.get_connections():
if connection.connection_type != NodeType.HARVESTER:
continue
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry is not None:
harvester_object: dict = dict(cache_entry[0])
harvester_object["connection"] = {
"node_id": connection.peer_node_id.hex(),
"host": connection.peer_host,
"port": connection.peer_port,
}
harvesters.append(harvester_object)
return {"harvesters": harvesters}
async def _periodically_update_pool_state_task(self):
time_slept: uint64 = uint64(0)
config_path: Path = config_path_for_filename(self._root_path, "config.yaml")
while not self._shut_down:
# Every time the config file changes, read it to check the pool state
stat_info = config_path.stat()
if stat_info.st_mtime > self.last_config_access_time:
# If we detect the config file changed, refresh private keys first just in case
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
self.last_config_access_time = stat_info.st_mtime
await self.update_pool_state()
time_slept = uint64(0)
elif time_slept > 60:
await self.update_pool_state()
time_slept = uint64(0)
time_slept += 1
await asyncio.sleep(1)
async def _periodically_clear_cache_and_refresh_task(self):
time_slept: uint64 = uint64(0)
refresh_slept = 0
while not self._shut_down:
try:
if time_slept > self.constants.SUB_SLOT_TIME_TARGET:
now = time.time()
removed_keys: List[bytes32] = []
for key, add_time in self.cache_add_time.items():
if now - float(add_time) > self.constants.SUB_SLOT_TIME_TARGET * 3:
self.sps.pop(key, None)
self.proofs_of_space.pop(key, None)
self.quality_str_to_identifiers.pop(key, None)
self.number_of_responses.pop(key, None)
removed_keys.append(key)
for key in removed_keys:
self.cache_add_time.pop(key, None)
time_slept = uint64(0)
log.debug(
f"Cleared farmer cache. Num sps: {len(self.sps)} {len(self.proofs_of_space)} "
f"{len(self.quality_str_to_identifiers)} {len(self.number_of_responses)}"
)
time_slept += 1
refresh_slept += 1
# Periodically refresh GUI to show the correct download/upload rate.
if refresh_slept >= 30:
self.state_changed("add_connection", {})
refresh_slept = 0
# Handles harvester plots cache cleanup and updates
await self.update_cached_harvesters()
except Exception:
log.error(f"_periodically_clear_cache_and_refresh_task failed: {traceback.print_exc()}")
await asyncio.sleep(1)
| import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import traceback
import aiohttp
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
import inan.server.ws_connection as ws # lgtm [py/import-and-import-from]
from inan.consensus.coinbase import create_puzzlehash_for_pk
from inan.consensus.constants import ConsensusConstants
from inan.pools.pool_config import PoolWalletConfig, load_pool_config
from inan.protocols import farmer_protocol, harvester_protocol
from inan.protocols.pool_protocol import (
ErrorResponse,
get_current_authentication_token,
GetFarmerResponse,
PoolErrorCode,
PostFarmerPayload,
PostFarmerRequest,
PutFarmerPayload,
PutFarmerRequest,
AuthenticationPayload,
)
from inan.protocols.protocol_message_types import ProtocolMessageTypes
from inan.server.outbound_message import NodeType, make_msg
from inan.server.ws_connection import WSInanConnection
from inan.types.blockchain_format.proof_of_space import ProofOfSpace
from inan.types.blockchain_format.sized_bytes import bytes32
from inan.util.bech32m import decode_puzzle_hash
from inan.util.config import load_config, save_config, config_path_for_filename
from inan.util.hash import std_hash
from inan.util.ints import uint8, uint16, uint32, uint64
from inan.util.keychain import Keychain
from inan.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
find_authentication_sk,
find_owner_sk,
)
from inan.wallet.puzzles.singleton_top_layer import SINGLETON_MOD
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
log = logging.getLogger(__name__)
UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300
UPDATE_HARVESTER_CACHE_INTERVAL: int = 60
"""
HARVESTER PROTOCOL (FARMER <-> HARVESTER)
"""
class Farmer:
def __init__(
self,
root_path: Path,
farmer_config: Dict,
pool_config: Dict,
keychain: Keychain,
consensus_constants: ConsensusConstants,
):
self._root_path = root_path
self.config = farmer_config
# Keep track of all sps, keyed on challenge chain signage point hash
self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {}
# Keep track of harvester plot identifier (str), target sp index, and PoSpace for each challenge
self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {}
# Quality string to plot identifier and challenge_hash, for use with harvester.RequestSignatures
self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {}
# number of responses to each signage point
self.number_of_responses: Dict[bytes32, int] = {}
# A dictionary of keys to time added. These keys refer to keys in the above 4 dictionaries. This is used
# to periodically clear the memory
self.cache_add_time: Dict[bytes32, uint64] = {}
self.cache_clear_task: asyncio.Task
self.update_pool_state_task: asyncio.Task
self.constants = consensus_constants
self._shut_down = False
self.server: Any = None
self.keychain = keychain
self.state_changed_callback: Optional[Callable] = None
self.log = log
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [
master_sk_to_pool_sk(sk) for sk in self.all_root_sks
]
if len(self.get_public_keys()) == 0:
error_str = "No keys exist. Please run 'inan keys generate' or open the UI."
raise RuntimeError(error_str)
# This is the farmer configuration
self.farmer_target_encoded = self.config["xgen_target_address"]
self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded)
self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]
# This is the self pooling configuration, which is only used for original self-pooled plots
self.pool_target_encoded = pool_config["xgen_target_address"]
self.pool_target = decode_puzzle_hash(self.pool_target_encoded)
self.pool_sks_map: Dict = {}
for key in self.get_private_keys():
self.pool_sks_map[bytes(key.get_g1())] = key
assert len(self.farmer_target) == 32
assert len(self.pool_target) == 32
if len(self.pool_sks_map) == 0:
error_str = "No keys exist. Please run 'inan keys generate' or open the UI."
raise RuntimeError(error_str)
# The variables below are for use with an actual pool
# From p2_singleton_puzzle_hash to pool state dict
self.pool_state: Dict[bytes32, Dict] = {}
# From public key bytes to PrivateKey
self.authentication_keys: Dict[bytes, PrivateKey] = {}
# Last time we updated pool_state based on the config file
self.last_config_access_time: uint64 = uint64(0)
self.harvester_cache: Dict[str, Dict[str, Tuple[Dict, float]]] = {}
async def _start(self):
self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task())
self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task())
def _close(self):
self._shut_down = True
async def _await_closed(self):
await self.cache_clear_task
await self.update_pool_state_task
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
async def on_connect(self, peer: WSInanConnection):
# Sends a handshake to the harvester
self.state_changed("add_connection", {})
handshake = harvester_protocol.HarvesterHandshake(
self.get_public_keys(),
self.pool_public_keys,
)
if peer.connection_type is NodeType.HARVESTER:
msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
await peer.send_message(msg)
def set_server(self, server):
self.server = server
def state_changed(self, change: str, data: Dict[str, Any]):
if self.state_changed_callback is not None:
self.state_changed_callback(change, data)
def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str):
self.log.error(error_message)
self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
)
def on_disconnect(self, connection: ws.WSInanConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
self.state_changed("close_connection", {})
async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]:
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(f"{pool_config.pool_url}/pool_info") as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /pool_info response: {response}")
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}"
)
return None
async def _pool_get_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey
) -> Optional[Dict]:
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
get_farmer_params = {
"launcher_id": pool_config.launcher_id.hex(),
"authentication_token": authentication_token,
"signature": bytes(signature).hex(),
}
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(f"{pool_config.pool_url}/farmer", params=get_farmer_params) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_post_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
post_farmer_payload: PostFarmerPayload = PostFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash())
post_farmer_request = PostFarmerRequest(post_farmer_payload, signature)
post_farmer_body = json.dumps(post_farmer_request.to_json_dict())
headers = {
"content-type": "application/json;",
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_config.pool_url}/farmer", data=post_farmer_body, headers=headers
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"POST /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in POST /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_put_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
put_farmer_payload: PutFarmerPayload = PutFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash())
put_farmer_request = PutFarmerRequest(put_farmer_payload, signature)
put_farmer_body = json.dumps(put_farmer_request.to_json_dict())
try:
async with aiohttp.ClientSession() as session:
async with session.put(f"{pool_config.pool_url}/farmer", data=put_farmer_body) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"PUT /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}"
)
return None
async def update_pool_state(self):
config = load_config(self._root_path, "config.yaml")
pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path)
for pool_config in pool_config_list:
p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash
try:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
if p2_singleton_puzzle_hash not in self.pool_state:
self.authentication_keys[bytes(pool_config.authentication_public_key)] = authentication_sk
self.pool_state[p2_singleton_puzzle_hash] = {
"points_found_since_start": 0,
"points_found_24h": [],
"points_acknowledged_since_start": 0,
"points_acknowledged_24h": [],
"next_farmer_update": 0,
"next_pool_info_update": 0,
"current_points": 0,
"current_difficulty": None,
"pool_errors_24h": [],
"authentication_token_timeout": None,
}
self.log.info(f"Added pool: {pool_config}")
pool_state = self.pool_state[p2_singleton_puzzle_hash]
pool_state["pool_config"] = pool_config
# Skip state update when self pooling
if pool_config.pool_url == "":
continue
enforce_https = config["full_node"]["selected_network"] == "mainnet"
if enforce_https and not pool_config.pool_url.startswith("https://"):
self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}")
continue
# TODO: Improve error handling below, inform about unexpected failures
if time.time() >= pool_state["next_pool_info_update"]:
# Makes a GET request to the pool to get the updated information
pool_info = await self._pool_get_pool_info(pool_config)
if pool_info is not None and "error_code" not in pool_info:
pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"]
pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL
# Only update the first time from GET /pool_info, gets updated from GET /farmer later
if pool_state["current_difficulty"] is None:
pool_state["current_difficulty"] = pool_info["minimum_difficulty"]
if time.time() >= pool_state["next_farmer_update"]:
authentication_token_timeout = pool_state["authentication_token_timeout"]
async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[bool]]:
# Run a GET /farmer to see if the farmer is already known by the pool
response = await self._pool_get_farmer(
pool_config, authentication_token_timeout, authentication_sk
)
farmer_response: Optional[GetFarmerResponse] = None
farmer_known: Optional[bool] = None
if response is not None:
if "error_code" not in response:
farmer_response = GetFarmerResponse.from_json_dict(response)
if farmer_response is not None:
pool_state["current_difficulty"] = farmer_response.current_difficulty
pool_state["current_points"] = farmer_response.current_points
pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL
else:
farmer_known = response["error_code"] != PoolErrorCode.FARMER_NOT_KNOWN.value
self.log.error(
"update_pool_farmer_info failed: "
f"{response['error_code']}, {response['error_message']}"
)
return farmer_response, farmer_known
if authentication_token_timeout is not None:
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and farmer_is_known is not None and not farmer_is_known:
# Make the farmer known on the pool with a POST /farmer
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
post_response = await self._pool_post_farmer(
pool_config, authentication_token_timeout, owner_sk
)
if post_response is not None and "error_code" not in post_response:
self.log.info(
f"Welcome message from {pool_config.pool_url}: "
f"{post_response['welcome_message']}"
)
# Now we should be able to update the local farmer info
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and not farmer_is_known:
self.log.error("Failed to update farmer info after POST /farmer.")
# Update the payout instructions on the pool if required
if (
farmer_info is not None
and pool_config.payout_instructions != farmer_info.payout_instructions
):
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
put_farmer_response_dict = await self._pool_put_farmer(
pool_config, authentication_token_timeout, owner_sk
)
try:
# put_farmer_response: PutFarmerResponse = PutFarmerResponse.from_json_dict(
# put_farmer_response_dict
# )
# if put_farmer_response.payout_instructions:
# self.log.info(
# f"Farmer information successfully updated on the pool {pool_config.pool_url}"
# )
# TODO: Fix Streamable implementation and recover the above.
if put_farmer_response_dict["payout_instructions"]:
self.log.info(
f"Farmer information successfully updated on the pool {pool_config.pool_url}"
)
else:
raise Exception
except Exception:
self.log.error(
f"Failed to update farmer information on the pool {pool_config.pool_url}"
)
else:
self.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}")
def get_public_keys(self):
return [child_sk.get_g1() for child_sk in self._private_keys]
def get_private_keys(self):
return self._private_keys
def get_reward_targets(self, search_for_private_key: bool) -> Dict:
if search_for_private_key:
all_sks = self.keychain.get_all_private_keys()
stop_searching_for_farmer, stop_searching_for_pool = False, False
for i in range(500):
if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
break
for sk, _ in all_sks:
ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1())
if ph == self.farmer_target:
stop_searching_for_farmer = True
if ph == self.pool_target:
stop_searching_for_pool = True
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
"have_farmer_sk": stop_searching_for_farmer,
"have_pool_sk": stop_searching_for_pool,
}
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
}
def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]):
config = load_config(self._root_path, "config.yaml")
if farmer_target_encoded is not None:
self.farmer_target_encoded = farmer_target_encoded
self.farmer_target = decode_puzzle_hash(farmer_target_encoded)
config["farmer"]["xgen_target_address"] = farmer_target_encoded
if pool_target_encoded is not None:
self.pool_target_encoded = pool_target_encoded
self.pool_target = decode_puzzle_hash(pool_target_encoded)
config["pool"]["xgen_target_address"] = pool_target_encoded
save_config(self._root_path, "config.yaml", config)
async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str):
for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items():
if launcher_id == pool_state_dict["pool_config"].launcher_id:
config = load_config(self._root_path, "config.yaml")
new_list = []
for list_element in config["pool"]["pool_list"]:
if bytes.fromhex(list_element["launcher_id"]) == bytes(launcher_id):
list_element["payout_instructions"] = payout_instructions
new_list.append(list_element)
config["pool"]["pool_list"] = new_list
save_config(self._root_path, "config.yaml", config)
# Force a GET /farmer which triggers the PUT /farmer if it detects the changed instructions
pool_state_dict["next_farmer_update"] = 0
return
self.log.warning(f"Launcher id: {launcher_id} not found")
async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]:
for pool_state in self.pool_state.values():
pool_config: PoolWalletConfig = pool_state["pool_config"]
if pool_config.launcher_id == launcher_id:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token_timeout = pool_state["authentication_token_timeout"]
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
return (
pool_config.pool_url
+ f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}"
f"&signature={bytes(signature).hex()}"
)
return None
async def update_cached_harvesters(self):
# First remove outdated cache entries
remove_hosts = []
for host, host_cache in self.harvester_cache.items():
remove_peers = []
for peer_id, peer_cache in host_cache.items():
_, last_update = peer_cache
# If the peer cache hasn't been updated for 10x interval, drop it since the harvester doesn't respond
if time.time() - last_update > UPDATE_HARVESTER_CACHE_INTERVAL * 10:
remove_peers.append(peer_id)
for key in remove_peers:
del host_cache[key]
if len(host_cache) == 0:
remove_hosts.append(host)
for key in remove_hosts:
del self.harvester_cache[key]
# Now query each harvester and update caches
for connection in self.server.get_connections():
if connection.connection_type != NodeType.HARVESTER:
continue
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry is None or time.time() - cache_entry[1] > UPDATE_HARVESTER_CACHE_INTERVAL:
response = await connection.request_plots(harvester_protocol.RequestPlots(), timeout=5)
if response is not None:
if isinstance(response, harvester_protocol.RespondPlots):
if connection.peer_host not in self.harvester_cache:
self.harvester_cache[connection.peer_host] = {}
self.harvester_cache[connection.peer_host][connection.peer_node_id.hex()] = (
response.to_json_dict(),
time.time(),
)
else:
self.log.error(
f"Invalid response from harvester:"
f"peer_host {connection.peer_host}, peer_node_id {connection.peer_node_id}"
)
else:
self.log.error(
"Harvester did not respond. You might need to update harvester to the latest version"
)
async def get_cached_harvesters(self, connection: WSInanConnection) -> Optional[Tuple[Dict, float]]:
host_cache = self.harvester_cache.get(connection.peer_host)
if host_cache is None:
return None
return host_cache.get(connection.peer_node_id.hex())
async def get_harvesters(self) -> Dict:
harvesters: List = []
for connection in self.server.get_connections():
if connection.connection_type != NodeType.HARVESTER:
continue
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry is not None:
harvester_object: dict = dict(cache_entry[0])
harvester_object["connection"] = {
"node_id": connection.peer_node_id.hex(),
"host": connection.peer_host,
"port": connection.peer_port,
}
harvesters.append(harvester_object)
return {"harvesters": harvesters}
async def _periodically_update_pool_state_task(self):
time_slept: uint64 = uint64(0)
config_path: Path = config_path_for_filename(self._root_path, "config.yaml")
while not self._shut_down:
# Every time the config file changes, read it to check the pool state
stat_info = config_path.stat()
if stat_info.st_mtime > self.last_config_access_time:
# If we detect the config file changed, refresh private keys first just in case
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
self.last_config_access_time = stat_info.st_mtime
await self.update_pool_state()
time_slept = uint64(0)
elif time_slept > 60:
await self.update_pool_state()
time_slept = uint64(0)
time_slept += 1
await asyncio.sleep(1)
async def _periodically_clear_cache_and_refresh_task(self):
time_slept: uint64 = uint64(0)
refresh_slept = 0
while not self._shut_down:
try:
if time_slept > self.constants.SUB_SLOT_TIME_TARGET:
now = time.time()
removed_keys: List[bytes32] = []
for key, add_time in self.cache_add_time.items():
if now - float(add_time) > self.constants.SUB_SLOT_TIME_TARGET * 3:
self.sps.pop(key, None)
self.proofs_of_space.pop(key, None)
self.quality_str_to_identifiers.pop(key, None)
self.number_of_responses.pop(key, None)
removed_keys.append(key)
for key in removed_keys:
self.cache_add_time.pop(key, None)
time_slept = uint64(0)
log.debug(
f"Cleared farmer cache. Num sps: {len(self.sps)} {len(self.proofs_of_space)} "
f"{len(self.quality_str_to_identifiers)} {len(self.number_of_responses)}"
)
time_slept += 1
refresh_slept += 1
# Periodically refresh GUI to show the correct download/upload rate.
if refresh_slept >= 30:
self.state_changed("add_connection", {})
refresh_slept = 0
# Handles harvester plots cache cleanup and updates
await self.update_cached_harvesters()
except Exception:
log.error(f"_periodically_clear_cache_and_refresh_task failed: {traceback.print_exc()}")
await asyncio.sleep(1)
|
"""Demonstrates how an application might use the barcode_wheel library"""
import sys
import barcode_wheel
import svgwrite
import pathlib
import logging
import tempfile
import csv
from time import sleep
demo_contents = (
"""
22001,Money Order (Principal),
22101,Money Order (Fee),
10502,Club Card Savings,
12345678901,Test Product (Name Here),./4094485-random-picture.gif
9,Mardi Gras,
"""
)
PROG_NAME = pathlib.Path(sys.argv[0])
def main():
demo_file = pathlib.Path(f"./{PROG_NAME.with_suffix(".csv")}")
demo_svg = pathlib.Path(f"./{PROG_NAME.with_suffix(".svg")}")
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
log = logging.getLogger(f"{PROG_NAME.stem}")
log.info(
"""
This demo file includes a demo dataset.
To use your own dataset, make a file called 'demo_wheel.csv' and put it in the same folder
as this file.
The format for each line is:
PLU/UPC,Name,filepath to picture
"""
)
if demo_file.exists():
log.info(f"Using contents of '{demo_file}'")
else:
temp_file = tempfile.NamedTemporaryFile(mode="w+t", encoding="utf-8", dir=str(pathlib.Path.cwd()))
log.info(f"No file found\nUsing default demo contents in temporary file:\n{temp_file.name}")
demo_file = pathlib.Path(temp_file.name)
with demo_file.open(mode="w", newline="") as demo:
csv_writer = csv.writer(demo)
for row in demo_contents.split("\n"):
if not row:
continue
csv_writer.writerow(row.split(","))
num_slices = 0
with demo_file.open() as demo:
reader = csv.DictReader(
f=demo,
fieldnames=["PLU", "NAME", "PICTURE"],
)
for line in reader:
continue
drawing = svgwrite.Drawing(
filename=str(demo_svg),
size=("100%", "100%"),
#profile="tiny",
preserveAspectRatio="xMidyMid meet",
viewBox="0 0 200 200",
)
wheel, placeholders, defs = barcode_wheel.wheel_template(
center=(100, 100), radius=100, num_slices=9
)
drawing.add(wheel)
for def_item in defs:
drawing.defs.add(def_item)
drawing.save()
if __name__ == "__main__":
main()
| """Demonstrates how an application might use the barcode_wheel library"""
import sys
import barcode_wheel
import svgwrite
import pathlib
import logging
import tempfile
import csv
from time import sleep
demo_contents = (
"""
22001,Money Order (Principal),
22101,Money Order (Fee),
10502,Club Card Savings,
12345678901,Test Product (Name Here),./4094485-random-picture.gif
9,Mardi Gras,
"""
)
PROG_NAME = pathlib.Path(sys.argv[0])
def main():
demo_file = pathlib.Path(f"./{PROG_NAME.with_suffix('.csv')}")
demo_svg = pathlib.Path(f"./{PROG_NAME.with_suffix('.svg')}")
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
log = logging.getLogger(f"{PROG_NAME.stem}")
log.info(
"""
This demo file includes a demo dataset.
To use your own dataset, make a file called 'demo_wheel.csv' and put it in the same folder
as this file.
The format for each line is:
PLU/UPC,Name,filepath to picture
"""
)
if demo_file.exists():
log.info(f"Using contents of '{demo_file}'")
else:
temp_file = tempfile.NamedTemporaryFile(mode="w+t", encoding="utf-8", dir=str(pathlib.Path.cwd()))
log.info(f"No file found\nUsing default demo contents in temporary file:\n{temp_file.name}")
demo_file = pathlib.Path(temp_file.name)
with demo_file.open(mode="w", newline="") as demo:
csv_writer = csv.writer(demo)
for row in demo_contents.split("\n"):
if not row:
continue
csv_writer.writerow(row.split(","))
num_slices = 0
with demo_file.open() as demo:
reader = csv.DictReader(
f=demo,
fieldnames=["PLU", "NAME", "PICTURE"],
)
for line in reader:
continue
drawing = svgwrite.Drawing(
filename=str(demo_svg),
size=("100%", "100%"),
#profile="tiny",
preserveAspectRatio="xMidyMid meet",
viewBox="0 0 200 200",
)
wheel, placeholders, defs = barcode_wheel.wheel_template(
center=(100, 100), radius=100, num_slices=9
)
drawing.add(wheel)
for def_item in defs:
drawing.defs.add(def_item)
drawing.save()
if __name__ == "__main__":
main()
|
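The demo above iterates over the CSV with csv.DictReader but leaves num_slices at 0 and hard-codes 9 when building the wheel. If the slice count should instead follow the dataset, a small sketch (stdlib only, same three-column layout assumed) could be:

import csv
from pathlib import Path

def count_slices(csv_path: Path) -> int:
    # One wheel slice per non-empty CSV record.
    with csv_path.open(newline="") as f:
        reader = csv.DictReader(f, fieldnames=["PLU", "NAME", "PICTURE"])
        return sum(1 for row in reader if row["PLU"])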
import numpy as np
import time
import tempfile
import os
import importlib.util
import argparse
from typing import Sequence
import subprocess
import re
import oneflow as flow
import oneflow._oneflow_internal as oneflow_internal
DEFAULT_TIMES = 20
gpu_memory_used_by_oneflow = 0
def import_file(path):
spec = importlib.util.spec_from_file_location("mod", path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def sync(x):
if test_oneflow:
x.numpy()
else:
x.cpu()
def gpu_memory_used():
output = subprocess.check_output(
[
"nvidia-smi",
"--query-compute-apps=pid,used_gpu_memory",
"--format=csv,noheader",
]
)
output = output.decode("utf-8").strip()
my_pid = os.getpid()
mem_used_by_me = 0
for line in output.split("\n"):
pid, mem_used = map(int, re.split(",? ", line)[:2])
if pid == my_pid:
mem_used_by_me += mem_used
return mem_used_by_me
def print_rank_0(*args, **kwargs):
rank = int(os.getenv("RANK", "0"))
if rank == 0:
print(*args, **kwargs)
def test(
model_path: str,
module_name: str,
input_shape: Sequence[int],
disable_backward=False,
times=DEFAULT_TIMES,
no_verbose=False,
ddp=False,
ddp_broadcast_buffers=False,
show_memory=True,
):
framework_name = "OneFlow" if test_oneflow else "PyTorch"
if test_oneflow:
python_module = import_file(model_path)
torch = flow
else:
with open(model_path) as f:
buf = f.read()
lines = buf.split("\n")
for i, line in enumerate(lines):
if "import" not in line and len(line.strip()) != 0:
break
lines = (
lines[:i]
+ [
"import torch as flow",
"import torch.nn as nn",
"from torch import Tensor",
"from torch.nn import Parameter",
]
+ lines[i:]
)
buf = "\n".join(lines)
with tempfile.NamedTemporaryFile("w", suffix=".py") as f:
f.write(buf)
f.flush()
python_module = import_file(f.name)
import torch
if ddp:
import torch.distributed as dist
local_rank_env_var = os.getenv("LOCAL_RANK")
assert local_rank_env_var is not None
rank = int(local_rank_env_var)
torch.cuda.set_device(rank)
dist.init_process_group(backend="nccl", init_method="env://")
Net = getattr(python_module, module_name)
warmup_times = 5
m = Net()
m = m.to("cuda")
if ddp:
if test_oneflow:
m = torch.nn.parallel.DistributedDataParallel(
m, broadcast_buffers=ddp_broadcast_buffers
)
else:
m = torch.nn.parallel.DistributedDataParallel(
m, device_ids=[rank], broadcast_buffers=ddp_broadcast_buffers
)
def run_model(m, x):
if disable_backward:
with torch.no_grad():
return m(x)
else:
return m(x)
learning_rate = 0.01
mom = 0.9
optimizer = torch.optim.SGD(m.parameters(), lr=learning_rate, momentum=mom)
# input tensor of OneFlow should set requires_grad=False due to a bug
x = torch.tensor(
np.ones(input_shape).astype(np.float32), requires_grad=not test_oneflow
).to("cuda")
for i in range(warmup_times + times):
if i == warmup_times:
start = time.time()
y = run_model(m, x)
if not disable_backward:
y = y.sum()
y.backward()
optimizer.zero_grad()
optimizer.step()
sync(y)
end = time.time()
total_time_ms = (end - start) * 1000
time_per_run_ms = total_time_ms / times
if no_verbose:
print_rank_0(f"{framework_name}: {time_per_run_ms:.1f}ms")
else:
print_rank_0(
f"{framework_name} {module_name} time: {time_per_run_ms:.1f}ms (= {total_time_ms:.1f}ms / {times}, input_shape={input_shape}{", backward is disabled" if disable_backward else ""}{", ddp" if ddp else ""}{", ddp_broadcast_buffers is disabled" if not ddp_broadcast_buffers else ""}{f", world size={flow.env.get_world_size()}" if flow.env.get_world_size() != 1 else ""})"
)
if show_memory:
global gpu_memory_used_by_oneflow
if test_oneflow:
gpu_memory_used_by_oneflow = gpu_memory_used()
print_rank_0(
f"{framework_name} GPU used (rank 0): {gpu_memory_used_by_oneflow} MiB"
)
else:
print_rank_0(
f"{framework_name} GPU used (rank 0, estimated): {gpu_memory_used() - gpu_memory_used_by_oneflow} MiB"
)
if ddp and not test_oneflow:
import torch.distributed as dist
dist.destroy_process_group()
return time_per_run_ms
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("model_path", type=str)
parser.add_argument("module_name", type=str)
parser.add_argument("input_shape", type=str)
parser.add_argument("--times", type=int, default=DEFAULT_TIMES)
parser.add_argument("--disable-backward", action="store_true")
parser.add_argument("--no-verbose", action="store_true")
parser.add_argument("--ddp", action="store_true")
parser.add_argument("--ddp-no-broadcast-buffers", action="store_true")
parser.add_argument("--only-oneflow", action="store_true")
parser.add_argument("--only-pytorch", action="store_true")
parser.add_argument("--no-show-memory", action="store_true")
args = parser.parse_args()
input_shape = list(map(int, args.input_shape.split("x")))
global test_oneflow
if not args.only_pytorch:
# NOTE: PyTorch must run after OneFlow for correct memory usage
test_oneflow = True
oneflow_time = test(
args.model_path,
args.module_name,
input_shape,
disable_backward=args.disable_backward,
times=args.times,
no_verbose=args.no_verbose,
ddp=args.ddp,
ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,
show_memory=not args.no_show_memory,
)
if not args.only_oneflow:
test_oneflow = False
pytorch_time = test(
args.model_path,
args.module_name,
input_shape,
disable_backward=args.disable_backward,
times=args.times,
no_verbose=args.no_verbose,
ddp=args.ddp,
ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,
show_memory=not args.no_show_memory,
)
if not args.only_pytorch and not args.only_oneflow:
relative_speed = pytorch_time / oneflow_time
if args.no_verbose:
print_rank_0(f"Relative speed: {relative_speed:.2f}")
else:
print_rank_0(
f"Relative speed: {relative_speed:.2f} (= {pytorch_time:.1f}ms / {oneflow_time:.1f}ms)"
)
| import numpy as np
import time
import tempfile
import os
import importlib.util
import argparse
from typing import Sequence
import subprocess
import re
import oneflow as flow
import oneflow._oneflow_internal as oneflow_internal
DEFAULT_TIMES = 20
gpu_memory_used_by_oneflow = 0
def import_file(path):
spec = importlib.util.spec_from_file_location("mod", path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def sync(x):
if test_oneflow:
x.numpy()
else:
x.cpu()
def gpu_memory_used():
output = subprocess.check_output(
[
"nvidia-smi",
"--query-compute-apps=pid,used_gpu_memory",
"--format=csv,noheader",
]
)
output = output.decode("utf-8").strip()
my_pid = os.getpid()
mem_used_by_me = 0
for line in output.split("\n"):
pid, mem_used = map(int, re.split(",? ", line)[:2])
if pid == my_pid:
mem_used_by_me += mem_used
return mem_used_by_me
def print_rank_0(*args, **kwargs):
rank = int(os.getenv("RANK", "0"))
if rank == 0:
print(*args, **kwargs)
def test(
model_path: str,
module_name: str,
input_shape: Sequence[int],
disable_backward=False,
times=DEFAULT_TIMES,
no_verbose=False,
ddp=False,
ddp_broadcast_buffers=False,
show_memory=True,
):
framework_name = "OneFlow" if test_oneflow else "PyTorch"
if test_oneflow:
python_module = import_file(model_path)
torch = flow
else:
with open(model_path) as f:
buf = f.read()
lines = buf.split("\n")
for i, line in enumerate(lines):
if "import" not in line and len(line.strip()) != 0:
break
lines = (
lines[:i]
+ [
"import torch as flow",
"import torch.nn as nn",
"from torch import Tensor",
"from torch.nn import Parameter",
]
+ lines[i:]
)
buf = "\n".join(lines)
with tempfile.NamedTemporaryFile("w", suffix=".py") as f:
f.write(buf)
f.flush()
python_module = import_file(f.name)
import torch
if ddp:
import torch.distributed as dist
local_rank_env_var = os.getenv("LOCAL_RANK")
assert local_rank_env_var is not None
rank = int(local_rank_env_var)
torch.cuda.set_device(rank)
dist.init_process_group(backend="nccl", init_method="env://")
Net = getattr(python_module, module_name)
warmup_times = 5
m = Net()
m = m.to("cuda")
if ddp:
if test_oneflow:
m = torch.nn.parallel.DistributedDataParallel(
m, broadcast_buffers=ddp_broadcast_buffers
)
else:
m = torch.nn.parallel.DistributedDataParallel(
m, device_ids=[rank], broadcast_buffers=ddp_broadcast_buffers
)
def run_model(m, x):
if disable_backward:
with torch.no_grad():
return m(x)
else:
return m(x)
learning_rate = 0.01
mom = 0.9
optimizer = torch.optim.SGD(m.parameters(), lr=learning_rate, momentum=mom)
# input tensor of OneFlow should set requires_grad=False due to a bug
x = torch.tensor(
np.ones(input_shape).astype(np.float32), requires_grad=not test_oneflow
).to("cuda")
for i in range(warmup_times + times):
if i == warmup_times:
start = time.time()
y = run_model(m, x)
if not disable_backward:
y = y.sum()
y.backward()
optimizer.zero_grad()
optimizer.step()
sync(y)
end = time.time()
total_time_ms = (end - start) * 1000
time_per_run_ms = total_time_ms / times
if no_verbose:
print_rank_0(f"{framework_name}: {time_per_run_ms:.1f}ms")
else:
print_rank_0(
f"{framework_name} {module_name} time: {time_per_run_ms:.1f}ms (= {total_time_ms:.1f}ms / {times}, input_shape={input_shape}{', backward is disabled' if disable_backward else ''}{', ddp' if ddp else ''}{', ddp_broadcast_buffers is disabled' if not ddp_broadcast_buffers else ''}{f', world size={flow.env.get_world_size()}' if flow.env.get_world_size() != 1 else ''})"
)
if show_memory:
global gpu_memory_used_by_oneflow
if test_oneflow:
gpu_memory_used_by_oneflow = gpu_memory_used()
print_rank_0(
f"{framework_name} GPU used (rank 0): {gpu_memory_used_by_oneflow} MiB"
)
else:
print_rank_0(
f"{framework_name} GPU used (rank 0, estimated): {gpu_memory_used() - gpu_memory_used_by_oneflow} MiB"
)
if ddp and not test_oneflow:
import torch.distributed as dist
dist.destroy_process_group()
return time_per_run_ms
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("model_path", type=str)
parser.add_argument("module_name", type=str)
parser.add_argument("input_shape", type=str)
parser.add_argument("--times", type=int, default=DEFAULT_TIMES)
parser.add_argument("--disable-backward", action="store_true")
parser.add_argument("--no-verbose", action="store_true")
parser.add_argument("--ddp", action="store_true")
parser.add_argument("--ddp-no-broadcast-buffers", action="store_true")
parser.add_argument("--only-oneflow", action="store_true")
parser.add_argument("--only-pytorch", action="store_true")
parser.add_argument("--no-show-memory", action="store_true")
args = parser.parse_args()
input_shape = list(map(int, args.input_shape.split("x")))
global test_oneflow
if not args.only_pytorch:
# NOTE: PyTorch must run after OneFlow for correct memory usage
test_oneflow = True
oneflow_time = test(
args.model_path,
args.module_name,
input_shape,
disable_backward=args.disable_backward,
times=args.times,
no_verbose=args.no_verbose,
ddp=args.ddp,
ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,
show_memory=not args.no_show_memory,
)
if not args.only_oneflow:
test_oneflow = False
pytorch_time = test(
args.model_path,
args.module_name,
input_shape,
disable_backward=args.disable_backward,
times=args.times,
no_verbose=args.no_verbose,
ddp=args.ddp,
ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,
show_memory=not args.no_show_memory,
)
if not args.only_pytorch and not args.only_oneflow:
relative_speed = pytorch_time / oneflow_time
if args.no_verbose:
print_rank_0(f"Relative speed: {relative_speed:.2f}")
else:
print_rank_0(
f"Relative speed: {relative_speed:.2f} (= {pytorch_time:.1f}ms / {oneflow_time:.1f}ms)"
)
|
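The benchmark above warms up for five iterations before starting the clock, then divides the elapsed time by the number of timed iterations only. A framework-agnostic sketch of that timing pattern, with a hypothetical step callable standing in for the model run:

import time
from typing import Callable

def benchmark(step: Callable[[], None], warmup: int = 5, times: int = 20) -> float:
    # Returns the average milliseconds per timed iteration, excluding warmup.
    for i in range(warmup + times):
        if i == warmup:
            start = time.time()  # start timing only after the warmup runs
        step()
    total_ms = (time.time() - start) * 1000
    return total_ms / times

# Usage: benchmark(lambda: sum(range(10_000)))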
from abc import abstractmethod
import PIL
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.hub import load_state_dict_from_url
from torchvision.models import DenseNet as _DenseNet
from torchvision.models import ResNet as _ResNet
from torchvision.models.densenet import _load_state_dict
from torchvision.models.densenet import model_urls as densenet_model_urls
from torchvision.models.resnet import BasicBlock, Bottleneck
from torchvision.models.resnet import model_urls as resnet_model_urls
class Model(pl.LightningModule):
DEFAULT_CONFIG = {}
def __init__(self, config: dict = None):
super().__init__()
self.config = self.DEFAULT_CONFIG.copy()
if config is not None:
self.config.update(config)
self._set_model()
@abstractmethod
def _set_model(self):
raise NotImplementedError()
class ResNet(_ResNet):
ACTIVATION_DIMS = [64, 128, 256, 512]
ACTIVATION_WIDTH_HEIGHT = [64, 32, 16, 8]
RESNET_TO_ARCH = {"resnet18": [2, 2, 2, 2], "resnet50": [3, 4, 6, 3]}
def __init__(
self,
num_classes: int,
arch: str = "resnet18",
dropout: float = 0.0,
pretrained: bool = True,
):
if arch not in self.RESNET_TO_ARCH:
raise ValueError(
f"config['classifier'] must be one of: {self.RESNET_TO_ARCH.keys()}"
)
block = BasicBlock if arch == "resnet18" else Bottleneck
super().__init__(block, self.RESNET_TO_ARCH[arch])
if pretrained:
state_dict = load_state_dict_from_url(
resnet_model_urls[arch], progress=True
)
self.load_state_dict(state_dict)
# self.fc = nn.Linear(512 * block.expansion, num_classes)
self.fc = nn.Sequential(
nn.Dropout(dropout), nn.Linear(512 * block.expansion, num_classes)
)
def default_transform(img: PIL.Image.Image):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(img)
def default_train_transform(img: PIL.Image.Image):
return transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(img)
class DenseNet(_DenseNet):
DENSENET_TO_ARCH = {
"densenet121": {
"growth_rate": 32,
"block_config": (6, 12, 24, 16),
"num_init_features": 64,
}
}
def __init__(
self, num_classes: int, arch: str = "densenet121", pretrained: bool = True
):
if arch not in self.DENSENET_TO_ARCH:
raise ValueError(
f"config['classifier'] must be one of: {self.DENSENET_TO_ARCH.keys()}"
)
super().__init__(**self.DENSENET_TO_ARCH[arch])
if pretrained:
_load_state_dict(self, densenet_model_urls[arch], progress=True)
self.classifier = nn.Linear(self.classifier.in_features, num_classes)
class VisionClassifier(Model):
DEFAULT_CONFIG = {
"lr": 1e-4,
"model_name": "resnet",
"arch": "resnet18",
"pretrained": True,
"num_classes": 2,
"transform": default_transform,
"train_transform": default_train_transform,
}
def _set_model(self):
if self.config["model_name"] == "resnet":
self.model = ResNet(
num_classes=self.config["num_classes"],
arch=self.config["arch"],
pretrained=self.config["pretrained"],
)
elif self.config["model_name"] == "densenet":
self.model = DenseNet(
num_classes=self.config["num_classes"], arch=self.config["arch"]
)
else:
raise ValueError(f"Model name {self.config["model_name"]} not supported.")
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
inputs, targets, _ = batch["input"], batch["target"], batch["id"]
outs = self.forward(inputs)
loss = nn.functional.cross_entropy(outs, targets)
self.log("train_loss", loss, on_step=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
inputs, targets = batch["input"], batch["target"]
outs = self.forward(inputs)
loss = nn.functional.cross_entropy(outs, targets)
self.log("valid_loss", loss)
def validation_epoch_end(self, outputs) -> None:
for metric_name, metric in self.metrics.items():
self.log(f"valid_{metric_name}", metric.compute())
metric.reset()
def test_epoch_end(self, outputs) -> None:
return self.validation_epoch_end(outputs)
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.config["lr"])
return optimizer
| from abc import abstractmethod
import PIL
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.hub import load_state_dict_from_url
from torchvision.models import DenseNet as _DenseNet
from torchvision.models import ResNet as _ResNet
from torchvision.models.densenet import _load_state_dict
from torchvision.models.densenet import model_urls as densenet_model_urls
from torchvision.models.resnet import BasicBlock, Bottleneck
from torchvision.models.resnet import model_urls as resnet_model_urls
class Model(pl.LightningModule):
DEFAULT_CONFIG = {}
def __init__(self, config: dict = None):
super().__init__()
self.config = self.DEFAULT_CONFIG.copy()
if config is not None:
self.config.update(config)
self._set_model()
@abstractmethod
def _set_model(self):
raise NotImplementedError()
class ResNet(_ResNet):
ACTIVATION_DIMS = [64, 128, 256, 512]
ACTIVATION_WIDTH_HEIGHT = [64, 32, 16, 8]
RESNET_TO_ARCH = {"resnet18": [2, 2, 2, 2], "resnet50": [3, 4, 6, 3]}
def __init__(
self,
num_classes: int,
arch: str = "resnet18",
dropout: float = 0.0,
pretrained: bool = True,
):
if arch not in self.RESNET_TO_ARCH:
raise ValueError(
f"config['classifier'] must be one of: {self.RESNET_TO_ARCH.keys()}"
)
block = BasicBlock if arch == "resnet18" else Bottleneck
super().__init__(block, self.RESNET_TO_ARCH[arch])
if pretrained:
state_dict = load_state_dict_from_url(
resnet_model_urls[arch], progress=True
)
self.load_state_dict(state_dict)
# self.fc = nn.Linear(512 * block.expansion, num_classes)
self.fc = nn.Sequential(
nn.Dropout(dropout), nn.Linear(512 * block.expansion, num_classes)
)
def default_transform(img: PIL.Image.Image):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(img)
def default_train_transform(img: PIL.Image.Image):
return transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(img)
class DenseNet(_DenseNet):
DENSENET_TO_ARCH = {
"densenet121": {
"growth_rate": 32,
"block_config": (6, 12, 24, 16),
"num_init_features": 64,
}
}
def __init__(
self, num_classes: int, arch: str = "densenet121", pretrained: bool = True
):
if arch not in self.DENSENET_TO_ARCH:
raise ValueError(
f"config['classifier'] must be one of: {self.DENSENET_TO_ARCH.keys()}"
)
super().__init__(**self.DENSENET_TO_ARCH[arch])
if pretrained:
_load_state_dict(self, densenet_model_urls[arch], progress=True)
self.classifier = nn.Linear(self.classifier.in_features, num_classes)
class VisionClassifier(Model):
DEFAULT_CONFIG = {
"lr": 1e-4,
"model_name": "resnet",
"arch": "resnet18",
"pretrained": True,
"num_classes": 2,
"transform": default_transform,
"train_transform": default_train_transform,
}
def _set_model(self):
if self.config["model_name"] == "resnet":
self.model = ResNet(
num_classes=self.config["num_classes"],
arch=self.config["arch"],
pretrained=self.config["pretrained"],
)
elif self.config["model_name"] == "densenet":
self.model = DenseNet(
num_classes=self.config["num_classes"], arch=self.config["arch"]
)
else:
raise ValueError(f"Model name {self.config['model_name']} not supported.")
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
inputs, targets, _ = batch["input"], batch["target"], batch["id"]
outs = self.forward(inputs)
loss = nn.functional.cross_entropy(outs, targets)
self.log("train_loss", loss, on_step=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
inputs, targets = batch["input"], batch["target"]
outs = self.forward(inputs)
loss = nn.functional.cross_entropy(outs, targets)
self.log("valid_loss", loss)
def validation_epoch_end(self, outputs) -> None:
for metric_name, metric in self.metrics.items():
self.log(f"valid_{metric_name}", metric.compute())
metric.reset()
def test_epoch_end(self, outputs) -> None:
return self.validation_epoch_end(outputs)
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.config["lr"])
return optimizer
|
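The Model base class above merges a class-level DEFAULT_CONFIG with whatever config dict the caller passes. A minimal sketch of that override pattern, independent of pytorch_lightning and with hypothetical default keys:

class Configurable:
    DEFAULT_CONFIG = {"lr": 1e-4, "arch": "resnet18"}  # hypothetical defaults

    def __init__(self, config: dict = None):
        # Copy first, so per-instance overrides never mutate the class defaults.
        self.config = self.DEFAULT_CONFIG.copy()
        if config is not None:
            self.config.update(config)

m = Configurable({"lr": 3e-4})
assert m.config == {"lr": 3e-4, "arch": "resnet18"}
assert Configurable.DEFAULT_CONFIG["lr"] == 1e-4  # class defaults untouched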
from ciscosupportsdk.apisession import ApiSession
SERVICE_BASE_URL = "/software/v4.0"
class AutomatedSoftwareDistributionApi(object):
"""
Cisco Automated Software Distribution service provides software
information and download URLs to assist you in upgrading your
device/application to the latest version.
"""
def __init__(self, session: ApiSession) -> None:
self._session = session
def get_bug_details(self, bug_ids: list[str]) -> None:
"""
Returns detailed information for the specified bug ID or IDs.
:param: bug_ids: list[str]: Identifier of the bug or bugs for which
to return detailed information. A maximum of five (5) bug IDs can
be submitted separated by a comma.
:rtype: Bug
"""
path = f"{SERVICE_BASE_URL}/bug_ids/" f"{",".join(bug_ids)}"
print(path)
pass
| from ciscosupportsdk.apisession import ApiSession
SERVICE_BASE_URL = "/software/v4.0"
class AutomatedSoftwareDistributionApi(object):
"""
Cisco Automated Software Distribution service provides software
information and download URLs to assist you in upgrading your
device/application to the latest version.
"""
def __init__(self, session: ApiSession) -> None:
self._session = session
def get_bug_details(self, bug_ids: list[str]) -> None:
"""
Returns detailed information for the specified bug ID or IDs.
:param: bug_ids: list[str]: Identifier of the bug or bugs for which
to return detailed information. A maximum of five (5) bug IDs can
be submitted separated by a comma.
:rtype: Bug
"""
path = f"{SERVICE_BASE_URL}/bug_ids/" f"{','.join(bug_ids)}"
print(path)
pass
|
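get_bug_details above joins the bug IDs into the request path but does not yet enforce the five-ID limit its docstring documents. A hedged sketch of that validation and path construction; the actual ApiSession request is omitted because its interface is not shown here, and the bug IDs below are only illustrative:

SERVICE_BASE_URL = "/software/v4.0"

def build_bug_details_path(bug_ids: list) -> str:
    # Join up to five bug IDs into the bug_ids endpoint path.
    if not bug_ids:
        raise ValueError("at least one bug ID is required")
    if len(bug_ids) > 5:
        raise ValueError("a maximum of five bug IDs can be submitted")
    return f"{SERVICE_BASE_URL}/bug_ids/{','.join(bug_ids)}"

print(build_bug_details_path(["CSCvn12345", "CSCvn67890"]))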
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Credits to Hitalo-Sama and FTG Modules
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetHistoryRequest, CheckChatInviteRequest, GetFullChatRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.errors import (ChannelInvalidError, ChannelPrivateError, ChannelPublicGroupNaError, InviteHashEmptyError, InviteHashExpiredError, InviteHashInvalidError)
from telethon.utils import get_input_location
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=".chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("**🔬Analizando il gruppo/canale...**")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`C'è stato un errore inaspettato.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
    except Exception:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Gruppo/Canale non valido.`")
return None
except ChannelPrivateError:
await event.edit("`Questo è un canale/gruppo privato o sono Bannato da esso.`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Canale o SuperGruppo non esistente.`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(chat_obj_info, "broadcast") else False
chat_type = "Canale" if broadcast else "Gruppo"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
creator_id = msg_info.users[0].id if creator_valid else None
creator_firstname = msg_info.users[0].first_name if creator_valid and msg_info.users[0].first_name is not None else "Account Eliminato"
creator_username = msg_info.users[0].username if creator_valid and msg_info.users[0].username is not None else None
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and type(msg_info.messages[0].action) is MessageActionChannelMigrateFrom and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Sconosciuto"
location = str(e)
#this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(chat.full_chat, "kicked_count") else None
restrcited_users = chat.full_chat.banned_count if hasattr(chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Si</b>" if hasattr(chat_obj_info, "megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Si</b>" if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Si</b>" if hasattr(chat_obj_info, "restricted") and chat_obj_info.restricted else "No"
verified = "<b>Si</b>" if hasattr(chat_obj_info, "verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(creator_username) if creator_username else None
#end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None, works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b></b>\n"
caption += f"<b>🖊️Info della Chat</b>\n• 🆔: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"<b>• 📰Titolo del {chat_type} :</b> {chat_title}\n"
if former_title is not None: # Meant is the very first title
caption += f"<b>• 🥇Titolo Originario:</b> {former_title}\n"
if username is not None:
caption += f"<b>• 🏷Tipo di {chat_type}:</b> Publico\n"
caption += f"<b>• 🖇Link:<b> {username}\n"
else:
caption += f"<b>• 🏷Tipo di {chat_type} :</b> Privato\n"
if creator_username is not None:
caption += f"<b>• 👑Creatore:</b> {creator_username}\n"
elif creator_valid:
caption += f"<b>• 👑Creatore:</b> <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"<b>• 🕐Creato:</b> <code>{created.date().strftime("%b %d, %Y")} - {created.time()}</code>\n"
else:
caption += f"<b>• 🕐Creato:</b> <code>{chat_obj_info.date.date().strftime("%b %d, %Y")} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"<b>• 📡Data Center ID:</b> {dc_id}\n"
if exp_count is not None:
chat_level = int((1+sqrt(1+7*exp_count/14))/2)
caption += f"<b>• 🏁Livello del {chat_type}:</b> <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"<b>• ✉️Messaggi Visibili:</b> <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"<b>• 📨Messaggi inviati:</b> <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"<b>• 📨Messaggi Inviati:</b> <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"<b>• 👥Membri:</b> <code>{members}</code>\n"
if admins is not None:
caption += f"<b>• ⚜Amministratori:</b> <code>{admins}</code>\n"
if bots_list:
caption += f"<b>• 🤖Bot</b>: <code>{bots}</code>\n"
if members_online:
caption += f"<b>• 👥💡Membri Online al Momento:</b> <code>{members_online}</code>\n"
if restrcited_users is not None:
caption += f"<b>• 👥🚨Utenti Limitati:</b> <code>{restrcited_users}</code>\n"
if banned_users is not None:
caption += f"<b>• 👥🚷Utenti Bannati:</b> <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"<b>• 🎨Sticker del {chat_type}:</b> <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"<b>• 🐌Modalità Lenta:</b> {slowmode}"
if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"<b>🏆SuperGruppo:<b> {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"<b>🚨Limitato:<b> {restricted}\n"
if chat_obj_info.restricted:
caption += f"<b>> 💻Piattaforma:<b> {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"<b>> 📝Motivo:<b> {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"<b>> 📖Testo:<b> {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "<b>⚠️Scam:<b> <b>Si</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"✅<b>Verificato da Telegram:<b> {verified}\n\n"
if description:
caption += f"<b>💬Descrizione:<b> \n<code>{description}</code>\n"
return caption
CMD_HELP.update({
"chatinfo":
".chatinfo [optional: <reply/tag/chat id/invite link>]\
\nUsage: Gets info of a chat. Some info might be limited due to missing permissions."
})
| # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Credits to Hitalo-Sama and FTG Modules
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetHistoryRequest, CheckChatInviteRequest, GetFullChatRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.errors import (ChannelInvalidError, ChannelPrivateError, ChannelPublicGroupNaError, InviteHashEmptyError, InviteHashExpiredError, InviteHashInvalidError)
from telethon.utils import get_input_location
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=".chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("**🔬Analizando il gruppo/canale...**")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`C'è stato un errore inaspettato.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
except:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Gruppo/Canale non valido.`")
return None
except ChannelPrivateError:
await event.edit("`Questo è un canale/gruppo privato o sono Bannato da esso.`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Canale o SuperGruppo non esistente.`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(chat_obj_info, "broadcast") else False
chat_type = "Canale" if broadcast else "Gruppo"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
creator_id = msg_info.users[0].id if creator_valid else None
creator_firstname = msg_info.users[0].first_name if creator_valid and msg_info.users[0].first_name is not None else "Account Eliminato"
creator_username = msg_info.users[0].username if creator_valid and msg_info.users[0].username is not None else None
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and type(msg_info.messages[0].action) is MessageActionChannelMigrateFrom and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Sconosciuto"
location = str(e)
#this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(chat.full_chat, "kicked_count") else None
restrcited_users = chat.full_chat.banned_count if hasattr(chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Si</b>" if hasattr(chat_obj_info, "megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Si</b>" if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Si</b>" if hasattr(chat_obj_info, "restricted") and chat_obj_info.restricted else "No"
verified = "<b>Si</b>" if hasattr(chat_obj_info, "verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(creator_username) if creator_username else None
#end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None, works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b></b>\n"
caption += f"<b>🖊️Info della Chat</b>\n• 🆔: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"<b>• 📰Titolo del {chat_type} :</b> {chat_title}\n"
if former_title is not None: # Meant is the very first title
caption += f"<b>• 🥇Titolo Originario:</b> {former_title}\n"
if username is not None:
caption += f"<b>• 🏷Tipo di {chat_type}:</b> Publico\n"
caption += f"<b>• 🖇Link:<b> {username}\n"
else:
caption += f"<b>• 🏷Tipo di {chat_type} :</b> Privato\n"
if creator_username is not None:
caption += f"<b>• 👑Creatore:</b> {creator_username}\n"
elif creator_valid:
caption += f"<b>• 👑Creatore:</b> <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"<b>• 🕐Creato:</b> <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
caption += f"<b>• 🕐Creato:</b> <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"<b>• 📡Data Center ID:</b> {dc_id}\n"
if exp_count is not None:
chat_level = int((1+sqrt(1+7*exp_count/14))/2)
caption += f"<b>• 🏁Livello del {chat_type}:</b> <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"<b>• ✉️Messaggi Visibili:</b> <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"<b>• 📨Messaggi inviati:</b> <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"<b>• 📨Messaggi Inviati:</b> <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"<b>• 👥Membri:</b> <code>{members}</code>\n"
if admins is not None:
caption += f"<b>• ⚜Amministratori:</b> <code>{admins}</code>\n"
if bots_list:
caption += f"<b>• 🤖Bot</b>: <code>{bots}</code>\n"
if members_online:
caption += f"<b>• 👥💡Membri Online al Momento:</b> <code>{members_online}</code>\n"
if restrcited_users is not None:
caption += f"<b>• 👥🚨Utenti Limitati:</b> <code>{restrcited_users}</code>\n"
if banned_users is not None:
caption += f"<b>• 👥🚷Utenti Bannati:</b> <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"<b>• 🎨Sticker del {chat_type}:</b> <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"<b>• 🐌Modalità Lenta:</b> {slowmode}"
if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"<b>🏆SuperGruppo:<b> {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"<b>🚨Limitato:<b> {restricted}\n"
if chat_obj_info.restricted:
caption += f"<b>> 💻Piattaforma:<b> {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"<b>> 📝Motivo:<b> {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"<b>> 📖Testo:<b> {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "<b>⚠️Scam:<b> <b>Si</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"✅<b>Verificato da Telegram:<b> {verified}\n\n"
if description:
caption += f"<b>💬Descrizione:<b> \n<code>{description}</code>\n"
return caption
CMD_HELP.update({
"chatinfo":
".chatinfo [optional: <reply/tag/chat id/invite link>]\
\nUsage: Gets info of a chat. Some info might be limited due to missing permissions."
})
|
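fetch_info above repeats the hasattr-then-attribute-else-default conditional for almost every field. getattr with a default expresses the same logic more compactly; a tiny sketch on a stand-in object:

from types import SimpleNamespace

chat_obj_info = SimpleNamespace(title="Example", megagroup=True)  # stand-in for the Telethon entity

# Equivalent to: chat_obj_info.megagroup if hasattr(chat_obj_info, "megagroup") else False
is_supergroup = getattr(chat_obj_info, "megagroup", False)
members_online = getattr(chat_obj_info, "online_count", 0)

print(is_supergroup, members_online)  # True 0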
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gzip
import json
import re
import logging
import sys
import datetime as dt
import os
from operator import itemgetter
from statistics import median, mean
import argparse
from time import time, sleep
from itertools import groupby
from collections import namedtuple
from string import Template
from functools import partial
default_config = {"REPORT_SIZE": 1000,
"REPORT_DIR": "./reports",
"LOG_DIR": "./log"}
def parse_config(default_config: dict = None,
config_path: str = None):
"""
1. Checks whether main config exists at default path.
2. Updates default config keys.
3. Checks, whether any config file was passed in args.
4. Updates config keys, if it was passed.
5. Checks whether all dirs in config exist.
:param default_config: default config file.
:param config_path: main config file path.
:return: log_analyzer config.
"""
if not default_config:
return "No default config provided."
config = {k: v for k, v in default_config.items()}
if os.path.exists(config_path):
with open(config_path, mode='r') as f:
main_config = json.load(f)
else:
return "No config at given path."
config.update(main_config)
if not all((os.path.exists(config[k]) for k in config.keys() if k.endswith('DIR'))):
return "Some config path is broken."
return config
def find_latest_log(log_dir: str):
"""
Finds latest logfile in logs directory.
:param log_dir:
:return: name of the latest log or None if no log found.
"""
def get_log_date(log_name):
log_date = re.search('\d{8}', log_name)
return dt.datetime.strptime(log_date.group(0), "%Y%m%d") if log_date else None
log_name = log_date = None
for item in os.listdir(log_dir):
if 'nginx-access-ui.log' not in item:
continue
date = get_log_date(item)
if (not log_date) or (date > log_date):
log_name, log_date = item, date
return namedtuple('latest_log', ['log_name', 'log_date'])._make((log_name, log_date)
if log_name
else (None, None))
def log_finish_timestamp():
"""
Updates log_analyzer.ts with latest timestamp, if script has terminated successfully.
"""
with open("./monitoring/log_analyzer.ts", mode='w') as f:
f.write(f'{time()}')
sys.exit(0)
def check_if_report_exists(latest_log, report_dir: str):
"""
Checks if report for a certain log file already exists.
:param latest_log: latest log named tuple with log_date;
:param report_dir: path to reports;
:return: True if report already exists, False otherwise.
"""
return os.path.exists(f'{report_dir}/report-{latest_log.log_date.strftime('%Y.%m.%d')}.html')
def parse_log(log_path: str, parser) -> object:
"""
Parses a log file.
:param log_path: path to log file.
:return: log, parsed according to a given format.
"""
open_log = partial(gzip.open, mode='rt', encoding="utf-8") if log_path.endswith(".gz") else partial(open, mode='r')
with open_log(log_path) as f:
parse_results = [parser(line) for line in f]
return parse_results
def parse_line(line: str):
"""
Parses single record from a log according to log_pattern.
    If an error occurs while parsing request_time, the log line is considered broken and the function returns None.
    If an error occurs while parsing the URL but request_time is present,
    the URL is marked as 'bad_request' to allow further statistical checking.
:param line: UTF-8 encoded string of a log record.
:return: dictionary, made up according to regex_log_pattern or None.
"""
log_contents = {}
    request_time_pat = r' \d*[.]?\d*$'
    request_pat = r'"(GET|HEAD|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE|PATCH)\s(?P<url>.+?)\sHTTP/.+"\s'
    time_match = re.search(request_time_pat, line)
    if not time_match or not time_match[0].strip():
        # Request time is missing: treat the record as broken and return None, as documented above.
        return None
    log_contents['request_time'] = time_match[0].strip()
    request = re.findall(request_pat, line)
    log_contents['request'] = request[0][1] if request else 'bad_request'
    return log_contents
def make_report_table(access_logs: object, report_length: int = 1000):
"""
Calculates following statistics for all URLs within access log:
- count of visits to a URL;
- URL visit count percentage to total visits during log period;
- total time of response for a given URL;
- longest response time for a given URL;
- average response time for a given URL;
- median response time for a given URL;
- percentage of total response time for a given URL to total response time of all URLs.
:param access_logs: Parsed access log records.
:param report_length: Report length.
:return: Data to insert into report.
"""
logging.info('Preparing data for statistics calculation...')
urls = {}
logging.info('Calculating statistics...')
for url, group in groupby(sorted(access_logs, key=lambda x: x['request']), key=lambda x: x['request']):
req_times = [float(record['request_time']) for record in group]
urls[url] = {"url": url,
'count': len(req_times),
'time_sum': sum(req_times),
'time_max': max(req_times),
'time_med': median(req_times),
'time_avg': mean(req_times)}
total_time = sum([record['time_sum'] for record in urls.values()])
total_records = sum([record['count'] for record in urls.values()])
for url in urls.keys():
urls[url]['time_perc'] = urls[url]['time_sum'] / total_time
urls[url]['count_perc'] = urls[url]['count'] / total_records
report_table = sorted(list(urls.values()), key=lambda k: k['time_sum'], reverse=True)
return report_table[:report_length]
def render_html_report(table: list,
report_path: str,
latest_log_date) -> str:
"""
Renders html report from dummy 'report.html'.
:param table: Data to insert into dummy report.
:param report_path: Path to dummy 'report.html'.
:param latest_log_date: Latest log date, is used to make name of a new report.
:return: Returns name of freshly rendered report.
"""
with open(os.path.join(report_path, "report.html"), mode='r') as f:
report = f.read()
new_report_name = f"report-{latest_log_date.strftime("%Y.%m.%d")}.html"
if not os.path.exists(report_path):
os.makedirs(report_path)
with open(os.path.join(report_path, new_report_name), mode='w') as f:
f.write(Template(report).safe_substitute(table_json=json.dumps(table)))
return new_report_name
def main(config: dict = None):
"""
Main procedure flow:
1. Looks for latest log;
2. Checks if report for this log already exists;
3. Parses the log;
4. Makes report table;
5. Renders HTML report.
:param config: Configuration dict.
"""
# find latest access log
latest_log = find_latest_log(log_dir=config['LOG_DIR'])
if not all([latest_log.log_name, latest_log.log_date]):
logging.info(f"No logs found in LOG_DIR: {config["LOG_DIR"]}")
sys.exit(0)
logging.info(f"Latest log found: {latest_log.log_name}")
# check if report has already been created for this access log
if check_if_report_exists(latest_log=latest_log,
report_dir=config["REPORT_DIR"]):
logging.info(f"Report for latest logfile {latest_log.log_name} already exists.")
log_finish_timestamp()
logging.info("No report found for latest_log.")
# parse log
logging.info(f"Parsing {latest_log.log_name}...")
access_logs = parse_log(log_path=os.path.join(config["LOG_DIR"], latest_log.log_name), parser=parse_line)
if not access_logs:
logging.info("Log parsing failed.")
sys.exit(1)
# make a report
report_table = make_report_table(access_logs=access_logs,
report_length=config['REPORT_SIZE'])
if not report_table:
logging.info("Report table construction failed.")
sys.exit(1)
# render html report
logging.info("Rendering report...")
render_result = render_html_report(table=report_table,
report_path=config['REPORT_DIR'],
latest_log_date=latest_log.log_date)
if render_result:
logging.info(f"New report {render_result} successfully rendered.")
log_finish_timestamp()
else:
logging.error("Report render failed.")
sys.exit(1)
if __name__ == "__main__":
# check for config path, passed via --config
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('--config', default='./config/log_analyzer.conf')
config = parse_config(default_config=default_config,
config_path=argument_parser.parse_args().config)
if isinstance(config, str):
logging.error(config)
sys.exit(1)
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] %(levelname).1s %(message)s',
datefmt='%Y.%m.%d %H:%M:%S',
filename=config.get("MONITORING_LOG", None))
logging.info("Starting log_analyzer")
try:
main(config=config)
except Exception as e:
logging.error(f'Something is wrong: {e}')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gzip
import json
import re
import logging
import sys
import datetime as dt
import os
from operator import itemgetter
from statistics import median, mean
import argparse
from time import time, sleep
from itertools import groupby
from collections import namedtuple
from string import Template
from functools import partial
default_config = {"REPORT_SIZE": 1000,
"REPORT_DIR": "./reports",
"LOG_DIR": "./log"}
def parse_config(default_config: dict = None,
config_path: str = None):
"""
1. Checks whether main config exists at default path.
2. Updates default config keys.
3. Checks, whether any config file was passed in args.
4. Updates config keys, if it was passed.
5. Checks whether all dirs in config exist.
:param default_config: default config file.
:param config_path: main config file path.
:return: log_analyzer config.
"""
if not default_config:
return "No default config provided."
config = {k: v for k, v in default_config.items()}
if os.path.exists(config_path):
with open(config_path, mode='r') as f:
main_config = json.load(f)
else:
return "No config at given path."
config.update(main_config)
if not all((os.path.exists(config[k]) for k in config.keys() if k.endswith('DIR'))):
return "Some config path is broken."
return config
def find_latest_log(log_dir: str):
"""
Finds latest logfile in logs directory.
:param log_dir:
:return: name of the latest log or None if no log found.
"""
def get_log_date(log_name):
log_date = re.search('\d{8}', log_name)
return dt.datetime.strptime(log_date.group(0), "%Y%m%d") if log_date else None
log_name = log_date = None
for item in os.listdir(log_dir):
if 'nginx-access-ui.log' not in item:
continue
date = get_log_date(item)
if (not log_date) or (date > log_date):
log_name, log_date = item, date
return namedtuple('latest_log', ['log_name', 'log_date'])._make((log_name, log_date)
if log_name
else (None, None))
def log_finish_timestamp():
"""
Updates log_analyzer.ts with latest timestamp, if script has terminated successfully.
"""
with open("./monitoring/log_analyzer.ts", mode='w') as f:
f.write(f'{time()}')
sys.exit(0)
def check_if_report_exists(latest_log, report_dir: str):
"""
Checks if report for a certain log file already exists.
:param latest_log: latest log named tuple with log_date;
:param report_dir: path to reports;
:return: True if report already exists, False otherwise.
"""
return os.path.exists(f'{report_dir}/report-{latest_log.log_date.strftime("%Y.%m.%d")}.html')
def parse_log(log_path: str, parser) -> object:
"""
Parses a log file.
:param log_path: path to log file.
:return: log, parsed according to a given format.
"""
open_log = partial(gzip.open, mode='rt', encoding="utf-8") if log_path.endswith(".gz") else partial(open, mode='r')
with open_log(log_path) as f:
parse_results = [parser(line) for line in f]
return parse_results
def parse_line(line: str):
"""
Parses single record from a log according to log_pattern.
If error occurs in parsing request_time, the log line is considered broken and function returns None.
If error occurs in parsing URL, while request_time is present,
the URL is marked as 'parse_failed' to allow further statistical checking.
:param line: UTF-8 encoded string of a log record.
:return: dictionary, made up according to regex_log_pattern or None.
"""
log_contents = {}
request_time_pat = ' \d*[.]?\d*$'
request_pat = '"(GET|HEAD|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE|PATCH)\s(?P<url>.+?)\sHTTP/.+"\s'
log_contents['request_time'] = re.search(request_time_pat, line)[0].strip()
request = re.findall(request_pat, line)
log_contents['request'] = request[0][1] if request else 'bad_request'
if log_contents['request_time']:
return log_contents
else:
return None
def make_report_table(access_logs: object, report_length: int = 1000):
"""
Calculates following statistics for all URLs within access log:
- count of visits to a URL;
- URL visit count percentage to total visits during log period;
- total time of response for a given URL;
- longest response time for a given URL;
- average response time for a given URL;
- median response time for a given URL;
- percentage of total response time for a given URL to total response time of all URLs.
:param access_logs: Parsed access log records.
:param report_length: Report length.
:param error_threshold: Sets parsing error threshold.
Raises a warning if percent of urls, parsed correctly, is less than threshold.
:return: Data to insert into report.
"""
logging.info('Preparing data for statistics calculation...')
urls = {}
logging.info('Calculating statistics...')
for url, group in groupby(sorted(access_logs, key=lambda x: x['request']), key=lambda x: x['request']):
req_times = [float(record['request_time']) for record in group]
urls[url] = {"url": url,
'count': len(req_times),
'time_sum': sum(req_times),
'time_max': max(req_times),
'time_med': median(req_times),
'time_avg': mean(req_times)}
total_time = sum([record['time_sum'] for record in urls.values()])
total_records = sum([record['count'] for record in urls.values()])
for url in urls.keys():
urls[url]['time_perc'] = urls[url]['time_sum'] / total_time
urls[url]['count_perc'] = urls[url]['count'] / total_records
report_table = sorted(list(urls.values()), key=lambda k: k['time_sum'], reverse=True)
return report_table[:report_length]
def render_html_report(table: list,
report_path: str,
latest_log_date) -> str:
"""
Renders html report from dummy 'report.html'.
:param table: Data to insert into dummy report.
:param report_path: Path to dummy 'report.html'.
:param latest_log_date: Latest log date, is used to make name of a new report.
:return: Returns name of freshly rendered report.
"""
with open(os.path.join(report_path, "report.html"), mode='r') as f:
report = f.read()
new_report_name = f"report-{latest_log_date.strftime('%Y.%m.%d')}.html"
if not os.path.exists(report_path):
os.makedirs(report_path)
with open(os.path.join(report_path, new_report_name), mode='w') as f:
f.write(Template(report).safe_substitute(table_json=json.dumps(table)))
return new_report_name
def main(config: dict = None):
"""
Main procedure flow:
1. Looks for latest log;
2. Checks if report for this log already exists;
3. Parses the log;
4. Makes report table;
5. Renders HTML report.
:param config: Configuration dict.
"""
# find latest access log
latest_log = find_latest_log(log_dir=config['LOG_DIR'])
if not all([latest_log.log_name, latest_log.log_date]):
logging.info(f"No logs found in LOG_DIR: {config['LOG_DIR']}")
sys.exit(0)
logging.info(f"Latest log found: {latest_log.log_name}")
# check if report has already been created for this access log
    if check_if_report_exists(latest_log=latest_log,
                              report_dir=config["REPORT_DIR"]):
        logging.info(f"Report for latest logfile {latest_log.log_name} already exists.")
        log_finish_timestamp()
        return
    logging.info("No report found for latest_log.")
# parse log
logging.info(f"Parsing {latest_log.log_name}...")
access_logs = parse_log(log_path=os.path.join(config["LOG_DIR"], latest_log.log_name), parser=parse_line)
if not access_logs:
logging.info("Log parsing failed.")
sys.exit(1)
# make a report
report_table = make_report_table(access_logs=access_logs,
report_length=config['REPORT_SIZE'])
if not report_table:
logging.info("Report table construction failed.")
sys.exit(1)
# render html report
logging.info("Rendering report...")
render_result = render_html_report(table=report_table,
report_path=config['REPORT_DIR'],
latest_log_date=latest_log.log_date)
if render_result:
logging.info(f"New report {render_result} successfully rendered.")
log_finish_timestamp()
else:
logging.error("Report render failed.")
sys.exit(1)
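# Illustrative only: the configuration keys main() and the __main__ block rely on,
# inferred from their usage above; the concrete values here are placeholders.
_EXAMPLE_CONFIG = {
    "REPORT_SIZE": 1000,        # number of rows kept in the rendered report
    "REPORT_DIR": "./reports",  # where the report.html template and reports live
    "LOG_DIR": "./log",         # where access logs are searched for
    "MONITORING_LOG": None,     # optional file for the analyzer's own log output
}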
if __name__ == "__main__":
# check for config path, passed via --config
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('--config', default='./config/log_analyzer.conf')
config = parse_config(default_config=default_config,
config_path=argument_parser.parse_args().config)
if isinstance(config, str):
logging.error(config)
sys.exit(1)
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] %(levelname).1s %(message)s',
datefmt='%Y.%m.%d %H:%M:%S',
filename=config.get("MONITORING_LOG", None))
logging.info("Starting log_analyzer")
try:
main(config=config)
except Exception as e:
        logging.exception(f'Something went wrong: {e}')
|
'''
This module aims to make your code shorter.
Its high-level API will make you feel like you are giving orders and the machine is doing the work!
It also collects the most useful functions and methods from popular Python modules.
(Read the help of each function.)
Official documentation will be added soon.
'''
'''
Written By RX
Last Update: 1-15-2021
'''
__version__ = '3.0.0'
"""
< Release Changes >
 - style.log_* methods now have a time prefix by default
 - call = call_later
 - system.mac_address
 - io.selective_input choices can be a dict
 - class Internet
 - class date_time
"""
'''
TODO:
- average()
DATETIME:
X calendar_month_st replace day will be all noms
- Passed Time func
- System.(copy_to_clipboard & paste_from_clipboard)
- Other archive files in extract
- Call_later **kwargs
- Internet:
default_timeout
- files:
- files.join files.dirname
- Error in files.MEMBERS.all_all_*
- socket.socket()
- Screen recorder
- Make Sound
- mp3 tags (v 3.x)
 - registry editor (v 3.x)
- re module (v 3.x)
- Developer:
reload_module
Check_Type
add_module_dir
- Create Local Server
- ( win32api.LoadLibrary() - ctypes.PyDLL() )
X Threading
- Ready-obj module
- !style defaults
- Check 3rd-party modules imports
- pip install update
- Open Video
- Open Audio
'''
#START
import os as _os
import re as _re
import sys as _sys
import abc as _abc
import time as _time
import socket as _socket
import typing as _typing
import urllib as _urllib
import shutil as _shutil
import random as _random
import datetime as _datetime
import calendar as _calendar
import requests as _requests
import subprocess as _subprocess
from bs4 import BeautifulSoup
from typing import (Any,Iterable,Optional,Callable,List,Union)
import psutil as _psutil
argv = _sys.argv
ABC = _abc.ABC
ABCMeta = _abc.ABCMeta
####### 8888888888 888 d8b #######
#### 888 888 Y8P ####
#### 888 888 ####
#### 8888888 888 888 88888b. .d8888b 888888 888 .d88b. 88888b. .d8888b ####
#### 888 888 888 888 "88b d88P" 888 888 d88""88b 888 "88b 88K ####
#### 888 888 888 888 888 888 888 888 888 888 888 888 "Y8888b. ####
#### 888 Y88b 888 888 888 Y88b. Y88b. 888 Y88..88P 888 888 X88 ####
####### 888 "Y88888 888 888 "Y8888P "Y888 888 "Y88P" 888 888 88888P' #######
def p(text='', end='\n'):
'''
p is print!
But because we use it a lot, we\'ve decided to make it one letter.
Example:
p('Hello World')
==>Hello World
'''
print(text, end=end)
def repeat(function, n: int, **kwargs):
'''
Repeat function for n times with given parameters
for more info see the example below.
Example:
        repeat(rx.screenshot, 3, image_name='screenshot.png')
==> "function rx.screenshot will be executed 3 times."
'''
for _ in range(n):
function(**kwargs)
def wait(seconds):
'''
    Use this if you want your program to wait for a certain time.
Parameters
----------
seconds : [int/float]
time to sleep program in seconds
'''
_time.sleep(seconds)
sleep = wait
def cls():
'''
You can use this function if you want to clear the environment.
'''
import platform
if platform.system() == "Windows":
_os.system('cls')
else:
_os.system('clear')
clear = cls
def progressbar(
total=100, dashes_nom=100, delay=1, dashes_shape=' ', complete_shape='█',
pre_text='Loading: ', left_port='|', right_port='|'):
'''
    Use this function to make a custom in-app progress bar (not very useful on its own).
    (Use the Progressbar() generator instead if you need to run your own code while the bar updates.)
    Example:
        progressbar(
            total=100, dashes_nom=10, delay=1, dashes_shape='-',
            complete_shape='#', pre_text='Loading: ')
        ==> Loading: |####------| 40/100
'''
def Progressbar(it, prefix="", size=60, file=_sys.stdout):
count = len(it)
def show(j):
x = int(size*j/count)
file.write(f"{prefix}{right_port}{complete_shape*x}{dashes_shape*(size-x)}{left_port} {j}/{count}\r")
file.flush()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
file.write("\n")
file.flush()
for _ in Progressbar(range(total), pre_text, dashes_nom):
wait(delay)
def wait_for(button:str):
"""
If You Want to Wait For the User to Press a Key (Keyboard/Mouse)
Use This Function.
Parameters
----------
button : str
Button to click
Raises
------
ValueError
It will be raised when invalid button is given
"""
button = button.lower()
if button.lower() in ('middle', 'left', 'right', 'back', 'forward'):
if button == 'back':
button = 'x'
if button == 'forward':
button = 'x2'
import mouse
mouse.wait(button)
else:
import keyboard
try:
keyboard.wait(button)
except:
raise ValueError('Incorrect Button Name.')
def call_later(function:Callable, *args, delay=0.001):
"""
Call Your Function Later Even Between Other Operations
(This function uses threading module so be careful about
how, when, and on what object you are going to operate on)
Parameters
----------
function : Callable
this should be your function name
delay : float,int
delay before calling function in seconds, by default 0.001
"""
import threading
thread = threading.Thread(target=lambda: (sleep(delay), function(*args)))
thread.start()
#keyboard.call_later(function, args, delay)
call = call_later
def convert_bytes(num:int) -> str:
"""
Convert num to idiomatic byte unit.
Parameters
----------
num : int
number you want to convert (in Byte)
Returns
-------
str
number + unit
Examples
--------
>>> convert_bytes(200)
'200.0 bytes'
>>> convert_bytes(6000)
'5.9 KB'
>>> convert_bytes(80000)
'78.1 KB'
"""
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def restart_app(python3:bool = False):
"""
    This function closes the app and restarts it from the terminal
    (it replaces the current process via os.execv with 'python[3] *argv').
Parameters
----------
python3 : bool, optional
use 'python' or 'python3', by default False
"""
_os.execv(_sys.executable, ['python3' if python3 else 'python'] + _sys.argv)
_sys.exit()
def active_window_title() -> str:
"""
Get active windows title
(Usually terminal is active window title
but if during executing your script you change window
this will return new window title)
Returns
-------
str
string of active window title
"""
import pyautogui
return pyautogui.getActiveWindowTitle()
def open_image(path:str) -> None:
"""
Open image file with default image viewer.
(Mac OS is not supported yet)
Parameters
----------
path : str
path to the image file
Raises
------
OSError
It will be raised when you run this function in not supported OS
"""
import platform
if platform.system() == 'Windows':
_os.system(path)
elif platform.system() == 'Linux':
_subprocess.getoutput(f'xdg-open {path}')
else:
raise OSError('Only Windows and Linux are supported for this function.')
_BASENAME=''
def download(url:str, filename:str=_BASENAME, save_memory:bool=True,
progressbar:bool =True, prefix:str='Downloading'):
'''
Use this function to download files.
if filename is not given, it will be last part of the url.
filename can be path for saving file.
save_memory parameter is used to save memory in large files
(save directly to storage)
'''
import requests, urllib
if not filename:
filename = url.split('/')[-1]
if save_memory:
'''
with _urllib.request.urlopen(url) as response, open(filename, 'wb') as f:
_shutil.copyfileobj(response, f)
'''
'''
r = _requests.get(url, stream = True)
with open(filename,"wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
'''
if progressbar:
with open(filename, "wb") as f:
response = _requests.get(url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None:
f.write(response.content)
else:
dl = 0
done = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(33 * dl / total_length)
                        bar = "█" * done + " " * (33 - done)
                        _sys.stdout.write(f"\r{prefix} {filename}: |{bar}| {100-((33-done)*3)}%")
                        _sys.stdout.flush()
                        if 100-((33-done)*3) == 96:
                            _sys.stdout.write(f"\r{prefix} {filename}: |{bar}| 100%")
                            _sys.stdout.flush()
else:
with open(filename, "wb") as f:
response = _requests.get(url, stream=True)
for data in response.iter_content(chunk_size=4096):
f.write(data)
else:
def report(blocknr, blocksize, size):
if progressbar:
current = blocknr*blocksize
_sys.stdout.write("\rDownloading {1}: {0:.2f}%".format(100.0*current/size,filename))
def downloadFile(url):
_urllib.request.urlretrieve(url, filename, report)
downloadFile(url)
pass
if progressbar: print()
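# Illustrative only: a minimal sketch of calling download(); the URL below is a
# placeholder, and the file is saved next to the script unless a path is given.
def _example_download():
    download('https://example.com/archive.zip', filename='archive.zip', progressbar=True)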
def extract(filename:str, path:Optional[str]=None,files:Optional[Iterable[str]]=None,
password:Optional[str]=None) -> None:
"""
Extract Files from Zip files
By default it extracts all files
Parameters
----------
filename : str
path to .zip file
path : str, optional
path to extract files (by default: folder in current working directory)
files : Iterable[str], optional
Iterable of files you want to extract, by default None
password : str, optional
password if your .zip file is password protected, by default None
"""
import zipfile
    if isinstance(password, str):
        password = password.encode()  # zipfile expects the password as bytes
    zipfile.ZipFile(filename, 'r').extractall(path=path, members=files, pwd=password)
def screenshot(image_name:str='Screenshot.png'):
'''
This function will take a screenshot and save it as image_name
'''
import pyscreeze
return pyscreeze.screenshot(image_name)
def func_info(func:Callable):
"""
print some information about 'func'
Parameters
----------
func : Callable
function you want to get its information
"""
help(func) #func.__doc__
print('-'*30)
print('Module ', func.__module__)
print('-'*30)
try:
_code_ = str(func.__code__)
_code_ = _code_[_code_.index(',')+2:-1]
except AttributeError:
_code_ = f'No "file" and "line" information available '
_code_ += f' (I guess "{func}" is a built-in function)'
print(_code_)
def Progressbar(
total=60, dashes_nom=30, dashes_shape=' ', complete_shape='█',
pre_text='Loading: ', left_port='|', right_port='|'):
'''
Make your code more beautiful with progressbars!
this is generator function so use it like this:
    >>> for _ in Progressbar(100, 10):
do_this()
do_that()
Loading: |████ | 40/100
'''
echo = _sys.stdout
def show(j):
x = int(dashes_nom*j/total)
echo.write(
f"{pre_text}{right_port}{complete_shape*x}{dashes_shape*(dashes_nom-x)}{left_port} {j}/{total}\r")
echo.flush()
show(0)
for i, item in enumerate(range(total)):
yield item
show(i+1)
echo.write("\n")
echo.flush()
_MOUSE_X = 0
_MOUSE_Y = 0
def pixel_color(x=_MOUSE_X, y=_MOUSE_Y) -> tuple:
"""
Function to return color of pixel of screen in tuple of RGB
Parameters
----------
x : int
pixel of column x, by default last x of mouse
y : int
pixel of row y, by default last y of mouse
Returns
-------
tuple
tuple with 3 integers: (RED,GREEN,BLUE)
"""
import pyautogui
if not x:
x = pyautogui.position()[0]
if not y:
y = pyautogui.position()[1]
PIXEL = pyautogui.screenshot(region=(x, y, 1, 1))
COLOR = PIXEL.getcolors()
return COLOR[0][1]
def import_module(path:str):
"""
Import modules from files even if they are not .py
Parameters
----------
path : str
path to file to import it
Returns
-------
ModuleType
return module
"""
import importlib.machinery
import importlib.util
loader = importlib.machinery.SourceFileLoader('MOD', path)
spec = importlib.util.spec_from_loader(loader.name, loader)
mod = importlib.util.module_from_spec(spec)
loader.exec_module(mod)
return mod
######################
# TUPLE FUNC #
######################
def force(tpl: Any, *var: Any) -> tuple:
'''
(TUPLE FUNCTION)
It returns tpl with adding var(s) to it.
'''
return tuple(list(tpl)+[v for v in var])
#force= lambda tpl,*var: tuple(list(tpl)+[v for v in var])
def erase(tpl: tuple, *var: Any) -> tuple:
'''
(TUPLE FUNCTION)
It returns tpl with removing var(s) from it.
'''
#lstv= [v for v in var if v in tpl]
lstt= list(tpl)
for th in [v for v in var if v in tpl]:
lstt.remove(th)
return tuple(lstt)
def replace(tpl: tuple, ind, var: Any) -> tuple:
'''
(TUPLE FUNCTION)
Replace tpl[ind] with var
'''
tpl=list(tpl)
if type(ind) == str:
ind= tpl.index(ind)
tpl[ind]=var
return tuple(tpl)
def insert(tpl: tuple, ind, var: Any) -> tuple:
'''
(TUPLE FUNCTION)
Exactly like tpl[ind]=var in lists but for tuples.
'''
tpl=list(tpl)
if type(ind) == str:
ind= tpl.index(ind)
tpl.insert(ind,var)
return tuple(tpl)
def pop(tpl, index=-1):
    '''
    (TUPLE FUNCTION)
    Like list.pop but for tuples: returns a new tuple with the item at
    the given index removed (the original tuple is left unchanged).
    '''
    lst = list(tpl)
    lst.pop(index)
    return tuple(lst)
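# Illustrative only: how the tuple helpers above behave. Each of them returns a new
# tuple and leaves the original untouched.
def _example_tuple_helpers():
    tpl = (1, 2, 3)
    tpl = force(tpl, 4, 5)      # (1, 2, 3, 4, 5)
    tpl = erase(tpl, 2)         # (1, 3, 4, 5)
    tpl = replace(tpl, 0, 'a')  # ('a', 3, 4, 5)
    tpl = insert(tpl, 1, 'b')   # ('a', 'b', 3, 4, 5)
    tpl = pop(tpl)              # ('a', 'b', 3, 4)
    return tpl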
"""
def screen_recorder():
from screen_recorder_sdk import screen_recorder
#screen_recorder.enable_dev_log ()
screen_recorder.disable_log()
pid = 2456
screen_recorder.init_resources(pid)
screen_recorder.start_video_recording ('video1.mp4', 30, 8000000, True)
_time.sleep(10)
print('hello')
for i in range(100):
x= i**3
screen_recorder.stop_video_recording ()
screen_recorder.free_resources()
class Error(Exception):
'''
This module is for creating you own Error and Exception!
Useage:
>>> MyError = Error(name='MyError', msg='An Error occurred')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
MyError: An Error occurred
Also You can raise it directly:
>>> raise Error(name='MyError', msg='An Error occurred')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
MyError: An Error occurred
'''
def __new__(cls, msg, name=''):
Error.__name__ = name
return super(Error, cls).__new__(cls, msg)
def __init__(self, **kwargs):
pass
"""
####### .d8888b. 888 888 #######
#### d88P Y88b 888 888 ####
#### 888 888 888 888 ####
#### 888 888 888 8888b. .d8888b .d8888b .d88b. .d8888b ####
#### 888 888 888 "88b 88K 88K d8P Y8b 88K ####
#### 888 888 888 888 .d888888 "Y8888b. "Y8888b. 88888888 "Y8888b. ####
#### Y88b d88P 888 888 888 888 X88 X88 Y8b. X88 ####
####### "Y8888P" 888 888 "Y888888 88888P' 88888P' "Y8888 88888P' #######
class Random:
'''
random Variable Generator Class.
(ALL FUNCTIONS ARE STATIC METHODS)
'''
@staticmethod
def choose(iterator,k: int =1,duplicate=True):
'''
Return a random element from a non-empty sequence.
'''
if type(k) != int:
raise TypeError('k must be integer.')
if k == 1:
return _random.choice(iterator)
elif k > 1:
if duplicate:
return _random.choices(iterator,k=k)
else:
return _random.sample(iterator,k=k)
else:
            raise ValueError('k must be greater than 0')
@staticmethod
def integer(first_number,last_number):
'''
Return random integer in range [a, b], including both end points.
'''
return _random.randint(first_number,last_number)
@staticmethod
def O1(decimal_number=17):
'''
return x in the interval [0, 1)
'''
return round(_random.random(),decimal_number)
@staticmethod
def number(first_number,last_number):
'''
return x in the interval [F, L]
'''
return _random.uniform(first_number,last_number)
@staticmethod
def shuffle(iterable):
'''
Return shuffled version of iterable
'''
real_type = type(iterable)
new_iterable = list(iterable)
_random.shuffle(new_iterable)
if real_type in (set,tuple):
return real_type(new_iterable)
elif real_type == str:
return ''.join(new_iterable)
elif real_type == dict:
return {item:iterable[item] for item in new_iterable}
else:
return new_iterable
random = Random
class Files:
'''
(STATIC METHODS)\n
Actions and information about files.\n
(READ FUNCTIONS DOCSTRING)
GET INFORMATION:
- exists()
- size()
- abspath()
- mdftime()
- acstime()
- content (read function)()
- is file()
- is dir()
- is readonly()
- is hidden()
ACTIONS:
- remove()
- rename()
- move()
- copy()
- hide()
- read only()
- write()
'''
@staticmethod
def size(path):
'''
return size of the file in byte(s).
Also work on directories.
'''
return _os.path.getsize(path)
        # TODO: also test this on directories
@staticmethod
def remove(path,force=False):
'''
Use this to delete a file or a directory.
If force is True it will delete non-empty directories.
'''
if _os.path.isfile(path):
_os.remove(path)
else:
if force:
_shutil.rmtree(path)
else:
try:
_os.rmdir(path)
except OSError:
raise OSError(f"[WinError 145] The directory is not empty: '{path}'" + '\n' + ' '*23 +
'(Use force=True as an argument of remove function to remove non-empty directories.)') from None
delete = remove
@staticmethod
def rename(old_name,new_name):
'''Rename files with this function.'''
_os.rename(old_name,new_name)
@staticmethod
def abspath(path):
'''
return absolute path of given path.
'''
return _os.path.abspath(path)
@staticmethod
def exists(path):
'''
Search for the file And Returns a boolean.
if file exists: True
else: False
'''
return _os.path.exists(path)
@staticmethod
def mdftime(path):
'''
Get last modify time of the path.
'''
return _os.path.getmtime(path)
@staticmethod
def acstime(path):
'''
Get last access time of the path.
'''
return _os.path.getatime(path)
    # TODO: should be converted to a date
@staticmethod
def move(src,dst):
'''
Move (cut) file/directory from crs to dst.
'''
_shutil.move(src,dst)
#live_path= dst
        # TODO: does this work for folders as well?
@staticmethod
def copy(src,dest,preserve_metadata= True):
'''
Copy the file from src to destination.
preserve_metadata is for preserving metadata of file when copying.
(You can use it instead of rename too.
e.g:
copy('D:\\Test.py','E:\\Ali.py')
(It copies Test.py to E drive and renames it to Ali.py)
)
'''
if files.isdir(src):
_shutil.copytree(src,dest)
else:
if preserve_metadata: _shutil.copy2(src,dest)
else: _shutil.copy(src,dest)
@staticmethod
def hide(path,mode=True):
'''
Hide file or folder.
If mode==False: makes 'not hide'
(ONLY WINDOWS)
'''
try:
import win32api, win32con
except:
raise ImportError('Please install pywin32 via pip')
if mode:
win32api.SetFileAttributes(path,win32con.FILE_ATTRIBUTE_HIDDEN)
else:
win32api.SetFileAttributes(path,win32con.FILE_ATTRIBUTE_NORMAL)
@staticmethod
def read_only(path,mode=True):
'''
Make file attribute read_only.
If mode==False: makes 'not read_only'
'''
if type(mode)==bool:
from stat import S_IREAD,S_IWUSR
if mode==True:
_os.chmod(path, S_IREAD)
elif mode==False:
_os.chmod(path, S_IWUSR)
else:
            raise Exception('Second argument (mode) should be boolean.')
@staticmethod
def read(path):
'''
This can help you to read your file faster.
Example:
read('C:\\users\\Jack\\test.txt')
==> "Content of 'test.txt' will be shown."
'''
with open(path) as f:
FileR= f.read()
return FileR
@staticmethod
def write(file_path,text=None,mode='replace',start=''):
'''
        With this method you can change the content of a file.
        file_path: the file whose content you want to change.
        text: the content you want to write to the file.
        mode: type of writing:
            'continue' appends the content to the end of the file.
            'replace' (default) overwrites the file content.
        start: text prepended to 'text' when mode='continue'.
'''
if mode=='replace':
op= open(file_path,mode='w')
if text==None:
text= input('Type what you want.\n\n')
op.write(text)
op.close()
elif mode=='continue':
'''opr= open(file,mode='r')
FileR= opr.read()
op= open(file,mode='w')'''
op=open(file_path,'a')
if text==None:
text= input('Type what you want to add in the end of the file.\n\n')
op.write(start+text)
op.close()
else:
raise ValueError('mode can only be: replace(default) or continue Not "{0}"'.format(mode))
@staticmethod
def isdir(path):
return _os.path.isdir(path)
@staticmethod
def isfile(path):
return _os.path.isfile(path)
@staticmethod
def is_readonly(path):
'''
Return True if path is readonly else False.
(May Not Work in Linux)
'''
        return _subprocess.getoutput(f'dir /ar {path} >nul 2>nul && echo True || echo False').strip() == 'True'
@staticmethod
def is_hidden(path):
"""
Check whether a file is presumed hidden, either because
the pathname starts with dot or because the platform
indicates such.
Return True if File or Directory is hidden.
(Work on both Linux and Windows)
"""
import platform
full_path = _os.path.abspath(path)
name = _os.path.basename(full_path)
def no(path): return False
        platform_hidden = getattr(Files, 'is_hidden_' + platform.system(), no)
return name.startswith('.') or platform_hidden(full_path)
@staticmethod
def is_hidden_Windows(path):
import ctypes
res = ctypes.windll.kernel32.GetFileAttributesW(path)
assert res != -1
return bool(res & 2)
@staticmethod
def search_file(pattern, path='.\\',return_mode: Union['list','Generator']= 'list'):
'''
Search for files in path.
Return list or generator.
pattern:
- 'x.py' : search for 'x.py' in path.
- '*.py' : search for all files with .py extension in path.
- '*.*' : search for all files in path
- '**/*' : search for any file in path and also all sub-directories.
- '**/*.py: search for all python files in path and also sub-directories.
- 'mydir/**/*.py' : search for all python files in path/mydir/ and all of its sub-directories.
'''
import glob
if str(return_mode).lower() in ('list','generator'):
#print(_os.path.join(path,pattern))
if return_mode=='list': return glob.glob(_os.path.join(path,pattern), recursive=True)
else: return glob.iglob(_os.path.join(path,pattern), recursive=True)
else:
if type(return_mode)==str:
raise ValueError(f"return_mode van be 'list' or 'generator' not {return_mode}")
else:
raise TypeError(f"return_mode type should be 'str' and it should be in ['list', 'generator']")
@staticmethod
def search_content(path,word):
ALL= [val for sublist in [[_os.path.join(i[0], j) for j in i[2]] for i in _os.walk(path)] for val in sublist]
'''lst=[]
for file in ALL:
if word in rx.read(file):
lst.append(file)
return lst'''
return [file for file in ALL if word in open(file).read()]
@staticmethod
def mkdir(path):
path = _os.path.normpath(path)
NEW= ''
for FILE in path.split('\\'):
NEW+= FILE+'\\'
try: _os.mkdir(NEW)
except (FileExistsError,FileNotFoundError): pass
@staticmethod
def generate_tree(dir_path, level: int=-1, limit_to_directories: bool=False,
length_limit: int=1000, print_info: bool=True):
"""Given a directory Path object return a visual tree structure"""
from pathlib import Path
from itertools import islice
space= ' '; branch = '│ '; tee= '├── '; last= '└── '
dir_path = Path(dir_path) # accept string coerceable to Path
files = 0
directories = 0
def inner(dir_path: Path, prefix: str='', level=-1):
nonlocal files, directories
if not level: return # 0, stop iterating
if limit_to_directories: contents = [d for d in dir_path.iterdir() if d.is_dir()]
else: contents = list(dir_path.iterdir())
pointers = [tee] * (len(contents) - 1) + [last]
for pointer, path in zip(pointers, contents):
if path.is_dir():
yield prefix + pointer + path.name
directories += 1
extension = branch if pointer == tee else space
yield from inner(path, prefix=prefix+extension, level=level-1)
elif not limit_to_directories:
yield prefix + pointer + path.name
files += 1
RETURN=''
RETURN+=dir_path.name+'\n'
iterator = inner(dir_path, level=level)
for line in islice(iterator, length_limit): RETURN+=line+'\n'
if next(iterator, None): RETURN+=f'... length_limit, {length_limit}, reached, counted:'
if print_info: RETURN+=f'\n{directories} directories' + (f', {files} files' if files else '')
return RETURN
class MEMBERS:
@staticmethod
def all_exactdir(dir):
return _os.listdir(dir)
@staticmethod
def all_all_sep(dir):
return [i for i in _os.walk(dir)]
@staticmethod
def files_exactdir(dir,abspath=True):
if abspath:
return [dir+'/'+file_ for file_ in [i for i in _os.walk(dir)][0][2]]
return [i for i in _os.walk(dir)][0][2]
@staticmethod
def files_all(dir):
return [val for sublist in [[_os.path.join(i[0], j) for j in i[2]] for i in _os.walk(dir)] for val in sublist]
@staticmethod
def files_all_sep(dir):
return [[_os.path.join(i[0], j) for j in i[2]] for i in _os.walk(dir)]
@staticmethod
def dirs_exactdir(dir, abspath=True):
if dir.endswith('/'): dir=dir[:-1]
elif dir.endswith('\\'): dir=dir[:-1]
if abspath:
return [dir+'/'+folder for folder in [i for i in _os.walk(dir)][0][1]]
return [i for i in _os.walk(dir)][0][1]
@staticmethod
def dirs_all(dir):
return [TPL[0] for TPL in [i for i in _os.walk(dir)]]
files = Files
write = files.write
read = files.read
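# Illustrative only: typical calls to the Files helpers; 'notes.txt' is a placeholder
# file created and removed by the example itself.
def _example_files():
    write('notes.txt', 'hello')                     # create or overwrite the file
    write('notes.txt', ' world', mode='continue')   # append to it
    content = read('notes.txt')                     # 'hello world'
    py_files = files.search_file('*.py', path='.')  # all .py files in the current directory
    files.remove('notes.txt')
    return content, py_files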
class System:
'''
Some system actions and information.
- Information about ram, ip, terminal, etc.
- Some System Actions like Shutdown and Restart
(ALL FUNCTIONS ARE STATIC METHODS)
'''
@staticmethod
def accname():
'''
return account username you have logged in.
'''
return _os.getlogin()
@staticmethod
def pid():
'''
Get pid number of terminal and return it.
'''
return _os.getpid()
'''@staticmethod
def disk_usage(path):
####
return _shutil.disk_usage(path)'''
@staticmethod
def chdir(path):
'''
Change directory of terminal.
'''
_os.chdir(path)
@staticmethod
def SHUT_DOWN():
'''
Shut down the PC. (WINDOWS)
'''
_os.system("shutdown /s /t 1")
@staticmethod
def RESTART():
'''
Restart the PC. (WINDOWS)
'''
_os.system("shutdown /r /t 1")
@staticmethod
def terminal_size() -> tuple:
'''
Return terminal size in tuple (columns,rows)
'''
size= _os.get_terminal_size()
return (size.columns,size.lines)
@staticmethod
def cwd():
'''
Return a unicode string representing the current working directory.
'''
return _os.getcwd()
@staticmethod
def ip_global():
"""
        Return the global IP address using the http://ipinfo.io/ip API.
        Returns the global IP as a string.
"""
try:
import requests
new_session = _requests.session()
response = new_session.get("http://ipinfo.io/ip")
import re
ip_list = _re.findall(r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}", response.text)
new_session.close()
return ip_list[0]
except:
raise ConnectionError('No Internet Connection') from None
"""ip_global= internet.ip_global"""
@staticmethod
def ip_local():
"""
Return local ip of computer in windows by _socket. module
and in unix with hostname command in shell.
"""
#return [l for l in ([ip for ip in _socket.gethostbyname_ex(_socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [_socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
'''
s = _socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
'''
import platform
class NetworkError(Exception):
def __init__(self, message): super().__init__(message)
try:
ip = _socket.gethostbyname(_socket.gethostname())
if ip and ip != "127.0.1.1":
return ip
elif platform.system() != "Windows":
import subprocess
command = _subprocess.Popen(["hostname", "-I"],stdout=_subprocess.PIPE,stderr=_subprocess.PIPE,stdin=_subprocess.PIPE,shell=False)
response = list(command.communicate())
if len(response[0]) > 0:
return str(response[0])[2:-4]
raise NetworkError('No Network Connection')
raise NetworkError('No Network Connection')
except:
raise
"""ip_local= internet.ip_local"""
@staticmethod
def ram_total(convert=True):
"""
Return total ram of board as string
parameter convert: flag for convert mode (using of convert_byte function)
"""
response = list(_psutil.virtual_memory())
if convert:
return convert_bytes(int(response[0]))
return str(response[0])
@staticmethod
def ram_used(convert=True):
"""
Return how much ram is using.
parameter convert: flag for convert mode (convert with convert_byte function)
"""
response = list(_psutil.virtual_memory())
if convert:
return convert_bytes(int(response[3]))
return str(response[3])
@staticmethod
def ram_free(convert=True):
"""
Return how much ram is available.
parameter convert: flag for convert mode (convert with convert_byte function)
"""
response = list(_psutil.virtual_memory())
if convert:
return convert_bytes(int(response[1]))
return str(response[1])
@staticmethod
def ram_percent(ONLY_NOM=False):
"""
        Return the used RAM percentage as a number if ONLY_NOM, as a string with ' %' if not ONLY_NOM.
Parameter ONLY_NOM: flag for return type and value.
"""
response = list(_psutil.virtual_memory())
if ONLY_NOM:
return response[2]
return str(response[2]) + " %"
@staticmethod
def boot_time():
'''
Return the system boot time expressed in seconds since the epoch.
'''
return _psutil.boot_time()
@staticmethod
def device_name():
return _socket.gethostname()
@staticmethod
def ip_website(url):
'''get IP address of Web Site'''
return _socket.gethostbyname(url)
"""ip_webs= internet.ip_website"""
@staticmethod
def win10_notification(title,message,icon=None, duration=5) -> None:
'''
(THIS ONLY WORKS FOR "WINDOWS 10")\n
        Display a notification with title, message and icon for a specific duration.
'''
try:
from win10toast import ToastNotifier
ToastNotifier().show_toast(title,message,duration=duration)
except:
raise ImportError('Use "pip install win10toast" to install required module')
@staticmethod
def cpu_count(logical=True):
'''
Return the number of logical CPUs in the system
(same as _os.cpu_count() in Python 3.4).
If *logical* is False return the number of physical cores only
(e.g. hyper thread CPUs are excluded).
Return None if undetermined.
'''
return _psutil.cpu_count(logical)
@staticmethod
def pyshell_execute_bit():
'''to determine whether a Python shell is executing in 32bit or 64bit'''
#return platform.architecture()[0][:2] # SLOW
#return ctypes.sizeof(ctypes.c_voidp)*8
import struct
return struct.calcsize("P") * 8
@staticmethod
def pids() -> list:
'''Return a list of current running PIDs'''
return _psutil.pids()
@staticmethod
def cpu_percent() -> float:
'''
Return a float representing the current system-wide CPU utilization as a percentage.'''
return _psutil.cpu_percent()
@staticmethod
def pid_exists(pid) -> bool:
return _psutil.pid_exists(pid)
@staticmethod
def mac_address(formatted=False):
import uuid
mac = uuid.getnode()
if formatted:
return ':'.join(['{:02x}'.format((mac >> ele) & 0xff) for ele in range(0,8*6,8)][::-1])
return hex(mac)
system = System
from colored import fg as _fg
from colored import bg as _bg
from colored import attr as _attr
class Style:
'''
This class is for Changing text Color,BG & Style.
(Using colored module but easier)
- style.print to customize your print.
- style.switch to change terminal colors.
- style.switch_default for making everything default.
Also You Can Create style object.
This will allow you to:
- Because it returns string You can Add it to other strings
- Slicing and indexing (Without Color)
'''
    def __init__(self, text, color='default', BG='black'):
        try:
            self.color = color.lower()
            self.BG = BG.lower()
            #style = style.lower()
        except:
            self.color = color
            self.BG = BG
        if self.color == 'default':
            self.color = 7 #188
        self.text = text
        self.content = f"{_fg(self.color)}{_bg(self.BG)}{text}{_attr(0)}"
def __str__(self):
return self.content
def __repr__(self):
return self.content
def __add__(self, other):
#print(type(other))
if type(other)!=style:
return self.content+other
else:
return self.content+other.content
@staticmethod
def print(text='', color='default', BG='default', style=None, end='\n'):
'''
text(text='Hello World',color='red',BG='white')
output ==> 'Hello World' (With red color and white BG)
Styles: bold - underline - reverse - hidden
*bold and underline may not work. (Depends on terminal and OS)
'''
try:
color = color.lower()
BG = BG.lower()
style = style.lower() if style and type(style)==str else 0
except:
raise
if style == 'none':
style = 0
if color=='default' and BG!='default': # _bg & !clr
print(f'{_attr(style)}{_bg(BG)}{text}{_attr(0)}', end=end)
elif color!='default' and BG=='default': # !_bg & clr
print(f'{_attr(style)}{_fg(color)}{text}{_attr(0)}', end=end)
elif color=='default' and BG=='default': # !_bg & !clr
print(f'{_attr(style)}{text}{_attr(0)}', end=end)
elif color!='default' and BG!='default': # _bg & clr
print(f'{_attr(style)}{_bg(BG)}{_fg(color)}{text}{_attr(0)}', end=end)
@staticmethod
def switch(color='default', BG='black', style='None'):
'''
        Change color, BG and style until you call it again and change them.
'''
try:
color = color.lower()
BG = BG.lower()
style = style.lower()
except:
pass
if style == 'none':
style = 0
if color == 'default':
color = 7
print(f'{_attr(style)}{_bg(BG)}{_fg(color)}', end='')
@staticmethod
def switch_default():
'''Switch Terminal Attributes to its defaults'''
print(f'{_attr(0)}', end='')
reset = switch_default
@staticmethod
def log_success(text, color='green', BG='default', style=None, add_time=True):
#globals()['style'].print(text, color, BG, style=style)
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_info(text, color='grey_93', BG='default', style=None, add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_warning(text, color='gold_3a', BG='default', style=None, add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_error(text, color='red', BG='default', style=None, add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_critical(text, color='red_1', BG='default', style='bold', add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
style = Style
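# Illustrative only: using the Style helpers defined above; color names follow the
# 'colored' package palette.
def _example_style():
    style.print('operation finished', color='green', style='bold')
    style.log_warning('disk is almost full')            # timestamped warning line
    banner = style('RX', color='red') + ' utilities'    # style objects concatenate with str
    print(banner)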
class Record:
'''
    Use this class to record how long actions take, in seconds.
    Usage:
        start = record()
        # some code here...
        finish = start.lap()
        print(finish) ==> 0.25486741
        # some more code here...
        finish = start.lap() ==> 0.4502586
        start.laps --> [0.25486741, 0.4502586]
    Use start.reset() to clear the recorded laps.
'''
def __init__(self):
self.__start = _time.time()
self.laps = []
def __call__(self):
return f'Laps: {self.laps}'
def __repr__(self):
return f'Laps: {self.laps}'
def lap(self, save=True, Round=15):
'''
Return time passed from creating time of self.
(Read 'record' Doc String)
If save is True, time will be added to self.laps
'''
lp = _time.time() - self.__start
lp = round(lp,Round)
if save:
self.laps.append(lp)
return lp
def reset(self, reset_start=False):
'''
This will erase self.laps
If reset_start is True, start time will reset too.
'''
self.laps = []
if reset_start:
self.__start = _time.time()
def last_lap(self, save=True):
'''
Return time passed from last lap
(If self.laps is False then from start_time)
'''
ret = (self.lap(False)-self.laps[-1]) if self.laps else self.lap(False)
if save:
self.laps.append(self.lap())
return ret
@staticmethod
def timit(code,setup,times,globals_):
'''
Run the 'code' for 'times' times and return time it needs (all, not once)
(If you need any initialization for your 'code', put it in setup arg)
'''
import timeit
return timeit.timeit(stmt=code,setup=setup,number=times,globals=globals_)
record = Record
class Terminal:
"""
Run Terminal Commands with Terminal functions
(ALL FUNCTIONS ARE STATIC METHODS)
"""
@staticmethod
def run(command:str) -> None:
'''
Execute the command in a subshell
(NO RETURN, LIVE EXECUTION, OUTPUT WILL BE PRINTED)
'''
_os.system(command)
@staticmethod
def getoutput(command:str) -> str:
'''
Return output of executing command in a shell
(RETURN STR, RETURN AFTER EXECUTING CODE)
'''
return _subprocess.getoutput(command)
terminal = Terminal
class Decorator:
class Check_Type:
"""
Function decorator for developers\n
Use this decorator to check if user gives right argument type\n
You need to annotate argument type when defining it.\n
Supported Types:
* str
* list
* set
* dict
* tuple
* User-Defined Objects
Typing Module Supported Types:
* Iterable
* Callable
        * Generator
* Container
* Any
(MORE TYPES SOON ...)
'''
sig = signature(foo)
print(str(sig))
print(str(sig.parameters['b']))
print(sig.parameters['b'].annotation)
####
sig = signature(foo)
for param in sig.parameters.values():
if (param.kind == param.KEYWORD_ONLY and
param.default is param.empty):
print('Parameter:', param.annotation)
'''
"""
auto_correct = False
def __init__(self, function):
self.function = function
def __call__(self, *args, **kwargs):
special_types = ('callable', 'iterable', 'generator','container', 'any')
i=-1
__local__= list(locals()['args'])
annots= list(self.function.__annotations__.keys())
def extra_remover(correct):
# Typing module annots check
if correct.startswith('typing.'):
correct = correct[7:].lower()
# built-in types check
elif correct.startswith('<class '):
correct = correct[8:-2]
return correct
def check_specials(TYPE, LOCAL_I):
import inspect
wrong = ''
if TYPE == 'generator':
if inspect.isgeneratorfunction(LOCAL_I) or inspect.isgenerator(LOCAL_I):
return
else:
correct = 'generator'
elif TYPE == 'callable':
if callable(LOCAL_I):
return
else:
correct = 'callable'
elif TYPE == 'iterable':
if type(LOCAL_I) in (list, tuple, set, str):
print(type(LOCAL_I))
return
else:
correct = 'iterable'
elif TYPE == 'container':
if type(LOCAL_I) in (list,set,dict,tuple):
return
else:
correct = 'container'
elif TYPE == 'any':
return
wrong = extra_remover(str(type(LOCAL_I))) if not wrong else wrong
func_name = self.function.__name__
Error= TypeError(f"'{func_name}()' argument '{ARG}' must be '{correct}' (not '{wrong}')")
raise Error
for ARG in annots:
i += 1
try:
LOCAL_I = __local__[i]
correct = str(self.function.__annotations__[ARG])
'''if correct.startswith('typing.Union'):
correct = eval(correct[12:])
if type(correct) != list:
correct = [correct]'''
correct = extra_remover(correct)
if correct in special_types:
print(type(LOCAL_I))
check_specials(correct,LOCAL_I)
# Builtins and other Libraries objects
elif not eval(correct) == type(LOCAL_I):
if Check_Type.auto_correct:
try:
__local__[i] = eval(correct)(LOCAL_I)
continue
except ValueError:
pass
wrong = extra_remover(str(type(LOCAL_I)))
#correct = str(self.function.__annotations__[ARG])#[8:-2]
correct = extra_remover(correct)
func_name = self.function.__name__
Error= TypeError(f"'{func_name}()' argument '{ARG}' must be '{correct}' (not '{wrong}')")
raise Error
except (ValueError,IndexError):
pass#raise
except NameError:
raise
return self.function(*__local__, **kwargs)
decorator_all:Callable = None
@staticmethod
def attach_to_all(cls):
import inspect
for name, method in inspect.getmembers(cls):
if (not inspect.ismethod(method) and
not inspect.isfunction(method) ) or (
inspect.isbuiltin(method)):
continue
#print("Decorating function %s" % name)
setattr(cls, name, Decorator.decorator_all(method))
return cls
abstractmethod = _abc.abstractmethod
_registered_functions = {} #:Dict[str, Any]
class _MultiMethod(object):
def __init__(self, name):
self.name = name
self.typemap = {}
def __call__(self, *args):
types = tuple(arg.__class__ for arg in args)
function = self.typemap.get(types)
if function is None:
raise TypeError("no match: ",types)
return function(*args)
def register(self, types, function):
self.typemap[types] = function
def overload(*types):
def register(function):
name = function.__name__
mm = decorator._registered_functions.get(name)
if mm is None:
mm = decorator._registered_functions[name] = Decorator._MultiMethod(name)
mm.register(types, function)
return mm
return register
decorator = Decorator
Check_Type = Decorator.Check_Type
overload = Decorator.overload
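# Illustrative only: applying the decorators defined above. Check_Type validates
# annotated positional arguments at call time; overload dispatches on argument types.
def _example_decorators():
    @Check_Type
    def greet(name: str, times: int):
        return ' '.join([name] * times)
    @overload(int, int)
    def combine(a, b):
        return a + b
    @overload(str, str)
    def combine(a, b):
        return f'{a} {b}'
    return greet('hi', 2), combine(1, 2), combine('a', 'b')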
class IO:
@staticmethod
def wait_for_input(prompt,SS:list=[]):
answer= ''
try:
while not answer:
answer = input(prompt).strip()
except (EOFError,KeyboardInterrupt):
style.print('EXITING...','red')
exit()
return answer
@staticmethod
def selective_input(prompt,choices,default=None,ignore_case=False,error=True,invalid='Invalid input'):
        if type(choices) == dict:
            Choices = list(choices.keys()) + list(choices.values())
        else:
            Choices = list(choices)
        if ignore_case:
            Choices = [item.lower() if type(item) == str else item for item in Choices]
while True:
inp = input(prompt)
inp = inp.lower() if ignore_case else inp
if not inp or inp not in Choices:
if error:
style.print(invalid, 'red')
else:
if default:
inp = default
break
else:
break
if type(choices) == dict:
try:
inp = choices[inp]
except KeyError:
pass
return inp
@staticmethod
def yesno_input(prompt,default=None):
error= not bool(default)
        return io.selective_input(prompt, ['y','yes','n','no'], default, error=error)
@staticmethod
def Input(prompt:str ='', default_value:str =''):
'''
Make Default Value For Your Input!
(THIS ONLY WORK ON WINDOWS (SORRY))
prompt is what you want and it's input(prompt) .
default_value is what there should be after prompt.
E.g:
>>> Input('Is rx7 Library Easy to Learn? ', 'Yes')
Is rx7 Library Easy to Learn? Yes
'''
import win32console
_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)
keys = []
for c in str(default_value):
evt = win32console.PyINPUT_RECORDType(win32console.KEY_EVENT)
evt.Char = c
evt.RepeatCount = 1
evt.KeyDown = True
keys.append(evt)
_stdin.WriteConsoleInput(keys)
return input(str(prompt))
@staticmethod
def getpass(prompt):
'''
Prompt for a password, with echo turned off.
'''
import getpass as Getpass
return Getpass.getpass(prompt=prompt)
io = IO
Input = default_input = io.Input
getpass = password_input = io.getpass
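# Illustrative only: the interactive IO helpers; these block waiting for keyboard
# input, so they are only shown here, not executed automatically.
def _example_io():
    answer = io.selective_input('Continue? [y/n] ', {'y': True, 'n': False},
                                default='n', error=False)
    name = io.wait_for_input('Your name: ')
    return answer, name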
class Tuple:
'''
(Note That This is tuple of RX7 Module So it Has More Features!)\n
(This is Not Built-in immutable sequence.)\n
If no argument is given, the constructor returns an empty tuple.\n
    There is a *var argument so you can add as many objects as you need.\n
Any Built-in object is accepted. (Not tested on third-party objects.)\n
Beside built-in features of tuple, this supports:
+ You Can Add objects to your tuple now.
+ Also You Can Delete Them.
+ Replace Them.
+ Like lists, Tuple supports item assigning. ( tpl[2]='hello' )
(Tuple Unpacking is Supported.)
'''
#############################
def __init__(self,*var: Any, one_item=False):
if not one_item:
self.__content= tuple(var)
else:
self.__content=[]
for item in var:
for member in item:
self.__content.append(member)
self.__content= tuple(self.__content)
def __str__(self):
return str(self.__content)
def __repr__(self):
return str(self.__content)
#############################
#############################
def add(self,*var: Any):
'''
This will add var(s) to self.
'''
self.__content= tuple(list(self.__content)+[v for v in var])
#force= lambda tpl,*var: tuple(list(tpl)+[v for v in var])
force= add
def remove(self,*var: Any):
'''
It will remove var(s) from self.
'''
#lstv= [v for v in var if v in tpl]
lstt= list(self.__content)
for th in [v for v in var if v in self.__content]:
lstt.remove(th)
self.__content= tuple(lstt)
erase= remove
    def pop(self, index=-1):
        popped = self.__content[index]
        self.__content = pop(self.__content, index)
        return popped
#############################
#############################
def replace(self, ind: Union[int,Any], var: Any):
'''
Replace self[ind] with var.
'''
tpl=list(self.__content)
if type(ind) == str:
ind= tpl.index(ind)
tpl[ind]=var
self.__content= tuple(tpl)
def __setitem__(self,index,value,replace=False):
if not replace:
tpl=list(self.__content)
if type(index) == str:
                index= tpl.index(index)
tpl.insert(index,value)
self.__content= tuple(tpl)
else:
self.replace(index,value)
def __getitem__(self,index):
return self.__content[index]
#############################
def __add__(self,other):
return self.__content + other
def __contains__(self,var):
return var in self.__content
#############################
#############################
def __bool__(self):
return bool(len(self.__content))
def __hash__(self):
return hash(self.__content)
def __len__(self):
return len(self.__content)
#############################
#############################
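# Illustrative only: the mutable Tuple defined above, shown next to its str() form.
def _example_Tuple():
    t = Tuple(1, 2, 3)
    t.add(4)            # <1, 2, 3, 4>
    t.remove(2)         # <1, 3, 4>
    t.replace(0, 'a')   # <'a', 3, 4>
    first = t[0]        # 'a'
    return t, first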
_ReqConErr = _requests.exceptions.ConnectionError
class Internet:
@staticmethod
def is_connected(website='http://x.com/'):
'''
        Check for an internet connection by trying to connect to a web site.
        ( Why http://x.com/ as the default web site?
          Its page has almost no extra content to load
          (compare the x.com and google.com HTML source code),
          which makes the check a lot faster.
)
'''
try:
_urllib.request.urlopen(website)
return True
except:
return False
def connection_checker(func):
"""Decaorator Which Checks Internet Connection before calling a function
Parameters
----------
func : Function
function which you are going to check if
there is internet connection before call it
"""
def inside(*args,**kwargs):
if not internet.is_connected():
raise ConnectionError('No internet connection') from None
return func(*args,**kwargs)
return inside
@staticmethod
def ip_global() -> str:
"""
Return your global ip by http://ipinfo.io/ip api.
"""
new_session = _requests.session()
response = new_session.get("http://ipinfo.io/ip")
ip_list = _re.findall(r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}", response.text)
new_session.close()
return ip_list[0]
@staticmethod
def ip_local() -> str:
"""
Return local ip of computer in windows by _socket. module
and in linux with hostname command in shell.
"""
#return [l for l in ([ip for ip in _socket.gethostbyname_ex(_socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [_socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
'''
s = _socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
'''
import platform
class NetworkError(Exception):
def __init__(self, message): super().__init__(message)
try:
ip = _socket.gethostbyname(_socket.gethostname())
if ip and ip not in ("127.0.1.1","127.0.0.1"):
return ip
elif platform.system() != "Windows":
command = _subprocess.Popen(["hostname", "-I"],stdout=_subprocess.PIPE,stderr=_subprocess.PIPE,stdin=_subprocess.PIPE,shell=False)
response = list(command.communicate())
if len(response[0]) > 0:
return str(response[0])[2:-4]
raise NetworkError('No Network Connection')
raise NetworkError('No Network Connection')
except:
raise
@staticmethod
def url_exists(URL) -> bool:
'''
check if url exists (with 'requests' module)
(NEED HTTP[S])
'''
try:
request = _requests.get(URL)
except _ReqConErr:
raise ConnectionError('No internet connection') from None
#print(response.status_code < 400)
if request.status_code == 200:
return True
else:
return False
@staticmethod
def ip_website(URL) -> str:
'''
get IP address of Web Site\n
(Without http[s])
'''
try:
return _socket.gethostbyname(URL)
except _socket.gaierror:
if internet.is_connected():
class NotExistsError(Exception):
def __init__(self):
                        super().__init__('URL Does Not Exist')
raise NotExistsError from None
else:
raise ConnectionError from None
@staticmethod
def url_links(URL) -> list:
'''
        Get all links that are used in a specific URL
(All "a" tags from html source)
(Needs 'http[s]')
''' #html.parser
try:
soup= BeautifulSoup(_requests.get(URL).text,features="lxml")
LINKS= []
for link in soup.find_all('a'):
LINKS.append(link.get('href'))
return LINKS
except _ReqConErr:
raise ConnectionError('No internet connection') from None
@staticmethod
def find_urls(string) -> list:
'''
find all urls in a string and returns list of them
(urls should start with http[s])
'''
        url = _re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
return url
@staticmethod
def is_url(URL) -> bool:
'''
check if a string is url (WITH HTTP[S])
'''
        search = _re.search(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', URL)
        # alternative pattern: '(http[s]?://)?([Ww]{3}\.)?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
if search and len(search.group())==len(URL):
return True
else:
return False
@staticmethod
def open_browser(url,new_tab=True):
import webbrowser
if new_tab:
webbrowser.open_new_tab(url)
else:
webbrowser.open(url)
"""
@staticmethod
def whois(URL):
'''
return whois lookup of a website
(WITHOUT HTTPS)
'''
try:
import whois
WHO = whois.query(URL)
WHOIS = WHO.dict
return {i:WHOIS[i] for i in WHOIS}
except _socket.gaierror:
raise ConnectionError('No internet connection') from None
"""
internet = Internet
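# Illustrative only: a few Internet helpers; the URLs are placeholders, and the
# is_connected()/ip_global() calls need a working network connection.
def _example_internet():
    if internet.is_connected():
        print(internet.ip_global())                  # your public IP address
    print(internet.find_urls('docs at https://example.com/a and https://example.com/b'))
    print(internet.is_url('https://example.com'))    # True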
class DateTime:
_NOW= 0
_NOW_YEAR= 0
_NOW_MONTH= 0
_NOW_DAY= 0
_NOW_HOUR= -1
_NOW_MINUTE= -1
_NOW_SECOND= -1
def NOW():
_NOW= _time.localtime()
_NOW_YEAR= _NOW.tm_year
_NOW_MONTH= _NOW.tm_mon
_NOW_DAY= _NOW.tm_mday
_NOW_HOUR= _NOW.tm_hour
_NOW_MINUTE= _NOW.tm_min
_NOW_SECOND= _NOW.tm_sec
return _datetime.datetime(_NOW_YEAR,_NOW_MONTH,_NOW_DAY,_NOW_HOUR,_NOW_MINUTE,_NOW_SECOND)
now = NOW
def normalize(date=[],time=[]):
now = date_time.NOW()
try:
if not date[0]: date[0]= now.year
if type(date[1]) == str:
try:
date[1]= date_time.month_dic[date[1].lower()]
except KeyError:
raise ValueError("Wrong Month Name") from None
if not date[1]: date[1]= now.month
if not date[2]: date[2]= now.day
except IndexError:
pass
try:
            if time[0]<0: time[0]= now.hour
            if time[1]<0: time[1]= now.minute
            if time[2]<0: time[2]= now.second
except IndexError:
pass
return [date,time]
Weekday_Names= ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
month_lst= ['january','february','march','april','may','june',
'july','august','september','october','november','december']
    month_dic= {month: month_nom for month_nom, month in enumerate(month_lst, start=1)}
def __init__(self,year=_NOW_YEAR,month=_NOW_MONTH,day=_NOW_DAY,hour=_NOW_HOUR,minute=_NOW_MINUTE,second=_NOW_SECOND,first_week_day=0):
'''
.: Working With Date and Time :.
- Include Both Static Methods and Class Methods
- Get NOW Time
- Show in Calendar
- Next and Previous Months in Calendar
- Determine Time Passed From Specific Date
- Calendar Supports Setting First Day of the Week
'''
"""
Now = date_time.NOW()
if not year : year=Now.year
if not month: month=Now.month
if not day : day=Now.day
if hour<0 : hour=Now.hour
if minute<0 : minute=Now.minute
if second<0 : second=Now.second
"""
_norm = date_time.normalize([year,month,day],[hour,minute,second])
year,month,day = _norm[0]
hour,minute,second = _norm[1]
if type(month)==str:
try:
month= date_time.month_dic[month.lower()]
except KeyError:
raise ValueError("Wrong Month Name") from None
self.date= _datetime.date(year,month,day)
self.year=year; self.month=month; self.day=day
self.time= (hour,minute,second)
self.hour=hour; self.minute=minute; self.second=second
self.weekday= date_time.get_weekday(self.year,self.month,self.day)
self.weekday_name= date_time.get_weekday(self.year,self.month,self.day,True)
self.week_nom= date_time.get_weeknom(self.year,self.month,self.day)
#self.first_week_day= first_week_day
_calendar.setfirstweekday(first_week_day)
self.calendar= str(_calendar.month(year, month)).replace(str(day),style(str(day),'green').content)
self.calendar_month= str(_calendar.month(year, month))
self.calendar_year_all=str(_calendar.calendar(year))
self.calendar_year= [_calendar.month(year,i) for i in range(1,13)]
self.calendar_next_all= [_calendar.month(year,i) for i in range(self.month+1,13)]
self.calendar_prev_all= [_calendar.month(year,i) for i in range(1,self.month)]
self.calendar_position_next_year= str(_calendar.month(year+1, month)).replace(str(day),style(str(day),'green').content)
self.calendar_position_prev_year= str(_calendar.month(year-1, month)).replace(str(day),style(str(day),'green').content)
def setfirstweekday(self,day):
if type(day)==int and day<7:
date_time.Weekday_Names= date_time.Weekday_Names[day:]+date_time.Weekday_Names[:day]
elif type(day)==str:
day= date_time.Weekday_Names.index(day)
date_time.Weekday_Names= date_time.Weekday_Names[day:]+date_time.Weekday_Names[:day]
else:
if type(day)==int:
                raise ValueError('Invalid number. Day number should be in range(7)')
else:
raise TypeError(f"Inappropriate Type For 'day'. day can be 'str' or 'int' not {type(day)}")
_calendar.setfirstweekday(day)
        self.calendar= str(_calendar.month(self.year, self.month)).replace(str(self.day),style(str(self.day),'green').content)
self.calendar_month= str(_calendar.month(self.year, self.month))
self.calendar_year_all=str(_calendar.calendar(self.year))
self.calendar_year= [_calendar.month(self.year,i) for i in range(1,13)]
self.calendar_next_all= [_calendar.month(self.year,i) for i in range(self.month+1,13)]
self.calendar_prev_all= [_calendar.month(self.year,i) for i in range(1,self.month)]
        self.calendar_position_next_year= str(_calendar.month(self.year+1, self.month)).replace(str(self.day),style(str(self.day),'green').content)
        self.calendar_position_prev_year= str(_calendar.month(self.year-1, self.month)).replace(str(self.day),style(str(self.day),'green').content)
self.weekday= date_time.get_weekday(self.year,self.month,self.day)
self.weekday_name= date_time.get_weekday(self.year,self.month,self.day,True)
self.week_nom= date_time.get_weeknom(self.year,self.month,self.day)
@staticmethod
def today():
dt = date_time.NOW()
return (dt.year,dt.month,dt.day)
@staticmethod
def calender_year(year=_NOW_YEAR):
if not year: year=date_time.NOW().year
return [_calendar.month(year,i) for i in range(1,13)]
@staticmethod
def calendar_month_st(month=_NOW_MONTH,year=_NOW_YEAR,day=0):
year,month = date_time.normalize([year,month])[0]
if not day:
return str(_calendar.month(year, month))
else:
return str(_calendar.month(year, month)).replace(str(day),style(str(day),'green').content)
@staticmethod
def passed_date(f_date,l_date=_NOW,return_time='day'):
if not l_date: l_date=date_time.NOW()
f_date = _datetime.datetime(*f_date)
return_time= return_time.lower()
if return_time in ('day','month','year','hour','minute','second'):
DELTA= l_date - f_date
if return_time == 'year':
try:
_return = _re.search(r'(?P<X>(-)?\w+) day',str(DELTA/365)).group('X')
except:
_return = None
#_return = str(DELTA/365)
elif return_time == 'month':
_return = _re.search(r'\w+',str(DELTA/30)).group()
elif return_time == 'day':
_return = str(DELTA)[:-14]
elif return_time =='hour':
_return = str(DELTA*24)[:-14]
elif return_time == 'minute':
_return = str(DELTA*1440)[:-14]
elif return_time == 'second':
_return = str(DELTA*3600)[:-14]
if _return: return _return
else: return 0
else:
raise ValueError("return_time should be in ('year', 'month', 'day', 'hour', 'minute', 'second')")
passed_time = passed_date
'''@staticmethod
def passed_time(year=1970,month=1,day=1,hour=0,minute=0,second=0,return_time='second'):
pass'''
@staticmethod
def convert_epoch_to_local(second=_time.time()):
return _time.ctime(second)
@staticmethod
def get_weekday(year=_NOW_YEAR,month=_NOW_MONTH,day=_NOW_DAY,return_name=False):
"""
First day is Monday and the numbers starts from 0
"""
year,month,day = date_time.normalize([year,month,day])[0]
if return_name:
return date_time.Weekday_Names[_datetime.date(year,month,day).weekday()]
else:
return _datetime.date(year,month,day).weekday()
@staticmethod
def get_weeknom(year=_NOW_YEAR,month=_NOW_MONTH,day=_NOW_DAY):
"""
Returns 53 if First week is from last year
"""
year,month,day = date_time.normalize([year,month,day])[0]
return _datetime.date(year,month,day).isocalendar()[1]
@staticmethod
def calendar_show_week(week_nom,year=_NOW_YEAR):
year = date_time.normalize([year])[0][0]
week= week_nom
for i in list(range(1,8))[::-1]:
if date_time.get_weeknom(year,1,i)==1:
FIRST_WEEK_DAYS= len(list(range(i)))
break
day= (week-1)*7 - (6-FIRST_WEEK_DAYS)
mnth= 1
true=False
while not true:
try:
if _calendar.monthrange(year,mnth)[1]<day:
mnth+=1
day-= _calendar.monthrange(year,mnth)[1]
else:
true= True
except _calendar.IllegalMonthError:
class BadWeekNumber(Exception):
def __init__(self, message='Week Number is Higher Than Year Weeks.'): super().__init__(message)
raise BadWeekNumber from None
new= date_time(year,mnth,day)
cal= new.calendar_month.splitlines()
for item in cal:
if str(new.day) in item and item != cal[0]:
INDEX= cal.index(item);COLORED_WEEK= style(item,'green');break
WEEK_WITH_COLOR= '\n'.join(cal[:INDEX]+[str(COLORED_WEEK)]+cal[INDEX+1:])
return WEEK_WITH_COLOR
@staticmethod
def get_year():
return _time.localtime().tm_year
@staticmethod
def get_month():
return _time.localtime().tm_mon
@staticmethod
def get_day_of_month():
return _time.localtime().tm_mday
@staticmethod
def get_day_of_week():
return _time.localtime().tm_wday
@staticmethod
def get_day_of_year():
return _time.localtime().tm_yday
@staticmethod
def get_hour():
return _time.localtime().tm_hour
@staticmethod
def get_minute():
return _time.localtime().tm_min
@staticmethod
def get_second():
return _time.localtime().tm_sec
date_time = DateTime
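# Example sketch of the date_time helpers defined above. The dates used here are
# arbitrary placeholder values, not anything taken from the original project.
def _demo_date_time():
    print(date_time.today())                                # (year, month, day) of today
    print(date_time.get_weekday(2021, 1, 15, True))         # weekday name of 2021-01-15
    print(date_time.calendar_month_st(month=1, year=2021))  # plain-text calendar of Jan 2021
    print(date_time.passed_date((2000, 1, 1)))              # days passed since 2000-01-01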
_Auto = 0
class _Lang:
class Constant:
def __new__(cls,*args,array=True):
cls._init = False
return super(_Lang.Constant, cls).__new__(cls)
def __init__(self,*args,array=True):
'''
if array:
self.__members = args
else:
if len(args) > 1:
raise ValueError
self.__members = args[0]
'''
self.__members = args
self._init = True
def __str__(self):
#if len(self.__members) > 1:
return '<'+str(self.__members)[1:-1]+'>' #‹›
#return self.__members
def __repr__(self):
return '<'+str(self.__members)[1:-1]+'>'
def __setattr__(self,_attr,value):
if self._init:
raise AttributeError(f"'Constant' object does not support item assignment")
else:
super(_Lang.Constant,self).__setattr__(_attr,value)
def __getitem__(self,index):
return self.__members[index]
def __contains__(self,obj):
return obj in self.__members
def __bool__(self):
return bool(len(self.__members))
#'''
def __hash__(self):
return hash(tuple(['Constant',len(self)]+list(self.__members)))
#'''
def __len__(self):
#if type(self.__members) == tuple:
return len(self.__members)
def _dict_getter(self):
raise AttributeError("Conatant object has no attribute '__dict__'")
#return {}
__dict__ = property(_dict_getter)
def __dir__(self):
ret = list(super().__dir__())#[:-2]
ret.remove('_init')
ret.remove('_dict_getter')
return ret
const = Const = constant = Constant
class Array:
# Sized Array
__Type_Error = "Array of type '{}' does not accept object with type '{}'"
def __init__(self,*args,type_=_Auto,size=_Auto):
self.__members = []
if type_:
self.__TYPE = type_
else:
self.__TYPE = type(args[0])
self.__TYPE_NAME = self.__TYPE.__name__
if size:
self.__SIZE = size
else:
self.__SIZE = len(args)
for obj in args:
if type(obj) == self.__TYPE:
self.__members.append(obj)
else:
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
def __str__(self):
return '{'+str(self.__members)[1:-1]+'}' #‹›
def __repr__(self):
return '{'+str(self.__members)[1:-1]+'}'
def __getitem__(self,index):
return self.__members[index]
def __contains__(self,obj):
return obj in self.__members
def __bool__(self):
return bool(len(self.__members))
def __len__(self):
return len(self.__members)
def __setitem__(self,index,obj):
if type(obj) == self.__TYPE:
self.__members.insert(index,obj)
return
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
def insert(self,index,obj):
if type(obj) == self.__TYPE:
self.__members.insert(index,obj)
return
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
def append(self,obj):
if type(obj) == self.__TYPE:
self.__members.append(obj)
return
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
add = append
def remove(self,obj):
self.__members.remove(obj)
def pop(self,index=-1):
self.__members.pop(index)
array = Array
class Types:
Str = str
Int = int
Float = float
Set = set
Tuple = tuple
Dict = dict
List = list
Bool = bool
Bytes = bytes
Class = type
Type = type
Object = object
Lambda = type(lambda: None)
Function = Lambda #type(lambda: None)
#Constant = type(_Lang.Constant(1))
#Array = type(_Lang.Array(1,1))
Any = type#_typing.Any
Callable = _typing.Callable
Container = _typing.Container
Generator = Lambda #type(_f) #Not Built-in(s) #_types.GeneratorType || _typing.Generator
Iterable = _typing.Iterable
Iterator = _typing.Iterator
NoReturn = _typing.NoReturn
Optional = _typing.Optional
BuiltinFunction = type(len)
BuiltinMethod = type([].append)
Module = type(_typing)
Method = type(globals()['Tuple']().force)
#Mapping = _typing.Mapping
#OrderedDict = _typing.OrderedDict
#Text = str
#Union = _typing.Union
#_types.AsyncGeneratorType
types = Types
#setattr(_Lang,'Const',type(_Lang.Constant(1)))
#setattr(_Lang,'Array',type(_Lang.Array(1,1)))
#END
| '''
This module is meant to make your code shorter.
Its high-level API should feel like you give the order and the machine does the work!
It also collects the most useful functions and methods from popular Python modules.
(Read the help of the individual functions.)
Official documentation will be added soon.
'''
'''
Written By RX
Last Update: 1-15-2021
'''
__version__ = '3.0.0'
"""
< Release Changes >
- style.log_ now have all time prefix by default
- call=call_later
- system.mac_address
- io.selective_input choices can be dict
- Class Internet
- class date_time
"""
'''
TODO:
- average()
DATETIME:
X calendar_month_st replace day will be all noms
- Passed Time func
- System.(copy_to_clipboard & paste_from_clipboard)
- Other archive files in extract
- Call_later **kwargs
- Internet:
default_timeout
- files:
- files.join files.dirname
- Error in files.MEMBERS.all_all_*
- socket.socket()
- Screen recorder
- Make Sound
- mp3 tags (v 3.x)
 - registry editor (v 3.x)
- re module (v 3.x)
- Developer:
reload_module
Check_Type
add_module_dir
- Create Local Server
- ( win32api.LoadLibrary() - ctypes.PyDLL() )
X Threading
- Ready-obj module
- !style defaults
- Check 3rd-party modules imports
- pip install update
- Open Video
- Open Audio
'''
#START
import os as _os
import re as _re
import sys as _sys
import abc as _abc
import time as _time
import socket as _socket
import typing as _typing
import urllib as _urllib
import shutil as _shutil
import random as _random
import datetime as _datetime
import calendar as _calendar
import requests as _requests
import subprocess as _subprocess
from bs4 import BeautifulSoup
from typing import (Any,Iterable,Optional,Callable,List,Union)
import psutil as _psutil
argv = _sys.argv
ABC = _abc.ABC
ABCMeta = _abc.ABCMeta
####### 8888888888 888 d8b #######
#### 888 888 Y8P ####
#### 888 888 ####
#### 8888888 888 888 88888b. .d8888b 888888 888 .d88b. 88888b. .d8888b ####
#### 888 888 888 888 "88b d88P" 888 888 d88""88b 888 "88b 88K ####
#### 888 888 888 888 888 888 888 888 888 888 888 888 "Y8888b. ####
#### 888 Y88b 888 888 888 Y88b. Y88b. 888 Y88..88P 888 888 X88 ####
####### 888 "Y88888 888 888 "Y8888P "Y888 888 "Y88P" 888 888 88888P' #######
def p(text='', end='\n'):
'''
p is print!
But because we use it a lot, we\'ve decided to make it one letter.
Example:
p('Hello World')
==>Hello World
'''
print(text, end=end)
def repeat(function, n: int, **kwargs):
'''
Repeat function for n times with given parameters
for more info see the example below.
Example:
        repeat(rx.screenshot, 3, image_name='screenshot.png')
==> "function rx.screenshot will be executed 3 times."
'''
for _ in range(n):
function(**kwargs)
def wait(seconds):
'''
    Use this if you want your program to wait for a certain time.
Parameters
----------
seconds : [int/float]
time to sleep program in seconds
'''
_time.sleep(seconds)
sleep = wait
def cls():
'''
You can use this function if you want to clear the environment.
'''
import platform
if platform.system() == "Windows":
_os.system('cls')
else:
_os.system('clear')
clear = cls
def progressbar(
total=100, dashes_nom=100, delay=1, dashes_shape=' ', complete_shape='█',
pre_text='Loading: ', left_port='|', right_port='|'):
'''
    Use this function to make a custom in-app progress bar (not very useful on its own).
    (Use the Progressbar() generator instead if you need to do your own work between updates.)
    Example:
        progressbar(
            total=100, dashes_nom=10, delay=1, dashes_shape='-',
            complete_shape='#', pre_text='Loading: ')
        ==> Loading: |####------| 40/100
'''
def Progressbar(it, prefix="", size=60, file=_sys.stdout):
count = len(it)
def show(j):
x = int(size*j/count)
file.write(f"{prefix}{right_port}{complete_shape*x}{dashes_shape*(size-x)}{left_port} {j}/{count}\r")
file.flush()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
file.write("\n")
file.flush()
for _ in Progressbar(range(total), pre_text, dashes_nom):
wait(delay)
def wait_for(button:str):
"""
If You Want to Wait For the User to Press a Key (Keyboard/Mouse)
Use This Function.
Parameters
----------
button : str
Button to click
Raises
------
ValueError
It will be raised when invalid button is given
"""
button = button.lower()
if button.lower() in ('middle', 'left', 'right', 'back', 'forward'):
if button == 'back':
button = 'x'
if button == 'forward':
button = 'x2'
import mouse
mouse.wait(button)
else:
import keyboard
try:
keyboard.wait(button)
except:
raise ValueError('Incorrect Button Name.')
def call_later(function:Callable, *args, delay=0.001):
"""
Call Your Function Later Even Between Other Operations
(This function uses threading module so be careful about
how, when, and on what object you are going to operate on)
Parameters
----------
function : Callable
this should be your function name
delay : float,int
delay before calling function in seconds, by default 0.001
"""
import threading
thread = threading.Thread(target=lambda: (sleep(delay), function(*args)))
thread.start()
#keyboard.call_later(function, args, delay)
call = call_later
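# Example sketch for call_later: the callback below is a made-up placeholder function,
# only here to show that the call returns immediately and the work happens later on a
# background thread.
def _demo_call_later():
    def greet(name):
        print(f'Hello, {name}!')
    call_later(greet, 'world', delay=2)    # schedules greet('world') ~2 seconds from now
    print('call_later returned, main flow keeps running...')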
def convert_bytes(num:int) -> str:
"""
Convert num to idiomatic byte unit.
Parameters
----------
num : int
number you want to convert (in Byte)
Returns
-------
str
number + unit
Examples
--------
>>> convert_bytes(200)
'200.0 bytes'
>>> convert_bytes(6000)
'5.9 KB'
>>> convert_bytes(80000)
'78.1 KB'
"""
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def restart_app(python3:bool = False):
"""
    Close the current app and start it again
    (it replaces the current process via os.execv with 'python[3] *argv')
Parameters
----------
python3 : bool, optional
use 'python' or 'python3', by default False
"""
_os.execv(_sys.executable, ['python3' if python3 else 'python'] + _sys.argv)
_sys.exit()
def active_window_title() -> str:
"""
Get active windows title
(Usually terminal is active window title
but if during executing your script you change window
this will return new window title)
Returns
-------
str
string of active window title
"""
import pyautogui
return pyautogui.getActiveWindowTitle()
def open_image(path:str) -> None:
"""
Open image file with default image viewer.
(Mac OS is not supported yet)
Parameters
----------
path : str
path to the image file
Raises
------
OSError
It will be raised when you run this function in not supported OS
"""
import platform
if platform.system() == 'Windows':
_os.system(path)
elif platform.system() == 'Linux':
_subprocess.getoutput(f'xdg-open {path}')
else:
raise OSError('Only Windows and Linux are supported for this function.')
_BASENAME=''
def download(url:str, filename:str=_BASENAME, save_memory:bool=True,
progressbar:bool =True, prefix:str='Downloading'):
'''
Use this function to download files.
if filename is not given, it will be last part of the url.
filename can be path for saving file.
save_memory parameter is used to save memory in large files
(save directly to storage)
'''
import requests, urllib
if not filename:
filename = url.split('/')[-1]
if save_memory:
'''
with _urllib.request.urlopen(url) as response, open(filename, 'wb') as f:
_shutil.copyfileobj(response, f)
'''
'''
r = _requests.get(url, stream = True)
with open(filename,"wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
'''
if progressbar:
with open(filename, "wb") as f:
response = _requests.get(url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None:
f.write(response.content)
else:
dl = 0
done = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(33 * dl / total_length)
_sys.stdout.write(f"\r{prefix} {filename}: |{'█' * done}{' ' * (33-done)}| {100-((33-done)*3)}%")
_sys.stdout.flush()
if 100-((33-done)*3) == 96:
_sys.stdout.write(f"\r{prefix} {filename}: |{'█' * done}{' ' * (33-done)}| 100%")
_sys.stdout.flush()
else:
with open(filename, "wb") as f:
response = _requests.get(url, stream=True)
for data in response.iter_content(chunk_size=4096):
f.write(data)
else:
def report(blocknr, blocksize, size):
if progressbar:
current = blocknr*blocksize
_sys.stdout.write("\rDownloading {1}: {0:.2f}%".format(100.0*current/size,filename))
def downloadFile(url):
_urllib.request.urlretrieve(url, filename, report)
downloadFile(url)
pass
if progressbar: print()
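# Example sketch for download(): the URL and file name are placeholder assumptions,
# not endpoints used by the original project. download() streams the response to disk
# and draws its own progress bar.
def _demo_download():
    url = 'https://example.com/some_file.zip'   # placeholder URL
    download(url, filename='some_file.zip', save_memory=True, progressbar=True)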
def extract(filename:str, path:Optional[str]=None,files:Optional[Iterable[str]]=None,
password:Optional[str]=None) -> None:
"""
Extract Files from Zip files
By default it extracts all files
Parameters
----------
filename : str
path to .zip file
path : str, optional
path to extract files (by default: folder in current working directory)
files : Iterable[str], optional
Iterable of files you want to extract, by default None
password : str, optional
password if your .zip file is password protected, by default None
"""
import zipfile
zipfile.ZipFile(filename, 'r').extractall(path=path,members= files,pwd=password)
def screenshot(image_name:str='Screenshot.png'):
'''
This function will take a screenshot and save it as image_name
'''
import pyscreeze
return pyscreeze.screenshot(image_name)
def func_info(func:Callable):
"""
print some information about 'func'
Parameters
----------
func : Callable
function you want to get its information
"""
help(func) #func.__doc__
print('-'*30)
print('Module ', func.__module__)
print('-'*30)
try:
_code_ = str(func.__code__)
_code_ = _code_[_code_.index(',')+2:-1]
except AttributeError:
_code_ = f'No "file" and "line" information available '
_code_ += f' (I guess "{func}" is a built-in function)'
print(_code_)
def Progressbar(
total=60, dashes_nom=30, dashes_shape=' ', complete_shape='█',
pre_text='Loading: ', left_port='|', right_port='|'):
'''
Make your code more beautiful with progressbars!
    this is a generator function so use it like this:
    >>> for _ in Progressbar(100, 10):
do_this()
do_that()
Loading: |████ | 40/100
'''
echo = _sys.stdout
def show(j):
x = int(dashes_nom*j/total)
echo.write(
f"{pre_text}{right_port}{complete_shape*x}{dashes_shape*(dashes_nom-x)}{left_port} {j}/{total}\r")
echo.flush()
show(0)
for i, item in enumerate(range(total)):
yield item
show(i+1)
echo.write("\n")
echo.flush()
_MOUSE_X = 0
_MOUSE_Y = 0
def pixel_color(x=_MOUSE_X, y=_MOUSE_Y) -> tuple:
"""
Function to return color of pixel of screen in tuple of RGB
Parameters
----------
x : int
pixel of column x, by default last x of mouse
y : int
pixel of row y, by default last y of mouse
Returns
-------
tuple
tuple with 3 integers: (RED,GREEN,BLUE)
"""
import pyautogui
if not x:
x = pyautogui.position()[0]
if not y:
y = pyautogui.position()[1]
PIXEL = pyautogui.screenshot(region=(x, y, 1, 1))
COLOR = PIXEL.getcolors()
return COLOR[0][1]
def import_module(path:str):
"""
Import modules from files even if they are not .py
Parameters
----------
path : str
path to file to import it
Returns
-------
ModuleType
return module
"""
import importlib.machinery
import importlib.util
loader = importlib.machinery.SourceFileLoader('MOD', path)
spec = importlib.util.spec_from_loader(loader.name, loader)
mod = importlib.util.module_from_spec(spec)
loader.exec_module(mod)
return mod
######################
# TUPLE FUNC #
######################
def force(tpl: Any, *var: Any) -> tuple:
'''
(TUPLE FUNCTION)
It returns tpl with adding var(s) to it.
'''
return tuple(list(tpl)+[v for v in var])
#force= lambda tpl,*var: tuple(list(tpl)+[v for v in var])
def erase(tpl: tuple, *var: Any) -> tuple:
'''
(TUPLE FUNCTION)
    It returns tpl with var(s) removed from it.
'''
#lstv= [v for v in var if v in tpl]
lstt= list(tpl)
for th in [v for v in var if v in tpl]:
lstt.remove(th)
return tuple(lstt)
def replace(tpl: tuple, ind, var: Any) -> tuple:
'''
(TUPLE FUNCTION)
Replace tpl[ind] with var
'''
tpl=list(tpl)
if type(ind) == str:
ind= tpl.index(ind)
tpl[ind]=var
return tuple(tpl)
def insert(tpl: tuple, ind, var: Any) -> tuple:
'''
(TUPLE FUNCTION)
    Exactly like list.insert but for tuples (returns a new tuple).
'''
tpl=list(tpl)
if type(ind) == str:
ind= tpl.index(ind)
tpl.insert(ind,var)
return tuple(tpl)
def pop(tpl: tuple, index=-1) -> tuple:
    '''
    (TUPLE FUNCTION)
    Like list.pop but for tuples: returns tpl without the element at index.
    '''
    lst = list(tpl)
    lst.pop(index)
    return tuple(lst)
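# Example sketch for the module-level tuple helpers above. Because tuples are
# immutable, every helper returns a new tuple instead of changing the old one.
def _demo_tuple_helpers():
    t = (1, 2, 3)
    t = force(t, 4, 5)         # (1, 2, 3, 4, 5)
    t = erase(t, 2)            # (1, 3, 4, 5)
    t = replace(t, 0, 'one')   # ('one', 3, 4, 5)
    t = insert(t, 1, 2)        # ('one', 2, 3, 4, 5)
    t = pop(t)                 # ('one', 2, 3, 4)  -- the tuple without its last item
    print(t)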
"""
def screen_recorder():
from screen_recorder_sdk import screen_recorder
#screen_recorder.enable_dev_log ()
screen_recorder.disable_log()
pid = 2456
screen_recorder.init_resources(pid)
screen_recorder.start_video_recording ('video1.mp4', 30, 8000000, True)
_time.sleep(10)
print('hello')
for i in range(100):
x= i**3
screen_recorder.stop_video_recording ()
screen_recorder.free_resources()
class Error(Exception):
'''
This module is for creating you own Error and Exception!
Useage:
>>> MyError = Error(name='MyError', msg='An Error occurred')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
MyError: An Error occurred
Also You can raise it directly:
>>> raise Error(name='MyError', msg='An Error occurred')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
MyError: An Error occurred
'''
def __new__(cls, msg, name=''):
Error.__name__ = name
return super(Error, cls).__new__(cls, msg)
def __init__(self, **kwargs):
pass
"""
####### .d8888b. 888 888 #######
#### d88P Y88b 888 888 ####
#### 888 888 888 888 ####
#### 888 888 888 8888b. .d8888b .d8888b .d88b. .d8888b ####
#### 888 888 888 "88b 88K 88K d8P Y8b 88K ####
#### 888 888 888 888 .d888888 "Y8888b. "Y8888b. 88888888 "Y8888b. ####
#### Y88b d88P 888 888 888 888 X88 X88 Y8b. X88 ####
####### "Y8888P" 888 888 "Y888888 88888P' 88888P' "Y8888 88888P' #######
class Random:
'''
random Variable Generator Class.
(ALL FUNCTIONS ARE STATIC METHODS)
'''
@staticmethod
def choose(iterator,k: int =1,duplicate=True):
'''
Return a random element from a non-empty sequence.
'''
if type(k) != int:
raise TypeError('k must be integer.')
if k == 1:
return _random.choice(iterator)
elif k > 1:
if duplicate:
return _random.choices(iterator,k=k)
else:
return _random.sample(iterator,k=k)
else:
            raise ValueError('k must be higher than 0')
@staticmethod
def integer(first_number,last_number):
'''
Return random integer in range [a, b], including both end points.
'''
return _random.randint(first_number,last_number)
@staticmethod
def O1(decimal_number=17):
'''
return x in the interval [0, 1)
'''
return round(_random.random(),decimal_number)
@staticmethod
def number(first_number,last_number):
'''
return x in the interval [F, L]
'''
return _random.uniform(first_number,last_number)
@staticmethod
def shuffle(iterable):
'''
Return shuffled version of iterable
'''
real_type = type(iterable)
new_iterable = list(iterable)
_random.shuffle(new_iterable)
if real_type in (set,tuple):
return real_type(new_iterable)
elif real_type == str:
return ''.join(new_iterable)
elif real_type == dict:
return {item:iterable[item] for item in new_iterable}
else:
return new_iterable
random = Random
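# Example sketch for the Random helpers above; the values are arbitrary placeholders.
def _demo_random():
    print(random.integer(1, 6))                            # random int in [1, 6]
    print(random.O1(3))                                    # float in [0, 1) with 3 decimals
    print(random.choose(['red', 'green', 'blue']))         # one random element
    print(random.choose(range(10), k=3, duplicate=False))  # three distinct elements
    print(random.shuffle('abcdef'))                        # shuffled copy of the string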
class Files:
'''
(STATIC METHODS)\n
Actions and information about files.\n
(READ FUNCTIONS DOCSTRING)
GET INFORMATION:
- exists()
- size()
- abspath()
- mdftime()
- acstime()
- content (read function)()
- is file()
- is dir()
- is readonly()
- is hidden()
ACTIONS:
- remove()
- rename()
- move()
- copy()
- hide()
- read only()
- write()
'''
@staticmethod
def size(path):
'''
return size of the file in byte(s).
        Also works on directories.
'''
return _os.path.getsize(path)
        # TODO: should be tested on a folder
@staticmethod
def remove(path,force=False):
'''
Use this to delete a file or a directory.
If force is True it will delete non-empty directories.
'''
if _os.path.isfile(path):
_os.remove(path)
else:
if force:
_shutil.rmtree(path)
else:
try:
_os.rmdir(path)
except OSError:
raise OSError(f"[WinError 145] The directory is not empty: '{path}'" + '\n' + ' '*23 +
'(Use force=True as an argument of remove function to remove non-empty directories.)') from None
delete = remove
@staticmethod
def rename(old_name,new_name):
'''Rename files with this function.'''
_os.rename(old_name,new_name)
@staticmethod
def abspath(path):
'''
return absolute path of given path.
'''
return _os.path.abspath(path)
@staticmethod
def exists(path):
'''
Search for the file And Returns a boolean.
if file exists: True
else: False
'''
return _os.path.exists(path)
@staticmethod
def mdftime(path):
'''
Get last modify time of the path.
'''
return _os.path.getmtime(path)
@staticmethod
def acstime(path):
'''
Get last access time of the path.
'''
return _os.path.getatime(path)
        # TODO: should be converted to a date
@staticmethod
def move(src,dst):
'''
        Move (cut) file/directory from src to dst.
'''
_shutil.move(src,dst)
#live_path= dst
        # Does this work for folders too?
@staticmethod
def copy(src,dest,preserve_metadata= True):
'''
Copy the file from src to destination.
preserve_metadata is for preserving metadata of file when copying.
(You can use it instead of rename too.
e.g:
copy('D:\\Test.py','E:\\Ali.py')
(It copies Test.py to E drive and renames it to Ali.py)
)
'''
if files.isdir(src):
_shutil.copytree(src,dest)
else:
if preserve_metadata: _shutil.copy2(src,dest)
else: _shutil.copy(src,dest)
@staticmethod
def hide(path,mode=True):
'''
Hide file or folder.
If mode==False: makes 'not hide'
(ONLY WINDOWS)
'''
try:
import win32api, win32con
except:
raise ImportError('Please install pywin32 via pip')
if mode:
win32api.SetFileAttributes(path,win32con.FILE_ATTRIBUTE_HIDDEN)
else:
win32api.SetFileAttributes(path,win32con.FILE_ATTRIBUTE_NORMAL)
@staticmethod
def read_only(path,mode=True):
'''
Make file attribute read_only.
If mode==False: makes 'not read_only'
'''
if type(mode)==bool:
from stat import S_IREAD,S_IWUSR
if mode==True:
_os.chmod(path, S_IREAD)
elif mode==False:
_os.chmod(path, S_IWUSR)
else:
            raise Exception('Second argument (mode) should be boolean.')
@staticmethod
def read(path):
'''
This can help you to read your file faster.
Example:
read('C:\\users\\Jack\\test.txt')
==> "Content of 'test.txt' will be shown."
'''
with open(path) as f:
FileR= f.read()
return FileR
@staticmethod
def write(file_path,text=None,mode='replace',start=''):
'''
With this method you can change content of the file.
file: File you want to change its content.
content: Content you want to add to file.
mode: Type of writing method.
'a' or 'continue' for add content to end of the file.
'w' or 'replace' for overwriting to file content.
start: I use this when I use mode='continue'
'''
if mode=='replace':
op= open(file_path,mode='w')
if text==None:
text= input('Type what you want.\n\n')
op.write(text)
op.close()
elif mode=='continue':
'''opr= open(file,mode='r')
FileR= opr.read()
op= open(file,mode='w')'''
op=open(file_path,'a')
if text==None:
text= input('Type what you want to add in the end of the file.\n\n')
op.write(start+text)
op.close()
else:
            raise ValueError('mode can only be "replace" (default) or "continue", not "{0}"'.format(mode))
@staticmethod
def isdir(path):
return _os.path.isdir(path)
@staticmethod
def isfile(path):
return _os.path.isfile(path)
@staticmethod
def is_readonly(path):
'''
Return True if path is readonly else False.
(May Not Work in Linux)
'''
return _subprocess.getoutput(f'dir /ar {path} >nul 2>nul && echo True || echo False')
@staticmethod
def is_hidden(path):
"""
Check whether a file is presumed hidden, either because
the pathname starts with dot or because the platform
indicates such.
Return True if File or Directory is hidden.
(Work on both Linux and Windows)
"""
import platform
full_path = _os.path.abspath(path)
name = _os.path.basename(full_path)
def no(path): return False
platform_hidden = globals().get('is_hidden_' + platform.system(), no)
return name.startswith('.') or platform_hidden(full_path)
@staticmethod
def is_hidden_Windows(path):
import ctypes
res = ctypes.windll.kernel32.GetFileAttributesW(path)
assert res != -1
return bool(res & 2)
@staticmethod
def search_file(pattern, path='.\\',return_mode: Union['list','Generator']= 'list'):
'''
Search for files in path.
Return list or generator.
pattern:
- 'x.py' : search for 'x.py' in path.
- '*.py' : search for all files with .py extension in path.
- '*.*' : search for all files in path
- '**/*' : search for any file in path and also all sub-directories.
- '**/*.py: search for all python files in path and also sub-directories.
- 'mydir/**/*.py' : search for all python files in path/mydir/ and all of its sub-directories.
'''
import glob
if str(return_mode).lower() in ('list','generator'):
#print(_os.path.join(path,pattern))
if return_mode=='list': return glob.glob(_os.path.join(path,pattern), recursive=True)
else: return glob.iglob(_os.path.join(path,pattern), recursive=True)
else:
if type(return_mode)==str:
raise ValueError(f"return_mode van be 'list' or 'generator' not {return_mode}")
else:
raise TypeError(f"return_mode type should be 'str' and it should be in ['list', 'generator']")
@staticmethod
def search_content(path,word):
ALL= [val for sublist in [[_os.path.join(i[0], j) for j in i[2]] for i in _os.walk(path)] for val in sublist]
'''lst=[]
for file in ALL:
if word in rx.read(file):
lst.append(file)
return lst'''
return [file for file in ALL if word in open(file).read()]
@staticmethod
def mkdir(path):
path = _os.path.normpath(path)
NEW= ''
for FILE in path.split('\\'):
NEW+= FILE+'\\'
try: _os.mkdir(NEW)
except (FileExistsError,FileNotFoundError): pass
@staticmethod
def generate_tree(dir_path, level: int=-1, limit_to_directories: bool=False,
length_limit: int=1000, print_info: bool=True):
"""Given a directory Path object return a visual tree structure"""
from pathlib import Path
from itertools import islice
space= ' '; branch = '│ '; tee= '├── '; last= '└── '
dir_path = Path(dir_path) # accept string coerceable to Path
files = 0
directories = 0
def inner(dir_path: Path, prefix: str='', level=-1):
nonlocal files, directories
if not level: return # 0, stop iterating
if limit_to_directories: contents = [d for d in dir_path.iterdir() if d.is_dir()]
else: contents = list(dir_path.iterdir())
pointers = [tee] * (len(contents) - 1) + [last]
for pointer, path in zip(pointers, contents):
if path.is_dir():
yield prefix + pointer + path.name
directories += 1
extension = branch if pointer == tee else space
yield from inner(path, prefix=prefix+extension, level=level-1)
elif not limit_to_directories:
yield prefix + pointer + path.name
files += 1
RETURN=''
RETURN+=dir_path.name+'\n'
iterator = inner(dir_path, level=level)
for line in islice(iterator, length_limit): RETURN+=line+'\n'
if next(iterator, None): RETURN+=f'... length_limit, {length_limit}, reached, counted:'
if print_info: RETURN+=f'\n{directories} directories' + (f', {files} files' if files else '')
return RETURN
class MEMBERS:
@staticmethod
def all_exactdir(dir):
return _os.listdir(dir)
@staticmethod
def all_all_sep(dir):
return [i for i in _os.walk(dir)]
@staticmethod
def files_exactdir(dir,abspath=True):
if abspath:
return [dir+'/'+file_ for file_ in [i for i in _os.walk(dir)][0][2]]
return [i for i in _os.walk(dir)][0][2]
@staticmethod
def files_all(dir):
return [val for sublist in [[_os.path.join(i[0], j) for j in i[2]] for i in _os.walk(dir)] for val in sublist]
@staticmethod
def files_all_sep(dir):
return [[_os.path.join(i[0], j) for j in i[2]] for i in _os.walk(dir)]
@staticmethod
def dirs_exactdir(dir, abspath=True):
if dir.endswith('/'): dir=dir[:-1]
elif dir.endswith('\\'): dir=dir[:-1]
if abspath:
return [dir+'/'+folder for folder in [i for i in _os.walk(dir)][0][1]]
return [i for i in _os.walk(dir)][0][1]
@staticmethod
def dirs_all(dir):
return [TPL[0] for TPL in [i for i in _os.walk(dir)]]
files = Files
write = files.write
read = files.read
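# Example sketch for the Files helpers above. The file names are placeholder
# assumptions; calling this creates and then deletes them in the working directory.
def _demo_files():
    files.write('demo.txt', 'hello world', mode='replace')
    print(files.read('demo.txt'))      # -> hello world
    print(files.exists('demo.txt'))    # -> True
    print(files.size('demo.txt'))      # size in bytes
    files.copy('demo.txt', 'demo_copy.txt')
    files.remove('demo_copy.txt')
    files.remove('demo.txt')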
class System:
'''
Some system actions and information.
- Information about ram, ip, terminal, etc.
- Some System Actions like Shutdown and Restart
(ALL FUNCTIONS ARE STATIC METHODS)
'''
@staticmethod
def accname():
'''
return account username you have logged in.
'''
return _os.getlogin()
@staticmethod
def pid():
'''
Get pid number of terminal and return it.
'''
return _os.getpid()
'''@staticmethod
def disk_usage(path):
####
return _shutil.disk_usage(path)'''
@staticmethod
def chdir(path):
'''
Change directory of terminal.
'''
_os.chdir(path)
@staticmethod
def SHUT_DOWN():
'''
Shut down the PC. (WINDOWS)
'''
_os.system("shutdown /s /t 1")
@staticmethod
def RESTART():
'''
Restart the PC. (WINDOWS)
'''
_os.system("shutdown /r /t 1")
@staticmethod
def terminal_size() -> tuple:
'''
Return terminal size in tuple (columns,rows)
'''
size= _os.get_terminal_size()
return (size.columns,size.lines)
@staticmethod
def cwd():
'''
Return a unicode string representing the current working directory.
'''
return _os.getcwd()
@staticmethod
def ip_global():
"""
        Return the global ip using the http://ipinfo.io/ip api.
        (returns the global ip as a string)
"""
try:
import requests
new_session = _requests.session()
response = new_session.get("http://ipinfo.io/ip")
import re
ip_list = _re.findall(r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}", response.text)
new_session.close()
return ip_list[0]
except:
raise ConnectionError('No Internet Connection') from None
"""ip_global= internet.ip_global"""
@staticmethod
def ip_local():
"""
Return local ip of computer in windows by _socket. module
and in unix with hostname command in shell.
"""
#return [l for l in ([ip for ip in _socket.gethostbyname_ex(_socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [_socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
'''
s = _socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
'''
import platform
class NetworkError(Exception):
def __init__(self, message): super().__init__(message)
try:
ip = _socket.gethostbyname(_socket.gethostname())
if ip and ip != "127.0.1.1":
return ip
elif platform.system() != "Windows":
import subprocess
command = _subprocess.Popen(["hostname", "-I"],stdout=_subprocess.PIPE,stderr=_subprocess.PIPE,stdin=_subprocess.PIPE,shell=False)
response = list(command.communicate())
if len(response[0]) > 0:
return str(response[0])[2:-4]
raise NetworkError('No Network Connection')
raise NetworkError('No Network Connection')
except:
raise
"""ip_local= internet.ip_local"""
@staticmethod
def ram_total(convert=True):
"""
Return total ram of board as string
parameter convert: flag for convert mode (using of convert_byte function)
"""
response = list(_psutil.virtual_memory())
if convert:
return convert_bytes(int(response[0]))
return str(response[0])
@staticmethod
def ram_used(convert=True):
"""
Return how much ram is using.
parameter convert: flag for convert mode (convert with convert_byte function)
"""
response = list(_psutil.virtual_memory())
if convert:
return convert_bytes(int(response[3]))
return str(response[3])
@staticmethod
def ram_free(convert=True):
"""
Return how much ram is available.
parameter convert: flag for convert mode (convert with convert_byte function)
"""
response = list(_psutil.virtual_memory())
if convert:
return convert_bytes(int(response[1]))
return str(response[1])
@staticmethod
def ram_percent(ONLY_NOM=False):
"""
        Return the percentage of RAM in use: as a number if ONLY_NOM, as a string with ' %' if not ONLY_NOM
Parameter ONLY_NOM: flag for return type and value.
"""
response = list(_psutil.virtual_memory())
if ONLY_NOM:
return response[2]
return str(response[2]) + " %"
@staticmethod
def boot_time():
'''
Return the system boot time expressed in seconds since the epoch.
'''
return _psutil.boot_time()
@staticmethod
def device_name():
return _socket.gethostname()
@staticmethod
def ip_website(url):
'''get IP address of Web Site'''
return _socket.gethostbyname(url)
"""ip_webs= internet.ip_website"""
@staticmethod
def win10_notification(title,message,icon=None, duration=5) -> None:
'''
(THIS ONLY WORKS FOR "WINDOWS 10")\n
        Display a notification with title, message and icon for a specific duration.
'''
try:
from win10toast import ToastNotifier
ToastNotifier().show_toast(title,message,duration=duration)
except:
raise ImportError('Use "pip install win10toast" to install required module')
@staticmethod
def cpu_count(logical=True):
'''
Return the number of logical CPUs in the system
(same as _os.cpu_count() in Python 3.4).
If *logical* is False return the number of physical cores only
(e.g. hyper thread CPUs are excluded).
Return None if undetermined.
'''
return _psutil.cpu_count(logical)
@staticmethod
def pyshell_execute_bit():
        '''Determine whether the Python shell is executing in 32-bit or 64-bit.'''
#return platform.architecture()[0][:2] # SLOW
#return ctypes.sizeof(ctypes.c_voidp)*8
import struct
return struct.calcsize("P") * 8
@staticmethod
def pids() -> list:
'''Return a list of current running PIDs'''
return _psutil.pids()
@staticmethod
def cpu_percent() -> float:
'''
Return a float representing the current system-wide CPU utilization as a percentage.'''
return _psutil.cpu_percent()
@staticmethod
def pid_exists(pid) -> bool:
return _psutil.pid_exists(pid)
@staticmethod
def mac_address(formatted=False):
import uuid
mac = uuid.getnode()
if formatted:
return ':'.join(['{:02x}'.format((mac >> ele) & 0xff) for ele in range(0,8*6,8)][::-1])
return hex(mac)
system = System
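# Example sketch for the read-only System helpers above. Output depends on the machine
# it runs on; the destructive helpers (SHUT_DOWN/RESTART) are deliberately not called.
def _demo_system():
    print(system.accname())       # logged-in user name
    print(system.cwd())           # current working directory
    print(system.ram_total())     # e.g. '15.9 GB'
    print(system.ram_percent())   # e.g. '42.1 %'
    print(system.cpu_count())     # number of logical CPUs
    print(system.device_name())   # host name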
from colored import fg as _fg
from colored import bg as _bg
from colored import attr as _attr
class Style:
'''
This class is for Changing text Color,BG & Style.
(Using colored module but easier)
- style.print to customize your print.
- style.switch to change terminal colors.
- style.switch_default for making everything default.
Also You Can Create style object.
This will allow you to:
     - Add it to other strings (because it holds a ready-to-print string)
     - Slice and index it (without the color codes)
'''
def __init__(self, text, color='default', BG='black'):
try:
self.color = color.lower()
self.BG = BG.lower()
#style = style.lower()
except:
pass
if color == 'default':
self.color = 7 #188
self.text = text
self.content = f"{_fg(color)}{_bg(BG)}{text}{_attr(0)}"
def __str__(self):
return self.content
def __repr__(self):
return self.content
def __add__(self, other):
#print(type(other))
if type(other)!=style:
return self.content+other
else:
return self.content+other.content
@staticmethod
def print(text='', color='default', BG='default', style=None, end='\n'):
'''
        style.print(text='Hello World', color='red', BG='white')
output ==> 'Hello World' (With red color and white BG)
Styles: bold - underline - reverse - hidden
*bold and underline may not work. (Depends on terminal and OS)
'''
try:
color = color.lower()
BG = BG.lower()
style = style.lower() if style and type(style)==str else 0
except:
raise
if style == 'none':
style = 0
if color=='default' and BG!='default': # _bg & !clr
print(f'{_attr(style)}{_bg(BG)}{text}{_attr(0)}', end=end)
elif color!='default' and BG=='default': # !_bg & clr
print(f'{_attr(style)}{_fg(color)}{text}{_attr(0)}', end=end)
elif color=='default' and BG=='default': # !_bg & !clr
print(f'{_attr(style)}{text}{_attr(0)}', end=end)
elif color!='default' and BG!='default': # _bg & clr
print(f'{_attr(style)}{_bg(BG)}{_fg(color)}{text}{_attr(0)}', end=end)
@staticmethod
def switch(color='default', BG='black', style='None'):
'''
Change color,BG and style untill you call it again and change them.
'''
try:
color = color.lower()
BG = BG.lower()
style = style.lower()
except:
pass
if style == 'none':
style = 0
if color == 'default':
color = 7
print(f'{_attr(style)}{_bg(BG)}{_fg(color)}', end='')
@staticmethod
def switch_default():
'''Switch Terminal Attributes to its defaults'''
print(f'{_attr(0)}', end='')
reset = switch_default
@staticmethod
def log_success(text, color='green', BG='default', style=None, add_time=True):
#globals()['style'].print(text, color, BG, style=style)
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_info(text, color='grey_93', BG='default', style=None, add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_warning(text, color='gold_3a', BG='default', style=None, add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_error(text, color='red', BG='default', style=None, add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_critical(text, color='red_1', BG='default', style='bold', add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
style = Style
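# Example sketch for the Style helpers above. 'green' and 'red' are standard color
# names from the 'colored' package; the messages are placeholder text.
def _demo_style():
    style.print('operation finished', color='green')
    style.log_warning('disk almost full')
    banner = style('WARNING', 'red') + ' something went wrong'
    print(banner)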
class Record:
'''
    Use this class to measure how long actions take, in seconds.
    Usage:
        Start= record()
        #Some code here...
        Finish= Start.lap()
        print(Finish) ==> 0.25486741
        #Some more code here...
        Finish= Start.lap() ==> 0.4502586
        Start.laps --> [0.25486741, 0.4502586]
    Use Start.reset() to clear the recorded laps and free memory.
    (self.laps keeps growing until you reset it.)
'''
def __init__(self):
self.__start = _time.time()
self.laps = []
def __call__(self):
return f'Laps: {self.laps}'
def __repr__(self):
return f'Laps: {self.laps}'
def lap(self, save=True, Round=15):
'''
Return time passed from creating time of self.
(Read 'record' Doc String)
If save is True, time will be added to self.laps
'''
lp = _time.time() - self.__start
lp = round(lp,Round)
if save:
self.laps.append(lp)
return lp
def reset(self, reset_start=False):
'''
This will erase self.laps
If reset_start is True, start time will reset too.
'''
self.laps = []
if reset_start:
self.__start = _time.time()
def last_lap(self, save=True):
'''
Return time passed from last lap
(If self.laps is False then from start_time)
'''
ret = (self.lap(False)-self.laps[-1]) if self.laps else self.lap(False)
if save:
self.laps.append(self.lap())
return ret
@staticmethod
def timit(code,setup,times,globals_):
'''
Run the 'code' for 'times' times and return time it needs (all, not once)
(If you need any initialization for your 'code', put it in setup arg)
'''
import timeit
return timeit.timeit(stmt=code,setup=setup,number=times,globals=globals_)
record = Record
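# Example sketch for Record: timing two pieces of "work" (simulated with wait()).
def _demo_record():
    timer = record()
    wait(0.2)                # first piece of work
    print(timer.lap())       # seconds since the timer was created
    wait(0.1)                # second piece of work
    print(timer.last_lap())  # seconds since the previous lap
    print(timer.laps)        # every lap recorded so far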
class Terminal:
"""
Run Terminal Commands with Terminal functions
(ALL FUNCTIONS ARE STATIC METHODS)
"""
@staticmethod
def run(command:str) -> None:
'''
Execute the command in a subshell
(NO RETURN, LIVE EXECUTION, OUTPUT WILL BE PRINTED)
'''
_os.system(command)
@staticmethod
def getoutput(command:str) -> str:
'''
Return output of executing command in a shell
(RETURN STR, RETURN AFTER EXECUTING CODE)
'''
return _subprocess.getoutput(command)
terminal = Terminal
class Decorator:
class Check_Type:
"""
Function decorator for developers\n
Use this decorator to check if user gives right argument type\n
You need to annotate argument type when defining it.\n
Supported Types:
* str
* list
* set
* dict
* tuple
* User-Defined Objects
Typing Module Supported Types:
* Iterable
* Callable
        * Generator
* Container
* Any
(MORE TYPES SOON ...)
'''
sig = signature(foo)
print(str(sig))
print(str(sig.parameters['b']))
print(sig.parameters['b'].annotation)
####
sig = signature(foo)
for param in sig.parameters.values():
if (param.kind == param.KEYWORD_ONLY and
param.default is param.empty):
print('Parameter:', param.annotation)
'''
"""
auto_correct = False
def __init__(self, function):
self.function = function
def __call__(self, *args, **kwargs):
special_types = ('callable', 'iterable', 'generator','container', 'any')
i=-1
__local__= list(locals()['args'])
annots= list(self.function.__annotations__.keys())
def extra_remover(correct):
# Typing module annots check
if correct.startswith('typing.'):
correct = correct[7:].lower()
# built-in types check
elif correct.startswith('<class '):
correct = correct[8:-2]
return correct
def check_specials(TYPE, LOCAL_I):
import inspect
wrong = ''
if TYPE == 'generator':
if inspect.isgeneratorfunction(LOCAL_I) or inspect.isgenerator(LOCAL_I):
return
else:
correct = 'generator'
elif TYPE == 'callable':
if callable(LOCAL_I):
return
else:
correct = 'callable'
elif TYPE == 'iterable':
if type(LOCAL_I) in (list, tuple, set, str):
print(type(LOCAL_I))
return
else:
correct = 'iterable'
elif TYPE == 'container':
if type(LOCAL_I) in (list,set,dict,tuple):
return
else:
correct = 'container'
elif TYPE == 'any':
return
wrong = extra_remover(str(type(LOCAL_I))) if not wrong else wrong
func_name = self.function.__name__
Error= TypeError(f"'{func_name}()' argument '{ARG}' must be '{correct}' (not '{wrong}')")
raise Error
for ARG in annots:
i += 1
try:
LOCAL_I = __local__[i]
correct = str(self.function.__annotations__[ARG])
'''if correct.startswith('typing.Union'):
correct = eval(correct[12:])
if type(correct) != list:
correct = [correct]'''
correct = extra_remover(correct)
if correct in special_types:
print(type(LOCAL_I))
check_specials(correct,LOCAL_I)
# Builtins and other Libraries objects
elif not eval(correct) == type(LOCAL_I):
if Check_Type.auto_correct:
try:
__local__[i] = eval(correct)(LOCAL_I)
continue
except ValueError:
pass
wrong = extra_remover(str(type(LOCAL_I)))
#correct = str(self.function.__annotations__[ARG])#[8:-2]
correct = extra_remover(correct)
func_name = self.function.__name__
Error= TypeError(f"'{func_name}()' argument '{ARG}' must be '{correct}' (not '{wrong}')")
raise Error
except (ValueError,IndexError):
pass#raise
except NameError:
raise
return self.function(*__local__, **kwargs)
decorator_all:Callable = None
@staticmethod
def attach_to_all(cls):
import inspect
for name, method in inspect.getmembers(cls):
if (not inspect.ismethod(method) and
not inspect.isfunction(method) ) or (
inspect.isbuiltin(method)):
continue
#print("Decorating function %s" % name)
setattr(cls, name, Decorator.decorator_all(method))
return cls
abstractmethod = _abc.abstractmethod
_registered_functions = {} #:Dict[str, Any]
class _MultiMethod(object):
def __init__(self, name):
self.name = name
self.typemap = {}
def __call__(self, *args):
types = tuple(arg.__class__ for arg in args)
function = self.typemap.get(types)
if function is None:
raise TypeError("no match: ",types)
return function(*args)
def register(self, types, function):
self.typemap[types] = function
def overload(*types):
def register(function):
name = function.__name__
mm = decorator._registered_functions.get(name)
if mm is None:
mm = decorator._registered_functions[name] = Decorator._MultiMethod(name)
mm.register(types, function)
return mm
return register
decorator = Decorator
Check_Type = Decorator.Check_Type
overload = Decorator.overload
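# Example sketch for the two decorators exported above. Check_Type validates the
# annotated argument types at call time; overload picks an implementation based on
# the argument types it was registered with. The demo functions are placeholders.
@Check_Type
def _demo_shout(text: str, times: int):
    return (text + '! ') * times

@overload(int, int)
def _demo_combine(a, b):
    return a + b

@overload(str, str)
def _demo_combine(a, b):
    return f'{a} {b}'

# _demo_shout('hey', 3)    -> 'hey! hey! hey! '
# _demo_shout('hey', 'x')  -> TypeError raised by Check_Type
# _demo_combine(1, 2)      -> 3
# _demo_combine('a', 'b')  -> 'a b'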
class IO:
@staticmethod
def wait_for_input(prompt,SS:list=[]):
answer= ''
try:
while not answer:
answer = input(prompt).strip()
except (EOFError,KeyboardInterrupt):
style.print('EXITING...','red')
exit()
return answer
@staticmethod
def selective_input(prompt,choices,default=None,ignore_case=False,error=True,invalid='Invalid input'):
        if type(choices) == dict:
            Choices = list(choices.keys())+list(choices.values())
        else:
            Choices = list(choices)
        if ignore_case:
            Choices = [item.lower() for item in Choices]
while True:
inp = input(prompt)
inp = inp.lower() if ignore_case else inp
if not inp or inp not in Choices:
if error:
style.print(invalid, 'red')
else:
if default:
inp = default
break
else:
break
if type(choices) == dict:
try:
inp = choices[inp]
except KeyError:
pass
return inp
@staticmethod
def yesno_input(prompt,default=None):
error= not bool(default)
        return io.selective_input(prompt,['y','yes','n','no'],default,error=error)
@staticmethod
def Input(prompt:str ='', default_value:str =''):
'''
Make Default Value For Your Input!
        (THIS ONLY WORKS ON WINDOWS (SORRY))
prompt is what you want and it's input(prompt) .
default_value is what there should be after prompt.
E.g:
>>> Input('Is rx7 Library Easy to Learn? ', 'Yes')
Is rx7 Library Easy to Learn? Yes
'''
import win32console
_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)
keys = []
for c in str(default_value):
evt = win32console.PyINPUT_RECORDType(win32console.KEY_EVENT)
evt.Char = c
evt.RepeatCount = 1
evt.KeyDown = True
keys.append(evt)
_stdin.WriteConsoleInput(keys)
return input(str(prompt))
@staticmethod
def getpass(prompt):
'''
Prompt for a password, with echo turned off.
'''
import getpass as Getpass
return Getpass.getpass(prompt=prompt)
io = IO
Input = default_input = io.Input
getpass = password_input = io.getpass
class Tuple:
'''
(Note That This is tuple of RX7 Module So it Has More Features!)\n
(This is Not Built-in immutable sequence.)\n
If no argument is given, the constructor returns an empty tuple.\n
    There is a *var argument so you can add as many objects as you need.\n
Any Built-in object is accepted. (Not tested on third-party objects.)\n
Beside built-in features of tuple, this supports:
+ You Can Add objects to your tuple now.
+ Also You Can Delete Them.
+ Replace Them.
+ Like lists, Tuple supports item assigning. ( tpl[2]='hello' )
(Tuple Unpacking is Supported.)
'''
#############################
def __init__(self,*var: Any, one_item=False):
if not one_item:
self.__content= tuple(var)
else:
self.__content=[]
for item in var:
for member in item:
self.__content.append(member)
self.__content= tuple(self.__content)
def __str__(self):
return str(self.__content)
def __repr__(self):
return str(self.__content)
#############################
#############################
def add(self,*var: Any):
'''
This will add var(s) to self.
'''
self.__content= tuple(list(self.__content)+[v for v in var])
#force= lambda tpl,*var: tuple(list(tpl)+[v for v in var])
force= add
def remove(self,*var: Any):
'''
It will remove var(s) from self.
'''
#lstv= [v for v in var if v in tpl]
lstt= list(self.__content)
for th in [v for v in var if v in self.__content]:
lstt.remove(th)
self.__content= tuple(lstt)
erase= remove
    def pop(self,index=-1):
        return pop(self.__content,index)
#############################
#############################
def replace(self, ind: Union[int,Any], var: Any):
'''
Replace self[ind] with var.
'''
tpl=list(self.__content)
if type(ind) == str:
ind= tpl.index(ind)
tpl[ind]=var
self.__content= tuple(tpl)
def __setitem__(self,index,value,replace=False):
if not replace:
tpl=list(self.__content)
if type(index) == str:
                index= tpl.index(index)
tpl.insert(index,value)
self.__content= tuple(tpl)
else:
self.replace(index,value)
def __getitem__(self,index):
return self.__content[index]
#############################
def __add__(self,other):
return self.__content + other
def __contains__(self,var):
return var in self.__content
#############################
#############################
def __bool__(self):
return bool(len(self.__content))
def __hash__(self):
return hash(self.__content)
def __len__(self):
return len(self.__content)
#############################
#############################
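# Example sketch for the Tuple wrapper defined above; the values are arbitrary placeholders.
def _demo_Tuple():
    t = Tuple(1, 2, 3)
    t.add(4, 5)             # contents become (1, 2, 3, 4, 5)
    t.remove(2)             # (1, 3, 4, 5)
    t.replace(0, 'one')     # ('one', 3, 4, 5)
    t[1] = 2                # inserts 2 at index 1 -> ('one', 2, 3, 4, 5)
    print(t, len(t), 4 in t)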
_ReqConErr = _requests.exceptions.ConnectionError
class Internet:
@staticmethod
def is_connected(website='http://x.com/'):
'''
        Check for an internet connection by trying to reach a web-site.
        ( Maybe you want to know why http://x.com/ is used as the default web-site:
          there is almost no extra content to load
          (compare the x.com and google.com html source code),
          which makes the check a lot faster.
        )
'''
try:
_urllib.request.urlopen(website)
return True
except:
return False
def connection_checker(func):
"""Decaorator Which Checks Internet Connection before calling a function
Parameters
----------
func : Function
function which you are going to check if
there is internet connection before call it
"""
def inside(*args,**kwargs):
if not internet.is_connected():
raise ConnectionError('No internet connection') from None
return func(*args,**kwargs)
return inside
@staticmethod
def ip_global() -> str:
"""
Return your global ip by http://ipinfo.io/ip api.
"""
new_session = _requests.session()
response = new_session.get("http://ipinfo.io/ip")
ip_list = _re.findall(r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}", response.text)
new_session.close()
return ip_list[0]
@staticmethod
def ip_local() -> str:
"""
Return local ip of computer in windows by _socket. module
and in linux with hostname command in shell.
"""
#return [l for l in ([ip for ip in _socket.gethostbyname_ex(_socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [_socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
'''
s = _socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
'''
import platform
class NetworkError(Exception):
def __init__(self, message): super().__init__(message)
try:
ip = _socket.gethostbyname(_socket.gethostname())
if ip and ip not in ("127.0.1.1","127.0.0.1"):
return ip
elif platform.system() != "Windows":
command = _subprocess.Popen(["hostname", "-I"],stdout=_subprocess.PIPE,stderr=_subprocess.PIPE,stdin=_subprocess.PIPE,shell=False)
response = list(command.communicate())
if len(response[0]) > 0:
return str(response[0])[2:-4]
raise NetworkError('No Network Connection')
raise NetworkError('No Network Connection')
except:
raise
@staticmethod
def url_exists(URL) -> bool:
'''
check if url exists (with 'requests' module)
(NEED HTTP[S])
'''
try:
request = _requests.get(URL)
except _ReqConErr:
raise ConnectionError('No internet connection') from None
#print(response.status_code < 400)
if request.status_code == 200:
return True
else:
return False
@staticmethod
def ip_website(URL) -> str:
'''
get IP address of Web Site\n
(Without http[s])
'''
try:
return _socket.gethostbyname(URL)
except _socket.gaierror:
if internet.is_connected():
class NotExistsError(Exception):
def __init__(self):
super().__init__('URL Does Not Exists')
raise NotExistsError from None
else:
raise ConnectionError from None
@staticmethod
def url_links(URL) -> list:
'''
        Get all links that are used in a specific url
(All "a" tags from html source)
(Needs 'http[s]')
''' #html.parser
try:
soup= BeautifulSoup(_requests.get(URL).text,features="lxml")
LINKS= []
for link in soup.find_all('a'):
LINKS.append(link.get('href'))
return LINKS
except _ReqConErr:
raise ConnectionError('No internet connection') from None
@staticmethod
def find_urls(string) -> list:
'''
        find all urls in a string and return a list of them
(urls should start with http[s])
'''
url = _re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
return url
@staticmethod
def is_url(URL) -> bool:
'''
check if a string is url (WITH HTTP[S])
'''
search= _re.search('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', URL)
'(http[s]?://)?([Ww]{3}\.)?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
if search and len(search.group())==len(URL):
return True
else:
return False
@staticmethod
def open_browser(url,new_tab=True):
import webbrowser
if new_tab:
webbrowser.open_new_tab(url)
else:
webbrowser.open(url)
"""
@staticmethod
def whois(URL):
'''
return whois lookup of a website
(WITHOUT HTTPS)
'''
try:
import whois
WHO = whois.query(URL)
WHOIS = WHO.dict
return {i:WHOIS[i] for i in WHOIS}
except _socket.gaierror:
raise ConnectionError('No internet connection') from None
"""
internet = Internet
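# Example sketch for the Internet helpers above. 'example.com' is a placeholder host;
# every call here needs a live network connection.
def _demo_internet():
    if internet.is_connected():
        print(internet.ip_global())                         # public IP of this machine
        print(internet.ip_website('example.com'))           # IP behind a host name
        print(internet.url_exists('https://example.com'))   # True if it answers with 200
        print(internet.find_urls('see https://example.com/page for details'))
    else:
        print('offline')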
class DateTime:
_NOW= 0
_NOW_YEAR= 0
_NOW_MONTH= 0
_NOW_DAY= 0
_NOW_HOUR= -1
_NOW_MINUTE= -1
_NOW_SECOND= -1
def NOW():
_NOW= _time.localtime()
_NOW_YEAR= _NOW.tm_year
_NOW_MONTH= _NOW.tm_mon
_NOW_DAY= _NOW.tm_mday
_NOW_HOUR= _NOW.tm_hour
_NOW_MINUTE= _NOW.tm_min
_NOW_SECOND= _NOW.tm_sec
return _datetime.datetime(_NOW_YEAR,_NOW_MONTH,_NOW_DAY,_NOW_HOUR,_NOW_MINUTE,_NOW_SECOND)
now = NOW
def normalize(date=[],time=[]):
now = date_time.NOW()
try:
if not date[0]: date[0]= now.year
if type(date[1]) == str:
try:
date[1]= date_time.month_dic[date[1].lower()]
except KeyError:
raise ValueError("Wrong Month Name") from None
if not date[1]: date[1]= now.month
if not date[2]: date[2]= now.day
except IndexError:
pass
try:
            if time[0]<0: time[0]= now.hour
            if time[1]<0: time[1]= now.minute
            if time[2]<0: time[2]= now.second
except IndexError:
pass
return [date,time]
Weekday_Names= ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
month_lst= ['january','february','march','april','may','june',
'july','august','september','october','november','december']
    month_dic= {month:month_nom for month_nom,month in enumerate(month_lst,1)}
def __init__(self,year=_NOW_YEAR,month=_NOW_MONTH,day=_NOW_DAY,hour=_NOW_HOUR,minute=_NOW_MINUTE,second=_NOW_SECOND,first_week_day=0):
'''
.: Working With Date and Time :.
- Include Both Static Methods and Class Methods
- Get NOW Time
- Show in Calendar
- Next and Previous Months in Calendar
- Determine Time Passed From Specific Date
- Calendar Supports Setting First Day of the Week
'''
"""
Now = date_time.NOW()
if not year : year=Now.year
if not month: month=Now.month
if not day : day=Now.day
if hour<0 : hour=Now.hour
if minute<0 : minute=Now.minute
if second<0 : second=Now.second
"""
_norm = date_time.normalize([year,month,day],[hour,minute,second])
year,month,day = _norm[0]
hour,minute,second = _norm[1]
if type(month)==str:
try:
month= date_time.month_dic[month.lower()]
except KeyError:
raise ValueError("Wrong Month Name") from None
self.date= _datetime.date(year,month,day)
self.year=year; self.month=month; self.day=day
self.time= (hour,minute,second)
self.hour=hour; self.minute=minute; self.second=second
self.weekday= date_time.get_weekday(self.year,self.month,self.day)
self.weekday_name= date_time.get_weekday(self.year,self.month,self.day,True)
self.week_nom= date_time.get_weeknom(self.year,self.month,self.day)
#self.first_week_day= first_week_day
_calendar.setfirstweekday(first_week_day)
self.calendar= str(_calendar.month(year, month)).replace(str(day),style(str(day),'green').content)
self.calendar_month= str(_calendar.month(year, month))
self.calendar_year_all=str(_calendar.calendar(year))
self.calendar_year= [_calendar.month(year,i) for i in range(1,13)]
self.calendar_next_all= [_calendar.month(year,i) for i in range(self.month+1,13)]
self.calendar_prev_all= [_calendar.month(year,i) for i in range(1,self.month)]
self.calendar_position_next_year= str(_calendar.month(year+1, month)).replace(str(day),style(str(day),'green').content)
self.calendar_position_prev_year= str(_calendar.month(year-1, month)).replace(str(day),style(str(day),'green').content)
def setfirstweekday(self,day):
if type(day)==int and day<7:
date_time.Weekday_Names= date_time.Weekday_Names[day:]+date_time.Weekday_Names[:day]
elif type(day)==str:
day= date_time.Weekday_Names.index(day)
date_time.Weekday_Names= date_time.Weekday_Names[day:]+date_time.Weekday_Names[:day]
else:
if type(day)==int:
                raise ValueError('Invalid Number. Day number should be in range(7)')
else:
raise TypeError(f"Inappropriate Type For 'day'. day can be 'str' or 'int' not {type(day)}")
_calendar.setfirstweekday(day)
        self.calendar= str(_calendar.month(self.year, self.month)).replace(str(self.day),style(str(self.day),'green').content)
self.calendar_month= str(_calendar.month(self.year, self.month))
self.calendar_year_all=str(_calendar.calendar(self.year))
self.calendar_year= [_calendar.month(self.year,i) for i in range(1,13)]
self.calendar_next_all= [_calendar.month(self.year,i) for i in range(self.month+1,13)]
self.calendar_prev_all= [_calendar.month(self.year,i) for i in range(1,self.month)]
        self.calendar_position_next_year= str(_calendar.month(self.year+1, self.month)).replace(str(self.day),style(str(self.day),'green').content)
        self.calendar_position_prev_year= str(_calendar.month(self.year-1, self.month)).replace(str(self.day),style(str(self.day),'green').content)
self.weekday= date_time.get_weekday(self.year,self.month,self.day)
self.weekday_name= date_time.get_weekday(self.year,self.month,self.day,True)
self.week_nom= date_time.get_weeknom(self.year,self.month,self.day)
@staticmethod
def today():
dt = date_time.NOW()
return (dt.year,dt.month,dt.day)
@staticmethod
def calender_year(year=_NOW_YEAR):
if not year: year=date_time.NOW().year
return [_calendar.month(year,i) for i in range(1,13)]
@staticmethod
def calendar_month_st(month=_NOW_MONTH,year=_NOW_YEAR,day=0):
year,month = date_time.normalize([year,month])[0]
if not day:
return str(_calendar.month(year, month))
else:
return str(_calendar.month(year, month)).replace(str(day),style(str(day),'green').content)
@staticmethod
def passed_date(f_date,l_date=_NOW,return_time='day'):
if not l_date: l_date=date_time.NOW()
f_date = _datetime.datetime(*f_date)
return_time= return_time.lower()
if return_time in ('day','month','year','hour','minute','second'):
DELTA= l_date - f_date
if return_time == 'year':
try:
_return = _re.search(r'(?P<X>(-)?\w+) day',str(DELTA/365)).group('X')
                except AttributeError:
_return = None
#_return = str(DELTA/365)
elif return_time == 'month':
_return = _re.search(r'\w+',str(DELTA/30)).group()
elif return_time == 'day':
_return = str(DELTA)[:-14]
elif return_time =='hour':
_return = str(DELTA*24)[:-14]
elif return_time == 'minute':
_return = str(DELTA*1440)[:-14]
elif return_time == 'second':
                _return = str(DELTA*86400)[:-14]  # 86400 seconds per day
if _return: return _return
else: return 0
else:
raise ValueError("return_time should be in ('year', 'month', 'day', 'hour', 'minute', 'second')")
passed_time = passed_date
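    # Worked example (sketch, not a doctest): passed_date scales one timedelta into the requested unit.
    # For f_date=(2021, 1, 1) and an l_date 10 days later:
    #   return_time='day'    -> '10'      (the raw timedelta, in days)
    #   return_time='hour'   -> DELTA*24    -> '240'
    #   return_time='minute' -> DELTA*1440  -> '14400'
    #   return_time='second' -> DELTA*86400 -> '864000'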
'''@staticmethod
def passed_time(year=1970,month=1,day=1,hour=0,minute=0,second=0,return_time='second'):
pass'''
@staticmethod
def convert_epoch_to_local(second=_time.time()):
return _time.ctime(second)
@staticmethod
def get_weekday(year=_NOW_YEAR,month=_NOW_MONTH,day=_NOW_DAY,return_name=False):
"""
        First day is Monday and the numbers start from 0
"""
year,month,day = date_time.normalize([year,month,day])[0]
if return_name:
return date_time.Weekday_Names[_datetime.date(year,month,day).weekday()]
else:
return _datetime.date(year,month,day).weekday()
@staticmethod
def get_weeknom(year=_NOW_YEAR,month=_NOW_MONTH,day=_NOW_DAY):
"""
Returns 53 if First week is from last year
"""
year,month,day = date_time.normalize([year,month,day])[0]
return _datetime.date(year,month,day).isocalendar()[1]
@staticmethod
def calendar_show_week(week_nom,year=_NOW_YEAR):
year = date_time.normalize([year])[0][0]
week= week_nom
for i in list(range(1,8))[::-1]:
if date_time.get_weeknom(year,1,i)==1:
FIRST_WEEK_DAYS= len(list(range(i)))
break
day= (week-1)*7 - (6-FIRST_WEEK_DAYS)
mnth= 1
true=False
while not true:
try:
                if _calendar.monthrange(year,mnth)[1]<day:
                    day-= _calendar.monthrange(year,mnth)[1]
                    mnth+=1
else:
true= True
except _calendar.IllegalMonthError:
class BadWeekNumber(Exception):
def __init__(self, message='Week Number is Higher Than Year Weeks.'): super().__init__(message)
raise BadWeekNumber from None
new= date_time(year,mnth,day)
cal= new.calendar_month.splitlines()
for item in cal:
if str(new.day) in item and item != cal[0]:
INDEX= cal.index(item);COLORED_WEEK= style(item,'green');break
WEEK_WITH_COLOR= '\n'.join(cal[:INDEX]+[str(COLORED_WEEK)]+cal[INDEX+1:])
return WEEK_WITH_COLOR
@staticmethod
def get_year():
return _time.localtime().tm_year
@staticmethod
def get_month():
return _time.localtime().tm_mon
@staticmethod
def get_day_of_month():
return _time.localtime().tm_mday
@staticmethod
def get_day_of_week():
return _time.localtime().tm_wday
@staticmethod
def get_day_of_year():
return _time.localtime().tm_yday
@staticmethod
def get_hour():
return _time.localtime().tm_hour
@staticmethod
def get_minute():
return _time.localtime().tm_min
@staticmethod
def get_second():
return _time.localtime().tm_sec
date_time = DateTime
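# Usage sketch (illustrative only; the argument values below are made up, not part of the API):
# >>> dt = date_time(2021, 3, 14)
# >>> dt.weekday_name                                   # e.g. 'Sunday'
# >>> dt.week_nom                                       # ISO week number of that date
# >>> date_time.passed_date((2021, 1, 1), return_time='day')   # days elapsed since Jan 1st 2021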
_Auto = 0
class _Lang:
class Constant:
def __new__(cls,*args,array=True):
cls._init = False
return super(_Lang.Constant, cls).__new__(cls)
def __init__(self,*args,array=True):
'''
if array:
self.__members = args
else:
if len(args) > 1:
raise ValueError
self.__members = args[0]
'''
self.__members = args
self._init = True
def __str__(self):
#if len(self.__members) > 1:
return '<'+str(self.__members)[1:-1]+'>' #‹›
#return self.__members
def __repr__(self):
return '<'+str(self.__members)[1:-1]+'>'
def __setattr__(self,_attr,value):
if self._init:
raise AttributeError(f"'Constant' object does not support item assignment")
else:
super(_Lang.Constant,self).__setattr__(_attr,value)
def __getitem__(self,index):
return self.__members[index]
def __contains__(self,obj):
return obj in self.__members
def __bool__(self):
return bool(len(self.__members))
#'''
def __hash__(self):
return hash(tuple(['Constant',len(self)]+list(self.__members)))
#'''
def __len__(self):
#if type(self.__members) == tuple:
return len(self.__members)
def _dict_getter(self):
raise AttributeError("Conatant object has no attribute '__dict__'")
#return {}
__dict__ = property(_dict_getter)
def __dir__(self):
ret = list(super().__dir__())#[:-2]
ret.remove('_init')
ret.remove('_dict_getter')
return ret
const = Const = constant = Constant
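    # Minimal sketch of the intended Constant behaviour (illustrative, not a doctest):
    # >>> c = _Lang.Constant(1, 2, 3)
    # >>> c[0]                 # 1
    # >>> 2 in c               # True
    # >>> c.x = 5              # AttributeError: assignment is blocked once __init__ has run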
class Array:
# Sized Array
__Type_Error = "Array of type '{}' does not accept object with type '{}'"
def __init__(self,*args,type_=_Auto,size=_Auto):
self.__members = []
if type_:
self.__TYPE = type_
else:
self.__TYPE = type(args[0])
self.__TYPE_NAME = self.__TYPE.__name__
if size:
self.__SIZE = size
else:
self.__SIZE = len(args)
for obj in args:
if type(obj) == self.__TYPE:
self.__members.append(obj)
else:
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
def __str__(self):
return '{'+str(self.__members)[1:-1]+'}' #‹›
def __repr__(self):
return '{'+str(self.__members)[1:-1]+'}'
def __getitem__(self,index):
return self.__members[index]
def __contains__(self,obj):
return obj in self.__members
def __bool__(self):
return bool(len(self.__members))
def __len__(self):
return len(self.__members)
def __setitem__(self,index,obj):
if type(obj) == self.__TYPE:
                self.__members[index] = obj
return
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
def insert(self,index,obj):
if type(obj) == self.__TYPE:
self.__members.insert(index,obj)
return
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
def append(self,obj):
if type(obj) == self.__TYPE:
self.__members.append(obj)
return
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
add = append
def remove(self,obj):
self.__members.remove(obj)
def pop(self,index=-1):
self.__members.pop(index)
array = Array
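    # Minimal sketch of the typed Array (illustrative, not a doctest):
    # >>> a = _Lang.Array(1, 2, 3)      # element type inferred from the first argument
    # >>> a.append(4)                   # accepted, same type
    # >>> a.append('x')                 # ValueError: Array of type 'int' does not accept 'str'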
class Types:
Str = str
Int = int
Float = float
Set = set
Tuple = tuple
Dict = dict
List = list
Bool = bool
Bytes = bytes
Class = type
Type = type
Object = object
Lambda = type(lambda: None)
Function = Lambda #type(lambda: None)
#Constant = type(_Lang.Constant(1))
#Array = type(_Lang.Array(1,1))
Any = type#_typing.Any
Callable = _typing.Callable
Container = _typing.Container
Generator = Lambda #type(_f) #Not Built-in(s) #_types.GeneratorType || _typing.Generator
Iterable = _typing.Iterable
Iterator = _typing.Iterator
NoReturn = _typing.NoReturn
Optional = _typing.Optional
BuiltinFunction = type(len)
BuiltinMethod = type([].append)
Module = type(_typing)
Method = type(globals()['Tuple']().force)
#Mapping = _typing.Mapping
#OrderedDict = _typing.OrderedDict
#Text = str
#Union = _typing.Union
#_types.AsyncGeneratorType
types = Types
#setattr(_Lang,'Const',type(_Lang.Constant(1)))
#setattr(_Lang,'Array',type(_Lang.Array(1,1)))
#END
|
'''
Excited States software: qFit 3.0
Contributors: Saulo H. P. de Oliveira, Gydo van Zundert, and Henry van den Bedem.
Contact: vdbedem@stanford.edu
Copyright (C) 2009-2019 Stanford University
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
This entire text, including the above copyright notice and this permission notice
shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
'''
# ElementList=[
# " H", "HE",
# "LI", "BE", " B", " C", " N", " O", " F", "NE",
# "NA", "MG", "AL", "SI", " P", " S", "CL", "AR",
# " K", "CA",
# "SC", "TI", " V", "CR", "MN", "FE",
# "CO", "NI", "CU", "ZN",
# "GA", "GE", "AS", "SE", "BR", "KR",
# "RB", "SR",
# " Y", "ZR", "NB", "MO", "TC", "RU",
# "RH", "PD", "AG", "CD",
# "IN", "SN", "SB", "TE", " I", "XE",
# "CS", "BA",
# "LA", "CE", "PR", "ND", "PM", "SM", "EU",
# "GD", "TB", "DY", "HO", "ER", "TM", "YB",
# "LU", "HF", "TA", " W", "RE", "OS",
# "IR", "PT", "AU", "HG",
# "TL", "PB", "BI", "PO", "AT", "RN",
# "FR", "RA",
# "AC", "TH", "PA", " U", "NP", "PU", "AM",
# "CM", "BK", "CF", "ES", "FM", "MD", "NO",
# "LR", "RF", "DB", "SG", "BH", "HS",
# "MT", "UN", "UU", "UB",
# "UQ", "UH", "UO",
# " D", "AN"
# ]
#
#
# VanderWaalsRadiiList = [
# 1.20, 1.40,
# 1.82, 1.78, 1.74, 1.70, 1.55, 1.52, 1.47, 1.54,
# 2.27, 1.73, 1.80, 2.10, 1.80, 1.80, 1.75, 1.88,
# 2.75, 2.65,
# 2.55, 2.45, 2.35, 2.20, 1.73, 1.90,
# 1.75, 1.63, 1.40, 1.39,
# 1.87, 1.86, 1.85, 1.90, 1.85, 2.02,
# 2.75, 2.65,
# 2.55, 2.45, 2.35, 2.20, 2.05, 1.90,
# 1.75, 1.63, 1.72, 1.58,
# 1.93, 2.17, 2.10, 2.06, 1.98, 2.16,
# 2.75, 2.75,
# 2.75, 2.75, 2.75, 2.75, 2.75, 2.75, 2.75,
# 2.75, 2.75, 2.75, 2.75, 2.75, 2.65, 2.55,
# 2.45, 2.35, 2.25, 2.15, 2.05, 1.95,
# 1.85, 1.75, 1.66, 1.55,
# 1.96, 2.02, 2.00, 2.00, 2.00, 2.00,
# 2.75, 2.75,
# 2.50, 2.25, 1.95, 1.86, 1.80, 1.80, 1.80,
# 1.80, 1.80, 1.80, 1.80, 1.80, 1.80, 1.80,
# 1.80, 1.80, 1.80, 1.80, 1.80, 1.80,
# 1.80, 1.80, 1.80, 1.80,
# 1.80, 1.80, 1.80,
# 1.30, 1.50
# ]
# print("vdwRadiiTable={")
# for ele,radius in zip(ElementList,VanderWaalsRadiiList):
# print(f"\'{ele}\':{radius},")
# print("}")
vdwRadiiTable={
'H':1.2,
'HE':1.4,
'LI':1.82,
'BE':1.78,
'B':1.74,
'C':1.7,
'N':1.55,
'O':1.52,
'F':1.47,
'NE':1.54,
'NA':2.27,
'MG':1.73,
'AL':1.8,
'SI':2.1,
'P':1.8,
'S':1.8,
'CL':1.75,
'AR':1.88,
'K':2.75,
'CA':2.65,
'SC':2.55,
'TI':2.45,
'V':2.35,
'CR':2.2,
'MN':1.73,
'FE':1.9,
'CO':1.75,
'NI':1.63,
'CU':1.4,
'ZN':1.39,
'GA':1.87,
'GE':1.86,
'AS':1.85,
'SE':1.9,
'BR':1.85,
'KR':2.02,
'RB':2.75,
'SR':2.65,
'Y':2.55,
'ZR':2.45,
'NB':2.35,
'MO':2.2,
'TC':2.05,
'RU':1.9,
'RH':1.75,
'PD':1.63,
'AG':1.72,
'CD':1.58,
'IN':1.93,
'SN':2.17,
'SB':2.1,
'TE':2.06,
'I':1.98,
'XE':2.16,
'CS':2.75,
'BA':2.75,
'LA':2.75,
'CE':2.75,
'PR':2.75,
'ND':2.75,
'PM':2.75,
'SM':2.75,
'EU':2.75,
'GD':2.75,
'TB':2.75,
'DY':2.75,
'HO':2.75,
'ER':2.75,
'TM':2.65,
'YB':2.55,
'LU':2.45,
'HF':2.35,
'TA':2.25,
'W':2.15,
'RE':2.05,
'OS':1.95,
'IR':1.85,
'PT':1.75,
'AU':1.66,
'HG':1.55,
'TL':1.96,
'PB':2.02,
'BI':2.0,
'PO':2.0,
'AT':2.0,
'RN':2.0,
'FR':2.75,
'RA':2.75,
'AC':2.5,
'TH':2.25,
'PA':1.95,
'U':1.86,
'NP':1.8,
'PU':1.8,
'AM':1.8,
'CM':1.8,
'BK':1.8,
'CF':1.8,
'ES':1.8,
'FM':1.8,
'MD':1.8,
'NO':1.8,
'LR':1.8,
'RF':1.8,
'DB':1.8,
'SG':1.8,
'BH':1.8,
'HS':1.8,
'MT':1.8,
'UN':1.8,
'UU':1.8,
'UB':1.8,
'UQ':1.8,
'UH':1.8,
'UO':1.8,
'D':1.3,
'AN':1.5,
}
# from xml.dom import minidom
#
# # parse an xml file by name
# mydoc = minidom.parse('/data/sauloho/qfit/qfit-3.0/qfit/epsilon.xml')
#
# firsts = mydoc.getElementsByTagName('first')
# seconds = mydoc.getElementsByTagName('second')
# epsilons = mydoc.getElementsByTagName('epsilon')
#
# print("EpsilonTable={")
# for first,second,epsilon in zip(firsts,seconds,epsilons):
# print(f"\'{first.firstChild.data}\":{{\"{second.firstChild.data}\':{epsilon.firstChild.data}}},")
# print("}")
EpsilonTable={
'C':{'C':0.150, 'N':0.155, 'O':0.173, 'S':0.173, 'H':0.055},
'N':{'C':0.155, 'N':0.160, 'O':0.179, 'S':0.179, 'H':0.057},
'O':{'C':0.173, 'N':0.179, 'O':0.200, 'S':0.200, 'H':0.063},
'S':{'C':0.173, 'N':0.179, 'O':0.200, 'S':0.200, 'H':0.063},
'H':{'C':0.055, 'N':0.057, 'O':0.063, 'S':0.063, 'H':0.020},
}
EpsilonIndex = ["H", "C", "N", "O", "S"]
EpsilonArray = [[0.020, 0.055, 0.057, 0.063, 0.063], # H
[0.055, 0.150, 0.155, 0.173, 0.173], # C
[0.057, 0.155, 0.160, 0.179, 0.179], # N
[0.063, 0.173, 0.179, 0.200, 0.200], # O
[0.063, 0.173, 0.179, 0.200, 0.200]] # S
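# Example lookups (sketch, values taken from the tables above):
# EpsilonTable['C']['N']                                           -> 0.155
# EpsilonArray[EpsilonIndex.index('C')][EpsilonIndex.index('N')]   -> 0.155
# vdwRadiiTable['C'] + vdwRadiiTable['N']                          -> 3.25 (sum of the two vdW radii)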
| '''
Excited States software: qFit 3.0
Contributors: Saulo H. P. de Oliveira, Gydo van Zundert, and Henry van den Bedem.
Contact: vdbedem@stanford.edu
Copyright (C) 2009-2019 Stanford University
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
This entire text, including the above copyright notice and this permission notice
shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
'''
# ElementList=[
# " H", "HE",
# "LI", "BE", " B", " C", " N", " O", " F", "NE",
# "NA", "MG", "AL", "SI", " P", " S", "CL", "AR",
# " K", "CA",
# "SC", "TI", " V", "CR", "MN", "FE",
# "CO", "NI", "CU", "ZN",
# "GA", "GE", "AS", "SE", "BR", "KR",
# "RB", "SR",
# " Y", "ZR", "NB", "MO", "TC", "RU",
# "RH", "PD", "AG", "CD",
# "IN", "SN", "SB", "TE", " I", "XE",
# "CS", "BA",
# "LA", "CE", "PR", "ND", "PM", "SM", "EU",
# "GD", "TB", "DY", "HO", "ER", "TM", "YB",
# "LU", "HF", "TA", " W", "RE", "OS",
# "IR", "PT", "AU", "HG",
# "TL", "PB", "BI", "PO", "AT", "RN",
# "FR", "RA",
# "AC", "TH", "PA", " U", "NP", "PU", "AM",
# "CM", "BK", "CF", "ES", "FM", "MD", "NO",
# "LR", "RF", "DB", "SG", "BH", "HS",
# "MT", "UN", "UU", "UB",
# "UQ", "UH", "UO",
# " D", "AN"
# ]
#
#
# VanderWaalsRadiiList = [
# 1.20, 1.40,
# 1.82, 1.78, 1.74, 1.70, 1.55, 1.52, 1.47, 1.54,
# 2.27, 1.73, 1.80, 2.10, 1.80, 1.80, 1.75, 1.88,
# 2.75, 2.65,
# 2.55, 2.45, 2.35, 2.20, 1.73, 1.90,
# 1.75, 1.63, 1.40, 1.39,
# 1.87, 1.86, 1.85, 1.90, 1.85, 2.02,
# 2.75, 2.65,
# 2.55, 2.45, 2.35, 2.20, 2.05, 1.90,
# 1.75, 1.63, 1.72, 1.58,
# 1.93, 2.17, 2.10, 2.06, 1.98, 2.16,
# 2.75, 2.75,
# 2.75, 2.75, 2.75, 2.75, 2.75, 2.75, 2.75,
# 2.75, 2.75, 2.75, 2.75, 2.75, 2.65, 2.55,
# 2.45, 2.35, 2.25, 2.15, 2.05, 1.95,
# 1.85, 1.75, 1.66, 1.55,
# 1.96, 2.02, 2.00, 2.00, 2.00, 2.00,
# 2.75, 2.75,
# 2.50, 2.25, 1.95, 1.86, 1.80, 1.80, 1.80,
# 1.80, 1.80, 1.80, 1.80, 1.80, 1.80, 1.80,
# 1.80, 1.80, 1.80, 1.80, 1.80, 1.80,
# 1.80, 1.80, 1.80, 1.80,
# 1.80, 1.80, 1.80,
# 1.30, 1.50
# ]
# print("vdwRadiiTable={")
# for ele,radius in zip(ElementList,VanderWaalsRadiiList):
# print(f"\'{ele}\':{radius},")
# print("}")
vdwRadiiTable={
'H':1.2,
'HE':1.4,
'LI':1.82,
'BE':1.78,
'B':1.74,
'C':1.7,
'N':1.55,
'O':1.52,
'F':1.47,
'NE':1.54,
'NA':2.27,
'MG':1.73,
'AL':1.8,
'SI':2.1,
'P':1.8,
'S':1.8,
'CL':1.75,
'AR':1.88,
'K':2.75,
'CA':2.65,
'SC':2.55,
'TI':2.45,
'V':2.35,
'CR':2.2,
'MN':1.73,
'FE':1.9,
'CO':1.75,
'NI':1.63,
'CU':1.4,
'ZN':1.39,
'GA':1.87,
'GE':1.86,
'AS':1.85,
'SE':1.9,
'BR':1.85,
'KR':2.02,
'RB':2.75,
'SR':2.65,
'Y':2.55,
'ZR':2.45,
'NB':2.35,
'MO':2.2,
'TC':2.05,
'RU':1.9,
'RH':1.75,
'PD':1.63,
'AG':1.72,
'CD':1.58,
'IN':1.93,
'SN':2.17,
'SB':2.1,
'TE':2.06,
'I':1.98,
'XE':2.16,
'CS':2.75,
'BA':2.75,
'LA':2.75,
'CE':2.75,
'PR':2.75,
'ND':2.75,
'PM':2.75,
'SM':2.75,
'EU':2.75,
'GD':2.75,
'TB':2.75,
'DY':2.75,
'HO':2.75,
'ER':2.75,
'TM':2.65,
'YB':2.55,
'LU':2.45,
'HF':2.35,
'TA':2.25,
'W':2.15,
'RE':2.05,
'OS':1.95,
'IR':1.85,
'PT':1.75,
'AU':1.66,
'HG':1.55,
'TL':1.96,
'PB':2.02,
'BI':2.0,
'PO':2.0,
'AT':2.0,
'RN':2.0,
'FR':2.75,
'RA':2.75,
'AC':2.5,
'TH':2.25,
'PA':1.95,
'U':1.86,
'NP':1.8,
'PU':1.8,
'AM':1.8,
'CM':1.8,
'BK':1.8,
'CF':1.8,
'ES':1.8,
'FM':1.8,
'MD':1.8,
'NO':1.8,
'LR':1.8,
'RF':1.8,
'DB':1.8,
'SG':1.8,
'BH':1.8,
'HS':1.8,
'MT':1.8,
'UN':1.8,
'UU':1.8,
'UB':1.8,
'UQ':1.8,
'UH':1.8,
'UO':1.8,
'D':1.3,
'AN':1.5,
}
# from xml.dom import minidom
#
# # parse an xml file by name
# mydoc = minidom.parse('/data/sauloho/qfit/qfit-3.0/qfit/epsilon.xml')
#
# firsts = mydoc.getElementsByTagName('first')
# seconds = mydoc.getElementsByTagName('second')
# epsilons = mydoc.getElementsByTagName('epsilon')
#
# print("EpsilonTable={")
# for first,second,epsilon in zip(firsts,seconds,epsilons):
# print(f"\'{first.firstChild.data}\':{{\'{second.firstChild.data}\':{epsilon.firstChild.data}}},")
# print("}")
EpsilonTable={
'C':{'C':0.150, 'N':0.155, 'O':0.173, 'S':0.173, 'H':0.055},
'N':{'C':0.155, 'N':0.160, 'O':0.179, 'S':0.179, 'H':0.057},
'O':{'C':0.173, 'N':0.179, 'O':0.200, 'S':0.200, 'H':0.063},
'S':{'C':0.173, 'N':0.179, 'O':0.200, 'S':0.200, 'H':0.063},
'H':{'C':0.055, 'N':0.057, 'O':0.063, 'S':0.063, 'H':0.020},
}
EpsilonIndex = ["H", "C", "N", "O", "S"]
EpsilonArray = [[0.020, 0.055, 0.057, 0.063, 0.063], # H
[0.055, 0.150, 0.155, 0.173, 0.173], # C
[0.057, 0.155, 0.160, 0.179, 0.179], # N
[0.063, 0.173, 0.179, 0.200, 0.200], # O
[0.063, 0.173, 0.179, 0.200, 0.200]] # S
|
import hashlib
import sys
import getpass
import argparse
import rx7 as rx
from LIB.Functions import pause, cls
from LIB.Hash import sa
def print_hashes(word, file=None, Print=True):
word=bytes(word, encoding='utf-8')
LIST = []
for name,func in sa.items():
try:
result = func(word).hexdigest()
LIST.append(result)
if Print:
print(f' {name.upper()}:{' '*(10-len(name))}{result}')
except TypeError:
pass
if file:
        rx.write(str(file),'\n'.join(LIST))
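# Usage sketch (illustrative; 'out.txt' is just a made-up path):
# print_hashes('hello')                                # prints one line per algorithm in `sa`
# print_hashes('hello', file='out.txt', Print=False)   # only writes the hash list to out.txt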
BANNER = '''
88 88 db .dP"Y8 88 88
88 88 dPYb `Ybo." 88 88
888888 dP__Yb o.`Y8b 888888
88 88 dP""""Yb 8bodP' 88 88
dP""b8 888888 88b 88 888888 88""Yb db 888888 dP"Yb 88""Yb
dP `" 88__ 88Yb88 88__ 88__dP dPYb 88 dP Yb 88__dP
Yb "88 88"" 88 Y88 88"" 88"Yb dP__Yb 88 Yb dP 88"Yb
YboodP 888888 88 Y8 888888 88 Yb dP""""Yb 88 YbodP 88 Yb
'''
if __name__ == "__main__":
if len(sys.argv) > 1:
parser = argparse.ArgumentParser(
'Hash Generator',
description='Generate Hash of a word in all hash types',
allow_abbrev=False,
)
parser.add_argument('HASH',
help="Word which you want to get its hashes"
)
parser.add_argument('-f','--output-file',
metavar='FILE',
help='The file to save hashes of HASH to it'
)
parser.add_argument('-q','--quiet', action='store_false',
help='Run app in quiet mode (Do not print the hashes)'
)
args = parser.parse_args()
hashed_file_name = args.output_file
word = args.HASH
quiet = args.quiet
cls()
rx.style.print(BANNER, 'gold_3b')
        print(f'''Here is the list of hashes for "{rx.fg('dodger_blue_1')}{word}{rx.attr(0)}":''')
print_hashes(word, hashed_file_name, quiet)
else:
while True:
cls()
rx.style.print(BANNER, 'gold_3b')
print('Use: "HASH||FILE" to save output to FILE \n')
inp= input('Enter String to Create Hashes: ')
if inp=='exit':
break
elif inp:
if '||' in inp:
inp = inp.split('||')
                    print(f'''Here is the list of hashes for "{rx.fg('dodger_blue_1')}{inp[0]}{rx.attr(0)}":''')
print_hashes(inp[0],inp[1])
else:
                    print(f'''Here is the list of hashes for "{rx.fg('dodger_blue_1')}{inp}{rx.attr(0)}":''')
print_hashes(inp)
pause()
| import hashlib
import sys
import getpass
import argparse
import rx7 as rx
from LIB.Functions import pause, cls
from LIB.Hash import sa
def print_hashes(word, file=None, Print=True):
word=bytes(word, encoding='utf-8')
LIST = []
for name,func in sa.items():
try:
result = func(word).hexdigest()
LIST.append(result)
if Print:
print(f' {name.upper()}:{" "*(10-len(name))}{result}')
except TypeError:
pass
if file:
        rx.write(str(file),'\n'.join(LIST))
BANNER = '''
88 88 db .dP"Y8 88 88
88 88 dPYb `Ybo." 88 88
888888 dP__Yb o.`Y8b 888888
88 88 dP""""Yb 8bodP' 88 88
dP""b8 888888 88b 88 888888 88""Yb db 888888 dP"Yb 88""Yb
dP `" 88__ 88Yb88 88__ 88__dP dPYb 88 dP Yb 88__dP
Yb "88 88"" 88 Y88 88"" 88"Yb dP__Yb 88 Yb dP 88"Yb
YboodP 888888 88 Y8 888888 88 Yb dP""""Yb 88 YbodP 88 Yb
'''
if __name__ == "__main__":
if len(sys.argv) > 1:
parser = argparse.ArgumentParser(
'Hash Generator',
description='Generate Hash of a word in all hash types',
allow_abbrev=False,
)
parser.add_argument('HASH',
help="Word which you want to get its hashes"
)
parser.add_argument('-f','--output-file',
metavar='FILE',
help='The file to save hashes of HASH to it'
)
parser.add_argument('-q','--quiet', action='store_false',
help='Run app in quiet mode (Do not print the hashes)'
)
args = parser.parse_args()
hashed_file_name = args.output_file
word = args.HASH
quiet = args.quiet
cls()
rx.style.print(BANNER, 'gold_3b')
        print(f'''Here is the list of hashes for "{rx.fg('dodger_blue_1')}{word}{rx.attr(0)}":''')
print_hashes(word, hashed_file_name, quiet)
else:
while True:
cls()
rx.style.print(BANNER, 'gold_3b')
print('Use: "HASH||FILE" to save output to FILE \n')
inp= input('Enter String to Create Hashes: ')
if inp=='exit':
break
elif inp:
if '||' in inp:
inp = inp.split('||')
                    print(f'''Here is the list of hashes for "{rx.fg('dodger_blue_1')}{inp[0]}{rx.attr(0)}":''')
print_hashes(inp[0],inp[1])
else:
                    print(f'''Here is the list of hashes for "{rx.fg('dodger_blue_1')}{inp}{rx.attr(0)}":''')
print_hashes(inp)
pause()
|
#!/usr/bin/env python
# Copyright (c) 2021, Farid Rashidi Mehrabadi All rights reserved.
# ======================================================================================
# Author : Farid Rashidi Mehrabadi (farid.rashidimehrabadi@nih.gov)
# Last Update: Aug 14, 2020
# Description: cleaning
# ======================================================================================
import glob
def _is_ok(name):
file = open(name)
body = file.read()
file.close()
a = body.count("&& echo Done! )")
b = body.count("Done!\n")
if a == 0 and b == 1:
return True
else:
return a == b
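# Note (inferred from the counts above, so treat as an assumption): a log is considered
# complete when every "&& echo Done! )" marker in the submitted command has a matching
# "Done!" line in the captured output; a == 0 with a single "Done!" is the single-command case.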
def after01(config):
if config["isrna"]:
steps = [
"s01indexing",
"s02mapping",
"s03indexing",
"s04mapping",
"s05calling",
"s06jointcalling",
"s07merging",
"s08annotating",
"s09expressing",
"s10velocitying",
]
else:
steps = [
"s02mapping",
"s04mapping",
"s05calling",
"s06jointcalling",
"s07merging",
"s08annotating",
]
conds = {}
for cond in steps:
x = 0
for file in glob.glob(f"{config["tmpdir"]}/log/{cond}/*.o"):
if not _is_ok(file):
x += 1
conds[cond] = x
print(conds)
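# Usage sketch (illustrative; the config keys and path are assumptions based on the code above):
# after01({"isrna": False, "tmpdir": "/path/to/tmp"})
# -> prints a dict mapping each step name to the number of incomplete *.o log files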
| #!/usr/bin/env python
# Copyright (c) 2021, Farid Rashidi Mehrabadi All rights reserved.
# ======================================================================================
# Author : Farid Rashidi Mehrabadi (farid.rashidimehrabadi@nih.gov)
# Last Update: Aug 14, 2020
# Description: cleaning
# ======================================================================================
import glob
def _is_ok(name):
file = open(name)
body = file.read()
file.close()
a = body.count("&& echo Done! )")
b = body.count("Done!\n")
if a == 0 and b == 1:
return True
else:
return a == b
def after01(config):
if config["isrna"]:
steps = [
"s01indexing",
"s02mapping",
"s03indexing",
"s04mapping",
"s05calling",
"s06jointcalling",
"s07merging",
"s08annotating",
"s09expressing",
"s10velocitying",
]
else:
steps = [
"s02mapping",
"s04mapping",
"s05calling",
"s06jointcalling",
"s07merging",
"s08annotating",
]
conds = {}
for cond in steps:
x = 0
for file in glob.glob(f"{config['tmpdir']}/log/{cond}/*.o"):
if not _is_ok(file):
x += 1
conds[cond] = x
print(conds)
|
"""Collection of admin utility functions"""
import os
import sys
import logging
from nvp.nvp_component import NVPComponent
from nvp.nvp_context import NVPContext
logger = logging.getLogger(__name__)
# Default .editorconfig content:
DEFAULT_EDITORCONFIG_CONTENT = """# Autogenerated .editorconfig file
# Update as needed.
root = true
[*]
end_of_line = lf
"""
# Default .gitignore content:
DEFAULT_GITIGNORE_CONTENT = """# Ignore python compiled files:
*.pyc
# Ignore .vs_env file:
.vs_env
# Ignore visual studio code actual settings file:
.vscode/settings.json
# Ignore log files:
*.log
"""
# Default python .env content:
DEFAULT_PYTHONENV_CONTENT = """# Autogenerated .vs_env file
# Update as needed.
PYTHONPATH=.${SEP}${NVP_ROOT_DIR}
"""
# Default nvp_config.json content:
DEFAULT_NVPCONFIG_CONTENT = """/* NVP project configuration file */
{
// Add config entries as needed here.
}
"""
# Default nvp_plug.py content:
DEFAULT_NVPPLUG_CONTENT = '''""" NVP plug entrypoint module for ${PROJ_NAME} """
import logging
from nvp.nvp_component import NVPComponent
from nvp.nvp_context import NVPContext
logger = logging.getLogger('${PROJ_NAME}')
def register_nvp_plugin(context, proj):
"""This function should register this plugin in the current NVP context"""
logger.info("Registering ${PROJ_NAME} NVP plugin.")
proj.register_component('${PROJ_NAME}', MyComponent(context))
class MyComponent(NVPComponent):
"""Example component class"""
def __init__(self, ctx: NVPContext):
"""Constructor for component"""
NVPComponent.__init__(self, ctx)
# define parsers and build required logic from here:
# desc = {
# "build": {"libs": None},
# }
# ctx.define_subparsers("main", desc)
# psr = ctx.get_parser('main.build')
# psr.add_argument("-c", "--compiler", dest='compiler_type', type=str,
# help="Specify which type of compiler should be selected")
'''
# Default .gitattributes content:
# cf. https://rehansaeed.com/gitattributes-best-practices/
###############################
# Git Large File System (LFS) #
###############################
# Could use 'filter=lfs diff=lfs merge=lfs ' below but not clear yet how to do that
# properly
DEFAULT_GITATTRIBUTES_CONTENT = """###############################
# Git Line Endings #
###############################
# Set default behaviour to automatically normalize line endings.
* text=auto
# Force batch scripts to always use CRLF line endings so that if a repo is accessed
# in Windows via a file share from Linux, the scripts will work.
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf
# Force bash scripts to always use LF line endings so that if a repo is accessed
# in Unix via a file share from Windows, the scripts will work.
*.sh text eol=lf
# Archives
*.7z -text
*.br -text
*.gz -text
*.tar -text
*.zip -text
# Documents
*.pdf -text
# Images
*.gif -text
*.ico -text
*.jpg -text
*.pdf -text
*.png -text
*.psd -text
*.webp -text
# Fonts
*.woff2 -text
# Other
*.exe -text
"""
DEFAULT_CLI_PY_CONTENT = '''""" Main command line interface module """
import argparse
# => Adapt the code below to be your application entrypoint.
parser = argparse.ArgumentParser()
args = parser.parse_args()
print("Should implement application logic here.")
'''
DEFAULT_CLI_SH_CONTENT = '''#!/bin/bash
# cf. https://stackoverflow.com/questions/59895/how-can-i-get-the-source-directory-of-a-bash-script-from-within-the-script-itsel
ROOT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
_${PROJ_NAME}_run_cli_windows() {
# On windows we should simply rely on the cli.bat script below:
ROOT_DIR="$(cygpath -w $ROOT_DIR)"
cmd /C "$ROOT_DIR\cli.bat" "$@"
}
_${PROJ_NAME}_run_cli_linux() {
local python_version="${PY_VERSION}"
# On linux we should call the python cli directly:
# Get the project root folder:
local root_dir=$(readlink -f $ROOT_DIR/)
# echo "Project root dir is: $root_dir"
# Check if we already have python:
local tools_dir=$root_dir/tools/linux
if [[ ! -d $tools_dir ]]; then
echo "Creating tools/linux folder..."
mkdir $tools_dir
fi
local python_dir=$tools_dir/python-$python_version
local python_path=$python_dir/bin/python3
if [[ ! -d $python_dir ]]; then
# Get the path to package:
local python_pkg=$root_dir/tools/packages/python-$python_version-linux.tar.xz
echo "Extracting $python_pkg..."
# $unzip_path x -o"$tools_dir" "$python_pkg" > /dev/null
pushd $tools_dir >/dev/null
tar xvJf $python_pkg
popd >/dev/null
# Once we have deployed the base python tool package we start with upgrading pip:
echo "Upgrading pip..."
$python_path -m pip install --upgrade pip
# Finally we install the python requirements:
echo "Installing python requirements..."
$python_path -m pip install -r $root_dir/tools/requirements.txt
fi
if [ "$1" == "--install-py-reqs" ]; then
echo "Installing python requirements..."
$python_path -m pip install -r $root_dir/tools/requirements.txt
elif [ "$1" == "python" ]; then
# shift the args by one:
shift
$python_path "$@"
elif [ "$1" == "pip" ]; then
# shift the args by one:
shift
$python_path -m pip "$@"
else
# Execute the command in python:
$python_path $root_dir/cli.py "$@"
fi
}
${PROJ_NAME}() {
if [ "$1" == "home" ]; then
# We simply go to the home of this project:
cd "$ROOT_DIR"
else
# Check if we are on a windows or a linux system:
pname=$(uname -s)
case $pname in
CYGWIN*)
_${PROJ_NAME}_run_cli_windows "$@"
;;
*)
_${PROJ_NAME}_run_cli_linux "$@"
;;
esac
fi
}
# cf. https://askubuntu.com/questions/141928/what-is-the-difference-between-bin-sh-and-bin-bash
(return 0 2>/dev/null) && sourced=1 || sourced=0
if [ "$sourced" == "0" ]; then
${PROJ_NAME} "$@"
else
echo "${PROJ_NAME} command loaded."
fi
'''
DEFAULT_CLI_BAT_CONTENT = '''
@echo off
SETLOCAL ENABLEDELAYEDEXPANSION
@REM Retrieve the current folder:
@REM cli script is located directly in the root, so we don't need the '..' in path:
@REM cd /D %~dp0..
cd /D %~dp0
FOR /F %%i IN (".") DO set ${PROJ_NAME}_ROOT_DIR=%%~fi
set ${PROJ_NAME}_DIR=%${PROJ_NAME}_ROOT_DIR%
@REM echo Using NervProj root folder: %${PROJ_NAME}_DIR%
@REM Extract the python env if needed:
set py_vers=${PY_VERSION}
set TOOLS_DIR=%${PROJ_NAME}_DIR%\\tools\\windows\\
set UNZIP=%TOOLS_DIR%\\7zip-${ZIP_VERSION}\\7za.exe
set PYTHON=%TOOLS_DIR%\\python-%py_vers%\\python.exe
@REM Check if python is extracted already:
if not exist "%PYTHON%" (
echo Extracting python tool...
%UNZIP% x -o"%TOOLS_DIR%" "%${PROJ_NAME}_DIR%\\tools\\packages\\python-%py_vers%-windows.7z" > nul
@REM Upgrade pip:
%PYTHON% -m pip install --upgrade pip
@REM Install requirements:
%PYTHON% -m pip install -r %${PROJ_NAME}_DIR%\\tools\\requirements.txt
)
@REM check if the first argument is "--install-py-reqs"
IF /i "%~1" == "--install-py-reqs" goto install_reqs
IF /i "%~1" == "python" goto run_python
IF /i "%~1" == "pip" goto run_pip
%PYTHON% %NERVHOME_DIR%\cli.py %*
goto common_exit
:install_reqs
%PYTHON% -m pip install -r %NERVHOME_DIR%\tools\requirements.txt
goto common_exit
@REM cannot rely on %* when we use shift below:
:run_python
shift
%PYTHON% %1 %2 %3 %4 %5 %6 %7 %8 %9
goto common_exit
:run_pip
shift
%PYTHON% -m pip %1 %2 %3 %4 %5 %6 %7 %8 %9
goto common_exit
:common_exit
'''
def register_component(ctx: NVPContext):
"""Register this component in the given context"""
comp = AdminManager(ctx)
ctx.register_component('admin', comp)
class AdminManager(NVPComponent):
"""Admin command manager class"""
def __init__(self, ctx: NVPContext):
"""Admin commands manager constructor"""
NVPComponent.__init__(self, ctx)
# # Check the value of the sub command:
# sub_cmd = self.settings['l1_cmd']
# if sub_cmd == 'install-cli':
# self.install_cli()
desc = {
"admin": {
"install": {"cli": None, "reqs": None, "repo": None},
"init": None,
}
}
ctx.define_subparsers("main", desc)
psr = ctx.get_parser('main.admin.init')
psr.add_argument("-p", "--with-py-env", dest="with_py_env", action="store_true",
help="Request deployment of a full python environment.")
def install_cli(self):
"""Install a CLI script in .bashrc if application"""
# Check if an $HOME folder is provider:
home_dir = os.getenv('HOME')
if home_dir is None:
logger.error("Cannot install cli alias: no $HOME environment variable detected.")
return
logger.info("Home folder is: %s", home_dir)
# Check if we have a .bashrc file in that folder:
bashrc_file = self.get_path(home_dir, ".bashrc")
if not self.file_exists(bashrc_file):
logger.warning("Cannot install cli alias: no .bashrc file in HOME folder.")
return
script_path = self.get_path(self.ctx.get_root_dir(), "cli.sh")
# If we are on windows, we may want to convert this path to a cygwin path
# if we are in a cygwin environment (but running the native python executable):
if self.is_windows:
script_path = self.to_cygwin_path(script_path)
assert script_path is not None, "Invalid cygwin environment."
sline = f"\n[ -f \"{script_path}\" ] && source \"{script_path}\"\n"
# Check if this string is already in the bashrc file:
content = self.read_text_file(bashrc_file)
if content.find(sline) == -1:
# We should add the string:
logger.info("Adding source file in .bashrc for NervProj")
# Make a backup of the file:
self.copy_file(bashrc_file, bashrc_file+".bak", force=True)
self.write_text_file(content+sline, bashrc_file, newline='\n')
else:
logger.info("NervProj setup file already referenced in .bashrc")
# pp = pprint.PrettyPrinter(indent=2)
# res = pp.pformat(dict(os.environ))
# logger.info("Current environment is: %s", res)
def install_python_requirements(self):
"""Install the requirements for the main python environment using pip"""
logger.info("Installing python requirements...")
reqfile = self.get_path(self.ctx.get_root_dir(), "tools/requirements.txt")
cmd = [sys.executable, "-m", "pip", "install", "-r", reqfile]
# logger.info("Executing command: %s", cmd)
self.execute(cmd)
logger.info("Done installing python requirements.")
def install_repository_bootstrap(self):
"""Install the bootstraped repository for this NervProj folder if not present already."""
base_dir = self.ctx.get_root_dir()
if self.dir_exists(base_dir, ".git"):
logger.info(".git folder already exists, bootstrapping ignored.")
return
# We need to bootstrap in a temp folder:
git = self.get_component('git')
url = self.config["repository_url"]
dest_dir = self.get_path(base_dir, "temp", "nervproj")
logger.info("Cloning NervProj folder into %s...", dest_dir)
git.clone_repository(url, dest_dir)
# When cloning is done we should move the .git folder from the clone location into our root
self.move_path(self.get_path(dest_dir, ".git"), self.get_path(base_dir, ".git"))
# And finally we remove the remaining files:
self.remove_folder(dest_dir)
logger.info("Done bootstrapping NervProj project.")
def setup_global_vscode_config(self, config_dir=None):
"""Setup global Visual studio code user settings"""
if config_dir is None:
# * on windows: in C:/Users/kenshin/AppData/Roaming/Code/User/settings.json
# => should use os.getenv('APPDATA')
# * on linux: in /home/kenshin/.config/Code/User/settings.json
if self.is_windows:
base_dir = os.getenv("APPDATA")
else:
base_dir = self.get_path(self.ctx.get_home_dir(), ".config")
config_dir = self.get_path(base_dir, "Code", "User")
cfg_file = self.get_path(config_dir, "settings.json")
config = {}
ref_config = None
if not self.file_exists(cfg_file):
# Ensure the folder exists:
self.make_folder(config_dir)
else:
# Read the config:
config = self.read_json(cfg_file)
# Keep a copy to compare the changes:
ref_config = self.read_json(cfg_file)
# Now write the changes we want:
tools = self.get_component('tools')
config["git.path"] = tools.get_git_path()
config["python.linting.pylintEnabled"] = True
config["python.linting.enabled"] = True
config["python.linting.pylintPath"] = tools.get_tool_path('pylint')
config["python.linting.pylintArgs"] = [
"--max-line-length=120",
"--good-names=i,j,k,ex,Run,_,x,y,z,w,t,dt",
"--good-names-rgxs=[a-z][0-9]$"]
config["python.defaultInterpreterPath"] = tools.get_tool_path('python')
config["python.formatting.autopep8Path"] = tools.get_tool_path("autopep8")
config["python.formatting.provider"] = "autopep8"
config["python.formatting.autopep8Args"] = ["--max-line-length=120", "--experimental"]
config["editor.formatOnSave"] = True
config["cmakeFormat.exePath"] = tools.get_tool_path("cmake_format")
if ref_config is None or config != ref_config:
logger.info("Wrtting updated vscode settings in %s", cfg_file)
self.write_json(config, cfg_file)
else:
logger.info("No change in %s", cfg_file)
def init_project_config(self, proj_dir, proj_name):
"""Setup initial project local config elements"""
config_dir = self.get_path(proj_dir, ".vscode")
cfg_file = self.get_path(config_dir, "settings.template.json")
self.make_folder(config_dir)
config = {}
ref_config = None
# Check if we should provide a python environment in this project:
with_py = self.get_param("with_py_env", False)
if with_py:
logger.info("Setting up dedicated python env for %s", proj_name)
if self.file_exists(cfg_file):
# Read the config:
config = self.read_json(cfg_file)
# Keep a copy to compare the changes:
ref_config = self.read_json(cfg_file)
config["python.envFile"] = "${workspaceFolder}/.vs_env"
ignore_elems = []
if with_py:
# We deploy the python packages:
dest_dir = self.get_path(proj_dir, "tools", "packages")
self.make_folder(dest_dir)
# get the python version on windows:
py_vers = {}
sevenzip_vers = {}
for plat_name in ["windows", "linux"]:
for el in self.config[f'{plat_name}_tools']:
if el["name"] == 'python':
py_vers[plat_name] = el["version"]
if el["name"] == '7zip':
sevenzip_vers[plat_name] = el["version"]
for plat_name, py_version in py_vers.items():
for ext in [".7z", ".tar.xz"]:
file_name = f"python-{py_version}-{plat_name}{ext}"
src_file = self.get_path(self.ctx.get_root_dir(), "tools", "packages", file_name)
dst_file = self.get_path(dest_dir, file_name)
if self.file_exists(src_file) and not self.file_exists(dst_file):
logger.info("Adding package file %s", dst_file)
self.copy_file(src_file, dst_file)
# more updates to vscode settings if we have a dedicated python env:
cur_py_vers = py_vers[self.platform]
ext = ".exe" if self.is_windows else ""
config["python.linting.pylintEnabled"] = True
config["python.linting.enabled"] = True
config["python.linting.pylintPath"] = f"${{workspaceFolder}}/tools/{self.platform}/python-{cur_py_vers}/Scripts/pylint{ext}"
config["python.linting.pylintArgs"] = ["--max-line-length=120"]
config["python.defaultInterpreterPath"] = f"${{workspaceFolder}}/tools/{self.platform}/python-{cur_py_vers}/python{ext}"
config["python.formatting.autopep8Path"] = f"${{workspaceFolder}}/tools/{self.platform}/python-{cur_py_vers}/Scripts/autopep8{ext}"
config["python.formatting.provider"] = "autopep8"
config["python.formatting.autopep8Args"] = ["--max-line-length=120", "--experimental"]
# Next, for the windows part we need to deploy the 7zip package too:
folder_name = f"7zip-{sevenzip_vers["windows"]}"
src_folder = self.get_path(self.ctx.get_root_dir(), "tools", "windows", folder_name)
dst_folder = self.get_path(proj_dir, "tools", "windows", folder_name)
if not self.dir_exists(dst_folder):
logger.info("Adding windows 7zip package at %s", dst_folder)
self.copy_folder(src_folder, dst_folder)
# Update the ignore elements:
ignore_elems += ["",
"# Ignore all the windows tools except the 7zip folder:",
"tools/windows/*",
"!tools/windows/7zip-*",
"tools/linux/*"]
            # Should also install a requirements.txt file:
dest_file = self.get_path(proj_dir, "tools", "requirements.txt")
if not self.file_exists(dest_file):
logger.info("Installing pythong requirements file.")
content = ["# List here all the required python packages",
"# Then call cli.{sh/bat} --install-py-reqs",
"",
"pylint",
"autopep8",
""]
content = "\n".join(content)
self.write_text_file(content, dest_file)
# Should install the cli script files:
dest_file = self.get_path(proj_dir, "cli.py")
if not self.file_exists(dest_file):
logger.info("Writting cli python file %s", dest_file)
content = DEFAULT_CLI_PY_CONTENT
self.write_text_file(content, dest_file)
dest_file = self.get_path(proj_dir, "cli.sh")
if not self.file_exists(dest_file):
logger.info("Writting cli shell file %s", dest_file)
content = DEFAULT_CLI_SH_CONTENT
content = content.replace("${PROJ_NAME}", proj_name.lower())
# Use the linux python version below:
content = content.replace("${PY_VERSION}", py_vers['linux'])
self.write_text_file(content, dest_file, newline="\n")
dest_file = self.get_path(proj_dir, "cli.bat")
if not self.file_exists(dest_file):
logger.info("Writting cli batch file %s", dest_file)
content = DEFAULT_CLI_BAT_CONTENT
content = content.replace("${PROJ_NAME}", proj_name.upper())
                # Use the windows version below:
content = content.replace("${PY_VERSION}", py_vers['windows'])
content = content.replace("${ZIP_VERSION}", sevenzip_vers['windows'])
self.write_text_file(content, dest_file)
        # Finish writing the vscode config:
if ref_config is None or config != ref_config:
logger.info("Wrtting updated vscode settings in %s", cfg_file)
self.write_json(config, cfg_file)
else:
logger.info("No change in %s", cfg_file)
        # Also copy to actual settings if we don't have the file yet:
cfg_file2 = self.get_path(config_dir, "settings.json")
if not self.file_exists(cfg_file2):
logger.info("Copyging VSCode settings template to %s", cfg_file2)
self.copy_file(cfg_file, cfg_file2)
# Write the env file if needed:
dest_file = self.get_path(proj_dir, ".vs_env")
if not self.file_exists(dest_file):
logger.info("Writting python env file %s", dest_file)
content = DEFAULT_PYTHONENV_CONTENT
sep = ";" if self.is_windows else ":"
content = content.replace("${NVP_ROOT_DIR}", "" if with_py else self.ctx.get_root_dir())
content = content.replace("${SEP}", "" if with_py else sep)
self.write_text_file(content, dest_file)
# and write a .editorconfig file:
dest_file = self.get_path(proj_dir, ".editorconfig")
if not self.file_exists(dest_file):
logger.info("Writting editor config file %s", dest_file)
content = DEFAULT_EDITORCONFIG_CONTENT
self.write_text_file(content, dest_file)
# and write a .gitignore file:
dest_file = self.get_path(proj_dir, ".gitignore")
if not self.file_exists(dest_file):
logger.info("Writting .gitignore file %s", dest_file)
content = DEFAULT_GITIGNORE_CONTENT
content += "\n".join(ignore_elems)
content += "\n"
self.write_text_file(content, dest_file)
# and write a .gitattributes file:
dest_file = self.get_path(proj_dir, ".gitattributes")
if not self.file_exists(dest_file):
logger.info("Writting .gitattributes file %s", dest_file)
content = DEFAULT_GITATTRIBUTES_CONTENT
self.write_text_file(content, dest_file)
# write a nvp_config.json file:
dest_file = self.get_path(proj_dir, "nvp_config.json")
if not self.file_exists(dest_file):
logger.info("Writting nvp_config.json file %s", dest_file)
content = DEFAULT_NVPCONFIG_CONTENT
self.write_text_file(content, dest_file)
# write a nvp_plug.py file:
dest_file = self.get_path(proj_dir, "nvp_plug.py")
if not self.file_exists(dest_file):
logger.info("Writting nvp_plug.py file %s", dest_file)
content = DEFAULT_NVPPLUG_CONTENT.replace("${PROJ_NAME}", proj_name)
self.write_text_file(content, dest_file)
# Add pull rebase = false to .git/config
cfg_file = self.get_path(proj_dir, ".git", "config")
assert self.file_exists(cfg_file), f"Cannot fine git config file at {cfg_file}"
# Load that config:
config = self.read_ini(cfg_file)
save_needed = False
if 'pull' not in config:
logger.info("Adding pull section in git config.")
config['pull'] = {
"rebase": "false",
}
save_needed = True
else:
pull = config['pull']
if pull['rebase'] != 'false':
logger.info("Updating git pull rebase from %s to %s", pull['rebase'], 'false')
pull['rebase'] = 'false'
save_needed = True
if save_needed:
self.write_ini(config, cfg_file)
def process_command(self, cmd0):
"""Re-implementation of the process_command method."""
if cmd0 != 'admin':
return False
cmd1 = self.ctx.get_command(1)
cmd2 = self.ctx.get_command(2)
if cmd1 == 'install' and cmd2 == 'cli':
self.install_cli()
return True
if cmd1 == 'install' and cmd2 == 'reqs':
self.install_python_requirements()
return True
if cmd1 == 'install' and cmd2 == 'repo':
self.install_repository_bootstrap()
return True
if cmd1 == 'init':
self.setup_global_vscode_config()
proj = self.ctx.get_current_project()
proj_dir = proj.get_root_dir() if proj is not None else self.ctx.get_root_dir()
proj_name = proj.get_name(False) if proj is not None else "NervProj"
self.init_project_config(proj_dir, proj_name)
return True
return False
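# Invocation sketch (illustrative only; the real wiring is done by the NVP context/CLI,
# and the calls below are hypothetical):
# ctx = NVPContext()                 # hypothetical setup
# comp = AdminManager(ctx)           # registered as 'admin' by register_component() above
# comp.process_command('admin')      # dispatches to install cli / install reqs / install repo / init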
| """Collection of admin utility functions"""
import os
import sys
import logging
from nvp.nvp_component import NVPComponent
from nvp.nvp_context import NVPContext
logger = logging.getLogger(__name__)
# Default .editorconfig content:
DEFAULT_EDITORCONFIG_CONTENT = """# Autogenerated .editorconfig file
# Update as needed.
root = true
[*]
end_of_line = lf
"""
# Default .gitignore content:
DEFAULT_GITIGNORE_CONTENT = """# Ignore python compiled files:
*.pyc
# Ignore .vs_env file:
.vs_env
# Ignore visual studio code actual settings file:
.vscode/settings.json
# Ignore log files:
*.log
"""
# Default python .env content:
DEFAULT_PYTHONENV_CONTENT = """# Autogenerated .vs_env file
# Update as needed.
PYTHONPATH=.${SEP}${NVP_ROOT_DIR}
"""
# Default nvp_config.json content:
DEFAULT_NVPCONFIG_CONTENT = """/* NVP project configuration file */
{
// Add config entries as needed here.
}
"""
# Default nvp_plug.py content:
DEFAULT_NVPPLUG_CONTENT = '''""" NVP plug entrypoint module for ${PROJ_NAME} """
import logging
from nvp.nvp_component import NVPComponent
from nvp.nvp_context import NVPContext
logger = logging.getLogger('${PROJ_NAME}')
def register_nvp_plugin(context, proj):
"""This function should register this plugin in the current NVP context"""
logger.info("Registering ${PROJ_NAME} NVP plugin.")
proj.register_component('${PROJ_NAME}', MyComponent(context))
class MyComponent(NVPComponent):
"""Example component class"""
def __init__(self, ctx: NVPContext):
"""Constructor for component"""
NVPComponent.__init__(self, ctx)
# define parsers and build required logic from here:
# desc = {
# "build": {"libs": None},
# }
# ctx.define_subparsers("main", desc)
# psr = ctx.get_parser('main.build')
# psr.add_argument("-c", "--compiler", dest='compiler_type', type=str,
# help="Specify which type of compiler should be selected")
'''
# Default .gitattributes content:
# cf. https://rehansaeed.com/gitattributes-best-practices/
###############################
# Git Large File System (LFS) #
###############################
# Could use 'filter=lfs diff=lfs merge=lfs ' below but not clear yet how to do that
# properly
DEFAULT_GITATTRIBUTES_CONTENT = """###############################
# Git Line Endings #
###############################
# Set default behaviour to automatically normalize line endings.
* text=auto
# Force batch scripts to always use CRLF line endings so that if a repo is accessed
# in Windows via a file share from Linux, the scripts will work.
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf
# Force bash scripts to always use LF line endings so that if a repo is accessed
# in Unix via a file share from Windows, the scripts will work.
*.sh text eol=lf
# Archives
*.7z -text
*.br -text
*.gz -text
*.tar -text
*.zip -text
# Documents
*.pdf -text
# Images
*.gif -text
*.ico -text
*.jpg -text
*.pdf -text
*.png -text
*.psd -text
*.webp -text
# Fonts
*.woff2 -text
# Other
*.exe -text
"""
DEFAULT_CLI_PY_CONTENT = '''""" Main command line interface module """
import argparse
# => Adapt the code below to be your application entrypoint.
parser = argparse.ArgumentParser()
args = parser.parse_args()
print("Should implement application logic here.")
'''
DEFAULT_CLI_SH_CONTENT = '''#!/bin/bash
# cf. https://stackoverflow.com/questions/59895/how-can-i-get-the-source-directory-of-a-bash-script-from-within-the-script-itsel
ROOT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
_${PROJ_NAME}_run_cli_windows() {
# On windows we should simply rely on the cli.bat script below:
ROOT_DIR="$(cygpath -w $ROOT_DIR)"
cmd /C "$ROOT_DIR\cli.bat" "$@"
}
_${PROJ_NAME}_run_cli_linux() {
local python_version="${PY_VERSION}"
# On linux we should call the python cli directly:
# Get the project root folder:
local root_dir=$(readlink -f $ROOT_DIR/)
# echo "Project root dir is: $root_dir"
# Check if we already have python:
local tools_dir=$root_dir/tools/linux
if [[ ! -d $tools_dir ]]; then
echo "Creating tools/linux folder..."
mkdir $tools_dir
fi
local python_dir=$tools_dir/python-$python_version
local python_path=$python_dir/bin/python3
if [[ ! -d $python_dir ]]; then
# Get the path to package:
local python_pkg=$root_dir/tools/packages/python-$python_version-linux.tar.xz
echo "Extracting $python_pkg..."
# $unzip_path x -o"$tools_dir" "$python_pkg" > /dev/null
pushd $tools_dir >/dev/null
tar xvJf $python_pkg
popd >/dev/null
# Once we have deployed the base python tool package we start with upgrading pip:
echo "Upgrading pip..."
$python_path -m pip install --upgrade pip
# Finally we install the python requirements:
echo "Installing python requirements..."
$python_path -m pip install -r $root_dir/tools/requirements.txt
fi
if [ "$1" == "--install-py-reqs" ]; then
echo "Installing python requirements..."
$python_path -m pip install -r $root_dir/tools/requirements.txt
elif [ "$1" == "python" ]; then
# shift the args by one:
shift
$python_path "$@"
elif [ "$1" == "pip" ]; then
# shift the args by one:
shift
$python_path -m pip "$@"
else
# Execute the command in python:
$python_path $root_dir/cli.py "$@"
fi
}
${PROJ_NAME}() {
if [ "$1" == "home" ]; then
# We simply go to the home of this project:
cd "$ROOT_DIR"
else
# Check if we are on a windows or a linux system:
pname=$(uname -s)
case $pname in
CYGWIN*)
_${PROJ_NAME}_run_cli_windows "$@"
;;
*)
_${PROJ_NAME}_run_cli_linux "$@"
;;
esac
fi
}
# cf. https://askubuntu.com/questions/141928/what-is-the-difference-between-bin-sh-and-bin-bash
(return 0 2>/dev/null) && sourced=1 || sourced=0
if [ "$sourced" == "0" ]; then
${PROJ_NAME} "$@"
else
echo "${PROJ_NAME} command loaded."
fi
'''
DEFAULT_CLI_BAT_CONTENT = '''
@echo off
SETLOCAL ENABLEDELAYEDEXPANSION
@REM Retrieve the current folder:
@REM cli script is located directly in the root, so we don't need the '..' in path:
@REM cd /D %~dp0..
cd /D %~dp0
FOR /F %%i IN (".") DO set ${PROJ_NAME}_ROOT_DIR=%%~fi
set ${PROJ_NAME}_DIR=%${PROJ_NAME}_ROOT_DIR%
@REM echo Using NervProj root folder: %${PROJ_NAME}_DIR%
@REM Extract the python env if needed:
set py_vers=${PY_VERSION}
set TOOLS_DIR=%${PROJ_NAME}_DIR%\\tools\\windows\\
set UNZIP=%TOOLS_DIR%\\7zip-${ZIP_VERSION}\\7za.exe
set PYTHON=%TOOLS_DIR%\\python-%py_vers%\\python.exe
@REM Check if python is extracted already:
if not exist "%PYTHON%" (
echo Extracting python tool...
%UNZIP% x -o"%TOOLS_DIR%" "%${PROJ_NAME}_DIR%\\tools\\packages\\python-%py_vers%-windows.7z" > nul
@REM Upgrade pip:
%PYTHON% -m pip install --upgrade pip
@REM Install requirements:
%PYTHON% -m pip install -r %${PROJ_NAME}_DIR%\\tools\\requirements.txt
)
@REM check if the first argument is "--install-py-reqs"
IF /i "%~1" == "--install-py-reqs" goto install_reqs
IF /i "%~1" == "python" goto run_python
IF /i "%~1" == "pip" goto run_pip
%PYTHON% %NERVHOME_DIR%\cli.py %*
goto common_exit
:install_reqs
%PYTHON% -m pip install -r %NERVHOME_DIR%\tools\requirements.txt
goto common_exit
@REM cannot rely on %* when we use shift below:
:run_python
shift
%PYTHON% %1 %2 %3 %4 %5 %6 %7 %8 %9
goto common_exit
:run_pip
shift
%PYTHON% -m pip %1 %2 %3 %4 %5 %6 %7 %8 %9
goto common_exit
:common_exit
'''
def register_component(ctx: NVPContext):
"""Register this component in the given context"""
comp = AdminManager(ctx)
ctx.register_component('admin', comp)
class AdminManager(NVPComponent):
"""Admin command manager class"""
def __init__(self, ctx: NVPContext):
"""Admin commands manager constructor"""
NVPComponent.__init__(self, ctx)
# # Check the value of the sub command:
# sub_cmd = self.settings['l1_cmd']
# if sub_cmd == 'install-cli':
# self.install_cli()
desc = {
"admin": {
"install": {"cli": None, "reqs": None, "repo": None},
"init": None,
}
}
ctx.define_subparsers("main", desc)
psr = ctx.get_parser('main.admin.init')
psr.add_argument("-p", "--with-py-env", dest="with_py_env", action="store_true",
help="Request deployment of a full python environment.")
def install_cli(self):
"""Install a CLI script in .bashrc if application"""
# Check if an $HOME folder is provider:
home_dir = os.getenv('HOME')
if home_dir is None:
logger.error("Cannot install cli alias: no $HOME environment variable detected.")
return
logger.info("Home folder is: %s", home_dir)
# Check if we have a .bashrc file in that folder:
bashrc_file = self.get_path(home_dir, ".bashrc")
if not self.file_exists(bashrc_file):
logger.warning("Cannot install cli alias: no .bashrc file in HOME folder.")
return
script_path = self.get_path(self.ctx.get_root_dir(), "cli.sh")
# If we are on windows, we may want to convert this path to a cygwin path
# if we are in a cygwin environment (but running the native python executable):
if self.is_windows:
script_path = self.to_cygwin_path(script_path)
assert script_path is not None, "Invalid cygwin environment."
sline = f"\n[ -f \"{script_path}\" ] && source \"{script_path}\"\n"
# Check if this string is already in the bashrc file:
content = self.read_text_file(bashrc_file)
if content.find(sline) == -1:
# We should add the string:
logger.info("Adding source file in .bashrc for NervProj")
# Make a backup of the file:
self.copy_file(bashrc_file, bashrc_file+".bak", force=True)
self.write_text_file(content+sline, bashrc_file, newline='\n')
else:
logger.info("NervProj setup file already referenced in .bashrc")
# pp = pprint.PrettyPrinter(indent=2)
# res = pp.pformat(dict(os.environ))
# logger.info("Current environment is: %s", res)
def install_python_requirements(self):
"""Install the requirements for the main python environment using pip"""
logger.info("Installing python requirements...")
reqfile = self.get_path(self.ctx.get_root_dir(), "tools/requirements.txt")
cmd = [sys.executable, "-m", "pip", "install", "-r", reqfile]
# logger.info("Executing command: %s", cmd)
self.execute(cmd)
logger.info("Done installing python requirements.")
def install_repository_bootstrap(self):
"""Install the bootstraped repository for this NervProj folder if not present already."""
base_dir = self.ctx.get_root_dir()
if self.dir_exists(base_dir, ".git"):
logger.info(".git folder already exists, bootstrapping ignored.")
return
# We need to bootstrap in a temp folder:
git = self.get_component('git')
url = self.config["repository_url"]
dest_dir = self.get_path(base_dir, "temp", "nervproj")
logger.info("Cloning NervProj folder into %s...", dest_dir)
git.clone_repository(url, dest_dir)
# When cloning is done we should move the .git folder from the clone location into our root
self.move_path(self.get_path(dest_dir, ".git"), self.get_path(base_dir, ".git"))
# And finally we remove the remaining files:
self.remove_folder(dest_dir)
logger.info("Done bootstrapping NervProj project.")
def setup_global_vscode_config(self, config_dir=None):
"""Setup global Visual studio code user settings"""
if config_dir is None:
# * on windows: in C:/Users/kenshin/AppData/Roaming/Code/User/settings.json
# => should use os.getenv('APPDATA')
# * on linux: in /home/kenshin/.config/Code/User/settings.json
if self.is_windows:
base_dir = os.getenv("APPDATA")
else:
base_dir = self.get_path(self.ctx.get_home_dir(), ".config")
config_dir = self.get_path(base_dir, "Code", "User")
cfg_file = self.get_path(config_dir, "settings.json")
config = {}
ref_config = None
if not self.file_exists(cfg_file):
# Ensure the folder exists:
self.make_folder(config_dir)
else:
# Read the config:
config = self.read_json(cfg_file)
# Keep a copy to compare the changes:
ref_config = self.read_json(cfg_file)
# Now write the changes we want:
tools = self.get_component('tools')
config["git.path"] = tools.get_git_path()
config["python.linting.pylintEnabled"] = True
config["python.linting.enabled"] = True
config["python.linting.pylintPath"] = tools.get_tool_path('pylint')
config["python.linting.pylintArgs"] = [
"--max-line-length=120",
"--good-names=i,j,k,ex,Run,_,x,y,z,w,t,dt",
"--good-names-rgxs=[a-z][0-9]$"]
config["python.defaultInterpreterPath"] = tools.get_tool_path('python')
config["python.formatting.autopep8Path"] = tools.get_tool_path("autopep8")
config["python.formatting.provider"] = "autopep8"
config["python.formatting.autopep8Args"] = ["--max-line-length=120", "--experimental"]
config["editor.formatOnSave"] = True
config["cmakeFormat.exePath"] = tools.get_tool_path("cmake_format")
if ref_config is None or config != ref_config:
logger.info("Wrtting updated vscode settings in %s", cfg_file)
self.write_json(config, cfg_file)
else:
logger.info("No change in %s", cfg_file)
def init_project_config(self, proj_dir, proj_name):
"""Setup initial project local config elements"""
config_dir = self.get_path(proj_dir, ".vscode")
cfg_file = self.get_path(config_dir, "settings.template.json")
self.make_folder(config_dir)
config = {}
ref_config = None
# Check if we should provide a python environment in this project:
with_py = self.get_param("with_py_env", False)
if with_py:
logger.info("Setting up dedicated python env for %s", proj_name)
if self.file_exists(cfg_file):
# Read the config:
config = self.read_json(cfg_file)
# Keep a copy to compare the changes:
ref_config = self.read_json(cfg_file)
config["python.envFile"] = "${workspaceFolder}/.vs_env"
ignore_elems = []
if with_py:
# We deploy the python packages:
dest_dir = self.get_path(proj_dir, "tools", "packages")
self.make_folder(dest_dir)
# Collect the python and 7zip versions for each platform:
py_vers = {}
sevenzip_vers = {}
for plat_name in ["windows", "linux"]:
for el in self.config[f'{plat_name}_tools']:
if el["name"] == 'python':
py_vers[plat_name] = el["version"]
if el["name"] == '7zip':
sevenzip_vers[plat_name] = el["version"]
for plat_name, py_version in py_vers.items():
for ext in [".7z", ".tar.xz"]:
file_name = f"python-{py_version}-{plat_name}{ext}"
src_file = self.get_path(self.ctx.get_root_dir(), "tools", "packages", file_name)
dst_file = self.get_path(dest_dir, file_name)
if self.file_exists(src_file) and not self.file_exists(dst_file):
logger.info("Adding package file %s", dst_file)
self.copy_file(src_file, dst_file)
# more updates to vscode settings if we have a dedicated python env:
cur_py_vers = py_vers[self.platform]
ext = ".exe" if self.is_windows else ""
config["python.linting.pylintEnabled"] = True
config["python.linting.enabled"] = True
config["python.linting.pylintPath"] = f"${{workspaceFolder}}/tools/{self.platform}/python-{cur_py_vers}/Scripts/pylint{ext}"
config["python.linting.pylintArgs"] = ["--max-line-length=120"]
config["python.defaultInterpreterPath"] = f"${{workspaceFolder}}/tools/{self.platform}/python-{cur_py_vers}/python{ext}"
config["python.formatting.autopep8Path"] = f"${{workspaceFolder}}/tools/{self.platform}/python-{cur_py_vers}/Scripts/autopep8{ext}"
config["python.formatting.provider"] = "autopep8"
config["python.formatting.autopep8Args"] = ["--max-line-length=120", "--experimental"]
# Next, for the windows part we need to deploy the 7zip package too:
folder_name = f"7zip-{sevenzip_vers['windows']}"
src_folder = self.get_path(self.ctx.get_root_dir(), "tools", "windows", folder_name)
dst_folder = self.get_path(proj_dir, "tools", "windows", folder_name)
if not self.dir_exists(dst_folder):
logger.info("Adding windows 7zip package at %s", dst_folder)
self.copy_folder(src_folder, dst_folder)
# Update the ignore elements:
ignore_elems += ["",
"# Ignore all the windows tools except the 7zip folder:",
"tools/windows/*",
"!tools/windows/7zip-*",
"tools/linux/*"]
# Should also install a requirements.txt file:
dest_file = self.get_path(proj_dir, "tools", "requirements.txt")
if not self.file_exists(dest_file):
logger.info("Installing python requirements file.")
content = ["# List here all the required python packages",
"# Then call cli.{sh/bat} --install-py-reqs",
"",
"pylint",
"autopep8",
""]
content = "\n".join(content)
self.write_text_file(content, dest_file)
# Should install the cli script files:
dest_file = self.get_path(proj_dir, "cli.py")
if not self.file_exists(dest_file):
logger.info("Writing cli python file %s", dest_file)
content = DEFAULT_CLI_PY_CONTENT
self.write_text_file(content, dest_file)
dest_file = self.get_path(proj_dir, "cli.sh")
if not self.file_exists(dest_file):
logger.info("Writing cli shell file %s", dest_file)
content = DEFAULT_CLI_SH_CONTENT
content = content.replace("${PROJ_NAME}", proj_name.lower())
# Use the linux python version below:
content = content.replace("${PY_VERSION}", py_vers['linux'])
self.write_text_file(content, dest_file, newline="\n")
dest_file = self.get_path(proj_dir, "cli.bat")
if not self.file_exists(dest_file):
logger.info("Writing cli batch file %s", dest_file)
content = DEFAULT_CLI_BAT_CONTENT
content = content.replace("${PROJ_NAME}", proj_name.upper())
# Use the windows version below:
content = content.replace("${PY_VERSION}", py_vers['windows'])
content = content.replace("${ZIP_VERSION}", sevenzip_vers['windows'])
self.write_text_file(content, dest_file)
# Finish writing the vscode config:
if ref_config is None or config != ref_config:
logger.info("Writing updated vscode settings in %s", cfg_file)
self.write_json(config, cfg_file)
else:
logger.info("No change in %s", cfg_file)
# Also copy to the actual settings if we don't have the file yet:
cfg_file2 = self.get_path(config_dir, "settings.json")
if not self.file_exists(cfg_file2):
logger.info("Copying VSCode settings template to %s", cfg_file2)
self.copy_file(cfg_file, cfg_file2)
# Write the env file if needed:
dest_file = self.get_path(proj_dir, ".vs_env")
if not self.file_exists(dest_file):
logger.info("Writing python env file %s", dest_file)
content = DEFAULT_PYTHONENV_CONTENT
sep = ";" if self.is_windows else ":"
content = content.replace("${NVP_ROOT_DIR}", "" if with_py else self.ctx.get_root_dir())
content = content.replace("${SEP}", "" if with_py else sep)
self.write_text_file(content, dest_file)
# and write a .editorconfig file:
dest_file = self.get_path(proj_dir, ".editorconfig")
if not self.file_exists(dest_file):
logger.info("Writing editor config file %s", dest_file)
content = DEFAULT_EDITORCONFIG_CONTENT
self.write_text_file(content, dest_file)
# and write a .gitignore file:
dest_file = self.get_path(proj_dir, ".gitignore")
if not self.file_exists(dest_file):
logger.info("Writing .gitignore file %s", dest_file)
content = DEFAULT_GITIGNORE_CONTENT
content += "\n".join(ignore_elems)
content += "\n"
self.write_text_file(content, dest_file)
# and write a .gitattributes file:
dest_file = self.get_path(proj_dir, ".gitattributes")
if not self.file_exists(dest_file):
logger.info("Writing .gitattributes file %s", dest_file)
content = DEFAULT_GITATTRIBUTES_CONTENT
self.write_text_file(content, dest_file)
# write a nvp_config.json file:
dest_file = self.get_path(proj_dir, "nvp_config.json")
if not self.file_exists(dest_file):
logger.info("Writing nvp_config.json file %s", dest_file)
content = DEFAULT_NVPCONFIG_CONTENT
self.write_text_file(content, dest_file)
# write a nvp_plug.py file:
dest_file = self.get_path(proj_dir, "nvp_plug.py")
if not self.file_exists(dest_file):
logger.info("Writing nvp_plug.py file %s", dest_file)
content = DEFAULT_NVPPLUG_CONTENT.replace("${PROJ_NAME}", proj_name)
self.write_text_file(content, dest_file)
# Add pull rebase = false to .git/config
cfg_file = self.get_path(proj_dir, ".git", "config")
assert self.file_exists(cfg_file), f"Cannot find git config file at {cfg_file}"
# Load that config:
config = self.read_ini(cfg_file)
save_needed = False
if 'pull' not in config:
logger.info("Adding pull section in git config.")
config['pull'] = {
"rebase": "false",
}
save_needed = True
else:
pull = config['pull']
if pull['rebase'] != 'false':
logger.info("Updating git pull rebase from %s to %s", pull['rebase'], 'false')
pull['rebase'] = 'false'
save_needed = True
if save_needed:
self.write_ini(config, cfg_file)
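# Illustrative note (not part of the original source): with the standard INI
# layout used by read_ini/write_ini, the resulting .git/config would then
# contain a section such as:
#   [pull]
#       rebase = false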
def process_command(self, cmd0):
"""Re-implementation of the process_command method."""
if cmd0 != 'admin':
return False
cmd1 = self.ctx.get_command(1)
cmd2 = self.ctx.get_command(2)
if cmd1 == 'install' and cmd2 == 'cli':
self.install_cli()
return True
if cmd1 == 'install' and cmd2 == 'reqs':
self.install_python_requirements()
return True
if cmd1 == 'install' and cmd2 == 'repo':
self.install_repository_bootstrap()
return True
if cmd1 == 'init':
self.setup_global_vscode_config()
proj = self.ctx.get_current_project()
proj_dir = proj.get_root_dir() if proj is not None else self.ctx.get_root_dir()
proj_name = proj.get_name(False) if proj is not None else "NervProj"
self.init_project_config(proj_dir, proj_name)
return True
return False
|
import dataclasses
import enum
import inspect
import json
import struct
import sys
import typing
from abc import ABC
from base64 import b64decode, b64encode
from datetime import datetime, timedelta, timezone
from dateutil.parser import isoparse
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Optional,
Set,
Tuple,
Type,
Union,
get_type_hints,
)
from ._types import T
from .casing import camel_case, safe_snake_case, snake_case
from .grpc.grpclib_client import ServiceStub
# Proto 3 data types
TYPE_ENUM = "enum"
TYPE_BOOL = "bool"
TYPE_INT32 = "int32"
TYPE_INT64 = "int64"
TYPE_UINT32 = "uint32"
TYPE_UINT64 = "uint64"
TYPE_SINT32 = "sint32"
TYPE_SINT64 = "sint64"
TYPE_FLOAT = "float"
TYPE_DOUBLE = "double"
TYPE_FIXED32 = "fixed32"
TYPE_SFIXED32 = "sfixed32"
TYPE_FIXED64 = "fixed64"
TYPE_SFIXED64 = "sfixed64"
TYPE_STRING = "string"
TYPE_BYTES = "bytes"
TYPE_MESSAGE = "message"
TYPE_MAP = "map"
# Fields that use a fixed amount of space (4 or 8 bytes)
FIXED_TYPES = [
TYPE_FLOAT,
TYPE_DOUBLE,
TYPE_FIXED32,
TYPE_SFIXED32,
TYPE_FIXED64,
TYPE_SFIXED64,
]
# Fields that are numerical 64-bit types
INT_64_TYPES = [TYPE_INT64, TYPE_UINT64, TYPE_SINT64, TYPE_FIXED64, TYPE_SFIXED64]
# Fields that are efficiently packed when serialized
PACKED_TYPES = [
TYPE_ENUM,
TYPE_BOOL,
TYPE_INT32,
TYPE_INT64,
TYPE_UINT32,
TYPE_UINT64,
TYPE_SINT32,
TYPE_SINT64,
TYPE_FLOAT,
TYPE_DOUBLE,
TYPE_FIXED32,
TYPE_SFIXED32,
TYPE_FIXED64,
TYPE_SFIXED64,
]
# Wire types
# https://developers.google.com/protocol-buffers/docs/encoding#structure
WIRE_VARINT = 0
WIRE_FIXED_64 = 1
WIRE_LEN_DELIM = 2
WIRE_FIXED_32 = 5
# Mappings of which Proto 3 types correspond to which wire types.
WIRE_VARINT_TYPES = [
TYPE_ENUM,
TYPE_BOOL,
TYPE_INT32,
TYPE_INT64,
TYPE_UINT32,
TYPE_UINT64,
TYPE_SINT32,
TYPE_SINT64,
]
WIRE_FIXED_32_TYPES = [TYPE_FLOAT, TYPE_FIXED32, TYPE_SFIXED32]
WIRE_FIXED_64_TYPES = [TYPE_DOUBLE, TYPE_FIXED64, TYPE_SFIXED64]
WIRE_LEN_DELIM_TYPES = [TYPE_STRING, TYPE_BYTES, TYPE_MESSAGE, TYPE_MAP]
# Protobuf datetimes start at the Unix Epoch in 1970 in UTC.
def datetime_default_gen() -> datetime:
return datetime(1970, 1, 1, tzinfo=timezone.utc)
DATETIME_ZERO = datetime_default_gen()
class Casing(enum.Enum):
"""Casing constants for serialization."""
CAMEL = camel_case #: A camelCase serialization function.
SNAKE = snake_case #: A snake_case serialization function.
PLACEHOLDER: Any = object()
@dataclasses.dataclass(frozen=True)
class FieldMetadata:
"""Stores internal metadata used for parsing & serialization."""
# Protobuf field number
number: int
# Protobuf type name
proto_type: str
# Map information if the proto_type is a map
map_types: Optional[Tuple[str, str]] = None
# Groups several "one-of" fields together
group: Optional[str] = None
# Describes the wrapped type (e.g. when using google.protobuf.BoolValue)
wraps: Optional[str] = None
@staticmethod
def get(field: dataclasses.Field) -> "FieldMetadata":
"""Returns the field metadata for a dataclass field."""
return field.metadata["betterproto"]
def dataclass_field(
number: int,
proto_type: str,
*,
map_types: Optional[Tuple[str, str]] = None,
group: Optional[str] = None,
wraps: Optional[str] = None,
) -> dataclasses.Field:
"""Creates a dataclass field with attached protobuf metadata."""
return dataclasses.field(
default=PLACEHOLDER,
metadata={
"betterproto": FieldMetadata(number, proto_type, map_types, group, wraps)
},
)
# Note: the fields below return `Any` to prevent type errors in the generated
# data classes since the types won't match with `Field` and they get swapped
# out at runtime. The generated dataclass variables are still typed correctly.
def enum_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_ENUM, group=group)
def bool_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_BOOL, group=group)
def int32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_INT32, group=group)
def int64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_INT64, group=group)
def uint32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_UINT32, group=group)
def uint64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_UINT64, group=group)
def sint32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SINT32, group=group)
def sint64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SINT64, group=group)
def float_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_FLOAT, group=group)
def double_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_DOUBLE, group=group)
def fixed32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_FIXED32, group=group)
def fixed64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_FIXED64, group=group)
def sfixed32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SFIXED32, group=group)
def sfixed64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SFIXED64, group=group)
def string_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_STRING, group=group)
def bytes_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_BYTES, group=group)
def message_field(
number: int, group: Optional[str] = None, wraps: Optional[str] = None
) -> Any:
return dataclass_field(number, TYPE_MESSAGE, group=group, wraps=wraps)
def map_field(
number: int, key_type: str, value_type: str, group: Optional[str] = None
) -> Any:
return dataclass_field(
number, TYPE_MAP, map_types=(key_type, value_type), group=group
)
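# Illustrative sketch (not part of the original source): generated protobuf
# code would typically pair these field helpers with a dataclass, e.g. the
# hypothetical message below, so that bytes(Greeting(name="hi", count=3))
# yields the binary encoding and Greeting().parse(data) decodes it again:
#
#   @dataclasses.dataclass
#   class Greeting(Message):
#       name: str = string_field(1)
#       count: int = int32_field(2)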
class Enum(enum.IntEnum):
"""
The base class for protobuf enumerations, all generated enumerations will inherit
from this. Bases :class:`enum.IntEnum`.
"""
@classmethod
def from_string(cls, name: str) -> "Enum":
"""Return the value which corresponds to the string name.
Parameters
-----------
name: :class:`str`
The name of the enum member to get
Raises
-------
:exc:`ValueError`
The member was not found in the Enum.
"""
try:
return cls._member_map_[name]
except KeyError as e:
raise ValueError(f"Unknown value {name} for enum {cls.__name__}") from e
def _pack_fmt(proto_type: str) -> str:
"""Returns a little-endian format string for reading/writing binary."""
return {
TYPE_DOUBLE: "<d",
TYPE_FLOAT: "<f",
TYPE_FIXED32: "<I",
TYPE_FIXED64: "<Q",
TYPE_SFIXED32: "<i",
TYPE_SFIXED64: "<q",
}[proto_type]
def encode_varint(value: int) -> bytes:
"""Encodes a single varint value for serialization."""
b: List[int] = []
if value < 0:
value += 1 << 64
bits = value & 0x7F
value >>= 7
while value:
b.append(0x80 | bits)
bits = value & 0x7F
value >>= 7
return bytes(b + [bits])
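# Illustrative examples (not part of the original source): varints pack seven
# bits per byte, least-significant group first, with the high bit used as a
# continuation flag:
#   encode_varint(1)   == b"\x01"
#   encode_varint(300) == b"\xac\x02"   # 300 = 0b10_0101100 -> 0x2C | 0x80, then 0x02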
def _preprocess_single(proto_type: str, wraps: str, value: Any) -> bytes:
"""Adjusts values before serialization."""
if proto_type in [
TYPE_ENUM,
TYPE_BOOL,
TYPE_INT32,
TYPE_INT64,
TYPE_UINT32,
TYPE_UINT64,
]:
return encode_varint(value)
elif proto_type in [TYPE_SINT32, TYPE_SINT64]:
# Handle zig-zag encoding.
return encode_varint(value << 1 if value >= 0 else (value << 1) ^ (~0))
elif proto_type in FIXED_TYPES:
return struct.pack(_pack_fmt(proto_type), value)
elif proto_type == TYPE_STRING:
return value.encode("utf-8")
elif proto_type == TYPE_MESSAGE:
if isinstance(value, datetime):
# Convert the `datetime` to a timestamp message.
seconds = int(value.timestamp())
nanos = int(value.microsecond * 1e3)
value = _Timestamp(seconds=seconds, nanos=nanos)
elif isinstance(value, timedelta):
# Convert the `timedelta` to a duration message.
total_ms = value // timedelta(microseconds=1)
seconds = int(total_ms / 1e6)
nanos = int((total_ms % 1e6) * 1e3)
value = _Duration(seconds=seconds, nanos=nanos)
elif wraps:
if value is None:
return b""
value = _get_wrapper(wraps)(value=value)
return bytes(value)
return value
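# Illustrative note (not part of the original source): the sint32/sint64
# branch above is zig-zag encoding, which maps signed integers onto small
# unsigned varints: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...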
def _serialize_single(
field_number: int,
proto_type: str,
value: Any,
*,
serialize_empty: bool = False,
wraps: str = "",
) -> bytes:
"""Serializes a single field and value."""
value = _preprocess_single(proto_type, wraps, value)
output = bytearray()
if proto_type in WIRE_VARINT_TYPES:
key = encode_varint(field_number << 3)
output += key + value
elif proto_type in WIRE_FIXED_32_TYPES:
key = encode_varint((field_number << 3) | 5)
output += key + value
elif proto_type in WIRE_FIXED_64_TYPES:
key = encode_varint((field_number << 3) | 1)
output += key + value
elif proto_type in WIRE_LEN_DELIM_TYPES:
if len(value) or serialize_empty or wraps:
key = encode_varint((field_number << 3) | 2)
output += key + encode_varint(len(value)) + value
else:
raise NotImplementedError(proto_type)
return bytes(output)
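# Illustrative example (not part of the original source): every field is
# prefixed with a varint key of (field_number << 3) | wire_type, so field 1
# as a varint gets key byte 0x08 and field 2 as length-delimited gets 0x12.
# For instance _serialize_single(1, TYPE_INT32, 150) == b"\x08\x96\x01".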
def decode_varint(buffer: bytes, pos: int) -> Tuple[int, int]:
"""
Decode a single varint value from a byte buffer. Returns the value and the
new position in the buffer.
"""
result = 0
shift = 0
while 1:
b = buffer[pos]
result |= (b & 0x7F) << shift
pos += 1
if not (b & 0x80):
return result, pos
shift += 7
if shift >= 64:
raise ValueError("Too many bytes when decoding varint.")
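# Illustrative example (not part of the original source): decoding is the
# inverse of encode_varint and returns the value plus the next read position:
#   decode_varint(b"\xac\x02", 0) == (300, 2)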
@dataclasses.dataclass(frozen=True)
class ParsedField:
number: int
wire_type: int
value: Any
raw: bytes
def parse_fields(value: bytes) -> Generator[ParsedField, None, None]:
i = 0
while i < len(value):
start = i
num_wire, i = decode_varint(value, i)
number = num_wire >> 3
wire_type = num_wire & 0x7
decoded: Any = None
if wire_type == WIRE_VARINT:
decoded, i = decode_varint(value, i)
elif wire_type == WIRE_FIXED_64:
decoded, i = value[i : i + 8], i + 8
elif wire_type == WIRE_LEN_DELIM:
length, i = decode_varint(value, i)
decoded = value[i : i + length]
i += length
elif wire_type == WIRE_FIXED_32:
decoded, i = value[i : i + 4], i + 4
yield ParsedField(
number=number, wire_type=wire_type, value=decoded, raw=value[start:i]
)
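# Illustrative example (not part of the original source): for the buffer
# b"\x08\xac\x02" this generator yields a single ParsedField with number=1,
# wire_type=WIRE_VARINT and value=300 (raw being the whole buffer).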
class ProtoClassMetadata:
__slots__ = (
"oneof_group_by_field",
"oneof_field_by_group",
"default_gen",
"cls_by_field",
"field_name_by_number",
"meta_by_field_name",
"sorted_field_names",
)
oneof_group_by_field: Dict[str, str]
oneof_field_by_group: Dict[str, Set[dataclasses.Field]]
field_name_by_number: Dict[int, str]
meta_by_field_name: Dict[str, FieldMetadata]
sorted_field_names: Tuple[str, ...]
default_gen: Dict[str, Callable[[], Any]]
cls_by_field: Dict[str, Type]
def __init__(self, cls: Type["Message"]):
by_field = {}
by_group: Dict[str, Set] = {}
by_field_name = {}
by_field_number = {}
fields = dataclasses.fields(cls)
for field in fields:
meta = FieldMetadata.get(field)
if meta.group:
# This is part of a one-of group.
by_field[field.name] = meta.group
by_group.setdefault(meta.group, set()).add(field)
by_field_name[field.name] = meta
by_field_number[meta.number] = field.name
self.oneof_group_by_field = by_field
self.oneof_field_by_group = by_group
self.field_name_by_number = by_field_number
self.meta_by_field_name = by_field_name
self.sorted_field_names = tuple(
by_field_number[number] for number in sorted(by_field_number)
)
self.default_gen = self._get_default_gen(cls, fields)
self.cls_by_field = self._get_cls_by_field(cls, fields)
@staticmethod
def _get_default_gen(
cls: Type["Message"], fields: List[dataclasses.Field]
) -> Dict[str, Callable[[], Any]]:
return {field.name: cls._get_field_default_gen(field) for field in fields}
@staticmethod
def _get_cls_by_field(
cls: Type["Message"], fields: List[dataclasses.Field]
) -> Dict[str, Type]:
field_cls = {}
for field in fields:
meta = FieldMetadata.get(field)
if meta.proto_type == TYPE_MAP:
assert meta.map_types
kt = cls._cls_for(field, index=0)
vt = cls._cls_for(field, index=1)
field_cls[field.name] = dataclasses.make_dataclass(
"Entry",
[
("key", kt, dataclass_field(1, meta.map_types[0])),
("value", vt, dataclass_field(2, meta.map_types[1])),
],
bases=(Message,),
)
field_cls[f"{field.name}.value"] = vt
else:
field_cls[field.name] = cls._cls_for(field)
return field_cls
class Message(ABC):
"""
The base class for protobuf messages, all generated messages will inherit from
this. This class registers the message fields which are used by the serializers and
parsers to go between the Python, binary and JSON representations of the message.
.. container:: operations
.. describe:: bytes(x)
Calls :meth:`__bytes__`.
.. describe:: bool(x)
Calls :meth:`__bool__`.
"""
_serialized_on_wire: bool
_unknown_fields: bytes
_group_current: Dict[str, str]
def __post_init__(self) -> None:
# Keep track of whether every field was default
all_sentinel = True
# Set current field of each group after `__init__` has already been run.
group_current: Dict[str, Optional[str]] = {}
for field_name, meta in self._betterproto.meta_by_field_name.items():
if meta.group:
group_current.setdefault(meta.group)
if self.__raw_get(field_name) != PLACEHOLDER:
# Found a non-sentinel value
all_sentinel = False
if meta.group:
# This was set, so make it the selected value of the one-of.
group_current[meta.group] = field_name
# Now that all the defaults are set, reset it!
self.__dict__["_serialized_on_wire"] = not all_sentinel
self.__dict__["_unknown_fields"] = b""
self.__dict__["_group_current"] = group_current
def __raw_get(self, name: str) -> Any:
return super().__getattribute__(name)
def __eq__(self, other) -> bool:
if type(self) is not type(other):
return False
for field_name in self._betterproto.meta_by_field_name:
self_val = self.__raw_get(field_name)
other_val = other.__raw_get(field_name)
if self_val is PLACEHOLDER:
if other_val is PLACEHOLDER:
continue
self_val = self._get_field_default(field_name)
elif other_val is PLACEHOLDER:
other_val = other._get_field_default(field_name)
if self_val != other_val:
return False
return True
def __repr__(self) -> str:
parts = [
f"{field_name}={value!r}"
for field_name in self._betterproto.sorted_field_names
for value in (self.__raw_get(field_name),)
if value is not PLACEHOLDER
]
return f"{self.__class__.__name__}({", ".join(parts)})"
def __getattribute__(self, name: str) -> Any:
"""
Lazily initialize default values to avoid infinite recursion for recursive
message types
"""
value = super().__getattribute__(name)
if value is not PLACEHOLDER:
return value
value = self._get_field_default(name)
super().__setattr__(name, value)
return value
def __setattr__(self, attr: str, value: Any) -> None:
if attr != "_serialized_on_wire":
# Track when a field has been set.
self.__dict__["_serialized_on_wire"] = True
if hasattr(self, "_group_current"): # __post_init__ had already run
if attr in self._betterproto.oneof_group_by_field:
group = self._betterproto.oneof_group_by_field[attr]
for field in self._betterproto.oneof_field_by_group[group]:
if field.name == attr:
self._group_current[group] = field.name
else:
super().__setattr__(field.name, PLACEHOLDER)
super().__setattr__(attr, value)
def __bool__(self) -> bool:
"""True if the Message has any fields with non-default values."""
return any(
self.__raw_get(field_name)
not in (PLACEHOLDER, self._get_field_default(field_name))
for field_name in self._betterproto.meta_by_field_name
)
@property
def _betterproto(self) -> ProtoClassMetadata:
"""
Lazy initialize metadata for each protobuf class.
It may be initialized multiple times in a multi-threaded environment,
but that won't affect the correctness.
"""
meta = getattr(self.__class__, "_betterproto_meta", None)
if not meta:
meta = ProtoClassMetadata(self.__class__)
self.__class__._betterproto_meta = meta
return meta
def __bytes__(self) -> bytes:
"""
Get the binary encoded Protobuf representation of this message instance.
"""
output = bytearray()
for field_name, meta in self._betterproto.meta_by_field_name.items():
value = getattr(self, field_name)
if value is None:
# Optional items should be skipped. This is used for the Google
# wrapper types.
continue
# Being selected in a group means this field is the one that is
# currently set in a `oneof` group, so it must be serialized even
# if the value is the default zero value.
selected_in_group = (
meta.group and self._group_current[meta.group] == field_name
)
# Empty messages can still be sent on the wire if they were
# set (or received empty).
serialize_empty = isinstance(value, Message) and value._serialized_on_wire
include_default_value_for_oneof = self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
if value == self._get_field_default(field_name) and not (
selected_in_group or serialize_empty or include_default_value_for_oneof
):
# Default (zero) values are not serialized. Two exceptions are
# if this is the selected oneof item or if we know we have to
# serialize an empty message (i.e. zero value was explicitly
# set by the user).
continue
if isinstance(value, list):
if meta.proto_type in PACKED_TYPES:
# Packed lists look like a length-delimited field. First,
# preprocess/encode each value into a buffer and then
# treat it like a field of raw bytes.
buf = bytearray()
for item in value:
buf += _preprocess_single(meta.proto_type, "", item)
output += _serialize_single(meta.number, TYPE_BYTES, buf)
else:
for item in value:
output += _serialize_single(
meta.number, meta.proto_type, item, wraps=meta.wraps or ""
)
elif isinstance(value, dict):
for k, v in value.items():
assert meta.map_types
sk = _serialize_single(1, meta.map_types[0], k)
sv = _serialize_single(2, meta.map_types[1], v)
output += _serialize_single(meta.number, meta.proto_type, sk + sv)
else:
# If we have an empty string and we're including the default value for
# a oneof, make sure we serialize it. This ensures that the byte string
# output isn't simply an empty string. This also ensures that round trip
# serialization will keep `which_one_of` calls consistent.
if (
isinstance(value, str)
and value == ""
and include_default_value_for_oneof
):
serialize_empty = True
output += _serialize_single(
meta.number,
meta.proto_type,
value,
serialize_empty=serialize_empty,
wraps=meta.wraps or "",
)
output += self._unknown_fields
return bytes(output)
# For compatibility with other libraries
def SerializeToString(self: T) -> bytes:
"""
Get the binary encoded Protobuf representation of this message instance.
.. note::
This is a method for compatibility with other libraries,
you should really use ``bytes(x)``.
Returns
--------
:class:`bytes`
The binary encoded Protobuf representation of this message instance
"""
return bytes(self)
@classmethod
def _type_hint(cls, field_name: str) -> Type:
return cls._type_hints()[field_name]
@classmethod
def _type_hints(cls) -> Dict[str, Type]:
module = sys.modules[cls.__module__]
return get_type_hints(cls, vars(module))
@classmethod
def _cls_for(cls, field: dataclasses.Field, index: int = 0) -> Type:
"""Get the message class for a field from the type hints."""
field_cls = cls._type_hint(field.name)
if hasattr(field_cls, "__args__") and index >= 0:
if field_cls.__args__ is not None:
field_cls = field_cls.__args__[index]
return field_cls
def _get_field_default(self, field_name: str) -> Any:
return self._betterproto.default_gen[field_name]()
@classmethod
def _get_field_default_gen(cls, field: dataclasses.Field) -> Any:
t = cls._type_hint(field.name)
if hasattr(t, "__origin__"):
if t.__origin__ in (dict, Dict):
# This is some kind of map (dict in Python).
return dict
elif t.__origin__ in (list, List):
# This is some kind of list (repeated) field.
return list
elif t.__origin__ is Union and t.__args__[1] is type(None):
# This is an optional (wrapped) field. For setting the default we
# really don't care what kind of field it is.
return type(None)
else:
return t
elif issubclass(t, Enum):
# Enums always default to zero.
return int
elif t is datetime:
# Offsets are relative to 1970-01-01T00:00:00Z
return datetime_default_gen
else:
# This is either a primitive scalar or another message type. Calling
# it should result in its zero value.
return t
def _postprocess_single(
self, wire_type: int, meta: FieldMetadata, field_name: str, value: Any
) -> Any:
"""Adjusts values after parsing."""
if wire_type == WIRE_VARINT:
if meta.proto_type in [TYPE_INT32, TYPE_INT64]:
bits = int(meta.proto_type[3:])
value = value & ((1 << bits) - 1)
signbit = 1 << (bits - 1)
value = int((value ^ signbit) - signbit)
elif meta.proto_type in [TYPE_SINT32, TYPE_SINT64]:
# Undo zig-zag encoding
value = (value >> 1) ^ (-(value & 1))
elif meta.proto_type == TYPE_BOOL:
# Booleans use a varint encoding, so convert it to true/false.
value = value > 0
elif wire_type in [WIRE_FIXED_32, WIRE_FIXED_64]:
fmt = _pack_fmt(meta.proto_type)
value = struct.unpack(fmt, value)[0]
elif wire_type == WIRE_LEN_DELIM:
if meta.proto_type == TYPE_STRING:
value = value.decode("utf-8")
elif meta.proto_type == TYPE_MESSAGE:
cls = self._betterproto.cls_by_field[field_name]
if cls == datetime:
value = _Timestamp().parse(value).to_datetime()
elif cls == timedelta:
value = _Duration().parse(value).to_timedelta()
elif meta.wraps:
# This is a Google wrapper value message around a single
# scalar type.
value = _get_wrapper(meta.wraps)().parse(value).value
else:
value = cls().parse(value)
value._serialized_on_wire = True
elif meta.proto_type == TYPE_MAP:
value = self._betterproto.cls_by_field[field_name]().parse(value)
return value
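# Illustrative note (not part of the original source): in the int32/int64
# branch above, a negative value arrives as its unsigned two's-complement
# varint; masking -1 (as int32) to 32 bits gives 4294967295, and
# (4294967295 ^ 0x80000000) - 0x80000000 recovers -1.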
def _include_default_value_for_oneof(
self, field_name: str, meta: FieldMetadata
) -> bool:
return (
meta.group is not None and self._group_current.get(meta.group) == field_name
)
def parse(self: T, data: bytes) -> T:
"""
Parse the binary encoded Protobuf into this message instance. This
returns the instance itself and is therefore assignable and chainable.
Parameters
-----------
data: :class:`bytes`
The data to parse the protobuf from.
Returns
--------
:class:`Message`
The initialized message.
"""
# Got some data over the wire
self._serialized_on_wire = True
proto_meta = self._betterproto
for parsed in parse_fields(data):
field_name = proto_meta.field_name_by_number.get(parsed.number)
if not field_name:
self._unknown_fields += parsed.raw
continue
meta = proto_meta.meta_by_field_name[field_name]
value: Any
if parsed.wire_type == WIRE_LEN_DELIM and meta.proto_type in PACKED_TYPES:
# This is a packed repeated field.
pos = 0
value = []
while pos < len(parsed.value):
if meta.proto_type in [TYPE_FLOAT, TYPE_FIXED32, TYPE_SFIXED32]:
decoded, pos = parsed.value[pos : pos + 4], pos + 4
wire_type = WIRE_FIXED_32
elif meta.proto_type in [TYPE_DOUBLE, TYPE_FIXED64, TYPE_SFIXED64]:
decoded, pos = parsed.value[pos : pos + 8], pos + 8
wire_type = WIRE_FIXED_64
else:
decoded, pos = decode_varint(parsed.value, pos)
wire_type = WIRE_VARINT
decoded = self._postprocess_single(
wire_type, meta, field_name, decoded
)
value.append(decoded)
else:
value = self._postprocess_single(
parsed.wire_type, meta, field_name, parsed.value
)
current = getattr(self, field_name)
if meta.proto_type == TYPE_MAP:
# Value represents a single key/value pair entry in the map.
current[value.key] = value.value
elif isinstance(current, list) and not isinstance(value, list):
current.append(value)
else:
setattr(self, field_name, value)
return self
# For compatibility with other libraries.
@classmethod
def FromString(cls: Type[T], data: bytes) -> T:
"""
Parse the binary encoded Protobuf into this message instance. This
returns the instance itself and is therefore assignable and chainable.
.. note::
This is a method for compatibility with other libraries,
you should really use :meth:`parse`.
Parameters
-----------
data: :class:`bytes`
The data to parse the protobuf from.
Returns
--------
:class:`Message`
The initialized message.
"""
return cls().parse(data)
def to_dict(
self, casing: Casing = Casing.CAMEL, include_default_values: bool = False
) -> Dict[str, Any]:
"""
Returns a JSON serializable dict representation of this object.
Parameters
-----------
casing: :class:`Casing`
The casing to use for key values. Default is :attr:`Casing.CAMEL` for
compatibility purposes.
include_default_values: :class:`bool`
If ``True`` will include the default values of fields. Default is ``False``.
E.g. an ``int32`` field will be included with a value of ``0`` if this is
set to ``True``, otherwise this would be ignored.
Returns
--------
Dict[:class:`str`, Any]
The JSON serializable dict representation of this object.
"""
output: Dict[str, Any] = {}
field_types = self._type_hints()
defaults = self._betterproto.default_gen
for field_name, meta in self._betterproto.meta_by_field_name.items():
field_is_repeated = defaults[field_name] is list
value = getattr(self, field_name)
cased_name = casing(field_name).rstrip("_") # type: ignore
if meta.proto_type == TYPE_MESSAGE:
if isinstance(value, datetime):
if (
value != DATETIME_ZERO
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = _Timestamp.timestamp_to_json(value)
elif isinstance(value, timedelta):
if (
value != timedelta(0)
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = _Duration.delta_to_json(value)
elif meta.wraps:
if value is not None or include_default_values:
output[cased_name] = value
elif field_is_repeated:
# Convert each item.
cls = self._betterproto.cls_by_field[field_name]
if cls == datetime:
value = [_Timestamp.timestamp_to_json(i) for i in value]
elif cls == timedelta:
value = [_Duration.delta_to_json(i) for i in value]
else:
value = [
i.to_dict(casing, include_default_values) for i in value
]
if value or include_default_values:
output[cased_name] = value
elif (
value._serialized_on_wire
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = value.to_dict(casing, include_default_values)
elif meta.proto_type == TYPE_MAP:
for k in value:
if hasattr(value[k], "to_dict"):
value[k] = value[k].to_dict(casing, include_default_values)
if value or include_default_values:
output[cased_name] = value
elif (
value != self._get_field_default(field_name)
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
if meta.proto_type in INT_64_TYPES:
if field_is_repeated:
output[cased_name] = [str(n) for n in value]
else:
output[cased_name] = str(value)
elif meta.proto_type == TYPE_BYTES:
if field_is_repeated:
output[cased_name] = [
b64encode(b).decode("utf8") for b in value
]
else:
output[cased_name] = b64encode(value).decode("utf8")
elif meta.proto_type == TYPE_ENUM:
if field_is_repeated:
enum_class: Type[Enum] = field_types[field_name].__args__[0]
if isinstance(value, typing.Iterable) and not isinstance(
value, str
):
output[cased_name] = [enum_class(el).name for el in value]
else:
# transparently upgrade single value to repeated
output[cased_name] = [enum_class(value).name]
else:
enum_class: Type[Enum] = field_types[field_name] # noqa
output[cased_name] = enum_class(value).name
else:
output[cased_name] = value
return output
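# Illustrative note (not part of the original source): the branches above
# follow the proto3 JSON mapping, e.g. a 64-bit integer field is emitted as
# the string "123", a bytes field as its base64 text, and an enum by name.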
def from_dict(self: T, value: Dict[str, Any]) -> T:
"""
Parse the key/value pairs into the current message instance. This returns the
instance itself and is therefore assignable and chainable.
Parameters
-----------
value: Dict[:class:`str`, Any]
The dictionary to parse from.
Returns
--------
:class:`Message`
The initialized message.
"""
self._serialized_on_wire = True
for key in value:
field_name = safe_snake_case(key)
meta = self._betterproto.meta_by_field_name.get(field_name)
if not meta:
continue
if value[key] is not None:
if meta.proto_type == TYPE_MESSAGE:
v = getattr(self, field_name)
if isinstance(v, list):
cls = self._betterproto.cls_by_field[field_name]
if cls == datetime:
v = [isoparse(item) for item in value[key]]
elif cls == timedelta:
v = [
timedelta(seconds=float(item[:-1]))
for item in value[key]
]
else:
v = [cls().from_dict(item) for item in value[key]]
elif isinstance(v, datetime):
v = isoparse(value[key])
setattr(self, field_name, v)
elif isinstance(v, timedelta):
v = timedelta(seconds=float(value[key][:-1]))
setattr(self, field_name, v)
elif meta.wraps:
setattr(self, field_name, value[key])
else:
# NOTE: `from_dict` mutates the underlying message, so no
# assignment here is necessary.
v.from_dict(value[key])
elif meta.map_types and meta.map_types[1] == TYPE_MESSAGE:
v = getattr(self, field_name)
cls = self._betterproto.cls_by_field[f"{field_name}.value"]
for k in value[key]:
v[k] = cls().from_dict(value[key][k])
else:
v = value[key]
if meta.proto_type in INT_64_TYPES:
if isinstance(value[key], list):
v = [int(n) for n in value[key]]
else:
v = int(value[key])
elif meta.proto_type == TYPE_BYTES:
if isinstance(value[key], list):
v = [b64decode(n) for n in value[key]]
else:
v = b64decode(value[key])
elif meta.proto_type == TYPE_ENUM:
enum_cls = self._betterproto.cls_by_field[field_name]
if isinstance(v, list):
v = [enum_cls.from_string(e) for e in v]
elif isinstance(v, str):
v = enum_cls.from_string(v)
if v is not None:
setattr(self, field_name, v)
return self
def to_json(self, indent: Union[None, int, str] = None) -> str:
"""A helper function to parse the message instance into its JSON
representation.
This is equivalent to::
json.dumps(message.to_dict(), indent=indent)
Parameters
-----------
indent: Optional[Union[:class:`int`, :class:`str`]]
The indent to pass to :func:`json.dumps`.
Returns
--------
:class:`str`
The JSON representation of the message.
"""
return json.dumps(self.to_dict(), indent=indent)
def from_json(self: T, value: Union[str, bytes]) -> T:
"""A helper function to return the message instance from its JSON
representation. This returns the instance itself and is therefore assignable
and chainable.
This is equivalent to::
return message.from_dict(json.loads(value))
Parameters
-----------
value: Union[:class:`str`, :class:`bytes`]
The value to pass to :func:`json.loads`.
Returns
--------
:class:`Message`
The initialized message.
"""
return self.from_dict(json.loads(value))
def serialized_on_wire(message: Message) -> bool:
"""
If this message was or should be serialized on the wire. This can be used to detect
presence (e.g. optional wrapper message) and is used internally during
parsing/serialization.
Returns
--------
:class:`bool`
Whether this message was or should be serialized on the wire.
"""
return message._serialized_on_wire
def which_one_of(message: Message, group_name: str) -> Tuple[str, Optional[Any]]:
"""
Return the name and value of a message's one-of field group.
Returns
--------
Tuple[:class:`str`, Any]
The field name and the value for that field.
"""
field_name = message._group_current.get(group_name)
if not field_name:
return "", None
return field_name, getattr(message, field_name)
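# Illustrative usage (not part of the original source), assuming a
# hypothetical message with a oneof group "shape" holding fields "circle"
# and "square":
#   name, value = which_one_of(msg, "shape")
#   # -> ("circle", <circle message>) if circle was set, or ("", None) if unset.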
# Circular import workaround: google.protobuf depends on base classes defined above.
from .lib.google.protobuf import ( # noqa
BoolValue,
BytesValue,
DoubleValue,
Duration,
FloatValue,
Int32Value,
Int64Value,
StringValue,
Timestamp,
UInt32Value,
UInt64Value,
)
class _Duration(Duration):
def to_timedelta(self) -> timedelta:
return timedelta(seconds=self.seconds, microseconds=self.nanos / 1e3)
@staticmethod
def delta_to_json(delta: timedelta) -> str:
parts = str(delta.total_seconds()).split(".")
if len(parts) > 1:
while len(parts[1]) not in [3, 6, 9]:
parts[1] = f"{parts[1]}0"
return f"{".".join(parts)}s"
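# Illustrative example (not part of the original source):
# _Duration.delta_to_json(timedelta(seconds=1, milliseconds=500)) == "1.500s"
# (the fractional part is padded to 3, 6 or 9 digits as required by the
# proto3 JSON format).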
class _Timestamp(Timestamp):
def to_datetime(self) -> datetime:
ts = self.seconds + (self.nanos / 1e9)
return datetime.fromtimestamp(ts, tz=timezone.utc)
@staticmethod
def timestamp_to_json(dt: datetime) -> str:
nanos = dt.microsecond * 1e3
copy = dt.replace(microsecond=0, tzinfo=None)
result = copy.isoformat()
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return f"{result}Z"
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return f"{result}.{int(nanos // 1e6) :03d}Z"
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return f"{result}.{int(nanos // 1e3) :06d}Z"
# Serialize 9 fractional digits.
return f"{result}.{int(nanos):09d}Z"
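# Illustrative example (not part of the original source):
# _Timestamp.timestamp_to_json(datetime(2021, 1, 1, 12, 0, 0, 500000,
# tzinfo=timezone.utc)) == "2021-01-01T12:00:00.500Z" (three fractional
# digits, since the microseconds form a whole number of milliseconds).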
class _WrappedMessage(Message):
"""
Google protobuf wrapper types base class. JSON representation is just the
value itself.
"""
value: Any
def to_dict(self, casing: Casing = Casing.CAMEL) -> Any:
return self.value
def from_dict(self: T, value: Any) -> T:
if value is not None:
self.value = value
return self
def _get_wrapper(proto_type: str) -> Type:
"""Get the wrapper message class for a wrapped type."""
return {
TYPE_BOOL: BoolValue,
TYPE_INT32: Int32Value,
TYPE_UINT32: UInt32Value,
TYPE_INT64: Int64Value,
TYPE_UINT64: UInt64Value,
TYPE_FLOAT: FloatValue,
TYPE_DOUBLE: DoubleValue,
TYPE_STRING: StringValue,
TYPE_BYTES: BytesValue,
}[proto_type]
|
import dataclasses
import enum
import inspect
import json
import struct
import sys
import typing
from abc import ABC
from base64 import b64decode, b64encode
from datetime import datetime, timedelta, timezone
from dateutil.parser import isoparse
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Optional,
Set,
Tuple,
Type,
Union,
get_type_hints,
)
from ._types import T
from .casing import camel_case, safe_snake_case, snake_case
from .grpc.grpclib_client import ServiceStub
# Proto 3 data types
TYPE_ENUM = "enum"
TYPE_BOOL = "bool"
TYPE_INT32 = "int32"
TYPE_INT64 = "int64"
TYPE_UINT32 = "uint32"
TYPE_UINT64 = "uint64"
TYPE_SINT32 = "sint32"
TYPE_SINT64 = "sint64"
TYPE_FLOAT = "float"
TYPE_DOUBLE = "double"
TYPE_FIXED32 = "fixed32"
TYPE_SFIXED32 = "sfixed32"
TYPE_FIXED64 = "fixed64"
TYPE_SFIXED64 = "sfixed64"
TYPE_STRING = "string"
TYPE_BYTES = "bytes"
TYPE_MESSAGE = "message"
TYPE_MAP = "map"
# Fields that use a fixed amount of space (4 or 8 bytes)
FIXED_TYPES = [
TYPE_FLOAT,
TYPE_DOUBLE,
TYPE_FIXED32,
TYPE_SFIXED32,
TYPE_FIXED64,
TYPE_SFIXED64,
]
# Fields that are numerical 64-bit types
INT_64_TYPES = [TYPE_INT64, TYPE_UINT64, TYPE_SINT64, TYPE_FIXED64, TYPE_SFIXED64]
# Fields that are efficiently packed when serialized
PACKED_TYPES = [
TYPE_ENUM,
TYPE_BOOL,
TYPE_INT32,
TYPE_INT64,
TYPE_UINT32,
TYPE_UINT64,
TYPE_SINT32,
TYPE_SINT64,
TYPE_FLOAT,
TYPE_DOUBLE,
TYPE_FIXED32,
TYPE_SFIXED32,
TYPE_FIXED64,
TYPE_SFIXED64,
]
# Wire types
# https://developers.google.com/protocol-buffers/docs/encoding#structure
WIRE_VARINT = 0
WIRE_FIXED_64 = 1
WIRE_LEN_DELIM = 2
WIRE_FIXED_32 = 5
# Mappings of which Proto 3 types correspond to which wire types.
WIRE_VARINT_TYPES = [
TYPE_ENUM,
TYPE_BOOL,
TYPE_INT32,
TYPE_INT64,
TYPE_UINT32,
TYPE_UINT64,
TYPE_SINT32,
TYPE_SINT64,
]
WIRE_FIXED_32_TYPES = [TYPE_FLOAT, TYPE_FIXED32, TYPE_SFIXED32]
WIRE_FIXED_64_TYPES = [TYPE_DOUBLE, TYPE_FIXED64, TYPE_SFIXED64]
WIRE_LEN_DELIM_TYPES = [TYPE_STRING, TYPE_BYTES, TYPE_MESSAGE, TYPE_MAP]
# Protobuf datetimes start at the Unix Epoch in 1970 in UTC.
def datetime_default_gen() -> datetime:
return datetime(1970, 1, 1, tzinfo=timezone.utc)
DATETIME_ZERO = datetime_default_gen()
class Casing(enum.Enum):
"""Casing constants for serialization."""
CAMEL = camel_case #: A camelCase serialization function.
SNAKE = snake_case #: A snake_case serialization function.
PLACEHOLDER: Any = object()
@dataclasses.dataclass(frozen=True)
class FieldMetadata:
"""Stores internal metadata used for parsing & serialization."""
# Protobuf field number
number: int
# Protobuf type name
proto_type: str
# Map information if the proto_type is a map
map_types: Optional[Tuple[str, str]] = None
# Groups several "one-of" fields together
group: Optional[str] = None
# Describes the wrapped type (e.g. when using google.protobuf.BoolValue)
wraps: Optional[str] = None
@staticmethod
def get(field: dataclasses.Field) -> "FieldMetadata":
"""Returns the field metadata for a dataclass field."""
return field.metadata["betterproto"]
def dataclass_field(
number: int,
proto_type: str,
*,
map_types: Optional[Tuple[str, str]] = None,
group: Optional[str] = None,
wraps: Optional[str] = None,
) -> dataclasses.Field:
"""Creates a dataclass field with attached protobuf metadata."""
return dataclasses.field(
default=PLACEHOLDER,
metadata={
"betterproto": FieldMetadata(number, proto_type, map_types, group, wraps)
},
)
# Note: the fields below return `Any` to prevent type errors in the generated
# data classes since the types won't match with `Field` and they get swapped
# out at runtime. The generated dataclass variables are still typed correctly.
def enum_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_ENUM, group=group)
def bool_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_BOOL, group=group)
def int32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_INT32, group=group)
def int64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_INT64, group=group)
def uint32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_UINT32, group=group)
def uint64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_UINT64, group=group)
def sint32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SINT32, group=group)
def sint64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SINT64, group=group)
def float_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_FLOAT, group=group)
def double_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_DOUBLE, group=group)
def fixed32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_FIXED32, group=group)
def fixed64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_FIXED64, group=group)
def sfixed32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SFIXED32, group=group)
def sfixed64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SFIXED64, group=group)
def string_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_STRING, group=group)
def bytes_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_BYTES, group=group)
def message_field(
number: int, group: Optional[str] = None, wraps: Optional[str] = None
) -> Any:
return dataclass_field(number, TYPE_MESSAGE, group=group, wraps=wraps)
def map_field(
number: int, key_type: str, value_type: str, group: Optional[str] = None
) -> Any:
return dataclass_field(
number, TYPE_MAP, map_types=(key_type, value_type), group=group
)
class Enum(enum.IntEnum):
"""
The base class for protobuf enumerations, all generated enumerations will inherit
from this. Bases :class:`enum.IntEnum`.
"""
@classmethod
def from_string(cls, name: str) -> "Enum":
"""Return the value which corresponds to the string name.
Parameters
-----------
name: :class:`str`
The name of the enum member to get
Raises
-------
:exc:`ValueError`
The member was not found in the Enum.
"""
try:
return cls._member_map_[name]
except KeyError as e:
raise ValueError(f"Unknown value {name} for enum {cls.__name__}") from e
def _pack_fmt(proto_type: str) -> str:
"""Returns a little-endian format string for reading/writing binary."""
return {
TYPE_DOUBLE: "<d",
TYPE_FLOAT: "<f",
TYPE_FIXED32: "<I",
TYPE_FIXED64: "<Q",
TYPE_SFIXED32: "<i",
TYPE_SFIXED64: "<q",
}[proto_type]
def encode_varint(value: int) -> bytes:
"""Encodes a single varint value for serialization."""
b: List[int] = []
if value < 0:
value += 1 << 64
bits = value & 0x7F
value >>= 7
while value:
b.append(0x80 | bits)
bits = value & 0x7F
value >>= 7
return bytes(b + [bits])
def _preprocess_single(proto_type: str, wraps: str, value: Any) -> bytes:
"""Adjusts values before serialization."""
if proto_type in [
TYPE_ENUM,
TYPE_BOOL,
TYPE_INT32,
TYPE_INT64,
TYPE_UINT32,
TYPE_UINT64,
]:
return encode_varint(value)
elif proto_type in [TYPE_SINT32, TYPE_SINT64]:
# Handle zig-zag encoding.
return encode_varint(value << 1 if value >= 0 else (value << 1) ^ (~0))
elif proto_type in FIXED_TYPES:
return struct.pack(_pack_fmt(proto_type), value)
elif proto_type == TYPE_STRING:
return value.encode("utf-8")
elif proto_type == TYPE_MESSAGE:
if isinstance(value, datetime):
# Convert the `datetime` to a timestamp message.
seconds = int(value.timestamp())
nanos = int(value.microsecond * 1e3)
value = _Timestamp(seconds=seconds, nanos=nanos)
elif isinstance(value, timedelta):
# Convert the `timedelta` to a duration message.
total_ms = value // timedelta(microseconds=1)
seconds = int(total_ms / 1e6)
nanos = int((total_ms % 1e6) * 1e3)
value = _Duration(seconds=seconds, nanos=nanos)
elif wraps:
if value is None:
return b""
value = _get_wrapper(wraps)(value=value)
return bytes(value)
return value
def _serialize_single(
field_number: int,
proto_type: str,
value: Any,
*,
serialize_empty: bool = False,
wraps: str = "",
) -> bytes:
"""Serializes a single field and value."""
value = _preprocess_single(proto_type, wraps, value)
output = bytearray()
if proto_type in WIRE_VARINT_TYPES:
key = encode_varint(field_number << 3)
output += key + value
elif proto_type in WIRE_FIXED_32_TYPES:
key = encode_varint((field_number << 3) | 5)
output += key + value
elif proto_type in WIRE_FIXED_64_TYPES:
key = encode_varint((field_number << 3) | 1)
output += key + value
elif proto_type in WIRE_LEN_DELIM_TYPES:
if len(value) or serialize_empty or wraps:
key = encode_varint((field_number << 3) | 2)
output += key + encode_varint(len(value)) + value
else:
raise NotImplementedError(proto_type)
return bytes(output)
def decode_varint(buffer: bytes, pos: int) -> Tuple[int, int]:
"""
Decode a single varint value from a byte buffer. Returns the value and the
new position in the buffer.
"""
result = 0
shift = 0
while 1:
b = buffer[pos]
result |= (b & 0x7F) << shift
pos += 1
if not (b & 0x80):
return result, pos
shift += 7
if shift >= 64:
raise ValueError("Too many bytes when decoding varint.")
@dataclasses.dataclass(frozen=True)
class ParsedField:
number: int
wire_type: int
value: Any
raw: bytes
def parse_fields(value: bytes) -> Generator[ParsedField, None, None]:
i = 0
while i < len(value):
start = i
num_wire, i = decode_varint(value, i)
number = num_wire >> 3
wire_type = num_wire & 0x7
decoded: Any = None
if wire_type == WIRE_VARINT:
decoded, i = decode_varint(value, i)
elif wire_type == WIRE_FIXED_64:
decoded, i = value[i : i + 8], i + 8
elif wire_type == WIRE_LEN_DELIM:
length, i = decode_varint(value, i)
decoded = value[i : i + length]
i += length
elif wire_type == WIRE_FIXED_32:
decoded, i = value[i : i + 4], i + 4
yield ParsedField(
number=number, wire_type=wire_type, value=decoded, raw=value[start:i]
)
class ProtoClassMetadata:
__slots__ = (
"oneof_group_by_field",
"oneof_field_by_group",
"default_gen",
"cls_by_field",
"field_name_by_number",
"meta_by_field_name",
"sorted_field_names",
)
oneof_group_by_field: Dict[str, str]
oneof_field_by_group: Dict[str, Set[dataclasses.Field]]
field_name_by_number: Dict[int, str]
meta_by_field_name: Dict[str, FieldMetadata]
sorted_field_names: Tuple[str, ...]
default_gen: Dict[str, Callable[[], Any]]
cls_by_field: Dict[str, Type]
def __init__(self, cls: Type["Message"]):
by_field = {}
by_group: Dict[str, Set] = {}
by_field_name = {}
by_field_number = {}
fields = dataclasses.fields(cls)
for field in fields:
meta = FieldMetadata.get(field)
if meta.group:
# This is part of a one-of group.
by_field[field.name] = meta.group
by_group.setdefault(meta.group, set()).add(field)
by_field_name[field.name] = meta
by_field_number[meta.number] = field.name
self.oneof_group_by_field = by_field
self.oneof_field_by_group = by_group
self.field_name_by_number = by_field_number
self.meta_by_field_name = by_field_name
self.sorted_field_names = tuple(
by_field_number[number] for number in sorted(by_field_number)
)
self.default_gen = self._get_default_gen(cls, fields)
self.cls_by_field = self._get_cls_by_field(cls, fields)
@staticmethod
def _get_default_gen(
cls: Type["Message"], fields: List[dataclasses.Field]
) -> Dict[str, Callable[[], Any]]:
return {field.name: cls._get_field_default_gen(field) for field in fields}
@staticmethod
def _get_cls_by_field(
cls: Type["Message"], fields: List[dataclasses.Field]
) -> Dict[str, Type]:
field_cls = {}
for field in fields:
meta = FieldMetadata.get(field)
if meta.proto_type == TYPE_MAP:
assert meta.map_types
kt = cls._cls_for(field, index=0)
vt = cls._cls_for(field, index=1)
field_cls[field.name] = dataclasses.make_dataclass(
"Entry",
[
("key", kt, dataclass_field(1, meta.map_types[0])),
("value", vt, dataclass_field(2, meta.map_types[1])),
],
bases=(Message,),
)
field_cls[f"{field.name}.value"] = vt
else:
field_cls[field.name] = cls._cls_for(field)
return field_cls
class Message(ABC):
"""
The base class for protobuf messages, all generated messages will inherit from
this. This class registers the message fields which are used by the serializers and
parsers to go between the Python, binary and JSON representations of the message.
.. container:: operations
.. describe:: bytes(x)
Calls :meth:`__bytes__`.
.. describe:: bool(x)
Calls :meth:`__bool__`.
"""
_serialized_on_wire: bool
_unknown_fields: bytes
_group_current: Dict[str, str]
def __post_init__(self) -> None:
# Keep track of whether every field was default
all_sentinel = True
# Set current field of each group after `__init__` has already been run.
group_current: Dict[str, Optional[str]] = {}
for field_name, meta in self._betterproto.meta_by_field_name.items():
if meta.group:
group_current.setdefault(meta.group)
if self.__raw_get(field_name) != PLACEHOLDER:
# Found a non-sentinel value
all_sentinel = False
if meta.group:
# This was set, so make it the selected value of the one-of.
group_current[meta.group] = field_name
# Now that all the defaults are set, reset it!
self.__dict__["_serialized_on_wire"] = not all_sentinel
self.__dict__["_unknown_fields"] = b""
self.__dict__["_group_current"] = group_current
def __raw_get(self, name: str) -> Any:
return super().__getattribute__(name)
def __eq__(self, other) -> bool:
if type(self) is not type(other):
return False
for field_name in self._betterproto.meta_by_field_name:
self_val = self.__raw_get(field_name)
other_val = other.__raw_get(field_name)
if self_val is PLACEHOLDER:
if other_val is PLACEHOLDER:
continue
self_val = self._get_field_default(field_name)
elif other_val is PLACEHOLDER:
other_val = other._get_field_default(field_name)
if self_val != other_val:
return False
return True
def __repr__(self) -> str:
parts = [
f"{field_name}={value!r}"
for field_name in self._betterproto.sorted_field_names
for value in (self.__raw_get(field_name),)
if value is not PLACEHOLDER
]
return f"{self.__class__.__name__}({', '.join(parts)})"
def __getattribute__(self, name: str) -> Any:
"""
Lazily initialize default values to avoid infinite recursion for recursive
message types
"""
value = super().__getattribute__(name)
if value is not PLACEHOLDER:
return value
value = self._get_field_default(name)
super().__setattr__(name, value)
return value
def __setattr__(self, attr: str, value: Any) -> None:
if attr != "_serialized_on_wire":
# Track when a field has been set.
self.__dict__["_serialized_on_wire"] = True
if hasattr(self, "_group_current"): # __post_init__ had already run
if attr in self._betterproto.oneof_group_by_field:
group = self._betterproto.oneof_group_by_field[attr]
for field in self._betterproto.oneof_field_by_group[group]:
if field.name == attr:
self._group_current[group] = field.name
else:
super().__setattr__(field.name, PLACEHOLDER)
super().__setattr__(attr, value)
def __bool__(self) -> bool:
"""True if the Message has any fields with non-default values."""
return any(
self.__raw_get(field_name)
not in (PLACEHOLDER, self._get_field_default(field_name))
for field_name in self._betterproto.meta_by_field_name
)
@property
def _betterproto(self) -> ProtoClassMetadata:
"""
Lazy initialize metadata for each protobuf class.
It may be initialized multiple times in a multi-threaded environment,
but that won't affect the correctness.
"""
meta = getattr(self.__class__, "_betterproto_meta", None)
if not meta:
meta = ProtoClassMetadata(self.__class__)
self.__class__._betterproto_meta = meta
return meta
def __bytes__(self) -> bytes:
"""
Get the binary encoded Protobuf representation of this message instance.
"""
output = bytearray()
for field_name, meta in self._betterproto.meta_by_field_name.items():
value = getattr(self, field_name)
if value is None:
# Optional items should be skipped. This is used for the Google
# wrapper types.
continue
# Being selected in a group means this field is the one that is
# currently set in a `oneof` group, so it must be serialized even
# if the value is the default zero value.
selected_in_group = (
meta.group and self._group_current[meta.group] == field_name
)
# Empty messages can still be sent on the wire if they were
# set (or received empty).
serialize_empty = isinstance(value, Message) and value._serialized_on_wire
include_default_value_for_oneof = self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
if value == self._get_field_default(field_name) and not (
selected_in_group or serialize_empty or include_default_value_for_oneof
):
# Default (zero) values are not serialized. Two exceptions are
# if this is the selected oneof item or if we know we have to
# serialize an empty message (i.e. zero value was explicitly
# set by the user).
continue
if isinstance(value, list):
if meta.proto_type in PACKED_TYPES:
# Packed lists look like a length-delimited field. First,
# preprocess/encode each value into a buffer and then
# treat it like a field of raw bytes.
buf = bytearray()
for item in value:
buf += _preprocess_single(meta.proto_type, "", item)
output += _serialize_single(meta.number, TYPE_BYTES, buf)
else:
for item in value:
output += _serialize_single(
meta.number, meta.proto_type, item, wraps=meta.wraps or ""
)
elif isinstance(value, dict):
for k, v in value.items():
assert meta.map_types
sk = _serialize_single(1, meta.map_types[0], k)
sv = _serialize_single(2, meta.map_types[1], v)
output += _serialize_single(meta.number, meta.proto_type, sk + sv)
else:
# If we have an empty string and we're including the default value for
# a oneof, make sure we serialize it. This ensures that the byte string
# output isn't simply an empty string. This also ensures that round trip
# serialization will keep `which_one_of` calls consistent.
if (
isinstance(value, str)
and value == ""
and include_default_value_for_oneof
):
serialize_empty = True
output += _serialize_single(
meta.number,
meta.proto_type,
value,
serialize_empty=serialize_empty,
wraps=meta.wraps or "",
)
output += self._unknown_fields
return bytes(output)
# For compatibility with other libraries
def SerializeToString(self: T) -> bytes:
"""
Get the binary encoded Protobuf representation of this message instance.
.. note::
This is a method for compatibility with other libraries,
you should really use ``bytes(x)``.
Returns
--------
:class:`bytes`
The binary encoded Protobuf representation of this message instance
"""
return bytes(self)
@classmethod
def _type_hint(cls, field_name: str) -> Type:
return cls._type_hints()[field_name]
@classmethod
def _type_hints(cls) -> Dict[str, Type]:
module = sys.modules[cls.__module__]
return get_type_hints(cls, vars(module))
@classmethod
def _cls_for(cls, field: dataclasses.Field, index: int = 0) -> Type:
"""Get the message class for a field from the type hints."""
field_cls = cls._type_hint(field.name)
if hasattr(field_cls, "__args__") and index >= 0:
if field_cls.__args__ is not None:
field_cls = field_cls.__args__[index]
return field_cls
def _get_field_default(self, field_name: str) -> Any:
return self._betterproto.default_gen[field_name]()
@classmethod
def _get_field_default_gen(cls, field: dataclasses.Field) -> Any:
t = cls._type_hint(field.name)
if hasattr(t, "__origin__"):
if t.__origin__ in (dict, Dict):
# This is some kind of map (dict in Python).
return dict
elif t.__origin__ in (list, List):
# This is some kind of list (repeated) field.
return list
elif t.__origin__ is Union and t.__args__[1] is type(None):
# This is an optional (wrapped) field. For setting the default we
# really don't care what kind of field it is.
return type(None)
else:
return t
elif issubclass(t, Enum):
# Enums always default to zero.
return int
elif t is datetime:
# Offsets are relative to 1970-01-01T00:00:00Z
return datetime_default_gen
else:
# This is either a primitive scalar or another message type. Calling
# it should result in its zero value.
return t
def _postprocess_single(
self, wire_type: int, meta: FieldMetadata, field_name: str, value: Any
) -> Any:
"""Adjusts values after parsing."""
if wire_type == WIRE_VARINT:
if meta.proto_type in [TYPE_INT32, TYPE_INT64]:
bits = int(meta.proto_type[3:])
value = value & ((1 << bits) - 1)
signbit = 1 << (bits - 1)
value = int((value ^ signbit) - signbit)
elif meta.proto_type in [TYPE_SINT32, TYPE_SINT64]:
# Undo zig-zag encoding
value = (value >> 1) ^ (-(value & 1))
elif meta.proto_type == TYPE_BOOL:
# Booleans use a varint encoding, so convert it to true/false.
value = value > 0
elif wire_type in [WIRE_FIXED_32, WIRE_FIXED_64]:
fmt = _pack_fmt(meta.proto_type)
value = struct.unpack(fmt, value)[0]
elif wire_type == WIRE_LEN_DELIM:
if meta.proto_type == TYPE_STRING:
value = value.decode("utf-8")
elif meta.proto_type == TYPE_MESSAGE:
cls = self._betterproto.cls_by_field[field_name]
if cls == datetime:
value = _Timestamp().parse(value).to_datetime()
elif cls == timedelta:
value = _Duration().parse(value).to_timedelta()
elif meta.wraps:
# This is a Google wrapper value message around a single
# scalar type.
value = _get_wrapper(meta.wraps)().parse(value).value
else:
value = cls().parse(value)
value._serialized_on_wire = True
elif meta.proto_type == TYPE_MAP:
value = self._betterproto.cls_by_field[field_name]().parse(value)
return value
def _include_default_value_for_oneof(
self, field_name: str, meta: FieldMetadata
) -> bool:
return (
meta.group is not None and self._group_current.get(meta.group) == field_name
)
def parse(self: T, data: bytes) -> T:
"""
Parse the binary encoded Protobuf into this message instance. This
returns the instance itself and is therefore assignable and chainable.
Parameters
-----------
data: :class:`bytes`
The data to parse the protobuf from.
Returns
--------
:class:`Message`
The initialized message.
"""
# Got some data over the wire
self._serialized_on_wire = True
proto_meta = self._betterproto
for parsed in parse_fields(data):
field_name = proto_meta.field_name_by_number.get(parsed.number)
if not field_name:
self._unknown_fields += parsed.raw
continue
meta = proto_meta.meta_by_field_name[field_name]
value: Any
if parsed.wire_type == WIRE_LEN_DELIM and meta.proto_type in PACKED_TYPES:
# This is a packed repeated field.
pos = 0
value = []
while pos < len(parsed.value):
if meta.proto_type in [TYPE_FLOAT, TYPE_FIXED32, TYPE_SFIXED32]:
decoded, pos = parsed.value[pos : pos + 4], pos + 4
wire_type = WIRE_FIXED_32
elif meta.proto_type in [TYPE_DOUBLE, TYPE_FIXED64, TYPE_SFIXED64]:
decoded, pos = parsed.value[pos : pos + 8], pos + 8
wire_type = WIRE_FIXED_64
else:
decoded, pos = decode_varint(parsed.value, pos)
wire_type = WIRE_VARINT
decoded = self._postprocess_single(
wire_type, meta, field_name, decoded
)
value.append(decoded)
else:
value = self._postprocess_single(
parsed.wire_type, meta, field_name, parsed.value
)
current = getattr(self, field_name)
if meta.proto_type == TYPE_MAP:
# Value represents a single key/value pair entry in the map.
current[value.key] = value.value
elif isinstance(current, list) and not isinstance(value, list):
current.append(value)
else:
setattr(self, field_name, value)
return self
# For compatibility with other libraries.
@classmethod
def FromString(cls: Type[T], data: bytes) -> T:
"""
Parse the binary encoded Protobuf into this message instance. This
returns the instance itself and is therefore assignable and chainable.
.. note::
This is a method for compatibility with other libraries,
you should really use :meth:`parse`.
Parameters
-----------
data: :class:`bytes`
The data to parse the protobuf from.
Returns
--------
:class:`Message`
The initialized message.
"""
return cls().parse(data)
def to_dict(
self, casing: Casing = Casing.CAMEL, include_default_values: bool = False
) -> Dict[str, Any]:
"""
Returns a JSON serializable dict representation of this object.
Parameters
-----------
casing: :class:`Casing`
The casing to use for key values. Default is :attr:`Casing.CAMEL` for
compatibility purposes.
include_default_values: :class:`bool`
If ``True`` will include the default values of fields. Default is ``False``.
E.g. an ``int32`` field will be included with a value of ``0`` if this is
set to ``True``, otherwise this would be ignored.
Returns
--------
Dict[:class:`str`, Any]
The JSON serializable dict representation of this object.
"""
output: Dict[str, Any] = {}
field_types = self._type_hints()
defaults = self._betterproto.default_gen
for field_name, meta in self._betterproto.meta_by_field_name.items():
field_is_repeated = defaults[field_name] is list
value = getattr(self, field_name)
cased_name = casing(field_name).rstrip("_") # type: ignore
if meta.proto_type == TYPE_MESSAGE:
if isinstance(value, datetime):
if (
value != DATETIME_ZERO
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = _Timestamp.timestamp_to_json(value)
elif isinstance(value, timedelta):
if (
value != timedelta(0)
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = _Duration.delta_to_json(value)
elif meta.wraps:
if value is not None or include_default_values:
output[cased_name] = value
elif field_is_repeated:
# Convert each item.
cls = self._betterproto.cls_by_field[field_name]
if cls == datetime:
value = [_Timestamp.timestamp_to_json(i) for i in value]
elif cls == timedelta:
value = [_Duration.delta_to_json(i) for i in value]
else:
value = [
i.to_dict(casing, include_default_values) for i in value
]
if value or include_default_values:
output[cased_name] = value
elif (
value._serialized_on_wire
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = value.to_dict(casing, include_default_values)
elif meta.proto_type == TYPE_MAP:
for k in value:
if hasattr(value[k], "to_dict"):
value[k] = value[k].to_dict(casing, include_default_values)
if value or include_default_values:
output[cased_name] = value
elif (
value != self._get_field_default(field_name)
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
if meta.proto_type in INT_64_TYPES:
if field_is_repeated:
output[cased_name] = [str(n) for n in value]
else:
output[cased_name] = str(value)
elif meta.proto_type == TYPE_BYTES:
if field_is_repeated:
output[cased_name] = [
b64encode(b).decode("utf8") for b in value
]
else:
output[cased_name] = b64encode(value).decode("utf8")
elif meta.proto_type == TYPE_ENUM:
if field_is_repeated:
enum_class: Type[Enum] = field_types[field_name].__args__[0]
if isinstance(value, typing.Iterable) and not isinstance(
value, str
):
output[cased_name] = [enum_class(el).name for el in value]
else:
# transparently upgrade single value to repeated
output[cased_name] = [enum_class(value).name]
else:
enum_class: Type[Enum] = field_types[field_name] # noqa
output[cased_name] = enum_class(value).name
else:
output[cased_name] = value
return output
def from_dict(self: T, value: Dict[str, Any]) -> T:
"""
Parse the key/value pairs into the current message instance. This returns the
instance itself and is therefore assignable and chainable.
Parameters
-----------
value: Dict[:class:`str`, Any]
The dictionary to parse from.
Returns
--------
:class:`Message`
The initialized message.
"""
self._serialized_on_wire = True
for key in value:
field_name = safe_snake_case(key)
meta = self._betterproto.meta_by_field_name.get(field_name)
if not meta:
continue
if value[key] is not None:
if meta.proto_type == TYPE_MESSAGE:
v = getattr(self, field_name)
if isinstance(v, list):
cls = self._betterproto.cls_by_field[field_name]
if cls == datetime:
v = [isoparse(item) for item in value[key]]
elif cls == timedelta:
v = [
timedelta(seconds=float(item[:-1]))
for item in value[key]
]
else:
v = [cls().from_dict(item) for item in value[key]]
elif isinstance(v, datetime):
v = isoparse(value[key])
setattr(self, field_name, v)
elif isinstance(v, timedelta):
v = timedelta(seconds=float(value[key][:-1]))
setattr(self, field_name, v)
elif meta.wraps:
setattr(self, field_name, value[key])
else:
# NOTE: `from_dict` mutates the underlying message, so no
# assignment here is necessary.
v.from_dict(value[key])
elif meta.map_types and meta.map_types[1] == TYPE_MESSAGE:
v = getattr(self, field_name)
cls = self._betterproto.cls_by_field[f"{field_name}.value"]
for k in value[key]:
v[k] = cls().from_dict(value[key][k])
else:
v = value[key]
if meta.proto_type in INT_64_TYPES:
if isinstance(value[key], list):
v = [int(n) for n in value[key]]
else:
v = int(value[key])
elif meta.proto_type == TYPE_BYTES:
if isinstance(value[key], list):
v = [b64decode(n) for n in value[key]]
else:
v = b64decode(value[key])
elif meta.proto_type == TYPE_ENUM:
enum_cls = self._betterproto.cls_by_field[field_name]
if isinstance(v, list):
v = [enum_cls.from_string(e) for e in v]
elif isinstance(v, str):
v = enum_cls.from_string(v)
if v is not None:
setattr(self, field_name, v)
return self
def to_json(self, indent: Union[None, int, str] = None) -> str:
"""A helper function to parse the message instance into its JSON
representation.
This is equivalent to::
json.dumps(message.to_dict(), indent=indent)
Parameters
-----------
indent: Optional[Union[:class:`int`, :class:`str`]]
The indent to pass to :func:`json.dumps`.
Returns
--------
:class:`str`
The JSON representation of the message.
"""
return json.dumps(self.to_dict(), indent=indent)
def from_json(self: T, value: Union[str, bytes]) -> T:
"""A helper function to return the message instance from its JSON
representation. This returns the instance itself and is therefore assignable
and chainable.
This is equivalent to::
return message.from_dict(json.loads(value))
Parameters
-----------
value: Union[:class:`str`, :class:`bytes`]
The value to pass to :func:`json.loads`.
Returns
--------
:class:`Message`
The initialized message.
"""
return self.from_dict(json.loads(value))
def serialized_on_wire(message: Message) -> bool:
"""
If this message was or should be serialized on the wire. This can be used to detect
presence (e.g. optional wrapper message) and is used internally during
parsing/serialization.
Returns
--------
:class:`bool`
Whether this message was or should be serialized on the wire.
"""
return message._serialized_on_wire
def which_one_of(message: Message, group_name: str) -> Tuple[str, Optional[Any]]:
"""
Return the name and value of a message's one-of field group.
Returns
--------
Tuple[:class:`str`, Any]
The field name and the value for that field.
"""
field_name = message._group_current.get(group_name)
if not field_name:
return "", None
return field_name, getattr(message, field_name)
# Circular import workaround: google.protobuf depends on base classes defined above.
from .lib.google.protobuf import ( # noqa
BoolValue,
BytesValue,
DoubleValue,
Duration,
FloatValue,
Int32Value,
Int64Value,
StringValue,
Timestamp,
UInt32Value,
UInt64Value,
)
class _Duration(Duration):
def to_timedelta(self) -> timedelta:
return timedelta(seconds=self.seconds, microseconds=self.nanos / 1e3)
@staticmethod
def delta_to_json(delta: timedelta) -> str:
parts = str(delta.total_seconds()).split(".")
if len(parts) > 1:
while len(parts[1]) not in [3, 6, 9]:
parts[1] = f"{parts[1]}0"
return f"{'.'.join(parts)}s"
class _Timestamp(Timestamp):
def to_datetime(self) -> datetime:
ts = self.seconds + (self.nanos / 1e9)
return datetime.fromtimestamp(ts, tz=timezone.utc)
@staticmethod
def timestamp_to_json(dt: datetime) -> str:
nanos = dt.microsecond * 1e3
copy = dt.replace(microsecond=0, tzinfo=None)
result = copy.isoformat()
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return f"{result}Z"
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return f"{result}.{int(nanos // 1e6) :03d}Z"
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return f"{result}.{int(nanos // 1e3) :06d}Z"
# Serialize 9 fractional digits.
return f"{result}.{nanos:09d}"
class _WrappedMessage(Message):
"""
Google protobuf wrapper types base class. JSON representation is just the
value itself.
"""
value: Any
def to_dict(self, casing: Casing = Casing.CAMEL) -> Any:
return self.value
def from_dict(self: T, value: Any) -> T:
if value is not None:
self.value = value
return self
def _get_wrapper(proto_type: str) -> Type:
"""Get the wrapper message class for a wrapped type."""
return {
TYPE_BOOL: BoolValue,
TYPE_INT32: Int32Value,
TYPE_UINT32: UInt32Value,
TYPE_INT64: Int64Value,
TYPE_UINT64: UInt64Value,
TYPE_FLOAT: FloatValue,
TYPE_DOUBLE: DoubleValue,
TYPE_STRING: StringValue,
TYPE_BYTES: BytesValue,
}[proto_type]
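# --------------------------------------------------------------------------- #
# Illustrative usage sketch (not part of the library source above). It assumes
# the field helpers (string_field, int32_field, ...) defined earlier in this
# module; the Greeting message and its field numbers are made up purely for
# demonstration.
# --------------------------------------------------------------------------- #
if __name__ == "__main__":

    @dataclasses.dataclass(eq=False, repr=False)
    class Greeting(Message):
        name: str = string_field(1)
        # Two mutually exclusive fields forming a "payload" oneof group.
        count: int = int32_field(2, group="payload")
        note: str = string_field(3, group="payload")

    msg = Greeting(name="hello")
    msg.count = 3  # selecting "count" marks it as the active oneof member

    encoded = bytes(msg)                 # binary protobuf encoding
    decoded = Greeting().parse(encoded)  # round-trip back into a message
    assert decoded.name == "hello"
    assert which_one_of(decoded, "payload") == ("count", 3)
    print(decoded.to_dict())             # -> {'name': 'hello', 'count': 3}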
|
# Module for running CNN-BiLSTM vad model,
# may also be run directly as a script
# Author: Nick Wilkinson 2021
import argparse
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from typing import Tuple
from tensorflow.keras import models
from voxseg import utils
from scipy.signal import medfilt
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only use the first GPU, quick enough for decoding
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
except RuntimeError as e:
# Visible devices must be set before GPUs have been initialized
print(e)
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=10,inter_op_parallelism_threads=10)
sess = tf.compat.v1.Session(config=session_conf)
def decode(targets: pd.DataFrame, speech_thresh: float = 0.5, speech_w_music_thresh: float = 0.5, filt: int = 1) -> pd.DataFrame:
'''Function for converting target sequences within a pd.DataFrame to endpoints.
Args:
targets: A pd.DataFrame containing predicted targets (in array form) and metadata.
speech_thresh (optional): A decision threshold between 0 and 1 for the speech class, lower values
result in more frames being classified as speech. (Default: 0.5)
speech_w_music_thresh (optional): A decision threshold between 0 and 1 for the speech_with_music class.
Setting this threshold higher will filter out more music which may be desirable for ASR. (Default: 0.5)
filt (optional): a kernel size for the median filter to apply to the output labels for smoothing. (Default: 1)
Returns:
A pd.DataFrame containing speech segment endpoints and metadata.
'''
targets = targets.copy()
if targets['predicted-targets'].iloc[0].shape[-1] == 4:
prior = np.array([(1-speech_thresh) * speech_w_music_thresh,
speech_thresh * speech_w_music_thresh,
(1-speech_thresh) * (1-speech_w_music_thresh),
(1-speech_thresh) * speech_w_music_thresh])
temp = pd.concat([_targets_to_endpoints(medfilt([0 if (j*prior).argmax() == 1 else 1 for j in i], filt), 0.32) \
for i in targets['predicted-targets']], ignore_index=True)
elif targets['predicted-targets'].iloc[0].shape[-1] == 2:
prior = np.array([speech_thresh,
1-speech_thresh])
temp = pd.concat([_targets_to_endpoints(medfilt([0 if (j*prior).argmax() == 0 else 1 for j in i], filt), 0.32) \
for i in targets['predicted-targets']], ignore_index=True)
else:
print(f'ERROR: model provided has {targets['predicted-targets'].iloc[0].shape[-1]} outputs. Model expected to have 2 or 4 outputs.')
if 'start' in targets.columns:
targets['end'] = targets['start'] + temp['end']
targets['start'] = targets['start'] + temp['start']
else:
targets['start'] = temp['start']
targets['end'] = temp['end']
targets = targets.drop(['predicted-targets'], axis=1)
targets = targets.apply(pd.Series.explode).reset_index(drop=True)
targets['utterance-id'] = targets['recording-id'].astype(str) + '_' + \
((targets['start'] * 100).astype(int)).astype(str).str.zfill(7) + '_' + \
((targets['end'] * 100).astype(int)).astype(str).str.zfill(7)
return targets
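# Illustrative sketch, not part of the original module: the input schema that
# decode() expects for the 2-class case. Each row holds one recording and a
# (num_frames, 2) posterior array under 'predicted-targets'; with the mapping
# above, frames whose second column dominates after the prior re-weighting are
# labelled as speech, and each frame spans 0.32 s. The recording name and
# filename below are made up for demonstration.
def _example_decode_usage() -> pd.DataFrame:
    toy_posteriors = np.array([[0.1, 0.9],   # speech-dominant frame
                               [0.2, 0.8],
                               [0.9, 0.1],   # non-speech frame
                               [0.3, 0.7]])
    toy_targets = pd.DataFrame({'recording-id': ['rec_000'],
                                'extended filename': ['rec_000.wav'],
                                'predicted-targets': [toy_posteriors]})
    return decode(toy_targets, speech_thresh=0.5, speech_w_music_thresh=0.5, filt=1)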
def predict_targets(model: tf.keras.Model, features: pd.DataFrame) -> pd.DataFrame:
'''Function for applying a pretrained model to predict targets from features.
Args:
model: A pretrained tf.keras model.
features: A pd.DataFrame containing features and metadata.
Returns:
A pd.DataFrame containing predicted targets and metadata.
'''
targets = features.drop(['normalized-features'], axis=1)
print('------------------- Running VAD -------------------')
targets['predicted-targets'] = _predict(model, features['normalized-features'])
return targets
def to_data_dir(endpoints: pd.DataFrame, out_dir: str) -> None:
    '''A function for generating a Kaldi-style data directory output of the discovered speech segments.
Args:
endpoints: A pd.DataFrame containing speech segment endpoints and metadata.
out_dir: A path to an output directory where data files will be placed.
'''
if not os.path.exists(out_dir):
print(f'Directory {out_dir} does not exist, creating it.')
os.mkdir(out_dir)
endpoints[['recording-id', 'extended filename']].drop_duplicates().to_csv(
f'{out_dir}/wav.scp',sep=' ', index=False, header=False)
pd.concat([endpoints[['utterance-id', 'recording-id']], endpoints[['start', 'end']].astype(float).round(3)],
axis=1).to_csv(f'{out_dir}/segments', sep=' ', index=False, header=False)
def _predict(model: tf.keras.Model, col: pd.Series) -> pd.Series:
'''Auxiliary function used by predict_targets(). Applies a pretrained model to
each feature set in the 'normalized-features' or 'features' column of a pd.DataFrame
containing features and metadata.
Args:
model: A pretrained tf.keras model.
col: A column of a pd.DataFrame containing features.
Returns:
A pd.Series containing the predicted target sequences.
'''
targets = []
for features in col:
#temp = model.predict(utils.time_distribute(features, 15)[:,:,:,:,np.newaxis])
temp = model.predict(features[np.newaxis,:,:,:,np.newaxis])
targets.append(temp.reshape(-1, temp.shape[-1]))
return pd.Series(targets)
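# Illustrative note, not part of the original module: _predict() adds a leading
# batch axis and a trailing channel axis before calling the model, so a feature
# block shaped (frames, height, width) is fed as (1, frames, height, width, 1)
# and the time-distributed output is flattened back to (frames, num_classes).
# The 32x32 patch size below is a made-up placeholder.
def _example_predict_shapes():
    dummy = np.zeros((100, 32, 32))                   # (frames, height, width)
    batched = dummy[np.newaxis, :, :, :, np.newaxis]  # (1, frames, height, width, 1)
    return dummy.shape, batched.shape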
def _targets_to_endpoints(targets: np.ndarray, frame_length: float) -> pd.DataFrame:
    '''Auxiliary function used by decode() for converting a target sequence to endpoints.
Args:
targets: A binary np.ndarray of speech/nonspeech targets where 1 indicates the presence of speech.
frame_length: The length of each target in seconds.
Returns:
A pd.DataFrame, containing the speech segment start and end boundaries in arrays.
'''
starts = []
ends = []
state = 0
for n, i in enumerate(targets):
state, emmision = _update_fst(state, i)
if emmision == 'start':
starts.append(n)
elif emmision == 'end':
ends.append(n)
state, emmision = _update_fst(state, None)
if emmision == 'start':
starts.append(n)
elif emmision == 'end':
ends.append(n + 1)
starts = np.around(np.array([i * frame_length for i in starts]), 3)
ends = np.around(np.array([i * frame_length for i in ends]), 3)
return pd.DataFrame({'start': [starts],'end': [ends]})
def _update_fst(state: int, transition: int) -> Tuple[int, str]:
'''Auxiliary function used by _targets_to_endpoints() for updating finite state
transducer.
Args:
state: The current state.
transition: The input (the next binary target).
Returns:
A tuple consisting of the new state and the output ('start', 'end' or None,
representing a start, end or no endpoint detections respectively).
'''
if state == 0:
if transition == 0:
state = 1
return state, None
elif transition == 1:
state = 2
return state, 'start'
elif state == 1:
if transition == 0:
return state, None
elif transition == 1:
state = 2
return state, 'start'
elif transition is None:
state = 3
return state, None
elif state == 2:
if transition == 0:
state = 1
return state, 'end'
elif transition == 1:
return state, None
elif transition is None:
state = 3
return state, 'end'
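# Illustrative sketch, not part of the original module: how the finite state
# transducer above turns a toy binary label sequence into endpoints. With the
# 0.32 s frame length used by decode(), the run of 1s at indices 1-3 becomes a
# single segment from 0.32 s to 1.28 s (the end index is exclusive).
def _example_targets_to_endpoints() -> pd.DataFrame:
    toy_labels = np.array([0, 1, 1, 1, 0, 0])
    return _targets_to_endpoints(toy_labels, 0.32)
    # expected result: start array [0.32], end array [1.28]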
# Handle args when run directly
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='run_cnnlstm.py',
description='Run a trained voice activity detector on extracted feature set.')
parser.add_argument('-s', '--speech_thresh', type=float,
help='a decision threshold value between (0,1) for speech vs non-speech, defaults to 0.5')
parser.add_argument('-m', '--speech_w_music_thresh', type=float,
help='a decision threshold value between (0,1) for speech_with_music vs non-speech, defaults to 0.5, \
                        increasing will remove more speech_with_music, useful for downstream ASR')
parser.add_argument('-f', '--median_filter_kernel', type=int,
help='a kernel size for a median filter to smooth the output labels, defaults to 1 (no smoothing)')
parser.add_argument('-M', '--model_path', type=str,
help='a path to a trained vad model saved as in .h5 format, overrides default pretrained model')
parser.add_argument('feat_dir', type=str,
help='a path to a directory containing a feats.h5 file with extracted features')
parser.add_argument('out_dir', type=str,
help='a path to an output directory where the output segments will be saved')
args = parser.parse_args()
if args.speech_thresh is not None:
speech_thresh = args.speech_thresh
else:
speech_thresh = 0.5
if args.speech_w_music_thresh is not None:
speech_w_music_thresh = args.speech_w_music_thresh
else:
speech_w_music_thresh = 0.5
if args.median_filter_kernel is not None:
filt = args.median_filter_kernel
else:
filt = 1
feats = pd.read_hdf(f'{args.feat_dir}/feats.h5')
if args.model_path is not None:
model = models.load_model(args.model_path)
else:
model = models.load_model(f'{os.path.dirname(os.path.realpath(__file__))}/models/cnn_bilstm.h5')
targets = predict_targets(model, feats)
endpoints = decode(targets, speech_thresh, speech_w_music_thresh, filt)
to_data_dir(endpoints, args.out_dir)
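# Example invocation (hypothetical paths) matching the argparse setup above:
#   python run_cnnlstm.py -s 0.5 -m 0.5 -f 3 path/to/feat_dir path/to/out_dir
# This reads feat_dir/feats.h5 and writes Kaldi-style wav.scp and segments
# files into out_dir.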
| # Module for running CNN-BiLSTM vad model,
# may also be run directly as a script
# Author: Nick Wilkinson 2021
import argparse
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from typing import Tuple
from tensorflow.keras import models
from voxseg import utils
from scipy.signal import medfilt
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only use the first GPU, quick enough for decoding
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
except RuntimeError as e:
# Visible devices must be set before GPUs have been initialized
print(e)
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=10,inter_op_parallelism_threads=10)
sess = tf.compat.v1.Session(config=session_conf)
def decode(targets: pd.DataFrame, speech_thresh: float = 0.5, speech_w_music_thresh: float = 0.5, filt: int = 1) -> pd.DataFrame:
'''Function for converting target sequences within a pd.DataFrame to endpoints.
Args:
targets: A pd.DataFrame containing predicted targets (in array form) and metadata.
speech_thresh (optional): A decision threshold between 0 and 1 for the speech class, lower values
result in more frames being classified as speech. (Default: 0.5)
speech_w_music_thresh (optional): A decision threshold between 0 and 1 for the speech_with_music class.
Setting this threshold higher will filter out more music which may be desirable for ASR. (Default: 0.5)
filt (optional): a kernel size for the median filter to apply to the output labels for smoothing. (Default: 1)
Returns:
A pd.DataFrame containing speech segment endpoints and metadata.
'''
targets = targets.copy()
if targets['predicted-targets'].iloc[0].shape[-1] == 4:
prior = np.array([(1-speech_thresh) * speech_w_music_thresh,
speech_thresh * speech_w_music_thresh,
(1-speech_thresh) * (1-speech_w_music_thresh),
(1-speech_thresh) * speech_w_music_thresh])
temp = pd.concat([_targets_to_endpoints(medfilt([0 if (j*prior).argmax() == 1 else 1 for j in i], filt), 0.32) \
for i in targets['predicted-targets']], ignore_index=True)
elif targets['predicted-targets'].iloc[0].shape[-1] == 2:
prior = np.array([speech_thresh,
1-speech_thresh])
temp = pd.concat([_targets_to_endpoints(medfilt([0 if (j*prior).argmax() == 0 else 1 for j in i], filt), 0.32) \
for i in targets['predicted-targets']], ignore_index=True)
else:
print(f'ERROR: model provided has {targets["predicted-targets"].iloc[0].shape[-1]} outputs. Model expected to have 2 or 4 outputs.')
if 'start' in targets.columns:
targets['end'] = targets['start'] + temp['end']
targets['start'] = targets['start'] + temp['start']
else:
targets['start'] = temp['start']
targets['end'] = temp['end']
targets = targets.drop(['predicted-targets'], axis=1)
targets = targets.apply(pd.Series.explode).reset_index(drop=True)
targets['utterance-id'] = targets['recording-id'].astype(str) + '_' + \
((targets['start'] * 100).astype(int)).astype(str).str.zfill(7) + '_' + \
((targets['end'] * 100).astype(int)).astype(str).str.zfill(7)
return targets
def predict_targets(model: tf.keras.Model, features: pd.DataFrame) -> pd.DataFrame:
'''Function for applying a pretrained model to predict targets from features.
Args:
model: A pretrained tf.keras model.
features: A pd.DataFrame containing features and metadata.
Returns:
A pd.DataFrame containing predicted targets and metadata.
'''
targets = features.drop(['normalized-features'], axis=1)
print('------------------- Running VAD -------------------')
targets['predicted-targets'] = _predict(model, features['normalized-features'])
return targets
def to_data_dir(endpoints: pd.DataFrame, out_dir: str) -> None:
    '''A function for generating a Kaldi-style data directory output of the discovered speech segments.
Args:
endpoints: A pd.DataFrame containing speech segment endpoints and metadata.
out_dir: A path to an output directory where data files will be placed.
'''
if not os.path.exists(out_dir):
print(f'Directory {out_dir} does not exist, creating it.')
os.mkdir(out_dir)
endpoints[['recording-id', 'extended filename']].drop_duplicates().to_csv(
f'{out_dir}/wav.scp',sep=' ', index=False, header=False)
pd.concat([endpoints[['utterance-id', 'recording-id']], endpoints[['start', 'end']].astype(float).round(3)],
axis=1).to_csv(f'{out_dir}/segments', sep=' ', index=False, header=False)
def _predict(model: tf.keras.Model, col: pd.Series) -> pd.Series:
'''Auxiliary function used by predict_targets(). Applies a pretrained model to
each feature set in the 'normalized-features' or 'features' column of a pd.DataFrame
containing features and metadata.
Args:
model: A pretrained tf.keras model.
col: A column of a pd.DataFrame containing features.
Returns:
A pd.Series containing the predicted target sequences.
'''
targets = []
for features in col:
#temp = model.predict(utils.time_distribute(features, 15)[:,:,:,:,np.newaxis])
temp = model.predict(features[np.newaxis,:,:,:,np.newaxis])
targets.append(temp.reshape(-1, temp.shape[-1]))
return pd.Series(targets)
def _targets_to_endpoints(targets: np.ndarray, frame_length: float) -> pd.DataFrame:
    '''Auxiliary function used by decode() for converting a target sequence to endpoints.
Args:
targets: A binary np.ndarray of speech/nonspeech targets where 1 indicates the presence of speech.
frame_length: The length of each target in seconds.
Returns:
A pd.DataFrame, containing the speech segment start and end boundaries in arrays.
'''
starts = []
ends = []
state = 0
for n, i in enumerate(targets):
state, emmision = _update_fst(state, i)
if emmision == 'start':
starts.append(n)
elif emmision == 'end':
ends.append(n)
state, emmision = _update_fst(state, None)
if emmision == 'start':
starts.append(n)
elif emmision == 'end':
ends.append(n + 1)
starts = np.around(np.array([i * frame_length for i in starts]), 3)
ends = np.around(np.array([i * frame_length for i in ends]), 3)
return pd.DataFrame({'start': [starts],'end': [ends]})
def _update_fst(state: int, transition: int) -> Tuple[int, str]:
'''Auxiliary function used by _targets_to_endpoints() for updating finite state
transducer.
Args:
state: The current state.
transition: The input (the next binary target).
Returns:
A tuple consisting of the new state and the output ('start', 'end' or None,
representing a start, end or no endpoint detections respectively).
'''
if state == 0:
if transition == 0:
state = 1
return state, None
elif transition == 1:
state = 2
return state, 'start'
elif state == 1:
if transition == 0:
return state, None
elif transition == 1:
state = 2
return state, 'start'
elif transition is None:
state = 3
return state, None
elif state == 2:
if transition == 0:
state = 1
return state, 'end'
elif transition == 1:
return state, None
elif transition is None:
state = 3
return state, 'end'
# Handle args when run directly
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='run_cnnlstm.py',
description='Run a trained voice activity detector on extracted feature set.')
parser.add_argument('-s', '--speech_thresh', type=float,
help='a decision threshold value between (0,1) for speech vs non-speech, defaults to 0.5')
parser.add_argument('-m', '--speech_w_music_thresh', type=float,
help='a decision threshold value between (0,1) for speech_with_music vs non-speech, defaults to 0.5, \
                        increasing will remove more speech_with_music, useful for downstream ASR')
parser.add_argument('-f', '--median_filter_kernel', type=int,
help='a kernel size for a median filter to smooth the output labels, defaults to 1 (no smoothing)')
parser.add_argument('-M', '--model_path', type=str,
help='a path to a trained vad model saved as in .h5 format, overrides default pretrained model')
parser.add_argument('feat_dir', type=str,
help='a path to a directory containing a feats.h5 file with extracted features')
parser.add_argument('out_dir', type=str,
help='a path to an output directory where the output segments will be saved')
args = parser.parse_args()
if args.speech_thresh is not None:
speech_thresh = args.speech_thresh
else:
speech_thresh = 0.5
if args.speech_w_music_thresh is not None:
speech_w_music_thresh = args.speech_w_music_thresh
else:
speech_w_music_thresh = 0.5
if args.median_filter_kernel is not None:
filt = args.median_filter_kernel
else:
filt = 1
feats = pd.read_hdf(f'{args.feat_dir}/feats.h5')
if args.model_path is not None:
model = models.load_model(args.model_path)
else:
model = models.load_model(f'{os.path.dirname(os.path.realpath(__file__))}/models/cnn_bilstm.h5')
targets = predict_targets(model, feats)
endpoints = decode(targets, speech_thresh, speech_w_music_thresh, filt)
to_data_dir(endpoints, args.out_dir)
|
# plot.py
# --------------- #
# Import Packages #
# --------------- #
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Set seaborn as default plot config
sns.set()
sns.set_style("whitegrid")
from itertools import cycle
# ---------------------------------- #
# Define Subdirectories & Info Files #
# ---------------------------------- #
data_dir = '../01_Data/'
info_dir = '../02_Info/'
plot_dir = '../04_Charts/'
# Create plot dir if necessary
if not os.path.exists(plot_dir): os.makedirs(plot_dir)
# Read in channel list & create list of sensor groups
full_channel_list = pd.read_csv(f'{info_dir}channel_list.csv', index_col='Channel_Name')
# ------------------- #
# Set Plot Parameters #
# ------------------- #
label_size = 18
tick_size = 16
line_width = 2
event_font = 12
font_rotation = 60
legend_font = 12
fig_width = 10
fig_height = 8
# ---------------------- #
# User-Defined Functions #
# ---------------------- #
def timestamp_to_seconds(timestamp):
timestamp = timestamp[11:]
hh, mm, ss = timestamp.split(':')
return(3600 * int(hh) + 60 * int(mm) + int(ss))
def convert_timestamps(timestamps, start_time):
raw_seconds = map(timestamp_to_seconds, timestamps)
return([s - start_time for s in list(raw_seconds)])
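# Illustrative sketch, not part of the original script: the events file stores
# wall-clock timestamps such as '2020-01-01 13:05:30'; timestamp_to_seconds()
# drops the date portion and converts HH:MM:SS to seconds, and
# convert_timestamps() shifts everything so the reference event sits at t = 0 s.
# The timestamps and start_time below are made-up values.
def _example_convert_timestamps():
    # 13:05:00 -> 13*3600 + 5*60 = 47100 s
    return convert_timestamps(['2020-01-01 13:05:30', '2020-01-01 13:07:00'], 47100)
    # -> [30, 120]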
def create_1plot_fig():
# Define figure for the plot
fig, ax1 = plt.subplots(figsize=(fig_width, fig_height))
# Set line colors & markers; reset axis lims
current_palette_8 = sns.color_palette('deep', 8)
sns.set_palette(current_palette_8)
plot_markers = cycle(['s', 'o', '^', 'd', 'h', 'p','v', '8', 'D', '*', '<', '>', 'H'])
x_max, y_min, y_max = 0, 0, 0
return(fig, ax1, plot_markers, x_max, y_min, y_max)
def format_and_save_plot(y_lims, x_lims, secondary_axis_label, file_loc):
# Set tick parameters
ax1.tick_params(labelsize=tick_size, length=0, width=0)
# Scale axes limits & labels
ax1.grid(True)
ax1.set_ylim(bottom=y_lims[0], top=y_lims[1])
ax1.set_xlim(x_lims[0] - x_lims[1] / 500, x_lims[1])
ax1.set_xlabel('Time (s)', fontsize=label_size)
# Secondary y-axis parameters
if secondary_axis_label != 'None':
ax2 = ax1.twinx()
ax2.tick_params(labelsize=tick_size, length=0, width=0)
ax2.set_ylabel(secondary_axis_label, fontsize=label_size)
if secondary_axis_label == 'Temperature ($^\circ$F)':
ax2.set_ylim([y_lims[0] * 1.8 + 32., y_lims[1] * 1.8 + 32.])
else:
ax2.set_ylim([secondary_axis_scale * y_lims[0], secondary_axis_scale * y_lims[1]])
ax2.yaxis.grid(visible=None)
# Add vertical lines and labels for timing information (if available)
ax3 = ax1.twiny()
ax3.set_xlim(x_lims[0] - x_lims[1] / 500, x_lims[1])
ax3.set_xticks([_x for _x in Events.index.values if _x >= x_lims[0] and _x <= x_lims[1]])
ax3.tick_params(axis='x', width=1, labelrotation=font_rotation, labelsize=event_font)
ax3.set_xticklabels([Events['Event'][_x] for _x in Events.index.values if _x >= x_lims[0] and _x <= x_lims[1]], fontsize=event_font, ha='left')
ax3.xaxis.grid(visible=None)
# Add legend, clean up whitespace padding, save chart as pdf, & close fig
handles1, labels1 = ax1.get_legend_handles_labels()
ax1.legend(handles1, labels1, loc='best', fontsize=legend_font, handlelength=3, frameon=True, framealpha=0.75)
fig.tight_layout()
plt.savefig(file_loc)
plt.close()
# ----------------- #
# Main Body of Code #
# ----------------- #
# Loop through test data files & create plots
for f in os.listdir(data_dir):
    # Skip if f is not an exp data file
if any([not f.endswith('.csv'), f.startswith('.'), f.endswith('_Events.csv')]):
continue
# Get test name from file & load data & event files for given experiment
test_name = f[:-4]
data_df = pd.read_csv(f'{data_dir}{f}', index_col='Time')
Events = pd.read_csv(f'{data_dir}{test_name}_Events.csv')
print (f'--- Loaded data for {test_name} ---')
# Create index column of time relative to ignition in events file
Events = pd.read_csv(f'{data_dir}{f[:-4]}_Events.csv')
Events.rename(columns={'Time':'Timestamp'}, inplace=True)
start_timestamp = Events.loc[0, 'Timestamp'][11:]
hh,mm,ss = start_timestamp.split(':')
start_time = 3600 * int(hh) + 60 * int(mm) + int(ss)
Events['Time'] = convert_timestamps(Events['Timestamp'], start_time)
Events = Events.set_index('Time')
# Define channel list as full list & drop unused channels for given experiment
channel_list = full_channel_list[[i in data_df.columns for i in full_channel_list.index]]
# Loop through channel groups to plot data from all channels in each group
for group in channel_list.groupby('Group').groups:
# Create figure for plot
print (f" Plotting {group.replace("_"," ")}")
fig, ax1, plot_markers, x_max, y_min, y_max = create_1plot_fig()
# Loop through each channel in given group
for channel in channel_list.groupby('Group').get_group(group).index.values:
# Set secondary axis default to None, get data type from channel list
secondary_axis_label = 'None'
data_type = channel_list.loc[channel, 'Type']
# Set plot parameters based on data type
if data_type == 'Temperature':
# Set y-axis labels & y_min
ax1.set_ylabel('Temperature ($^\circ$C)', fontsize=label_size)
secondary_axis_label = 'Temperature ($^\circ$F)'
y_min = 0
elif data_type == 'Velocity':
# Apply moving average & set y-axis labels, secondary scale
data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
ax1.set_ylabel('Velocity (m/s)', fontsize=label_size)
secondary_axis_label = 'Velocity (mph)'
secondary_axis_scale = 2.23694
elif data_type == 'Pressure':
# Apply moving average & set y-axis labels, secondary scale
data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
ax1.set_ylabel('Pressure (Pa)', fontsize=label_size)
elif data_type == 'Oxygen':
# Set y-axis label
ax1.set_ylabel('O$_2$ Concentration (%)', fontsize=label_size)
elif data_type.endswith('Heat Flux'):
# Apply moving average & set y-axis label
data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
ax1.set_ylabel('Heat Flux (kW/m$^2$)', fontsize=label_size)
elif data_type == 'Heat Release Rate':
# Set y-axis label
ax1.set_ylabel('Heat Release Rate (kW)', fontsize=label_size)
# Determine x max bound for current data & update max of chart if necessary
x_end = data_df[channel].index[-1]
if x_end > x_max:
x_max = x_end
# Plot channel data
ax1.plot(data_df.index, data_df[channel], lw=line_width,
marker=next(plot_markers), markevery=30, mew=3, mec='none', ms=7,
label=channel_list.loc[channel, 'Label'])
# Check if y min/max need to be updated
if data_df[channel].min() - abs(data_df[channel].min() * .1) < y_min:
y_min = data_df[channel].min() - abs(data_df[channel].min() * .1)
if data_df[channel].max() * 1.1 > y_max:
y_max = data_df[channel].max() * 1.1
# Add vertical lines for event labels; label to y axis
[ax1.axvline(_x, color='0.25', lw=1.5) for _x in Events.index.values if _x >= 0 and _x <= x_max]
# Define/create save directory, call function to format & save plot
save_dir = f'{plot_dir}{test_name}/'
if not os.path.exists(save_dir): os.makedirs(save_dir)
format_and_save_plot([y_min, y_max], [0, x_max], secondary_axis_label, f'{save_dir}{group}.pdf')
print() | # plot.py
# --------------- #
# Import Packages #
# --------------- #
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Set seaborn as default plot config
sns.set()
sns.set_style("whitegrid")
from itertools import cycle
# ---------------------------------- #
# Define Subdirectories & Info Files #
# ---------------------------------- #
data_dir = '../01_Data/'
info_dir = '../02_Info/'
plot_dir = '../04_Charts/'
# Create plot dir if necessary
if not os.path.exists(plot_dir): os.makedirs(plot_dir)
# Read in channel list & create list of sensor groups
full_channel_list = pd.read_csv(f'{info_dir}channel_list.csv', index_col='Channel_Name')
# ------------------- #
# Set Plot Parameters #
# ------------------- #
label_size = 18
tick_size = 16
line_width = 2
event_font = 12
font_rotation = 60
legend_font = 12
fig_width = 10
fig_height = 8
# ---------------------- #
# User-Defined Functions #
# ---------------------- #
def timestamp_to_seconds(timestamp):
timestamp = timestamp[11:]
hh, mm, ss = timestamp.split(':')
return(3600 * int(hh) + 60 * int(mm) + int(ss))
def convert_timestamps(timestamps, start_time):
raw_seconds = map(timestamp_to_seconds, timestamps)
return([s - start_time for s in list(raw_seconds)])
def create_1plot_fig():
# Define figure for the plot
fig, ax1 = plt.subplots(figsize=(fig_width, fig_height))
# Set line colors & markers; reset axis lims
current_palette_8 = sns.color_palette('deep', 8)
sns.set_palette(current_palette_8)
plot_markers = cycle(['s', 'o', '^', 'd', 'h', 'p','v', '8', 'D', '*', '<', '>', 'H'])
x_max, y_min, y_max = 0, 0, 0
return(fig, ax1, plot_markers, x_max, y_min, y_max)
def format_and_save_plot(y_lims, x_lims, secondary_axis_label, file_loc):
# Set tick parameters
ax1.tick_params(labelsize=tick_size, length=0, width=0)
# Scale axes limits & labels
ax1.grid(True)
ax1.set_ylim(bottom=y_lims[0], top=y_lims[1])
ax1.set_xlim(x_lims[0] - x_lims[1] / 500, x_lims[1])
ax1.set_xlabel('Time (s)', fontsize=label_size)
# Secondary y-axis parameters
if secondary_axis_label != 'None':
ax2 = ax1.twinx()
ax2.tick_params(labelsize=tick_size, length=0, width=0)
ax2.set_ylabel(secondary_axis_label, fontsize=label_size)
if secondary_axis_label == 'Temperature ($^\circ$F)':
ax2.set_ylim([y_lims[0] * 1.8 + 32., y_lims[1] * 1.8 + 32.])
else:
ax2.set_ylim([secondary_axis_scale * y_lims[0], secondary_axis_scale * y_lims[1]])
ax2.yaxis.grid(visible=None)
# Add vertical lines and labels for timing information (if available)
ax3 = ax1.twiny()
ax3.set_xlim(x_lims[0] - x_lims[1] / 500, x_lims[1])
ax3.set_xticks([_x for _x in Events.index.values if _x >= x_lims[0] and _x <= x_lims[1]])
ax3.tick_params(axis='x', width=1, labelrotation=font_rotation, labelsize=event_font)
ax3.set_xticklabels([Events['Event'][_x] for _x in Events.index.values if _x >= x_lims[0] and _x <= x_lims[1]], fontsize=event_font, ha='left')
ax3.xaxis.grid(visible=None)
# Add legend, clean up whitespace padding, save chart as pdf, & close fig
handles1, labels1 = ax1.get_legend_handles_labels()
ax1.legend(handles1, labels1, loc='best', fontsize=legend_font, handlelength=3, frameon=True, framealpha=0.75)
fig.tight_layout()
plt.savefig(file_loc)
plt.close()
# ----------------- #
# Main Body of Code #
# ----------------- #
# Loop through test data files & create plots
for f in os.listdir(data_dir):
# Skip if f is not a exp data file
if any([not f.endswith('.csv'), f.startswith('.'), f.endswith('_Events.csv')]):
continue
# Get test name from file & load data & event files for given experiment
test_name = f[:-4]
data_df = pd.read_csv(f'{data_dir}{f}', index_col='Time')
Events = pd.read_csv(f'{data_dir}{test_name}_Events.csv')
print (f'--- Loaded data for {test_name} ---')
# Create index column of time relative to ignition in events file
Events = pd.read_csv(f'{data_dir}{f[:-4]}_Events.csv')
Events.rename(columns={'Time':'Timestamp'}, inplace=True)
start_timestamp = Events.loc[0, 'Timestamp'][11:]
hh,mm,ss = start_timestamp.split(':')
start_time = 3600 * int(hh) + 60 * int(mm) + int(ss)
Events['Time'] = convert_timestamps(Events['Timestamp'], start_time)
Events = Events.set_index('Time')
# Define channel list as full list & drop unused channels for given experiment
channel_list = full_channel_list[[i in data_df.columns for i in full_channel_list.index]]
# Loop through channel groups to plot data from all channels in each group
for group in channel_list.groupby('Group').groups:
# Create figure for plot
print (f" Plotting {group.replace('_',' ')}")
fig, ax1, plot_markers, x_max, y_min, y_max = create_1plot_fig()
# Loop through each channel in given group
for channel in channel_list.groupby('Group').get_group(group).index.values:
# Set secondary axis default to None, get data type from channel list
secondary_axis_label = 'None'
data_type = channel_list.loc[channel, 'Type']
# Set plot parameters based on data type
if data_type == 'Temperature':
# Set y-axis labels & y_min
ax1.set_ylabel('Temperature ($^\circ$C)', fontsize=label_size)
secondary_axis_label = 'Temperature ($^\circ$F)'
y_min = 0
elif data_type == 'Velocity':
# Apply moving average & set y-axis labels, secondary scale
data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
ax1.set_ylabel('Velocity (m/s)', fontsize=label_size)
secondary_axis_label = 'Velocity (mph)'
secondary_axis_scale = 2.23694
elif data_type == 'Pressure':
# Apply moving average & set y-axis labels, secondary scale
data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
ax1.set_ylabel('Pressure (Pa)', fontsize=label_size)
elif data_type == 'Oxygen':
# Set y-axis label
ax1.set_ylabel('O$_2$ Concentration (%)', fontsize=label_size)
elif data_type.endswith('Heat Flux'):
# Apply moving average & set y-axis label
data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
ax1.set_ylabel('Heat Flux (kW/m$^2$)', fontsize=label_size)
elif data_type == 'Heat Release Rate':
# Set y-axis label
ax1.set_ylabel('Heat Release Rate (kW)', fontsize=label_size)
# Determine x max bound for current data & update max of chart if necessary
x_end = data_df[channel].index[-1]
if x_end > x_max:
x_max = x_end
# Plot channel data
ax1.plot(data_df.index, data_df[channel], lw=line_width,
marker=next(plot_markers), markevery=30, mew=3, mec='none', ms=7,
label=channel_list.loc[channel, 'Label'])
# Check if y min/max need to be updated
if data_df[channel].min() - abs(data_df[channel].min() * .1) < y_min:
y_min = data_df[channel].min() - abs(data_df[channel].min() * .1)
if data_df[channel].max() * 1.1 > y_max:
y_max = data_df[channel].max() * 1.1
# Add vertical lines for event labels; label to y axis
[ax1.axvline(_x, color='0.25', lw=1.5) for _x in Events.index.values if _x >= 0 and _x <= x_max]
# Define/create save directory, call function to format & save plot
save_dir = f'{plot_dir}{test_name}/'
if not os.path.exists(save_dir): os.makedirs(save_dir)
format_and_save_plot([y_min, y_max], [0, x_max], secondary_axis_label, f'{save_dir}{group}.pdf')
print() |
# -*- coding: utf-8 -*-
#
# VPP test framework documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 13 08:45:03 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import subprocess
from datetime import date
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
autodoc_mock_imports = ['objgraph',
'parameterized',
'pexpect',
'psutil',
'pympler',
'scapy',
'syslog_rfc5424_parser',
'vpp_papi']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'VPP test framework'
copyright = f'{date.today().year}, FD.io VPP team'
author = u'FD.io VPP team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
output = subprocess.run(['../../src/scripts/version'], stdout=subprocess.PIPE)
version = f'{output.stdout.decode('utf-8')}'
# The full version, including alpha/beta/rc tags.
release = f'{output.stdout.decode('utf-8')}'
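# A hedged alternative (assumption, not part of the original config): the
# version script's stdout typically ends with a newline, so stripping it keeps
# the |version| and |release| substitutions clean in the built pages, e.g.:
#
# version = output.stdout.decode('utf-8').strip()
# release = version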
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'VPP test framework v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'VPPtestframeworkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'VPPtestframework.tex', u'VPP test framework Documentation',
u'VPP team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'vpptestframework', u'VPP test framework Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'VPPtestframework', u'VPP test framework Documentation',
author, 'VPPtestframework', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| # -*- coding: utf-8 -*-
#
# VPP test framework documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 13 08:45:03 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import subprocess
from datetime import date
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
autodoc_mock_imports = ['objgraph',
'parameterized',
'pexpect',
'psutil',
'pympler',
'scapy',
'syslog_rfc5424_parser',
'vpp_papi']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'VPP test framework'
copyright = f'{date.today().year}, FD.io VPP team'
author = u'FD.io VPP team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
output = subprocess.run(['../../src/scripts/version'], stdout=subprocess.PIPE)
version = f'{output.stdout.decode("utf-8")}'
# The full version, including alpha/beta/rc tags.
release = f'{output.stdout.decode("utf-8")}'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'VPP test framework v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' users can customize the `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'VPPtestframeworkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'VPPtestframework.tex', u'VPP test framework Documentation',
u'VPP team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'vpptestframework', u'VPP test framework Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'VPPtestframework', u'VPP test framework Documentation',
author, 'VPPtestframework', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Check the tools submodule."""
import os
import pytest
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.interfaces.reportlets.masks import SimpleShowMaskRPT
from ..ancillary import init_brainextraction_wf
@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") == "true", reason="this is GH Actions")
@pytest.mark.parametrize("folder", ["magnitude/ds000054", "magnitude/ds000217"])
def test_brainmasker(tmpdir, datadir, workdir, outdir, folder):
"""Exercise the brain masking tool."""
tmpdir.chdir()
wf = pe.Workflow(name=f"test_mask_{folder.replace("/", "_")}")
if workdir:
wf.base_dir = str(workdir)
input_files = [
str(f) for f in (datadir / "brain-extraction-tests" / folder).glob("*.nii.gz")
]
inputnode = pe.Node(niu.IdentityInterface(fields=("in_file",)), name="inputnode")
inputnode.iterables = ("in_file", input_files)
merger = pe.Node(niu.Function(function=_merge), name="merger")
brainmask_wf = init_brainextraction_wf()
# fmt:off
wf.connect([
(inputnode, merger, [("in_file", "in_file")]),
(merger, brainmask_wf, [("out", "inputnode.in_file")]),
])
# fmt:on
if outdir:
out_path = outdir / "masks" / folder.split("/")[-1]
out_path.mkdir(exist_ok=True, parents=True)
report = pe.Node(SimpleShowMaskRPT(), name="report")
report.interface._always_run = True
def _report_name(fname, out_path):
from pathlib import Path
return str(
out_path
/ Path(fname)
.name.replace(".nii", "_mask.svg")
.replace("_magnitude", "_desc-magnitude")
.replace(".gz", "")
)
# fmt: off
wf.connect([
(inputnode, report, [(("in_file", _report_name, out_path), "out_report")]),
(brainmask_wf, report, [("outputnode.out_mask", "mask_file"),
("outputnode.out_file", "background_file")]),
])
# fmt: on
wf.run()
def _merge(in_file):
import nibabel as nb
import numpy as np
img = nb.squeeze_image(nb.load(in_file))
data = np.asanyarray(img.dataobj)
if data.ndim == 3:
return in_file
from pathlib import Path
data = data.mean(-1)
out_file = (Path() / "merged.nii.gz").absolute()
img.__class__(data, img.affine, img.header).to_filename(out_file)
return str(out_file)
| # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Check the tools submodule."""
import os
import pytest
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.interfaces.reportlets.masks import SimpleShowMaskRPT
from ..ancillary import init_brainextraction_wf
@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") == "true", reason="this is GH Actions")
@pytest.mark.parametrize("folder", ["magnitude/ds000054", "magnitude/ds000217"])
def test_brainmasker(tmpdir, datadir, workdir, outdir, folder):
"""Exercise the brain masking tool."""
tmpdir.chdir()
wf = pe.Workflow(name=f"test_mask_{folder.replace('/', '_')}")
if workdir:
wf.base_dir = str(workdir)
input_files = [
str(f) for f in (datadir / "brain-extraction-tests" / folder).glob("*.nii.gz")
]
inputnode = pe.Node(niu.IdentityInterface(fields=("in_file",)), name="inputnode")
inputnode.iterables = ("in_file", input_files)
merger = pe.Node(niu.Function(function=_merge), name="merger")
brainmask_wf = init_brainextraction_wf()
# fmt:off
wf.connect([
(inputnode, merger, [("in_file", "in_file")]),
(merger, brainmask_wf, [("out", "inputnode.in_file")]),
])
# fmt:on
if outdir:
out_path = outdir / "masks" / folder.split("/")[-1]
out_path.mkdir(exist_ok=True, parents=True)
report = pe.Node(SimpleShowMaskRPT(), name="report")
report.interface._always_run = True
def _report_name(fname, out_path):
from pathlib import Path
return str(
out_path
/ Path(fname)
.name.replace(".nii", "_mask.svg")
.replace("_magnitude", "_desc-magnitude")
.replace(".gz", "")
)
# fmt: off
wf.connect([
(inputnode, report, [(("in_file", _report_name, out_path), "out_report")]),
(brainmask_wf, report, [("outputnode.out_mask", "mask_file"),
("outputnode.out_file", "background_file")]),
])
# fmt: on
wf.run()
def _merge(in_file):
import nibabel as nb
import numpy as np
img = nb.squeeze_image(nb.load(in_file))
data = np.asanyarray(img.dataobj)
if data.ndim == 3:
return in_file
from pathlib import Path
data = data.mean(-1)
out_file = (Path() / "merged.nii.gz").absolute()
img.__class__(data, img.affine, img.header).to_filename(out_file)
return str(out_file)
|
"""
This module contains the code for the interactive conversion
of units at the command line.
@author: tgwoodcock
"""
from . import convmag_functions as cm
# interactive conversion
def main():
CONVERTING = True
print("*****Conversion between magnetic units.*****")
    print("\nAt the 'Input:' prompt, enter:")
print("[value startunit endunit] e.g. 6 T A/m,")
print("[units] to list the available units,")
print("[conv] to list the conversion factors or")
print("[q] to quit.")
while CONVERTING:
r = input("\nInput: ")
if r == "q":
CONVERTING = False
elif r == "units":
print("\nThe base units available for conversion are:")
print("\n".join(cm.units)+"\nmuB/fu")
print("\nThe prefactors available for any base unit are:",
", ".join(cm.prefactors))
elif r == "conv":
lgst = max(map(len, cm.units))
print("\nThe conversions between base units available are:")
for k in list(cm.convmag.keys()):
St, En = k.split("_")
print(f"{St:>{lgst}} <-> {En:<{lgst}}: {cm.convmag[k]}")
print(f"{"muB/fu":>{lgst}} <-> {"T":<{lgst}}: requires user input")
print("\nINFO: the factors given above are for the forward conversion")
print("INFO: permeability of free space, MU_0 = 4 * 3.14159 * 1e-7 H/m (== Vs/Am)")
print("INFO: Bohr magneton, MU_B = 9.274015e-24 Am^2")
print(" (muB is the unit string for conversions with Bohr magnetons)")
print("INFO: prefactors available for any base unit:",
", ".join(cm.prefactors))
else:
val = float(r.split(" ")[0])
startunit = r.split(" ")[1]
endunit = r.split(" ")[2]
if "muB/fu" in [startunit, endunit] and "T" in [startunit, endunit]:
print("\n***INFO: muB per formula unit <-> T***\n")
print("Please enter lattice parameters: a b c in Angstrom")
lp = input("a b c: ")
a = float(lp.split(" ")[0])
b = float(lp.split(" ")[1])
c = float(lp.split(" ")[2])
print("\nLimited to orthogonal or hexagonal unit cells:")
gamma = input("Please enter gamma in deg. (90 or 120): ")
if gamma == "120":
vol = cm.calculate_unitcell_volume(a, b, c, gamma=120)
elif gamma == "90":
vol = cm.calculate_unitcell_volume(a, b, c)
vol = vol * (1E-10)**3 # to get m^3 from A^3
print("Please enter the number of formula units per unit cell:")
num_fu = int(input("f.u./unit cell: "))
if startunit == "muB/fu":
Tesla = cm.muB_per_fu_to_Tesla(val, num_fu, vol)
s1 = f"\n{val} muB per f.u. = {Tesla:.5f} T"
s2 = f" ({num_fu:d} f.u./unit cell, "
s3 = f"cell volume = {vol:.3e} m^3)"
print("".join([s1, s2, s3]))
elif startunit == "T":
muB_fu = cm.Tesla_to_muB_per_fu(val, num_fu, vol)
s1 = f"\n{val} T = {muB_fu:.5f} muB per f.u."
s2 = f" ({num_fu:d} f.u./unit cell, "
s3 = f"cell volume = {vol:.3e} m^3)"
print("".join([s1, s2, s3]))
else:
cm.convert_unit(val, startunit, endunit, verbose=True)
| """
This module contains the code for the interactive conversion
of units at the command line.
@author: tgwoodcock
"""
from . import convmag_functions as cm
# interactive conversion
def main():
CONVERTING = True
print("*****Conversion between magnetic units.*****")
    print("\nAt the 'Input:' prompt, enter:")
print("[value startunit endunit] e.g. 6 T A/m,")
print("[units] to list the available units,")
print("[conv] to list the conversion factors or")
print("[q] to quit.")
while CONVERTING:
r = input("\nInput: ")
if r == "q":
CONVERTING = False
elif r == "units":
print("\nThe base units available for conversion are:")
print("\n".join(cm.units)+"\nmuB/fu")
print("\nThe prefactors available for any base unit are:",
", ".join(cm.prefactors))
elif r == "conv":
lgst = max(map(len, cm.units))
print("\nThe conversions between base units available are:")
for k in list(cm.convmag.keys()):
St, En = k.split("_")
print(f"{St:>{lgst}} <-> {En:<{lgst}}: {cm.convmag[k]}")
print(f"{'muB/fu':>{lgst}} <-> {'T':<{lgst}}: requires user input")
print("\nINFO: the factors given above are for the forward conversion")
print("INFO: permeability of free space, MU_0 = 4 * 3.14159 * 1e-7 H/m (== Vs/Am)")
print("INFO: Bohr magneton, MU_B = 9.274015e-24 Am^2")
print(" (muB is the unit string for conversions with Bohr magnetons)")
print("INFO: prefactors available for any base unit:",
", ".join(cm.prefactors))
else:
val = float(r.split(" ")[0])
startunit = r.split(" ")[1]
endunit = r.split(" ")[2]
if "muB/fu" in [startunit, endunit] and "T" in [startunit, endunit]:
print("\n***INFO: muB per formula unit <-> T***\n")
print("Please enter lattice parameters: a b c in Angstrom")
lp = input("a b c: ")
a = float(lp.split(" ")[0])
b = float(lp.split(" ")[1])
c = float(lp.split(" ")[2])
print("\nLimited to orthogonal or hexagonal unit cells:")
gamma = input("Please enter gamma in deg. (90 or 120): ")
if gamma == "120":
vol = cm.calculate_unitcell_volume(a, b, c, gamma=120)
elif gamma == "90":
vol = cm.calculate_unitcell_volume(a, b, c)
vol = vol * (1E-10)**3 # to get m^3 from A^3
print("Please enter the number of formula units per unit cell:")
num_fu = int(input("f.u./unit cell: "))
if startunit == "muB/fu":
Tesla = cm.muB_per_fu_to_Tesla(val, num_fu, vol)
s1 = f"\n{val} muB per f.u. = {Tesla:.5f} T"
s2 = f" ({num_fu:d} f.u./unit cell, "
s3 = f"cell volume = {vol:.3e} m^3)"
print("".join([s1, s2, s3]))
elif startunit == "T":
muB_fu = cm.Tesla_to_muB_per_fu(val, num_fu, vol)
s1 = f"\n{val} T = {muB_fu:.5f} muB per f.u."
s2 = f" ({num_fu:d} f.u./unit cell, "
s3 = f"cell volume = {vol:.3e} m^3)"
print("".join([s1, s2, s3]))
else:
cm.convert_unit(val, startunit, endunit, verbose=True)
|
import argparse
import os
import sys
import io
import time
import json
from bson.json_util import dumps
import traceback
import confluent_kafka
from ast import literal_eval
import avro.schema
import fastavro
import subprocess
import datetime
import multiprocessing
# import threading
import pymongo
import pytz
from numba import jit
import numpy as np
from tensorflow.keras.models import load_model
import gzip
import io
from astropy.io import fits
from copy import deepcopy
''' load config and secrets '''
with open('/app/config.json') as cjson:
config = json.load(cjson)
with open('/app/secrets.json') as sjson:
secrets = json.load(sjson)
for k in secrets:
config[k].update(secrets.get(k, {}))
def utc_now():
return datetime.datetime.now(pytz.utc)
def time_stamps():
"""
:return: local time, UTC time
"""
return datetime.datetime.now().strftime('%Y%m%d_%H:%M:%S'), \
datetime.datetime.utcnow().strftime('%Y%m%d_%H:%M:%S')
@jit
def deg2hms(x):
"""Transform degrees to *hours:minutes:seconds* strings.
Parameters
----------
x : float
        The degree value in [0, 360) to be written as a sexagesimal string.
Returns
-------
out : str
The input angle written as a sexagesimal string, in the
form, hours:minutes:seconds.
"""
assert 0.0 <= x < 360.0, 'Bad RA value in degrees'
# ac = Angle(x, unit='degree')
# hms = str(ac.to_string(unit='hour', sep=':', pad=True))
# print(str(hms))
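    # 15 degrees of RA correspond to 1 hour, so hours = degrees / 15 (written here as * 12.0 / 180.)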
_h = np.floor(x * 12.0 / 180.)
_m = np.floor((x * 12.0 / 180. - _h) * 60.0)
_s = ((x * 12.0 / 180. - _h) * 60.0 - _m) * 60.0
hms = '{:02.0f}:{:02.0f}:{:07.4f}'.format(_h, _m, _s)
# print(hms)
return hms
@jit
def deg2dms(x):
"""Transform degrees to *degrees:arcminutes:arcseconds* strings.
Parameters
----------
x : float
        The degree value in [-90, 90] to be converted.
Returns
-------
out : str
The input angle as a string, written as degrees:minutes:seconds.
"""
assert -90.0 <= x <= 90.0, 'Bad Dec value in degrees'
# ac = Angle(x, unit='degree')
# dms = str(ac.to_string(unit='degree', sep=':', pad=True))
# print(dms)
_d = np.floor(abs(x)) * np.sign(x)
_m = np.floor(np.abs(x - _d) * 60.0)
_s = np.abs(np.abs(x - _d) * 60.0 - _m) * 60.0
dms = '{:02.0f}:{:02.0f}:{:06.3f}'.format(_d, _m, _s)
# print(dms)
return dms
@jit
def great_circle_distance(ra1_deg, dec1_deg, ra2_deg, dec2_deg):
"""
Distance between two points on the sphere
:param ra1_deg:
:param dec1_deg:
:param ra2_deg:
:param dec2_deg:
:return: distance in degrees
"""
    # this is orders of magnitude faster than astropy.coordinates.SkyCoord.separation
DEGRA = np.pi / 180.0
ra1, dec1, ra2, dec2 = ra1_deg * DEGRA, dec1_deg * DEGRA, ra2_deg * DEGRA, dec2_deg * DEGRA
delta_ra = np.abs(ra2 - ra1)
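    # arctan2 form of the great-circle (Vincenty) formula: numerically stable for both very small and near-antipodal separations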
distance = np.arctan2(np.sqrt((np.cos(dec2) * np.sin(delta_ra)) ** 2
+ (np.cos(dec1) * np.sin(dec2) - np.sin(dec1) * np.cos(dec2) * np.cos(
delta_ra)) ** 2),
np.sin(dec1) * np.sin(dec2) + np.cos(dec1) * np.cos(dec2) * np.cos(delta_ra))
return distance * 180.0 / np.pi
@jit
def in_ellipse(alpha, delta0, alpha1, delta01, d0, axis_ratio, PA0):
"""
Check if a given point (alpha, delta0)
is within an ellipse specified by
center (alpha1, delta01), maj_ax (d0), axis ratio and positional angle
All angles are in decimal degrees
Adapted from q3c: https://github.com/segasai/q3c/blob/master/q3cube.c
:param alpha:
:param delta0:
:param alpha1:
:param delta01:
:param d0:
:param axis_ratio:
:param PA0:
:return:
"""
DEGRA = np.pi / 180.0
# convert degrees to radians
d_alpha = (alpha1 - alpha) * DEGRA
delta1 = delta01 * DEGRA
delta = delta0 * DEGRA
PA = PA0 * DEGRA
d = d0 * DEGRA
e = np.sqrt(1.0 - axis_ratio * axis_ratio)
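    # e is the ellipse eccentricity (axis_ratio = minor/major axis); the t* terms below expand the q3c point-in-ellipse test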
t1 = np.cos(d_alpha)
t22 = np.sin(d_alpha)
t3 = np.cos(delta1)
t32 = np.sin(delta1)
t6 = np.cos(delta)
t26 = np.sin(delta)
t9 = np.cos(d)
t55 = np.sin(d)
if (t3 * t6 * t1 + t32 * t26) < 0:
return False
t2 = t1 * t1
t4 = t3 * t3
t5 = t2 * t4
t7 = t6 * t6
t8 = t5 * t7
t10 = t9 * t9
t11 = t7 * t10
t13 = np.cos(PA)
t14 = t13 * t13
t15 = t14 * t10
t18 = t7 * t14
t19 = t18 * t10
t24 = np.sin(PA)
t31 = t1 * t3
t36 = 2.0 * t31 * t32 * t26 * t6
t37 = t31 * t32
t38 = t26 * t6
t45 = t4 * t10
t56 = t55 * t55
t57 = t4 * t7
t60 = -t8 + t5 * t11 + 2.0 * t5 * t15 - t5 * t19 - \
2.0 * t1 * t4 * t22 * t10 * t24 * t13 * t26 - t36 + \
2.0 * t37 * t38 * t10 - 2.0 * t37 * t38 * t15 - t45 * t14 - t45 * t2 + \
2.0 * t22 * t3 * t32 * t6 * t24 * t10 * t13 - t56 + t7 - t11 + t4 - t57 + t57 * t10 + t19 - t18 * t45
t61 = e * e
t63 = t60 * t61 + t8 + t57 - t4 - t7 + t56 + t36
return t63 > 0
"""Utilities for manipulating Avro data and schemas.
"""
def _loadSingleAvsc(file_path, names):
"""Load a single avsc file.
"""
with open(file_path) as file_text:
json_data = json.load(file_text)
schema = avro.schema.SchemaFromJSONData(json_data, names)
return schema
def combineSchemas(schema_files):
"""Combine multiple nested schemas into a single schema.
Parameters
----------
schema_files : `list`
List of files containing schemas.
If nested, most internal schema must be first.
Returns
-------
`dict`
Avro schema
"""
known_schemas = avro.schema.Names()
for s in schema_files:
schema = _loadSingleAvsc(s, known_schemas)
return schema.to_json()
def writeAvroData(json_data, json_schema):
"""Encode json into Avro format given a schema.
Parameters
----------
json_data : `dict`
The JSON data containing message content.
json_schema : `dict`
The writer Avro schema for encoding data.
Returns
-------
`_io.BytesIO`
Encoded data.
"""
bytes_io = io.BytesIO()
fastavro.schemaless_writer(bytes_io, json_schema, json_data)
return bytes_io
def readAvroData(bytes_io, json_schema):
"""Read data and decode with a given Avro schema.
Parameters
----------
bytes_io : `_io.BytesIO`
Data to be decoded.
json_schema : `dict`
The reader Avro schema for decoding data.
Returns
-------
`dict`
Decoded data.
"""
bytes_io.seek(0)
message = fastavro.schemaless_reader(bytes_io, json_schema)
return message
def readSchemaData(bytes_io):
"""Read data that already has an Avro schema.
Parameters
----------
bytes_io : `_io.BytesIO`
Data to be decoded.
Returns
-------
`dict`
Decoded data.
"""
bytes_io.seek(0)
message = fastavro.reader(bytes_io)
return message
class AlertError(Exception):
"""Base class for exceptions in this module.
"""
pass
class EopError(AlertError):
"""Exception raised when reaching end of partition.
Parameters
----------
msg : Kafka message
The Kafka message result from consumer.poll().
"""
def __init__(self, msg):
message = 'topic:%s, partition:%d, status:end, ' \
'offset:%d, key:%s, time:%.3f\n' \
% (msg.topic(), msg.partition(),
msg.offset(), str(msg.key()), time.time())
self.message = message
def __str__(self):
return self.message
class AlertConsumer(object):
"""Creates an alert stream Kafka consumer for a given topic.
Parameters
----------
topic : `str`
Name of the topic to subscribe to.
schema_files : Avro schema files
The reader Avro schema files for decoding data. Optional.
**kwargs
Keyword arguments for configuring confluent_kafka.Consumer().
"""
def __init__(self, topic, schema_files=None, **kwargs):
# keep track of disconnected partitions
self.num_disconnected_partitions = 0
self.topic = topic
def error_cb(err, _self=self):
print(*time_stamps(), 'error_cb -------->', err)
# print(err.code())
if err.code() == -195:
_self.num_disconnected_partitions += 1
if _self.num_disconnected_partitions == _self.num_partitions:
print(*time_stamps(), 'all partitions got disconnected, killing thread')
sys.exit()
else:
print(*time_stamps(), '{:s}: disconnected from partition.'.format(_self.topic),
'total:', self.num_disconnected_partitions)
# 'error_cb': error_cb
kwargs['error_cb'] = error_cb
self.consumer = confluent_kafka.Consumer(**kwargs)
self.num_partitions = 0
def on_assign(consumer, partitions, _self=self):
# force-reset offsets when subscribing to a topic:
for part in partitions:
# -2 stands for beginning and -1 for end
part.offset = -2
# keep number of partitions. when reaching end of last partition, kill thread and start from beginning
_self.num_partitions += 1
print(consumer.get_watermark_offsets(part))
self.consumer.subscribe([topic], on_assign=on_assign)
# self.consumer.subscribe([topic])
# fixme?
# if schema_files is not None:
# self.alert_schema = combineSchemas(schema_files)
# MongoDB:
self.config = config
self.collection_alerts = 'ZUDS_alerts'
self.collection_alerts_aux = 'ZUDS_alerts_aux'
self.db = None
self.connect_to_db()
# indexes
self.db['db'][self.collection_alerts].create_index([('coordinates.radec_geojson', '2dsphere'),
('candid', pymongo.DESCENDING)], background=True)
self.db['db'][self.collection_alerts].create_index([('coordinates.radec_geojson', '2dsphere'),
('objectId', pymongo.DESCENDING)], background=True)
self.db['db'][self.collection_alerts].create_index([('objectId', pymongo.ASCENDING)], background=True)
self.db['db'][self.collection_alerts].create_index([('candid', pymongo.ASCENDING)], background=True)
self.db['db'][self.collection_alerts].create_index([('candidate.ztfname', pymongo.ASCENDING)], background=True)
self.db['db'][self.collection_alerts].create_index([('candidate.jdstartstack', pymongo.DESCENDING),
('candidate.jdendstack', pymongo.ASCENDING)],
background=True, sparse=True)
self.db['db'][self.collection_alerts].create_index([('candidate.jd', pymongo.DESCENDING),
('candidate.drb', pymongo.DESCENDING),
('candid', pymongo.DESCENDING)],
background=True, sparse=True)
self.db['db'][self.collection_alerts].create_index([('candidate.jd', 1),
('candidate.drb', 1),
('candidate.isdiffpos', 1),
('candidate.ndethist', 1)],
name='jd__braai__magpsf__isdiffpos__ndethist',
background=True, sparse=True)
# ML models:
self.ml_models = dict()
for m in config['ml_models']:
try:
m_v = config["ml_models"][m]["version"]
self.ml_models[m] = {'model': load_model(f'/app/models/{m}_{m_v}.h5'),
'version': m_v}
except Exception as e:
print(*time_stamps(), f'Error loading ML model {m}')
traceback.print_exc()
print(e)
continue
def connect_to_db(self):
"""
Connect to mongo
:return:
"""
_config = self.config
try:
# there's only one instance of DB, it's too big to be replicated
_client = pymongo.MongoClient(host=_config['database']['host'],
port=_config['database']['port'], connect=False)
# grab main database:
_db = _client[_config['database']['db']]
except Exception as _e:
raise ConnectionRefusedError
try:
# authenticate
_db.authenticate(_config['database']['user'], _config['database']['pwd'])
except Exception as _e:
raise ConnectionRefusedError
self.db = dict()
self.db['client'] = _client
self.db['db'] = _db
def insert_db_entry(self, _collection=None, _db_entry=None):
"""
Insert a document _doc to collection _collection in DB.
It is monitored for timeout in case DB connection hangs for some reason
:param _collection:
:param _db_entry:
:return:
"""
assert _collection is not None, 'Must specify collection'
assert _db_entry is not None, 'Must specify document'
try:
self.db['db'][_collection].insert_one(_db_entry)
except Exception as _e:
print(*time_stamps(), 'Error inserting {:s} into {:s}'.format(str(_db_entry['_id']), _collection))
traceback.print_exc()
print(_e)
def insert_multiple_db_entries(self, _collection=None, _db_entries=None):
"""
Insert a document _doc to collection _collection in DB.
It is monitored for timeout in case DB connection hangs for some reason
:param _db:
:param _collection:
:param _db_entries:
:return:
"""
assert _collection is not None, 'Must specify collection'
assert _db_entries is not None, 'Must specify documents'
try:
# ordered=False ensures that every insert operation will be attempted
# so that if, e.g., a document already exists, it will be simply skipped
self.db['db'][_collection].insert_many(_db_entries, ordered=False)
except pymongo.errors.BulkWriteError as bwe:
print(*time_stamps(), bwe.details)
except Exception as _e:
traceback.print_exc()
print(_e)
def replace_db_entry(self, _collection=None, _filter=None, _db_entry=None):
"""
Insert a document _doc to collection _collection in DB.
It is monitored for timeout in case DB connection hangs for some reason
:param _collection:
:param _filter:
:param _db_entry:
:return:
"""
assert _collection is not None, 'Must specify collection'
assert _db_entry is not None, 'Must specify document'
try:
self.db['db'][_collection].replace_one(_filter, _db_entry, upsert=True)
except Exception as _e:
print(*time_stamps(), 'Error replacing {:s} in {:s}'.format(str(_db_entry['_id']), _collection))
traceback.print_exc()
print(_e)
@staticmethod
def alert_mongify(alert):
doc = dict(alert)
# let mongo create a unique id
# candid+objectId is a unique combination:
# doc['_id'] = f"{alert["candid"]}_{alert["objectId"]}"
# placeholders for cross-matches and classifications
# doc['cross_matches'] = dict()
doc['classifications'] = dict()
# GeoJSON for 2D indexing
doc['coordinates'] = {}
_ra = doc['candidate']['ra']
_dec = doc['candidate']['dec']
_radec = [_ra, _dec]
# string format: H:M:S, D:M:S
# tic = time.time()
_radec_str = [deg2hms(_ra), deg2dms(_dec)]
# print(time.time() - tic)
# print(_radec_str)
doc['coordinates']['radec_str'] = _radec_str
# for GeoJSON, must be lon:[-180, 180], lat:[-90, 90] (i.e. in deg)
_radec_geojson = [_ra - 180.0, _dec]
doc['coordinates']['radec_geojson'] = {'type': 'Point',
'coordinates': _radec_geojson}
# radians and degrees:
# doc['coordinates']['radec_rad'] = [_ra * np.pi / 180.0, _dec * np.pi / 180.0]
# doc['coordinates']['radec_deg'] = [_ra, _dec]
light_curve = deepcopy(doc['light_curve'])
doc.pop('light_curve', None)
if light_curve is None:
light_curve = []
for lc in light_curve:
if lc['flux'] > 0:
lc['mag'] = -2.5 * np.log10(lc['flux']) + lc['zp']
return doc, light_curve
def poll(self, path_alerts=None, path_tess=None, datestr=None, save_packets=True):
"""
Polls Kafka broker to consume topic.
:param path_alerts:
:param path_tess:
:param datestr:
:return:
"""
# msg = self.consumer.poll(timeout=timeout)
msg = self.consumer.poll()
if msg is None:
print(*time_stamps(), 'Caught error: msg is None')
        elif msg.error():
print('Caught error:', msg.error())
# if msg.value() is not None:
# print(*time_stamps(), msg.value())
raise EopError(msg)
elif msg is not None:
# decode avro packet
msg_decoded = self.decodeMessage(msg)
for record in msg_decoded:
candid = record['candid']
objectId = record['objectId']
print(*time_stamps(), self.topic, objectId, candid)
# check that candid not in collection_alerts
if self.db['db'][self.collection_alerts].count_documents({'candid': candid}, limit=1) == 0:
# candid not in db, ingest
if save_packets:
# save avro packet to disk
path_alert_dir = os.path.join(path_alerts, datestr)
# mkdir if does not exist
if not os.path.exists(path_alert_dir):
os.makedirs(path_alert_dir)
path_avro = os.path.join(path_alert_dir, f'{candid}.avro')
print(*time_stamps(), f'saving {candid} to disk')
with open(path_avro, 'wb') as f:
f.write(msg.value())
# ingest decoded avro packet into db
alert, light_curve = self.alert_mongify(record)
# alert filters:
# ML models:
scores = alert_filter__ml(record, ml_models=self.ml_models)
alert['classifications'] = scores
print(*time_stamps(), f'ingesting {alert['candid']} into db')
self.insert_db_entry(_collection=self.collection_alerts, _db_entry=alert)
# light_curve: pop nulls - save space
light_curve = [{kk: vv for kk, vv in lc.items() if vv is not None} for lc in light_curve]
# cross-match with external catalogs if objectId not in collection_alerts_aux:
if self.db['db'][self.collection_alerts_aux].count_documents({'_id': objectId}, limit=1) == 0:
# tic = time.time()
xmatches = alert_filter__xmatch(self.db['db'], alert)
# CLU cross-match:
xmatches = {**xmatches, **alert_filter__xmatch_clu(self.db['db'], alert)}
# alert['cross_matches'] = xmatches
# toc = time.time()
# print(f'xmatch for {alert['candid']} took {toc-tic:.2f} s')
alert_aux = {'_id': objectId,
'cross_matches': xmatches,
'light_curve': light_curve}
self.insert_db_entry(_collection=self.collection_alerts_aux, _db_entry=alert_aux)
else:
self.db['db'][self.collection_alerts_aux].update_one({'_id': objectId},
{'$addToSet':
{'light_curve':
{'$each': light_curve}}},
upsert=True)
# dump packet as json to disk if in a public TESS sector
if 'TESS' in alert['candidate']['programpi']:
# put light_curve back
alert['light_curve'] = light_curve
# get cross-matches
# xmatches = self.db['db'][self.collection_alerts_aux].find_one({'_id': objectId})
xmatches = self.db['db'][self.collection_alerts_aux].find({'_id': objectId},
{'cross_matches': 1},
limit=1)
xmatches = list(xmatches)[0]
alert['cross_matches'] = xmatches['cross_matches']
if save_packets:
path_tess_dir = os.path.join(path_tess, datestr)
# mkdir if does not exist
if not os.path.exists(path_tess_dir):
os.makedirs(path_tess_dir)
print(*time_stamps(), f'saving {alert['candid']} to disk')
try:
with open(os.path.join(path_tess_dir, f"{alert["candid"]}.json"), 'w') as f:
f.write(dumps(alert))
except Exception as e:
print(time_stamps(), str(e))
_err = traceback.format_exc()
print(*time_stamps(), str(_err))
def decodeMessage(self, msg):
"""Decode Avro message according to a schema.
Parameters
----------
msg : Kafka message
The Kafka message result from consumer.poll().
Returns
-------
`dict`
Decoded message.
"""
# print(msg.topic(), msg.offset(), msg.error(), msg.key(), msg.value())
message = msg.value()
# print(message)
try:
bytes_io = io.BytesIO(message)
decoded_msg = readSchemaData(bytes_io)
# print(decoded_msg)
# decoded_msg = readAvroData(bytes_io, self.alert_schema)
# print(decoded_msg)
except AssertionError:
# FIXME this exception is raised but not sure if it matters yet
bytes_io = io.BytesIO(message)
decoded_msg = None
except IndexError:
literal_msg = literal_eval(str(message, encoding='utf-8')) # works to give bytes
bytes_io = io.BytesIO(literal_msg) # works to give <class '_io.BytesIO'>
decoded_msg = readSchemaData(bytes_io) # yields reader
except Exception:
decoded_msg = message
finally:
return decoded_msg
def msg_text(message):
"""Remove postage stamp cutouts from an alert message.
"""
message_text = {k: message[k] for k in message
if k not in ['cutoutDifference', 'cutoutTemplate', 'cutoutScience']}
return message_text
def write_stamp_file(stamp_dict, output_dir):
"""Given a stamp dict that follows the cutout schema,
write data to a file in a given directory.
"""
try:
filename = stamp_dict['fileName']
try:
os.makedirs(output_dir)
except OSError:
pass
out_path = os.path.join(output_dir, filename)
with open(out_path, 'wb') as f:
f.write(stamp_dict['stampData'])
except TypeError:
sys.stderr.write('%% Cannot get stamp\n')
return
def alert_filter(alert, stampdir=None):
"""Filter to apply to each alert.
See schemas: https://github.com/ZwickyTransientFacility/ztf-avro-alert
"""
data = msg_text(alert)
if data: # Write your condition statement here
print(data) # Print all main alert data to screen
if stampdir is not None: # Collect all postage stamps
write_stamp_file(
alert.get('cutoutDifference'), stampdir)
write_stamp_file(
alert.get('cutoutTemplate'), stampdir)
write_stamp_file(
alert.get('cutoutScience'), stampdir)
return
def make_triplet(alert, to_tpu: bool = False):
"""
Feed in alert packet
"""
cutout_dict = dict()
for cutout in ('science', 'template', 'difference'):
# cutout_data = loads(dumps([alert[f'cutout{cutout.capitalize()}']['stampData']]))[0]
# cutout_data = alert[f'cutout{cutout.capitalize()}']['stampData']
cutout_data = alert[f'cutout{cutout.capitalize()}']
# unzip
with gzip.open(io.BytesIO(cutout_data), 'rb') as f:
with fits.open(io.BytesIO(f.read())) as hdu:
data = hdu[0].data
# replace nans with zeros
cutout_dict[cutout] = np.nan_to_num(data)
# L2-normalize
cutout_dict[cutout] /= np.linalg.norm(cutout_dict[cutout])
# pad to 63x63 if smaller
shape = cutout_dict[cutout].shape
if shape != (63, 63):
# print(f'Shape of {candid}/{cutout}: {shape}, padding to (63, 63)')
cutout_dict[cutout] = np.pad(cutout_dict[cutout], [(0, 63 - shape[0]), (0, 63 - shape[1])],
mode='constant', constant_values=1e-9)
triplet = np.zeros((63, 63, 3))
triplet[:, :, 0] = cutout_dict['science']
triplet[:, :, 1] = cutout_dict['template']
triplet[:, :, 2] = cutout_dict['difference']
if to_tpu:
# Edge TPUs require additional processing
triplet = np.rint(triplet * 128 + 128).astype(np.uint8).flatten()
return triplet
def alert_filter__ml(alert, ml_models: dict = None):
"""Filter to apply to each alert.
"""
scores = dict()
try:
''' braai '''
triplet = make_triplet(alert)
triplets = np.expand_dims(triplet, axis=0)
braai = ml_models['braai']['model'].predict(x=triplets)[0]
# braai = 1.0
scores['braai'] = float(braai)
scores['braai_version'] = ml_models['braai']['version']
except Exception as e:
print(*time_stamps(), str(e))
return scores
# cone search radius:
cone_search_radius = float(config['xmatch']['cone_search_radius'])
# convert to rad:
if config['xmatch']['cone_search_unit'] == 'arcsec':
cone_search_radius *= np.pi / 180.0 / 3600.
elif config['xmatch']['cone_search_unit'] == 'arcmin':
cone_search_radius *= np.pi / 180.0 / 60.
elif config['xmatch']['cone_search_unit'] == 'deg':
cone_search_radius *= np.pi / 180.0
elif config['xmatch']['cone_search_unit'] == 'rad':
cone_search_radius *= 1
else:
raise Exception('Unknown cone search unit. Must be in [deg, rad, arcsec, arcmin]')
def alert_filter__xmatch(db, alert):
"""
Filter to apply to each alert.
"""
xmatches = dict()
try:
ra_geojson = float(alert['candidate']['ra'])
# geojson-friendly ra:
ra_geojson -= 180.0
dec_geojson = float(alert['candidate']['dec'])
''' catalogs '''
for catalog in config['xmatch']['catalogs']:
catalog_filter = config['xmatch']['catalogs'][catalog]['filter']
catalog_projection = config['xmatch']['catalogs'][catalog]['projection']
object_position_query = dict()
object_position_query['coordinates.radec_geojson'] = {
'$geoWithin': {'$centerSphere': [[ra_geojson, dec_geojson], cone_search_radius]}}
s = db[catalog].find({**object_position_query, **catalog_filter},
{**catalog_projection})
xmatches[catalog] = list(s)
except Exception as e:
print(*time_stamps(), str(e))
return xmatches
# cone search radius in deg:
cone_search_radius_clu = 3.0
# convert deg to rad:
cone_search_radius_clu *= np.pi / 180.0
def alert_filter__xmatch_clu(database, alert, size_margin=3, clu_version='CLU_20190625'):
"""
Filter to apply to each alert.
:param size_margin: multiply galaxy size by this much before looking for a match
:param clu_version: CLU catalog version
"""
xmatches = dict()
try:
ra = float(alert['candidate']['ra'])
dec = float(alert['candidate']['dec'])
# geojson-friendly ra:
ra_geojson = float(alert['candidate']['ra']) - 180.0
dec_geojson = dec
catalog_filter = {}
catalog_projection = {"_id": 1, "name": 1, "ra": 1, "dec": 1,
"a": 1, "b2a": 1, "pa": 1, "z": 1,
"sfr_fuv": 1, "mstar": 1, "sfr_ha": 1,
"coordinates.radec_str": 1}
# first do a coarse search of everything that is around
object_position_query = dict()
object_position_query['coordinates.radec_geojson'] = {
'$geoWithin': {'$centerSphere': [[ra_geojson, dec_geojson], cone_search_radius_clu]}}
s = database[clu_version].find({**object_position_query, **catalog_filter},
{**catalog_projection})
galaxies = list(s)
# these guys are very big, so check them separately
M31 = {'_id': 596900, 'name': 'PGC2557',
'ra': 10.6847, 'dec': 41.26901, 'a': 6.35156, 'b2a': 0.32, 'pa': 35.0,
'sfr_fuv': None, 'mstar': 253816876.412914, 'sfr_ha': 0,
'coordinates': {'radec_geojson': ["00:42:44.3503", "41:16:08.634"]}
}
M33 = {'_id': 597543, 'name': 'PGC5818',
'ra': 23.46204, 'dec': 30.66022, 'a': 2.35983, 'b2a': 0.59, 'pa': 23.0,
'sfr_fuv': None, 'mstar': 4502777.420493, 'sfr_ha': 0,
'coordinates': {'radec_geojson': ["01:33:50.8900", "30:39:36.800"]}
}
# do elliptical matches
matches = []
for galaxy in galaxies + [M31, M33]:
alpha1, delta01 = galaxy['ra'], galaxy['dec']
d0, axis_ratio, PA0 = galaxy['a'], galaxy['b2a'], galaxy['pa']
# no shape info for galaxy? replace with median values
if d0 < -990:
d0 = 0.0265889
if axis_ratio < -990:
axis_ratio = 0.61
if PA0 < -990:
PA0 = 86.0
in_galaxy = in_ellipse(ra, dec, alpha1, delta01, size_margin * d0, axis_ratio, PA0)
if in_galaxy:
match = galaxy
distance_arcsec = round(great_circle_distance(ra, dec, alpha1, delta01) * 3600, 2)
match['coordinates']['distance_arcsec'] = distance_arcsec
matches.append(match)
xmatches[clu_version] = matches
except Exception as e:
print(*time_stamps(), str(e))
return xmatches
def listener(topic, bootstrap_servers='', offset_reset='earliest',
group=None, path_alerts=None, path_tess=None, save_packets=True):
"""
Listen to a topic
:param topic:
:param bootstrap_servers:
:param offset_reset:
:param group:
:param path_alerts:
:return:
"""
# def error_cb(err):
# print(*time_stamps(), 'error_cb -------->', err)
# # print(err.code())
# if err.code() == -195:
# print(*time_stamps(), 'got disconnected, killing thread')
# sys.exit()
# Configure consumer connection to Kafka broker
conf = {'bootstrap.servers': bootstrap_servers,
# 'error_cb': error_cb,
'default.topic.config': {'auto.offset.reset': offset_reset}}
if group is not None:
conf['group.id'] = group
else:
conf['group.id'] = os.environ['HOSTNAME'] if 'HOSTNAME' in os.environ else 'kowalski.caltech.edu'
# make it unique:
conf['group.id'] = '{:s}_{:s}'.format(conf['group.id'], datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f'))
# Configure Avro reader schema
schema_files = ["ztf-avro-alert/schema/candidate.avsc",
"ztf-avro-alert/schema/cutout.avsc",
"ztf-avro-alert/schema/light_curve.avsc",
"ztf-avro-alert/schema/alert.avsc"]
# date string:
datestr = topic.split('_')[1]
# Start alert stream consumer
stream_reader = AlertConsumer(topic, schema_files, **conf)
# todo: Subscribe alert filters to stream_readers
# todo: they will be notified when an alert arrived/got x-matched
while True:
try:
# poll!
# print(*time_stamps(), 'Polling')
stream_reader.poll(path_alerts=path_alerts, path_tess=path_tess,
datestr=datestr, save_packets=save_packets)
except EopError as e:
# Write when reaching end of partition
# sys.stderr.write(e.message)
print(*time_stamps(), e.message)
except IndexError:
# sys.stderr.write('%% Data cannot be decoded\n')
print(*time_stamps(), '%% Data cannot be decoded\n')
except UnicodeDecodeError:
# sys.stderr.write('%% Unexpected data format received\n')
print(*time_stamps(), '%% Unexpected data format received\n')
except KeyboardInterrupt:
# sys.stderr.write('%% Aborted by user\n')
print(*time_stamps(), '%% Aborted by user\n')
sys.exit()
except Exception as e:
print(*time_stamps(), str(e))
_err = traceback.format_exc()
print(*time_stamps(), str(_err))
sys.exit()
def main(_obs_date=None, _save_packets=True):
topics_on_watch = dict()
while True:
try:
if True:
# get kafka topic names with kafka-topics command
kafka_cmd = [config['kafka-topics']['cmd'],
'--zookeeper', config['kafka-topics']['zookeeper'], '-list']
# print(kafka_cmd)
topics = subprocess.run(kafka_cmd, stdout=subprocess.PIPE).stdout.decode('utf-8').split('\n')[:-1]
# print(topics)
if _obs_date is None:
datestr = datetime.datetime.utcnow().strftime('%Y%m%d')
else:
datestr = _obs_date
# as of 20180403 naming convention is ztf_%Y%m%d_programidN
# topics_tonight = [t for t in topics if (datestr in t) and ('programid' in t)]
# ZUDS only
topics_tonight = [t for t in topics if (datestr in t) and ('programid' in t) and ('zuds' in t)]
print(*time_stamps(), topics_tonight)
if False:
# for testing
topics_tonight = ['ztf_20180604_programid3']
for t in topics_tonight:
if t not in topics_on_watch:
print(*time_stamps(), f'starting listener thread for {t}')
offset_reset = config['kafka']['default.topic.config']['auto.offset.reset']
bootstrap_servers = config['kafka']['bootstrap.servers']
group = '{:s}'.format(config['kafka']['group'])
# print(group)
path_alerts = config['path']['path_alerts']
path_tess = config['path']['path_tess']
save_packets = _save_packets
# topics_on_watch[t] = threading.Thread(target=listener,
# args=(t, bootstrap_servers,
# offset_reset, group, path_alerts))
topics_on_watch[t] = multiprocessing.Process(target=listener,
args=(t, bootstrap_servers,
offset_reset, group,
path_alerts, path_tess,
save_packets))
topics_on_watch[t].daemon = True
topics_on_watch[t].start()
else:
print(*time_stamps(), f'performing thread health check for {t}')
try:
# if not topics_on_watch[t].isAlive():
if not topics_on_watch[t].is_alive():
print(*time_stamps(), f'{t} died, removing')
# topics_on_watch[t].terminate()
topics_on_watch.pop(t, None)
else:
print(*time_stamps(), f'{t} appears normal')
except Exception as _e:
print(*time_stamps(), 'Failed to perform health check', str(_e))
pass
except Exception as e:
print(*time_stamps(), str(e))
_err = traceback.format_exc()
print(*time_stamps(), str(_err))
if _obs_date is None:
time.sleep(300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Fetch AVRO packets from Kafka streams and ingest them into DB')
parser.add_argument('--obsdate', help='observing date')
parser.add_argument('--noio', help='reduce i/o - do not save packets', action='store_true')
args = parser.parse_args()
obs_date = args.obsdate
save = False if args.noio else True
# print(obs_date)
main(_obs_date=obs_date, _save_packets=save)
| import argparse
import os
import sys
import io
import time
import json
from bson.json_util import dumps
import traceback
import confluent_kafka
from ast import literal_eval
import avro.schema
import fastavro
import subprocess
import datetime
import multiprocessing
# import threading
import pymongo
import pytz
from numba import jit
import numpy as np
from tensorflow.keras.models import load_model
import gzip
import io
from astropy.io import fits
from copy import deepcopy
''' load config and secrets '''
with open('/app/config.json') as cjson:
config = json.load(cjson)
with open('/app/secrets.json') as sjson:
secrets = json.load(sjson)
for k in secrets:
config[k].update(secrets.get(k, {}))
def utc_now():
return datetime.datetime.now(pytz.utc)
def time_stamps():
"""
:return: local time, UTC time
"""
return datetime.datetime.now().strftime('%Y%m%d_%H:%M:%S'), \
datetime.datetime.utcnow().strftime('%Y%m%d_%H:%M:%S')
@jit
def deg2hms(x):
"""Transform degrees to *hours:minutes:seconds* strings.
Parameters
----------
x : float
        The degree value in [0, 360) to be written as a sexagesimal string.
Returns
-------
out : str
The input angle written as a sexagesimal string, in the
form, hours:minutes:seconds.
"""
assert 0.0 <= x < 360.0, 'Bad RA value in degrees'
# ac = Angle(x, unit='degree')
# hms = str(ac.to_string(unit='hour', sep=':', pad=True))
# print(str(hms))
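    # 15 degrees of RA correspond to 1 hour, so hours = degrees / 15 (written here as * 12.0 / 180.)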
_h = np.floor(x * 12.0 / 180.)
_m = np.floor((x * 12.0 / 180. - _h) * 60.0)
_s = ((x * 12.0 / 180. - _h) * 60.0 - _m) * 60.0
hms = '{:02.0f}:{:02.0f}:{:07.4f}'.format(_h, _m, _s)
# print(hms)
return hms
@jit
def deg2dms(x):
"""Transform degrees to *degrees:arcminutes:arcseconds* strings.
Parameters
----------
x : float
        The degree value in [-90, 90] to be converted.
Returns
-------
out : str
The input angle as a string, written as degrees:minutes:seconds.
"""
assert -90.0 <= x <= 90.0, 'Bad Dec value in degrees'
# ac = Angle(x, unit='degree')
# dms = str(ac.to_string(unit='degree', sep=':', pad=True))
# print(dms)
_d = np.floor(abs(x)) * np.sign(x)
_m = np.floor(np.abs(x - _d) * 60.0)
_s = np.abs(np.abs(x - _d) * 60.0 - _m) * 60.0
dms = '{:02.0f}:{:02.0f}:{:06.3f}'.format(_d, _m, _s)
# print(dms)
return dms
@jit
def great_circle_distance(ra1_deg, dec1_deg, ra2_deg, dec2_deg):
"""
Distance between two points on the sphere
:param ra1_deg:
:param dec1_deg:
:param ra2_deg:
:param dec2_deg:
:return: distance in degrees
"""
    # this is orders of magnitude faster than astropy.coordinates.SkyCoord.separation
DEGRA = np.pi / 180.0
ra1, dec1, ra2, dec2 = ra1_deg * DEGRA, dec1_deg * DEGRA, ra2_deg * DEGRA, dec2_deg * DEGRA
delta_ra = np.abs(ra2 - ra1)
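    # arctan2 form of the great-circle (Vincenty) formula: numerically stable for both very small and near-antipodal separations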
distance = np.arctan2(np.sqrt((np.cos(dec2) * np.sin(delta_ra)) ** 2
+ (np.cos(dec1) * np.sin(dec2) - np.sin(dec1) * np.cos(dec2) * np.cos(
delta_ra)) ** 2),
np.sin(dec1) * np.sin(dec2) + np.cos(dec1) * np.cos(dec2) * np.cos(delta_ra))
return distance * 180.0 / np.pi
@jit
def in_ellipse(alpha, delta0, alpha1, delta01, d0, axis_ratio, PA0):
"""
Check if a given point (alpha, delta0)
is within an ellipse specified by
center (alpha1, delta01), maj_ax (d0), axis ratio and positional angle
All angles are in decimal degrees
Adapted from q3c: https://github.com/segasai/q3c/blob/master/q3cube.c
:param alpha:
:param delta0:
:param alpha1:
:param delta01:
:param d0:
:param axis_ratio:
:param PA0:
:return:
"""
DEGRA = np.pi / 180.0
# convert degrees to radians
d_alpha = (alpha1 - alpha) * DEGRA
delta1 = delta01 * DEGRA
delta = delta0 * DEGRA
PA = PA0 * DEGRA
d = d0 * DEGRA
e = np.sqrt(1.0 - axis_ratio * axis_ratio)
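    # e is the ellipse eccentricity (axis_ratio = minor/major axis); the t* terms below expand the q3c point-in-ellipse test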
t1 = np.cos(d_alpha)
t22 = np.sin(d_alpha)
t3 = np.cos(delta1)
t32 = np.sin(delta1)
t6 = np.cos(delta)
t26 = np.sin(delta)
t9 = np.cos(d)
t55 = np.sin(d)
if (t3 * t6 * t1 + t32 * t26) < 0:
return False
t2 = t1 * t1
t4 = t3 * t3
t5 = t2 * t4
t7 = t6 * t6
t8 = t5 * t7
t10 = t9 * t9
t11 = t7 * t10
t13 = np.cos(PA)
t14 = t13 * t13
t15 = t14 * t10
t18 = t7 * t14
t19 = t18 * t10
t24 = np.sin(PA)
t31 = t1 * t3
t36 = 2.0 * t31 * t32 * t26 * t6
t37 = t31 * t32
t38 = t26 * t6
t45 = t4 * t10
t56 = t55 * t55
t57 = t4 * t7
t60 = -t8 + t5 * t11 + 2.0 * t5 * t15 - t5 * t19 - \
2.0 * t1 * t4 * t22 * t10 * t24 * t13 * t26 - t36 + \
2.0 * t37 * t38 * t10 - 2.0 * t37 * t38 * t15 - t45 * t14 - t45 * t2 + \
2.0 * t22 * t3 * t32 * t6 * t24 * t10 * t13 - t56 + t7 - t11 + t4 - t57 + t57 * t10 + t19 - t18 * t45
t61 = e * e
t63 = t60 * t61 + t8 + t57 - t4 - t7 + t56 + t36
return t63 > 0
"""Utilities for manipulating Avro data and schemas.
"""
def _loadSingleAvsc(file_path, names):
"""Load a single avsc file.
"""
with open(file_path) as file_text:
json_data = json.load(file_text)
schema = avro.schema.SchemaFromJSONData(json_data, names)
return schema
def combineSchemas(schema_files):
"""Combine multiple nested schemas into a single schema.
Parameters
----------
schema_files : `list`
List of files containing schemas.
If nested, most internal schema must be first.
Returns
-------
`dict`
Avro schema
"""
known_schemas = avro.schema.Names()
for s in schema_files:
schema = _loadSingleAvsc(s, known_schemas)
return schema.to_json()
def writeAvroData(json_data, json_schema):
"""Encode json into Avro format given a schema.
Parameters
----------
json_data : `dict`
The JSON data containing message content.
json_schema : `dict`
The writer Avro schema for encoding data.
Returns
-------
`_io.BytesIO`
Encoded data.
"""
bytes_io = io.BytesIO()
fastavro.schemaless_writer(bytes_io, json_schema, json_data)
return bytes_io
def readAvroData(bytes_io, json_schema):
"""Read data and decode with a given Avro schema.
Parameters
----------
bytes_io : `_io.BytesIO`
Data to be decoded.
json_schema : `dict`
The reader Avro schema for decoding data.
Returns
-------
`dict`
Decoded data.
"""
bytes_io.seek(0)
message = fastavro.schemaless_reader(bytes_io, json_schema)
return message
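# Hedged round-trip sketch with a minimal inline Avro schema (not one of the alert schemas):
# _schema = {"type": "record", "name": "Example", "fields": [{"name": "x", "type": "int"}]}
# _buf = writeAvroData({"x": 1}, _schema)
# assert readAvroData(_buf, _schema) == {"x": 1}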
def readSchemaData(bytes_io):
"""Read data that already has an Avro schema.
Parameters
----------
bytes_io : `_io.BytesIO`
Data to be decoded.
Returns
-------
`dict`
Decoded data.
"""
bytes_io.seek(0)
message = fastavro.reader(bytes_io)
return message
class AlertError(Exception):
"""Base class for exceptions in this module.
"""
pass
class EopError(AlertError):
"""Exception raised when reaching end of partition.
Parameters
----------
msg : Kafka message
The Kafka message result from consumer.poll().
"""
def __init__(self, msg):
message = 'topic:%s, partition:%d, status:end, ' \
'offset:%d, key:%s, time:%.3f\n' \
% (msg.topic(), msg.partition(),
msg.offset(), str(msg.key()), time.time())
self.message = message
def __str__(self):
return self.message
class AlertConsumer(object):
"""Creates an alert stream Kafka consumer for a given topic.
Parameters
----------
topic : `str`
Name of the topic to subscribe to.
schema_files : Avro schema files
The reader Avro schema files for decoding data. Optional.
**kwargs
Keyword arguments for configuring confluent_kafka.Consumer().
"""
def __init__(self, topic, schema_files=None, **kwargs):
# keep track of disconnected partitions
self.num_disconnected_partitions = 0
self.topic = topic
def error_cb(err, _self=self):
print(*time_stamps(), 'error_cb -------->', err)
# print(err.code())
if err.code() == -195:
_self.num_disconnected_partitions += 1
if _self.num_disconnected_partitions == _self.num_partitions:
print(*time_stamps(), 'all partitions got disconnected, killing thread')
sys.exit()
                else:
                    print(*time_stamps(), '{:s}: disconnected from partition.'.format(_self.topic),
                          'total:', _self.num_disconnected_partitions)
# 'error_cb': error_cb
kwargs['error_cb'] = error_cb
self.consumer = confluent_kafka.Consumer(**kwargs)
self.num_partitions = 0
def on_assign(consumer, partitions, _self=self):
# force-reset offsets when subscribing to a topic:
for part in partitions:
# -2 stands for beginning and -1 for end
part.offset = -2
# keep number of partitions. when reaching end of last partition, kill thread and start from beginning
_self.num_partitions += 1
print(consumer.get_watermark_offsets(part))
self.consumer.subscribe([topic], on_assign=on_assign)
# self.consumer.subscribe([topic])
# fixme?
# if schema_files is not None:
# self.alert_schema = combineSchemas(schema_files)
# MongoDB:
self.config = config
self.collection_alerts = 'ZUDS_alerts'
self.collection_alerts_aux = 'ZUDS_alerts_aux'
self.db = None
self.connect_to_db()
# indexes
self.db['db'][self.collection_alerts].create_index([('coordinates.radec_geojson', '2dsphere'),
('candid', pymongo.DESCENDING)], background=True)
self.db['db'][self.collection_alerts].create_index([('coordinates.radec_geojson', '2dsphere'),
('objectId', pymongo.DESCENDING)], background=True)
self.db['db'][self.collection_alerts].create_index([('objectId', pymongo.ASCENDING)], background=True)
self.db['db'][self.collection_alerts].create_index([('candid', pymongo.ASCENDING)], background=True)
self.db['db'][self.collection_alerts].create_index([('candidate.ztfname', pymongo.ASCENDING)], background=True)
self.db['db'][self.collection_alerts].create_index([('candidate.jdstartstack', pymongo.DESCENDING),
('candidate.jdendstack', pymongo.ASCENDING)],
background=True, sparse=True)
self.db['db'][self.collection_alerts].create_index([('candidate.jd', pymongo.DESCENDING),
('candidate.drb', pymongo.DESCENDING),
('candid', pymongo.DESCENDING)],
background=True, sparse=True)
self.db['db'][self.collection_alerts].create_index([('candidate.jd', 1),
('candidate.drb', 1),
('candidate.isdiffpos', 1),
('candidate.ndethist', 1)],
                                                            name='jd__drb__isdiffpos__ndethist',
background=True, sparse=True)
# ML models:
self.ml_models = dict()
for m in config['ml_models']:
try:
m_v = config["ml_models"][m]["version"]
self.ml_models[m] = {'model': load_model(f'/app/models/{m}_{m_v}.h5'),
'version': m_v}
except Exception as e:
print(*time_stamps(), f'Error loading ML model {m}')
traceback.print_exc()
print(e)
continue
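    # Hedged sketch of the expected config shape (the version string is a placeholder):
    # config['ml_models'] = {'braai': {'version': '<version>'}}
    # would attempt to load /app/models/braai_<version>.h5 into self.ml_models['braai'].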
def connect_to_db(self):
"""
Connect to mongo
:return:
"""
_config = self.config
try:
# there's only one instance of DB, it's too big to be replicated
_client = pymongo.MongoClient(host=_config['database']['host'],
port=_config['database']['port'], connect=False)
# grab main database:
_db = _client[_config['database']['db']]
        except Exception as _e:
            raise ConnectionRefusedError('Failed to connect to the database') from _e
        try:
            # authenticate
            _db.authenticate(_config['database']['user'], _config['database']['pwd'])
        except Exception as _e:
            raise ConnectionRefusedError('Failed to authenticate with the database') from _e
self.db = dict()
self.db['client'] = _client
self.db['db'] = _db
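    # Hedged sketch of the config keys read above (all values are placeholders):
    # config['database'] = {'host': '127.0.0.1', 'port': 27017,
    #                       'db': '<db name>', 'user': '<user>', 'pwd': '<password>'}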
def insert_db_entry(self, _collection=None, _db_entry=None):
"""
Insert a document _doc to collection _collection in DB.
It is monitored for timeout in case DB connection hangs for some reason
:param _collection:
:param _db_entry:
:return:
"""
assert _collection is not None, 'Must specify collection'
assert _db_entry is not None, 'Must specify document'
try:
self.db['db'][_collection].insert_one(_db_entry)
except Exception as _e:
print(*time_stamps(), 'Error inserting {:s} into {:s}'.format(str(_db_entry['_id']), _collection))
traceback.print_exc()
print(_e)
    def insert_multiple_db_entries(self, _collection=None, _db_entries=None):
        """
        Insert multiple documents _db_entries into collection _collection in DB.
        It is monitored for timeout in case DB connection hangs for some reason
        :param _collection:
        :param _db_entries:
        :return:
        """
assert _collection is not None, 'Must specify collection'
assert _db_entries is not None, 'Must specify documents'
try:
# ordered=False ensures that every insert operation will be attempted
# so that if, e.g., a document already exists, it will be simply skipped
self.db['db'][_collection].insert_many(_db_entries, ordered=False)
except pymongo.errors.BulkWriteError as bwe:
print(*time_stamps(), bwe.details)
except Exception as _e:
traceback.print_exc()
print(_e)
    def replace_db_entry(self, _collection=None, _filter=None, _db_entry=None):
        """
        Replace (upsert) the document matching _filter in collection _collection in DB.
        It is monitored for timeout in case DB connection hangs for some reason
        :param _collection:
        :param _filter:
        :param _db_entry:
        :return:
        """
assert _collection is not None, 'Must specify collection'
assert _db_entry is not None, 'Must specify document'
try:
self.db['db'][_collection].replace_one(_filter, _db_entry, upsert=True)
except Exception as _e:
print(*time_stamps(), 'Error replacing {:s} in {:s}'.format(str(_db_entry['_id']), _collection))
traceback.print_exc()
print(_e)
@staticmethod
    def alert_mongify(alert):
        """Massage an alert packet into a Mongo-friendly document: add string and
        GeoJSON coordinates and split off the light curve (with magnitudes computed
        for positive-flux points)."""
doc = dict(alert)
# let mongo create a unique id
# candid+objectId is a unique combination:
# doc['_id'] = f"{alert['candid']}_{alert['objectId']}"
# placeholders for cross-matches and classifications
# doc['cross_matches'] = dict()
doc['classifications'] = dict()
# GeoJSON for 2D indexing
doc['coordinates'] = {}
_ra = doc['candidate']['ra']
_dec = doc['candidate']['dec']
_radec = [_ra, _dec]
# string format: H:M:S, D:M:S
# tic = time.time()
_radec_str = [deg2hms(_ra), deg2dms(_dec)]
# print(time.time() - tic)
# print(_radec_str)
doc['coordinates']['radec_str'] = _radec_str
# for GeoJSON, must be lon:[-180, 180], lat:[-90, 90] (i.e. in deg)
_radec_geojson = [_ra - 180.0, _dec]
doc['coordinates']['radec_geojson'] = {'type': 'Point',
'coordinates': _radec_geojson}
# radians and degrees:
# doc['coordinates']['radec_rad'] = [_ra * np.pi / 180.0, _dec * np.pi / 180.0]
# doc['coordinates']['radec_deg'] = [_ra, _dec]
light_curve = deepcopy(doc['light_curve'])
doc.pop('light_curve', None)
if light_curve is None:
light_curve = []
for lc in light_curve:
if lc['flux'] > 0:
lc['mag'] = -2.5 * np.log10(lc['flux']) + lc['zp']
return doc, light_curve
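    # Comment-only example of the GeoJSON shift above: for ra=10.6847, dec=41.269 the stored
    # point is {'type': 'Point', 'coordinates': [-169.3153, 41.269]}, since the longitude is
    # ra - 180 so that it falls within [-180, 180].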
    def poll(self, path_alerts=None, path_tess=None, datestr=None, save_packets=True):
        """
        Polls the Kafka broker and ingests the consumed alert packets into the database.
        :param path_alerts: directory where raw avro packets are saved
        :param path_tess: directory where json dumps for public TESS sectors are saved
        :param datestr: observing date string, e.g. 20200101
        :param save_packets: save raw packets to disk?
        :return:
        """
# msg = self.consumer.poll(timeout=timeout)
msg = self.consumer.poll()
        if msg is None:
            print(*time_stamps(), 'Caught error: msg is None')
            return
        if msg.error():
            print('Caught error:', msg.error())
            # if msg.value() is not None:
            # print(*time_stamps(), msg.value())
            raise EopError(msg)
        else:
# decode avro packet
msg_decoded = self.decodeMessage(msg)
for record in msg_decoded:
candid = record['candid']
objectId = record['objectId']
print(*time_stamps(), self.topic, objectId, candid)
# check that candid not in collection_alerts
if self.db['db'][self.collection_alerts].count_documents({'candid': candid}, limit=1) == 0:
# candid not in db, ingest
if save_packets:
# save avro packet to disk
path_alert_dir = os.path.join(path_alerts, datestr)
# mkdir if does not exist
if not os.path.exists(path_alert_dir):
os.makedirs(path_alert_dir)
path_avro = os.path.join(path_alert_dir, f'{candid}.avro')
print(*time_stamps(), f'saving {candid} to disk')
with open(path_avro, 'wb') as f:
f.write(msg.value())
# ingest decoded avro packet into db
alert, light_curve = self.alert_mongify(record)
# alert filters:
# ML models:
scores = alert_filter__ml(record, ml_models=self.ml_models)
alert['classifications'] = scores
print(*time_stamps(), f'ingesting {alert["candid"]} into db')
self.insert_db_entry(_collection=self.collection_alerts, _db_entry=alert)
# light_curve: pop nulls - save space
light_curve = [{kk: vv for kk, vv in lc.items() if vv is not None} for lc in light_curve]
# cross-match with external catalogs if objectId not in collection_alerts_aux:
if self.db['db'][self.collection_alerts_aux].count_documents({'_id': objectId}, limit=1) == 0:
# tic = time.time()
xmatches = alert_filter__xmatch(self.db['db'], alert)
# CLU cross-match:
xmatches = {**xmatches, **alert_filter__xmatch_clu(self.db['db'], alert)}
# alert['cross_matches'] = xmatches
# toc = time.time()
# print(f'xmatch for {alert["candid"]} took {toc-tic:.2f} s')
alert_aux = {'_id': objectId,
'cross_matches': xmatches,
'light_curve': light_curve}
self.insert_db_entry(_collection=self.collection_alerts_aux, _db_entry=alert_aux)
else:
self.db['db'][self.collection_alerts_aux].update_one({'_id': objectId},
{'$addToSet':
{'light_curve':
{'$each': light_curve}}},
upsert=True)
# dump packet as json to disk if in a public TESS sector
if 'TESS' in alert['candidate']['programpi']:
# put light_curve back
alert['light_curve'] = light_curve
# get cross-matches
# xmatches = self.db['db'][self.collection_alerts_aux].find_one({'_id': objectId})
xmatches = self.db['db'][self.collection_alerts_aux].find({'_id': objectId},
{'cross_matches': 1},
limit=1)
xmatches = list(xmatches)[0]
alert['cross_matches'] = xmatches['cross_matches']
if save_packets:
path_tess_dir = os.path.join(path_tess, datestr)
# mkdir if does not exist
if not os.path.exists(path_tess_dir):
os.makedirs(path_tess_dir)
print(*time_stamps(), f'saving {alert["candid"]} to disk')
try:
with open(os.path.join(path_tess_dir, f"{alert['candid']}.json"), 'w') as f:
f.write(dumps(alert))
except Exception as e:
                                print(*time_stamps(), str(e))
_err = traceback.format_exc()
print(*time_stamps(), str(_err))
def decodeMessage(self, msg):
"""Decode Avro message according to a schema.
Parameters
----------
msg : Kafka message
The Kafka message result from consumer.poll().
Returns
-------
`dict`
Decoded message.
"""
# print(msg.topic(), msg.offset(), msg.error(), msg.key(), msg.value())
message = msg.value()
# print(message)
try:
bytes_io = io.BytesIO(message)
decoded_msg = readSchemaData(bytes_io)
# print(decoded_msg)
# decoded_msg = readAvroData(bytes_io, self.alert_schema)
# print(decoded_msg)
except AssertionError:
# FIXME this exception is raised but not sure if it matters yet
bytes_io = io.BytesIO(message)
decoded_msg = None
except IndexError:
literal_msg = literal_eval(str(message, encoding='utf-8')) # works to give bytes
bytes_io = io.BytesIO(literal_msg) # works to give <class '_io.BytesIO'>
decoded_msg = readSchemaData(bytes_io) # yields reader
except Exception:
decoded_msg = message
finally:
return decoded_msg
def msg_text(message):
"""Remove postage stamp cutouts from an alert message.
"""
message_text = {k: message[k] for k in message
if k not in ['cutoutDifference', 'cutoutTemplate', 'cutoutScience']}
return message_text
def write_stamp_file(stamp_dict, output_dir):
"""Given a stamp dict that follows the cutout schema,
write data to a file in a given directory.
"""
try:
filename = stamp_dict['fileName']
try:
os.makedirs(output_dir)
except OSError:
pass
out_path = os.path.join(output_dir, filename)
with open(out_path, 'wb') as f:
f.write(stamp_dict['stampData'])
except TypeError:
sys.stderr.write('%% Cannot get stamp\n')
return
def alert_filter(alert, stampdir=None):
"""Filter to apply to each alert.
See schemas: https://github.com/ZwickyTransientFacility/ztf-avro-alert
"""
data = msg_text(alert)
if data: # Write your condition statement here
print(data) # Print all main alert data to screen
if stampdir is not None: # Collect all postage stamps
write_stamp_file(
alert.get('cutoutDifference'), stampdir)
write_stamp_file(
alert.get('cutoutTemplate'), stampdir)
write_stamp_file(
alert.get('cutoutScience'), stampdir)
return
def make_triplet(alert, to_tpu: bool = False):
    """
    Build a (63, 63, 3) image cube (science, template, difference) from the gzipped
    FITS cutouts of an alert packet; NaNs are zeroed, each cutout is L2-normalized
    and padded to 63x63 if smaller.
    """
cutout_dict = dict()
for cutout in ('science', 'template', 'difference'):
# cutout_data = loads(dumps([alert[f'cutout{cutout.capitalize()}']['stampData']]))[0]
# cutout_data = alert[f'cutout{cutout.capitalize()}']['stampData']
cutout_data = alert[f'cutout{cutout.capitalize()}']
# unzip
with gzip.open(io.BytesIO(cutout_data), 'rb') as f:
with fits.open(io.BytesIO(f.read())) as hdu:
data = hdu[0].data
# replace nans with zeros
cutout_dict[cutout] = np.nan_to_num(data)
# L2-normalize
cutout_dict[cutout] /= np.linalg.norm(cutout_dict[cutout])
# pad to 63x63 if smaller
shape = cutout_dict[cutout].shape
if shape != (63, 63):
# print(f'Shape of {candid}/{cutout}: {shape}, padding to (63, 63)')
cutout_dict[cutout] = np.pad(cutout_dict[cutout], [(0, 63 - shape[0]), (0, 63 - shape[1])],
mode='constant', constant_values=1e-9)
triplet = np.zeros((63, 63, 3))
triplet[:, :, 0] = cutout_dict['science']
triplet[:, :, 1] = cutout_dict['template']
triplet[:, :, 2] = cutout_dict['difference']
if to_tpu:
# Edge TPUs require additional processing
triplet = np.rint(triplet * 128 + 128).astype(np.uint8).flatten()
return triplet
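# Hedged usage sketch: for a decoded packet with gzipped FITS cutouts,
# make_triplet(alert).shape == (63, 63, 3); with to_tpu=True a flattened
# uint8 vector of length 63 * 63 * 3 is returned instead.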
def alert_filter__ml(alert, ml_models: dict = None):
    """Apply the loaded ML models (e.g. braai) to an alert and return a dict of scores.
    """
scores = dict()
try:
''' braai '''
triplet = make_triplet(alert)
triplets = np.expand_dims(triplet, axis=0)
braai = ml_models['braai']['model'].predict(x=triplets)[0]
# braai = 1.0
scores['braai'] = float(braai)
scores['braai_version'] = ml_models['braai']['version']
except Exception as e:
print(*time_stamps(), str(e))
return scores
# cone search radius:
cone_search_radius = float(config['xmatch']['cone_search_radius'])
# convert to rad:
if config['xmatch']['cone_search_unit'] == 'arcsec':
cone_search_radius *= np.pi / 180.0 / 3600.
elif config['xmatch']['cone_search_unit'] == 'arcmin':
cone_search_radius *= np.pi / 180.0 / 60.
elif config['xmatch']['cone_search_unit'] == 'deg':
cone_search_radius *= np.pi / 180.0
elif config['xmatch']['cone_search_unit'] == 'rad':
cone_search_radius *= 1
else:
raise Exception('Unknown cone search unit. Must be in [deg, rad, arcsec, arcmin]')
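# Worked example of the conversion above: a 2 arcsec cone search radius becomes
# 2 * np.pi / 180.0 / 3600. ≈ 9.70e-06 rad, the unit that $centerSphere expects.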
def alert_filter__xmatch(db, alert):
    """
    Cross-match the alert position against the external catalogs configured in config['xmatch'].
    """
xmatches = dict()
try:
ra_geojson = float(alert['candidate']['ra'])
# geojson-friendly ra:
ra_geojson -= 180.0
dec_geojson = float(alert['candidate']['dec'])
''' catalogs '''
for catalog in config['xmatch']['catalogs']:
catalog_filter = config['xmatch']['catalogs'][catalog]['filter']
catalog_projection = config['xmatch']['catalogs'][catalog]['projection']
object_position_query = dict()
object_position_query['coordinates.radec_geojson'] = {
'$geoWithin': {'$centerSphere': [[ra_geojson, dec_geojson], cone_search_radius]}}
s = db[catalog].find({**object_position_query, **catalog_filter},
{**catalog_projection})
xmatches[catalog] = list(s)
except Exception as e:
print(*time_stamps(), str(e))
return xmatches
# cone search radius in deg:
cone_search_radius_clu = 3.0
# convert deg to rad:
cone_search_radius_clu *= np.pi / 180.0
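# i.e. the coarse CLU search radius used below is 3 * np.pi / 180.0 ≈ 0.05236 rad.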
def alert_filter__xmatch_clu(database, alert, size_margin=3, clu_version='CLU_20190625'):
    """
    Cross-match the alert position against the CLU galaxy catalog using elliptical matching.
    :param size_margin: multiply galaxy size by this much before looking for a match
    :param clu_version: CLU catalog version
    """
xmatches = dict()
try:
ra = float(alert['candidate']['ra'])
dec = float(alert['candidate']['dec'])
# geojson-friendly ra:
ra_geojson = float(alert['candidate']['ra']) - 180.0
dec_geojson = dec
catalog_filter = {}
catalog_projection = {"_id": 1, "name": 1, "ra": 1, "dec": 1,
"a": 1, "b2a": 1, "pa": 1, "z": 1,
"sfr_fuv": 1, "mstar": 1, "sfr_ha": 1,
"coordinates.radec_str": 1}
# first do a coarse search of everything that is around
object_position_query = dict()
object_position_query['coordinates.radec_geojson'] = {
'$geoWithin': {'$centerSphere': [[ra_geojson, dec_geojson], cone_search_radius_clu]}}
s = database[clu_version].find({**object_position_query, **catalog_filter},
{**catalog_projection})
galaxies = list(s)
# these guys are very big, so check them separately
M31 = {'_id': 596900, 'name': 'PGC2557',
'ra': 10.6847, 'dec': 41.26901, 'a': 6.35156, 'b2a': 0.32, 'pa': 35.0,
'sfr_fuv': None, 'mstar': 253816876.412914, 'sfr_ha': 0,
               'coordinates': {'radec_str': ["00:42:44.3503", "41:16:08.634"]}
}
M33 = {'_id': 597543, 'name': 'PGC5818',
'ra': 23.46204, 'dec': 30.66022, 'a': 2.35983, 'b2a': 0.59, 'pa': 23.0,
'sfr_fuv': None, 'mstar': 4502777.420493, 'sfr_ha': 0,
               'coordinates': {'radec_str': ["01:33:50.8900", "30:39:36.800"]}
}
# do elliptical matches
matches = []
for galaxy in galaxies + [M31, M33]:
alpha1, delta01 = galaxy['ra'], galaxy['dec']
d0, axis_ratio, PA0 = galaxy['a'], galaxy['b2a'], galaxy['pa']
# no shape info for galaxy? replace with median values
if d0 < -990:
d0 = 0.0265889
if axis_ratio < -990:
axis_ratio = 0.61
if PA0 < -990:
PA0 = 86.0
in_galaxy = in_ellipse(ra, dec, alpha1, delta01, size_margin * d0, axis_ratio, PA0)
if in_galaxy:
match = galaxy
distance_arcsec = round(great_circle_distance(ra, dec, alpha1, delta01) * 3600, 2)
match['coordinates']['distance_arcsec'] = distance_arcsec
matches.append(match)
xmatches[clu_version] = matches
except Exception as e:
print(*time_stamps(), str(e))
return xmatches
def listener(topic, bootstrap_servers='', offset_reset='earliest',
             group=None, path_alerts=None, path_tess=None, save_packets=True):
    """
    Listen to a Kafka topic and ingest the alerts it carries into the database.
    :param topic:
    :param bootstrap_servers:
    :param offset_reset:
    :param group:
    :param path_alerts:
    :param path_tess:
    :param save_packets:
    :return:
    """
# def error_cb(err):
# print(*time_stamps(), 'error_cb -------->', err)
# # print(err.code())
# if err.code() == -195:
# print(*time_stamps(), 'got disconnected, killing thread')
# sys.exit()
# Configure consumer connection to Kafka broker
conf = {'bootstrap.servers': bootstrap_servers,
# 'error_cb': error_cb,
'default.topic.config': {'auto.offset.reset': offset_reset}}
if group is not None:
conf['group.id'] = group
else:
conf['group.id'] = os.environ['HOSTNAME'] if 'HOSTNAME' in os.environ else 'kowalski.caltech.edu'
# make it unique:
conf['group.id'] = '{:s}_{:s}'.format(conf['group.id'], datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f'))
# Configure Avro reader schema
schema_files = ["ztf-avro-alert/schema/candidate.avsc",
"ztf-avro-alert/schema/cutout.avsc",
"ztf-avro-alert/schema/light_curve.avsc",
"ztf-avro-alert/schema/alert.avsc"]
# date string:
datestr = topic.split('_')[1]
# Start alert stream consumer
stream_reader = AlertConsumer(topic, schema_files, **conf)
# todo: Subscribe alert filters to stream_readers
# todo: they will be notified when an alert arrived/got x-matched
while True:
try:
# poll!
# print(*time_stamps(), 'Polling')
stream_reader.poll(path_alerts=path_alerts, path_tess=path_tess,
datestr=datestr, save_packets=save_packets)
except EopError as e:
# Write when reaching end of partition
# sys.stderr.write(e.message)
print(*time_stamps(), e.message)
except IndexError:
# sys.stderr.write('%% Data cannot be decoded\n')
print(*time_stamps(), '%% Data cannot be decoded\n')
except UnicodeDecodeError:
# sys.stderr.write('%% Unexpected data format received\n')
print(*time_stamps(), '%% Unexpected data format received\n')
except KeyboardInterrupt:
# sys.stderr.write('%% Aborted by user\n')
print(*time_stamps(), '%% Aborted by user\n')
sys.exit()
except Exception as e:
print(*time_stamps(), str(e))
_err = traceback.format_exc()
print(*time_stamps(), str(_err))
sys.exit()
def main(_obs_date=None, _save_packets=True):
topics_on_watch = dict()
while True:
try:
if True:
# get kafka topic names with kafka-topics command
kafka_cmd = [config['kafka-topics']['cmd'],
'--zookeeper', config['kafka-topics']['zookeeper'], '-list']
# print(kafka_cmd)
topics = subprocess.run(kafka_cmd, stdout=subprocess.PIPE).stdout.decode('utf-8').split('\n')[:-1]
# print(topics)
if _obs_date is None:
datestr = datetime.datetime.utcnow().strftime('%Y%m%d')
else:
datestr = _obs_date
# as of 20180403 naming convention is ztf_%Y%m%d_programidN
# topics_tonight = [t for t in topics if (datestr in t) and ('programid' in t)]
# ZUDS only
topics_tonight = [t for t in topics if (datestr in t) and ('programid' in t) and ('zuds' in t)]
print(*time_stamps(), topics_tonight)
if False:
# for testing
topics_tonight = ['ztf_20180604_programid3']
for t in topics_tonight:
if t not in topics_on_watch:
print(*time_stamps(), f'starting listener thread for {t}')
offset_reset = config['kafka']['default.topic.config']['auto.offset.reset']
bootstrap_servers = config['kafka']['bootstrap.servers']
group = '{:s}'.format(config['kafka']['group'])
# print(group)
path_alerts = config['path']['path_alerts']
path_tess = config['path']['path_tess']
save_packets = _save_packets
# topics_on_watch[t] = threading.Thread(target=listener,
# args=(t, bootstrap_servers,
# offset_reset, group, path_alerts))
topics_on_watch[t] = multiprocessing.Process(target=listener,
args=(t, bootstrap_servers,
offset_reset, group,
path_alerts, path_tess,
save_packets))
topics_on_watch[t].daemon = True
topics_on_watch[t].start()
else:
print(*time_stamps(), f'performing thread health check for {t}')
try:
# if not topics_on_watch[t].isAlive():
if not topics_on_watch[t].is_alive():
print(*time_stamps(), f'{t} died, removing')
# topics_on_watch[t].terminate()
topics_on_watch.pop(t, None)
else:
print(*time_stamps(), f'{t} appears normal')
except Exception as _e:
print(*time_stamps(), 'Failed to perform health check', str(_e))
pass
except Exception as e:
print(*time_stamps(), str(e))
_err = traceback.format_exc()
print(*time_stamps(), str(_err))
if _obs_date is None:
time.sleep(300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Fetch AVRO packets from Kafka streams and ingest them into DB')
parser.add_argument('--obsdate', help='observing date')
parser.add_argument('--noio', help='reduce i/o - do not save packets', action='store_true')
args = parser.parse_args()
obs_date = args.obsdate
save = False if args.noio else True
# print(obs_date)
main(_obs_date=obs_date, _save_packets=save)
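# Hedged usage sketch (the script filename is assumed, not given here):
# python3 alert_watcher_zuds.py --obsdate 20200101
# python3 alert_watcher_zuds.py --noio    # ingest without saving packets to disk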
|
#! python3
# randomQuizGenerator.py - Creates quizzes with questions and answers in
# random order, along with the answer key.
from pathlib import Path
import random
# The quiz data. Keys are states and values are their capitals
capitals = {'Alabama': 'Montgomery', 'Alaska': 'Juneau', 'Arizona': 'Phoenix',
'Arkansas': 'Little Rock', 'California': 'Sacramento', 'Colorado': 'Denver',
'Connecticut': 'Hartford', 'Delaware': 'Dover', 'Florida': 'Tallahassee',
'Georgia': 'Atlanta', 'Hawaii': 'Honolulu', 'Idaho': 'Boise', 'Illinois':
'Springfield', 'Indiana': 'Indianapolis', 'Iowa': 'Des Moines', 'Kansas':
'Topeka', 'Kentucky': 'Frankfort', 'Louisiana': 'Baton Rouge', 'Maine':
'Augusta', 'Maryland': 'Annapolis', 'Massachusetts': 'Boston', 'Michigan':
'Lansing', 'Minnesota': 'Saint Paul', 'Mississippi': 'Jackson', 'Missouri':
'Jefferson City', 'Montana': 'Helena', 'Nebraska': 'Lincoln', 'Nevada':
'Carson City', 'New Hampshire': 'Concord', 'New Jersey': 'Trenton',
'New Mexico': 'Santa Fe', 'New York': 'Albany',
'North Carolina': 'Raleigh', 'North Dakota': 'Bismarck', 'Ohio': 'Columbus',
'Oklahoma': 'Oklahoma City',
'Oregon': 'Salem', 'Pennsylvania': 'Harrisburg', 'Rhode Island': 'Providence',
'South Carolina': 'Columbia', 'South Dakota': 'Pierre', 'Tennessee':
'Nashville', 'Texas': 'Austin', 'Utah': 'Salt Lake City', 'Vermont':
'Montpelier', 'Virginia': 'Richmond', 'Washington': 'Olympia',
'West Virginia': 'Charleston', 'Wisconsin': 'Madison',
'Wyoming': 'Cheyenne'}
for quizNum in range(35):
# Create the quiz and answer key files.
quizFile = open(f'capitalsquiz{quizNum + 1}.txt', 'w')
answerKeyFile = open(f'capitalsquiz_answers{quizNum + 1}.txt', 'w')
# Write out the header for the quiz.
quizFile.write('Name:\n\nDate:\n\nPeriod:\n\n')
    quizFile.write((' ' * 20) + f'State Capitals Quiz (Form {quizNum + 1})')
quizFile.write('\n\n')
# Shuffle the order of the states.
states = list(capitals.keys())
random.shuffle(states)
# Loop through all 50 states, making a question for each.
for questionNum in range(50):
# Get right and wrong answers.
correctAnswer = capitals[states[questionNum]]
wrongAnswers = list(capitals.values())
del wrongAnswers[wrongAnswers.index(correctAnswer)]
wrongAnswers = random.sample(wrongAnswers, 3)
answerOptions = wrongAnswers + [correctAnswer]
random.shuffle(answerOptions)
# Write the question and the answer options to the quiz file.
quizFile.write(f'{questionNum + 1}. What is the capital of {states[questionNum]}?\n')
for i in range(4):
            quizFile.write(f" {'ABCD'[i]}.{answerOptions[i]}\n")
quizFile.write('\n')
# Write the answer key to a file.
        answerKeyFile.write(f"{questionNum + 1}.{'ABCD'[answerOptions.index(correctAnswer)]}")
quizFile.close()
answerKeyFile.close()
| #! python3
# randomQuizGenerator.py - Creates quizzes with questions and answers in
# random order, along with the answer key.
from pathlib import Path
import random
# The quiz data. Keys are states and values are their capitals
capitals = {'Alabama': 'Montgomery', 'Alaska': 'Juneau', 'Arizona': 'Phoenix',
'Arkansas': 'Little Rock', 'California': 'Sacramento', 'Colorado': 'Denver',
'Connecticut': 'Hartford', 'Delaware': 'Dover', 'Florida': 'Tallahassee',
'Georgia': 'Atlanta', 'Hawaii': 'Honolulu', 'Idaho': 'Boise', 'Illinois':
'Springfield', 'Indiana': 'Indianapolis', 'Iowa': 'Des Moines', 'Kansas':
'Topeka', 'Kentucky': 'Frankfort', 'Louisiana': 'Baton Rouge', 'Maine':
'Augusta', 'Maryland': 'Annapolis', 'Massachusetts': 'Boston', 'Michigan':
'Lansing', 'Minnesota': 'Saint Paul', 'Mississippi': 'Jackson', 'Missouri':
'Jefferson City', 'Montana': 'Helena', 'Nebraska': 'Lincoln', 'Nevada':
'Carson City', 'New Hampshire': 'Concord', 'New Jersey': 'Trenton',
'New Mexico': 'Santa Fe', 'New York': 'Albany',
'North Carolina': 'Raleigh', 'North Dakota': 'Bismarck', 'Ohio': 'Columbus',
'Oklahoma': 'Oklahoma City',
'Oregon': 'Salem', 'Pennsylvania': 'Harrisburg', 'Rhode Island': 'Providence',
'South Carolina': 'Columbia', 'South Dakota': 'Pierre', 'Tennessee':
'Nashville', 'Texas': 'Austin', 'Utah': 'Salt Lake City', 'Vermont':
'Montpelier', 'Virginia': 'Richmond', 'Washington': 'Olympia',
'West Virginia': 'Charleston', 'Wisconsin': 'Madison',
'Wyoming': 'Cheyenne'}
for quizNum in range(35):
# Create the quiz and answer key files.
quizFile = open(f'capitalsquiz{quizNum + 1}.txt', 'w')
answerKeyFile = open(f'capitalsquiz_answers{quizNum + 1}.txt', 'w')
# Write out the header for the quiz.
quizFile.write('Name:\n\nDate:\n\nPeriod:\n\n')
    quizFile.write((' ' * 20) + f'State Capitals Quiz (Form {quizNum + 1})')
quizFile.write('\n\n')
# Shuffle the order of the states.
states = list(capitals.keys())
random.shuffle(states)
# Loop through all 50 states, making a question for each.
for questionNum in range(50):
# Get right and wrong answers.
correctAnswer = capitals[states[questionNum]]
wrongAnswers = list(capitals.values())
del wrongAnswers[wrongAnswers.index(correctAnswer)]
wrongAnswers = random.sample(wrongAnswers, 3)
answerOptions = wrongAnswers + [correctAnswer]
random.shuffle(answerOptions)
# Write the question and the answer options to the quiz file.
quizFile.write(f'{questionNum + 1}. What is the capital of {states[questionNum]}?\n')
for i in range(4):
quizFile.write(f" {'ABCD'[i]}.{answerOptions[i]}\n")
quizFile.write('\n')
# Write the answer key to a file.
answerKeyFile.write(f"{questionNum + 1}.{'ABCD'[answerOptions.index(correctAnswer)]}")
quizFile.close()
answerKeyFile.close()
|
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Widecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importdescriptors RPC.
Test importdescriptors by generating keys on node0, importing the corresponding
descriptors on node1 and then testing the address info for the different address
variants.
- `get_generate_key()` is called to generate keys and return the privkeys,
pubkeys and all variants of scriptPubKey and address.
- `test_importdesc()` is called to send an importdescriptors call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from test_framework.address import key_to_p2pkh
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import WidecoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
)
from test_framework.wallet_util import (
get_generate_key,
test_address,
)
class ImportDescriptorsTest(WidecoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-addresstype=legacy"],
["-addresstype=bech32", "-keypool=5"]
]
self.setup_clean_chain = True
self.wallet_names = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_sqlite()
def test_importdesc(self, req, success, error_code=None, error_message=None, warnings=None, wallet=None):
"""Run importdescriptors and assert success"""
if warnings is None:
warnings = []
wrpc = self.nodes[1].get_wallet_rpc('w1')
if wallet is not None:
wrpc = wallet
result = wrpc.importdescriptors([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
def run_test(self):
self.log.info('Setting up wallets')
self.nodes[0].createwallet(wallet_name='w0', disable_private_keys=False, descriptors=True)
w0 = self.nodes[0].get_wallet_rpc('w0')
self.nodes[1].createwallet(wallet_name='w1', disable_private_keys=True, blank=True, descriptors=True)
w1 = self.nodes[1].get_wallet_rpc('w1')
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
self.nodes[1].createwallet(wallet_name="wpriv", disable_private_keys=False, blank=True, descriptors=True)
wpriv = self.nodes[1].get_wallet_rpc("wpriv")
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 0)
self.log.info('Mining coins')
w0.generatetoaddress(COINBASE_MATURITY + 1, w0.getnewaddress())
# RPC importdescriptors -----------------------------------------------
# # Test import fails if no descriptor present
self.log.info("Import should fail if a descriptor is not provided")
self.test_importdesc({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor not found.')
# # Test importing of a P2PKH descriptor
key = get_generate_key()
self.log.info("Should import a p2pkh descriptor")
import_request = {"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": "Descriptor import test"}
self.test_importdesc(import_request, success=True)
test_address(w1,
key.p2pkh_addr,
solvable=True,
ismine=True,
labels=["Descriptor import test"])
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
self.log.info("Test can import same descriptor with public key twice")
self.test_importdesc(import_request, success=True)
self.log.info("Test can update descriptor label")
self.test_importdesc({**import_request, "label": "Updated label"}, success=True)
test_address(w1, key.p2pkh_addr, solvable=True, ismine=True, labels=["Updated label"])
self.log.info("Internal addresses cannot have labels")
self.test_importdesc({**import_request, "internal": True},
success=False,
error_code=-8,
error_message="Internal addresses should not have a label")
self.log.info("Internal addresses should be detected as such")
key = get_generate_key()
addr = key_to_p2pkh(key.pubkey)
self.test_importdesc({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"internal": True},
success=True)
info = w1.getaddressinfo(addr)
assert_equal(info["ismine"], True)
assert_equal(info["ischange"], True)
# # Test importing of a P2SH-P2WPKH descriptor
key = get_generate_key()
self.log.info("Should not import a p2sh-p2wpkh descriptor without checksum")
self.test_importdesc({"desc": "sh(wpkh(" + key.pubkey + "))",
"timestamp": "now"
},
success=False,
error_code=-5,
error_message="Missing checksum")
self.log.info("Should not import a p2sh-p2wpkh descriptor that has range specified")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"range": 1,
},
success=False,
error_code=-8,
error_message="Range should not be specified for an un-ranged descriptor")
self.log.info("Should not import a p2sh-p2wpkh descriptor and have it set to active")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"active": True,
},
success=False,
error_code=-8,
error_message="Active descriptors must be ranged")
self.log.info("Should import a (non-active) p2sh-p2wpkh descriptor")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"active": False,
},
success=True)
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
test_address(w1,
key.p2sh_p2wpkh_addr,
ismine=True,
solvable=True)
# Check persistence of data and that loading works correctly
w1.unloadwallet()
self.nodes[1].loadwallet('w1')
test_address(w1,
key.p2sh_p2wpkh_addr,
ismine=True,
solvable=True)
# # Test importing of a multisig descriptor
key1 = get_generate_key()
key2 = get_generate_key()
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importdesc({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True)
self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
test_address(w1,
key1.p2pkh_addr,
ismine=False)
# # Test ranged descriptors
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
xpub = "tpubD6NzVbkrYhZ4YNXVQbNhMK1WqguFsUXceaVJKbmno2aZ3B6QfbMeraaYvnBSGpV3vxLyTTK9DYT1yoEck4XUScMzXoQ2U2oSmE2JyMedq3H"
addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"] # hdkeypath=m/0'/0'/0' and 1'
addresses += ["bcrt1qrd3n235cj2czsfmsuvqqpr3lu6lg0ju7scl8gn", "bcrt1qfqeppuvj0ww98r6qghmdkj70tv8qpchehegrg8"] # wpkh subscripts corresponding to the above addresses
desc = "sh(wpkh(" + xpub + "/0/0/*" + "))"
self.log.info("Ranged descriptors cannot have labels")
self.test_importdesc({"desc":descsum_create(desc),
"timestamp": "now",
"range": [0, 100],
"label": "test"},
success=False,
error_code=-8,
error_message='Ranged descriptors should not have a label')
self.log.info("Private keys required for private keys enabled wallet")
self.test_importdesc({"desc":descsum_create(desc),
"timestamp": "now",
"range": [0, 100]},
success=False,
error_code=-4,
error_message='Cannot import descriptor without private keys to a wallet with private keys enabled',
wallet=wpriv)
self.log.info("Ranged descriptor import should warn without a specified range")
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now"},
success=True,
warnings=['Range not given, using default keypool range'])
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
# # Test importing of a ranged descriptor with xpriv
self.log.info("Should not import a ranged descriptor that includes xpriv into a watch-only wallet")
desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=False,
error_code=-4,
error_message='Cannot import private keys to a wallet with private keys disabled')
self.log.info("Should not import a descriptor with hardened derivations when private keys are disabled")
self.test_importdesc({"desc": descsum_create("wpkh(" + xpub + "/1h/*)"),
"timestamp": "now",
"range": 1},
success=False,
error_code=-4,
error_message='Cannot expand descriptor. Probably because of hardened derivations without private keys provided')
for address in addresses:
test_address(w1,
address,
ismine=False,
solvable=False)
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
self.log.info("Verify we can only extend descriptor's range")
range_request = {"desc": descsum_create(desc), "timestamp": "now", "range": [5, 10], 'active': True}
self.test_importdesc(range_request, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 6)
self.test_importdesc({**range_request, "range": [0, 10]}, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 11)
self.test_importdesc({**range_request, "range": [0, 20]}, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 21)
# Can keep range the same
self.test_importdesc({**range_request, "range": [0, 20]}, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 21)
self.test_importdesc({**range_request, "range": [5, 10]}, wallet=wpriv, success=False,
error_code=-8, error_message='new range must include current range = [0,20]')
self.test_importdesc({**range_request, "range": [0, 10]}, wallet=wpriv, success=False,
error_code=-8, error_message='new range must include current range = [0,20]')
self.test_importdesc({**range_request, "range": [5, 20]}, wallet=wpriv, success=False,
error_code=-8, error_message='new range must include current range = [0,20]')
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 21)
self.log.info("Check we can change descriptor internal flag")
self.test_importdesc({**range_request, "range": [0, 20], "internal": True}, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, 'This wallet has no available keys', wpriv.getnewaddress, '', 'p2sh-segwit')
assert_equal(wpriv.getwalletinfo()['keypoolsize_hd_internal'], 21)
wpriv.getrawchangeaddress('p2sh-segwit')
self.test_importdesc({**range_request, "range": [0, 20], "internal": False}, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 21)
wpriv.getnewaddress('', 'p2sh-segwit')
assert_equal(wpriv.getwalletinfo()['keypoolsize_hd_internal'], 0)
assert_raises_rpc_error(-4, 'This wallet has no available keys', wpriv.getrawchangeaddress, 'p2sh-segwit')
# Make sure ranged imports import keys in order
w1 = self.nodes[1].get_wallet_rpc('w1')
self.log.info('Key ranges should be imported in order')
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv', # m/0'/0'/0
'bcrt1q8vprchan07gzagd5e6v9wd7azyucksq2xc76k8', # m/0'/0'/1
'bcrt1qtuqdtha7zmqgcrr26n2rqxztv5y8rafjp9lulu', # m/0'/0'/2
'bcrt1qau64272ymawq26t90md6an0ps99qkrse58m640', # m/0'/0'/3
'bcrt1qsg97266hrh6cpmutqen8s4s962aryy77jp0fg0', # m/0'/0'/4
]
self.test_importdesc({'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
self.test_importdesc({'desc': descsum_create('sh(wpkh([abcdef12/0h/0h]' + xpub + '/*))'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
self.test_importdesc({'desc': descsum_create('pkh([12345678/0h/0h]' + xpub + '/*)'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
assert_equal(w1.getwalletinfo()['keypoolsize'], 5 * 3)
for i, expected_addr in enumerate(addresses):
received_addr = w1.getnewaddress('', 'bech32')
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'bech32')
assert_equal(received_addr, expected_addr)
bech32_addr_info = w1.getaddressinfo(received_addr)
assert_equal(bech32_addr_info['desc'][:23], 'wpkh([80002067/0\'/0\'/{}]'.format(i))
shwpkh_addr = w1.getnewaddress('', 'p2sh-segwit')
shwpkh_addr_info = w1.getaddressinfo(shwpkh_addr)
assert_equal(shwpkh_addr_info['desc'][:26], 'sh(wpkh([abcdef12/0\'/0\'/{}]'.format(i))
pkh_addr = w1.getnewaddress('', 'legacy')
pkh_addr_info = w1.getaddressinfo(pkh_addr)
assert_equal(pkh_addr_info['desc'][:22], 'pkh([12345678/0\'/0\'/{}]'.format(i))
assert_equal(w1.getwalletinfo()['keypoolsize'], 4 * 3) # After retrieving a key, we don't refill the keypool again, so it's one less for each address type
w1.keypoolrefill()
assert_equal(w1.getwalletinfo()['keypoolsize'], 5 * 3)
self.log.info("Check we can change next_index")
# go back and forth with next_index
for i in [4, 0, 2, 1, 3]:
self.test_importdesc({'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'active': True,
'range': [0, 9],
'next_index': i,
'timestamp': 'now'
},
success=True)
assert_equal(w1.getnewaddress('', 'bech32'), addresses[i])
# Check active=False default
self.log.info('Check imported descriptors are not active by default')
self.test_importdesc({'desc': descsum_create('pkh([12345678/1h]' + xpub + '/*)'),
'range' : [0, 2],
'timestamp': 'now',
'internal': True
},
success=True)
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'legacy')
self.log.info('Check can activate inactive descriptor')
self.test_importdesc({'desc': descsum_create('pkh([12345678]' + xpub + '/*)'),
'range': [0, 5],
'active': True,
'timestamp': 'now',
'internal': True
},
success=True)
address = w1.getrawchangeaddress('legacy')
assert_equal(address, "mpA2Wh9dvZT7yfELq1UnrUmAoc5qCkMetg")
self.log.info('Check can deactivate active descriptor')
self.test_importdesc({'desc': descsum_create('pkh([12345678]' + xpub + '/*)'),
'range': [0, 5],
'active': False,
'timestamp': 'now',
'internal': True
},
success=True)
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'legacy')
self.log.info('Verify activation state is persistent')
w1.unloadwallet()
self.nodes[1].loadwallet('w1')
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'legacy')
# # Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
address = "2MuhcG52uHPknxDgmGPsV18jSHFBnnRgjPg"
desc = "sh(wpkh(" + wif_priv + "))"
self.log.info("Should import a descriptor with a WIF private key as spendable")
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now"},
success=True,
wallet=wpriv)
self.log.info('Test can import same descriptor with private key twice')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now"}, success=True, wallet=wpriv)
test_address(wpriv,
address,
solvable=True,
ismine=True)
txid = w0.sendtoaddress(address, 49.99995540)
w0.generatetoaddress(6, w0.getnewaddress())
self.sync_blocks()
tx = wpriv.createrawtransaction([{"txid": txid, "vout": 0}], {w0.getnewaddress(): 49.999})
signed_tx = wpriv.signrawtransactionwithwallet(tx)
w1.sendrawtransaction(signed_tx['hex'])
# Make sure that we can use import and use multisig as addresses
self.log.info('Test that multisigs can be imported, signed for, and getnewaddress\'d')
self.nodes[1].createwallet(wallet_name="wmulti_priv", disable_private_keys=False, blank=True, descriptors=True)
wmulti_priv = self.nodes[1].get_wallet_rpc("wmulti_priv")
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 0)
xprv1 = 'tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52'
acc_xpub1 = 'tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8' # /84'/0'/0'
chg_xpub1 = 'tpubDCXqdwWZcszwqYJSnZp8eARkxGJfHAk23KDxbztV4BbschfaTfYLTcSkSJ3TN64dRqwa1rnFUScsYormKkGqNbbPwkorQimVevXjxzUV9Gf' # /84'/1'/0'
xprv2 = 'tprv8ZgxMBicQKsPdSNWUhDiwTScDr6JfkZuLshTRwzvZGnMSnGikV6jxpmdDkC3YRc4T3GD6Nvg9uv6hQg73RVv1EiTXDZwxVbsLugVHU8B1aq'
acc_xprv2 = 'tprv8gVCsmRAxVSxyUpsL13Y7ZEWBFPWbgS5E2MmFVNGuANrknvmmn2vWnmHvU8AwEFYzR2ji6EeZLSCLVacsYkvor3Pcb5JY5FGcevqTwYvdYx'
acc_xpub2 = 'tpubDDBF2BTR6s8drwrfDei8WxtckGuSm1cyoKxYY1QaKSBFbHBYQArWhHPA6eJrzZej6nfHGLSURYSLHr7GuYch8aY5n61tGqgn8b4cXrMuoPH'
chg_xpub2 = 'tpubDCYfZY2ceyHzYzMMVPt9MNeiqtQ2T7Uyp9QSFwYXh8Vi9iJFYXcuphJaGXfF3jUQJi5Y3GMNXvM11gaL4txzZgNGK22BFAwMXynnzv4z2Jh'
xprv3 = 'tprv8ZgxMBicQKsPeonDt8Ka2mrQmHa61hQ5FQCsvWBTpSNzBFgM58cV2EuXNAHF14VawVpznnme3SuTbA62sGriwWyKifJmXntfNeK7zeqMCj1'
acc_xpub3 = 'tpubDCsWoW1kuQB9kG5MXewHqkbjPtqPueRnXju7uM2NK7y3JYb2ajAZ9EiuZXNNuE4661RAfriBWhL8UsnAPpk8zrKKnZw1Ug7X4oHgMdZiU4E'
chg_xpub3 = 'tpubDC6UGqnsQStngYuGD4MKsMy7eD1Yg9NTJfPdvjdG2JE5oZ7EsSL3WHg4Gsw2pR5K39ZwJ46M1wZayhedVdQtMGaUhq5S23PH6fnENK3V1sb'
self.test_importdesc({"desc":"wsh(multi(2," + xprv1 + "/84h/0h/0h/*," + xprv2 + "/84h/0h/0h/*," + xprv3 + "/84h/0h/0h/*))#m2sr93jn",
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_priv)
self.test_importdesc({"desc":"wsh(multi(2," + xprv1 + "/84h/1h/0h/*," + xprv2 + "/84h/1h/0h/*," + xprv3 + "/84h/1h/0h/*))#q3sztvx5",
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_priv)
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1001) # Range end (1000) is inclusive, so 1001 addresses generated
addr = wmulti_priv.getnewaddress('', 'bech32')
assert_equal(addr, 'bcrt1qdt0qy5p7dzhxzmegnn4ulzhard33s2809arjqgjndx87rv5vd0fq2czhy8') # Derived at m/84'/0'/0'/0
change_addr = wmulti_priv.getrawchangeaddress('bech32')
assert_equal(change_addr, 'bcrt1qt9uhe3a9hnq7vajl7a094z4s3crm9ttf8zw3f5v9gr2nyd7e3lnsy44n8e')
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1000)
txid = w0.sendtoaddress(addr, 10)
self.nodes[0].generate(6)
self.sync_all()
send_txid = wmulti_priv.sendtoaddress(w0.getnewaddress(), 8)
decoded = wmulti_priv.decoderawtransaction(wmulti_priv.gettransaction(send_txid)['hex'])
assert_equal(len(decoded['vin'][0]['txinwitness']), 4)
self.nodes[0].generate(6)
self.sync_all()
self.nodes[1].createwallet(wallet_name="wmulti_pub", disable_private_keys=True, blank=True, descriptors=True)
wmulti_pub = self.nodes[1].get_wallet_rpc("wmulti_pub")
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 0)
self.test_importdesc({"desc":"wsh(multi(2,[7b2d0242/84h/0h/0h]" + acc_xpub1 + "/*,[59b09cd6/84h/0h/0h]" + acc_xpub2 + "/*,[e81a0532/84h/0h/0h]" + acc_xpub3 +"/*))#tsry0s5e",
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_pub)
self.test_importdesc({"desc":"wsh(multi(2,[7b2d0242/84h/1h/0h]" + chg_xpub1 + "/*,[59b09cd6/84h/1h/0h]" + chg_xpub2 + "/*,[e81a0532/84h/1h/0h]" + chg_xpub3 + "/*))#c08a2rzv",
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_pub)
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 1000) # The first one was already consumed by previous import and is detected as used
addr = wmulti_pub.getnewaddress('', 'bech32')
assert_equal(addr, 'bcrt1qp8s25ckjl7gr6x2q3dx3tn2pytwp05upkjztk6ey857tt50r5aeqn6mvr9') # Derived at m/84'/0'/0'/1
change_addr = wmulti_pub.getrawchangeaddress('bech32')
assert_equal(change_addr, 'bcrt1qt9uhe3a9hnq7vajl7a094z4s3crm9ttf8zw3f5v9gr2nyd7e3lnsy44n8e')
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 999)
# generate some utxos for next tests
txid = w0.sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
addr2 = wmulti_pub.getnewaddress('', 'bech32')
txid2 = w0.sendtoaddress(addr2, 10)
vout2 = find_vout_for_address(self.nodes[0], txid2, addr2)
self.nodes[0].generate(6)
self.sync_all()
assert_equal(wmulti_pub.getbalance(), wmulti_priv.getbalance())
# Make sure that descriptor wallets containing multiple xpubs in a single descriptor load correctly
wmulti_pub.unloadwallet()
self.nodes[1].loadwallet('wmulti_pub')
self.log.info("Multisig with distributed keys")
self.nodes[1].createwallet(wallet_name="wmulti_priv1", descriptors=True)
wmulti_priv1 = self.nodes[1].get_wallet_rpc("wmulti_priv1")
res = wmulti_priv1.importdescriptors([
{
"desc": descsum_create("wsh(multi(2," + xprv1 + "/84h/0h/0h/*,[59b09cd6/84h/0h/0h]" + acc_xpub2 + "/*,[e81a0532/84h/0h/0h]" + acc_xpub3 + "/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
{
"desc": descsum_create("wsh(multi(2," + xprv1 + "/84h/1h/0h/*,[59b09cd6/84h/1h/0h]" + chg_xpub2 + "/*,[e81a0532/84h/1h/0h]" + chg_xpub3 + "/*))"),
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[0]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
assert_equal(res[1]['success'], True)
assert_equal(res[1]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
self.nodes[1].createwallet(wallet_name='wmulti_priv2', blank=True, descriptors=True)
wmulti_priv2 = self.nodes[1].get_wallet_rpc('wmulti_priv2')
res = wmulti_priv2.importdescriptors([
{
"desc": descsum_create("wsh(multi(2,[7b2d0242/84h/0h/0h]" + acc_xpub1 + "/*," + xprv2 + "/84h/0h/0h/*,[e81a0532/84h/0h/0h]" + acc_xpub3 + "/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
{
"desc": descsum_create("wsh(multi(2,[7b2d0242/84h/1h/0h]" + chg_xpub1 + "/*," + xprv2 + "/84h/1h/0h/*,[e81a0532/84h/1h/0h]" + chg_xpub3 + "/*))"),
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[0]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
assert_equal(res[1]['success'], True)
assert_equal(res[1]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
rawtx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {w0.getnewaddress(): 9.999})
tx_signed_1 = wmulti_priv1.signrawtransactionwithwallet(rawtx)
assert_equal(tx_signed_1['complete'], False)
tx_signed_2 = wmulti_priv2.signrawtransactionwithwallet(tx_signed_1['hex'])
assert_equal(tx_signed_2['complete'], True)
self.nodes[1].sendrawtransaction(tx_signed_2['hex'])
self.log.info("We can create and use a huge multisig under P2WSH")
self.nodes[1].createwallet(wallet_name='wmulti_priv_big', blank=True, descriptors=True)
wmulti_priv_big = self.nodes[1].get_wallet_rpc('wmulti_priv_big')
xkey = "tprv8ZgxMBicQKsPeZSeYx7VXDDTs3XrTcmZQpRLbAeSQFCQGgKwR4gKpcxHaKdoTNHniv4EPDJNdzA3KxRrrBHcAgth8fU5X4oCndkkxk39iAt/*"
xkey_int = "tprv8ZgxMBicQKsPeZSeYx7VXDDTs3XrTcmZQpRLbAeSQFCQGgKwR4gKpcxHaKdoTNHniv4EPDJNdzA3KxRrrBHcAgth8fU5X4oCndkkxk39iAt/1/*"
res = wmulti_priv_big.importdescriptors([
        {
            "desc": descsum_create(f"wsh(sortedmulti(20,{(xkey + ',') * 19}{xkey}))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
        {
            "desc": descsum_create(f"wsh(sortedmulti(20,{(xkey_int + ',') * 19}{xkey_int}))"),
"active": True,
"internal": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[1]['success'], True)
addr = wmulti_priv_big.getnewaddress()
w0.sendtoaddress(addr, 10)
self.nodes[0].generate(1)
self.sync_all()
# It is standard and would relay.
txid = wmulti_priv_big.sendtoaddress(w0.getnewaddress(), 9.999)
decoded = wmulti_priv_big.decoderawtransaction(wmulti_priv_big.gettransaction(txid)['hex'])
# 20 sigs + dummy + witness script
assert_equal(len(decoded['vin'][0]['txinwitness']), 22)
self.log.info("Under P2SH, multisig are standard with up to 15 "
"compressed keys")
self.nodes[1].createwallet(wallet_name='multi_priv_big_legacy',
blank=True, descriptors=True)
multi_priv_big = self.nodes[1].get_wallet_rpc('multi_priv_big_legacy')
res = multi_priv_big.importdescriptors([
        {
            "desc": descsum_create(f"sh(multi(15,{(xkey + ',') * 14}{xkey}))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
        {
            "desc": descsum_create(f"sh(multi(15,{(xkey_int + ',') * 14}{xkey_int}))"),
"active": True,
"internal": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[1]['success'], True)
addr = multi_priv_big.getnewaddress("", "legacy")
w0.sendtoaddress(addr, 10)
self.nodes[0].generate(6)
self.sync_all()
# It is standard and would relay.
txid = multi_priv_big.sendtoaddress(w0.getnewaddress(), 10, "", "",
True)
decoded = multi_priv_big.decoderawtransaction(
multi_priv_big.gettransaction(txid)['hex']
)
self.log.info("Amending multisig with new private keys")
self.nodes[1].createwallet(wallet_name="wmulti_priv3", descriptors=True)
wmulti_priv3 = self.nodes[1].get_wallet_rpc("wmulti_priv3")
res = wmulti_priv3.importdescriptors([
{
"desc": descsum_create("wsh(multi(2," + xprv1 + "/84h/0h/0h/*,[59b09cd6/84h/0h/0h]" + acc_xpub2 + "/*,[e81a0532/84h/0h/0h]" + acc_xpub3 + "/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
res = wmulti_priv3.importdescriptors([
{
"desc": descsum_create("wsh(multi(2," + xprv1 + "/84h/0h/0h/*,[59b09cd6/84h/0h/0h]" + acc_xprv2 + "/*,[e81a0532/84h/0h/0h]" + acc_xpub3 + "/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
rawtx = self.nodes[1].createrawtransaction([{'txid': txid2, 'vout': vout2}], {w0.getnewaddress(): 9.999})
tx = wmulti_priv3.signrawtransactionwithwallet(rawtx)
assert_equal(tx['complete'], True)
self.nodes[1].sendrawtransaction(tx['hex'])
self.log.info("Combo descriptors cannot be active")
self.test_importdesc({"desc": descsum_create("combo(tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*)"),
"active": True,
"range": 1,
"timestamp": "now"},
success=False,
error_code=-4,
error_message="Combo descriptors cannot be set to active")
self.log.info("Descriptors with no type cannot be active")
self.test_importdesc({"desc": descsum_create("pk(tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*)"),
"active": True,
"range": 1,
"timestamp": "now"},
success=True,
warnings=["Unknown output type, cannot set descriptor to active."])
if __name__ == '__main__':
ImportDescriptorsTest().main()
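# Hedged usage note: as a functional test this is normally run directly, e.g.
# `python3 wallet_importdescriptors.py` (assumed filename), or via the framework's test runner.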
| #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Widecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importdescriptors RPC.
Test importdescriptors by generating keys on node0, importing the corresponding
descriptors on node1 and then testing the address info for the different address
variants.
- `get_generate_key()` is called to generate keys and return the privkeys,
pubkeys and all variants of scriptPubKey and address.
- `test_importdesc()` is called to send an importdescriptors call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from test_framework.address import key_to_p2pkh
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import WidecoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
)
from test_framework.wallet_util import (
get_generate_key,
test_address,
)
class ImportDescriptorsTest(WidecoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-addresstype=legacy"],
["-addresstype=bech32", "-keypool=5"]
]
self.setup_clean_chain = True
self.wallet_names = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_sqlite()
def test_importdesc(self, req, success, error_code=None, error_message=None, warnings=None, wallet=None):
"""Run importdescriptors and assert success"""
if warnings is None:
warnings = []
wrpc = self.nodes[1].get_wallet_rpc('w1')
if wallet is not None:
wrpc = wallet
result = wrpc.importdescriptors([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
def run_test(self):
self.log.info('Setting up wallets')
self.nodes[0].createwallet(wallet_name='w0', disable_private_keys=False, descriptors=True)
w0 = self.nodes[0].get_wallet_rpc('w0')
self.nodes[1].createwallet(wallet_name='w1', disable_private_keys=True, blank=True, descriptors=True)
w1 = self.nodes[1].get_wallet_rpc('w1')
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
self.nodes[1].createwallet(wallet_name="wpriv", disable_private_keys=False, blank=True, descriptors=True)
wpriv = self.nodes[1].get_wallet_rpc("wpriv")
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 0)
self.log.info('Mining coins')
w0.generatetoaddress(COINBASE_MATURITY + 1, w0.getnewaddress())
# RPC importdescriptors -----------------------------------------------
# # Test import fails if no descriptor present
self.log.info("Import should fail if a descriptor is not provided")
self.test_importdesc({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor not found.')
# # Test importing of a P2PKH descriptor
key = get_generate_key()
self.log.info("Should import a p2pkh descriptor")
import_request = {"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": "Descriptor import test"}
self.test_importdesc(import_request, success=True)
test_address(w1,
key.p2pkh_addr,
solvable=True,
ismine=True,
labels=["Descriptor import test"])
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
self.log.info("Test can import same descriptor with public key twice")
self.test_importdesc(import_request, success=True)
self.log.info("Test can update descriptor label")
self.test_importdesc({**import_request, "label": "Updated label"}, success=True)
test_address(w1, key.p2pkh_addr, solvable=True, ismine=True, labels=["Updated label"])
self.log.info("Internal addresses cannot have labels")
self.test_importdesc({**import_request, "internal": True},
success=False,
error_code=-8,
error_message="Internal addresses should not have a label")
self.log.info("Internal addresses should be detected as such")
key = get_generate_key()
addr = key_to_p2pkh(key.pubkey)
self.test_importdesc({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"internal": True},
success=True)
info = w1.getaddressinfo(addr)
assert_equal(info["ismine"], True)
assert_equal(info["ischange"], True)
# # Test importing of a P2SH-P2WPKH descriptor
key = get_generate_key()
self.log.info("Should not import a p2sh-p2wpkh descriptor without checksum")
self.test_importdesc({"desc": "sh(wpkh(" + key.pubkey + "))",
"timestamp": "now"
},
success=False,
error_code=-5,
error_message="Missing checksum")
self.log.info("Should not import a p2sh-p2wpkh descriptor that has range specified")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"range": 1,
},
success=False,
error_code=-8,
error_message="Range should not be specified for an un-ranged descriptor")
self.log.info("Should not import a p2sh-p2wpkh descriptor and have it set to active")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"active": True,
},
success=False,
error_code=-8,
error_message="Active descriptors must be ranged")
self.log.info("Should import a (non-active) p2sh-p2wpkh descriptor")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"active": False,
},
success=True)
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
test_address(w1,
key.p2sh_p2wpkh_addr,
ismine=True,
solvable=True)
# Check persistence of data and that loading works correctly
w1.unloadwallet()
self.nodes[1].loadwallet('w1')
test_address(w1,
key.p2sh_p2wpkh_addr,
ismine=True,
solvable=True)
# # Test importing of a multisig descriptor
key1 = get_generate_key()
key2 = get_generate_key()
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importdesc({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True)
self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
test_address(w1,
key1.p2pkh_addr,
ismine=False)
# # Test ranged descriptors
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
xpub = "tpubD6NzVbkrYhZ4YNXVQbNhMK1WqguFsUXceaVJKbmno2aZ3B6QfbMeraaYvnBSGpV3vxLyTTK9DYT1yoEck4XUScMzXoQ2U2oSmE2JyMedq3H"
addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"] # hdkeypath=m/0'/0'/0' and 1'
addresses += ["bcrt1qrd3n235cj2czsfmsuvqqpr3lu6lg0ju7scl8gn", "bcrt1qfqeppuvj0ww98r6qghmdkj70tv8qpchehegrg8"] # wpkh subscripts corresponding to the above addresses
desc = "sh(wpkh(" + xpub + "/0/0/*" + "))"
self.log.info("Ranged descriptors cannot have labels")
self.test_importdesc({"desc":descsum_create(desc),
"timestamp": "now",
"range": [0, 100],
"label": "test"},
success=False,
error_code=-8,
error_message='Ranged descriptors should not have a label')
self.log.info("Private keys required for private keys enabled wallet")
self.test_importdesc({"desc":descsum_create(desc),
"timestamp": "now",
"range": [0, 100]},
success=False,
error_code=-4,
error_message='Cannot import descriptor without private keys to a wallet with private keys enabled',
wallet=wpriv)
self.log.info("Ranged descriptor import should warn without a specified range")
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now"},
success=True,
warnings=['Range not given, using default keypool range'])
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
# # Test importing of a ranged descriptor with xpriv
self.log.info("Should not import a ranged descriptor that includes xpriv into a watch-only wallet")
desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=False,
error_code=-4,
error_message='Cannot import private keys to a wallet with private keys disabled')
self.log.info("Should not import a descriptor with hardened derivations when private keys are disabled")
self.test_importdesc({"desc": descsum_create("wpkh(" + xpub + "/1h/*)"),
"timestamp": "now",
"range": 1},
success=False,
error_code=-4,
error_message='Cannot expand descriptor. Probably because of hardened derivations without private keys provided')
for address in addresses:
test_address(w1,
address,
ismine=False,
solvable=False)
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
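# Note: due to Python operator precedence, (2 << 31 + 1) evaluates to 2 << 32 (i.e. 2**33),
# so the requested range end exceeds the allowed maximum and is rejected below.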
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
self.log.info("Verify we can only extend descriptor's range")
range_request = {"desc": descsum_create(desc), "timestamp": "now", "range": [5, 10], 'active': True}
self.test_importdesc(range_request, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 6)
self.test_importdesc({**range_request, "range": [0, 10]}, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 11)
self.test_importdesc({**range_request, "range": [0, 20]}, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 21)
# Can keep range the same
self.test_importdesc({**range_request, "range": [0, 20]}, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 21)
self.test_importdesc({**range_request, "range": [5, 10]}, wallet=wpriv, success=False,
error_code=-8, error_message='new range must include current range = [0,20]')
self.test_importdesc({**range_request, "range": [0, 10]}, wallet=wpriv, success=False,
error_code=-8, error_message='new range must include current range = [0,20]')
self.test_importdesc({**range_request, "range": [5, 20]}, wallet=wpriv, success=False,
error_code=-8, error_message='new range must include current range = [0,20]')
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 21)
self.log.info("Check we can change descriptor internal flag")
self.test_importdesc({**range_request, "range": [0, 20], "internal": True}, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, 'This wallet has no available keys', wpriv.getnewaddress, '', 'p2sh-segwit')
assert_equal(wpriv.getwalletinfo()['keypoolsize_hd_internal'], 21)
wpriv.getrawchangeaddress('p2sh-segwit')
self.test_importdesc({**range_request, "range": [0, 20], "internal": False}, wallet=wpriv, success=True)
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 21)
wpriv.getnewaddress('', 'p2sh-segwit')
assert_equal(wpriv.getwalletinfo()['keypoolsize_hd_internal'], 0)
assert_raises_rpc_error(-4, 'This wallet has no available keys', wpriv.getrawchangeaddress, 'p2sh-segwit')
# Make sure ranged imports import keys in order
w1 = self.nodes[1].get_wallet_rpc('w1')
self.log.info('Key ranges should be imported in order')
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv', # m/0'/0'/0
'bcrt1q8vprchan07gzagd5e6v9wd7azyucksq2xc76k8', # m/0'/0'/1
'bcrt1qtuqdtha7zmqgcrr26n2rqxztv5y8rafjp9lulu', # m/0'/0'/2
'bcrt1qau64272ymawq26t90md6an0ps99qkrse58m640', # m/0'/0'/3
'bcrt1qsg97266hrh6cpmutqen8s4s962aryy77jp0fg0', # m/0'/0'/4
]
self.test_importdesc({'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
self.test_importdesc({'desc': descsum_create('sh(wpkh([abcdef12/0h/0h]' + xpub + '/*))'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
self.test_importdesc({'desc': descsum_create('pkh([12345678/0h/0h]' + xpub + '/*)'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
assert_equal(w1.getwalletinfo()['keypoolsize'], 5 * 3)
for i, expected_addr in enumerate(addresses):
received_addr = w1.getnewaddress('', 'bech32')
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'bech32')
assert_equal(received_addr, expected_addr)
bech32_addr_info = w1.getaddressinfo(received_addr)
assert_equal(bech32_addr_info['desc'][:23], 'wpkh([80002067/0\'/0\'/{}]'.format(i))
shwpkh_addr = w1.getnewaddress('', 'p2sh-segwit')
shwpkh_addr_info = w1.getaddressinfo(shwpkh_addr)
assert_equal(shwpkh_addr_info['desc'][:26], 'sh(wpkh([abcdef12/0\'/0\'/{}]'.format(i))
pkh_addr = w1.getnewaddress('', 'legacy')
pkh_addr_info = w1.getaddressinfo(pkh_addr)
assert_equal(pkh_addr_info['desc'][:22], 'pkh([12345678/0\'/0\'/{}]'.format(i))
assert_equal(w1.getwalletinfo()['keypoolsize'], 4 * 3) # After retrieving a key, we don't refill the keypool again, so it's one less for each address type
w1.keypoolrefill()
assert_equal(w1.getwalletinfo()['keypoolsize'], 5 * 3)
self.log.info("Check we can change next_index")
# go back and forth with next_index
for i in [4, 0, 2, 1, 3]:
self.test_importdesc({'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'active': True,
'range': [0, 9],
'next_index': i,
'timestamp': 'now'
},
success=True)
assert_equal(w1.getnewaddress('', 'bech32'), addresses[i])
# Check active=False default
self.log.info('Check imported descriptors are not active by default')
self.test_importdesc({'desc': descsum_create('pkh([12345678/1h]' + xpub + '/*)'),
'range' : [0, 2],
'timestamp': 'now',
'internal': True
},
success=True)
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'legacy')
self.log.info('Check can activate inactive descriptor')
self.test_importdesc({'desc': descsum_create('pkh([12345678]' + xpub + '/*)'),
'range': [0, 5],
'active': True,
'timestamp': 'now',
'internal': True
},
success=True)
address = w1.getrawchangeaddress('legacy')
assert_equal(address, "mpA2Wh9dvZT7yfELq1UnrUmAoc5qCkMetg")
self.log.info('Check can deactivate active descriptor')
self.test_importdesc({'desc': descsum_create('pkh([12345678]' + xpub + '/*)'),
'range': [0, 5],
'active': False,
'timestamp': 'now',
'internal': True
},
success=True)
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'legacy')
self.log.info('Verify activation state is persistent')
w1.unloadwallet()
self.nodes[1].loadwallet('w1')
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'legacy')
# # Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
address = "2MuhcG52uHPknxDgmGPsV18jSHFBnnRgjPg"
desc = "sh(wpkh(" + wif_priv + "))"
self.log.info("Should import a descriptor with a WIF private key as spendable")
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now"},
success=True,
wallet=wpriv)
self.log.info('Test can import same descriptor with private key twice')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now"}, success=True, wallet=wpriv)
test_address(wpriv,
address,
solvable=True,
ismine=True)
txid = w0.sendtoaddress(address, 49.99995540)
w0.generatetoaddress(6, w0.getnewaddress())
self.sync_blocks()
tx = wpriv.createrawtransaction([{"txid": txid, "vout": 0}], {w0.getnewaddress(): 49.999})
signed_tx = wpriv.signrawtransactionwithwallet(tx)
w1.sendrawtransaction(signed_tx['hex'])
# Make sure that we can use import and use multisig as addresses
self.log.info('Test that multisigs can be imported, signed for, and getnewaddress\'d')
self.nodes[1].createwallet(wallet_name="wmulti_priv", disable_private_keys=False, blank=True, descriptors=True)
wmulti_priv = self.nodes[1].get_wallet_rpc("wmulti_priv")
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 0)
xprv1 = 'tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52'
acc_xpub1 = 'tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8' # /84'/0'/0'
chg_xpub1 = 'tpubDCXqdwWZcszwqYJSnZp8eARkxGJfHAk23KDxbztV4BbschfaTfYLTcSkSJ3TN64dRqwa1rnFUScsYormKkGqNbbPwkorQimVevXjxzUV9Gf' # /84'/1'/0'
xprv2 = 'tprv8ZgxMBicQKsPdSNWUhDiwTScDr6JfkZuLshTRwzvZGnMSnGikV6jxpmdDkC3YRc4T3GD6Nvg9uv6hQg73RVv1EiTXDZwxVbsLugVHU8B1aq'
acc_xprv2 = 'tprv8gVCsmRAxVSxyUpsL13Y7ZEWBFPWbgS5E2MmFVNGuANrknvmmn2vWnmHvU8AwEFYzR2ji6EeZLSCLVacsYkvor3Pcb5JY5FGcevqTwYvdYx'
acc_xpub2 = 'tpubDDBF2BTR6s8drwrfDei8WxtckGuSm1cyoKxYY1QaKSBFbHBYQArWhHPA6eJrzZej6nfHGLSURYSLHr7GuYch8aY5n61tGqgn8b4cXrMuoPH'
chg_xpub2 = 'tpubDCYfZY2ceyHzYzMMVPt9MNeiqtQ2T7Uyp9QSFwYXh8Vi9iJFYXcuphJaGXfF3jUQJi5Y3GMNXvM11gaL4txzZgNGK22BFAwMXynnzv4z2Jh'
xprv3 = 'tprv8ZgxMBicQKsPeonDt8Ka2mrQmHa61hQ5FQCsvWBTpSNzBFgM58cV2EuXNAHF14VawVpznnme3SuTbA62sGriwWyKifJmXntfNeK7zeqMCj1'
acc_xpub3 = 'tpubDCsWoW1kuQB9kG5MXewHqkbjPtqPueRnXju7uM2NK7y3JYb2ajAZ9EiuZXNNuE4661RAfriBWhL8UsnAPpk8zrKKnZw1Ug7X4oHgMdZiU4E'
chg_xpub3 = 'tpubDC6UGqnsQStngYuGD4MKsMy7eD1Yg9NTJfPdvjdG2JE5oZ7EsSL3WHg4Gsw2pR5K39ZwJ46M1wZayhedVdQtMGaUhq5S23PH6fnENK3V1sb'
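# Unlike the other imports in this test, the two descriptors below carry hard-coded
# checksum suffixes (e.g. #m2sr93jn) rather than being wrapped in descsum_create().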
self.test_importdesc({"desc":"wsh(multi(2," + xprv1 + "/84h/0h/0h/*," + xprv2 + "/84h/0h/0h/*," + xprv3 + "/84h/0h/0h/*))#m2sr93jn",
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_priv)
self.test_importdesc({"desc":"wsh(multi(2," + xprv1 + "/84h/1h/0h/*," + xprv2 + "/84h/1h/0h/*," + xprv3 + "/84h/1h/0h/*))#q3sztvx5",
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_priv)
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1001) # Range end (1000) is inclusive, so 1001 addresses generated
addr = wmulti_priv.getnewaddress('', 'bech32')
assert_equal(addr, 'bcrt1qdt0qy5p7dzhxzmegnn4ulzhard33s2809arjqgjndx87rv5vd0fq2czhy8') # Derived at m/84'/0'/0'/0
change_addr = wmulti_priv.getrawchangeaddress('bech32')
assert_equal(change_addr, 'bcrt1qt9uhe3a9hnq7vajl7a094z4s3crm9ttf8zw3f5v9gr2nyd7e3lnsy44n8e')
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1000)
txid = w0.sendtoaddress(addr, 10)
self.nodes[0].generate(6)
self.sync_all()
send_txid = wmulti_priv.sendtoaddress(w0.getnewaddress(), 8)
decoded = wmulti_priv.decoderawtransaction(wmulti_priv.gettransaction(send_txid)['hex'])
assert_equal(len(decoded['vin'][0]['txinwitness']), 4)
self.nodes[0].generate(6)
self.sync_all()
self.nodes[1].createwallet(wallet_name="wmulti_pub", disable_private_keys=True, blank=True, descriptors=True)
wmulti_pub = self.nodes[1].get_wallet_rpc("wmulti_pub")
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 0)
self.test_importdesc({"desc":"wsh(multi(2,[7b2d0242/84h/0h/0h]" + acc_xpub1 + "/*,[59b09cd6/84h/0h/0h]" + acc_xpub2 + "/*,[e81a0532/84h/0h/0h]" + acc_xpub3 +"/*))#tsry0s5e",
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_pub)
self.test_importdesc({"desc":"wsh(multi(2,[7b2d0242/84h/1h/0h]" + chg_xpub1 + "/*,[59b09cd6/84h/1h/0h]" + chg_xpub2 + "/*,[e81a0532/84h/1h/0h]" + chg_xpub3 + "/*))#c08a2rzv",
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_pub)
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 1000) # The first one was already consumed by previous import and is detected as used
addr = wmulti_pub.getnewaddress('', 'bech32')
assert_equal(addr, 'bcrt1qp8s25ckjl7gr6x2q3dx3tn2pytwp05upkjztk6ey857tt50r5aeqn6mvr9') # Derived at m/84'/0'/0'/1
change_addr = wmulti_pub.getrawchangeaddress('bech32')
assert_equal(change_addr, 'bcrt1qt9uhe3a9hnq7vajl7a094z4s3crm9ttf8zw3f5v9gr2nyd7e3lnsy44n8e')
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 999)
# generate some utxos for next tests
txid = w0.sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
addr2 = wmulti_pub.getnewaddress('', 'bech32')
txid2 = w0.sendtoaddress(addr2, 10)
vout2 = find_vout_for_address(self.nodes[0], txid2, addr2)
self.nodes[0].generate(6)
self.sync_all()
assert_equal(wmulti_pub.getbalance(), wmulti_priv.getbalance())
# Make sure that descriptor wallets containing multiple xpubs in a single descriptor load correctly
wmulti_pub.unloadwallet()
self.nodes[1].loadwallet('wmulti_pub')
self.log.info("Multisig with distributed keys")
self.nodes[1].createwallet(wallet_name="wmulti_priv1", descriptors=True)
wmulti_priv1 = self.nodes[1].get_wallet_rpc("wmulti_priv1")
res = wmulti_priv1.importdescriptors([
{
"desc": descsum_create("wsh(multi(2," + xprv1 + "/84h/0h/0h/*,[59b09cd6/84h/0h/0h]" + acc_xpub2 + "/*,[e81a0532/84h/0h/0h]" + acc_xpub3 + "/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
{
"desc": descsum_create("wsh(multi(2," + xprv1 + "/84h/1h/0h/*,[59b09cd6/84h/1h/0h]" + chg_xpub2 + "/*,[e81a0532/84h/1h/0h]" + chg_xpub3 + "/*))"),
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[0]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
assert_equal(res[1]['success'], True)
assert_equal(res[1]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
self.nodes[1].createwallet(wallet_name='wmulti_priv2', blank=True, descriptors=True)
wmulti_priv2 = self.nodes[1].get_wallet_rpc('wmulti_priv2')
res = wmulti_priv2.importdescriptors([
{
"desc": descsum_create("wsh(multi(2,[7b2d0242/84h/0h/0h]" + acc_xpub1 + "/*," + xprv2 + "/84h/0h/0h/*,[e81a0532/84h/0h/0h]" + acc_xpub3 + "/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
{
"desc": descsum_create("wsh(multi(2,[7b2d0242/84h/1h/0h]" + chg_xpub1 + "/*," + xprv2 + "/84h/1h/0h/*,[e81a0532/84h/1h/0h]" + chg_xpub3 + "/*))"),
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[0]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
assert_equal(res[1]['success'], True)
assert_equal(res[1]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
rawtx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {w0.getnewaddress(): 9.999})
tx_signed_1 = wmulti_priv1.signrawtransactionwithwallet(rawtx)
assert_equal(tx_signed_1['complete'], False)
tx_signed_2 = wmulti_priv2.signrawtransactionwithwallet(tx_signed_1['hex'])
assert_equal(tx_signed_2['complete'], True)
self.nodes[1].sendrawtransaction(tx_signed_2['hex'])
self.log.info("We can create and use a huge multisig under P2WSH")
self.nodes[1].createwallet(wallet_name='wmulti_priv_big', blank=True, descriptors=True)
wmulti_priv_big = self.nodes[1].get_wallet_rpc('wmulti_priv_big')
xkey = "tprv8ZgxMBicQKsPeZSeYx7VXDDTs3XrTcmZQpRLbAeSQFCQGgKwR4gKpcxHaKdoTNHniv4EPDJNdzA3KxRrrBHcAgth8fU5X4oCndkkxk39iAt/*"
xkey_int = "tprv8ZgxMBicQKsPeZSeYx7VXDDTs3XrTcmZQpRLbAeSQFCQGgKwR4gKpcxHaKdoTNHniv4EPDJNdzA3KxRrrBHcAgth8fU5X4oCndkkxk39iAt/1/*"
res = wmulti_priv_big.importdescriptors([
{
"desc": descsum_create(f"wsh(sortedmulti(20,{(xkey + ',') * 19}{xkey}))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
{
"desc": descsum_create(f"wsh(sortedmulti(20,{(xkey_int + ',') * 19}{xkey_int}))"),
"active": True,
"internal": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[1]['success'], True)
addr = wmulti_priv_big.getnewaddress()
w0.sendtoaddress(addr, 10)
self.nodes[0].generate(1)
self.sync_all()
# It is standard and would relay.
txid = wmulti_priv_big.sendtoaddress(w0.getnewaddress(), 9.999)
decoded = wmulti_priv_big.decoderawtransaction(wmulti_priv_big.gettransaction(txid)['hex'])
# 20 sigs + dummy + witness script
assert_equal(len(decoded['vin'][0]['txinwitness']), 22)
self.log.info("Under P2SH, multisigs are standard with up to 15 "
"compressed keys")
self.nodes[1].createwallet(wallet_name='multi_priv_big_legacy',
blank=True, descriptors=True)
multi_priv_big = self.nodes[1].get_wallet_rpc('multi_priv_big_legacy')
res = multi_priv_big.importdescriptors([
{
"desc": descsum_create(f"sh(multi(15,{(xkey + ',') * 14}{xkey}))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
{
"desc": descsum_create(f"sh(multi(15,{(xkey_int + ',') * 14}{xkey_int}))"),
"active": True,
"internal": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[1]['success'], True)
addr = multi_priv_big.getnewaddress("", "legacy")
w0.sendtoaddress(addr, 10)
self.nodes[0].generate(6)
self.sync_all()
# It is standard and would relay.
txid = multi_priv_big.sendtoaddress(w0.getnewaddress(), 10, "", "",
True)
decoded = multi_priv_big.decoderawtransaction(
multi_priv_big.gettransaction(txid)['hex']
)
self.log.info("Amending multisig with new private keys")
self.nodes[1].createwallet(wallet_name="wmulti_priv3", descriptors=True)
wmulti_priv3 = self.nodes[1].get_wallet_rpc("wmulti_priv3")
res = wmulti_priv3.importdescriptors([
{
"desc": descsum_create("wsh(multi(2," + xprv1 + "/84h/0h/0h/*,[59b09cd6/84h/0h/0h]" + acc_xpub2 + "/*,[e81a0532/84h/0h/0h]" + acc_xpub3 + "/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
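# Re-importing the same descriptor, now with acc_xprv2 (a private key) in place of the
# corresponding xpub, adds signing ability to the already-imported multisig.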
res = wmulti_priv3.importdescriptors([
{
"desc": descsum_create("wsh(multi(2," + xprv1 + "/84h/0h/0h/*,[59b09cd6/84h/0h/0h]" + acc_xprv2 + "/*,[e81a0532/84h/0h/0h]" + acc_xpub3 + "/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
rawtx = self.nodes[1].createrawtransaction([{'txid': txid2, 'vout': vout2}], {w0.getnewaddress(): 9.999})
tx = wmulti_priv3.signrawtransactionwithwallet(rawtx)
assert_equal(tx['complete'], True)
self.nodes[1].sendrawtransaction(tx['hex'])
self.log.info("Combo descriptors cannot be active")
self.test_importdesc({"desc": descsum_create("combo(tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*)"),
"active": True,
"range": 1,
"timestamp": "now"},
success=False,
error_code=-4,
error_message="Combo descriptors cannot be set to active")
self.log.info("Descriptors with no type cannot be active")
self.test_importdesc({"desc": descsum_create("pk(tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*)"),
"active": True,
"range": 1,
"timestamp": "now"},
success=True,
warnings=["Unknown output type, cannot set descriptor to active."])
if __name__ == '__main__':
ImportDescriptorsTest().main()
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""
import collections
import inspect
import math
import os
import re
import shutil
import time
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
)
from .modeling_utils import PreTrainedModel
from .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
TrainOutput,
default_compute_objective,
default_hp_space,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
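# Native AMP (torch.cuda.amp) requires torch >= 1.6; the flag below is flipped to True
# once that version check passes further down in this module.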
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
def _model_unwrap(model: nn.Module) -> nn.Module:
# since there could be multiple levels of wrapping, unwrap recursively
if hasattr(model, "module"):
return _model_unwrap(model.module)
else:
return model
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
maximum length when batching inputs, and it will be saved along with the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyperparameters (such as layer count, sizes of inner
layers, dropout probabilities, etc.).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping metric names to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
"""
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# Model parallel
if not self.is_model_parallel:
model = model.to(args.device)
else:
# Force n_gpu to 1 to avoid DataParallel.
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):
# Set an xla_device flag on the model's config.
# We'll find a more elegant way and not need to do this in the future.
self.model.config.xla_device = True
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Setup Sharded DDP training
self.sharded_dpp = False
if args.sharded_ddp:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
else:
self.sharded_dpp = True
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_dpp else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable used to track total_flos as a tensor (for distributed + TPU); it will be sent in the
# state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
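# Question-answering models are trained on start/end position targets rather than a
# single `labels` tensor, hence the different default label names below.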
default_label_names = (
["start_positions", "end_positions"]
if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
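# Minimal usage sketch (illustrative only; `my_model` and `train_ds` are hypothetical
# placeholders, not defined in this module):
#   args = TrainingArguments(output_dir="tmp_trainer", num_train_epochs=1)
#   trainer = Trainer(model=my_model, args=args, train_dataset=train_ds)
#   trainer.train()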
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
signature_columns += ["label", "label_ids"]
columns = [k for k in signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(signature_columns))
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Gather the number of processes and this process index.
if self.args.parallel_mode == ParallelMode.TPU:
num_processes = xm.xrt_world_size()
process_index = xm.get_ordinal()
elif (
self.args.parallel_mode == ParallelMode.DISTRIBUTED
or self.args.parallel_mode == ParallelMode.SAGEMAKER_DISTRIBUTED
):
num_processes = dist.get_world_size()
process_index = dist.get_rank()
else:
num_processes = 1
process_index = 0
# Build the sampler.
if self.args.group_by_length:
if num_processes <= 1:
return LengthGroupedSampler(self.train_dataset, self.args.train_batch_size)
else:
return DistributedLengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, num_replicas=num_processes, rank=process_index
)
else:
if num_processes <= 1:
return RandomSampler(self.train_dataset)
else:
return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_dpp:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info(f"Trial: {trial.params}")
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.state.global_step % self.args.save_steps == 0:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
self.args.output_dir = checkpoint_dir
output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def train(
self,
resume_from_checkpoint: Optional[str] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str`, `optional`):
Local path to a saved checkpoint as saved by a previous instance of :class:`~transformers.Trainer`. If
present, training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}.")
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(resume_from_checkpoint)
model_reloaded = True
else:
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if not self.is_model_parallel:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
# Keep track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
else:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
model = self.model_wrapped
# Mixed precision training with apex (torch < 1.6)
if self.use_apex:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if self.sharded_dpp:
model = ShardedDDP(model, self.optimizer)
elif is_sagemaker_distributed_available():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
# _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += self.floating_point_ops(inputs)
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
self.deepspeed.step()
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if not self.is_model_parallel:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
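# A minimal, hedged usage sketch of the resume logic implemented above; the output
# directory and checkpoint folder name are illustrative placeholders, not values from
# this file:
#
# trainer = Trainer(model=model, args=training_args, train_dataset=train_ds)
# trainer.train()                                                # fresh run
# trainer.train(resume_from_checkpoint="output/checkpoint-500")  # reloads model weights,
#                                                                # optimizer/scheduler and
#                                                                # TrainerState before resuming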
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
# backward compatibility for pytorch schedulers
logs["learning_rate"] = (
self.lr_scheduler.get_last_lr()[0]
if version.parse(torch.__version__) >= version.parse("1.4")
else self.lr_scheduler.get_lr()[0]
)
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save.
assert _model_unwrap(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)
else:
output_dir = os.path.join(self.args.output_dir, checkpoint_folder)
self.store_flos()
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_dpp:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.deepspeed:
# There is no clean way to check whether a saved deepspeed checkpoint exists, but load_checkpoint simply returns None when it cannot find one, so this call works as a check-and-load.
self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
Whether to optimize for a greater or lower objective. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
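# A hedged usage sketch for hyperparameter_search, assuming Optuna is installed and the
# Trainer was constructed with a `model_init`; the search-space names and ranges are
# illustrative only:
#
# def my_hp_space(trial):
#     return {
#         "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-4, log=True),
#         "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
#     }
#
# best_run = trainer.hyperparameter_search(hp_space=my_hp_space, n_trials=10, direction="minimize")
# print(best_run.hyperparameters)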
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
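# A hedged sketch of the subclass-and-override pattern suggested above, assuming a
# classification model whose outputs expose `logits`; the class weights are illustrative:
#
# class WeightedLossTrainer(Trainer):
#     def compute_loss(self, model, inputs, return_outputs=False):
#         labels = inputs.pop("labels")
#         outputs = model(**inputs)
#         logits = outputs.logits
#         weight = torch.tensor([1.0, 2.0], device=logits.device)  # hypothetical class weights
#         loss_fct = nn.CrossEntropyLoss(weight=weight)
#         loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
#         return (loss, outputs) if return_outputs else loss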
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or dist.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the world_master process (unless in TPUs).
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif self.is_world_process_zero():
self._save(output_dir)
# If on sagemaker and we are saving the main model (not a checkpoint so output_dir=None), save a copy to
# SM_MODEL_DIR for easy deployment.
if output_dir is None and os.getenv("SM_MODEL_DIR") is not None:
self.save_model(output_dir=os.getenv("SM_MODEL_DIR"))
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
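# Hedged note on the two methods above: with e.g. TrainingArguments(save_total_limit=2),
# only the oldest checkpoints are deleted until at most two remain, and the checkpoint
# recorded in state.best_model_checkpoint is swapped to the end of the sorted list so it
# is never removed.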
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
return output.metrics
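# A hedged usage sketch for evaluate(); metric_key_prefix only renames the returned keys,
# and `val_ds` is a placeholder dataset defined elsewhere:
#
# metrics = trainer.evaluate()                                      # uses self.eval_dataset
# metrics = trainer.evaluate(eval_dataset=val_ds, metric_key_prefix="val")
# print(metrics["val_loss"])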
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
return output
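# A hedged usage sketch for predict(), assuming a classification head so that the argmax
# over the last axis of `predictions` gives class ids:
#
# output = trainer.predict(test_dataset)
# preds = np.argmax(output.predictions, axis=-1)
# print(output.metrics)  # evaluation loss (if labels were present) plus speed metrics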
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
model = self.model
# multi-gpu eval
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = 1
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
world_size = max(1, world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using :obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
with torch.no_grad():
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
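# A hedged sketch of how a custom (non-PreTrainedModel) module could expose this hook so
# the Trainer can still accumulate total_flos; the 6 * parameters * tokens estimate is
# only an illustrative approximation, not the library's formula:
#
# class MyModel(nn.Module):
#     def floating_point_ops(self, inputs):
#         num_tokens = inputs["input_ids"].numel()
#         num_params = sum(p.numel() for p in self.parameters())
#         return 6 * num_params * num_tokens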
| # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""
import collections
import inspect
import math
import os
import re
import shutil
import time
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
)
from .modeling_utils import PreTrainedModel
from .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
TrainOutput,
default_compute_objective,
default_hp_space,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
def _model_unwrap(model: nn.Module) -> nn.Module:
# since there could be multiple levels of wrapping, unwrap recursively
if hasattr(model, "module"):
return _model_unwrap(model.module)
else:
return model
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyperparameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
"""
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# Model parallel
if not self.is_model_parallel:
model = model.to(args.device)
else:
# Force n_gpu to 1 to avoid DataParallel.
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):
# Set an xla_device flag on the model's config.
# We'll find a more elegant way to do this (and remove the need for it) in the future.
self.model.config.xla_device = True
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Setup Sharded DDP training
self.sharded_dpp = False
if args.sharded_ddp:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
else:
self.sharded_dpp = True
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_dpp else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
# state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
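# A hedged sketch of the callback API used by the three methods above: a custom
# TrainerCallback that prints a message at the end of each epoch, registered via
# add_callback (either the class or an instance is accepted):
#
# class EpochEndLogger(TrainerCallback):
#     def on_epoch_end(self, args, state, control, **kwargs):
#         print(f"finished epoch {state.epoch}")
#
# trainer.add_callback(EpochEndLogger)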
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
signature_columns += ["label", "label_ids"]
columns = [k for k in signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(signature_columns))
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Gather the number of processes and this process index.
if self.args.parallel_mode == ParallelMode.TPU:
num_processes = xm.xrt_world_size()
process_index = xm.get_ordinal()
elif (
self.args.parallel_mode == ParallelMode.DISTRIBUTED
or self.args.parallel_mode == ParallelMode.SAGEMAKER_DISTRIBUTED
):
num_processes = dist.get_world_size()
process_index = dist.get_rank()
else:
num_processes = 1
process_index = 0
# Build the sampler.
if self.args.group_by_length:
if num_processes <= 1:
return LengthGroupedSampler(self.train_dataset, self.args.train_batch_size)
else:
return DistributedLengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, num_replicas=num_processes, rank=process_index
)
else:
if num_processes <= 1:
return RandomSampler(self.train_dataset)
else:
return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
            test_dataset (:obj:`torch.utils.data.dataset.Dataset`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if self.optimizer is None:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
            if self.args.adafactor:
                optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_dpp:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=num_training_steps,
)
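    # Illustrative note (not part of the original file): as the docstring above mentions, a
    # custom optimizer/scheduler pair can also be supplied at construction time instead of
    # overriding this method. A minimal sketch, assuming `model` and `training_args` exist:
    #
    #   optimizer = AdamW(model.parameters(), lr=training_args.learning_rate)
    #   scheduler = get_scheduler("linear", optimizer, num_warmup_steps=0, num_training_steps=1000)
    #   trainer = Trainer(model=model, args=training_args, optimizers=(optimizer, scheduler))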
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
        Will raise an exception if the underlying dataset does not implement :obj:`__len__`.
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.state.global_step % self.args.save_steps == 0:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
self.args.output_dir = checkpoint_dir
output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
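    # Sketch (hypothetical, not from the original file) of the two `model_init` signatures
    # accepted by the check above: either no argument, or a single `trial` argument that a
    # hyperparameter search backend can use.
    #
    #   def model_init(trial):
    #       return AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")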
def train(
self,
resume_from_checkpoint: Optional[str] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str`, `optional`):
Local path to a saved checkpoint as saved by a previous instance of :class:`~transformers.Trainer`. If
present, training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}).")
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(resume_from_checkpoint)
model_reloaded = True
else:
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if not self.is_model_parallel:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
        # Keep track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
else:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
model = self.model_wrapped
# Mixed precision training with apex (torch < 1.6)
if self.use_apex:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if self.sharded_dpp:
model = ShardedDDP(model, self.optimizer)
elif is_sagemaker_distributed_available():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
        # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += self.floating_point_ops(inputs)
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
self.deepspeed.step()
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if not self.is_model_parallel:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
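    # Usage sketch (hypothetical checkpoint path): training can be resumed from a directory
    # written by a previous run, which must contain the model weights and trainer_state.json
    # for the epoch/step skipping above to apply.
    #
    #   trainer.train(resume_from_checkpoint="output/checkpoint-500")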
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
# backward compatibility for pytorch schedulers
logs["learning_rate"] = (
self.lr_scheduler.get_last_lr()[0]
if version.parse(torch.__version__) >= version.parse("1.4")
else self.lr_scheduler.get_lr()[0]
)
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save.
assert _model_unwrap(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)
else:
output_dir = os.path.join(self.args.output_dir, checkpoint_folder)
self.store_flos()
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_dpp:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.deepspeed:
            # There is no clean way to check whether a deepspeed checkpoint exists, but load_checkpoint simply
            # returns None when it cannot find one, so this effectively acts as a check-and-load call.
self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
            direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize for a greater or lower objective. Can be :obj:`"minimize"` or
                :obj:`"maximize"`: pick :obj:`"minimize"` when optimizing the validation loss and
                :obj:`"maximize"` when optimizing one or several metrics.
            backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
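    # Minimal sketch of a call with the optuna backend (the search space below is a
    # hypothetical example, not part of the original file). The trainer must have been
    # constructed with a `model_init`, as enforced above.
    #
    #   def my_hp_space(trial):
    #       return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-5, log=True)}
    #
    #   best_run = trainer.hyperparameter_search(hp_space=my_hp_space, n_trials=10, direction="minimize")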
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
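    # Sketch of the override pattern mentioned in the docstring (hypothetical subclass and
    # criterion, not part of the original file):
    #
    #   class MyTrainer(Trainer):
    #       def compute_loss(self, model, inputs, return_outputs=False):
    #           labels = inputs.pop("labels")
    #           outputs = model(**inputs)
    #           loss = my_criterion(outputs.logits, labels)  # user-supplied loss function
    #           return (loss, outputs) if return_outputs else loss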
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or dist.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the world_master process (unless in TPUs).
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif self.is_world_process_zero():
self._save(output_dir)
# If on sagemaker and we are saving the main model (not a checkpoint so output_dir=None), save a copy to
# SM_MODEL_DIR for easy deployment.
if output_dir is None and os.getenv("SM_MODEL_DIR") is not None:
self.save_model(output_dir=os.getenv("SM_MODEL_DIR"))
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
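    # Behavior note: with save_total_limit=2 and checkpoint-100, checkpoint-200, checkpoint-300
    # on disk, the oldest folder is deleted first; the tracked best model is swapped to the end
    # of the sorted list by _sorted_checkpoints above, so it is never rotated out.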
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
return output.metrics
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
return output
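    # Usage sketch (assumes a tokenized `test_dataset` compatible with the model):
    #
    #   output = trainer.predict(test_dataset)
    #   preds = output.predictions.argmax(axis=-1)  # e.g. for a classification head
    #   print(output.metrics)                       # label-dependent metrics appear only if labels were present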
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
model = self.model
# multi-gpu eval
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = 1
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
world_size = max(1, world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
        Perform an evaluation step on :obj:`model` using :obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
with torch.no_grad():
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
|
"""variants_process main command."""
import timeit
import re
from datetime import datetime
from os.path import join
import pandas as pd
import numpy as np
import pybedtools as pyb
START = timeit.default_timer()
## IMPORT VARIANTS FILE
def import_variants(path):
"""
    Determine the file type and import it; returns a pandas DataFrame.
"""
    if re.search(r'\.csv$', path):
try:
variants = pd.read_csv(
filepath_or_buffer=path,
comment='#',
low_memory=False)
        except Exception as err:
            raise Exception(f'Error when importing file {path}') from err
print(f'Loaded file containing {variants.shape[0]} '
f'variant calls. Processing...')
return(variants)
    elif re.search(r'\.tsv\.gz$', path):
try:
variants = pd.read_csv(
filepath_or_buffer=path,
compression='gzip',
sep='\t',
comment='#',
low_memory=False)
        except Exception as err:
            raise Exception(f'Error when importing file {path}') from err
print(f'Loaded file containing {variants.shape[0]} '
f'variant calls. Processing...')
return(variants)
else:
raise Exception(f'Input file {path} has unsupported '
f'extension: try .csv or .tsv.gz')
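# Usage sketch (hypothetical paths): both supported formats go through the same entry point.
#
#   variants = import_variants('data/variant_calls.csv')      # plain CSV
#   variants = import_variants('data/variant_calls.tsv.gz')   # gzipped TSV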
## ANNOTATION FUNCTIONS
def annotate_cosmic(variants):
"""
Generate columns:
HEME_EXACT: Number of exact matches for hematopoietic and
lymphoid tissue in cosmic.
ANY_EXACT_POS: YES/NO for any EXACT or POS match in cosmic.
"""
heme_exact = []
cosmic = variants['COSMIC'].tolist()
search_1 = 'HAEMATOPOIETIC_AND_LYMPHOID_TISSUE'
search_2 = r'(?<=HAEMATOPOIETIC_AND_LYMPHOID_TISSUE=)\w+'
for entry in cosmic:
if pd.isnull(entry):
heme_exact.append(None)
else:
first = entry.split('|')[0]
if re.search('^GENOMIC_EXACT', first):
if re.search(search_1, first):
count = re.search(search_2, first)[0]
heme_exact.append(count)
else:
heme_exact.append(None)
else:
heme_exact.append(None)
variants['HEME_EXACT'] = heme_exact
any_exact_pos = []
for entry in cosmic:
if pd.isnull(entry):
any_exact_pos.append(0)
elif re.search(
'GENOMIC_EXACT', entry) or re.search(
'GENOMIC_POS', entry):
any_exact_pos.append(1)
else:
any_exact_pos.append(0)
variants['ANY_EXACT_POS'] = any_exact_pos
return(variants)
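# Illustrative COSMIC entry (format inferred from the regexes above; the field separators are
# assumptions, not real data): a value whose first '|'-delimited field looks like
#   'GENOMIC_EXACT;HAEMATOPOIETIC_AND_LYMPHOID_TISSUE=12'
# would yield HEME_EXACT = '12' and ANY_EXACT_POS = 1 for that variant.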
def annotate_genefreq(variants, genes):
"""
Generate column:
MAX_MUTFREQ: Maximal mutation frequency in gene
as previously published in large MM studies.
"""
freqlist = pd.read_excel(io=genes)
freqlist['MAX_MUTFREQ'] = round(
freqlist.filter(regex='freq').max(axis=1), 1)
freqlist = freqlist[['GENE', 'MAX_MUTFREQ']]
variants = pd.merge(variants, freqlist, how='left')
return(variants)
def annotate_maf(variants):
"""
Generate column:
MAX_MAF: Maximal MAF of variant in any normal database
"""
    variants['MAX_MAF'] = 0  # Create the column first so the MAF filter below includes it, giving a 0 default when no frequency is reported
variants['MAX_MAF'] = variants.filter(regex='MAF').max(axis=1)
return(variants)
def annotate_normals(variants, path_normals):
"""
Annotates variants with internal normal controls:
Class: Close (chr, start within 10 bp),
Pos (chr, start),
Exact (chr, start, ref, alt)
Frequency: Number of matches
VAF: Median VAF
Q25: 25th VAF-quartile
Q75: 75th VAF-quartile
Positions: START position
Change: REF > ALT
"""
normals = pd.read_csv(
filepath_or_buffer=path_normals)
normals = normals.set_index(['CHR','START'])
def annot_row(row, data):
thres = 10
chrom = str(row['CHR'])
start = row['START']
po = (chrom, start) in data.index
        close = data.loc[(chrom, start - thres):(chrom, start + thres)]
if po:
pos = data.loc[(chrom, start)]
exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]
if len(exact) > 0:
ex_out = ['genomic_exact',
exact['count'].iloc[0],
exact['MEDIAN_VAF'].iloc[0],
exact['VAF_Q25'].iloc[0],
exact['VAF_Q75'].iloc[0],
start,
exact['REF'].iloc[0] + '>' + exact['ALT'].iloc[0]
]
return pd.Series(ex_out)
else:
pos_out = ['genomic_pos',
', '.join(pos['count'].astype(str)),
', '.join(pos['MEDIAN_VAF'].astype(str)),
', '.join(pos['VAF_Q25'].astype(str)),
', '.join(pos['VAF_Q75'].astype(str)),
', '.join([str(i) for i in pos.index.\
get_level_values('START').tolist()]),
', '.join([str(a) + '>' + str(b) for a, b in \
zip(pos['REF'], pos['ALT'])])
]
return pd.Series(pos_out)
elif close.shape[0] > 0:
cl_out = ['genomic_close',
', '.join(close['count'].astype(str).tolist()),
', '.join(close['MEDIAN_VAF'].astype(str).tolist()),
', '.join(close['VAF_Q25'].astype(str).tolist()),
', '.join(close['VAF_Q75'].astype(str).tolist()),
', '.join([str(i) for i in close.index.\
get_level_values('START').tolist()]),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(close['REF'].tolist(), close['ALT'].tolist())]))
]
return pd.Series(cl_out)
else:
return pd.Series([None]*7)
out_names = ["_Class", "_Frequency", "_VAF", "_Q25", "_Q75",
"_Position", "_Change"]
out_names = ['Normals' + s for s in out_names]
variants[out_names] = variants.apply(lambda row: annot_row(row, normals),
axis=1)
return(variants)
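# Usage sketch (hypothetical path): the normals table is expected to carry at least the
# CHR, START, REF, ALT, count, MEDIAN_VAF, VAF_Q25 and VAF_Q75 columns used above.
#
#   variants = annotate_normals(variants, 'data/internal_normals.csv')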
def annotate_mmrf(variants, path_mmrf):
"""
Annotates variants with MMRF data:
Class: Close (chr, start within 10 bp),
Pos (chr, start),
Exact (chr, start, ref, alt)
Frequency: Number of matches
VAF: Median VAF
Q25: 25th VAF-quartile
Q75: 75th VAF-quartile
Positions: START position
Change: REF > ALT
"""
mmrf = pd.read_csv(filepath_or_buffer=path_mmrf, sep='\t')
mmrf = mmrf[["#CHROM", "POS", "REF", "ALT", "GEN[1].AR"]]
mmrf = mmrf.drop_duplicates() ## What are these duplicates?
mmrf.columns = ["CHR", "START", "REF", "ALT", "TARGET_VAF"]
def annot_row(row, data):
thres = 10
subdat = data[data['CHR'].astype(str) == str(row['CHR'])]
        po = row['START'] in subdat['START'].to_numpy().astype(int)
        close = (abs(subdat['START'].to_numpy() \
            .astype(int) - row['START']) < thres)
if po:
pos = subdat[subdat['START'] == row['START']]
exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]
if len(exact) > 0:
ex_out = ['genomic_exact',
exact['REF'].count(),
exact['TARGET_VAF'].median(),
exact['TARGET_VAF'].quantile(q=0.25),
exact['TARGET_VAF'].quantile(q=0.75),
', '.join(set(exact['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(exact['REF'].tolist(), exact['ALT'].tolist())]))
]
return pd.Series(ex_out)
else:
pos_out = ['genomic_pos',
pos['REF'].count(),
pos['TARGET_VAF'].median(),
pos['TARGET_VAF'].quantile(q=0.25),
pos['TARGET_VAF'].quantile(q=0.75),
', '.join(set(pos['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(pos['REF'].tolist(), pos['ALT'].tolist())]))
]
return pd.Series(pos_out)
elif close.any():
close = subdat[close]
cl_out = ['genomic_close',
', '.join(close.groupby(['ALT', 'REF']).size() \
.astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.median().astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.quantile(q=0.25).astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.quantile(q=0.75).astype(str).tolist()),
', '.join(set(close['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(close['REF'].tolist(), close['ALT'].tolist())]))
]
return pd.Series(cl_out)
else:
return pd.Series([None]*7)
out_names = ["_Class", "_Frequency", "_VAF", "_Q25", "_Q75",
"_Position", "_Change"]
out_names = ['MMRF' + s for s in out_names]
variants[out_names] = variants.apply(lambda row: annot_row(row, mmrf),
axis=1)
return(variants)
def annotate_bolli(variants, path_bolli):
"""
Annotates variants with Bolli data:
Class: Close (chr, start within 10 bp),
Pos (chr, start),
Exact (chr, start, ref, alt)
Frequency: Number of matches
Positions: START position
Change: REF > ALT
Annotation: Manual annotation category.
"""
bolli = pd.read_csv(filepath_or_buffer=path_bolli, sep='\t')
bolli = bolli[["CHR", "START", "WT", "MT", "Variant_class"]]
bolli.columns = ["CHR", "START", "REF", "ALT", "ANNOTATION"]
def annot_row(row, data):
thres = 10
subdat = data[data['CHR'].astype(str) == str(row['CHR'])]
        po = row['START'] in subdat['START'].to_numpy().astype(int)
        close = (abs(subdat['START'].to_numpy() \
            .astype(int) - row['START']) < thres)
if po:
pos = subdat[subdat['START'] == row['START']]
exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]
if len(exact) > 0:
ex_out = ['genomic_exact',
exact['REF'].count(),
', '.join(set(exact['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(exact['REF'].tolist(), exact['ALT'].tolist())])),
', '.join(set(exact['ANNOTATION']))
]
return pd.Series(ex_out)
else:
pos_out = ['genomic_pos',
pos['REF'].count(),
', '.join(set(pos['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(pos['REF'].tolist(), pos['ALT'].tolist())])),
', '.join(set(pos['ANNOTATION']))
]
return pd.Series(pos_out)
elif close.any():
close = subdat[close]
cl_out = ['genomic_close',
', '.join(close.groupby(['ALT', 'REF']).size() \
.astype(str).tolist()),
', '.join(set(close['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(close['REF'].tolist(), close['ALT'].tolist())])),
', '.join(set(close['ANNOTATION']))
]
return pd.Series(cl_out)
else:
return pd.Series([None]*5)
out_names = ["_Class", "_Frequency",
"_Position", "_Change", "_Annotation"]
out_names = ['Bolli' + s for s in out_names]
variants[out_names] = variants.apply(lambda row: annot_row(row, bolli),
axis=1)
return(variants)
def annotate_lohr(variants, lohr_path):
"""
Annotates variants with lohr data:
Class: Close (chr, start within 10 bp),
Pos (chr, start),
Exact (chr, start, ref, alt)
Frequency: Number of matches
Positions: START position
Change: REF > ALT
"""
lohr = pd.read_csv(filepath_or_buffer=lohr_path, sep='\t')
lohr = lohr[["Chromosome", "Start_Position", "Reference_Allele",
"Tumor_Seq_Allele2"]]
lohr.columns = ["CHR", "START", "REF", "ALT"]
def annot_row(row, data):
thres = 10
subdat = data[data['CHR'].astype(str) == str(row['CHR'])]
po = row['START'] in subdat['START'].values.astype(int)
close = (abs(subdat['START'].values \
.astype(int) - row['START']) < thres)
if po:
pos = subdat[subdat['START'] == row['START']]
exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]
if len(exact) > 0:
ex_out = ['genomic_exact',
exact['REF'].count(),
', '.join(set(exact['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(exact['REF'].tolist(), exact['ALT'].tolist())]))
]
return pd.Series(ex_out)
else:
pos_out = ['genomic_pos',
pos['REF'].count(),
', '.join(set(pos['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(pos['REF'].tolist(), pos['ALT'].tolist())]))
]
return pd.Series(pos_out)
elif close.any():
close = subdat[close]
cl_out = ['genomic_close',
', '.join(close.groupby(['ALT', 'REF']).size() \
.astype(str).tolist()),
', '.join(set(close['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(close['REF'].tolist(), close['ALT'].tolist())]))
]
return pd.Series(cl_out)
else:
return pd.Series([None]*4)
out_names = ["_Class", "_Frequency",
"_Position", "_Change"]
out_names = ['Lohr' + s for s in out_names]
variants[out_names] = variants.apply(lambda row: annot_row(row, lohr),
axis=1)
return(variants)
def annotate_mytype(variants, path_mytype):
"""
Annotates variants with previous myTYPE data:
Class: Close (chr, start within 10 bp),
Pos (chr, start),
Exact (chr, start, ref, alt)
Frequency: Number of matches
VAF: Median VAF
Q25: 25th VAF-quartile
Q75: 75th VAF-quartile
Positions: START position
Change: REF > ALT
Annotation: Manual annotation category.
"""
mytype = pd.read_csv(filepath_or_buffer=path_mytype, sep=',')
mytype = mytype[["CHR", "START", "REF", "ALT",
"MANUAL_ANNOTATION", "TARGET_VAF"]]
mytype.columns = ["CHR", "START", "REF", "ALT",
"ANNOTATION", "TARGET_VAF"]
def annot_row(row, data):
thres = 10
subdat = data[data['CHR'].astype(str) == str(row['CHR'])]
po = row['START'] in subdat['START'].values.astype(int)
close = (abs(subdat['START'].values \
.astype(int) - row['START']) < thres)
if po:
pos = subdat[subdat['START'] == row['START']]
exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]
if len(exact) > 0:
ex_out = ['genomic_exact',
exact['REF'].count(),
exact['TARGET_VAF'].median(),
exact['TARGET_VAF'].quantile(q=0.25),
exact['TARGET_VAF'].quantile(q=0.75),
', '.join(set(exact['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(exact['REF'].tolist(), exact['ALT'].tolist())])),
', '.join(set(exact['ANNOTATION']))
]
return pd.Series(ex_out)
else:
pos_out = ['genomic_pos',
pos['REF'].count(),
pos['TARGET_VAF'].median(),
pos['TARGET_VAF'].quantile(q=0.25),
pos['TARGET_VAF'].quantile(q=0.75),
', '.join(set(pos['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(pos['REF'].tolist(), pos['ALT'].tolist())])),
', '.join(set(pos['ANNOTATION']))
]
return pd.Series(pos_out)
elif close.any():
close = subdat[close]
cl_out = ['genomic_close',
', '.join(close.groupby(['ALT', 'REF']).size() \
.astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.median().astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.quantile(q=0.25).astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.quantile(q=0.75).astype(str).tolist()),
', '.join(set(close['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(close['REF'].tolist(), close['ALT'].tolist())])),
', '.join(set(close['ANNOTATION']))
]
return pd.Series(cl_out)
else:
return pd.Series([None]*8)
out_names = ["_Class", "_Frequency", "_VAF", "_Q25", "_Q75",
"_Position", "_Change", "_Annotation"]
out_names = ['myTYPE' + s for s in out_names]
variants[out_names] = variants.apply(lambda row: annot_row(row, mytype),
axis=1)
return(variants)
def annotate_known(variants, mytype):
"""
Generate columns:
KNOWN_MM = 1 if previously found in MM. Includes any match in MMRF,
Bolli and Lohr, and UNKNOWN/LIKELY/ONCOGENIC by mytype
"""
# Only run function if data is passed to the optional variable "mytype"
if mytype:
mytype_annot = variants['myTYPE_Annotation'].tolist()
myTYPE_somatic = []
for entry in mytype_annot:
if pd.isnull(entry):
myTYPE_somatic.append(0)
else:
search_1 = re.search('ONCOGENIC', entry)
search_2 = re.search('LIKELY', entry)
search_3 = re.search('UNKNOWN', entry)
if search_1 or search_2 or search_3:
myTYPE_somatic.append(1)
else:
myTYPE_somatic.append(0)
variants['myTYPE_somatic'] = myTYPE_somatic
else:
variants['myTYPE_somatic'] = 0
# Define column KNOWN_MM based on annotation data
variants['KNOWN_MM'] = np.where((variants['myTYPE_somatic'] == 1) |
(variants['MMRF_Class'].notnull()) |
(variants['Bolli_Class'].notnull()) |
(variants['Lohr_Class'].notnull()), 1, 0)
variants = variants.drop('myTYPE_somatic', axis=1)
return(variants)
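# NOTE (editorial sketch): KNOWN_MM condenses the database annotations above. For example, a call
# annotated LIKELY by myTYPE but absent from MMRF/Bolli/Lohr still gets KNOWN_MM=1, whereas a call
# with no match in any source (and no ONCOGENIC/LIKELY/UNKNOWN myTYPE annotation) gets KNOWN_MM=0.
# The helper column myTYPE_somatic is dropped again before returning.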
## APPLY FLAGS FOR FILTERING
def filter_panel(variants, genes_bed):
"""
Filter MFLAG_PANEL: 1 if variant is not in BED file of regions to keep
"""
variants_bed = variants[["CHR", "START", "END", "ID_VARIANT"]]
# Turning variants file into bed format
variants_bed = pyb.BedTool.from_dataframe(variants_bed)
# Import list of genes in panel as bed format
genes = pyb.BedTool(genes_bed)
# Bed file with intersection of panel and input file
variants_inter = variants_bed.intersect(genes, u=True)
# Empty list for names of variants in intersection bed file
flaglist = []
# If bed file is not empty
if not variants_inter.head(n=1, as_string=True) == '':
# Convert intersect bed file to data frame; subset col with variant ID
flaglist = pyb.BedTool.to_dataframe(variants_inter)['name']
# Flag variant if ID is not in overlap list
variants['MFLAG_PANEL'] = np.where(variants.ID_VARIANT.isin(flaglist), 0, 1)
return(variants)
def filter_drop(variants, genes_drop):
"""
Filter MFLAG_DROP: 1 if variant is in list of genes to drop.
"""
drop = pd.read_excel(io=genes_drop)['GENE']
variants['MFLAG_DROP'] = np.where(variants.GENE.isin(drop), 1, 0)
return(variants)
def filter_igh(variants, igh_path):
"""
Filter MFLAG_IGH: 1 if variant in IGH locus
"""
variants_bed = variants[["CHR", "START", "END", "ID_VARIANT"]]
variants_bed = pyb.BedTool.from_dataframe(variants_bed)
igh = pyb.BedTool(igh_path)
variants_inter = variants_bed.intersect(igh, u=True)
flaglist = []
if not variants_inter.head(n=1, as_string=True) == '':
flaglist = pyb.BedTool.to_dataframe(variants_inter)['name']
variants['MFLAG_IGH'] = np.where(variants.ID_VARIANT.isin(flaglist), 1, 0)
return(variants)
def filter_maf(variants):
"""
Filter MFLAG_MAF: 1 if variant MAF > 3 % in exac/1000genomes
"""
variants['MFLAG_MAF'] = np.where(variants['MAX_MAF'] > 0.03, 1, 0)
return(variants)
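# Illustrative behaviour of the np.where flag pattern used by these filter_* helpers
# (hypothetical values, not from the source):
#   MAX_MAF = [0.001, 0.05]  ->  MFLAG_MAF = [0, 1]   # only the call with 5 % population MAF is flagged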
def filter_maf_cosmic(variants, mode):
"""
Filter MFLAG_MAFCOS: 1 if variant has >0.1 % MAF and not in COSMIC
For SNVs: Only counts exact and pos as in cosmic
For Indels: Counts all COSMIC.
"""
if mode == 'snv':
variants['MFLAG_MAFCOS'] = np.where(
(variants['MAX_MAF'] > 0.001) &
(variants['ANY_EXACT_POS'] == 0), 1, 0)
if mode == 'indel':
variants['MFLAG_MAFCOS'] = np.where(
(variants['MAX_MAF'] > 0.001) &
(variants['COSMIC'].isnull()), 1, 0)
return(variants)
def filter_nonpass(variants, mode):
"""
Filter MFLAG_NONPASS: 1 if NON-PASS and not in COSMIC or previously known in MM
Counts SNVs and indels as "in COSMIC" in the same way as the MAFCOS flag.
For SNVs, only missense calls (non_synonymous_codon) are flagged.
"""
if mode == 'snv':
drop = ['non_synonymous_codon']
variants['MFLAG_NONPASS'] = np.where(
(variants['FILTER'] != "PASS") &
(variants['EFFECT'].isin(drop)) &
(variants['ANY_EXACT_POS'] == 0) &
(variants['KNOWN_MM'] == 0), 1, 0)
return(variants)
variants['MFLAG_NONPASS'] = np.where(
(variants['FILTER'] != "PASS") &
(variants['COSMIC'].isnull()) &
(variants['KNOWN_MM'] == 0), 1, 0)
return(variants)
def filter_normals(variants):
"""
Filter MFLAG_NORM: 1 if variant has genomic exact or pos in normals
"""
match = ['genomic_exact', 'genomic_pos']
variants['MFLAG_NORM'] = np.where(variants['Normals_Class'] \
.isin(match), 1, 0)
return(variants)
def filter_vaf(variants):
"""
Filter MFLAG_VAF: 1 if call is non-PASS and target VAF < 1 %
"""
variants['MFLAG_VAF'] = np.where(
(variants['TARGET_VAF'] < 0.01) & (variants['FILTER'] != 'PASS'), 1, 0)
return(variants)
def filter_bidir(variants):
"""
Filter MFLAG_BIDIR: 1 if BIDIR = 0
"""
variants['MFLAG_BIDIR'] = np.where(variants['BIDIR'] == 0, 1, 0)
return(variants)
## FILTER AND EXPORT
def namecore(infile):
"""
Returns the "core" of the input file name, for use in output files.
"""
name = infile.split('/')[-1]
if re.search('.csv$', name):
return(re.sub('.csv$', '', name))
return(re.sub('.tsv.gz$', '', name))
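# Illustrative behaviour (hypothetical file names, not from the source):
#   namecore('/data/run1/sampleA_indels.tsv.gz')  ->  'sampleA_indels'
#   namecore('sampleA_snvs.csv')                  ->  'sampleA_snvs'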
def filter_export(variants, outdir, name, mode):
"""
Function properties:
1. Filters variants into "good" or "bad" based on flags.
2. Writes files with good and bad variants.
3. Creates processing summary report.
"""
# Filtering
good = variants[variants.filter(regex='MFLAG').sum(axis=1) == 0]
bad = variants[variants.filter(regex='MFLAG').sum(axis=1) > 0]
# Define output names
date = str(datetime.today()).split()[0].split("-")
name = '_'.join([name, '_'.join(date)])
goodname = join(outdir, name + '_goodcalls.csv')
badname = join(outdir, name + '_badcalls.csv')
textname = join(outdir, name + '_report.txt')
# Export files
good.to_csv(
path_or_buf=goodname,
index=False)
bad.to_csv(
path_or_buf=badname,
index=False)
# Summary report
stop = timeit.default_timer()
with open(textname, 'w') as f:
# Call the "Version" file for version info?
f.write(
f'Somatic variant processing for myTYPE\nv.1.0\n '
f'Completed time: {str(datetime.today()).split('.')[0]}\n')
f.write(f'Run time: {round(stop-START, 3)}\n')
f.write(f'####\nMode: {mode}\n')
f.write(f'Imported calls: {variants.shape[0]}\n')
f.write('Flagging variants for filtering:\n')
f.write(f'MFLAG_PANEL: Variant not in BED file of '
f'regions to keep: {variants['MFLAG_PANEL'].sum()}\n')
f.write(f'MFLAG_DROP: Variant in excluded gene: '
f'{variants['MFLAG_DROP'].sum()}\n')
f.write(f'MFLAG_IGH: In IGH locus: {variants['MFLAG_IGH'].sum()}\n')
f.write(f'MFLAG_MAF: MAF > 3 % in exac/1000genomes: '
f'{variants['MFLAG_MAF'].sum()}\n')
f.write(f'MFLAG_MAFCOS: MAF > 0.1 % and not in COSMIC '
f'(exact/pos): {variants['MFLAG_MAFCOS'].sum()}\n')
f.write(f'MFLAG_NONPASS: NON-PASS IF not in cosmic, previously '
f'known in MM, not stopgain, splicesite..: '
f'{variants['MFLAG_NONPASS'].sum()}\n')
f.write(f'MFLAG_NORM: Variant exact or pos in >0 good normals: '
f'{variants['MFLAG_NORM'].sum()}\n')
f.write(f'MFLAG_VAF: Remove NON-PASS calls with target '
f'VAF < 1 %: {variants['MFLAG_VAF'].sum()}\n')
f.write(f'MFLAG_BIDIR: Remove variants BIDIR = 0 (only reads '
f'on one strand): {variants['MFLAG_BIDIR'].sum()}\n')
f.write(f'Removing calls with >= 1 MFLAG: {bad.shape[0]}\n')
f.write(f'Calls passed filters: {good.shape[0]}\n')
return()
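# NOTE (editorial sketch): a "good" call is one where every MFLAG_* column is 0; a single flag
# routes the call to *_badcalls.csv and is counted in the report. E.g. (hypothetical):
#   MFLAG_MAF=1 with all other flags 0  ->  written to the badcalls file.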
# Main Function
def process(
mode,
infile,
outdir,
genes,
genes_drop,
genes_bed,
igh,
mmrf,
bolli,
lohr,
normals,
mytype):
"""Main function to process myTYPE SNV and indel output"""
## IMPORTING DATA
variants = import_variants(infile)
## ANNOTATIONS
variants = annotate_cosmic(variants)
if genes:
# Only runs if a path was passed to optional argument "gene"
variants = annotate_genefreq(variants, genes)
# Replace this with mutation frequency from MMRF? (and other raw data?)
variants = annotate_maf(variants)
variants = annotate_normals(variants, normals)
variants = annotate_mmrf(variants, mmrf)
variants = annotate_bolli(variants, bolli)
variants = annotate_lohr(variants, lohr)
if mytype:
# Only runs if a path was passed to optional argument "mytype"
variants = annotate_mytype(variants, mytype)
variants = annotate_known(variants, mytype)
## FILTERS
variants = filter_panel(variants, genes_bed)
if genes_drop:
variants = filter_drop(variants, genes_drop)
variants = filter_igh(variants, igh)
variants = filter_maf(variants)
variants = filter_maf_cosmic(variants, mode)
variants = filter_nonpass(variants, mode)
variants = filter_normals(variants)
variants = filter_vaf(variants)
variants = filter_bidir(variants)
## OUTPUT
name = namecore(infile)
filter_export(variants, outdir, name, mode)
print('Variant processing complete')
return(variants) # Added this here - may be necessary for test?
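# Hypothetical invocation of the pipeline (argument values are placeholders, not from the source):
# process(mode='snv', infile='sampleA_snvs.tsv.gz', outdir='out/',
#         genes=None, genes_drop=None, genes_bed='panel.bed', igh='igh.bed',
#         mmrf='mmrf_calls.tsv', bolli='bolli_calls.tsv', lohr='lohr_calls.tsv',
#         normals='normals.csv', mytype=None)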
| """variants_process main command."""
import timeit
import re
from datetime import datetime
from os.path import join
import pandas as pd
import numpy as np
import pybedtools as pyb
START = timeit.default_timer()
## IMPORT VARIANTS FILE
def import_variants(path):
"""
Determine filetype and import, returns pandas dataFrame
"""
if re.search('.csv$', path):
try:
variants = pd.read_csv(
filepath_or_buffer=path,
comment='#',
low_memory=False)
except NameError:
raise Exception(f'Error when importing file {path}')
print(f'Loaded file containing {variants.shape[0]} '
f'variant calls. Processing...')
return(variants)
elif re.search('.tsv.gz$', path):
try:
variants = pd.read_csv(
filepath_or_buffer=path,
compression='gzip',
sep='\t',
comment='#',
low_memory=False)
except NameError:
raise Exception(f'Error when importing file {path}')
print(f'Loaded file containing {variants.shape[0]} '
f'variant calls. Processing...')
return(variants)
else:
raise Exception(f'Input file {path} has unsupported '
f'extension: try .csv or .tsv.gz')
## ANNOTATION FUNCTIONS
def annotate_cosmic(variants):
"""
Generate columns:
HEME_EXACT: Number of exact matches for hematopoietic and
lymphoid tissue in cosmic.
ANY_EXACT_POS: YES/NO for any EXACT or POS match in cosmic.
"""
heme_exact = []
cosmic = variants['COSMIC'].tolist()
search_1 = 'HAEMATOPOIETIC_AND_LYMPHOID_TISSUE'
search_2 = r'(?<=HAEMATOPOIETIC_AND_LYMPHOID_TISSUE=)\w+'
for entry in cosmic:
if pd.isnull(entry):
heme_exact.append(None)
else:
first = entry.split('|')[0]
if re.search('^GENOMIC_EXACT', first):
if re.search(search_1, first):
count = re.search(search_2, first)[0]
heme_exact.append(count)
else:
heme_exact.append(None)
else:
heme_exact.append(None)
variants['HEME_EXACT'] = heme_exact
any_exact_pos = []
for entry in cosmic:
if pd.isnull(entry):
any_exact_pos.append(0)
elif re.search(
'GENOMIC_EXACT', entry) or re.search(
'GENOMIC_POS', entry):
any_exact_pos.append(1)
else:
any_exact_pos.append(0)
variants['ANY_EXACT_POS'] = any_exact_pos
return(variants)
def annotate_genefreq(variants, genes):
"""
Generate column:
MAX_MUTFREQ: Maximal mutation frequency in gene
as previously published in large MM studies.
"""
freqlist = pd.read_excel(io=genes)
freqlist['MAX_MUTFREQ'] = round(
freqlist.filter(regex='freq').max(axis=1), 1)
freqlist = freqlist[['GENE', 'MAX_MUTFREQ']]
variants = pd.merge(variants, freqlist, how='left')
return(variants)
def annotate_maf(variants):
"""
Generate column:
MAX_MAF: Maximal MAF of variant in any normal database
"""
variants['MAX_MAF'] = 0 # Sets variable to 0 if frequency is not reported
variants['MAX_MAF'] = variants.filter(regex='MAF').max(axis=1)
return(variants)
def annotate_normals(variants, path_normals):
"""
Annotates variants with internal normal controls:
Class: Close (chr, start within 10 bp),
Pos (chr, start),
Exact (chr, start, ref, alt)
Frequency: Number of matches
VAF: Median VAF
Q25: 25th VAF-quartile
Q75: 75th VAF-quartile
Positions: START position
Change: REF > ALT
"""
normals = pd.read_csv(
filepath_or_buffer=path_normals)
normals = normals.set_index(['CHR','START'])
def annot_row(row, data):
thres = 10
chrom = str(row['CHR'])
start = row['START']
po = (chrom, start) in data.index
close = data.ix[(chrom, start-thres):(chrom, start+thres)]
if po:
pos = data.loc[(chrom, start)]
exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]
if len(exact) > 0:
ex_out = ['genomic_exact',
exact['count'].iloc[0],
exact['MEDIAN_VAF'].iloc[0],
exact['VAF_Q25'].iloc[0],
exact['VAF_Q75'].iloc[0],
start,
exact['REF'].iloc[0] + '>' + exact['ALT'].iloc[0]
]
return pd.Series(ex_out)
else:
pos_out = ['genomic_pos',
', '.join(pos['count'].astype(str)),
', '.join(pos['MEDIAN_VAF'].astype(str)),
', '.join(pos['VAF_Q25'].astype(str)),
', '.join(pos['VAF_Q75'].astype(str)),
', '.join([str(i) for i in pos.index.\
get_level_values('START').tolist()]),
', '.join([str(a) + '>' + str(b) for a, b in \
zip(pos['REF'], pos['ALT'])])
]
return pd.Series(pos_out)
elif close.shape[0] > 0:
cl_out = ['genomic_close',
', '.join(close['count'].astype(str).tolist()),
', '.join(close['MEDIAN_VAF'].astype(str).tolist()),
', '.join(close['VAF_Q25'].astype(str).tolist()),
', '.join(close['VAF_Q75'].astype(str).tolist()),
', '.join([str(i) for i in close.index.\
get_level_values('START').tolist()]),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(close['REF'].tolist(), close['ALT'].tolist())]))
]
return pd.Series(cl_out)
else:
return pd.Series([None]*7)
out_names = ["_Class", "_Frequency", "_VAF", "_Q25", "_Q75",
"_Position", "_Change"]
out_names = ['Normals' + s for s in out_names]
variants[out_names] = variants.apply(lambda row: annot_row(row, normals),
axis=1)
return(variants)
def annotate_mmrf(variants, path_mmrf):
"""
Annotates variants with MMRF data:
Class: Close (chr, start within 10 bp),
Pos (chr, start),
Exact (chr, start, ref, alt)
Frequency: Number of matches
VAF: Median VAF
Q25: 25th VAF-quartile
Q75: 75th VAF-quartile
Positions: START position
Change: REF > ALT
"""
mmrf = pd.read_csv(filepath_or_buffer=path_mmrf, sep='\t')
mmrf = mmrf[["#CHROM", "POS", "REF", "ALT", "GEN[1].AR"]]
mmrf = mmrf.drop_duplicates() ## What are these duplicates?
mmrf.columns = ["CHR", "START", "REF", "ALT", "TARGET_VAF"]
def annot_row(row, data):
thres = 10
subdat = data[data['CHR'].astype(str) == str(row['CHR'])]
po = row['START'] in subdat['START'].as_matrix().astype(int)
close = (abs(subdat['START'].as_matrix() \
.astype(int) - row['START']) < thres)
if po:
pos = subdat[subdat['START'] == row['START']]
exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]
if len(exact) > 0:
ex_out = ['genomic_exact',
exact['REF'].count(),
exact['TARGET_VAF'].median(),
exact['TARGET_VAF'].quantile(q=0.25),
exact['TARGET_VAF'].quantile(q=0.75),
', '.join(set(exact['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(exact['REF'].tolist(), exact['ALT'].tolist())]))
]
return pd.Series(ex_out)
else:
pos_out = ['genomic_pos',
pos['REF'].count(),
pos['TARGET_VAF'].median(),
pos['TARGET_VAF'].quantile(q=0.25),
pos['TARGET_VAF'].quantile(q=0.75),
', '.join(set(pos['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(pos['REF'].tolist(), pos['ALT'].tolist())]))
]
return pd.Series(pos_out)
elif close.any():
close = subdat[close]
cl_out = ['genomic_close',
', '.join(close.groupby(['ALT', 'REF']).size() \
.astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.median().astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.quantile(q=0.25).astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.quantile(q=0.75).astype(str).tolist()),
', '.join(set(close['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(close['REF'].tolist(), close['ALT'].tolist())]))
]
return pd.Series(cl_out)
else:
return pd.Series([None]*7)
out_names = ["_Class", "_Frequency", "_VAF", "_Q25", "_Q75",
"_Position", "_Change"]
out_names = ['MMRF' + s for s in out_names]
variants[out_names] = variants.apply(lambda row: annot_row(row, mmrf),
axis=1)
return(variants)
def annotate_bolli(variants, path_bolli):
"""
Annotates variants with Bolli data:
Class: Close (chr, start within 10 bp),
Pos (chr, start),
Exact (chr, start, ref, alt)
Frequency: Number of matches
Positions: START position
Change: REF > ALT
Annotation: Manual annotation category.
"""
bolli = pd.read_csv(filepath_or_buffer=path_bolli, sep='\t')
bolli = bolli[["CHR", "START", "WT", "MT", "Variant_class"]]
bolli.columns = ["CHR", "START", "REF", "ALT", "ANNOTATION"]
def annot_row(row, data):
thres = 10
subdat = data[data['CHR'].astype(str) == str(row['CHR'])]
po = row['START'] in subdat['START'].as_matrix().astype(int)
close = (abs(subdat['START'].as_matrix() \
.astype(int) - row['START']) < thres)
if po:
pos = subdat[subdat['START'] == row['START']]
exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]
if len(exact) > 0:
ex_out = ['genomic_exact',
exact['REF'].count(),
', '.join(set(exact['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(exact['REF'].tolist(), exact['ALT'].tolist())])),
', '.join(set(exact['ANNOTATION']))
]
return pd.Series(ex_out)
else:
pos_out = ['genomic_pos',
pos['REF'].count(),
', '.join(set(pos['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(pos['REF'].tolist(), pos['ALT'].tolist())])),
', '.join(set(pos['ANNOTATION']))
]
return pd.Series(pos_out)
elif close.any():
close = subdat[close]
cl_out = ['genomic_close',
', '.join(close.groupby(['ALT', 'REF']).size() \
.astype(str).tolist()),
', '.join(set(close['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(close['REF'].tolist(), close['ALT'].tolist())])),
', '.join(set(close['ANNOTATION']))
]
return pd.Series(cl_out)
else:
return pd.Series([None]*5)
out_names = ["_Class", "_Frequency",
"_Position", "_Change", "_Annotation"]
out_names = ['Bolli' + s for s in out_names]
variants[out_names] = variants.apply(lambda row: annot_row(row, bolli),
axis=1)
return(variants)
def annotate_lohr(variants, lohr_path):
"""
Annotates variants with lohr data:
Class: Close (chr, start within 10 bp),
Pos (chr, start),
Exact (chr, start, ref, alt)
Frequency: Number of matches
Positions: START position
Change: REF > ALT
"""
lohr = pd.read_csv(filepath_or_buffer=lohr_path, sep='\t')
lohr = lohr[["Chromosome", "Start_Position", "Reference_Allele",
"Tumor_Seq_Allele2"]]
lohr.columns = ["CHR", "START", "REF", "ALT"]
def annot_row(row, data):
thres = 10
subdat = data[data['CHR'].astype(str) == str(row['CHR'])]
po = row['START'] in subdat['START'].as_matrix().astype(int)
close = (abs(subdat['START'].as_matrix() \
.astype(int) - row['START']) < thres)
if po:
pos = subdat[subdat['START'] == row['START']]
exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]
if len(exact) > 0:
ex_out = ['genomic_exact',
exact['REF'].count(),
', '.join(set(exact['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(exact['REF'].tolist(), exact['ALT'].tolist())]))
]
return pd.Series(ex_out)
else:
pos_out = ['genomic_pos',
pos['REF'].count(),
', '.join(set(pos['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(pos['REF'].tolist(), pos['ALT'].tolist())]))
]
return pd.Series(pos_out)
elif close.any():
close = subdat[close]
cl_out = ['genomic_close',
', '.join(close.groupby(['ALT', 'REF']).size() \
.astype(str).tolist()),
', '.join(set(close['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(close['REF'].tolist(), close['ALT'].tolist())]))
]
return pd.Series(cl_out)
else:
return pd.Series([None]*4)
out_names = ["_Class", "_Frequency",
"_Position", "_Change"]
out_names = ['Lohr' + s for s in out_names]
variants[out_names] = variants.apply(lambda row: annot_row(row, lohr),
axis=1)
return(variants)
def annotate_mytype(variants, path_mytype):
"""
Annotates variants with previous myTYPE data:
Class: Close (chr, start within 10 bp),
Pos (chr, start),
Exact (chr, start, ref, alt)
Frequency: Number of matches
VAF: Median VAF
Q25: 25th VAF-quartile
Q75: 75th VAF-quartile
Positions: START position
Change: REF > ALT
Annotation: Manual annotation category.
"""
mytype = pd.read_csv(filepath_or_buffer=path_mytype, sep=',')
mytype = mytype[["CHR", "START", "REF", "ALT",
"MANUAL_ANNOTATION", "TARGET_VAF"]]
mytype.columns = ["CHR", "START", "REF", "ALT",
"ANNOTATION", "TARGET_VAF"]
def annot_row(row, data):
thres = 10
subdat = data[data['CHR'].astype(str) == str(row['CHR'])]
po = row['START'] in subdat['START'].as_matrix().astype(int)
close = (abs(subdat['START'].as_matrix() \
.astype(int) - row['START']) < thres)
if po:
pos = subdat[subdat['START'] == row['START']]
exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]
if len(exact) > 0:
ex_out = ['genomic_exact',
exact['REF'].count(),
exact['TARGET_VAF'].median(),
exact['TARGET_VAF'].quantile(q=0.25),
exact['TARGET_VAF'].quantile(q=0.75),
', '.join(set(exact['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(exact['REF'].tolist(), exact['ALT'].tolist())])),
', '.join(set(exact['ANNOTATION']))
]
return pd.Series(ex_out)
else:
pos_out = ['genomic_pos',
pos['REF'].count(),
pos['TARGET_VAF'].median(),
pos['TARGET_VAF'].quantile(q=0.25),
pos['TARGET_VAF'].quantile(q=0.75),
', '.join(set(pos['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(pos['REF'].tolist(), pos['ALT'].tolist())])),
', '.join(set(pos['ANNOTATION']))
]
return pd.Series(pos_out)
elif close.any():
close = subdat[close]
cl_out = ['genomic_close',
', '.join(close.groupby(['ALT', 'REF']).size() \
.astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.median().astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.quantile(q=0.25).astype(str).tolist()),
', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \
.quantile(q=0.75).astype(str).tolist()),
', '.join(set(close['START'].astype(str))),
', '.join(set([str(a) + '>' + str(b) for a, b in \
zip(close['REF'].tolist(), close['ALT'].tolist())])),
', '.join(set(close['ANNOTATION']))
]
return pd.Series(cl_out)
else:
return pd.Series([None]*8)
out_names = ["_Class", "_Frequency", "_VAF", "_Q25", "_Q75",
"_Position", "_Change", "_Annotation"]
out_names = ['myTYPE' + s for s in out_names]
variants[out_names] = variants.apply(lambda row: annot_row(row, mytype),
axis=1)
return(variants)
def annotate_known(variants, mytype):
"""
Generate columns:
KNOWN_MM = 1 if previously found in MM. Includes any match in MMRF,
Bolli and Lohr, and UNKNOWN/LIKELY/ONCOGENIC by mytype
"""
# Only run function if data is passed to the optional variable "mytype"
if mytype:
mytype_annot = variants['myTYPE_Annotation'].tolist()
myTYPE_somatic = []
for entry in mytype_annot:
if pd.isnull(entry):
myTYPE_somatic.append(0)
else:
search_1 = re.search('ONCOGENIC', entry)
search_2 = re.search('LIKELY', entry)
search_3 = re.search('UNKNOWN', entry)
if search_1 or search_2 or search_3:
myTYPE_somatic.append(1)
else:
myTYPE_somatic.append(0)
variants['myTYPE_somatic'] = myTYPE_somatic
else:
variants['myTYPE_somatic'] = 0
# Define column KNOWN_MM based on annotation data
variants['KNOWN_MM'] = np.where((variants['myTYPE_somatic'] == 1) |
(variants['MMRF_Class'].notnull()) |
(variants['Bolli_Class'].notnull()) |
(variants['Lohr_Class'].notnull()), 1, 0)
variants = variants.drop('myTYPE_somatic', axis=1)
return(variants)
## APPLY FLAGS FOR FILTERING
def filter_panel(variants, genes_bed):
"""
Filter MFLAG_PANEL: 1 if variant is not in BED file of regions to keep
"""
variants_bed = variants[["CHR", "START", "END", "ID_VARIANT"]]
# Turning variants file into bed format
variants_bed = pyb.BedTool.from_dataframe(variants_bed)
# Import list of genes in panel as bed format
genes = pyb.BedTool(genes_bed)
# Bed file with intersection of panel and input file
variants_inter = variants_bed.intersect(genes, u=True)
# Empty list for names of variants in intersection bed file
flaglist = []
# If bed file is not empty
if not variants_inter.head(n=1, as_string=True) == '':
# Convert intersect bed file to data frame; subset col with variant ID
flaglist = pyb.BedTool.to_dataframe(variants_inter)['name']
# Flag variant if ID is not in overlap list
variants['MFLAG_PANEL'] = np.where(variants.ID_VARIANT.isin(flaglist), 0, 1)
return(variants)
def filter_drop(variants, genes_drop):
"""
Filter MFLAG_DROP: 1 if variant is in list of genes to drop.
"""
drop = pd.read_excel(io=genes_drop)['GENE']
variants['MFLAG_DROP'] = np.where(variants.GENE.isin(drop), 1, 0)
return(variants)
def filter_igh(variants, igh_path):
"""
Filter MFLAG_IGH: 1 if variant in IGH locus
"""
variants_bed = variants[["CHR", "START", "END", "ID_VARIANT"]]
variants_bed = pyb.BedTool.from_dataframe(variants_bed)
igh = pyb.BedTool(igh_path)
variants_inter = variants_bed.intersect(igh, u=True)
flaglist = []
if not variants_inter.head(n=1, as_string=True) == '':
flaglist = pyb.BedTool.to_dataframe(variants_inter)['name']
variants['MFLAG_IGH'] = np.where(variants.ID_VARIANT.isin(flaglist), 1, 0)
return(variants)
def filter_maf(variants):
"""
Filter MFLAG_MAF: 1 if variant MAF > 3 % in exac/1000genomes
"""
variants['MFLAG_MAF'] = np.where(variants['MAX_MAF'] > 0.03, 1, 0)
return(variants)
def filter_maf_cosmic(variants, mode):
"""
Filter MFLAG_MAFCOS: 1 if variant has >0.1 % MAF and not in COSMIC
For SNVs: Only counts exact and pos as in cosmic
For Indels: Counts all COSMIC.
"""
if mode == 'snv':
variants['MFLAG_MAFCOS'] = np.where(
(variants['MAX_MAF'] > 0.001) &
(variants['ANY_EXACT_POS'] == 0), 1, 0)
if mode == 'indel':
variants['MFLAG_MAFCOS'] = np.where(
(variants['MAX_MAF'] > 0.001) &
(variants['COSMIC'].isnull()), 1, 0)
return(variants)
def filter_nonpass(variants, mode):
"""
Filter MFLAG_NONPASS: 1 if NON-PASS and not in COSMIC or previously known in MM
Counts SNVs and indels as "in COSMIC" in the same way as the MAFCOS flag.
For SNVs, only missense calls (non_synonymous_codon) are flagged.
"""
if mode == 'snv':
drop = ['non_synonymous_codon']
variants['MFLAG_NONPASS'] = np.where(
(variants['FILTER'] != "PASS") &
(variants['EFFECT'].isin(drop)) &
(variants['ANY_EXACT_POS'] == 0) &
(variants['KNOWN_MM'] == 0), 1, 0)
return(variants)
variants['MFLAG_NONPASS'] = np.where(
(variants['FILTER'] != "PASS") &
(variants['COSMIC'].isnull()) &
(variants['KNOWN_MM'] == 0), 1, 0)
return(variants)
def filter_normals(variants):
"""
Filter MFLAG_NORM: 1 if variant has genomic exact or pos in normals
"""
match = ['genomic_exact', 'genomic_pos']
variants['MFLAG_NORM'] = np.where(variants['Normals_Class'] \
.isin(match), 1, 0)
return(variants)
def filter_vaf(variants):
"""
Filter MFLAG_VAF: 1 if target VAF < 1 %
"""
variants['MFLAG_VAF'] = np.where(
(variants['TARGET_VAF'] < 0.01) & (variants['FILTER'] != 'PASS'), 1, 0)
return(variants)
def filter_bidir(variants):
"""
Filter MFLAG_BIDIR: 1 if BIDIR = 0
"""
variants['MFLAG_BIDIR'] = np.where(variants['BIDIR'] == 0, 1, 0)
return(variants)
## FILTER AND EXPORT
def namecore(infile):
"""
Returns the "core" of the input file name, for use in output files.
"""
name = infile.split('/')[-1]
if re.search('.csv$', name):
return(re.sub('.csv$', '', name))
return(re.sub('.tsv.gz$', '', name))
def filter_export(variants, outdir, name, mode):
"""
Function properties:
1. Filters variants into "good" or "bad" based on flags.
2. Writes files with good and bad variants.
3. Creates processing summary report.
"""
# Filtering
good = variants[variants.filter(regex='MFLAG').sum(axis=1) == 0]
bad = variants[variants.filter(regex='MFLAG').sum(axis=1) > 0]
# Define output names
date = str(datetime.today()).split()[0].split("-")
name = '_'.join([name, '_'.join(date)])
goodname = join(outdir, name + '_goodcalls.csv')
badname = join(outdir, name + '_badcalls.csv')
textname = join(outdir, name + '_report.txt')
# Export files
good.to_csv(
path_or_buf=goodname,
index=False)
bad.to_csv(
path_or_buf=badname,
index=False)
# Summary report
stop = timeit.default_timer()
with open(textname, 'w') as f:
# Call the "Version" file for version info?
f.write(
f'Somatic variant processing for myTYPE\nv.1.0\n '
f'Completed time: {str(datetime.today()).split(".")[0]}\n')
f.write(f'Run time: {round(stop-START, 3)}\n')
f.write(f'####\nMode: {mode}\n')
f.write(f'Imported calls: {variants.shape[0]}\n')
f.write('Flagging variants for filtering:\n')
f.write(f'MFLAG_PANEL: Variant not in BED file of '
f'regions to keep: {variants["MFLAG_PANEL"].sum()}\n')
f.write(f'MFLAG_DROP: Variant in excluded gene: '
f'{variants["MFLAG_DROP"].sum()}\n')
f.write(f'MFLAG_IGH: In IGH locus: {variants["MFLAG_IGH"].sum()}\n')
f.write(f'MFLAG_MAF: MAF > 3 % in exac/1000genomes: '
f'{variants["MFLAG_MAF"].sum()}\n')
f.write(f'MFLAG_MAFCOS: MAF > 0.1 % and not in COSMIC '
f'(exact/pos): {variants["MFLAG_MAFCOS"].sum()}\n')
f.write(f'MFLAG_NONPASS: NON-PASS IF not in cosmic, previously '
f'known in MM, not stopgain, splicesite..: '
f'{variants["MFLAG_NONPASS"].sum()}\n')
f.write(f'MFLAG_NORM: Variant exact or pos in >0 good normals: '
f'{variants["MFLAG_NORM"].sum()}\n')
f.write(f'MFLAG_VAF: Remove NON-PASS calls with target '
f'VAF < 1 %: {variants["MFLAG_VAF"].sum()}\n')
f.write(f'MFLAG_BIDIR: Remove variants BIDIR = 0 (only reads '
f'on one strand): {variants["MFLAG_BIDIR"].sum(0)}\n')
f.write(f'Removing calls with >= 1 MFLAG: {bad.shape[0]}\n')
f.write(f'Calls passed filters: {good.shape[0]}\n')
return()
# Main Function
def process(
mode,
infile,
outdir,
genes,
genes_drop,
genes_bed,
igh,
mmrf,
bolli,
lohr,
normals,
mytype):
"""Main function to process myTYPE SNV and indel output"""
## IMPORTING DATA
variants = import_variants(infile)
## ANNOTATIONS
variants = annotate_cosmic(variants)
if genes:
# Only runs if a path was passed to optional argument "gene"
variants = annotate_genefreq(variants, genes)
# Replace this with mutation frequency from MMRF? (and other raw data?)
variants = annotate_maf(variants)
variants = annotate_normals(variants, normals)
variants = annotate_mmrf(variants, mmrf)
variants = annotate_bolli(variants, bolli)
variants = annotate_lohr(variants, lohr)
if mytype:
# Only runs if a path was passed to optional argument "mytype"
variants = annotate_mytype(variants, mytype)
variants = annotate_known(variants, mytype)
## FILTERS
variants = filter_panel(variants, genes_bed)
if genes_drop:
variants = filter_drop(variants, genes_drop)
variants = filter_igh(variants, igh)
variants = filter_maf(variants)
variants = filter_maf_cosmic(variants, mode)
variants = filter_nonpass(variants, mode)
variants = filter_normals(variants)
variants = filter_vaf(variants)
variants = filter_bidir(variants)
## OUTPUT
name = namecore(infile)
filter_export(variants, outdir, name, mode)
print('Variant processing complete')
return(variants) # Added this here - may be necessary for test?
|
import os
import torch
import glob
import numpy as np
import scipy.sparse as sp
import yaml
from sklearn.preprocessing import StandardScaler
from shaDow.globals import git_rev, timestamp, Logger
from torch_scatter import scatter
from copy import deepcopy
from typing import List, Union
from shaDow import TRAIN, VALID, TEST
from shaDow.data_converter import convert2shaDow, to_undirected
def load_data(prefix, dataset, config_data, os_='linux'):
Logger.printf("Loading training data..")
prefix_l = prefix['local']
fs_shadow = ['adj_full_raw.np[yz]', 'adj_train_raw.np[yz]', 'label_full.npy', 'feat_full.npy', 'split.npy']
if not all(glob.glob(f"{prefix_l}/{dataset}/{f}") for f in fs_shadow):
convert2shaDow(dataset, prefix_l)
role = np.load(f"./{prefix_l}/{dataset}/split.npy", allow_pickle=True)
if type(role) == np.ndarray:
role = role[()]
else:
assert type(role) == dict
# role is used as index, which is required to be int64 (node_set won't take much mem anyways)
node_set = {TRAIN: np.asarray(role[TRAIN], dtype=np.int64),
VALID: np.asarray(role[VALID], dtype=np.int64),
TEST : np.asarray(role[TEST], dtype=np.int64)}
# Load adj. If to_undirected conversion is requested and the undirected adj has already been
# stored as an external file, skip the in-program conversion and load the undirected adj directly.
bin_adj_files = {TRAIN: {'indptr': None, 'indices': None, 'data': None},
VALID: {'indptr': None, 'indices': None, 'data': None},
TEST: {'indptr': None, 'indices': None, 'data': None}}
def fill_bin_adj_dict(mode_, split_, type_):
for d in ['indptr', 'indices', 'data']:
bin_adj_files[mode_][d] = f"{prefix_l}/{dataset}/cpp/adj_{split_}_{type_}_{d}.bin"
if config_data['to_undirected']:
if (adj_full := load_adj(prefix_l, dataset, 'undirected', 'full')) is None:
adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
adj_full = to_undirected(adj_full)
fill_bin_adj_dict(VALID, 'full', 'undirected')
fill_bin_adj_dict(TEST, 'full', 'undirected')
if config_data['transductive']:
adj_train = adj_full
fill_bin_adj_dict(TRAIN, 'full', 'undirected')
elif (adj_train := load_adj(prefix_l, dataset, 'undirected', 'train')) is None:
adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
adj_train = to_undirected(adj_train)
fill_bin_adj_dict(TRAIN, 'train', 'undirected')
assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
else:
adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
fill_bin_adj_dict(VALID, 'full', 'raw')
fill_bin_adj_dict(TEST, 'full', 'raw')
if config_data['transductive']:
adj_train = adj_full
fill_bin_adj_dict(TRAIN, 'full', 'raw')
else:
adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
fill_bin_adj_dict(TRAIN, 'train', 'raw')
bin_adj_files = validate_bin_file(bin_adj_files)
Logger.printf(f"SETTING TO {"TRANS" if config_data["transductive"] else "IN"}DUCTIVE LEARNING", style="red")
label_full = np.load(f"./{prefix_l}/{dataset}/label_full.npy")
label_full = torch.from_numpy(label_full)
# ======= deal with feats =======
mode_norm = 'all' if config_data['transductive'] else 'train'
if config_data['norm_feat'] and os.path.isfile(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy"):
feats = np.load(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy")
Logger.printf(f"Loading '{mode_norm}'-normalized features", style='yellow')
else:
feats = np.load(f"./{prefix_l}/{dataset}/feat_full.npy")
if config_data['norm_feat']:
feats_fit = feats if config_data['transductive'] else feats[node_set[TRAIN]]
scaler = StandardScaler()
scaler.fit(feats_fit)
feats = scaler.transform(feats)
Logger.printf(f"Normalizing node features (mode = {mode_norm})", style="yellow")
else:
Logger.printf("Not normalizing node features", style="yellow")
feats = torch.from_numpy(feats.astype(np.float32, copy=False))
Logger.printf("Done loading training data..")
return {'adj_full' : adj_full,
'adj_train' : adj_train,
'feat_full' : feats,
'label_full': label_full,
'node_set' : node_set,
'bin_adj_files': bin_adj_files}
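# NOTE (editorial sketch): the dict returned above is what the trainer consumes, e.g.
# data['adj_full'] / data['adj_train'] are scipy sparse adjacency matrices, data['feat_full'] and
# data['label_full'] are torch tensors, data['node_set'][TRAIN] is an int64 index array, and
# data['bin_adj_files'][mode] is either a dict of *.bin paths for the C++ sampler or None when the
# binary files are missing (see validate_bin_file below).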
def parse_n_prepare(task, args, name_graph, dir_log, os_='linux'):
# [config]
if args.configs is not None:
config_train = args.configs
else:
assert task in ['inference', 'postproc']
if task == 'inference':
if args.inference_configs is None:
assert not args.compute_complexity_only
dir_candy = args.inference_dir
else:
assert args.inference_dir is None and args.compute_complexity_only
dir_candy = None
config_train = args.inference_configs
else:
if args.postproc_dir is not None:
dir_candy = args.postproc_dir
else:
with open(args.postproc_configs) as f:
config_temp = yaml.load(f, Loader=yaml.FullLoader)
if 'dir_pred_mat' in config_temp: # all such dirs MUST contain the same yaml
dir_candy = config_temp['dir_pred_mat'][0]
elif 'dir_emb_mat' in config_temp: # all ens models should have the same arch (only differs in sampler)
dir_candy = next(iter(config_temp['dir_emb_mat'].values()))[0]
else:
raise NotImplementedError
if dir_candy is not None:
assert os.path.isdir(dir_candy)
f_yml = [f for f in os.listdir(dir_candy) if f.split('.')[-1] in ['yml', 'yaml']]
assert len(f_yml) == 1
config_train = f"{dir_candy}/{f_yml[0]}"
with open(config_train) as f_config_train:
config_train = yaml.load(f_config_train, Loader=yaml.FullLoader)
config_train_copy = deepcopy(config_train)
# [data]
config_data = {"to_undirected" : False,
"transductive" : False,
"norm_feat" : True}
config_data.update(config_train['data'])
# [arch]
arch_gnn = { # default values
"dim" : -1,
"aggr" : "sage",
"residue" : "none",
"pooling" : "center",
"loss" : "softmax",
"num_layers" : -1,
"act" : "I",
"heads" : -1,
"feature_augment" : "hops",
"feature_smoothen" : "none",
"label_smoothen" : "none", # label_smoothen is only considered if use_label != none
"ensemble_act" : "leakyrelu",
"branch_sharing" : False,
"use_label" : "none"
}
arch_gnn.update(config_train["architecture"])
assert arch_gnn['aggr'] in ['sage', 'gat', 'gatscat', 'gcn', 'mlp', 'gin', 'sgc', 'sign']
assert arch_gnn['use_label'].lower() in ['all', 'none', 'no_valid']
assert arch_gnn['pooling'].lower().split('-')[0] in ['mean', 'max', 'sum', 'center', 'sort']
assert arch_gnn['residue'].lower() in ['sum', 'concat', 'max', 'none']
assert arch_gnn['feature_augment'].lower() in ['hops', 'ppr', 'none']
if arch_gnn["feature_augment"] and arch_gnn["feature_augment"].lower() != "none":
arch_gnn["feature_augment"] = set(k for k in arch_gnn["feature_augment"].split("-"))
else:
arch_gnn['feature_augment'] = set()
# [params]
params_train = {
"lr" : 0.01,
"dropedge" : 0.0,
"ensemble_dropout" : "none"
}
params_train.update(config_train["hyperparameter"])
params_train["lr"] = float(params_train["lr"])
# [sampler]
sampler_preproc, sampler_train = [], []
for s in config_train['sampler']:
phase = s.pop('phase')
if phase == 'preprocess':
sampler_preproc.append(s)
elif phase == 'train':
sampler_train.append(s)
else:
raise NotImplementedError
batch_size = config_train["hyperparameter"]["batch_size"]
config_sampler_preproc = {"batch_size": batch_size, "configs": sampler_preproc}
config_sampler_train = {"batch_size": batch_size, "configs": sampler_train}
# add self-edges for certain architectures: e.g., for GAT, the gradient hits a divide-by-zero without self-edges
if arch_gnn["aggr"] in ["gcn", "gat", "gatscat"]:
for sc in config_sampler_train["configs"]:
num_ens = [len(v) for k, v in sc.items() if k != 'method']
assert max(num_ens) == min(num_ens)
sc["add_self_edge"] = [True] * num_ens[0]
# [copy yml]
name_key = f"{arch_gnn["aggr"]}_{arch_gnn["num_layers"]}"
dir_log_full = log_dir(task, config_train_copy, name_key, dir_log, name_graph, git_rev, timestamp)
return params_train, config_sampler_preproc, config_sampler_train, config_data, arch_gnn, dir_log_full
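# NOTE (editorial sketch): parse_n_prepare returns, in order, the training hyperparameters, the
# preprocess-phase and train-phase sampler configs (each {'batch_size': ..., 'configs': [...]}),
# the data config, the resolved architecture dict and the run's log directory. Hypothetical use:
#   params, cfg_smpl_pre, cfg_smpl_train, cfg_data, arch, logdir = \
#       parse_n_prepare('train', args, 'ogbn-arxiv', './logs')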
def parse_n_prepare_postproc(dir_load, f_config, name_graph, dir_log, arch_gnn, logger):
if f_config is not None:
with open(f_config) as f:
config_postproc = yaml.load(f, Loader=yaml.FullLoader)
name_key = f"postproc-{arch_gnn["aggr"]}_{arch_gnn["num_layers"]}"
log_dir('postproc', config_postproc, name_key, dir_log, name_graph, git_rev, timestamp)
skip_instantiate = []
if 'check_record' in config_postproc:
load_acc_record = config_postproc['check_record']
else:
load_acc_record = True
if config_postproc['method'] == 'cs': # C&S
acc_record = [] if load_acc_record else None
if dir_load is not None:
if 'dir_pred_mat' not in config_postproc:
config_postproc['dir_pred_mat'] = [dir_load]
elif os.path.realpath(dir_load) not in [os.path.realpath(pc) for pc in config_postproc['dir_pred_mat']]:
config_postproc['dir_pred_mat'].append(dir_load)
config_postproc['pred_mat'] = [None] * len(config_postproc['dir_pred_mat'])
for i, di in enumerate(config_postproc['dir_pred_mat']):
if load_acc_record:
acc_record.append(logger.decode_csv('final', di))
for f in os.listdir(di):
if 'cs' == f.split('.')[-1] and f.startswith('pred_mat'):
config_postproc['pred_mat'][i] = torch.load(f"{di}/{f}")
break
if all(m is not None for m in config_postproc['pred_mat']):
skip_instantiate = ['data', 'model']
elif config_postproc['method'] == 'ensemble': # Variant of subgraph ensemble as postproc
acc_record = {s: [] for s in config_postproc['dir_emb_mat']} if load_acc_record else None
assert dir_load is None
config_postproc['emb_mat'] = {k: [None] * len(v) for k, v in config_postproc['dir_emb_mat'].items()}
for sname, dirs_l in config_postproc['dir_emb_mat'].items():
for i, di in enumerate(dirs_l):
if load_acc_record:
acc_record[sname].append(logger.decode_csv('final', di))
for f in os.listdir(di):
if 'ens' == f.split('.')[-1] and f.startswith('emb_mat'):
config_postproc['emb_mat'][sname][i] = torch.load(f"{di}/{f}")
break
if all(m is not None for s, mat_l in config_postproc['emb_mat'].items() for m in mat_l):
skip_instantiate = ['model'] # you have to load data (role, labels) anyways
return config_postproc, acc_record, skip_instantiate
def log_dir(task, config_new, yml_name_key, dir_log, name_graph, git_rev, timestamp):
if task == 'train':
prefix = 'running'
elif task == 'inference':
prefix = 'INF'
elif task == 'postproc':
prefix = 'POST'
else:
raise NotImplementedError
log_dir = f"{dir_log}/{name_graph}/{prefix}/{timestamp}-{git_rev.strip():s}/"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
yml_file = f"{log_dir}/{yml_name_key}.yml"
with open(yml_file, 'w') as f:
yaml.dump(config_new, f, default_flow_style=False, sort_keys=False)
return log_dir
# =============== #
# ADJ UTILS #
# =============== #
def get_deg_torch_sparse(adj):
return scatter(adj._values(), adj._indices()[0], reduce="sum")
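# Illustrative behaviour (hypothetical 2-node graph with edges 0->1 and 1->0):
#   adj._indices() = [[0, 1], [1, 0]], adj._values() = [1., 1.]  ->  degrees = [1., 1.]
# i.e. edge values are summed per source node via torch_scatter.scatter.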
def adj_norm_rw(adj, deg=None, dropedge=0., sort_indices=True):
"""
Normalize adj according to the method of rw normalization.
Note that sym norm is used in the original GCN paper (kipf),
while rw norm is used in GraphSAGE and some other variants.
# Procedure:
# 1. adj add self-connection --> adj'
# 2. D' deg matrix from adj'
# 3. norm by D^{-1} x adj'
if sort_indices is True, we re-sort the indices of the returned adj
Note that after 'dot' the indices of a node would be in descending order
rather than ascending order
"""
if type(adj) == torch.Tensor:
assert deg is None
assert torch.sum(adj._values()).cpu().long().item() == adj._values().size()[0]
_deg_orig = get_deg_torch_sparse(adj)
if dropedge > 0:
masked_indices = torch.floor(torch.rand(int(adj._values().size()[0] * dropedge)) * adj._values().size()[0]).long()
adj._values()[masked_indices] = 0
_deg_dropped = get_deg_torch_sparse(adj)
else:
_deg_dropped = _deg_orig
_deg = torch.repeat_interleave(_deg_dropped, _deg_orig.long())
_deg = torch.clamp(_deg, min=1)
_val = adj._values()
_val /= _deg
adj_norm = adj
else:
assert dropedge == 0., "not supporting dropedge for scipy csr matrices"
assert adj.shape[0] == adj.shape[1]
diag_shape = (adj.shape[0], adj.shape[1])
D = adj.sum(1).flatten() if deg is None else deg
D = np.clip(D, 1, None) # if deg_v == 0, it doesn't matter what value we clip it to.
norm_diag = sp.dia_matrix((1 / D, 0), shape=diag_shape)
adj_norm = norm_diag.dot(adj)
if sort_indices:
adj_norm.sort_indices()
return adj_norm
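# Worked example for the scipy branch (hypothetical, no dropedge): for a 2-node graph with a single
# edge 0->1, D = [1, 0] is clipped to [1, 1], so the normalized adj keeps row 0 as [0, 1] and row 1
# all-zero; each row is simply divided by max(row degree, 1). Note that the self-connection step
# mentioned in the docstring is not performed here; in this codebase self-edges appear to be
# injected via the sampler configs ("add_self_edge" in parse_n_prepare) instead.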
def adj_norm_sym(adj, sort_indices=True, add_self_edge=False, dropedge=0.):
assert adj.shape[0] == adj.shape[1]
assert adj.data.sum() == adj.size, "symmetric normalization only supports binary input adj"
N = adj.shape[0]
# drop edges symmetrically
if dropedge > 0:
masked_indices = np.random.choice(adj.size, int(adj.size * dropedge))
adj.data[masked_indices] = 0
adjT = adj.tocsc()
data_add = adj.data + adjT.data
survived_indices = np.where(data_add == 2)[0]
adj.data *= 0
adj.data[survived_indices] = 1
# augment adj with self-connection
if add_self_edge:
indptr_new = np.zeros(N + 1)
neigh_list = [set(adj.indices[adj.indptr[v] : adj.indptr[v+1]]) for v in range(N)]
for i in range(len(neigh_list)):
neigh_list[i].add(i)
neigh_list[i] = np.sort(np.fromiter(neigh_list[i], int, len(neigh_list[i])))
indptr_new[i + 1] = neigh_list[i].size
indptr_new = indptr_new.cumsum()
indices_new = np.concatenate(neigh_list)
data_new = np.broadcast_to(np.ones(1), indices_new.size)
adj_aug = sp.csr_matrix((data_new, indices_new, indptr_new), shape=adj.shape)
# NOTE: no need to explicitly convert dtype, since adj_norm_sym is used for subg only
else:
adj_aug = adj
# normalize
D = np.clip(adj_aug.sum(1).flatten(), 1, None)
norm_diag = sp.dia_matrix((np.power(D, -0.5), 0), shape=adj_aug.shape)
adj_norm = norm_diag.dot(adj_aug).dot(norm_diag)
if sort_indices:
adj_norm.sort_indices()
return adj_norm
def coo_scipy2torch(adj):
"""
convert a scipy sparse COO matrix to torch
"""
values = adj.data
indices = np.vstack((adj.row, adj.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
return torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))
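# Minimal usage sketch (hypothetical):
#   adj_coo = sp.random(4, 4, density=0.5, format='coo')
#   adj_t = coo_scipy2torch(adj_coo)   # sparse FloatTensor of shape (4, 4)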
# ================= #
# ADJ FILE IO UTILS #
# ================= #
def load_adj(prefix, dataset, type_, split_):
"""
Try to load a prestored adjacency matrix (raw or undirected). Return None if the file does not exist.
"""
assert split_ in ['full', 'train'], "UNKNOWN ADJ SPLIT. ONLY ACCEPT [full] or [train]"
assert type_ in ['raw', 'undirected'], "UNKNOWN ADJ TYPE. ONLY ACCEPT [raw] or [undirected]"
file_adj = f"{prefix}/{dataset}/adj_{split_}_{type_}." + "{}"
if os.path.isfile(file_adj.format('npz')):
adj = sp.load_npz(file_adj.format('npz'))
elif os.path.isfile(file_adj.format('npy')):
adj_d = np.load(file_adj.format('npy'), allow_pickle=True)
if type(adj_d) == np.ndarray:
adj_d = adj_d[()]
else:
assert type(adj_d) == dict
indptr = adj_d['indptr']
indices = adj_d['indices']
if 'data' in adj_d:
data = adj_d['data']
else:
data = np.broadcast_to(np.ones(1, dtype=bool), indices.size)
num_nodes = indptr.size - 1
adj = sp.csr_matrix((data, indices, indptr), shape=(num_nodes, num_nodes))
else:
adj = None
return adj
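# NOTE (editorial sketch, hypothetical dataset name): load_adj looks for adj_<split>_<type>.npz
# first and falls back to a .npy dict of CSR components, e.g.
#   load_adj('data', 'ogbn-arxiv', 'undirected', 'full')
# reads data/ogbn-arxiv/adj_full_undirected.npz if present, else the .npy variant, else returns None.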
def validate_bin_file(bin_adj_files):
for md, df in bin_adj_files.items():
assert set(df.keys()) == set(['indptr', 'indices', 'data'])
if not os.path.isfile(df['indptr']) or not os.path.isfile(df['indices']):
return {mmd: None for mmd in bin_adj_files}
if not os.path.isfile(df['data']):
df['data'] = ''
return bin_adj_files
def merge_stat_record(dict_l : List[dict]):
key_l = [set(d.keys()) for d in dict_l]
assert all(k == key_l[0] == set([TRAIN, VALID, TEST]) for k in key_l)
names_stat = set(dict_l[0][TRAIN].keys())
ret = {n: {TRAIN: [], VALID: [], TEST: []} for n in names_stat}
for d in dict_l:
for m in [TRAIN, VALID, TEST]:
assert set(d[m].keys()) == names_stat
for k, v in d[m].items():
ret[k][m].append(v)
return ret | import os
import torch
import glob
import numpy as np
import scipy.sparse as sp
import yaml
from sklearn.preprocessing import StandardScaler
from shaDow.globals import git_rev, timestamp, Logger
from torch_scatter import scatter
from copy import deepcopy
from typing import List, Union
from shaDow import TRAIN, VALID, TEST
from shaDow.data_converter import convert2shaDow, to_undirected
def load_data(prefix, dataset, config_data, os_='linux'):
Logger.printf("Loading training data..")
prefix_l = prefix['local']
fs_shadow = ['adj_full_raw.np[yz]', 'adj_train_raw.np[yz]', 'label_full.npy', 'feat_full.npy', 'split.npy']
if not all(glob.glob(f"{prefix_l}/{dataset}/{f}") for f in fs_shadow):
convert2shaDow(dataset, prefix_l)
role = np.load(f"./{prefix_l}/{dataset}/split.npy", allow_pickle=True)
if type(role) == np.ndarray:
role = role[()]
else:
assert type(role) == dict
# role is used as index, which is required to be int64 (node_set won't take much mem anyways)
node_set = {TRAIN: np.asarray(role[TRAIN], dtype=np.int64),
VALID: np.asarray(role[VALID], dtype=np.int64),
TEST : np.asarray(role[TEST], dtype=np.int64)}
# load adj. If we want to convert to_undirected, and the undirected adj has been stored as external file,
# then we skip the conversion in the program and directly load the undirected adj.
bin_adj_files = {TRAIN: {'indptr': None, 'indices': None, 'data': None},
VALID: {'indptr': None, 'indices': None, 'data': None},
TEST: {'indptr': None, 'indices': None, 'data': None}}
def fill_bin_adj_dict(mode_, split_, type_):
for d in ['indptr', 'indices', 'data']:
bin_adj_files[mode_][d] = f"{prefix_l}/{dataset}/cpp/adj_{split_}_{type_}_{d}.bin"
if config_data['to_undirected']:
if (adj_full := load_adj(prefix_l, dataset, 'undirected', 'full')) is None:
adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
adj_full = to_undirected(adj_full)
fill_bin_adj_dict(VALID, 'full', 'undirected')
fill_bin_adj_dict(TEST, 'full', 'undirected')
if config_data['transductive']:
adj_train = adj_full
fill_bin_adj_dict(TRAIN, 'full', 'undirected')
elif (adj_train := load_adj(prefix_l, dataset, 'undirected', 'train')) is None:
adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
adj_train = to_undirected(adj_train)
fill_bin_adj_dict(TRAIN, 'train', 'undirected')
assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
else:
adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
fill_bin_adj_dict(VALID, 'full', 'raw')
fill_bin_adj_dict(TEST, 'full', 'raw')
if config_data['transductive']:
adj_train = adj_full
fill_bin_adj_dict(TRAIN, 'full', 'raw')
else:
adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
fill_bin_adj_dict(TRAIN, 'train', 'raw')
bin_adj_files = validate_bin_file(bin_adj_files)
Logger.printf(f"SETTING TO {'TRANS' if config_data['transductive'] else 'IN'}DUCTIVE LEARNING", style="red")
label_full = np.load(f"./{prefix_l}/{dataset}/label_full.npy")
label_full = torch.from_numpy(label_full)
# ======= deal with feats =======
mode_norm = 'all' if config_data['transductive'] else 'train'
if config_data['norm_feat'] and os.path.isfile(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy"):
feats = np.load(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy")
Logger.printf(f"Loading '{mode_norm}'-normalized features", style='yellow')
else:
feats = np.load(f"./{prefix_l}/{dataset}/feat_full.npy")
if config_data['norm_feat']:
feats_fit = feats if config_data['transductive'] else feats[node_set[TRAIN]]
scaler = StandardScaler()
scaler.fit(feats_fit)
feats = scaler.transform(feats)
Logger.printf(f"Normalizing node features (mode = {mode_norm})", style="yellow")
else:
Logger.printf("Not normalizing node features", style="yellow")
feats = torch.from_numpy(feats.astype(np.float32, copy=False))
Logger.printf("Done loading training data..")
return {'adj_full' : adj_full,
'adj_train' : adj_train,
'feat_full' : feats,
'label_full': label_full,
'node_set' : node_set,
'bin_adj_files': bin_adj_files}
def parse_n_prepare(task, args, name_graph, dir_log, os_='linux'):
# [config]
if args.configs is not None:
config_train = args.configs
else:
assert task in ['inference', 'postproc']
if task == 'inference':
if args.inference_configs is None:
assert not args.compute_complexity_only
dir_candy = args.inference_dir
else:
assert args.inference_dir is None and args.compute_complexity_only
dir_candy = None
config_train = args.inference_configs
else:
if args.postproc_dir is not None:
dir_candy = args.postproc_dir
else:
with open(args.postproc_configs) as f:
config_temp = yaml.load(f, Loader=yaml.FullLoader)
if 'dir_pred_mat' in config_temp: # all such dirs MUST contain the same yaml
dir_candy = config_temp['dir_pred_mat'][0]
elif 'dir_emb_mat' in config_temp: # all ens models should have the same arch (only differs in sampler)
dir_candy = next(iter(config_temp['dir_emb_mat'].values()))[0]
else:
raise NotImplementedError
if dir_candy is not None:
assert os.path.isdir(dir_candy)
f_yml = [f for f in os.listdir(dir_candy) if f.split('.')[-1] in ['yml', 'yaml']]
assert len(f_yml) == 1
config_train = f"{dir_candy}/{f_yml[0]}"
with open(config_train) as f_config_train:
config_train = yaml.load(f_config_train, Loader=yaml.FullLoader)
config_train_copy = deepcopy(config_train)
# [data]
config_data = {"to_undirected" : False,
"transductive" : False,
"norm_feat" : True}
config_data.update(config_train['data'])
# [arch]
arch_gnn = { # default values
"dim" : -1,
"aggr" : "sage",
"residue" : "none",
"pooling" : "center",
"loss" : "softmax",
"num_layers" : -1,
"act" : "I",
"heads" : -1,
"feature_augment" : "hops",
"feature_smoothen" : "none",
"label_smoothen" : "none", # label_smoothen is only considered if use_label != none
"ensemble_act" : "leakyrelu",
"branch_sharing" : False,
"use_label" : "none"
}
arch_gnn.update(config_train["architecture"])
assert arch_gnn['aggr'] in ['sage', 'gat', 'gatscat', 'gcn', 'mlp', 'gin', 'sgc', 'sign']
assert arch_gnn['use_label'].lower() in ['all', 'none', 'no_valid']
assert arch_gnn['pooling'].lower().split('-')[0] in ['mean', 'max', 'sum', 'center', 'sort']
assert arch_gnn['residue'].lower() in ['sum', 'concat', 'max', 'none']
assert arch_gnn['feature_augment'].lower() in ['hops', 'ppr', 'none']
if arch_gnn["feature_augment"] and arch_gnn["feature_augment"].lower() != "none":
arch_gnn["feature_augment"] = set(k for k in arch_gnn["feature_augment"].split("-"))
else:
arch_gnn['feature_augment'] = set()
# [params]
params_train = {
"lr" : 0.01,
"dropedge" : 0.0,
"ensemble_dropout" : "none"
}
params_train.update(config_train["hyperparameter"])
params_train["lr"] = float(params_train["lr"])
# [sampler]
sampler_preproc, sampler_train = [], []
for s in config_train['sampler']:
phase = s.pop('phase')
if phase == 'preprocess':
sampler_preproc.append(s)
elif phase == 'train':
sampler_train.append(s)
else:
raise NotImplementedError
batch_size = config_train["hyperparameter"]["batch_size"]
config_sampler_preproc = {"batch_size": batch_size, "configs": sampler_preproc}
config_sampler_train = {"batch_size": batch_size, "configs": sampler_train}
    # add self-edges for certain architectures; e.g., for GAT there will be a divide-by-0 error in the gradient without self-edges
if arch_gnn["aggr"] in ["gcn", "gat", "gatscat"]:
for sc in config_sampler_train["configs"]:
num_ens = [len(v) for k, v in sc.items() if k != 'method']
assert max(num_ens) == min(num_ens)
sc["add_self_edge"] = [True] * num_ens[0]
# [copy yml]
name_key = f"{arch_gnn['aggr']}_{arch_gnn['num_layers']}"
dir_log_full = log_dir(task, config_train_copy, name_key, dir_log, name_graph, git_rev, timestamp)
return params_train, config_sampler_preproc, config_sampler_train, config_data, arch_gnn, dir_log_full
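# --- Illustrative only (not part of the original module) ---
# A minimal sketch of the training-config structure that parse_n_prepare() above
# consumes. The section names ('data', 'architecture', 'hyperparameter', 'sampler',
# 'batch_size', 'phase', 'method') come from the parsing code itself; every concrete
# value below is a made-up example, not a recommended setting.
_EXAMPLE_TRAIN_CONFIG = {
    "data": {"to_undirected": False, "transductive": False, "norm_feat": True},
    "architecture": {"aggr": "sage", "num_layers": 2, "dim": 256, "pooling": "center"},
    "hyperparameter": {"lr": 0.01, "batch_size": 32, "dropedge": 0.0},
    "sampler": [{"phase": "train", "method": "ppr", "k": [50]}],
}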
def parse_n_prepare_postproc(dir_load, f_config, name_graph, dir_log, arch_gnn, logger):
if f_config is not None:
with open(f_config) as f:
config_postproc = yaml.load(f, Loader=yaml.FullLoader)
name_key = f"postproc-{arch_gnn['aggr']}_{arch_gnn['num_layers']}"
log_dir('postproc', config_postproc, name_key, dir_log, name_graph, git_rev, timestamp)
skip_instantiate = []
if 'check_record' in config_postproc:
load_acc_record = config_postproc['check_record']
else:
load_acc_record = True
if config_postproc['method'] == 'cs': # C&S
acc_record = [] if load_acc_record else None
if dir_load is not None:
if 'dir_pred_mat' not in config_postproc:
config_postproc['dir_pred_mat'] = [dir_load]
elif os.path.realpath(dir_load) not in [os.path.realpath(pc) for pc in config_postproc['dir_pred_mat']]:
config_postproc['dir_pred_mat'].append(dir_load)
config_postproc['pred_mat'] = [None] * len(config_postproc['dir_pred_mat'])
for i, di in enumerate(config_postproc['dir_pred_mat']):
if load_acc_record:
acc_record.append(logger.decode_csv('final', di))
for f in os.listdir(di):
if 'cs' == f.split('.')[-1] and f.startswith('pred_mat'):
config_postproc['pred_mat'][i] = torch.load(f"{di}/{f}")
break
if all(m is not None for m in config_postproc['pred_mat']):
skip_instantiate = ['data', 'model']
elif config_postproc['method'] == 'ensemble': # Variant of subgraph ensemble as postproc
acc_record = {s: [] for s in config_postproc['dir_emb_mat']} if load_acc_record else None
assert dir_load is None
config_postproc['emb_mat'] = {k: [None] * len(v) for k, v in config_postproc['dir_emb_mat'].items()}
for sname, dirs_l in config_postproc['dir_emb_mat'].items():
for i, di in enumerate(dirs_l):
if load_acc_record:
acc_record[sname].append(logger.decode_csv('final', di))
for f in os.listdir(di):
if 'ens' == f.split('.')[-1] and f.startswith('emb_mat'):
config_postproc['emb_mat'][sname][i] = torch.load(f"{di}/{f}")
break
if all(m is not None for s, mat_l in config_postproc['emb_mat'].items() for m in mat_l):
skip_instantiate = ['model'] # you have to load data (role, labels) anyways
return config_postproc, acc_record, skip_instantiate
def log_dir(task, config_new, yml_name_key, dir_log, name_graph, git_rev, timestamp):
if task == 'train':
prefix = 'running'
elif task == 'inference':
prefix = 'INF'
elif task == 'postproc':
prefix = 'POST'
else:
raise NotImplementedError
log_dir = f"{dir_log}/{name_graph}/{prefix}/{timestamp}-{git_rev.strip():s}/"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
yml_file = f"{log_dir}/{yml_name_key}.yml"
with open(yml_file, 'w') as f:
yaml.dump(config_new, f, default_flow_style=False, sort_keys=False)
return log_dir
# =============== #
# ADJ UTILS #
# =============== #
def get_deg_torch_sparse(adj):
return scatter(adj._values(), adj._indices()[0], reduce="sum")
def adj_norm_rw(adj, deg=None, dropedge=0., sort_indices=True):
    """
    Normalize adj by random-walk (rw) normalization.
    Note that sym norm is used in the original GCN paper (Kipf & Welling),
    while rw norm is used in GraphSAGE and some other variants.
    Procedure:
        1. add self-connections to adj          --> adj'
        2. build the degree matrix D' from adj'
        3. normalize as D'^{-1} x adj'
    If sort_indices is True, we re-sort the indices of the returned adj.
    Note that after the 'dot' the indices of a node would be in descending
    order rather than ascending order.
    """
if type(adj) == torch.Tensor:
assert deg is None
assert torch.sum(adj._values()).cpu().long().item() == adj._values().size()[0]
_deg_orig = get_deg_torch_sparse(adj)
if dropedge > 0:
masked_indices = torch.floor(torch.rand(int(adj._values().size()[0] * dropedge)) * adj._values().size()[0]).long()
adj._values()[masked_indices] = 0
_deg_dropped = get_deg_torch_sparse(adj)
else:
_deg_dropped = _deg_orig
_deg = torch.repeat_interleave(_deg_dropped, _deg_orig.long())
_deg = torch.clamp(_deg, min=1)
_val = adj._values()
_val /= _deg
adj_norm = adj
else:
assert dropedge == 0., "not supporting dropedge for scipy csr matrices"
assert adj.shape[0] == adj.shape[1]
diag_shape = (adj.shape[0], adj.shape[1])
D = adj.sum(1).flatten() if deg is None else deg
D = np.clip(D, 1, None) # if deg_v == 0, it doesn't matter what value we clip it to.
norm_diag = sp.dia_matrix((1 / D, 0), shape=diag_shape)
adj_norm = norm_diag.dot(adj)
if sort_indices:
adj_norm.sort_indices()
return adj_norm
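# --- Illustrative only (not part of the original module) ---
# Tiny sketch of what adj_norm_rw() produces on a toy scipy graph: each row of the
# returned matrix sums to 1 (D^{-1} A row-normalization; zero-degree rows are clipped
# to degree 1 by the np.clip above). The toy adjacency below is a made-up example.
def _demo_adj_norm_rw():
    adj_toy = sp.csr_matrix(np.array(
        [[0, 1, 1],
         [1, 0, 0],
         [1, 0, 0]], dtype=np.float32))
    adj_rw = adj_norm_rw(adj_toy)
    assert np.allclose(np.asarray(adj_rw.sum(1)).flatten(), 1.0)
    return adj_rw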
def adj_norm_sym(adj, sort_indices=True, add_self_edge=False, dropedge=0.):
assert adj.shape[0] == adj.shape[1]
assert adj.data.sum() == adj.size, "symmetric normalization only supports binary input adj"
N = adj.shape[0]
# drop edges symmetrically
if dropedge > 0:
masked_indices = np.random.choice(adj.size, int(adj.size * dropedge))
adj.data[masked_indices] = 0
adjT = adj.tocsc()
data_add = adj.data + adjT.data
survived_indices = np.where(data_add == 2)[0]
adj.data *= 0
adj.data[survived_indices] = 1
# augment adj with self-connection
if add_self_edge:
indptr_new = np.zeros(N + 1)
neigh_list = [set(adj.indices[adj.indptr[v] : adj.indptr[v+1]]) for v in range(N)]
for i in range(len(neigh_list)):
neigh_list[i].add(i)
neigh_list[i] = np.sort(np.fromiter(neigh_list[i], int, len(neigh_list[i])))
indptr_new[i + 1] = neigh_list[i].size
indptr_new = indptr_new.cumsum()
indices_new = np.concatenate(neigh_list)
data_new = np.broadcast_to(np.ones(1), indices_new.size)
adj_aug = sp.csr_matrix((data_new, indices_new, indptr_new), shape=adj.shape)
# NOTE: no need to explicitly convert dtype, since adj_norm_sym is used for subg only
else:
adj_aug = adj
# normalize
D = np.clip(adj_aug.sum(1).flatten(), 1, None)
norm_diag = sp.dia_matrix((np.power(D, -0.5), 0), shape=adj_aug.shape)
adj_norm = norm_diag.dot(adj_aug).dot(norm_diag)
if sort_indices:
adj_norm.sort_indices()
return adj_norm
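# --- Illustrative only (not part of the original module) ---
# Companion sketch for adj_norm_sym(): symmetric D^{-1/2} A D^{-1/2} normalization of
# a small binary adjacency; the result should itself be symmetric. The toy values are
# made-up examples.
def _demo_adj_norm_sym():
    adj_toy = sp.csr_matrix(np.array(
        [[0, 1, 1],
         [1, 0, 0],
         [1, 0, 0]], dtype=np.float32))
    adj_sym = adj_norm_sym(adj_toy, add_self_edge=False)
    assert np.allclose(adj_sym.toarray(), adj_sym.toarray().T)
    return adj_sym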
def coo_scipy2torch(adj):
"""
convert a scipy sparse COO matrix to torch
"""
values = adj.data
indices = np.vstack((adj.row, adj.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
return torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))
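# --- Illustrative only (not part of the original module) ---
# Usage sketch for coo_scipy2torch(): convert a tiny scipy COO matrix into a torch
# sparse tensor. The 2x2 matrix below is a made-up example.
def _demo_coo_scipy2torch():
    adj_coo = sp.coo_matrix(
        (np.ones(2, dtype=np.float32), ([0, 1], [1, 0])), shape=(2, 2))
    adj_torch = coo_scipy2torch(adj_coo)
    assert adj_torch.shape == torch.Size([2, 2])
    return adj_torch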
# ================= #
# ADJ FILE IO UTILS #
# ================= #
def load_adj(prefix, dataset, type_, split_):
"""
    Try to load the prestored undirected adj. If the file does not exist, return None.
"""
assert split_ in ['full', 'train'], "UNKNOWN ADJ SPLIT. ONLY ACCEPT [full] or [train]"
assert type_ in ['raw', 'undirected'], "UNKNOWN ADJ TYPE. ONLY ACCEPT [raw] or [undirected]"
file_adj = f"{prefix}/{dataset}/adj_{split_}_{type_}." + "{}"
if os.path.isfile(file_adj.format('npz')):
adj = sp.load_npz(file_adj.format('npz'))
elif os.path.isfile(file_adj.format('npy')):
adj_d = np.load(file_adj.format('npy'), allow_pickle=True)
if type(adj_d) == np.ndarray:
adj_d = adj_d[()]
else:
assert type(adj_d) == dict
indptr = adj_d['indptr']
indices = adj_d['indices']
if 'data' in adj_d:
data = adj_d['data']
else:
            data = np.broadcast_to(np.ones(1, dtype=bool), indices.size)
num_nodes = indptr.size - 1
adj = sp.csr_matrix((data, indices, indptr), shape=(num_nodes, num_nodes))
else:
adj = None
return adj
def validate_bin_file(bin_adj_files):
for md, df in bin_adj_files.items():
assert set(df.keys()) == set(['indptr', 'indices', 'data'])
if not os.path.isfile(df['indptr']) or not os.path.isfile(df['indices']):
return {mmd: None for mmd in bin_adj_files}
if not os.path.isfile(df['data']):
df['data'] = ''
return bin_adj_files
def merge_stat_record(dict_l: List[dict]):
key_l = [set(d.keys()) for d in dict_l]
assert all(k == key_l[0] == set([TRAIN, VALID, TEST]) for k in key_l)
names_stat = set(dict_l[0][TRAIN].keys())
ret = {n: {TRAIN: [], VALID: [], TEST: []} for n in names_stat}
for d in dict_l:
for m in [TRAIN, VALID, TEST]:
assert set(d[m].keys()) == names_stat
for k, v in d[m].items():
ret[k][m].append(v)
return ret |
#!/usr/bin/env python
# coding: utf-8
"""
tests
~~~~~
Provides the tests for opts.
:copyright: 2010 by Daniel Neuhäuser
:license: BSD, see LICENSE for details
"""
import unittest
import sys
from decimal import Decimal
from StringIO import StringIO
from opts import (Node, Option, BooleanOption, IntOption, FloatOption,
DecimalOption, MultipleOptions, Positional, IntPositional,
FloatPositional, DecimalPositional, Command, Parser)
def xrange(*args):
if len(args) == 1:
start, stop, step = 0, args[0], 1
elif len(args) == 2:
start, stop, step = args[0], args[1], 1
else:
start, stop, step = args
i = start
while i <= stop:
yield i
i += step
class TestCase(unittest.TestCase):
def assertContains(self, container, item):
if item not in container:
raise AssertionError('{0!r} not in {1!r}'.format(item, container))
def assertContainsAll(self, container, items):
for item in items:
self.assertContains(container, item)
class TestNode(TestCase):
def test_short_description_fallback(self):
n = Node()
self.assertEqual(n.short_description, u"No short description.")
def test_long_description_fallback(self):
n = Node()
self.assertEqual(n.long_description, u"No long description.")
def test_long_description_fallback_to_short(self):
n = Node(short_description=u"Foobar")
self.assertEqual(n.long_description, u"Foobar")
class TestOption(TestCase):
def test_valueerror_on_init(self):
self.assertRaises(ValueError, Option)
class TestBooleanOption(TestCase):
def test_evaluate(self):
o = BooleanOption(short="b")
p = Parser(options=dict(b=o))
self.assertEqual(p.evaluate([u'-b']), ({'b': True}, []))
o = BooleanOption(short="b", default=True)
p = Parser(options=dict(b=o))
self.assertEqual(p.evaluate(['-b']), ({'b': False}, []))
class TestNumberOptions(TestCase):
def test_intoption_evaluate(self):
self.make_test(xrange(-10, 10), IntOption(short='o'))
def test_floatoption_evaluate(self):
self.make_test(xrange(-10.0, 10.0, 0.5), FloatOption(short='o'))
def test_decimaloption_evaluate(self):
self.make_test(
xrange(Decimal('-10.0'), Decimal('10.0'), Decimal('0.5')),
DecimalOption(short='o')
)
def make_test(self, range, o):
p = Parser(options=dict(o=o))
for i in range:
self.assertEqual(p.evaluate([u'-o', unicode(i)]), ({'o': i}, []))
class TestMultipleOptions(TestCase):
def test_evaluate_no_quotes(self):
o = MultipleOptions(short='o')
p = Parser(options=dict(o=o))
self.assertEqual(
p.evaluate([u'-o', u'foo,bar,baz']),
({'o': [u'foo', u'bar', u'baz']}, [])
)
def test_evaluate_with_quotes(self):
o = MultipleOptions(short='o')
p = Parser(options=dict(o=o))
self.assertEqual(
p.evaluate([u'-o', u'foo,"bar,baz"']),
({'o': [u'foo', u'bar,baz']}, [])
)
self.assertEqual(
p.evaluate([u'-o', u'"foo,bar",baz']),
({'o': [u'foo,bar', u'baz']}, [])
)
class TestPositional(TestCase):
def test_evaluate(self):
p = Parser(positionals=[Positional('foo')])
self.assertEquals(p.evaluate([u'spam']), ({}, [u'spam']))
class TestNumberPositionals(TestCase):
def test_intpositional_evaluate(self):
self.make_test(xrange(10), IntPositional('foo'))
def test_floatpositional_evaluate(self):
self.make_test(xrange(10, 0.5), FloatPositional('foo'))
def test_decimalpositional_evaluate(self):
self.make_test(
xrange(Decimal('0'), Decimal('10.0'), Decimal('0.5')),
DecimalPositional('foo')
)
def make_test(self, range, p):
parser = Parser(positionals=[p])
for i in range:
self.assertEqual(parser.evaluate([unicode(i)]), ({}, [i]))
class TestCommand(TestCase):
def test_remaining_arguments(self):
c = Command(options={'a': Option('a')})
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'foo']),
({'c': ({}, [u'foo'])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo']),
({'c': ({'a': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo', u'bar']),
({u'c': ({'a': u'foo'}, [u'bar'])}, [])
)
def test_options(self):
class TestDeclarative(Command):
spam = Option('a', 'asomething')
eggs = Option('b', 'bsomething')
a = TestDeclarative()
b = Command(options={
'spam': Option('a', 'asomething'),
'eggs': Option('b', 'bsomething')})
for c in [a, b]:
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo']),
({'c': ({'spam': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--asomething', u'foo']),
({'c': ({'spam': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-b', u'foo']),
({'c': ({u'eggs': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--bsomething', u'foo']),
({'c': ({u'eggs': u'foo'}, [])}, [])
)
def test_commands(self):
class TestDeclarative(Command):
spam = Command()
eggs = Command()
a = TestDeclarative()
b = Command(commands={
'spam': Command(),
'eggs': Command()})
cp = [u'script_name']
for c in [a, b]:
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'spam']),
({'c': ({u'spam': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'eggs']),
({'c': ({'eggs': ({}, [])}, [])}, [])
)
def test_abbreviations(self):
c = Command(
options={
'stack': Option(long='stack'),
'stash': Option(long='stash')},
commands={
'stack': Command(),
'stash': Command()})
p = Parser(commands=dict(c=c))
cp = [u'script_name']
for s in [u's', u'st', u'sta']:
cmd = [u'c', s]
result = ({'c': ({}, [s])}, [])
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(
p.evaluate([u'c', u'stac']),
({'c': ({u'stack': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'stas']),
({'c': ({u'stash': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--stac', u'foo']),
({'c': ({u'stack': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--stas', u'foo']),
({'c': ({u'stash': u'foo'}, [])}, [])
)
def test_disallow_abbreviated_commands(self):
class NewCommand(Command):
allow_abbreviated_commands = False
c = NewCommand(commands={
'foo': Command()
})
p = Parser(commands=dict(c=c))
self.assertEqual(p.evaluate([u'c', u'f']), ({'c': ({}, [u'f'])}, []))
def test_apply_defaults(self):
class FooParser(Parser):
activate = BooleanOption('a')
foo = Command(
options={
'spam': Option('a'),
'eggs': Option('b')
}
)
p = FooParser()
p.apply_defaults({
'activate': 'huhu',
'foo': {
'spam': 'bla',
'eggs': 'blubb'
}
})
self.assertEquals(p.options['activate'].default, 'huhu')
self.assertEquals(p.commands['foo'].options['spam'].default, 'bla')
self.assertEquals(p.commands['foo'].options['eggs'].default, 'blubb')
def test_getattr(self):
p = Parser(
options={
'activate': Option('a')
},
commands={
'foo': Command(options={
'spam': Option('b'),
'eggs': Option('c')
})
}
)
p.activate
p.foo
p.foo.spam
p.foo.eggs
def test_dynamically_adding_nodes(self):
p = Parser()
p.commands['foo'] = Command()
p.commands['foo'].options['a'] = BooleanOption('a')
p.options['bar'] = Option('b')
self.assertEquals(p.evaluate([u'-b', u'spam']), ({'bar': u'spam'}, []))
self.assertEquals(
p.evaluate([u'foo']),
({'foo': ({'a': False}, [])}, [])
)
self.assertEquals(
p.evaluate([u'foo', u'-a']),
({'foo': ({'a': True}, [])}, [])
)
class TestParser(TestCase):
def test_default_evaluate_arguments(self):
old_argv = sys.argv
enc = sys.stdin.encoding or sys.getdefaultencoding()
sys.argv = [s.encode(enc) for s in [u'script_name', u'foo', u'bar']]
p = Parser()
self.assertEqual(p.evaluate(), ({}, [u'foo', u'bar']))
sys.argv = old_argv
class OutputTest(TestCase):
def setUp(self):
self.out_file = StringIO()
self._old_argv = sys.argv
sys.argv = ['script']
def tearDown(self):
self.out_file = StringIO()
sys.argv = self._old_argv
class TestParserOutput(OutputTest):
def test_alternative_commands(self):
p = Parser(
commands={
'stack': Command(),
'stash': Command(),
},
out_file=self.out_file,
takes_arguments=False
)
for cmd in [u's', u'st', u'sta']:
self.assertRaises(SystemExit, p.evaluate, [cmd])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [commands]')
self.assertContains(
output,
u'command "{0}" does not exist, did you mean?'.format(cmd)
)
self.assertContains(output, u'stack')
self.assertContains(output, u'stash')
def test_alternative_options(self):
p = Parser(
options={
'stack': Option(long='stack'),
'stash': Option(long='stash')
},
out_file=self.out_file
)
for option in [u'--s', u'--st', u'--sta']:
self.assertRaises(SystemExit, p.evaluate, [option])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options]')
self.assertContains(
output,
u'option "{0}" does not exist, did you mean?'.format(option)
)
self.assertContains(output, u'--stack')
self.assertContains(output, u'--stash')
def test_nonexisting_command(self):
p = Parser(
out_file=self.out_file,
takes_arguments=False
)
self.assertRaises(SystemExit, p.evaluate, [u'foo'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'command "foo" does not exist')
def test_nonexisting_long_option(self):
p = Parser(out_file=self.out_file)
self.assertRaises(SystemExit, p.evaluate, [u'--foo'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'option "--foo" does not exist')
def test_nonexisting_short_option(self):
p = Parser(out_file=self.out_file)
self.assertRaises(SystemExit, p.evaluate, [u'-f'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'option "-f" does not exist')
class TestHelp(OutputTest):
def test_commands(self):
p = Parser(
commands={
'foo': Command(short_description=u'foo description'),
'bar': Command(short_description=u'bar description')
},
description=u'The script description',
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [commands]',
p.long_description,
u'Commands:',
u' foo',
p.commands['foo'].short_description,
u' bar',
p.commands['bar'].short_description
])
def test_options(self):
p = Parser(
options={
'foo': Option('f'),
'bar': Option(long='bar'),
'baz': Option('b', 'baz')
},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [options]',
u'Options:',
u' -f',
u' --bar',
u' -b --baz'
])
def test_positional_arguments(self):
p = Parser(
positionals=[
Positional(u'foo'),
Positional(u'bar', short_description=u'something')
],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script foo bar',
u'Positional arguments:',
u' foo',
u'No short description.',
u' bar',
u'something'
])
def test_commands_and_options(self):
p = Parser(
commands={
'spam': Command(),
'eggs': Command()
},
options={
'foo': Option('f'),
'bar': Option('b')
},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [options] [commands]',
u'Commands:',
u' spam',
u' eggs',
u'Options:',
u' -f',
u' -b'
])
class TestUsage(OutputTest):
def test_only_commands(self):
p = Parser(
commands={'foo': Command()},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [commands]')
def test_only_options(self):
p = Parser(
options={'foo': Option('f')},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options]')
def test_commands_and_options(self):
p = Parser(
options={'foo': Option('f')},
commands={'bar': Command()},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options] [commands]')
def test_positionals(self):
p = Parser(
positionals=[
Positional('a'),
Positional('b')
],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script a b')
def test_all(self):
p = Parser(
options={'foo': Option('f')},
commands={'bar': Command()},
positionals=[Positional('baz')],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options] [commands] baz')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestNode))
suite.addTest(unittest.makeSuite(TestOption))
suite.addTest(unittest.makeSuite(TestBooleanOption))
suite.addTest(unittest.makeSuite(TestNumberOptions))
suite.addTest(unittest.makeSuite(TestMultipleOptions))
suite.addTest(unittest.makeSuite(TestPositional))
suite.addTest(unittest.makeSuite(TestNumberPositionals))
suite.addTest(unittest.makeSuite(TestCommand))
suite.addTest(unittest.makeSuite(TestParser))
suite.addTest(unittest.makeSuite(TestParserOutput))
suite.addTest(unittest.makeSuite(TestHelp))
suite.addTest(unittest.makeSuite(TestUsage))
return suite
if __name__ == "__main__":
unittest.main(defaultTest='suite')
| #!/usr/bin/env python
# coding: utf-8
"""
tests
~~~~~
Provides the tests for opts.
:copyright: 2010 by Daniel Neuhäuser
:license: BSD, see LICENSE for details
"""
import unittest
import sys
from decimal import Decimal
from StringIO import StringIO
from opts import (Node, Option, BooleanOption, IntOption, FloatOption,
DecimalOption, MultipleOptions, Positional, IntPositional,
FloatPositional, DecimalPositional, Command, Parser)
def xrange(*args):
if len(args) == 1:
start, stop, step = 0, args[0], 1
elif len(args) == 2:
start, stop, step = args[0], args[1], 1
else:
start, stop, step = args
i = start
while i <= stop:
yield i
i += step
class TestCase(unittest.TestCase):
def assertContains(self, container, item):
if item not in container:
raise AssertionError('{0!r} not in {1!r}'.format(item, container))
def assertContainsAll(self, container, items):
for item in items:
self.assertContains(container, item)
class TestNode(TestCase):
def test_short_description_fallback(self):
n = Node()
self.assertEqual(n.short_description, u"No short description.")
def test_long_description_fallback(self):
n = Node()
self.assertEqual(n.long_description, u"No long description.")
def test_long_description_fallback_to_short(self):
n = Node(short_description=u"Foobar")
self.assertEqual(n.long_description, u"Foobar")
class TestOption(TestCase):
def test_valueerror_on_init(self):
self.assertRaises(ValueError, Option)
class TestBooleanOption(TestCase):
def test_evaluate(self):
o = BooleanOption(short="b")
p = Parser(options=dict(b=o))
self.assertEqual(p.evaluate([u'-b']), ({'b': True}, []))
o = BooleanOption(short="b", default=True)
p = Parser(options=dict(b=o))
self.assertEqual(p.evaluate(['-b']), ({'b': False}, []))
class TestNumberOptions(TestCase):
def test_intoption_evaluate(self):
self.make_test(xrange(-10, 10), IntOption(short='o'))
def test_floatoption_evaluate(self):
self.make_test(xrange(-10.0, 10.0, 0.5), FloatOption(short='o'))
def test_decimaloption_evaluate(self):
self.make_test(
xrange(Decimal('-10.0'), Decimal('10.0'), Decimal('0.5')),
DecimalOption(short='o')
)
def make_test(self, range, o):
p = Parser(options=dict(o=o))
for i in range:
self.assertEqual(p.evaluate([u'-o', unicode(i)]), ({'o': i}, []))
class TestMultipleOptions(TestCase):
def test_evaluate_no_quotes(self):
o = MultipleOptions(short='o')
p = Parser(options=dict(o=o))
self.assertEqual(
p.evaluate([u'-o', u'foo,bar,baz']),
({'o': [u'foo', u'bar', u'baz']}, [])
)
def test_evaluate_with_quotes(self):
o = MultipleOptions(short='o')
p = Parser(options=dict(o=o))
self.assertEqual(
p.evaluate([u'-o', u'foo,"bar,baz"']),
({'o': [u'foo', u'bar,baz']}, [])
)
self.assertEqual(
p.evaluate([u'-o', u'"foo,bar",baz']),
({'o': [u'foo,bar', u'baz']}, [])
)
class TestPositional(TestCase):
def test_evaluate(self):
p = Parser(positionals=[Positional('foo')])
self.assertEquals(p.evaluate([u'spam']), ({}, [u'spam']))
class TestNumberPositionals(TestCase):
def test_intpositional_evaluate(self):
self.make_test(xrange(10), IntPositional('foo'))
def test_floatpositional_evaluate(self):
self.make_test(xrange(10, 0.5), FloatPositional('foo'))
def test_decimalpositional_evaluate(self):
self.make_test(
xrange(Decimal('0'), Decimal('10.0'), Decimal('0.5')),
DecimalPositional('foo')
)
def make_test(self, range, p):
parser = Parser(positionals=[p])
for i in range:
self.assertEqual(parser.evaluate([unicode(i)]), ({}, [i]))
class TestCommand(TestCase):
def test_remaining_arguments(self):
c = Command(options={'a': Option('a')})
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'foo']),
({'c': ({}, [u'foo'])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo']),
({'c': ({'a': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo', u'bar']),
({u'c': ({'a': u'foo'}, [u'bar'])}, [])
)
def test_options(self):
class TestDeclarative(Command):
spam = Option('a', 'asomething')
eggs = Option('b', 'bsomething')
a = TestDeclarative()
b = Command(options={
'spam': Option('a', 'asomething'),
'eggs': Option('b', 'bsomething')})
for c in [a, b]:
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo']),
({'c': ({'spam': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--asomething', u'foo']),
({'c': ({'spam': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-b', u'foo']),
({'c': ({u'eggs': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--bsomething', u'foo']),
({'c': ({u'eggs': u'foo'}, [])}, [])
)
def test_commands(self):
class TestDeclarative(Command):
spam = Command()
eggs = Command()
a = TestDeclarative()
b = Command(commands={
'spam': Command(),
'eggs': Command()})
cp = [u'script_name']
for c in [a, b]:
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'spam']),
({'c': ({u'spam': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'eggs']),
({'c': ({'eggs': ({}, [])}, [])}, [])
)
def test_abbreviations(self):
c = Command(
options={
'stack': Option(long='stack'),
'stash': Option(long='stash')},
commands={
'stack': Command(),
'stash': Command()})
p = Parser(commands=dict(c=c))
cp = [u'script_name']
for s in [u's', u'st', u'sta']:
cmd = [u'c', s]
result = ({'c': ({}, [s])}, [])
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(
p.evaluate([u'c', u'stac']),
({'c': ({u'stack': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'stas']),
({'c': ({u'stash': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--stac', u'foo']),
({'c': ({u'stack': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--stas', u'foo']),
({'c': ({u'stash': u'foo'}, [])}, [])
)
def test_disallow_abbreviated_commands(self):
class NewCommand(Command):
allow_abbreviated_commands = False
c = NewCommand(commands={
'foo': Command()
})
p = Parser(commands=dict(c=c))
self.assertEqual(p.evaluate([u'c', u'f']), ({'c': ({}, [u'f'])}, []))
def test_apply_defaults(self):
class FooParser(Parser):
activate = BooleanOption('a')
foo = Command(
options={
'spam': Option('a'),
'eggs': Option('b')
}
)
p = FooParser()
p.apply_defaults({
'activate': 'huhu',
'foo': {
'spam': 'bla',
'eggs': 'blubb'
}
})
self.assertEquals(p.options['activate'].default, 'huhu')
self.assertEquals(p.commands['foo'].options['spam'].default, 'bla')
self.assertEquals(p.commands['foo'].options['eggs'].default, 'blubb')
def test_getattr(self):
p = Parser(
options={
'activate': Option('a')
},
commands={
'foo': Command(options={
'spam': Option('b'),
'eggs': Option('c')
})
}
)
p.activate
p.foo
p.foo.spam
p.foo.eggs
def test_dynamically_adding_nodes(self):
p = Parser()
p.commands['foo'] = Command()
p.commands['foo'].options['a'] = BooleanOption('a')
p.options['bar'] = Option('b')
self.assertEquals(p.evaluate([u'-b', u'spam']), ({'bar': u'spam'}, []))
self.assertEquals(
p.evaluate([u'foo']),
({'foo': ({'a': False}, [])}, [])
)
self.assertEquals(
p.evaluate([u'foo', u'-a']),
({'foo': ({'a': True}, [])}, [])
)
class TestParser(TestCase):
def test_default_evaluate_arguments(self):
old_argv = sys.argv
enc = sys.stdin.encoding or sys.getdefaultencoding()
sys.argv = [s.encode(enc) for s in [u'script_name', u'foo', u'bar']]
p = Parser()
self.assertEqual(p.evaluate(), ({}, [u'foo', u'bar']))
sys.argv = old_argv
class OutputTest(TestCase):
def setUp(self):
self.out_file = StringIO()
self._old_argv = sys.argv
sys.argv = ['script']
def tearDown(self):
self.out_file = StringIO()
sys.argv = self._old_argv
class TestParserOutput(OutputTest):
def test_alternative_commands(self):
p = Parser(
commands={
'stack': Command(),
'stash': Command(),
},
out_file=self.out_file,
takes_arguments=False
)
for cmd in [u's', u'st', u'sta']:
self.assertRaises(SystemExit, p.evaluate, [cmd])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [commands]')
self.assertContains(
output,
u'command "{0}" does not exist, did you mean?'.format(cmd)
)
self.assertContains(output, u'stack')
self.assertContains(output, u'stash')
def test_alternative_options(self):
p = Parser(
options={
'stack': Option(long='stack'),
'stash': Option(long='stash')
},
out_file=self.out_file
)
for option in [u'--s', u'--st', u'--sta']:
self.assertRaises(SystemExit, p.evaluate, [option])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options]')
self.assertContains(
output,
u'option "{0}" does not exist, did you mean?'.format(option)
)
self.assertContains(output, u'--stack')
self.assertContains(output, u'--stash')
def test_nonexisting_command(self):
p = Parser(
out_file=self.out_file,
takes_arguments=False
)
self.assertRaises(SystemExit, p.evaluate, [u'foo'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'command "foo" does not exist')
def test_nonexisting_long_option(self):
p = Parser(out_file=self.out_file)
self.assertRaises(SystemExit, p.evaluate, [u'--foo'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'option "--foo" does not exist')
def test_nonexisting_short_option(self):
p = Parser(out_file=self.out_file)
self.assertRaises(SystemExit, p.evaluate, [u'-f'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'option "-f" does not exist')
class TestHelp(OutputTest):
def test_commands(self):
p = Parser(
commands={
'foo': Command(short_description=u'foo description'),
'bar': Command(short_description=u'bar description')
},
description=u'The script description',
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [commands]',
p.long_description,
u'Commands:',
u' foo',
p.commands['foo'].short_description,
u' bar',
p.commands['bar'].short_description
])
def test_options(self):
p = Parser(
options={
'foo': Option('f'),
'bar': Option(long='bar'),
'baz': Option('b', 'baz')
},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [options]',
u'Options:',
u' -f',
u' --bar',
u' -b --baz'
])
def test_positional_arguments(self):
p = Parser(
positionals=[
Positional(u'foo'),
Positional(u'bar', short_description=u'something')
],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script foo bar',
u'Positional arguments:',
u' foo',
u'No short description.',
u' bar',
u'something'
])
def test_commands_and_options(self):
p = Parser(
commands={
'spam': Command(),
'eggs': Command()
},
options={
'foo': Option('f'),
'bar': Option('b')
},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [options] [commands]',
u'Commands:',
u' spam',
u' eggs',
u'Options:',
u' -f',
u' -b'
])
class TestUsage(OutputTest):
def test_only_commands(self):
p = Parser(
commands={'foo': Command()},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [commands]')
def test_only_options(self):
p = Parser(
options={'foo': Option('f')},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options]')
def test_commands_and_options(self):
p = Parser(
options={'foo': Option('f')},
commands={'bar': Command()},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options] [commands]')
def test_positionals(self):
p = Parser(
positionals=[
Positional('a'),
Positional('b')
],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script a b')
def test_all(self):
p = Parser(
options={'foo': Option('f')},
commands={'bar': Command()},
positionals=[Positional('baz')],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options] [commands] baz')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestNode))
suite.addTest(unittest.makeSuite(TestOption))
suite.addTest(unittest.makeSuite(TestBooleanOption))
suite.addTest(unittest.makeSuite(TestNumberOptions))
suite.addTest(unittest.makeSuite(TestMultipleOptions))
suite.addTest(unittest.makeSuite(TestPositional))
suite.addTest(unittest.makeSuite(TestNumberPositionals))
suite.addTest(unittest.makeSuite(TestCommand))
suite.addTest(unittest.makeSuite(TestParser))
suite.addTest(unittest.makeSuite(TestParserOutput))
suite.addTest(unittest.makeSuite(TestHelp))
suite.addTest(unittest.makeSuite(TestUsage))
return suite
if __name__ == "__main__":
unittest.main(defaultTest='suite')
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ast
import glob
import re
def list_functions(source_glob):
"""
List all of the functions and classes defined
"""
defined = []
# Iterate through each source file
for sp in glob.glob(source_glob):
module_name = sp[:-3]
module_name = module_name.replace("/", ".")
# Parse the source file into an AST
node = ast.parse(open(sp).read())
# Extract the names of all functions and classes defined in this file
defined.extend(
(n.name, module_name + "." + n.name)
for n in node.body
if (isinstance(n, ast.FunctionDef) or isinstance(n, ast.ClassDef))
)
return defined
def replace_backticks(source_path, docs_path):
markdown_glob = docs_path + "/*.md"
source_glob = source_path + "/**/*.py"
methods = list_functions(source_glob)
for f in glob.glob(markdown_glob):
for n, m in methods:
# Match backquoted mentions of the function/class name which are
# not already links
pattern = "(?<![[`])(`" + n + "`)"
link = f"[`{n}`](/api/{m.split(".")[1]}.html#{m})"
lines = open(f).readlines()
for i, l in enumerate(lines):
match = re.search(pattern, l)
if match:
print(f"{f}:{i+1} s/{match.group(0)}/{link}")
lines[i] = re.sub(pattern, link, l)
open(f, "w").writelines(lines)
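# --- Illustrative only (not part of the original script) ---
# Small check of the backtick pattern used in replace_backticks() above: a bare
# `foo` is matched, while an already-linked [`foo`](...) is skipped by the negative
# lookbehind. The name 'foo' and the sample strings are made-up examples.
def _demo_backtick_pattern():
    n = "foo"
    pattern = "(?<![[`])(`" + n + "`)"
    assert re.search(pattern, "use `foo` to build the experiment")
    assert re.search(pattern, "already linked [`foo`](/api/module.html#module.foo)") is None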
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="""In markdown docs, replace backtick-quoted names of
objects exported from Ax with links to the API docs."""
)
parser.add_argument(
"--source_path",
metavar="source_path",
required=True,
help="Path to source files (e.g. 'ax/').",
)
    parser.add_argument(
        "--docs_path", type=str, required=True, help="Path to docs (e.g. 'docs/')."
)
args = parser.parse_args()
replace_backticks(args.source_path, args.docs_path)
| #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ast
import glob
import re
def list_functions(source_glob):
"""
List all of the functions and classes defined
"""
defined = []
# Iterate through each source file
for sp in glob.glob(source_glob):
module_name = sp[:-3]
module_name = module_name.replace("/", ".")
# Parse the source file into an AST
node = ast.parse(open(sp).read())
# Extract the names of all functions and classes defined in this file
defined.extend(
(n.name, module_name + "." + n.name)
for n in node.body
if (isinstance(n, ast.FunctionDef) or isinstance(n, ast.ClassDef))
)
return defined
def replace_backticks(source_path, docs_path):
markdown_glob = docs_path + "/*.md"
source_glob = source_path + "/**/*.py"
methods = list_functions(source_glob)
for f in glob.glob(markdown_glob):
for n, m in methods:
# Match backquoted mentions of the function/class name which are
# not already links
pattern = "(?<![[`])(`" + n + "`)"
link = f"[`{n}`](/api/{m.split('.')[1]}.html#{m})"
lines = open(f).readlines()
for i, l in enumerate(lines):
match = re.search(pattern, l)
if match:
print(f"{f}:{i+1} s/{match.group(0)}/{link}")
lines[i] = re.sub(pattern, link, l)
open(f, "w").writelines(lines)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="""In markdown docs, replace backtick-quoted names of
objects exported from Ax with links to the API docs."""
)
parser.add_argument(
"--source_path",
metavar="source_path",
required=True,
help="Path to source files (e.g. 'ax/').",
)
    parser.add_argument(
        "--docs_path", type=str, required=True, help="Path to docs (e.g. 'docs/')."
)
args = parser.parse_args()
replace_backticks(args.source_path, args.docs_path)
|
import argparse
from enum import Enum
from random import random
import re
import input
import debug
import math
import util
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from solve import Premise
def display_start() -> None:
p = argparse.ArgumentParser()
p.add_argument("--file", help="Parse from a file")
    p.add_argument("--debug", help="Enable debugging", action='store_true')
args = p.parse_args()
if args.file:
print(f"Importing statements from {args.file}...\n")
input.read_from_file(args.file)
else:
input.read_from_input()
if args.debug:
debug.DEBUG = True
class StepType(Enum):
P=0 # Premise
A=1 # Assumption
OA=2 # Open Assumption
CA=3 # Close Assumption
EI=4 # Elimination implication
    II=5 # Introduction implication
CT=6 # Contradiction aka Introduction negation
IN=7 # Introduction negation
EN=8 # Elimination negation
IA=9 # Introduction and
EA=10 # Elimination and
RI=11 # Reiteration
class Step:
# TODO: implement rule numbers for the steps, hashmap maybe??
def __init__(self, premise: 'Premise', type: StepType, assumptions: list['Premise']=None):
self._premise = premise
premise = str(premise).replace("[", "(").replace("]", ")").replace("'", "")
self.premise = premise
self._type = type
self._assumptions = assumptions
def get(self):
if self._type == StepType.P:
return "[ass.]"
elif self._type == StepType.A:
return "[ass.]"
elif self._type == StepType.EI:
return "[E→, "
elif self._type == StepType.II:
return "[I→, "
elif self._type == StepType.CT:
return "[I¬, "
elif self._type == StepType.IN:
return "[I¬, "
elif self._type == StepType.EN:
return "[E¬, "
elif self._type == StepType.IA:
return "[I^, "
elif self._type == StepType.EA:
return "[E^, "
elif self._type == StepType.RI:
return f"[reit.,#{self._premise.id}]"
return self._type
def __repr__(self):
return f"{self.premise} {self.get()}"
class NaturalDeductionTree:
def __init__(self, statement: str):
self.steps: list[Step] = []
        self.statement = statement.replace(":-", "⊢")
        self.statement = self.statement.replace("!", "¬")
def add(self, step: Step):
self.steps.append(step)
    def get_premise(self, id: int, r: list[tuple[str, 'Premise']]) -> int | None:
for i, (_, a) in enumerate(r):
print(a.id, id)
if a.id == id:
return i
def close(self):
result: list[(str, 'Premise')] = []
r = str(random())
level = 1
line = 1
max_lines = len(str(len(self.steps)))
max_prepend = ' ' * max_lines
for i, step in enumerate(self.steps):
# Change current level if open assumption or close assumption
if step._type == StepType.OA:
level += 1
continue
elif step._type == StepType.CA:
level -= 1
continue
lines = f"{line}{" " * (max_lines - len(str(line)))}"
if isinstance(step.premise, str):
premise = util.raw_to_str(step.premise)
else:
premise = util.cleanup(str(step.premise))
premise = premise.replace("!", "¬")
if step._type == StepType.CT:
premise = "⊥"
raw_step = step.get()
while match := re.search(r"#(\d+)", raw_step):
id = match.group(1)
i = self.get_premise(int(id), result)
raw_step = raw_step[:match.start()]+str(i+1)+raw_step[match.end():]
break
string = f"{lines}{" │ " * level}{premise}{r}_{raw_step}\n"
# Open assumption so draw a line
if step._type == StepType.A and i-1 >= 0 and self.steps[i-1]._type == StepType.OA:
string += f"{max_prepend}{" │ " * (level-1)} ├{"─"*len(premise)}\n"
            # If it's the last premise, draw a line
elif step._type == StepType.P and i+1 != len(self.steps) and self.steps[i+1]._type != StepType.P:
string += f"{max_prepend}{" │ " * (level-1)} ├{"─"*(len(premise)+1)}\n"
            # Same as above, but also covers the case where the premise is the very last step
elif step._type == StepType.P and i+1 == len(self.steps):
string += f"{max_prepend}{" │ " * (level-1)} ├{"─"*(len(premise)+1)}\n"
result.append((string, step._premise))
line += 1
max_len = max([len(x[0].split("\n")[0]) for x in result])
p = self.statement+"\n\n"
for string, premise in result:
s = string.split("\n")[0]
l = len(s)
l2 = len(s.split("_")[1])
            # Align all rule annotations to the right in the same column
replaceable = " " * (4 + max_len - l + l2)
string = string.replace(r+"_", replaceable)
p += string
# TODO: better way of printing?
print(p) | import argparse
from enum import Enum
from random import random
import re
import input
import debug
import math
import util
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from solve import Premise
def display_start() -> None:
p = argparse.ArgumentParser()
p.add_argument("--file", help="Parse from a file")
    p.add_argument("--debug", help="Enable debugging", action='store_true')
args = p.parse_args()
if args.file:
print(f"Importing statements from {args.file}...\n")
input.read_from_file(args.file)
else:
input.read_from_input()
if args.debug:
debug.DEBUG = True
class StepType(Enum):
P=0 # Premise
A=1 # Assumption
OA=2 # Open Assumption
CA=3 # Close Assumption
EI=4 # Elimination implication
    II=5 # Introduction implication
CT=6 # Contradiction aka Introduction negation
IN=7 # Introduction negation
EN=8 # Elimination negation
IA=9 # Introduction and
EA=10 # Elimination and
RI=11 # Reiteration
class Step:
# TODO: implement rule numbers for the steps, hashmap maybe??
def __init__(self, premise: 'Premise', type: StepType, assumptions: list['Premise']=None):
self._premise = premise
premise = str(premise).replace("[", "(").replace("]", ")").replace("'", "")
self.premise = premise
self._type = type
self._assumptions = assumptions
def get(self):
if self._type == StepType.P:
return "[ass.]"
elif self._type == StepType.A:
return "[ass.]"
elif self._type == StepType.EI:
return "[E→, "
elif self._type == StepType.II:
return "[I→, "
elif self._type == StepType.CT:
return "[I¬, "
elif self._type == StepType.IN:
return "[I¬, "
elif self._type == StepType.EN:
return "[E¬, "
elif self._type == StepType.IA:
return "[I^, "
elif self._type == StepType.EA:
return "[E^, "
elif self._type == StepType.RI:
return f"[reit.,#{self._premise.id}]"
return self._type
def __repr__(self):
return f"{self.premise} {self.get()}"
class NaturalDeductionTree:
def __init__(self, statement: str):
self.steps: list[Step] = []
        self.statement = statement.replace(":-", "⊢")
        self.statement = self.statement.replace("!", "¬")
def add(self, step: Step):
self.steps.append(step)
def get_premise(self, id: int, r: list[(str, 'Premise')]) -> 'Premise':
for i, (_, a) in enumerate(r):
print(a.id, id)
if a.id == id:
return i
def close(self):
result: list[(str, 'Premise')] = []
r = str(random())
level = 1
line = 1
max_lines = len(str(len(self.steps)))
max_prepend = ' ' * max_lines
for i, step in enumerate(self.steps):
# Change current level if open assumption or close assumption
if step._type == StepType.OA:
level += 1
continue
elif step._type == StepType.CA:
level -= 1
continue
lines = f"{line}{' ' * (max_lines - len(str(line)))}"
if isinstance(step.premise, str):
premise = util.raw_to_str(step.premise)
else:
premise = util.cleanup(str(step.premise))
premise = premise.replace("!", "¬")
if step._type == StepType.CT:
premise = "⊥"
raw_step = step.get()
while match := re.search(r"#(\d+)", raw_step):
id = match.group(1)
i = self.get_premise(int(id), result)
raw_step = raw_step[:match.start()]+str(i+1)+raw_step[match.end():]
break
string = f"{lines}{' │ ' * level}{premise}{r}_{raw_step}\n"
# Open assumption so draw a line
if step._type == StepType.A and i-1 >= 0 and self.steps[i-1]._type == StepType.OA:
string += f"{max_prepend}{' │ ' * (level-1)} ├{'─'*len(premise)}\n"
            # If it's the last premise, draw a line
elif step._type == StepType.P and i+1 != len(self.steps) and self.steps[i+1]._type != StepType.P:
string += f"{max_prepend}{' │ ' * (level-1)} ├{'─'*(len(premise)+1)}\n"
            # Same as above, but also covers the case where the premise is the very last step
elif step._type == StepType.P and i+1 == len(self.steps):
string += f"{max_prepend}{' │ ' * (level-1)} ├{'─'*(len(premise)+1)}\n"
result.append((string, step._premise))
line += 1
max_len = max([len(x[0].split("\n")[0]) for x in result])
p = self.statement+"\n\n"
for string, premise in result:
s = string.split("\n")[0]
l = len(s)
l2 = len(s.split("_")[1])
            # Align all rule annotations to the right in the same column
replaceable = " " * (4 + max_len - l + l2)
string = string.replace(r+"_", replaceable)
p += string
# TODO: better way of printing?
print(p) |
from transformers import LongformerTokenizer, EncoderDecoderModel
from .base_single_doc_model import SingleDocSummModel
class LongformerModel(SingleDocSummModel):
# static variables
model_name = "Longformer"
is_extractive = False
is_neural = True
def __init__(self):
super(LongformerModel, self).__init__(
trained_domain="News", max_input_length=4096, max_output_length=None
)
self.model = EncoderDecoderModel.from_pretrained(
"patrickvonplaten/longformer2roberta-cnn_dailymail-fp16"
)
self.tokenizer = LongformerTokenizer.from_pretrained(
"allenai/longformer-base-4096"
)
def summarize(self, corpus, queries=None):
self.assert_summ_input_type(corpus, queries)
summaries = list(map(lambda doc: self.summarize_single(doc), corpus))
return summaries
def summarize_single(self, document):
# Tokenizes document and returns PyTorch torch.Tensor object with length attribute
tokenized_sequence = self.tokenizer(
document,
return_tensors="pt",
return_length=True,
truncation=True,
max_length=4096,
)
print(
f"Longformer model: processing document of {tokenized_sequence.length} tokens"
)
input_ids = tokenized_sequence.input_ids
# output_ids is tensor with one layer: output_ids[0] extracts tensor layer for decoding
output_ids = self.model.generate(input_ids)
return self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
@classmethod
def show_capability(cls) -> None:
basic_description = cls.generate_basic_description()
more_details = (
"A Longformer2Roberta model finetuned on CNN-DM dataset for summarization.\n\n"
"Strengths:\n - Correctly handles longer (> 2000 tokens) corpus.\n\n"
"Weaknesses:\n - Less accurate on contexts outside training domain.\n\n"
"Initialization arguments:\n "
" - `corpus`: Unlabelled corpus of documents.\n"
)
print(f"{basic_description} \n {"#"*20} \n {more_details}")
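# --- Illustrative only (not part of the original module) ---
# Usage sketch for the wrapper above; instantiating it downloads the pretrained
# weights, and the document string below is a made-up example input.
def _demo_longformer_summarize():
    model = LongformerModel()
    corpus = ["A very long news article would normally go here ..."]
    return model.summarize(corpus)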
| from transformers import LongformerTokenizer, EncoderDecoderModel
from .base_single_doc_model import SingleDocSummModel
class LongformerModel(SingleDocSummModel):
# static variables
model_name = "Longformer"
is_extractive = False
is_neural = True
def __init__(self):
super(LongformerModel, self).__init__(
trained_domain="News", max_input_length=4096, max_output_length=None
)
self.model = EncoderDecoderModel.from_pretrained(
"patrickvonplaten/longformer2roberta-cnn_dailymail-fp16"
)
self.tokenizer = LongformerTokenizer.from_pretrained(
"allenai/longformer-base-4096"
)
def summarize(self, corpus, queries=None):
self.assert_summ_input_type(corpus, queries)
summaries = list(map(lambda doc: self.summarize_single(doc), corpus))
return summaries
def summarize_single(self, document):
# Tokenizes document and returns PyTorch torch.Tensor object with length attribute
tokenized_sequence = self.tokenizer(
document,
return_tensors="pt",
return_length=True,
truncation=True,
max_length=4096,
)
print(
f"Longformer model: processing document of {tokenized_sequence.length} tokens"
)
input_ids = tokenized_sequence.input_ids
# output_ids is tensor with one layer: output_ids[0] extracts tensor layer for decoding
output_ids = self.model.generate(input_ids)
return self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
@classmethod
def show_capability(cls) -> None:
basic_description = cls.generate_basic_description()
more_details = (
"A Longformer2Roberta model finetuned on CNN-DM dataset for summarization.\n\n"
"Strengths:\n - Correctly handles longer (> 2000 tokens) corpus.\n\n"
"Weaknesses:\n - Less accurate on contexts outside training domain.\n\n"
"Initialization arguments:\n "
" - `corpus`: Unlabelled corpus of documents.\n"
)
print(f"{basic_description} \n {'#'*20} \n {more_details}")
|
# -*- coding: utf-8 -*-
import asyncio
import aiofiles
import aiohttp
import orjson
from pathlib import Path
from cmyui import log, Ansi
from constants.gamemodes import GameMode
from constants.mods import Mods
__all__ = 'PPCalculator',
BEATMAPS_PATH = Path.cwd() / '.data/osu'
class PPCalculator:
"""Asynchronously wraps the process of calculating difficulty in osu!."""
def __init__(self, map_id: int, **kwargs) -> None:
# NOTE: this constructor should not be called
# unless you are CERTAIN the map is on disk
# for normal usage, use the classmethods
self.file = f'.data/osu/{map_id}.osu'
self.mods = kwargs.get('mods', Mods.NOMOD)
self.combo = kwargs.get('combo', 0)
self.nmiss = kwargs.get('nmiss', 0)
self.mode = kwargs.get('mode', GameMode.vn_std)
self.acc = kwargs.get('acc', 100.00)
@staticmethod
async def get_from_osuapi(map_id: int, dest_path: Path) -> bool:
url = f'https://old.ppy.sh/osu/{map_id}'
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
if not r or r.status != 200:
log(f'Could not find map by id {map_id}!', Ansi.LRED)
return False
content = await r.read()
async with aiofiles.open(dest_path, 'wb') as f:
await f.write(content)
return True
@classmethod
    async def get_file(cls, map_id: int) -> Path | None:
path = BEATMAPS_PATH / f'{map_id}.osu'
# check if file exists on disk already
if not path.exists():
# not found on disk, try osu!api
if not await cls.get_from_osuapi(map_id, path):
# failed to find the map
return
# map is now on disk, return filepath.
return path
@classmethod
async def from_id(cls, map_id: int, **kwargs):
# ensure we have the file on disk for recalc
if not await cls.get_file(map_id):
return
return cls(map_id, **kwargs)
async def perform(self) -> tuple[float, float]:
"""Perform the calculations with the current state, returning (pp, sr)."""
# TODO: PLEASE rewrite this with c bindings,
# add ways to get specific stuff like aim pp
# for now, we'll generate a bash command and
# use subprocess to do the calculations (yikes).
cmd = [f'./oppai-ng/oppai {self.file}']
if self.mods: cmd.append(f'+{self.mods!r}')
if self.combo: cmd.append(f'{self.combo}x')
if self.nmiss: cmd.append(f'{self.nmiss}xM')
if self.acc: cmd.append(f'{self.acc:.4f}%')
if self.mode:
mode_vn = self.mode.as_vanilla
if mode_vn not in (0, 1):
# oppai-ng only supports std & taiko
# TODO: osu!catch & mania support
return
cmd.append(f'-m{mode_vn}')
if mode_vn == GameMode.vn_taiko:
cmd.append('-otaiko')
# XXX: could probably use binary to save a bit
# of time.. but in reality i should just write
# some bindings lmao this is so cursed overall
cmd.append('-ojson')
# join & run the command
pipe = asyncio.subprocess.PIPE
proc = await asyncio.create_subprocess_shell(
' '.join(cmd), stdout=pipe, stderr=pipe
)
stdout, _ = await proc.communicate() # stderr not needed
output = orjson.loads(stdout.decode())
if 'code' not in output or output['code'] != 200:
log(f"oppai-ng: {output["errstr"]}", Ansi.LRED)
await proc.wait() # wait for exit
return output['pp'], output['stars']
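# --- Illustrative only (not part of the original module) ---
# Usage sketch for PPCalculator; the map id, mods and accuracy below are made-up
# example values, and the map must be fetchable from disk or the osu!api.
async def _demo_pp_calculator(map_id: int = 75):
    pp_calc = await PPCalculator.from_id(map_id, mods=Mods.NOMOD, acc=98.5)
    if not pp_calc:
        return None  # map file could not be obtained
    return await pp_calc.perform()  # -> (pp, stars)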
| # -*- coding: utf-8 -*-
import asyncio
import aiofiles
import aiohttp
import orjson
from pathlib import Path
from cmyui import log, Ansi
from constants.gamemodes import GameMode
from constants.mods import Mods
__all__ = 'PPCalculator',
BEATMAPS_PATH = Path.cwd() / '.data/osu'
class PPCalculator:
"""Asynchronously wraps the process of calculating difficulty in osu!."""
def __init__(self, map_id: int, **kwargs) -> None:
# NOTE: this constructor should not be called
# unless you are CERTAIN the map is on disk
# for normal usage, use the classmethods
self.file = f'.data/osu/{map_id}.osu'
self.mods = kwargs.get('mods', Mods.NOMOD)
self.combo = kwargs.get('combo', 0)
self.nmiss = kwargs.get('nmiss', 0)
self.mode = kwargs.get('mode', GameMode.vn_std)
self.acc = kwargs.get('acc', 100.00)
@staticmethod
async def get_from_osuapi(map_id: int, dest_path: Path) -> bool:
url = f'https://old.ppy.sh/osu/{map_id}'
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
if not r or r.status != 200:
log(f'Could not find map by id {map_id}!', Ansi.LRED)
return False
content = await r.read()
async with aiofiles.open(dest_path, 'wb') as f:
await f.write(content)
return True
@classmethod
async def get_file(cls, map_id: int) -> None:
path = BEATMAPS_PATH / f'{map_id}.osu'
# check if file exists on disk already
if not path.exists():
# not found on disk, try osu!api
if not await cls.get_from_osuapi(map_id, path):
# failed to find the map
return
# map is now on disk, return filepath.
return path
@classmethod
async def from_id(cls, map_id: int, **kwargs):
# ensure we have the file on disk for recalc
if not await cls.get_file(map_id):
return
return cls(map_id, **kwargs)
async def perform(self) -> tuple[float, float]:
"""Perform the calculations with the current state, returning (pp, sr)."""
# TODO: PLEASE rewrite this with c bindings,
# add ways to get specific stuff like aim pp
# for now, we'll generate a bash command and
# use subprocess to do the calculations (yikes).
cmd = [f'./oppai-ng/oppai {self.file}']
if self.mods: cmd.append(f'+{self.mods!r}')
if self.combo: cmd.append(f'{self.combo}x')
if self.nmiss: cmd.append(f'{self.nmiss}xM')
if self.acc: cmd.append(f'{self.acc:.4f}%')
if self.mode:
mode_vn = self.mode.as_vanilla
if mode_vn not in (0, 1):
# oppai-ng only supports std & taiko
# TODO: osu!catch & mania support
return
cmd.append(f'-m{mode_vn}')
if mode_vn == GameMode.vn_taiko:
cmd.append('-otaiko')
# XXX: could probably use binary to save a bit
# of time.. but in reality i should just write
# some bindings lmao this is so cursed overall
cmd.append('-ojson')
# join & run the command
pipe = asyncio.subprocess.PIPE
proc = await asyncio.create_subprocess_shell(
' '.join(cmd), stdout=pipe, stderr=pipe
)
stdout, _ = await proc.communicate() # stderr not needed
output = orjson.loads(stdout.decode())
if 'code' not in output or output['code'] != 200:
log(f"oppai-ng: {output['errstr']}", Ansi.LRED)
await proc.wait() # wait for exit
return output['pp'], output['stars']
|
# Copyright The IETF Trust 2007-2020, All Rights Reserved
# -*- coding: utf-8 -*-
# old meeting models can be found in ../proceedings/models.py
import datetime
import io
import os
import pytz
import random
import re
import string
from collections import namedtuple
from pathlib import Path
from urllib.parse import urljoin
import debug # pyflakes:ignore
from django.core.validators import MinValueValidator, RegexValidator
from django.db import models
from django.db.models import Max, Subquery, OuterRef, TextField, Value, Q
from django.db.models.functions import Coalesce
from django.conf import settings
from django.urls import reverse as urlreverse
from django.utils.text import slugify
from django.utils.safestring import mark_safe
from ietf.dbtemplate.models import DBTemplate
from ietf.doc.models import Document
from ietf.group.models import Group
from ietf.group.utils import can_manage_materials
from ietf.name.models import (
MeetingTypeName, TimeSlotTypeName, SessionStatusName, ConstraintName, RoomResourceName,
ImportantDateName, TimerangeName, SlideSubmissionStatusName, ProceedingsMaterialTypeName,
SessionPurposeName,
)
from ietf.person.models import Person
from ietf.utils.decorators import memoize
from ietf.utils.storage import NoLocationMigrationFileSystemStorage
from ietf.utils.text import xslugify
from ietf.utils.timezone import date2datetime
from ietf.utils.models import ForeignKey
from ietf.utils.validators import (
MaxImageSizeValidator, WrappedValidator, validate_file_size, validate_mime_type,
validate_file_extension,
)
from ietf.utils.fields import MissingOkImageField
from ietf.utils.log import unreachable
countries = list(pytz.country_names.items())
countries.sort(key=lambda x: x[1])
timezones = []
for name in pytz.common_timezones:
tzfn = os.path.join(settings.TZDATA_ICS_PATH, name + ".ics")
if not os.path.islink(tzfn):
timezones.append((name, name))
timezones.sort()
# this is used in models to format dates, as the built-in json serializer
# can not deal with them, and the django provided serializer is inaccessible.
from django.utils import datetime_safe
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
def fmt_date(o):
d = datetime_safe.new_date(o)
return d.strftime(DATE_FORMAT)
class Meeting(models.Model):
# number is either the number for IETF meetings, or some other
# identifier for interim meetings/IESG retreats/liaison summits/...
number = models.CharField(unique=True, max_length=64)
type = ForeignKey(MeetingTypeName)
# Date is useful when generating a set of timeslot for this meeting, but
# is not used to determine date for timeslot instances thereafter, as
# they have their own datetime field.
date = models.DateField()
days = models.IntegerField(default=7, null=False, validators=[MinValueValidator(1)],
help_text="The number of days the meeting lasts")
city = models.CharField(blank=True, max_length=255)
country = models.CharField(blank=True, max_length=2, choices=countries)
# We can't derive time-zone from country, as there are some that have
# more than one timezone, and the pytz module doesn't provide timezone
# lookup information for all relevant city/country combinations.
time_zone = models.CharField(blank=True, max_length=255, choices=timezones)
idsubmit_cutoff_day_offset_00 = models.IntegerField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_DAY_OFFSET_00,
help_text = "The number of days before the meeting start date when the submission of -00 drafts will be closed.")
idsubmit_cutoff_day_offset_01 = models.IntegerField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_DAY_OFFSET_01,
help_text = "The number of days before the meeting start date when the submission of -01 drafts etc. will be closed.")
idsubmit_cutoff_time_utc = models.DurationField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_TIME_UTC,
help_text = "The time of day (UTC) after which submission will be closed. Use for example 23:59:59.")
idsubmit_cutoff_warning_days = models.DurationField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_WARNING_DAYS,
help_text = "How long before the 00 cutoff to start showing cutoff warnings. Use for example '21' or '21 days'.")
submission_start_day_offset = models.IntegerField(blank=True,
default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_START_DAYS,
help_text = "The number of days before the meeting start date after which meeting materials will be accepted.")
submission_cutoff_day_offset = models.IntegerField(blank=True,
default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_CUTOFF_DAYS,
help_text = "The number of days after the meeting start date in which new meeting materials will be accepted.")
submission_correction_day_offset = models.IntegerField(blank=True,
default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_CORRECTION_DAYS,
help_text = "The number of days after the meeting start date in which updates to existing meeting materials will be accepted.")
venue_name = models.CharField(blank=True, max_length=255)
venue_addr = models.TextField(blank=True)
break_area = models.CharField(blank=True, max_length=255)
reg_area = models.CharField(blank=True, max_length=255)
agenda_info_note = models.TextField(blank=True, help_text="Text in this field will be placed at the top of the html agenda page for the meeting. HTML can be used, but will not be validated.")
agenda_warning_note = models.TextField(blank=True, help_text="Text in this field will be placed more prominently at the top of the html agenda page for the meeting. HTML can be used, but will not be validated.")
schedule = ForeignKey('Schedule',null=True,blank=True, related_name='+')
session_request_lock_message = models.CharField(blank=True,max_length=255) # locked if not empty
proceedings_final = models.BooleanField(default=False, help_text="Are the proceedings for this meeting complete?")
acknowledgements = models.TextField(blank=True, help_text="Acknowledgements for use in meeting proceedings. Use ReStructuredText markup.")
overview = ForeignKey(DBTemplate, related_name='overview', null=True, editable=False)
show_important_dates = models.BooleanField(default=False)
attendees = models.IntegerField(blank=True, null=True, default=None,
help_text="Number of Attendees for backfilled meetings, leave it blank for new meetings, and then it is calculated from the registrations")
group_conflict_types = models.ManyToManyField(
ConstraintName, blank=True, limit_choices_to=dict(is_group_conflict=True),
help_text='Types of scheduling conflict between groups to consider')
def __str__(self):
if self.type_id == "ietf":
return u"IETF-%s" % (self.number)
else:
return self.number
def get_meeting_date (self,offset):
return self.date + datetime.timedelta(days=offset)
def end_date(self):
return self.get_meeting_date(self.days-1)
def get_00_cutoff(self):
start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day, tzinfo=pytz.utc)
importantdate = self.importantdate_set.filter(name_id='idcutoff').first()
if not importantdate:
importantdate = self.importantdate_set.filter(name_id='00cutoff').first()
if importantdate:
cutoff_date = importantdate.date
else:
cutoff_date = start_date + datetime.timedelta(days=ImportantDateName.objects.get(slug='idcutoff').default_offset_days)
cutoff_time = date2datetime(cutoff_date) + self.idsubmit_cutoff_time_utc
return cutoff_time
def get_01_cutoff(self):
start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day, tzinfo=pytz.utc)
importantdate = self.importantdate_set.filter(name_id='idcutoff').first()
if not importantdate:
importantdate = self.importantdate_set.filter(name_id='01cutoff').first()
if importantdate:
cutoff_date = importantdate.date
else:
cutoff_date = start_date + datetime.timedelta(days=ImportantDateName.objects.get(slug='idcutoff').default_offset_days)
cutoff_time = date2datetime(cutoff_date) + self.idsubmit_cutoff_time_utc
return cutoff_time
def get_reopen_time(self):
start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day)
local_tz = pytz.timezone(self.time_zone)
local_date = local_tz.localize(start_date)
cutoff = self.get_00_cutoff()
if cutoff.date() == start_date:
# no cutoff, so no local-time re-open
reopen_time = cutoff
else:
# reopen time is in local timezone. May need policy change?? XXX
reopen_time = local_date + self.idsubmit_cutoff_time_utc
return reopen_time
@classmethod
def get_current_meeting(cls, type="ietf"):
return cls.objects.filter(type=type, date__gte=datetime.datetime.today()-datetime.timedelta(days=7) ).order_by('date').first()
def get_first_cut_off(self):
return self.get_00_cutoff()
def get_second_cut_off(self):
return self.get_01_cutoff()
def get_ietf_monday(self):
for offset in range(self.days):
date = self.date+datetime.timedelta(days=offset)
if date.weekday() == 0: # Monday is 0
return date
def get_materials_path(self):
return os.path.join(settings.AGENDA_PATH,self.number)
# the various dates are currently computed
def get_submission_start_date(self):
return self.date - datetime.timedelta(days=self.submission_start_day_offset)
def get_submission_cut_off_date(self):
importantdate = self.importantdate_set.filter(name_id='procsub').first()
if importantdate:
return importantdate.date
else:
return self.date + datetime.timedelta(days=self.submission_cutoff_day_offset)
def get_submission_correction_date(self):
importantdate = self.importantdate_set.filter(name_id='revsub').first()
if importantdate:
return importantdate.date
else:
return self.date + datetime.timedelta(days=self.submission_correction_day_offset)
def enabled_constraint_names(self):
return ConstraintName.objects.filter(
Q(is_group_conflict=False) # any non-group-conflict constraints
| Q(is_group_conflict=True, meeting=self) # or specifically enabled for this meeting
)
def enabled_constraints(self):
return self.constraint_set.filter(name__in=self.enabled_constraint_names())
def get_schedule_by_name(self, name):
return self.schedule_set.filter(name=name).first()
def get_number(self):
"Return integer meeting number for ietf meetings, rather than strings."
if self.number.isdigit():
return int(self.number)
else:
return None
def get_proceedings_materials(self):
"""Get proceedings materials"""
return self.proceedings_materials.filter(
document__states__slug='active', document__states__type_id='procmaterials'
).order_by('type__order')
def get_attendance(self):
"""Get the meeting attendance from the MeetingRegistrations
Returns a NamedTuple with onsite and online attributes. Returns None if the record is unavailable
for this meeting.
"""
number = self.get_number()
if number is None or number < 110:
return None
Attendance = namedtuple('Attendance', 'onsite online')
return Attendance(
onsite=Person.objects.filter(
meetingregistration__meeting=self,
meetingregistration__attended=True,
meetingregistration__reg_type__contains='in_person',
).distinct().count(),
online=Person.objects.filter(
meetingregistration__meeting=self,
meetingregistration__attended=True,
meetingregistration__reg_type__contains='remote',
).distinct().count(),
)
@property
def proceedings_format_version(self):
"""Indicate version of proceedings that should be used for this meeting
Only makes sense for IETF meeting. Returns None for any meeting without a purely numeric number.
Uses settings.PROCEEDINGS_VERSION_CHANGES. Versions start at 1. Entries
in the array are the first meeting number using each version.
"""
if not hasattr(self, '_proceedings_format_version'):
if not self.number.isdigit():
version = None # no version for non-IETF meeting
else:
version = len(settings.PROCEEDINGS_VERSION_CHANGES) # start assuming latest version
mtg_number = self.get_number()
if mtg_number is None:
unreachable('2021-08-10')
else:
# Find the index of the first entry in the version change array that
# is >= this meeting's number. The first entry in the array is 0, so the
# version is always >= 1 for positive meeting numbers.
for vers, threshold in enumerate(settings.PROCEEDINGS_VERSION_CHANGES):
if mtg_number < threshold:
version = vers
break
self._proceedings_format_version = version # save this for later
return self._proceedings_format_version
@property
def session_constraintnames(self):
"""Gets a list of the constraint names that should be used for this meeting
Anticipated that this will soon become a many-to-many relationship with ConstraintName
(see issue #2770). Making this a @property allows use of the .all(), .filter(), etc,
so that other code should not need changes when this is replaced.
"""
try:
mtg_num = int(self.number)
except ValueError:
mtg_num = None # should not come up, but this method should not fail
if mtg_num is None or mtg_num >= 106:
# These meetings used the old 'conflic?' constraint types labeled as though
# they were the new types.
slugs = ('chair_conflict', 'tech_overlap', 'key_participant')
else:
slugs = ('conflict', 'conflic2', 'conflic3')
return ConstraintName.objects.filter(slug__in=slugs)
def base_url(self):
return "/meeting/%s" % (self.number, )
def build_timeslices(self):
"""Get unique day/time/timeslot data for meeting
Returns a list of days, time intervals for each day, and timeslots for each day,
with repeated days/time intervals removed. Ignores timeslots that do not have a
location. The slots return value contains only one TimeSlot for each distinct
time interval.
"""
days = [] # the days of the meetings
time_slices = {} # the times on each day
slots = {}
for ts in self.timeslot_set.all():
if ts.location_id is None:
continue
ymd = ts.time.date()
if ymd not in time_slices:
time_slices[ymd] = []
slots[ymd] = []
days.append(ymd)
if ymd in time_slices:
# only keep unique entries
if [ts.time, ts.time + ts.duration, ts.duration.seconds] not in time_slices[ymd]:
time_slices[ymd].append([ts.time, ts.time + ts.duration, ts.duration.seconds])
slots[ymd].append(ts)
days.sort()
for ymd in time_slices:
# Make sure these sort the same way
time_slices[ymd].sort()
slots[ymd].sort(key=lambda x: (x.time, x.duration))
return days,time_slices,slots
# this functions makes a list of timeslices and rooms, and
# makes sure that all schedules have all of them.
# def create_all_timeslots(self):
# alltimeslots = self.timeslot_set.all()
# for sched in self.schedule_set.all():
# ts_hash = {}
# for ss in sched.assignments.all():
# ts_hash[ss.timeslot] = ss
# for ts in alltimeslots:
# if not (ts in ts_hash):
# SchedTimeSessAssignment.objects.create(schedule = sched,
# timeslot = ts)
def vtimezone(self):
if self.time_zone:
try:
tzfn = os.path.join(settings.TZDATA_ICS_PATH, self.time_zone + ".ics")
if os.path.exists(tzfn):
with io.open(tzfn) as tzf:
icstext = tzf.read()
vtimezone = re.search("(?sm)(\nBEGIN:VTIMEZONE.*\nEND:VTIMEZONE\n)", icstext).group(1).strip()
if vtimezone:
vtimezone += "\n"
return vtimezone
except IOError:
pass
return ''
def set_official_schedule(self, schedule):
if self.schedule != schedule:
self.schedule = schedule
self.save()
def updated(self):
min_time = datetime.datetime(1970, 1, 1, 0, 0, 0) # should be Meeting.modified, but we don't have that
timeslots_updated = self.timeslot_set.aggregate(Max('modified'))["modified__max"] or min_time
sessions_updated = self.session_set.aggregate(Max('modified'))["modified__max"] or min_time
assignments_updated = min_time
if self.schedule:
assignments_updated = SchedTimeSessAssignment.objects.filter(schedule__in=[self.schedule, self.schedule.base if self.schedule else None]).aggregate(Max('modified'))["modified__max"] or min_time
ts = max(timeslots_updated, sessions_updated, assignments_updated)
tz = pytz.timezone(settings.PRODUCTION_TIMEZONE)
ts = tz.localize(ts)
return ts
@memoize
def previous_meeting(self):
return Meeting.objects.filter(type_id=self.type_id,date__lt=self.date).order_by('-date').first()
class Meta:
ordering = ["-date", "-id"]
indexes = [
models.Index(fields=['-date', '-id']),
]
# === Rooms, Resources, Floorplans =============================================
class ResourceAssociation(models.Model):
name = ForeignKey(RoomResourceName)
icon = models.CharField(max_length=64) # icon to be found in /static/img
desc = models.CharField(max_length=256)
def __str__(self):
return self.desc
class Room(models.Model):
meeting = ForeignKey(Meeting)
modified = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=255)
functional_name = models.CharField(max_length=255, blank = True)
capacity = models.IntegerField(null=True, blank=True)
resources = models.ManyToManyField(ResourceAssociation, blank = True)
session_types = models.ManyToManyField(TimeSlotTypeName, blank = True)
# floorplan-related properties
floorplan = ForeignKey('FloorPlan', null=True, blank=True, default=None)
# floorplan: room pixel position : (0,0) is top left of image, (xd, yd)
# is room width, height.
x1 = models.SmallIntegerField(null=True, blank=True, default=None)
y1 = models.SmallIntegerField(null=True, blank=True, default=None)
x2 = models.SmallIntegerField(null=True, blank=True, default=None)
y2 = models.SmallIntegerField(null=True, blank=True, default=None)
# end floorplan-related stuff
def __str__(self):
return u"%s size: %s" % (self.name, self.capacity)
def delete_timeslots(self):
for ts in self.timeslot_set.all():
ts.sessionassignments.all().delete()
ts.delete()
def create_timeslots(self):
days, time_slices, slots = self.meeting.build_timeslices()
for day in days:
for ts in slots[day]:
TimeSlot.objects.create(type_id=ts.type_id,
meeting=self.meeting,
name=ts.name,
time=ts.time,
location=self,
duration=ts.duration)
#self.meeting.create_all_timeslots()
def dom_id(self):
return "room%u" % (self.pk)
# floorplan support
def floorplan_url(self):
mtg_num = self.meeting.get_number()
if not mtg_num:
return None
elif mtg_num <= settings.FLOORPLAN_LAST_LEGACY_MEETING:
base_url = settings.FLOORPLAN_LEGACY_BASE_URL.format(meeting=self.meeting)
elif self.floorplan:
base_url = urlreverse('ietf.meeting.views.floor_plan', kwargs=dict(num=mtg_num))
else:
return None
return f'{base_url}?room={xslugify(self.name)}'
def left(self):
return min(self.x1, self.x2) if (self.x1 and self.x2) else 0
def top(self):
return min(self.y1, self.y2) if (self.y1 and self.y2) else 0
def right(self):
return max(self.x1, self.x2) if (self.x1 and self.x2) else 0
def bottom(self):
return max(self.y1, self.y2) if (self.y1 and self.y2) else 0
def functional_display_name(self):
if not self.functional_name:
return ""
if 'breakout' in self.functional_name.lower():
return ""
if self.functional_name[0].isdigit():
return ""
return self.functional_name
# audio stream support
def audio_stream_url(self):
urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id == 'audiostream']
return urlresources[0].url if urlresources else None
def video_stream_url(self):
urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['meetecho']]
return urlresources[0].url if urlresources else None
def onsite_tool_url(self):
urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['meetecho_onsite']]
return urlresources[0].url if urlresources else None
def webex_url(self):
urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['webex']]
return urlresources[0].url if urlresources else None
#
class Meta:
ordering = ["-id"]
class UrlResource(models.Model):
"For things like audio stream urls, meetecho stream urls"
name = ForeignKey(RoomResourceName)
room = ForeignKey(Room)
url = models.URLField(null=True, blank=True)
def floorplan_path(instance, filename):
root, ext = os.path.splitext(filename)
return "%s/floorplan-%s-%s%s" % (settings.FLOORPLAN_MEDIA_DIR, instance.meeting.number, xslugify(instance.name), ext)
class FloorPlan(models.Model):
name = models.CharField(max_length=255)
short = models.CharField(max_length=3, default='')
modified= models.DateTimeField(auto_now=True)
meeting = ForeignKey(Meeting)
order = models.SmallIntegerField()
image = models.ImageField(storage=NoLocationMigrationFileSystemStorage(), upload_to=floorplan_path, blank=True, default=None)
#
class Meta:
ordering = ['-id',]
#
def __str__(self):
return u'floorplan-%s-%s' % (self.meeting.number, xslugify(self.name))
# === Schedules, Sessions, Timeslots and Assignments ===========================
class TimeSlot(models.Model):
"""
Everything that would appear on the meeting agenda of a meeting is
mapped to a timeslot, including breaks. Sessions are connected to
TimeSlots during scheduling.
"""
meeting = ForeignKey(Meeting)
type = ForeignKey(TimeSlotTypeName)
name = models.CharField(max_length=255)
time = models.DateTimeField()
duration = models.DurationField(default=datetime.timedelta(0))
location = ForeignKey(Room, blank=True, null=True)
show_location = models.BooleanField(default=True, help_text="Show location in agenda.")
sessions = models.ManyToManyField('Session', related_name='slots', through='SchedTimeSessAssignment', blank=True, help_text="Scheduled session, if any.")
modified = models.DateTimeField(auto_now=True)
#
@property
def session(self):
if not hasattr(self, "_session_cache"):
self._session_cache = self.sessions.filter(timeslotassignments__schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting else None]).first()
return self._session_cache
@property
def time_desc(self):
return "%s-%s" % (self.time.strftime("%H%M"), (self.time + self.duration).strftime("%H%M"))
def meeting_date(self):
return self.time.date()
def registration(self):
# below implements a object local cache
# it tries to find a timeslot of type registration which starts at the same time as this slot
# so that it can be shown at the top of the agenda.
if not hasattr(self, '_reg_info'):
try:
self._reg_info = TimeSlot.objects.get(meeting=self.meeting, time__month=self.time.month, time__day=self.time.day, type="reg")
except TimeSlot.DoesNotExist:
self._reg_info = None
return self._reg_info
def __str__(self):
location = self.get_location()
if not location:
location = u"(no location)"
return u"%s: %s-%s %s, %s" % (self.meeting.number, self.time.strftime("%m-%d %H:%M"), (self.time + self.duration).strftime("%H:%M"), self.name, location)
def end_time(self):
return self.time + self.duration
def get_hidden_location(self):
if not hasattr(self, '_cached_hidden_location'):
location = self.location
if location:
location = location.name
elif self.type_id == "reg":
location = self.meeting.reg_area
elif self.type_id == "break":
location = self.meeting.break_area
self._cached_hidden_location = location
return self._cached_hidden_location
def get_location(self):
return self.get_hidden_location() if self.show_location else ""
def get_functional_location(self):
name_parts = []
room = self.location
if room and room.functional_name:
name_parts.append(room.functional_name)
location = self.get_hidden_location()
if location:
name_parts.append(location)
return ' - '.join(name_parts)
def get_html_location(self):
if not hasattr(self, '_cached_html_location'):
self._cached_html_location = self.get_location()
if len(self._cached_html_location) > 8:
self._cached_html_location = mark_safe(self._cached_html_location.replace('/', '/<wbr>'))
else:
self._cached_html_location = mark_safe(self._cached_html_location.replace(' ', '&nbsp;'))
return self._cached_html_location
def tz(self):
if not hasattr(self, '_cached_tz'):
if self.meeting.time_zone:
self._cached_tz = pytz.timezone(self.meeting.time_zone)
else:
self._cached_tz = None
return self._cached_tz
def tzname(self):
if self.tz():
return self.tz().tzname(self.time)
else:
return ""
def utc_start_time(self):
if self.tz():
local_start_time = self.tz().localize(self.time)
return local_start_time.astimezone(pytz.utc)
else:
return None
def utc_end_time(self):
utc_start = self.utc_start_time()
# Add duration after converting start time, otherwise errors creep in around DST change
return None if utc_start is None else utc_start + self.duration
def local_start_time(self):
if self.tz():
return self.tz().localize(self.time)
else:
return None
def local_end_time(self):
local_start = self.local_start_time()
# Add duration after converting start time, otherwise errors creep in around DST change
return None if local_start is None else local_start + self.duration
@property
def js_identifier(self):
# this returns a unique identifier that is js happy.
# {{s.timeslot.time|date:'Y-m-d'}}_{{ s.timeslot.time|date:'Hi' }}"
# also must match:
# {{r|slugify}}_{{day}}_{{slot.0|date:'Hi'}}
dom_id="ts%u" % (self.pk)
if self.location is not None:
dom_id = self.location.dom_id()
return "%s_%s_%s" % (dom_id, self.time.strftime('%Y-%m-%d'), self.time.strftime('%H%M'))
def delete_concurrent_timeslots(self):
"""Delete all timeslots which are in the same time as this slot"""
# can not include duration in filter, because there is no support
# for having it a WHERE clause.
# below will delete self as well.
for ts in self.meeting.timeslot_set.filter(time=self.time).all():
if ts.duration!=self.duration:
continue
# now remove any schedule that might have been made to this
# timeslot.
ts.sessionassignments.all().delete()
ts.delete()
"""
Find a timeslot that comes next, in the same room. It must be on the same day,
and it must have a gap of less than 11 minutes. (10 is the spec)
"""
@property
def slot_to_the_right(self):
return self.meeting.timeslot_set.filter(
location = self.location, # same room!
type = self.type, # must be same type (usually session)
time__gt = self.time + self.duration, # must be after this session
time__lt = self.time + self.duration + datetime.timedelta(seconds=11*60)).first()
class Meta:
ordering = ["-time", "-id"]
indexes = [
models.Index(fields=['-time', '-id']),
]
# end of TimeSlot
class Schedule(models.Model):
"""
Each person may have multiple schedules saved.
A Schedule may be made visible, which means that it will show up in
public drop down menus, etc. It may also be made public, which means
that someone who knows about it by name/id would be able to reference
it. A non-visible, public schedule might be passed around by the
Secretariat to IESG members for review. Only the owner may edit the
schedule, others may copy it
"""
meeting = ForeignKey(Meeting, null=True, related_name='schedule_set')
name = models.CharField(max_length=64, blank=False, help_text="Letters, numbers and -:_ allowed.", validators=[RegexValidator(r'^[A-Za-z0-9-:_]*$')])
owner = ForeignKey(Person)
visible = models.BooleanField("Show in agenda list", default=True, help_text="Show in the list of possible agendas for the meeting.")
public = models.BooleanField(default=True, help_text="Allow others to see this agenda.")
badness = models.IntegerField(null=True, blank=True)
notes = models.TextField(blank=True)
origin = ForeignKey('Schedule', blank=True, null=True, on_delete=models.SET_NULL, related_name="+")
base = ForeignKey('Schedule', blank=True, null=True, on_delete=models.SET_NULL,
help_text="Sessions scheduled in the base schedule show up in this schedule too.", related_name="derivedschedule_set",
limit_choices_to={'base': None}) # prevent the inheritance from being more than one layer deep (no recursion)
def __str__(self):
return u"%s:%s(%s)" % (self.meeting, self.name, self.owner)
def base_url(self):
return "/meeting/%s/agenda/%s/%s" % (self.meeting.number, self.owner_email(), self.name)
# temporary property to pacify the places where Schedule.assignments is used
# @property
# def schedtimesessassignment_set(self):
# return self.assignments
#
# def url_edit(self):
# return "/meeting/%s/agenda/%s/edit" % (self.meeting.number, self.name)
#
# @property
# def relurl_edit(self):
# return self.url_edit("")
def owner_email(self):
return self.owner.email_address() or "noemail"
@property
def is_official(self):
return (self.meeting.schedule == self)
@property
def is_official_record(self):
return (self.is_official and
self.meeting.end_date() <= datetime.date.today() )
# returns a dictionary {group -> [schedtimesessassignment+]}
# and it has [] if the session is not placed.
# if there is more than one session for that group,
# then a list of them is returned (always a list)
@property
def official_token(self):
if self.is_official:
return "official"
else:
return "unofficial"
def delete_assignments(self):
self.assignments.all().delete()
@property
def qs_assignments_with_sessions(self):
return self.assignments.filter(session__isnull=False)
def qs_timeslots_in_use(self):
"""Get QuerySet containing timeslots used by the schedule"""
return TimeSlot.objects.filter(sessionassignments__schedule=self)
def qs_sessions_scheduled(self):
"""Get QuerySet containing sessions assigned to timeslots by this schedule"""
return Session.objects.filter(timeslotassignments__schedule=self)
def delete_schedule(self):
self.assignments.all().delete()
self.delete()
# to be renamed SchedTimeSessAssignments (stsa)
class SchedTimeSessAssignment(models.Model):
"""
This model provides an N:M relationship between Session and TimeSlot.
Each relationship is attached to the named schedule, which is owned by
a specific person/user.
"""
timeslot = ForeignKey('TimeSlot', null=False, blank=False, related_name='sessionassignments')
session = ForeignKey('Session', null=True, default=None, related_name='timeslotassignments', help_text="Scheduled session.")
schedule = ForeignKey('Schedule', null=False, blank=False, related_name='assignments')
extendedfrom = ForeignKey('self', null=True, default=None, help_text="Timeslot this session is an extension of.")
modified = models.DateTimeField(auto_now=True)
notes = models.TextField(blank=True)
badness = models.IntegerField(default=0, blank=True, null=True)
pinned = models.BooleanField(default=False, help_text="Do not move session during automatic placement.")
class Meta:
ordering = ["timeslot__time", "timeslot__type__slug", "session__group__parent__name", "session__group__acronym", "session__name", ]
def __str__(self):
return u"%s [%s<->%s]" % (self.schedule, self.session, self.timeslot)
@property
def room_name(self):
return self.timeslot.location.name if self.timeslot and self.timeslot.location else None
@property
def acronym(self):
if self.session and self.session.group:
return self.session.group.acronym
@property
def slot_to_the_right(self):
s = self.timeslot.slot_to_the_right
if s:
return self.schedule.assignments.filter(timeslot=s).first()
else:
return None
def meeting(self):
"""Get the meeting to which this assignment belongs"""
return self.session.meeting
def slot_type(self):
"""Get the TimeSlotTypeName that applies to this assignment"""
return self.timeslot.type
def slug(self):
"""Return sensible id string for session, e.g. suitable for use as HTML anchor."""
components = []
components.append(self.schedule.meeting.number)
if not self.timeslot:
components.append("unknown")
if not self.session or not (getattr(self.session, "historic_group", None) or self.session.group):
components.append("unknown")
else:
components.append(self.timeslot.time.strftime("%Y-%m-%d-%a-%H%M"))
g = getattr(self.session, "historic_group", None) or self.session.group
if self.timeslot.type.slug in ('break', 'reg', 'other'):
components.append(g.acronym)
components.append(slugify(self.session.name))
if self.timeslot.type.slug in ('regular', 'plenary'):
if self.timeslot.type.slug == "plenary":
components.append("1plenary")
else:
p = getattr(g, "historic_parent", None) or g.parent
if p and p.type_id in ("area", "irtf", 'ietf'):
components.append(p.acronym)
components.append(g.acronym)
return "-".join(components).lower()
class BusinessConstraint(models.Model):
"""
Constraints on the scheduling that apply across all qualifying
sessions in all meetings. Used by the ScheduleGenerator.
"""
slug = models.CharField(max_length=32, primary_key=True)
name = models.CharField(max_length=255)
penalty = models.IntegerField(default=0, help_text="The penalty for violating this kind of constraint; for instance 10 (small penalty) or 10000 (large penalty)")
class Constraint(models.Model):
"""
Specifies a constraint on the scheduling.
These constraints apply to a specific group during a specific meeting.
Available types are:
- conflict/conflic2/conflic3: a conflict between source and target WG/session,
with varying priority. The first is used for a chair conflict, the second for
technology overlap, third for key person conflict
- bethere: a constraint between source WG and a particular person
- timerange: can not meet during these times
- time_relation: preference for a time difference between sessions
- wg_adjacent: request for source WG to be adjacent (directly before or after,
no breaks, same room) the target WG
In the schedule editor, run-time, a couple non-persistent ConstraintName instances
are created for rendering purposes. This is done in
meeting.utils.preprocess_constraints_for_meeting_schedule_editor(). This adds:
- joint_with_groups
- responsible_ad
"""
TIME_RELATION_CHOICES = (
('subsequent-days', 'Schedule the sessions on subsequent days'),
('one-day-seperation', 'Leave at least one free day in between the two sessions'),
)
meeting = ForeignKey(Meeting)
source = ForeignKey(Group, related_name="constraint_source_set")
target = ForeignKey(Group, related_name="constraint_target_set", null=True)
person = ForeignKey(Person, null=True, blank=True)
name = ForeignKey(ConstraintName)
time_relation = models.CharField(max_length=200, choices=TIME_RELATION_CHOICES, blank=True)
timeranges = models.ManyToManyField(TimerangeName)
active_status = None
def __str__(self):
return u"%s %s target=%s person=%s" % (self.source, self.name.name.lower(), self.target, self.person)
def brief_display(self):
if self.name.slug == "wg_adjacent":
return "Adjacent with %s" % self.target.acronym
elif self.name.slug == "time_relation":
return self.get_time_relation_display()
elif self.name.slug == "timerange":
timeranges_str = ", ".join([t.desc for t in self.timeranges.all()])
return "Can't meet %s" % timeranges_str
elif self.target and self.person:
return "%s ; %s" % (self.target.acronym, self.person)
elif self.target and not self.person:
return "%s " % (self.target.acronym)
elif not self.target and self.person:
return "%s " % (self.person)
class SessionPresentation(models.Model):
session = ForeignKey('Session')
document = ForeignKey(Document)
rev = models.CharField(verbose_name="revision", max_length=16, null=True, blank=True)
order = models.PositiveSmallIntegerField(default=0)
class Meta:
db_table = 'meeting_session_materials'
ordering = ('order',)
unique_together = (('session', 'document'),)
def __str__(self):
return u"%s -> %s-%s" % (self.session, self.document.name, self.rev)
constraint_cache_uses = 0
constraint_cache_initials = 0
class SessionQuerySet(models.QuerySet):
def with_current_status(self):
"""Annotate session with its current status
Adds current_status, containing the text representation of the status.
"""
return self.annotate(
# coalesce with '' to avoid nulls which give funny
# results, e.g. .exclude(current_status='canceled') also
# skips rows with null in them
current_status=Coalesce(
Subquery(
SchedulingEvent.objects.filter(
session=OuterRef('pk')
).order_by(
'-time', '-id'
).values('status')[:1]),
Value(''),
output_field=TextField()),
)
def with_requested_by(self):
"""Annotate session with requested_by field
Adds requested_by field - pk of the Person who made the request
"""
return self.annotate(
requested_by=Subquery(
SchedulingEvent.objects.filter(
session=OuterRef('pk')
).order_by(
'time', 'id'
).values('by')[:1]),
)
def with_requested_time(self):
"""Annotate session with requested_time field"""
return self.annotate(
requested_time=Subquery(
SchedulingEvent.objects.filter(
session=OuterRef('pk')
).order_by(
'time', 'id'
).values('time')[:1]),
)
def not_canceled(self):
"""Queryset containing all sessions not canceled
Results annotated with current_status
"""
return self.with_current_status().exclude(current_status__in=Session.CANCELED_STATUSES)
def not_deleted(self):
"""Queryset containing all sessions not deleted
Results annotated with current_status
"""
return self.with_current_status().exclude(current_status='deleted')
def that_can_meet(self):
"""Queryset containing sessions that can meet
Results annotated with current_status
"""
return self.with_current_status().exclude(
current_status__in=['notmeet', 'disappr', 'deleted', 'apprw']
).filter(
type__slug='regular'
)
def requests(self):
"""Queryset containing sessions that may be handled as requests"""
return self.exclude(
type__in=('offagenda', 'reserved', 'unavail')
)
class Session(models.Model):
"""Session records that a group should have a session on the
meeting (time and location is stored in a TimeSlot) - if multiple
timeslots are needed, multiple sessions will have to be created.
Training sessions and similar are modeled by filling in a
responsible group (e.g. Edu team) and filling in the name."""
objects = SessionQuerySet.as_manager() # sets default query manager
meeting = ForeignKey(Meeting)
name = models.CharField(blank=True, max_length=255, help_text="Name of session, in case the session has a purpose rather than just being a group meeting.")
short = models.CharField(blank=True, max_length=32, help_text="Short version of 'name' above, for use in filenames.")
purpose = ForeignKey(SessionPurposeName, null=False, help_text='Purpose of the session')
type = ForeignKey(TimeSlotTypeName)
group = ForeignKey(Group) # The group type historically determined the session type. BOFs also need to be added as a group. Note that not all meeting requests have a natural group to associate with.
joint_with_groups = models.ManyToManyField(Group, related_name='sessions_joint_in',blank=True)
attendees = models.IntegerField(null=True, blank=True)
agenda_note = models.CharField(blank=True, max_length=255)
requested_duration = models.DurationField(default=datetime.timedelta(0))
comments = models.TextField(blank=True)
scheduled = models.DateTimeField(null=True, blank=True)
modified = models.DateTimeField(auto_now=True)
remote_instructions = models.CharField(blank=True,max_length=1024)
on_agenda = models.BooleanField(default=True, help_text='Is this session visible on the meeting agenda?')
tombstone_for = models.ForeignKey('Session', blank=True, null=True, help_text="This session is the tombstone for a session that was rescheduled", on_delete=models.CASCADE)
materials = models.ManyToManyField(Document, through=SessionPresentation, blank=True)
resources = models.ManyToManyField(ResourceAssociation, blank=True)
unique_constraints_dict = None
CANCELED_STATUSES = ['canceled', 'canceledpa']
# Should work on how materials are captured so that deleted things are no longer associated with the session
# (We can keep the information about something being added to and removed from a session in the document's history)
def get_material(self, material_type, only_one):
if hasattr(self, "prefetched_active_materials"):
l = [d for d in self.prefetched_active_materials if d.type_id == material_type]
for d in l:
d.meeting_related = lambda: True
else:
l = self.materials.filter(type=material_type).exclude(states__type=material_type, states__slug='deleted').order_by('sessionpresentation__order')
if only_one:
if l:
return l[0]
else:
return None
else:
return l
def agenda(self):
if not hasattr(self, "_agenda_cache"):
self._agenda_cache = self.get_material("agenda", only_one=True)
return self._agenda_cache
def minutes(self):
if not hasattr(self, '_cached_minutes'):
self._cached_minutes = self.get_material("minutes", only_one=True)
return self._cached_minutes
def recordings(self):
return list(self.get_material("recording", only_one=False))
def bluesheets(self):
return list(self.get_material("bluesheets", only_one=False))
def slides(self):
if not hasattr(self, "_slides_cache"):
self._slides_cache = list(self.get_material("slides", only_one=False))
return self._slides_cache
def drafts(self):
return list(self.materials.filter(type='draft'))
# The utilities below are used in the proceedings and materials
# templates, and should be moved there - then we could also query
# out the needed information in a few passes and speed up those
# pages.
def all_meeting_sessions_for_group(self):
from ietf.meeting.utils import add_event_info_to_session_qs
if self.group.features.has_meetings:
if not hasattr(self, "_all_meeting_sessions_for_group_cache"):
sessions = [s for s in add_event_info_to_session_qs(self.meeting.session_set.filter(group=self.group,type=self.type)) if s.official_timeslotassignment()]
self._all_meeting_sessions_for_group_cache = sorted(sessions, key = lambda x: x.official_timeslotassignment().timeslot.time)
return self._all_meeting_sessions_for_group_cache
else:
return [self]
def order_in_meeting(self):
if not hasattr(self, '_order_in_meeting'):
session_list = self.all_meeting_sessions_for_group()
self._order_in_meeting = session_list.index(self) + 1 if self in session_list else 0
return self._order_in_meeting
def all_meeting_sessions_cancelled(self):
return set(s.current_status for s in self.all_meeting_sessions_for_group()) == {'canceled'}
def all_meeting_recordings(self):
recordings = [] # These are not sets because we need to preserve relative ordering or redo the ordering work later
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
recordings.extend([r for r in session.recordings() if r not in recordings])
return recordings
def all_meeting_bluesheets(self):
bluesheets = []
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
bluesheets.extend([b for b in session.bluesheets() if b not in bluesheets])
return bluesheets
def all_meeting_drafts(self):
drafts = []
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
drafts.extend([d for d in session.drafts() if d not in drafts])
return drafts
def all_meeting_agendas(self):
agendas = []
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
agenda = session.agenda()
if agenda and agenda not in agendas:
agendas.append(agenda)
return agendas
def all_meeting_slides(self):
slides = []
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
slides.extend([s for s in session.slides() if s not in slides])
return slides
def all_meeting_minutes(self):
minutes = []
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
minutes_doc = session.minutes()
if minutes_doc and minutes_doc not in minutes:
minutes.append(minutes_doc)
return minutes
def can_manage_materials(self, user):
return can_manage_materials(user,self.group)
def is_material_submission_cutoff(self):
return datetime.date.today() > self.meeting.get_submission_correction_date()
def joint_with_groups_acronyms(self):
return [group.acronym for group in self.joint_with_groups.all()]
def __str__(self):
if self.meeting.type_id == "interim":
return self.meeting.number
status_id = None
if hasattr(self, 'current_status'):
status_id = self.current_status
elif self.pk is not None:
latest_event = SchedulingEvent.objects.filter(session=self.pk).order_by('-time', '-id').first()
if latest_event:
status_id = latest_event.status_id
if status_id in ('canceled','disappr','notmeet','deleted'):
ss0name = "(%s)" % SessionStatusName.objects.get(slug=status_id).name
else:
ss0name = "(unscheduled)"
ss = self.timeslotassignments.filter(schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).order_by('timeslot__time')
if ss:
ss0name = ','.join(x.timeslot.time.strftime("%a-%H%M") for x in ss)
return "%s: %s %s %s" % (self.meeting, self.group.acronym, self.name, ss0name)
@property
def short_name(self):
if self.name:
return self.name
if self.short:
return self.short
if self.group:
return self.group.acronym
return "req#%u" % (id)
@property
def special_request_token(self):
if self.comments is not None and len(self.comments)>0:
return "*"
else:
return ""
def docname_token(self):
sess_mtg = Session.objects.filter(meeting=self.meeting, group=self.group).order_by('pk')
index = list(sess_mtg).index(self)
return 'sess%s' % (string.ascii_lowercase[index])
def docname_token_only_for_multiple(self):
sess_mtg = Session.objects.filter(meeting=self.meeting, group=self.group).order_by('pk')
if len(list(sess_mtg)) > 1:
index = list(sess_mtg).index(self)
if index < 26:
token = 'sess%s' % (string.ascii_lowercase[index])
else:
token = 'sess%s%s' % (string.ascii_lowercase[index//26],string.ascii_lowercase[index%26])
return token
return None
def constraints(self):
return Constraint.objects.filter(source=self.group, meeting=self.meeting).order_by('name__name', 'target__acronym', 'person__name').prefetch_related("source","target","person")
def reverse_constraints(self):
return Constraint.objects.filter(target=self.group, meeting=self.meeting).order_by('name__name')
def official_timeslotassignment(self):
return self.timeslotassignments.filter(schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).first()
@property
def people_constraints(self):
return self.group.constraint_source_set.filter(meeting=self.meeting, name='bethere')
def agenda_text(self):
doc = self.agenda()
if doc:
path = os.path.join(settings.AGENDA_PATH, self.meeting.number, "agenda", doc.uploaded_filename)
if os.path.exists(path):
with io.open(path) as f:
return f.read()
else:
return "No agenda file found"
else:
return "The agenda has not been uploaded yet."
def agenda_file(self):
if not hasattr(self, '_agenda_file'):
self._agenda_file = ""
agenda = self.agenda()
if not agenda:
return ""
# FIXME: uploaded_filename should be replaced with a function that computes filenames when they are of a fixed schema and not uploaded names
self._agenda_file = "%s/agenda/%s" % (self.meeting.number, agenda.uploaded_filename)
return self._agenda_file
def jabber_room_name(self):
if self.type_id=='plenary':
return 'plenary'
elif self.historic_group:
return self.historic_group.acronym
else:
return self.group.acronym
def notes_id(self):
note_id_fragment = 'plenary' if self.type.slug == 'plenary' else self.group.acronym
return f'notes-ietf-{self.meeting.number}-{note_id_fragment}'
def notes_url(self):
return urljoin(settings.IETF_NOTES_URL, self.notes_id())
class SchedulingEvent(models.Model):
session = ForeignKey(Session)
time = models.DateTimeField(default=datetime.datetime.now, help_text="When the event happened")
status = ForeignKey(SessionStatusName)
by = ForeignKey(Person)
def __str__(self):
return u'%s : %s : %s : %s' % (self.session, self.status, self.time, self.by)
class ImportantDate(models.Model):
meeting = ForeignKey(Meeting)
date = models.DateField()
name = ForeignKey(ImportantDateName)
class Meta:
ordering = ["-meeting_id","date", ]
def __str__(self):
return u'%s : %s : %s' % ( self.meeting, self.name, self.date )
class SlideSubmission(models.Model):
time = models.DateTimeField(auto_now=True)
session = ForeignKey(Session)
title = models.CharField(max_length=255)
filename = models.CharField(max_length=255)
apply_to_all = models.BooleanField(default=False)
submitter = ForeignKey(Person)
status = ForeignKey(SlideSubmissionStatusName, null=True, default='pending', on_delete=models.SET_NULL)
doc = ForeignKey(Document, null=True, on_delete=models.SET_NULL)
def staged_filepath(self):
return os.path.join(settings.SLIDE_STAGING_PATH , self.filename)
def staged_url(self):
return "".join([settings.SLIDE_STAGING_URL, self.filename])
class ProceedingsMaterial(models.Model):
meeting = ForeignKey(Meeting, related_name='proceedings_materials')
document = ForeignKey(
Document,
limit_choices_to=dict(type_id='procmaterials'),
unique=True,
)
type = ForeignKey(ProceedingsMaterialTypeName)
class Meta:
unique_together = (('meeting', 'type'),)
def __str__(self):
return self.document.title
def get_href(self):
return f'{self.document.get_href(self.meeting)}'
def active(self):
return self.document.get_state().slug == 'active'
def is_url(self):
return len(self.document.external_url) > 0
def _host_upload_path(instance : 'MeetingHost', filename):
"""Compute filename relative to the storage location
Must live outside a class to allow migrations to deconstruct fields that use it
"""
num = instance.meeting.number
path = (
Path(num) / 'meetinghosts' / f'logo-{''.join(random.choices(string.ascii_lowercase, k=10))}'
).with_suffix(
Path(filename).suffix
)
return str(path)
class MeetingHost(models.Model):
"""Meeting sponsor"""
meeting = ForeignKey(Meeting, related_name='meetinghosts')
name = models.CharField(max_length=255, blank=False)
logo = MissingOkImageField(
storage=NoLocationMigrationFileSystemStorage(location=settings.MEETINGHOST_LOGO_PATH),
upload_to=_host_upload_path,
width_field='logo_width',
height_field='logo_height',
blank=False,
validators=[
MaxImageSizeValidator(
settings.MEETINGHOST_LOGO_MAX_UPLOAD_WIDTH,
settings.MEETINGHOST_LOGO_MAX_UPLOAD_HEIGHT,
),
WrappedValidator(validate_file_size, True),
WrappedValidator(
validate_file_extension,
settings.MEETING_VALID_UPLOAD_EXTENSIONS['meetinghostlogo'],
),
WrappedValidator(
validate_mime_type,
settings.MEETING_VALID_UPLOAD_MIME_TYPES['meetinghostlogo'],
True,
),
],
)
# These are filled in by the ImageField allow retrieval of image dimensions
# without processing the image each time it's loaded.
logo_width = models.PositiveIntegerField(null=True)
logo_height = models.PositiveIntegerField(null=True)
class Meta:
unique_together = (('meeting', 'name'),)
ordering = ('pk',) | # Copyright The IETF Trust 2007-2020, All Rights Reserved
# -*- coding: utf-8 -*-
# old meeting models can be found in ../proceedings/models.py
import datetime
import io
import os
import pytz
import random
import re
import string
from collections import namedtuple
from pathlib import Path
from urllib.parse import urljoin
import debug # pyflakes:ignore
from django.core.validators import MinValueValidator, RegexValidator
from django.db import models
from django.db.models import Max, Subquery, OuterRef, TextField, Value, Q
from django.db.models.functions import Coalesce
from django.conf import settings
from django.urls import reverse as urlreverse
from django.utils.text import slugify
from django.utils.safestring import mark_safe
from ietf.dbtemplate.models import DBTemplate
from ietf.doc.models import Document
from ietf.group.models import Group
from ietf.group.utils import can_manage_materials
from ietf.name.models import (
MeetingTypeName, TimeSlotTypeName, SessionStatusName, ConstraintName, RoomResourceName,
ImportantDateName, TimerangeName, SlideSubmissionStatusName, ProceedingsMaterialTypeName,
SessionPurposeName,
)
from ietf.person.models import Person
from ietf.utils.decorators import memoize
from ietf.utils.storage import NoLocationMigrationFileSystemStorage
from ietf.utils.text import xslugify
from ietf.utils.timezone import date2datetime
from ietf.utils.models import ForeignKey
from ietf.utils.validators import (
MaxImageSizeValidator, WrappedValidator, validate_file_size, validate_mime_type,
validate_file_extension,
)
from ietf.utils.fields import MissingOkImageField
from ietf.utils.log import unreachable
countries = list(pytz.country_names.items())
countries.sort(key=lambda x: x[1])
timezones = []
for name in pytz.common_timezones:
tzfn = os.path.join(settings.TZDATA_ICS_PATH, name + ".ics")
if not os.path.islink(tzfn):
timezones.append((name, name))
timezones.sort()
# this is used in models to format dates, as the built-in json serializer
# can not deal with them, and the django provided serializer is inaccessible.
from django.utils import datetime_safe
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
def fmt_date(o):
d = datetime_safe.new_date(o)
return d.strftime(DATE_FORMAT)
class Meeting(models.Model):
# number is either the number for IETF meetings, or some other
# identifier for interim meetings/IESG retreats/liaison summits/...
number = models.CharField(unique=True, max_length=64)
type = ForeignKey(MeetingTypeName)
# Date is useful when generating a set of timeslot for this meeting, but
# is not used to determine date for timeslot instances thereafter, as
# they have their own datetime field.
date = models.DateField()
days = models.IntegerField(default=7, null=False, validators=[MinValueValidator(1)],
help_text="The number of days the meeting lasts")
city = models.CharField(blank=True, max_length=255)
country = models.CharField(blank=True, max_length=2, choices=countries)
# We can't derive time-zone from country, as there are some that have
# more than one timezone, and the pytz module doesn't provide timezone
# lookup information for all relevant city/country combinations.
time_zone = models.CharField(blank=True, max_length=255, choices=timezones)
idsubmit_cutoff_day_offset_00 = models.IntegerField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_DAY_OFFSET_00,
help_text = "The number of days before the meeting start date when the submission of -00 drafts will be closed.")
idsubmit_cutoff_day_offset_01 = models.IntegerField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_DAY_OFFSET_01,
help_text = "The number of days before the meeting start date when the submission of -01 drafts etc. will be closed.")
idsubmit_cutoff_time_utc = models.DurationField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_TIME_UTC,
help_text = "The time of day (UTC) after which submission will be closed. Use for example 23:59:59.")
idsubmit_cutoff_warning_days = models.DurationField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_WARNING_DAYS,
help_text = "How long before the 00 cutoff to start showing cutoff warnings. Use for example '21' or '21 days'.")
submission_start_day_offset = models.IntegerField(blank=True,
default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_START_DAYS,
help_text = "The number of days before the meeting start date after which meeting materials will be accepted.")
submission_cutoff_day_offset = models.IntegerField(blank=True,
default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_CUTOFF_DAYS,
help_text = "The number of days after the meeting start date in which new meeting materials will be accepted.")
submission_correction_day_offset = models.IntegerField(blank=True,
default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_CORRECTION_DAYS,
help_text = "The number of days after the meeting start date in which updates to existing meeting materials will be accepted.")
venue_name = models.CharField(blank=True, max_length=255)
venue_addr = models.TextField(blank=True)
break_area = models.CharField(blank=True, max_length=255)
reg_area = models.CharField(blank=True, max_length=255)
agenda_info_note = models.TextField(blank=True, help_text="Text in this field will be placed at the top of the html agenda page for the meeting. HTML can be used, but will not be validated.")
agenda_warning_note = models.TextField(blank=True, help_text="Text in this field will be placed more prominently at the top of the html agenda page for the meeting. HTML can be used, but will not be validated.")
schedule = ForeignKey('Schedule',null=True,blank=True, related_name='+')
session_request_lock_message = models.CharField(blank=True,max_length=255) # locked if not empty
proceedings_final = models.BooleanField(default=False, help_text="Are the proceedings for this meeting complete?")
acknowledgements = models.TextField(blank=True, help_text="Acknowledgements for use in meeting proceedings. Use ReStructuredText markup.")
overview = ForeignKey(DBTemplate, related_name='overview', null=True, editable=False)
show_important_dates = models.BooleanField(default=False)
attendees = models.IntegerField(blank=True, null=True, default=None,
help_text="Number of Attendees for backfilled meetings, leave it blank for new meetings, and then it is calculated from the registrations")
group_conflict_types = models.ManyToManyField(
ConstraintName, blank=True, limit_choices_to=dict(is_group_conflict=True),
help_text='Types of scheduling conflict between groups to consider')
def __str__(self):
if self.type_id == "ietf":
return u"IETF-%s" % (self.number)
else:
return self.number
def get_meeting_date (self,offset):
return self.date + datetime.timedelta(days=offset)
def end_date(self):
return self.get_meeting_date(self.days-1)
def get_00_cutoff(self):
start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day, tzinfo=pytz.utc)
importantdate = self.importantdate_set.filter(name_id='idcutoff').first()
if not importantdate:
importantdate = self.importantdate_set.filter(name_id='00cutoff').first()
if importantdate:
cutoff_date = importantdate.date
else:
cutoff_date = start_date + datetime.timedelta(days=ImportantDateName.objects.get(slug='idcutoff').default_offset_days)
cutoff_time = date2datetime(cutoff_date) + self.idsubmit_cutoff_time_utc
return cutoff_time
def get_01_cutoff(self):
start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day, tzinfo=pytz.utc)
importantdate = self.importantdate_set.filter(name_id='idcutoff').first()
if not importantdate:
importantdate = self.importantdate_set.filter(name_id='01cutoff').first()
if importantdate:
cutoff_date = importantdate.date
else:
cutoff_date = start_date + datetime.timedelta(days=ImportantDateName.objects.get(slug='idcutoff').default_offset_days)
cutoff_time = date2datetime(cutoff_date) + self.idsubmit_cutoff_time_utc
return cutoff_time
def get_reopen_time(self):
start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day)
local_tz = pytz.timezone(self.time_zone)
local_date = local_tz.localize(start_date)
cutoff = self.get_00_cutoff()
if cutoff.date() == start_date:
# no cutoff, so no local-time re-open
reopen_time = cutoff
else:
# reopen time is in local timezone. May need policy change?? XXX
reopen_time = local_date + self.idsubmit_cutoff_time_utc
return reopen_time
@classmethod
def get_current_meeting(cls, type="ietf"):
return cls.objects.filter(type=type, date__gte=datetime.datetime.today()-datetime.timedelta(days=7) ).order_by('date').first()
def get_first_cut_off(self):
return self.get_00_cutoff()
def get_second_cut_off(self):
return self.get_01_cutoff()
def get_ietf_monday(self):
for offset in range(self.days):
date = self.date+datetime.timedelta(days=offset)
if date.weekday() == 0: # Monday is 0
return date
def get_materials_path(self):
return os.path.join(settings.AGENDA_PATH,self.number)
# the various dates are currently computed
def get_submission_start_date(self):
return self.date - datetime.timedelta(days=self.submission_start_day_offset)
def get_submission_cut_off_date(self):
importantdate = self.importantdate_set.filter(name_id='procsub').first()
if importantdate:
return importantdate.date
else:
return self.date + datetime.timedelta(days=self.submission_cutoff_day_offset)
def get_submission_correction_date(self):
importantdate = self.importantdate_set.filter(name_id='revsub').first()
if importantdate:
return importantdate.date
else:
return self.date + datetime.timedelta(days=self.submission_correction_day_offset)
def enabled_constraint_names(self):
return ConstraintName.objects.filter(
Q(is_group_conflict=False) # any non-group-conflict constraints
| Q(is_group_conflict=True, meeting=self) # or specifically enabled for this meeting
)
def enabled_constraints(self):
return self.constraint_set.filter(name__in=self.enabled_constraint_names())
def get_schedule_by_name(self, name):
return self.schedule_set.filter(name=name).first()
def get_number(self):
"Return integer meeting number for ietf meetings, rather than strings."
if self.number.isdigit():
return int(self.number)
else:
return None
def get_proceedings_materials(self):
"""Get proceedings materials"""
return self.proceedings_materials.filter(
document__states__slug='active', document__states__type_id='procmaterials'
).order_by('type__order')
def get_attendance(self):
"""Get the meeting attendance from the MeetingRegistrations
Returns a NamedTuple with onsite and online attributes. Returns None if the record is unavailable
for this meeting.
"""
number = self.get_number()
if number is None or number < 110:
return None
Attendance = namedtuple('Attendance', 'onsite online')
return Attendance(
onsite=Person.objects.filter(
meetingregistration__meeting=self,
meetingregistration__attended=True,
meetingregistration__reg_type__contains='in_person',
).distinct().count(),
online=Person.objects.filter(
meetingregistration__meeting=self,
meetingregistration__attended=True,
meetingregistration__reg_type__contains='remote',
).distinct().count(),
)
@property
def proceedings_format_version(self):
"""Indicate version of proceedings that should be used for this meeting
Only makes sense for IETF meetings. Returns None for any meeting without a purely numeric number.
Uses settings.PROCEEDINGS_VERSION_CHANGES. Versions start at 1. Entries
in the array are the first meeting number using each version.
"""
if not hasattr(self, '_proceedings_format_version'):
if not self.number.isdigit():
version = None # no version for non-IETF meeting
else:
version = len(settings.PROCEEDINGS_VERSION_CHANGES) # start assuming latest version
mtg_number = self.get_number()
if mtg_number is None:
unreachable('2021-08-10')
else:
# Find the index of the first entry in the version change array that
# is >= this meeting's number. The first entry in the array is 0, so the
# version is always >= 1 for positive meeting numbers.
for vers, threshold in enumerate(settings.PROCEEDINGS_VERSION_CHANGES):
if mtg_number < threshold:
version = vers
break
self._proceedings_format_version = version # save this for later
return self._proceedings_format_version
@property
def session_constraintnames(self):
"""Gets a list of the constraint names that should be used for this meeting
Anticipated that this will soon become a many-to-many relationship with ConstraintName
(see issue #2770). Making this a @property allows use of the .all(), .filter(), etc,
so that other code should not need changes when this is replaced.
"""
try:
mtg_num = int(self.number)
except ValueError:
mtg_num = None # should not come up, but this method should not fail
if mtg_num is None or mtg_num >= 106:
# Meetings 106 and later use the new constraint type names; earlier meetings
# used the old 'conflic?' constraint types (handled in the else branch below).
slugs = ('chair_conflict', 'tech_overlap', 'key_participant')
else:
slugs = ('conflict', 'conflic2', 'conflic3')
return ConstraintName.objects.filter(slug__in=slugs)
def base_url(self):
return "/meeting/%s" % (self.number, )
def build_timeslices(self):
"""Get unique day/time/timeslot data for meeting
Returns a list of days, time intervals for each day, and timeslots for each day,
with repeated days/time intervals removed. Ignores timeslots that do not have a
location. The slots return value contains only one TimeSlot for each distinct
time interval.
"""
days = [] # the days of the meetings
time_slices = {} # the times on each day
slots = {}
for ts in self.timeslot_set.all():
if ts.location_id is None:
continue
ymd = ts.time.date()
if ymd not in time_slices:
time_slices[ymd] = []
slots[ymd] = []
days.append(ymd)
if ymd in time_slices:
# only keep unique entries
if [ts.time, ts.time + ts.duration, ts.duration.seconds] not in time_slices[ymd]:
time_slices[ymd].append([ts.time, ts.time + ts.duration, ts.duration.seconds])
slots[ymd].append(ts)
days.sort()
for ymd in time_slices:
# Make sure these sort the same way
time_slices[ymd].sort()
slots[ymd].sort(key=lambda x: (x.time, x.duration))
return days,time_slices,slots
# this function makes a list of timeslices and rooms, and
# makes sure that all schedules have all of them.
# def create_all_timeslots(self):
# alltimeslots = self.timeslot_set.all()
# for sched in self.schedule_set.all():
# ts_hash = {}
# for ss in sched.assignments.all():
# ts_hash[ss.timeslot] = ss
# for ts in alltimeslots:
# if not (ts in ts_hash):
# SchedTimeSessAssignment.objects.create(schedule = sched,
# timeslot = ts)
def vtimezone(self):
if self.time_zone:
try:
tzfn = os.path.join(settings.TZDATA_ICS_PATH, self.time_zone + ".ics")
if os.path.exists(tzfn):
with io.open(tzfn) as tzf:
icstext = tzf.read()
vtimezone = re.search("(?sm)(\nBEGIN:VTIMEZONE.*\nEND:VTIMEZONE\n)", icstext).group(1).strip()
if vtimezone:
vtimezone += "\n"
return vtimezone
except IOError:
pass
return ''
def set_official_schedule(self, schedule):
if self.schedule != schedule:
self.schedule = schedule
self.save()
def updated(self):
min_time = datetime.datetime(1970, 1, 1, 0, 0, 0) # should be Meeting.modified, but we don't have that
timeslots_updated = self.timeslot_set.aggregate(Max('modified'))["modified__max"] or min_time
sessions_updated = self.session_set.aggregate(Max('modified'))["modified__max"] or min_time
assignments_updated = min_time
if self.schedule:
assignments_updated = SchedTimeSessAssignment.objects.filter(schedule__in=[self.schedule, self.schedule.base if self.schedule else None]).aggregate(Max('modified'))["modified__max"] or min_time
ts = max(timeslots_updated, sessions_updated, assignments_updated)
tz = pytz.timezone(settings.PRODUCTION_TIMEZONE)
ts = tz.localize(ts)
return ts
@memoize
def previous_meeting(self):
return Meeting.objects.filter(type_id=self.type_id,date__lt=self.date).order_by('-date').first()
class Meta:
ordering = ["-date", "-id"]
indexes = [
models.Index(fields=['-date', '-id']),
]
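# Illustrative sketch (not part of the models): how the lookup in
# Meeting.proceedings_format_version above behaves, using a hypothetical
# PROCEEDINGS_VERSION_CHANGES value. Each entry is the first meeting number that
# uses that version; versions are counted from 1.
def _example_proceedings_version(mtg_number, version_changes=(0, 95, 110)):
    version = len(version_changes)  # start by assuming the latest version
    for vers, threshold in enumerate(version_changes):
        if mtg_number < threshold:
            version = vers
            break
    return version
# _example_proceedings_version(94) == 1, (100) == 2, (112) == 3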
# === Rooms, Resources, Floorplans =============================================
class ResourceAssociation(models.Model):
name = ForeignKey(RoomResourceName)
icon = models.CharField(max_length=64) # icon to be found in /static/img
desc = models.CharField(max_length=256)
def __str__(self):
return self.desc
class Room(models.Model):
meeting = ForeignKey(Meeting)
modified = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=255)
functional_name = models.CharField(max_length=255, blank = True)
capacity = models.IntegerField(null=True, blank=True)
resources = models.ManyToManyField(ResourceAssociation, blank = True)
session_types = models.ManyToManyField(TimeSlotTypeName, blank = True)
# floorplan-related properties
floorplan = ForeignKey('FloorPlan', null=True, blank=True, default=None)
# floorplan: room pixel position : (0,0) is top left of image, (xd, yd)
# is room width, height.
x1 = models.SmallIntegerField(null=True, blank=True, default=None)
y1 = models.SmallIntegerField(null=True, blank=True, default=None)
x2 = models.SmallIntegerField(null=True, blank=True, default=None)
y2 = models.SmallIntegerField(null=True, blank=True, default=None)
# end floorplan-related stuff
def __str__(self):
return u"%s size: %s" % (self.name, self.capacity)
def delete_timeslots(self):
for ts in self.timeslot_set.all():
ts.sessionassignments.all().delete()
ts.delete()
def create_timeslots(self):
days, time_slices, slots = self.meeting.build_timeslices()
for day in days:
for ts in slots[day]:
TimeSlot.objects.create(type_id=ts.type_id,
meeting=self.meeting,
name=ts.name,
time=ts.time,
location=self,
duration=ts.duration)
#self.meeting.create_all_timeslots()
def dom_id(self):
return "room%u" % (self.pk)
# floorplan support
def floorplan_url(self):
mtg_num = self.meeting.get_number()
if not mtg_num:
return None
elif mtg_num <= settings.FLOORPLAN_LAST_LEGACY_MEETING:
base_url = settings.FLOORPLAN_LEGACY_BASE_URL.format(meeting=self.meeting)
elif self.floorplan:
base_url = urlreverse('ietf.meeting.views.floor_plan', kwargs=dict(num=mtg_num))
else:
return None
return f'{base_url}?room={xslugify(self.name)}'
def left(self):
return min(self.x1, self.x2) if (self.x1 and self.x2) else 0
def top(self):
return min(self.y1, self.y2) if (self.y1 and self.y2) else 0
def right(self):
return max(self.x1, self.x2) if (self.x1 and self.x2) else 0
def bottom(self):
return max(self.y1, self.y2) if (self.y1 and self.y2) else 0
def functional_display_name(self):
if not self.functional_name:
return ""
if 'breakout' in self.functional_name.lower():
return ""
if self.functional_name[0].isdigit():
return ""
return self.functional_name
# audio stream support
def audio_stream_url(self):
urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id == 'audiostream']
return urlresources[0].url if urlresources else None
def video_stream_url(self):
urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['meetecho']]
return urlresources[0].url if urlresources else None
def onsite_tool_url(self):
urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['meetecho_onsite']]
return urlresources[0].url if urlresources else None
def webex_url(self):
urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['webex']]
return urlresources[0].url if urlresources else None
#
class Meta:
ordering = ["-id"]
class UrlResource(models.Model):
"For things like audio stream urls, meetecho stream urls"
name = ForeignKey(RoomResourceName)
room = ForeignKey(Room)
url = models.URLField(null=True, blank=True)
def floorplan_path(instance, filename):
root, ext = os.path.splitext(filename)
return "%s/floorplan-%s-%s%s" % (settings.FLOORPLAN_MEDIA_DIR, instance.meeting.number, xslugify(instance.name), ext)
class FloorPlan(models.Model):
name = models.CharField(max_length=255)
short = models.CharField(max_length=3, default='')
modified= models.DateTimeField(auto_now=True)
meeting = ForeignKey(Meeting)
order = models.SmallIntegerField()
image = models.ImageField(storage=NoLocationMigrationFileSystemStorage(), upload_to=floorplan_path, blank=True, default=None)
#
class Meta:
ordering = ['-id',]
#
def __str__(self):
return u'floorplan-%s-%s' % (self.meeting.number, xslugify(self.name))
# === Schedules, Sessions, Timeslots and Assignments ===========================
class TimeSlot(models.Model):
"""
Everything that would appear on the meeting agenda of a meeting is
mapped to a timeslot, including breaks. Sessions are connected to
TimeSlots during scheduling.
"""
meeting = ForeignKey(Meeting)
type = ForeignKey(TimeSlotTypeName)
name = models.CharField(max_length=255)
time = models.DateTimeField()
duration = models.DurationField(default=datetime.timedelta(0))
location = ForeignKey(Room, blank=True, null=True)
show_location = models.BooleanField(default=True, help_text="Show location in agenda.")
sessions = models.ManyToManyField('Session', related_name='slots', through='SchedTimeSessAssignment', blank=True, help_text="Scheduled session, if any.")
modified = models.DateTimeField(auto_now=True)
#
@property
def session(self):
if not hasattr(self, "_session_cache"):
self._session_cache = self.sessions.filter(timeslotassignments__schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).first()
return self._session_cache
@property
def time_desc(self):
return "%s-%s" % (self.time.strftime("%H%M"), (self.time + self.duration).strftime("%H%M"))
def meeting_date(self):
return self.time.date()
def registration(self):
# below implements an object-local cache:
# it tries to find a timeslot of type registration on the same day as this slot,
# so that it can be shown at the top of the agenda.
if not hasattr(self, '_reg_info'):
try:
self._reg_info = TimeSlot.objects.get(meeting=self.meeting, time__month=self.time.month, time__day=self.time.day, type="reg")
except TimeSlot.DoesNotExist:
self._reg_info = None
return self._reg_info
def __str__(self):
location = self.get_location()
if not location:
location = u"(no location)"
return u"%s: %s-%s %s, %s" % (self.meeting.number, self.time.strftime("%m-%d %H:%M"), (self.time + self.duration).strftime("%H:%M"), self.name, location)
def end_time(self):
return self.time + self.duration
def get_hidden_location(self):
if not hasattr(self, '_cached_hidden_location'):
location = self.location
if location:
location = location.name
elif self.type_id == "reg":
location = self.meeting.reg_area
elif self.type_id == "break":
location = self.meeting.break_area
self._cached_hidden_location = location
return self._cached_hidden_location
def get_location(self):
return self.get_hidden_location() if self.show_location else ""
def get_functional_location(self):
name_parts = []
room = self.location
if room and room.functional_name:
name_parts.append(room.functional_name)
location = self.get_hidden_location()
if location:
name_parts.append(location)
return ' - '.join(name_parts)
def get_html_location(self):
if not hasattr(self, '_cached_html_location'):
self._cached_html_location = self.get_location()
if len(self._cached_html_location) > 8:
self._cached_html_location = mark_safe(self._cached_html_location.replace('/', '/<wbr>'))
else:
self._cached_html_location = mark_safe(self._cached_html_location.replace(' ', '&nbsp;'))
return self._cached_html_location
def tz(self):
if not hasattr(self, '_cached_tz'):
if self.meeting.time_zone:
self._cached_tz = pytz.timezone(self.meeting.time_zone)
else:
self._cached_tz = None
return self._cached_tz
def tzname(self):
if self.tz():
return self.tz().tzname(self.time)
else:
return ""
def utc_start_time(self):
if self.tz():
local_start_time = self.tz().localize(self.time)
return local_start_time.astimezone(pytz.utc)
else:
return None
def utc_end_time(self):
utc_start = self.utc_start_time()
# Add duration after converting start time, otherwise errors creep in around DST change
return None if utc_start is None else utc_start + self.duration
def local_start_time(self):
if self.tz():
return self.tz().localize(self.time)
else:
return None
def local_end_time(self):
local_start = self.local_start_time()
# Add duration after converting start time, otherwise errors creep in around DST change
return None if local_start is None else local_start + self.duration
@property
def js_identifier(self):
# this returns a unique identifier that is js happy.
# {{s.timeslot.time|date:'Y-m-d'}}_{{ s.timeslot.time|date:'Hi' }}"
# also must match:
# {{r|slugify}}_{{day}}_{{slot.0|date:'Hi'}}
dom_id="ts%u" % (self.pk)
if self.location is not None:
dom_id = self.location.dom_id()
return "%s_%s_%s" % (dom_id, self.time.strftime('%Y-%m-%d'), self.time.strftime('%H%M'))
def delete_concurrent_timeslots(self):
"""Delete all timeslots which are in the same time as this slot"""
# cannot include duration in the filter, because there is no support
# for using it in a WHERE clause.
# below will delete self as well.
for ts in self.meeting.timeslot_set.filter(time=self.time).all():
if ts.duration!=self.duration:
continue
# now remove any schedule that might have been made to this
# timeslot.
ts.sessionassignments.all().delete()
ts.delete()
"""
Find a timeslot that comes next, in the same room. It must be on the same day,
and it must have a gap of less than 11 minutes. (10 is the spec)
"""
@property
def slot_to_the_right(self):
return self.meeting.timeslot_set.filter(
location = self.location, # same room!
type = self.type, # must be same type (usually session)
time__gt = self.time + self.duration, # must be after this session
time__lt = self.time + self.duration + datetime.timedelta(seconds=11*60)).first()
class Meta:
ordering = ["-time", "-id"]
indexes = [
models.Index(fields=['-time', '-id']),
]
# end of TimeSlot
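# Illustrative sketch (not part of the models): the end-time helpers above add the
# duration only after localizing the start time, because localizing start+duration
# directly may pick a different UTC offset across a DST change. The tz name is a
# hypothetical example; relies on the module's existing pytz import.
def _example_utc_end_time(naive_start, duration, tz_name='Europe/Prague'):
    local_start = pytz.timezone(tz_name).localize(naive_start)
    return local_start.astimezone(pytz.utc) + duration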
class Schedule(models.Model):
"""
Each person may have multiple schedules saved.
A Schedule may be made visible, which means that it will show up in
public drop down menus, etc. It may also be made public, which means
that someone who knows about it by name/id would be able to reference
it. A non-visible, public schedule might be passed around by the
Secretariat to IESG members for review. Only the owner may edit the
schedule, others may copy it
"""
meeting = ForeignKey(Meeting, null=True, related_name='schedule_set')
name = models.CharField(max_length=64, blank=False, help_text="Letters, numbers and -:_ allowed.", validators=[RegexValidator(r'^[A-Za-z0-9-:_]*$')])
owner = ForeignKey(Person)
visible = models.BooleanField("Show in agenda list", default=True, help_text="Show in the list of possible agendas for the meeting.")
public = models.BooleanField(default=True, help_text="Allow others to see this agenda.")
badness = models.IntegerField(null=True, blank=True)
notes = models.TextField(blank=True)
origin = ForeignKey('Schedule', blank=True, null=True, on_delete=models.SET_NULL, related_name="+")
base = ForeignKey('Schedule', blank=True, null=True, on_delete=models.SET_NULL,
help_text="Sessions scheduled in the base schedule show up in this schedule too.", related_name="derivedschedule_set",
limit_choices_to={'base': None}) # prevent the inheritance from being more than one layer deep (no recursion)
def __str__(self):
return u"%s:%s(%s)" % (self.meeting, self.name, self.owner)
def base_url(self):
return "/meeting/%s/agenda/%s/%s" % (self.meeting.number, self.owner_email(), self.name)
# temporary property to pacify the places where Schedule.assignments is used
# @property
# def schedtimesessassignment_set(self):
# return self.assignments
#
# def url_edit(self):
# return "/meeting/%s/agenda/%s/edit" % (self.meeting.number, self.name)
#
# @property
# def relurl_edit(self):
# return self.url_edit("")
def owner_email(self):
return self.owner.email_address() or "noemail"
@property
def is_official(self):
return (self.meeting.schedule == self)
@property
def is_official_record(self):
return (self.is_official and
self.meeting.end_date() <= datetime.date.today() )
# returns a dictionary {group -> [schedtimesessassignment+]}
# and it has [] if the session is not placed.
# if there is more than one session for that group,
# then a list of them is returned (always a list)
@property
def official_token(self):
if self.is_official:
return "official"
else:
return "unofficial"
def delete_assignments(self):
self.assignments.all().delete()
@property
def qs_assignments_with_sessions(self):
return self.assignments.filter(session__isnull=False)
def qs_timeslots_in_use(self):
"""Get QuerySet containing timeslots used by the schedule"""
return TimeSlot.objects.filter(sessionassignments__schedule=self)
def qs_sessions_scheduled(self):
"""Get QuerySet containing sessions assigned to timeslots by this schedule"""
return Session.objects.filter(timeslotassignments__schedule=self)
def delete_schedule(self):
self.assignments.all().delete()
self.delete()
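# Illustrative sketch, assuming a configured Django environment for this app:
# a quick summary of a Schedule using the helpers defined above.
def _example_schedule_summary(schedule):
    return {
        'official': schedule.is_official,
        'timeslots_in_use': schedule.qs_timeslots_in_use().count(),
        'sessions_scheduled': schedule.qs_sessions_scheduled().count(),
    }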
# to be renamed SchedTimeSessAssignments (stsa)
class SchedTimeSessAssignment(models.Model):
"""
This model provides an N:M relationship between Session and TimeSlot.
Each relationship is attached to the named schedule, which is owned by
a specific person/user.
"""
timeslot = ForeignKey('TimeSlot', null=False, blank=False, related_name='sessionassignments')
session = ForeignKey('Session', null=True, default=None, related_name='timeslotassignments', help_text="Scheduled session.")
schedule = ForeignKey('Schedule', null=False, blank=False, related_name='assignments')
extendedfrom = ForeignKey('self', null=True, default=None, help_text="Timeslot this session is an extension of.")
modified = models.DateTimeField(auto_now=True)
notes = models.TextField(blank=True)
badness = models.IntegerField(default=0, blank=True, null=True)
pinned = models.BooleanField(default=False, help_text="Do not move session during automatic placement.")
class Meta:
ordering = ["timeslot__time", "timeslot__type__slug", "session__group__parent__name", "session__group__acronym", "session__name", ]
def __str__(self):
return u"%s [%s<->%s]" % (self.schedule, self.session, self.timeslot)
@property
def room_name(self):
return self.timeslot.location.name if self.timeslot and self.timeslot.location else None
@property
def acronym(self):
if self.session and self.session.group:
return self.session.group.acronym
@property
def slot_to_the_right(self):
s = self.timeslot.slot_to_the_right
if s:
return self.schedule.assignments.filter(timeslot=s).first()
else:
return None
def meeting(self):
"""Get the meeting to which this assignment belongs"""
return self.session.meeting
def slot_type(self):
"""Get the TimeSlotTypeName that applies to this assignment"""
return self.timeslot.type
def slug(self):
"""Return sensible id string for session, e.g. suitable for use as HTML anchor."""
components = []
components.append(self.schedule.meeting.number)
if not self.timeslot:
components.append("unknown")
if not self.session or not (getattr(self.session, "historic_group", None) or self.session.group):
components.append("unknown")
else:
components.append(self.timeslot.time.strftime("%Y-%m-%d-%a-%H%M"))
g = getattr(self.session, "historic_group", None) or self.session.group
if self.timeslot.type.slug in ('break', 'reg', 'other'):
components.append(g.acronym)
components.append(slugify(self.session.name))
if self.timeslot.type.slug in ('regular', 'plenary'):
if self.timeslot.type.slug == "plenary":
components.append("1plenary")
else:
p = getattr(g, "historic_parent", None) or g.parent
if p and p.type_id in ("area", "irtf", 'ietf'):
components.append(p.acronym)
components.append(g.acronym)
return "-".join(components).lower()
class BusinessConstraint(models.Model):
"""
Constraints on the scheduling that apply across all qualifying
sessions in all meetings. Used by the ScheduleGenerator.
"""
slug = models.CharField(max_length=32, primary_key=True)
name = models.CharField(max_length=255)
penalty = models.IntegerField(default=0, help_text="The penalty for violating this kind of constraint; for instance 10 (small penalty) or 10000 (large penalty)")
class Constraint(models.Model):
"""
Specifies a constraint on the scheduling.
These constraints apply to a specific group during a specific meeting.
Available types are:
- conflict/conflic2/conflic3: a conflict between source and target WG/session,
with varying priority. The first is used for a chair conflict, the second for
technology overlap, third for key person conflict
- bethere: a constraint between source WG and a particular person
- timerange: can not meet during these times
- time_relation: preference for a time difference between sessions
- wg_adjacent: request for source WG to be adjacent (directly before or after,
no breaks, same room) the target WG
At run-time, in the schedule editor, a couple of non-persistent ConstraintName instances
are created for rendering purposes. This is done in
meeting.utils.preprocess_constraints_for_meeting_schedule_editor(). This adds:
- joint_with_groups
- responsible_ad
"""
TIME_RELATION_CHOICES = (
('subsequent-days', 'Schedule the sessions on subsequent days'),
('one-day-seperation', 'Leave at least one free day in between the two sessions'),
)
meeting = ForeignKey(Meeting)
source = ForeignKey(Group, related_name="constraint_source_set")
target = ForeignKey(Group, related_name="constraint_target_set", null=True)
person = ForeignKey(Person, null=True, blank=True)
name = ForeignKey(ConstraintName)
time_relation = models.CharField(max_length=200, choices=TIME_RELATION_CHOICES, blank=True)
timeranges = models.ManyToManyField(TimerangeName)
active_status = None
def __str__(self):
return u"%s %s target=%s person=%s" % (self.source, self.name.name.lower(), self.target, self.person)
def brief_display(self):
if self.name.slug == "wg_adjacent":
return "Adjacent with %s" % self.target.acronym
elif self.name.slug == "time_relation":
return self.get_time_relation_display()
elif self.name.slug == "timerange":
timeranges_str = ", ".join([t.desc for t in self.timeranges.all()])
return "Can't meet %s" % timeranges_str
elif self.target and self.person:
return "%s ; %s" % (self.target.acronym, self.person)
elif self.target and not self.person:
return "%s " % (self.target.acronym)
elif not self.target and self.person:
return "%s " % (self.person)
class SessionPresentation(models.Model):
session = ForeignKey('Session')
document = ForeignKey(Document)
rev = models.CharField(verbose_name="revision", max_length=16, null=True, blank=True)
order = models.PositiveSmallIntegerField(default=0)
class Meta:
db_table = 'meeting_session_materials'
ordering = ('order',)
unique_together = (('session', 'document'),)
def __str__(self):
return u"%s -> %s-%s" % (self.session, self.document.name, self.rev)
constraint_cache_uses = 0
constraint_cache_initials = 0
class SessionQuerySet(models.QuerySet):
def with_current_status(self):
"""Annotate session with its current status
Adds current_status, containing the text representation of the status.
"""
return self.annotate(
# coalesce with '' to avoid nulls which give funny
# results, e.g. .exclude(current_status='canceled') also
# skips rows with null in them
current_status=Coalesce(
Subquery(
SchedulingEvent.objects.filter(
session=OuterRef('pk')
).order_by(
'-time', '-id'
).values('status')[:1]),
Value(''),
output_field=TextField()),
)
def with_requested_by(self):
"""Annotate session with requested_by field
Adds requested_by field - pk of the Person who made the request
"""
return self.annotate(
requested_by=Subquery(
SchedulingEvent.objects.filter(
session=OuterRef('pk')
).order_by(
'time', 'id'
).values('by')[:1]),
)
def with_requested_time(self):
"""Annotate session with requested_time field"""
return self.annotate(
requested_time=Subquery(
SchedulingEvent.objects.filter(
session=OuterRef('pk')
).order_by(
'time', 'id'
).values('time')[:1]),
)
def not_canceled(self):
"""Queryset containing all sessions not canceled
Results annotated with current_status
"""
return self.with_current_status().exclude(current_status__in=Session.CANCELED_STATUSES)
def not_deleted(self):
"""Queryset containing all sessions not deleted
Results annotated with current_status
"""
return self.with_current_status().exclude(current_status='deleted')
def that_can_meet(self):
"""Queryset containing sessions that can meet
Results annotated with current_status
"""
return self.with_current_status().exclude(
current_status__in=['notmeet', 'disappr', 'deleted', 'apprw']
).filter(
type__slug='regular'
)
def requests(self):
"""Queryset containing sessions that may be handled as requests"""
return self.exclude(
type__in=('offagenda', 'reserved', 'unavail')
)
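# Illustrative sketch, assuming a configured Django environment for this app: the
# queryset helpers above compose with ordinary filters. The meeting number is a
# hypothetical example.
def _example_open_sessions(meeting_number):
    # Sessions for the given meeting that are not canceled, annotated with the
    # requester's pk (requested_by) and the current status text (current_status).
    return (
        Session.objects
        .filter(meeting__number=meeting_number)
        .not_canceled()
        .with_requested_by()
    )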
class Session(models.Model):
"""Session records that a group should have a session on the
meeting (time and location is stored in a TimeSlot) - if multiple
timeslots are needed, multiple sessions will have to be created.
Training sessions and similar are modeled by filling in a
responsible group (e.g. Edu team) and filling in the name."""
objects = SessionQuerySet.as_manager() # sets default query manager
meeting = ForeignKey(Meeting)
name = models.CharField(blank=True, max_length=255, help_text="Name of session, in case the session has a purpose rather than just being a group meeting.")
short = models.CharField(blank=True, max_length=32, help_text="Short version of 'name' above, for use in filenames.")
purpose = ForeignKey(SessionPurposeName, null=False, help_text='Purpose of the session')
type = ForeignKey(TimeSlotTypeName)
group = ForeignKey(Group) # The group type historically determined the session type. BOFs also need to be added as a group. Note that not all meeting requests have a natural group to associate with.
joint_with_groups = models.ManyToManyField(Group, related_name='sessions_joint_in',blank=True)
attendees = models.IntegerField(null=True, blank=True)
agenda_note = models.CharField(blank=True, max_length=255)
requested_duration = models.DurationField(default=datetime.timedelta(0))
comments = models.TextField(blank=True)
scheduled = models.DateTimeField(null=True, blank=True)
modified = models.DateTimeField(auto_now=True)
remote_instructions = models.CharField(blank=True,max_length=1024)
on_agenda = models.BooleanField(default=True, help_text='Is this session visible on the meeting agenda?')
tombstone_for = models.ForeignKey('Session', blank=True, null=True, help_text="This session is the tombstone for a session that was rescheduled", on_delete=models.CASCADE)
materials = models.ManyToManyField(Document, through=SessionPresentation, blank=True)
resources = models.ManyToManyField(ResourceAssociation, blank=True)
unique_constraints_dict = None
CANCELED_STATUSES = ['canceled', 'canceledpa']
# Should work on how materials are captured so that deleted things are no longer associated with the session
# (We can keep the information about something being added to and removed from a session in the document's history)
def get_material(self, material_type, only_one):
if hasattr(self, "prefetched_active_materials"):
l = [d for d in self.prefetched_active_materials if d.type_id == material_type]
for d in l:
d.meeting_related = lambda: True
else:
l = self.materials.filter(type=material_type).exclude(states__type=material_type, states__slug='deleted').order_by('sessionpresentation__order')
if only_one:
if l:
return l[0]
else:
return None
else:
return l
def agenda(self):
if not hasattr(self, "_agenda_cache"):
self._agenda_cache = self.get_material("agenda", only_one=True)
return self._agenda_cache
def minutes(self):
if not hasattr(self, '_cached_minutes'):
self._cached_minutes = self.get_material("minutes", only_one=True)
return self._cached_minutes
def recordings(self):
return list(self.get_material("recording", only_one=False))
def bluesheets(self):
return list(self.get_material("bluesheets", only_one=False))
def slides(self):
if not hasattr(self, "_slides_cache"):
self._slides_cache = list(self.get_material("slides", only_one=False))
return self._slides_cache
def drafts(self):
return list(self.materials.filter(type='draft'))
# The utilities below are used in the proceedings and materials
# templates, and should be moved there - then we could also query
# out the needed information in a few passes and speed up those
# pages.
def all_meeting_sessions_for_group(self):
from ietf.meeting.utils import add_event_info_to_session_qs
if self.group.features.has_meetings:
if not hasattr(self, "_all_meeting_sessions_for_group_cache"):
sessions = [s for s in add_event_info_to_session_qs(self.meeting.session_set.filter(group=self.group,type=self.type)) if s.official_timeslotassignment()]
self._all_meeting_sessions_for_group_cache = sorted(sessions, key = lambda x: x.official_timeslotassignment().timeslot.time)
return self._all_meeting_sessions_for_group_cache
else:
return [self]
def order_in_meeting(self):
if not hasattr(self, '_order_in_meeting'):
session_list = self.all_meeting_sessions_for_group()
self._order_in_meeting = session_list.index(self) + 1 if self in session_list else 0
return self._order_in_meeting
def all_meeting_sessions_cancelled(self):
return set(s.current_status for s in self.all_meeting_sessions_for_group()) == {'canceled'}
def all_meeting_recordings(self):
recordings = [] # These are not sets because we need to preserve relative ordering or redo the ordering work later
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
recordings.extend([r for r in session.recordings() if r not in recordings])
return recordings
def all_meeting_bluesheets(self):
bluesheets = []
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
bluesheets.extend([b for b in session.bluesheets() if b not in bluesheets])
return bluesheets
def all_meeting_drafts(self):
drafts = []
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
drafts.extend([d for d in session.drafts() if d not in drafts])
return drafts
def all_meeting_agendas(self):
agendas = []
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
agenda = session.agenda()
if agenda and agenda not in agendas:
agendas.append(agenda)
return agendas
def all_meeting_slides(self):
slides = []
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
slides.extend([s for s in session.slides() if s not in slides])
return slides
def all_meeting_minutes(self):
minutes = []
sessions = self.all_meeting_sessions_for_group()
for session in sessions:
minutes_doc = session.minutes()
if minutes_doc and minutes_doc not in minutes:
minutes.append(minutes_doc)
return minutes
def can_manage_materials(self, user):
return can_manage_materials(user,self.group)
def is_material_submission_cutoff(self):
return datetime.date.today() > self.meeting.get_submission_correction_date()
def joint_with_groups_acronyms(self):
return [group.acronym for group in self.joint_with_groups.all()]
def __str__(self):
if self.meeting.type_id == "interim":
return self.meeting.number
status_id = None
if hasattr(self, 'current_status'):
status_id = self.current_status
elif self.pk is not None:
latest_event = SchedulingEvent.objects.filter(session=self.pk).order_by('-time', '-id').first()
if latest_event:
status_id = latest_event.status_id
if status_id in ('canceled','disappr','notmeet','deleted'):
ss0name = "(%s)" % SessionStatusName.objects.get(slug=status_id).name
else:
ss0name = "(unscheduled)"
ss = self.timeslotassignments.filter(schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).order_by('timeslot__time')
if ss:
ss0name = ','.join(x.timeslot.time.strftime("%a-%H%M") for x in ss)
return "%s: %s %s %s" % (self.meeting, self.group.acronym, self.name, ss0name)
@property
def short_name(self):
if self.name:
return self.name
if self.short:
return self.short
if self.group:
return self.group.acronym
return "req#%u" % (id)
@property
def special_request_token(self):
if self.comments is not None and len(self.comments)>0:
return "*"
else:
return ""
def docname_token(self):
sess_mtg = Session.objects.filter(meeting=self.meeting, group=self.group).order_by('pk')
index = list(sess_mtg).index(self)
return 'sess%s' % (string.ascii_lowercase[index])
def docname_token_only_for_multiple(self):
sess_mtg = Session.objects.filter(meeting=self.meeting, group=self.group).order_by('pk')
if len(list(sess_mtg)) > 1:
index = list(sess_mtg).index(self)
if index < 26:
token = 'sess%s' % (string.ascii_lowercase[index])
else:
token = 'sess%s%s' % (string.ascii_lowercase[index//26],string.ascii_lowercase[index%26])
return token
return None
def constraints(self):
return Constraint.objects.filter(source=self.group, meeting=self.meeting).order_by('name__name', 'target__acronym', 'person__name').prefetch_related("source","target","person")
def reverse_constraints(self):
return Constraint.objects.filter(target=self.group, meeting=self.meeting).order_by('name__name')
def official_timeslotassignment(self):
return self.timeslotassignments.filter(schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).first()
@property
def people_constraints(self):
return self.group.constraint_source_set.filter(meeting=self.meeting, name='bethere')
def agenda_text(self):
doc = self.agenda()
if doc:
path = os.path.join(settings.AGENDA_PATH, self.meeting.number, "agenda", doc.uploaded_filename)
if os.path.exists(path):
with io.open(path) as f:
return f.read()
else:
return "No agenda file found"
else:
return "The agenda has not been uploaded yet."
def agenda_file(self):
if not hasattr(self, '_agenda_file'):
self._agenda_file = ""
agenda = self.agenda()
if not agenda:
return ""
# FIXME: uploaded_filename should be replaced with a function that computes filenames when they are of a fixed schema and not uploaded names
self._agenda_file = "%s/agenda/%s" % (self.meeting.number, agenda.uploaded_filename)
return self._agenda_file
def jabber_room_name(self):
if self.type_id=='plenary':
return 'plenary'
elif self.historic_group:
return self.historic_group.acronym
else:
return self.group.acronym
def notes_id(self):
note_id_fragment = 'plenary' if self.type.slug == 'plenary' else self.group.acronym
return f'notes-ietf-{self.meeting.number}-{note_id_fragment}'
def notes_url(self):
return urljoin(settings.IETF_NOTES_URL, self.notes_id())
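# Illustrative sketch (not part of the models): the per-group session letter scheme
# used by Session.docname_token_only_for_multiple() above, for a hypothetical index
# into the ordered list of a group's sessions at a meeting. Relies on the module's
# existing `string` import.
def _example_docname_token(index, total_sessions):
    if total_sessions <= 1:
        return None  # a single session gets no token
    if index < 26:
        return 'sess%s' % string.ascii_lowercase[index]
    # beyond 'sessz' the token uses two letters: index 26 -> 'sessba', 27 -> 'sessbb'
    return 'sess%s%s' % (string.ascii_lowercase[index // 26], string.ascii_lowercase[index % 26])
# _example_docname_token(0, 3) == 'sessa'; _example_docname_token(27, 30) == 'sessbb'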
class SchedulingEvent(models.Model):
session = ForeignKey(Session)
time = models.DateTimeField(default=datetime.datetime.now, help_text="When the event happened")
status = ForeignKey(SessionStatusName)
by = ForeignKey(Person)
def __str__(self):
return u'%s : %s : %s : %s' % (self.session, self.status, self.time, self.by)
class ImportantDate(models.Model):
meeting = ForeignKey(Meeting)
date = models.DateField()
name = ForeignKey(ImportantDateName)
class Meta:
ordering = ["-meeting_id","date", ]
def __str__(self):
return u'%s : %s : %s' % ( self.meeting, self.name, self.date )
class SlideSubmission(models.Model):
time = models.DateTimeField(auto_now=True)
session = ForeignKey(Session)
title = models.CharField(max_length=255)
filename = models.CharField(max_length=255)
apply_to_all = models.BooleanField(default=False)
submitter = ForeignKey(Person)
status = ForeignKey(SlideSubmissionStatusName, null=True, default='pending', on_delete=models.SET_NULL)
doc = ForeignKey(Document, null=True, on_delete=models.SET_NULL)
def staged_filepath(self):
return os.path.join(settings.SLIDE_STAGING_PATH , self.filename)
def staged_url(self):
return "".join([settings.SLIDE_STAGING_URL, self.filename])
class ProceedingsMaterial(models.Model):
meeting = ForeignKey(Meeting, related_name='proceedings_materials')
document = ForeignKey(
Document,
limit_choices_to=dict(type_id='procmaterials'),
unique=True,
)
type = ForeignKey(ProceedingsMaterialTypeName)
class Meta:
unique_together = (('meeting', 'type'),)
def __str__(self):
return self.document.title
def get_href(self):
return f'{self.document.get_href(self.meeting)}'
def active(self):
return self.document.get_state().slug == 'active'
def is_url(self):
return len(self.document.external_url) > 0
def _host_upload_path(instance : 'MeetingHost', filename):
"""Compute filename relative to the storage location
Must live outside a class to allow migrations to deconstruct fields that use it
"""
num = instance.meeting.number
path = (
Path(num) / 'meetinghosts' / f'logo-{"".join(random.choices(string.ascii_lowercase, k=10))}'
).with_suffix(
Path(filename).suffix
)
return str(path)
class MeetingHost(models.Model):
"""Meeting sponsor"""
meeting = ForeignKey(Meeting, related_name='meetinghosts')
name = models.CharField(max_length=255, blank=False)
logo = MissingOkImageField(
storage=NoLocationMigrationFileSystemStorage(location=settings.MEETINGHOST_LOGO_PATH),
upload_to=_host_upload_path,
width_field='logo_width',
height_field='logo_height',
blank=False,
validators=[
MaxImageSizeValidator(
settings.MEETINGHOST_LOGO_MAX_UPLOAD_WIDTH,
settings.MEETINGHOST_LOGO_MAX_UPLOAD_HEIGHT,
),
WrappedValidator(validate_file_size, True),
WrappedValidator(
validate_file_extension,
settings.MEETING_VALID_UPLOAD_EXTENSIONS['meetinghostlogo'],
),
WrappedValidator(
validate_mime_type,
settings.MEETING_VALID_UPLOAD_MIME_TYPES['meetinghostlogo'],
True,
),
],
)
# These are filled in by the ImageField to allow retrieval of image dimensions
# without processing the image each time it's loaded.
logo_width = models.PositiveIntegerField(null=True)
logo_height = models.PositiveIntegerField(null=True)
class Meta:
unique_together = (('meeting', 'name'),)
ordering = ('pk',)
import functools
import time
import weakref
from enum import IntFlag
from itertools import count
from logging import LoggerAdapter, getLogger
from typing import ClassVar, FrozenSet
from .log import control_layer_logger
def select_version(cls, version):
"""Select closest compatible version to requested version
Compatible is defined as ``class_version <= requested_version``
as defined by the types used to denote the versions.
Parameters
----------
cls : type
The base class to find a version of
version : any
Must be the same type as used to define the class versions.
"""
all_versions = cls._class_info_['versions']
matched_version = max(ver for ver in all_versions if ver <= version)
return all_versions[matched_version]
try:
from enum import KEEP
class IFBase(IntFlag, boundary=KEEP):
...
except ImportError:
IFBase = IntFlag
class Kind(IFBase):
"""
This is used in the .kind attribute of all OphydObj (Signals, Devices).
A Device examines its components' .kind atttribute to decide whether to
traverse it in read(), read_configuration(), or neither. Additionally, if
decides whether to include its name in `hints['fields']`.
"""
omitted = 0b000
normal = 0b001
config = 0b010
hinted = 0b101 # Notice that bool(hinted & normal) is True.
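# Illustrative sketch (not part of ophyd): Kind members are bit flags, so membership
# checks are bitwise. 'hinted' has the 'normal' bit set, while 'config' does not.
assert bool(Kind.hinted & Kind.normal)        # hinted signals are also read as normal
assert not bool(Kind.config & Kind.normal)    # config-only signals are not
assert bool((Kind.normal | Kind.config) & Kind.config)  # combined flags keep both bits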
class UnknownSubscription(KeyError):
"Subclass of KeyError. Raised for unknown event type"
...
def register_instances_keyed_on_name(fail_if_late=False):
"""Register OphydObj instances in a WeakValueDictionary keyed on name.
Be advised that ophyd does not require 'name' to be unique and is
configurable by the user at run-time so this should
not be relied on unless name uniqueness is enforced by other means.
Parameters
----------
fail_if_late : boolean
If True, verify that OphydObj has not yet been instantiated and raise
``RuntimeError`` if it has, as a way of verifying that no instances will
be "missed" by this registry. False by default.
Returns
-------
WeakValueDictionary
"""
weak_dict = weakref.WeakValueDictionary()
def register(instance):
weak_dict[instance.name] = instance
OphydObject.add_instantiation_callback(register, fail_if_late)
return weak_dict
def register_instances_in_weakset(fail_if_late=False):
"""Register OphydObj instances in a WeakSet.
Be advised that OphydObj may not always be hashable.
Parameters
----------
fail_if_late : boolean
If True, verify that OphydObj has not yet been instantiated and raise
``RuntimeError`` if it has, as a way of verifying that no instances will
be "missed" by this registry. False by default.
Returns
-------
WeakSet
"""
weak_set = weakref.WeakSet()
def register(instance):
weak_set.add(instance)
OphydObject.add_instantiation_callback(register, fail_if_late)
return weak_set
class OphydObject:
'''The base class for all objects in Ophyd
Handles:
* Subscription/callback mechanism
Parameters
----------
name : str, optional
The name of the object.
attr_name : str, optional
The attr name on its parent (if it has one)
ex ``getattr(self.parent, self.attr_name) is self``
parent : parent, optional
The object's parent, if it exists in a hierarchy
kind : a member of the :class:`~ophydobj.Kind` :class:`~enum.IntEnum`
(or equivalent integer), optional
Default is ``Kind.normal``. See :class:`~ophydobj.Kind` for options.
Attributes
----------
name
'''
# Any callables appended to this mutable class variable will be notified
# one time when a new instance of OphydObj is instantiated. See
# OphydObject.add_instantiation_callback().
__instantiation_callbacks = []
_default_sub = None
# This is set to True when the first OphydObj is instantiated. This may be
# of interest to code that adds something to instantiation_callbacks, which
# may want to know whether it has already "missed" any instances.
__any_instantiated = False
subscriptions: ClassVar[FrozenSet[str]] = frozenset()
def __init__(self, *, name=None, attr_name='', parent=None, labels=None,
kind=None):
if labels is None:
labels = set()
self._ophyd_labels_ = set(labels)
if kind is None:
kind = Kind.normal
self.kind = kind
super().__init__()
# base name and ref to parent, these go with properties
if name is None:
name = ''
self._attr_name = attr_name
if not isinstance(name, str):
raise ValueError("name must be a string.")
self._name = name
self._parent = parent
# dictionary of wrapped callbacks
self._callbacks = {k: {} for k in self.subscriptions}
# this is to maintain api on clear_sub
self._unwrapped_callbacks = {k: {} for k in self.subscriptions}
# map cid -> back to which event it is in
self._cid_to_event_mapping = dict()
# cache of last inputs to _run_subs, the semi-private way
# to trigger the callbacks for a given subscription to be run
self._args_cache = {k: None for k in self.subscriptions}
# count of subscriptions we have handed out, used to give unique ids
self._cb_count = count()
self.log = LoggerAdapter(getLogger('ophyd.objects'), {'ophyd_object_name': name})
self.control_layer_log = LoggerAdapter(control_layer_logger, {'ophyd_object_name': name})
if not self.__any_instantiated:
self.log.debug("first instance of OphydObject: id=%s", id(self))
OphydObject._mark_as_instantiated()
self.__register_instance(self)
@classmethod
def _mark_as_instantiated(cls):
cls.__any_instantiated = True
@classmethod
def add_instantiation_callback(cls, callback, fail_if_late=False):
"""
Register a callback which will receive each OphydObject instance.
Parameters
----------
callback : callable
Expected signature: ``f(ophydobj_instance)``
fail_if_late : boolean
If True, verify that OphydObj has not yet been instantiated and raise
``RuntimeError`` if it has, as a way of verifying that no instances will
be "missed" by this registry. False by default.
"""
if fail_if_late and OphydObject.__any_instantiated:
raise RuntimeError(
"OphydObject has already been instantiated at least once, and "
"this callback will not be notified of those instances that "
"have already been created. If that is acceptable for this "
"application, set fail_if_false=False.")
# This is a class variable.
cls.__instantiation_callbacks.append(callback)
@classmethod
def __register_instance(cls, instance):
"""
Notify the callbacks in OphydObject.instantiation_callbacks of an instance.
"""
for callback in cls.__instantiation_callbacks:
callback(instance)
def __init_subclass__(cls, version=None, version_of=None,
version_type=None, **kwargs):
'This is called automatically in Python for all subclasses of OphydObject'
super().__init_subclass__(**kwargs)
cls.subscriptions = frozenset(
{
getattr(cls, key)
for key in dir(cls)
if key.startswith('SUB') or key.startswith('_SUB')
}
)
if version is None:
if version_of is not None:
raise RuntimeError('Must specify a version if `version_of` '
'is specified')
if version_type is None:
return
# Allow specification of version_type without specifying a version,
# for use in a base class
cls._class_info_ = dict(
versions={},
version=None,
version_type=version_type,
version_of=version_of
)
return
if version_of is None:
versions = {}
version_of = cls
else:
versions = version_of._class_info_['versions']
if version_type is None:
version_type = version_of._class_info_['version_type']
elif version_type != version_of._class_info_['version_type']:
raise RuntimeError(
"version_type with in a family must be consistent, "
f"you passed in {version_type}, to {cls.__name__} "
f"but {version_of.__name__} has version_type "
f"{version_of._class_info_["version_type"]}")
if not issubclass(cls, version_of):
raise RuntimeError(
f'Versions are only valid for classes in the same '
f'hierarchy. {cls.__name__} is not a subclass of '
f'{version_of.__name__}.'
)
if versions is not None and version in versions:
getLogger('ophyd.object').warning(
'Redefining %r version %s: old=%r new=%r',
version_of, version, versions[version], cls
)
versions[version] = cls
cls._class_info_ = dict(
versions=versions,
version=version,
version_type=version_type,
version_of=version_of
)
def _validate_kind(self, val):
if isinstance(val, str):
return Kind[val.lower()]
return Kind(val)
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, val):
self._kind = self._validate_kind(val)
@property
def dotted_name(self) -> str:
"""Return the dotted name
"""
names = []
obj = self
while obj.parent is not None:
names.append(obj.attr_name)
obj = obj.parent
return '.'.join(names[::-1])
@property
def name(self):
'''name of the device'''
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attr_name(self):
return self._attr_name
@property
def connected(self):
'''If the device is connected.
Subclasses should override this'''
return True
def destroy(self):
'''Disconnect the object from the underlying control layer'''
self.unsubscribe_all()
@property
def parent(self):
'''The parent of the ophyd object.
If at the top of its hierarchy, `parent` will be None
'''
return self._parent
@property
def root(self):
"Walk parents to find ultimate ancestor (parent's parent...)."
root = self
while True:
if root.parent is None:
return root
root = root.parent
@property
def report(self):
'''A report on the object.'''
return {}
@property
def event_types(self):
'''Events that can be subscribed to via `obj.subscribe`
'''
return tuple(self.subscriptions)
def _run_subs(self, *args, sub_type, **kwargs):
'''Run a set of subscription callbacks
Only the kwarg ``sub_type`` is required, indicating
the type of callback to perform. All other positional arguments
and kwargs are passed directly to the callback function.
The host object will be injected into kwargs as 'obj' unless that key
already exists.
If the `timestamp` is None, then it will be replaced by the current
time.
No exceptions are raised if the callback functions fail.
'''
if sub_type not in self.subscriptions:
raise UnknownSubscription(
"Unknown subscription {!r}, must be one of {!r}"
.format(sub_type, self.subscriptions))
kwargs['sub_type'] = sub_type
# Guarantee that the object will be in the kwargs
kwargs.setdefault('obj', self)
# And if a timestamp key exists, but isn't filled -- supply it with
# a new timestamp
if 'timestamp' in kwargs and kwargs['timestamp'] is None:
kwargs['timestamp'] = time.time()
# Shallow-copy the callback arguments for replaying the
# callback at a later time (e.g., when a new subscription is made)
self._args_cache[sub_type] = (tuple(args), dict(kwargs))
for cb in list(self._callbacks[sub_type].values()):
cb(*args, **kwargs)
def subscribe(self, callback, event_type=None, run=True):
'''Subscribe to events this event_type generates.
The callback will be called as ``cb(*args, **kwargs)`` with
the values passed to `_run_subs` with the following additional keys:
sub_type : the string value of the event_type
obj : the host object, added if 'obj' not already in kwargs
if the key 'timestamp' is in kwargs _and_ is None, then it will
be replaced with the current time before running the callback.
The ``*args``, ``**kwargs`` passed to _run_subs will be cached as
shallow copies; be aware of this when passing in mutable data.
.. warning::
If the callback raises any exceptions when run they will be
silently ignored.
Parameters
----------
callback : callable
A callable function (that takes kwargs) to be run when the event is
generated. The expected signature is ::
def cb(*args, obj: OphydObject, sub_type: str, **kwargs) -> None:
The exact args/kwargs passed are whatever are passed to
``_run_subs``
event_type : str, optional
The name of the event to subscribe to (if None, defaults to
the default sub for the instance - obj._default_sub)
This maps to the ``sub_type`` kwargs in `_run_subs`
run : bool, optional
Run the callback now
See Also
--------
clear_sub, _run_subs
Returns
-------
cid : int
id of callback, can be passed to `unsubscribe` to remove the
callback
'''
if not callable(callback):
raise ValueError("callback must be callable")
# do default event type
if event_type is None:
# warnings.warn("Please specify which call back you wish to "
# "attach to defaulting to {}"
# .format(self._default_sub), stacklevel=2)
event_type = self._default_sub
if event_type is None:
raise ValueError('Subscription type not set and object {} of class'
' {} has no default subscription set'
''.format(self.name, self.__class__.__name__))
# check that this is a valid event type
if event_type not in self.subscriptions:
raise UnknownSubscription(
"Unknown subscription {!r}, must be one of {!r}"
.format(event_type, self.subscriptions))
# wrapper for callback to snarf exceptions
def wrap_cb(cb):
@functools.wraps(cb)
def inner(*args, **kwargs):
try:
cb(*args, **kwargs)
except Exception:
sub_type = kwargs['sub_type']
self.log.exception(
'Subscription %s callback exception (%s)',
sub_type, self)
return inner
# get next cid
cid = next(self._cb_count)
wrapped = wrap_cb(callback)
self._unwrapped_callbacks[event_type][cid] = callback
self._callbacks[event_type][cid] = wrapped
self._cid_to_event_mapping[cid] = event_type
if run:
cached = self._args_cache[event_type]
if cached is not None:
args, kwargs = cached
wrapped(*args, **kwargs)
return cid
def _reset_sub(self, event_type):
'''Remove all subscriptions in an event type'''
self._callbacks[event_type].clear()
self._unwrapped_callbacks[event_type].clear()
def clear_sub(self, cb, event_type=None):
'''Remove a subscription, given the original callback function
See also :meth:`subscribe`, :meth:`unsubscribe`
Parameters
----------
cb : callable
The callback
event_type : str, optional
The event to unsubscribe from (if None, removes it from all event
types)
'''
if event_type is None:
event_types = self.event_types
else:
event_types = [event_type]
cid_list = []
for et in event_types:
for cid, target in self._unwrapped_callbacks[et].items():
if cb == target:
cid_list.append(cid)
for cid in cid_list:
self.unsubscribe(cid)
def unsubscribe(self, cid):
"""Remove a subscription
See also :meth:`subscribe`, :meth:`clear_sub`
Parameters
----------
cid : int
token return by :meth:`subscribe`
"""
ev_type = self._cid_to_event_mapping.pop(cid, None)
if ev_type is None:
return
del self._unwrapped_callbacks[ev_type][cid]
del self._callbacks[ev_type][cid]
def unsubscribe_all(self):
for ev_type in self._callbacks:
self._reset_sub(ev_type)
def check_value(self, value, **kwargs):
'''Check if the value is valid for this object
This function does no normalization, but may raise if the
value is invalid.
Raises
------
ValueError
'''
pass
def __repr__(self):
info = self._repr_info()
info = ', '.join('{}={!r}'.format(key, value) for key, value in info)
return '{}({})'.format(self.__class__.__name__, info)
def _repr_info(self):
'Yields pairs of (key, value) to generate the object repr'
if self.name is not None:
yield ('name', self.name)
if self._parent is not None:
yield ('parent', self.parent.name)
def __copy__(self):
'''Copy the ophyd object
Shallow copying ophyd objects uses the repr information from the
_repr_info method to create a new object.
'''
kwargs = dict(self._repr_info())
return self.__class__(**kwargs)
def __getnewargs_ex__(self):
'''Used by pickle to serialize an ophyd object
Returns
-------
(args, kwargs)
Arguments to be passed to __init__, necessary to recreate this
object
'''
kwargs = dict(self._repr_info())
return ((), kwargs)
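# Illustrative usage sketch (not part of ophyd): wiring a callback into the
# subscription machinery above. The class name, event name, and values are
# hypothetical.
class _ExampleSignal(OphydObject):
    SUB_VALUE = 'value'        # collected into .subscriptions by __init_subclass__
    _default_sub = SUB_VALUE

    def put(self, value):
        # notify subscribers; a None timestamp is filled in by _run_subs
        self._run_subs(sub_type=self.SUB_VALUE, value=value, timestamp=None)

def _print_value(*args, value=None, **kwargs):
    print('new value:', value)

_sig = _ExampleSignal(name='example')
_cid = _sig.subscribe(_print_value)  # defaults to _default_sub; nothing cached yet, so no replay
_sig.put(3.14)                       # calls _print_value(value=3.14, sub_type='value', obj=_sig, ...)
_sig.unsubscribe(_cid)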
| import functools
import time
import weakref
from enum import IntFlag
from itertools import count
from logging import LoggerAdapter, getLogger
from typing import ClassVar, FrozenSet
from .log import control_layer_logger
def select_version(cls, version):
"""Select closest compatible version to requested version
Compatible is defined as ``class_version <= requested_version``
as defined by the types used to denote the versions.
Parameters
----------
cls : type
The base class to find a version of
version : any
Must be the same type as used to define the class versions.
"""
all_versions = cls._class_info_['versions']
matched_version = max(ver for ver in all_versions if ver <= version)
return all_versions[matched_version]
try:
from enum import KEEP
class IFBase(IntFlag, boundary=KEEP):
...
except ImportError:
IFBase = IntFlag
class Kind(IFBase):
"""
This is used in the .kind attribute of all OphydObj (Signals, Devices).
A Device examines its components' .kind attribute to decide whether to
traverse it in read(), read_configuration(), or neither. Additionally, it
decides whether to include its name in `hints['fields']`.
"""
omitted = 0b000
normal = 0b001
config = 0b010
hinted = 0b101 # Notice that bool(hinted & normal) is True.
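# Illustrative note (not part of ophyd): because Kind is an IntFlag, members compose with "|"
# and are tested with "&"; e.g. a component can be both read and recorded as configuration.
# (The .kind setter further below also accepts the member name as a string, via _validate_kind.)
read_and_config = Kind.normal | Kind.config
assert bool(read_and_config & Kind.normal) and bool(read_and_config & Kind.config)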
class UnknownSubscription(KeyError):
"Subclass of KeyError. Raised for unknown event type"
...
def register_instances_keyed_on_name(fail_if_late=False):
"""Register OphydObj instances in a WeakValueDictionary keyed on name.
Be advised that ophyd does not require 'name' to be unique, and names are
configurable by the user at run-time, so this should
not be relied on unless name uniqueness is enforced by other means.
Parameters
----------
fail_if_late : boolean
If True, verify that OphydObj has not yet been instantiated and raise
``RuntimeError`` if it has, as a way of verifying that no instances will
be "missed" by this registry. False by default.
Returns
-------
WeakValueDictionary
"""
weak_dict = weakref.WeakValueDictionary()
def register(instance):
weak_dict[instance.name] = instance
OphydObject.add_instantiation_callback(register, fail_if_late)
return weak_dict
def register_instances_in_weakset(fail_if_late=False):
"""Register OphydObj instances in a WeakSet.
Be advised that OphydObj may not always be hashable.
Parameters
----------
fail_if_late : boolean
If True, verify that OphydObj has not yet been instantiated and raise
``RuntimeError`` if it has, as a way of verifying that no instances will
be "missed" by this registry. False by default.
Returns
-------
WeakSet
"""
weak_set = weakref.WeakSet()
def register(instance):
weak_set.add(instance)
OphydObject.add_instantiation_callback(register, fail_if_late)
return weak_set
class OphydObject:
'''The base class for all objects in Ophyd
Handles:
* Subscription/callback mechanism
Parameters
----------
name : str, optional
The name of the object.
attr_name : str, optional
The attr name on its parent (if it has one),
e.g. ``getattr(self.parent, self.attr_name) is self``
parent : parent, optional
The object's parent, if it exists in a hierarchy
kind : a member of the :class:`~ophydobj.Kind` :class:`~enum.IntEnum`
(or equivalent integer), optional
Default is ``Kind.normal``. See :class:`~ophydobj.Kind` for options.
Attributes
----------
name
'''
# Any callables appended to this mutable class variable will be notified
# one time when a new instance of OphydObj is instantiated. See
# OphydObject.add_instantiation_callback().
__instantiation_callbacks = []
_default_sub = None
# This is set to True when the first OphydObj is instantiated. This may be
# of interest to code that adds something to instantiation_callbacks, which
# may want to know whether it has already "missed" any instances.
__any_instantiated = False
subscriptions: ClassVar[FrozenSet[str]] = frozenset()
def __init__(self, *, name=None, attr_name='', parent=None, labels=None,
kind=None):
if labels is None:
labels = set()
self._ophyd_labels_ = set(labels)
if kind is None:
kind = Kind.normal
self.kind = kind
super().__init__()
# base name and ref to parent, these go with properties
if name is None:
name = ''
self._attr_name = attr_name
if not isinstance(name, str):
raise ValueError("name must be a string.")
self._name = name
self._parent = parent
# dictionary of wrapped callbacks
self._callbacks = {k: {} for k in self.subscriptions}
# this is to maintain api on clear_sub
self._unwrapped_callbacks = {k: {} for k in self.subscriptions}
# map cid -> back to which event it is in
self._cid_to_event_mapping = dict()
# cache of last inputs to _run_subs, the semi-private way
# to trigger the callbacks for a given subscription to be run
self._args_cache = {k: None for k in self.subscriptions}
# count of subscriptions we have handed out, used to give unique ids
self._cb_count = count()
self.log = LoggerAdapter(getLogger('ophyd.objects'), {'ophyd_object_name': name})
self.control_layer_log = LoggerAdapter(control_layer_logger, {'ophyd_object_name': name})
if not self.__any_instantiated:
self.log.debug("first instance of OphydObject: id=%s", id(self))
OphydObject._mark_as_instantiated()
self.__register_instance(self)
@classmethod
def _mark_as_instantiated(cls):
cls.__any_instantiated = True
@classmethod
def add_instantiation_callback(cls, callback, fail_if_late=False):
"""
Register a callback which will receive each OphydObject instance.
Parameters
----------
callback : callable
Expected signature: ``f(ophydobj_instance)``
fail_if_late : boolean
If True, verify that OphydObj has not yet been instantiated and raise
``RuntimeError`` if it has, as a way of verifying that no instances will
be "missed" by this registry. False by default.
"""
if fail_if_late and OphydObject.__any_instantiated:
raise RuntimeError(
"OphydObject has already been instantiated at least once, and "
"this callback will not be notified of those instances that "
"have already been created. If that is acceptable for this "
"application, set fail_if_false=False.")
# This is a class variable.
cls.__instantiation_callbacks.append(callback)
@classmethod
def __register_instance(cls, instance):
"""
Notify the callbacks in OphydObject.instantiation_callbacks of an instance.
"""
for callback in cls.__instantiation_callbacks:
callback(instance)
def __init_subclass__(cls, version=None, version_of=None,
version_type=None, **kwargs):
'This is called automatically in Python for all subclasses of OphydObject'
super().__init_subclass__(**kwargs)
cls.subscriptions = frozenset(
{
getattr(cls, key)
for key in dir(cls)
if key.startswith('SUB') or key.startswith('_SUB')
}
)
if version is None:
if version_of is not None:
raise RuntimeError('Must specify a version if `version_of` '
'is specified')
if version_type is None:
return
# Allow specification of version_type without specifying a version,
# for use in a base class
cls._class_info_ = dict(
versions={},
version=None,
version_type=version_type,
version_of=version_of
)
return
if version_of is None:
versions = {}
version_of = cls
else:
versions = version_of._class_info_['versions']
if version_type is None:
version_type = version_of._class_info_['version_type']
elif version_type != version_of._class_info_['version_type']:
raise RuntimeError(
"version_type with in a family must be consistent, "
f"you passed in {version_type}, to {cls.__name__} "
f"but {version_of.__name__} has version_type "
f"{version_of._class_info_['version_type']}")
if not issubclass(cls, version_of):
raise RuntimeError(
f'Versions are only valid for classes in the same '
f'hierarchy. {cls.__name__} is not a subclass of '
f'{version_of.__name__}.'
)
if versions is not None and version in versions:
getLogger('ophyd.object').warning(
'Redefining %r version %s: old=%r new=%r',
version_of, version, versions[version], cls
)
versions[version] = cls
cls._class_info_ = dict(
versions=versions,
version=version,
version_type=version_type,
version_of=version_of
)
def _validate_kind(self, val):
if isinstance(val, str):
return Kind[val.lower()]
return Kind(val)
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, val):
self._kind = self._validate_kind(val)
@property
def dotted_name(self) -> str:
"""Return the dotted name
"""
names = []
obj = self
while obj.parent is not None:
names.append(obj.attr_name)
obj = obj.parent
return '.'.join(names[::-1])
@property
def name(self):
'''name of the device'''
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attr_name(self):
return self._attr_name
@property
def connected(self):
'''If the device is connected.
Subclasses should override this'''
return True
def destroy(self):
'''Disconnect the object from the underlying control layer'''
self.unsubscribe_all()
@property
def parent(self):
'''The parent of the ophyd object.
If at the top of its hierarchy, `parent` will be None
'''
return self._parent
@property
def root(self):
"Walk parents to find ultimate ancestor (parent's parent...)."
root = self
while True:
if root.parent is None:
return root
root = root.parent
@property
def report(self):
'''A report on the object.'''
return {}
@property
def event_types(self):
'''Events that can be subscribed to via `obj.subscribe`
'''
return tuple(self.subscriptions)
def _run_subs(self, *args, sub_type, **kwargs):
'''Run a set of subscription callbacks
Only the kwarg ``sub_type`` is required, indicating
the type of callback to perform. All other positional arguments
and kwargs are passed directly to the callback function.
The host object will be injected into kwargs as 'obj' unless that key
already exists.
If the `timestamp` is None, then it will be replaced by the current
time.
No exceptions are raised if the callback functions fail.
'''
if sub_type not in self.subscriptions:
raise UnknownSubscription(
"Unknown subscription {!r}, must be one of {!r}"
.format(sub_type, self.subscriptions))
kwargs['sub_type'] = sub_type
# Guarantee that the object will be in the kwargs
kwargs.setdefault('obj', self)
# And if a timestamp key exists, but isn't filled -- supply it with
# a new timestamp
if 'timestamp' in kwargs and kwargs['timestamp'] is None:
kwargs['timestamp'] = time.time()
# Shallow-copy the callback arguments for replaying the
# callback at a later time (e.g., when a new subscription is made)
self._args_cache[sub_type] = (tuple(args), dict(kwargs))
for cb in list(self._callbacks[sub_type].values()):
cb(*args, **kwargs)
def subscribe(self, callback, event_type=None, run=True):
'''Subscribe to events this event_type generates.
The callback will be called as ``cb(*args, **kwargs)`` with
the values passed to `_run_subs` with the following additional keys:
sub_type : the string value of the event_type
obj : the host object, added if 'obj' not already in kwargs
if the key 'timestamp' is in kwargs _and_ is None, then it will
be replaced with the current time before running the callback.
The ``*args``, ``**kwargs`` passed to _run_subs will be cached as
shallow copies, be aware of passing in mutable data.
.. warning::
If the callback raises any exceptions when run they will be
silently ignored.
Parameters
----------
callback : callable
A callable function (that takes kwargs) to be run when the event is
generated. The expected signature is ::
def cb(*args, obj: OphydObject, sub_type: str, **kwargs) -> None:
The exact args/kwargs passed are whatever are passed to
``_run_subs``
event_type : str, optional
The name of the event to subscribe to (if None, defaults to
the default sub for the instance - obj._default_sub)
This maps to the ``sub_type`` kwargs in `_run_subs`
run : bool, optional
Run the callback now
See Also
--------
clear_sub, _run_subs
Returns
-------
cid : int
id of callback, can be passed to `unsubscribe` to remove the
callback
'''
if not callable(callback):
raise ValueError("callback must be callable")
# do default event type
if event_type is None:
# warnings.warn("Please specify which call back you wish to "
# "attach to defaulting to {}"
# .format(self._default_sub), stacklevel=2)
event_type = self._default_sub
if event_type is None:
raise ValueError('Subscription type not set and object {} of class'
' {} has no default subscription set'
''.format(self.name, self.__class__.__name__))
# check that this is a valid event type
if event_type not in self.subscriptions:
raise UnknownSubscription(
"Unknown subscription {!r}, must be one of {!r}"
.format(event_type, self.subscriptions))
# wrapper for callback to snarf exceptions
def wrap_cb(cb):
@functools.wraps(cb)
def inner(*args, **kwargs):
try:
cb(*args, **kwargs)
except Exception:
sub_type = kwargs['sub_type']
self.log.exception(
'Subscription %s callback exception (%s)',
sub_type, self)
return inner
# get next cid
cid = next(self._cb_count)
wrapped = wrap_cb(callback)
self._unwrapped_callbacks[event_type][cid] = callback
self._callbacks[event_type][cid] = wrapped
self._cid_to_event_mapping[cid] = event_type
if run:
cached = self._args_cache[event_type]
if cached is not None:
args, kwargs = cached
wrapped(*args, **kwargs)
return cid
def _reset_sub(self, event_type):
'''Remove all subscriptions in an event type'''
self._callbacks[event_type].clear()
self._unwrapped_callbacks[event_type].clear()
def clear_sub(self, cb, event_type=None):
'''Remove a subscription, given the original callback function
See also :meth:`subscribe`, :meth:`unsubscribe`
Parameters
----------
cb : callable
The callback
event_type : str, optional
The event to unsubscribe from (if None, removes it from all event
types)
'''
if event_type is None:
event_types = self.event_types
else:
event_types = [event_type]
cid_list = []
for et in event_types:
for cid, target in self._unwrapped_callbacks[et].items():
if cb == target:
cid_list.append(cid)
for cid in cid_list:
self.unsubscribe(cid)
def unsubscribe(self, cid):
"""Remove a subscription
See also :meth:`subscribe`, :meth:`clear_sub`
Parameters
----------
cid : int
token returned by :meth:`subscribe`
"""
ev_type = self._cid_to_event_mapping.pop(cid, None)
if ev_type is None:
return
del self._unwrapped_callbacks[ev_type][cid]
del self._callbacks[ev_type][cid]
def unsubscribe_all(self):
for ev_type in self._callbacks:
self._reset_sub(ev_type)
def check_value(self, value, **kwargs):
'''Check if the value is valid for this object
This function does no normalization, but may raise if the
value is invalid.
Raises
------
ValueError
'''
pass
def __repr__(self):
info = self._repr_info()
info = ', '.join('{}={!r}'.format(key, value) for key, value in info)
return '{}({})'.format(self.__class__.__name__, info)
def _repr_info(self):
'Yields pairs of (key, value) to generate the object repr'
if self.name is not None:
yield ('name', self.name)
if self._parent is not None:
yield ('parent', self.parent.name)
def __copy__(self):
'''Copy the ophyd object
Shallow copying ophyd objects uses the repr information from the
_repr_info method to create a new object.
'''
kwargs = dict(self._repr_info())
return self.__class__(**kwargs)
def __getnewargs_ex__(self):
'''Used by pickle to serialize an ophyd object
Returns
-------
(args, kwargs)
Arguments to be passed to __init__, necessary to recreate this
object
'''
kwargs = dict(self._repr_info())
return ((), kwargs)
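# --- Illustrative sketch (not part of ophyd): class-version families. The names DetBase/DetV1/DetV3
# --- are hypothetical. Passing version=/version_of=/version_type= to a class statement (or to
# --- types.new_class, used here to avoid an indented class body) registers the class in a family,
# --- and select_version() then returns the closest class whose version is <= the requested one.
import types
DetBase = types.new_class("DetBase", (OphydObject,), {"version_type": int})
DetV1 = types.new_class("DetV1", (DetBase,), {"version": 1, "version_of": DetBase})
DetV3 = types.new_class("DetV3", (DetV1,), {"version": 3, "version_of": DetBase})
assert select_version(DetBase, 2) is DetV1  # 1 is the closest registered version <= 2
assert select_version(DetBase, 3) is DetV3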
|
import base64
import os
import random
import re
import shutil
import string
import subprocess
import sys
import traceback
from datetime import datetime, timedelta
from random import randint
from subprocess import PIPE
from time import sleep
import colorama
from colorama import Fore, Style, AnsiToWin32
import insomniac.__version__ as __version__
import insomniac.globals as insomniac_globals
random.seed()
# Init colorama but set "wrap" to False to not replace sys.stdout with a proxy object. It's meaningless as
# sys.stdout is set to a custom Logger object in utils.py
colorama.init(wrap=False)
COLOR_HEADER = Fore.MAGENTA
COLOR_OKBLUE = Fore.BLUE
COLOR_OKGREEN = Fore.GREEN
COLOR_REPORT = Fore.YELLOW
COLOR_FAIL = Fore.RED
COLOR_ENDC = Style.RESET_ALL
COLOR_BOLD = Style.BRIGHT
ENGINE_LOGS_DIR_NAME = 'logs'
UI_LOGS_DIR_NAME = 'ui-logs'
APP_REOPEN_WARNING = "Warning: Activity not started, intent has been delivered to currently running top-most instance."
def get_instagram_version(device_id, app_id):
stream = os.popen("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell dumpsys package {app_id}")
output = stream.read()
version_match = re.findall('versionName=(\\S+)', output)
if len(version_match) == 1:
version = version_match[0]
else:
version = "not found"
stream.close()
return version
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def get_connected_devices_adb_ids():
stream = os.popen('adb devices')
output = stream.read()
devices_count = len(re.findall('device\n', output))
stream.close()
if devices_count == 0:
return []
devices = []
for line in output.split('\n'):
if '\tdevice' in line:
devices.append(line.split('\t')[0])
return devices
def check_adb_connection(device_id, wait_for_device):
is_device_id_provided = device_id is not None
while True:
print_timeless("Looking for ADB devices...")
stream = os.popen('adb devices')
output = stream.read()
devices_count = len(re.findall('device\n', output))
stream.close()
if not wait_for_device:
break
if devices_count == 0:
print_timeless(COLOR_HEADER + "Couldn't find any ADB-device available, sleeping a bit and trying again..." + COLOR_ENDC)
sleep(10)
continue
if not is_device_id_provided:
break
found_device = False
for line in output.split('\n'):
if device_id in line and 'device' in line:
found_device = True
break
if found_device:
break
print_timeless(COLOR_HEADER + "Couldn't find ADB-device " + device_id + " available, sleeping a bit and trying again..." + COLOR_ENDC)
sleep(10)
continue
is_ok = True
message = "That's ok."
if devices_count == 0:
is_ok = False
message = "Cannot proceed."
elif devices_count > 1 and not is_device_id_provided:
is_ok = False
message = "Use --device to specify a device."
print_timeless(("" if is_ok else COLOR_FAIL) + "Connected devices via adb: " + str(devices_count) + ". " + message +
COLOR_ENDC)
return is_ok
def open_instagram(device_id, app_id):
print("Open Instagram app")
cmd = ("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell am start -n {app_id}/com.instagram.mainactivity.MainActivity")
cmd_res = subprocess.run(cmd, stdout=PIPE, stderr=PIPE, shell=True, encoding="utf8")
err = cmd_res.stderr.strip()
if err and err != APP_REOPEN_WARNING:
print(COLOR_FAIL + err + COLOR_ENDC)
def open_instagram_with_url(device_id, app_id, url):
print("Open Instagram app with url: {}".format(url))
cmd = ("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell am start -a android.intent.action.VIEW -d {url} {app_id}")
cmd_res = subprocess.run(cmd, stdout=PIPE, stderr=PIPE, shell=True, encoding="utf8")
err = cmd_res.stderr.strip()
if err and err != APP_REOPEN_WARNING:
print(COLOR_FAIL + err + COLOR_ENDC)
return False
return True
def close_instagram(device_id, app_id):
print("Close Instagram app")
os.popen("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell am force-stop {app_id}").close()
# Press HOME to leave a possible state of opened system dialog(s)
os.popen("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell input keyevent 3").close()
def clear_instagram_data(device_id, app_id):
print("Clear Instagram data")
os.popen("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell pm clear {app_id}").close()
def save_crash(device, ex=None):
global print_log
try:
device.wake_up()
directory_name = "Crash-" + datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
try:
os.makedirs(os.path.join("crashes", directory_name), exist_ok=False)
except OSError:
print(COLOR_FAIL + "Directory " + directory_name + " already exists." + COLOR_ENDC)
return
screenshot_format = ".png" if device.is_old() else ".jpg"
try:
device.screenshot(os.path.join("crashes", directory_name, "screenshot" + screenshot_format))
except RuntimeError:
print(COLOR_FAIL + "Cannot save screenshot." + COLOR_ENDC)
view_hierarchy_format = ".xml"
try:
device.dump_hierarchy(os.path.join("crashes", directory_name, "view_hierarchy" + view_hierarchy_format))
except RuntimeError:
print(COLOR_FAIL + "Cannot save view hierarchy." + COLOR_ENDC)
with open(os.path.join("crashes", directory_name, "logs.txt"), 'w', encoding="utf-8") as outfile:
outfile.write(print_log)
if ex:
outfile.write("\n")
outfile.write(describe_exception(ex))
shutil.make_archive(os.path.join("crashes", directory_name), 'zip', os.path.join("crashes", directory_name))
shutil.rmtree(os.path.join("crashes", directory_name))
if insomniac_globals.is_insomniac():
print(COLOR_OKGREEN + "Crash saved as \"crashes/" + directory_name + ".zip\"." + COLOR_ENDC)
print(COLOR_OKGREEN + "Please attach this file if you gonna report the crash at" + COLOR_ENDC)
print(COLOR_OKGREEN + "https://github.com/alexal1/Insomniac/issues\n" + COLOR_ENDC)
except Exception as e:
print(COLOR_FAIL + f"Could not save crash after an error. Crash-save-error: {str(e)}" + COLOR_ENDC)
print(COLOR_FAIL + describe_exception(e) + COLOR_ENDC)
def print_copyright():
if insomniac_globals.is_insomniac():
print_timeless("\nIf you like this bot, please " + COLOR_BOLD + "give us a star" + COLOR_ENDC + ":")
print_timeless(COLOR_BOLD + "https://github.com/alexal1/Insomniac\n" + COLOR_ENDC)
def _print_with_time_decorator(standard_print, print_time, debug, ui_log):
def wrapper(*args, **kwargs):
if insomniac_globals.is_ui_process and not ui_log:
return
if debug and not __version__.__debug_mode__:
return
global print_log
if print_time:
time = datetime.now().strftime("%m/%d %H:%M:%S")
print_log += re.sub(r"\[\d+m", '', ("[" + time + "] " + str(*args, **kwargs) + "\n"))
return standard_print("[" + time + "]", *args, **kwargs)
else:
print_log += re.sub(r"\[\d+m", '', (str(*args, **kwargs) + "\n"))
return standard_print(*args, **kwargs)
return wrapper
def get_value(count: str, name: str, default: int, max_count=None):
return _get_value(count, name, default, max_count, is_float=False)
def get_float_value(count: str, name: str, default: float, max_count=None):
return _get_value(count, name, default, max_count, is_float=True)
def _get_value(count, name, default, max_count, is_float):
def print_error():
print(COLOR_FAIL + name.format(default) + f". Using default value instead of \"{count}\", because it must be "
"either a number (e.g. 2) or a range (e.g. 2-4)." + COLOR_ENDC)
parts = count.split("-")
if len(parts) <= 0:
value = default
print_error()
elif len(parts) == 1:
try:
value = float(count) if is_float else int(count)
print(COLOR_BOLD + name.format(value, "%.2f") + COLOR_ENDC)
except ValueError:
value = default
print_error()
elif len(parts) == 2:
try:
value = random.uniform(float(parts[0]), float(parts[1])) if is_float \
else randint(int(parts[0]), int(parts[1]))
print(COLOR_BOLD + name.format(value, "%.2f") + COLOR_ENDC)
except ValueError:
value = default
print_error()
else:
value = default
print_error()
if max_count is not None and value > max_count:
print(COLOR_FAIL + name.format(max_count) + ". This is the maximum value." + COLOR_ENDC)
value = max_count
return value
def get_left_right_values(left_right_str, name, default):
def print_error():
print(COLOR_FAIL + name.format(default) + f". Using default value instead of \"{left_right_str}\", because it "
"must be either a number (e.g. 2) or a range (e.g. 2-4)." + COLOR_ENDC)
parts = left_right_str.split("-")
if len(parts) <= 0:
value = default
print_error()
elif len(parts) == 1:
try:
value = (int(left_right_str), int(left_right_str))
print(COLOR_BOLD + name.format(value) + COLOR_ENDC)
except ValueError:
value = default
print_error()
elif len(parts) == 2:
try:
value = (int(parts[0]), int(parts[1]))
print(COLOR_BOLD + name.format(value) + COLOR_ENDC)
except ValueError:
value = default
print_error()
else:
value = default
print_error()
return value
def get_from_to_timestamps_by_hours(hours):
"""Returns a tuple of two timestamps: (given number of hours before; current time)"""
return get_from_to_timestamps_by_minutes(hours*60)
def get_from_to_timestamps_by_minutes(minutes):
"""Returns a tuple of two timestamps: (given number of minutes before; current time)"""
time_to = datetime.now().timestamp()
delta = timedelta(minutes=minutes).total_seconds()
time_from = time_to - delta
return time_from, time_to
def get_count_of_nums_in_str(str_to_check):
count = 0
for i in range(0, 10):
count += str_to_check.count(str(i))
return count
def get_random_string(length):
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=length))
def describe_exception(ex, with_stacktrace=True):
trace = ''.join(traceback.format_exception(etype=type(ex), value=ex, tb=ex.__traceback__)) if with_stacktrace else ''
description = f"Error - {str(ex)}\n{trace}"
return description
def split_list_items_with_separator(original_list, separator):
values = []
for record in original_list:
for value in record.split(separator):
stripped_value = value.strip()
if stripped_value:
values.append(stripped_value)
return values
def to_base_64(text):
text_bytes = text.encode(encoding='UTF-8', errors='strict')
base64_bytes = base64.b64encode(text_bytes)
base64_text = base64_bytes.decode(encoding='UTF-8', errors='strict')
return base64_text
def from_base_64(base64_text):
base64_bytes = base64_text.encode(encoding='UTF-8', errors='strict')
text_bytes = base64.b64decode(base64_bytes)
text = text_bytes.decode(encoding='UTF-8', errors='strict')
return text
def _get_logs_dir_name():
if insomniac_globals.is_ui_process:
return UI_LOGS_DIR_NAME
return ENGINE_LOGS_DIR_NAME
def _get_log_file_name(logs_directory_name):
os.makedirs(os.path.join(logs_directory_name), exist_ok=True)
curr_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
log_name = f"{insomniac_globals.executable_name}_log-{curr_time}{"-"+insomniac_globals.execution_id if insomniac_globals.execution_id != "" else ""}.log"
log_path = os.path.join(logs_directory_name, log_name)
return log_path
class Timer:
duration = None
start_time = None
end_time = None
def __init__(self, seconds):
self.duration = timedelta(seconds=seconds)
self.start()
def start(self):
self.start_time = datetime.now()
self.end_time = self.start_time + self.duration
def is_expired(self):
return datetime.now() > self.end_time
def get_seconds_left(self):
time_since_start = datetime.now() - self.start_time
if time_since_start >= self.duration:
return 0
else:
return int((self.duration - time_since_start).total_seconds())
class Logger(object):
is_log_initiated = False
def __init__(self):
sys.stdout.reconfigure(encoding='utf-8')
self.wrapped_stdout = AnsiToWin32(sys.stdout)
self.terminal = self.wrapped_stdout.stream
self.log = None
def _init_log(self):
if not self.is_log_initiated:
self.log = AnsiToWin32(open(_get_log_file_name(_get_logs_dir_name()), "a", encoding="utf-8")).stream
self.is_log_initiated = True
def write(self, message):
self._init_log()
self.terminal.write(message)
self.terminal.flush()
self.log.write(message)
self.log.flush()
def flush(self):
self._init_log()
self.terminal.flush()
self.log.flush()
def fileno(self):
return self.wrapped_stdout.wrapped.fileno()
sys.stdout = Logger()
print_log = ""
print_timeless = _print_with_time_decorator(print, False, False, False)
print_timeless_ui = _print_with_time_decorator(print, False, False, True)
print_debug = _print_with_time_decorator(print, True, True, False)
print_ui = _print_with_time_decorator(print, True, False, True)
print_debug_ui = _print_with_time_decorator(print, True, True, True)
print = _print_with_time_decorator(print, True, False, False)
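# --- Illustrative sketch (not part of insomniac): a few of the pure helpers above, exercised inline.
# versiontuple() lets dotted versions compare correctly as integer tuples (a plain string compare would not).
assert versiontuple("10.2.0") > versiontuple("9.9.9")
# to_base_64()/from_base_64() round-trip arbitrary text through UTF-8 + base64.
assert from_base_64(to_base_64("insomniac")) == "insomniac"
# get_count_of_nums_in_str() counts decimal digits; get_value()/get_float_value() (not called here,
# since they print) accept either a plain number like "2" or a range like "2-4" picked at random.
assert get_count_of_nums_in_str("user123") == 3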
| import base64
import os
import random
import re
import shutil
import string
import subprocess
import sys
import traceback
from datetime import datetime, timedelta
from random import randint
from subprocess import PIPE
from time import sleep
import colorama
from colorama import Fore, Style, AnsiToWin32
import insomniac.__version__ as __version__
import insomniac.globals as insomniac_globals
random.seed()
# Init colorama but set "wrap" to False to not replace sys.stdout with a proxy object. It's meaningless as
# sys.stdout is set to a custom Logger object in utils.py
colorama.init(wrap=False)
COLOR_HEADER = Fore.MAGENTA
COLOR_OKBLUE = Fore.BLUE
COLOR_OKGREEN = Fore.GREEN
COLOR_REPORT = Fore.YELLOW
COLOR_FAIL = Fore.RED
COLOR_ENDC = Style.RESET_ALL
COLOR_BOLD = Style.BRIGHT
ENGINE_LOGS_DIR_NAME = 'logs'
UI_LOGS_DIR_NAME = 'ui-logs'
APP_REOPEN_WARNING = "Warning: Activity not started, intent has been delivered to currently running top-most instance."
def get_instagram_version(device_id, app_id):
stream = os.popen("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell dumpsys package {app_id}")
output = stream.read()
version_match = re.findall('versionName=(\\S+)', output)
if len(version_match) == 1:
version = version_match[0]
else:
version = "not found"
stream.close()
return version
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def get_connected_devices_adb_ids():
stream = os.popen('adb devices')
output = stream.read()
devices_count = len(re.findall('device\n', output))
stream.close()
if devices_count == 0:
return []
devices = []
for line in output.split('\n'):
if '\tdevice' in line:
devices.append(line.split('\t')[0])
return devices
def check_adb_connection(device_id, wait_for_device):
is_device_id_provided = device_id is not None
while True:
print_timeless("Looking for ADB devices...")
stream = os.popen('adb devices')
output = stream.read()
devices_count = len(re.findall('device\n', output))
stream.close()
if not wait_for_device:
break
if devices_count == 0:
print_timeless(COLOR_HEADER + "Couldn't find any ADB-device available, sleeping a bit and trying again..." + COLOR_ENDC)
sleep(10)
continue
if not is_device_id_provided:
break
found_device = False
for line in output.split('\n'):
if device_id in line and 'device' in line:
found_device = True
break
if found_device:
break
print_timeless(COLOR_HEADER + "Couldn't find ADB-device " + device_id + " available, sleeping a bit and trying again..." + COLOR_ENDC)
sleep(10)
continue
is_ok = True
message = "That's ok."
if devices_count == 0:
is_ok = False
message = "Cannot proceed."
elif devices_count > 1 and not is_device_id_provided:
is_ok = False
message = "Use --device to specify a device."
print_timeless(("" if is_ok else COLOR_FAIL) + "Connected devices via adb: " + str(devices_count) + ". " + message +
COLOR_ENDC)
return is_ok
def open_instagram(device_id, app_id):
print("Open Instagram app")
cmd = ("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell am start -n {app_id}/com.instagram.mainactivity.MainActivity")
cmd_res = subprocess.run(cmd, stdout=PIPE, stderr=PIPE, shell=True, encoding="utf8")
err = cmd_res.stderr.strip()
if err and err != APP_REOPEN_WARNING:
print(COLOR_FAIL + err + COLOR_ENDC)
def open_instagram_with_url(device_id, app_id, url):
print("Open Instagram app with url: {}".format(url))
cmd = ("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell am start -a android.intent.action.VIEW -d {url} {app_id}")
cmd_res = subprocess.run(cmd, stdout=PIPE, stderr=PIPE, shell=True, encoding="utf8")
err = cmd_res.stderr.strip()
if err and err != APP_REOPEN_WARNING:
print(COLOR_FAIL + err + COLOR_ENDC)
return False
return True
def close_instagram(device_id, app_id):
print("Close Instagram app")
os.popen("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell am force-stop {app_id}").close()
# Press HOME to leave a possible state of opened system dialog(s)
os.popen("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell input keyevent 3").close()
def clear_instagram_data(device_id, app_id):
print("Clear Instagram data")
os.popen("adb" + ("" if device_id is None else " -s " + device_id) +
f" shell pm clear {app_id}").close()
def save_crash(device, ex=None):
global print_log
try:
device.wake_up()
directory_name = "Crash-" + datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
try:
os.makedirs(os.path.join("crashes", directory_name), exist_ok=False)
except OSError:
print(COLOR_FAIL + "Directory " + directory_name + " already exists." + COLOR_ENDC)
return
screenshot_format = ".png" if device.is_old() else ".jpg"
try:
device.screenshot(os.path.join("crashes", directory_name, "screenshot" + screenshot_format))
except RuntimeError:
print(COLOR_FAIL + "Cannot save screenshot." + COLOR_ENDC)
view_hierarchy_format = ".xml"
try:
device.dump_hierarchy(os.path.join("crashes", directory_name, "view_hierarchy" + view_hierarchy_format))
except RuntimeError:
print(COLOR_FAIL + "Cannot save view hierarchy." + COLOR_ENDC)
with open(os.path.join("crashes", directory_name, "logs.txt"), 'w', encoding="utf-8") as outfile:
outfile.write(print_log)
if ex:
outfile.write("\n")
outfile.write(describe_exception(ex))
shutil.make_archive(os.path.join("crashes", directory_name), 'zip', os.path.join("crashes", directory_name))
shutil.rmtree(os.path.join("crashes", directory_name))
if insomniac_globals.is_insomniac():
print(COLOR_OKGREEN + "Crash saved as \"crashes/" + directory_name + ".zip\"." + COLOR_ENDC)
print(COLOR_OKGREEN + "Please attach this file if you gonna report the crash at" + COLOR_ENDC)
print(COLOR_OKGREEN + "https://github.com/alexal1/Insomniac/issues\n" + COLOR_ENDC)
except Exception as e:
print(COLOR_FAIL + f"Could not save crash after an error. Crash-save-error: {str(e)}" + COLOR_ENDC)
print(COLOR_FAIL + describe_exception(e) + COLOR_ENDC)
def print_copyright():
if insomniac_globals.is_insomniac():
print_timeless("\nIf you like this bot, please " + COLOR_BOLD + "give us a star" + COLOR_ENDC + ":")
print_timeless(COLOR_BOLD + "https://github.com/alexal1/Insomniac\n" + COLOR_ENDC)
def _print_with_time_decorator(standard_print, print_time, debug, ui_log):
def wrapper(*args, **kwargs):
if insomniac_globals.is_ui_process and not ui_log:
return
if debug and not __version__.__debug_mode__:
return
global print_log
if print_time:
time = datetime.now().strftime("%m/%d %H:%M:%S")
print_log += re.sub(r"\[\d+m", '', ("[" + time + "] " + str(*args, **kwargs) + "\n"))
return standard_print("[" + time + "]", *args, **kwargs)
else:
print_log += re.sub(r"\[\d+m", '', (str(*args, **kwargs) + "\n"))
return standard_print(*args, **kwargs)
return wrapper
def get_value(count: str, name: str, default: int, max_count=None):
return _get_value(count, name, default, max_count, is_float=False)
def get_float_value(count: str, name: str, default: float, max_count=None):
return _get_value(count, name, default, max_count, is_float=True)
def _get_value(count, name, default, max_count, is_float):
def print_error():
print(COLOR_FAIL + name.format(default) + f". Using default value instead of \"{count}\", because it must be "
"either a number (e.g. 2) or a range (e.g. 2-4)." + COLOR_ENDC)
parts = count.split("-")
if len(parts) <= 0:
value = default
print_error()
elif len(parts) == 1:
try:
value = float(count) if is_float else int(count)
print(COLOR_BOLD + name.format(value, "%.2f") + COLOR_ENDC)
except ValueError:
value = default
print_error()
elif len(parts) == 2:
try:
value = random.uniform(float(parts[0]), float(parts[1])) if is_float \
else randint(int(parts[0]), int(parts[1]))
print(COLOR_BOLD + name.format(value, "%.2f") + COLOR_ENDC)
except ValueError:
value = default
print_error()
else:
value = default
print_error()
if max_count is not None and value > max_count:
print(COLOR_FAIL + name.format(max_count) + ". This is the maximum value." + COLOR_ENDC)
value = max_count
return value
def get_left_right_values(left_right_str, name, default):
def print_error():
print(COLOR_FAIL + name.format(default) + f". Using default value instead of \"{left_right_str}\", because it "
"must be either a number (e.g. 2) or a range (e.g. 2-4)." + COLOR_ENDC)
parts = left_right_str.split("-")
if len(parts) <= 0:
value = default
print_error()
elif len(parts) == 1:
try:
value = (int(left_right_str), int(left_right_str))
print(COLOR_BOLD + name.format(value) + COLOR_ENDC)
except ValueError:
value = default
print_error()
elif len(parts) == 2:
try:
value = (int(parts[0]), int(parts[1]))
print(COLOR_BOLD + name.format(value) + COLOR_ENDC)
except ValueError:
value = default
print_error()
else:
value = default
print_error()
return value
def get_from_to_timestamps_by_hours(hours):
"""Returns a tuple of two timestamps: (given number of hours before; current time)"""
return get_from_to_timestamps_by_minutes(hours*60)
def get_from_to_timestamps_by_minutes(minutes):
"""Returns a tuple of two timestamps: (given number of minutes before; current time)"""
time_to = datetime.now().timestamp()
delta = timedelta(minutes=minutes).total_seconds()
time_from = time_to - delta
return time_from, time_to
def get_count_of_nums_in_str(str_to_check):
count = 0
for i in range(0, 10):
count += str_to_check.count(str(i))
return count
def get_random_string(length):
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=length))
def describe_exception(ex, with_stacktrace=True):
trace = ''.join(traceback.format_exception(etype=type(ex), value=ex, tb=ex.__traceback__)) if with_stacktrace else ''
description = f"Error - {str(ex)}\n{trace}"
return description
def split_list_items_with_separator(original_list, separator):
values = []
for record in original_list:
for value in record.split(separator):
stripped_value = value.strip()
if stripped_value:
values.append(stripped_value)
return values
def to_base_64(text):
text_bytes = text.encode(encoding='UTF-8', errors='strict')
base64_bytes = base64.b64encode(text_bytes)
base64_text = base64_bytes.decode(encoding='UTF-8', errors='strict')
return base64_text
def from_base_64(base64_text):
base64_bytes = base64_text.encode(encoding='UTF-8', errors='strict')
text_bytes = base64.b64decode(base64_bytes)
text = text_bytes.decode(encoding='UTF-8', errors='strict')
return text
def _get_logs_dir_name():
if insomniac_globals.is_ui_process:
return UI_LOGS_DIR_NAME
return ENGINE_LOGS_DIR_NAME
def _get_log_file_name(logs_directory_name):
os.makedirs(os.path.join(logs_directory_name), exist_ok=True)
curr_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
log_name = f"{insomniac_globals.executable_name}_log-{curr_time}{'-'+insomniac_globals.execution_id if insomniac_globals.execution_id != '' else ''}.log"
log_path = os.path.join(logs_directory_name, log_name)
return log_path
class Timer:
duration = None
start_time = None
end_time = None
def __init__(self, seconds):
self.duration = timedelta(seconds=seconds)
self.start()
def start(self):
self.start_time = datetime.now()
self.end_time = self.start_time + self.duration
def is_expired(self):
return datetime.now() > self.end_time
def get_seconds_left(self):
time_since_start = datetime.now() - self.start_time
if time_since_start >= self.duration:
return 0
else:
return int((self.duration - time_since_start).total_seconds())
class Logger(object):
is_log_initiated = False
def __init__(self):
sys.stdout.reconfigure(encoding='utf-8')
self.wrapped_stdout = AnsiToWin32(sys.stdout)
self.terminal = self.wrapped_stdout.stream
self.log = None
def _init_log(self):
if not self.is_log_initiated:
self.log = AnsiToWin32(open(_get_log_file_name(_get_logs_dir_name()), "a", encoding="utf-8")).stream
self.is_log_initiated = True
def write(self, message):
self._init_log()
self.terminal.write(message)
self.terminal.flush()
self.log.write(message)
self.log.flush()
def flush(self):
self._init_log()
self.terminal.flush()
self.log.flush()
def fileno(self):
return self.wrapped_stdout.wrapped.fileno()
sys.stdout = Logger()
print_log = ""
print_timeless = _print_with_time_decorator(print, False, False, False)
print_timeless_ui = _print_with_time_decorator(print, False, False, True)
print_debug = _print_with_time_decorator(print, True, True, False)
print_ui = _print_with_time_decorator(print, True, False, True)
print_debug_ui = _print_with_time_decorator(print, True, True, True)
print = _print_with_time_decorator(print, True, False, False)
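# --- Illustrative sketch (not part of insomniac): Timer tracks a fixed duration from construction.
timer = Timer(seconds=2)                      # starts counting immediately
remaining = timer.get_seconds_left()          # whole seconds left, 0 once expired
expired = timer.is_expired()                  # True only after the 2 seconds have elapsed
time_from, time_to = get_from_to_timestamps_by_minutes(30)  # (now - 30 min, now) as POSIX timestamps
assert 0 <= remaining <= 2 and time_from < time_to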
|
import datetime
import discord
import math
import random
from discord.ext import commands
from utils import crud
from utils.checks import is_staff
class Memes(commands.Cog):
"""
Meme commands
"""
def __init__(self, bot):
self.bot = bot
async def _meme(self, ctx, msg, directed: bool = False, imagelink=None, allowed_mentions=None):
author = ctx.author
if isinstance(ctx.channel, discord.abc.GuildChannel) and (ctx.channel in self.bot.assistance_channels or (self.bot.roles['No-Memes'] in author.roles)):
await ctx.message.delete()
try:
await ctx.author.send("Meme commands are disabled in this channel, or your privileges have been revoked.")
except discord.errors.Forbidden:
await ctx.send(f"{ctx.author.mention} Meme commands are disabled in this channel, or your privileges have been revoked.")
elif imagelink is not None:
title = f"{self.bot.escape_text(ctx.author.display_name) + ":" if not directed else ""} {msg}"
embed = discord.Embed(title=title, color=discord.Color.default())
embed.set_image(url=imagelink)
await ctx.send(embed=embed)
else:
await ctx.send(f"{self.bot.escape_text(ctx.author.display_name) + ":" if not directed else ""} {msg}", allowed_mentions=allowed_mentions)
async def _meme2(self, ctx, msg, directed: bool = False, imagelink=None, allowed_mentions=None):
author = ctx.author
if isinstance(ctx.channel, discord.abc.GuildChannel) and (ctx.channel in self.bot.assistance_channels or (self.bot.roles['No-Memes'] in author.roles) or ctx.channel is self.bot.channels['bot-cmds']):
await ctx.message.delete()
try:
await ctx.author.send("Meme commands are disabled in this channel, or your privileges have been revoked.")
except discord.errors.Forbidden:
await ctx.send(f"{ctx.author.mention} Meme commands are disabled in this channel, or your privileges have been revoked.")
elif imagelink is not None:
title = f"{self.bot.escape_text(ctx.author.display_name) + ":" if not directed else ""} {msg}"
embed = discord.Embed(title=title, color=discord.Color.default())
embed.set_image(url=imagelink)
await ctx.send(embed=embed)
else:
await ctx.send(f"{self.bot.escape_text(ctx.author.display_name) + ":" if not directed else ""} {msg}", allowed_mentions=allowed_mentions)
# list memes
@commands.command(name="listmemes")
async def _listmemes(self, ctx):
"""List meme commands."""
cmds = ", ".join([x.name for x in self.get_commands()][1:])
await self._meme(ctx, f"```{cmds}```")
# 3dshacks memes
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def s_99(self, ctx):
"""Memes."""
await self._meme(ctx, "**ALL HAIL BRITANNIA!**")
@commands.command(hidden=True)
@commands.cooldown(rate=5, per=30.0, type=commands.BucketType.channel)
async def honk(self, ctx):
"""honk"""
await self._meme(ctx, "`R A K E I N T H E L A K E`")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def screams(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/j0Dkv2Z.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def eeh(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/2SBC1Qo.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def dubyadud(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/Sohsi8s.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def megumi(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/GMRp1dj.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="https://i.imgur.com/WLncIsi.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori2(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/V0uu99A.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori3(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/so8thgu.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori4(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/267IXh1.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori5(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/lKcsiBP.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori6(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/SIJzpau.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def shotsfired(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/zf2XrNk.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def rusure(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="https://imgur.com/sXnVRLw.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def r34(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/sjQZKBF.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def lenny(self, ctx):
"""Memes."""
await self._meme(ctx, "( ͡° ͜ʖ ͡°)")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def rip(self, ctx):
"""Memes."""
await self._meme(ctx, "Press F to pay respects.")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def permabrocked(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/ARsOh3p.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def knp(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/EsJ191C.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def lucina(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/tnWSXf7.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def lucina2(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/ZPMveve.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def xarec(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/A59RbRT.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def clap(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/UYbIZYs.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def ayyy(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/bgvuHAd.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def hazel(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/vpu8bX3.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def thumbsup(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/hki1IIs.gifv")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def pbanjo(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/sBJKzuK.png")
# Cute commands :3
@commands.command(hidden=True, aliases=["pat"])
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def headpat(self, ctx, u: discord.Member):
"""headpat"""
await self._meme(ctx, f"{self.bot.escape_text(u.display_name)} has been gently patted.", True, "http://i.imgur.com/7V6gIIW.jpg")
@commands.command(hidden=True, aliases=["pat2"])
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def headpat2(self, ctx, u: discord.Member):
"""headpat 2"""
await self._meme(ctx, f"{self.bot.escape_text(u.display_name)} has been gently patted.", True, "http://i.imgur.com/djhHX0n.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def headrub(self, ctx, u: discord.Member):
"""headrub"""
await self._meme(ctx, f"{self.bot.escape_text(u.display_name)} has received a head rub.", True, "http://i.imgur.com/j6xSoKv.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def sudoku(self, ctx):
"""Cute"""
await self._meme(ctx, "", imagelink="http://i.imgur.com/VHlIZRC.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def baka(self, ctx):
"""Cute"""
await self._meme(ctx, "", imagelink="http://i.imgur.com/OyjCHNe.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def mugi(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/lw80tT0.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def lisp(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/RQeZErU.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def dev(self, ctx):
"""Reminds user where they are."""
await self._meme(ctx, f"You {"do not " if ctx.channel != self.bot.channels["dev"] else ""}seem to be in {self.bot.channels["dev"].mention}.", True)
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def meta(self, ctx):
"""Reminds user where they are. (2)"""
await self._meme(ctx, f"You {"do not " if ctx.channel != self.bot.channels["meta"] else ""}seem to be in {self.bot.channels["meta"].mention}. Please take this subject {"there" if ctx.channel != self.bot.channels["meta"] else "somewhere else"}.", True)
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def appeals(self, ctx):
"""Reminds user where they are. (3)"""
await self._meme(ctx, f"You {"do not " if ctx.channel != self.bot.channels["appeals"] else ""}seem to be in {self.bot.channels["appeals"].mention}. Please take this subject {"there" if ctx.channel != self.bot.channels["appeals"] else "somewhere else"}.", True)
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def blackalabi(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/JzFem4y.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def whoops(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="https://i.imgur.com/caF9KHk.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def nom(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/p1r53ni.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def soghax(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/oQJy2eN.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def weebs(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/sPjRKUB.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def whatisr(self, ctx):
"""MEMES?"""
await self._meme(ctx, "", imagelink="http://i.imgur.com/Z8HhfzJ.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def helpers(self, ctx):
"""MEMES?"""
await self._meme(ctx, "", imagelink="http://i.imgur.com/0v1EgMX.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def concern(self, ctx):
"""MEMES?"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/cWXBb5g.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def fuck(self, ctx):
"""MEMES?"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/4lNA5Ud.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def goose(self, ctx):
"""MEMES?"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/pZUeBql.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def planet(self, ctx):
"""haha yes!"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/YIBADGT.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def pbanj(self, ctx):
"""he has the power"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/EQy9pl3.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def pbanj2(self, ctx):
"""pbanos"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/oZx7Qid.gif")
# Begin code from https://github.com/reswitched/robocop-ng
@staticmethod
def c_to_f(c):
"""this is where we take memes too far"""
return math.floor(1.8 * c + 32)
@staticmethod
def c_to_k(c):
"""this is where we take memes REALLY far"""
return math.floor(c + 273.15)
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def warm(self, ctx, u: discord.Member):
"""Warms a user :3"""
celsius = random.randint(38, 100)
fahrenheit = self.c_to_f(celsius)
kelvin = self.c_to_k(celsius)
await self._meme(ctx, f"{u.mention} warmed. User is now {celsius}°C ({fahrenheit}°F, {kelvin}K).", True)
# adding it here cause its pretty much the same code
@commands.command(hidden=True, aliases=["cool"])
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def chill(self, ctx, u: discord.Member):
"""Cools a user :3"""
celsius = random.randint(-273, 34)
fahrenheit = self.c_to_f(celsius)
kelvin = self.c_to_k(celsius)
await self._meme(ctx, f"{u.mention} cooled. User is now {celsius}°C ({fahrenheit}°F, {kelvin}K).", True)
# End code from https://github.com/reswitched/robocop-ng
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def bean(self, ctx, u: discord.Member):
"""swing the beanhammer"""
await self._meme(ctx, f"{u.mention} is now beaned. <a:bean:462076812076384257>", True)
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def nogas(self, ctx):
"""shhhh no one gives a shit!"""
await self._meme(ctx, "https://imgur.com/a/5IcfK6N")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def cosmic(self, ctx):
"""Cosmic ban"""
await self._meme(ctx, "https://i.imgur.com/V4TVpbC.gifv")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def menuhax(self, ctx):
"""menuhax 11.4 wen"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/fUiZ2c3.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def magic(self, ctx):
"""shrug.avi"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/k9111dq.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def mouse(self, ctx):
"""Whaaaa"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/0YHBP7l.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def bananoose(self, ctx):
""":)"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/VUmkXDd.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def goosenana(self, ctx):
""":)"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/dLZOK5c.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def eel(self, ctx, u: discord.Member):
"""eel"""
await self._meme(ctx, f"{self.bot.escape_text(u.display_name)} has been eel slapped.", True, "https://i.imgur.com/QXF2Pcn.gif")
@commands.command(hidden=True, aliases=["bruh", "yolo", "swag"])
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def dab(self, ctx):
"""Memes."""
await self._meme(ctx, "No. I might be controlled by you idiots, but I have *some* self respect, baka...")
@commands.command(hidden=True, aliases=["hacc"])
@commands.cooldown(rate=5, per=60.0, type=commands.BucketType.channel)
async def nobrain(self, ctx, *, action="hacc"):
"""h a c c"""
await self._meme(ctx, f'`I have no brain and I must {' '.join(action.replace('`',''))}`')
@commands.command(hidden=True, aliases=["wheresource", "sauce", "github"])
@commands.cooldown(rate=5, per=30.0, type=commands.BucketType.channel)
async def source(self, ctx):
"""You *did* read the GPL, *right?*"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/ceLGvc4.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def pirate2(self, ctx):
"""stop right there, criminal scum"""
await self._meme(ctx, "", imagelink="https://cdn.discordapp.com/attachments/508390946753216528/695752500179107910/giphy.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def source2(self, ctx):
"""citation needed"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/b5c031e07ddbc3e48d0853f2d7064f66.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def disgraceful(self, ctx):
"""YOU DIDN'T SEE IT BECAUSE IT WASN'T THERE"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/b93b2a99bc28df4a192fc7eb8ccc01a9.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def greatness(self, ctx):
"""We were this close."""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/f2b1e87af1fcdcd34f0dff65d7696deb.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def shovels(self, ctx):
"""Do you need more?"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/b798edd56662f1bde15ae4b6bc9c9fba.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def value(self, ctx):
"""smug.png"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/f882b32a3f051f474572b018d053bd7b.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def superiority(self, ctx):
"""opinions"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/e2cbbf7c808e21fb6c5ab603f6a89a3f.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def dolar(self, ctx):
"""mcdondal"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/3ecd851953906ecc2387cfd592ac97e7.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def serotonin(self, ctx):
"""i really want to know"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/2549ac8b197ae68080041d3966a887e8.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=300.0, type=commands.BucketType.channel)
async def cadealert(self, ctx):
"""stop! cade time."""
await self._meme2(ctx, "excuse me <@&575940388452433940>, it is time for cade", allowed_mentions=discord.AllowedMentions(roles=True))
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=300.0, type=commands.BucketType.channel)
async def birbalert(self, ctx):
"""stop! birb time."""
await self._meme2(ctx, "excuse me <@&805294876673572884>, it is time for birb", allowed_mentions=discord.AllowedMentions(roles=True))
@is_staff("OP")
@commands.command(hidden=True, aliases=['🍰'])
async def birthday(self, ctx, member: discord.Member):
"""Wishes a happy birthday. Do not abuse pls."""
await ctx.message.delete()
await member.add_roles(self.bot.roles['🍰'])
timestamp = datetime.datetime.now()
delta = datetime.timedelta(seconds=86400)
expiring_time = timestamp + delta
await crud.add_timed_role(member.id, self.bot.roles['🍰'].id, expiring_time)
await ctx.send(f"Happy birthday {member.mention}!")
@commands.command(hidden=True, aliases=["departure"])
@commands.cooldown(rate=5, per=30.0, type=commands.BucketType.channel)
async def depart(self, ctx):
"""From the amazing Mr. Burguers"""
departure_gifs = ["https://i.imgur.com/Kbyp7i4.gif", "https://i.imgur.com/Wv8DoGC.gif"]
await self._meme(ctx, "", imagelink=random.choice(departure_gifs))
def setup(bot):
bot.add_cog(Memes(bot))
| import datetime
import discord
import math
import random
from discord.ext import commands
from utils import crud
from utils.checks import is_staff
class Memes(commands.Cog):
"""
Meme commands
"""
def __init__(self, bot):
self.bot = bot
async def _meme(self, ctx, msg, directed: bool = False, imagelink=None, allowed_mentions=None):
author = ctx.author
if isinstance(ctx.channel, discord.abc.GuildChannel) and (ctx.channel in self.bot.assistance_channels or (self.bot.roles['No-Memes'] in author.roles)):
await ctx.message.delete()
try:
await ctx.author.send("Meme commands are disabled in this channel, or your privileges have been revoked.")
except discord.errors.Forbidden:
await ctx.send(f"{ctx.author.mention} Meme commands are disabled in this channel, or your privileges have been revoked.")
elif imagelink is not None:
title = f"{self.bot.escape_text(ctx.author.display_name) + ':' if not directed else ''} {msg}"
embed = discord.Embed(title=title, color=discord.Color.default())
embed.set_image(url=imagelink)
await ctx.send(embed=embed)
else:
await ctx.send(f"{self.bot.escape_text(ctx.author.display_name) + ':' if not directed else ''} {msg}", allowed_mentions=allowed_mentions)
async def _meme2(self, ctx, msg, directed: bool = False, imagelink=None, allowed_mentions=None):
author = ctx.author
if isinstance(ctx.channel, discord.abc.GuildChannel) and (ctx.channel in self.bot.assistance_channels or (self.bot.roles['No-Memes'] in author.roles) or ctx.channel is self.bot.channels['bot-cmds']):
await ctx.message.delete()
try:
await ctx.author.send("Meme commands are disabled in this channel, or your privileges have been revoked.")
except discord.errors.Forbidden:
await ctx.send(f"{ctx.author.mention} Meme commands are disabled in this channel, or your privileges have been revoked.")
elif imagelink is not None:
title = f"{self.bot.escape_text(ctx.author.display_name) + ':' if not directed else ''} {msg}"
embed = discord.Embed(title=title, color=discord.Color.default())
embed.set_image(url=imagelink)
await ctx.send(embed=embed)
else:
await ctx.send(f"{self.bot.escape_text(ctx.author.display_name) + ':' if not directed else ''} {msg}", allowed_mentions=allowed_mentions)
# list memes
@commands.command(name="listmemes")
async def _listmemes(self, ctx):
"""List meme commands."""
cmds = ", ".join([x.name for x in self.get_commands()][1:])
await self._meme(ctx, f"```{cmds}```")
# 3dshacks memes
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def s_99(self, ctx):
"""Memes."""
await self._meme(ctx, "**ALL HAIL BRITANNIA!**")
@commands.command(hidden=True)
@commands.cooldown(rate=5, per=30.0, type=commands.BucketType.channel)
async def honk(self, ctx):
"""honk"""
await self._meme(ctx, "`R A K E I N T H E L A K E`")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def screams(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/j0Dkv2Z.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def eeh(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/2SBC1Qo.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def dubyadud(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/Sohsi8s.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def megumi(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/GMRp1dj.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="https://i.imgur.com/WLncIsi.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori2(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/V0uu99A.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori3(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/so8thgu.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori4(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/267IXh1.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori5(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/lKcsiBP.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def inori6(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/SIJzpau.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def shotsfired(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/zf2XrNk.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def rusure(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="https://imgur.com/sXnVRLw.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def r34(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/sjQZKBF.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def lenny(self, ctx):
"""Memes."""
await self._meme(ctx, "( ͡° ͜ʖ ͡°)")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def rip(self, ctx):
"""Memes."""
await self._meme(ctx, "Press F to pay respects.")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def permabrocked(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/ARsOh3p.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def knp(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/EsJ191C.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def lucina(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/tnWSXf7.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def lucina2(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/ZPMveve.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def xarec(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/A59RbRT.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def clap(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/UYbIZYs.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def ayyy(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/bgvuHAd.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def hazel(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/vpu8bX3.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def thumbsup(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/hki1IIs.gifv")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def pbanjo(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/sBJKzuK.png")
# Cute commands :3
@commands.command(hidden=True, aliases=["pat"])
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def headpat(self, ctx, u: discord.Member):
"""headpat"""
await self._meme(ctx, f"{self.bot.escape_text(u.display_name)} has been gently patted.", True, "http://i.imgur.com/7V6gIIW.jpg")
@commands.command(hidden=True, aliases=["pat2"])
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def headpat2(self, ctx, u: discord.Member):
"""headpat 2"""
await self._meme(ctx, f"{self.bot.escape_text(u.display_name)} has been gently patted.", True, "http://i.imgur.com/djhHX0n.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def headrub(self, ctx, u: discord.Member):
"""headrub"""
await self._meme(ctx, f"{self.bot.escape_text(u.display_name)} has received a head rub.", True, "http://i.imgur.com/j6xSoKv.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def sudoku(self, ctx):
"""Cute"""
await self._meme(ctx, "", imagelink="http://i.imgur.com/VHlIZRC.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def baka(self, ctx):
"""Cute"""
await self._meme(ctx, "", imagelink="http://i.imgur.com/OyjCHNe.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def mugi(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/lw80tT0.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def lisp(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/RQeZErU.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def dev(self, ctx):
"""Reminds user where they are."""
await self._meme(ctx, f"You {'do not ' if ctx.channel != self.bot.channels['dev'] else ''}seem to be in {self.bot.channels['dev'].mention}.", True)
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def meta(self, ctx):
"""Reminds user where they are. (2)"""
await self._meme(ctx, f"You {'do not ' if ctx.channel != self.bot.channels['meta'] else ''}seem to be in {self.bot.channels['meta'].mention}. Please take this subject {'there' if ctx.channel != self.bot.channels['meta'] else 'somewhere else'}.", True)
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def appeals(self, ctx):
"""Reminds user where they are. (3)"""
await self._meme(ctx, f"You {'do not ' if ctx.channel != self.bot.channels['appeals'] else ''}seem to be in {self.bot.channels['appeals'].mention}. Please take this subject {'there' if ctx.channel != self.bot.channels['appeals'] else 'somewhere else'}.", True)
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def blackalabi(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/JzFem4y.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def whoops(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="https://i.imgur.com/caF9KHk.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def nom(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/p1r53ni.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def soghax(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/oQJy2eN.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def weebs(self, ctx):
"""Memes."""
await self._meme(ctx, "", imagelink="http://i.imgur.com/sPjRKUB.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def whatisr(self, ctx):
"""MEMES?"""
await self._meme(ctx, "", imagelink="http://i.imgur.com/Z8HhfzJ.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def helpers(self, ctx):
"""MEMES?"""
await self._meme(ctx, "", imagelink="http://i.imgur.com/0v1EgMX.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def concern(self, ctx):
"""MEMES?"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/cWXBb5g.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def fuck(self, ctx):
"""MEMES?"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/4lNA5Ud.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def goose(self, ctx):
"""MEMES?"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/pZUeBql.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def planet(self, ctx):
"""haha yes!"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/YIBADGT.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def pbanj(self, ctx):
"""he has the power"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/EQy9pl3.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def pbanj2(self, ctx):
"""pbanos"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/oZx7Qid.gif")
# Begin code from https://github.com/reswitched/robocop-ng
@staticmethod
def c_to_f(c):
"""this is where we take memes too far"""
return math.floor(1.8 * c + 32)
@staticmethod
def c_to_k(c):
"""this is where we take memes REALLY far"""
return math.floor(c + 273.15)
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def warm(self, ctx, u: discord.Member):
"""Warms a user :3"""
celsius = random.randint(38, 100)
fahrenheit = self.c_to_f(celsius)
kelvin = self.c_to_k(celsius)
await self._meme(ctx, f"{u.mention} warmed. User is now {celsius}°C ({fahrenheit}°F, {kelvin}K).", True)
# adding it here because it's pretty much the same code
@commands.command(hidden=True, aliases=["cool"])
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def chill(self, ctx, u: discord.Member):
"""Cools a user :3"""
celsius = random.randint(-273, 34)
fahrenheit = self.c_to_f(celsius)
kelvin = self.c_to_k(celsius)
await self._meme(ctx, f"{u.mention} cooled. User is now {celsius}°C ({fahrenheit}°F, {kelvin}K).", True)
# End code from https://github.com/reswitched/robocop-ng
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def bean(self, ctx, u: discord.Member):
"""swing the beanhammer"""
await self._meme(ctx, f"{u.mention} is now beaned. <a:bean:462076812076384257>", True)
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def nogas(self, ctx):
"""shhhh no one gives a shit!"""
await self._meme(ctx, "https://imgur.com/a/5IcfK6N")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def cosmic(self, ctx):
"""Cosmic ban"""
await self._meme(ctx, "https://i.imgur.com/V4TVpbC.gifv")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def menuhax(self, ctx):
"""menuhax 11.4 wen"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/fUiZ2c3.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def magic(self, ctx):
"""shrug.avi"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/k9111dq.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def mouse(self, ctx):
"""Whaaaa"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/0YHBP7l.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def bananoose(self, ctx):
""":)"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/VUmkXDd.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def goosenana(self, ctx):
""":)"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/dLZOK5c.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def eel(self, ctx, u: discord.Member):
"""eel"""
await self._meme(ctx, f"{self.bot.escape_text(u.display_name)} has been eel slapped.", True, "https://i.imgur.com/QXF2Pcn.gif")
@commands.command(hidden=True, aliases=["bruh", "yolo", "swag"])
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def dab(self, ctx):
"""Memes."""
await self._meme(ctx, "No. I might be controlled by you idiots, but I have *some* self respect, baka...")
@commands.command(hidden=True, aliases=["hacc"])
@commands.cooldown(rate=5, per=60.0, type=commands.BucketType.channel)
async def nobrain(self, ctx, *, action="hacc"):
"""h a c c"""
await self._meme(ctx, f'`I have no brain and I must {" ".join(action.replace("`",""))}`')
@commands.command(hidden=True, aliases=["wheresource", "sauce", "github"])
@commands.cooldown(rate=5, per=30.0, type=commands.BucketType.channel)
async def source(self, ctx):
"""You *did* read the GPL, *right?*"""
await self._meme(ctx, "", imagelink="https://i.imgur.com/ceLGvc4.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def pirate2(self, ctx):
"""stop right there, criminal scum"""
await self._meme(ctx, "", imagelink="https://cdn.discordapp.com/attachments/508390946753216528/695752500179107910/giphy.gif")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def source2(self, ctx):
"""citation needed"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/b5c031e07ddbc3e48d0853f2d7064f66.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def disgraceful(self, ctx):
"""YOU DIDN'T SEE IT BECAUSE IT WASN'T THERE"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/b93b2a99bc28df4a192fc7eb8ccc01a9.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def greatness(self, ctx):
"""We were this close."""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/f2b1e87af1fcdcd34f0dff65d7696deb.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def shovels(self, ctx):
"""Do you need more?"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/b798edd56662f1bde15ae4b6bc9c9fba.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def value(self, ctx):
"""smug.png"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/f882b32a3f051f474572b018d053bd7b.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def superiority(self, ctx):
"""opinions"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/e2cbbf7c808e21fb6c5ab603f6a89a3f.jpg")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def dolar(self, ctx):
"""mcdondal"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/3ecd851953906ecc2387cfd592ac97e7.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=30.0, type=commands.BucketType.channel)
async def serotonin(self, ctx):
"""i really want to know"""
await self._meme(ctx, "", imagelink="https://album.eiphax.tech/uploads/big/2549ac8b197ae68080041d3966a887e8.png")
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=300.0, type=commands.BucketType.channel)
async def cadealert(self, ctx):
"""stop! cade time."""
await self._meme2(ctx, "excuse me <@&575940388452433940>, it is time for cade", allowed_mentions=discord.AllowedMentions(roles=True))
@commands.command(hidden=True)
@commands.cooldown(rate=1, per=300.0, type=commands.BucketType.channel)
async def birbalert(self, ctx):
"""stop! birb time."""
await self._meme2(ctx, "excuse me <@&805294876673572884>, it is time for birb", allowed_mentions=discord.AllowedMentions(roles=True))
@is_staff("OP")
@commands.command(hidden=True, aliases=['🍰'])
async def birthday(self, ctx, member: discord.Member):
"""Wishes a happy birthday. Do not abuse pls."""
await ctx.message.delete()
await member.add_roles(self.bot.roles['🍰'])
timestamp = datetime.datetime.now()
delta = datetime.timedelta(seconds=86400)
expiring_time = timestamp + delta
await crud.add_timed_role(member.id, self.bot.roles['🍰'].id, expiring_time)
await ctx.send(f"Happy birthday {member.mention}!")
@commands.command(hidden=True, aliases=["departure"])
@commands.cooldown(rate=5, per=30.0, type=commands.BucketType.channel)
async def depart(self, ctx):
"""From the amazing Mr. Burguers"""
departure_gifs = ["https://i.imgur.com/Kbyp7i4.gif", "https://i.imgur.com/Wv8DoGC.gif"]
await self._meme(ctx, "", imagelink=random.choice(departure_gifs))
def setup(bot):
bot.add_cog(Memes(bot))
|
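The warm/chill commands in the cog above round both temperature conversions down with math.floor. A minimal standalone check of those formulas, kept outside the cog so it runs without discord.py; the helper names simply mirror the cog's c_to_f/c_to_k:

import math

def c_to_f(c):
    # Fahrenheit = 1.8 * C + 32, floored exactly as the cog does
    return math.floor(1.8 * c + 32)

def c_to_k(c):
    # Kelvin = C + 273.15, floored exactly as the cog does
    return math.floor(c + 273.15)

# Spot checks at the edges of the warm/chill random ranges
assert c_to_f(100) == 212   # hottest possible warm roll: boiling point of water
assert c_to_f(38) == 100    # coolest warm roll still floors to 100 degrees F
assert c_to_k(-273) == 0    # coldest chill roll floors to 0 K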
import re
class BasicPreprocessing:
def __init__(self):
self.name = "basic"
def __call__(self, text: str):
return clean_text(text)
def clean_text(text):
text = text.lower()
text = replace_all(text, [
("n't ", " not "),
("'ve ", " have "),
("'ll ", " will "),
("'s ", " "),
("'d ", " ")
])
text = sub_iso(text)
text = sub_unknown_chars(text)
text = sub_numbers(text)
text = sub_multiple_spaces(text)
return text
def sub_iso(text):
return re.sub(
r"\biso (\d+)(-(\d+))?\b",
lambda m: f"iso_{m.group(1)}{"_" + m.group(3) if m.group(3) else ""}",
text)
def sub_unknown_chars(text):
return re.sub(r"([^a-z0-9_ ])+", " ", text)
def sub_numbers(text):
return re.sub(r"\b(\d+)\b", " ", text)
def sub_multiple_spaces(text):
return re.sub(r"\s\s+", " ", text)
def replace_all(text, replacement_list):
for old, new in replacement_list:
text = text.replace(old, new)
return text
| import re
class BasicPreprocessing:
def __init__(self):
self.name = "basic"
def __call__(self, text: str):
return clean_text(text)
def clean_text(text):
text = text.lower()
text = replace_all(text, [
("n't ", " not "),
("'ve ", " have "),
("'ll ", " will "),
("'s ", " "),
("'d ", " ")
])
text = sub_iso(text)
text = sub_unknown_chars(text)
text = sub_numbers(text)
text = sub_multiple_spaces(text)
return text
def sub_iso(text):
return re.sub(
r"\biso (\d+)(-(\d+))?\b",
lambda m: f"iso_{m.group(1)}{'_' + m.group(3) if m.group(3) else ''}",
text)
def sub_unknown_chars(text):
return re.sub(r"([^a-z0-9_ ])+", " ", text)
def sub_numbers(text):
return re.sub(r"\b(\d+)\b", " ", text)
def sub_multiple_spaces(text):
return re.sub(r"\s\s+", " ", text)
def replace_all(text, replacement_list):
for old, new in replacement_list:
text = text.replace(old, new)
return text
|
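A hand-traced usage sketch for the preprocessing pair above. The import path is an illustrative assumption (the source never names its module); the expected string is just clean_text traced by hand: lowercasing, fusing "iso 9001" into "iso_9001", replacing punctuation and the standalone year with spaces, then collapsing space runs, which leaves a single trailing space:

from basic_preprocessing import BasicPreprocessing  # hypothetical module name

pre = BasicPreprocessing()
cleaned = pre("Certified to ISO 9001 in 2015.")
print(repr(cleaned))  # 'certified to iso_9001 in '
assert cleaned == "certified to iso_9001 in "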
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
MongoDB logger module
"""
import datetime
import logging
import os
import json
import inspect
import random
from typing import Optional
from pymongo import MongoClient
class MongoLogger:
"""
MongoDB logger class.\n
"""
LEVELS = {'crit': 50, 'err': 40, 'warn': 30, 'info': 20, 'debug': 10}
def __init__(self, collection_name='default_logger', pid='', config_file="monolog.json"):
self._pid = pid
if self._pid == '':
self._generate_pid()
self.config = self._get_merged_config(config_file)
self._current_level = self.config["currentLevel"]
self._collection = collection_name
self._std_logger_duplicate = self.config.get("stdLoggerDuplicate", True)
self._mongo_logger_duplicate = self.config.get("mongoLoggerDuplicate", True)
if self._mongo_logger_duplicate:
self._mongo_cli = MongoClient(self.config["connection"]["serv"],
self.config["connection"]["port"],
username=self.config["connection"]["username"],
password=self.config["connection"]["password"],
authSource=self.config["connection"]["authSource"],
authMechanism=self.config["connection"]["authMechanism"])
self._db = self._mongo_cli[self.config["connection"]["dataBase"]]
self._node = self.config["node"]
else:
self._mongo_cli = None
self._db = None
self._node = None
try:
self._std_logger = self._build_std_logger()
except Exception as ex_error:
print(f"MongoLogger error. ex_error: {ex_error}.")
self._std_logger = None
self._std_logger_duplicate = False
def _get_merged_config(self, config_file: str) -> dict:
"""
Find and merge configs.\n
config.local.json takes precedence over config.json
"""
_local_config_file_name = f'{'.'.join(config_file.split('.')[:-1])}.local.{config_file.split('.')[-1]}'
_config = {}
if _config_path := self._find_config(config_file):
_config = json.load(open(_config_path))
if _local_config_path := self._find_config(_local_config_file_name):
_config = self._merge_configs(_config, json.load(open(_local_config_path)))
return _config
def _merge_configs(self, first_dict: dict, second_dict: dict) -> dict:
"""
Merge second_dict on first_dict.\n
"""
out = {}
for key in first_dict.keys():
out[key] = first_dict[key]
if key in second_dict:
if isinstance(second_dict[key], dict):
out[key] = self._merge_configs(first_dict[key], second_dict[key])
else:
out[key] = second_dict[key]
for key in second_dict.keys():
if key not in first_dict:
out[key] = second_dict[key]
return out
@staticmethod
def _find_config(config_file_name: str) -> Optional[str]:
"""
Find config file.\n Will check current_dir, current_dir/config, ../current_dir/config.\n
config.local.json takes precedence over config.json
"""
if os.path.exists(config_file_name):
return config_file_name
_path_config = os.path.join("config", config_file_name)
if os.path.exists(_path_config):
return _path_config
_upper_current_dir = os.path.split(os.getcwd())[0]
_path_config = os.path.join(_upper_current_dir, "config", config_file_name)
if os.path.exists(_path_config):
return _path_config
return None
def _generate_pid(self):
"""
Generate process id.\n
"""
self._pid = f"{random.randrange(1000, 9999)}_{datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")}"
def set_pid(self, pid):
"""
Set session(process) id.\n
"""
self._pid = pid
def get_pid(self):
"""
Get current session(process) id.\n
"""
return self._pid
def _build_std_logger(self):
"""
Make std logger.\n
"""
logger = logging.getLogger(self._collection)
_log_format = "[%(levelname)-8s][%(asctime)s][%(module)-10s]%(message)s"
logger.setLevel(self.LEVELS[self._current_level])
logger.addHandler(self._get_file_handler(_log_format))
logger.addHandler(self._get_stream_handler(_log_format))
return logger
def _get_file_handler(self, _log_format):
"""
Make file handler for std logger.\n
"""
file_handler = logging.FileHandler(f"{self._collection}.log")
file_handler.setFormatter(logging.Formatter(_log_format))
return file_handler
@staticmethod
def _get_stream_handler(_log_format):
"""
Make console handler for std logger.\n
"""
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(_log_format))
return stream_handler
def _emit(self, level: str, msg: str, data: dict):
"""
Emit log message
:param level: message level
:param msg: critical message
:param data: dump dict
:return: None
"""
current_frame = inspect.currentframe()
emit_func = current_frame.f_back.f_back.f_code.co_name
if self._std_logger_duplicate:
self._emit_std_logger(emit_func, level, msg, data)
if self._mongo_logger_duplicate:
self._emit_mongo(emit_func, level, msg, data)
def _emit_std_logger(self, emit_func: str, level: str, msg: str, data: dict):
"""
Emit msg to std logger.\n
:param emit_func: emitter function
:param level: message level
:param msg: critical message
:param data: dump dict
:return: None
"""
try:
self._std_logger.log(self.LEVELS[level], "[%s][%s] %s %s.", emit_func, self._pid, msg, data)
except UnicodeEncodeError:
dump_data = json.dumps(data)
self._std_logger.log(self.LEVELS[level], "[%s][%s][%s] %s %s.", level, emit_func, self._pid, msg,
dump_data)
def _emit_mongo(self, emit_func: str, level: str, msg: str, data: dict):
"""
Emit msg to mongo.\n
:param emit_func: emitter function
:param level: message level
:param msg: critical message
:param data: dump dict
:return: None
"""
try:
collection = self._db[datetime.datetime.now().strftime(self._collection)]
data["function"] = emit_func
var = {
"created": datetime.datetime.now(),
"node": self._node,
"ssid": self._pid,
"raddr": "",
"level": level,
"msg": msg,
"dump": data
}
collection.insert_one(var)
except Exception as ex_error:
if self._std_logger:
self._std_logger.critical("MongoLogger Critical error. %s dump: [%s][%s] %s %s.",
ex_error, level, self._pid, msg, data)
else:
print("MongoLogger Critical error. %s dump: [%s][%s] %s %s.",
ex_error, level, self._pid, msg, data)
def critical(self, msg: str, data=None) -> None:
"""
Critical message.
:param msg: critical message
:param data: dump dict
:return: None
"""
if data is None:
data = {}
traceback = list(map(lambda x: {"function": x.function, "lineno": x.lineno}, inspect.stack()))
data["traceback"] = traceback
self._emit('crit', msg, data)
def error(self, msg: str, data=None) -> None:
"""
Error message.
:param msg: error message
:param data: dump dict
:return: None
"""
if data is None:
data = {}
self._emit('err', msg, data)
def warning(self, msg: str, data=None) -> None:
"""
Warning message.
:param msg: warning message
:param data: dump dict
:return: None
"""
if data is None:
data = {}
self._emit('warn', msg, data)
def info(self, msg: str, data=None) -> None:
"""
Info message.
:param msg: info message
:param data: dump dict
:return: None
"""
if data is None:
data = {}
self._emit('info', msg, data)
def debug(self, msg: str, data=None) -> None:
"""
Debug message.
:param msg: debug message
:param data: dump dict
:return: None
"""
if data is None:
data = {}
self._emit('debug', msg, data)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
MongoDB logger module
"""
import datetime
import logging
import os
import json
import inspect
import random
from typing import Optional
from pymongo import MongoClient
class MongoLogger:
"""
MongoDB logger class.\n
"""
LEVELS = {'crit': 50, 'err': 40, 'warn': 30, 'info': 20, 'debug': 10}
def __init__(self, collection_name='default_logger', pid='', config_file="monolog.json"):
self._pid = pid
if self._pid == '':
self._generate_pid()
self.config = self._get_merged_config(config_file)
self._current_level = self.config["currentLevel"]
self._collection = collection_name
self._std_logger_duplicate = self.config.get("stdLoggerDuplicate", True)
self._mongo_logger_duplicate = self.config.get("mongoLoggerDuplicate", True)
if self._mongo_logger_duplicate:
self._mongo_cli = MongoClient(self.config["connection"]["serv"],
self.config["connection"]["port"],
username=self.config["connection"]["username"],
password=self.config["connection"]["password"],
authSource=self.config["connection"]["authSource"],
authMechanism=self.config["connection"]["authMechanism"])
self._db = self._mongo_cli[self.config["connection"]["dataBase"]]
self._node = self.config["node"]
else:
self._mongo_cli = None
self._db = None
self._node = None
try:
self._std_logger = self._build_std_logger()
except Exception as ex_error:
print(f"MongoLogger error. ex_error: {ex_error}.")
self._std_logger = None
self._std_logger_duplicate = False
def _get_merged_config(self, config_file: str) -> dict:
"""
Find and merge configs.\n
config.local.json takes precedence over config.json
"""
_local_config_file_name = f'{".".join(config_file.split(".")[:-1])}.local.{config_file.split(".")[-1]}'
_config = {}
if _config_path := self._find_config(config_file):
_config = json.load(open(_config_path))
if _local_config_path := self._find_config(_local_config_file_name):
_config = self._merge_configs(_config, json.load(open(_local_config_path)))
return _config
def _merge_configs(self, first_dict: dict, second_dict: dict) -> dict:
"""
Merge second_dict on first_dict.\n
"""
out = {}
for key in first_dict.keys():
out[key] = first_dict[key]
if key in second_dict:
if isinstance(second_dict[key], dict):
out[key] = self._merge_configs(first_dict[key], second_dict[key])
else:
out[key] = second_dict[key]
for key in second_dict.keys():
if key not in first_dict:
out[key] = second_dict[key]
return out
@staticmethod
def _find_config(config_file_name: str) -> Optional[str]:
"""
Find config file.\n Will check current_dir, current_dir/config, ../current_dir/config.\n
config.local.json takes precedence over config.json
"""
if os.path.exists(config_file_name):
return config_file_name
_path_config = os.path.join("config", config_file_name)
if os.path.exists(_path_config):
return _path_config
_upper_current_dir = os.path.split(os.getcwd())[0]
_path_config = os.path.join(_upper_current_dir, "config", config_file_name)
if os.path.exists(_path_config):
return _path_config
return None
def _generate_pid(self):
"""
Generate process id.\n
"""
self._pid = f"{random.randrange(1000, 9999)}_{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}"
def set_pid(self, pid):
"""
Set session(process) id.\n
"""
self._pid = pid
def get_pid(self):
"""
Get current session(process) id.\n
"""
return self._pid
def _build_std_logger(self):
"""
Make std logger.\n
"""
logger = logging.getLogger(self._collection)
_log_format = "[%(levelname)-8s][%(asctime)s][%(module)-10s]%(message)s"
logger.setLevel(self.LEVELS[self._current_level])
logger.addHandler(self._get_file_handler(_log_format))
logger.addHandler(self._get_stream_handler(_log_format))
return logger
def _get_file_handler(self, _log_format):
"""
Make file handler for std logger.\n
"""
file_handler = logging.FileHandler(f"{self._collection}.log")
file_handler.setFormatter(logging.Formatter(_log_format))
return file_handler
@staticmethod
def _get_stream_handler(_log_format):
"""
Make console handler for std logger.\n
"""
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(_log_format))
return stream_handler
def _emit(self, level: str, msg: str, data: dict):
"""
Emit log message
:param level: message level
:param msg: critical message
:param data: dump dict
:return: None
"""
current_frame = inspect.currentframe()
emit_func = current_frame.f_back.f_back.f_code.co_name
if self._std_logger_duplicate:
self._emit_std_logger(emit_func, level, msg, data)
if self._mongo_logger_duplicate:
self._emit_mongo(emit_func, level, msg, data)
def _emit_std_logger(self, emit_func: str, level: str, msg: str, data: dict):
"""
Emit msg to std logger.\n
:param emit_func: emitter function
:param level: message level
:param msg: critical message
:param data: dump dict
:return: None
"""
try:
self._std_logger.log(self.LEVELS[level], "[%s][%s] %s %s.", emit_func, self._pid, msg, data)
except UnicodeEncodeError:
dump_data = json.dumps(data)
self._std_logger.log(self.LEVELS[level], "[%s][%s][%s] %s %s.", level, emit_func, self._pid, msg,
dump_data)
def _emit_mongo(self, emit_func: str, level: str, msg: str, data: dict):
"""
Emit msg to mongo.\n
:param emit_func: emitter function
:param level: message level
:param msg: critical message
:param data: dump dict
:return: None
"""
try:
collection = self._db[datetime.datetime.now().strftime(self._collection)]
data["function"] = emit_func
var = {
"created": datetime.datetime.now(),
"node": self._node,
"ssid": self._pid,
"raddr": "",
"level": level,
"msg": msg,
"dump": data
}
collection.insert_one(var)
except Exception as ex_error:
if self._std_logger:
self._std_logger.critical("MongoLogger Critical error. %s dump: [%s][%s] %s %s.",
ex_error, level, self._pid, msg, data)
else:
print("MongoLogger Critical error. %s dump: [%s][%s] %s %s.",
ex_error, level, self._pid, msg, data)
def critical(self, msg: str, data=None) -> None:
"""
Critical message.
:param msg: critical message
:param data: dump dict
:return: None
"""
if data is None:
data = {}
traceback = list(map(lambda x: {"function": x.function, "lineno": x.lineno}, inspect.stack()))
data["traceback"] = traceback
self._emit('crit', msg, data)
def error(self, msg: str, data=None) -> None:
"""
Error message.
:param msg: error message
:param data: dump dict
:return: None
"""
if data is None:
data = {}
self._emit('err', msg, data)
def warning(self, msg: str, data=None) -> None:
"""
Warning message.
:param msg: warning message
:param data: dump dict
:return: None
"""
if data is None:
data = {}
self._emit('warn', msg, data)
def info(self, msg: str, data=None) -> None:
"""
Info message.
:param msg: info message
:param data: dump dict
:return: None
"""
if data is None:
data = {}
self._emit('info', msg, data)
def debug(self, msg: str, data=None) -> None:
"""
Debug message.
:param msg: debug message
:param data: dump dict
:return: None
"""
if data is None:
data = {}
self._emit('debug', msg, data)
|
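A minimal usage sketch for the logger above, assuming the class is importable as mongo_logger (an illustrative name) and using a throwaway monolog.json that keeps the std logging sinks but disables the Mongo sink, so the sketch runs without a database:

import json
from mongo_logger import MongoLogger  # hypothetical module name, not from the source

# Key names below are the ones __init__ actually reads; the connection block
# is only needed when mongoLoggerDuplicate is True.
with open("monolog.json", "w") as fh:
    json.dump({
        "currentLevel": "debug",
        "stdLoggerDuplicate": True,
        "mongoLoggerDuplicate": False,
    }, fh)

log = MongoLogger(collection_name="demo_logger")
log.info("service started", {"port": 8080})
log.error("lookup failed", {"key": "user:42"})
# Both calls route through _emit(); with the Mongo sink disabled only the
# console handler and the demo_logger.log file receive the records.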
import tkinter as tk
import subprocess
import json
import os
import re
class PingAnalysis:
def __init__(self):
self.settings_file = open("settings.json", "r")
self.settings_json = json.load(self.settings_file)
self.settings_file.close()
self.FONT_SMALL = (f"{self.settings_json["FONT"]}", "16", "bold")
self.FONT_MEDIUM = (f"{self.settings_json["FONT"]}", "20", "bold")
self.FONT_LARGE = (f"{self.settings_json["FONT"]}", "24", "bold")
self.DEFAULT_BG = self.settings_json["DEFAULT_BG"]
self.DEFAULT_FG = self.settings_json["DEFAULT_FG"]
self.DEFAULT_DARK_BG = self.settings_json["DEFAULT_DARK_BG"]
self.DEFAULT_DARK_FG = self.settings_json["DEFAULT_DARK_FG"]
self.DIRECTORY = self.settings_json["DIRECTORY"]
self.theme = self.settings_json["theme"]
self.tiny_ping = self.settings_json["tiny_ping"]
self.small_ping = self.settings_json["small_ping"]
self.medium_ping = self.settings_json["medium_ping"]
self.large_ping = self.settings_json["large_ping"]
self.extreme_ping = self.settings_json["extreme_ping"]
self.version = self.settings_json["version"]
self.selection_index = None
self.selection = None
self.ping_list = []
self.ping_count = 0
self.ping_time = 0
self.lag_count = 0
self.lag_percentage = 0.0
self.mean_ping = 0
self.tiny_ping_count = 0
self.small_ping_count = 0
self.medium_ping_count = 0
self.large_ping_count = 0
self.extreme_ping_count = 0
self.root = tk.Tk()
self.root.title(f"Ping Analysis {self.version}")
self.root.geometry("750x600")
self.root.iconbitmap("images/favicon/favicon.ico")
self.root.bind("<Return>", self.user_keypress)
self.frame_data = tk.Frame(self.root)
self.frame_data.pack(side=tk.RIGHT, fill=tk.BOTH)
self.frame_main = tk.Frame(self.root)
self.frame_main.pack(fill=tk.BOTH, expand=1)
self.listbox_data = tk.Listbox(self.frame_data, height=25)
self.listbox_data.pack(side=tk.TOP)
self.label_header = tk.Label(self.frame_main, font=self.FONT_LARGE, text="Ping Analysis")
self.label_header.pack(pady=(50, 50))
self.label_file = tk.Label(self.frame_main, font=self.FONT_LARGE, anchor="w")
self.label_file.pack(fill=tk.X, pady=(0, 25), padx=(10, 0))
self.label_lag_analysis = tk.Label(self.frame_main, font=self.FONT_MEDIUM, anchor="w")
self.label_lag_analysis.pack(fill=tk.X, padx=(10, 0))
self.label_max_ping = tk.Label(self.frame_main, font=self.FONT_MEDIUM, anchor="w")
self.label_max_ping.pack(fill=tk.X, padx=(10, 0))
self.label_min_ping = tk.Label(self.frame_main, font=self.FONT_MEDIUM, anchor="w")
self.label_min_ping.pack(fill=tk.X, padx=(10, 0))
self.label_mean_ping = tk.Label(self.frame_main, font=self.FONT_MEDIUM, anchor="w")
self.label_mean_ping.pack(fill=tk.X, padx=(10, 0))
self.label_lag_count = tk.Label(self.frame_main, font=self.FONT_MEDIUM, anchor="w")
self.label_lag_count.pack(fill=tk.X, pady=(0, 25), padx=(10, 0))
self.label_tiny_ping = tk.Label(self.frame_main, font=self.FONT_SMALL, anchor="w")
self.label_tiny_ping.pack(fill=tk.X, padx=(10, 0))
self.label_small_ping = tk.Label(self.frame_main, font=self.FONT_SMALL, anchor="w")
self.label_small_ping.pack(fill=tk.X, padx=(10, 0))
self.label_medium_ping = tk.Label(self.frame_main, font=self.FONT_SMALL, anchor="w")
self.label_medium_ping.pack(fill=tk.X, padx=(10, 0))
self.label_large_ping = tk.Label(self.frame_main, font=self.FONT_SMALL, anchor="w")
self.label_large_ping.pack(fill=tk.X, padx=(10, 0))
self.label_extreme_ping = tk.Label(self.frame_main, font=self.FONT_SMALL, anchor="w")
self.label_extreme_ping.pack(fill=tk.X, padx=(10, 0))
self.button_analyze = tk.Button(self.frame_data, text="Analyze")
self.button_analyze.pack(fill=tk.BOTH, expand=1)
self.button_refresh = tk.Button(self.frame_data, text="Refresh List")
self.button_refresh.pack(fill=tk.BOTH, expand=1)
self.button_open_cmd = tk.Button(self.frame_data, text="Open CMD")
self.button_open_cmd.pack(fill=tk.BOTH, expand=1)
self.button_open_folder = tk.Button(self.frame_data, text="Open Folder")
self.button_open_folder.pack(fill=tk.BOTH, expand=1)
self.button_toggle_theme = tk.Button(self.frame_data, text="Dark Theme")
self.button_toggle_theme.pack(fill=tk.BOTH, expand=1)
self.widget_list = [widget for frame in self.root.children.values() for widget in frame.children.values()]
self.widget_list.extend(frame for frame in self.root.children.values())
def start_mainloop(self):
self.root.mainloop()
def populate_listbox(self, files):
self.clear_listbox()
for file in files:
self.listbox_data.insert("end", file)
def clear_listbox(self):
self.listbox_data.delete(0, tk.END)
def clear_labels(self):
self.label_tiny_ping.configure(text="")
self.label_small_ping.configure(text="")
self.label_medium_ping.configure(text="")
self.label_large_ping.configure(text="")
self.label_extreme_ping.configure(text="")
self.label_max_ping.configure(text="")
self.label_min_ping.configure(text="")
self.label_mean_ping.configure(text="")
self.label_file.configure(text="")
self.label_lag_count.configure(text="")
self.label_lag_analysis.configure(text="")
def clear_variables(self):
self.ping_list = []
self.ping_count = 0
self.ping_time = 0
self.lag_count = 0
self.lag_percentage = 0.0
self.mean_ping = 0
self.tiny_ping_count = 0
self.small_ping_count = 0
self.medium_ping_count = 0
self.large_ping_count = 0
self.extreme_ping_count = 0
def set_theme(self, new_theme):
if new_theme == "light":
self.button_toggle_theme.configure(text="Light Theme")
for widget in self.widget_list:
if widget.__class__.__name__ == "Frame":
widget.configure(bg=self.DEFAULT_DARK_BG)
else:
widget.configure(fg=self.DEFAULT_DARK_FG, bg=self.DEFAULT_DARK_BG)
elif new_theme == "dark":
self.button_toggle_theme.configure(text="Dark Theme")
for widget in self.widget_list:
if widget.__class__.__name__ == "Frame":
widget.configure(bg=self.DEFAULT_BG)
else:
widget.configure(fg=self.DEFAULT_FG, bg=self.DEFAULT_BG)
self.theme = new_theme
def get_data(self):
files = os.listdir(self.DIRECTORY)
return files
def get_selection(self):
self.selection_index = self.listbox_data.curselection()
if self.selection_index != ():
self.selection = self.listbox_data.get(self.selection_index)
else:
self.selection = None
self.clear_labels()
self.label_header.configure(text="ERROR: No file selected")
def open_folder(self):
subprocess.run(f"explorer {self.DIRECTORY}")
def open_cmd(self):
cwd = os.getcwd()
if cwd[-8:] != self.DIRECTORY:
os.chdir(self.DIRECTORY)
os.system("start cmd")
else:
os.system("start cmd")
os.chdir("..")
def configure_commands(self):
self.button_analyze.configure(command=self.analyze)
self.button_refresh.configure(command=lambda: self.populate_listbox(self.get_data()))
self.button_open_folder.configure(command=self.open_folder)
self.button_toggle_theme.configure(command=lambda: self.set_theme("light" if self.theme == "dark" else "dark"))
self.button_open_cmd.configure(command=self.open_cmd)
self.root.protocol("WM_DELETE_WINDOW", self.close_and_save)
def close_and_save(self):
self.settings_file = open("settings.json", "w")
self.settings_json["theme"] = self.theme
json.dump(self.settings_json, self.settings_file, indent=4)
self.settings_file.close()
self.root.destroy()
def generate_list(self):
raw_data_file = open(self.DIRECTORY + "/" + self.selection)
for line in raw_data_file:
if line[0:5] == "Reply":
try:
self.ping_time = int(re.split(" ", line)[4][5:][:-2])
self.ping_count += 1
self.ping_list.append(self.ping_time)
except ValueError:
self.label_header.configure(text="ERROR: One or more lines could not be read")
raw_data_file.close()
def user_keypress(self, event):
self.analyze()
def analyze(self):
self.clear_variables()
self.clear_labels()
self.label_header.configure(text="Ping Analysis")
self.get_selection()
if self.selection is not None:
self.generate_list()
else:
self.clear_labels()
self.label_header.configure(text="ERROR: No file selected")
raise Exception("ERROR: No file selected")
if len(self.ping_list) != 0:
self.mean_ping = int((sum(self.ping_list) / len(self.ping_list)))
else:
self.clear_labels()
self.label_header.configure(text="ERROR: No data or file is not UTF-8")
raise Exception("ERROR: No data or file is not UTF-8")
for ping in self.ping_list:
if ping > self.extreme_ping:
self.extreme_ping_count += 1
elif ping > self.large_ping:
self.large_ping_count += 1
elif ping > self.medium_ping:
self.medium_ping_count += 1
elif ping > self.small_ping:
self.small_ping_count += 1
elif ping > self.tiny_ping:
self.tiny_ping_count += 1
self.lag_count = self.medium_ping_count + self.large_ping_count + self.extreme_ping_count
self.lag_percentage = round(self.lag_count / self.ping_count * 100, 2)
self.label_file.configure(text=f"[{self.selection}] Total ping count: {self.ping_count}")
self.label_tiny_ping.configure(text=f"Tiny ping count: {self.tiny_ping_count} (>{self.tiny_ping}ms)")
self.label_small_ping.configure(text=f"Small ping count: {self.small_ping_count} (>{self.small_ping}ms)")
self.label_medium_ping.configure(text=f"Medium ping count: {self.medium_ping_count} (>{self.medium_ping}ms)")
self.label_large_ping.configure(text=f"Large ping count: {self.large_ping_count} (>{self.large_ping}ms)")
self.label_extreme_ping.configure(text=f"Extreme ping count: {self.extreme_ping_count} (>{self.extreme_ping}ms)")
self.label_max_ping.configure(text=f"MAXIMUM ping: {max(self.ping_list)}")
self.label_min_ping.configure(text=f"MINIMUM ping: {min(self.ping_list)}")
self.label_mean_ping.configure(text=f"MEAN ping: {self.mean_ping}")
self.label_lag_count.configure(text=f"Lagged {self.lag_count} {"time" if self.lag_count == 1 else "times"} "
f"out of {self.ping_count} ({self.lag_percentage}%)")
if self.lag_percentage > 10.0:
self.label_lag_analysis.configure(text="Extremely Severe Lag", fg="red")
elif self.lag_percentage > 5.0:
self.label_lag_analysis.configure(text="Severe Lag", fg="orange")
elif self.lag_percentage > 3.0:
self.label_lag_analysis.configure(text="Moderate Lag", fg="yellow")
elif self.lag_percentage > 1.0:
self.label_lag_analysis.configure(text="Low Lag", fg="green")
else:
self.label_lag_analysis.configure(text="Extremely Low Lag", fg="cyan")
def main():
pa = PingAnalysis()
pa.set_theme(pa.theme)
pa.configure_commands()
pa.populate_listbox(pa.get_data())
pa.start_mainloop()
if __name__ == "__main__":
main()
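generate_list() above keys on Windows-style "Reply from ..." lines and pulls the millisecond value out of the fifth space-separated token. A standalone trace of that parse, using a made-up reply line (the sample text is an assumption; the slicing mirrors the method exactly):

import re

line = "Reply from 8.8.8.8: bytes=32 time=14ms TTL=117"
token = re.split(" ", line)[4]   # "time=14ms"
ping_time = int(token[5:][:-2])  # drop the "time=" prefix and "ms" suffix -> 14
print(ping_time)
assert ping_time == 14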
| import tkinter as tk
import subprocess
import json
import os
import re
class PingAnalysis:
def __init__(self):
self.settings_file = open("settings.json", "r")
self.settings_json = json.load(self.settings_file)
self.settings_file.close()
self.FONT_SMALL = (f"{self.settings_json['FONT']}", "16", "bold")
self.FONT_MEDIUM = (f"{self.settings_json['FONT']}", "20", "bold")
self.FONT_LARGE = (f"{self.settings_json['FONT']}", "24", "bold")
self.DEFAULT_BG = self.settings_json["DEFAULT_BG"]
self.DEFAULT_FG = self.settings_json["DEFAULT_FG"]
self.DEFAULT_DARK_BG = self.settings_json["DEFAULT_DARK_BG"]
self.DEFAULT_DARK_FG = self.settings_json["DEFAULT_DARK_FG"]
self.DIRECTORY = self.settings_json["DIRECTORY"]
self.theme = self.settings_json["theme"]
self.tiny_ping = self.settings_json["tiny_ping"]
self.small_ping = self.settings_json["small_ping"]
self.medium_ping = self.settings_json["medium_ping"]
self.large_ping = self.settings_json["large_ping"]
self.extreme_ping = self.settings_json["extreme_ping"]
self.version = self.settings_json["version"]
self.selection_index = None
self.selection = None
self.ping_list = []
self.ping_count = 0
self.ping_time = 0
self.lag_count = 0
self.lag_percentage = 0.0
self.mean_ping = 0
self.tiny_ping_count = 0
self.small_ping_count = 0
self.medium_ping_count = 0
self.large_ping_count = 0
self.extreme_ping_count = 0
self.root = tk.Tk()
self.root.title(f"Ping Analysis {self.version}")
self.root.geometry("750x600")
self.root.iconbitmap("images/favicon/favicon.ico")
self.root.bind("<Return>", self.user_keypress)
self.frame_data = tk.Frame(self.root)
self.frame_data.pack(side=tk.RIGHT, fill=tk.BOTH)
self.frame_main = tk.Frame(self.root)
self.frame_main.pack(fill=tk.BOTH, expand=1)
self.listbox_data = tk.Listbox(self.frame_data, height=25)
self.listbox_data.pack(side=tk.TOP)
self.label_header = tk.Label(self.frame_main, font=self.FONT_LARGE, text="Ping Analysis")
self.label_header.pack(pady=(50, 50))
self.label_file = tk.Label(self.frame_main, font=self.FONT_LARGE, anchor="w")
self.label_file.pack(fill=tk.X, pady=(0, 25), padx=(10, 0))
self.label_lag_analysis = tk.Label(self.frame_main, font=self.FONT_MEDIUM, anchor="w")
self.label_lag_analysis.pack(fill=tk.X, padx=(10, 0))
self.label_max_ping = tk.Label(self.frame_main, font=self.FONT_MEDIUM, anchor="w")
self.label_max_ping.pack(fill=tk.X, padx=(10, 0))
self.label_min_ping = tk.Label(self.frame_main, font=self.FONT_MEDIUM, anchor="w")
self.label_min_ping.pack(fill=tk.X, padx=(10, 0))
self.label_mean_ping = tk.Label(self.frame_main, font=self.FONT_MEDIUM, anchor="w")
self.label_mean_ping.pack(fill=tk.X, padx=(10, 0))
self.label_lag_count = tk.Label(self.frame_main, font=self.FONT_MEDIUM, anchor="w")
self.label_lag_count.pack(fill=tk.X, pady=(0, 25), padx=(10, 0))
self.label_tiny_ping = tk.Label(self.frame_main, font=self.FONT_SMALL, anchor="w")
self.label_tiny_ping.pack(fill=tk.X, padx=(10, 0))
self.label_small_ping = tk.Label(self.frame_main, font=self.FONT_SMALL, anchor="w")
self.label_small_ping.pack(fill=tk.X, padx=(10, 0))
self.label_medium_ping = tk.Label(self.frame_main, font=self.FONT_SMALL, anchor="w")
self.label_medium_ping.pack(fill=tk.X, padx=(10, 0))
self.label_large_ping = tk.Label(self.frame_main, font=self.FONT_SMALL, anchor="w")
self.label_large_ping.pack(fill=tk.X, padx=(10, 0))
self.label_extreme_ping = tk.Label(self.frame_main, font=self.FONT_SMALL, anchor="w")
self.label_extreme_ping.pack(fill=tk.X, padx=(10, 0))
self.button_analyze = tk.Button(self.frame_data, text="Analyze")
self.button_analyze.pack(fill=tk.BOTH, expand=1)
self.button_refresh = tk.Button(self.frame_data, text="Refresh List")
self.button_refresh.pack(fill=tk.BOTH, expand=1)
self.button_open_cmd = tk.Button(self.frame_data, text="Open CMD")
self.button_open_cmd.pack(fill=tk.BOTH, expand=1)
self.button_open_folder = tk.Button(self.frame_data, text="Open Folder")
self.button_open_folder.pack(fill=tk.BOTH, expand=1)
self.button_toggle_theme = tk.Button(self.frame_data, text="Dark Theme")
self.button_toggle_theme.pack(fill=tk.BOTH, expand=1)
self.widget_list = [widget for frame in self.root.children.values() for widget in frame.children.values()]
self.widget_list.extend(frame for frame in self.root.children.values())
def start_mainloop(self):
self.root.mainloop()
def populate_listbox(self, files):
self.clear_listbox()
for file in files:
self.listbox_data.insert("end", file)
def clear_listbox(self):
self.listbox_data.delete(0, tk.END)
def clear_labels(self):
self.label_tiny_ping.configure(text="")
self.label_small_ping.configure(text="")
self.label_medium_ping.configure(text="")
self.label_large_ping.configure(text="")
self.label_extreme_ping.configure(text="")
self.label_max_ping.configure(text="")
self.label_min_ping.configure(text="")
self.label_mean_ping.configure(text="")
self.label_file.configure(text="")
self.label_lag_count.configure(text="")
self.label_lag_analysis.configure(text="")
def clear_variables(self):
self.ping_list = []
self.ping_count = 0
self.ping_time = 0
self.lag_count = 0
self.lag_percentage = 0.0
self.mean_ping = 0
self.tiny_ping_count = 0
self.small_ping_count = 0
self.medium_ping_count = 0
self.large_ping_count = 0
self.extreme_ping_count = 0
def set_theme(self, new_theme):
if new_theme == "light":
self.button_toggle_theme.configure(text="Light Theme")
for widget in self.widget_list:
if widget.__class__.__name__ == "Frame":
widget.configure(bg=self.DEFAULT_DARK_BG)
else:
widget.configure(fg=self.DEFAULT_DARK_FG, bg=self.DEFAULT_DARK_BG)
elif new_theme == "dark":
self.button_toggle_theme.configure(text="Dark Theme")
for widget in self.widget_list:
if widget.__class__.__name__ == "Frame":
widget.configure(bg=self.DEFAULT_BG)
else:
widget.configure(fg=self.DEFAULT_FG, bg=self.DEFAULT_BG)
self.theme = new_theme
def get_data(self):
files = os.listdir(self.DIRECTORY)
return files
def get_selection(self):
self.selection_index = self.listbox_data.curselection()
if self.selection_index != ():
self.selection = self.listbox_data.get(self.selection_index)
else:
self.selection = None
self.clear_labels()
self.label_header.configure(text="ERROR: No file selected")
def open_folder(self):
subprocess.run(f"explorer {self.DIRECTORY}")
def open_cmd(self):
cwd = os.getcwd()
if cwd[-8:] != self.DIRECTORY:
os.chdir(self.DIRECTORY)
os.system("start cmd")
else:
os.system("start cmd")
os.chdir("..")
def configure_commands(self):
self.button_analyze.configure(command=self.analyze)
self.button_refresh.configure(command=lambda: self.populate_listbox(self.get_data()))
self.button_open_folder.configure(command=self.open_folder)
self.button_toggle_theme.configure(command=lambda: self.set_theme("light" if self.theme == "dark" else "dark"))
self.button_open_cmd.configure(command=self.open_cmd)
self.root.protocol("WM_DELETE_WINDOW", self.close_and_save)
def close_and_save(self):
self.settings_file = open("settings.json", "w")
self.settings_json["theme"] = self.theme
json.dump(self.settings_json, self.settings_file, indent=4)
self.settings_file.close()
self.root.destroy()
def generate_list(self):
raw_data_file = open(self.DIRECTORY + "/" + self.selection)
for line in raw_data_file:
if line[0:5] == "Reply":
try:
self.ping_time = int(re.split(" ", line)[4][5:][:-2])
self.ping_count += 1
self.ping_list.append(self.ping_time)
except ValueError:
self.label_header.configure(text="ERROR: One or more lines could not be read")
raw_data_file.close()
def user_keypress(self, event):
self.analyze()
def analyze(self):
self.clear_variables()
self.clear_labels()
self.label_header.configure(text="Ping Analysis")
self.get_selection()
if self.selection is not None:
self.generate_list()
else:
self.clear_labels()
self.label_header.configure(text="ERROR: No file selected")
raise Exception("ERROR: No file selected")
if len(self.ping_list) != 0:
self.mean_ping = int((sum(self.ping_list) / len(self.ping_list)))
else:
self.clear_labels()
self.label_header.configure(text="ERROR: No data or file is not UTF-8")
raise Exception("ERROR: No data or file is not UTF-8")
for ping in self.ping_list:
if ping > self.extreme_ping:
self.extreme_ping_count += 1
elif ping > self.large_ping:
self.large_ping_count += 1
elif ping > self.medium_ping:
self.medium_ping_count += 1
elif ping > self.small_ping:
self.small_ping_count += 1
elif ping > self.tiny_ping:
self.tiny_ping_count += 1
self.lag_count = self.medium_ping_count + self.large_ping_count + self.extreme_ping_count
self.lag_percentage = round(self.lag_count / self.ping_count * 100, 2)
self.label_file.configure(text=f"[{self.selection}] Total ping count: {self.ping_count}")
self.label_tiny_ping.configure(text=f"Tiny ping count: {self.tiny_ping_count} (>{self.tiny_ping}ms)")
self.label_small_ping.configure(text=f"Small ping count: {self.small_ping_count} (>{self.small_ping}ms)")
self.label_medium_ping.configure(text=f"Medium ping count: {self.medium_ping_count} (>{self.medium_ping}ms)")
self.label_large_ping.configure(text=f"Large ping count: {self.large_ping_count} (>{self.large_ping}ms)")
self.label_extreme_ping.configure(text=f"Extreme ping count: {self.extreme_ping_count} (>{self.extreme_ping}ms)")
self.label_max_ping.configure(text=f"MAXIMUM ping: {max(self.ping_list)}")
self.label_min_ping.configure(text=f"MINIMUM ping: {min(self.ping_list)}")
self.label_mean_ping.configure(text=f"MEAN ping: {self.mean_ping}")
self.label_lag_count.configure(text=f"Lagged {self.lag_count} {'time' if self.lag_count == 1 else 'times'} "
f"out of {self.ping_count} ({self.lag_percentage}%)")
if self.lag_percentage > 10.0:
self.label_lag_analysis.configure(text="Extremely Severe Lag", fg="red")
elif self.lag_percentage > 5.0:
self.label_lag_analysis.configure(text="Severe Lag", fg="orange")
elif self.lag_percentage > 3.0:
self.label_lag_analysis.configure(text="Moderate Lag", fg="yellow")
elif self.lag_percentage > 1.0:
self.label_lag_analysis.configure(text="Low Lag", fg="green")
else:
self.label_lag_analysis.configure(text="Extremely Low Lag", fg="cyan")
def main():
pa = PingAnalysis()
pa.set_theme(pa.theme)
pa.configure_commands()
pa.populate_listbox(pa.get_data())
pa.start_mainloop()
if __name__ == "__main__":
main()
|
import os
import subprocess
import logging
import filecmp
import copy
import base64
import json
import pykube
import kubernetes_validate
from kubernetes_validate.utils import (
SchemaNotFoundError,
VersionNotSupportedError,
InvalidSchemaError,
ValidationError,
)
from toscaparser.tosca_template import ToscaTemplate
from submitter import utils
from submitter.abstracts import base_adaptor
from submitter.abstracts.exceptions import AdaptorCritical, TranslateError
from .zorp import ZorpManifests
from .translator import get_translator
from .tosca import Prefix, NodeType, Interface, NetworkProxy
logger = logging.getLogger("adaptors.k8s_adaptor")
class KubernetesAdaptor(base_adaptor.Adaptor):
""" The Kubernetes Adaptor class
Carry out a translation from a TOSCA ADT to a Kubernetes Manifest,
and the subsequent execution, update and undeployment of the translation.
"""
def __init__(
self, adaptor_id, config, dryrun, validate=False, template=None
):
""" init method of the Adaptor """
super().__init__()
logger.debug("Initialising Kubernetes Adaptor class...")
self.status = "Initialising..."
if template and not isinstance(template, ToscaTemplate):
raise AdaptorCritical("Template is not a valid TOSCAParser object")
self.ID = adaptor_id
self.dryrun = dryrun
self.short_id = "_".join(adaptor_id.split("_")[:-1]) or adaptor_id
self.config = config
self.tpl = template
out_volume = self.config.get("volume", "files/output_configs")
self.manifest_path = f"{out_volume}{self.ID}.yaml"
self.manifest_tmp_path = f"{out_volume}tmp_{self.ID}.yaml"
sys_volume = self.config.get("system", "system/")
self.cadvisor_manifest_path = f"{sys_volume}cadvisor.yaml"
self.nodex_manifest_path = f"{sys_volume}nodex.yaml"
self.manifests = []
self.services = []
self.volumes = {}
self.output = {}
self.tcp_ports = []
self.ingress_conf = []
self.ingress_secrets = {}
self.validate = validate
logger.info("Kubernetes Adaptor is ready.")
self.status = "Initialised"
def translate(self, update=False, write_files=True):
""" Translate sections of the ADT into a Kubernetes Manifest """
logger.info("Translating into Kubernetes Manifests")
self.status = "Translating..."
self.manifests = []
self.tcp_ports = []
self.ingress_conf = []
self.ingress_secrets = {}
for node in self.tpl.nodetemplates:
if node.type.startswith("tosca.nodes.MiCADO"):
self._translate_node_templates(node)
# Look for a monitoring policy and attach default
# metric exporters to the application
for policy in self.tpl.policies:
if policy.type.startswith(Prefix.MONITOR_POLICY):
self._translate_monitoring_policy(policy)
if policy.type.startswith(Prefix.NETWORK_POLICY):
self._translate_security_policy(policy)
if self.ingress_conf:
self._deploy_zorp()
self._manifest_secrets()
if not self.manifests:
logger.info("No nodes to orchestrate with Kubernetes. Skipping")
self.status = "Skipped Translation"
return
unvalidated_kinds = self.config.get("unvalidated_kinds", [])
k8s_version = self.config.get("k8s_version", "1.18.0")
for manifest in self.manifests:
if manifest["kind"] in unvalidated_kinds:
continue
try:
kubernetes_validate.validate(manifest, k8s_version, strict=True)
except ValidationError as err:
message = f"Invalid K8s Manifest: {err.message}"
logger.error(message)
raise AdaptorCritical(message) from None
except (InvalidSchemaError, SchemaNotFoundError):
message = (
f"Schema for {manifest["apiVersion"]}/{manifest["kind"]} "
f"not found in Kubernetes v{k8s_version}"
)
logger.error(message)
raise AdaptorCritical(message) from None
except VersionNotSupportedError:
pass
if not write_files:
pass
elif update:
utils.dump_list_yaml(self.manifests, self.manifest_tmp_path)
elif self.validate is False:
utils.dump_list_yaml(self.manifests, self.manifest_path)
logger.info("Translation complete")
self.status = "Translated"
def _translate_node_templates(self, node):
_name_check_node(node)
node = copy.deepcopy(node)
manifests = []
if not utils.check_lifecycle(node, Interface.KUBERNETES):
return
translator = get_translator(node)
tosca_translator = translator.from_toscaparser(
self.short_id, node, self.tpl.repositories
)
manifests = tosca_translator.build()
self.manifests += manifests
def _translate_monitoring_policy(self, policy):
if policy.get_property_value("enable_container_metrics"):
self._translate_container_monitoring_policy()
if policy.get_property_value("enable_node_metrics"):
self._translate_node_monitoring_policy()
def _translate_container_monitoring_policy(self):
try:
cadvisor = utils.get_yaml_data(self.cadvisor_manifest_path)
cadvisor["metadata"]["labels"][
"app.kubernetes.io/instance"
] = self.short_id
self.manifests.append(cadvisor)
except FileNotFoundError:
logger.warning(
"Could not find cAdvisor manifest"
f" at {self.cadvisor_manifest_path}"
)
def _translate_node_monitoring_policy(self):
try:
nodex = utils.get_yaml_data(self.nodex_manifest_path)
nodex["metadata"]["labels"][
"app.kubernetes.io/instance"
] = self.short_id
self.manifests.append(nodex)
except FileNotFoundError:
logger.warning(
"Could not find NodeExporter manifest"
f" at {self.nodex_manifest_path}"
)
def _translate_security_policy(self, policy):
if policy.type == str(NetworkProxy.PASSTHROUGH):
# This should now work as expected
pass
elif policy.type in NetworkProxy.values():
self._translate_level7_policy(policy)
else:
logger.warning(f"Unknown network security policy: {policy.type}")
def _translate_level7_policy(self, policy):
ingress = {"policy_type": policy.type.split(".")[-1]}
ingress.update(
{
key: value.value
for key, value in policy.get_properties().items()
}
)
self._extract_ports(ingress)
self._translate_tls_secrets(ingress, policy)
self.ingress_conf.append(ingress)
def _extract_ports(self, ingress):
try:
self.tcp_ports.extend(ingress["target_ports"])
except KeyError:
pass
def _translate_tls_secrets(self, ingress, policy):
if ingress.get("encryption", False):
if (
"encryption_key" not in ingress
or "encryption_cert" not in ingress
):
error = f"Key and/or cert missing for policy {policy.type}"
logger.error(error)
raise TranslateError(error)
index = "krumpli" + str(len(self.ingress_secrets))
self.ingress_secrets[index] = {
"tls.key": ingress["encryption_key"],
"tls.crt": ingress["encryption_cert"],
}
ingress["encryption_key"] = index
ingress["encryption_cert"] = index
else:
try:
del ingress["encryption_key"]
del ingress["encryption_cert"]
except KeyError:
pass
def _deploy_zorp(self):
zorp = ZorpManifests()
ports_list = self._list_ports()
ingress_conf = json.dumps(self.ingress_conf)
self.manifests.append(zorp.service_account())
self.manifests.append(zorp.cluster_role())
self.manifests.append(zorp.role_binding())
self.manifests.append(zorp.daemon_set(ports_list))
self.manifests.append(zorp.ingress(ingress_conf))
def _list_ports(self):
return [
{
"name": "port-" + str(port),
"containerPort": port,
"hostPort": port,
}
for port in self.tcp_ports
]
def _manifest_secrets(self):
for name, secret in self.ingress_secrets.items():
self.manifests.append(self._k8s_secret(name, secret))
def _k8s_secret(self, name, secret):
return {
"apiVersion": "v1",
"kind": "Secret",
"metadata": {"name": name, "namespace": "micado-worker"},
"type": "Opaque",
"data": {
key: base64.b64encode(value.encode("UTF-8")).decode("ASCII")
for key, value in secret.items()
},
}
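# Illustrative sketch (hypothetical values): a TLS secret collected by
# _translate_tls_secrets would be rendered by _k8s_secret roughly as
# {
#     "apiVersion": "v1",
#     "kind": "Secret",
#     "metadata": {"name": "krumpli0", "namespace": "micado-worker"},
#     "type": "Opaque",
#     "data": {"tls.key": "<base64 PEM key>", "tls.crt": "<base64 PEM cert>"},
# }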
def execute(self, update=False):
""" Execute """
logger.info("Executing Kubernetes Manifests...")
self.status = "executing..."
if self._skip_check():
return
if update:
operation = [
"kubectl",
"apply",
"--prune",
"-l",
f"app.kubernetes.io/instance={self.short_id}",
"-f",
self.manifest_path,
]
else:
operation = [
"kubectl",
"create",
"-f",
self.manifest_path,
"--save-config",
]
try:
logger.debug(f"Executing {operation}")
subprocess.run(operation, stderr=subprocess.PIPE, check=True)
except subprocess.CalledProcessError as e:
logger.error(f"kubectl: {e.stderr}")
raise AdaptorCritical(f"kubectl: {e.stderr}")
logger.info("Kube objects deployed, trying to get outputs...")
self._get_outputs()
logger.info("Execution complete")
self.status = "Executed"
def update(self):
""" Update """
logger.info("Updating Kubernetes Manifests")
self.status = "Updating..."
logger.debug("Creating tmp translation...")
self.manifests = []
self.translate(True)
if not self.manifests and self._config_file_exists():
self.undeploy(False)
self.cleanup()
logger.info("Updated (removed all Kubernetes workloads)")
self.status = "Updated (removed all Kubernetes workloads)"
elif not self.manifests:
logger.info("No nodes to orchestrate with Kubernetes. Skipping...")
self.status = "Skipped Update"
elif os.path.exists(self.manifest_path) and filecmp.cmp(
self.manifest_path, self.manifest_tmp_path
):
logger.debug(f"No update - removing {self.manifest_tmp_path}")
os.remove(self.manifest_tmp_path)
logger.info("Nothing to update")
self.status = "Updated (nothing to update)"
else:
logger.debug("Updating Kubernetes workloads")
os.rename(self.manifest_tmp_path, self.manifest_path)
self.execute(True)
logger.info("Update complete")
self.status = "Updated"
def undeploy(self, kill_nodes=True):
""" Undeploy """
logger.info("Undeploying Kubernetes workloads")
self.status = "Undeploying..."
error = False
if self._skip_check():
return
if kill_nodes:
# Delete nodes from the cluster
operation = [
"kubectl",
"delete",
"no",
"-l",
"micado.eu/node_type",
]
try:
logger.debug(f"Undeploy {operation}")
subprocess.run(operation, stderr=subprocess.PIPE, check=True)
except subprocess.CalledProcessError:
logger.debug("Got error deleting nodes")
error = True
# Delete resources in the manifest
operation = [
"kubectl",
"delete",
"-f",
self.manifest_path,
"--timeout",
"90s",
]
try:
logger.debug(f"Undeploy {operation}")
subprocess.run(operation, stderr=subprocess.PIPE, check=True)
except subprocess.CalledProcessError:
logger.debug("Had some trouble removing Kubernetes workloads...")
error = True
if error:
raise AdaptorCritical("Had some trouble removing workloads!")
logger.info("Undeployment complete")
self.status = "Undeployed"
def cleanup(self):
""" Cleanup """
logger.info("Cleaning-up...")
self.status = "cleaning-up..."
try:
os.remove(self.manifest_path)
except OSError:
logger.warning("Could not remove manifest file")
self.status = "Clean!"
def query(self, query):
""" Query """
logger.info(f"Query ID {self.ID}")
kube_config = pykube.KubeConfig.from_file("~/.kube/config")
api = pykube.HTTPClient(kube_config)
if query == "nodes":
nodes = pykube.Node.objects(api)
return [x.name for x in nodes.iterator()]
elif query == "services":
pods = pykube.Pod.objects(api)
return [x.name for x in pods.iterator()]
def _get_outputs(self):
"""Get outputs and their resultant attributes"""
logger.info("Fetching outputs...")
for output in self.tpl.outputs:
node = output.value.get_referenced_node_template()
# TODO Use ONLY is_derived_from when v9 API deprecated
if node.is_derived_from(
NodeType.DOCKER_CONTAINER
) or node.type.startswith(str(NodeType.DOCKER_CONTAINER)):
logger.debug(f"Inspect node: {node.name}")
query = output.value.attribute_name
if query == "port":
self.output.setdefault(node.name, {})[query] = query_port(
node.name
)
else:
logger.warning(f"{node.name} is not a Docker container!")
def _config_file_exists(self):
""" Check if config file was generated during translation """
return os.path.exists(self.manifest_path)
def _skip_check(self):
if not self._config_file_exists():
logger.info(f"No config generated, skipping {self.status} step...")
self.status = "Skipped"
return True
elif self.dryrun:
logger.info(
f"DRY-RUN: Kubernetes {self.status} in dry-run mode..."
)
self.status = "DRY-RUN Deployment"
return True
def _name_check_node(node):
errors = []
if "_" in node.name:
errors.append("TOSCA node names")
if "_" in (node.get_property_value("name") or ""):
errors.append("property: 'name'")
if "_" in (node.get_property_value("container_name") or ""):
errors.append("property: 'container_name'")
if errors:
errors = ", ".join(errors)
logger.error(
f"Failed name convention check (underscores) on node: {node.name}"
)
raise AdaptorCritical(
f"Underscores in node {node.name} not allowed for {errors}"
)
def query_port(service_name):
"""Queries a specific service for its port listing
Args:
service_name (string): Name of service to query
Returns:
list: port listing, or an error string if the service is not found
"""
kube_config = pykube.KubeConfig.from_file("~/.kube/config")
api = pykube.HTTPClient(kube_config)
try:
service = pykube.Service.objects(api).get_by_name(service_name)
except Exception:
return f"Service {service_name} not found"
return service.obj.get("spec", {}).get("ports", {})
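# Illustrative usage sketch (needs a reachable cluster and ~/.kube/config; the
# service name and port values below are hypothetical):
# query_port("my-web-service")
# -> [{"name": "http", "port": 8080, "protocol": "TCP", "targetPort": 8080}]
# or, if the lookup fails: "Service my-web-service not found"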
| import os
import subprocess
import logging
import filecmp
import copy
import base64
import json
import pykube
import kubernetes_validate
from kubernetes_validate.utils import (
SchemaNotFoundError,
VersionNotSupportedError,
InvalidSchemaError,
ValidationError,
)
from toscaparser.tosca_template import ToscaTemplate
from submitter import utils
from submitter.abstracts import base_adaptor
from submitter.abstracts.exceptions import AdaptorCritical, TranslateError
from .zorp import ZorpManifests
from .translator import get_translator
from .tosca import Prefix, NodeType, Interface, NetworkProxy
logger = logging.getLogger("adaptors.k8s_adaptor")
class KubernetesAdaptor(base_adaptor.Adaptor):
""" The Kubernetes Adaptor class
Carry out a translation from a TOSCA ADT to a Kubernetes Manifest,
and the subsequent execution, update and undeployment of the translation.
"""
def __init__(
self, adaptor_id, config, dryrun, validate=False, template=None
):
""" init method of the Adaptor """
super().__init__()
logger.debug("Initialising Kubernetes Adaptor class...")
self.status = "Initialising..."
if template and not isinstance(template, ToscaTemplate):
raise AdaptorCritical("Template is not a valid TOSCAParser object")
self.ID = adaptor_id
self.dryrun = dryrun
self.short_id = "_".join(adaptor_id.split("_")[:-1]) or adaptor_id
self.config = config
self.tpl = template
out_volume = self.config.get("volume", "files/output_configs")
self.manifest_path = f"{out_volume}{self.ID}.yaml"
self.manifest_tmp_path = f"{out_volume}tmp_{self.ID}.yaml"
sys_volume = self.config.get("system", "system/")
self.cadvisor_manifest_path = f"{sys_volume}cadvisor.yaml"
self.nodex_manifest_path = f"{sys_volume}nodex.yaml"
self.manifests = []
self.services = []
self.volumes = {}
self.output = {}
self.tcp_ports = []
self.ingress_conf = []
self.ingress_secrets = {}
self.validate = validate
logger.info("Kubernetes Adaptor is ready.")
self.status = "Initialised"
def translate(self, update=False, write_files=True):
""" Translate sections of the ADT into a Kubernetes Manifest """
logger.info("Translating into Kubernetes Manifests")
self.status = "Translating..."
self.manifests = []
self.tcp_ports = []
self.ingress_conf = []
self.ingress_secrets = {}
for node in self.tpl.nodetemplates:
if node.type.startswith("tosca.nodes.MiCADO"):
self._translate_node_templates(node)
# Look for a monitoring policy and attach default
# metric exporters to the application
for policy in self.tpl.policies:
if policy.type.startswith(Prefix.MONITOR_POLICY):
self._translate_monitoring_policy(policy)
if policy.type.startswith(Prefix.NETWORK_POLICY):
self._translate_security_policy(policy)
if self.ingress_conf:
self._deploy_zorp()
self._manifest_secrets()
if not self.manifests:
logger.info("No nodes to orchestrate with Kubernetes. Skipping")
self.status = "Skipped Translation"
return
unvalidated_kinds = self.config.get("unvalidated_kinds", [])
k8s_version = self.config.get("k8s_version", "1.18.0")
for manifest in self.manifests:
if manifest["kind"] in unvalidated_kinds:
continue
try:
kubernetes_validate.validate(manifest, k8s_version, strict=True)
except ValidationError as err:
message = f"Invalid K8s Manifest: {err.message}"
logger.error(message)
raise AdaptorCritical(message) from None
except (InvalidSchemaError, SchemaNotFoundError):
message = (
f"Schema for {manifest['apiVersion']}/{manifest['kind']} "
f"not found in Kubernetes v{k8s_version}"
)
logger.error(message)
raise AdaptorCritical(message) from None
except VersionNotSupportedError:
pass
if not write_files:
pass
elif update:
utils.dump_list_yaml(self.manifests, self.manifest_tmp_path)
elif self.validate is False:
utils.dump_list_yaml(self.manifests, self.manifest_path)
logger.info("Translation complete")
self.status = "Translated"
def _translate_node_templates(self, node):
_name_check_node(node)
node = copy.deepcopy(node)
manifests = []
if not utils.check_lifecycle(node, Interface.KUBERNETES):
return
translator = get_translator(node)
tosca_translator = translator.from_toscaparser(
self.short_id, node, self.tpl.repositories
)
manifests = tosca_translator.build()
self.manifests += manifests
def _translate_monitoring_policy(self, policy):
if policy.get_property_value("enable_container_metrics"):
self._translate_container_monitoring_policy()
if policy.get_property_value("enable_node_metrics"):
self._translate_node_monitoring_policy()
def _translate_container_monitoring_policy(self):
try:
cadvisor = utils.get_yaml_data(self.cadvisor_manifest_path)
cadvisor["metadata"]["labels"][
"app.kubernetes.io/instance"
] = self.short_id
self.manifests.append(cadvisor)
except FileNotFoundError:
logger.warning(
"Could not find cAdvisor manifest"
f" at {self.cadvisor_manifest_path}"
)
def _translate_node_monitoring_policy(self):
try:
nodex = utils.get_yaml_data(self.nodex_manifest_path)
nodex["metadata"]["labels"][
"app.kubernetes.io/instance"
] = self.short_id
self.manifests.append(nodex)
except FileNotFoundError:
logger.warning(
"Could not find NodeExporter manifest"
f" at {self.nodex_manifest_path}"
)
def _translate_security_policy(self, policy):
if policy.type == str(NetworkProxy.PASSTHROUGH):
# This should now work as expected
pass
elif policy.type in NetworkProxy.values():
self._translate_level7_policy(policy)
else:
logger.warning(f"Unknown network security policy: {policy.type}")
def _translate_level7_policy(self, policy):
ingress = {"policy_type": policy.type.split(".")[-1]}
ingress.update(
{
key: value.value
for key, value in policy.get_properties().items()
}
)
self._extract_ports(ingress)
self._translate_tls_secrets(ingress, policy)
self.ingress_conf.append(ingress)
def _extract_ports(self, ingress):
try:
self.tcp_ports.extend(ingress["target_ports"])
except KeyError:
pass
def _translate_tls_secrets(self, ingress, policy):
if ingress.get("encryption", False):
if (
"encryption_key" not in ingress
or "encryption_cert" not in ingress
):
error = f"Key and/or cert missing for policy {policy.type}"
logger.error(error)
raise TranslateError(error)
index = "krumpli" + str(len(self.ingress_secrets))
self.ingress_secrets[index] = {
"tls.key": ingress["encryption_key"],
"tls.crt": ingress["encryption_cert"],
}
ingress["encryption_key"] = index
ingress["encryption_cert"] = index
else:
try:
del ingress["encryption_key"]
del ingress["encryption_cert"]
except KeyError:
pass
def _deploy_zorp(self):
zorp = ZorpManifests()
ports_list = self._list_ports()
ingress_conf = json.dumps(self.ingress_conf)
self.manifests.append(zorp.service_account())
self.manifests.append(zorp.cluster_role())
self.manifests.append(zorp.role_binding())
self.manifests.append(zorp.daemon_set(ports_list))
self.manifests.append(zorp.ingress(ingress_conf))
def _list_ports(self):
return [
{
"name": "port-" + str(port),
"containerPort": port,
"hostPort": port,
}
for port in self.tcp_ports
]
def _manifest_secrets(self):
for name, secret in self.ingress_secrets.items():
self.manifests.append(self._k8s_secret(name, secret))
def _k8s_secret(self, name, secret):
return {
"apiVersion": "v1",
"kind": "Secret",
"metadata": {"name": name, "namespace": "micado-worker"},
"type": "Opaque",
"data": {
key: base64.b64encode(value.encode("UTF-8")).decode("ASCII")
for key, value in secret.items()
},
}
def execute(self, update=False):
""" Execute """
logger.info("Executing Kubernetes Manifests...")
self.status = "executing..."
if self._skip_check():
return
if update:
operation = [
"kubectl",
"apply",
"--prune",
"-l",
f"app.kubernetes.io/instance={self.short_id}",
"-f",
self.manifest_path,
]
else:
operation = [
"kubectl",
"create",
"-f",
self.manifest_path,
"--save-config",
]
try:
logger.debug(f"Executing {operation}")
subprocess.run(operation, stderr=subprocess.PIPE, check=True)
except subprocess.CalledProcessError as e:
logger.error(f"kubectl: {e.stderr}")
raise AdaptorCritical(f"kubectl: {e.stderr}")
logger.info("Kube objects deployed, trying to get outputs...")
self._get_outputs()
logger.info("Execution complete")
self.status = "Executed"
def update(self):
""" Update """
logger.info("Updating Kubernetes Manifests")
self.status = "Updating..."
logger.debug("Creating tmp translation...")
self.manifests = []
self.translate(True)
if not self.manifests and self._config_file_exists():
self.undeploy(False)
self.cleanup()
logger.info("Updated (removed all Kubernetes workloads)")
self.status = "Updated (removed all Kubernetes workloads)"
elif not self.manifests:
logger.info("No nodes to orchestrate with Kubernetes. Skipping...")
self.status = "Skipped Update"
elif os.path.exists(self.manifest_path) and filecmp.cmp(
self.manifest_path, self.manifest_tmp_path
):
logger.debug(f"No update - removing {self.manifest_tmp_path}")
os.remove(self.manifest_tmp_path)
logger.info("Nothing to update")
self.status = "Updated (nothing to update)"
else:
logger.debug("Updating Kubernetes workloads")
os.rename(self.manifest_tmp_path, self.manifest_path)
self.execute(True)
logger.info("Update complete")
self.status = "Updated"
def undeploy(self, kill_nodes=True):
""" Undeploy """
logger.info("Undeploying Kubernetes workloads")
self.status = "Undeploying..."
error = False
if self._skip_check():
return
if kill_nodes:
# Delete nodes from the cluster
operation = [
"kubectl",
"delete",
"no",
"-l",
"micado.eu/node_type",
]
try:
logger.debug(f"Undeploy {operation}")
subprocess.run(operation, stderr=subprocess.PIPE, check=True)
except subprocess.CalledProcessError:
logger.debug("Got error deleting nodes")
error = True
# Delete resources in the manifest
operation = [
"kubectl",
"delete",
"-f",
self.manifest_path,
"--timeout",
"90s",
]
try:
logger.debug(f"Undeploy {operation}")
subprocess.run(operation, stderr=subprocess.PIPE, check=True)
except subprocess.CalledProcessError:
logger.debug("Had some trouble removing Kubernetes workloads...")
error = True
if error:
raise AdaptorCritical("Had some trouble removing workloads!")
logger.info("Undeployment complete")
self.status = "Undeployed"
def cleanup(self):
""" Cleanup """
logger.info("Cleaning-up...")
self.status = "cleaning-up..."
try:
os.remove(self.manifest_path)
except OSError:
logger.warning("Could not remove manifest file")
self.status = "Clean!"
def query(self, query):
""" Query """
logger.info(f"Query ID {self.ID}")
kube_config = pykube.KubeConfig.from_file("~/.kube/config")
api = pykube.HTTPClient(kube_config)
if query == "nodes":
nodes = pykube.Node.objects(api)
return [x.name for x in nodes.iterator()]
elif query == "services":
pods = pykube.Pod.objects(api)
return [x.name for x in pods.iterator()]
def _get_outputs(self):
"""Get outputs and their resultant attributes"""
logger.info("Fetching outputs...")
for output in self.tpl.outputs:
node = output.value.get_referenced_node_template()
# TODO Use ONLY is_derived_from when v9 API deprecated
if node.is_derived_from(
NodeType.DOCKER_CONTAINER
) or node.type.startswith(str(NodeType.DOCKER_CONTAINER)):
logger.debug(f"Inspect node: {node.name}")
query = output.value.attribute_name
if query == "port":
self.output.setdefault(node.name, {})[query] = query_port(
node.name
)
else:
logger.warning(f"{node.name} is not a Docker container!")
def _config_file_exists(self):
""" Check if config file was generated during translation """
return os.path.exists(self.manifest_path)
def _skip_check(self):
if not self._config_file_exists():
logger.info(f"No config generated, skipping {self.status} step...")
self.status = "Skipped"
return True
elif self.dryrun:
logger.info(
f"DRY-RUN: Kubernetes {self.status} in dry-run mode..."
)
self.status = "DRY-RUN Deployment"
return True
def _name_check_node(node):
errors = []
if "_" in node.name:
errors.append("TOSCA node names")
if "_" in (node.get_property_value("name") or ""):
errors.append("property: 'name'")
if "_" in (node.get_property_value("container_name") or ""):
errors.append("property: 'container_name'")
if errors:
errors = ", ".join(errors)
logger.error(
f"Failed name convention check (underscores) on node: {node.name}"
)
raise AdaptorCritical(
f"Underscores in node {node.name} not allowed for {errors}"
)
def query_port(service_name):
"""Queries a specific service for its port listing
Args:
service_name (string): Name of service to query
Returns:
list: port listing, or an error string if the service is not found
"""
kube_config = pykube.KubeConfig.from_file("~/.kube/config")
api = pykube.HTTPClient(kube_config)
try:
service = pykube.Service.objects(api).get_by_name(service_name)
except Exception:
return f"Service {service_name} not found"
return service.obj.get("spec", {}).get("ports", {})
|
# fmt: off
import json
import logging
import threading
import time
from collections import OrderedDict
from datetime import timedelta
from enum import Enum
from inspect import signature
from pathlib import Path
from typing import Any, Dict, get_type_hints
import requests
from ipywidgets import (HTML, Button, Checkbox, Dropdown, FloatText, HBox,
IntProgress, IntText, Label, Layout, Output,
SelectMultiple, Tab)
from ipywidgets import Text as TextWidget
from ipywidgets import VBox
from .loghandler import OutputWidgetHandler
# fmt: on
info_loghandler = OutputWidgetHandler()
sname_url = "http://{host}:{port}/{path}/services/{sname}"
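# For example (illustrative values only):
# sname_url.format(host="localhost", port=8080, path="api", sname="mnist")
# -> "http://localhost:8080/api/services/mnist"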
class Solver(Enum):
SGD = "SGD"
ADAM = "ADAM"
RMSPROP = "RMSPROP"
AMSGRAD = "AMSGRAD"
ADAGRAD = "ADAGRAD"
ADADELTA = "ADADELTA"
NESTEROV = "NESTEROV"
class SolverDropdown(Dropdown):
def __init__(self, *args, **kwargs):
Dropdown.__init__(
self, *args, options=list(e.name for e in Solver), **kwargs
)
class GPUIndex(tuple):
pass
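# GPUIndex is a marker type: annotating an __init__ parameter with it makes
# MLWidget._widget_type (below) render that parameter as a GPUSelect widget,
# which lists the GPUs reported by a stats endpoint on port 12345 of the host
# and falls back to plain indices 0-7 when that endpoint is unreachable.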
class GPUSelect(SelectMultiple):
def __init__(self, host="localhost", *args, **kwargs):
if "value" in kwargs:
kwargs["index"] = kwargs["value"]
del kwargs["value"]
if kwargs["index"] is None:
kwargs["index"] = tuple()
if isinstance(kwargs["index"], int):
kwargs["index"] = (kwargs["index"],)
try:
c = requests.get("http://{}:12345".format(host))
assert c.status_code == 200
SelectMultiple.__init__(
self,
*args,
options=list(
"GPU {index} ({utilization}%)".format(
index=x["index"], utilization=x["utilization.gpu"]
)
for x in c.json()["gpus"]
),
**kwargs
)
except Exception:
SelectMultiple.__init__(
self,
*args,
options=list(range(8)), # default, just in case
**kwargs
)
# -- Core 'abstract' widget for many tasks
class MLWidget:
_fields = { # typing: Dict[str, str]
"sname": "Model name",
"training_repo": "Training directory",
"testing_repo": "Testing directory",
}
_widget_type = {
int: IntText,
float: FloatText,
bool: Checkbox,
Solver: SolverDropdown,
GPUIndex: GPUSelect,
}
# host: TextWidget
# port: TextWidget
def typing_info(self, local_vars: Dict[str, Any]):
fun = self.__init__ # type: ignore
typing_dict = get_type_hints(fun)
for param in signature(fun).parameters.values():
if param.name != "sname":
yield (
param.name,
eval(param.name, local_vars),
typing_dict[param.name],
)
@property
def status(self):
return self.status_label.value
@status.setter
def status(self, value):
label = []
if "status" in value:
label.append("status: {}".format(value["status"]))
if "time" in value:
label.append(
"elapsed time: {}".format(timedelta(seconds=value["time"]))
)
self.status_label.value = ", ".join(label)
def widgets_refresh(self, *_):
with self.output:
from . import logfile_name
with open(logfile_name, "r") as fh:
l = fh.readlines()
self.debug.value = (
"<code style='display: block; white-space: pre-wrap;'>"
+ "".join(l[-200:])
+ "</code>"
)
def __init__(self, sname: str, local_vars: Dict[str, Any], *args) -> None:
from . import logfile_name
# logger.addHandler(log_viewer(self.output),)
super().__init__(*args)
self.sname = sname
self.output = Output(layout=Layout(max_width="650px"))
self.pbar = IntProgress(
min=0,
max=100,
description="Progression",
layout=Layout(margin="18px"),
)
self.status_label = Label(
value="Status: unknown", layout=Layout(margin="18px")
)
self.run_button = Button(description="Run training")
self.info_button = Button(description="Info")
self.stop_button = Button(description="Delete service")
self.hardclear_button = Button(description="Hard clear")
self._widgets = [ # typing: List[Widget]
HTML(
value="<h2>{task} task: {sname}</h2>".format(
task=self.__class__.__name__, sname=self.sname
)
),
HBox([self.run_button, self.stop_button]),
HBox([self.info_button, self.hardclear_button]),
]
self.run_button.on_click(self.run)
self.info_button.on_click(self.info)
self.stop_button.on_click(self.stop)
self.hardclear_button.on_click(self.hardclear)
for name, value, type_hint in self.typing_info(local_vars):
self._add_widget(name, value, type_hint)
self._configuration = VBox(
self._widgets, layout=Layout(min_width="250px")
)
self._tabs = Tab(layout=Layout(height=""))
self._output = VBox([HBox([self.pbar, self.status_label]), self._tabs])
self._main_elt = HBox(
[self._configuration, self._output], layout=Layout(width="1200px")
)
self._img_explorer = VBox(
[self.output], layout=Layout(min_height="800px", width="590px")
)
self.debug = HTML(
layout={"width": "590px", "height": "800px", "border": "none"}
)
self.refresh_button = Button(description="Refresh")
self.refresh_button.on_click(self.widgets_refresh)
self._tabs.children = [
self._img_explorer,
info_loghandler.out,
VBox([self.refresh_button, self.debug]),
]
self._tabs.set_title(0, "Exploration")
self._tabs.set_title(1, "Logs (INFO)")
self._tabs.set_title(2, f"{logfile_name.split("/")[-1]} (tail)")
self.file_list = SelectMultiple(
options=[],
value=[],
rows=10,
description="File list",
layout=Layout(height="200px", width="560px"),
)
def _add_widget(self, name, value, type_hint):
widget_type = self._widget_type.get(type_hint, None)
if widget_type is None:
setattr(
self,
name,
TextWidget( # Widget type by default then convert to str
value="" if value is None else str(value),
layout=Layout(min_width="20ex", margin="-2px 2px 4px 2px"),
),
)
self._widgets.append(
VBox(
[
Label(self._fields.get(name, name) + ":"),
getattr(self, name),
]
)
)
else:
default_params = dict(
value=type_hint() if value is None else (value),
layout=Layout(width="100px", margin="4px 2px 4px 2px"),
)
if name == "gpuid":
default_params["host"] = self.host.value
setattr(self, name, widget_type(**default_params))
self._widgets.append(
HBox(
[
Label(
self._fields.get(name, name),
layout=Layout(min_width="180px"),
),
getattr(self, name),
],
layout=Layout(margin="4px 2px 4px 2px"),
)
)
def _ipython_display_(self):
self._main_elt._ipython_display_()
def stop(self, *_):
info_loghandler.out.clear_output()
self.output.clear_output()
with self.output:
request = sname_url.format(
host=self.host.value,
port=self.port.value,
path=self.path.value,
sname=self.sname,
)
c = requests.delete(request)
logging.info(
"Stop service {sname}: {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
json_dict = c.json()
if "head" in json_dict:
self.status = json_dict["head"]
print(json.dumps(json_dict, indent=2))
return json_dict
def hardclear(self, *_):
# The basic version
info_loghandler.out.clear_output()
self.output.clear_output()
with self.output:
MLWidget.create_service(self)
request = (sname_url + "?clear=full").format(
host=self.host.value,
port=self.port.value,
path=self.path.value,
sname=self.sname,
)
c = requests.delete(request)
logging.info(
"Clearing (full) service {sname}: {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
json_dict = c.json()
if "head" in json_dict:
self.status = json_dict["head"]
print(json.dumps(json_dict, indent=2))
# return json_dict
def create_service(self, *_):
info_loghandler.out.clear_output()
with self.output:
host = self.host.value
port = self.port.value
body = OrderedDict(
[
("mllib", "caffe"),
("description", self.sname),
("type", "supervised"),
(
"parameters",
{
"mllib": {"nclasses": 42}, # why not?
"input": {"connector": "csv"},
},
),
(
"model",
{
"repository": self.model_repo.value,
"create_repository": True,
# "templates": "../templates/caffe/"
},
),
]
)
logging.info(
"Creating service '{sname}':\n {body}".format(
sname=self.sname, body=json.dumps(body, indent=2)
)
)
c = requests.put(
sname_url.format(
host=host, port=port, path=self.path.value, sname=self.sname
),
json.dumps(body),
)
if c.json()["status"]["code"] != 201:
logging.warning(
"Reply from creating service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
raise RuntimeError(
"Error code {code}: {msg}".format(
code=c.json()["status"]["dd_code"],
msg=c.json()["status"]["dd_msg"],
)
)
else:
logging.info(
"Reply from creating service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
json_dict = c.json()
if "head" in json_dict:
self.status = json_dict["head"]
print(json.dumps(json_dict, indent=2))
return json_dict
def run(self, *_):
logging.info("Entering run method")
self.output.clear_output()
with self.output:
host = self.host.value
port = self.port.value
body = self._create_service_body()
logging.info(
"Sending request "
+ sname_url.format(
host=host, port=port, path=self.path.value, sname=self.sname
)
)
c = requests.get(
sname_url.format(
host=host, port=port, path=self.path.value, sname=self.sname
)
)
logging.info(
"Current state of service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
if c.json()["status"]["msg"] != "NotFound":
# self.clear()
logging.warning(
(
"Since service '{sname}' was still there, "
"it has been fully cleared: {json}"
).format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
logging.info(
"Creating service '{sname}':\n {body}".format(
sname=self.sname, body=json.dumps(body, indent=2)
)
)
c = requests.put(
sname_url.format(
host=host, port=port, path=self.path.value, sname=self.sname
),
json.dumps(body),
)
if c.json()["status"]["code"] != 201:
logging.warning(
"Reply from creating service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
raise RuntimeError(
"Error code {code}: {msg}".format(
code=c.json()["status"]["dd_code"],
msg=c.json()["status"]["dd_msg"],
)
)
else:
logging.info(
"Reply from creating service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
body = self._train_body()
logging.info(
"Start training phase: {body}".format(
body=json.dumps(body, indent=2)
)
)
c = requests.post(
"http://{host}:{port}/{path}/train".format(
host=host, port=port, path=self.path.value
),
json.dumps(body),
)
logging.info(
"Reply from training service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
json_dict = c.json()
if "head" in json_dict:
self.status = json_dict["head"]
print(json.dumps(json_dict, indent=2))
self.value = self.iterations.value
self.pbar.bar_style = "info"
self.pbar.max = self.iterations.value
thread = threading.Thread(target=self.update_loop)
thread.start()
def update_loop(self):
while True:
info = self.info(print_output=False)
self.pbar.bar_style = ""
status = info["head"]["status"]
if status == "finished":
self.pbar.value = self.iterations.value
self.pbar.bar_style = "success"
self.on_finished(info)
break
self.pbar.value = info["body"]["measure"].get("iteration", 0)
time.sleep(1)
def on_finished(self, info):
# at a minimum, keep the latest training info around...
self.last_info = info
def info(self, print_output=True):
with self.output:
# TODO job number
request = (
"http://{host}:{port}/{path}/train?service={sname}&"
"job=1&timeout=10".format(
host=self.host.value,
port=self.port.value,
path=self.path.value,
sname=self.sname,
)
)
c = requests.get(request)
logging.debug(
"Getting info for service {sname}: {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
json_dict = c.json()
if "head" in json_dict:
self.status = json_dict["head"]
if print_output:
print(json.dumps(json_dict, indent=2))
return json_dict
def update_label_list(self, _):
with self.output:
if self.training_repo.value != "":
self.train_labels.options = tuple(
sorted(
f.stem for f in Path(self.training_repo.value).glob("*")
)
)
self.train_labels.rows = min(10, len(self.train_labels.options))
if self.testing_repo.value != "":
self.test_labels.options = tuple(
sorted(
f.stem for f in Path(self.testing_repo.value).glob("*")
)
)
self.test_labels.rows = min(10, len(self.test_labels.options))
if self.nclasses.value == -1:
self.nclasses.value = str(len(self.train_labels.options))
| # fmt: off
import json
import logging
import threading
import time
from collections import OrderedDict
from datetime import timedelta
from enum import Enum
from inspect import signature
from pathlib import Path
from typing import Any, Dict, get_type_hints
import requests
from ipywidgets import (HTML, Button, Checkbox, Dropdown, FloatText, HBox,
IntProgress, IntText, Label, Layout, Output,
SelectMultiple, Tab)
from ipywidgets import Text as TextWidget
from ipywidgets import VBox
from .loghandler import OutputWidgetHandler
# fmt: on
info_loghandler = OutputWidgetHandler()
sname_url = "http://{host}:{port}/{path}/services/{sname}"
class Solver(Enum):
SGD = "SGD"
ADAM = "ADAM"
RMSPROP = "RMSPROP"
AMSGRAD = "AMSGRAD"
ADAGRAD = "ADAGRAD"
ADADELTA = "ADADELTA"
NESTEROV = "NESTEROV"
class SolverDropdown(Dropdown):
def __init__(self, *args, **kwargs):
Dropdown.__init__(
self, *args, options=list(e.name for e in Solver), **kwargs
)
class GPUIndex(tuple):
pass
class GPUSelect(SelectMultiple):
def __init__(self, host="localhost", *args, **kwargs):
if "value" in kwargs:
kwargs["index"] = kwargs["value"]
del kwargs["value"]
if kwargs["index"] is None:
kwargs["index"] = tuple()
if isinstance(kwargs["index"], int):
kwargs["index"] = (kwargs["index"],)
try:
c = requests.get("http://{}:12345".format(host))
assert c.status_code == 200
SelectMultiple.__init__(
self,
*args,
options=list(
"GPU {index} ({utilization}%)".format(
index=x["index"], utilization=x["utilization.gpu"]
)
for x in c.json()["gpus"]
),
**kwargs
)
except Exception:
SelectMultiple.__init__(
self,
*args,
options=list(range(8)), # default, just in case
**kwargs
)
# -- Core 'abstract' widget for many tasks
class MLWidget:
_fields = { # typing: Dict[str, str]
"sname": "Model name",
"training_repo": "Training directory",
"testing_repo": "Testing directory",
}
_widget_type = {
int: IntText,
float: FloatText,
bool: Checkbox,
Solver: SolverDropdown,
GPUIndex: GPUSelect,
}
# host: TextWidget
# port: TextWidget
def typing_info(self, local_vars: Dict[str, Any]):
fun = self.__init__ # type: ignore
typing_dict = get_type_hints(fun)
for param in signature(fun).parameters.values():
if param.name != "sname":
yield (
param.name,
eval(param.name, local_vars),
typing_dict[param.name],
)
@property
def status(self):
return self.status_label.value
@status.setter
def status(self, value):
label = []
if "status" in value:
label.append("status: {}".format(value["status"]))
if "time" in value:
label.append(
"elapsed time: {}".format(timedelta(seconds=value["time"]))
)
self.status_label.value = ", ".join(label)
def widgets_refresh(self, *_):
with self.output:
from . import logfile_name
with open(logfile_name, "r") as fh:
l = fh.readlines()
self.debug.value = (
"<code style='display: block; white-space: pre-wrap;'>"
+ "".join(l[-200:])
+ "</code>"
)
def __init__(self, sname: str, local_vars: Dict[str, Any], *args) -> None:
from . import logfile_name
# logger.addHandler(log_viewer(self.output),)
super().__init__(*args)
self.sname = sname
self.output = Output(layout=Layout(max_width="650px"))
self.pbar = IntProgress(
min=0,
max=100,
description="Progression",
layout=Layout(margin="18px"),
)
self.status_label = Label(
value="Status: unknown", layout=Layout(margin="18px")
)
self.run_button = Button(description="Run training")
self.info_button = Button(description="Info")
self.stop_button = Button(description="Delete service")
self.hardclear_button = Button(description="Hard clear")
self._widgets = [ # typing: List[Widget]
HTML(
value="<h2>{task} task: {sname}</h2>".format(
task=self.__class__.__name__, sname=self.sname
)
),
HBox([self.run_button, self.stop_button]),
HBox([self.info_button, self.hardclear_button]),
]
self.run_button.on_click(self.run)
self.info_button.on_click(self.info)
self.stop_button.on_click(self.stop)
self.hardclear_button.on_click(self.hardclear)
for name, value, type_hint in self.typing_info(local_vars):
self._add_widget(name, value, type_hint)
self._configuration = VBox(
self._widgets, layout=Layout(min_width="250px")
)
self._tabs = Tab(layout=Layout(height=""))
self._output = VBox([HBox([self.pbar, self.status_label]), self._tabs])
self._main_elt = HBox(
[self._configuration, self._output], layout=Layout(width="1200px")
)
self._img_explorer = VBox(
[self.output], layout=Layout(min_height="800px", width="590px")
)
self.debug = HTML(
layout={"width": "590px", "height": "800px", "border": "none"}
)
self.refresh_button = Button(description="Refresh")
self.refresh_button.on_click(self.widgets_refresh)
self._tabs.children = [
self._img_explorer,
info_loghandler.out,
VBox([self.refresh_button, self.debug]),
]
self._tabs.set_title(0, "Exploration")
self._tabs.set_title(1, "Logs (INFO)")
self._tabs.set_title(2, f"{logfile_name.split('/')[-1]} (tail)")
self.file_list = SelectMultiple(
options=[],
value=[],
rows=10,
description="File list",
layout=Layout(height="200px", width="560px"),
)
def _add_widget(self, name, value, type_hint):
widget_type = self._widget_type.get(type_hint, None)
if widget_type is None:
setattr(
self,
name,
TextWidget( # Widget type by default then convert to str
value="" if value is None else str(value),
layout=Layout(min_width="20ex", margin="-2px 2px 4px 2px"),
),
)
self._widgets.append(
VBox(
[
Label(self._fields.get(name, name) + ":"),
getattr(self, name),
]
)
)
else:
default_params = dict(
value=type_hint() if value is None else (value),
layout=Layout(width="100px", margin="4px 2px 4px 2px"),
)
if name == "gpuid":
default_params["host"] = self.host.value
setattr(self, name, widget_type(**default_params))
self._widgets.append(
HBox(
[
Label(
self._fields.get(name, name),
layout=Layout(min_width="180px"),
),
getattr(self, name),
],
layout=Layout(margin="4px 2px 4px 2px"),
)
)
def _ipython_display_(self):
self._main_elt._ipython_display_()
def stop(self, *_):
info_loghandler.out.clear_output()
self.output.clear_output()
with self.output:
request = sname_url.format(
host=self.host.value,
port=self.port.value,
path=self.path.value,
sname=self.sname,
)
c = requests.delete(request)
logging.info(
"Stop service {sname}: {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
json_dict = c.json()
if "head" in json_dict:
self.status = json_dict["head"]
print(json.dumps(json_dict, indent=2))
return json_dict
def hardclear(self, *_):
# The basic version
info_loghandler.out.clear_output()
self.output.clear_output()
with self.output:
MLWidget.create_service(self)
request = (sname_url + "?clear=full").format(
host=self.host.value,
port=self.port.value,
path=self.path.value,
sname=self.sname,
)
c = requests.delete(request)
logging.info(
"Clearing (full) service {sname}: {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
json_dict = c.json()
if "head" in json_dict:
self.status = json_dict["head"]
print(json.dumps(json_dict, indent=2))
# return json_dict
def create_service(self, *_):
info_loghandler.out.clear_output()
with self.output:
host = self.host.value
port = self.port.value
body = OrderedDict(
[
("mllib", "caffe"),
("description", self.sname),
("type", "supervised"),
(
"parameters",
{
"mllib": {"nclasses": 42}, # why not?
"input": {"connector": "csv"},
},
),
(
"model",
{
"repository": self.model_repo.value,
"create_repository": True,
# "templates": "../templates/caffe/"
},
),
]
)
logging.info(
"Creating service '{sname}':\n {body}".format(
sname=self.sname, body=json.dumps(body, indent=2)
)
)
c = requests.put(
sname_url.format(
host=host, port=port, path=self.path.value, sname=self.sname
),
json.dumps(body),
)
if c.json()["status"]["code"] != 201:
logging.warning(
"Reply from creating service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
raise RuntimeError(
"Error code {code}: {msg}".format(
code=c.json()["status"]["dd_code"],
msg=c.json()["status"]["dd_msg"],
)
)
else:
logging.info(
"Reply from creating service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
json_dict = c.json()
if "head" in json_dict:
self.status = json_dict["head"]
print(json.dumps(json_dict, indent=2))
return json_dict
def run(self, *_):
logging.info("Entering run method")
self.output.clear_output()
with self.output:
host = self.host.value
port = self.port.value
body = self._create_service_body()
logging.info(
"Sending request "
+ sname_url.format(
host=host, port=port, path=self.path.value, sname=self.sname
)
)
c = requests.get(
sname_url.format(
host=host, port=port, path=self.path.value, sname=self.sname
)
)
logging.info(
"Current state of service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
if c.json()["status"]["msg"] != "NotFound":
# self.clear()
logging.warning(
(
"Since service '{sname}' was still there, "
"it has been fully cleared: {json}"
).format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
logging.info(
"Creating service '{sname}':\n {body}".format(
sname=self.sname, body=json.dumps(body, indent=2)
)
)
c = requests.put(
sname_url.format(
host=host, port=port, path=self.path.value, sname=self.sname
),
json.dumps(body),
)
if c.json()["status"]["code"] != 201:
logging.warning(
"Reply from creating service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
raise RuntimeError(
"Error code {code}: {msg}".format(
code=c.json()["status"]["dd_code"],
msg=c.json()["status"]["dd_msg"],
)
)
else:
logging.info(
"Reply from creating service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
body = self._train_body()
logging.info(
"Start training phase: {body}".format(
body=json.dumps(body, indent=2)
)
)
c = requests.post(
"http://{host}:{port}/{path}/train".format(
host=host, port=port, path=self.path.value
),
json.dumps(body),
)
logging.info(
"Reply from training service '{sname}': {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
json_dict = c.json()
if "head" in json_dict:
self.status = json_dict["head"]
print(json.dumps(json_dict, indent=2))
self.value = self.iterations.value
self.pbar.bar_style = "info"
self.pbar.max = self.iterations.value
thread = threading.Thread(target=self.update_loop)
thread.start()
def update_loop(self):
while True:
info = self.info(print_output=False)
self.pbar.bar_style = ""
status = info["head"]["status"]
if status == "finished":
self.pbar.value = self.iterations.value
self.pbar.bar_style = "success"
self.on_finished(info)
break
self.pbar.value = info["body"]["measure"].get("iteration", 0)
time.sleep(1)
def on_finished(self, info):
# at a minimum, keep the latest training info around...
self.last_info = info
def info(self, print_output=True):
with self.output:
# TODO job number
request = (
"http://{host}:{port}/{path}/train?service={sname}&"
"job=1&timeout=10".format(
host=self.host.value,
port=self.port.value,
path=self.path.value,
sname=self.sname,
)
)
c = requests.get(request)
logging.debug(
"Getting info for service {sname}: {json}".format(
sname=self.sname, json=json.dumps(c.json(), indent=2)
)
)
json_dict = c.json()
if "head" in json_dict:
self.status = json_dict["head"]
if print_output:
print(json.dumps(json_dict, indent=2))
return json_dict
def update_label_list(self, _):
with self.output:
if self.training_repo.value != "":
self.train_labels.options = tuple(
sorted(
f.stem for f in Path(self.training_repo.value).glob("*")
)
)
self.train_labels.rows = min(10, len(self.train_labels.options))
if self.testing_repo.value != "":
self.test_labels.options = tuple(
sorted(
f.stem for f in Path(self.testing_repo.value).glob("*")
)
)
self.test_labels.rows = min(10, len(self.test_labels.options))
if self.nclasses.value == -1:
self.nclasses.value = str(len(self.train_labels.options))
|
from .units import Quantity, units
from .common import (
invert_dict,
CP_symbUpper_to_units,
preferred_units_from_type,
preferred_units_from_symbol,
)
from .realfluid import Properties as rfprop
from .plotting import PropertyPlot, plt
import CoolProp
from CoolProp.CoolProp import HAPropsSI,set_reference_state
import numpy as np
import re
from numpy import floor,ceil,log10
import functools
# Default CoolProp units for symbols
CP_HA_units_to_symb = {
'K':['T','B','Twb','T_wb','WetBulb','D','Tdp','DewPoint','T_dp','Tdb','T_db'],
'Pa':['P','P_w'],
'J/kg_dry_air/K':['C','cp','Cw','cpw','CV','S','Sda','Entropy'],
'J/kg_humid_air/K':['Cha','cp_ha','CVha','cv_ha','Sha'],
'J/kg_dry_air':['H','Hda','Enthalpy'],
'J/kg_humid_air':['Hha'],
'J/kg_water':['Hw'],
'W/m/degK':['K','k','Conductivity'],
'Pa*s':['M','Visc','mu'],
'mol_water/mol_humid_air':['psi_w','Y'],
'm^3/kg_dry_air':['V','Vda'],
'm^3/kg_humid_air':['Vha'],
'kg_water/kg_dry_air':['W','Omega','HumRat'],
' ':['R','RH','RelHum','phi']
}
CP_HA_symb_to_units = invert_dict(CP_HA_units_to_symb)
CP_HA_trans_inv = {
'Twb':['B','Twb','T_wb','WetBulb'],
'Tdb':['Tdb','T_db','DryBulb','T'],
'Tdp':['Tdp','D','DewPoint','T_dp'],
'C':['C','cp','Cp','C_p','c_p'],
'Cha':['Cha','C_ha','cha','c_ha'],
'Cv':['Cv','cv','c_v'],
'Cvha':['Cvha','Cv_ha','cvha','c_v_ha'],
'H':['H','Hda','Enthalpy','h','hda','h_da'],
'Hha':['Hha','h_ha','hha','Enthalpy_Humid_Air'],
'K':['K','k','conductivity','Conductivity'],
'M':['M','Visc','mu','viscosity'],
'Y':['Y','psi_w','mole_fraction','y'],
'P':['P','p','pressure'],
'P_w':['P_w','p_w','partial_pressure_water'],
'R':['R','RelHum','RH','rel_hum','phi'],
'S':['S','s','sda','Sda','s_da','Entropy'],
'Sha':['Sha','s_ha','sha'],
'V':['V','v','v_da','vda'],
'Vha':['Vha','v_ha','vha'],
'W':['W','w','Omega','HumRat','spec_hum','specific_humidity','omega','humidity','absolute_humidity'],
'Z':['Z','compressibility_factor'],
}
CP_HA_trans = invert_dict(CP_HA_trans_inv)
CP_HA_symb_to_local = {
'Twb':'T_wb',
'Tdb':'T_db',
'Tdp':'T_dp',
'C':'Cp',
'Cha':'Cp_ha',
'Cv':'Cv',
'Cvha':'Cv_ha',
'H':'h',
'Hha':'h_ha',
'K':'conductivity',
'M':'viscosity',
'Y':'psi_w',
'P':'p',
'P_w':'p_w',
'R':'rel_hum',
'S':'s',
'Sha':'s_ha',
'V':'v',
'Vha':'v_ha',
'W':'spec_hum',
'Z':'Z'
}
CP_HA_type_to_symb = {
'temperature':['B','Twb','T_wb','WetBulb','Tdb','T_db','DryBulb','T','Tdp','D','DewPoint','T_dp'],
'pressure':['P','p','pressure','P_w','p_w','partial_pressure_water'],
'density':['D','d','rho'],
'dry air specific volume':['V','v','v_da','vda'],
'humid air specific volume':['Vha','v_ha','vha'],
'dry air specific energy':['H','Hda','Enthalpy','h','hda','h_da'],
'humid air specific energy':['Hha','h_ha','hha','Enthalpy_Humid_Air'],
'dry air specific heat':['C','cp','Cp','C_p','c_p','Cv','cv','c_v'],
'dry air specific entropy':['S','s','sda','Sda','s_da','Entropy'],
'humid air specific heat':['Cha','C_ha','cha','c_ha','Cvha','Cv_ha','cvha','c_v_ha'],
'humid air specific entropy':['Sha','s_ha','sha'],
'conductivity':['K','k','conductivity','Conductivity'],
'viscosity':['M','Visc','mu','viscosity'],
'water mole fraction':['Y','psi_w','y'],
'humidity ratio':['W','Omega','HumRat','spec_hum','specific_humidity','omega','humidity','absolute_humidity'],
'dimensionless':['R','RelHum','RH','rel_hum','phi','Z']
}
CP_HA_symb_to_type = invert_dict(CP_HA_type_to_symb)
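# Illustrative lookups built above (assuming invert_dict maps each list element
# back to its key):
# CP_HA_trans['rel_hum']      -> 'R'
# CP_HA_symb_to_units['R']    -> ' '  (dimensionless)
# CP_HA_symb_to_units['H']    -> 'J/kg_dry_air'
# CP_HA_symb_to_type['H']     -> 'dry air specific energy'
# CP_HA_symb_to_local['Tdb']  -> 'T_db'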
def PropertyLookup(
desired,
unit_system=None,
verbose=False,
**kwargs,
):
"""
Each of the following properties/parameters is expected to be a quantity with units.
:param desired: dependent property to evaluate, determined from the independent properties below
:param T: dry-bulb Temperature (Default value = None)
:param T_wb: wet-bulb Temperature (Default value = None)
:param T_dp: dew-point Temperature (Default value = None)
:param p: pressure (Default value = None)
:param p_w: partial pressure of water vapor (Default value = None)
:param w: humidity ratio (Default value = None)
:param v: mixture volume per unit dry air (Default value = None)
:param v_ha: mixture volume per unit humid air (Default value = None)
:param h: mixture enthalpy per unit dry air (Default value = None)
:param h_ha: mixture enthalpy per unit humid air (Default value = None)
:param s: mixture entropy per unit dry air (Default value = None)
:param rel_hum: relative humidity (Default value = None)
:param y: water mole fraction (Default value = None)
:param unit_system: unit system for return value - one of 'SI_C', 'SI_K', 'English_F', 'English_R' (Default value = None, i.e. use the globally preferred units)
:param verbose: show debug information (Default value = False)
:param **kwargs:
"""
desired = CP_HA_trans[desired]
PropsSI_args =[desired] # add the desired parameter as the first argument to pass to CoolProp.PropsSI
def process_indep_arg(arg, CPSymb):
"""
Add a property symbol and its value to the CoolProp.PropSI argument string
:param arg: value of independent parameter
:param CPSymb: CoolProp symbol
"""
if arg is not None:
# if AltSymb: PropsSI_args.append(AltSymb)
# else:
PropsSI_args.append(CPSymb) # Add independent parameter symbol to argument list
if CP_HA_symb_to_units[CPSymb] is not None:
value = float(arg.to(CP_HA_symb_to_units[CPSymb]).magnitude) # Add independent parameter value to argument list with appropriate magnitude and units stripped
elif isinstance(arg,Quantity):
value = float(arg.magnitude)
else:
value = float(arg) # Add independent parameter value directly to argument list if it has no units that need to be adjusted
PropsSI_args.append(value)
for k,v in kwargs.items():
if k in CP_HA_trans.keys():
process_indep_arg(v,CP_HA_trans[k])
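# The relative-humidity + humidity-ratio (R, W) input pair is handled by the manual
# search below rather than a direct HAPropsSI call; this appears to be a workaround
# for that particular input combination: dry-bulb temperature is swept with
# progressively finer steps (5 K down to 0.01 K) until the target R is matched.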
def humidity_search(PropsSI_args):
desired = PropsSI_args[0]
for i,v in enumerate(PropsSI_args):
if v == 'P':
P = PropsSI_args[i+1]
elif v == 'R':
R_target = PropsSI_args[i+1]
elif v == 'W':
W = PropsSI_args[i+1]
T = 273.15 # starting guess
T_guess = T
n_steps = 100
search_steps = [5,-5,1,-1,0.1,-0.1,0.01,-0.01]
for step in search_steps:
cont = True
n_step = 0
while cont:
if n_step > 0:
T_guess += step
try:
R = HAPropsSI('R','T',T_guess,'W',W,'P',P)
error = abs(R_target-R)
if step>0:
T = T_guess
if R<R_target:
cont=False
elif step<0 and R<R_target:
T = T_guess
else:
cont=False
except ValueError:
if step<0: cont=False
n_step += 1
if n_step > n_steps: cont=False
if desired == 'Tdb':
return T
else:
return HAPropsSI(desired,'P',P,'W',W,'Tdb',T)
if verbose:
print('Calling: CoolProp.CoolProp.HAPropsSI({})'.format(','.join([str(i) for i in PropsSI_args])))
print(PropsSI_args)
if "R" in PropsSI_args[1:] and "W" in PropsSI_args[1:]:
result = humidity_search(PropsSI_args)
else:
result = HAPropsSI(*PropsSI_args)
# Determine the units of the value as returned from CoolProp
CP_return_units = CP_HA_symb_to_units[desired]
CP_return_type = CP_HA_symb_to_type[desired]
# Determine the preferred units for the value
if unit_system is None:
result_units = preferred_units_from_type(CP_return_type, units.preferred_units)
else:
result_units = preferred_units_from_type(CP_return_type, unit_system)
# Convert the returned value to the preferred units
if result_units is not None:
result = Quantity(result,CP_return_units).to(result_units)
return result
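# Minimal usage sketch for PropertyLookup (illustrative only; assumes the pint-backed
# Quantity used throughout this module):
#   h_1 = PropertyLookup('h', T=Quantity(30, 'degC'), T_wb=Quantity(20, 'degC'), p=Quantity(1, 'atm'))
#   w_1 = PropertyLookup('w', T=Quantity(30, 'degC'), T_wb=Quantity(20, 'degC'), p=Quantity(1, 'atm'))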
class Properties:
"""
A class to return thermodynamic properties of humid air (dry air/water-vapor mixtures)
:param p: pressure (Default value = 1 atm)
:param unit_system: units for return values - e.g. 'SI_C','SI_K','English_F','English_R' (Default = 'kSI_C')
:returns: an object with methods to evaluate humid air properties
"""
def __init__(self, p=None, unit_system="kSI_C"):
self.fluid='humidair'
if p is None:
self.__p = Quantity(1.0,'atm')
else:
self.__p = p
self.unit_system = unit_system
# legacy definitions/aliases
self.relhum = self.phi = self.rel_hum
self.omega = self.hum_rat = self.humrat = self.w
self.Cp = self.cp
self.Cv = self.cv
self.mu = self.viscosity
self.nu = self.kinematic_viscosity
self.water = rfprop('Water',unit_system=unit_system)
def _lookup(self, desired, **kwargs):
"""
Call PropertyLookup to evaluate the desired property for the independent properties specified
as keyword arguments
:param desired: desired property
:param **kwargs: any two (plus pressure) dimensional quantities of T,T_wb,T_dp,p,p_w,w,v,v_ha,h,h_ha,s,s_ha,rel_hum,y
"""
unit_system = kwargs.pop('unit_system',self.unit_system)
return PropertyLookup(
desired, unit_system=unit_system, **kwargs  # honor a per-call unit_system override
)
def _update_kwargs(self, args, kwargs, water=False):
"""use argument unit to identify appropriate keyword"""
for arg in args:
if isinstance(arg, Quantity):
try:
arg_symb = arg.property_symbol
arg_dict = {arg_symb:arg}
kwargs = dict(**arg_dict, **kwargs)
except:
try:
arg.to('K') # Temperature
kwargs = dict(T=arg, **kwargs)
except:
try:
arg.to('kPa') # pressure
kwargs = dict(p=arg, **kwargs)
except:
try:
arg.to('m^3/kg') # specific volume
kwargs = dict(v=arg, **kwargs)
except:
try:
arg.to('kJ/kg/K') # entropy
kwargs = dict(s=arg, **kwargs)
except:
try:
arg.to('J/kg_dry_air') # enthalpy
kwargs = dict(h=arg, **kwargs)
except:
try:
arg.to('J/kg_humid_air') # enthalpy humid air
kwargs = dict(h_ha=arg, **kwargs)
except:
try:
arg.to('kg_water/kg_dry_air') # humidity ratio
kwargs = dict(w=arg, **kwargs)
except:
try:
if arg.dimensionless and (0<= arg <= 1): # relative humidity
kwargs = dict(rel_hum=arg, **kwargs)
except:
print(f'Unable to determine property type for {arg} based on units')
elif 0<= arg <= 1: # relative humidity (bare number)
kwargs = dict(rel_hum=arg, **kwargs)
if not water and "p" not in kwargs.keys():
kwargs = dict(p=self.__p, **kwargs)
return kwargs
@property
def p(self):
"""
set or retrieve pressure for humid air
example:
>> humair.p = Quantity(1,'atm')
>> humair.p
'1 atm'
:param pressure: pressure as a dimensional quantity
:returns: pressure as a dimensional quantity
"""
return self.__p
@p.setter
def p(self, pressure):
self.__p = pressure
def T(self, *args, **kwargs):
"""
Dry-bulb Temperature from two independent intensive properties
example:
>> humair.T(rel_hum=rel_hum_2, h=h_1)
:param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: Dry-bulb Temperature as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("T", **kwargs)
def T_wb(self, *args, **kwargs):
"""
Wet-bulb Temperature from two independent intensive properties
example:
>> humair.T_wb(rel_hum=rel_hum_2, h=h_1)
:param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: Wet-bulb Temperature as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("T_wb", **kwargs)
def T_dp(self, *args, **kwargs):
"""
Dew-point Temperature from two independent intensive properties
example:
>> humair.T_dp(rel_hum=rel_hum_2, h=h_1)
:param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: Dew-point Temperature as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("T_dp", **kwargs)
def w(self, *args, **kwargs):
"""
humidity ratio from two independent intensive properties
example:
>> fluid.w(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,T_wb,T_dp,p,p_w,v,v_ha,h,h_ha,s,rel_hum,y
:returns: humidity ratio as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("w", **kwargs)
def v(self, *args, **kwargs):
"""
mixture volume per unit of dry air from two independent intensive properties
example:
>> fluid.v(T=T_1, p=p_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: specific volume per unit dry air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("v", **kwargs)
def v_ha(self, *args, **kwargs):
"""
mixture volume per unit of humid air from two independent intensive properties
example:
>> fluid.v_ha(T=T_1, p=p_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: specific volume per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("v_ha", **kwargs)
def v_w(self, *args, **kwargs):
"""
specific volume of water per unit of humid water from two independent intensive properties
example:
>> fluid.v_w(T=T_1, x=x_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: specific volume of water as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs,water=True)
return Quantity(self.water.v(**kwargs).to('m^3/kg').magnitude, 'm^3/kg_water')
def h(self, *args, **kwargs):
"""
enthalpy per unit dry air from two independent intensive properties
example:
>> fluid.h(T=T_1, rel_hum=re1_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: specific enthalpy per unit dry air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("h", **kwargs)
def h_ha(self, *args, **kwargs):
"""
enthalpy per unit humid air from two independent intensive properties
example:
>> fluid.h_ha(T=T_1, rel_hum=re1_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: specific enthalpy per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("h_ha", **kwargs)
def h_w(self, *args, **kwargs):
"""
specific enthalpy of water per unit of humid water from two independent intensive properties
example:
>> fluid.h_w(T=T_1, x=x_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: specific enthalpy of water as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs,water=True)
return Quantity(self.water.h(**kwargs).to('kJ/kg').magnitude, 'kJ/kg_water')
def s(self, *args, **kwargs):
"""
entropy per unit dry air from two independent intensive properties
example:
>> fluid.s(T=T_1, h=h_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: specific entropy per unit dry air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("s", **kwargs)
def s_ha(self, *args, **kwargs):
"""
entropy per unit humid air from two independent intensive properties
example:
>> fluid.s_ha(T=T_1, h=h_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: specific entropy per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("s_ha", **kwargs)
def s_w(self, *args, **kwargs):
"""
specific entropy of water per unit of humid water from two independent intensive properties
example:
>> fluid.s_w(T=T_1, x=x_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: specific entropy of water as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs,water=True)
return Quantity(self.water.s(**kwargs).to('kJ/kg/K').magnitude, 'kJ/kg_water/K')
def rel_hum(self, *args, **kwargs):
"""
relative humidity from two independent intensive properties
example:
>> fluid.rel_hum(T=T_1, h=h_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: relative humidity as a dimensionless quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("rel_hum", **kwargs)
def y(self, *args, **kwargs):
"""
water mole fraction from two independent intensive properties
example:
>> fluid.y(T=T_1, h=h_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: water mole fraction as a dimensionless quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("Y", **kwargs)
def cp(self, *args, **kwargs):
"""
specific heat per unit dry air from two independent intensive properties
example:
>> fluid.cp(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: specific heat per unit dry air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("cp", **kwargs)
def cp_ha(self, *args, **kwargs):
"""
specific heat per unit humid air from two independent intensive properties
example:
>> fluid.cp_ha(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: specific heat per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("cp_ha", **kwargs)
def cv(self, *args, **kwargs):
"""
constant volume specific heat per unit dry air from two independent intensive properties
example:
>> fluid.cv(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: constant volume specific heat per unit dry air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("cv", **kwargs)
def cv_ha(self, *args, **kwargs):
"""
constant volume specific heat per unit humid air from two independent intensive properties
example:
>> fluid.cv_ha(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: constant volume specific heat per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("cv_ha", **kwargs)
def conductivity(self, *args, **kwargs):
"""
thermal conductivity from two independent intensive properties
example:
>> fluid.conductivity(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: thermal conductivity as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("k", **kwargs)
def viscosity(self, *args, **kwargs):
"""
dynamic viscosity from two independent intensive properties
example:
>> fluid.viscosity(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: dynamic viscosity as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("mu", **kwargs)
def kinematic_viscosity(self, *args, **kwargs):
"""
kinematic viscosity from two independent intensive properties
example:
>> fluid.kinematic_viscosity(T=T1, p=p1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: kinematic viscosity as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("viscosity", **kwargs)/self._lookup("v", **kwargs)
def Z(self, *args, **kwargs):
"""
Compressibility factor
example:
>> fluid.Z(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar,d_molar
:returns: Compressibility factor as a dimensionless quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("Z", **kwargs)
def property_diagram(
self,
x=None,
y=None,
x_units=None,
y_units=None,
saturation=False,
unit_system=None,
**kwargs,
):
unit_system = unit_system or self.unit_system
return PropertyPlot(
x=x,
y=y,
x_units=x_units,
y_units=y_units,
property_table=self,
saturation=saturation,
unit_system=unit_system,
**kwargs,
)
def format_units(self,units,displaystyle=True):
units = re.sub('_water','_w',units)
units = re.sub('_dry_air','_a',units)
units = re.sub('deg',r'^\\circ{}\!',units)
match = re.match('(.*)/(.*)',units)
if match and displaystyle:
units = f'\\frac{{{match.group(1)}}}{{{match.group(2)}}}'
return units
def rounded_array(self,val1,val2,n=20,spacing=None):
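# Build an array of "nice" axis values between val1 and val2: either at the fixed
# spacing given, or (when spacing is None) by trying candidate spacings of
# 1, 2, 2.5, 5 and 10 times a power of ten and keeping the one whose length is
# closest to n, trimming any values that fall outside [val1, val2].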
if spacing is not None:
spacing_mag = floor(log10(spacing))
start = spacing*10**spacing_mag*round(val1/(spacing*10**spacing_mag))
ret_array = np.arange(start, val2+spacing, spacing)
else:
dir = 1 if val2>val1 else -1
delta = abs(val2-val1)
mag_delta = floor(log10(delta))
spacing = round(delta/n,-int(floor(log10(delta/n))))
spacing_mag = floor(log10(spacing))
spacings={}
lists={}
lengths={}
for i in [1,2,2.5,5,10]:
spacings[i] = dir*i*10**spacing_mag
start = i*10**spacing_mag*round(val1/(i*10**spacing_mag))
if spacings[i] == 0: spacings[i] = i*10**spacing_mag
lists[i] = np.arange(start,val2+spacings[i],spacings[i])
if lists[i][0] == -0: lists[i][0]=0
lengths[i] = len(lists[i])
kys= list(lengths.keys())
lst = list(lengths.values())
L = lst[min(range(len(lst)), key = lambda i: abs(lst[i]-n))]
K = kys[lst.index(L)]
ret_array = lists[K]
if ret_array[0] == -0: ret_array[0]=0
if ret_array[-1]>val2 or ret_array[-1]<val1: ret_array = ret_array[:-1]
if ret_array[0]<val1 or ret_array[-1]>val2: ret_array = ret_array[1:]
return ret_array
def psychrometric_chart(
self,
Tmin=None,
Tmax=None,
wmin=None,
wmax=None,
main_labels_color=None,
major_grid_style=None,
minor_grid_style=None,
n_h = 15,
n_v = 20,
h_isoline_style=None,
v_isoline_style=None,
rel_hum_isoline_style=None,
Twb_isoline_style=None,
unit_system=None,
redraw=False,
cache=True,
**kwargs
):
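# Thin wrapper around cached_psychrometric_chart(): if a chart is already in the
# lru_cache, the cached figure is re-shown; pass redraw=True or cache=False to force
# a rebuild. Note (assumption about intent): because of the lru_cache, any style
# overrides must be hashable -- passing dicts for the *_style arguments will raise a
# TypeError from the cache lookup, so custom styles are effectively uncached.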
if self.cached_psychrometric_chart.cache_info().currsize>0:
show_psych = True
else:
show_psych = False
if redraw or not cache:
self.cached_psychrometric_chart.cache_clear()
psych = self.cached_psychrometric_chart(
Tmin,
Tmax,
wmin,
wmax,
main_labels_color,
major_grid_style,
minor_grid_style,
n_h,
n_v,
h_isoline_style,
v_isoline_style,
rel_hum_isoline_style,
Twb_isoline_style,
unit_system,
**kwargs
)
if show_psych: psych.show()
return psych
@functools.lru_cache()
def cached_psychrometric_chart(
self,
Tmin=None,
Tmax=None,
wmin=None,
wmax=None,
main_labels_color=None,
major_grid_style=None,
minor_grid_style=None,
n_h = 15,
n_v = 20,
h_isoline_style=None,
v_isoline_style=None,
rel_hum_isoline_style=None,
Twb_isoline_style=None,
unit_system=None,
**kwargs
):
unit_system = unit_system or self.unit_system
psych = self.property_diagram(x="T", y="omega", saturation=False, unit_system=unit_system, p=self.__p, **kwargs)
# Line Styles
main_labels_color = main_labels_color or 'black'
major_grid_style = major_grid_style or dict(
linestyle='-',
linewidth=0.5,
color=[0.4,0.4,0.4,0.4]
)
minor_grid_style = minor_grid_style or dict(
linestyle='-',
linewidth=0.25,
color=[0.4,0.4,0.4,0.4]
)
h_isoline_style = h_isoline_style or dict(
linestyle='-',
linewidth=0.5,
color=[0.4,0.4,0.4,0.4],
pos=0,
labelprops=dict(
ha='right',
va='center',
pos=0.0
)
)
v_isoline_style = v_isoline_style or dict(
linestyle='-',
linewidth=0.5,
color=[0.4,0.4,0.4,0.4],
labelprops=dict(color='grey',offset=2))
rel_hum_isoline_style = rel_hum_isoline_style or dict(
linestyle='-',
linewidth=0.5,
color=[0.4,0.4,0.4,0.4],
labelprops=dict(
ha='right',
color='grey',
offset=2
)
)
Twb_isoline_style = Twb_isoline_style or dict(
linestyle=(0,(5,10)),
linewidth=0.5,
color=[0.4,0.4,0.4,0.4],
pos=0.2,
labelprops=dict(
ha='left',
color='grey',
offset=2
)
)
# Set Axis limits
if Tmin is None: Tmin = Quantity(30.0,'degF')
Tmin = Tmin.to(psych.x_units)
if Tmax is None: Tmax = Quantity(50.0,'degC')
Tmax = Tmax.to(psych.x_units)
if wmin is None: wmin = Quantity(0.0,'kg_water/kg_dry_air')
wmin = wmin.to(psych.y_units)
if wmax is None: wmax = Quantity(0.03,'kg_water/kg_dry_air')
wmax = wmax.to(psych.y_units)
psych.Tmin,psych.Tmax,psych.wmin,psych.wmax = Tmin,Tmax,wmin,wmax
psych.ax.set_xlim(left=Tmin.magnitude,right=Tmax.magnitude)
psych.ax.set_ylim(bottom=wmin.magnitude,top=wmax.magnitude)
# Set axis labels
x_units_str = f"{self.format_units(f"{psych.x_units}")}"
y_units_str = f"{self.format_units(f"{psych.y_units}")}"
psych.ax.set_xlabel(f"Dry-Bulb Temperature, $T_{{\\mathrm{{db}}}}\\ [\\mathrm{{{x_units_str}}}]$")
psych.ax.set_ylabel(f"Humidity Ratio, $\\omega\\ \\left[\mathrm{{{y_units_str}}}\\right]$")
# Set axis style
psych.ax.yaxis.tick_right()
psych.ax.yaxis.set_label_position("right")
psych.ax.spines["right"].set_visible(True)
psych.ax.spines["left"].set_visible(False)
# Add Plot Title
try:
pressure_str = f'{psych.props.p:~L}'  # prefer pint's LaTeX-style formatting
except:
pressure_str = f'{psych.props.p}'
title = f'Psychrometric Chart\nPressure: $\mathrm{{{pressure_str}}}$'
psych.text((0.05*(Tmax-Tmin)+Tmin).magnitude, (0.9*(wmax-wmin)+wmin).magnitude, title, fontsize=12)
# Draw grid
# Dry-bulb grid
tickscale=1
x_major_ticks = self.rounded_array(Tmin.magnitude,Tmax.magnitude,spacing=5)
x_minor_ticks = self.rounded_array(Tmin.magnitude,Tmax.magnitude,spacing=1)
plt.xticks(x_major_ticks)
ymin = wmin
for i in x_major_ticks:
ymax = min(psych.props.w(T_db=Quantity(i,psych.x_units),rel_hum=1),wmax)
psych.ax.plot([i,i],[ymin.magnitude,ymax.magnitude],**major_grid_style)
for i in x_minor_ticks:
ymax = min(psych.props.w(T_db=Quantity(i,psych.x_units),rel_hum=1),wmax)
psych.ax.plot([i,i],[ymin.magnitude,ymax.magnitude],**minor_grid_style)
# Humidity ratio grid
y_minor_ticks = self.rounded_array(wmin.magnitude,wmax.magnitude,spacing=0.001)
y_major_ticks = self.rounded_array(wmin.magnitude,wmax.magnitude,spacing=0.005)
plt.yticks(y_major_ticks)
xmax = Tmax
for i in y_major_ticks:
xmin=Tmin
try:
phi_left_lim = psych.props.rel_hum(T_db=Tmin,w=Quantity(i,psych.y_units))
except:
xmin = psych.props.T(w=Quantity(i,psych.y_units),rel_hum=1).to(psych.x_units)
psych.ax.plot([xmin.magnitude,xmax.magnitude],[i,i],**major_grid_style)
for i in y_minor_ticks:
xmin=Tmin
try:
phi_left_lim = psych.props.rel_hum(T_db=Tmin,w=Quantity(i,psych.y_units))
except:
xmin = psych.props.T(w=Quantity(i,psych.y_units),rel_hum=1).to(psych.x_units)
psych.ax.plot([xmin.magnitude,xmax.magnitude],[i,i],**minor_grid_style)
# Saturated line
psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=1,label=False,linestyle='-',color='black')
# Relative humidity lines
for i in [0.1]:
lstyle = dict(**rel_hum_isoline_style)
lstyle['labelprops'] = dict(**rel_hum_isoline_style['labelprops'])
lstyle['labelprops']['color'] = main_labels_color
psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=i,label=f'$\phi=10\%$',xcoor=(Tmin+0.95*(Tmax-Tmin)).magnitude,**lstyle)
for i in [0.02,0.04,0.06,0.08,0.15,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.8,0.9]:
rel_hum = i
xmin,xmax = Tmin,Tmax
if psych.props.w(rel_hum=rel_hum,T=Tmax) > wmax:
xmax = psych.props.T(w=wmax,rel_hum=rel_hum)
psych.plot_iso_line(iso_symb='rel_hum',iso_value=rel_hum,x_range=[xmin,xmax],label=f'{int(i*100)}%',ycoor=(wmin+0.95*(wmax-wmin)).magnitude,**rel_hum_isoline_style)
else:
psych.plot_iso_line(iso_symb='rel_hum',iso_value=rel_hum,x_range=[xmin,xmax],label=f'{int(i*100)}%',xcoor=(Tmin+0.95*(Tmax-Tmin)).magnitude,**rel_hum_isoline_style)
# Enthalpy lines
hmin = psych.props.h(T=Tmin,w=wmin)
hmax = psych.props.h(T=Tmax,w=wmax)
h_units = hmin.units
h_units_str = f"{self.format_units(f"{h_units}")}"
for i in self.rounded_array(hmin.magnitude,hmax.magnitude,15):
h = Quantity(i,h_units)
xmin = max(psych.props.T(h=h,rel_hum=1),Tmin,psych.props.T(h=h,w=wmax))
xmax = min(psych.props.T(h=h,w=wmin),Tmax)
try:
psych.plot_iso_line(iso_symb='h',iso_value=h,x_range=[xmin,xmax],label=f'{int(i) if i.is_integer() else i}',**h_isoline_style)
except:
pass
# Enthalpy axis label
psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=1,label=f'Enthalpy, $h$ $\\left[\\mathrm{{{h_units_str}}}\\right]$',linewidth=0,pos=0.5,labelprops=dict(offset=25))
# Specific volume lines
vmin = psych.props.v(T=Tmin,omega=wmin)
vmax = psych.props.v(T=Tmax,omega=wmax)
v_units = vmin.units
v_units_str = f"{self.format_units(f"{v_units}",displaystyle=False)}"
v_list = self.rounded_array(vmin.magnitude,vmax.magnitude,20)
v_main_label_index = int(len(v_list)*0.6)
for i,val in enumerate(v_list):
v = Quantity(val,v_units)
ymax = min(psych.props.w(v=v,rel_hum=1),wmax)
try:
ymin = max(psych.props.w(T=Tmax,v=v),wmin)
except ValueError:
ymin = wmin
v_string = int(val) if val.is_integer() else f'{val:.5}'.rstrip()
if i == v_main_label_index:
lstyle = dict(**v_isoline_style)
lstyle['labelprops'] = dict(**v_isoline_style['labelprops'])
lstyle['labelprops']['color'] = main_labels_color
psych.plot_iso_line(iso_symb='v',iso_value=v,y_range=[ymax,ymin],n_points=10,label=f'$v={v_string}\ \mathrm{{{v_units_str}}}$',pos=0.7,**lstyle)
else:
try:
psych.plot_iso_line(iso_symb='v',iso_value=v,y_range=[ymax,ymin],label=v_string,n_points=10,pos=0.7,**v_isoline_style)
except:
pass
# Wet-bulb Temperature lines
T_units = Tmin.units
T_units_str = f"{self.format_units(f"{T_units}",displaystyle=False)}"
Twb_main_label_index = int(len(x_major_ticks)*0.5)
for i,T in enumerate(x_major_ticks[:-1]):
Twb = Quantity(T,psych.x_units)
ymax = min(psych.props.w(T=Twb,rel_hum=1),wmax)
try:
ymin = max(psych.props.w(T=Tmax,T_wb=Twb),wmin)
except ValueError:
ymin = wmin
if ymin<wmax:
if i == Twb_main_label_index:
lstyle = dict(**Twb_isoline_style)
lstyle['labelprops'] = dict(**Twb_isoline_style['labelprops'])
lstyle['labelprops']['color'] = main_labels_color
psych.plot_iso_line(iso_symb='T_wb',iso_value=Twb,y_range=[ymax,ymin],n_points=10,label=f'$T_\mathrm{{wb}}={int(T)}\mathrm{{{T_units_str}}}$',**lstyle)
else:
psych.plot_iso_line(iso_symb='T_wb',iso_value=Twb,y_range=[ymax,ymin],n_points=10,label=f'${int(T)}\mathrm{{{T_units_str}}}$',**Twb_isoline_style)
return psych
def Ts_diagram(self, unit_system=None, saturation=False, **kwargs):
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="s", y="T", unit_system=unit_system, saturation=saturation, **kwargs
)
def pv_diagram(self, unit_system=None, saturation=None, log_x=None, log_y=None, **kwargs):
if self.fluid == 'Air':
saturation = saturation or False
log_x = log_x or False
log_y = log_y or False
else:
saturation = True
log_x = log_x or True
log_y = log_y or True
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="v", y="p", unit_system=unit_system, saturation=saturation, log_x=log_x, log_y=log_y, **kwargs
)
def Tv_diagram(self, unit_system=None, saturation=None, **kwargs):
if self.fluid == 'Air': saturation = saturation or False
else: saturation = saturation or True
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="v", y="T", unit_system=unit_system, saturation=saturation, **kwargs
)
def hs_diagram(self, unit_system=None, saturation=None, **kwargs):
if self.fluid == 'Air': saturation = saturation or False
else: saturation = saturation or True
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="s", y="h", unit_system=unit_system, saturation=saturation, **kwargs
)
def ph_diagram(self, unit_system=None, saturation=None, **kwargs):
if self.fluid == 'Air': saturation = saturation or False
else: saturation = saturation or True
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="h", y="p", unit_system=unit_system, saturation=saturation, **kwargs
)
def pT_diagram(self, unit_system=None, saturation=None, **kwargs):
if self.fluid == 'Air': saturation = saturation or False
else: saturation = saturation or True
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="T", y="p", unit_system=unit_system, saturation=saturation, **kwargs
)
def LegacyPropertyPlot(
x=None,
y=None,
x_units=None,
y_units=None,
plot_type=None,
fluid=None,
saturation=False,
unit_system="SI_C",
**kwargs,
):
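# NOTE: Properties() in this module takes only (p, unit_system); the `fluid` keyword
# below appears to be carried over from the realfluid version of this helper and
# would raise a TypeError if this legacy wrapper were called as written.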
props = Properties(fluid=fluid, unit_system=unit_system, **kwargs)
return PropertyPlot(
x=x,
y=y,
x_units=x_units,
y_units=y_units,
property_table=props,
saturation=saturation,
unit_system=unit_system,
**kwargs,
)
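# Minimal usage sketch for this module (illustrative only; mirrors the docstring
# examples above):
#   humair = Properties(unit_system='SI_C')      # humid air at 1 atm by default
#   T_1   = Quantity(30, 'degC')
#   Twb_1 = Quantity(20, 'degC')
#   w_1 = humair.w(T=T_1, T_wb=Twb_1)            # humidity ratio
#   h_1 = humair.h(T=T_1, T_wb=Twb_1)            # enthalpy per kg dry air
#   chart = humair.psychrometric_chart()         # build (and cache) the chart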
| from .units import Quantity, units
from .common import (
invert_dict,
CP_symbUpper_to_units,
preferred_units_from_type,
preferred_units_from_symbol,
)
from .realfluid import Properties as rfprop
from .plotting import PropertyPlot, plt
import CoolProp
from CoolProp.CoolProp import HAPropsSI,set_reference_state
import numpy as np
import re
from numpy import floor,ceil,log10
import functools
# Default CoolProps units for symbols
CP_HA_units_to_symb = {
'K':['T','B','Twb','T_wb','WetBulb','D','Tdp','DewPoint','T_dp','Tdb','T_db'],
'Pa':['P','P_w'],
'J/kg_dry_air/K':['C','cp','CV','S','Sda','Entropy'],
'J/kg_dry_air/K':['Cw','cpw','CV','S','Sda','Entropy'],
'J/kg_humid_air/K':['Cha','cp_ha','CVha','cv_ha','Sha'],
'J/kg_dry_air':['H','Hda','Enthalpy'],
'J/kg_humid_air':['Hha'],
'J/lb_water':['Hw'],
'W/m/degK':['K','k','Conductivity'],
'Pa*s':['M','Visc','mu'],
'mol_water/mol_humid_air':['psi_w','Y'],
'm^3/kg_dry_air':['V','Vda'],
'm^3/kg_humid_air':['Vha'],
'kg_water/kg_dry_air':['W','Omega','HumRat'],
' ':['R','RH','RelHum','phi']
}
CP_HA_symb_to_units = invert_dict(CP_HA_units_to_symb)
CP_HA_trans_inv = {
'Twb':['B','Twb','T_wb','WetBulb'],
'Tdb':['Tdb','T_db','DryBulb','T'],
'Tdp':['Tdp','D','DewPoint','T_dp'],
'C':['C','cp','Cp','C_p','c_p'],
'Cha':['Cha','C_ha','cha','c_ha'],
'Cv':['Cv','Cv','cv','c_v'],
'Cvha':['Cvha','Cv_ha','cvha','c_v_ha'],
'H':['H','Hda','Enthalpy','h','hda','h_da'],
'Hha':['Hha','h_ha','hha','Enthalpy_Humid_Air'],
'K':['K','k','conductivity','Conductivity'],
'M':['M','Visc','mu','viscosity'],
'Y':['Y','psi_w','mole_fraction','y'],
'P':['P','p','pressure'],
'P_w':['P_w','p_w','partial_pressure_water'],
'R':['R','RelHum','RH','rel_hum','phi'],
'S':['S','s','sda','Sda','s_da','Entropy'],
'Sha':['Sha','s_ha','sha'],
'V':['V','v','v_da','vda'],
'Vha':['Vha','v_ha','vha'],
'W':['W','w','Omega','HumRat','spec_hum','specific_humidity','omega','humidity','absolute_humidity'],
'Z':['Z','compressibility_factor'],
}
CP_HA_trans = invert_dict(CP_HA_trans_inv)
CP_HA_symb_to_local = {
'Twb':'T_wb',
'Tdb':'T_db',
'Tdp':'T_dp',
'C':'Cp',
'Cha':'Cp_ha',
'Cv':'Cv',
'Cvha':'Cv_ha',
'H':'h',
'Hha':'h_ha',
'K':'conductivity',
'M':'viscosity',
'Y':'psi_w',
'P':'p',
'P_w':'p_w',
'R':'rel_hum',
'S':'s',
'Sha':'s_ha',
'V':'v',
'Vha':'v_ha',
'W':'spec_hum',
'Z':'Z'
}
CP_HA_type_to_symb = {
'temperature':['B','Twb','T_wb','WetBulb','Tdb','T_db','DryBulb','T','Tdp','D','DewPoint','T_dp'],
'pressure':['P','p','pressure','P_w','p_w','partial_pressure_water'],
'density':['D','d','rho'],
'dry air specific volume':['V','v','v_da','vda'],
'humid air specific volume':['Vha','v_ha','vha'],
'dry air specific energy':['H','Hda','Enthalpy','h','hda','h_da'],
'humid air specific energy':['Hha','h_ha','hha','Enthalpy_Humid_Air'],
'dry air specific heat':['C','cp','Cp','C_p','c_p','Cv','Cv','cv','c_v'],
'dry air specific entropy':['S','s','sda','Sda','s_da','Entropy'],
'humid air specific heat':['Cha','C_ha','cha','c_ha','Cvha','Cv_ha','cvha','c_v_ha'],
'humid air specific entropy':['Sha','s_ha','sha'],
'conductivity':['K','k','conductivity','Conductivity'],
'viscosity':['M','Visc','mu','viscosity'],
'water mole fraction':['Y','psi_w','y'],
'humidity ratio':['W','Omega','HumRat','spec_hum','specific_humidity','omega','humidity','absolute_humidity'],
'dimensionless':['R','RelHum','RH','rel_hum','phi','Z']
}
CP_HA_symb_to_type = invert_dict(CP_HA_type_to_symb)
def PropertyLookup(
desired,
unit_system=None,
verbose=False,
**kwargs,
):
"""
Each of the follow properties/parameters is expected to be a quantity with units
:param desired: Dependent from two of the following independent properties
:param T: dry-bulb Temperature (Default value = None)
:param T_wb: wet-bulb Temperature (Default value = None)
:param T_dp: dew-point Temperature (Default value = None)
:param p: pressure (Default value = None)
:param p_w: partial pressure of water vapor (Default value = None)
:param w: humidity ratio (Default value = None)
:param v: mixture volume per unit dry air (Default value = None)
:param v_ha: mixture volume per unit humid air (Default value = None)
:param h: mixture enthalpy per unit dry air (Default value = None)
:param h_ha: mixture enthalpy per unit humid air (Default value = None)
:param s: mixture entropy per unit dry air (Default value = None)
:param rel_hum: relative humidity (Default value = None)
:param y: water mole fraction (Default value = None)
:param unit_system: unit system for return value - one of 'SI_C', 'SI_K', 'English_F', 'English_R' (Default value = )
:param verbose: show debug information (Default value = False)
:param **kwargs:
"""
desired = CP_HA_trans[desired]
PropsSI_args =[desired] # add the desired parameter as the first argument to pass to CoolProp.PropsSI
def process_indep_arg(arg, CPSymb):
"""
Add a property symbol and its value to the CoolProp.PropSI argument string
:param arg: value of independent parameter
:param CPSymb: CoolProp symbol
:param exponent: exponent used to invert the value (Default value = 1)
:param AltSymb: symbol to use for inverted values (Default value = None)
"""
if arg is not None:
# if AltSymb: PropsSI_args.append(AltSymb)
# else:
PropsSI_args.append(CPSymb) # Add independent parameter symbol to argument list
if CP_HA_symb_to_units[CPSymb] is not None:
value = float(arg.to(CP_HA_symb_to_units[CPSymb]).magnitude) # Add independent parameter value to argument list with appropriate magnitude and units stripped
elif isinstance(arg,Quantity):
value = float(arg.magnitude)
else:
value = float(arg) # Add independent paramter value directly to argument list if it has no units that need to be adjusted
PropsSI_args.append(value)
for k,v in kwargs.items():
if k in CP_HA_trans.keys():
process_indep_arg(v,CP_HA_trans[k])
def humidity_search(PropsSI_args):
desired = PropsSI_args[0]
for i,v in enumerate(PropsSI_args):
if v == 'P':
P = PropsSI_args[i+1]
elif v == 'R':
R_target = PropsSI_args[i+1]
elif v == 'W':
W = PropsSI_args[i+1]
T = 273.15 # starting guess
T_guess = T
n_steps = 100
search_steps = [5,-5,1,-1,0.1,-0.1,0.01,-0.01]
for step in search_steps:
cont = True
n_step = 0
while cont:
if n_step > 0:
T_guess += step
try:
R = HAPropsSI('R','T',T_guess,'W',W,'P',P)
error = abs(R_target-R)
if step>0:
T = T_guess
if R<R_target:
cont=False
elif step<0 and R<R_target:
T = T_guess
else:
cont=False
except ValueError:
if step<0: cont=False
n_step += 1
if n_step > n_steps: cont=False
if desired == 'Tdb':
return T
else:
return HAPropsSI(desired,'P',P,'W',W,'Tdb',T)
if verbose:
print('Calling: CoolProp.CoolProp.HAPropsSI({})'.format(','.join([str(i) for i in PropsSI_args])))
print(PropsSI_args)
if "R" in PropsSI_args[1:] and "W" in PropsSI_args[1:]:
result = humidity_search(PropsSI_args)
else:
result = HAPropsSI(*PropsSI_args)
# Determine the units of the value as returned from CoolProp
CP_return_units = CP_HA_symb_to_units[desired]
CP_return_type = CP_HA_symb_to_type[desired]
# Determine the preferred units for the value
if unit_system is None:
result_units = preferred_units_from_type(CP_return_type, units.preferred_units)
else:
result_units = preferred_units_from_type(CP_return_type, unit_system)
# Convert the returned value to the preferred units
if result_units is not None:
result = Quantity(result,CP_return_units).to(result_units)
return result
class Properties:
"""
A class to return thermodynamic properties for a real fluid
:param p: pressure (Default value = 1 atm)
:param unit_system: units for return values - one of 'SI_C','SI_K','English_F','English_R' (Default = 'SI_C')
:returns: an object with methods to evaluate real fluid properties
"""
def __init__(self, p=None, unit_system="kSI_C"):
self.fluid='humidair'
if p is None:
self.__p = Quantity(1.0,'atm')
else:
self.__p = p
self.unit_system = unit_system
# legacy definitions/aliases
self.relhum = self.phi = self.rel_hum
self.omega = self.hum_rat = self.humrat = self.w
self.Cp = self.cp
self.Cv = self.cv
self.mu = self.viscosity
self.nu = self.kinematic_viscosity
self.water = rfprop('Water',unit_system=unit_system)
def _lookup(self, desired, **kwargs):
"""
Call PropertyLookup to evaluate the desired property for the indepent properties specified
as keyword arguments
:param desired: desired property
:param **kwargs: any three dimensional quantities of T,T_wb,T_dp,p,p_w,w,v,v_ha,h,h_ha,s,s_ha,rel_hum,mole_fraction,
"""
unit_system = kwargs.pop('unit_system',self.unit_system)
return PropertyLookup(
desired, unit_system=self.unit_system, **kwargs
)
def _update_kwargs(self, args, kwargs, water=False):
"""use argument unit to identify appropriate keyword"""
for arg in args:
if isinstance(arg, Quantity):
try:
arg_symb = arg.property_symbol
arg_dict = {arg_symb:arg}
kwargs = dict(**arg_dict, **kwargs)
except:
try:
arg.to('K') # Temperature
kwargs = dict(T=arg, **kwargs)
except:
try:
arg.to('kPa') # pressure
kwargs = dict(p=arg, **kwargs)
except:
try:
arg.to('m^3/kg') # specific volume
kwargs = dict(v=arg, **kwargs)
except:
try:
arg.to('kJ/kg/K') # entropy
kwargs = dict(s=arg, **kwargs)
except:
try:
arg.to('J/kg_dry_air') # enthalpy
kwargs = dict(h=arg, **kwargs)
except:
try:
arg.to('J/kg_humid_air') # enthalpy humid air
kwargs = dict(h_ha=arg, **kwargs)
except:
try:
arg.to('kg_water/kg_dry_air') # molar density
kwargs = dict(w=arg, **kwargs)
except:
try:
if arg.dimensionless and (0<= arg <= 1): # relative humidity
kwargs = dict(rel_hum=arg, **kwargs)
except:
print(f'Unable to determine property type for {f} based on units')
elif 0<= arg <= 1: # quality
kwargs = dict(rel_hum=arg, **kwargs)
if not water and "p" not in kwargs.keys():
kwargs = dict(p=self.__p, **kwargs)
return kwargs
@property
def p(self):
"""
set or retrieve pressure for humid air
example:
>> humair.p = Quantity(1,'atm')
>> humair.p
'1 atm'
:param pressure: pressure as a dimensional quantity
:returns: pressure as a dimensional quantity
"""
return self.__p
@p.setter
def p(self, pressure):
self.__p = pressure
def T(self, *args, **kwargs):
"""
Dry-bulb Temperature from two independent intensive properties
example:
>> humair.T(rel_hum=rel_hum_2, h=h_1)
:param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: Dry-bulb Temperature as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("T", **kwargs)
def T_wb(self, *args, **kwargs):
"""
Wet-bulb Temperature from two independent intensive properties
example:
>> humair.T_wb(rel_hum=rel_hum_2, h=h_1)
:param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: Wet-bulb Temperature as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("T_wb", **kwargs)
def T_dp(self, *args, **kwargs):
"""
Dew-point Temperature from two independent intensive properties
example:
>> humair.T_dp(rel_hum=rel_hum_2, h=h_1)
:param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: Dew-point Temperature as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("T_dp", **kwargs)
def w(self, *args, **kwargs):
"""
humidity ratio from two independent intensive properties
example:
>> fluid.v(T=T_1, h=h_2)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: humidity ratio as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("w", **kwargs)
def v(self, *args, **kwargs):
"""
mixture volume per unit of dry air from two independent intensive properties
example:
>> fluid.v(T=T_1, h=p_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: specific volume per unit dry air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("v", **kwargs)
def v_ha(self, *args, **kwargs):
"""
mixture volume per unit of humid air from two independent intensive properties
example:
>> fluid.v_ha(T=T_1, h=p_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: specific volume per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("v_ha", **kwargs)
def v_w(self, *args, **kwargs):
"""
specific volume of water per unit of humid water from two independent intensive properties
example:
>> fluid.v_w(T=T_1, x=x_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: specific volume per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs,water=True)
return Quantity(self.water.v(**kwargs).to('m^3/kg').magnitude, 'm^3/kg_water')
def h(self, *args, **kwargs):
"""
enthalpy per unit dry air from two independent intensive properties
example:
>> fluid.h(T=T_1, rel_hum=re1_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: specific enthalpy per unit dry air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("h", **kwargs)
def h_ha(self, *args, **kwargs):
"""
enthalpy per unit humid air from two independent intensive properties
example:
>> fluid.h_ha(T=T_1, rel_hum=re1_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: specific enthalpy per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("h_ha", **kwargs)
def h_w(self, *args, **kwargs):
"""
specific enthalpy of water per unit of humid water from two independent intensive properties
example:
>> fluid.h_w(T=T_1, x=x_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: specific volume per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs,water=True)
return Quantity(self.water.h(**kwargs).to('kJ/kg').magnitude, 'kJ/kg_water')
def s(self, *args, **kwargs):
"""
entropy per unit dry air from two independent intensive properties
example:
>> fluid.s(T=T_1, h=h_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: specific entropy per unit dry air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("s", **kwargs)
def s_ha(self, *args, **kwargs):
"""
entropy per unit humid air from two independent intensive properties
example:
>> fluid.s_ha(T=T_1, h=h_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: specific entropy per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("s_ha", **kwargs)
def s_w(self, *args, **kwargs):
"""
specific entropy of water per unit of humid water from two independent intensive properties
example:
>> fluid.s_w(T=T_1, x=x_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar
:returns: specific volume per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs,water=True)
return Quantity(self.water.s(**kwargs).to('kJ/kg/K').magnitude, 'kJ/kg_water/K')
def rel_hum(self, *args, **kwargs):
"""
relative humidity from two independent intensive properties
example:
>> fluid.rel_hum(T=T_1, h=h_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: relative humidity as a dimensionless quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("rel_hum", **kwargs)
def y(self, *args, **kwargs):
"""
water mole fraction from two independent intensive properties
example:
>> fluid.y(T=T_1, h=h_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar
:returns: water mole fraction as a dimensionless quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("Y", **kwargs)
def cp(self, *args, **kwargs):
"""
specific heat per unit dry air from two independent intensive properties
example:
>> fluid.cp(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: specific heat per unit dry air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("cp", **kwargs)
def cp_ha(self, *args, **kwargs):
"""
specific heat per unit humid air from two independent intensive properties
example:
>> fluid.cp_ha(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: specific heat per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("cp_ha", **kwargs)
def cv(self, *args, **kwargs):
"""
constant volume specific heat per unit dry air from two independent intensive properties
example:
>> fluid.cv(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: constant volume specific heat per unit dry air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("cv", **kwargs)
def cv_ha(self, *args, **kwargs):
"""
constant volume specific heat per unit humid air from two independent intensive properties
example:
>> fluid.cv_ha(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: constant volume specific heat per unit humid air as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("cv_ha", **kwargs)
def conductivity(self, *args, **kwargs):
"""
thermal conductivity from two independent intensive properties
example:
>> fluid.conductivity(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: thermal conductivity as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("k", **kwargs)
def viscosity(self, *args, **kwargs):
"""
dynamic viscosity from two independent intensive properties
example:
>> fluid.viscosity(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: dynamic viscosity as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("mu", **kwargs)
def kinematic_viscosity(self, *args, **kwargs):
"""
dynamic viscosity from two independent intensive properties
example:
>> fluid.kinematic_viscosity(T=T1, p=p1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar
:returns: kinematic viscosity as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("viscosity", **kwargs)/self._lookup("v", **kwargs)
def Z(self, *args, **kwargs):
"""
Compressibility factor
example:
>> fluid.Pr(T=T_1, rel_hum=rel_hum_1)
:param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar,d_molar
:returns: Compressibility factor as a dimensionless quantity
"""
kwargs = self._update_kwargs(args,kwargs)
return self._lookup("Z", **kwargs)
def property_diagram(
self,
x=None,
y=None,
x_units=None,
y_units=None,
saturation=False,
unit_system=None,
**kwargs,
):
unit_system = unit_system or self.unit_system
return PropertyPlot(
x=x,
y=y,
x_units=x_units,
y_units=y_units,
property_table=self,
saturation=saturation,
unit_system=unit_system,
**kwargs,
)
def format_units(self,units,displaystyle=True):
units = re.sub('_water','_w',units)
units = re.sub('_dry_air','_a',units)
units = re.sub('deg',r'^\\circ{}\!',units)
match = re.match('(.*)/(.*)',units)
if match and displaystyle:
units = f'\\frac{{{match.group(1)}}}{{{match.group(2)}}}'
return units
def rounded_array(self,val1,val2,n=20,spacing=None):
if spacing is not None:
spacing_mag = floor(log10(spacing))
start = spacing*10**spacing_mag*round(val1/(spacing*10**spacing_mag))
ret_array = np.arange(start, val2+spacing, spacing)
else:
dir = 1 if val2>val1 else -1
delta = abs(val2-val1)
mag_delta = floor(log10(delta))
spacing = round(delta/n,-int(floor(log10(delta/n))))
spacing_mag = floor(log10(spacing))
spacings={}
lists={}
lengths={}
for i in [1,2,2.5,5,10]:
spacings[i] = dir*i*10**spacing_mag*round(spacing/(i*10**spacing_mag))
spacings[i] = dir*i*10**spacing_mag
start = i*10**spacing_mag*round(val1/(i*10**spacing_mag))
if spacings[i] == 0: spacings[i] = i*10**spacing_mag
lists[i] = np.arange(start,val2+spacings[i],spacings[i])
if lists[i][0] == -0: lists[i][0]=0
lengths[i] = len(lists[i])
kys= list(lengths.keys())
lst = list(lengths.values())
L = lst[min(range(len(lst)), key = lambda i: abs(lst[i]-n))]
K = kys[lst.index(L)]
ret_array = lists[K]
if ret_array[0] == -0: ret_array[0]=0
if ret_array[-1]>val2 or ret_array[-1]<val1: ret_array = ret_array[:-1]
if ret_array[0]<val1 or ret_array[-1]>val2: ret_array = ret_array[1:]
return ret_array
def psychrometric_chart(
self,
Tmin=None,
Tmax=None,
wmin=None,
wmax=None,
main_labels_color=None,
major_grid_style=None,
minor_grid_style=None,
n_h = 15,
n_v = 20,
h_isoline_style=None,
v_isoline_style=None,
rel_hum_isoline_style=None,
Twb_isoline_style=None,
unit_system=None,
redraw=False,
cache=True,
**kwargs
):
if self.cached_psychrometric_chart.cache_info().currsize>0:
show_psych = True
else:
show_psych = False
if redraw or not cache:
self.cached_psychrometric_chart.cache_clear()
psych = self.cached_psychrometric_chart(
Tmin,
Tmax,
wmin,
wmax,
main_labels_color,
major_grid_style,
minor_grid_style,
n_h,
n_v,
h_isoline_style,
v_isoline_style,
rel_hum_isoline_style,
Twb_isoline_style,
unit_system,
**kwargs
)
if show_psych: psych.show()
return psych
@functools.lru_cache()
def cached_psychrometric_chart(
self,
Tmin=None,
Tmax=None,
wmin=None,
wmax=None,
main_labels_color=None,
major_grid_style=None,
minor_grid_style=None,
n_h = 15,
n_v = 20,
h_isoline_style=None,
v_isoline_style=None,
rel_hum_isoline_style=None,
Twb_isoline_style=None,
unit_system=None,
**kwargs
):
unit_system = unit_system or self.unit_system
psych = self.property_diagram(x="T", y="omega", saturation=False, unit_system=unit_system, p=self.__p, **kwargs)
# Line Styles
main_labels_color = main_labels_color or 'black'
major_grid_style = major_grid_style or dict(
linestyle='-',
linewidth=0.5,
color=[0.4,0.4,0.4,0.4]
)
minor_grid_style = minor_grid_style or dict(
linestyle='-',
linewidth=0.25,
color=[0.4,0.4,0.4,0.4]
)
h_isoline_style = h_isoline_style or dict(
linestyle='-',
linewidth=0.5,
color=[0.4,0.4,0.4,0.4],
pos=0,
labelprops=dict(
ha='right',
va='center',
pos=0.0
)
)
v_isoline_style = v_isoline_style or dict(
linestyle='-',
linewidth=0.5,
color=[0.4,0.4,0.4,0.4],
labelprops=dict(color='grey',offset=2))
rel_hum_isoline_style = rel_hum_isoline_style or dict(
linestyle='-',
linewidth=0.5,
color=[0.4,0.4,0.4,0.4],
labelprops=dict(
ha='right',
color='grey',
offset=2
)
)
Twb_isoline_style = Twb_isoline_style or dict(
linestyle=(0,(5,10)),
linewidth=0.5,
color=[0.4,0.4,0.4,0.4],
pos=0.2,
labelprops=dict(
ha='left',
color='grey',
offset=2
)
)
# Set Axis limits
if Tmin is None: Tmin = Quantity(30.0,'degF')
Tmin = Tmin.to(psych.x_units)
if Tmax is None: Tmax = Quantity(50.0,'degC')
Tmax = Tmax.to(psych.x_units)
if wmin is None: wmin = Quantity(0.0,'kg_water/kg_dry_air')
wmin = wmin.to(psych.y_units)
if wmax is None: wmax = Quantity(0.03,'kg_water/kg_dry_air')
wmax = wmax.to(psych.y_units)
psych.Tmin,psych.Tmax,psych.wmin,psych.wmax = Tmin,Tmax,wmin,wmax
psych.ax.set_xlim(left=Tmin.magnitude,right=Tmax.magnitude)
psych.ax.set_ylim(bottom=wmin.magnitude,top=wmax.magnitude)
# Set axis labels
x_units_str = f"{self.format_units(f'{psych.x_units}')}"
y_units_str = f"{self.format_units(f'{psych.y_units}')}"
psych.ax.set_xlabel(f"Dry-Bulb Temperature, $T_{{\\mathrm{{db}}}}\\ [\\mathrm{{{x_units_str}}}]$")
psych.ax.set_ylabel(f"Humidity Ratio, $\\omega\\ \\left[\mathrm{{{y_units_str}}}\\right]$")
# Set axis style
psych.ax.yaxis.tick_right()
psych.ax.yaxis.set_label_position("right")
psych.ax.spines["right"].set_visible(True)
psych.ax.spines["left"].set_visible(False)
# Add Plot Title
try:
pressure_str = f'{psych.props.p}'
except:
pressure_str = f'{psych.props.p:~L}'
title = f'Psychrometric Chart\nPressure: $\mathrm{{{pressure_str}}}$'
psych.text((0.05*(Tmax-Tmin)+Tmin).magnitude, (0.9*(wmax-wmin)+wmin).magnitude, title, fontsize=12)
# Draw grid
# Dry-bulb grid
tickscale=1
x_major_ticks = self.rounded_array(Tmin.magnitude,Tmax.magnitude,spacing=5)
x_minor_ticks = self.rounded_array(Tmin.magnitude,Tmax.magnitude,spacing=1)
plt.xticks(x_major_ticks)
ymin = wmin
for i in x_major_ticks:
ymax = min(psych.props.w(T_db=Quantity(i,psych.x_units),rel_hum=1),wmax)
psych.ax.plot([i,i],[ymin.magnitude,ymax.magnitude],**major_grid_style)
for i in x_minor_ticks:
ymax = min(psych.props.w(T_db=Quantity(i,psych.x_units),rel_hum=1),wmax)
psych.ax.plot([i,i],[ymin.magnitude,ymax.magnitude],**minor_grid_style)
# Humidity ratio grid
y_minor_ticks = self.rounded_array(wmin.magnitude,wmax.magnitude,spacing=0.001)
y_major_ticks = self.rounded_array(wmin.magnitude,wmax.magnitude,spacing=0.005)
plt.yticks(y_major_ticks)
xmax = Tmax
for i in y_major_ticks:
xmin=Tmin
try:
phi_left_lim = psych.props.rel_hum(T_db=Tmin,w=Quantity(i,psych.y_units))
except:
xmin = psych.props.T(w=Quantity(i,psych.y_units),rel_hum=1).to(psych.x_units)
psych.ax.plot([xmin.magnitude,xmax.magnitude],[i,i],**major_grid_style)
for i in y_minor_ticks:
xmin=Tmin
try:
phi_left_lim = psych.props.rel_hum(T_db=Tmin,w=Quantity(i,psych.y_units))
except:
xmin = psych.props.T(w=Quantity(i,psych.y_units),rel_hum=1).to(psych.x_units)
psych.ax.plot([xmin.magnitude,xmax.magnitude],[i,i],**minor_grid_style)
# Saturated line
psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=1,label=False,linestyle='-',color='black')
# Relative humidity lines
for i in [0.1]:
lstyle = dict(**rel_hum_isoline_style)
lstyle['labelprops'] = dict(**rel_hum_isoline_style['labelprops'])
lstyle['labelprops']['color'] = main_labels_color
psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=i,label=f'$\phi=10\%$',xcoor=(Tmin+0.95*(Tmax-Tmin)).magnitude,**lstyle)
for i in [0.02,0.04,0.06,0.08,0.15,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.8,0.9]:
rel_hum = i
xmin,xmax = Tmin,Tmax
if psych.props.w(rel_hum=rel_hum,T=Tmax) > wmax:
xmax = psych.props.T(w=wmax,rel_hum=rel_hum)
psych.plot_iso_line(iso_symb='rel_hum',iso_value=rel_hum,x_range=[xmin,xmax],label=f'{int(i*100)}%',ycoor=(wmin+0.95*(wmax-wmin)).magnitude,**rel_hum_isoline_style)
else:
psych.plot_iso_line(iso_symb='rel_hum',iso_value=rel_hum,x_range=[xmin,xmax],label=f'{int(i*100)}%',xcoor=(Tmin+0.95*(Tmax-Tmin)).magnitude,**rel_hum_isoline_style)
# Enthalpy lines
hmin = psych.props.h(T=Tmin,w=wmin)
hmax = psych.props.h(T=Tmax,w=wmax)
h_units = hmin.units
h_units_str = f"{self.format_units(f'{h_units}')}"
for i in self.rounded_array(hmin.magnitude,hmax.magnitude,15):
h = Quantity(i,h_units)
xmin = max(psych.props.T(h=h,rel_hum=1),Tmin,psych.props.T(h=h,w=wmax))
xmax = min(psych.props.T(h=h,w=wmin),Tmax,psych.props.T(h=h,w=wmin))
try:
psych.plot_iso_line(iso_symb='h',iso_value=h,x_range=[xmin,xmax],label=f'{int(i) if i.is_integer() else i}',**h_isoline_style)
except:
pass
# Enthalpy axis label
psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=1,label=f'Enthalpy, $h$ $\\left[\\mathrm{{{h_units_str}}}\\right]$',linewidth=0,pos=0.5,labelprops=dict(offset=25))
# Specific volume lines
vmin = psych.props.v(T=Tmin,omega=wmin)
vmax = psych.props.v(T=Tmax,omega=wmax)
v_units = vmin.units
v_units_str = f"{self.format_units(f'{v_units}',displaystyle=False)}"
v_list = self.rounded_array(vmin.magnitude,vmax.magnitude,20)
v_main_label_index = int(len(v_list)*0.6)
for i,val in enumerate(v_list):
v = Quantity(val,v_units)
ymax = min(psych.props.w(v=v,rel_hum=1),wmax)
try:
ymin = max(psych.props.w(T=Tmax,v=v),wmin)
except ValueError:
ymin = wmin
v_string = int(val) if val.is_integer() else f'{val:.5}'.rstrip()
if i == v_main_label_index:
lstyle = dict(**v_isoline_style)
lstyle['labelprops'] = dict(**v_isoline_style['labelprops'])
lstyle['labelprops']['color'] = main_labels_color
psych.plot_iso_line(iso_symb='v',iso_value=v,y_range=[ymax,ymin],n_points=10,label=f'$v={v_string}\ \mathrm{{{v_units_str}}}$',pos=0.7,**lstyle)
else:
try:
psych.plot_iso_line(iso_symb='v',iso_value=v,y_range=[ymax,ymin],label=v_string,n_points=10,pos=0.7,**v_isoline_style)
except:
pass
# Wet-bulb Temperature lines
T_units = Tmin.units
T_units_str = f"{self.format_units(f'{T_units}',displaystyle=False)}"
Twb_main_label_index = int(len(x_major_ticks)*0.5)
for i,T in enumerate(x_major_ticks[:-1]):
Twb = Quantity(T,psych.x_units)
ymax = min(psych.props.w(T=Twb,rel_hum=1),wmax)
try:
ymin = max(psych.props.w(T=Tmax,T_wb=Twb),wmin)
except ValueError:
ymin = wmin
if ymin<wmax:
if i == Twb_main_label_index:
lstyle = dict(**Twb_isoline_style)
lstyle['labelprops'] = dict(**Twb_isoline_style['labelprops'])
lstyle['labelprops']['color'] = main_labels_color
psych.plot_iso_line(iso_symb='T_wb',iso_value=Twb,y_range=[ymax,ymin],n_points=10,label=f'$T_\\mathrm{{wb}}={int(T)}\\mathrm{{{T_units_str}}}$',**lstyle)
else:
psych.plot_iso_line(iso_symb='T_wb',iso_value=Twb,y_range=[ymax,ymin],n_points=10,label=f'${int(T)}\\mathrm{{{T_units_str}}}$',**Twb_isoline_style)
return psych
def Ts_diagram(self, unit_system=None, saturation=False, **kwargs):
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="s", y="T", unit_system=unit_system, saturation=saturation, **kwargs
)
def pv_diagram(self, unit_system=None, saturation=None, log_x=None, log_y=None, **kwargs):
if self.fluid == 'Air':
saturation = saturation or False
log_x = log_x or False
log_y = log_y or False
else:
saturation = True
log_x = True if log_x is None else log_x
log_y = True if log_y is None else log_y
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="v", y="p", unit_system=unit_system, saturation=saturation, log_x=log_x, log_y=log_y, **kwargs
)
def Tv_diagram(self, unit_system=None, saturation=None, **kwargs):
if self.fluid == 'Air': saturation = saturation or False
else: saturation = True if saturation is None else saturation
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="v", y="T", unit_system=unit_system, saturation=saturation, **kwargs
)
def hs_diagram(self, unit_system=None, saturation=None, **kwargs):
if self.fluid == 'Air': saturation = saturation or False
else: saturation = True if saturation is None else saturation
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="s", y="h", unit_system=unit_system, saturation=saturation, **kwargs
)
def ph_diagram(self, unit_system=None, saturation=None, **kwargs):
if self.fluid == 'Air': saturation = saturation or False
else: saturation = True if saturation is None else saturation
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="h", y="p", unit_system=unit_system, saturation=saturation, **kwargs
)
def pT_diagram(self, unit_system=None, saturation=None, **kwargs):
if self.fluid == 'Air': saturation = saturation or False
else: saturation = True if saturation is None else saturation
unit_system = unit_system or self.unit_system
return self.property_diagram(
x="T", y="p", unit_system=unit_system, saturation=saturation, **kwargs
)
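# --- Editorial sketch (not part of the original source) ---
# The wrappers above only choose default axes, saturation handling and log scales,
# then delegate to self.property_diagram. Assuming `tbl` is an instance of the class
# these methods belong to (the variable name is hypothetical), a call might look like:
# >>> diagram = tbl.Ts_diagram()
# >>> diagram = tbl.pv_diagram(log_x=True, log_y=True)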
def LegacyPropertyPlot(
x=None,
y=None,
x_units=None,
y_units=None,
plot_type=None,
fluid=None,
saturation=False,
unit_system="SI_C",
**kwargs,
):
props = Properties(fluid=fluid, unit_system=unit_system, **kwargs)
return PropertyPlot(
x=x,
y=y,
x_units=x_units,
y_units=y_units,
property_table=props,
saturation=saturation,
unit_system=unit_system,
**kwargs,
)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
from collections import defaultdict
from json import dumps as json_dumps
from typing import (
Any,
DefaultDict,
Dict,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
from pytext.utils import cuda
from pytext.utils.ascii_table import ascii_table, ascii_table_from_dict
RECALL_AT_PRECISION_THRESHOLDS = [0.2, 0.4, 0.6, 0.8, 0.9]
"""
Basic metric classes and functions for single-label prediction problems.
Extending to multi-label support
"""
class LabelPrediction(NamedTuple):
"""
Label predictions of an example.
Attributes:
label_scores: Confidence scores that each label receives.
predicted_label: Index of the predicted label. This is usually the label with
the highest confidence score in label_scores.
expected_label: Index of the true label.
"""
label_scores: List[float]
predicted_label: int
expected_label: int
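# Editorial sketch (invented values): a LabelPrediction for a three-class example in
# which class 1 was predicted while class 2 was the true label.
# >>> LabelPrediction(label_scores=[0.1, 0.7, 0.2], predicted_label=1, expected_label=2)
# LabelPrediction(label_scores=[0.1, 0.7, 0.2], predicted_label=1, expected_label=2)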
class LabelListPrediction(NamedTuple):
"""
Label list predictions of an example.
Attributes:
label_scores: Confidence scores that each label receives.
predicted_label: List of indices of the predicted label.
expected_label: List of indices of the true label.
"""
label_scores: List[float]
predicted_label: List[int]
expected_label: List[int]
class PRF1Scores(NamedTuple):
"""
Precision/recall/F1 scores for a collection of predictions.
Attributes:
true_positives: Number of true positives.
false_positives: Number of false positives.
false_negatives: Number of false negatives.
precision: TP / (TP + FP).
recall: TP / (TP + FN).
f1: 2 * TP / (2 * TP + FP + FN).
"""
true_positives: int
false_positives: int
false_negatives: int
precision: float
recall: float
f1: float
class SoftClassificationMetrics(NamedTuple):
"""
Classification scores that are independent of thresholds.
"""
average_precision: float
recall_at_precision: Dict[float, float]
decision_thresh_at_precision: Dict[float, float]
roc_auc: Optional[float]
class MacroPRF1Scores(NamedTuple):
"""
Macro precision/recall/F1 scores (averages across each label).
Attributes:
num_labels: Number of distinct labels.
precision: Equally weighted average of precisions for each label.
recall: Equally weighted average of recalls for each label.
f1: Equally weighted average of F1 scores for each label.
"""
num_labels: int
precision: float
recall: float
f1: float
class MacroPRF1Metrics(NamedTuple):
"""
Aggregated metric class for macro precision/recall/F1 scores.
Attributes:
per_label_scores: Mapping from label string to the corresponding
precision/recall/F1 scores.
macro_scores: Macro precision/recall/F1 scores across the labels in
`per_label_scores`.
"""
per_label_scores: Dict[str, PRF1Scores]
macro_scores: MacroPRF1Scores
def print_metrics(self, indentation="") -> None:
print(
ascii_table(
[
{
"label": label,
"precision": f"{metrics.precision:.2f}",
"recall": f"{metrics.recall:.2f}",
"f1": f"{metrics.f1:.2f}",
"support": metrics.true_positives + metrics.false_negatives,
}
for label, metrics in sorted(self.per_label_scores.items())
],
human_column_names={
"label": "Label",
"precision": "Precision",
"recall": "Recall",
"f1": "F1",
"support": "Support",
},
footer={
"label": "Overall macro scores",
"precision": f"{self.macro_scores.precision:.2f}",
"recall": f"{self.macro_scores.recall:.2f}",
"f1": f"{self.macro_scores.f1:.2f}",
},
alignments={"label": "<"},
indentation=indentation,
)
)
class PRF1Metrics(NamedTuple):
"""
Metric class for all types of precision/recall/F1 scores.
Attributes:
per_label_scores: Map from label string to the corresponding precision/recall/F1
scores.
macro_scores: Macro precision/recall/F1 scores across the labels in
`per_label_scores`.
micro_scores: Micro (regular) precision/recall/F1 scores for the same
collection of predictions.
"""
per_label_scores: Dict[str, PRF1Scores]
macro_scores: MacroPRF1Scores
micro_scores: PRF1Scores
def print_metrics(self) -> None:
res = (
f"\t{"Per label scores":<40}"
f"\t{"Precision":<10}"
f"\t{"Recall":<10}"
f"\t{"F1":<10}"
f"\t{"Support":<10}\n\n"
)
for label, label_metrics in self.per_label_scores.items():
support = label_metrics.true_positives + label_metrics.false_negatives
res += (
f"\t{label:<40}"
f"\t{label_metrics.precision * 100:<10.3f}"
f"\t{label_metrics.recall * 100:<10.3f}"
f"\t{label_metrics.f1 * 100:<10.3f}"
f"\t{support:<10}\n"
)
support = self.micro_scores.true_positives + self.micro_scores.false_negatives
res += (
f"\n\t{"Overall micro scores":<40}"
f"\t{self.micro_scores.precision * 100:<10.3f}"
f"\t{self.micro_scores.recall * 100:<10.3f}"
f"\t{self.micro_scores.f1 * 100:<10.3f}"
f"\t{support:<10}\n"
)
res += (
f"\t{"Overall macro scores":<40}"
f"\t{self.macro_scores.precision * 100:<10.3f}"
f"\t{self.macro_scores.recall * 100:<10.3f}"
f"\t{self.macro_scores.f1 * 100:<10.3f}\n"
)
print(res)
class ClassificationMetrics(NamedTuple):
"""
Metric class for various classification metrics.
Attributes:
accuracy: Overall accuracy of predictions.
macro_prf1_metrics: Macro precision/recall/F1 scores.
per_label_soft_scores: Per label soft metrics.
mcc: Matthews correlation coefficient.
roc_auc: Area under the Receiver Operating Characteristic curve.
loss: Training loss (only used for selecting best model, no need to print).
"""
accuracy: float
macro_prf1_metrics: MacroPRF1Metrics
per_label_soft_scores: Optional[Dict[str, SoftClassificationMetrics]]
mcc: Optional[float]
roc_auc: Optional[float]
loss: float
def print_metrics(self, report_pep=False) -> None:
print(f"Accuracy: {self.accuracy * 100:.2f}")
print("\nSoft Metrics:")
if self.per_label_soft_scores:
soft_scores = [
{
"label": label,
"avg_pr": f"{metrics.average_precision:.3f}",
"roc_auc": f"{(metrics.roc_auc or 0.0):.3f}",
}
for label, metrics in sorted(self.per_label_soft_scores.items())
]
columns = {
"label": "Label",
"avg_pr": "Average precision",
"roc_auc": "ROC AUC",
}
print(ascii_table(soft_scores, columns))
all_thresholds = set(
itertools.chain.from_iterable(
metrics.recall_at_precision
for metrics in self.per_label_soft_scores.values()
)
)
print("\nRecall at Precision")
print(
ascii_table(
(
dict(
{"label": label},
**{
str(p): f"{r:.3f}"
for p, r in metrics.recall_at_precision.items()
},
)
for label, metrics in sorted(self.per_label_soft_scores.items())
),
dict(
{"label": "Label"},
**{str(t): f"R@P {t}" for t in all_thresholds},
),
alignments={"label": "<"},
)
)
if self.mcc:
print(f"\nMatthews correlation coefficient: {self.mcc :.3f}")
if self.roc_auc:
print(f"\nROC AUC: {self.roc_auc:.3f}")
if report_pep:
self.print_pep()
def print_pep(self):
metrics = {"Accuracy": f"{self.accuracy * 100:.2f}"}
if self.roc_auc:
metrics["ROC AUC"] = f"{self.roc_auc :.3f}"
for key, value in metrics.items():
info = {"type": "NET", "metric": key, "unit": "None", "value": value}
print("PyTorchObserver " + json_dumps(info))
class Confusions:
"""
Confusion information for a collection of predictions.
Attributes:
TP: Number of true positives.
FP: Number of false positives.
FN: Number of false negatives.
"""
__slots__ = "TP", "FP", "FN"
def __init__(self, TP: int = 0, FP: int = 0, FN: int = 0) -> None:
self.TP: int = TP
self.FP: int = FP
self.FN: int = FN
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Confusions):
return NotImplemented
return self.TP == other.TP and self.FP == other.FP and self.FN == other.FN
def __add__(self, other: "Confusions") -> "Confusions":
return Confusions(
TP=self.TP + other.TP, FP=self.FP + other.FP, FN=self.FN + other.FN
)
def __iadd__(self, other: "Confusions") -> "Confusions":
self.TP += other.TP
self.FP += other.FP
self.FN += other.FN
return self
def _asdict(self) -> Dict:
return {"TP": self.TP, "FP": self.FP, "FN": self.FN}
def compute_metrics(self) -> PRF1Scores:
precision, recall, f1 = compute_prf1(self.TP, self.FP, self.FN)
return PRF1Scores(
true_positives=self.TP,
false_positives=self.FP,
false_negatives=self.FN,
precision=precision,
recall=recall,
f1=f1,
)
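# Editorial sketch (invented counts): Confusions objects can be merged with + / +=,
# e.g. to pool counts from several evaluation batches before computing PRF1 scores.
# >>> c = Confusions(TP=3, FP=1, FN=2) + Confusions(TP=1, FP=0, FN=1)
# >>> c._asdict()
# {'TP': 4, 'FP': 1, 'FN': 3}
# >>> c.compute_metrics().f1  # 2*4 / (2*4 + 1 + 3)
# 0.6666666666666666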
class PerLabelConfusions:
"""
Per label confusion information.
Attributes:
label_confusions_map: Map from label string to the corresponding confusion
counts.
"""
__slots__ = "label_confusions_map"
def __init__(self) -> None:
self.label_confusions_map: DefaultDict[str, Confusions] = defaultdict(
Confusions
)
def update(self, label: str, item: str, count: int) -> None:
"""
Increase one of TP, FP or FN count for a label by certain amount.
Args:
label: Label to be modified.
item: Type of count to be modified, should be one of "TP", "FP" or "FN".
count: Amount to be added to the count.
Returns:
None
"""
confusions = self.label_confusions_map[label]
setattr(confusions, item, getattr(confusions, item) + count)
def compute_metrics(self) -> MacroPRF1Metrics:
per_label_scores: Dict[str, PRF1Scores] = {}
precision_sum, recall_sum, f1_sum = 0.0, 0.0, 0.0
for label, confusions in sorted(self.label_confusions_map.items()):
scores = confusions.compute_metrics()
per_label_scores[label] = scores
if confusions.TP + confusions.FN > 0:
precision_sum += scores.precision
recall_sum += scores.recall
f1_sum += scores.f1
num_labels = len(self.label_confusions_map)
return MacroPRF1Metrics(
per_label_scores=per_label_scores,
macro_scores=MacroPRF1Scores(
num_labels=num_labels,
precision=safe_division(precision_sum, num_labels),
recall=safe_division(recall_sum, num_labels),
f1=safe_division(f1_sum, num_labels),
),
)
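# Editorial sketch (invented counts): accumulating per-label counts and reading back
# the macro scores; the labels "spam"/"ham" are placeholders.
# >>> plc = PerLabelConfusions()
# >>> plc.update("spam", "TP", 3); plc.update("spam", "FP", 1); plc.update("ham", "FN", 2)
# >>> plc.compute_metrics().macro_scores.num_labels
# 2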
class AllConfusions:
"""
Aggregated class for per label confusions.
Attributes:
per_label_confusions: Per label confusion information.
confusions: Overall TP, FP and FN counts across the labels in
`per_label_confusions`.
"""
__slots__ = "per_label_confusions", "confusions"
def __init__(self) -> None:
self.per_label_confusions = PerLabelConfusions()
self.confusions = Confusions()
def compute_metrics(self) -> PRF1Metrics:
per_label_metrics = self.per_label_confusions.compute_metrics()
return PRF1Metrics(
per_label_scores=per_label_metrics.per_label_scores,
macro_scores=per_label_metrics.macro_scores,
micro_scores=self.confusions.compute_metrics(),
)
class PairwiseRankingMetrics(NamedTuple):
"""
Metric class for pairwise ranking
Attributes:
num_examples (int): number of samples
accuracy (float): how many times did we rank in the correct order
average_score_difference (float): average score(higherRank) - score(lowerRank)
"""
num_examples: int
accuracy: float
average_score_difference: float
def print_metrics(self) -> None:
print(f"RankingAccuracy: {self.accuracy * 100:.2f}")
print(f"AvgScoreDiff: {self.average_score_difference}")
print(f"NumExamples: {self.num_examples}")
class RegressionMetrics(NamedTuple):
"""
Metrics for regression tasks.
Attributes:
num_examples (int): number of examples
pearson_correlation (float): correlation between predictions and labels
mse (float): mean-squared error between predictions and labels
"""
num_examples: int
pearson_correlation: float
mse: float
def print_metrics(self):
print(f"Num examples: {self.num_examples}")
print(f"Pearson correlation: {self.pearson_correlation:.3f}")
print(f"Mean squared error: {self.mse:.3f}")
class RealtimeMetrics(NamedTuple):
"""
Realtime Metrics for tracking training progress and performance.
Attributes:
samples (int): number of samples
tps (float): tokens per second
ups (float): updates per second
"""
samples: int
tps: float
ups: float
def _format(self, key, value):
if key in ("tps", "ups"):
return round(value)
return value
def __str__(self):
metrics = {"num_gpus": cuda.DISTRIBUTED_WORLD_SIZE}
for key, value in self._asdict().items():
if not value:
continue
metrics[key] = self._format(key, value)
return str(metrics)
def safe_division(n: Union[int, float], d: Union[int, float]) -> float:
return float(n) / d if d else 0.0
def compute_prf1(tp: int, fp: int, fn: int) -> Tuple[float, float, float]:
precision = safe_division(tp, tp + fp)
recall = safe_division(tp, tp + fn)
f1 = safe_division(2 * tp, 2 * tp + fp + fn)
return (precision, recall, f1)
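# Editorial sketch (invented counts) of compute_prf1:
# >>> compute_prf1(tp=8, fp=2, fn=4)
# (0.8, 0.6666666666666666, 0.7272727272727273)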
def average_precision_score(
y_true_sorted: np.ndarray, y_score_sorted: np.ndarray
) -> float:
"""
Computes average precision, which summarizes the precision-recall curve as the
precisions achieved at each threshold weighted by the increase in recall since the
previous threshold.
Args:
y_true_sorted: Numpy array sorted according to decreasing confidence scores
indicating whether each prediction is correct.
y_score_sorted: Numpy array of confidence scores for the predictions in
decreasing order.
Returns:
Average precision score.
TODO: This is too slow, improve the performance
"""
ap = 0.0
tp = 0
threshold = y_score_sorted[0]
y_score_sorted = np.append(y_score_sorted[1:], np.nan)
total_positive = np.sum(y_true_sorted)
added_positives = 0
for k, (label, score) in enumerate(zip(y_true_sorted, y_score_sorted)):
if label:
added_positives += 1
if score != threshold:
threshold = score
recall_diff = added_positives / total_positive
tp += added_positives
added_positives = 0
p_at_thresh = tp / (k + 1)
ap += p_at_thresh * recall_diff
return float(ap)
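# Editorial sketch (invented inputs): with predictions already sorted by decreasing score,
# correctness flags [True, False, True] and scores [0.9, 0.8, 0.7] give precision 1.0 at
# recall 0.5 and precision 2/3 at recall 1.0, so AP = 0.5*1.0 + 0.5*(2/3) ~ 0.8333.
# >>> average_precision_score(np.array([True, False, True]), np.array([0.9, 0.8, 0.7]))
# 0.8333333333333333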
def sort_by_score(y_true_list: Sequence[bool], y_score_list: Sequence[float]):
y_true = np.array(y_true_list)
y_score = np.array(y_score_list)
sort_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_true = y_true[sort_indices]
y_score = y_score[sort_indices]
return y_true, y_score
def recall_at_precision(
y_true_sorted: np.ndarray, y_score_sorted: np.ndarray, thresholds: Sequence[float]
) -> Tuple[Dict[float, float], Dict[float, float]]:
"""
Computes recall at various precision levels
Args:
y_true_sorted: Numpy array sorted according to decreasing confidence scores
indicating whether each prediction is correct.
y_score_sorted: Numpy array of confidence scores for the predictions in
decreasing order.
thresholds: Sequence of floats indicating the requested precision thresholds
Returns:
Two dictionaries keyed by the requested precision thresholds: the maximum recall achieved at each precision, and the decision (score) threshold at which that recall is reached.
"""
y_score_shift = np.append(y_score_sorted[1:], np.nan)
score_change = (y_score_sorted - y_score_shift) != 0
cum_sum = np.cumsum(y_true_sorted)
recall_at_precision_dict = {t: 0.0 for t in thresholds}
decision_thresh_at_precision_dict = {t: 0.0 for t in thresholds}
sum_y_true = y_true_sorted.sum()
if sum_y_true == 0:
return recall_at_precision_dict, decision_thresh_at_precision_dict
recall = cum_sum / sum_y_true
precision = cum_sum / np.array(range(1, len(y_true_sorted) + 1))
for threshold in thresholds:
meets_requirements = np.logical_and(precision >= threshold, score_change)
if not np.any(meets_requirements):
continue
recall_at_precision_dict[threshold] = float(
max(np.extract(meets_requirements, recall))
)
decision_thresh_at_precision_dict[threshold] = float(
min(np.extract(meets_requirements, y_score_sorted))
)
return recall_at_precision_dict, decision_thresh_at_precision_dict
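# Editorial sketch (invented ranking): with correctness [1, 1, 0, 1], scores
# [0.9, 0.8, 0.7, 0.6] and thresholds (0.8, 0.6), precision >= 0.8 is last met at rank 2
# (recall 2/3, score cutoff 0.8), while precision >= 0.6 holds through rank 4
# (recall 1.0, score cutoff 0.6).
# >>> recall_at_precision(np.array([1, 1, 0, 1]), np.array([0.9, 0.8, 0.7, 0.6]), [0.8, 0.6])
# ({0.8: 0.6666666666666666, 0.6: 1.0}, {0.8: 0.8, 0.6: 0.6})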
def compute_soft_metrics(
predictions: Sequence[LabelPrediction],
label_names: Sequence[str],
recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,
) -> Dict[str, SoftClassificationMetrics]:
"""
Computes soft classification metrics (for now, average precision) given a list of
label predictions.
Args:
predictions: Label predictions, including the confidence score for each label.
label_names: Indexed label names.
recall_at_precision_thresholds: precision thresholds at which to calculate
recall
Returns:
Dict from label strings to their corresponding soft metrics.
"""
soft_metrics = {}
for i, label_name in enumerate(label_names):
y_true = []
y_score = []
for label_scores, _, expected in predictions:
y_true.append(expected == i)
y_score.append(label_scores[i])
y_true_sorted, y_score_sorted = sort_by_score(y_true, y_score)
ap = average_precision_score(y_true_sorted, y_score_sorted)
recall_at_precision_dict, decision_thresh_at_precision = recall_at_precision(
y_true_sorted, y_score_sorted, recall_at_precision_thresholds
)
roc_auc = compute_roc_auc(predictions, target_class=i)
soft_metrics[label_name] = SoftClassificationMetrics(
average_precision=ap,
recall_at_precision=recall_at_precision_dict,
decision_thresh_at_precision=decision_thresh_at_precision,
roc_auc=roc_auc,
)
return soft_metrics
def compute_multi_label_soft_metrics(
predictions: Sequence[LabelListPrediction],
label_names: Sequence[str],
recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,
) -> Dict[str, SoftClassificationMetrics]:
"""
Computes multi-label soft classification metrics
(for now, average precision)
Args:
predictions: multi-label predictions,
including the confidence score for each label.
label_names: Indexed label names.
recall_at_precision_thresholds: precision thresholds at which to calculate
recall
Returns:
Dict from label strings to their corresponding soft metrics.
"""
soft_metrics = {}
for i, label_name in enumerate(label_names):
y_true = []
y_score = []
for label_scores, _, expected in predictions:
y_true.append(i in expected)
y_score.append(label_scores[i])
y_true_sorted, y_score_sorted = sort_by_score(y_true, y_score)
ap = average_precision_score(y_true_sorted, y_score_sorted)
recall_at_precision_dict, decision_thresh_at_precision = recall_at_precision(
y_true_sorted, y_score_sorted, recall_at_precision_thresholds
)
roc_auc = compute_roc_auc(predictions, target_class=i)
soft_metrics[label_name] = SoftClassificationMetrics(
average_precision=ap,
recall_at_precision=recall_at_precision_dict,
decision_thresh_at_precision=decision_thresh_at_precision,
roc_auc=roc_auc,
)
return soft_metrics
def compute_matthews_correlation_coefficients(
TP: int, FP: int, FN: int, TN: int
) -> float:
"""
Computes Matthews correlation coefficient, a way to summarize all four counts (TP,
FP, FN, TN) in the confusion matrix of binary classification.
Args:
TP: Number of true positives.
FP: Number of false positives.
FN: Number of false negatives.
TN: Number of true negatives.
Returns:
Matthews correlation coefficient, which is `(TP * TN - FP * FN) /
sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))`.
"""
mcc = safe_division(
(TP * TN) - (FP * FN),
np.sqrt(float((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))),
)
return mcc
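# Editorial sketch (invented counts): a balanced binary confusion matrix with
# TP=TN=40 and FP=FN=10 gives (1600 - 100) / sqrt(50*50*50*50) = 0.6.
# >>> float(compute_matthews_correlation_coefficients(TP=40, FP=10, FN=10, TN=40))
# 0.6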
def compute_roc_auc(
predictions: Sequence[LabelPrediction], target_class: int = 0
) -> Optional[float]:
"""
Computes area under the Receiver Operating Characteristic curve, for binary
classification. Implementation based off of (and explained at)
https://www.ibm.com/developerworks/community/blogs/jfp/entry/Fast_Computation_of_AUC_ROC_score?lang=en.
"""
# Collect scores
y_true = [expected == target_class for _, _, expected in predictions]
y_score = [label_scores[target_class] for label_scores, _, _ in predictions]
y_true_sorted, _ = sort_by_score(y_true, y_score)
# Compute auc as probability that a positive example is scored higher than
# a negative example.
n_false = 0
n_correct_pair_order = 0
for y in reversed(y_true_sorted): # want low predicted to high predicted
if y:
n_correct_pair_order += n_false
else:
n_false += 1
n_true = len(y_true) - n_false
if n_true == 0 or n_false == 0:
return None
return float(n_correct_pair_order / (n_true * n_false))
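# Editorial sketch (invented predictions): for class 0, the positive examples score
# 0.9 and 0.6 and the negatives 0.7 and 0.4, so 3 of the 4 positive/negative pairs
# are ordered correctly and AUC = 0.75.
# >>> compute_roc_auc([
# ...     LabelPrediction([0.9, 0.1], 0, 0),
# ...     LabelPrediction([0.6, 0.4], 0, 0),
# ...     LabelPrediction([0.7, 0.3], 0, 1),
# ...     LabelPrediction([0.4, 0.6], 1, 1),
# ... ])
# 0.75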
def compute_classification_metrics(
predictions: Sequence[LabelPrediction],
label_names: Sequence[str],
loss: float,
average_precisions: bool = True,
recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,
) -> ClassificationMetrics:
"""
A general function that computes classification metrics given a list of label
predictions.
Args:
predictions: Label predictions, including the confidence score for each label.
label_names: Indexed label names.
average_precisions: Whether to compute average precisions for labels or not.
Defaults to True.
recall_at_precision_thresholds: precision thresholds at which to calculate recall
Returns:
ClassificationMetrics which contains various classification metrics.
"""
num_correct = 0
per_label_confusions = PerLabelConfusions()
for _, predicted, expected in predictions:
predicted_label = label_names[predicted]
expected_label = label_names[expected]
if predicted_label == expected_label:
num_correct += 1
per_label_confusions.update(expected_label, "TP", 1)
else:
per_label_confusions.update(expected_label, "FN", 1)
per_label_confusions.update(predicted_label, "FP", 1)
accuracy = safe_division(num_correct, len(predictions))
macro_prf1_metrics = per_label_confusions.compute_metrics()
soft_metrics = (
compute_soft_metrics(predictions, label_names, recall_at_precision_thresholds)
if average_precisions
else None
)
if len(label_names) == 2:
confusion_dict = per_label_confusions.label_confusions_map
# Since MCC is symmetric, it doesn't matter which label is 0 and which is 1
TP = confusion_dict[label_names[0]].TP
FP = confusion_dict[label_names[0]].FP
FN = confusion_dict[label_names[0]].FN
TN = confusion_dict[label_names[1]].TP
mcc: Optional[float] = compute_matthews_correlation_coefficients(TP, FP, FN, TN)
roc_auc: Optional[float] = compute_roc_auc(predictions)
else:
mcc = None
roc_auc = None
return ClassificationMetrics(
accuracy=accuracy,
macro_prf1_metrics=macro_prf1_metrics,
per_label_soft_scores=soft_metrics,
mcc=mcc,
roc_auc=roc_auc,
loss=loss,
)
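# Editorial sketch (invented data): end-to-end use with two labels; the label names
# "neg"/"pos" and the loss value are placeholders.
# >>> preds = [
# ...     LabelPrediction([0.8, 0.2], 0, 0),
# ...     LabelPrediction([0.3, 0.7], 1, 1),
# ...     LabelPrediction([0.6, 0.4], 0, 1),
# ... ]
# >>> m = compute_classification_metrics(preds, ["neg", "pos"], loss=0.42)
# >>> round(m.accuracy, 4)
# 0.6667
# >>> m.macro_prf1_metrics.macro_scores.num_labels
# 2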
def compute_multi_label_classification_metrics(
predictions: Sequence[LabelListPrediction],
label_names: Sequence[str],
loss: float,
average_precisions: bool = True,
recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,
) -> ClassificationMetrics:
"""
A general function that computes classification metrics given a list of multi-label
predictions.
Args:
predictions: multi-label predictions,
including the confidence score for each label.
label_names: Indexed label names.
average_precisions: Whether to compute average precisions for labels or not.
Defaults to True.
recall_at_precision_thresholds: precision thresholds at which
to calculate recall
Returns:
ClassificationMetrics which contains various classification metrics.
"""
num_correct = 0
num_expected_labels = 0
per_label_confusions = PerLabelConfusions()
for _, predicted, expected in predictions:
# "predicted" is in the format of n_hot_encoding
# Calculate TP & FN
for true_label_idx in expected:
if true_label_idx < 0:
# padded label "-1"
break
num_expected_labels += 1
expected_label = label_names[true_label_idx]
if predicted[true_label_idx] == 1:
num_correct += 1
per_label_confusions.update(expected_label, "TP", 1)
else:
per_label_confusions.update(expected_label, "FN", 1)
# Calculate FP
for idx, pred in enumerate(predicted):
if pred == 1 and idx not in expected:
predicted_label = label_names[idx]
per_label_confusions.update(predicted_label, "FP", 1)
accuracy = safe_division(num_correct, num_expected_labels)
macro_prf1_metrics = per_label_confusions.compute_metrics()
soft_metrics = (
compute_multi_label_soft_metrics(
predictions, label_names, recall_at_precision_thresholds
)
if average_precisions
else None
)
if len(label_names) == 2:
confusion_dict = per_label_confusions.label_confusions_map
# Since MCC is symmetric, it doesn't matter which label is 0 and which is 1
TP = confusion_dict[label_names[0]].TP
FP = confusion_dict[label_names[0]].FP
FN = confusion_dict[label_names[0]].FN
TN = confusion_dict[label_names[1]].TP
mcc: Optional[float] = compute_matthews_correlation_coefficients(TP, FP, FN, TN)
roc_auc: Optional[float] = compute_roc_auc(predictions)
else:
mcc = None
roc_auc = None
return ClassificationMetrics(
accuracy=accuracy,
macro_prf1_metrics=macro_prf1_metrics,
per_label_soft_scores=soft_metrics,
mcc=mcc,
roc_auc=roc_auc,
loss=loss,
)
def compute_pairwise_ranking_metrics(
predictions: Sequence[int], scores: Sequence[float]
) -> PairwiseRankingMetrics:
"""
Computes metrics for pairwise ranking given sequences of predictions and scores
Args:
predictions : 1 if ranking was correct, 0 if ranking was incorrect
scores : score(higher-ranked-sample) - score(lower-ranked-sample)
Returns:
PairwiseRankingMetrics object
"""
return PairwiseRankingMetrics(
num_examples=len(predictions),
accuracy=safe_division(sum(predictions), len(predictions)),
average_score_difference=safe_division(sum(scores), len(predictions)),
)
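# Editorial sketch (invented outcomes): three of four pairs ranked correctly and an
# average score margin of 0.25.
# >>> compute_pairwise_ranking_metrics([1, 1, 0, 1], [0.5, 0.25, -0.25, 0.5])
# PairwiseRankingMetrics(num_examples=4, accuracy=0.75, average_score_difference=0.25)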
def compute_regression_metrics(
predictions: Sequence[float], targets: Sequence[float]
) -> RegressionMetrics:
"""
Computes metrics for regression tasks.
Args:
predictions: 1-D sequence of float predictions
targets: 1-D sequence of float labels
Returns:
RegressionMetrics object
"""
preds, targs = np.array(predictions), np.array(targets)
pred_mean, targ_mean = preds.mean(), targs.mean()
covariance = (preds - pred_mean).dot(targs - targ_mean) / preds.size
corr = covariance / preds.std() / targs.std()
mse = np.square(preds - targs).mean()
return RegressionMetrics(num_examples=len(preds), pearson_correlation=corr, mse=mse)
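# Editorial sketch (invented data): perfectly correlated predictions and targets.
# >>> m = compute_regression_metrics([1.0, 2.0, 3.0], [2.0, 4.0, 6.0])
# >>> m.num_examples, round(float(m.pearson_correlation), 4), round(float(m.mse), 4)
# (3, 1.0, 4.6667)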
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
from collections import defaultdict
from json import dumps as json_dumps
from typing import (
Any,
DefaultDict,
Dict,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
from pytext.utils import cuda
from pytext.utils.ascii_table import ascii_table, ascii_table_from_dict
RECALL_AT_PRECISION_THRESHOLDS = [0.2, 0.4, 0.6, 0.8, 0.9]
"""
Basic metric classes and functions for single-label prediction problems.
Extending to multi-label support
"""
class LabelPrediction(NamedTuple):
"""
Label predictions of an example.
Attributes:
label_scores: Confidence scores that each label receives.
predicted_label: Index of the predicted label. This is usually the label with
the highest confidence score in label_scores.
expected_label: Index of the true label.
"""
label_scores: List[float]
predicted_label: int
expected_label: int
class LabelListPrediction(NamedTuple):
"""
Label list predictions of an example.
Attributes:
label_scores: Confidence scores that each label receives.
predicted_label: List of indices of the predicted label.
expected_label: List of indices of the true label.
"""
label_scores: List[float]
predicted_label: List[int]
expected_label: List[int]
class PRF1Scores(NamedTuple):
"""
Precision/recall/F1 scores for a collection of predictions.
Attributes:
true_positives: Number of true positives.
false_positives: Number of false positives.
false_negatives: Number of false negatives.
precision: TP / (TP + FP).
recall: TP / (TP + FN).
f1: 2 * TP / (2 * TP + FP + FN).
"""
true_positives: int
false_positives: int
false_negatives: int
precision: float
recall: float
f1: float
class SoftClassificationMetrics(NamedTuple):
"""
Classification scores that are independent of thresholds.
"""
average_precision: float
recall_at_precision: Dict[float, float]
decision_thresh_at_precision: Dict[float, float]
roc_auc: Optional[float]
class MacroPRF1Scores(NamedTuple):
"""
Macro precision/recall/F1 scores (averages across each label).
Attributes:
num_label: Number of distinct labels.
precision: Equally weighted average of precisions for each label.
recall: Equally weighted average of recalls for each label.
f1: Equally weighted average of F1 scores for each label.
"""
num_labels: int
precision: float
recall: float
f1: float
class MacroPRF1Metrics(NamedTuple):
"""
Aggregated metric class for macro precision/recall/F1 scores.
Attributes:
per_label_scores: Mapping from label string to the corresponding
precision/recall/F1 scores.
macro_scores: Macro precision/recall/F1 scores across the labels in
`per_label_scores`.
"""
per_label_scores: Dict[str, PRF1Scores]
macro_scores: MacroPRF1Scores
def print_metrics(self, indentation="") -> None:
print(
ascii_table(
[
{
"label": label,
"precision": f"{metrics.precision:.2f}",
"recall": f"{metrics.recall:.2f}",
"f1": f"{metrics.f1:.2f}",
"support": metrics.true_positives + metrics.false_negatives,
}
for label, metrics in sorted(self.per_label_scores.items())
],
human_column_names={
"label": "Label",
"precision": "Precision",
"recall": "Recall",
"f1": "F1",
"support": "Support",
},
footer={
"label": "Overall macro scores",
"precision": f"{self.macro_scores.precision:.2f}",
"recall": f"{self.macro_scores.recall:.2f}",
"f1": f"{self.macro_scores.f1:.2f}",
},
alignments={"label": "<"},
indentation=indentation,
)
)
class PRF1Metrics(NamedTuple):
"""
Metric class for all types of precision/recall/F1 scores.
Attributes:
per_label_scores: Map from label string to the corresponding precision/recall/F1
scores.
macro_scores: Macro precision/recall/F1 scores across the labels in
`per_label_scores`.
micro_scores: Micro (regular) precision/recall/F1 scores for the same
collection of predictions.
"""
per_label_scores: Dict[str, PRF1Scores]
macro_scores: MacroPRF1Scores
micro_scores: PRF1Scores
def print_metrics(self) -> None:
res = (
f"\t{'Per label scores':<40}"
f"\t{'Precision':<10}"
f"\t{'Recall':<10}"
f"\t{'F1':<10}"
f"\t{'Support':<10}\n\n"
)
for label, label_metrics in self.per_label_scores.items():
support = label_metrics.true_positives + label_metrics.false_negatives
res += (
f"\t{label:<40}"
f"\t{label_metrics.precision * 100:<10.3f}"
f"\t{label_metrics.recall * 100:<10.3f}"
f"\t{label_metrics.f1 * 100:<10.3f}"
f"\t{support:<10}\n"
)
support = self.micro_scores.true_positives + self.micro_scores.false_negatives
res += (
f"\n\t{'Overall micro scores':<40}"
f"\t{self.micro_scores.precision * 100:<10.3f}"
f"\t{self.micro_scores.recall * 100:<10.3f}"
f"\t{self.micro_scores.f1 * 100:<10.3f}"
f"\t{support:<10}\n"
)
res += (
f"\t{'Overall macro scores':<40}"
f"\t{self.macro_scores.precision * 100:<10.3f}"
f"\t{self.macro_scores.recall * 100:<10.3f}"
f"\t{self.macro_scores.f1 * 100:<10.3f}\n"
)
print(res)
class ClassificationMetrics(NamedTuple):
"""
Metric class for various classification metrics.
Attributes:
accuracy: Overall accuracy of predictions.
macro_prf1_metrics: Macro precision/recall/F1 scores.
per_label_soft_scores: Per label soft metrics.
mcc: Matthews correlation coefficient.
roc_auc: Area under the Receiver Operating Characteristic curve.
loss: Training loss (only used for selecting best model, no need to print).
"""
accuracy: float
macro_prf1_metrics: MacroPRF1Metrics
per_label_soft_scores: Optional[Dict[str, SoftClassificationMetrics]]
mcc: Optional[float]
roc_auc: Optional[float]
loss: float
def print_metrics(self, report_pep=False) -> None:
print(f"Accuracy: {self.accuracy * 100:.2f}")
print("\nSoft Metrics:")
if self.per_label_soft_scores:
soft_scores = [
{
"label": label,
"avg_pr": f"{metrics.average_precision:.3f}",
"roc_auc": f"{(metrics.roc_auc or 0.0):.3f}",
}
for label, metrics in sorted(self.per_label_soft_scores.items())
]
columns = {
"label": "Label",
"avg_pr": "Average precision",
"roc_auc": "ROC AUC",
}
print(ascii_table(soft_scores, columns))
all_thresholds = set(
itertools.chain.from_iterable(
metrics.recall_at_precision
for metrics in self.per_label_soft_scores.values()
)
)
print("\nRecall at Precision")
print(
ascii_table(
(
dict(
{"label": label},
**{
str(p): f"{r:.3f}"
for p, r in metrics.recall_at_precision.items()
},
)
for label, metrics in sorted(self.per_label_soft_scores.items())
),
dict(
{"label": "Label"},
**{str(t): f"R@P {t}" for t in all_thresholds},
),
alignments={"label": "<"},
)
)
if self.mcc:
print(f"\nMatthews correlation coefficient: {self.mcc :.3f}")
if self.roc_auc:
print(f"\nROC AUC: {self.roc_auc:.3f}")
if report_pep:
self.print_pep()
def print_pep(self):
metrics = {"Accuracy": f"{self.accuracy * 100:.2f}"}
if self.roc_auc:
metrics["ROC AUC"] = f"{self.roc_auc :.3f}"
for key, value in metrics.items():
info = {"type": "NET", "metric": key, "unit": "None", "value": value}
print("PyTorchObserver " + json_dumps(info))
class Confusions:
"""
Confusion information for a collection of predictions.
Attributes:
TP: Number of true positives.
FP: Number of false positives.
FN: Number of false negatives.
"""
__slots__ = "TP", "FP", "FN"
def __init__(self, TP: int = 0, FP: int = 0, FN: int = 0) -> None:
self.TP: int = TP
self.FP: int = FP
self.FN: int = FN
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Confusions):
return NotImplemented
return self.TP == other.TP and self.FP == other.FP and self.FN == other.FN
def __add__(self, other: "Confusions") -> "Confusions":
return Confusions(
TP=self.TP + other.TP, FP=self.FP + other.FP, FN=self.FN + other.FN
)
def __iadd__(self, other: "Confusions") -> "Confusions":
self.TP += other.TP
self.FP += other.FP
self.FN += other.FN
return self
def _asdict(self) -> Dict:
return {"TP": self.TP, "FP": self.FP, "FN": self.FN}
def compute_metrics(self) -> PRF1Scores:
precision, recall, f1 = compute_prf1(self.TP, self.FP, self.FN)
return PRF1Scores(
true_positives=self.TP,
false_positives=self.FP,
false_negatives=self.FN,
precision=precision,
recall=recall,
f1=f1,
)
class PerLabelConfusions:
"""
Per label confusion information.
Attributes:
label_confusions_map: Map from label string to the corresponding confusion
counts.
"""
__slots__ = "label_confusions_map"
def __init__(self) -> None:
self.label_confusions_map: DefaultDict[str, Confusions] = defaultdict(
Confusions
)
def update(self, label: str, item: str, count: int) -> None:
"""
Increase one of TP, FP or FN count for a label by certain amount.
Args:
label: Label to be modified.
item: Type of count to be modified, should be one of "TP", "FP" or "FN".
count: Amount to be added to the count.
Returns:
None
"""
confusions = self.label_confusions_map[label]
setattr(confusions, item, getattr(confusions, item) + count)
def compute_metrics(self) -> MacroPRF1Metrics:
per_label_scores: Dict[str, PRF1Scores] = {}
precision_sum, recall_sum, f1_sum = 0.0, 0.0, 0.0
for label, confusions in sorted(self.label_confusions_map.items()):
scores = confusions.compute_metrics()
per_label_scores[label] = scores
if confusions.TP + confusions.FN > 0:
precision_sum += scores.precision
recall_sum += scores.recall
f1_sum += scores.f1
num_labels = len(self.label_confusions_map)
return MacroPRF1Metrics(
per_label_scores=per_label_scores,
macro_scores=MacroPRF1Scores(
num_labels=num_labels,
precision=safe_division(precision_sum, num_labels),
recall=safe_division(recall_sum, num_labels),
f1=safe_division(f1_sum, num_labels),
),
)
class AllConfusions:
"""
Aggregated class for per label confusions.
Attributes:
per_label_confusions: Per label confusion information.
confusions: Overall TP, FP and FN counts across the labels in
`per_label_confusions`.
"""
__slots__ = "per_label_confusions", "confusions"
def __init__(self) -> None:
self.per_label_confusions = PerLabelConfusions()
self.confusions = Confusions()
def compute_metrics(self) -> PRF1Metrics:
per_label_metrics = self.per_label_confusions.compute_metrics()
return PRF1Metrics(
per_label_scores=per_label_metrics.per_label_scores,
macro_scores=per_label_metrics.macro_scores,
micro_scores=self.confusions.compute_metrics(),
)
class PairwiseRankingMetrics(NamedTuple):
"""
Metric class for pairwise ranking
Attributes:
num_examples (int): number of samples
accuracy (float): how many times did we rank in the correct order
average_score_difference (float): average score(higherRank) - score(lowerRank)
"""
num_examples: int
accuracy: float
average_score_difference: float
def print_metrics(self) -> None:
print(f"RankingAccuracy: {self.accuracy * 100:.2f}")
print(f"AvgScoreDiff: {self.average_score_difference}")
print(f"NumExamples: {self.num_examples}")
class RegressionMetrics(NamedTuple):
"""
Metrics for regression tasks.
Attributes:
num_examples (int): number of examples
pearson_correlation (float): correlation between predictions and labels
mse (float): mean-squared error between predictions and labels
"""
num_examples: int
pearson_correlation: float
mse: float
def print_metrics(self):
print(f"Num examples: {self.num_examples}")
print(f"Pearson correlation: {self.pearson_correlation:.3f}")
print(f"Mean squared error: {self.mse:.3f}")
class RealtimeMetrics(NamedTuple):
"""
Realtime Metrics for tracking training progress and performance.
Attributes:
samples (int): number of samples
tps (float): tokens per second
ups (float): updates per second
"""
samples: int
tps: float
ups: float
def _format(self, key, value):
if key in ("tps", "ups"):
return round(value)
return value
def __str__(self):
metrics = {"num_gpus": cuda.DISTRIBUTED_WORLD_SIZE}
for key, value in self._asdict().items():
if not value:
continue
metrics[key] = self._format(key, value)
return str(metrics)
def safe_division(n: Union[int, float], d: int) -> float:
return float(n) / d if d else 0.0
def compute_prf1(tp: int, fp: int, fn: int) -> Tuple[float, float, float]:
precision = safe_division(tp, tp + fp)
recall = safe_division(tp, tp + fn)
f1 = safe_division(2 * tp, 2 * tp + fp + fn)
return (precision, recall, f1)
def average_precision_score(
y_true_sorted: np.ndarray, y_score_sorted: np.ndarray
) -> float:
"""
Computes average precision, which summarizes the precision-recall curve as the
precisions achieved at each threshold weighted by the increase in recall since the
previous threshold.
Args:
y_true_sorted: Numpy array sorted according to decreasing confidence scores
indicating whether each prediction is correct.
y_score_sorted Numpy array of confidence scores for the predictions in
decreasing order.
Returns:
Average precision score.
TODO: This is too slow, improve the performance
"""
ap = 0.0
tp = 0
threshold = y_score_sorted[0]
y_score_sorted = np.append(y_score_sorted[1:], np.NAN)
total_positive = np.sum(y_true_sorted)
added_positives = 0
for k, (label, score) in enumerate(zip(y_true_sorted, y_score_sorted)):
if label:
added_positives += 1
if score != threshold:
threshold = score
recall_diff = added_positives / total_positive
tp += added_positives
added_positives = 0
p_at_tresh = tp / (k + 1)
ap += p_at_tresh * recall_diff
return float(ap)
def sort_by_score(y_true_list: Sequence[bool], y_score_list: Sequence[float]):
y_true = np.array(y_true_list)
y_score = np.array(y_score_list)
sort_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_true = y_true[sort_indices]
y_score = y_score[sort_indices]
return y_true, y_score
def recall_at_precision(
y_true_sorted: np.ndarray, y_score_sorted: np.ndarray, thresholds: Sequence[float]
) -> Dict[float, float]:
"""
Computes recall at various precision levels
Args:
y_true_sorted: Numpy array sorted according to decreasing confidence scores
indicating whether each prediction is correct.
y_score_sorted: Numpy array of confidence scores for the predictions in
decreasing order.
thresholds: Sequence of floats indicating the requested precision thresholds
Returns:
Dictionary of maximum recall at requested precision thresholds.
"""
y_score_shift = np.append(y_score_sorted[1:], np.nan)
score_change = (y_score_sorted - y_score_shift) != 0
cum_sum = np.cumsum(y_true_sorted)
recall_at_precision_dict = {t: 0.0 for t in thresholds}
decision_thresh_at_precision_dict = {t: 0.0 for t in thresholds}
sum_y_true = y_true_sorted.sum()
if sum_y_true == 0:
return recall_at_precision_dict, decision_thresh_at_precision_dict
recall = cum_sum / sum_y_true
precision = cum_sum / np.array(range(1, len(y_true_sorted) + 1))
for threshold in thresholds:
meets_requirements = np.logical_and(precision >= threshold, score_change)
if not np.any(meets_requirements):
continue
recall_at_precision_dict[threshold] = float(
max(np.extract(meets_requirements, recall))
)
decision_thresh_at_precision_dict[threshold] = float(
min(np.extract(meets_requirements, y_score_sorted))
)
return recall_at_precision_dict, decision_thresh_at_precision_dict
def compute_soft_metrics(
predictions: Sequence[LabelPrediction],
label_names: Sequence[str],
recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,
) -> Dict[str, SoftClassificationMetrics]:
"""
Computes soft classification metrics (for now, average precision) given a list of
label predictions.
Args:
predictions: Label predictions, including the confidence score for each label.
label_names: Indexed label names.
recall_at_precision_thresholds: precision thresholds at which to calculate
recall
Returns:
Dict from label strings to their corresponding soft metrics.
"""
soft_metrics = {}
for i, label_name in enumerate(label_names):
y_true = []
y_score = []
for label_scores, _, expected in predictions:
y_true.append(expected == i)
y_score.append(label_scores[i])
y_true_sorted, y_score_sorted = sort_by_score(y_true, y_score)
ap = average_precision_score(y_true_sorted, y_score_sorted)
recall_at_precision_dict, decision_thresh_at_precision = recall_at_precision(
y_true_sorted, y_score_sorted, recall_at_precision_thresholds
)
roc_auc = compute_roc_auc(predictions, target_class=i)
soft_metrics[label_name] = SoftClassificationMetrics(
average_precision=ap,
recall_at_precision=recall_at_precision_dict,
decision_thresh_at_precision=decision_thresh_at_precision,
roc_auc=roc_auc,
)
return soft_metrics
def compute_multi_label_soft_metrics(
predictions: Sequence[LabelListPrediction],
label_names: Sequence[str],
recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,
) -> Dict[str, SoftClassificationMetrics]:
"""
Computes multi-label soft classification metrics
(for now, average precision)
Args:
predictions: multi-label predictions,
including the confidence score for each label.
label_names: Indexed label names.
recall_at_precision_thresholds: precision thresholds at which to calculate
recall
Returns:
Dict from label strings to their corresponding soft metrics.
"""
soft_metrics = {}
for i, label_name in enumerate(label_names):
y_true = []
y_score = []
for label_scores, _, expected in predictions:
y_true.append(i in expected)
y_score.append(label_scores[i])
y_true_sorted, y_score_sorted = sort_by_score(y_true, y_score)
ap = average_precision_score(y_true_sorted, y_score_sorted)
recall_at_precision_dict, decision_thresh_at_precision = recall_at_precision(
y_true_sorted, y_score_sorted, recall_at_precision_thresholds
)
roc_auc = compute_roc_auc(predictions, target_class=i)
soft_metrics[label_name] = SoftClassificationMetrics(
average_precision=ap,
recall_at_precision=recall_at_precision_dict,
decision_thresh_at_precision=decision_thresh_at_precision,
roc_auc=roc_auc,
)
return soft_metrics
def compute_matthews_correlation_coefficients(
TP: int, FP: int, FN: int, TN: int
) -> float:
"""
Computes Matthews correlation coefficient, a way to summarize all four counts (TP,
FP, FN, TN) in the confusion matrix of binary classification.
Args:
TP: Number of true positives.
FP: Number of false positives.
FN: Number of false negatives.
TN: Number of true negatives.
Returns:
Matthews correlation coefficient, which is `sqrt((TP + FP) * (TP + FN) *
(TN + FP) * (TN + FN))`.
"""
mcc = safe_division(
(TP * TN) - (FP * FN),
np.sqrt(float((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))),
)
return mcc
def compute_roc_auc(
predictions: Sequence[LabelPrediction], target_class: int = 0
) -> Optional[float]:
"""
Computes area under the Receiver Operating Characteristic curve, for binary
classification. Implementation based off of (and explained at)
https://www.ibm.com/developerworks/community/blogs/jfp/entry/Fast_Computation_of_AUC_ROC_score?lang=en.
"""
# Collect scores
y_true = [expected == target_class for _, _, expected in predictions]
y_score = [label_scores[target_class] for label_scores, _, _ in predictions]
y_true_sorted, _ = sort_by_score(y_true, y_score)
# Compute auc as probability that a positive example is scored higher than
# a negative example.
n_false = 0
n_correct_pair_order = 0
for y in reversed(y_true_sorted): # want low predicted to high predicted
if y:
n_correct_pair_order += n_false
else:
n_false += 1
n_true = len(y_true) - n_false
if n_true == 0 or n_false == 0:
return None
return float(n_correct_pair_order / (n_true * n_false))
def compute_classification_metrics(
predictions: Sequence[LabelPrediction],
label_names: Sequence[str],
loss: float,
average_precisions: bool = True,
recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,
) -> ClassificationMetrics:
"""
A general function that computes classification metrics given a list of label
predictions.
Args:
predictions: Label predictions, including the confidence score for each label.
label_names: Indexed label names.
average_precisions: Whether to compute average precisions for labels or not.
Defaults to True.
recall_at_precision_thresholds: precision thresholds at which to calculate recall
Returns:
ClassificationMetrics which contains various classification metrics.
"""
num_correct = 0
per_label_confusions = PerLabelConfusions()
for _, predicted, expected in predictions:
predicted_label = label_names[predicted]
expected_label = label_names[expected]
if predicted_label == expected_label:
num_correct += 1
per_label_confusions.update(expected_label, "TP", 1)
else:
per_label_confusions.update(expected_label, "FN", 1)
per_label_confusions.update(predicted_label, "FP", 1)
accuracy = safe_division(num_correct, len(predictions))
macro_prf1_metrics = per_label_confusions.compute_metrics()
soft_metrics = (
compute_soft_metrics(predictions, label_names, recall_at_precision_thresholds)
if average_precisions
else None
)
if len(label_names) == 2:
confusion_dict = per_label_confusions.label_confusions_map
# Since MCC is symmetric, it doesn't matter which label is 0 and which is 1
TP = confusion_dict[label_names[0]].TP
FP = confusion_dict[label_names[0]].FP
FN = confusion_dict[label_names[0]].FN
TN = confusion_dict[label_names[1]].TP
mcc: Optional[float] = compute_matthews_correlation_coefficients(TP, FP, FN, TN)
roc_auc: Optional[float] = compute_roc_auc(predictions)
else:
mcc = None
roc_auc = None
return ClassificationMetrics(
accuracy=accuracy,
macro_prf1_metrics=macro_prf1_metrics,
per_label_soft_scores=soft_metrics,
mcc=mcc,
roc_auc=roc_auc,
loss=loss,
)
def compute_multi_label_classification_metrics(
predictions: Sequence[LabelListPrediction],
label_names: Sequence[str],
loss: float,
average_precisions: bool = True,
recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,
) -> ClassificationMetrics:
"""
A general function that computes classification metrics given a list of multi-label
predictions.
Args:
predictions: multi-label predictions,
including the confidence score for each label.
label_names: Indexed label names.
average_precisions: Whether to compute average precisions for labels or not.
Defaults to True.
recall_at_precision_thresholds: precision thresholds at which
to calculate recall
Returns:
ClassificationMetrics which contains various classification metrics.
"""
num_correct = 0
num_expected_labels = 0
per_label_confusions = PerLabelConfusions()
for _, predicted, expected in predictions:
# "predicted" is in the format of n_hot_encoding
# Calculate TP & FN
for true_label_idx in expected:
if true_label_idx < 0:
# padded label "-1"
break
num_expected_labels += 1
expected_label = label_names[true_label_idx]
if predicted[true_label_idx] == 1:
num_correct += 1
per_label_confusions.update(expected_label, "TP", 1)
else:
per_label_confusions.update(expected_label, "FN", 1)
# Calculate FP
for idx, pred in enumerate(predicted):
if pred == 1 and idx not in expected:
predicted_label = label_names[idx]
per_label_confusions.update(predicted_label, "FP", 1)
accuracy = safe_division(num_correct, num_expected_labels)
macro_prf1_metrics = per_label_confusions.compute_metrics()
soft_metrics = (
compute_multi_label_soft_metrics(
predictions, label_names, recall_at_precision_thresholds
)
if average_precisions
else None
)
if len(label_names) == 2:
confusion_dict = per_label_confusions.label_confusions_map
# Since MCC is symmetric, it doesn't matter which label is 0 and which is 1
TP = confusion_dict[label_names[0]].TP
FP = confusion_dict[label_names[0]].FP
FN = confusion_dict[label_names[0]].FN
TN = confusion_dict[label_names[1]].TP
mcc: Optional[float] = compute_matthews_correlation_coefficients(TP, FP, FN, TN)
roc_auc: Optional[float] = compute_roc_auc(predictions)
else:
mcc = None
roc_auc = None
return ClassificationMetrics(
accuracy=accuracy,
macro_prf1_metrics=macro_prf1_metrics,
per_label_soft_scores=soft_metrics,
mcc=mcc,
roc_auc=roc_auc,
loss=loss,
)
def compute_pairwise_ranking_metrics(
predictions: Sequence[int], scores: Sequence[float]
) -> PairwiseRankingMetrics:
"""
Computes metrics for pairwise ranking given sequences of predictions and scores
Args:
predictions : 1 if ranking was correct, 0 if ranking was incorrect
scores : score(higher-ranked-sample) - score(lower-ranked-sample)
Returns:
PairwiseRankingMetrics object
"""
return PairwiseRankingMetrics(
num_examples=len(predictions),
accuracy=safe_division(sum(predictions), len(predictions)),
average_score_difference=safe_division(sum(scores), len(predictions)),
)
def compute_regression_metrics(
predictions: Sequence[float], targets: Sequence[float]
) -> RegressionMetrics:
"""
Computes metrics for regression tasks.abs
Args:
predictions: 1-D sequence of float predictions
targets: 1-D sequence of float labels
Returns:
RegressionMetrics object
"""
preds, targs = np.array(predictions), np.array(targets)
pred_mean, targ_mean = preds.mean(), targs.mean()
covariance = (preds - pred_mean).dot(targs - targ_mean) / preds.size
corr = covariance / preds.std() / targs.std()
mse = np.square(preds - targs).mean()
return RegressionMetrics(num_examples=len(preds), pearson_correlation=corr, mse=mse)
|
"""Requires Python 3"""
# General imports
import os, sys, shutil
# Third-Party imports
from PySide2 import QtCore
import maya.cmds as cmds
from maya.app.startup import basic
import maya.utils
# Base path definitions
MODULENAME = "depthOfFieldTool"
DRAGGEDFROMPATH = os.path.dirname(__file__)
DEFAULTMODULEPATH = f"{os.environ["MAYA_APP_DIR"]}/modules"
DEFAULTSCRIPTSPATH = f"{os.environ["MAYA_APP_DIR"]}/scripts"
# Custom module path definitions
MODULESCRIPTSPATH = f"{DEFAULTMODULEPATH}/{MODULENAME}/scripts"
# List of required files to install
INSTALLATIONPACKAGE = [
f"{DRAGGEDFROMPATH}/{MODULENAME}/plug-ins/windows/2022/{MODULENAME}.mll",
f"{DRAGGEDFROMPATH}/{MODULENAME}/plug-ins/windows/2020/{MODULENAME}.mll",
f"{DRAGGEDFROMPATH}/{MODULENAME}/plug-ins/linux/2022/{MODULENAME}.so",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/{MODULENAME}Properties.mel",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/{MODULENAME}Values.mel",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/depthOfField.mel",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/userSetup.py",
f"{DRAGGEDFROMPATH}/{MODULENAME}/icons/{MODULENAME}.png",
f"{DRAGGEDFROMPATH}/{MODULENAME}.mod"
]
def validatePythonVersion():
"""Required python version validation function."""
if os.environ['MAYA_PYTHON_VERSION'] == "2":
raise RuntimeError("Drag and drop installer requires Python 3, aborting installation!")
def _validateInstallationFiles():
"""Checks if all required installation files exist in source."""
missingFilesList = []
for pkg in INSTALLATIONPACKAGE:
if not QtCore.QFileInfo(pkg).exists():
missingFilesList.append(pkg)
if missingFilesList:
raise RuntimeError(
f"Installation package reported missing files: {missingFilesList}, aborting!"
)
def _removePreviousModule():
installationDestination = QtCore.QDir(f"{DEFAULTMODULEPATH}/{MODULENAME}")
if installationDestination.exists():
installationDestination.removeRecursively()
previousModFile = QtCore.QFile(f"{DEFAULTMODULEPATH}/{MODULENAME}.mod")
if previousModFile.exists():
previousModFile.remove()
def _createDirsForCopying():
"""TODO: Create a proper recursive function for copying files over - temp workaround
but at least we don't have to deal with '\\' '/' slashes
"""
modulePath = QtCore.QDir(DEFAULTMODULEPATH)
modulePath.mkpath(f"{MODULENAME}/plug-ins/windows/2022/")
modulePath.mkpath(f"{MODULENAME}/plug-ins/windows/2020/")
modulePath.mkpath(f"{MODULENAME}/plug-ins/linux/2022/")  # destination for the linux/2022 plug-in listed above
modulePath.mkpath(f"{MODULENAME}/scripts/")
modulePath.mkpath(f"{MODULENAME}/icons/")
def clearMemory():
"""Clean the current sys.path and sys.modules from anything to do with MODULENAME."""
pathsList = sys.path[:]
for path in reversed(pathsList):
if MODULENAME.lower() in path.lower():
sys.path.remove(path)
for module in list(sys.modules):
if MODULENAME in module:
del sys.modules[module]
def createDialog(message="Default Message", title="Default Title", icon="question",
buttons=["Install", "Cancel"], cancelButton="Cancel") -> str:
"""Convenience wrapper for creating confirmDialog prompts."""
return(
cmds.confirmDialog(
title=title,
message=message,
icon=icon,
button=buttons,
cancelButton=cancelButton,
dismissString=cancelButton
)
)
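# Editorial sketch (illustrative values): asking for confirmation before a destructive step.
# choice = createDialog(
#     message="Remove the previous installation?",
#     title="Example",
#     buttons=["Continue", "Cancel"],
# )
# if choice == "Cancel":
#     ...  # abort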
def _finalizeInstallation():
"""Performs final installation procedures."""
clearMemory()
# Add path if its not already there
if not MODULESCRIPTSPATH in sys.path:
sys.path.append(MODULESCRIPTSPATH)
# Reload all the modules
cmds.loadModule(scan=True)
cmds.loadModule(allModules=True)
# Reload userSetup files
basic.executeUserSetup()
def onMayaDroppedPythonFile(*args, **kwargs):
"""Main function that runs when dragging the file into Maya.
Installation is performed by copying the module to the user preferences and creating
a module file.
"""
validatePythonVersion()
_validateInstallationFiles()
# Create install dialog
userChoice = createDialog(
message=f"This will install {MODULENAME} in:\n{DEFAULTMODULEPATH}",
title=f"{MODULENAME} Installer"
)
if userChoice == "Cancel": # Installation was cancelled
raise RuntimeError(f"Installation of {MODULENAME} has been cancelled!")
else: # Installation continues
_createDirsForCopying()
finished = True
for pkg in INSTALLATIONPACKAGE:
pkgQt = QtCore.QFile(pkg)
# Require every file copy to succeed before finalizing, not just the last one
finished = pkgQt.copy(pkg.replace(DRAGGEDFROMPATH, DEFAULTMODULEPATH)) and finished
if finished:
_finalizeInstallation()
| """Requires Python 3"""
# General imports
import os, sys, shutil
# Third-Party imports
from PySide2 import QtCore
import maya.cmds as cmds
from maya.app.startup import basic
import maya.utils
# Base path definitions
MODULENAME = "depthOfFieldTool"
DRAGGEDFROMPATH = os.path.dirname(__file__)
DEFAULTMODULEPATH = f"{os.environ['MAYA_APP_DIR']}/modules"
DEFAULTSCRIPTSPATH = f"{os.environ['MAYA_APP_DIR']}/scripts"
# Custom module path definitions
MODULESCRIPTSPATH = f"{DEFAULTMODULEPATH}/{MODULENAME}/scripts"
# List of required files to install
INSTALLATIONPACKAGE = [
f"{DRAGGEDFROMPATH}/{MODULENAME}/plug-ins/windows/2022/{MODULENAME}.mll",
f"{DRAGGEDFROMPATH}/{MODULENAME}/plug-ins/windows/2020/{MODULENAME}.mll",
f"{DRAGGEDFROMPATH}/{MODULENAME}/plug-ins/linux/2022/{MODULENAME}.so",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/{MODULENAME}Properties.mel",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/{MODULENAME}Values.mel",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/depthOfField.mel",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/userSetup.py",
f"{DRAGGEDFROMPATH}/{MODULENAME}/icons/{MODULENAME}.png",
f"{DRAGGEDFROMPATH}/{MODULENAME}.mod"
]
def validatePythonVersion():
"""Required python version validation function."""
if os.environ['MAYA_PYTHON_VERSION'] == "2":
raise RuntimeError("Drag and drop installer requires Python 3, aborting installation!")
def _validateInstallationFiles():
"""Checks if all required installation files exist in source."""
missingFilesList = []
for pkg in INSTALLATIONPACKAGE:
if not QtCore.QFileInfo(pkg).exists():
missingFilesList.append(pkg)
if missingFilesList:
raise RuntimeError(
f"Installation package reported missing files: {missingFilesList}, aborting!"
)
def _removePreviousModule():
installationDestination = QtCore.QDir(f"{DEFAULTMODULEPATH}/{MODULENAME}")
if installationDestination.exists():
installationDestination.removeRecursively()
previousModFile = QtCore.QFile(f"{DEFAULTMODULEPATH}/{MODULENAME}.mod")
if previousModFile.exists():
previousModFile.remove()
def _createDirsForCopying():
"""TODO: Create a proper recrusive functrion for copying files over - temp workaround
but at least we don't have to deal with '\\' '/' slashes
"""
modulePath = QtCore.QDir(DEFAULTMODULEPATH)
modulePath.mkpath(f"{MODULENAME}/plug-ins/windows/2022/")
modulePath.mkpath(f"{MODULENAME}/plug-ins/windows/2020/")
modulePath.mkpath(f"{MODULENAME}/plug-ins/linux/2020/")
modulePath.mkpath(f"{MODULENAME}/scripts/")
modulePath.mkpath(f"{MODULENAME}/icons/")
def clearMemory():
"""Clean the current sys.path and sys.modules from anything to do with MODULENAME."""
pathsList = sys.path[:]
for path in pathsList[::-1]:
if MODULENAME in path.lower():
sys.path.remove(path)
for module in list(sys.modules):
if MODULENAME in module:
del sys.modules[module]
def createDialog(message="Default Message", title="Default Title", icon="question",
buttons=["Install", "Cancel"], cancelButton="Cancel") -> str:
"""Convinience wrapper method for creating confirmDialogs."""
return(
cmds.confirmDialog(
title=title,
message=message,
icon=icon,
button=buttons,
cancelButton=cancelButton,
dismissString=cancelButton
)
)
def _finalizeInstallation():
"""Performs final installation procedures."""
clearMemory()
# Add path if it's not already there
if MODULESCRIPTSPATH not in sys.path:
sys.path.append(MODULESCRIPTSPATH)
# Reload all the modules
cmds.loadModule(scan=True)
cmds.loadModule(allModules=True)
# Reload userSetup files
basic.executeUserSetup()
def onMayaDroppedPythonFile(*args, **kwargs):
"""Main function that runs when dragging the file into Maya.
Installation is performed by copying the module to the user preferences and creating
a module file.
"""
validatePythonVersion()
_validateInstallationFiles()
# Create install dialog
user_choice = createDialog(
message=f"This will install {MODULENAME} in:\n{DEFAULTMODULEPATH}",
title=f"{MODULENAME} Installer"
)
if user_choice == "Cancel": # Installation was cancelled
raise RuntimeError(f"Installation of {MODULENAME} has been cancelled!")
else: # Installation continues
_createDirsForCopying()
finished = True
for pkg in INSTALLATIONPACKAGE:
pkgQt = QtCore.QFile(pkg)
# Track whether every file copied successfully, not just the last one
finished = pkgQt.copy(pkg.replace(DRAGGEDFROMPATH, DEFAULTMODULEPATH)) and finished
if finished:
_finalizeInstallation()
|
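The drag-and-drop installer above leaves a TODO about replacing the hard-coded mkpath calls with a proper recursive copy. Below is a minimal sketch of what such a helper could look like with the same PySide2 QtCore API; the helper name _copyTree and its wiring are illustrative assumptions, not part of the original module. Note that QtCore.QFile.copy does not overwrite existing files, so a real installer would still clear the destination first, as _removePreviousModule does above.

from PySide2 import QtCore

def _copyTree(sourceDir, destinationDir):
    """Recursively copy sourceDir into destinationDir using QtCore (illustrative sketch only)."""
    QtCore.QDir(destinationDir).mkpath(".")
    entries = QtCore.QDir(sourceDir).entryInfoList(
        QtCore.QDir.Files | QtCore.QDir.Dirs | QtCore.QDir.NoDotAndDotDot
    )
    for info in entries:
        destinationPath = f"{destinationDir}/{info.fileName()}"
        if info.isDir():
            _copyTree(info.absoluteFilePath(), destinationPath)  # recurse into sub-directories
        else:
            QtCore.QFile.copy(info.absoluteFilePath(), destinationPath)  # copy a single file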
import re
import os
from typing import Optional, Union, List, Dict
from os.path import expandvars
from itertools import chain
from pathlib import Path
from pydantic import (
BaseModel,
SecretStr,
BaseSettings,
PositiveInt,
FilePath,
Field,
validator,
root_validator,
)
from . import consts
__all__ = [
"AppConfig",
"Credential",
"InventorySpec",
"OSNameSpec",
"LinterSpec",
"GitSpec",
"JumphostSpec",
]
_var_re = re.compile(
r"\${(?P<bname>[a-z0-9_]+)}" r"|" r"\$(?P<name>[^{][a-z_0-9]+)", flags=re.IGNORECASE
)
class NoExtraBaseModel(BaseModel):
class Config:
extra = "forbid"
class EnvExpand(str):
"""
When a string value contains a reference to an environment variable, use
this type to expand the contents of the variable using os.path.expandvars.
For example like:
password = "$MY_PASSWORD"
foo_password = "${MY_PASSWORD}_foo"
will be expanded, given MY_PASSWORD is set to 'boo!' in the environment:
password -> "boo!"
foo_password -> "boo!_foo"
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
if found_vars := list(filter(len, chain.from_iterable(_var_re.findall(v)))):
for var in found_vars:
if (var_val := os.getenv(var)) is None:
raise ValueError(f'Environment variable "{var}" missing.')
if not len(var_val):
raise ValueError(f'Environment variable "{var}" empty.')
return expandvars(v)
return v
class EnvSecretStr(EnvExpand, SecretStr):
@classmethod
def validate(cls, v):
return SecretStr.validate(EnvExpand.validate(v))
class Credential(NoExtraBaseModel):
username: EnvExpand
password: EnvSecretStr
class DefaultCredential(Credential, BaseSettings):
username: EnvExpand = Field(..., env="NETCFGBU_DEFAULT_USERNAME")
password: EnvSecretStr = Field(..., env="NETCFGBU_DEFAULT_PASSWORD")
class Defaults(NoExtraBaseModel, BaseSettings):
configs_dir: Optional[EnvExpand] = Field(..., env=("NETCFGBU_CONFIGSDIR", "PWD"))
plugins_dir: Optional[EnvExpand] = Field(..., env=("NETCFGBU_PLUGINSDIR", "PWD"))
inventory: EnvExpand = Field(..., env="NETCFGBU_INVENTORY")
credentials: DefaultCredential
@validator("inventory")
def _inventory_provided(cls, value): # noqa
if not len(value):
raise ValueError("inventory empty value not allowed")
return value
@validator("configs_dir")
def _configs_dir(cls, value): # noqa
return Path(value).absolute()
@validator("plugins_dir")
def _plugins_dir(cls, value): # noqa
if value == os.getenv("PWD") and "/plugins" not in value:
value = value + "/plugins"
return Path(value).absolute()
class FilePathEnvExpand(FilePath):
""" A FilePath field whose value can interpolated from env vars """
@classmethod
def __get_validators__(cls):
yield from EnvExpand.__get_validators__()
yield from FilePath.__get_validators__()
class GitSpec(NoExtraBaseModel):
name: Optional[str]
repo: EnvExpand
email: Optional[str]
username: Optional[EnvExpand]
password: Optional[EnvExpand]
token: Optional[EnvSecretStr]
deploy_key: Optional[FilePathEnvExpand]
deploy_passphrase: Optional[EnvSecretStr]
@validator("repo")
def validate_repo(cls, repo): # noqa
expected = ("https:", "git@")
if not repo.startswith(expected):
raise ValueError(
f"Bad repo URL [{repo}]: expected to start with {expected}."
)
return repo
@root_validator
def ensure_proper_auth(cls, values):
req = ("token", "deploy_key", "password")
auth_vals = list(filter(None, (values.get(auth) for auth in req)))
auth_c = len(auth_vals)
if not auth_c:
raise ValueError(
f'Missing one of required auth method fields: {'|'.join(req)}'
)
if auth_c > 1:
raise ValueError(f'Only one of {'|'.join(req)} allowed')
if values.get("deploy_passphrase") and not values.get("deploy_key"):
raise ValueError("deploy_key required when using deploy_passphrase")
return values
class OSNameSpec(NoExtraBaseModel):
credentials: Optional[List[Credential]]
pre_get_config: Optional[Union[str, List[str]]]
get_config: Optional[str]
connection: Optional[str]
linter: Optional[str]
timeout: PositiveInt = Field(consts.DEFAULT_GETCONFIG_TIMEOUT)
ssh_configs: Optional[Dict]
prompt_pattern: Optional[str]
class LinterSpec(NoExtraBaseModel):
config_starts_after: Optional[str]
config_ends_at: Optional[str]
class InventorySpec(NoExtraBaseModel):
name: Optional[str]
script: EnvExpand
@validator("script")
def validate_script(cls, script_exec): # noqa
script_bin, *script_vargs = script_exec.split()
if not os.path.isfile(script_bin):
raise ValueError(f"File not found: {script_bin}")
if not os.access(script_bin, os.X_OK):
raise ValueError(f"{script_bin} is not executable")
return script_exec
class JumphostSpec(NoExtraBaseModel):
proxy: str
name: Optional[str]
include: Optional[List[str]]
exclude: Optional[List[str]]
timeout: PositiveInt = Field(consts.DEFAULT_LOGIN_TIMEOUT)
@validator("name", always=True)
def _default_name(cls, value, values): # noqa
return values["proxy"] if not value else value
class AppConfig(NoExtraBaseModel):
defaults: Defaults
credentials: Optional[List[Credential]]
linters: Optional[Dict[str, LinterSpec]]
os_name: Optional[Dict[str, OSNameSpec]]
inventory: Optional[List[InventorySpec]]
logging: Optional[Dict]
ssh_configs: Optional[Dict]
git: Optional[List[GitSpec]]
jumphost: Optional[List[JumphostSpec]]
@validator("os_name")
def _linters(cls, v, values): # noqa
linters = values.get("linters") or {}
for os_name, os_spec in v.items():
if os_spec.linter and os_spec.linter not in linters:
raise ValueError(
f'OS spec "{os_name}" using undefined linter "{os_spec.linter}"'
)
return v
| import re
import os
from typing import Optional, Union, List, Dict
from os.path import expandvars
from itertools import chain
from pathlib import Path
from pydantic import (
BaseModel,
SecretStr,
BaseSettings,
PositiveInt,
FilePath,
Field,
validator,
root_validator,
)
from . import consts
__all__ = [
"AppConfig",
"Credential",
"InventorySpec",
"OSNameSpec",
"LinterSpec",
"GitSpec",
"JumphostSpec",
]
_var_re = re.compile(
r"\${(?P<bname>[a-z0-9_]+)}" r"|" r"\$(?P<name>[^{][a-z_0-9]+)", flags=re.IGNORECASE
)
class NoExtraBaseModel(BaseModel):
class Config:
extra = "forbid"
class EnvExpand(str):
"""
When a string value contains a reference to an environment variable, use
this type to expand the contents of the variable using os.path.expandvars.
For example like:
password = "$MY_PASSWORD"
foo_password = "${MY_PASSWORD}_foo"
will be expanded, given MY_PASSWORD is set to 'boo!' in the environment:
password -> "boo!"
foo_password -> "boo!_foo"
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
if found_vars := list(filter(len, chain.from_iterable(_var_re.findall(v)))):
for var in found_vars:
if (var_val := os.getenv(var)) is None:
raise ValueError(f'Environment variable "{var}" missing.')
if not len(var_val):
raise ValueError(f'Environment variable "{var}" empty.')
return expandvars(v)
return v
class EnvSecretStr(EnvExpand, SecretStr):
@classmethod
def validate(cls, v):
return SecretStr.validate(EnvExpand.validate(v))
class Credential(NoExtraBaseModel):
username: EnvExpand
password: EnvSecretStr
class DefaultCredential(Credential, BaseSettings):
username: EnvExpand = Field(..., env="NETCFGBU_DEFAULT_USERNAME")
password: EnvSecretStr = Field(..., env="NETCFGBU_DEFAULT_PASSWORD")
class Defaults(NoExtraBaseModel, BaseSettings):
configs_dir: Optional[EnvExpand] = Field(..., env=("NETCFGBU_CONFIGSDIR", "PWD"))
plugins_dir: Optional[EnvExpand] = Field(..., env=("NETCFGBU_PLUGINSDIR", "PWD"))
inventory: EnvExpand = Field(..., env="NETCFGBU_INVENTORY")
credentials: DefaultCredential
@validator("inventory")
def _inventory_provided(cls, value): # noqa
if not len(value):
raise ValueError("inventory empty value not allowed")
return value
@validator("configs_dir")
def _configs_dir(cls, value): # noqa
return Path(value).absolute()
@validator("plugins_dir")
def _plugins_dir(cls, value): # noqa
if value == os.getenv("PWD") and "/plugins" not in value:
value = value + "/plugins"
return Path(value).absolute()
class FilePathEnvExpand(FilePath):
""" A FilePath field whose value can interpolated from env vars """
@classmethod
def __get_validators__(cls):
yield from EnvExpand.__get_validators__()
yield from FilePath.__get_validators__()
class GitSpec(NoExtraBaseModel):
name: Optional[str]
repo: EnvExpand
email: Optional[str]
username: Optional[EnvExpand]
password: Optional[EnvExpand]
token: Optional[EnvSecretStr]
deploy_key: Optional[FilePathEnvExpand]
deploy_passphrase: Optional[EnvSecretStr]
@validator("repo")
def validate_repo(cls, repo): # noqa
expected = ("https:", "git@")
if not repo.startswith(expected):
raise ValueError(
f"Bad repo URL [{repo}]: expected to start with {expected}."
)
return repo
@root_validator
def ensure_proper_auth(cls, values):
req = ("token", "deploy_key", "password")
auth_vals = list(filter(None, (values.get(auth) for auth in req)))
auth_c = len(auth_vals)
if not auth_c:
raise ValueError(
f'Missing one of required auth method fields: {"|".join(req)}'
)
if auth_c > 1:
raise ValueError(f'Only one of {"|".join(req)} allowed')
if values.get("deploy_passphrase") and not values.get("deploy_key"):
raise ValueError("deploy_key required when using deploy_passphrase")
return values
class OSNameSpec(NoExtraBaseModel):
credentials: Optional[List[Credential]]
pre_get_config: Optional[Union[str, List[str]]]
get_config: Optional[str]
connection: Optional[str]
linter: Optional[str]
timeout: PositiveInt = Field(consts.DEFAULT_GETCONFIG_TIMEOUT)
ssh_configs: Optional[Dict]
prompt_pattern: Optional[str]
class LinterSpec(NoExtraBaseModel):
config_starts_after: Optional[str]
config_ends_at: Optional[str]
class InventorySpec(NoExtraBaseModel):
name: Optional[str]
script: EnvExpand
@validator("script")
def validate_script(cls, script_exec): # noqa
script_bin, *script_vargs = script_exec.split()
if not os.path.isfile(script_bin):
raise ValueError(f"File not found: {script_bin}")
if not os.access(script_bin, os.X_OK):
raise ValueError(f"{script_bin} is not executable")
return script_exec
class JumphostSpec(NoExtraBaseModel):
proxy: str
name: Optional[str]
include: Optional[List[str]]
exclude: Optional[List[str]]
timeout: PositiveInt = Field(consts.DEFAULT_LOGIN_TIMEOUT)
@validator("name", always=True)
def _default_name(cls, value, values): # noqa
return values["proxy"] if not value else value
class AppConfig(NoExtraBaseModel):
defaults: Defaults
credentials: Optional[List[Credential]]
linters: Optional[Dict[str, LinterSpec]]
os_name: Optional[Dict[str, OSNameSpec]]
inventory: Optional[List[InventorySpec]]
logging: Optional[Dict]
ssh_configs: Optional[Dict]
git: Optional[List[GitSpec]]
jumphost: Optional[List[JumphostSpec]]
@validator("os_name")
def _linters(cls, v, values): # noqa
linters = values.get("linters") or {}
for os_name, os_spec in v.items():
if os_spec.linter and os_spec.linter not in linters:
raise ValueError(
f'OS spec "{os_name}" using undefined linter "{os_spec.linter}"'
)
return v
|
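As a quick illustration of the EnvExpand and EnvSecretStr types defined above, the sketch below builds a Credential directly. It assumes the module above is importable; the environment variable name and the values are made up for the example.

import os

os.environ["MY_PASSWORD"] = "boo!"  # illustrative value, normally set outside the program

cred = Credential(username="admin", password="${MY_PASSWORD}_foo")
print(cred.username)                     # -> admin
print(cred.password.get_secret_value())  # -> boo!_foo (expanded, then wrapped as a secret)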
"""
## Pump curve fitting and drawing
- Establish an equation for the pump curve from measured points on the curve in the pump's data sheet
- Get the coefficients of the 2nd order polynomial describing the pump curve and determined via curve fitting
- Draw the pump curve in a diagram
"""
from typing import List, Tuple, Dict, Optional
import numpy as np
import quantities as qty
from nummath.interpolation import PolyFit
from nummath.graphing2 import LineGraph
class PumpCurve:
def __init__(self, dest_units: Dict[str, str]):
"""
Create *PumpCurve* object.
**Parameters:**
- `dest_units`: (*Dict[str, str]*)<br>
The measuring units in which the pump curve will be expressed. Keys:
+ 'flow_rate'
+ 'pressure'
"""
self._meas_points: List[Tuple[qty.VolumeFlowRate, qty.Pressure]] = []
self._dest_units: Dict[str, str] = dest_units
self._coefficients: Optional[np.array] = None
def add_measuring_points(self, points: List[Tuple[float, float]], units: Dict[str, str]):
"""
Add some data points taken from the pump curve in the data sheet. This will execute the curve fitting
algorithm that approaches the pump curve with a 2nd order polynomial.
**Parameters:**
- `points`: (*List[Tuple[float, float]]*)<br>
List of tuples. The 1st element of the tuple is flow rate, the 2nd element is pressure.
- `units`: (*Dict[str, str]*)<br>
Dictionary that contains the measuring units in which the values of the data points are expressed. Keys:
+ 'flow_rate'
+ 'pressure'
"""
self._meas_points = [
(qty.VolumeFlowRate(V, units['flow_rate']), qty.Pressure(p, units['pressure'])) for V, p in points
]
self._curve_fitting()
def _curve_fitting(self):
pf = PolyFit(
x_data=[V(self._dest_units['flow_rate']) for V, _ in self._meas_points],
y_data=[p(self._dest_units['pressure']) for _, p in self._meas_points],
m=2
)
self._coefficients = pf.solve()
def get_coefficients(self, units: Optional[Dict[str, str]] = None) -> Optional[List[float]]:
"""
Get the coefficients of the 2nd order polynomial describing the pump curve.
**Parameters:**
- `units`: (*Optional[Dict[str, str]]*)<br>
Optional dictionary that contains the measuring units in which the returned coefficients must be expressed.
Default is None, which means that the coefficients will be returned expressed in the measuring units passed in
at the instantiation of the *PumpCurve* object. Keys:
+ 'flow_rate'
+ 'pressure'
"""
if units is not None:
p_src = qty.Pressure(1.0, self._dest_units['pressure'])
V_src = qty.VolumeFlowRate(1.0, self._dest_units['flow_rate'])
p_des = p_src(units['pressure'])
V_des = V_src(units['flow_rate'])
else:
p_des = 1.0
V_des = 1.0
a0 = self._coefficients[0] * p_des
a1 = self._coefficients[1] * (p_des / V_des)
a2 = self._coefficients[2] * (p_des / V_des ** 2)
return [a0, a1, a2]
def set_coefficients(self, coeff: Tuple[float, float, float], units: Dict[str, str]):
"""
Set the known coefficients of the 2nd order polynomial describing the pump curve.
**Parameters:**
- `coeff`: (*Tuple[float, float, float]*)<br>
Tuple of 3 floats: a0, a1 and a2 as in the equation dp_pump = a0 + a1 * V + a2 * V **2
- `units`: (*Dict[str, str]*)<br>
Dictionary that contains the measuring units in which the pump coefficients are expressed. Keys:
+ 'flow_rate'
+ 'pressure'
"""
p_src = qty.Pressure(1.0, units['pressure'])
V_src = qty.VolumeFlowRate(1.0, units['flow_rate'])
p_des = p_src(self._dest_units['pressure'])
V_des = V_src(self._dest_units['flow_rate'])
a0 = coeff[0] * p_des
a1 = coeff[1] * (p_des / V_des)
a2 = coeff[2] * (p_des / V_des ** 2)
self._coefficients = np.array([a0, a1, a2])
def create_pump_curve(self, V_initial: qty.VolumeFlowRate, V_final: qty.VolumeFlowRate, num: int = 50):
"""
Calculate the pump curve between an initial and final flow rate.
**Parameters:**
- `V_initial`: (*quantities.VolumeFlowRate*) = initial flow rate
- `V_final`: (*quantities.VolumeFlowRate*) = final flow rate
- `num`: (*int*) = number of calculation points (default = 50)
**Returns:** (*Tuple[np.array, np.array]*)
Tuple with 1st element a numpy array of the flow rates and 2nd element a numpy array of the corresponding
pressures, both expressed in the desired measuring units set at instantiation of the *PumpCurve*-object.
"""
V_i = V_initial(self._dest_units['flow_rate'])
V_f = V_final(self._dest_units['flow_rate'])
V = np.linspace(V_i, V_f, num, endpoint=True)
a0, a1, a2 = self._coefficients
p = a0 + a1 * V + a2 * V ** 2
return V, p
def draw_pump_curve(self, V_initial: qty.VolumeFlowRate, V_final: qty.VolumeFlowRate, **kwargs):
"""
Draw the calculated pump curve.
**Parameters:**
- `V_initial`: (*quantities.VolumeFlowRate*) = initial flow rate
- `V_final`: (*quantities.VolumeFlowRate*) = final flow rate
- `kwargs`: optional keyword arguments
+ `fig_size`: (*Tuple[float, float]*) = the width and height of the figure in inches
+ `dpi`: (*int*) = dots per inch of the figure
+ `num`: (*int*) = number of calculated points to draw
+ `V_step`: (*quantities.VolumeFlowRate*) = step between ticks on the flow rate axis of the diagram
+ `V_max`: (*quantities.VolumeFlowRate*) = the maximum flow rate shown on the axis
+ `p_step`: (*quantities.Pressure*) = step between ticks on the pressure axis of the diagram
+ `p_max`: (*quantities.Pressure*) = maximum pressure shown on the axis
+ `working_point`: (*Tuple[qty.VolumeFlowRate, qty.Pressure]*) = working point of the pump (shown as a red
dot on the diagram)
**Returns:** (*nummath.graphing2.LineGraph*)<br>
Call show() on the returned *LineGraph* object to show the diagram.
"""
if self._coefficients is not None:
fig_size: Tuple[int, int] = kwargs.get('fig_size', (6, 4))
dpi: int = kwargs.get('dpi', 96)
num: int = kwargs.get('num', 50)
V_step: qty.VolumeFlowRate = kwargs.get('V_step')
V_max: qty.VolumeFlowRate = kwargs.get('V_max')
p_step: qty.Pressure = kwargs.get('p_step')
p_max: qty.Pressure = kwargs.get('p_max')
working_point: Tuple[qty.VolumeFlowRate, qty.Pressure] = kwargs.get('working_point')
V, p = self.create_pump_curve(V_initial, V_final, num)
graph = LineGraph(fig_size=fig_size, dpi=dpi)
graph.add_dataset(name="pump curve", x1_data=V, y1_data=p)
if self._meas_points:
graph.add_dataset(
name="measured points",
x1_data=[V(self._dest_units['flow_rate']) for V, _ in self._meas_points],
y1_data=[p(self._dest_units['pressure']) for _, p in self._meas_points],
layout={'marker': 'o', 'linestyle': 'None'}
)
if working_point:
graph.add_dataset(
name="working point",
x1_data=working_point[0](self._dest_units['flow_rate']),
y1_data=working_point[1](self._dest_units['pressure']),
layout={'marker': 'o', 'linestyle': 'None', 'color': 'red'}
)
graph.x1.set_title(f'flow rate [{self._dest_units['flow_rate']}]')
if V_max is not None and V_step is not None:
graph.x1.scale(
lim_down=0.0,
lim_up=V_max(self._dest_units['flow_rate']),
step_size=V_step(self._dest_units['flow_rate'])
)
graph.y1.set_title(f'pressure [{self._dest_units['pressure']}]')
if p_max is not None and p_step is not None:
graph.y1.scale(
lim_down=0.0,
lim_up=p_max(self._dest_units['pressure']),
step_size=p_step(self._dest_units['pressure'])
)
return graph
def pump_head(self, V: qty.VolumeFlowRate) -> qty.Pressure:
"""
Get the pump head (*quantities.Pressure*) if the flow rate (*quantities.VolumeFlowRate*) is given.
"""
a0 = self._coefficients[0]
a1 = self._coefficients[1]
a2 = self._coefficients[2]
V = V(self._dest_units['flow_rate'])
return qty.Pressure(a0 + a1 * V + a2 * V ** 2, self._dest_units['pressure'])
if __name__ == '__main__':
pump_curve = PumpCurve(dest_units={'flow_rate': 'L/s', 'pressure': 'bar'})
pump_curve.add_measuring_points(
points=[(0.0, 60.0), (2.4, 52.0), (4.2, 48.0), (6.0, 36.0)],
units={'flow_rate': 'm^3/h', 'pressure': 'm'}
)
coeff1 = pump_curve.get_coefficients(units={'pressure': 'Pa', 'flow_rate': 'm^3/s'})
print(coeff1)
coeff2 = pump_curve.get_coefficients(units={'pressure': 'bar', 'flow_rate': 'L/s'})
print(coeff2)
graph_ = pump_curve.draw_pump_curve(
V_initial=qty.VolumeFlowRate(0.0, 'm^3/h'),
V_final=qty.VolumeFlowRate(7.2, 'm^3/h'),
fig_size=(10, 8),
dpi=150,
num=100,
V_max=qty.VolumeFlowRate(3.0, 'L/s'),
V_step=qty.VolumeFlowRate(0.5, 'L/s'),
p_max=qty.Pressure(8.0, 'bar'),
p_step=qty.Pressure(2.0, 'bar')
)
graph_.show()
| """
## Pump curve fitting and drawing
- Establish an equation for the pump curve from measured points on the curve in the pump's data sheet
- Get the coefficients of the 2nd order polynomial describing the pump curve and determined via curve fitting
- Draw the pump curve in a diagram
"""
from typing import List, Tuple, Dict, Optional
import numpy as np
import quantities as qty
from nummath.interpolation import PolyFit
from nummath.graphing2 import LineGraph
class PumpCurve:
def __init__(self, dest_units: Dict[str, str]):
"""
Create *PumpCurve* object.
**Parameters:**
- `dest_units`: (*Dict[str, str]*)<br>
The measuring units in which the pump curve will be expressed. Keys:
+ 'flow_rate'
+ 'pressure'
"""
self._meas_points: List[Tuple[qty.VolumeFlowRate, qty.Pressure]] = []
self._dest_units: Dict[str, str] = dest_units
self._coefficients: Optional[np.array] = None
def add_measuring_points(self, points: List[Tuple[float, float]], units: Dict[str, str]):
"""
Add some data points taken from the pump curve in the data sheet. This will execute the curve fitting
algorithm that approaches the pump curve with a 2nd order polynomial.
**Parameters:**
- `points`: (*List[Tuple[float, float]]*)<br>
List of tuples. The 1st element of the tuple is flow rate, the 2nd element is pressure.
- `units`: (*Dict[str, str]*)<br>
Dictionary that contains the measuring units in which the values of the data points are expressed. Keys:
+ 'flow_rate'
+ 'pressure'
"""
self._meas_points = [
(qty.VolumeFlowRate(V, units['flow_rate']), qty.Pressure(p, units['pressure'])) for V, p in points
]
self._curve_fitting()
def _curve_fitting(self):
pf = PolyFit(
x_data=[V(self._dest_units['flow_rate']) for V, _ in self._meas_points],
y_data=[p(self._dest_units['pressure']) for _, p in self._meas_points],
m=2
)
self._coefficients = pf.solve()
def get_coefficients(self, units: Optional[Dict[str, str]] = None) -> Optional[List[float]]:
"""
Get the coefficients of the 2nd order polynomial describing the pump curve.
**Parameters:**
- `units`: (*Optional[Dict[str, str]]*)<br>
Optional dictionary that contains the measuring units in which the returned coefficients must be expressed.
Default is None, which means that the coefficients will be returned expressed in the measuring units passed in
at the instantiation of the *PumpCurve* object. Keys:
+ 'flow_rate'
+ 'pressure'
"""
if units is not None:
p_src = qty.Pressure(1.0, self._dest_units['pressure'])
V_src = qty.VolumeFlowRate(1.0, self._dest_units['flow_rate'])
p_des = p_src(units['pressure'])
V_des = V_src(units['flow_rate'])
else:
p_des = 1.0
V_des = 1.0
a0 = self._coefficients[0] * p_des
a1 = self._coefficients[1] * (p_des / V_des)
a2 = self._coefficients[2] * (p_des / V_des ** 2)
return [a0, a1, a2]
def set_coefficients(self, coeff: Tuple[float, float, float], units: Dict[str, str]):
"""
Set the known coefficients of the 2nd order polynomial describing the pump curve.
**Parameters:**
- `coeff`: (*Tuple[float, float, float]*)<br>
Tuple of 3 floats: a0, a1 and a2 as in the equation dp_pump = a0 + a1 * V + a2 * V **2
- `units`: (*Dict[str, str]*)<br>
Dictionary that contains the measuring units in which the pump coefficients are expressed. Keys:
+ 'flow_rate'
+ 'pressure'
"""
p_src = qty.Pressure(1.0, units['pressure'])
V_src = qty.VolumeFlowRate(1.0, units['flow_rate'])
p_des = p_src(self._dest_units['pressure'])
V_des = V_src(self._dest_units['flow_rate'])
a0 = coeff[0] * p_des
a1 = coeff[1] * (p_des / V_des)
a2 = coeff[2] * (p_des / V_des ** 2)
self._coefficients = np.array([a0, a1, a2])
def create_pump_curve(self, V_initial: qty.VolumeFlowRate, V_final: qty.VolumeFlowRate, num: int = 50):
"""
Calculate the pump curve between an initial and final flow rate.
**Parameters:**
- `V_initial`: (*quantities.VolumeFlowRate*) = initial flow rate
- `V_final`: (*quantities.VolumeFlowRate*) = final flow rate
- `num`: (*int*) = number of calculation points (default = 50)
**Returns:** (*Tuple[np.array, np.array]*)
Tuple with 1st element a numpy array of the flow rates and 2nd element a numpy array of the corresponding
pressures, both expressed in the desired measuring units set at instantiation of the *PumpCurve*-object.
"""
V_i = V_initial(self._dest_units['flow_rate'])
V_f = V_final(self._dest_units['flow_rate'])
V = np.linspace(V_i, V_f, num, endpoint=True)
a0, a1, a2 = self._coefficients
p = a0 + a1 * V + a2 * V ** 2
return V, p
def draw_pump_curve(self, V_initial: qty.VolumeFlowRate, V_final: qty.VolumeFlowRate, **kwargs):
"""
Draw the calculated pump curve.
**Parameters:**
- `V_initial`: (*quantities.VolumeFlowRate*) = initial flow rate
- `V_final`: (*quantities.VolumeFlowRate*) = final flow rate
- `kwargs`: optional keyword arguments
+ `fig_size`: (*Tuple[float, float]*) = the width and height of the figure in inches
+ `dpi`: (*int*) = dots per inch of the figure
+ `num`: (*int*) = number of calculated points to draw
+ `V_step`: (*quantities.VolumeFlowRate*) = step between ticks on the flow rate axis of the diagram
+ `V_max`: (*quantities.VolumeFlowRate*) = the maximum flow rate shown on the axis
+ `p_step`: (*quantities.Pressure*) = step between ticks on the pressure axis of the diagram
+ `p_max`: (*quantities.Pressure*) = maximum pressure shown on the axis
+ `working_point`: (*Tuple[qty.VolumeFlowRate, qty.Pressure]*) = working point of the pump (shown as a red
dot on the diagram)
**Returns:** (*nummath.graphing2.LineGraph*)<br>
Call show() on the returned *LineGraph* object to show the diagram.
"""
if self._coefficients is not None:
fig_size: Tuple[int, int] = kwargs.get('fig_size', (6, 4))
dpi: int = kwargs.get('dpi', 96)
num: int = kwargs.get('num', 50)
V_step: qty.VolumeFlowRate = kwargs.get('V_step')
V_max: qty.VolumeFlowRate = kwargs.get('V_max')
p_step: qty.Pressure = kwargs.get('p_step')
p_max: qty.Pressure = kwargs.get('p_max')
working_point: Tuple[qty.VolumeFlowRate, qty.Pressure] = kwargs.get('working_point')
V, p = self.create_pump_curve(V_initial, V_final, num)
graph = LineGraph(fig_size=fig_size, dpi=dpi)
graph.add_dataset(name="pump curve", x1_data=V, y1_data=p)
if self._meas_points:
graph.add_dataset(
name="measured points",
x1_data=[V(self._dest_units['flow_rate']) for V, _ in self._meas_points],
y1_data=[p(self._dest_units['pressure']) for _, p in self._meas_points],
layout={'marker': 'o', 'linestyle': 'None'}
)
if working_point:
graph.add_dataset(
name="working point",
x1_data=working_point[0](self._dest_units['flow_rate']),
y1_data=working_point[1](self._dest_units['pressure']),
layout={'marker': 'o', 'linestyle': 'None', 'color': 'red'}
)
graph.x1.set_title(f'flow rate [{self._dest_units["flow_rate"]}]')
if V_max is not None and V_step is not None:
graph.x1.scale(
lim_down=0.0,
lim_up=V_max(self._dest_units['flow_rate']),
step_size=V_step(self._dest_units['flow_rate'])
)
graph.y1.set_title(f'pressure [{self._dest_units["pressure"]}]')
if p_max is not None and p_step is not None:
graph.y1.scale(
lim_down=0.0,
lim_up=p_max(self._dest_units['pressure']),
step_size=p_step(self._dest_units['pressure'])
)
return graph
def pump_head(self, V: qty.VolumeFlowRate) -> qty.Pressure:
"""
Get the pump head (*quantities.Pressure*) if the flow rate (*quantities.VolumeFlowRate*) is given.
"""
a0 = self._coefficients[0]
a1 = self._coefficients[1]
a2 = self._coefficients[2]
V = V(self._dest_units['flow_rate'])
return qty.Pressure(a0 + a1 * V + a2 * V ** 2, self._dest_units['pressure'])
if __name__ == '__main__':
pump_curve = PumpCurve(dest_units={'flow_rate': 'L/s', 'pressure': 'bar'})
pump_curve.add_measuring_points(
points=[(0.0, 60.0), (2.4, 52.0), (4.2, 48.0), (6.0, 36.0)],
units={'flow_rate': 'm^3/h', 'pressure': 'm'}
)
coeff1 = pump_curve.get_coefficients(units={'pressure': 'Pa', 'flow_rate': 'm^3/s'})
print(coeff1)
coeff2 = pump_curve.get_coefficients(units={'pressure': 'bar', 'flow_rate': 'L/s'})
print(coeff2)
graph_ = pump_curve.draw_pump_curve(
V_initial=qty.VolumeFlowRate(0.0, 'm^3/h'),
V_final=qty.VolumeFlowRate(7.2, 'm^3/h'),
fig_size=(10, 8),
dpi=150,
num=100,
V_max=qty.VolumeFlowRate(3.0, 'L/s'),
V_step=qty.VolumeFlowRate(0.5, 'L/s'),
p_max=qty.Pressure(8.0, 'bar'),
p_step=qty.Pressure(2.0, 'bar')
)
graph_.show()
|
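The curve fitting above goes through nummath's PolyFit; the same second-order fit can be sketched with plain NumPy, which is handy for sanity-checking the coefficients. The flow rate and pressure values below are made up for illustration and are not taken from a real pump data sheet.

import numpy as np

# illustrative measured points: flow rate [L/s] and pressure [bar]
V = np.array([0.00, 0.67, 1.17, 1.67])
p = np.array([5.89, 5.10, 4.71, 3.53])

# np.polyfit returns the highest-order coefficient first
a2, a1, a0 = np.polyfit(V, p, deg=2)
print(a0, a1, a2)  # coefficients of p = a0 + a1*V + a2*V**2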
# Native
import os
from math import *
from cmath import *
from time import time
# Installed
import cv2 as cv
# Custom Modules
from assets.utils import float_range
from JuliaSet.core import JuliaSet
class Animation:
def __init__(self, range_from: float or int, range_to: float or int, range_step: float, frames_folder: str = "out/JuliaSet/tmp", vid_name: str = "Animation.avi", fps: int = 60) -> None:
os.mkdir("out/JuliaSet/tmp") if not os.path.exists("out/JuliaSet/tmp") else None
[os.remove(f"out/JuliaSet/tmp/{x}") for x in os.listdir(frames_folder)] if frames_folder == "out/JuliaSet/tmp" else None
self.rngf = range_from
self.rngt = range_to
self.rngs = range_step
self.framesfd = frames_folder
self.fps = fps
self.vname = vid_name if vid_name.endswith(".mp4") else f"{vid_name}.mp4"
os.mkdir("out/JuliaSet/Video") if not os.path.exists("out/JuliaSet/Video") else None
self.save_path = f"out/JuliaSet/Video/{self.vname}"
(print("This name is already taken... Please find a new one."), quit()) if self.vname in os.listdir("out/JuliaSet/Video") else None
def animation_from_images(self) -> tuple:
print("Making frames into a video...")
s = time()
images = [img for img in os.listdir(self.framesfd) if img.endswith(".png")]
frame = cv.imread(os.path.join(self.framesfd, images[0]))
height, width, layers = frame.shape
video = cv.VideoWriter(self.save_path, 0, self.fps, (width,height))
[(video.write(cv.imread(os.path.join(self.framesfd, img))), print(f"Current IMG: '{img}'")) for img in images]
cv.destroyAllWindows()
video.release()
print(f"Video finished in {round(time() - s, 3)} sec. Cleaning up tmp files...")
[os.remove(f"out/JuliaSet/tmp/{x}") for x in os.listdir(self.framesfd)]
print(f"Cleaning finished. Video saved here: {self.save_path}")
return self.save_path, round(time() - s, 3)
def create_animation(self, mult: int, maxit: int, cmap: str, c_func: str) -> tuple:
inv_step = int(f"1{"0" * (len(str(self.rngs)) - 2)}")
rng = list(float_range(self.rngf, self.rngt, self.rngs))
for x in rng:
c = eval(c_func.replace("X", f"{x}"))
fn = f"{"0" * (len(str(int(rng[-1]*inv_step))) - len(str(int(x*inv_step))))}{int(x*inv_step)}.png"
jset = JuliaSet(c, mult, maxit, auto_name=False, for_anim=True, silent=True)
jset.save(fn=fn, cmaps=[cmap,])
return self.animation_from_images()
| # Native
import os
from math import *
from cmath import *
from time import time
# Installed
import cv2 as cv
# Custom Modules
from assets.utils import float_range
from JuliaSet.core import JuliaSet
class Animation:
def __init__(self, range_from: float or int, range_to: float or int, range_step: float, frames_folder: str = "out/JuliaSet/tmp", vid_name: str = "Animation.avi", fps: int = 60) -> None:
os.mkdir("out/JuliaSet/tmp") if not os.path.exists("out/JuliaSet/tmp") else None
[os.remove(f"out/JuliaSet/tmp/{x}") for x in os.listdir(frames_folder)] if frames_folder == "out/JuliaSet/tmp" else None
self.rngf = range_from
self.rngt = range_to
self.rngs = range_step
self.framesfd = frames_folder
self.fps = fps
self.vname = vid_name if vid_name.endswith(".mp4") else f"{vid_name}.mp4"
os.mkdir("out/JuliaSet/Video") if not os.path.exists("out/JuliaSet/Video") else None
self.save_path = f"out/JuliaSet/Video/{self.vname}"
(print("This name is already taken... Please find a new one."), quit()) if self.vname in os.listdir("out/JuliaSet/Video") else None
def animation_from_images(self) -> tuple:
print("Making frames into a video...")
s = time()
images = [img for img in os.listdir(self.framesfd) if img.endswith(".png")]
frame = cv.imread(os.path.join(self.framesfd, images[0]))
height, width, layers = frame.shape
video = cv.VideoWriter(self.save_path, 0, self.fps, (width,height))
[(video.write(cv.imread(os.path.join(self.framesfd, img))), print(f"Current IMG: '{img}'")) for img in images]
cv.destroyAllWindows()
video.release()
print(f"Video finished in {round(time() - s, 3)} sec. Cleaning up tmp files...")
[os.remove(f"out/JuliaSet/tmp/{x}") for x in os.listdir(self.framesfd)]
print(f"Cleaning finished. Video saved here: {self.save_path}")
return self.save_path, round(time() - s, 3)
def create_animation(self, mult: int, maxit: int, cmap: str, c_func: str) -> tuple:
inv_step = int(f"1{'0' * (len(str(self.rngs)) - 2)}")
rng = list(float_range(self.rngf, self.rngt, self.rngs))
for x in rng:
c = eval(c_func.replace("X", f"{x}"))
fn = f"{'0' * (len(str(int(rng[-1]*inv_step))) - len(str(int(x*inv_step))))}{int(x*inv_step)}.png"
jset = JuliaSet(c, mult, maxit, auto_name=False, for_anim=True, silent=True)
jset.save(fn=fn, cmaps=[cmap,])
return self.animation_from_images()
|
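The VideoWriter in the animation class above is constructed with a fourcc code of 0 while writing to an .mp4 path. For comparison, a more explicit writer setup usually looks like the sketch below; the 'mp4v' codec, frame size and output path are assumptions for the example, not values taken from the class.

import os
import cv2 as cv

os.makedirs("out/JuliaSet/Video", exist_ok=True)
fourcc = cv.VideoWriter_fourcc(*"mp4v")  # explicit codec for an .mp4 container
video = cv.VideoWriter("out/JuliaSet/Video/example.mp4", fourcc, 60, (640, 480))
# frames are appended with video.write(frame); the file is finalized by release()
video.release()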
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Meta information about the service.
Currently this only provides API versioning information
"""
from datetime import datetime
from flask import current_app
from colin_api.exceptions import BusinessNotFoundException
from colin_api.resources.db import DB
from colin_api.utils import convert_to_json_date, convert_to_json_datetime, stringify_list
class Business:
"""Class to contain all model-like functions such as getting and setting from database."""
business = None
def __init__(self):
"""Initialize with all values None."""
def get_corp_num(self):
"""Get corporation number, aka identifier."""
return self.business['identifier']
def as_dict(self):
"""Return dict version of self."""
return {
'business': self.business
}
@classmethod
def _get_last_ar_dates_for_reset(cls, cursor, event_info: list, event_ids: list):
"""Get the previous AR/AGM dates."""
events_by_corp_num = {}
for info in event_info:
if info['corp_num'] not in events_by_corp_num or events_by_corp_num[info['corp_num']] > info['event_id']:
events_by_corp_num[info['corp_num']] = info['event_id']
dates_by_corp_num = []
for corp_num in events_by_corp_num:
cursor.execute(f"""
SELECT event.corp_num, event.event_timestmp, filing.period_end_dt, filing.agm_date, filing.filing_typ_cd
FROM event
JOIN filing on filing.event_id = event.event_id
WHERE event.event_id not in ({stringify_list(event_ids)}) AND event.corp_num=:corp_num
ORDER BY event.event_timestmp desc
""",
corp_num=corp_num
)
dates = {'corp_num': corp_num}
for row in cursor.fetchall():
row = dict(zip([x[0].lower() for x in cursor.description], row))
if 'event_date' not in dates or dates['event_date'] < row['event_timestmp']:
dates['event_date'] = row['event_timestmp']
# set ar_date to closest period_end_dt.
# this is not always the first one that gets returned if 2 were filed on the same day
if row['period_end_dt'] and ('ar_date' not in dates or dates['ar_date'] < row['period_end_dt']):
dates['ar_date'] = row['period_end_dt']
dates['ar_filed_date'] = row['event_timestmp']
# this may be different than ar_date if the last ar had no agm
if row['agm_date'] and ('agm_date' not in dates or dates['agm_date'] < row['agm_date']):
dates['agm_date'] = row['agm_date']
# if there are no ARs for this coop then use date of incorporation
if row['filing_typ_cd'] == 'OTINC' and 'agm_date' not in dates:
dates['agm_date'] = row['event_timestmp']
dates['ar_filed_date'] = row['event_timestmp']
dates_by_corp_num.append(dates)
return dates_by_corp_num
@classmethod
def find_by_identifier(cls, identifier: str = None): # pylint: disable=too-many-statements;
"""Return a Business by identifier."""
business = None
if not identifier:
return None
try:
# get record
cursor = DB.connection.cursor()
cursor.execute("""
select corp.CORP_NUM as identifier, CORP_FROZEN_TYP_CD, corp_typ_cd type,
filing.period_end_dt as last_ar_date, LAST_AR_FILED_DT as last_ar_filed_date, LAST_AGM_DATE,
corp_op_state.full_desc as state, corp_state.state_typ_cd as corp_state,
t_name.corp_nme as legal_name,
t_assumed_name.CORP_NME as assumed_name, RECOGNITION_DTS as founding_date,
BN_15 as business_number, CAN_JUR_TYP_CD, OTHR_JURIS_DESC
from CORPORATION corp
left join CORP_NAME t_name on t_name.corp_num = corp.corp_num and t_name.CORP_NAME_TYP_CD='CO'
AND t_name.END_EVENT_ID is null
left join CORP_NAME t_assumed_name on t_assumed_name.corp_num = corp.corp_num
and t_assumed_name.CORP_NAME_TYP_CD='AS' AND t_assumed_name.END_EVENT_ID is null
join CORP_STATE on CORP_STATE.corp_num = corp.corp_num and CORP_STATE.end_event_id is null
join CORP_OP_STATE on CORP_OP_STATE.state_typ_cd = CORP_STATE.state_typ_cd
left join JURISDICTION on JURISDICTION.corp_num = corp.corp_num
join event on corp.corp_num = event.corp_num
left join filing on event.event_id = filing.event_id and filing.filing_typ_cd = 'OTANN'
where corp_typ_cd = 'CP'
and corp.CORP_NUM=:corp_num
order by last_ar_date desc nulls last""", corp_num=identifier)
business = cursor.fetchone()
if not business:
raise BusinessNotFoundException(identifier=identifier)
# add column names to resultset to build out correct json structure and make manipulation below more robust
# (better than column numbers)
business = dict(zip([x[0].lower() for x in cursor.description], business))
# get last ledger date from EVENT table and add to business record
# note - FILE event type is correct for new filings; CONVOTHER is for events/filings pulled over from COBRS
# during initial data import for Coops.
cursor.execute("""
select max(EVENT_TIMESTMP) as last_ledger_timestamp from EVENT
where EVENT_TYP_CD in('FILE', 'CONVOTHER') and CORP_NUM = '{}'""".format(identifier))
last_ledger_timestamp = cursor.fetchone()[0]
business['last_ledger_timestamp'] = last_ledger_timestamp
# if this is an XPRO, get correct jurisdiction; otherwise, it's BC
if business['type'] == 'XCP':
if business['can_jur_typ_cd'] == 'OT':
business['jurisdiction'] = business['othr_juris_desc']
else:
business['jurisdiction'] = business['can_jur_typ_cd']
else:
business['jurisdiction'] = 'BC'
# set name
if business['assumed_name']:
business['legal_name'] = business['assumed_name']
# set status - In Good Standing if certain criteria met, otherwise use original value
if business['state'] == 'Active' and \
business['last_ar_filed_date'] is not None and \
isinstance(business['last_ar_filed_date'], datetime) and \
business['last_agm_date'] is not None and isinstance(business['last_agm_date'], datetime):
if business['last_ar_filed_date'] > business['last_agm_date']:
business['status'] = 'In Good Standing'
else:
business['status'] = business['state']
else:
business['status'] = business['state']
# convert dates and date-times to correct json format and convert to camel case for schema names
business['foundingDate'] = convert_to_json_datetime(business['founding_date'])
business['lastAgmDate'] = convert_to_json_date(business['last_agm_date'])
business['lastArDate'] = convert_to_json_date(business['last_ar_date']) if business['last_ar_date'] \
else business['lastAgmDate']
business['lastLedgerTimestamp'] = convert_to_json_datetime(business['last_ledger_timestamp'])
business['businessNumber'] = business['business_number']
business['corpState'] = business['corp_state']
business['legalName'] = business['legal_name']
business['legalType'] = business['type']
# remove unnecessary fields
del business['can_jur_typ_cd']
del business['othr_juris_desc']
del business['assumed_name']
del business['state']
del business['business_number']
del business['corp_frozen_typ_cd']
del business['corp_state']
del business['founding_date']
del business['last_agm_date']
del business['last_ar_filed_date']
del business['last_ledger_timestamp']
del business['legal_name']
del business['type']
del business['last_ar_date']
# add cache_id todo: set to real value
business['cacheId'] = 0
# convert to Business object
business_obj = Business()
business_obj.business = business
return business_obj
except Exception as err:
# general catch-all exception
current_app.logger.error(err.with_traceback(None))
# pass through exception to caller
raise err
@classmethod
def update_corporation(cls, cursor, corp_num: str = None, date: str = None, annual_report: bool = False):
"""Update corporation record.
:param cursor: oracle cursor
:param corp_num: (str) corporation number
:param date: (str) last agm date
:param annual_report: (bool) whether or not this was an annual report
"""
try:
if annual_report:
if date:
cursor.execute("""
UPDATE corporation
SET
LAST_AR_FILED_DT = sysdate,
LAST_AGM_DATE = TO_DATE(:agm_date, 'YYYY-mm-dd'),
LAST_LEDGER_DT = sysdate
WHERE corp_num = :corp_num
""",
agm_date=date,
corp_num=corp_num
)
else:
cursor.execute("""
UPDATE corporation
SET
LAST_AR_FILED_DT = sysdate,
LAST_LEDGER_DT = sysdate
WHERE corp_num = :corp_num
""",
corp_num=corp_num
)
else:
cursor.execute("""
UPDATE corporation
SET
LAST_LEDGER_DT = sysdate
WHERE corp_num = :corp_num
""",
corp_num=corp_num
)
except Exception as err:
current_app.logger.error(err.with_traceback(None))
raise err
@classmethod
def update_corp_state(cls, cursor, event_id, corp_num, state='ACT'):
"""Update corporation state.
End previous corp_state record (end event id) and create new corp_state record.
:param cursor: oracle cursor
:param event_id: (int) event id for corresponding event
:param corp_num: (str) corporation number
:param state: (str) state of corporation
"""
try:
cursor.execute("""
UPDATE corp_state
SET end_event_id = :event_id
WHERE corp_num = :corp_num and end_event_id is NULL
""",
event_id=event_id,
corp_num=corp_num
)
except Exception as err:
current_app.logger.error(err.with_traceback(None))
raise err
try:
cursor.execute("""
INSERT INTO corp_state (corp_num, start_event_id, state_typ_cd)
VALUES (:corp_num, :event_id, :state)
""",
event_id=event_id,
corp_num=corp_num,
state=state
)
except Exception as err:
current_app.logger.error(err.with_traceback(None))
raise err
@classmethod
def reset_corporations(cls, cursor, event_info: list, event_ids: list):
"""Reset the corporations to what they were before the given events."""
if len(event_info) < 1:
return
dates_by_corp_num = cls._get_last_ar_dates_for_reset(cursor=cursor, event_info=event_info, event_ids=event_ids)
for item in dates_by_corp_num:
try:
cursor.execute("""
UPDATE corporation
SET
LAST_AR_FILED_DT = :ar_filed_date,
LAST_AGM_DATE = :agm_date,
LAST_LEDGER_DT = :event_date
WHERE corp_num = :corp_num
""",
agm_date=item['agm_date'] if item['agm_date'] else item['ar_date'],
ar_filed_date=item['ar_filed_date'],
event_date=item['event_date'],
corp_num=item['corp_num']
)
except Exception as err:
current_app.logger.error(f'Error in Business: Failed to reset corporation for {item['corp_num']}')
raise err
@classmethod
def reset_corp_states(cls, cursor, event_ids: list):
"""Reset the corp states to what they were before the given events."""
if len(event_ids) < 1:
return
# delete corp_state rows created on these events
try:
cursor.execute(f"""
DELETE FROM corp_state
WHERE start_event_id in ({stringify_list(event_ids)})
""")
except Exception as err:
current_app.logger.error(f'Error in Business: Failed to delete corp_state rows for events {event_ids}')
raise err
# reset corp_state rows ended on these events
try:
cursor.execute(f"""
UPDATE corp_state
SET end_event_id = null
WHERE end_event_id in ({stringify_list(event_ids)})
""")
except Exception as err:
current_app.logger.error(f'Error in Business: Failed to reset ended corp_state rows for events {event_ids}')
raise err
| # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Meta information about the service.
Currently this only provides API versioning information
"""
from datetime import datetime
from flask import current_app
from colin_api.exceptions import BusinessNotFoundException
from colin_api.resources.db import DB
from colin_api.utils import convert_to_json_date, convert_to_json_datetime, stringify_list
class Business:
"""Class to contain all model-like functions such as getting and setting from database."""
business = None
def __init__(self):
"""Initialize with all values None."""
def get_corp_num(self):
"""Get corporation number, aka identifier."""
return self.business['identifier']
def as_dict(self):
"""Return dict version of self."""
return {
'business': self.business
}
@classmethod
def _get_last_ar_dates_for_reset(cls, cursor, event_info: list, event_ids: list):
"""Get the previous AR/AGM dates."""
events_by_corp_num = {}
for info in event_info:
if info['corp_num'] not in events_by_corp_num or events_by_corp_num[info['corp_num']] > info['event_id']:
events_by_corp_num[info['corp_num']] = info['event_id']
dates_by_corp_num = []
for corp_num in events_by_corp_num:
cursor.execute(f"""
SELECT event.corp_num, event.event_timestmp, filing.period_end_dt, filing.agm_date, filing.filing_typ_cd
FROM event
JOIN filing on filing.event_id = event.event_id
WHERE event.event_id not in ({stringify_list(event_ids)}) AND event.corp_num=:corp_num
ORDER BY event.event_timestmp desc
""",
corp_num=corp_num
)
dates = {'corp_num': corp_num}
for row in cursor.fetchall():
row = dict(zip([x[0].lower() for x in cursor.description], row))
if 'event_date' not in dates or dates['event_date'] < row['event_timestmp']:
dates['event_date'] = row['event_timestmp']
# set ar_date to closest period_end_dt.
# this is not always the first one that gets returned if 2 were filed on the same day
if row['period_end_dt'] and ('ar_date' not in dates or dates['ar_date'] < row['period_end_dt']):
dates['ar_date'] = row['period_end_dt']
dates['ar_filed_date'] = row['event_timestmp']
# this may be different than ar_date if the last ar had no agm
if row['agm_date'] and ('agm_date' not in dates or dates['agm_date'] < row['agm_date']):
dates['agm_date'] = row['agm_date']
# if there are no ARs for this coop then use date of incorporation
if row['filing_typ_cd'] == 'OTINC' and 'agm_date' not in dates:
dates['agm_date'] = row['event_timestmp']
dates['ar_filed_date'] = row['event_timestmp']
dates_by_corp_num.append(dates)
return dates_by_corp_num
@classmethod
def find_by_identifier(cls, identifier: str = None): # pylint: disable=too-many-statements;
"""Return a Business by identifier."""
business = None
if not identifier:
return None
try:
# get record
cursor = DB.connection.cursor()
cursor.execute("""
select corp.CORP_NUM as identifier, CORP_FROZEN_TYP_CD, corp_typ_cd type,
filing.period_end_dt as last_ar_date, LAST_AR_FILED_DT as last_ar_filed_date, LAST_AGM_DATE,
corp_op_state.full_desc as state, corp_state.state_typ_cd as corp_state,
t_name.corp_nme as legal_name,
t_assumed_name.CORP_NME as assumed_name, RECOGNITION_DTS as founding_date,
BN_15 as business_number, CAN_JUR_TYP_CD, OTHR_JURIS_DESC
from CORPORATION corp
left join CORP_NAME t_name on t_name.corp_num = corp.corp_num and t_name.CORP_NAME_TYP_CD='CO'
AND t_name.END_EVENT_ID is null
left join CORP_NAME t_assumed_name on t_assumed_name.corp_num = corp.corp_num
and t_assumed_name.CORP_NAME_TYP_CD='AS' AND t_assumed_name.END_EVENT_ID is null
join CORP_STATE on CORP_STATE.corp_num = corp.corp_num and CORP_STATE.end_event_id is null
join CORP_OP_STATE on CORP_OP_STATE.state_typ_cd = CORP_STATE.state_typ_cd
left join JURISDICTION on JURISDICTION.corp_num = corp.corp_num
join event on corp.corp_num = event.corp_num
left join filing on event.event_id = filing.event_id and filing.filing_typ_cd = 'OTANN'
where corp_typ_cd = 'CP'
and corp.CORP_NUM=:corp_num
order by last_ar_date desc nulls last""", corp_num=identifier)
business = cursor.fetchone()
if not business:
raise BusinessNotFoundException(identifier=identifier)
# add column names to resultset to build out correct json structure and make manipulation below more robust
# (better than column numbers)
business = dict(zip([x[0].lower() for x in cursor.description], business))
# get last ledger date from EVENT table and add to business record
# note - FILE event type is correct for new filings; CONVOTHER is for events/filings pulled over from COBRS
# during initial data import for Coops.
cursor.execute("""
select max(EVENT_TIMESTMP) as last_ledger_timestamp from EVENT
where EVENT_TYP_CD in('FILE', 'CONVOTHER') and CORP_NUM = '{}'""".format(identifier))
last_ledger_timestamp = cursor.fetchone()[0]
business['last_ledger_timestamp'] = last_ledger_timestamp
# if this is an XPRO, get correct jurisdiction; otherwise, it's BC
if business['type'] == 'XCP':
if business['can_jur_typ_cd'] == 'OT':
business['jurisdiction'] = business['othr_juris_desc']
else:
business['jurisdiction'] = business['can_jur_typ_cd']
else:
business['jurisdiction'] = 'BC'
# set name
if business['assumed_name']:
business['legal_name'] = business['assumed_name']
# set status - In Good Standing if certain criteria met, otherwise use original value
if business['state'] == 'Active' and \
business['last_ar_filed_date'] is not None and \
isinstance(business['last_ar_filed_date'], datetime) and \
business['last_agm_date'] is not None and isinstance(business['last_agm_date'], datetime):
if business['last_ar_filed_date'] > business['last_agm_date']:
business['status'] = 'In Good Standing'
else:
business['status'] = business['state']
else:
business['status'] = business['state']
# convert dates and date-times to correct json format and convert to camel case for schema names
business['foundingDate'] = convert_to_json_datetime(business['founding_date'])
business['lastAgmDate'] = convert_to_json_date(business['last_agm_date'])
business['lastArDate'] = convert_to_json_date(business['last_ar_date']) if business['last_ar_date'] \
else business['lastAgmDate']
business['lastLedgerTimestamp'] = convert_to_json_datetime(business['last_ledger_timestamp'])
business['businessNumber'] = business['business_number']
business['corpState'] = business['corp_state']
business['legalName'] = business['legal_name']
business['legalType'] = business['type']
# remove unnecessary fields
del business['can_jur_typ_cd']
del business['othr_juris_desc']
del business['assumed_name']
del business['state']
del business['business_number']
del business['corp_frozen_typ_cd']
del business['corp_state']
del business['founding_date']
del business['last_agm_date']
del business['last_ar_filed_date']
del business['last_ledger_timestamp']
del business['legal_name']
del business['type']
del business['last_ar_date']
# add cache_id todo: set to real value
business['cacheId'] = 0
# convert to Business object
business_obj = Business()
business_obj.business = business
return business_obj
except Exception as err:
# general catch-all exception
current_app.logger.error(err.with_traceback(None))
# pass through exception to caller
raise err
@classmethod
def update_corporation(cls, cursor, corp_num: str = None, date: str = None, annual_report: bool = False):
"""Update corporation record.
:param cursor: oracle cursor
:param corp_num: (str) corporation number
:param date: (str) last agm date
:param annual_report: (bool) whether or not this was an annual report
"""
try:
if annual_report:
if date:
cursor.execute("""
UPDATE corporation
SET
LAST_AR_FILED_DT = sysdate,
LAST_AGM_DATE = TO_DATE(:agm_date, 'YYYY-mm-dd'),
LAST_LEDGER_DT = sysdate
WHERE corp_num = :corp_num
""",
agm_date=date,
corp_num=corp_num
)
else:
cursor.execute("""
UPDATE corporation
SET
LAST_AR_FILED_DT = sysdate,
LAST_LEDGER_DT = sysdate
WHERE corp_num = :corp_num
""",
corp_num=corp_num
)
else:
cursor.execute("""
UPDATE corporation
SET
LAST_LEDGER_DT = sysdate
WHERE corp_num = :corp_num
""",
corp_num=corp_num
)
except Exception as err:
current_app.logger.error(err.with_traceback(None))
raise err
@classmethod
def update_corp_state(cls, cursor, event_id, corp_num, state='ACT'):
"""Update corporation state.
End previous corp_state record (end event id) and create new corp_state record.
:param cursor: oracle cursor
:param event_id: (int) event id for corresponding event
:param corp_num: (str) corporation number
:param state: (str) state of corporation
"""
try:
cursor.execute("""
UPDATE corp_state
SET end_event_id = :event_id
WHERE corp_num = :corp_num and end_event_id is NULL
""",
event_id=event_id,
corp_num=corp_num
)
except Exception as err:
current_app.logger.error(err.with_traceback(None))
raise err
try:
cursor.execute("""
INSERT INTO corp_state (corp_num, start_event_id, state_typ_cd)
VALUES (:corp_num, :event_id, :state)
""",
event_id=event_id,
corp_num=corp_num,
state=state
)
except Exception as err:
current_app.logger.error(err.with_traceback(None))
raise err
@classmethod
def reset_corporations(cls, cursor, event_info: list, event_ids: list):
"""Reset the corporations to what they were before the given events."""
if len(event_info) < 1:
return
dates_by_corp_num = cls._get_last_ar_dates_for_reset(cursor=cursor, event_info=event_info, event_ids=event_ids)
for item in dates_by_corp_num:
try:
cursor.execute("""
UPDATE corporation
SET
LAST_AR_FILED_DT = :ar_filed_date,
LAST_AGM_DATE = :agm_date,
LAST_LEDGER_DT = :event_date
WHERE corp_num = :corp_num
""",
agm_date=item['agm_date'] if item['agm_date'] else item['ar_date'],
ar_filed_date=item['ar_filed_date'],
event_date=item['event_date'],
corp_num=item['corp_num']
)
except Exception as err:
current_app.logger.error(f'Error in Business: Failed to reset corporation for {item["corp_num"]}')
raise err
@classmethod
def reset_corp_states(cls, cursor, event_ids: list):
"""Reset the corp states to what they were before the given events."""
if len(event_ids) < 1:
return
# delete corp_state rows created on these events
try:
cursor.execute(f"""
DELETE FROM corp_state
WHERE start_event_id in ({stringify_list(event_ids)})
""")
except Exception as err:
current_app.logger.error(f'Error in Business: Failed to delete corp_state rows for events {event_ids}')
raise err
# reset corp_state rows ended on these events
try:
cursor.execute(f"""
UPDATE corp_state
SET end_event_id = null
WHERE end_event_id in ({stringify_list(event_ids)})
""")
except Exception as err:
current_app.logger.error(f'Error in Business: Failed reset ended corp_state rows for events {event_ids}')
raise err
|
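The update helpers above bind named parameters on a raw Oracle cursor and leave transaction control to the caller. Below is a minimal sketch (not part of the dataset) of how they might be driven with cx_Oracle; the connection string, corporation number, and event id are placeholder assumptions.

```python
# Hypothetical driver for the Business helpers above (illustrative only).
import cx_Oracle

# Placeholder credentials/DSN -- assumptions, not real configuration.
conn = cx_Oracle.connect("colin_user", "secret", "localhost:1521/colin")
cursor = conn.cursor()
try:
    # Record an annual report filing for a hypothetical corporation.
    Business.update_corporation(
        cursor=cursor, corp_num="BC0001234", date="2021-06-30", annual_report=True
    )
    # Close the open corp_state row for event 101 and start a new ACT row.
    Business.update_corp_state(cursor=cursor, event_id=101, corp_num="BC0001234", state="ACT")
    conn.commit()
except Exception:
    conn.rollback()
    raise
```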
import datetime
from pathlib import Path
import numpy as np
import scipy.io
class CameraCalibration(object):
"""Camera calibration saved in .mat file and method to assemble Projective (P) martrix.
Notes:
        - Inspired by example code + notes from CiRC which are derived from Hartley and Zisserman (2003).
        - Assumes calibration is saved in a .mat file
Args:
calibration_file (str): Path to camera calibration file.
Attributes:
fname (str): Name of camera calibration file
serial_number (int): Camera serial number
camera_number (str): Camera number (e.g. 'c5')
calibration_date (datetime): Date of camera calibration
coordinate_system (str): Coordinate system used for calibration (e.g. 'xyz')
beta (np.ndarray): Camera extrinsic calibration
x (across shore), y (longshore), z (vertical), azimuth, tilt, roll
lcp (dict): Lens Calibration Profile structure, the intrinsic camera calibration
P (np.ndarray): Matrix containing intrinsic and extrinsic calibration
"""
def __init__(self, calibration_file):
calibration_file = Path(calibration_file)
self.fname = calibration_file.name
sn, cn, dc, cs, _ = self.fname.split('_')
self.serial_number = int(sn)
self.camera_number = cn
self.calibration_date = datetime.datetime.strptime(dc, '%Y%m%d')
self.coordinate_system = cs
mat_data = scipy.io.loadmat(calibration_file)
self.beta = mat_data['beta'][0]
self.lcp = self._load_lcp(mat_data['lcp'])
self.P = self._assembleP()
def _load_lcp(self, lcp):
"""Return dict of lcp from lcp loaded from mat file"""
NU = lcp[0, 0][0][0][0]
NV = lcp[0, 0][1][0][0]
c0U = lcp[0, 0][2][0][0]
c0V = lcp[0, 0][3][0][0]
fx = lcp[0, 0][4][0][0]
fy = lcp[0, 0][5][0][0]
d1 = lcp[0, 0][6][0][0]
d2 = lcp[0, 0][7][0][0]
d3 = lcp[0, 0][8][0][0]
t1 = lcp[0, 0][9][0][0]
t2 = lcp[0, 0][10][0][0]
r = lcp[0, 0][11][0, :]
caltech_fname = lcp[0, 0][12][0]
fr = lcp[0, 0][13][0, :]
x = lcp[0, 0][14][0, :]
y = lcp[0, 0][15][0, :]
dx = lcp[0, 0][16][:, :]
dy = lcp[0, 0][17][:, :]
return {
'NU': NU,
'NV': NV,
'c0U': c0U,
'c0V': c0V,
'fx': fx,
'fy': fy,
'd1': d1,
'd2': d2,
'd3': d3,
't1': t1,
't2': t2,
'r': r,
'fr': fr,
'caltech_fname': caltech_fname,
'x': x,
'y': y,
'dx': dx,
'dy': dy
}
def __repr__(self):
msg = (
f'serial_number: {self.serial_number}\n'
f'camera_number: {self.camera_number}\n'
f'calibration_date: {self.calibration_date}\n'
f'coordinate_system: {self.coordinate_system}\n'
f'beta: {self.beta}\n'
f"sum of lcp r: {np.nansum(self.lcp["r"])}"
)
return msg
def __str__(self):
msg = (
f'serial_number: {self.serial_number}, '
f'camera_number: {self.camera_number}, '
f'calibration_date: {self.calibration_date}, '
f'coordinate_system: {self.coordinate_system}'
)
return msg
def _assembleP(self):
"""Assembles and returns Projective (P) matrix from LCP and Beta values.
Notes:
- Derived from lcpBeta2P.m + CiRN notes
- K converts angle away from the center of view into camera coordinates
- R describes the 3D viewing direction of camera compared to world coordinates
- beta[:3] camera location in world coordinates (x,y,z)
- beta[3::] camera orientation (azimuth, tilt, roll)
Returns:
P (np.ndarray): Projective matrix
"""
# K: intrinsic matrix, puts image in pixel units of the specific camera
K = np.array([
[self.lcp['fx'], 0, self.lcp['c0U']],
[0, -self.lcp['fy'], self.lcp['c0V']],
[0, 0, 1]
])
# R: rotation matrix, puts image in camera orientation
R = angle2R(
self.beta[3],
self.beta[4],
self.beta[5]
)
        # IC: identity matrix augmented by the camera center, puts image in camera coordinates
IC = np.vstack((
np.eye(3),
-self.beta[:3]
)).T
KR = np.matmul(K, R)
P = np.matmul(KR, IC)
        # Make the matrix homogeneous, methods use homogeneous coordinates for easier math
# - normalize to make last element equal 1
P = P/P[-1, -1]
return P
def angle2R(azimuth, tilt, swing):
"""Assembles and returns a rotation matrix R from azimuth, tilt, and swing (roll)
Notes:
        - derived from angles2R.m by the Coastal Imaging Research Network and Oregon State University
- From p 612 of Wolf, 1983
Arguments:
azimuth (float): Azimuth
tilt (float): Tilt
        swing (float): Swing (roll)
Returns:
R (np.ndarray): Rotation matrix
"""
a = azimuth
t = tilt
s = swing
R = np.zeros((3, 3))
R[0, 0] = np.cos(a) * np.cos(s) + np.sin(a) * np.cos(t) * np.sin(s)
R[0, 1] = -np.cos(s) * np.sin(a) + np.sin(s) * np.cos(t) * np.cos(a)
R[0, 2] = np.sin(s) * np.sin(t)
R[1, 0] = -np.sin(s) * np.cos(a) + np.cos(s) * np.cos(t) * np.sin(a)
R[1, 1] = np.sin(s) * np.sin(a) + np.cos(s) * np.cos(t) * np.cos(a)
R[1, 2] = np.cos(s) * np.sin(t)
R[2, 0] = np.sin(t) * np.sin(a)
R[2, 1] = np.sin(t) * np.cos(a)
R[2, 2] = -np.cos(t)
return R
| import datetime
from pathlib import Path
import numpy as np
import scipy.io
class CameraCalibration(object):
"""Camera calibration saved in .mat file and method to assemble Projective (P) martrix.
Notes:
        - Inspired by example code + notes from CiRC which are derived from Hartley and Zisserman (2003).
        - Assumes calibration is saved in a .mat file
Args:
calibration_file (str): Path to camera calibration file.
Attributes:
fname (str): Name of camera calibration file
serial_number (int): Camera serial number
camera_number (str): Camera number (e.g. 'c5')
calibration_date (datetime): Date of camera calibration
coordinate_system (str): Coordinate system used for calibration (e.g. 'xyz')
beta (np.ndarray): Camera extrinsic calibration
x (across shore), y (longshore), z (vertical), azimuth, tilt, roll
lcp (dict): Lens Calibration Profile structure, the intrinsic camera calibration
P (np.ndarray): Matrix containing intrinsic and extrinsic calibration
"""
def __init__(self, calibration_file):
calibration_file = Path(calibration_file)
self.fname = calibration_file.name
sn, cn, dc, cs, _ = self.fname.split('_')
self.serial_number = int(sn)
self.camera_number = cn
self.calibration_date = datetime.datetime.strptime(dc, '%Y%m%d')
self.coordinate_system = cs
mat_data = scipy.io.loadmat(calibration_file)
self.beta = mat_data['beta'][0]
self.lcp = self._load_lcp(mat_data['lcp'])
self.P = self._assembleP()
def _load_lcp(self, lcp):
"""Return dict of lcp from lcp loaded from mat file"""
NU = lcp[0, 0][0][0][0]
NV = lcp[0, 0][1][0][0]
c0U = lcp[0, 0][2][0][0]
c0V = lcp[0, 0][3][0][0]
fx = lcp[0, 0][4][0][0]
fy = lcp[0, 0][5][0][0]
d1 = lcp[0, 0][6][0][0]
d2 = lcp[0, 0][7][0][0]
d3 = lcp[0, 0][8][0][0]
t1 = lcp[0, 0][9][0][0]
t2 = lcp[0, 0][10][0][0]
r = lcp[0, 0][11][0, :]
caltech_fname = lcp[0, 0][12][0]
fr = lcp[0, 0][13][0, :]
x = lcp[0, 0][14][0, :]
y = lcp[0, 0][15][0, :]
dx = lcp[0, 0][16][:, :]
dy = lcp[0, 0][17][:, :]
return {
'NU': NU,
'NV': NV,
'c0U': c0U,
'c0V': c0V,
'fx': fx,
'fy': fy,
'd1': d1,
'd2': d2,
'd3': d3,
't1': t1,
't2': t2,
'r': r,
'fr': fr,
'caltech_fname': caltech_fname,
'x': x,
'y': y,
'dx': dx,
'dy': dy
}
def __repr__(self):
msg = (
f'serial_number: {self.serial_number}\n'
f'camera_number: {self.camera_number}\n'
f'calibration_date: {self.calibration_date}\n'
f'coordinate_system: {self.coordinate_system}\n'
f'beta: {self.beta}\n'
f"sum of lcp r: {np.nansum(self.lcp['r'])}"
)
return msg
def __str__(self):
msg = (
f'serial_number: {self.serial_number}, '
f'camera_number: {self.camera_number}, '
f'calibration_date: {self.calibration_date}, '
f'coordinate_system: {self.coordinate_system}'
)
return msg
def _assembleP(self):
"""Assembles and returns Projective (P) matrix from LCP and Beta values.
Notes:
- Derived from lcpBeta2P.m + CiRN notes
- K converts angle away from the center of view into camera coordinates
- R describes the 3D viewing direction of camera compared to world coordinates
- beta[:3] camera location in world coordinates (x,y,z)
- beta[3::] camera orientation (azimuth, tilt, roll)
Returns:
P (np.ndarray): Projective matrix
"""
# K: intrinsic matrix, puts image in pixel units of the specific camera
K = np.array([
[self.lcp['fx'], 0, self.lcp['c0U']],
[0, -self.lcp['fy'], self.lcp['c0V']],
[0, 0, 1]
])
# R: rotation matrix, puts image in camera orientation
R = angle2R(
self.beta[3],
self.beta[4],
self.beta[5]
)
        # IC: identity matrix augmented by the camera center, puts image in camera coordinates
IC = np.vstack((
np.eye(3),
-self.beta[:3]
)).T
KR = np.matmul(K, R)
P = np.matmul(KR, IC)
        # Make the matrix homogeneous, methods use homogeneous coordinates for easier math
# - normalize to make last element equal 1
P = P/P[-1, -1]
return P
def angle2R(azimuth, tilt, swing):
"""Assembles and returns a rotation matrix R from azimuth, tilt, and swing (roll)
Notes:
        - derived from angles2R.m by the Coastal Imaging Research Network and Oregon State University
- From p 612 of Wolf, 1983
Arguments:
azimuth (float): Azimuth
tilt (float): Tilt
        swing (float): Swing (roll)
Returns:
R (np.ndarray): Rotation matrix
"""
a = azimuth
t = tilt
s = swing
R = np.zeros((3, 3))
R[0, 0] = np.cos(a) * np.cos(s) + np.sin(a) * np.cos(t) * np.sin(s)
R[0, 1] = -np.cos(s) * np.sin(a) + np.sin(s) * np.cos(t) * np.cos(a)
R[0, 2] = np.sin(s) * np.sin(t)
R[1, 0] = -np.sin(s) * np.cos(a) + np.cos(s) * np.cos(t) * np.sin(a)
R[1, 1] = np.sin(s) * np.sin(a) + np.cos(s) * np.cos(t) * np.cos(a)
R[1, 2] = np.cos(s) * np.sin(t)
R[2, 0] = np.sin(t) * np.sin(a)
R[2, 1] = np.sin(t) * np.cos(a)
R[2, 2] = -np.cos(t)
return R
|
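The docstrings above describe P as K · R · [I | -C]: K maps camera coordinates to pixels, R rotates world axes into the camera frame, and [I | -C] translates by the camera centre. A minimal sketch (using placeholder calibration numbers, not real lens data) of building such a P and projecting a world point, reusing the angle2R function defined above:

```python
import numpy as np

# Assumed placeholder intrinsics and extrinsics -- not real calibration values.
fx, fy, c0U, c0V = 1200.0, 1200.0, 640.0, 480.0
beta = np.array([0.0, 0.0, 10.0, 0.1, 1.2, 0.0])  # x, y, z, azimuth, tilt, roll

K = np.array([[fx, 0, c0U],
              [0, -fy, c0V],
              [0, 0, 1]])
R = angle2R(beta[3], beta[4], beta[5])        # rotation from azimuth/tilt/swing
IC = np.vstack((np.eye(3), -beta[:3])).T      # [I | -C], C = camera centre
P = K @ R @ IC
P = P / P[-1, -1]                             # normalise so the last element is 1

# Project a world point given in homogeneous coordinates to pixel coordinates.
world_pt = np.array([5.0, 20.0, 0.0, 1.0])
u, v, w = P @ world_pt
print(u / w, v / w)
```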
# -*- coding: utf-8 -*-
import datetime
import pytest
from flask import url_for
from flask_login import current_user
from scout.server.extensions import store
TEST_SUBPANEL = dict(
title="Subp title",
subtitle="Subp subtitle",
created=datetime.datetime.now(),
updated=datetime.datetime.now(),
)
def test_advanced_phenotypes_POST(app, user_obj, institute_obj):
"""Test the view showing the available phenotype models for an institute, after sending POST request with new phenotype model data"""
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
form_data = dict(model_name="A test model", model_desc="Test model description")
# WHEN user creates a new phenotype model using the phenomodel page
resp = client.post(
url_for(
"overview.advanced_phenotypes",
institute_id=institute_obj["internal_id"],
),
data=form_data,
)
assert resp.status_code == 200
# THEN the new model should be visible in the page
assert form_data["model_name"] in str(resp.data)
def test_remove_phenomodel(app, user_obj, institute_obj, mocker, mock_redirect):
"""Testing the endpoint to remove an existing phenotype model for an institute"""
mocker.patch("scout.server.blueprints.institutes.views.redirect", return_value=mock_redirect)
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
assert model_obj
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
form_data = {"model_id": model_obj["_id"]}
# WHEN the user removes the model via the remove_phenomodel endpoint
resp = client.post(
url_for("overview.remove_phenomodel", institute_id=institute_obj["internal_id"]),
data=form_data,
)
# THEN the phenotype model should be deleted from the database
assert store.phenomodel_collection.find_one() is None
def test_phenomodel_GET(app, user_obj, institute_obj):
"""test the phenomodel page endpoint, GET request"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
        # THEN the phenomodel endpoint should show phenotype model info
resp = client.get(
url_for(
"overview.phenomodel",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
)
)
assert "Test model" in str(resp.data)
def test_phenomodel_lock(app, user_obj, institute_obj, mocker, mock_redirect):
"""Test the endpoint to lock a phenomodel and make it editable only by admins"""
mocker.patch("scout.server.blueprints.institutes.views.redirect", return_value=mock_redirect)
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model = store.phenomodel_collection.find_one()
assert "admins" not in model
admins = ["user1@email", "user2@email"]
# GIVEN an initialized app
with app.test_client() as client:
# GIVEN that the user could be logged in
client.get(url_for("auto_login"))
        # WHEN the user locks the model using two admins
form_data = dict(model_id=model["_id"], lock="", user_admins=admins)
resp = client.post(
url_for("overview.lock_phenomodel"),
data=form_data,
)
# Then the page should redirect
assert resp.status_code == 302
        # And the current user's email plus the provided admin emails should be registered as the model admins
locked_model = store.phenomodel_collection.find_one()
assert locked_model["admins"] == [current_user.email] + admins
def test_phenomodel_unlock(app, user_obj, institute_obj, mocker, mock_redirect):
"""Test the endpoint to unlock a phenomodel and make it editable only by all users"""
mocker.patch("scout.server.blueprints.institutes.views.redirect", return_value=mock_redirect)
# GIVEN an institute with phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model = store.phenomodel_collection.find_one()
# GIVEN an initialized app
with app.test_client() as client:
# GIVEN that the user could be logged in
client.get(url_for("auto_login"))
# Given that the phenomodel is locked and current user is admin
model["admins"] = [current_user.email]
store.update_phenomodel(model["_id"], model)
locked_model = store.phenomodel_collection.find_one()
assert locked_model["admins"] == [current_user.email]
        # When the lock_phenomodel endpoint is used to unlock the model
form_data = dict(
model_id=model["_id"],
)
resp = client.post(
url_for("overview.lock_phenomodel"),
data=form_data,
)
# Then the page should redirect
assert resp.status_code == 302
# And the model will have no admins
unlocked_model = store.phenomodel_collection.find_one()
assert unlocked_model["admins"] == []
def test_phenomodel_POST_rename_model(app, user_obj, institute_obj):
"""Test the phenomodel endpoing, POST request for updating model info"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Old model", "Old description")
model_obj = store.phenomodel_collection.find_one()
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
# WHEN the user updates model info using a POST request
form_data = dict(
update_model="update", model_name="New model", model_desc="New description"
)
resp = client.post(
url_for(
"overview.phenomodel",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
# THEN the model in the database should be updated
updated_model = store.phenomodel_collection.find_one()
assert updated_model["name"] == "New model"
def test_phenomodel_POST_add_delete_subpanel(app, user_obj, institute_obj):
"""Test the phenomodel endpoint, by sending requests for adding and deleting a subpanel"""
# GIVEN an institute with a phenotype model having no subpanels
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
assert model_obj["subpanels"] == {}
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
form_data = dict(
title="Phenotype subpanel title",
subtitle="Phenotype subpanel subtitle",
add_subpanel="Save phenotype subpanel",
)
# WHEN the user creates subpanel in phenotype model via POST request
resp = client.post(
url_for(
"overview.phenomodel",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
# Then the subpanel dictionary should be added to model subpanels
updated_model = store.phenomodel_collection.find_one()
subpanel_id = list(updated_model["subpanels"].keys())[0]
assert updated_model["subpanels"][subpanel_id]["title"] == "Phenotype subpanel title"
assert updated_model["subpanels"][subpanel_id]["subtitle"] == "Phenotype subpanel subtitle"
# WHEN the user sends a POST request to remove the subpanel
form_data = dict(subpanel_delete=subpanel_id)
resp = client.post(
url_for(
"overview.phenomodel",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
        # THEN the subpanel should be removed from the model's subpanels
updated_model = store.phenomodel_collection.find_one()
assert updated_model["subpanels"] == {}
def test_phenomodel_POST_add_omim_checkbox_to_subpanel(app, user_obj, institute_obj, omim_checkbox):
"""Test adding an OMIM checkbox to a subpanel of a phenotype model via POST request"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
# containing a subpanel
model_obj["subpanels"] = {"subpanel_x": TEST_SUBPANEL}
store.update_phenomodel(model_obj["_id"], model_obj)
model_obj = store.phenomodel_collection.find_one()
assert model_obj["subpanels"]["subpanel_x"]
    # GIVEN that the database contains the OMIM term to add to the subpanel
store.disease_term_collection.insert_one(omim_checkbox)
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
# WHEN the user creates an OMIM checkbox using the endpoint
form_data = dict(
omim_subpanel_id="subpanel_x",
omimHasTitle="on",
omimTermTitle="Title for term",
omim_term=" | ".join([omim_checkbox["_id"], omim_checkbox["description"]]),
omim_custom_name="Alternative OMIM name",
add_omim="",
)
resp = client.post(
url_for(
"overview.checkbox_edit",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
        # THEN the term should have been added to the subpanel checkboxes
updated_model = store.phenomodel_collection.find_one()
checkbox = updated_model["subpanels"]["subpanel_x"]["checkboxes"]["OMIM:121210"]
assert checkbox["name"] == "OMIM:121210"
assert checkbox["checkbox_type"] == "omim"
assert checkbox["description"] == "Febrile seizures familial 1"
assert checkbox["term_title"] == form_data["omimTermTitle"]
assert checkbox["custom_name"] == form_data["omim_custom_name"]
def test_phenomodel_POST_add_hpo_checkbox_to_subpanel(app, user_obj, institute_obj, hpo_checkboxes):
"""Test adding an HPO checkbox with its children to a subpanel of a phenotype model via POST request"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
# containing a subpanel
model_obj["subpanels"] = {"subpanel_x": TEST_SUBPANEL}
store.update_phenomodel(model_obj["_id"], model_obj)
# GIVEN a database with the required HPO terms (one parent term and one child term)
store.hpo_term_collection.insert_many(hpo_checkboxes)
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
# WHEN the user creates an HPO checkbox using the endpoint
form_data = dict(
hpo_subpanel_id="subpanel_x",
hpoHasTitle="on",
hpoTermTitle="Title for term",
hpo_term=" | ".join([hpo_checkboxes[0]["_id"], hpo_checkboxes[0]["description"]]),
hpo_custom_name="Alternative HPO name",
add_hpo="",
includeChildren="on",
)
resp = client.post(
url_for(
"overview.checkbox_edit",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
# THEN the term should have been added to the subpanel checkboxes
updated_model = store.phenomodel_collection.find_one()
checkbox = updated_model["subpanels"]["subpanel_x"]["checkboxes"]["HP:0025190"]
assert checkbox["name"] == "HP:0025190"
assert checkbox["checkbox_type"] == "hpo"
assert checkbox["description"] == "Bilateral tonic-clonic seizure with generalized onset"
assert checkbox["term_title"] == form_data["hpoTermTitle"]
assert checkbox["custom_name"] == form_data["hpo_custom_name"]
# Additionally, the HPO term checkbox should contain a nested HPO term:
nested_hpo_term = {
"name": hpo_checkboxes[1]["_id"],
"description": hpo_checkboxes[1]["description"],
}
assert checkbox["children"] == [nested_hpo_term]
def test_phenomodel_POST_remove_subpanel_checkbox(app, user_obj, institute_obj):
"""Test removing a single checkbox from a phenotype model subpanel"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
# containing a subpanel with a checkbox
TEST_SUBPANEL["checkboxes"] = {"HP:000001": {"name": "HP:000001"}}
model_obj["subpanels"] = {"subpanel_x": TEST_SUBPANEL}
store.update_phenomodel(model_obj["_id"], model_obj)
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
# WHEN the user removes the checkbox using the endpoint, POST request
form_data = dict(checkgroup_remove="#".join(["HP:000001", "subpanel_x"]))
resp = client.post(
url_for(
"overview.checkbox_edit",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
# THEN the checkbox should be removed from the subpanel
updated_model = store.phenomodel_collection.find_one()
assert updated_model["subpanels"]["subpanel_x"]["checkboxes"] == {}
def test_overview(app, user_obj, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page
resp = client.get(url_for("overview.institutes"))
# THEN it should return a page
assert resp.status_code == 200
def test_institute_settings(app, user_obj, institute_obj):
"""Test function that creates institute update form and updates an institute"""
# GIVEN a gene panel
test_panel = store.panel_collection.find_one()
assert test_panel
# AND 2 mock HPO terms in database
mock_disease_terms = [
{"_id": "HP:0001298", "description": "Encephalopathy", "hpo_id": "HP:0001298"},
{"_id": "HP:0001250", "description": "Seizures", "hpo_id": "HP:0001250"},
]
for term in mock_disease_terms:
store.load_hpo_term(term)
assert store.hpo_term(term["_id"])
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
client.get(url_for("auto_login"))
        # WHEN accessing the institute settings page (GET method)
resp = client.get(
url_for("overview.institute_settings", institute_id=institute_obj["internal_id"])
)
# THEN it should return a page
assert resp.status_code == 200
# WHEN updating an institute using the following form
form_data = {
"display_name": "updated name",
"sanger_emails": ["john@doe.com"],
"coverage_cutoff": "15",
"frequency_cutoff": "0.001",
"cohorts": ["test cohort 1", "test cohort 2"],
"institutes": ["cust111", "cust222"],
"pheno_groups": [
"HP:0001298 , Encephalopathy ( ENC )",
"HP:0001250 , Seizures ( EP )",
],
"gene_panels": [test_panel["panel_name"]],
"alamut_key": "test_alamut_key",
}
# via POST request
resp = client.post(
url_for("overview.institute_settings", institute_id=institute_obj["internal_id"]),
data=form_data,
)
assert resp.status_code == 200
# THEN the institute object should be updated with the provided form data
updated_institute = store.institute_collection.find_one()
assert updated_institute["display_name"] == form_data["display_name"]
assert updated_institute["sanger_recipients"] == form_data["sanger_emails"]
assert updated_institute["coverage_cutoff"] == int(form_data["coverage_cutoff"])
assert updated_institute["frequency_cutoff"] == float(form_data["frequency_cutoff"])
assert updated_institute["cohorts"] == form_data["cohorts"]
assert updated_institute["collaborators"] == form_data["institutes"]
assert len(updated_institute["phenotype_groups"]) == 2 # one for each HPO term
assert updated_institute["gene_panels"] == {
test_panel["panel_name"]: test_panel["display_name"]
}
assert updated_institute["alamut_key"] == form_data["alamut_key"]
def test_cases(app, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page
resp = client.get(url_for("overview.cases", institute_id=institute_obj["internal_id"]))
# THEN it should return a page
assert resp.status_code == 200
        # test query passing parameters in search form
request_data = {
"limit": "100",
"skip_assigned": "on",
"is_research": "on",
"query": "case_id",
}
resp = client.get(
url_for(
"overview.cases",
institute_id=institute_obj["internal_id"],
params=request_data,
)
)
# response should return a page
assert resp.status_code == 200
sorting_options = ["analysis_date", "track", "status"]
for option in sorting_options:
# test query passing the sorting option to the cases view
request_data = {"sort": option}
resp = client.get(
url_for(
"overview.cases",
institute_id=institute_obj["internal_id"],
params=request_data,
)
)
# response should return a page
assert resp.status_code == 200
def test_cases_query_case_name(app, case_obj, institute_obj):
"""Test cases filtering by case display name"""
slice_query = f"case:{case_obj["display_name"]}"
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page with a query
resp = client.get(
url_for(
"overview.cases",
query=slice_query,
institute_id=institute_obj["internal_id"],
)
)
# THEN it should return a page with the case
assert resp.status_code == 200
assert case_obj["display_name"] in str(resp.data)
def test_cases_panel_query(app, case_obj, parsed_panel, institute_obj):
"""Test cases filtering by gene panel"""
slice_query = f"panel:{parsed_panel["panel_id"]}"
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page with a query
resp = client.get(
url_for(
"overview.cases",
query=slice_query,
institute_id=institute_obj["internal_id"],
)
)
# THEN it should return a page with the case
assert resp.status_code == 200
assert case_obj["display_name"] in str(resp.data)
def test_cases_by_pinned_gene_query(app, case_obj, institute_obj):
"""Test cases filtering by providing the gene of one of its pinned variants"""
# GIVEN a test variant hitting POT1 gene (hgnc_id:17284)
suspects = []
test_variant = store.variant_collection.find_one({"genes.hgnc_id": {"$in": [17284]}})
assert test_variant
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# GIVEN a case with this variant pinned
form = {
"action": "ADD",
}
client.post(
url_for(
"cases.pin_variant",
institute_id=institute_obj["internal_id"],
case_name=case_obj["display_name"],
variant_id=test_variant["_id"],
),
data=form,
)
updated_case = store.case_collection.find_one({"suspects": {"$in": [test_variant["_id"]]}})
assert updated_case
# WHEN the case search is performed using the POT1 gene
slice_query = f"pinned:POT1"
resp = client.get(
url_for(
"overview.cases",
query=slice_query,
institute_id=institute_obj["internal_id"],
)
)
# THEN it should return a page with the case
assert resp.status_code == 200
assert case_obj["display_name"] in str(resp.data)
def test_cases_exact_phenotype_query(app, case_obj, institute_obj, test_hpo_terms):
"""Test cases filtering by providing one HPO term"""
# GIVEN a case with some HPO terms
store.case_collection.find_one_and_update(
{"_id": case_obj["_id"]},
{"$set": {"phenotype_terms": test_hpo_terms}},
)
one_hpo_term = test_hpo_terms[0]["phenotype_id"]
slice_query = f"exact_pheno:{one_hpo_term}"
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page with the query
resp = client.get(
url_for(
"overview.cases",
query=slice_query,
institute_id=institute_obj["internal_id"],
)
)
# THEN it should return a page with the case
assert resp.status_code == 200
assert case_obj["display_name"] in str(resp.data)
def test_cases_similar_phenotype_query(app, case_obj, institute_obj, test_hpo_terms):
"""Test cases filtering by providing HPO terms that are related to case phenotype"""
# GIVEN a case with some HPO terms
store.case_collection.find_one_and_update(
{"_id": case_obj["_id"]},
{"$set": {"phenotype_terms": test_hpo_terms}},
)
# WHEN similar but distinct HPO terms are used in the query
similar_hpo_terms = ["HP:0012047", "HP:0000618"]
for term in test_hpo_terms:
assert term["phenotype_id"] not in similar_hpo_terms
similar_hpo_terms = ",".join(similar_hpo_terms)
slice_query = f"similar_pheno:{similar_hpo_terms}"
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page with the query
resp = client.get(
url_for(
"overview.cases",
query=slice_query,
institute_id=institute_obj["internal_id"],
)
)
# THEN it should return a page with the case
assert resp.status_code == 200
assert case_obj["display_name"] in str(resp.data)
def test_causatives(app, user_obj, institute_obj, case_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
# There should be no causative variants for test case:
assert "causatives" not in case_obj
var1_id = "4c7d5c70d955875504db72ef8e1abe77" # in POT1 gene
var2_id = "e24b65bf27feacec6a81c8e9e19bd5f1" # in TBX1 gene
var_ids = [var1_id, var2_id]
# for each variant
for var_id in var_ids:
# update case by marking variant as causative:
variant_obj = store.variant(document_id=var_id)
store.mark_causative(
institute=institute_obj,
case=case_obj,
user=user_obj,
link="causative_var_link/{}".format(variant_obj["_id"]),
variant=variant_obj,
)
updated_case = store.case_collection.find_one({"_id": case_obj["_id"]})
# The above variants should be registered as causatives in case object
assert updated_case["causatives"] == var_ids
# Call scout causatives view and check if the above causatives are displayed
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
        # WHEN accessing the causatives page
resp = client.get(url_for("overview.causatives", institute_id=institute_obj["internal_id"]))
# THEN it should return a page
assert resp.status_code == 200
# with variant 1
assert var1_id in str(resp.data)
# and variant 2
assert var2_id in str(resp.data)
# Filter causatives by gene (POT1)
resp = client.get(
url_for(
"overview.causatives",
institute_id=institute_obj["internal_id"],
query="17284 | POT1 (DKFZp586D211, hPot1, POT1)",
)
)
# THEN it should return a page
assert resp.status_code == 200
# with variant 1
assert var1_id in str(resp.data)
# but NOT variant 2
assert var2_id not in str(resp.data)
def test_gene_variants_filter(app, institute_obj, case_obj):
"""Test the function that allows searching SNVs and INDELS using filters"""
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
        # When the user submits a query for variants in a specific gene and a rank score
filter_query = {
"hgnc_symbols": "POT1",
"variant_type": ["clinical"],
"rank_score": 11,
}
resp = client.post(
url_for("overview.gene_variants", institute_id=institute_obj["internal_id"]),
data=filter_query,
)
# THEN it should return a page
assert resp.status_code == 200
        # containing variants in that gene
assert "POT1" in str(resp.data)
def test_gene_variants_no_valid_gene(app, institute_obj, case_obj, mocker, mock_redirect):
"""Test the gene_variant endpoint with a gene symbol not in database"""
mocker.patch("scout.server.blueprints.institutes.views.redirect", return_value=mock_redirect)
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
        # When the user submits a query for variants in a gene that is not in the database
filter_query = {
"hgnc_symbols": "UNKNOWN-GENE",
"variant_type": ["clinical"],
"rank_score": 11,
}
resp = client.post(
url_for("overview.gene_variants", institute_id=institute_obj["internal_id"]),
data=filter_query,
)
# THEN the page should redirect
assert resp.status_code == 302
def test_institute_users(app, institute_obj, user_obj):
"""Test the link to all institute users"""
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
        # WHEN accessing the institute users page
resp = client.get(
url_for("overview.institute_users", institute_id=institute_obj["internal_id"])
)
# THEN it should return a page
assert resp.status_code == 200
# Containing the test user's name
assert user_obj["name"] in str(resp.data)
def test_filters(app, institute_obj, user_obj, case_obj, filter_obj):
"""Test the link to all institute users"""
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
category = "snv"
filter_id = store.stash_filter(filter_obj, institute_obj, case_obj, user_obj, category)
store.lock_filter(filter_id, user_obj.get("email"))
        # WHEN accessing the filters page
resp = client.get(url_for("overview.filters", institute_id=institute_obj["internal_id"]))
# THEN it should return a page
assert resp.status_code == 200
# Containing the test user's name
assert user_obj["name"] in str(resp.data)
def test_clinvar_submissions(app, institute_obj, clinvar_variant, clinvar_casedata):
"""Test the web page containing the clinvar submissions for an institute"""
# GIVEN an institute with a clinvar submission
store.create_submission(institute_obj["_id"])
open_submission = store.get_open_clinvar_submission(institute_obj["_id"])
submission_with_data = store.add_to_submission(
open_submission["_id"], ([clinvar_variant], [clinvar_casedata])
)
assert submission_with_data
# GIVEN an initialized app and a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# When visiting the clinvar submission page (get request)
resp = client.get(
url_for(
"overview.clinvar_submissions",
institute_id=institute_obj["internal_id"],
)
)
# a successful response should be returned
assert resp.status_code == 200
assert str(submission_with_data["_id"]) in str(resp.data)
def test_rename_clinvar_samples(app, institute_obj, clinvar_variant, clinvar_casedata):
"""Test the form button triggering the renaming of samples for a clinvar submission"""
# GIVEN an institute with a clinvar submission
store.create_submission(institute_obj["_id"])
open_submission = store.get_open_clinvar_submission(institute_obj["_id"])
submission_with_data = store.add_to_submission(
open_submission["_id"], ([clinvar_variant], [clinvar_casedata])
)
assert submission_with_data["_id"]
# GIVEN an initialized app and a valid user
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
case_id = clinvar_casedata["case_id"]
old_name = clinvar_casedata["individual_id"]
form_data = dict(
new_name="new_sample_name",
)
referer = url_for("overview.clinvar_submissions", institute_id=institute_obj["internal_id"])
# WHEN the sample name is edited from the submission page (POST request)
resp = client.post(
url_for(
f"overview.clinvar_rename_casedata",
submission=submission_with_data["_id"],
case=case_id,
old_name=old_name,
),
data=form_data,
headers={"referer": referer},
)
        # a successful response should redirect to the submissions page
assert resp.status_code == 302
# And the sample name should have been updated in the database
updated_casedata = store.clinvar_collection.find_one({"_id": clinvar_casedata["_id"]})
assert updated_casedata["individual_id"] != clinvar_casedata["individual_id"]
| # -*- coding: utf-8 -*-
import datetime
import pytest
from flask import url_for
from flask_login import current_user
from scout.server.extensions import store
TEST_SUBPANEL = dict(
title="Subp title",
subtitle="Subp subtitle",
created=datetime.datetime.now(),
updated=datetime.datetime.now(),
)
def test_advanced_phenotypes_POST(app, user_obj, institute_obj):
"""Test the view showing the available phenotype models for an institute, after sending POST request with new phenotype model data"""
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
form_data = dict(model_name="A test model", model_desc="Test model description")
# WHEN user creates a new phenotype model using the phenomodel page
resp = client.post(
url_for(
"overview.advanced_phenotypes",
institute_id=institute_obj["internal_id"],
),
data=form_data,
)
assert resp.status_code == 200
# THEN the new model should be visible in the page
assert form_data["model_name"] in str(resp.data)
def test_remove_phenomodel(app, user_obj, institute_obj, mocker, mock_redirect):
"""Testing the endpoint to remove an existing phenotype model for an institute"""
mocker.patch("scout.server.blueprints.institutes.views.redirect", return_value=mock_redirect)
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
assert model_obj
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
form_data = {"model_id": model_obj["_id"]}
# WHEN the user removes the model via the remove_phenomodel endpoint
resp = client.post(
url_for("overview.remove_phenomodel", institute_id=institute_obj["internal_id"]),
data=form_data,
)
# THEN the phenotype model should be deleted from the database
assert store.phenomodel_collection.find_one() is None
def test_phenomodel_GET(app, user_obj, institute_obj):
"""test the phenomodel page endpoint, GET request"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
        # THEN the phenomodel endpoint should show phenotype model info
resp = client.get(
url_for(
"overview.phenomodel",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
)
)
assert "Test model" in str(resp.data)
def test_phenomodel_lock(app, user_obj, institute_obj, mocker, mock_redirect):
"""Test the endpoint to lock a phenomodel and make it editable only by admins"""
mocker.patch("scout.server.blueprints.institutes.views.redirect", return_value=mock_redirect)
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model = store.phenomodel_collection.find_one()
assert "admins" not in model
admins = ["user1@email", "user2@email"]
# GIVEN an initialized app
with app.test_client() as client:
# GIVEN that the user could be logged in
client.get(url_for("auto_login"))
        # WHEN the user locks the model using two admins
form_data = dict(model_id=model["_id"], lock="", user_admins=admins)
resp = client.post(
url_for("overview.lock_phenomodel"),
data=form_data,
)
# Then the page should redirect
assert resp.status_code == 302
        # And the current user's email plus the provided admin emails should be registered as the model admins
locked_model = store.phenomodel_collection.find_one()
assert locked_model["admins"] == [current_user.email] + admins
def test_phenomodel_unlock(app, user_obj, institute_obj, mocker, mock_redirect):
"""Test the endpoint to unlock a phenomodel and make it editable only by all users"""
mocker.patch("scout.server.blueprints.institutes.views.redirect", return_value=mock_redirect)
# GIVEN an institute with phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model = store.phenomodel_collection.find_one()
# GIVEN an initialized app
with app.test_client() as client:
# GIVEN that the user could be logged in
client.get(url_for("auto_login"))
# Given that the phenomodel is locked and current user is admin
model["admins"] = [current_user.email]
store.update_phenomodel(model["_id"], model)
locked_model = store.phenomodel_collection.find_one()
assert locked_model["admins"] == [current_user.email]
        # When the lock_phenomodel endpoint is used to unlock the model
form_data = dict(
model_id=model["_id"],
)
resp = client.post(
url_for("overview.lock_phenomodel"),
data=form_data,
)
# Then the page should redirect
assert resp.status_code == 302
# And the model will have no admins
unlocked_model = store.phenomodel_collection.find_one()
assert unlocked_model["admins"] == []
def test_phenomodel_POST_rename_model(app, user_obj, institute_obj):
"""Test the phenomodel endpoing, POST request for updating model info"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Old model", "Old description")
model_obj = store.phenomodel_collection.find_one()
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
# WHEN the user updates model info using a POST request
form_data = dict(
update_model="update", model_name="New model", model_desc="New description"
)
resp = client.post(
url_for(
"overview.phenomodel",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
# THEN the model in the database should be updated
updated_model = store.phenomodel_collection.find_one()
assert updated_model["name"] == "New model"
def test_phenomodel_POST_add_delete_subpanel(app, user_obj, institute_obj):
"""Test the phenomodel endpoint, by sending requests for adding and deleting a subpanel"""
# GIVEN an institute with a phenotype model having no subpanels
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
assert model_obj["subpanels"] == {}
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
form_data = dict(
title="Phenotype subpanel title",
subtitle="Phenotype subpanel subtitle",
add_subpanel="Save phenotype subpanel",
)
# WHEN the user creates subpanel in phenotype model via POST request
resp = client.post(
url_for(
"overview.phenomodel",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
# Then the subpanel dictionary should be added to model subpanels
updated_model = store.phenomodel_collection.find_one()
subpanel_id = list(updated_model["subpanels"].keys())[0]
assert updated_model["subpanels"][subpanel_id]["title"] == "Phenotype subpanel title"
assert updated_model["subpanels"][subpanel_id]["subtitle"] == "Phenotype subpanel subtitle"
# WHEN the user sends a POST request to remove the subpanel
form_data = dict(subpanel_delete=subpanel_id)
resp = client.post(
url_for(
"overview.phenomodel",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
        # THEN the subpanel should be removed from the model's subpanels
updated_model = store.phenomodel_collection.find_one()
assert updated_model["subpanels"] == {}
def test_phenomodel_POST_add_omim_checkbox_to_subpanel(app, user_obj, institute_obj, omim_checkbox):
"""Test adding an OMIM checkbox to a subpanel of a phenotype model via POST request"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
# containing a subpanel
model_obj["subpanels"] = {"subpanel_x": TEST_SUBPANEL}
store.update_phenomodel(model_obj["_id"], model_obj)
model_obj = store.phenomodel_collection.find_one()
assert model_obj["subpanels"]["subpanel_x"]
    # GIVEN that the database contains the OMIM term to add to the subpanel
store.disease_term_collection.insert_one(omim_checkbox)
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
# WHEN the user creates an OMIM checkbox using the endpoint
form_data = dict(
omim_subpanel_id="subpanel_x",
omimHasTitle="on",
omimTermTitle="Title for term",
omim_term=" | ".join([omim_checkbox["_id"], omim_checkbox["description"]]),
omim_custom_name="Alternative OMIM name",
add_omim="",
)
resp = client.post(
url_for(
"overview.checkbox_edit",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
        # THEN the term should have been added to the subpanel checkboxes
updated_model = store.phenomodel_collection.find_one()
checkbox = updated_model["subpanels"]["subpanel_x"]["checkboxes"]["OMIM:121210"]
assert checkbox["name"] == "OMIM:121210"
assert checkbox["checkbox_type"] == "omim"
assert checkbox["description"] == "Febrile seizures familial 1"
assert checkbox["term_title"] == form_data["omimTermTitle"]
assert checkbox["custom_name"] == form_data["omim_custom_name"]
def test_phenomodel_POST_add_hpo_checkbox_to_subpanel(app, user_obj, institute_obj, hpo_checkboxes):
"""Test adding an HPO checkbox with its children to a subpanel of a phenotype model via POST request"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
# containing a subpanel
model_obj["subpanels"] = {"subpanel_x": TEST_SUBPANEL}
store.update_phenomodel(model_obj["_id"], model_obj)
# GIVEN a database with the required HPO terms (one parent term and one child term)
store.hpo_term_collection.insert_many(hpo_checkboxes)
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
# WHEN the user creates an HPO checkbox using the endpoint
form_data = dict(
hpo_subpanel_id="subpanel_x",
hpoHasTitle="on",
hpoTermTitle="Title for term",
hpo_term=" | ".join([hpo_checkboxes[0]["_id"], hpo_checkboxes[0]["description"]]),
hpo_custom_name="Alternative HPO name",
add_hpo="",
includeChildren="on",
)
resp = client.post(
url_for(
"overview.checkbox_edit",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
# THEN the term should have been added to the subpanel checkboxes
updated_model = store.phenomodel_collection.find_one()
checkbox = updated_model["subpanels"]["subpanel_x"]["checkboxes"]["HP:0025190"]
assert checkbox["name"] == "HP:0025190"
assert checkbox["checkbox_type"] == "hpo"
assert checkbox["description"] == "Bilateral tonic-clonic seizure with generalized onset"
assert checkbox["term_title"] == form_data["hpoTermTitle"]
assert checkbox["custom_name"] == form_data["hpo_custom_name"]
# Additionally, the HPO term checkbox should contain a nested HPO term:
nested_hpo_term = {
"name": hpo_checkboxes[1]["_id"],
"description": hpo_checkboxes[1]["description"],
}
assert checkbox["children"] == [nested_hpo_term]
def test_phenomodel_POST_remove_subpanel_checkbox(app, user_obj, institute_obj):
"""Test removing a single checkbox from a phenotype model subpanel"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
# containing a subpanel with a checkbox
TEST_SUBPANEL["checkboxes"] = {"HP:000001": {"name": "HP:000001"}}
model_obj["subpanels"] = {"subpanel_x": TEST_SUBPANEL}
store.update_phenomodel(model_obj["_id"], model_obj)
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
# WHEN the user removes the checkbox using the endpoint, POST request
form_data = dict(checkgroup_remove="#".join(["HP:000001", "subpanel_x"]))
resp = client.post(
url_for(
"overview.checkbox_edit",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
# THEN the checkbox should be removed from the subpanel
updated_model = store.phenomodel_collection.find_one()
assert updated_model["subpanels"]["subpanel_x"]["checkboxes"] == {}
def test_overview(app, user_obj, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page
resp = client.get(url_for("overview.institutes"))
# THEN it should return a page
assert resp.status_code == 200
def test_institute_settings(app, user_obj, institute_obj):
"""Test function that creates institute update form and updates an institute"""
# GIVEN a gene panel
test_panel = store.panel_collection.find_one()
assert test_panel
# AND 2 mock HPO terms in database
mock_disease_terms = [
{"_id": "HP:0001298", "description": "Encephalopathy", "hpo_id": "HP:0001298"},
{"_id": "HP:0001250", "description": "Seizures", "hpo_id": "HP:0001250"},
]
for term in mock_disease_terms:
store.load_hpo_term(term)
assert store.hpo_term(term["_id"])
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
client.get(url_for("auto_login"))
        # WHEN accessing the institute settings page (GET method)
resp = client.get(
url_for("overview.institute_settings", institute_id=institute_obj["internal_id"])
)
# THEN it should return a page
assert resp.status_code == 200
# WHEN updating an institute using the following form
form_data = {
"display_name": "updated name",
"sanger_emails": ["john@doe.com"],
"coverage_cutoff": "15",
"frequency_cutoff": "0.001",
"cohorts": ["test cohort 1", "test cohort 2"],
"institutes": ["cust111", "cust222"],
"pheno_groups": [
"HP:0001298 , Encephalopathy ( ENC )",
"HP:0001250 , Seizures ( EP )",
],
"gene_panels": [test_panel["panel_name"]],
"alamut_key": "test_alamut_key",
}
# via POST request
resp = client.post(
url_for("overview.institute_settings", institute_id=institute_obj["internal_id"]),
data=form_data,
)
assert resp.status_code == 200
# THEN the institute object should be updated with the provided form data
updated_institute = store.institute_collection.find_one()
assert updated_institute["display_name"] == form_data["display_name"]
assert updated_institute["sanger_recipients"] == form_data["sanger_emails"]
assert updated_institute["coverage_cutoff"] == int(form_data["coverage_cutoff"])
assert updated_institute["frequency_cutoff"] == float(form_data["frequency_cutoff"])
assert updated_institute["cohorts"] == form_data["cohorts"]
assert updated_institute["collaborators"] == form_data["institutes"]
assert len(updated_institute["phenotype_groups"]) == 2 # one for each HPO term
assert updated_institute["gene_panels"] == {
test_panel["panel_name"]: test_panel["display_name"]
}
assert updated_institute["alamut_key"] == form_data["alamut_key"]
def test_cases(app, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page
resp = client.get(url_for("overview.cases", institute_id=institute_obj["internal_id"]))
# THEN it should return a page
assert resp.status_code == 200
        # test query passing parameters in search form
request_data = {
"limit": "100",
"skip_assigned": "on",
"is_research": "on",
"query": "case_id",
}
resp = client.get(
url_for(
"overview.cases",
institute_id=institute_obj["internal_id"],
params=request_data,
)
)
# response should return a page
assert resp.status_code == 200
sorting_options = ["analysis_date", "track", "status"]
for option in sorting_options:
# test query passing the sorting option to the cases view
request_data = {"sort": option}
resp = client.get(
url_for(
"overview.cases",
institute_id=institute_obj["internal_id"],
params=request_data,
)
)
# response should return a page
assert resp.status_code == 200
def test_cases_query_case_name(app, case_obj, institute_obj):
"""Test cases filtering by case display name"""
slice_query = f"case:{case_obj['display_name']}"
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page with a query
resp = client.get(
url_for(
"overview.cases",
query=slice_query,
institute_id=institute_obj["internal_id"],
)
)
# THEN it should return a page with the case
assert resp.status_code == 200
assert case_obj["display_name"] in str(resp.data)
def test_cases_panel_query(app, case_obj, parsed_panel, institute_obj):
"""Test cases filtering by gene panel"""
slice_query = f"panel:{parsed_panel['panel_id']}"
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page with a query
resp = client.get(
url_for(
"overview.cases",
query=slice_query,
institute_id=institute_obj["internal_id"],
)
)
# THEN it should return a page with the case
assert resp.status_code == 200
assert case_obj["display_name"] in str(resp.data)
def test_cases_by_pinned_gene_query(app, case_obj, institute_obj):
"""Test cases filtering by providing the gene of one of its pinned variants"""
# GIVEN a test variant hitting POT1 gene (hgnc_id:17284)
suspects = []
test_variant = store.variant_collection.find_one({"genes.hgnc_id": {"$in": [17284]}})
assert test_variant
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# GIVEN a case with this variant pinned
form = {
"action": "ADD",
}
client.post(
url_for(
"cases.pin_variant",
institute_id=institute_obj["internal_id"],
case_name=case_obj["display_name"],
variant_id=test_variant["_id"],
),
data=form,
)
updated_case = store.case_collection.find_one({"suspects": {"$in": [test_variant["_id"]]}})
assert updated_case
# WHEN the case search is performed using the POT1 gene
slice_query = f"pinned:POT1"
resp = client.get(
url_for(
"overview.cases",
query=slice_query,
institute_id=institute_obj["internal_id"],
)
)
# THEN it should return a page with the case
assert resp.status_code == 200
assert case_obj["display_name"] in str(resp.data)
def test_cases_exact_phenotype_query(app, case_obj, institute_obj, test_hpo_terms):
"""Test cases filtering by providing one HPO term"""
# GIVEN a case with some HPO terms
store.case_collection.find_one_and_update(
{"_id": case_obj["_id"]},
{"$set": {"phenotype_terms": test_hpo_terms}},
)
one_hpo_term = test_hpo_terms[0]["phenotype_id"]
slice_query = f"exact_pheno:{one_hpo_term}"
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page with the query
resp = client.get(
url_for(
"overview.cases",
query=slice_query,
institute_id=institute_obj["internal_id"],
)
)
# THEN it should return a page with the case
assert resp.status_code == 200
assert case_obj["display_name"] in str(resp.data)
def test_cases_similar_phenotype_query(app, case_obj, institute_obj, test_hpo_terms):
"""Test cases filtering by providing HPO terms that are related to case phenotype"""
# GIVEN a case with some HPO terms
store.case_collection.find_one_and_update(
{"_id": case_obj["_id"]},
{"$set": {"phenotype_terms": test_hpo_terms}},
)
# WHEN similar but distinct HPO terms are used in the query
similar_hpo_terms = ["HP:0012047", "HP:0000618"]
for term in test_hpo_terms:
assert term["phenotype_id"] not in similar_hpo_terms
similar_hpo_terms = ",".join(similar_hpo_terms)
slice_query = f"similar_pheno:{similar_hpo_terms}"
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the cases page with the query
resp = client.get(
url_for(
"overview.cases",
query=slice_query,
institute_id=institute_obj["internal_id"],
)
)
# THEN it should return a page with the case
assert resp.status_code == 200
assert case_obj["display_name"] in str(resp.data)
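# NOTE (illustrative sketch, not Scout's actual implementation): all of the case
# searches above are built as "<field>:<value>" strings, e.g. "case:<name>",
# "panel:<id>", "pinned:POT1", "exact_pheno:<hpo_id>" or
# "similar_pheno:HP:0012047,HP:0000618". A minimal way to split such a query is to
# cut only on the first colon, so HPO identifiers keep their own colon intact:
def _split_slice_query(slice_query):
    """Hypothetical helper used for illustration only."""
    field, _, value = slice_query.partition(":")
    return field, value
# _split_slice_query("similar_pheno:HP:0012047,HP:0000618")
# returns ("similar_pheno", "HP:0012047,HP:0000618")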
def test_causatives(app, user_obj, institute_obj, case_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
# There should be no causative variants for test case:
assert "causatives" not in case_obj
var1_id = "4c7d5c70d955875504db72ef8e1abe77" # in POT1 gene
var2_id = "e24b65bf27feacec6a81c8e9e19bd5f1" # in TBX1 gene
var_ids = [var1_id, var2_id]
# for each variant
for var_id in var_ids:
# update case by marking variant as causative:
variant_obj = store.variant(document_id=var_id)
store.mark_causative(
institute=institute_obj,
case=case_obj,
user=user_obj,
link="causative_var_link/{}".format(variant_obj["_id"]),
variant=variant_obj,
)
updated_case = store.case_collection.find_one({"_id": case_obj["_id"]})
# The above variants should be registered as causatives in case object
assert updated_case["causatives"] == var_ids
# Call scout causatives view and check if the above causatives are displayed
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN accessing the causatives page
resp = client.get(url_for("overview.causatives", institute_id=institute_obj["internal_id"]))
# THEN it should return a page
assert resp.status_code == 200
# with variant 1
assert var1_id in str(resp.data)
# and variant 2
assert var2_id in str(resp.data)
# Filter causatives by gene (POT1)
resp = client.get(
url_for(
"overview.causatives",
institute_id=institute_obj["internal_id"],
query="17284 | POT1 (DKFZp586D211, hPot1, POT1)",
)
)
# THEN it should return a page
assert resp.status_code == 200
# with variant 1
assert var1_id in str(resp.data)
# but NOT variant 2
assert var2_id not in str(resp.data)
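# NOTE (illustrative sketch, not Scout's actual implementation): the causatives
# filter above receives the full autocomplete string
# "17284 | POT1 (DKFZp586D211, hPot1, POT1)". Recovering the numeric HGNC id from
# such a string only needs the part before the first "|":
def _hgnc_id_from_causatives_query(query):
    """Hypothetical helper used for illustration only."""
    return int(query.split("|")[0].strip())
# _hgnc_id_from_causatives_query("17284 | POT1 (DKFZp586D211, hPot1, POT1)") -> 17284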
def test_gene_variants_filter(app, institute_obj, case_obj):
"""Test the function that allows searching SNVs and INDELS using filters"""
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN the user submits a query for variants in a specific gene and a rank score
filter_query = {
"hgnc_symbols": "POT1",
"variant_type": ["clinical"],
"rank_score": 11,
}
resp = client.post(
url_for("overview.gene_variants", institute_id=institute_obj["internal_id"]),
data=filter_query,
)
# THEN it should return a page
assert resp.status_code == 200
# containing variants in that gene
assert "POT1" in str(resp.data)
def test_gene_variants_no_valid_gene(app, institute_obj, case_obj, mocker, mock_redirect):
"""Test the gene_variant endpoint with a gene symbol not in database"""
mocker.patch("scout.server.blueprints.institutes.views.redirect", return_value=mock_redirect)
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN the user submits a query for variants in a gene that is not in the database
filter_query = {
"hgnc_symbols": "UNKNOWN-GENE",
"variant_type": ["clinical"],
"rank_score": 11,
}
resp = client.post(
url_for("overview.gene_variants", institute_id=institute_obj["internal_id"]),
data=filter_query,
)
# THEN the page should redirect
assert resp.status_code == 302
def test_institute_users(app, institute_obj, user_obj):
"""Test the link to all institute users"""
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
# WHEN accessing the institute users page
resp = client.get(
url_for("overview.institute_users", institute_id=institute_obj["internal_id"])
)
# THEN it should return a page
assert resp.status_code == 200
# Containing the test user's name
assert user_obj["name"] in str(resp.data)
def test_filters(app, institute_obj, user_obj, case_obj, filter_obj):
"""Test the page listing an institute's stored variant filters"""
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
category = "snv"
filter_id = store.stash_filter(filter_obj, institute_obj, case_obj, user_obj, category)
store.lock_filter(filter_id, user_obj.get("email"))
# WHEN accessing the institute filters page
resp = client.get(url_for("overview.filters", institute_id=institute_obj["internal_id"]))
# THEN it should return a page
assert resp.status_code == 200
# Containing the test user's name
assert user_obj["name"] in str(resp.data)
def test_clinvar_submissions(app, institute_obj, clinvar_variant, clinvar_casedata):
"""Test the web page containing the clinvar submissions for an institute"""
# GIVEN an institute with a clinvar submission
store.create_submission(institute_obj["_id"])
open_submission = store.get_open_clinvar_submission(institute_obj["_id"])
submission_with_data = store.add_to_submission(
open_submission["_id"], ([clinvar_variant], [clinvar_casedata])
)
assert submission_with_data
# GIVEN an initialized app and a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# When visiting the clinvar submission page (get request)
resp = client.get(
url_for(
"overview.clinvar_submissions",
institute_id=institute_obj["internal_id"],
)
)
# a successful response should be returned
assert resp.status_code == 200
assert str(submission_with_data["_id"]) in str(resp.data)
def test_rename_clinvar_samples(app, institute_obj, clinvar_variant, clinvar_casedata):
"""Test the form button triggering the renaming of samples for a clinvar submission"""
# GIVEN an institute with a clinvar submission
store.create_submission(institute_obj["_id"])
open_submission = store.get_open_clinvar_submission(institute_obj["_id"])
submission_with_data = store.add_to_submission(
open_submission["_id"], ([clinvar_variant], [clinvar_casedata])
)
assert submission_with_data["_id"]
# GIVEN an initialized app and a valid user
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
case_id = clinvar_casedata["case_id"]
old_name = clinvar_casedata["individual_id"]
form_data = dict(
new_name="new_sample_name",
)
referer = url_for("overview.clinvar_submissions", institute_id=institute_obj["internal_id"])
# WHEN the sample name is edited from the submission page (POST request)
resp = client.post(
url_for(
"overview.clinvar_rename_casedata",
submission=submission_with_data["_id"],
case=case_id,
old_name=old_name,
),
data=form_data,
headers={"referer": referer},
)
# a successful response should redirect to the submissions page
assert resp.status_code == 302
# And the sample name should have been updated in the database
updated_casedata = store.clinvar_collection.find_one({"_id": clinvar_casedata["_id"]})
assert updated_casedata["individual_id"] != clinvar_casedata["individual_id"]
|
#!/usr/bin/env python
"""
Client helpers for the HCA ingest REST API: submission envelopes, metadata entities, files and bundle manifests.
"""
import json
import logging
import os
import time
import uuid
from urllib.parse import urljoin, quote
import requests
from requests import HTTPError
from ingest.api.requests_utils import optimistic_session
class IngestApi:
def __init__(self, url=None, ingest_api_root=None):
format = '[%(filename)s:%(lineno)s - %(funcName)20s() ] %(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=format)
logging.getLogger("requests").setLevel(logging.WARNING)
self.logger = logging.getLogger(__name__)
if not url and 'INGEST_API' in os.environ:
url = os.environ['INGEST_API']
# expand interpolated env vars
url = os.path.expandvars(url)
self.logger.info("using " + url + " for ingest API")
self.url = url if url else "http://localhost:8080"
self.headers = {'Content-type': 'application/json'}
self.submission_links = {}
self.token = None
self.ingest_api_root = ingest_api_root if ingest_api_root is not None else self.get_root_url()
def set_token(self, token):
self.token = token
def get_root_url(self):
reply = requests.get(self.url, headers=self.headers)
return reply.json()["_links"]
def get_link_from_resource_url(self, resource_url, link_name):
r = requests.get(resource_url, headers=self.headers)
r.raise_for_status()
links = r.json().get('_links', {})
return links.get(link_name, {}).get('href')
def get_link_from_resource(self, resource, link_name):
links = resource.get('_links', {})
return links.get(link_name, {}).get('href')
def get_schemas(self, latest_only=True, high_level_entity=None, domain_entity=None, concrete_entity=None):
schema_url = self.get_schemas_url()
all_schemas = []
filtered_schemas = {}
if latest_only:
search_url = self.get_link_from_resource_url(schema_url, "search")
r = requests.get(search_url, headers=self.headers)
if r.status_code == requests.codes.ok:
response_j = json.loads(r.text)
all_schemas = list(self.getRelatedEntities("latestSchemas", response_j, "schemas"))
else:
all_schemas = list(self.getEntities(schema_url, "schemas"))
if high_level_entity:
all_schemas = list(filter(lambda schema: schema.get('highLevelEntity') == high_level_entity, all_schemas))
if domain_entity:
all_schemas = list(filter(lambda schema: schema.get('domainEntity') == domain_entity, all_schemas))
if concrete_entity:
all_schemas = list(filter(lambda schema: schema.get('concreteEntity') == concrete_entity, all_schemas))
return all_schemas
def get_schemas_url(self):
if "schemas" in self.ingest_api_root:
return self.ingest_api_root["schemas"]["href"].rsplit("{")[0]
return None
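    # NOTE (illustrative, not part of the client): the hrefs returned by this API are
    # HAL templated links such as "<href>{?page,size}" (see the .replace('{?uuid}', '')
    # call in getSubmissionByUuid below), so .rsplit("{")[0] is used throughout this
    # class to strip the template suffix, e.g.
    #   "http://localhost:8080/submissionEnvelopes{?page,size}".rsplit("{")[0]
    #   -> "http://localhost:8080/submissionEnvelopes"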
def getSubmissions(self):
params = {'sort': 'submissionDate,desc'}
r = requests.get(self.ingest_api_root["submissionEnvelopes"]["href"].rsplit("{")[0], params=params,
headers=self.headers)
if r.status_code == requests.codes.ok:
return json.loads(r.text)["_embedded"]["submissionEnvelopes"]
def getSubmissionIfModifiedSince(self, submissionId, datetimeUTC):
submissionUrl = self.getSubmissionUri(submissionId)
headers = self.headers
if datetimeUTC:
headers = {'If-Modified-Since': datetimeUTC}
self.logger.info('headers:' + str(headers))
r = requests.get(submissionUrl, headers=headers)
if r.status_code == requests.codes.ok:
submission = json.loads(r.text)
return submission
else:
self.logger.error(str(r))
def getProjects(self, id):
submissionUrl = self.url + '/submissionEnvelopes/' + id + '/projects'
r = requests.get(submissionUrl, headers=self.headers)
projects = []
if r.status_code == requests.codes.ok:
projects = json.loads(r.text)
return projects
def getProjectById(self, id):
submissionUrl = self.url + '/projects/' + id
r = requests.get(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.ok:
project = json.loads(r.text)
return project
else:
raise ValueError("Project " + id + " could not be retrieved")
def getProjectByUuid(self, uuid):
return self.getEntityByUuid('projects', uuid)
def getEntityByUuid(self, entity_type, uuid):
url = self.url + f'/{entity_type}/search/findByUuid?uuid=' + uuid
# TODO make the endpoint consistent
if entity_type == 'submissionEnvelopes':
url = self.url + f'/{entity_type}/search/findByUuidUuid?uuid=' + uuid
r = requests.get(url, headers=self.headers)
r.raise_for_status()
return r.json()
def getFileBySubmissionUrlAndFileName(self, submissionUrl, fileName):
searchUrl = self.get_link_from_resource_url(self.url + '/files/search', 'findBySubmissionEnvelopesInAndFileName')
searchUrl = searchUrl.replace('{?submissionEnvelope,fileName}', '')
r = requests.get(searchUrl, params={'submissionEnvelope': submissionUrl, 'fileName': fileName})
if r.status_code == requests.codes.ok:
return r.json()
return None
def getSubmissionEnvelope(self, submissionUrl):
r = requests.get(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.ok:
submissionEnvelope = json.loads(r.text)
return submissionEnvelope
else:
raise ValueError("Submission Envelope " + submissionUrl + " could not be retrieved")
def getSubmissionByUuid(self, submissionUuid):
searchByUuidLink = self.get_link_from_resource_url(self.url + '/submissionEnvelopes/search', 'findByUuid')
searchByUuidLink = searchByUuidLink.replace('{?uuid}', '') # TODO: use a REST traverser instead of requests?
r = requests.get(searchByUuidLink, params={'uuid': submissionUuid})
if 200 <= r.status_code < 300:
return r.json()
else:
r.raise_for_status()
def getFiles(self, id):
submissionUrl = self.url + '/submissionEnvelopes/' + id + '/files'
r = requests.get(submissionUrl, headers=self.headers)
files = []
if r.status_code == requests.codes.ok:
files = json.loads(r.text)
return files
def getBundleManifests(self, id):
submissionUrl = self.url + '/submissionEnvelopes/' + id + '/bundleManifests'
r = requests.get(submissionUrl, headers=self.headers)
bundleManifests = []
if r.status_code == requests.codes.ok:
bundleManifests = json.loads(r.text)
return bundleManifests
def createSubmission(self, token):
auth_headers = {
'Content-type': 'application/json',
'Authorization': token
}
try:
r = requests.post(self.ingest_api_root["submissionEnvelopes"]["href"].rsplit("{")[0], data="{}",
headers=auth_headers)
r.raise_for_status()
submission = r.json()
submission_url = submission["_links"]["self"]["href"].rsplit("{")[0]
self.submission_links[submission_url] = submission["_links"]
return submission_url
except requests.exceptions.RequestException as err:
self.logger.error("Request failed: " + str(err))
raise
def get_submission_links(self, submission_url):
if not self.submission_links.get(submission_url):
r = requests.get(submission_url, headers=self.headers)
r.raise_for_status()
self.submission_links[submission_url] = r.json()["_links"]
return self.submission_links.get(submission_url)
def get_link_in_submisssion(self, submission_url, link_name):
links = self.get_submission_links(submission_url)
link_obj = links.get(link_name) # TODO what if link doesn't exist
link = link_obj['href'].rsplit("{")[0]
return link
def finishSubmission(self, submissionUrl):
r = requests.put(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.update:
self.logger.info("Submission complete!")
return r.text
def updateSubmissionState(self, submissionId, state):
state_url = self.getSubmissionStateUrl(submissionId, state)
if state_url:
r = requests.put(state_url, headers=self.headers)
return self.handleResponse(r)
def getSubmissionStateUrl(self, submissionId, state):
submissionUrl = self.getSubmissionUri(submissionId)
response = requests.get(submissionUrl, headers=self.headers)
submission = self.handleResponse(response)
if submission and state in submission['_links']:
return submission['_links'][state]["href"].rsplit("{")[0]
return None
def handleResponse(self, response):
if response.ok:
return json.loads(response.text)
else:
self.logger.error('Response:' + response.text)
return None
def getSubmissionUri(self, submissionId):
return self.ingest_api_root["submissionEnvelopes"]["href"].rsplit("{")[0] + "/" + submissionId
def get_full_url(self, callback_link):
return urljoin(self.url, callback_link)
def get_process(self, process_url):
r = requests.get(process_url, headers=self.headers)
r.raise_for_status()
return r.json()
def getAnalyses(self, submissionUrl):
return self.getEntities(submissionUrl, "analyses")
def getEntities(self, submissionUrl, entityType, pageSize=None):
r = requests.get(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.ok:
if entityType in json.loads(r.text)["_links"]:
if not pageSize:
yield from self._getAllObjectsFromSet(json.loads(r.text)["_links"][entityType]["href"], entityType)
else:
yield from self._getAllObjectsFromSet(json.loads(r.text)["_links"][entityType]["href"], entityType, pageSize)
def _getAllObjectsFromSet(self, url, entityType, pageSize=None):
params = dict()
if pageSize:
params = {"size": pageSize}
r = requests.get(url, headers=self.headers, params=params)
r.raise_for_status()
if r.status_code == requests.codes.ok:
if "_embedded" in json.loads(r.text):
for entity in json.loads(r.text)["_embedded"][entityType]:
yield entity
if "next" in json.loads(r.text)["_links"]:
for entity2 in self._getAllObjectsFromSet(json.loads(r.text)["_links"]["next"]["href"], entityType):
yield entity2
def getRelatedEntities(self, relation, entity, entityType):
# get the self link from entity
if relation in entity["_links"]:
entityUri = entity["_links"][relation]["href"]
for entity in self._getAllObjectsFromSet(entityUri, entityType):
yield entity
def _updateStatusToPending(self, submissionUrl):
r = requests.patch(submissionUrl, data="{\"submissionStatus\" : \"Pending\"}", headers=self.headers)
def createProject(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "projects", self.token)
def createBiomaterial(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "biomaterials")
def createProcess(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "processes")
def createSubmissionManifest(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, 'submissionManifest')
def patch(self, url, patch):
r = requests.patch(url, json=patch)
r.raise_for_status()
return r
def createSubmissionError(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, 'submissionErrors')
def createProtocol(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "protocols")
def createFile(self, submissionUrl, file_name, jsonObject):
# TODO: why do we need the submission's links before we can create a file on it?
# TODO: submission_links should be a cache;
# TODO: getting a submission's links should look in the cache before retrieving it from the API
fileSubmissionsUrl = self.get_link_in_submisssion(submissionUrl, 'files')
fileSubmissionsUrl = fileSubmissionsUrl + "/" + quote(file_name)
fileToCreateObject = {
"fileName": file_name,
"content": json.loads(jsonObject) # TODO jsonObject should be a dict()
}
time.sleep(0.001)
with optimistic_session(fileSubmissionsUrl) as session:
r = session.post(fileSubmissionsUrl, data=json.dumps(fileToCreateObject),
headers=self.headers)
# TODO Investigate why core is returning internal server error
if r.status_code == requests.codes.conflict or r.status_code == requests.codes.internal_server_error:
searchFiles = self.getFileBySubmissionUrlAndFileName(submissionUrl, file_name)
if searchFiles and searchFiles.get('_embedded') and searchFiles['_embedded'].get('files'):
fileInIngest = searchFiles['_embedded'].get('files')[0]
content = fileInIngest.get('content')
newContent = json.loads(jsonObject)
if content:
content.update(newContent)
else:
content = newContent
fileUrl = fileInIngest['_links']['self']['href']
time.sleep(0.001)
r = requests.patch(fileUrl, data=json.dumps({'content': content}), headers=self.headers)
self.logger.debug(f'Updating existing content of file {fileUrl}.')
r.raise_for_status()
return r.json()
def createEntity(self, submissionUrl, jsonObject, entityType, token=None):
auth_headers = {'Content-type': 'application/json',
'Authorization': token
}
submissionUrl = self.get_link_in_submisssion(submissionUrl, entityType)
self.logger.debug("posting " + submissionUrl)
with optimistic_session(submissionUrl) as session:
r = session.post(submissionUrl, data=jsonObject, headers=auth_headers)
r.raise_for_status()
return r.json()
# given an HCA object, return the URI for the object from ingest
def getObjectId(self, entity):
if "_links" in entity:
entityUrl = entity["_links"]["self"]["href"].rsplit("{")[0]
return entityUrl
raise ValueError('Can\'t get id for ' + json.dumps(entity) + ' is it a HCA entity?')
def getObjectUuid(self, entityUri):
r = requests.get(entityUri,
headers=self.headers)
if r.status_code == requests.codes.ok:
return json.loads(r.text)["uuid"]["uuid"]
def linkEntity(self, fromEntity, toEntity, relationship):
if not fromEntity:
raise ValueError("Error: fromEntity is None")
if not toEntity:
raise ValueError("Error: toEntity is None")
if not relationship:
raise ValueError("Error: relationship is None")
# check each dict in turn for non-None-ness
fromEntityLinks = fromEntity["_links"] if "_links" in fromEntity else None
if not fromEntityLinks:
raise ValueError("Error: fromEntity has no _links")
fromEntityLinksRelationship = fromEntityLinks[relationship] if relationship in fromEntityLinks else None
if not fromEntityLinksRelationship:
raise ValueError("Error: fromEntityLinks has no {0} relationship".format(relationship))
fromEntityLinksRelationshipHref = fromEntityLinksRelationship["href"] if "href" in fromEntityLinksRelationship else None
if not fromEntityLinksRelationshipHref:
raise ValueError("Error: fromEntityLinksRelationship for relationship {0} has no href".format(relationship))
fromUri = fromEntity["_links"][relationship]["href"]
toUri = self.getObjectId(toEntity)
self._retry_when_http_error(0, self._post_link_entity, fromUri, toUri)
def _post_link_entity(self, fromUri, toUri):
self.logger.debug('fromUri ' + fromUri + ' toUri:' + toUri)
headers = {'Content-type': 'text/uri-list'}
r = requests.post(fromUri.rsplit("{")[0],
data=toUri.rsplit("{")[0], headers=headers)
return r
def _retry_when_http_error(self, tries, func, *args):
max_retries = 5
if tries < max_retries:
if tries > 1:
self.logger.info("no of tries: " + str(tries + 1))
r = None
try:
time.sleep(0.001)
r = func(*args)
r.raise_for_status()
except HTTPError:
self.logger.error("\nResponse was: " + str(r.status_code) + " (" + r.text + ")")
tries += 1
time.sleep(1)
r = self._retry_when_http_error(tries, func, *args)
except requests.ConnectionError as e:
self.logger.exception(str(e))
tries += 1
time.sleep(1)
r = self._retry_when_http_error(tries, func, *args)
except Exception as e:
self.logger.exception(str(e))
tries += 1
time.sleep(1)
r = self._retry_when_http_error(tries, func, *args)
return r
else:
error_message = "Maximum no of tries reached: " + str(max_retries)
self.logger.error(error_message)
return None
def _request_post(self, url, data, params, headers):
if params:
return requests.post(url, data=data, params=params, headers=headers)
return requests.post(url, data=data, headers=headers)
def _request_put(self, url, data, params, headers):
if params:
return requests.put(url, data=data, params=params, headers=headers)
return requests.put(url, data=data, headers=headers)
def createBundleManifest(self, bundleManifest):
r = self._retry_when_http_error(0, self._post_bundle_manifest, bundleManifest, self.ingest_api_root["bundleManifests"]["href"].rsplit("{")[0])
if not (200 <= r.status_code < 300):
error_message = "Failed to create bundle manifest at URL {0} with request payload: {1}".format(self.ingest_api_root["bundleManifests"]["href"].rsplit("{")[0],
json.dumps(bundleManifest.__dict__))
self.logger.error(error_message)
raise ValueError(error_message)
else:
self.logger.info("successfully created bundle manifest")
def _post_bundle_manifest(self, bundleManifest, url):
return requests.post(url, data=json.dumps(bundleManifest.__dict__), headers=self.headers)
def updateSubmissionWithStagingCredentials(self, subUrl, uuid, submissionCredentials):
stagingDetails = \
{
"stagingDetails": {
"stagingAreaUuid": {
"uuid": uuid
},
"stagingAreaLocation": {
"value": submissionCredentials
}
}
}
if self.retrySubmissionUpdateWithStagingDetails(subUrl, stagingDetails, 0):
self.logger.debug("envelope updated with staging details " + json.dumps(stagingDetails))
else:
self.logger.error("Failed to update envelope with staging details: " + json.dumps(stagingDetails))
def retrySubmissionUpdateWithStagingDetails(self, subUrl, stagingDetails, tries):
if tries < 5:
# do a GET request to get latest submission envelope
entity_response = requests.get(subUrl)
etag = entity_response.headers['ETag']
if etag:
# set the etag header so we get 412 if someone beats us to set validating
self.headers['If-Match'] = etag
r = requests.patch(subUrl, data=json.dumps(stagingDetails))
try:
r.raise_for_status()
return True
except HTTPError:
self.logger.error("PATCHing submission envelope with creds failed, retrying")
tries += 1
return self.retrySubmissionUpdateWithStagingDetails(subUrl, stagingDetails, tries)
else:
return False
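# NOTE (illustrative sketch, hypothetical helper, not part of this client): the
# retrySubmissionUpdateWithStagingDetails method above relies on HTTP optimistic
# locking: read the resource's current ETag, then PATCH with an If-Match header so
# the server can answer 412 Precondition Failed if somebody else updated the
# envelope in the meantime. The same pattern in isolation:
def _conditional_patch_example(url, payload):
    """Standalone example of the ETag / If-Match pattern (illustration only)."""
    current = requests.get(url)
    headers = {'Content-type': 'application/json'}
    etag = current.headers.get('ETag')
    if etag:
        headers['If-Match'] = etag
    r = requests.patch(url, data=json.dumps(payload), headers=headers)
    if r.status_code == 412:
        # precondition failed: the resource changed under us; caller may re-read and retry
        return False
    r.raise_for_status()
    return True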
class BundleManifest:
def __init__(self):
self.bundleUuid = str(uuid.uuid4())
self.envelopeUuid = {}
self.dataFiles = []
self.fileBiomaterialMap = {}
self.fileProcessMap = {}
self.fileFilesMap = {}
self.fileProjectMap = {}
self.fileProtocolMap = {} |
#!/usr/bin/env python
"""
desc goes here
"""
import json
import logging
import os
import time
import uuid
from urllib.parse import urljoin, quote
import requests
from requests import HTTPError
from ingest.api.requests_utils import optimistic_session
class IngestApi:
def __init__(self, url=None, ingest_api_root=None):
format = '[%(filename)s:%(lineno)s - %(funcName)20s() ] %(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=format)
logging.getLogger("requests").setLevel(logging.WARNING)
self.logger = logging.getLogger(__name__)
if not url and 'INGEST_API' in os.environ:
url = os.environ['INGEST_API']
# expand interpolated env vars
url = os.path.expandvars(url)
self.logger.info("using " + url + " for ingest API")
self.url = url if url else "http://localhost:8080"
self.headers = {'Content-type': 'application/json'}
self.submission_links = {}
self.token = None
self.ingest_api_root = ingest_api_root if ingest_api_root is not None else self.get_root_url()
def set_token(self, token):
self.token = token
def get_root_url(self):
reply = requests.get(self.url, headers=self.headers)
return reply.json()["_links"]
def get_link_from_resource_url(self, resource_url, link_name):
r = requests.get(resource_url, headers=self.headers)
r.raise_for_status()
links = r.json().get('_links', {})
return links.get(link_name, {}).get('href')
def get_link_from_resource(self, resource, link_name):
links = resource.get('_links', {})
return links.get(link_name, {}).get('href')
def get_schemas(self, latest_only=True, high_level_entity=None, domain_entity=None, concrete_entity=None):
schema_url = self.get_schemas_url()
all_schemas = []
filtered_schemas = {}
if latest_only:
search_url = self.get_link_from_resource_url(schema_url, "search")
r = requests.get(search_url, headers=self.headers)
if r.status_code == requests.codes.ok:
response_j = json.loads(r.text)
all_schemas = list(self.getRelatedEntities("latestSchemas", response_j, "schemas"))
else:
all_schemas = list(self.getEntities(schema_url, "schemas"))
if high_level_entity:
all_schemas = list(filter(lambda schema: schema.get('highLevelEntity') == high_level_entity, all_schemas))
if domain_entity:
all_schemas = list(filter(lambda schema: schema.get('domainEntity') == domain_entity, all_schemas))
if concrete_entity:
all_schemas = list(filter(lambda schema: schema.get('concreteEntity') == concrete_entity, all_schemas))
return all_schemas
def get_schemas_url(self):
if "schemas" in self.ingest_api_root:
return self.ingest_api_root["schemas"]["href"].rsplit("{")[0]
return None
def getSubmissions(self):
params = {'sort': 'submissionDate,desc'}
r = requests.get(self.ingest_api_root["submissionEnvelopes"]["href"].rsplit("{")[0], params=params,
headers=self.headers)
if r.status_code == requests.codes.ok:
return json.loads(r.text)["_embedded"]["submissionEnvelopes"]
def getSubmissionIfModifiedSince(self, submissionId, datetimeUTC):
submissionUrl = self.getSubmissionUri(submissionId)
headers = self.headers
if datetimeUTC:
headers = {'If-Modified-Since': datetimeUTC}
self.logger.info('headers:' + str(headers))
r = requests.get(submissionUrl, headers=headers)
if r.status_code == requests.codes.ok:
submission = json.loads(r.text)
return submission
else:
self.logger.error(str(r))
def getProjects(self, id):
submissionUrl = self.url + '/submissionEnvelopes/' + id + '/projects'
r = requests.get(submissionUrl, headers=self.headers)
projects = []
if r.status_code == requests.codes.ok:
projects = json.loads(r.text)
return projects
def getProjectById(self, id):
submissionUrl = self.url + '/projects/' + id
r = requests.get(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.ok:
project = json.loads(r.text)
return project
else:
raise ValueError("Project " + id + " could not be retrieved")
def getProjectByUuid(self, uuid):
return self.getEntityByUuid('projects', uuid)
def getEntityByUuid(self, entity_type, uuid):
url = self.url + f'/{entity_type}/search/findByUuid?uuid=' + uuid
# TODO make the endpoint consistent
if entity_type == 'submissionEnvelopes':
url = self.url + f'/{entity_type}/search/findByUuidUuid?uuid=' + uuid
r = requests.get(url, headers=self.headers)
r.raise_for_status()
return r.json()
def getFileBySubmissionUrlAndFileName(self, submissionUrl, fileName):
searchUrl = self._get_url_for_link(self.url + '/files/search', 'findBySubmissionEnvelopesInAndFileName')
searchUrl = searchUrl.replace('{?submissionEnvelope,fileName}', '')
r = requests.get(searchUrl, params={'submissionEnvelope': submissionUrl, 'fileName': fileName})
if r.status_code == requests.codes.ok:
return r.json()
return None
def getSubmissionEnvelope(self, submissionUrl):
r = requests.get(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.ok:
submissionEnvelope = json.loads(r.text)
return submissionEnvelope
else:
raise ValueError("Submission Envelope " + submissionUrl + " could not be retrieved")
def getSubmissionByUuid(self, submissionUuid):
searchByUuidLink = self.get_link_from_resource_url(self.url + '/submissionEnvelopes/search', 'findByUuid')
searchByUuidLink = searchByUuidLink.replace('{?uuid}', '') # TODO: use a REST traverser instead of requests?
r = requests.get(searchByUuidLink, params={'uuid': submissionUuid})
if 200 <= r.status_code < 300:
return r.json()
else:
r.raise_for_status()
def getFiles(self, id):
submissionUrl = self.url + '/submissionEnvelopes/' + id + '/files'
r = requests.get(submissionUrl, headers=self.headers)
files = []
if r.status_code == requests.codes.ok:
files = json.loads(r.text)
return files
def getBundleManifests(self, id):
submissionUrl = self.url + '/submissionEnvelopes/' + id + '/bundleManifests'
r = requests.get(submissionUrl, headers=self.headers)
bundleManifests = []
if r.status_code == requests.codes.ok:
bundleManifests = json.loads(r.text)
return bundleManifests
def createSubmission(self, token):
auth_headers = {
'Content-type': 'application/json',
'Authorization': token
}
try:
r = requests.post(self.ingest_api_root["submissionEnvelopes"]["href"].rsplit("{")[0], data="{}",
headers=auth_headers)
r.raise_for_status()
submission = r.json()
submission_url = submission["_links"]["self"]["href"].rsplit("{")[0]
self.submission_links[submission_url] = submission["_links"]
return submission_url
except requests.exceptions.RequestException as err:
self.logger.error("Request failed: " + str(err))
raise
def get_submission_links(self, submission_url):
if not self.submission_links.get(submission_url):
r = requests.get(submission_url, headers=self.headers)
r.raise_for_status()
self.submission_links[submission_url] = r.json()["_links"]
return self.submission_links.get(submission_url)
def get_link_in_submisssion(self, submission_url, link_name):
links = self.get_submission_links(submission_url)
link_obj = links.get(link_name) # TODO what if link doesn't exist
link = link_obj['href'].rsplit("{")[0]
return link
def finishSubmission(self, submissionUrl):
r = requests.put(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.update:
self.logger.info("Submission complete!")
return r.text
def updateSubmissionState(self, submissionId, state):
state_url = self.getSubmissionStateUrl(submissionId, state)
if state_url:
r = requests.put(state_url, headers=self.headers)
return self.handleResponse(r)
def getSubmissionStateUrl(self, submissionId, state):
submissionUrl = self.getSubmissionUri(submissionId)
response = requests.get(submissionUrl, headers=self.headers)
submission = self.handleResponse(response)
if submission and state in submission['_links']:
return submission['_links'][state]["href"].rsplit("{")[0]
return None
def handleResponse(self, response):
if response.ok:
return json.loads(response.text)
else:
self.logger.error('Response:' + response.text)
return None
def getSubmissionUri(self, submissionId):
return self.ingest_api_root["submissionEnvelopes"]["href"].rsplit("{")[0] + "/" + submissionId
def get_full_url(self, callback_link):
return urljoin(self.url, callback_link)
def get_process(self, process_url):
r = requests.get(process_url, headers=self.headers)
r.raise_for_status()
return r.json()
def getAnalyses(self, submissionUrl):
return self.getEntities(submissionUrl, "analyses")
def getEntities(self, submissionUrl, entityType, pageSize=None):
r = requests.get(submissionUrl, headers=self.headers)
if r.status_code == requests.codes.ok:
if entityType in json.loads(r.text)["_links"]:
if not pageSize:
yield from self._getAllObjectsFromSet(json.loads(r.text)["_links"][entityType]["href"], entityType)
else:
yield from self._getAllObjectsFromSet(json.loads(r.text)["_links"][entityType]["href"], entityType, pageSize)
def _getAllObjectsFromSet(self, url, entityType, pageSize=None):
params = dict()
if pageSize:
params = {"size": pageSize}
r = requests.get(url, headers=self.headers, params=params)
r.raise_for_status()
if r.status_code == requests.codes.ok:
if "_embedded" in json.loads(r.text):
for entity in json.loads(r.text)["_embedded"][entityType]:
yield entity
if "next" in json.loads(r.text)["_links"]:
for entity2 in self._getAllObjectsFromSet(json.loads(r.text)["_links"]["next"]["href"], entityType):
yield entity2
def getRelatedEntities(self, relation, entity, entityType):
# get the self link from entity
if relation in entity["_links"]:
entityUri = entity["_links"][relation]["href"]
for entity in self._getAllObjectsFromSet(entityUri, entityType):
yield entity
def _updateStatusToPending(self, submissionUrl):
r = requests.patch(submissionUrl, data="{\"submissionStatus\" : \"Pending\"}", headers=self.headers)
def createProject(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "projects", self.token)
def createBiomaterial(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "biomaterials")
def createProcess(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "processes")
def createSubmissionManifest(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, 'submissionManifest')
def patch(self, url, patch):
r = requests.patch(url, json=patch)
r.raise_for_status()
return r
def createSubmissionError(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, 'submissionErrors')
def createProtocol(self, submissionUrl, jsonObject):
return self.createEntity(submissionUrl, jsonObject, "protocols")
def createFile(self, submissionUrl, file_name, jsonObject):
# TODO: why do we need the submission's links before we can create a file on it?
# TODO: submission_links should be a cache;
# TODO: getting a submission's links should look in the cache before retrieving it from the API
fileSubmissionsUrl = self.get_link_in_submisssion(submissionUrl, 'files')
fileSubmissionsUrl = fileSubmissionsUrl + "/" + quote(file_name)
fileToCreateObject = {
"fileName": file_name,
"content": json.loads(jsonObject) # TODO jsonObject should be a dict()
}
time.sleep(0.001)
with optimistic_session(fileSubmissionsUrl) as session:
r = session.post(fileSubmissionsUrl, data=json.dumps(fileToCreateObject),
headers=self.headers)
# TODO Investigate why core is returning internal server error
if r.status_code == requests.codes.conflict or r.status_code == requests.codes.internal_server_error:
searchFiles = self.getFileBySubmissionUrlAndFileName(submissionUrl, file_name)
if searchFiles and searchFiles.get('_embedded') and searchFiles['_embedded'].get('files'):
fileInIngest = searchFiles['_embedded'].get('files')[0]
content = fileInIngest.get('content')
newContent = json.loads(jsonObject)
if content:
content.update(newContent)
else:
content = newContent
fileUrl = fileInIngest['_links']['self']['href']
time.sleep(0.001)
r = requests.patch(fileUrl, data=json.dumps({'content': content}), headers=self.headers)
self.logger.debug(f'Updating existing content of file {fileUrl}.')
r.raise_for_status()
return r.json()
def createEntity(self, submissionUrl, jsonObject, entityType, token=None):
auth_headers = {'Content-type': 'application/json',
'Authorization': token
}
submissionUrl = self.get_link_in_submisssion(submissionUrl, entityType)
self.logger.debug("posting " + submissionUrl)
with optimistic_session(submissionUrl) as session:
r = session.post(submissionUrl, data=jsonObject, headers=auth_headers)
r.raise_for_status()
return r.json()
# given a HCA object return the URI for the object from ingest
def getObjectId(self, entity):
if "_links" in entity:
entityUrl = entity["_links"]["self"]["href"].rsplit("{")[0]
return entityUrl
raise ValueError('Can\'t get id for ' + json.dumps(entity) + ' is it a HCA entity?')
def getObjectUuid(self, entityUri):
r = requests.get(entityUri,
headers=self.headers)
if r.status_code == requests.codes.ok:
return json.loads(r.text)["uuid"]["uuid"]
def linkEntity(self, fromEntity, toEntity, relationship):
if not fromEntity:
raise ValueError("Error: fromEntity is None")
if not toEntity:
raise ValueError("Error: toEntity is None")
if not relationship:
raise ValueError("Error: relationship is None")
# check each dict in turn for non-None-ness
fromEntityLinks = fromEntity["_links"] if "_links" in fromEntity else None
if not fromEntityLinks:
raise ValueError("Error: fromEntity has no _links")
fromEntityLinksRelationship = fromEntityLinks[relationship] if relationship in fromEntityLinks else None
if not fromEntityLinksRelationship:
raise ValueError("Error: fromEntityLinks has no {0} relationship".format(relationship))
fromEntityLinksRelationshipHref = fromEntityLinksRelationship["href"] if "href" in fromEntityLinksRelationship else None
if not fromEntityLinksRelationshipHref:
raise ValueError("Error: fromEntityLinksRelationship for relationship {0} has no href".format(relationship))
fromUri = fromEntity["_links"][relationship]["href"]
toUri = self.getObjectId(toEntity)
self._retry_when_http_error(0, self._post_link_entity, fromUri, toUri)
def _post_link_entity(self, fromUri, toUri):
self.logger.debug('fromUri ' + fromUri + ' toUri:' + toUri);
headers = {'Content-type': 'text/uri-list'}
r = requests.post(fromUri.rsplit("{")[0],
data=toUri.rsplit("{")[0], headers=headers)
return r
def _retry_when_http_error(self, tries, func, *args):
max_retries = 5
if tries < max_retries:
if tries > 1:
self.logger.info("no of tries: " + str(tries + 1))
r = None
try:
time.sleep(0.001)
r = func(*args)
r.raise_for_status()
except HTTPError:
self.logger.error("\nResponse was: " + str(r.status_code) + " (" + r.text + ")")
tries += 1
time.sleep(1)
r = self._retry_when_http_error(tries, func, *args)
except requests.ConnectionError as e:
self.logger.exception(str(e))
tries += 1
time.sleep(1)
r = self._retry_when_http_error(tries, func, *args)
except Exception as e:
self.logger.exception(str(e))
tries += 1
time.sleep(1)
r = self._retry_when_http_error(tries, func, *args)
return r
else:
error_message = "Maximum no of tries reached: " + str(max_retries)
self.logger.error(error_message)
return None
def _request_post(self, url, data, params, headers):
if params:
return requests.post(url, data=data, params=params, headers=headers)
return requests.post(url, data=data, headers=headers)
def _request_put(self, url, data, params, headers):
if params:
return requests.put(url, data=data, params=params, headers=headers)
return requests.put(url, data=data, headers=headers)
def createBundleManifest(self, bundleManifest):
r = self._retry_when_http_error(0, self._post_bundle_manifest, bundleManifest, self.ingest_api_root["bundleManifests"]["href"].rsplit("{")[0])
if not (200 <= r.status_code < 300):
error_message = "Failed to create bundle manifest at URL {0} with request payload: {1}".format(self.ingest_api_root["bundleManifests"]["href"].rsplit("{")[0],
json.dumps(bundleManifest.__dict__))
self.logger.error(error_message)
raise ValueError(error_message)
else:
self.logger.info("successfully created bundle manifest")
def _post_bundle_manifest(self, bundleManifest, url):
return requests.post(url, data=json.dumps(bundleManifest.__dict__), headers=self.headers)
def updateSubmissionWithStagingCredentials(self, subUrl, uuid, submissionCredentials):
stagingDetails = \
{
"stagingDetails": {
"stagingAreaUuid": {
"uuid": uuid
},
"stagingAreaLocation": {
"value": submissionCredentials
}
}
}
if self.retrySubmissionUpdateWithStagingDetails(subUrl, stagingDetails, 0):
self.logger.debug("envelope updated with staging details " + json.dumps(stagingDetails))
else:
self.logger.error("Failed to update envelope with staging details: " + json.dumps(stagingDetails))
def retrySubmissionUpdateWithStagingDetails(self, subUrl, stagingDetails, tries):
if tries < 5:
# do a GET request to get latest submission envelope
entity_response = requests.get(subUrl)
etag = entity_response.headers['ETag']
if etag:
# set the etag header so we get 412 if someone beats us to set validating
self.headers['If-Match'] = etag
r = requests.patch(subUrl, data=json.dumps(stagingDetails))
try:
r.raise_for_status()
return True
except HTTPError:
self.logger.error("PATCHing submission envelope with creds failed, retrying")
tries += 1
self.retrySubmissionUpdateWithStagingDetails(subUrl, stagingDetails, tries)
else:
return False
class BundleManifest:
def __init__(self):
self.bundleUuid = str(uuid.uuid4())
self.envelopeUuid = {}
self.dataFiles = []
self.fileBiomaterialMap = {}
self.fileProcessMap = {}
self.fileFilesMap = {}
self.fileProjectMap = {}
self.fileProtocolMap = {} |
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from utils import convert_timestamp
from rasa_sdk.events import AllSlotsReset
import datetime
from datetime import timedelta, date
import dateutil.parser
import boto3
from boto3.dynamodb.conditions import Key
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# dispatcher.utter_message(text="Hello World!")
# return []
def get_doc(date):
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb.Table('sensor_daily_summary')
doc = table.get_item(Key={'farmId':'demo_farm_1','date': date})
return doc['Item']
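# NOTE (illustrative, hypothetical values): the boto3 Table resource returns numeric
# attributes as decimal.Decimal, which is presumably why ActionGetDate below casts
# each reading with float() before formatting it. The same column formatting in isolation:
from decimal import Decimal  # import used only by the example below
def _format_reading(name, value):
    """Format one sensor reading the way the action's report does (illustration only)."""
    return '\n{:<12}: {:.2f}'.format(name, float(value))
# _format_reading('airTemp', Decimal('27.4')) == '\nairTemp     : 27.40'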
# class ActionSearchRestaurant(Action):
# def name(self) -> Text:
# return "action_search_restaurant"
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# entities = tracker.latest_message['entities']
# print(entities)
# for e in entities:
# if e['entity'] == 'type':
# name = e['value']
# if name == 'indian':
# message = "Items: Indian1, Indian2, Indian3, Indian4"
# dispatcher.utter_message(text=message)
# return []
param_arr = ["salinity", "solarRad", "airTemp", "aeration", "potassium", "moisture", "soilTemp", "respiration", "pressure", "phosphorus", "pH", "humidity", "nitrogen", "evapotranspiration(ET)"]
class ActionGetDate(Action):
def name(self):
return 'action_date' #****This is used in the story!****
def run(self, dispatcher, tracker, domain):
try:
slots = tracker.current_slot_values()
slot_time = slots['time']
f_date = convert_timestamp(slot_time)
date_s = f_date.strftime("%Y-%m-%d")
str_date = f_date.strftime('%B %d, %Y')
except Exception:
f_date = date.today()
date_s = f_date.strftime("%Y-%m-%d")
str_date = f_date.strftime('%B %d, %Y')
# dispatcher.utter_message(text='Please enter the date properly')
# return [AllSlotsReset()]
try:
doc = get_doc(date_s)
# st = f"""DATE: {date}\nAir Temparature: {doc["airTemp"]}\nSoil Temparature: {doc["soilTemp"]}\nMoisture: {doc["moisture"]}\nPressure: {doc["pressure"]}\nHumidity: {doc["humidity"]}\nPhosphorus: {doc["phosphorus"]}\nNitrogen: {doc["nitrogen"]}\nPotassium: {doc["potassium"]}\nSolar Radiation: {doc["solarRad"]}\nSalinity: {doc["salinity"]}\nPH: {doc["pH"]}"""
st = f'Sensor data on {str_date}'
for key in param_arr:
st += '\n{:<12}: {:.2f}'.format(key, float(doc[key]))
dispatcher.utter_message(text=st)
except Exception:
dispatcher.utter_message(text='No data recorded on '+str_date)
return [AllSlotsReset()]
| from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from utils import convert_timestamp
from rasa_sdk.events import AllSlotsReset
import datetime
from datetime import timedelta, date
import dateutil.parser
import boto3
from boto3.dynamodb.conditions import Key
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# dispatcher.utter_message(text="Hello World!")
# return []
def get_doc(date):
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb.Table('sensor_daily_summary')
doc = table.get_item(Key={'farmId':'demo_farm_1','date': date})
return doc['Item']
# class ActionSearchRestaurant(Action):
# def name(self) -> Text:
# return "action_search_restaurant"
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# entities = tracker.latest_message['entities']
# print(entities)
# for e in entities:
# if e['entity'] == 'type':
# name = e['value']
# if name == 'indian':
# message = "Items: Indian1, Indian2, Indian3, Indian4"
# dispatcher.utter_message(text=message)
# return []
param_arr = ["salinity", "solarRad", "airTemp", "aeration", "potassium", "moisture", "soilTemp", "respiration", "pressure", "phosphorus", "pH", "humidity", "nitrogen", "evapotranspiration(ET)"]
class ActionGetDate(Action):
def name(self):
return 'action_date' #****This is used in the story!****
def run(self, dispatcher, tracker, domain):
try:
slots = tracker.current_slot_values()
slot_time = slots['time']
f_date = convert_timestamp(slot_time)
date_s = f_date.strftime("%Y-%m-%d")
str_date = f_date.strftime('%B %d, %Y')
except:
f_date = date.today()
date_s = f_date.strftime("%Y-%m-%d")
str_date = f_date.strftime('%B %d, %Y')
# dispatcher.utter_message(text='Please enter the date properly')
# return [AllSlotsReset()]
try:
doc = get_doc(date_s)
# st = f"""DATE: {date}\nAir Temparature: {doc['airTemp']}\nSoil Temparature: {doc['soilTemp']}\nMoisture: {doc['moisture']}\nPressure: {doc['pressure']}\nHumidity: {doc['humidity']}\nPhosphorus: {doc['phosphorus']}\nNitrogen: {doc['nitrogen']}\nPotassium: {doc['potassium']}\nSolar Radiation: {doc['solarRad']}\nSalinity: {doc['salinity']}\nPH: {doc['pH']}"""
st = f'Sensor data on {str_date}'
for key in param_arr:
st += '\n{:<12}: {:.2f}'.format(key, float(doc[key]))
dispatcher.utter_message(text=st)
except:
dispatcher.utter_message(text='No data recorded on '+str_date)
return [AllSlotsReset()]
|
from logging import log
import dash
import dash_leaflet as dl
import dash_leaflet.express as dlx
from dash.dependencies import Input, Output
from dash_extensions.javascript import assign, arrow_function
import pandas as pd
import dash_html_components as html
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
js_style = assign("""
function(feature) {
let t = {bon: "#1E90FF", moyen: "#FF7F50", mauvais: "#FF4500"};
console.log(feature);
return {color: t[feature.properties.etat_zh]}
}""")
fonction = """
(feature, layer) => {
//console.log("feature =",feature);
if(!feature.properties){
return
}
if(feature.properties.etat_zh){
layer.bindTooltip(feature.properties.etat_zh)
}
}
"""
hoverStyle = arrow_function(dict(weight=5, fillOpacity=1))
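# NOTE (illustrative): roughly speaking, assign() from dash_extensions.javascript writes
# the JavaScript above into a generated asset and hands dash-leaflet a reference to it,
# so the style and onEachFeature functions run in the browser, once per GeoJSON feature;
# the Python process never executes them. They assume each feature carries at least
# properties.etat_zh (one of "bon", "moyen", "mauvais") and properties.id, e.g.
# {"type": "Feature", "properties": {"id": 4, "etat_zh": "bon"}} (hypothetical values).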
app.layout = html.Div([
dl.Map(children=[dl.TileLayer(),
dl.GeoJSON(id="site-4", hoverStyle=hoverStyle,
url=app.get_asset_url('sites/4.json'),
options=dict(onEachFeature=assign(fonction), style=js_style
))],
center=[44.3, 7], zoom=9,
style={'width': '100%', 'height': '50vh', 'margin': "auto", "display": "block"}),
html.Div(id="zh_selectionnee"), html.Div(id="zh_survolee")])
@ app.callback(Output("zh_selectionnee", "children"), [Input("site-4", "click_feature")])
def site_click(feature):
if feature is not None:
return f"You clicked {feature['properties']['id']}"
@ app.callback(Output("zh_survolee", "children"), [Input("site-4", "hover_feature")])
def site_hover(feature):
if feature is not None:
return f"You hovered above {feature['properties']['id']}"
if __name__ == '__main__':
app.run_server(debug=True)
| from logging import log
import dash
import dash_leaflet as dl
import dash_leaflet.express as dlx
from dash.dependencies import Input, Output
from dash_extensions.javascript import assign, arrow_function
import pandas as pd
import dash_html_components as html
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
js_style = assign("""
function(feature) {
let t = {bon: "#1E90FF", moyen: "#FF7F50", mauvais: "#FF4500"};
console.log(feature);
return {color: t[feature.properties.etat_zh]}
}""")
fonction = """
(feature, layer) => {
//console.log("feature =",feature);
if(!feature.properties){
return
}
if(feature.properties.etat_zh){
layer.bindTooltip(feature.properties.etat_zh)
}
}
"""
hoverStyle = arrow_function(dict(weight=5, fillOpacity=1))
app.layout = html.Div([
dl.Map(children=[dl.TileLayer(),
dl.GeoJSON(id="site-4", hoverStyle=hoverStyle,
url=app.get_asset_url('sites/4.json'),
options=dict(onEachFeature=assign(fonction), style=js_style
))],
center=[44.3, 7], zoom=9,
style={'width': '100%', 'height': '50vh', 'margin': "auto", "display": "block"}),
html.Div(id="zh_selectionnee"), html.Div(id="zh_survolee")])
@ app.callback(Output("zh_selectionnee", "children"), [Input("site-4", "click_feature")])
def site_click(feature):
if feature is not None:
return f"You clicked {feature['properties']['id']}"
@ app.callback(Output("zh_survolee", "children"), [Input("site-4", "hover_feature")])
def site_hover(feature):
if feature is not None:
return f"You hovered above {feature['properties']['id']}"
if __name__ == '__main__':
app.run_server(debug=True)
|
#!/usr/bin/python
import sys
import macro_compiler as mc
import os
import app_ui as ui
import terminal_colors as tc
import argparse
import led_command as lc
import math
import utils as u
import datetime
global app_description, verbose_mode, random_seed
app_description = None
verbose_mode = None
random_seed = 1
global device_profile, num_leds, starting_macro, num_macro_chars, ending_macro, number_of_macros, char_buffer_size, number_of_fine_zones, number_of_colors, number_of_sequencers, quiet_mode
device_profile = None
num_leds = None
starting_macro = None
num_macro_chars = None
ending_macro = None
number_of_macros = None
char_buffer_size = None
number_of_fine_zones = None
number_of_colors = None
number_of_sequencers = None
quiet_mode = None
def get_options():
global verbose_mode, quiet_mode
parser = argparse.ArgumentParser(description=app_description)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="display verbose info (False)")
parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", help="don't use terminal colors (False)")
args = parser.parse_args()
verbose_mode = args.verbose
quiet_mode = args.quiet
def initialize():
global app_description
global device_profile, num_leds, starting_macro, num_macro_chars, ending_macro, number_of_macros, char_buffer_size, number_of_fine_zones, number_of_colors, number_of_sequencers
app_description = "Apollo Lighting System - Macro Compiler Specs v.2.0 12-0-2018"
get_options()
ui.begin(verbose_mode, quiet_mode)
lc.begin(verbose_mode)
lc.stop_all()
device_profile = lc.get_device_profile()
num_leds = device_profile["NUM-LEDS"]
starting_macro = device_profile["START-MACRO"]
num_macro_chars = device_profile["NUM-MACRO-BYTES"]
ending_macro = device_profile["END-MACRO"]
number_of_macros = device_profile["NUM-MACROS"]
char_buffer_size = device_profile["CHAR-BUFFER-SIZE"]
number_of_fine_zones = device_profile["NUM-FINE-ZONES"]
number_of_colors = device_profile["NUM-PALETTE-COLORS"]
number_of_sequencers = device_profile["NUM-SEQUENCERS"]
total_macro_bytes = device_profile["TOTAL-MACRO-BYTES"]
last_macro_bytes = device_profile["LAST-MACRO-BYTES"]
mc.begin(lc, verbose_mode, quiet_mode, presets(), starting_macro, ending_macro, number_of_sequencers, num_macro_chars, char_buffer_size, last_macro_bytes)
ui.app_description(app_description)
ui.report_info(ui.intro_entry("Number of LEDs", num_leds))
ui.report_info(ui.intro_entry("Number of macros", number_of_macros))
ui.report_info(ui.intro_entry("Number of sequencers", number_of_sequencers))
ui.report_info(ui.intro_entry("Bytes per macro", num_macro_chars))
ui.report_info(ui.intro_entry("First macro", starting_macro))
ui.report_info(ui.intro_entry("Last macro", ending_macro))
ui.report_info(ui.intro_entry("Last macro bytes", last_macro_bytes))
ui.report_info(ui.intro_entry("Char buffer size", char_buffer_size))
def presets():
return {
"NUM-LEDS": num_leds,
"NUM-MACROS": number_of_macros,
"NUM-SEQUENCERS": number_of_sequencers,
"START-MACRO": starting_macro,
"END-MACRO": ending_macro,
"NUM-MACRO-CHARS": num_macro_chars,
"CHAR-BUFFER-SIZE": char_buffer_size,
"NUM-FINE-ZONES": number_of_fine_zones
}
def report_failed(description, expected, got):
description_ = tc.yellow(description)
expected_ = "\t" + tc.cyan("Expected:") + "\n\t\t" + tc.green(str(expected))
got_ = "\t" + tc.cyan("Got: ") + "\n\t\t" + tc.red(str(got))
ui.report_error("\nTest failed! %s\n%s\n%s\n" % (description_, expected_, got_))
def report_failed_script(description, expected, got):
description_ = tc.yellow(description)
expected_ = tc.cyan("Expected:\n")
for line in expected:
expected_ += tc.green(line) + "\n"
got_ = tc.cyan("Got:\n")
for line in got:
got_ += tc.red(line) + "\n"
ui.report_error("\nTest failed! %s\n%s\n%s\n" % (description_, expected_, got_))
def report_worked(description, expected, got):
description_ = tc.yellow(description)
expected_ = "\t" + tc.cyan("Expected:") + "\n\t\t" + tc.green(str(expected))
got_ = "\t" + tc.cyan("Got Same:") + "\n\t\t" + tc.red(str(got))
ui.report_error("\nTest failed! %s\n%s\n%s\n" % (description_, expected_, got_))
def report_worked_script(description, expected, got):
description_ = tc.yellow(description)
expected_ = tc.cyan("Expected:\n")
for line in expected:
expected_ += tc.green(line) + "\n"
got_ = tc.cyan("Got Same:\n")
for line in got:
got_ += tc.red(line) + "\n"
ui.report_error("\nTest failed! %s\n%s\n%s\n" % (description_, expected_, got_))
def report_success(description, expected, got):
if not verbose_mode:
sys.stdout.write(tc.green("."))
sys.stdout.flush()
def report_test(type, description):
print tc.cyan(type) + " " + tc.green(description)
def expect(description, got, expected):
if expected != got:
report_failed(description, expected, got)
else:
report_success(description, expected, got)
def expect_script(description, got, expected):
if expected != got:
report_failed_script(description, expected, got)
else:
report_success(description, expected, got)
def not_expect(description, got, expected):
if expected == got:
report_worked(description, expected, got)
else:
report_success(description, expected, got)
def not_expect_script(description, got, expected):
if expected == got:
report_worked_script(description, expected, got)
else:
report_success(description, expected, got)
def print_script(script):
for line in script:
print line
print
def test(description):
return True
########################################################################
########################################################################
def do_compilation(filename):
u.randomize(random_seed)
return mc.compile_file(filename)
def specs():
if verbose_mode:
report_test("String manipulation tests", "extract_args()")
expect("extract args 1", u.extract_args("[test]", {"[" : "]"}), ["test"])
expect("extract args 2", u.extract_args(" [test] ", {"[" : "]"}), ["test"])
expect("extract args 3", u.extract_args("[ test ]", {"[" : "]"}), ["test"])
expect("extract args 4", u.extract_args("/test/", {"/" : "/"}), ["test"])
expect("extract args 5", u.extract_args("(t e s t)", {"(" : ")"}), ["t", "e", "s", "t"])
expect("extract args 6", u.extract_args("[test] abc", {"[" : "]"}), ["test"])
expect("extract args 7", u.extract_args("abc [test] def", {"[" : "]"}), ["test"])
expect("extract args 8", u.extract_args("(t e s t)", {"(" : ")"}), ["t", "e", "s", "t"])
expect("extract args 9", u.extract_args("abc [test] def [test2]", {"[" : "]"}), ["test"])
expect("extract args 10", u.extract_args("( t e s t )", {"(" : ")"}), ["t", "e", "s", "t"])
expect("extract args 11", u.extract_args("[test", {"[" : "]"}), [])
expect("extract args 12", u.extract_args("test]", {"[" : "]"}), [])
expect("extract args 13", u.extract_args("test", {"[" : "]"}), [])
expect("extract args 14", u.extract_args("[test", {"[" : "]"}), [])
expect("extract args 15", u.extract_args("[]", {"[" : "]"}), [])
expect("extract args 16", u.extract_args("[[test]]", {"[[" : "]]"}), ["test"])
expect("extract args 16", u.extract_args("[[[test]]]", {"[[[" : "]]]"}), ["test"])
expect("extract args 17", u.extract_args("(((test 1 2 3)))", {"(((" : ")))"}), ["test", '1', '2', '3'])
if verbose_mode:
report_test("String manipulation tests", "replace_args()")
expect("replace args 1", u.replace_args("[test]", {"[" : "]"}, "abc"), "abc")
expect("replace args 2", u.replace_args(" [test] ", {"[" : "]"}, "abc"), " abc ")
expect("replace args 3", u.replace_args("[test][]", {"[" : "]"}, "abc"), "abc[]")
expect("replace args 4", u.replace_args("[test", {"[" : "]"}, "abc"), "[test")
expect("replace args 5", u.replace_args("[]", {"[" : "]"}, "abc"), "abc")
if verbose_mode:
report_test("String manipulation tests", "get_key_args()")
expect("get key args 1", u.get_key_args("$abc", "$"), ["abc"])
expect("get key args 2", u.get_key_args(" $abc", "$"), ["abc"])
expect("get key args 3", u.get_key_args("$abc ", "$"), ["abc"])
expect("get key args 4", u.get_key_args(" $abc ", "$"), ["abc"])
expect("get key args 5", u.get_key_args("$abc def", "$"), ["abc", "def"])
expect("get key args 6", u.get_key_args("$abc def", "$"), ["abc", "def"])
expect("get key args 7", u.get_key_args("$", "$"), [])
expect("get key args 8", u.get_key_args("", "$"), [])
expect("get key args 1", u.get_key_args("$$abc", "$$"), ["abc"])
# crash tests
# these are expected to raise compilation errors
fixture_filename = "spec_fixtures/crash_script%d.mac"
expected_filename = "spec_fixtures/crash_script%d_expected.txt"
script_number = 1
while(True):
fixture_file = fixture_filename % script_number
expected_file = expected_filename % script_number
script_number += 1
if(os.path.exists(fixture_file)):
if verbose_mode:
report_test("Crash script", fixture_file)
try:
compiled_script = do_compilation(fixture_file)
expect("Script " + fixture_file + " was expected to raise an error", "no error raised", "error raised")
if verbose_mode:
print_script(compiled_script)
except ValueError as error:
expected_error = mc.load_file(expected_file, ".txt")
expect("Compilation crashes with expected message - script: " + fixture_file, [str(error)], expected_error)
continue
finally:
mc.reset()
else:
break
# negative tests
# these are expected to fail to compile but not crash
fixture_filename = "spec_fixtures/bad_script%d.mac"
script_number = 1
while(True):
fixture_file = fixture_filename % script_number
script_number += 1
if(os.path.exists(fixture_file)):
if verbose_mode:
report_test("Negative script", fixture_file)
try:
compiled_script = do_compilation(fixture_file)
if verbose_mode:
print_script(compiled_script)
expect("Invalid compilation of: " + fixture_file, mc.compilation_valid(compiled_script), False)
mc.reset()
except ValueError as e:
ui.report_error("Compilation error in " + fixture_file + ": " + str(e))
print_script(mc.get_saved_bad_script())
expect("Exception caught", True, False)
else:
break
# positive tests
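# these are expected to compile successfully and match the expected output file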
fixture_filename = "spec_fixtures/test_script%d.mac"
expected_filename = "spec_fixtures/test_script%d_expected.txt"
script_number = 1
while(True):
fixture_file = fixture_filename % script_number
expected_file = expected_filename % script_number
script_number += 1
if(os.path.exists(fixture_file)):
if verbose_mode:
report_test("Positive script", fixture_file)
try:
compiled_script = do_compilation(fixture_file)
except ValueError as e:
ui.report_error("Compilation error in " + fixture_file + ": " + str(e))
break
if verbose_mode:
print_script(compiled_script)
expected_script = mc.load_file(expected_file, ".txt")
expect_script("Valid compilation of: " + fixture_file, compiled_script, expected_script)
mc.reset()
else:
break
############################################################################
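# Entry points: setup() initializes the LED device connection and the macro compiler,
# loop() runs the specs once, reports elapsed time, and exits.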
def setup():
initialize()
def loop():
start_time = datetime.datetime.now()
specs()
end_time = datetime.datetime.now()
print
print tc.white("tests ran in: " + str(end_time - start_time))
print
sys.exit()
if __name__ == '__main__':
setup()
# while True:
# try:
loop()
# except KeyboardInterrupt:
# sys.exit("\nExiting...\n")
# except Exception:
# raise
| #!/usr/bin/python
import sys
import macro_compiler as mc
import os
import app_ui as ui
import terminal_colors as tc
import argparse
import led_command as lc
import math
import utils as u
import datetime
global app_description, verbose_mode, random_seed
app_description = None
verbose_mode = None
random_seed = 1
global device_profile, num_leds, starting_macro, num_macro_chars, ending_macro, number_of_macros, char_buffer_size, number_of_fine_zones, number_of_colors, number_of_sequencers, quiet_mode
device_profile = None
num_leds = None
starting_macro = None
num_macro_chars = None
ending_macro = None
number_of_macros = None
char_buffer_size = None
number_of_fine_zones = None
number_of_colors = None
number_of_sequencers = None
quiet_mode = None
def get_options():
global verbose_mode, quiet_mode
parser = argparse.ArgumentParser(description=app_description)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="display verbose info (False)")
parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", help="don't use terminal colors (False)")
args = parser.parse_args()
verbose_mode = args.verbose
quiet_mode = args.quiet
def initialize():
global app_description
global device_profile, num_leds, starting_macro, num_macro_chars, ending_macro, number_of_macros, char_buffer_size, number_of_fine_zones, number_of_colors, number_of_sequencers
app_description = "Apollo Lighting System - Macro Compiler Specs v.2.0 12-0-2018"
get_options()
ui.begin(verbose_mode, quiet_mode)
lc.begin(verbose_mode)
lc.stop_all()
device_profile = lc.get_device_profile()
num_leds = device_profile["NUM-LEDS"]
starting_macro = device_profile["START-MACRO"]
num_macro_chars = device_profile["NUM-MACRO-BYTES"]
ending_macro = device_profile["END-MACRO"]
number_of_macros = device_profile["NUM-MACROS"]
char_buffer_size = device_profile["CHAR-BUFFER-SIZE"]
number_of_fine_zones = device_profile["NUM-FINE-ZONES"]
number_of_colors = device_profile["NUM-PALETTE-COLORS"]
number_of_sequencers = device_profile["NUM-SEQUENCERS"]
total_macro_bytes = device_profile["TOTAL-MACRO-BYTES"]
last_macro_bytes = device_profile["LAST-MACRO-BYTES"]
mc.begin(lc, verbose_mode, quiet_mode, presets(), starting_macro, ending_macro, number_of_sequencers, num_macro_chars, char_buffer_size, last_macro_bytes)
ui.app_description(app_description)
ui.report_info(ui.intro_entry("Number of LEDs", num_leds))
ui.report_info(ui.intro_entry("Number of macros", number_of_macros))
ui.report_info(ui.intro_entry("Number of sequencers", number_of_sequencers))
ui.report_info(ui.intro_entry("Bytes per macro", num_macro_chars))
ui.report_info(ui.intro_entry("First macro", starting_macro))
ui.report_info(ui.intro_entry("Last macro", ending_macro))
ui.report_info(ui.intro_entry("Last macro bytes", last_macro_bytes))
ui.report_info(ui.intro_entry("Char buffer size", char_buffer_size))
def presets():
return {
"NUM-LEDS": num_leds,
"NUM-MACROS": number_of_macros,
"NUM-SEQUENCERS": number_of_sequencers,
"START-MACRO": starting_macro,
"END-MACRO": ending_macro,
"NUM-MACRO-CHARS": num_macro_chars,
"CHAR-BUFFER-SIZE": char_buffer_size,
"NUM-SEQUENCERS": number_of_sequencers,
"NUM-FINE-ZONES": number_of_fine_zones
}
def report_failed(description, expected, got):
description_ = tc.yellow(description)
expected_ = "\t" + tc.cyan("Expected:") + "\n\t\t" + tc.green(str(expected))
got_ = "\t" + tc.cyan("Got: ") + "\n\t\t" + tc.red(str(got))
ui.report_error("\nTest failed! %s\n%s\n%s\n" % (description_, expected_, got_))
def report_failed_script(description, expected, got):
description_ = tc.yellow(description)
expected_ = tc.cyan("Expected:\n")
for line in expected:
expected_ += tc.green(line) + "\n"
got_ = tc.cyan("Got:\n")
for line in got:
got_ += tc.red(line) + "\n"
ui.report_error("\nTest failed! %s\n%s\n%s\n" % (description_, expected_, got_))
def report_worked(description, expected, got):
description_ = tc.yellow(description)
expected_ = "\t" + tc.cyan("Expected:") + "\n\t\t" + tc.green(str(expected))
got_ = "\t" + tc.cyan("Got Same:") + "\n\t\t" + tc.red(str(got))
ui.report_error("\nTest failed! %s\n%s\n%s\n" % (description_, expected_, got_))
def report_worked_script(description, expected, got):
description_ = tc.yellow(description)
expected_ = tc.cyan("Expected:\n")
for line in expected:
expected_ += tc.green(line) + "\n"
got_ = tc.cyan("Got Same:\n")
for line in got:
got_ += tc.red(line) + "\n"
ui.report_error("\nTest failed! %s\n%s\n%s\n" % (description_, expected_, got_))
def report_success(description, expected, got):
if not verbose_mode:
sys.stdout.write(tc.green("."))
sys.stdout.flush()
def report_test(type, description):
print tc.cyan(type) + " " + tc.green(description)
def expect(description, got, expected):
if expected != got:
report_failed(description, expected, got)
else:
report_success(description, expected, got)
def expect_script(description, got, expected):
if expected != got:
report_failed_script(description, expected, got)
else:
report_success(description, expected, got)
def not_expect(description, got, expected):
if expected == got:
report_worked(description, expected, got)
else:
report_success(description, expected, got)
def not_expect_script(description, got, expected):
if expected == got:
report_worked_script(description, expected, got)
else:
report_success(description, expected, got)
def print_script(script):
for line in script:
print line
print
def test(description):
return True
########################################################################
########################################################################
def do_compilation(filename):
u.randomize(random_seed)
return mc.compile_file(filename)
def specs():
if verbose_mode:
report_test("String manipulation tests", "extract_args()")
expect("extract args 1", u.extract_args("[test]", {"[" : "]"}), ["test"])
expect("extract args 2", u.extract_args(" [test] ", {"[" : "]"}), ["test"])
expect("extract args 3", u.extract_args("[ test ]", {"[" : "]"}), ["test"])
expect("extract args 4", u.extract_args("/test/", {"/" : "/"}), ["test"])
expect("extract args 5", u.extract_args("(t e s t)", {"(" : ")"}), ["t", "e", "s", "t"])
expect("extract args 6", u.extract_args("[test] abc", {"[" : "]"}), ["test"])
expect("extract args 7", u.extract_args("abc [test] def", {"[" : "]"}), ["test"])
expect("extract args 8", u.extract_args("(t e s t)", {"(" : ")"}), ["t", "e", "s", "t"])
expect("extract args 9", u.extract_args("abc [test] def [test2]", {"[" : "]"}), ["test"])
expect("extract args 10", u.extract_args("( t e s t )", {"(" : ")"}), ["t", "e", "s", "t"])
expect("extract args 11", u.extract_args("[test", {"[" : "]"}), [])
expect("extract args 12", u.extract_args("test]", {"[" : "]"}), [])
expect("extract args 13", u.extract_args("test", {"[" : "]"}), [])
expect("extract args 14", u.extract_args("[test", {"[" : "]"}), [])
expect("extract args 15", u.extract_args("[]", {"[" : "]"}), [])
expect("extract args 16", u.extract_args("[[test]]", {"[[" : "]]"}), ["test"])
expect("extract args 16", u.extract_args("[[[test]]]", {"[[[" : "]]]"}), ["test"])
expect("extract args 17", u.extract_args("(((test 1 2 3)))", {"(((" : ")))"}), ["test", '1', '2', '3'])
if verbose_mode:
report_test("String manipulation tests", "replace_args()")
expect("replace args 1", u.replace_args("[test]", {"[" : "]"}, "abc"), "abc")
expect("replace args 2", u.replace_args(" [test] ", {"[" : "]"}, "abc"), " abc ")
expect("replace args 3", u.replace_args("[test][]", {"[" : "]"}, "abc"), "abc[]")
expect("replace args 4", u.replace_args("[test", {"[" : "]"}, "abc"), "[test")
expect("replace args 5", u.replace_args("[]", {"[" : "]"}, "abc"), "abc")
if verbose_mode:
report_test("String manipulation tests", "get_key_args()")
expect("get key args 1", u.get_key_args("$abc", "$"), ["abc"])
expect("get key args 2", u.get_key_args(" $abc", "$"), ["abc"])
expect("get key args 3", u.get_key_args("$abc ", "$"), ["abc"])
expect("get key args 4", u.get_key_args(" $abc ", "$"), ["abc"])
expect("get key args 5", u.get_key_args("$abc def", "$"), ["abc", "def"])
expect("get key args 6", u.get_key_args("$abc def", "$"), ["abc", "def"])
expect("get key args 7", u.get_key_args("$", "$"), [])
expect("get key args 8", u.get_key_args("", "$"), [])
expect("get key args 1", u.get_key_args("$$abc", "$$"), ["abc"])
# crash tests
# these are expected to raise compilation errors
fixture_filename = "spec_fixtures/crash_script%d.mac"
expected_filename = "spec_fixtures/crash_script%d_expected.txt"
script_number = 1
while(True):
fixture_file = fixture_filename % script_number
expected_file = expected_filename % script_number
script_number += 1
if(os.path.exists(fixture_file)):
if verbose_mode:
report_test("Crash script", fixture_file)
try:
compiled_script = do_compilation(fixture_file)
expect("Script " + fixture_file + " was expected to raise an error", "no error raised", "error raised")
if verbose_mode:
print_script(compiled_script)
except ValueError as error:
expected_error = mc.load_file(expected_file, ".txt")
expect("Compilation crashes with expected message - script: " + fixture_file, [str(error)], expected_error)
continue
finally:
mc.reset()
else:
break
# negative tests
# these are expected to fail to compile but not crash
fixture_filename = "spec_fixtures/bad_script%d.mac"
script_number = 1
while(True):
fixture_file = fixture_filename % script_number
script_number += 1
if(os.path.exists(fixture_file)):
if verbose_mode:
report_test("Negative script", fixture_file)
try:
compiled_script = do_compilation(fixture_file)
if verbose_mode:
print_script(compiled_script)
expect("Invalid compilation of: " + fixture_file, mc.compilation_valid(compiled_script), False)
mc.reset()
except ValueError as e:
ui.report_error("Compilation error in " + fixture_file + ": " + str(e))
print_script(mc.get_saved_bad_script())
expect("Exception caught", True, False)
else:
break
# positive tests
fixture_filename = "spec_fixtures/test_script%d.mac"
expected_filename = "spec_fixtures/test_script%d_expected.txt"
script_number = 1
while(True):
fixture_file = fixture_filename % script_number
expected_file = expected_filename % script_number
script_number += 1
if(os.path.exists(fixture_file)):
if verbose_mode:
report_test("Positive script", fixture_file)
try:
compiled_script = do_compilation(fixture_file)
except ValueError as e:
ui.report_error("Compilation error in " + fixture_file + ": " + str(e))
break
if verbose_mode:
print_script(compiled_script)
expected_script = mc.load_file(expected_file, ".txt")
expect_script("Valid compilation of: " + fixture_file, compiled_script, expected_script)
mc.reset()
else:
break
############################################################################
def setup():
initialize()
def loop():
start_time = datetime.datetime.now()
specs()
end_time = datetime.datetime.now()
print
print tc.white("tests ran in: " + str(end_time - start_time))
print
sys.exit()
if __name__ == '__main__':
setup()
# while True:
# try:
loop()
# except KeyboardInterrupt:
# sys.exit("\nExiting...\n")
# except Exception:
# raise
|