edited_code stringlengths 17 978k | original_code stringlengths 17 978k |
|---|---|
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.kubernetes.checks.resource.base_spec_check import BaseK8Check
strongCiphers = ["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305","TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384","TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305","TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384","TLS_RSA_WITH_AES_256_GCM_SHA384","TLS_RSA_WITH_AES_128_GCM_SHA256"]
class KubeletCryptographicCiphers(BaseK8Check):
def __init__(self):
# CIS-1.6 4.2.13
id = "CKV_K8S_151"
name = "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers"
categories = [CheckCategories.KUBERNETES]
supported_entities = ['containers']
super().__init__(name=name, id=id, categories=categories, supported_entities=supported_entities)
def get_resource_id(self, conf):
return f'{conf['parent']} - {conf['name']}' if conf.get('name') else conf["parent"]
def scan_spec_conf(self, conf):
if "command" in conf:
if "kubelet" in conf["command"]:
for command in conf["command"]:
if command.startswith("--tls-cipher-suites"):
value = command.split("=")[1]
ciphers = value.split(",")
for cipher in ciphers:
if cipher not in strongCiphers:
return CheckResult.FAILED
return CheckResult.PASSED
check = KubeletCryptographicCiphers() | from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.kubernetes.checks.resource.base_spec_check import BaseK8Check
strongCiphers = ["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305","TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384","TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305","TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384","TLS_RSA_WITH_AES_256_GCM_SHA384","TLS_RSA_WITH_AES_128_GCM_SHA256"]
class KubeletCryptographicCiphers(BaseK8Check):
def __init__(self):
# CIS-1.6 4.2.13
id = "CKV_K8S_151"
name = "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers"
categories = [CheckCategories.KUBERNETES]
supported_entities = ['containers']
super().__init__(name=name, id=id, categories=categories, supported_entities=supported_entities)
def get_resource_id(self, conf):
return f'{conf["parent"]} - {conf["name"]}' if conf.get('name') else conf["parent"]
def scan_spec_conf(self, conf):
if "command" in conf:
if "kubelet" in conf["command"]:
for command in conf["command"]:
if command.startswith("--tls-cipher-suites"):
value = command.split("=")[1]
ciphers = value.split(",")
for cipher in ciphers:
if cipher not in strongCiphers:
return CheckResult.FAILED
return CheckResult.PASSED
check = KubeletCryptographicCiphers() |
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2022 Amano Team
import os
import shutil
import tempfile
from PIL import Image
from pyrogram import Client, filters
from pyrogram.enums import MessageEntityType
from pyrogram.errors import PeerIdInvalid, StickersetInvalid
from pyrogram.raw.functions.messages import GetStickerSet, SendMedia
from pyrogram.raw.functions.stickers import AddStickerToSet, CreateStickerSet
from pyrogram.raw.types import (
DocumentAttributeFilename,
InputDocument,
InputMediaUploadedDocument,
InputStickerSetItem,
InputStickerSetShortName,
)
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
from eduu.config import LOG_CHAT, PREFIXES
from eduu.utils import EMOJI_PATTERN, http
from eduu.utils.localization import use_chat_lang
@Client.on_message(filters.command(["kang", "kibe", "steal"], PREFIXES))
@use_chat_lang()
async def kang_sticker(c: Client, m: Message, strings):
prog_msg = await m.reply_text(strings("kanging_sticker_msg"))
bot_username = c.me.username
sticker_emoji = "🤔"
packnum = 0
packname_found = False
resize = False
animated = False
reply = m.reply_to_message
user = await c.resolve_peer(m.from_user.username or m.from_user.id)
if reply and reply.media:
if reply.photo:
resize = True
elif reply.document:
if "image" in reply.document.mime_type:
# mime_type: image/webp
resize = True
elif "tgsticker" in reply.document.mime_type:
# mime_type: application/x-tgsticker
animated = True
elif reply.sticker:
if not reply.sticker.file_name:
return await prog_msg.edit_text(strings("err_sticker_no_file_name"))
if reply.sticker.emoji:
sticker_emoji = reply.sticker.emoji
animated = reply.sticker.is_animated
if not reply.sticker.file_name.endswith(".tgs"):
resize = True
else:
return await prog_msg.edit_text(strings("invalid_media_string"))
pack_prefix = "anim" if animated else "a"
packname = f"{pack_prefix}_{m.from_user.id}_by_{bot_username}"
if len(m.command) > 1:
if m.command[1].isdigit() and int(m.command[1]) > 0:
# provide pack number to kang in desired pack
packnum = m.command.pop(1)
packname = f"{pack_prefix}{packnum}_{m.from_user.id}_by_{bot_username}"
if len(m.command) > 1:
# matches all valid emojis in input
sticker_emoji = (
"".join(set(EMOJI_PATTERN.findall("".join(m.command[1:]))))
or sticker_emoji
)
filename = await c.download_media(m.reply_to_message)
if not filename:
# Failed to download
await prog_msg.delete()
return
elif m.entities and len(m.entities) > 1:
packname = f"a_{m.from_user.id}_by_{bot_username}"
pack_prefix = "a"
# searching if image_url is given
img_url = None
filename = "sticker.png"
for y in m.entities:
if y.type == MessageEntityType.URL:
img_url = m.text[y.offset : (y.offset + y.length)]
break
if not img_url:
await prog_msg.delete()
return
try:
r = await http.get(img_url)
if r.status_code == 200:
with open(filename, mode="wb") as f:
f.write(r.read())
except Exception as r_e:
return await prog_msg.edit_text(f"{r_e.__class__.__name__} : {r_e}")
if len(m.command) > 2:
# m.command[1] is image_url
if m.command[2].isdigit() and int(m.command[2]) > 0:
packnum = m.command.pop(2)
packname = f"a{packnum}_{m.from_user.id}_by_{bot_username}"
if len(m.command) > 2:
sticker_emoji = (
"".join(set(EMOJI_PATTERN.findall("".join(m.command[2:]))))
or sticker_emoji
)
resize = True
else:
return await prog_msg.delete()
try:
if resize:
filename = resize_image(filename)
max_stickers = 50 if animated else 120
while not packname_found:
try:
stickerset = await c.invoke(
GetStickerSet(
stickerset=InputStickerSetShortName(short_name=packname),
hash=0,
)
)
if stickerset.set.count >= max_stickers:
packnum += 1
packname = (
f"{pack_prefix}_{packnum}_{m.from_user.id}_by_{bot_username}"
)
else:
packname_found = True
except StickersetInvalid:
break
file = await c.save_file(filename)
media = await c.invoke(
SendMedia(
peer=(await c.resolve_peer(LOG_CHAT)),
media=InputMediaUploadedDocument(
file=file,
mime_type=c.guess_mime_type(filename),
attributes=[DocumentAttributeFilename(file_name=filename)],
),
message=f"#Sticker kang by UserID -> {m.from_user.id}",
random_id=c.rnd_id(),
)
)
stkr_file = media.updates[-1].message.media.document
if packname_found:
await prog_msg.edit_text(strings("use_existing_pack"))
await c.invoke(
AddStickerToSet(
stickerset=InputStickerSetShortName(short_name=packname),
sticker=InputStickerSetItem(
document=InputDocument(
id=stkr_file.id,
access_hash=stkr_file.access_hash,
file_reference=stkr_file.file_reference,
),
emoji=sticker_emoji,
),
)
)
else:
await prog_msg.edit_text(strings("create_new_pack_string"))
u_name = m.from_user.username
if u_name:
u_name = f"@{u_name}"
else:
u_name = str(m.from_user.id)
stkr_title = f"{u_name}'s "
if animated:
stkr_title += "Anim. "
stkr_title += "EduuPack"
if packnum != 0:
stkr_title += f" v{packnum}"
try:
await c.invoke(
CreateStickerSet(
user_id=user,
title=stkr_title,
short_name=packname,
stickers=[
InputStickerSetItem(
document=InputDocument(
id=stkr_file.id,
access_hash=stkr_file.access_hash,
file_reference=stkr_file.file_reference,
),
emoji=sticker_emoji,
)
],
animated=animated,
)
)
except PeerIdInvalid:
return await prog_msg.edit_text(
strings("cant_create_sticker_pack_string"),
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"/start", url=f"https://t.me/{bot_username}?start"
)
]
]
),
)
except Exception as all_e:
await prog_msg.edit_text(f"{all_e.__class__.__name__} : {all_e}")
else:
markup = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
strings("view_sticker_pack_btn"),
url=f"t.me/addstickers/{packname}",
)
]
]
)
kanged_success_msg = strings("sticker_kanged_string")
await prog_msg.edit_text(
kanged_success_msg.format(sticker_emoji=sticker_emoji), reply_markup=markup
)
# Cleanup
try:
os.remove(filename)
except OSError:
pass
def resize_image(filename: str) -> str:
im = Image.open(filename)
maxsize = 512
scale = maxsize / max(im.width, im.height)
sizenew = (int(im.width * scale), int(im.height * scale))
im = im.resize(sizenew, Image.NEAREST)
downpath, f_name = os.path.split(filename)
# not hardcoding png_image as "sticker.png"
png_image = os.path.join(downpath, f"{f_name.split(".", 1)[0]}.png")
im.save(png_image, "PNG")
if png_image != filename:
os.remove(filename)
return png_image
@Client.on_message(filters.command("stickerid", PREFIXES) & filters.reply)
@use_chat_lang()
async def getstickerid(c: Client, m: Message, strings):
if m.reply_to_message.sticker:
await m.reply_text(
strings("get_sticker_id_string").format(
stickerid=m.reply_to_message.sticker.file_id
)
)
@Client.on_message(filters.command("getsticker", PREFIXES) & filters.reply)
@use_chat_lang()
async def getstickeraspng(c: Client, m: Message, strings):
sticker = m.reply_to_message.sticker
if sticker:
if sticker.is_animated:
await m.reply_text(strings("animated_not_supported"))
elif not sticker.is_animated:
with tempfile.TemporaryDirectory() as tempdir:
path = os.path.join(tempdir, "getsticker")
sticker_file = await c.download_media(
message=m.reply_to_message,
file_name=f"{path}/{sticker.set_name}.png",
)
await m.reply_to_message.reply_document(
document=sticker_file,
caption=strings("sticker_info").format(
emoji=sticker.emoji, id=sticker.file_id
),
)
shutil.rmtree(tempdir, ignore_errors=True)
else:
await m.reply_text(strings("not_sticker"))
| # SPDX-License-Identifier: MIT
# Copyright (c) 2018-2022 Amano Team
import os
import shutil
import tempfile
from PIL import Image
from pyrogram import Client, filters
from pyrogram.enums import MessageEntityType
from pyrogram.errors import PeerIdInvalid, StickersetInvalid
from pyrogram.raw.functions.messages import GetStickerSet, SendMedia
from pyrogram.raw.functions.stickers import AddStickerToSet, CreateStickerSet
from pyrogram.raw.types import (
DocumentAttributeFilename,
InputDocument,
InputMediaUploadedDocument,
InputStickerSetItem,
InputStickerSetShortName,
)
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
from eduu.config import LOG_CHAT, PREFIXES
from eduu.utils import EMOJI_PATTERN, http
from eduu.utils.localization import use_chat_lang
@Client.on_message(filters.command(["kang", "kibe", "steal"], PREFIXES))
@use_chat_lang()
async def kang_sticker(c: Client, m: Message, strings):
prog_msg = await m.reply_text(strings("kanging_sticker_msg"))
bot_username = c.me.username
sticker_emoji = "🤔"
packnum = 0
packname_found = False
resize = False
animated = False
reply = m.reply_to_message
user = await c.resolve_peer(m.from_user.username or m.from_user.id)
if reply and reply.media:
if reply.photo:
resize = True
elif reply.document:
if "image" in reply.document.mime_type:
# mime_type: image/webp
resize = True
elif "tgsticker" in reply.document.mime_type:
# mime_type: application/x-tgsticker
animated = True
elif reply.sticker:
if not reply.sticker.file_name:
return await prog_msg.edit_text(strings("err_sticker_no_file_name"))
if reply.sticker.emoji:
sticker_emoji = reply.sticker.emoji
animated = reply.sticker.is_animated
if not reply.sticker.file_name.endswith(".tgs"):
resize = True
else:
return await prog_msg.edit_text(strings("invalid_media_string"))
pack_prefix = "anim" if animated else "a"
packname = f"{pack_prefix}_{m.from_user.id}_by_{bot_username}"
if len(m.command) > 1:
if m.command[1].isdigit() and int(m.command[1]) > 0:
# provide pack number to kang in desired pack
packnum = m.command.pop(1)
packname = f"{pack_prefix}{packnum}_{m.from_user.id}_by_{bot_username}"
if len(m.command) > 1:
# matches all valid emojis in input
sticker_emoji = (
"".join(set(EMOJI_PATTERN.findall("".join(m.command[1:]))))
or sticker_emoji
)
filename = await c.download_media(m.reply_to_message)
if not filename:
# Failed to download
await prog_msg.delete()
return
elif m.entities and len(m.entities) > 1:
packname = f"a_{m.from_user.id}_by_{bot_username}"
pack_prefix = "a"
# searching if image_url is given
img_url = None
filename = "sticker.png"
for y in m.entities:
if y.type == MessageEntityType.URL:
img_url = m.text[y.offset : (y.offset + y.length)]
break
if not img_url:
await prog_msg.delete()
return
try:
r = await http.get(img_url)
if r.status_code == 200:
with open(filename, mode="wb") as f:
f.write(r.read())
except Exception as r_e:
return await prog_msg.edit_text(f"{r_e.__class__.__name__} : {r_e}")
if len(m.command) > 2:
# m.command[1] is image_url
if m.command[2].isdigit() and int(m.command[2]) > 0:
packnum = m.command.pop(2)
packname = f"a{packnum}_{m.from_user.id}_by_{bot_username}"
if len(m.command) > 2:
sticker_emoji = (
"".join(set(EMOJI_PATTERN.findall("".join(m.command[2:]))))
or sticker_emoji
)
resize = True
else:
return await prog_msg.delete()
try:
if resize:
filename = resize_image(filename)
max_stickers = 50 if animated else 120
while not packname_found:
try:
stickerset = await c.invoke(
GetStickerSet(
stickerset=InputStickerSetShortName(short_name=packname),
hash=0,
)
)
if stickerset.set.count >= max_stickers:
packnum += 1
packname = (
f"{pack_prefix}_{packnum}_{m.from_user.id}_by_{bot_username}"
)
else:
packname_found = True
except StickersetInvalid:
break
file = await c.save_file(filename)
media = await c.invoke(
SendMedia(
peer=(await c.resolve_peer(LOG_CHAT)),
media=InputMediaUploadedDocument(
file=file,
mime_type=c.guess_mime_type(filename),
attributes=[DocumentAttributeFilename(file_name=filename)],
),
message=f"#Sticker kang by UserID -> {m.from_user.id}",
random_id=c.rnd_id(),
)
)
stkr_file = media.updates[-1].message.media.document
if packname_found:
await prog_msg.edit_text(strings("use_existing_pack"))
await c.invoke(
AddStickerToSet(
stickerset=InputStickerSetShortName(short_name=packname),
sticker=InputStickerSetItem(
document=InputDocument(
id=stkr_file.id,
access_hash=stkr_file.access_hash,
file_reference=stkr_file.file_reference,
),
emoji=sticker_emoji,
),
)
)
else:
await prog_msg.edit_text(strings("create_new_pack_string"))
u_name = m.from_user.username
if u_name:
u_name = f"@{u_name}"
else:
u_name = str(m.from_user.id)
stkr_title = f"{u_name}'s "
if animated:
stkr_title += "Anim. "
stkr_title += "EduuPack"
if packnum != 0:
stkr_title += f" v{packnum}"
try:
await c.invoke(
CreateStickerSet(
user_id=user,
title=stkr_title,
short_name=packname,
stickers=[
InputStickerSetItem(
document=InputDocument(
id=stkr_file.id,
access_hash=stkr_file.access_hash,
file_reference=stkr_file.file_reference,
),
emoji=sticker_emoji,
)
],
animated=animated,
)
)
except PeerIdInvalid:
return await prog_msg.edit_text(
strings("cant_create_sticker_pack_string"),
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"/start", url=f"https://t.me/{bot_username}?start"
)
]
]
),
)
except Exception as all_e:
await prog_msg.edit_text(f"{all_e.__class__.__name__} : {all_e}")
else:
markup = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
strings("view_sticker_pack_btn"),
url=f"t.me/addstickers/{packname}",
)
]
]
)
kanged_success_msg = strings("sticker_kanged_string")
await prog_msg.edit_text(
kanged_success_msg.format(sticker_emoji=sticker_emoji), reply_markup=markup
)
# Cleanup
try:
os.remove(filename)
except OSError:
pass
def resize_image(filename: str) -> str:
im = Image.open(filename)
maxsize = 512
scale = maxsize / max(im.width, im.height)
sizenew = (int(im.width * scale), int(im.height * scale))
im = im.resize(sizenew, Image.NEAREST)
downpath, f_name = os.path.split(filename)
# not hardcoding png_image as "sticker.png"
png_image = os.path.join(downpath, f"{f_name.split('.', 1)[0]}.png")
im.save(png_image, "PNG")
if png_image != filename:
os.remove(filename)
return png_image
@Client.on_message(filters.command("stickerid", PREFIXES) & filters.reply)
@use_chat_lang()
async def getstickerid(c: Client, m: Message, strings):
if m.reply_to_message.sticker:
await m.reply_text(
strings("get_sticker_id_string").format(
stickerid=m.reply_to_message.sticker.file_id
)
)
@Client.on_message(filters.command("getsticker", PREFIXES) & filters.reply)
@use_chat_lang()
async def getstickeraspng(c: Client, m: Message, strings):
sticker = m.reply_to_message.sticker
if sticker:
if sticker.is_animated:
await m.reply_text(strings("animated_not_supported"))
elif not sticker.is_animated:
with tempfile.TemporaryDirectory() as tempdir:
path = os.path.join(tempdir, "getsticker")
sticker_file = await c.download_media(
message=m.reply_to_message,
file_name=f"{path}/{sticker.set_name}.png",
)
await m.reply_to_message.reply_document(
document=sticker_file,
caption=strings("sticker_info").format(
emoji=sticker.emoji, id=sticker.file_id
),
)
shutil.rmtree(tempdir, ignore_errors=True)
else:
await m.reply_text(strings("not_sticker"))
|
#!usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'yanqiong'
import random
import secrets
from bisect import bisect_right
from sgqlc.operation import Operation
from pandas.core.internals import BlockManager
from tqsdk.ins_schema import ins_schema, _add_all_frags
RD = random.Random(secrets.randbits(128)) # 初始化随机数引擎,使用随机数作为seed,防止用户同时拉起多个策略,产生同样的 seed
def _generate_uuid(prefix=''):
return f"{prefix + "_" if prefix else ""}{RD.getrandbits(128):032x}"
def _query_for_quote(symbol):
"""
返回请求某个合约的合约信息的 query_pack
调用次函数应该全部都是sdk的代码主动请求合约信息
用户请求合约信息一定是 PYSDK_api 开头的请求,因为用户请求的合约信息在回测时带有 timestamp 参数,是不应该调用此函数的
"""
symbol_list = symbol if isinstance(symbol, list) else [symbol]
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(instrument_id=symbol_list)
_add_all_frags(query)
return {
"aid": "ins_query",
"query_id": _generate_uuid(prefix='PYSDK_quote_'),
"query": op.__to_graphql__()
}
def _query_for_init():
"""
返回某些类型合约的 query
todo: 为了兼容旧版提供给用户的 api._data["quote"].items() 类似用法,应该限制交易所 ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"]
"""
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(class_=["FUTURE", "INDEX", "OPTION", "COMBINE", "CONT"],
exchange_id=["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"])
_add_all_frags(query)
return op.__to_graphql__()
night_trading_table = {
"DCE.a": ["21:00:00", "23:00:00"],
"DCE.b": ["21:00:00", "23:00:00"],
"DCE.c": ["21:00:00", "23:00:00"],
"DCE.cs": ["21:00:00", "23:00:00"],
"DCE.m": ["21:00:00", "23:00:00"],
"DCE.y": ["21:00:00", "23:00:00"],
"DCE.p": ["21:00:00", "23:00:00"],
"DCE.l": ["21:00:00", "23:00:00"],
"DCE.v": ["21:00:00", "23:00:00"],
"DCE.pp": ["21:00:00", "23:00:00"],
"DCE.j": ["21:00:00", "23:00:00"],
"DCE.jm": ["21:00:00", "23:00:00"],
"DCE.i": ["21:00:00", "23:00:00"],
"DCE.eg": ["21:00:00", "23:00:00"],
"DCE.eb": ["21:00:00", "23:00:00"],
"DCE.rr": ["21:00:00", "23:00:00"],
"DCE.pg": ["21:00:00", "23:00:00"],
"CZCE.CF": ["21:00:00", "23:00:00"],
"CZCE.CY": ["21:00:00", "23:00:00"],
"CZCE.SA": ["21:00:00", "23:00:00"],
"CZCE.SR": ["21:00:00", "23:00:00"],
"CZCE.TA": ["21:00:00", "23:00:00"],
"CZCE.OI": ["21:00:00", "23:00:00"],
"CZCE.MA": ["21:00:00", "23:00:00"],
"CZCE.FG": ["21:00:00", "23:00:00"],
"CZCE.RM": ["21:00:00", "23:00:00"],
"CZCE.ZC": ["21:00:00", "23:00:00"],
"CZCE.TC": ["21:00:00", "23:00:00"],
"SHFE.rb": ["21:00:00", "23:00:00"],
"SHFE.hc": ["21:00:00", "23:00:00"],
"SHFE.fu": ["21:00:00", "23:00:00"],
"SHFE.bu": ["21:00:00", "23:00:00"],
"SHFE.ru": ["21:00:00", "23:00:00"],
"SHFE.sp": ["21:00:00", "23:00:00"],
"INE.nr": ["21:00:00", "23:00:00"],
"SHFE.cu": ["21:00:00", "25:00:00"],
"SHFE.al": ["21:00:00", "25:00:00"],
"SHFE.zn": ["21:00:00", "25:00:00"],
"SHFE.pb": ["21:00:00", "25:00:00"],
"SHFE.ni": ["21:00:00", "25:00:00"],
"SHFE.sn": ["21:00:00", "25:00:00"],
"SHFE.ss": ["21:00:00", "25:00:00"],
"SHFE.au": ["21:00:00", "26:30:00"],
"SHFE.ag": ["21:00:00", "26:30:00"],
"INE.sc": ["21:00:00", "26:30:00"],
}
def _quotes_add_night(quotes):
"""为 quotes 中应该有夜盘但是市价合约文件中没有夜盘的品种,添加夜盘时间"""
for symbol in quotes:
product_id = quotes[symbol].get("product_id")
if quotes[symbol].get("trading_time") and product_id:
key = f"{quotes[symbol].get("exchange_id")}.{product_id}"
if key in night_trading_table and (not quotes[symbol]["trading_time"].get("night")):
quotes[symbol]["trading_time"]["night"] = [night_trading_table[key]]
def _bisect_value(a, x, priority="right"):
"""
返回 bisect_right() 取得下标对应的值,当插入点距离前后元素距离相等,priority 表示优先返回右边的值还是左边的值
a: 必须是已经排序好(升序排列)的 list
bisect_right : Return the index where to insert item x in list a, assuming a is sorted.
"""
assert priority in ['left', 'right']
insert_index = bisect_right(a, x)
if 0 < insert_index < len(a):
left_dis = x - a[insert_index - 1]
right_dis = a[insert_index] - x
if left_dis == right_dis:
mid_index = insert_index - 1 if priority == "left" else insert_index
elif left_dis < right_dis:
mid_index = insert_index - 1
else:
mid_index = insert_index
else:
assert insert_index == 0 or insert_index == len(a)
mid_index = 0 if insert_index == 0 else (len(a) - 1)
return a[mid_index]
class BlockManagerUnconsolidated(BlockManager):
"""mock BlockManager for unconsolidated, 不会因为自动合并同类型的 blocks 而导致 k 线数据不更新"""
def __init__(self, *args, **kwargs):
BlockManager.__init__(self, *args, **kwargs)
self._is_consolidated = False
self._known_consolidated = False
def _consolidate_inplace(self): pass
| #!usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'yanqiong'
import random
import secrets
from bisect import bisect_right
from sgqlc.operation import Operation
from pandas.core.internals import BlockManager
from tqsdk.ins_schema import ins_schema, _add_all_frags
RD = random.Random(secrets.randbits(128)) # 初始化随机数引擎,使用随机数作为seed,防止用户同时拉起多个策略,产生同样的 seed
def _generate_uuid(prefix=''):
return f"{prefix + '_' if prefix else ''}{RD.getrandbits(128):032x}"
def _query_for_quote(symbol):
"""
返回请求某个合约的合约信息的 query_pack
调用次函数应该全部都是sdk的代码主动请求合约信息
用户请求合约信息一定是 PYSDK_api 开头的请求,因为用户请求的合约信息在回测时带有 timestamp 参数,是不应该调用此函数的
"""
symbol_list = symbol if isinstance(symbol, list) else [symbol]
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(instrument_id=symbol_list)
_add_all_frags(query)
return {
"aid": "ins_query",
"query_id": _generate_uuid(prefix='PYSDK_quote_'),
"query": op.__to_graphql__()
}
def _query_for_init():
"""
返回某些类型合约的 query
todo: 为了兼容旧版提供给用户的 api._data["quote"].items() 类似用法,应该限制交易所 ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"]
"""
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(class_=["FUTURE", "INDEX", "OPTION", "COMBINE", "CONT"],
exchange_id=["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"])
_add_all_frags(query)
return op.__to_graphql__()
night_trading_table = {
"DCE.a": ["21:00:00", "23:00:00"],
"DCE.b": ["21:00:00", "23:00:00"],
"DCE.c": ["21:00:00", "23:00:00"],
"DCE.cs": ["21:00:00", "23:00:00"],
"DCE.m": ["21:00:00", "23:00:00"],
"DCE.y": ["21:00:00", "23:00:00"],
"DCE.p": ["21:00:00", "23:00:00"],
"DCE.l": ["21:00:00", "23:00:00"],
"DCE.v": ["21:00:00", "23:00:00"],
"DCE.pp": ["21:00:00", "23:00:00"],
"DCE.j": ["21:00:00", "23:00:00"],
"DCE.jm": ["21:00:00", "23:00:00"],
"DCE.i": ["21:00:00", "23:00:00"],
"DCE.eg": ["21:00:00", "23:00:00"],
"DCE.eb": ["21:00:00", "23:00:00"],
"DCE.rr": ["21:00:00", "23:00:00"],
"DCE.pg": ["21:00:00", "23:00:00"],
"CZCE.CF": ["21:00:00", "23:00:00"],
"CZCE.CY": ["21:00:00", "23:00:00"],
"CZCE.SA": ["21:00:00", "23:00:00"],
"CZCE.SR": ["21:00:00", "23:00:00"],
"CZCE.TA": ["21:00:00", "23:00:00"],
"CZCE.OI": ["21:00:00", "23:00:00"],
"CZCE.MA": ["21:00:00", "23:00:00"],
"CZCE.FG": ["21:00:00", "23:00:00"],
"CZCE.RM": ["21:00:00", "23:00:00"],
"CZCE.ZC": ["21:00:00", "23:00:00"],
"CZCE.TC": ["21:00:00", "23:00:00"],
"SHFE.rb": ["21:00:00", "23:00:00"],
"SHFE.hc": ["21:00:00", "23:00:00"],
"SHFE.fu": ["21:00:00", "23:00:00"],
"SHFE.bu": ["21:00:00", "23:00:00"],
"SHFE.ru": ["21:00:00", "23:00:00"],
"SHFE.sp": ["21:00:00", "23:00:00"],
"INE.nr": ["21:00:00", "23:00:00"],
"SHFE.cu": ["21:00:00", "25:00:00"],
"SHFE.al": ["21:00:00", "25:00:00"],
"SHFE.zn": ["21:00:00", "25:00:00"],
"SHFE.pb": ["21:00:00", "25:00:00"],
"SHFE.ni": ["21:00:00", "25:00:00"],
"SHFE.sn": ["21:00:00", "25:00:00"],
"SHFE.ss": ["21:00:00", "25:00:00"],
"SHFE.au": ["21:00:00", "26:30:00"],
"SHFE.ag": ["21:00:00", "26:30:00"],
"INE.sc": ["21:00:00", "26:30:00"],
}
def _quotes_add_night(quotes):
"""为 quotes 中应该有夜盘但是市价合约文件中没有夜盘的品种,添加夜盘时间"""
for symbol in quotes:
product_id = quotes[symbol].get("product_id")
if quotes[symbol].get("trading_time") and product_id:
key = f"{quotes[symbol].get('exchange_id')}.{product_id}"
if key in night_trading_table and (not quotes[symbol]["trading_time"].get("night")):
quotes[symbol]["trading_time"]["night"] = [night_trading_table[key]]
def _bisect_value(a, x, priority="right"):
"""
返回 bisect_right() 取得下标对应的值,当插入点距离前后元素距离相等,priority 表示优先返回右边的值还是左边的值
a: 必须是已经排序好(升序排列)的 list
bisect_right : Return the index where to insert item x in list a, assuming a is sorted.
"""
assert priority in ['left', 'right']
insert_index = bisect_right(a, x)
if 0 < insert_index < len(a):
left_dis = x - a[insert_index - 1]
right_dis = a[insert_index] - x
if left_dis == right_dis:
mid_index = insert_index - 1 if priority == "left" else insert_index
elif left_dis < right_dis:
mid_index = insert_index - 1
else:
mid_index = insert_index
else:
assert insert_index == 0 or insert_index == len(a)
mid_index = 0 if insert_index == 0 else (len(a) - 1)
return a[mid_index]
class BlockManagerUnconsolidated(BlockManager):
"""mock BlockManager for unconsolidated, 不会因为自动合并同类型的 blocks 而导致 k 线数据不更新"""
def __init__(self, *args, **kwargs):
BlockManager.__init__(self, *args, **kwargs)
self._is_consolidated = False
self._known_consolidated = False
def _consolidate_inplace(self): pass
|
"""
# Sheets Account
Read a Google Sheet as if it were are realtime source of transactions
for a GL account. Columns are mapped to attributes. The
assumption is that the sheet maps to a single account, and the
rows are the credit/debits to that account.
Can be used as a plugin, which will write new entries (for reference)
to a file, but also maintain a "live" view of the transactions.
We support most of the sane columns on a sheet:
- date
- narration
- payee
- account
- amount
- currency
- tags
- links
- Anything else, if non-empty cell, gets added as a META
Some things to look at are:
- Multi-currency Support
- Lot support?
- Other Directives: Note, Document, Balance?
- Smarter per-sheet caching of local results
I strongly suggest using "Transfer" accounts for all asset movements between
two accounts both of which are tracked via a Sheet. This simplifies the
"Matching" and allows each side to be reconciled independently.
TODO: Default Account when account column is blank?
"""
# stdlib imports
import logging
import decimal
import pprint
import typing
import datetime
import dateparser
import pathlib
import slugify
# Beancount imports
from beancount.core import data
from coolbeans.utils import safe_plugin, get_setting
from coolbeans.tools.sheets import google_connect, safe_open_sheet
from coolbeans.plugins.accountsync import apply_coolbean_settings
import gspread
STRIP_SYMOLS = '₱$'
DEFAULT_CURRENCY = "USD"
logger = logging.getLogger(__name__)
__plugins__ = ['apply_coolbean_settings', 'remote_entries_plugin']
def clean_slug(slug):
"""Clean a possible Slug string to remove dashes and lower case."""
return slug.replace('-', '').lower()
def coolbean_sheets(entries, context):
"""Given a set of entries, pull out any slugs and add them to the context"""
settings = context.setdefault('coolbean-accounts', {})
# Pull out any 'slug' meta data
for entry in entries:
if isinstance(entry, data.Open):
document = entry.meta.get('document_name', None)
tab = entry.meta.get('document_tab', None)
slug = entry.meta.get('slug', "")
if document and tab and slug:
settings[slug] = {
'account': entry.account,
'document': document,
'tab': tab,
'currencies': entry.currencies
}
else:
if document or tab:
print(f"Skipping {entry.account}: {document}/{tab}/{slug}")
return entries, []
def remote_entries(entries, options_map):
"""
@param entries:
@param options_map:
@return:
"""
errors = []
settings = options_map['coolbeans']
secrets_file = get_setting('google-apis', settings)
connection = google_connect(secrets_file)
new_entries_path = None
new_entries_file = get_setting('new-entries-bean', settings)
if new_entries_file:
new_entries_path = pathlib.Path(new_entries_file)
# Capture the configuration off the Open
remote_accounts = {}
for entry in entries:
if not isinstance(entry, data.Open):
continue
document_name = entry.meta.get('document_name', None)
default_currency = entry.currencies[0] if entry.currencies else DEFAULT_CURRENCY
if document_name:
options = dict(
document_name=document_name,
document_tab=entry.meta.get('document_tab', None),
reverse_amount=entry.meta.get('reverse', False),
default_currency=default_currency,
entry=entry,
entry_file=new_entries_path
)
remote_accounts[entry.account] = options
new_entries = []
for account, options in remote_accounts.items():
try:
new_entries += load_remote_account(
connection=connection,
errors=errors,
account=account,
options=options
)
except Exception as exc:
logger.error(f"while processing {account}", exc_info=exc)
if new_entries and new_entries_path:
from beancount.parser import printer
with new_entries_path.open("w") as stream:
printer.print_entries(new_entries, file=stream)
logger.info(f"Wrote {len(new_entries)} new account(s) to {new_entries_path}.")
return entries+new_entries, errors
remote_entries_plugin = safe_plugin(remote_entries)
ALIASES = {
'narration': ['description', 'notes', 'details', 'memo']
}
def clean_record(record: typing.Dict[str, str]):
"""This is a bit of a hack. But using get_all_records doesn't leave many
options"""
new_record = {}
for k, v in record.items():
k = slugify.slugify(k.lower().strip())
v = str(v)
# Combine multiple narration columns if needed:
for field, names in ALIASES.items():
new_record.setdefault(field, '')
if k in names:
# Add the value to Narration:
new_record[field] += ('. ' if new_record[field] else '') + v
k = None # Clear this Key
break
# Really Ugly hack around embeded currency symbols. Needs Cleanup
if k == 'amount':
v = v.replace(',', '')
for s in STRIP_SYMOLS:
v = v.replace(s, '')
if v and not v[0].isdecimal() and not v[0]=='-':
v = v[1:]
# Pull currency?
# Decimal is fussy
try:
v = decimal.Decimal(v)
except decimal.InvalidOperation:
v = 0
if k:
new_record[k] = v
return new_record
def load_remote_account(
        connection: gspread.Client,
        errors: list,
        account: str,
        options: typing.Dict[str, str]
):
    """Try to Load Entries from URL into Account.

    options include:
    - document_name -- the Actual Google Doc name
    - document_tab -- the Tab name (or numeric index) on the Doc
    - default_currency -- the entry currency if None is provided
    - reverse_amount -- if true, assume positive entries are credits

    Returns a list of beancount Transaction objects.  Per-row problems are
    appended to *errors* (and logged) rather than raised.
    """
    entries = []
    document_name = options['document_name']
    document_tab = options.get('document_tab', 0) or 0
    default_currency = options['default_currency']
    reverse_amount = options.get('reverse_amount', False)
    if not document_name:
        # Fix: return the (empty) list instead of None so callers that do
        # `new_entries += load_remote_account(...)` don't blow up.
        return entries
    # Sign multiplier: a "reversed" sheet treats positive amounts as credits.
    m = -1 if reverse_amount else 1
    logger.info(f"Attempting to download entries for {account} from {document_name}.{document_tab}")
    workbook = connection.open(document_name)
    sheet = None
    try:
        # document_tab may be a numeric worksheet index...
        document_tab = int(document_tab)
        sheet = workbook.get_worksheet(document_tab)
    except ValueError:
        pass
    if sheet is None:
        # ...or a worksheet title.
        sheet = workbook.worksheet(document_tab)
    records = sheet.get_all_records()
    import re
    row = 0
    # logger.info(f"Found {len(records)} entries.")
    for record in records:
        row += 1
        record = clean_record(record)
        # Rows without a date or amount are silently skipped.
        if 'date' not in record or not record['date']:
            continue
        if 'amount' not in record or not record['amount']:
            continue
        #if 'account' not in record or not record['account'].strip():
        #    continue
        narration = record.pop('narration', None)
        payee = record.pop('payee', None)
        tagstr = record.pop('tags', '')
        tags = set(re.split(r'\W+', tagstr)) if tagstr else set()
        date = dateparser.parse(record.pop('date'))
        if date:
            date = datetime.date(year=date.year, month=date.month, day=date.day)
        linkstr = record.pop('links', '')
        links = set(re.split(r'\W+', linkstr)) if linkstr else set()
        meta = {
            'filename': str(options['entry_file']),
            'lineno': 0,
            # +1 because sheet row 1 is the header row.
            'document-sheet-row': f"{document_name}/{document_tab}/{row+1}"
        }
        amount = decimal.Decimal(record.pop('amount')) * m
        currency = record.pop('currency', default_currency)
        entry_account = record.pop('account')
        # Any remaining non-empty columns become metadata on the transaction.
        for k, v in record.items():
            if v:
                meta[k] = v
        try:
            if not entry_account:
                # Fix: use single quotes for the key inside the f-string --
                # reusing the enclosing double quote is a SyntaxError on
                # Python < 3.12.
                errors.append(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
                logger.warning(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
                continue
            entry = data.Transaction(
                date=date,
                narration=narration,
                payee=payee,
                tags=tags,
                meta=meta,
                links=links,
                flag='*',
                postings=[
                    data.Posting(
                        account=account,
                        units=data.Amount(amount, currency),
                        cost=None,
                        price=None,
                        flag='*',
                        meta={}
                    ),
                    data.Posting(
                        account=entry_account,
                        units=data.Amount(-amount, currency),
                        cost=None,
                        price=None,
                        flag='*',
                        meta={}
                    )
                ]
            )
            entries.append(entry)
        except Exception as exc:
            logger.error(f"Error while parsing {record}", exc_info=exc)
            errors.append(str(exc))
    logger.info(f"Loaded {len(entries)} entries for {account} from {document_name}.{document_tab}")
    return entries
| """
# Sheets Account
Read a Google Sheet as if it were are realtime source of transactions
for a GL account. Columns are mapped to attributes. The
assumption is that the sheet maps to a single account, and the
rows are the credit/debits to that account.
Can be used as a plugin, which will write new entries (for reference)
to a file, but also maintain a "live" view of the transactions.
We support most of the sane columns on a sheet:
- date
- narration
- payee
- account
- amount
- currency
- tags
- links
- Anything else, if non-empty cell, gets added as a META
Some things to look at are:
- Multi-currency Support
- Lot support?
- Other Directives: Note, Document, Balance?
- Smarter per-sheet caching of local results
I strongly suggest using "Transfer" accounts for all asset movements between
two accounts both of which are tracked via a Sheet. This simplifies the
"Matching" and allows each side to be reconciled independently.
TODO: Default Account when account column is blank?
"""
# stdlib imports
import logging
import decimal
import pprint
import typing
import datetime
import dateparser
import pathlib
import slugify
# Beancount imports
from beancount.core import data
from coolbeans.utils import safe_plugin, get_setting
from coolbeans.tools.sheets import google_connect, safe_open_sheet
from coolbeans.plugins.accountsync import apply_coolbean_settings
import gspread
STRIP_SYMOLS = '₱$'
DEFAULT_CURRENCY = "USD"
logger = logging.getLogger(__name__)
__plugins__ = ['apply_coolbean_settings', 'remote_entries_plugin']
def clean_slug(slug):
    """Normalize a slug string: strip every dash, then fold to lower case."""
    undashed = slug.replace('-', '')
    return undashed.lower()
def coolbean_sheets(entries, context):
    """Scan *entries* for Open directives carrying sheet metadata and record
    them in context['coolbean-accounts'], keyed by the directive's slug."""
    accounts = context.setdefault('coolbean-accounts', {})
    for directive in entries:
        if not isinstance(directive, data.Open):
            continue
        doc = directive.meta.get('document_name', None)
        tab = directive.meta.get('document_tab', None)
        slug = directive.meta.get('slug', "")
        if doc and tab and slug:
            accounts[slug] = {
                'account': directive.account,
                'document': doc,
                'tab': tab,
                'currencies': directive.currencies,
            }
        elif doc or tab:
            # Partially configured directive: note it and move on.
            print(f"Skipping {directive.account}: {doc}/{tab}/{slug}")
    return entries, []
def remote_entries(entries, options_map):
    """Load sheet-backed entries for every Open directive that names a document.

    Connects to Google using the configured secrets file, pulls entries for
    each configured account, optionally writes them to the configured
    new-entries file, and returns (entries + new entries, errors).
    """
    errors = []
    settings = options_map['coolbeans']
    connection = google_connect(get_setting('google-apis', settings))

    target_file = get_setting('new-entries-bean', settings)
    new_entries_path = pathlib.Path(target_file) if target_file else None

    # Capture the sheet configuration off each Open directive.
    remote_accounts = {}
    for directive in entries:
        if not isinstance(directive, data.Open):
            continue
        doc_name = directive.meta.get('document_name', None)
        if not doc_name:
            continue
        currency = directive.currencies[0] if directive.currencies else DEFAULT_CURRENCY
        remote_accounts[directive.account] = dict(
            document_name=doc_name,
            document_tab=directive.meta.get('document_tab', None),
            reverse_amount=directive.meta.get('reverse', False),
            default_currency=currency,
            entry=directive,
            entry_file=new_entries_path,
        )

    new_entries = []
    for account, options in remote_accounts.items():
        try:
            new_entries += load_remote_account(
                connection=connection,
                errors=errors,
                account=account,
                options=options,
            )
        except Exception as exc:
            logger.error(f"while processing {account}", exc_info=exc)

    if new_entries and new_entries_path:
        from beancount.parser import printer
        with new_entries_path.open("w") as stream:
            printer.print_entries(new_entries, file=stream)
        logger.info(f"Wrote {len(new_entries)} new account(s) to {new_entries_path}.")

    return entries + new_entries, errors
# Plugin entry point -- presumably safe_plugin converts exceptions into
# loader errors rather than crashing; confirm against coolbeans.utils.
remote_entries_plugin = safe_plugin(remote_entries)

# Alternate sheet-column headings that are folded into a single field by
# clean_record (all of these merge into 'narration').
ALIASES = {
    'narration': ['description', 'notes', 'details', 'memo']
}
def clean_record(record: typing.Dict[str, str]):
    """This is a bit of a hack. But using get_all_records doesn't leave many
    options.

    Normalizes one sheet row: slugifies the column headings, merges the
    ALIASES columns (e.g. description/notes/memo) into 'narration', and
    coerces 'amount' to a Decimal (0 if unparseable).
    """
    new_record = {}
    for k, v in record.items():
        # Slugify the heading so variants like "  Memo " and "memo" collide.
        k = slugify.slugify(k.lower().strip())
        v = str(v)
        # Combine multiple narration columns if needed:
        for field, names in ALIASES.items():
            # Ensure the target field exists even when no alias column matches.
            new_record.setdefault(field, '')
            if k in names:
                # Add the value to Narration:
                new_record[field] += ('. ' if new_record[field] else '') + v
                k = None  # Clear this Key so it is not stored again below.
                break
        # Really Ugly hack around embeded currency symbols. Needs Cleanup
        if k == 'amount':
            v = v.replace(',', '')
            for s in STRIP_SYMOLS:
                v = v.replace(s, '')
            # NOTE(review): drops a single leading non-digit, non-minus char --
            # presumably a currency symbol not in STRIP_SYMOLS; confirm.
            if v and not v[0].isdecimal() and not v[0] == '-':
                v = v[1:]
                # Pull currency?
            # Decimal is fussy
            try:
                v = decimal.Decimal(v)
            except decimal.InvalidOperation:
                # Unparseable amounts collapse to 0; callers skip falsy amounts.
                v = 0
        if k:
            new_record[k] = v
    return new_record
def load_remote_account(
        connection: gspread.Client,
        errors: list,
        account: str,
        options: typing.Dict[str, str]
):
    """Try to Load Entries from URL into Account.

    options include:
    - document_name -- the Actual Google Doc name
    - document_tab -- the Tab name (or numeric index) on the Doc
    - default_currency -- the entry currency if None is provided
    - reverse_amount -- if true, assume positive entries are credits

    Returns a list of beancount Transactions; per-row problems are appended
    to *errors* and logged instead of raised.
    """
    entries = []
    document_name = options['document_name']
    document_tab = options.get('document_tab', 0) or 0
    default_currency = options['default_currency']
    reverse_amount = options.get('reverse_amount', False)
    if not document_name:
        # NOTE(review): this returns None, not []; the caller uses `+=` on the
        # result -- safe only because callers never pass an empty name.
        return
    # Sign multiplier: a "reversed" sheet treats positive amounts as credits.
    m = -1 if reverse_amount else 1
    logger.info(f"Attempting to download entries for {account} from {document_name}.{document_tab}")
    workbook = connection.open(document_name)
    sheet = None
    try:
        # document_tab may be a numeric worksheet index...
        document_tab = int(document_tab)
        sheet = workbook.get_worksheet(document_tab)
    except ValueError:
        pass
    if sheet is None:
        # ...or a worksheet title.
        sheet = workbook.worksheet(document_tab)
    records = sheet.get_all_records()
    import re
    row = 0
    # logger.info(f"Found {len(records)} entries.")
    for record in records:
        row += 1
        record = clean_record(record)
        # Rows without a date or amount are silently skipped.
        if 'date' not in record or not record['date']:
            continue
        if 'amount' not in record or not record['amount']:
            continue
        #if 'account' not in record or not record['account'].strip():
        #    continue
        narration = record.pop('narration', None)
        payee = record.pop('payee', None)
        tagstr = record.pop('tags', '')
        tags = set(re.split(r'\W+', tagstr)) if tagstr else set()
        date = dateparser.parse(record.pop('date'))
        if date:
            date = datetime.date(year=date.year, month=date.month, day=date.day)
        linkstr = record.pop('links', '')
        links = set(re.split(r'\W+', linkstr)) if linkstr else set()
        meta = {
            'filename': str(options['entry_file']),
            'lineno': 0,
            # +1 because sheet row 1 is the header row.
            'document-sheet-row': f"{document_name}/{document_tab}/{row+1}"
        }
        amount = decimal.Decimal(record.pop('amount')) * m
        currency = record.pop('currency', default_currency)
        entry_account = record.pop('account')
        # Any remaining non-empty columns become metadata on the transaction.
        for k, v in record.items():
            if v:
                meta[k] = v
        try:
            if not entry_account:
                errors.append(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
                logger.warning(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
                continue
            entry = data.Transaction(
                date=date,
                narration=narration,
                payee=payee,
                tags=tags,
                meta=meta,
                links=links,
                flag='*',
                postings=[
                    data.Posting(
                        account=account,
                        units=data.Amount(amount, currency),
                        cost=None,
                        price=None,
                        flag='*',
                        meta={}
                    ),
                    data.Posting(
                        account=entry_account,
                        units=data.Amount(-amount, currency),
                        cost=None,
                        price=None,
                        flag='*',
                        meta={}
                    )
                ]
            )
            entries.append(entry)
        except Exception as exc:
            logger.error(f"Error while parsing {record}", exc_info=exc)
            errors.append(str(exc))
    logger.info(f"Loaded {len(entries)} entries for {account} from {document_name}.{document_tab}")
    return entries
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from msccl.collectives import *
from msccl.algorithm import *
from msccl.instance import *
from msccl.topologies import *
def _alltoall_subproblem(local_nodes, num_copies):
    """Build the alltoall subproblem collective: local_nodes ranks plus one
    extra rank (index local_nodes) standing in for all remote copies.

    Chunks are laid out in three consecutive ranges:
    [0, local_end)               -- purely local src/dst pairs
    [local_end, remote_out_end)  -- chunks leaving through the remote node
    [remote_out_end, num_chunks) -- chunks arriving from the remote node
    """
    remote_node = local_nodes
    local_end = local_nodes * local_nodes
    num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
    remote_out_end = local_end + num_remote_pairs
    num_chunks = remote_out_end + num_remote_pairs

    def cases(chunk, local, remote_out, remote_in):
        # Dispatch on the range the chunk falls into, passing the
        # range-relative chunk index to the chosen handler.
        if chunk < local_end:
            return local(chunk)
        elif chunk < remote_out_end:
            return remote_out(chunk - local_end)
        else:
            return remote_in(chunk - remote_out_end)

    def pre(rank, chunk):
        # Precondition: which rank starts with each chunk.
        return cases(chunk,
                     lambda c: rank == c % local_nodes,
                     lambda c: rank == (c // (num_copies - 1)) % local_nodes,
                     lambda c: rank == remote_node)

    def post(rank, chunk):
        # Postcondition: which rank must end up with each chunk.
        return cases(chunk,
                     lambda c: rank == c // local_nodes,
                     lambda c: rank == remote_node,
                     lambda c: rank == (c // (num_copies - 1)) // local_nodes)

    def trigger(rank, chunk):
        # On the remote node, an outgoing chunk is paired with the matching
        # incoming chunk (offset by num_remote_pairs) and vice versa.
        if rank == remote_node:
            return cases(chunk,
                         lambda c: None,
                         lambda c: chunk + num_remote_pairs,
                         lambda c: chunk - num_remote_pairs)
        else:
            return None

    return build_collective(f'AlltoallSubproblem(n={local_nodes},copies={num_copies})',
                            local_nodes + 1, num_chunks,
                            pre, post, trigger=trigger)
def make_alltoall_subproblem_collective_and_topology(topology, num_copies, relay_nodes, bw=1, share_bw=False):
    """Build the alltoall subproblem collective and its subproblem topology.

    The subproblem topology is *topology* plus one extra "remote" node
    connected to each rank in *relay_nodes* with bandwidth *bw*.  When
    share_bw is set, switches constrain the relays to share that bandwidth.
    """
    local_nodes = topology.num_nodes()
    remote_node = local_nodes
    # links[dst][src] convention, sized for the one extra remote node.
    links = [[0 for _ in range(local_nodes + 1)] for _ in range(local_nodes + 1)]
    for src in range(local_nodes):
        for dst in range(local_nodes):
            links[dst][src] = topology.link(src, dst)
    for relay in relay_nodes:
        links[remote_node][relay] = bw
        links[relay][remote_node] = bw
    switches = topology.switches.copy()
    if share_bw:
        # Fix: the original referenced the undefined name `num_nodes` here,
        # raising NameError whenever share_bw=True. The remote endpoint's
        # rank is `remote_node` (== local_nodes).
        switches.append((relay_nodes, [remote_node], bw, 'remote_out'))
        switches.append(([remote_node], relay_nodes, bw, 'remote_in'))
    collective = _alltoall_subproblem(local_nodes, num_copies)
    # Build the relay list outside the f-string: re-using the f-string's own
    # quote character inside `{...}` is a SyntaxError before Python 3.12.
    relays = ",".join(str(i) for i in relay_nodes)
    # Fix: pass the extended `switches` list; the original passed the
    # untouched topology.switches, silently discarding the share_bw switches.
    topology = Topology(f'Subtopo(local={topology.name},relays=({relays}))', links, switches)
    return collective, topology
def synthesize_alltoall_subproblem(subproblem_algo, num_copies, logging=False):
    """Stitch num_copies instances of a solved subproblem algorithm into a
    full alltoall algorithm over local_nodes * num_copies ranks.

    Sends touching each copy's "remote" node are paired across copies; raises
    ValueError if the subproblem is pipelined or leaves unpaired remote
    sends/recvs.  NOTE(review): the `logging` parameter is unused here.
    """
    if subproblem_algo.is_pipelined():
        raise ValueError('Pipelining is not supported.')

    local_topology = subproblem_algo.topology
    chunks = subproblem_algo.instance.chunks
    # The subproblem topology includes one extra (remote) node.
    local_nodes = local_topology.num_nodes() - 1
    remote_node = local_nodes
    nodes = local_nodes * num_copies
    collective = alltoall(nodes).chunk_up(chunks)

    # Create a distributed topology where copies of relay nodes that connect to the remote node in the subproblem
    # topology are connected to all the relay nodes in the other copies.
    links = [[0 for _ in range(nodes)] for _ in range(nodes)]
    for dst in range(nodes):
        for src in range(nodes):
            local_src = src % local_nodes
            local_dst = dst % local_nodes
            if src // local_nodes != dst // local_nodes:
                # Cross-copy link: limited by the slower leg through the remote node.
                bw = min(local_topology.link(local_src, remote_node), local_topology.link(remote_node, local_dst))
                links[dst][src] = bw
            else:
                links[dst][src] = local_topology.link(local_src, local_dst)

    # Also make copies of switches with a similar expansion of the remote node into the nodes of other copies.
    switches = []
    for srcs, dsts, bw, name in local_topology.switches:
        for i in range(num_copies):
            def to_dist(ranks):
                for rank in ranks:
                    if rank < remote_node:
                        # Non-remote nodes are just translated to the distributed numbering of ranks.
                        yield rank + i * local_nodes
                    else:
                        # Include all remote nodes in the switch. This is fine because the links already limit
                        # connectivity to just the relay nodes.
                        for r in range(nodes):
                            if r // local_nodes != i:
                                yield r
            dist_srcs = list(to_dist(srcs))
            dist_dsts = list(to_dist(dsts))
            switches.append((dist_srcs, dist_dsts, bw, f'copy_{i}_{name}_local'))
    topology = Topology(f'Stiched(sub={local_topology.name},copies={num_copies})', links, switches)

    def nth_chunk_for_pair(src, dst, idx):
        # The following chunk calculation respects both the _scattered and _transpose
        # pre/postconditions in Alltoall. When substituting it in:
        # -the precondition (chunk % self.num_nodes) simplifies to src
        # -the postcondition ((chunk // self.num_nodes) % self.num_nodes) simplifies to dst
        return (src + dst * collective.num_nodes) * chunks + idx

    steps = []

    # Calculate the ranges of the differently handled chunks
    local_end = local_nodes * local_nodes
    num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
    remote_out_end = local_end + num_remote_pairs
    num_chunks = remote_out_end + num_remote_pairs

    for local_step in subproblem_algo.steps:
        sends = []

        # These are used to track operations involving remote nodes that get matched with another operation in the same
        # step.
        unmatched_sends = {}
        unmatched_recvs = {}

        # Stitch together copies of the subproblem algorithm
        for chunk, src, dst in local_step.sends:
            for i in range(num_copies):
                def to_dist(rank):
                    # Translates ranks from the local to the distributed topology
                    return rank + i * local_nodes

                def other_start(c):
                    # Given a relative remote chunk return local rank 0 in the copy it corresponds to
                    other_i = c % (num_copies - 1)
                    if other_i >= i:
                        other_i += 1
                    return other_i * local_nodes

                # Calculate origin and target ranks that match the Alltoall pre/postconditions
                if chunk < local_end:
                    assert src != remote_node and dst != remote_node
                    origin = to_dist((chunk // chunks) % local_nodes)
                    target = to_dist((chunk // chunks) // local_nodes)
                    # Check that the origin and target calculation match the local collective
                    assert subproblem_algo.collective.precondition(origin % local_nodes, chunk)
                    assert subproblem_algo.collective.postcondition(target % local_nodes, chunk)
                elif chunk < remote_out_end:
                    c = chunk - local_end
                    local_origin = ((c // chunks) // (num_copies - 1)) % local_nodes
                    origin = to_dist(local_origin)
                    target = other_start(c) + ((c // (num_copies - 1))) // local_nodes
                    # Check that the origin and target calculation match the local collective
                    assert subproblem_algo.collective.precondition(local_origin, chunk)
                    assert subproblem_algo.collective.postcondition(target % local_nodes, chunk + num_remote_pairs)
                else:
                    assert chunk < num_chunks
                    c = chunk - remote_out_end
                    local_target = ((c // chunks) // (num_copies - 1)) // local_nodes
                    target = to_dist(local_target)
                    origin = other_start(c) + ((c // (num_copies - 1))) % local_nodes
                    # Check that the origin and target calculation match the local collective
                    assert subproblem_algo.collective.precondition(origin % local_nodes, chunk - num_remote_pairs)
                    assert subproblem_algo.collective.postcondition(local_target, chunk)

                # Get the chunk number in the distributed algorithm
                chunk_idx = chunk % chunks
                # Translate send src and dst to distributed space and add the send to the distributed algorithm
                dist_chunk = nth_chunk_for_pair(origin, target, chunk_idx)
                if dst == remote_node:
                    assert chunk < remote_out_end
                    # Sends to remote nodes have to find a matched receive
                    if dist_chunk in unmatched_recvs:
                        dist_dst = unmatched_recvs.pop(dist_chunk)
                        sends.append((dist_chunk, to_dist(src), dist_dst))
                    else:
                        unmatched_sends[dist_chunk] = to_dist(src)
                elif src == remote_node:
                    assert chunk < num_chunks
                    # Receives from remote nodes have to find a matched send
                    if dist_chunk in unmatched_sends:
                        dist_src = unmatched_sends.pop(dist_chunk)
                        sends.append((dist_chunk, dist_src, to_dist(dst)))
                    else:
                        unmatched_recvs[dist_chunk] = to_dist(dst)
                else:
                    # Sends locally are just translated to the new distributed space of ranks
                    sends.append((dist_chunk, to_dist(src), to_dist(dst)))

        if len(unmatched_sends) > 0 or len(unmatched_recvs) > 0:
            raise ValueError('Subproblem algorithm has unpaired sends/recvs.')

        steps.append(Step(local_step.rounds, sends))

    instance = Instance(
        steps=len(steps),
        extra_rounds=sum(step.rounds - 1 for step in steps),
        chunks=chunks,
    )
    return Algorithm.make_implementation(collective, topology, instance, steps)
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from msccl.collectives import *
from msccl.algorithm import *
from msccl.instance import *
from msccl.topologies import *
def _alltoall_subproblem(local_nodes, num_copies):
    """Build the alltoall subproblem collective: local_nodes ranks plus one
    extra rank (index local_nodes) standing in for all remote copies.

    Chunks are laid out in three consecutive ranges:
    [0, local_end)               -- purely local src/dst pairs
    [local_end, remote_out_end)  -- chunks leaving through the remote node
    [remote_out_end, num_chunks) -- chunks arriving from the remote node
    """
    remote_node = local_nodes
    local_end = local_nodes * local_nodes
    num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
    remote_out_end = local_end + num_remote_pairs
    num_chunks = remote_out_end + num_remote_pairs

    def cases(chunk, local, remote_out, remote_in):
        # Dispatch on the range the chunk falls into, passing the
        # range-relative chunk index to the chosen handler.
        if chunk < local_end:
            return local(chunk)
        elif chunk < remote_out_end:
            return remote_out(chunk - local_end)
        else:
            return remote_in(chunk - remote_out_end)

    def pre(rank, chunk):
        # Precondition: which rank starts with each chunk.
        return cases(chunk,
                     lambda c: rank == c % local_nodes,
                     lambda c: rank == (c // (num_copies - 1)) % local_nodes,
                     lambda c: rank == remote_node)

    def post(rank, chunk):
        # Postcondition: which rank must end up with each chunk.
        return cases(chunk,
                     lambda c: rank == c // local_nodes,
                     lambda c: rank == remote_node,
                     lambda c: rank == (c // (num_copies - 1)) // local_nodes)

    def trigger(rank, chunk):
        # On the remote node, an outgoing chunk is paired with the matching
        # incoming chunk (offset by num_remote_pairs) and vice versa.
        if rank == remote_node:
            return cases(chunk,
                         lambda c: None,
                         lambda c: chunk + num_remote_pairs,
                         lambda c: chunk - num_remote_pairs)
        else:
            return None

    return build_collective(f'AlltoallSubproblem(n={local_nodes},copies={num_copies})',
                            local_nodes + 1, num_chunks,
                            pre, post, trigger=trigger)
def make_alltoall_subproblem_collective_and_topology(topology, num_copies, relay_nodes, bw=1, share_bw=False):
    """Build the alltoall subproblem collective and its subproblem topology.

    The subproblem topology is *topology* plus one extra "remote" node
    connected to each rank in *relay_nodes* with bandwidth *bw*.  When
    share_bw is set, switches constrain the relays to share that bandwidth.
    """
    local_nodes = topology.num_nodes()
    remote_node = local_nodes
    # links[dst][src] convention, sized for the one extra remote node.
    links = [[0 for _ in range(local_nodes + 1)] for _ in range(local_nodes + 1)]
    for src in range(local_nodes):
        for dst in range(local_nodes):
            links[dst][src] = topology.link(src, dst)
    for relay in relay_nodes:
        links[remote_node][relay] = bw
        links[relay][remote_node] = bw
    switches = topology.switches.copy()
    if share_bw:
        # Fix: the original referenced the undefined name `num_nodes` here,
        # raising NameError whenever share_bw=True. The remote endpoint's
        # rank is `remote_node` (== local_nodes).
        switches.append((relay_nodes, [remote_node], bw, 'remote_out'))
        switches.append(([remote_node], relay_nodes, bw, 'remote_in'))
    collective = _alltoall_subproblem(local_nodes, num_copies)
    # Fix: pass the extended `switches` list; the original passed the
    # untouched topology.switches, silently discarding the share_bw switches.
    topology = Topology(f'Subtopo(local={topology.name},relays=({",".join(str(i) for i in relay_nodes)}))', links, switches)
    return collective, topology
def synthesize_alltoall_subproblem(subproblem_algo, num_copies, logging=False):
    """Stitch num_copies instances of a solved subproblem algorithm into a
    full alltoall algorithm over local_nodes * num_copies ranks.

    Sends touching each copy's "remote" node are paired across copies; raises
    ValueError if the subproblem is pipelined or leaves unpaired remote
    sends/recvs.  NOTE(review): the `logging` parameter is unused here.
    """
    if subproblem_algo.is_pipelined():
        raise ValueError('Pipelining is not supported.')

    local_topology = subproblem_algo.topology
    chunks = subproblem_algo.instance.chunks
    # The subproblem topology includes one extra (remote) node.
    local_nodes = local_topology.num_nodes() - 1
    remote_node = local_nodes
    nodes = local_nodes * num_copies
    collective = alltoall(nodes).chunk_up(chunks)

    # Create a distributed topology where copies of relay nodes that connect to the remote node in the subproblem
    # topology are connected to all the relay nodes in the other copies.
    links = [[0 for _ in range(nodes)] for _ in range(nodes)]
    for dst in range(nodes):
        for src in range(nodes):
            local_src = src % local_nodes
            local_dst = dst % local_nodes
            if src // local_nodes != dst // local_nodes:
                # Cross-copy link: limited by the slower leg through the remote node.
                bw = min(local_topology.link(local_src, remote_node), local_topology.link(remote_node, local_dst))
                links[dst][src] = bw
            else:
                links[dst][src] = local_topology.link(local_src, local_dst)

    # Also make copies of switches with a similar expansion of the remote node into the nodes of other copies.
    switches = []
    for srcs, dsts, bw, name in local_topology.switches:
        for i in range(num_copies):
            def to_dist(ranks):
                for rank in ranks:
                    if rank < remote_node:
                        # Non-remote nodes are just translated to the distributed numbering of ranks.
                        yield rank + i * local_nodes
                    else:
                        # Include all remote nodes in the switch. This is fine because the links already limit
                        # connectivity to just the relay nodes.
                        for r in range(nodes):
                            if r // local_nodes != i:
                                yield r
            dist_srcs = list(to_dist(srcs))
            dist_dsts = list(to_dist(dsts))
            switches.append((dist_srcs, dist_dsts, bw, f'copy_{i}_{name}_local'))
    topology = Topology(f'Stiched(sub={local_topology.name},copies={num_copies})', links, switches)

    def nth_chunk_for_pair(src, dst, idx):
        # The following chunk calculation respects both the _scattered and _transpose
        # pre/postconditions in Alltoall. When substituting it in:
        # -the precondition (chunk % self.num_nodes) simplifies to src
        # -the postcondition ((chunk // self.num_nodes) % self.num_nodes) simplifies to dst
        return (src + dst * collective.num_nodes) * chunks + idx

    steps = []

    # Calculate the ranges of the differently handled chunks
    local_end = local_nodes * local_nodes
    num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
    remote_out_end = local_end + num_remote_pairs
    num_chunks = remote_out_end + num_remote_pairs

    for local_step in subproblem_algo.steps:
        sends = []

        # These are used to track operations involving remote nodes that get matched with another operation in the same
        # step.
        unmatched_sends = {}
        unmatched_recvs = {}

        # Stitch together copies of the subproblem algorithm
        for chunk, src, dst in local_step.sends:
            for i in range(num_copies):
                def to_dist(rank):
                    # Translates ranks from the local to the distributed topology
                    return rank + i * local_nodes

                def other_start(c):
                    # Given a relative remote chunk return local rank 0 in the copy it corresponds to
                    other_i = c % (num_copies - 1)
                    if other_i >= i:
                        other_i += 1
                    return other_i * local_nodes

                # Calculate origin and target ranks that match the Alltoall pre/postconditions
                if chunk < local_end:
                    assert src != remote_node and dst != remote_node
                    origin = to_dist((chunk // chunks) % local_nodes)
                    target = to_dist((chunk // chunks) // local_nodes)
                    # Check that the origin and target calculation match the local collective
                    assert subproblem_algo.collective.precondition(origin % local_nodes, chunk)
                    assert subproblem_algo.collective.postcondition(target % local_nodes, chunk)
                elif chunk < remote_out_end:
                    c = chunk - local_end
                    local_origin = ((c // chunks) // (num_copies - 1)) % local_nodes
                    origin = to_dist(local_origin)
                    target = other_start(c) + ((c // (num_copies - 1))) // local_nodes
                    # Check that the origin and target calculation match the local collective
                    assert subproblem_algo.collective.precondition(local_origin, chunk)
                    assert subproblem_algo.collective.postcondition(target % local_nodes, chunk + num_remote_pairs)
                else:
                    assert chunk < num_chunks
                    c = chunk - remote_out_end
                    local_target = ((c // chunks) // (num_copies - 1)) // local_nodes
                    target = to_dist(local_target)
                    origin = other_start(c) + ((c // (num_copies - 1))) % local_nodes
                    # Check that the origin and target calculation match the local collective
                    assert subproblem_algo.collective.precondition(origin % local_nodes, chunk - num_remote_pairs)
                    assert subproblem_algo.collective.postcondition(local_target, chunk)

                # Get the chunk number in the distributed algorithm
                chunk_idx = chunk % chunks
                # Translate send src and dst to distributed space and add the send to the distributed algorithm
                dist_chunk = nth_chunk_for_pair(origin, target, chunk_idx)
                if dst == remote_node:
                    assert chunk < remote_out_end
                    # Sends to remote nodes have to find a matched receive
                    if dist_chunk in unmatched_recvs:
                        dist_dst = unmatched_recvs.pop(dist_chunk)
                        sends.append((dist_chunk, to_dist(src), dist_dst))
                    else:
                        unmatched_sends[dist_chunk] = to_dist(src)
                elif src == remote_node:
                    assert chunk < num_chunks
                    # Receives from remote nodes have to find a matched send
                    if dist_chunk in unmatched_sends:
                        dist_src = unmatched_sends.pop(dist_chunk)
                        sends.append((dist_chunk, dist_src, to_dist(dst)))
                    else:
                        unmatched_recvs[dist_chunk] = to_dist(dst)
                else:
                    # Sends locally are just translated to the new distributed space of ranks
                    sends.append((dist_chunk, to_dist(src), to_dist(dst)))

        if len(unmatched_sends) > 0 or len(unmatched_recvs) > 0:
            raise ValueError('Subproblem algorithm has unpaired sends/recvs.')

        steps.append(Step(local_step.rounds, sends))

    instance = Instance(
        steps=len(steps),
        extra_rounds=sum(step.rounds - 1 for step in steps),
        chunks=chunks,
    )
    return Algorithm.make_implementation(collective, topology, instance, steps)
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import os
import zipfile
from dataclasses import dataclass
from io import BytesIO
from typing import Iterable
from pants.backend.python.subsystems.setuptools import PythonDistributionFieldSet
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import Pex, PexRequest, PexRequirements
from pants.backend.python.util_rules.pex import rules as pex_rules
from pants.backend.python.util_rules.python_sources import PythonSourceFiles
from pants.build_graph.address import Address
from pants.core.goals.package import BuiltPackage, PackageFieldSet
from pants.core.util_rules.source_files import SourceFiles
from pants.engine.addresses import Addresses
from pants.engine.fs import (
EMPTY_SNAPSHOT,
Digest,
DigestContents,
DigestSubset,
MergeDigests,
PathGlobs,
Snapshot,
)
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest
from pants.util.docutil import doc_url
from pants.util.meta import frozen_after_init
logger = logging.getLogger(__name__)
@frozen_after_init
@dataclass(unsafe_hash=True)
class LocalDistsPexRequest:
    """Request to build the local dists from the dependency closure of a set of addresses."""

    addresses: Addresses
    interpreter_constraints: InterpreterConstraints
    # The result will return these with the sources provided by the dists subtracted out.
    # This will help the caller prevent sources from appearing twice on sys.path.
    sources: PythonSourceFiles

    def __init__(
        self,
        addresses: Iterable[Address],
        *,
        interpreter_constraints: InterpreterConstraints = InterpreterConstraints(),
        sources: PythonSourceFiles = PythonSourceFiles(
            SourceFiles(EMPTY_SNAPSHOT, tuple()), tuple()
        ),
    ) -> None:
        # Normalize the raw iterable into an Addresses collection.
        self.addresses = Addresses(addresses)
        self.interpreter_constraints = interpreter_constraints
        self.sources = sources
@dataclass(frozen=True)
class LocalDistsPex:
    """A PEX file containing locally-built dists.

    Can be consumed from another PEX, e.g., by adding to PEX_PATH.

    Lists the files provided by the dists on sys.path, so they can be subtracted from
    sources digests, to prevent the same file ending up on sys.path twice.
    """

    # The PEX containing the locally-built wheels.
    pex: Pex
    # The sources from the request, but with any files provided by the local dists subtracted out.
    remaining_sources: PythonSourceFiles
@rule(desc="Building local distributions")
async def build_local_dists(
    request: LocalDistsPexRequest,
) -> LocalDistsPex:
    """Build the python_distribution targets in the closure of *request.addresses*
    into a single local_dists.pex, and subtract the files the built wheels
    provide from the request's sources (so nothing lands on sys.path twice).
    """
    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
    applicable_targets = [
        tgt for tgt in transitive_targets.closure if PythonDistributionFieldSet.is_applicable(tgt)
    ]
    python_dist_field_sets = [
        PythonDistributionFieldSet.create(target) for target in applicable_targets
    ]

    dists = await MultiGet(
        [Get(BuiltPackage, PackageFieldSet, field_set) for field_set in python_dist_field_sets]
    )

    # The primary use-case of the "local dists" feature is to support consuming native extensions
    # as wheels without having to publish them first.
    # It doesn't seem very useful to consume locally-built sdists, and it makes it hard to
    # reason about possible sys.path collisions between the in-repo sources and whatever the
    # sdist will place on the sys.path when it's installed.
    # So for now we simply ignore sdists, with a warning if necessary.
    provided_files = set()
    wheels = []

    all_contents = await MultiGet(Get(DigestContents, Digest, dist.digest) for dist in dists)
    for dist, contents, tgt in zip(dists, all_contents, applicable_targets):
        artifacts = {(a.relpath or "") for a in dist.artifacts}
        # A given local dist might build a wheel and an sdist (and maybe other artifacts -
        # we don't know what setup command was run...)
        # As long as there is a wheel, we can ignore the other artifacts.
        wheel = next((art for art in artifacts if art.endswith(".whl")), None)
        if wheel:
            wheel_content = next(content for content in contents if content.path == wheel)
            wheels.append(wheel)
            buf = BytesIO()
            buf.write(wheel_content.content)
            buf.seek(0)
            with zipfile.ZipFile(buf) as zf:
                provided_files.update(zf.namelist())
        else:
            logger.warning(
                f"Encountered a dependency on the {tgt.alias} target at {tgt.address.spec}, but "
                "this target does not produce a Python wheel artifact. Therefore this target's "
                "code will be used directly from sources, without a distribution being built, "
                "and therefore any native extensions in it will not be built.\n\n"
                # Fix: use single quotes for the doc_url argument -- re-using
                # the f-string's own double quote inside `{...}` is a
                # SyntaxError before Python 3.12.
                f"See {doc_url('python-distributions')} for details on how to set up a {tgt.alias} "
                "target to produce a wheel."
            )

    dists_digest = await Get(Digest, MergeDigests([dist.digest for dist in dists]))
    wheels_digest = await Get(Digest, DigestSubset(dists_digest, PathGlobs(["**/*.whl"])))

    dists_pex = await Get(
        Pex,
        PexRequest(
            output_filename="local_dists.pex",
            requirements=PexRequirements(wheels),
            interpreter_constraints=request.interpreter_constraints,
            additional_inputs=wheels_digest,
            internal_only=True,
        ),
    )

    # We check source roots in reverse lexicographic order,
    # so we'll find the innermost root that matches.
    source_roots = list(reversed(sorted(request.sources.source_roots)))
    remaining_sources = set(request.sources.source_files.files)
    unrooted_files_set = set(request.sources.source_files.unrooted_files)
    for source in request.sources.source_files.files:
        if source not in unrooted_files_set:
            for source_root in source_roots:
                if (
                    source.startswith(source_root)
                    and os.path.relpath(source, source_root) in provided_files
                ):
                    remaining_sources.remove(source)
    remaining_sources_snapshot = await Get(
        Snapshot,
        DigestSubset(
            request.sources.source_files.snapshot.digest, PathGlobs(sorted(remaining_sources))
        ),
    )

    subtracted_sources = PythonSourceFiles(
        SourceFiles(remaining_sources_snapshot, request.sources.source_files.unrooted_files),
        request.sources.source_roots,
    )
    return LocalDistsPex(dists_pex, subtracted_sources)
def rules():
    """Expose this module's rules together with the pex backend rules."""
    return tuple(collect_rules()) + tuple(pex_rules())
| # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import os
import zipfile
from dataclasses import dataclass
from io import BytesIO
from typing import Iterable
from pants.backend.python.subsystems.setuptools import PythonDistributionFieldSet
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import Pex, PexRequest, PexRequirements
from pants.backend.python.util_rules.pex import rules as pex_rules
from pants.backend.python.util_rules.python_sources import PythonSourceFiles
from pants.build_graph.address import Address
from pants.core.goals.package import BuiltPackage, PackageFieldSet
from pants.core.util_rules.source_files import SourceFiles
from pants.engine.addresses import Addresses
from pants.engine.fs import (
EMPTY_SNAPSHOT,
Digest,
DigestContents,
DigestSubset,
MergeDigests,
PathGlobs,
Snapshot,
)
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest
from pants.util.docutil import doc_url
from pants.util.meta import frozen_after_init
logger = logging.getLogger(__name__)
@frozen_after_init
@dataclass(unsafe_hash=True)
class LocalDistsPexRequest:
"""Request to build the local dists from the dependency closure of a set of addresses."""
addresses: Addresses
interpreter_constraints: InterpreterConstraints
# The result will return these with the sources provided by the dists subtracted out.
# This will help the caller prevent sources from appearing twice on sys.path.
sources: PythonSourceFiles
def __init__(
self,
addresses: Iterable[Address],
*,
interpreter_constraints: InterpreterConstraints = InterpreterConstraints(),
sources: PythonSourceFiles = PythonSourceFiles(
SourceFiles(EMPTY_SNAPSHOT, tuple()), tuple()
),
) -> None:
self.addresses = Addresses(addresses)
self.interpreter_constraints = interpreter_constraints
self.sources = sources
@dataclass(frozen=True)
class LocalDistsPex:
"""A PEX file containing locally-built dists.
Can be consumed from another PEX, e.g., by adding to PEX_PATH.
Lists the files provided by the dists on sys.path, so they can be subtracted from
sources digests, to prevent the same file ending up on sys.path twice.
"""
pex: Pex
# The sources from the request, but with any files provided by the local dists subtracted out.
remaining_sources: PythonSourceFiles
@rule(desc="Building local distributions")
async def build_local_dists(
request: LocalDistsPexRequest,
) -> LocalDistsPex:
transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
applicable_targets = [
tgt for tgt in transitive_targets.closure if PythonDistributionFieldSet.is_applicable(tgt)
]
python_dist_field_sets = [
PythonDistributionFieldSet.create(target) for target in applicable_targets
]
dists = await MultiGet(
[Get(BuiltPackage, PackageFieldSet, field_set) for field_set in python_dist_field_sets]
)
# The primary use-case of the "local dists" feature is to support consuming native extensions
# as wheels without having to publish them first.
# It doesn't seem very useful to consume locally-built sdists, and it makes it hard to
# reason about possible sys.path collisions between the in-repo sources and whatever the
# sdist will place on the sys.path when it's installed.
# So for now we simply ignore sdists, with a warning if necessary.
provided_files = set()
wheels = []
all_contents = await MultiGet(Get(DigestContents, Digest, dist.digest) for dist in dists)
for dist, contents, tgt in zip(dists, all_contents, applicable_targets):
artifacts = set((a.relpath or "") for a in dist.artifacts)
# A given local dist might build a wheel and an sdist (and maybe other artifacts -
# we don't know what setup command was run...)
# As long as there is a wheel, we can ignore the other artifacts.
wheel = next((art for art in artifacts if art.endswith(".whl")), None)
if wheel:
wheel_content = next(content for content in contents if content.path == wheel)
wheels.append(wheel)
buf = BytesIO()
buf.write(wheel_content.content)
buf.seek(0)
with zipfile.ZipFile(buf) as zf:
provided_files.update(zf.namelist())
else:
logger.warning(
f"Encountered a dependency on the {tgt.alias} target at {tgt.address.spec}, but "
"this target does not produce a Python wheel artifact. Therefore this target's "
"code will be used directly from sources, without a distribution being built, "
"and therefore any native extensions in it will not be built.\n\n"
f"See {doc_url('python-distributions')} for details on how to set up a {tgt.alias} "
"target to produce a wheel."
)
dists_digest = await Get(Digest, MergeDigests([dist.digest for dist in dists]))
wheels_digest = await Get(Digest, DigestSubset(dists_digest, PathGlobs(["**/*.whl"])))
dists_pex = await Get(
Pex,
PexRequest(
output_filename="local_dists.pex",
requirements=PexRequirements(wheels),
interpreter_constraints=request.interpreter_constraints,
additional_inputs=wheels_digest,
internal_only=True,
),
)
# We check source roots in reverse lexicographic order,
# so we'll find the innermost root that matches.
source_roots = list(reversed(sorted(request.sources.source_roots)))
remaining_sources = set(request.sources.source_files.files)
unrooted_files_set = set(request.sources.source_files.unrooted_files)
for source in request.sources.source_files.files:
if source not in unrooted_files_set:
for source_root in source_roots:
if (
source.startswith(source_root)
and os.path.relpath(source, source_root) in provided_files
):
remaining_sources.remove(source)
remaining_sources_snapshot = await Get(
Snapshot,
DigestSubset(
request.sources.source_files.snapshot.digest, PathGlobs(sorted(remaining_sources))
),
)
subtracted_sources = PythonSourceFiles(
SourceFiles(remaining_sources_snapshot, request.sources.source_files.unrooted_files),
request.sources.source_roots,
)
return LocalDistsPex(dists_pex, subtracted_sources)
def rules():
return (*collect_rules(), *pex_rules())
|
"""
Save module for the console.
"""
import json
from typing import List, Optional
from spotdl.utils.search import parse_query
from spotdl.utils.m3u import create_m3u_file
def save(
query: List[str],
save_path: str,
downloader,
m3u_file: Optional[str] = None,
) -> None:
"""
Save metadata from spotify to the disk.
### Arguments
- query: list of strings to search for.
- save_path: Path to the file to save the metadata to.
- threads: Number of threads to use.
### Notes
- This function is multi-threaded.
"""
# Parse the query
songs = parse_query(query, downloader.threads)
# Convert the songs to JSON
save_data = [song.json for song in songs]
# Save the songs to a file
with open(save_path, "w", encoding="utf-8") as save_file:
json.dump(save_data, save_file, indent=4, ensure_ascii=False)
if m3u_file:
create_m3u_file(
m3u_file, songs, downloader.output, downloader.output_format, False
)
downloader.progress_handler.log(
f"Saved {len(save_data)} song{"s" if len(save_data) > 1 else ""} to {save_path}"
)
| """
Save module for the console.
"""
import json
from typing import List, Optional
from spotdl.utils.search import parse_query
from spotdl.utils.m3u import create_m3u_file
def save(
query: List[str],
save_path: str,
downloader,
m3u_file: Optional[str] = None,
) -> None:
"""
Save metadata from spotify to the disk.
### Arguments
- query: list of strings to search for.
- save_path: Path to the file to save the metadata to.
- threads: Number of threads to use.
### Notes
- This function is multi-threaded.
"""
# Parse the query
songs = parse_query(query, downloader.threads)
# Convert the songs to JSON
save_data = [song.json for song in songs]
# Save the songs to a file
with open(save_path, "w", encoding="utf-8") as save_file:
json.dump(save_data, save_file, indent=4, ensure_ascii=False)
if m3u_file:
create_m3u_file(
m3u_file, songs, downloader.output, downloader.output_format, False
)
downloader.progress_handler.log(
f"Saved {len(save_data)} song{'s' if len(save_data) > 1 else ''} to {save_path}"
)
|
import asyncio
import datetime
import importlib
import itertools
import os
import random
import re
import shutil
import signal
import subprocess
import sys
import time
import zipfile
import discord
import psutil
from src import const
from src.algorithms import levenshtein_distance
from src.bc import DoNotUpdateFlag
from src.bot_cache import BotCache
from src.bot_instance import BotInstance
from src.config import Command, Config, GuildSettings, SecretConfig, User, bc
from src.embed import DiscordEmbed
from src.emoji import get_clock_emoji
from src.ff import FF
from src.info import BotInfo
from src.log import log
from src.mail import Mail
from src.markov import Markov
from src.message import Msg
from src.reminder import Reminder
from src.repl import Repl
from src.utils import Util
from src.voice import VoiceRoutine
class WalBot(discord.Client):
def __init__(self, name: str, config: Config, secret_config: SecretConfig, intents: discord.Intents) -> None:
super().__init__(intents=intents)
self.repl = None
bc.instance_name = self.instance_name = name
self.config = config
self.secret_config = secret_config
self.bot_cache = BotCache(True)
self.loop.create_task(self._process_reminders())
self.loop.create_task(VoiceRoutine(self.bot_cache).start())
self.loop.create_task(self._repl_routine())
bc.config = self.config
bc.commands = self.config.commands
bc.background_loop = self.loop
bc.latency = lambda: self.latency
bc.change_status = self._change_status
bc.change_presence = self.change_presence
bc.close = self.close
bc.secret_config = self.secret_config
bc.info = BotInfo()
bc.plugin_manager.register()
bc.fetch_channel = self.fetch_channel
if not bc.args.fast_start:
log.debug("Started Markov model checks...")
if bc.markov.check():
log.info("Markov model has passed all checks")
else:
log.info("Markov model has not passed checks, but all errors were fixed")
async def _bot_runner_task(self, *args, **kwargs):
try:
await self.start(*args, **kwargs)
finally:
if not self.is_closed():
await self.close()
def run(self, *args, **kwargs):
# Sightly patched implementation from discord.py discord.Client (parent) class
# Reference: https://github.com/Rapptz/discord.py/blob/master/discord/client.py
loop = self.loop
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
except NotImplementedError:
pass
asyncio.ensure_future(self._bot_runner_task(*args, *kwargs), loop=loop)
try:
loop.run_forever()
except KeyboardInterrupt:
loop.stop()
log.info('Received signal to terminate bot and event loop')
log.info("Shutting down the bot...")
tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()}
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
for task in tasks:
if not task.cancelled():
log.error("Asynchronous task cancel failed!")
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(self._on_shutdown())
loop.close()
log.info("Bot is shut down!")
async def _on_shutdown(self) -> None:
if self.repl is not None:
self.repl.stop()
for event in bc.background_events:
event.cancel()
bc.background_loop = None
await bc.plugin_manager.broadcast_command("close")
@Mail.send_exception_info_to_admin_emails_async
async def _precompile(self) -> None:
log.debug("Started precompiling functions...")
levenshtein_distance("", "")
log.debug("Finished precompiling functions")
async def _change_status(self, string: str, type_: discord.ActivityType) -> None:
await self.change_presence(activity=discord.Activity(name=string, type=type_))
async def _config_autosave(self) -> None:
await self.wait_until_ready()
index = 1
while not self.is_closed():
await asyncio.sleep(self.config.saving["period"] * 60)
if index % self.config.saving["backup"]["period"] == 0:
self.config.backup(const.CONFIG_PATH, const.MARKOV_PATH)
self.config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH)
index += 1
async def _process_reminders_iteration(self) -> None:
log.debug3("Reminder processing iteration has started")
now = datetime.datetime.now().replace(second=0).strftime(const.REMINDER_DATETIME_FORMAT)
to_remove = []
to_append = []
reminder_do_not_update_flag = False
for key, rem in self.config.reminders.items():
for i in range(len(rem.prereminders_list)):
prereminder = rem.prereminders_list[i]
used_prereminder = rem.used_prereminders_list[i]
if prereminder == 0 or used_prereminder:
continue
prereminder_time = (
datetime.datetime.now().replace(second=0) + datetime.timedelta(minutes=prereminder))
if rem == prereminder_time.strftime(const.REMINDER_DATETIME_FORMAT):
channel = self.get_channel(rem.channel_id)
e = DiscordEmbed()
clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
e.title(f"{prereminder} minutes left until reminder")
e.description(rem.message + "\n" + rem.notes)
e.color(random.randint(0x000000, 0xffffff))
e.timestamp(
datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=prereminder))
e.footer(text=rem.author)
await channel.send("", embed=e.get())
rem.used_prereminders_list[i] = True
if rem == now:
channel = self.get_channel(rem.channel_id)
clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
e = DiscordEmbed()
e.title(f"{clock_emoji} You asked to remind")
e.description(rem.message + "\n" + rem.notes)
e.color(random.randint(0x000000, 0xffffff))
e.timestamp(datetime.datetime.now(datetime.timezone.utc))
e.footer(text=rem.author)
await channel.send(' '.join(rem.ping_users if rem.ping_users else ""), embed=e.get())
for user_id in rem.whisper_users:
await Msg.send_direct_message(
self.get_user(user_id), f"You asked to remind at {now} -> {rem.message}", False)
if rem.email_users:
mail = Mail(self.secret_config)
mail.send(
rem.email_users,
f"Reminder: {rem.message}",
f"You asked to remind at {now} -> {rem.message}")
if rem.repeat_after > 0:
new_time = datetime.datetime.now().replace(second=0, microsecond=0) + rem.get_next_event_delta()
new_time = new_time.strftime(const.REMINDER_DATETIME_FORMAT)
to_append.append(
Reminder(str(new_time), rem.message, rem.channel_id, rem.author, rem.time_created))
to_append[-1].repeat_after = rem.repeat_after
to_append[-1].repeat_interval_measure = rem.repeat_interval_measure
to_append[-1].prereminders_list = rem.prereminders_list
to_append[-1].used_prereminders_list = [False] * len(rem.prereminders_list)
to_append[-1].notes = rem.notes
log.debug2(f"Scheduled renew of recurring reminder - old id: {key}")
to_remove.append(key)
elif rem < now:
log.debug2(f"Scheduled reminder with id {key} removal")
to_remove.append(key)
else:
prereminders_delay = 0
if rem.prereminders_list:
prereminders_delay = max(rem.prereminders_list)
if ((datetime.datetime.strptime(rem.time, const.REMINDER_DATETIME_FORMAT) - datetime.datetime.now())
< datetime.timedelta(minutes=(5 + prereminders_delay / 60))):
reminder_do_not_update_flag = True
bc.do_not_update[DoNotUpdateFlag.REMINDER] = reminder_do_not_update_flag
for key in to_remove:
self.config.reminders.pop(key)
for item in to_append:
key = self.config.ids["reminder"]
self.config.reminders[key] = item
self.config.ids["reminder"] += 1
log.debug3("Reminder processing iteration has finished")
@Mail.send_exception_info_to_admin_emails_async
async def _process_reminders(self) -> None:
await self.wait_until_ready()
while not self.is_closed():
await self._process_reminders_iteration()
await asyncio.sleep(const.REMINDER_POLLING_INTERVAL)
async def _repl_routine(self) -> None:
self.repl = Repl(self.config.repl["port"])
await self.repl.start()
@Mail.send_exception_info_to_admin_emails_async
async def on_ready(self) -> None:
self._load_plugins()
log.info(
f"Logged in as: {self.user.name} {self.user.id} ({self.__class__.__name__}), "
f"instance: {self.instance_name}")
self.bot_cache.update({
"ready": True,
})
self.bot_cache.dump_to_file()
bc.guilds = self.guilds
for guild in self.guilds:
if guild.id not in self.config.guilds.keys():
self.config.guilds[guild.id] = GuildSettings(guild.id)
bc.bot_user = self.user
self.loop.create_task(self._config_autosave())
self.loop.create_task(self._precompile())
def _load_plugins(self) -> None:
for plugin_name in bc.plugin_manager.get_plugins_list():
if plugin_name not in self.config.plugins.keys():
self.config.plugins[plugin_name] = {
"autostart": False,
}
for plugin_name, plugin_state in self.config.plugins.items():
if plugin_state["autostart"]:
asyncio.create_task(bc.plugin_manager.send_command(plugin_name, "init"))
@Mail.send_exception_info_to_admin_emails_async
async def on_message(self, message: discord.Message) -> None:
await bc.plugin_manager.broadcast_command("on_message", message)
if self.config.guilds[message.channel.guild.id].ignored:
return
bc.message_buffer.push(message)
log.info(f"<{message.id}> {message.author} -> {message.content}")
if message.author.id == self.user.id:
return
if isinstance(message.channel, discord.DMChannel):
return
if message.channel.guild.id is None:
return
if self.config.guilds[message.channel.guild.id].is_whitelisted:
if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
return
if message.author.id not in self.config.users.keys():
self.config.users[message.author.id] = User(message.author.id)
if self.config.users[message.author.id].permission_level < 0:
return
if message.content.startswith(self.config.commands_prefix):
await self._process_command(message)
else:
await self._process_regular_message(message)
await self._process_repetitions(message)
@Mail.send_exception_info_to_admin_emails_async
async def on_message_edit(self, old_message: discord.Message, message: discord.Message) -> None:
if message.embeds != old_message.embeds:
log.info(f"<{message.id}> (edit, embed update) {message.author} -> {message.content}")
return
if self.config.guilds[message.channel.guild.id].ignored:
return
bc.message_buffer.push(message)
log.info(f"<{message.id}> (edit) {message.author} -> {message.content}")
if message.author.id == self.user.id:
return
if isinstance(message.channel, discord.DMChannel):
return
if message.channel.guild.id is None:
return
if self.config.guilds[message.channel.guild.id].is_whitelisted:
if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
return
if message.author.id not in self.config.users.keys():
self.config.users[message.author.id] = User(message.author.id)
if self.config.users[message.author.id].permission_level < 0:
return
if message.content.startswith(self.config.commands_prefix):
await self._process_command(message)
async def _process_repetitions(self, message: discord.Message) -> None:
m = tuple(bc.message_buffer.get(message.channel.id, i) for i in range(3))
if (all(m) and m[0].content and m[0].content == m[1].content == m[2].content and
(m[0].author.id != self.user.id and
m[1].author.id != self.user.id and
m[2].author.id != self.user.id)):
await message.channel.send(m[0].content)
async def _process_regular_message(self, message: discord.Message) -> None:
channel_id = message.channel.id
if isinstance(message.channel, discord.Thread): # Inherit parent channel settings for threads
channel_id = message.channel.parent_id
if (self.user.mentioned_in(message) or self.user.id in [
member.id for member in list(
itertools.chain(*[role.members for role in message.role_mentions]))]):
if channel_id in self.config.guilds[message.channel.guild.id].markov_responses_whitelist:
result = await self.config.disable_pings_in_response(message, bc.markov.generate())
await message.channel.send(message.author.mention + ' ' + result)
elif channel_id in self.config.guilds[message.channel.guild.id].markov_logging_whitelist:
needs_to_be_added = True
for ignored_prefix in bc.markov.ignored_prefixes.values():
if message.content.startswith(ignored_prefix):
needs_to_be_added = False
break
if needs_to_be_added:
bc.markov.add_string(message.content)
if channel_id in self.config.guilds[message.channel.guild.id].responses_whitelist:
responses_count = 0
for response in self.config.responses.values():
if responses_count >= const.MAX_BOT_RESPONSES_ON_ONE_MESSAGE:
break
if re.search(response.regex, message.content):
text = await Command.process_subcommands(
response.text, message, self.config.users[message.author.id])
await Msg.reply(message, text, False)
responses_count += 1
if channel_id in self.config.guilds[message.channel.guild.id].reactions_whitelist:
for reaction in self.config.reactions.values():
if re.search(reaction.regex, message.content):
log.info("Added reaction " + reaction.emoji)
try:
await message.add_reaction(reaction.emoji)
except discord.HTTPException:
pass
async def _process_command(self, message: discord.Message) -> None:
command = message.content.split(' ')
command = list(filter(None, command))
command[0] = command[0][1:]
if not command[0]:
return log.debug("Ignoring empty command")
if command[0] not in self.config.commands.data.keys():
if command[0] in self.config.commands.aliases.keys():
command[0] = self.config.commands.aliases[command[0]]
else:
await message.channel.send(
f"Unknown command '{command[0]}', "
f"probably you meant '{self._suggest_similar_command(command[0])}'")
return
if command[0] not in (
"poll",
"timer",
"stopwatch",
"vqpush",
):
timeout_error, _ = await Util.run_function_with_time_limit(
self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id]),
const.MAX_COMMAND_EXECUTION_TIME)
if command[0] not in (
"silent",
) and timeout_error:
await message.channel.send(f"Command '{" ".join(command)}' took too long to execute")
else:
await self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id])
def _suggest_similar_command(self, unknown_command: str) -> str:
min_dist = 100000
suggestion = ""
for command in self.config.commands.data.keys():
dist = levenshtein_distance(unknown_command, command)
if dist < min_dist:
suggestion = command
min_dist = dist
for command in self.config.commands.aliases.keys():
dist = levenshtein_distance(unknown_command, command)
if dist < min_dist:
suggestion = command
min_dist = dist
return suggestion
async def on_raw_message_edit(self, payload: discord.RawMessageUpdateEvent) -> None:
try:
log.info(f"<{payload.message_id}> (raw_edit) {payload.data["author"]["username"]}#"
f"{payload.data["author"]["discriminator"]} -> {payload.data["content"]}")
except KeyError:
pass
async def on_raw_message_delete(self, payload: discord.RawMessageDeleteEvent) -> None:
log.info(f"<{payload.message_id}> (delete)")
class DiscordBotInstance(BotInstance):
def start(self, args, main_bot=True):
# Check whether bot is already running
bot_cache = BotCache(main_bot).parse()
if bot_cache is not None:
pid = bot_cache["pid"]
if pid is not None and psutil.pid_exists(pid):
return log.error("Bot is already running!")
# Some variable initializations
config = None
secret_config = None
bc.restart_flag = False
bc.args = args
# Handle --nohup flag
if sys.platform in ("linux", "darwin") and args.nohup:
fd = os.open(const.NOHUP_FILE_PATH, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
log.info(f"Output is redirected to {const.NOHUP_FILE_PATH}")
os.dup2(fd, sys.stdout.fileno())
os.dup2(sys.stdout.fileno(), sys.stderr.fileno())
os.close(fd)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
# Selecting YAML parser
bc.yaml_loader, bc.yaml_dumper = Util.get_yaml(verbose=True)
# Saving application pd in order to safely stop it later
BotCache(main_bot).dump_to_file()
# Executing patch tool if it is necessary
if args.patch:
cmd = f"'{sys.executable}" "{os.path.dirname(__file__) + "/../tools/patch.py"}' all"
log.info("Executing patch tool: " + cmd)
subprocess.call(cmd)
# Read configuration files
config = Util.read_config_file(const.CONFIG_PATH)
if config is None:
config = Config()
secret_config = Util.read_config_file(const.SECRET_CONFIG_PATH)
if secret_config is None:
secret_config = SecretConfig()
bc.markov = Util.read_config_file(const.MARKOV_PATH)
if bc.markov is None and os.path.isdir("backup"):
# Check available backups
markov_backups = sorted(
[x for x in os.listdir("backup") if x.startswith("markov_") and x.endswith(".zip")])
if markov_backups:
# Restore Markov model from backup
with zipfile.ZipFile("backup/" + markov_backups[-1], 'r') as zip_ref:
zip_ref.extractall(".")
log.info(f"Restoring Markov model from backup/{markov_backups[-1]}")
shutil.move(markov_backups[-1][:-4], "markov.yaml")
bc.markov = Util.read_config_file(const.MARKOV_PATH)
if bc.markov is None:
bc.markov = Markov()
log.warning("Failed to restore Markov model from backup. Creating new Markov model...")
if bc.markov is None:
bc.markov = Markov()
log.info("Created empty Markov model")
# Check config versions
ok = True
ok &= Util.check_version(
"discord.py", discord.__version__, const.DISCORD_LIB_VERSION,
solutions=[
"execute: python -m pip install -r requirements.txt",
])
ok &= Util.check_version(
"Config", config.version, const.CONFIG_VERSION,
solutions=[
"run patch tool",
"remove config.yaml (settings will be lost!)",
])
ok &= Util.check_version(
"Markov config", bc.markov.version, const.MARKOV_CONFIG_VERSION,
solutions=[
"run patch tool",
"remove markov.yaml (Markov model will be lost!)",
])
ok &= Util.check_version(
"Secret config", secret_config.version, const.SECRET_CONFIG_VERSION,
solutions=[
"run patch tool",
"remove secret.yaml (your Discord authentication token will be lost!)",
])
if main_bot and not ok:
sys.exit(const.ExitStatus.CONFIG_FILE_ERROR)
config.commands.update()
# Checking authentication token
if secret_config.token is None:
secret_config = SecretConfig()
if not FF.is_enabled("WALBOT_FEATURE_NEW_CONFIG"):
secret_config.token = input("Enter your token: ")
# Constructing bot instance
if main_bot:
intents = discord.Intents.all()
walbot = WalBot(args.name, config, secret_config, intents=intents)
else:
walbot = importlib.import_module("src.minibot").MiniWalBot(args.name, config, secret_config, args.message)
# Starting the bot
try:
walbot.run(secret_config.token)
except discord.errors.PrivilegedIntentsRequired:
log.error("Privileged Gateway Intents are not enabled! Shutting down the bot...")
# After stopping the bot
log.info("Bot is disconnected!")
if main_bot:
config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH, wait=True)
BotCache(main_bot).remove()
if bc.restart_flag:
cmd = f"'{sys.executable}" "{os.path.dirname(os.path.dirname(__file__)) + "/walbot.py"}' start"
log.info("Calling: " + cmd)
if sys.platform in ("linux", "darwin"):
fork = os.fork()
if fork == 0:
subprocess.call(cmd)
elif fork > 0:
log.info("Stopping current instance of the bot")
sys.exit(const.ExitStatus.NO_ERROR)
else:
subprocess.call(cmd)
def stop(self, _, main_bot=True):
if not BotCache(main_bot).exists():
return log.error("Could not stop the bot (cache file does not exist)")
bot_cache = BotCache(main_bot).parse()
pid = bot_cache["pid"]
if pid is None:
return log.error("Could not stop the bot (cache file does not contain pid)")
if psutil.pid_exists(pid):
if sys.platform == "win32":
# Reference to the original solution:
# https://stackoverflow.com/a/64357453
import ctypes
kernel = ctypes.windll.kernel32
kernel.FreeConsole()
kernel.AttachConsole(pid)
kernel.SetConsoleCtrlHandler(None, 1)
kernel.GenerateConsoleCtrlEvent(0, 0)
else:
os.kill(pid, signal.SIGINT)
while psutil.pid_exists(pid):
log.debug("Bot is still running. Please, wait...")
time.sleep(0.5)
log.info("Bot is stopped!")
else:
log.error("Could not stop the bot (bot is not running)")
BotCache(main_bot).remove()
| import asyncio
import datetime
import importlib
import itertools
import os
import random
import re
import shutil
import signal
import subprocess
import sys
import time
import zipfile
import discord
import psutil
from src import const
from src.algorithms import levenshtein_distance
from src.bc import DoNotUpdateFlag
from src.bot_cache import BotCache
from src.bot_instance import BotInstance
from src.config import Command, Config, GuildSettings, SecretConfig, User, bc
from src.embed import DiscordEmbed
from src.emoji import get_clock_emoji
from src.ff import FF
from src.info import BotInfo
from src.log import log
from src.mail import Mail
from src.markov import Markov
from src.message import Msg
from src.reminder import Reminder
from src.repl import Repl
from src.utils import Util
from src.voice import VoiceRoutine
class WalBot(discord.Client):
def __init__(self, name: str, config: Config, secret_config: SecretConfig, intents: discord.Intents) -> None:
super().__init__(intents=intents)
self.repl = None
bc.instance_name = self.instance_name = name
self.config = config
self.secret_config = secret_config
self.bot_cache = BotCache(True)
self.loop.create_task(self._process_reminders())
self.loop.create_task(VoiceRoutine(self.bot_cache).start())
self.loop.create_task(self._repl_routine())
bc.config = self.config
bc.commands = self.config.commands
bc.background_loop = self.loop
bc.latency = lambda: self.latency
bc.change_status = self._change_status
bc.change_presence = self.change_presence
bc.close = self.close
bc.secret_config = self.secret_config
bc.info = BotInfo()
bc.plugin_manager.register()
bc.fetch_channel = self.fetch_channel
if not bc.args.fast_start:
log.debug("Started Markov model checks...")
if bc.markov.check():
log.info("Markov model has passed all checks")
else:
log.info("Markov model has not passed checks, but all errors were fixed")
async def _bot_runner_task(self, *args, **kwargs):
try:
await self.start(*args, **kwargs)
finally:
if not self.is_closed():
await self.close()
def run(self, *args, **kwargs):
# Sightly patched implementation from discord.py discord.Client (parent) class
# Reference: https://github.com/Rapptz/discord.py/blob/master/discord/client.py
loop = self.loop
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
except NotImplementedError:
pass
asyncio.ensure_future(self._bot_runner_task(*args, *kwargs), loop=loop)
try:
loop.run_forever()
except KeyboardInterrupt:
loop.stop()
log.info('Received signal to terminate bot and event loop')
log.info("Shutting down the bot...")
tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()}
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
for task in tasks:
if not task.cancelled():
log.error("Asynchronous task cancel failed!")
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(self._on_shutdown())
loop.close()
log.info("Bot is shut down!")
async def _on_shutdown(self) -> None:
if self.repl is not None:
self.repl.stop()
for event in bc.background_events:
event.cancel()
bc.background_loop = None
await bc.plugin_manager.broadcast_command("close")
@Mail.send_exception_info_to_admin_emails_async
async def _precompile(self) -> None:
log.debug("Started precompiling functions...")
levenshtein_distance("", "")
log.debug("Finished precompiling functions")
async def _change_status(self, string: str, type_: discord.ActivityType) -> None:
await self.change_presence(activity=discord.Activity(name=string, type=type_))
async def _config_autosave(self) -> None:
await self.wait_until_ready()
index = 1
while not self.is_closed():
await asyncio.sleep(self.config.saving["period"] * 60)
if index % self.config.saving["backup"]["period"] == 0:
self.config.backup(const.CONFIG_PATH, const.MARKOV_PATH)
self.config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH)
index += 1
async def _process_reminders_iteration(self) -> None:
    """One polling pass over all reminders: fire pre-reminders, fire due reminders,
    reschedule recurring ones and drop expired ones."""
    log.debug3("Reminder processing iteration has started")
    # Current minute, formatted the same way reminder times are stored.
    now = datetime.datetime.now().replace(second=0).strftime(const.REMINDER_DATETIME_FORMAT)
    to_remove = []
    to_append = []
    reminder_do_not_update_flag = False
    for key, rem in self.config.reminders.items():
        # Pre-reminders: "N minutes left" notices sent ahead of the main reminder.
        for i in range(len(rem.prereminders_list)):
            prereminder = rem.prereminders_list[i]
            used_prereminder = rem.used_prereminders_list[i]
            if prereminder == 0 or used_prereminder:
                continue
            prereminder_time = (
                datetime.datetime.now().replace(second=0) + datetime.timedelta(minutes=prereminder))
            # NOTE(review): comparing a Reminder to a time string — presumably
            # Reminder.__eq__ compares its stored time field; confirm.
            if rem == prereminder_time.strftime(const.REMINDER_DATETIME_FORMAT):
                channel = self.get_channel(rem.channel_id)
                e = DiscordEmbed()
                clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
                e.title(f"{prereminder} minutes left until reminder")
                e.description(rem.message + "\n" + rem.notes)
                e.color(random.randint(0x000000, 0xffffff))
                e.timestamp(
                    datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=prereminder))
                e.footer(text=rem.author)
                await channel.send("", embed=e.get())
                rem.used_prereminders_list[i] = True
        if rem == now:
            # The reminder is due this minute: post the embed, then the optional
            # DM whispers and e-mails.
            channel = self.get_channel(rem.channel_id)
            clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
            e = DiscordEmbed()
            e.title(f"{clock_emoji} You asked to remind")
            e.description(rem.message + "\n" + rem.notes)
            e.color(random.randint(0x000000, 0xffffff))
            e.timestamp(datetime.datetime.now(datetime.timezone.utc))
            e.footer(text=rem.author)
            await channel.send(' '.join(rem.ping_users if rem.ping_users else ""), embed=e.get())
            for user_id in rem.whisper_users:
                await Msg.send_direct_message(
                    self.get_user(user_id), f"You asked to remind at {now} -> {rem.message}", False)
            if rem.email_users:
                mail = Mail(self.secret_config)
                mail.send(
                    rem.email_users,
                    f"Reminder: {rem.message}",
                    f"You asked to remind at {now} -> {rem.message}")
            if rem.repeat_after > 0:
                # Recurring reminder: schedule a fresh copy at the next occurrence
                # with the pre-reminder "used" flags reset.
                new_time = datetime.datetime.now().replace(second=0, microsecond=0) + rem.get_next_event_delta()
                new_time = new_time.strftime(const.REMINDER_DATETIME_FORMAT)
                to_append.append(
                    Reminder(str(new_time), rem.message, rem.channel_id, rem.author, rem.time_created))
                to_append[-1].repeat_after = rem.repeat_after
                to_append[-1].repeat_interval_measure = rem.repeat_interval_measure
                to_append[-1].prereminders_list = rem.prereminders_list
                to_append[-1].used_prereminders_list = [False] * len(rem.prereminders_list)
                to_append[-1].notes = rem.notes
                log.debug2(f"Scheduled renew of recurring reminder - old id: {key}")
            to_remove.append(key)
        elif rem < now:
            # Already in the past (bot was down when it fired) — drop it.
            log.debug2(f"Scheduled reminder with id {key} removal")
            to_remove.append(key)
        else:
            # Reminder is still in the future: block config updates while it (or any
            # of its pre-reminders) is about to fire.
            prereminders_delay = 0
            if rem.prereminders_list:
                prereminders_delay = max(rem.prereminders_list)
            if ((datetime.datetime.strptime(rem.time, const.REMINDER_DATETIME_FORMAT) - datetime.datetime.now())
                    < datetime.timedelta(minutes=(5 + prereminders_delay / 60))):
                reminder_do_not_update_flag = True
    bc.do_not_update[DoNotUpdateFlag.REMINDER] = reminder_do_not_update_flag
    # Apply removals/additions only after iteration to avoid mutating the dict mid-loop.
    for key in to_remove:
        self.config.reminders.pop(key)
    for item in to_append:
        key = self.config.ids["reminder"]
        self.config.reminders[key] = item
        self.config.ids["reminder"] += 1
    log.debug3("Reminder processing iteration has finished")
@Mail.send_exception_info_to_admin_emails_async
async def _process_reminders(self) -> None:
    """Background task: poll and dispatch due reminders until the bot closes."""
    await self.wait_until_ready()
    while not self.is_closed():
        await self._process_reminders_iteration()
        await asyncio.sleep(const.REMINDER_POLLING_INTERVAL)
async def _repl_routine(self) -> None:
    """Start the debug REPL server on the configured port."""
    self.repl = Repl(self.config.repl["port"])
    await self.repl.start()
@Mail.send_exception_info_to_admin_emails_async
async def on_ready(self) -> None:
    """Gateway 'ready' handler: load plugins, sync caches and start periodic tasks."""
    self._load_plugins()
    log.info(
        f"Logged in as: {self.user.name} {self.user.id} ({self.__class__.__name__}), "
        f"instance: {self.instance_name}")
    self.bot_cache.update({"ready": True})
    self.bot_cache.dump_to_file()
    bc.guilds = self.guilds
    # Make sure every guild the bot is a member of has a settings entry.
    for guild in self.guilds:
        if guild.id not in self.config.guilds:
            self.config.guilds[guild.id] = GuildSettings(guild.id)
    bc.bot_user = self.user
    self.loop.create_task(self._config_autosave())
    self.loop.create_task(self._precompile())
def _load_plugins(self) -> None:
    """Register newly discovered plugins in the config and autostart the enabled ones."""
    for plugin_name in bc.plugin_manager.get_plugins_list():
        self.config.plugins.setdefault(plugin_name, {"autostart": False})
    for plugin_name, plugin_state in self.config.plugins.items():
        if plugin_state["autostart"]:
            asyncio.create_task(bc.plugin_manager.send_command(plugin_name, "init"))
@Mail.send_exception_info_to_admin_emails_async
async def on_message(self, message: discord.Message) -> None:
    """Main message handler: filters out ineligible messages, then dispatches either
    a command or regular-message processing."""
    await bc.plugin_manager.broadcast_command("on_message", message)
    # BUG FIX: DM channels have no `.guild`, so the DM check must run before any
    # `message.channel.guild` access (previously raised AttributeError on every DM).
    if isinstance(message.channel, discord.DMChannel):
        return
    if self.config.guilds[message.channel.guild.id].ignored:
        return
    bc.message_buffer.push(message)
    log.info(f"<{message.id}> {message.author} -> {message.content}")
    if message.author.id == self.user.id:
        return  # never react to our own messages
    if message.channel.guild.id is None:
        return
    guild_settings = self.config.guilds[message.channel.guild.id]
    if guild_settings.is_whitelisted and message.channel.id not in guild_settings.whitelist:
        return
    if message.author.id not in self.config.users.keys():
        self.config.users[message.author.id] = User(message.author.id)
    if self.config.users[message.author.id].permission_level < 0:
        return  # banned users are ignored entirely
    if message.content.startswith(self.config.commands_prefix):
        await self._process_command(message)
    else:
        await self._process_regular_message(message)
        await self._process_repetitions(message)
@Mail.send_exception_info_to_admin_emails_async
async def on_message_edit(self, old_message: discord.Message, message: discord.Message) -> None:
    """Edited-message handler: re-logs the message and re-dispatches commands."""
    if message.embeds != old_message.embeds:
        # Embed-only updates (e.g. link previews loading) are logged and skipped.
        log.info(f"<{message.id}> (edit, embed update) {message.author} -> {message.content}")
        return
    # BUG FIX: DM channels have no `.guild`, so the DM check must run before any
    # `message.channel.guild` access (previously raised AttributeError on DM edits).
    if isinstance(message.channel, discord.DMChannel):
        return
    if self.config.guilds[message.channel.guild.id].ignored:
        return
    bc.message_buffer.push(message)
    log.info(f"<{message.id}> (edit) {message.author} -> {message.content}")
    if message.author.id == self.user.id:
        return
    if message.channel.guild.id is None:
        return
    guild_settings = self.config.guilds[message.channel.guild.id]
    if guild_settings.is_whitelisted and message.channel.id not in guild_settings.whitelist:
        return
    if message.author.id not in self.config.users.keys():
        self.config.users[message.author.id] = User(message.author.id)
    if self.config.users[message.author.id].permission_level < 0:
        return
    if message.content.startswith(self.config.commands_prefix):
        await self._process_command(message)
async def _process_repetitions(self, message: discord.Message) -> None:
    """Join in when the last three channel messages are identical, non-empty,
    and none of them was written by the bot itself."""
    last_three = [bc.message_buffer.get(message.channel.id, i) for i in range(3)]
    if not all(last_three):
        return
    if not last_three[0].content:
        return
    if len({m.content for m in last_three}) != 1:
        return
    if any(m.author.id == self.user.id for m in last_three):
        return
    await message.channel.send(last_three[0].content)
async def _process_regular_message(self, message: discord.Message) -> None:
    """Handle a non-command message: Markov replies/learning, canned responses and
    automatic reactions, each gated by its per-channel whitelist."""
    channel_id = message.channel.id
    if isinstance(message.channel, discord.Thread):  # Inherit parent channel settings for threads
        channel_id = message.channel.parent_id
    # Mentioned directly or via any role the bot has → answer with a Markov-generated line.
    if (self.user.mentioned_in(message) or self.user.id in [
            member.id for member in list(
                itertools.chain(*[role.members for role in message.role_mentions]))]):
        if channel_id in self.config.guilds[message.channel.guild.id].markov_responses_whitelist:
            result = await self.config.disable_pings_in_response(message, bc.markov.generate())
            await message.channel.send(message.author.mention + ' ' + result)
    elif channel_id in self.config.guilds[message.channel.guild.id].markov_logging_whitelist:
        # Not addressed to the bot: feed the message into the Markov model unless
        # it starts with one of the ignored prefixes.
        needs_to_be_added = True
        for ignored_prefix in bc.markov.ignored_prefixes.values():
            if message.content.startswith(ignored_prefix):
                needs_to_be_added = False
                break
        if needs_to_be_added:
            bc.markov.add_string(message.content)
    if channel_id in self.config.guilds[message.channel.guild.id].responses_whitelist:
        # Canned regex-triggered responses, capped per message.
        responses_count = 0
        for response in self.config.responses.values():
            if responses_count >= const.MAX_BOT_RESPONSES_ON_ONE_MESSAGE:
                break
            if re.search(response.regex, message.content):
                text = await Command.process_subcommands(
                    response.text, message, self.config.users[message.author.id])
                await Msg.reply(message, text, False)
                responses_count += 1
    if channel_id in self.config.guilds[message.channel.guild.id].reactions_whitelist:
        # Regex-triggered emoji reactions; HTTP failures (e.g. deleted message) are ignored.
        for reaction in self.config.reactions.values():
            if re.search(reaction.regex, message.content):
                log.info("Added reaction " + reaction.emoji)
                try:
                    await message.add_reaction(reaction.emoji)
                except discord.HTTPException:
                    pass
async def _process_command(self, message: discord.Message) -> None:
    """Parse a prefixed command message, resolve aliases, and execute it — with a
    time limit for every command except the deliberately long-running ones."""
    command = message.content.split(' ')
    command = list(filter(None, command))  # drop empty tokens from repeated spaces
    command[0] = command[0][1:]  # strip the command prefix character
    if not command[0]:
        return log.debug("Ignoring empty command")
    if command[0] not in self.config.commands.data.keys():
        if command[0] in self.config.commands.aliases.keys():
            # Resolve alias to its canonical command name.
            command[0] = self.config.commands.aliases[command[0]]
        else:
            await message.channel.send(
                f"Unknown command '{command[0]}', "
                f"probably you meant '{self._suggest_similar_command(command[0])}'")
            return
    # These commands are expected to run longer than the limit, so they are exempt.
    if command[0] not in (
        "poll",
        "timer",
        "stopwatch",
        "vqpush",
    ):
        timeout_error, _ = await Util.run_function_with_time_limit(
            self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id]),
            const.MAX_COMMAND_EXECUTION_TIME)
        # 'silent' intentionally produces no channel output, so no timeout notice either.
        if command[0] not in (
            "silent",
        ) and timeout_error:
            await message.channel.send(f"Command '{' '.join(command)}' took too long to execute")
    else:
        await self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id])
def _suggest_similar_command(self, unknown_command: str) -> str:
    """Return the known command or alias closest to `unknown_command` by Levenshtein
    distance (first match wins ties; empty string if there are no candidates)."""
    # Deduplicated: commands and aliases used to be scanned by two identical loops.
    candidates = itertools.chain(
        self.config.commands.data.keys(),
        self.config.commands.aliases.keys(),
    )
    min_dist = 100000
    suggestion = ""
    for command in candidates:
        dist = levenshtein_distance(unknown_command, command)
        if dist < min_dist:
            suggestion = command
            min_dist = dist
    return suggestion
async def on_raw_message_edit(self, payload: discord.RawMessageUpdateEvent) -> None:
    """Log raw edit events; payloads missing author/content fields are ignored."""
    data = payload.data
    try:
        author = f"{data['author']['username']}#{data['author']['discriminator']}"
        log.info(f"<{payload.message_id}> (raw_edit) {author} -> {data['content']}")
    except KeyError:
        pass
async def on_raw_message_delete(self, payload: discord.RawMessageDeleteEvent) -> None:
    """Log every deletion, including messages absent from the local cache."""
    log.info(f"<{payload.message_id}> (delete)")
class DiscordBotInstance(BotInstance):
    """Process-level lifecycle manager for the Discord bot: start (with config
    loading, version checks and optional daemonizing) and stop (via PID file)."""

    def start(self, args, main_bot=True):
        """Start the bot process. `main_bot` selects the full bot vs. the mini bot."""
        # Check whether bot is already running
        bot_cache = BotCache(main_bot).parse()
        if bot_cache is not None:
            pid = bot_cache["pid"]
            if pid is not None and psutil.pid_exists(pid):
                return log.error("Bot is already running!")
        # Some variable initializations
        config = None
        secret_config = None
        bc.restart_flag = False
        bc.args = args
        # Handle --nohup flag
        if sys.platform in ("linux", "darwin") and args.nohup:
            # Redirect stdout/stderr to the nohup file and ignore SIGHUP so the
            # bot survives the controlling terminal closing.
            fd = os.open(const.NOHUP_FILE_PATH, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
            log.info(f"Output is redirected to {const.NOHUP_FILE_PATH}")
            os.dup2(fd, sys.stdout.fileno())
            os.dup2(sys.stdout.fileno(), sys.stderr.fileno())
            os.close(fd)
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
        # Selecting YAML parser
        bc.yaml_loader, bc.yaml_dumper = Util.get_yaml(verbose=True)
        # Saving application pd in order to safely stop it later
        BotCache(main_bot).dump_to_file()
        # Executing patch tool if it is necessary
        if args.patch:
            cmd = f"'{sys.executable}' '{os.path.dirname(__file__) + '/../tools/patch.py'}' all"
            log.info("Executing patch tool: " + cmd)
            # NOTE(review): subprocess.call gets a single quoted string without
            # shell=True — on POSIX this is treated as one program name; confirm
            # this path is actually exercised / works on the target platform.
            subprocess.call(cmd)
        # Read configuration files
        config = Util.read_config_file(const.CONFIG_PATH)
        if config is None:
            config = Config()
        secret_config = Util.read_config_file(const.SECRET_CONFIG_PATH)
        if secret_config is None:
            secret_config = SecretConfig()
        bc.markov = Util.read_config_file(const.MARKOV_PATH)
        if bc.markov is None and os.path.isdir("backup"):
            # Check available backups
            markov_backups = sorted(
                [x for x in os.listdir("backup") if x.startswith("markov_") and x.endswith(".zip")])
            if markov_backups:
                # Restore Markov model from backup
                with zipfile.ZipFile("backup/" + markov_backups[-1], 'r') as zip_ref:
                    zip_ref.extractall(".")
                log.info(f"Restoring Markov model from backup/{markov_backups[-1]}")
                shutil.move(markov_backups[-1][:-4], "markov.yaml")
                bc.markov = Util.read_config_file(const.MARKOV_PATH)
                if bc.markov is None:
                    bc.markov = Markov()
                    log.warning("Failed to restore Markov model from backup. Creating new Markov model...")
        if bc.markov is None:
            bc.markov = Markov()
            log.info("Created empty Markov model")
        # Check config versions
        ok = True
        ok &= Util.check_version(
            "discord.py", discord.__version__, const.DISCORD_LIB_VERSION,
            solutions=[
                "execute: python -m pip install -r requirements.txt",
            ])
        ok &= Util.check_version(
            "Config", config.version, const.CONFIG_VERSION,
            solutions=[
                "run patch tool",
                "remove config.yaml (settings will be lost!)",
            ])
        ok &= Util.check_version(
            "Markov config", bc.markov.version, const.MARKOV_CONFIG_VERSION,
            solutions=[
                "run patch tool",
                "remove markov.yaml (Markov model will be lost!)",
            ])
        ok &= Util.check_version(
            "Secret config", secret_config.version, const.SECRET_CONFIG_VERSION,
            solutions=[
                "run patch tool",
                "remove secret.yaml (your Discord authentication token will be lost!)",
            ])
        if main_bot and not ok:
            sys.exit(const.ExitStatus.CONFIG_FILE_ERROR)
        config.commands.update()
        # Checking authentication token
        if secret_config.token is None:
            secret_config = SecretConfig()
            if not FF.is_enabled("WALBOT_FEATURE_NEW_CONFIG"):
                secret_config.token = input("Enter your token: ")
        # Constructing bot instance
        if main_bot:
            intents = discord.Intents.all()
            walbot = WalBot(args.name, config, secret_config, intents=intents)
        else:
            walbot = importlib.import_module("src.minibot").MiniWalBot(args.name, config, secret_config, args.message)
        # Starting the bot
        try:
            walbot.run(secret_config.token)
        except discord.errors.PrivilegedIntentsRequired:
            log.error("Privileged Gateway Intents are not enabled! Shutting down the bot...")
        # After stopping the bot
        log.info("Bot is disconnected!")
        if main_bot:
            config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH, wait=True)
        BotCache(main_bot).remove()
        if bc.restart_flag:
            # Restart: fork on POSIX so the parent can exit while the child relaunches.
            cmd = f"'{sys.executable}' '{os.path.dirname(os.path.dirname(__file__)) + '/walbot.py'}' start"
            log.info("Calling: " + cmd)
            if sys.platform in ("linux", "darwin"):
                fork = os.fork()
                if fork == 0:
                    subprocess.call(cmd)
                elif fork > 0:
                    log.info("Stopping current instance of the bot")
                    sys.exit(const.ExitStatus.NO_ERROR)
            else:
                subprocess.call(cmd)

    def stop(self, _, main_bot=True):
        """Stop a running bot instance found via the PID cache file and wait for it to exit."""
        if not BotCache(main_bot).exists():
            return log.error("Could not stop the bot (cache file does not exist)")
        bot_cache = BotCache(main_bot).parse()
        pid = bot_cache["pid"]
        if pid is None:
            return log.error("Could not stop the bot (cache file does not contain pid)")
        if psutil.pid_exists(pid):
            if sys.platform == "win32":
                # Windows has no SIGINT equivalent for other processes; emit a
                # console Ctrl+C event to the target instead.
                # Reference to the original solution:
                # https://stackoverflow.com/a/64357453
                import ctypes
                kernel = ctypes.windll.kernel32
                kernel.FreeConsole()
                kernel.AttachConsole(pid)
                kernel.SetConsoleCtrlHandler(None, 1)
                kernel.GenerateConsoleCtrlEvent(0, 0)
            else:
                os.kill(pid, signal.SIGINT)
            while psutil.pid_exists(pid):
                log.debug("Bot is still running. Please, wait...")
                time.sleep(0.5)
            log.info("Bot is stopped!")
        else:
            log.error("Could not stop the bot (bot is not running)")
            BotCache(main_bot).remove()
|
import shlex
import string
import sys
from contextlib import contextmanager
from typing import Any, Callable, Generic, List, Optional, Tuple, Type, TypeVar, cast
import pytest
import simple_parsing
from simple_parsing import ConflictResolution, DashVariant, ParsingError
from simple_parsing.utils import camel_case
from simple_parsing.wrappers.field_wrapper import ArgumentGenerationMode, NestedMode
# Shorthand aliases for the pytest markers used throughout these tests.
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
def xfail_param(*args, reason: str):
    """Build a pytest.param marked xfail with the given reason.

    A single tuple argument is unpacked, so param values may be passed either
    as separate arguments or as one tuple.
    """
    if len(args) == 1 and isinstance(args[0], tuple):
        (args,) = args
    return pytest.param(*args, marks=pytest.mark.xfail(reason=reason))
# Type variable standing in for "the dataclass type under test".
Dataclass = TypeVar("Dataclass")
@contextmanager
def raises(exception=ParsingError, match=None, code: int = None):
    """Context manager asserting that the body raises `exception` (text matching `match`).

    NOTE(review): `code` is accepted but never used — confirm whether exit-code
    checking was intended here.
    """
    with pytest.raises(exception, match=match):
        yield
from io import StringIO
from contextlib import redirect_stderr
@contextmanager
def exits_and_writes_to_stderr(match: str = ""):
    """Assert that the body raises SystemExit and writes `match` (or, with no
    `match`, anything at all) to stderr."""
    stderr_buffer = StringIO()
    with redirect_stderr(stderr_buffer), raises(SystemExit):
        yield
    captured = stderr_buffer.getvalue()
    if match:
        assert match in captured, captured
    else:
        assert captured, captured
@contextmanager
def raises_missing_required_arg():
    """Expect argparse to exit complaining about missing required arguments."""
    with exits_and_writes_to_stderr("the following arguments are required"):
        yield
@contextmanager
def raises_expected_n_args(n: int):
    """Expect argparse to exit complaining that exactly `n` values were expected."""
    with exits_and_writes_to_stderr(f"expected {n} arguments"):
        yield
@contextmanager
def raises_unrecognized_args(*args: str):
    """Expect argparse to exit complaining about the given unrecognized arguments."""
    expected_message = "unrecognized arguments: " + " ".join(args)
    with exits_and_writes_to_stderr(expected_message):
        yield
def assert_help_output_equals(actual: str, expected: str) -> None:
    """Assert two --help outputs are equal, ignoring whitespace and punctuation.

    The program name in `expected` is rewritten because the test runner might not
    always be `pytest` (it could be __main__ when debugging with VSCode), and the
    pre-3.10 "optional arguments" heading is mapped to the 3.10+ "options" heading.
    """
    prog = sys.argv[0].split("/")[-1]
    if prog != "pytest":
        expected = expected.replace("usage: pytest", f"usage: {prog}")
    remove = string.punctuation + string.whitespace
    if "optional arguments" in expected and sys.version_info[:2] >= (3, 10):
        expected = expected.replace("optional arguments", "options")
    # BUG FIX: removed a dead `actual_str = "".join(actual.split())` that was
    # immediately overwritten by the translate() below (which strips whitespace too).
    actual_str = actual.translate(str.maketrans("", "", remove))
    expected_str = expected.translate(str.maketrans("", "", remove))
    assert actual_str == expected_str, "\n" + "\n".join([actual_str, expected_str])
# Generic parameter for TestParser's parsed-result type.
T = TypeVar("T")
class TestParser(simple_parsing.ArgumentParser, Generic[T]):
    """A parser subclass just used for testing.

    Makes the retrieval of the arguments a bit easier to read.
    """
    # BUG FIX: the docstring used to appear *after* `__test__ = False`, which made
    # it a discarded string constant rather than the class docstring (__doc__ was None).
    __test__ = False  # tell pytest not to collect this class as a test case

    def __init__(self, *args, **kwargs):
        """Track the last (dest, dataclass) pair so __call__ can fetch the parsed value."""
        self._current_dest = None
        self._current_dataclass = None
        super().__init__(*args, **kwargs)

    def add_arguments(self, dataclass: Type, dest, prefix="", default=None):
        """Add arguments for `dataclass` under `dest`, skipping exact duplicates."""
        if self._current_dest == dest and self._current_dataclass == dataclass:
            return  # already added arguments for that dataclass.
        self._current_dest = dest
        self._current_dataclass = dataclass
        return super().add_arguments(dataclass, dest, prefix=prefix, default=default)

    def __call__(self, args: str) -> T:
        """Parse the given command-line string and return the dataclass instance."""
        namespace = self.parse_args(shlex.split(args))
        value = getattr(namespace, self._current_dest)
        value = cast(T, value)
        return value
class TestSetup:
    """Mixin giving a dataclass-under-test helpers to parse itself from a CLI string."""

    @classmethod
    def setup(
        cls: Type[Dataclass],
        arguments: Optional[str] = "",
        dest: Optional[str] = None,
        default: Optional[Dataclass] = None,
        conflict_resolution_mode: ConflictResolution = ConflictResolution.AUTO,
        add_option_string_dash_variants: DashVariant = DashVariant.AUTO,
        parse_known_args: bool = False,
        attempt_to_reorder: bool = False,
        *,
        argument_generation_mode: ArgumentGenerationMode = ArgumentGenerationMode.FLAT,
        nested_mode: NestedMode = NestedMode.DEFAULT,
    ) -> Dataclass:
        """Basic setup for a test.

        Keyword Arguments:
            arguments {Optional[str]} -- The arguments to pass to the parser (default: {""});
                None means "parse the real sys.argv".
            dest {Optional[str]} -- the attribute where the argument should be stored. (default: {None})

        Returns:
            {Dataclass} -- an instance of `cls` parsed from `arguments`.
        """
        parser = simple_parsing.ArgumentParser(
            conflict_resolution=conflict_resolution_mode,
            add_option_string_dash_variants=add_option_string_dash_variants,
            argument_generation_mode=argument_generation_mode,
            nested_mode=nested_mode,
        )
        if dest is None:
            # Default destination: derived from the class name.
            dest = camel_case(cls.__name__)
        parser.add_arguments(cls, dest=dest, default=default)
        if arguments is None:
            # No explicit arguments: parse the process's real command line.
            if parse_known_args:
                args = parser.parse_known_args(attempt_to_reorder=attempt_to_reorder)
            else:
                args = parser.parse_args()
        else:
            splits = shlex.split(arguments)
            if parse_known_args:
                args, unknown_args = parser.parse_known_args(
                    splits, attempt_to_reorder=attempt_to_reorder
                )
            else:
                args = parser.parse_args(splits)
        assert hasattr(args, dest), f"attribute '{dest}' not found in args {args}"
        instance: Dataclass = getattr(args, dest)  # type: ignore
        delattr(args, dest)
        # If there are subgroups, we can allow an extra "subgroups" attribute, otherwise we don't
        # expect any other arguments.
        args_dict = vars(args).copy()
        args_dict.pop("subgroups", None)
        assert not args_dict, f"Namespace has leftover garbage values (besides subgroups): {args}"
        instance = cast(Dataclass, instance)
        return instance

    @classmethod
    def setup_multiple(
        cls: Type[Dataclass], num_to_parse: int, arguments: Optional[str] = ""
    ) -> Tuple[Dataclass, ...]:
        """Parse `num_to_parse` copies of `cls` from one CLI string (conflicts merged)."""
        conflict_resolution_mode: ConflictResolution = ConflictResolution.ALWAYS_MERGE
        parser = simple_parsing.ArgumentParser(conflict_resolution=conflict_resolution_mode)
        class_name = camel_case(cls.__name__)
        for i in range(num_to_parse):
            parser.add_arguments(cls, f"{class_name}_{i}")
        if arguments is None:
            # Fall back to the process's real command line.
            args = parser.parse_args()
        else:
            splits = shlex.split(arguments)
            args = parser.parse_args(splits)
        return tuple(getattr(args, f"{class_name}_{i}") for i in range(num_to_parse))

    @classmethod
    def get_help_text(
        cls,
        argv: Optional[str] = None,
        multiple=False,
        conflict_resolution_mode: ConflictResolution = ConflictResolution.AUTO,
        add_option_string_dash_variants=DashVariant.AUTO,
        **parser_kwargs,
    ) -> str:
        """Return the --help text produced for this class's generated parser."""
        import contextlib
        from io import StringIO
        f = StringIO()
        if argv is None:
            argv = "--help"
        elif not argv.endswith("--help"):
            argv = argv + " --help"
        # --help raises SystemExit; suppress it and capture what argparse printed.
        with contextlib.suppress(SystemExit), contextlib.redirect_stdout(f):
            _ = cls.setup(
                argv,
                conflict_resolution_mode=conflict_resolution_mode,
                add_option_string_dash_variants=add_option_string_dash_variants,
                **parser_kwargs,
            )
        s = f.getvalue()
        return s
# Signatures shared by the list-formatting helpers defined below.
ListFormattingFunction = Callable[[List[Any]], str]
ListOfListsFormattingFunction = Callable[[List[List[Any]]], str]
def format_list_using_spaces(value_list: List[Any]) -> str:
    """Render the values as a single space-separated string."""
    return " ".join(map(str, value_list))
def format_list_using_brackets(value_list: List[Any]) -> str:
    """Render the values as a bracketed, comma-separated string, e.g. "[1,2,3]"."""
    # BUG FIX: use single quotes inside the f-string — reusing the enclosing double
    # quote is a SyntaxError before Python 3.12 (PEP 701).
    return f"[{','.join(str(p) for p in value_list)}]"
def format_list_using_single_quotes(value_list: List[Any]) -> str:
    """Render the values space-separated, wrapped in single quotes."""
    inner = format_list_using_spaces(value_list)
    return f"'{inner}'"
def format_list_using_double_quotes(value_list: List[Any]) -> str:
    """Render the values space-separated, wrapped in double quotes."""
    inner = format_list_using_spaces(value_list)
    return f'"{inner}"'
def format_lists_using_brackets(list_of_lists: List[List[Any]]) -> str:
    """Render each sub-list bracketed, joined by spaces."""
    return " ".join(map(format_list_using_brackets, list_of_lists))
def format_lists_using_double_quotes(list_of_lists: List[List[Any]]) -> str:
    """Render each sub-list double-quoted, joined by spaces."""
    return " ".join(map(format_list_using_double_quotes, list_of_lists))
def format_lists_using_single_quotes(list_of_lists: List[List[Any]]) -> str:
    """Render each sub-list single-quoted, joined by spaces."""
    return " ".join(map(format_list_using_single_quotes, list_of_lists))
| import shlex
import string
import sys
from contextlib import contextmanager
from typing import Any, Callable, Generic, List, Optional, Tuple, Type, TypeVar, cast
import pytest
import simple_parsing
from simple_parsing import ConflictResolution, DashVariant, ParsingError
from simple_parsing.utils import camel_case
from simple_parsing.wrappers.field_wrapper import ArgumentGenerationMode, NestedMode
# Shorthand aliases for the pytest markers used throughout these tests.
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
def xfail_param(*args, reason: str):
    """Build a pytest.param marked xfail with the given reason.

    A single tuple argument is unpacked, so param values may be passed either
    as separate arguments or as one tuple.
    """
    if len(args) == 1 and isinstance(args[0], tuple):
        (args,) = args
    return pytest.param(*args, marks=pytest.mark.xfail(reason=reason))
# Type variable standing in for "the dataclass type under test".
Dataclass = TypeVar("Dataclass")
@contextmanager
def raises(exception=ParsingError, match=None, code: int = None):
    """Context manager asserting that the body raises `exception` (text matching `match`).

    NOTE(review): `code` is accepted but never used — confirm whether exit-code
    checking was intended here.
    """
    with pytest.raises(exception, match=match):
        yield
from io import StringIO
from contextlib import redirect_stderr
@contextmanager
def exits_and_writes_to_stderr(match: str = ""):
    """Assert that the body raises SystemExit and writes `match` (or, with no
    `match`, anything at all) to stderr."""
    stderr_buffer = StringIO()
    with redirect_stderr(stderr_buffer), raises(SystemExit):
        yield
    captured = stderr_buffer.getvalue()
    if match:
        assert match in captured, captured
    else:
        assert captured, captured
@contextmanager
def raises_missing_required_arg():
    """Expect argparse to exit complaining about missing required arguments."""
    with exits_and_writes_to_stderr("the following arguments are required"):
        yield
@contextmanager
def raises_expected_n_args(n: int):
    """Expect argparse to exit complaining that exactly `n` values were expected."""
    with exits_and_writes_to_stderr(f"expected {n} arguments"):
        yield
@contextmanager
def raises_unrecognized_args(*args: str):
    """Expect argparse to exit complaining about the given unrecognized arguments."""
    expected_message = "unrecognized arguments: " + " ".join(args)
    with exits_and_writes_to_stderr(expected_message):
        yield
def assert_help_output_equals(actual: str, expected: str) -> None:
    """Assert two --help outputs are equal, ignoring whitespace and punctuation.

    The program name in `expected` is rewritten because the test runner might not
    always be `pytest` (it could be __main__ when debugging with VSCode), and the
    pre-3.10 "optional arguments" heading is mapped to the 3.10+ "options" heading.
    """
    prog = sys.argv[0].split("/")[-1]
    if prog != "pytest":
        expected = expected.replace("usage: pytest", f"usage: {prog}")
    remove = string.punctuation + string.whitespace
    if "optional arguments" in expected and sys.version_info[:2] >= (3, 10):
        expected = expected.replace("optional arguments", "options")
    # BUG FIX: removed a dead `actual_str = "".join(actual.split())` that was
    # immediately overwritten by the translate() below (which strips whitespace too).
    actual_str = actual.translate(str.maketrans("", "", remove))
    expected_str = expected.translate(str.maketrans("", "", remove))
    assert actual_str == expected_str, "\n" + "\n".join([actual_str, expected_str])
# Generic parameter for TestParser's parsed-result type.
T = TypeVar("T")
class TestParser(simple_parsing.ArgumentParser, Generic[T]):
    """A parser subclass just used for testing.

    Makes the retrieval of the arguments a bit easier to read.
    """
    # BUG FIX: the docstring used to appear *after* `__test__ = False`, which made
    # it a discarded string constant rather than the class docstring (__doc__ was None).
    __test__ = False  # tell pytest not to collect this class as a test case

    def __init__(self, *args, **kwargs):
        """Track the last (dest, dataclass) pair so __call__ can fetch the parsed value."""
        self._current_dest = None
        self._current_dataclass = None
        super().__init__(*args, **kwargs)

    def add_arguments(self, dataclass: Type, dest, prefix="", default=None):
        """Add arguments for `dataclass` under `dest`, skipping exact duplicates."""
        if self._current_dest == dest and self._current_dataclass == dataclass:
            return  # already added arguments for that dataclass.
        self._current_dest = dest
        self._current_dataclass = dataclass
        return super().add_arguments(dataclass, dest, prefix=prefix, default=default)

    def __call__(self, args: str) -> T:
        """Parse the given command-line string and return the dataclass instance."""
        namespace = self.parse_args(shlex.split(args))
        value = getattr(namespace, self._current_dest)
        value = cast(T, value)
        return value
class TestSetup:
    """Mixin giving a dataclass-under-test helpers to parse itself from a CLI string."""

    @classmethod
    def setup(
        cls: Type[Dataclass],
        arguments: Optional[str] = "",
        dest: Optional[str] = None,
        default: Optional[Dataclass] = None,
        conflict_resolution_mode: ConflictResolution = ConflictResolution.AUTO,
        add_option_string_dash_variants: DashVariant = DashVariant.AUTO,
        parse_known_args: bool = False,
        attempt_to_reorder: bool = False,
        *,
        argument_generation_mode: ArgumentGenerationMode = ArgumentGenerationMode.FLAT,
        nested_mode: NestedMode = NestedMode.DEFAULT,
    ) -> Dataclass:
        """Basic setup for a test.

        Keyword Arguments:
            arguments {Optional[str]} -- The arguments to pass to the parser (default: {""});
                None means "parse the real sys.argv".
            dest {Optional[str]} -- the attribute where the argument should be stored. (default: {None})

        Returns:
            {Dataclass} -- an instance of `cls` parsed from `arguments`.
        """
        parser = simple_parsing.ArgumentParser(
            conflict_resolution=conflict_resolution_mode,
            add_option_string_dash_variants=add_option_string_dash_variants,
            argument_generation_mode=argument_generation_mode,
            nested_mode=nested_mode,
        )
        if dest is None:
            # Default destination: derived from the class name.
            dest = camel_case(cls.__name__)
        parser.add_arguments(cls, dest=dest, default=default)
        if arguments is None:
            # No explicit arguments: parse the process's real command line.
            if parse_known_args:
                args = parser.parse_known_args(attempt_to_reorder=attempt_to_reorder)
            else:
                args = parser.parse_args()
        else:
            splits = shlex.split(arguments)
            if parse_known_args:
                args, unknown_args = parser.parse_known_args(
                    splits, attempt_to_reorder=attempt_to_reorder
                )
            else:
                args = parser.parse_args(splits)
        assert hasattr(args, dest), f"attribute '{dest}' not found in args {args}"
        instance: Dataclass = getattr(args, dest)  # type: ignore
        delattr(args, dest)
        # If there are subgroups, we can allow an extra "subgroups" attribute, otherwise we don't
        # expect any other arguments.
        args_dict = vars(args).copy()
        args_dict.pop("subgroups", None)
        assert not args_dict, f"Namespace has leftover garbage values (besides subgroups): {args}"
        instance = cast(Dataclass, instance)
        return instance

    @classmethod
    def setup_multiple(
        cls: Type[Dataclass], num_to_parse: int, arguments: Optional[str] = ""
    ) -> Tuple[Dataclass, ...]:
        """Parse `num_to_parse` copies of `cls` from one CLI string (conflicts merged)."""
        conflict_resolution_mode: ConflictResolution = ConflictResolution.ALWAYS_MERGE
        parser = simple_parsing.ArgumentParser(conflict_resolution=conflict_resolution_mode)
        class_name = camel_case(cls.__name__)
        for i in range(num_to_parse):
            parser.add_arguments(cls, f"{class_name}_{i}")
        if arguments is None:
            # Fall back to the process's real command line.
            args = parser.parse_args()
        else:
            splits = shlex.split(arguments)
            args = parser.parse_args(splits)
        return tuple(getattr(args, f"{class_name}_{i}") for i in range(num_to_parse))

    @classmethod
    def get_help_text(
        cls,
        argv: Optional[str] = None,
        multiple=False,
        conflict_resolution_mode: ConflictResolution = ConflictResolution.AUTO,
        add_option_string_dash_variants=DashVariant.AUTO,
        **parser_kwargs,
    ) -> str:
        """Return the --help text produced for this class's generated parser."""
        import contextlib
        from io import StringIO
        f = StringIO()
        if argv is None:
            argv = "--help"
        elif not argv.endswith("--help"):
            argv = argv + " --help"
        # --help raises SystemExit; suppress it and capture what argparse printed.
        with contextlib.suppress(SystemExit), contextlib.redirect_stdout(f):
            _ = cls.setup(
                argv,
                conflict_resolution_mode=conflict_resolution_mode,
                add_option_string_dash_variants=add_option_string_dash_variants,
                **parser_kwargs,
            )
        s = f.getvalue()
        return s
# Signatures shared by the list-formatting helpers defined below.
ListFormattingFunction = Callable[[List[Any]], str]
ListOfListsFormattingFunction = Callable[[List[List[Any]]], str]
def format_list_using_spaces(value_list: List[Any]) -> str:
    """Render the values as a single space-separated string."""
    return " ".join(map(str, value_list))
def format_list_using_brackets(value_list: List[Any]) -> str:
    """Render the values as a bracketed, comma-separated string, e.g. "[1,2,3]"."""
    joined = ",".join(str(p) for p in value_list)
    return "[" + joined + "]"
def format_list_using_single_quotes(value_list: List[Any]) -> str:
    """Render the values space-separated, wrapped in single quotes."""
    inner = format_list_using_spaces(value_list)
    return f"'{inner}'"
def format_list_using_double_quotes(value_list: List[Any]) -> str:
    """Render the values space-separated, wrapped in double quotes."""
    inner = format_list_using_spaces(value_list)
    return f'"{inner}"'
def format_lists_using_brackets(list_of_lists: List[List[Any]]) -> str:
    """Render each sub-list bracketed, joined by spaces."""
    return " ".join(map(format_list_using_brackets, list_of_lists))
def format_lists_using_double_quotes(list_of_lists: List[List[Any]]) -> str:
    """Render each sub-list double-quoted, joined by spaces."""
    return " ".join(map(format_list_using_double_quotes, list_of_lists))
def format_lists_using_single_quotes(list_of_lists: List[List[Any]]) -> str:
    """Render each sub-list single-quoted, joined by spaces."""
    return " ".join(map(format_list_using_single_quotes, list_of_lists))
|
"""Various functions that interact with Slack, e.g. posting messages."""
import asyncio
import logging
import socket
from pathlib import Path
from typing import Union, Optional
from slack_sdk.errors import SlackApiError
from lsw_slackbot.plots import plot_resource_use
from lsw_slackbot.resources import current_memory_fraction, _get_resource_usage_dataframe
from lsw_slackbot.util import string_time
async def _send_message(client, channel: str, message: str):
    """Sends a message to a channel, with basic logging & error handling."""
    try:
        await client.chat_postMessage(channel=channel, text=message)
    # Handle various different errors, *some* of which are non-critical...
    except SlackApiError as e:
        # BUG FIX: single quotes inside the f-string — reusing the enclosing double
        # quote is a SyntaxError before Python 3.12 (PEP 701). Output is unchanged.
        logging.exception(f"error from slack API when trying to send message: {e.response['error']}")
        print("Encountered SlackApiError when trying to send message (see logs.)")
    except AttributeError:
        logging.exception("suspected issue in Slack API when trying to send message. This bug has occured before!")
        print("Encountered AttributeError when trying to send message (see logs.)")
async def _send_file(client, channel: str, file: Union[Path, str], title):
    """Sends a file to a channel, with basic logging & error handling."""
    if isinstance(file, Path):
        # The Slack SDK expects a plain path string.
        file = str(file.absolute())
    try:
        await client.files_upload(channels=channel, file=file, title=title)
    # Handle various different errors, *some* of which are non-critical...
    except SlackApiError as e:
        # BUG FIX: single quotes inside the f-string — reusing the enclosing double
        # quote is a SyntaxError before Python 3.12 (PEP 701). Output is unchanged.
        logging.exception(f"error from Slack API when trying to upload file: {e.response['error']}")
        print("Encountered SlackApiError when trying to upload file (see logs.)")
    except AttributeError:
        logging.exception("suspected issue in Slack API when trying to upload file. This bug has occured before!")
        print("Encountered AttributeError when trying to upload file (see logs.)")
async def hello_world(client, channel: str):
    """Post an init message (server time & hostname) to `channel`."""
    # Todo: it would be really cool if hello_world also printed the latest commit message.
    # This could be done by running the command `git log -1` from Python?
    # See https://stackoverflow.com/questions/7293008/display-last-git-commit-comment
    logging.info(f"Saying hello world in {channel}!")
    hostname = socket.gethostname()
    greeting = f"Server time & date: {string_time()}\nApp is running on system {hostname}."
    await _send_message(client, channel, greeting)
async def send_resource_use_plot(client, channel: str, plot_kwargs: dict, title: Optional[str] = None):
    """Generate a resource usage plot and upload it to `channel`.

    A generation timestamp is appended to (or used as) the upload title.
    """
    stamp = string_time()
    if title is None:
        title = f"Resource usage plot generated at {stamp}"
    else:
        title = title + f" (plot generated at {stamp})"
    # Build the figure first...
    logging.info("Generating a resource usage plot")
    logging.debug(f"plot kwargs: {plot_kwargs}")
    plot_path = await plot_resource_use(**plot_kwargs)
    # ...then push it to Slack.
    logging.info(f"Sending to Slack in channel {channel}")
    await _send_file(client, channel, plot_path, title)
_LAST_MEMORY_FRACTION = 0.0
async def check_memory(client, channel: str, memory_warn_fraction=0.8, sleep_time=3600):
    """Check current server memory and warn `channel` if usage is too high.

    Args:
        client: Slack SDK async client.
        channel: Channel to send warnings to.
        memory_warn_fraction: Fraction of total memory above which to warn.
        sleep_time: Seconds to sleep after warning, to avoid spamming the chat.

    A warning is only sent when the previous reading was below the threshold,
    so usage that stays high does not produce repeated warnings.
    """
    global _LAST_MEMORY_FRACTION  # Sorry for using global variables =(
    current_usage = current_memory_fraction()
    # Only warn if we didn't warn before
    if _LAST_MEMORY_FRACTION < memory_warn_fraction:
        if current_usage > memory_warn_fraction:
            # Firstly, prioritise sending a basic warning
            await _send_message(client, channel, f"WARNING: current memory usage at {current_usage:.2%}!")
            # Next, grab info on currently running threads
            thread_df = await _get_resource_usage_dataframe(measurement_time=1.0)
            thread_df = thread_df.sort_values("memory")
            # ... and format it into something we can send.
            # Single-quoted subscripts: reusing the outer double quote inside an
            # f-string is a SyntaxError before Python 3.12.
            message = ["Users with something currently running:"]
            for i, a_row in thread_df.iterrows():
                message.append(f"{a_row.name}: {a_row['cpu_percent']:.2f}% CPU "
                               f"-- {a_row['memory']:.2f} GB"
                               f"-- {a_row['threads']} threads")
            message.append(f"\n(no further warnings will be sent for a sleep period of {sleep_time/60**2:.2f} hour(s))")
            # Send it!
            await _send_message(client, channel, "\n".join(message))
            # Sleep so we don't spam the chat
            await asyncio.sleep(sleep_time)
    _LAST_MEMORY_FRACTION = current_usage
| """Various functions that interact with Slack, e.g. posting messages."""
import asyncio
import logging
import socket
from pathlib import Path
from typing import Union, Optional
from slack_sdk.errors import SlackApiError
from lsw_slackbot.plots import plot_resource_use
from lsw_slackbot.resources import current_memory_fraction, _get_resource_usage_dataframe
from lsw_slackbot.util import string_time
async def _send_message(client, channel: str, message: str):
    """Post `message` to `channel`; known Slack failures are logged, not raised."""
    try:
        await client.chat_postMessage(channel=channel, text=message)
        return
    # Some Slack API failures are non-critical, so log instead of crashing.
    except SlackApiError as err:
        logging.exception(f"error from slack API when trying to send message: {err.response['error']}")
        print("Encountered SlackApiError when trying to send message (see logs.)")
    except AttributeError:
        logging.exception("suspected issue in Slack API when trying to send message. This bug has occured before!")
        print("Encountered AttributeError when trying to send message (see logs.)")
async def _send_file(client, channel: str, file: Union[Path, str], title):
    """Upload `file` to `channel`; known Slack failures are logged, not raised."""
    # files_upload expects a plain path string, not a pathlib object.
    if isinstance(file, Path):
        file = str(file.absolute())
    try:
        await client.files_upload(channels=channel, file=file, title=title)
        return
    # Some Slack API failures are non-critical, so log instead of crashing.
    except SlackApiError as err:
        logging.exception(f"error from Slack API when trying to upload file: {err.response['error']}")
        print("Encountered SlackApiError when trying to upload file (see logs.)")
    except AttributeError:
        logging.exception("suspected issue in Slack API when trying to upload file. This bug has occured before!")
        print("Encountered AttributeError when trying to upload file (see logs.)")
async def hello_world(client, channel: str):
    """Announce startup in `channel`, reporting the server time and hostname."""
    # Todo: it would be really cool if hello_world also printed the latest commit message.
    # This could be done by running the command `git log -1` from Python?
    # See https://stackoverflow.com/questions/7293008/display-last-git-commit-comment
    logging.info(f"Saying hello world in {channel}!")
    host = socket.gethostname()
    text = f"Server time & date: {string_time()}\nApp is running on system {host}."
    await _send_message(client, channel, text)
async def send_resource_use_plot(client, channel: str, plot_kwargs: dict, title: Optional[str] = None):
    """Build a resource-usage plot and upload it to `channel` with a timestamped title."""
    now = string_time()
    title = (f"Resource usage plot generated at {now}"
             if title is None
             else title + f" (plot generated at {now})")
    # Step 1: render the plot to a file.
    logging.info("Generating a resource usage plot")
    logging.debug(f"plot kwargs: {plot_kwargs}")
    saved_to = await plot_resource_use(**plot_kwargs)
    # Step 2: upload the saved file.
    logging.info(f"Sending to Slack in channel {channel}")
    await _send_file(client, channel, saved_to, title)
_LAST_MEMORY_FRACTION = 0.0
async def check_memory(client, channel: str, memory_warn_fraction=0.8, sleep_time=3600):
    """Warn `channel` when server memory use crosses `memory_warn_fraction`.

    Warns only on the upward crossing (previous reading below the threshold),
    then sleeps `sleep_time` seconds so the chat is not spammed.
    """
    global _LAST_MEMORY_FRACTION  # Sorry for using global variables =(
    current_usage = current_memory_fraction()
    # Warn only on the crossing, not while usage simply stays high.
    if _LAST_MEMORY_FRACTION < memory_warn_fraction and current_usage > memory_warn_fraction:
        # A short warning goes out first, in case the detailed report fails.
        await _send_message(client, channel, f"WARNING: current memory usage at {current_usage:.2%}!")
        # Then a per-user breakdown of what is currently running.
        usage_df = await _get_resource_usage_dataframe(measurement_time=1.0)
        usage_df = usage_df.sort_values("memory")
        lines = ["Users with something currently running:"]
        for _, row in usage_df.iterrows():
            lines.append(f"{row.name}: {row['cpu_percent']:.2f}% CPU "
                         f"-- {row['memory']:.2f} GB"
                         f"-- {row['threads']} threads")
        lines.append(f"\n(no further warnings will be sent for a sleep period of {sleep_time/60**2:.2f} hour(s))")
        await _send_message(client, channel, "\n".join(lines))
        # Back off so we don't spam the chat.
        await asyncio.sleep(sleep_time)
    _LAST_MEMORY_FRACTION = current_usage
|
"""
Provides linkedin api-related code
"""
import random
import logging
from time import sleep
import json
from linkedin_api.utils.helpers import get_id_from_urn
from linkedin_api.client import Client
logger = logging.getLogger(__name__)
class Linkedin(object):
    """
    Class for accessing the LinkedIn Voyager API with an authenticated session.

    Most request methods sleep for a short random interval before hitting the
    API, to reduce the chance of the account being flagged for automation.
    """

    _MAX_UPDATE_COUNT = 100  # max seems to be 100
    _MAX_SEARCH_COUNT = 49  # max seems to be 49
    _MAX_REPEATED_REQUESTS = (
        200
    )  # VERY conservative max requests count to avoid rate-limit

    def __init__(self, username, password):
        """Authenticate a new API client with the given LinkedIn credentials."""
        self.client = Client(debug=True)
        self.client.authenticate(username, password)
        self.logger = logger

    def search(self, params, max_results=None, results=None):
        """
        Do a search, recursively fetching pages of results.

        [params] - extra query params, merged over the defaults
        [max_results] - optional cap on the number of results collected
        [results] - accumulator used by the recursion; leave as None
        """
        # BUG FIX: the default was a mutable `[]`, shared between calls, so
        # repeated searches accumulated stale results. Use None as sentinel.
        if results is None:
            results = []
        sleep(
            random.randint(0, 1)
        )  # sleep a random duration to try and evade suspention
        count = (
            max_results
            if max_results and max_results <= Linkedin._MAX_SEARCH_COUNT
            else Linkedin._MAX_SEARCH_COUNT
        )
        default_params = {
            "count": count,
            "guides": "List()",
            "origin": "GLOBAL_SEARCH_HEADER",
            "q": "guided",
            "start": len(results),
        }
        default_params.update(params)
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/search/cluster", params=default_params
        )
        data = res.json()
        total_found = data.get("paging", {}).get("total")
        # Recursive base case: no more data, requested cap reached, or the
        # (very conservative) repeated-request limit hit.
        if (
            len(data["elements"]) == 0
            or (max_results is not None and len(results) >= max_results)
            or total_found is None
            or len(results) >= total_found
            or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
        ):
            return results
        results.extend(data["elements"][0]["elements"])
        self.logger.debug(f"results grew: {len(results)}")
        return self.search(params, results=results, max_results=max_results)

    def search_people(
        self,
        keywords=None,
        connection_of=None,
        network_depth=None,
        regions=None,
        industries=None,
    ):
        """
        Do a people search.

        [connection_of] - urn id of a profile whose connections to search
        [network_depth] - network facet, e.g. "F" for first-degree
        [regions] / [industries] - lists of facet identifiers
        """
        guides = ["v->PEOPLE"]
        if connection_of:
            guides.append(f"facetConnectionOf->{connection_of}")
        if network_depth:
            guides.append(f"facetNetwork->{network_depth}")
        # BUG FIX: the join literal must use the *other* quote style -- reusing
        # the f-string's outer quote is a SyntaxError before Python 3.12.
        if regions:
            guides.append(f'facetGeoRegion->{"|".join(regions)}')
        if industries:
            guides.append(f'facetIndustry->{"|".join(industries)}')
        params = {"guides": "List({})".format(",".join(guides))}
        if keywords:
            params["keywords"] = keywords
        data = self.search(params)
        results = []
        for item in data:
            search_profile = item["hitInfo"][
                "com.linkedin.voyager.search.SearchProfile"
            ]
            results.append(
                {
                    "urn_id": search_profile["id"],
                    "distance": search_profile["distance"]["value"],
                    "public_id": search_profile["miniProfile"]["publicIdentifier"],
                }
            )
        return results

    def search_companies(self, max_results=None, results=None):
        """
        Do a company search via the blended-search endpoint.
        Note: try swap from blended search to cluster
        """
        # BUG FIX: mutable default argument replaced by a None sentinel.
        if results is None:
            results = []
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspention
        # Params are baked into the URL: List(...) values do not survive
        # normal `params` encoding the way this endpoint expects.
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/search/blended?keywords=s&origin=GLOBAL_SEARCH_HEADER&count=10&guides=List(resultType-%3Ecompanies)&q=all&filters=List(resultType-%3Ecompanies)&start={len(results)}"
        )
        data = res.json()
        total_found = data.get("paging", {}).get("total")
        if (
            len(data["elements"]) == 0
            or len(data["elements"][0]["elements"]) == 0
            or total_found is None
            or (max_results is not None and len(results) >= max_results)
            or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
        ):
            return results
        results.extend(data["elements"][0]["elements"])
        self.logger.debug(f"results grew: {len(results)}")
        return self.search_companies(max_results=max_results, results=results)

    def get_profile_contact_info(self, public_id=None, urn_id=None):
        """
        Return contact info for a single profile.

        [public_id] - public identifier i.e. tom-quirk-1928345
        [urn_id] - id provided by the related URN
        """
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/identity/profiles/{public_id or urn_id}/profileContactInfo"
        )
        data = res.json()
        contact_info = {
            "email_address": data.get("emailAddress"),
            "websites": [],
            "phone_numbers": data.get("phoneNumbers", []),
        }
        websites = data.get("websites", [])
        for item in websites:
            if "com.linkedin.voyager.identity.profile.StandardWebsite" in item["type"]:
                item["label"] = item["type"][
                    "com.linkedin.voyager.identity.profile.StandardWebsite"
                ]["category"]
            # BUG FIX: this branch tested `"" in item["type"]`, which is never
            # true for these payloads, so custom websites never got a label;
            # check for the CustomWebsite key the body actually reads.
            elif "com.linkedin.voyager.identity.profile.CustomWebsite" in item["type"]:
                item["label"] = item["type"][
                    "com.linkedin.voyager.identity.profile.CustomWebsite"
                ]["label"]
            del item["type"]
        contact_info["websites"] = websites
        return contact_info

    def get_profile(self, public_id=None, urn_id=None):
        """
        Return data for a single profile.

        [public_id] - public identifier i.e. tom-quirk-1928345
        [urn_id] - id provided by the related URN
        """
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspention
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/identity/profiles/{public_id or urn_id}/profileView"
        )
        data = res.json()
        if data and "status" in data and data["status"] != 200:
            self.logger.info("request failed: {}".format(data["message"]))
            return {}
        # massage [profile] data
        profile = data["profile"]
        if "miniProfile" in profile:
            if "picture" in profile["miniProfile"]:
                profile["displayPictureUrl"] = profile["miniProfile"]["picture"][
                    "com.linkedin.common.VectorImage"
                ]["rootUrl"]
            profile["profile_id"] = get_id_from_urn(profile["miniProfile"]["entityUrn"])
            del profile["miniProfile"]
        # Strip payload fields callers don't need.
        del profile["defaultLocale"]
        del profile["supportedLocales"]
        del profile["versionTag"]
        del profile["showEducationOnProfileTopCard"]
        # massage [experience] data
        experience = data["positionView"]["elements"]
        for item in experience:
            if "company" in item and "miniCompany" in item["company"]:
                if "logo" in item["company"]["miniCompany"]:
                    logo = item["company"]["miniCompany"]["logo"].get(
                        "com.linkedin.common.VectorImage"
                    )
                    if logo:
                        item["companyLogoUrl"] = logo["rootUrl"]
                del item["company"]["miniCompany"]
        profile["experience"] = experience
        # massage [skills] data
        skills = [item["name"] for item in data["skillView"]["elements"]]
        profile["skills"] = skills
        # massage [education] data
        education = data["educationView"]["elements"]
        for item in education:
            if "school" in item:
                if "logo" in item["school"]:
                    item["school"]["logoUrl"] = item["school"]["logo"][
                        "com.linkedin.common.VectorImage"
                    ]["rootUrl"]
                    del item["school"]["logo"]
        profile["education"] = education
        return profile

    def get_profile_connections(self, urn_id):
        """
        Return a list of profile ids connected to profile of given [urn_id]
        """
        return self.search_people(connection_of=urn_id, network_depth="F")

    def get_profile_networkinfo(self, urn_id):
        """
        Return the nework info connected to the profile of the given [urn_id]
        """
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspention
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/identity/profiles/{urn_id}/networkinfo"
        )
        return res.json()

    def get_company_updates(self, public_id=None, urn_id=None, max_results=None, results=None):
        """
        Return a list of company posts.

        [public_id] - public identifier ie - microsoft
        [urn_id] - id provided by the related URN
        """
        # BUG FIX: mutable default argument replaced by a None sentinel.
        if results is None:
            results = []
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspention
        params = {
            # NOTE(review): this is a one-element *set* literal; requests
            # serializes iterable values element-by-element so it works, but a
            # plain string is presumably what was intended -- verify.
            "companyUniversalName": {public_id or urn_id},
            "q": "companyFeedByUniversalName",
            "moduleKey": "member-share",
            "count": Linkedin._MAX_UPDATE_COUNT,
            "start": len(results),
        }
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/feed/updates", params=params
        )
        data = res.json()
        if (
            len(data["elements"]) == 0
            or (max_results is not None and len(results) >= max_results)
            or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
        ):
            return results
        results.extend(data["elements"])
        self.logger.debug(f"results grew: {len(results)}")
        return self.get_company_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)

    def get_profile_updates(self, public_id=None, urn_id=None, max_results=None, results=None):
        """
        Return a list of profile posts.

        [public_id] - public identifier i.e. tom-quirk-1928345
        [urn_id] - id provided by the related URN
        """
        # BUG FIX: mutable default argument replaced by a None sentinel.
        if results is None:
            results = []
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspention
        params = {
            # NOTE(review): one-element set literal, as in get_company_updates.
            "profileId": {public_id or urn_id},
            "q": "memberShareFeed",
            "moduleKey": "member-share",
            "count": Linkedin._MAX_UPDATE_COUNT,
            "start": len(results),
        }
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/feed/updates", params=params
        )
        data = res.json()
        if (
            len(data["elements"]) == 0
            or (max_results is not None and len(results) >= max_results)
            or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
        ):
            return results
        results.extend(data["elements"])
        self.logger.debug(f"results grew: {len(results)}")
        return self.get_profile_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)

    def get_current_profile_views(self):
        """
        Get profile view statistics, including chart data.
        """
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/identity/panels"
        )
        data = res.json()
        return data['elements'][0]['value']['com.linkedin.voyager.identity.me.ProfileViewsByTimePanel']

    def get_school(self, public_id):
        """
        Return data for a single school.

        [public_id] - public identifier i.e. uq
        """
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspention
        params = {
            "decoration": (
                """
                (
                autoGenerated,backgroundCoverImage,
                companyEmployeesSearchPageUrl,companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,
                entityUrn,followingInfo,foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,
                paidCompany,partnerCompanyUrl,partnerLogo,partnerLogoImage,rankForTopCompanies,salesNavigatorCompanyUrl,
                school,showcase,staffCount,staffCountRange,staffingCompany,topCompaniesListName,universalName,url,
                companyIndustries*,industries,specialities,
                acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
                affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
                groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),
                showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)
                )
                """
            ),
            "q": "universalName",
            "universalName": public_id,
        }
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/organization/companies", params=params
        )
        data = res.json()
        if data and "status" in data and data["status"] != 200:
            self.logger.info("request failed: {}".format(data["message"]))
            return {}
        school = data["elements"][0]
        return school

    def get_similar_companies(self, public_id):
        """
        Return similar companies for a single company.

        [public_id] - public identifier i.e. univeristy-of-queensland
        """
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspention
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/organization/companies?count={Linkedin._MAX_SEARCH_COUNT}&companyUniversalName={public_id}&q=similarCompanies&start=0&decorationId=com.linkedin.voyager.deco.organization.web.WebSimilarCompanyCardWithRelevanceReason-3"
        )
        data = res.json()
        return data

    def get_company(self, public_id):
        """
        Return data for a single company.

        [public_id] - public identifier i.e. univeristy-of-queensland
        """
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspention
        params = {
            "decoration": (
                """
                (
                affiliatedCompaniesWithEmployeesRollup,affiliatedCompaniesWithJobsRollup,articlePermalinkForTopCompanies,
                autoGenerated,backgroundCoverImage,companyEmployeesSearchPageUrl,
                companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,entityUrn,followingInfo,
                foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,paidCompany,
                partnerCompanyUrl,partnerLogo,partnerLogoImage,permissions,rankForTopCompanies,
                salesNavigatorCompanyUrl,school,showcase,staffCount,staffCountRange,staffingCompany,
                topCompaniesListName,universalName,url,companyIndustries*,industries,specialities,
                acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
                affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
                groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),
                showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)
                )
                """
            ),
            "q": "universalName",
            "universalName": public_id,
        }
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/organization/companies", params=params
        )
        data = res.json()
        if data and "status" in data and data["status"] != 200:
            self.logger.info("request failed: {}".format(data["message"]))
            return {}
        company = data["elements"][0]
        return company

    def get_conversation_details(self, profile_urn_id):
        """
        Return the conversation (or "message thread") details for a given [profile_urn_id]
        """
        # passing `params` doesn't work properly, think it's to do with List().
        # Might be a bug in `requests`?
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/messaging/conversations?\
keyVersion=LEGACY_INBOX&q=participants&recipients=List({profile_urn_id})"
        )
        data = res.json()
        item = data["elements"][0]
        item["id"] = get_id_from_urn(item["entityUrn"])
        return item

    def get_conversations(self):
        """
        Return list of conversations the user is in.
        """
        params = {"keyVersion": "LEGACY_INBOX"}
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/messaging/conversations", params=params
        )
        return res.json()

    def get_conversation(self, conversation_urn_id):
        """
        Return the full conversation at a given [conversation_urn_id]
        """
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events"
        )
        return res.json()

    def send_message(self, conversation_urn_id, message_body):
        """
        Send [message_body] to the conversation at [conversation_urn_id].

        Returns True on success (HTTP 201 from the API).
        """
        params = {"action": "create"}
        payload = json.dumps(
            {
                "eventCreate": {
                    "value": {
                        "com.linkedin.voyager.messaging.create.MessageCreate": {
                            "body": message_body,
                            "attachments": [],
                            "attributedBody": {"text": message_body, "attributes": []},
                            "mediaAttachments": [],
                        }
                    }
                }
            }
        )
        res = self.client.session.post(
            f"{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events",
            params=params,
            data=payload,
        )
        return res.status_code == 201
| """
Provides linkedin api-related code
"""
import random
import logging
from time import sleep
import json
from linkedin_api.utils.helpers import get_id_from_urn
from linkedin_api.client import Client
logger = logging.getLogger(__name__)
class Linkedin(object):
"""
Class for accessing Linkedin API.
"""
_MAX_UPDATE_COUNT = 100 # max seems to be 100
_MAX_SEARCH_COUNT = 49 # max seems to be 49
_MAX_REPEATED_REQUESTS = (
200
) # VERY conservative max requests count to avoid rate-limit
def __init__(self, username, password):
self.client = Client(debug=True)
self.client.authenticate(username, password)
self.logger = logger
def search(self, params, max_results=None, results=[]):
"""
Do a search.
"""
sleep(
random.randint(0, 1)
) # sleep a random duration to try and evade suspention
count = (
max_results
if max_results and max_results <= Linkedin._MAX_SEARCH_COUNT
else Linkedin._MAX_SEARCH_COUNT
)
default_params = {
"count": count,
"guides": "List()",
"origin": "GLOBAL_SEARCH_HEADER",
"q": "guided",
"start": len(results),
}
default_params.update(params)
res = self.client.session.get(
f"{self.client.API_BASE_URL}/search/cluster", params=default_params
)
data = res.json()
total_found = data.get("paging", {}).get("total")
# recursive base case
if (
len(data["elements"]) == 0
or (max_results is not None and len(results) >= max_results)
or total_found is None
or len(results) >= total_found
or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
):
return results
results.extend(data["elements"][0]["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.search(params, results=results, max_results=max_results)
def search_people(
self,
keywords=None,
connection_of=None,
network_depth=None,
regions=None,
industries=None,
):
"""
Do a people search.
"""
guides = ["v->PEOPLE"]
if connection_of:
guides.append(f"facetConnectionOf->{connection_of}")
if network_depth:
guides.append(f"facetNetwork->{network_depth}")
if regions:
guides.append(f'facetGeoRegion->{"|".join(regions)}')
if industries:
guides.append(f'facetIndustry->{"|".join(industries)}')
params = {"guides": "List({})".format(",".join(guides))}
if keywords:
params["keywords"] = keywords
data = self.search(params)
results = []
for item in data:
search_profile = item["hitInfo"][
"com.linkedin.voyager.search.SearchProfile"
]
profile_id = search_profile["id"]
distance = search_profile["distance"]["value"]
results.append(
{
"urn_id": profile_id,
"distance": distance,
"public_id": search_profile["miniProfile"]["publicIdentifier"],
}
)
return results
def search_companies(self, max_results=None, results=[]):
"""
Do a company search
Note: try swap from blended search to cluster
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
#Search params from main search, here for reference
'''
default_params = {
"count": count,
"guides": "List()",
"origin": "GLOBAL_SEARCH_HEADER",
"q": "guided",
"start": len(results),
}
'''
default_params = {
"origin": "GLOBAL_SEARCH_HEADER",
"guides": "List(resultType->companies)",
"count": "10",
"q": "guided",
"filters": "List(resultType->companies)",
"start": len(results)
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/search/blended?keywords=s&origin=GLOBAL_SEARCH_HEADER&count=10&guides=List(resultType-%3Ecompanies)&q=all&filters=List(resultType-%3Ecompanies)&start={len(results)}"
)
data = res.json()
total_found = data.get("paging", {}).get("total")
if (
len(data["elements"]) == 0 or
len(data["elements"][0]["elements"]) == 0
or total_found is None
or (max_results is not None and len(results) >= max_results)
or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
):
return results
results.extend(data["elements"][0]["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.search_companies(max_results=max_results, results=results)
def get_profile_contact_info(self, public_id=None, urn_id=None):
"""
Return data for a single profile.
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
res = self.client.session.get(
f"{self.client.API_BASE_URL}/identity/profiles/{public_id or urn_id}/profileContactInfo"
)
data = res.json()
contact_info = {
"email_address": data.get("emailAddress"),
"websites": [],
"phone_numbers": data.get("phoneNumbers", []),
}
websites = data.get("websites", [])
for item in websites:
if "com.linkedin.voyager.identity.profile.StandardWebsite" in item["type"]:
item["label"] = item["type"][
"com.linkedin.voyager.identity.profile.StandardWebsite"
]["category"]
elif "" in item["type"]:
item["label"] = item["type"][
"com.linkedin.voyager.identity.profile.CustomWebsite"
]["label"]
del item["type"]
contact_info["websites"] = websites
return contact_info
def get_profile(self, public_id=None, urn_id=None):
"""
Return data for a single profile.
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
res = self.client.session.get(
f"{self.client.API_BASE_URL}/identity/profiles/{public_id or urn_id}/profileView"
)
data = res.json()
if data and "status" in data and data["status"] != 200:
self.logger.info("request failed: {}".format(data["message"]))
return {}
# massage [profile] data
profile = data["profile"]
if "miniProfile" in profile:
if "picture" in profile["miniProfile"]:
profile["displayPictureUrl"] = profile["miniProfile"]["picture"][
"com.linkedin.common.VectorImage"
]["rootUrl"]
profile["profile_id"] = get_id_from_urn(profile["miniProfile"]["entityUrn"])
del profile["miniProfile"]
del profile["defaultLocale"]
del profile["supportedLocales"]
del profile["versionTag"]
del profile["showEducationOnProfileTopCard"]
# massage [experience] data
experience = data["positionView"]["elements"]
for item in experience:
if "company" in item and "miniCompany" in item["company"]:
if "logo" in item["company"]["miniCompany"]:
logo = item["company"]["miniCompany"]["logo"].get(
"com.linkedin.common.VectorImage"
)
if logo:
item["companyLogoUrl"] = logo["rootUrl"]
del item["company"]["miniCompany"]
profile["experience"] = experience
# massage [skills] data
skills = [item["name"] for item in data["skillView"]["elements"]]
profile["skills"] = skills
# massage [education] data
education = data["educationView"]["elements"]
for item in education:
if "school" in item:
if "logo" in item["school"]:
item["school"]["logoUrl"] = item["school"]["logo"][
"com.linkedin.common.VectorImage"
]["rootUrl"]
del item["school"]["logo"]
profile["education"] = education
return profile
def get_profile_connections(self, urn_id):
"""
Return a list of profile ids connected to profile of given [urn_id]
"""
return self.search_people(connection_of=urn_id, network_depth="F")
def get_profile_networkinfo(self, urn_id):
"""
Return the nework info connected to the profile of the given [urn_id]
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
res = self.client.session.get(
f"{self.client.API_BASE_URL}/identity/profiles/{urn_id}/networkinfo"
)
return res.json()
def get_company_updates(self, public_id=None, urn_id=None, max_results=None, results=[]):
""""
Return a list of company posts
[public_id] - public identifier ie - microsoft
[urn_id] - id provided by the related URN
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
params = {
"companyUniversalName": {public_id or urn_id},
"q": "companyFeedByUniversalName",
"moduleKey": "member-share",
"count": Linkedin._MAX_UPDATE_COUNT,
"start": len(results),
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/feed/updates", params=params
)
data = res.json()
if (
len(data["elements"]) == 0
or (max_results is not None and len(results) >= max_results)
or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
):
return results
results.extend(data["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.get_company_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)
def get_profile_updates(self, public_id=None, urn_id=None, max_results=None, results=[]):
""""
Return a list of profile posts
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
params = {
"profileId": {public_id or urn_id},
"q": "memberShareFeed",
"moduleKey": "member-share",
"count": Linkedin._MAX_UPDATE_COUNT,
"start": len(results),
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/feed/updates", params=params
)
data = res.json()
if (
len(data["elements"]) == 0
or (max_results is not None and len(results) >= max_results)
or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
):
return results
results.extend(data["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.get_profile_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)
def get_current_profile_views(self):
"""
Get profile view statistics, including chart data.
"""
res = self.client.session.get(
f"{self.client.API_BASE_URL}/identity/panels"
)
data = res.json()
return data['elements'][0]['value']['com.linkedin.voyager.identity.me.ProfileViewsByTimePanel']
def get_school(self, public_id):
"""
Return data for a single school.
[public_id] - public identifier i.e. uq
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
params = {
"decoration": (
"""
(
autoGenerated,backgroundCoverImage,
companyEmployeesSearchPageUrl,companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,
entityUrn,followingInfo,foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,
paidCompany,partnerCompanyUrl,partnerLogo,partnerLogoImage,rankForTopCompanies,salesNavigatorCompanyUrl,
school,showcase,staffCount,staffCountRange,staffingCompany,topCompaniesListName,universalName,url,
companyIndustries*,industries,specialities,
acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),
showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)
)
"""
),
"q": "universalName",
"universalName": public_id,
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/organization/companies", params=params
)
data = res.json()
if data and "status" in data and data["status"] != 200:
self.logger.info("request failed: {}".format(data["message"]))
return {}
school = data["elements"][0]
return school
def get_similar_companies(self, public_id):
"""
Return similar companies for a single company.
[public_id] - public identifier i.e. univeristy-of-queensland
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
res = self.client.session.get(
f"{self.client.API_BASE_URL}/organization/companies?count={Linkedin._MAX_SEARCH_COUNT}&companyUniversalName={public_id}&q=similarCompanies&start=0&decorationId=com.linkedin.voyager.deco.organization.web.WebSimilarCompanyCardWithRelevanceReason-3"
)
data = res.json()
return data
    def get_company(self, public_id):
        """
        Return data for a single company.

        [public_id] - public identifier i.e. univeristy-of-queensland

        Returns an empty dict (after logging the API message) when the
        response carries a non-200 status.
        """
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspension
        # The "decoration" parameter tells the Voyager API which fields to
        # include in the response.
        params = {
            "decoration": (
                """
                (
                affiliatedCompaniesWithEmployeesRollup,affiliatedCompaniesWithJobsRollup,articlePermalinkForTopCompanies,
                autoGenerated,backgroundCoverImage,companyEmployeesSearchPageUrl,
                companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,entityUrn,followingInfo,
                foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,paidCompany,
                partnerCompanyUrl,partnerLogo,partnerLogoImage,permissions,rankForTopCompanies,
                salesNavigatorCompanyUrl,school,showcase,staffCount,staffCountRange,staffingCompany,
                topCompaniesListName,universalName,url,companyIndustries*,industries,specialities,
                acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
                affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
                groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),
                showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)
                )
                """
            ),
            "q": "universalName",
            "universalName": public_id,
        }

        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/organization/companies", params=params
        )

        data = res.json()

        if data and "status" in data and data["status"] != 200:
            self.logger.info("request failed: {}".format(data["message"]))
            return {}

        company = data["elements"][0]

        return company
def get_conversation_details(self, profile_urn_id):
"""
Return the conversation (or "message thread") details for a given [public_profile_id]
"""
# passing `params` doesn't work properly, think it's to do with List().
# Might be a bug in `requests`?
res = self.client.session.get(
f"{self.client.API_BASE_URL}/messaging/conversations?\
keyVersion=LEGACY_INBOX&q=participants&recipients=List({profile_urn_id})"
)
data = res.json()
item = data["elements"][0]
item["id"] = get_id_from_urn(item["entityUrn"])
return item
def get_conversations(self):
"""
Return list of conversations the user is in.
"""
params = {"keyVersion": "LEGACY_INBOX"}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/messaging/conversations", params=params
)
return res.json()
def get_conversation(self, conversation_urn_id):
"""
Return the full conversation at a given [conversation_urn_id]
"""
res = self.client.session.get(
f"{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events"
)
return res.json()
    def send_message(self, conversation_urn_id, message_body):
        """
        Send a message to the conversation at [conversation_urn_id].

        Returns True when the message event was created (HTTP 201),
        False otherwise.
        """
        params = {"action": "create"}
        # The Voyager API expects the message wrapped in an eventCreate payload.
        payload = json.dumps(
            {
                "eventCreate": {
                    "value": {
                        "com.linkedin.voyager.messaging.create.MessageCreate": {
                            "body": message_body,
                            "attachments": [],
                            "attributedBody": {"text": message_body, "attributes": []},
                            "mediaAttachments": [],
                        }
                    }
                }
            }
        )

        res = self.client.session.post(
            f"{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events",
            params=params,
            data=payload,
        )

        # 201 Created indicates the message event was accepted.
        return res.status_code == 201
|
# Copyright 2020 University of New South Wales, University of Sydney
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import pathlib
import pydicom
import numpy as np
import SimpleITK as sitk
from skimage.draw import polygon
from loguru import logger
from datetime import datetime
def flatten(itr):
    """Recursively yield the leaves of an arbitrarily nested iterable.

    Strings, bytes and SimpleITK images are treated as atomic leaves
    (exact type match, deliberately not isinstance) and yielded whole;
    any other non-iterable encountered during recursion is yielded as-is.
    """
    if type(itr) in (str, bytes, sitk.Image):
        yield itr
        return
    for element in itr:
        try:
            yield from flatten(element)
        except TypeError:
            # element is not iterable -> it is a leaf
            yield element
def get_suv_bw_scale_factor(ds):
    """
    Compute the body-weight SUV (SUVbw) scale factor for a PET dataset.

    Modified from
    https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf

    Args:
        ds (pydicom.Dataset): PET image dataset. Must be decay- and
            attenuation-corrected BQML data (enforced by the asserts below),
            unless the Philips-specific "CNTS" units are used.

    Returns:
        float: factor by which stored pixel values are multiplied to get SUVbw.
    """
    if ds.Units == "CNTS":
        # Try to find the Philips private scale factor
        return float(ds[0x7053, 0x1000].value)
    assert ds.Modality == "PT"
    assert "DECY" in ds.CorrectedImage
    assert "ATTN" in ds.CorrectedImage
    assert "START" in ds.DecayCorrection
    assert ds.Units == "BQML"

    half_life = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)

    if "SeriesTime" in ds:
        series_date_time = ds.SeriesDate + "_" + ds.SeriesTime
        # Trim any fractional seconds so strptime with %H%M%S succeeds
        if "." in series_date_time:
            series_date_time = series_date_time[
                : -(len(series_date_time) - series_date_time.index("."))
            ]
        series_date_time = datetime.strptime(series_date_time, "%Y%m%d_%H%M%S")

    # NOTE(review): this guard repeats `"SeriesTime" in ds` — it looks like a
    # copy-paste of the one above and presumably should check for
    # RadiopharmaceuticalStartTime instead. Also, if "SeriesTime" is absent,
    # series_date_time and start_time are unbound and the subtraction below
    # raises NameError — TODO confirm intended behaviour.
    if "SeriesTime" in ds:
        start_time = (
            ds.SeriesDate
            + "_"
            + ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime
        )
        if "." in start_time:
            start_time = start_time[: -(len(start_time) - start_time.index("."))]
        start_time = datetime.strptime(start_time, "%Y%m%d_%H%M%S")

    decay_time = (series_date_time - start_time).seconds
    injected_dose = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
    # Radioactive decay: dose remaining after decay_time given the half-life
    decayed_dose = injected_dose * pow(2, -decay_time / half_life)
    patient_weight = float(ds.PatientWeight)
    # Weight in grams over decayed dose in Bq
    suv_bw_scale_factor = patient_weight * 1000 / decayed_dose

    return suv_bw_scale_factor
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name="UNKNOWN"):
    """
    Attempts to return some information from a DICOM
    This is typically used for naming converted NIFTI files

    Args:
        dicom_object (pydicom.dataset.FileDataset): The DICOM object
        return_extra (bool, optional): return information that is usually not required
        sop_class_name (str, optional): fallback SOP class name used when the
            DICOM object has no SOPClassUID. Defaults to "UNKNOWN".

    Returns:
        info (str): Some extracted information. Returns None for image
            modalities other than CT/MR/PT and for non-image SOP classes.
    """
    try:
        dicom_sop_class_name = dicom_object.SOPClassUID.name
    except AttributeError:
        logger.warning(f"Could not find DICOM SOP Class UID, using {sop_class_name}.")
        dicom_sop_class_name = sop_class_name

    if "Image" in dicom_sop_class_name:
        # Get the modality
        image_modality = dicom_object.Modality
        logger.info(f"  Image modality: {image_modality}")

        if image_modality == "CT":
            # There is typically not much extra information
            # At the moment, we do not return anything for CT imaging
            if return_extra:
                try:
                    protocol_name = dicom_object.ProtocolName

                    if protocol_name != "":
                        return re.sub(r"[^\w]", "_", protocol_name).upper()
                except AttributeError:
                    logger.warning("  Could not find ProtocolName")

            return ""

        elif image_modality == "MR":
            # Not much consistency, but we can get the protocol name
            try:
                protocol_name = re.sub(r"[^\w]", "_", dicom_object.ProtocolName).upper()
            except AttributeError:
                logger.warning("  Could not find ProtocolName")
                protocol_name = ""

            try:
                sequence_name = re.sub(r"[^\w]", "_", dicom_object.SequenceName).upper()
            except AttributeError:
                logger.warning("  Could not find SequenceName")
                sequence_name = ""

            try:
                series_description = re.sub(r"[^\w]", "_", dicom_object.SeriesDescription).upper()
            except AttributeError:
                # BUG FIX: this warning previously said "SequenceName" (copy-paste)
                logger.warning("  Could not find SeriesDescription")
                series_description = ""

            combined_name = "_".join([protocol_name, sequence_name, series_description])

            while "__" in combined_name:
                combined_name = combined_name.replace("__", "_")

            if protocol_name != "" and not return_extra:
                return protocol_name

            return combined_name

        elif image_modality == "PT":
            # Not much experience with this
            # We can search through the corrections applied
            # Return whether or not attentuation is applied
            try:
                corrections = dicom_object.CorrectedImage
            except AttributeError:
                corrections = "NONE"

            if "ATTN" in corrections:
                return "AC"
            else:
                return "NAC"
def safe_sort_dicom_image_list(dicom_image_list):
    """
    Sorts a list of DICOM image files based on a DICOM tag value.

    This is a much safer method than reading SliceLocation.
    It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).
    The list of DICOM files is sorted by projecting the image position onto the axis normal to the
    place defined by the image orientation.

    This accounts for differences in patient position (e.g. HFS/FFS).

    Args:
        dicom_image_list (list): DICOM image file names.

    Returns:
        list: the file names sorted by slice position.
    """
    sorted_dict = {}
    for dicom_file in dicom_image_list:
        dcm = pydicom.read_file(dicom_file, force=True)

        image_position = np.array(dcm.ImagePositionPatient, dtype=float)
        image_orientation = np.array(dcm.ImageOrientationPatient, dtype=float)

        # Plane normal = row direction cross column direction
        image_plane_normal = np.cross(image_orientation[:3], image_orientation[3:])

        # NOTE(review): this takes only the z-component of the elementwise
        # product rather than a full dot product, so it is a true projection
        # only for (near-)axial orientations — TODO confirm for oblique series.
        slice_location = (image_position * image_plane_normal)[2]

        sorted_dict[dicom_file] = slice_location

    # Sort files by their computed slice location
    sorter_safe = lambda dcm_file: sorted_dict[dcm_file]

    return sorted(dicom_image_list, key=sorter_safe)
def fix_missing_data(contour_data_list):
    """
    Fixes missing points in contouring using simple linear interpolation

    Args:
        contour_data_list (list): The contour data for each slice, as a flat
            [x0, y0, z0, x1, y1, z1, ...] sequence. Missing values are
            encoded as the empty string "".

    Returns:
        contour_data (numpy array): Interpolated contour data
    """
    contour_data = np.array(contour_data_list)

    # BUG FIX: the original test was `contour_data.any() == ""`, which is
    # always False for numeric arrays and fails for string arrays, so the
    # repair branch could never run as intended. Detect missing entries by
    # an elementwise comparison on non-numeric arrays instead.
    has_missing = contour_data.dtype.kind not in "fiu" and np.any(contour_data == "")

    if has_missing:
        logger.warning("    Missing values detected.")
        missing_values = np.where(contour_data == "")[0]
        if missing_values.shape[0] > 1:
            logger.warning("    More than one value missing, fixing this isn't implemented yet...")
        else:
            logger.warning("    Only one value missing.")
            missing_index = missing_values[0]
            # Flat layout is [x, y, z, x, y, z, ...], so index mod 3 gives the axis
            missing_axis = missing_index % 3
            if missing_axis == 0:
                logger.warning("    Missing value in x axis: interpolating.")
                # NOTE(review): the neighbour offsets below (-3/+3, wrap to
                # [0]/[-3]) assume a closed contour — TODO confirm the wrap
                # indices for the y-axis branch, which look inconsistent.
                if missing_index > len(contour_data) - 3:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[0]
                elif missing_index == 0:
                    lower_val = contour_data[-3]
                    upper_val = contour_data[3]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (lower_val + upper_val)
            elif missing_axis == 1:
                logger.warning("    Missing value in y axis: interpolating.")
                if missing_index > len(contour_data) - 2:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[1]
                elif missing_index == 0:
                    lower_val = contour_data[-2]
                    upper_val = contour_data[4]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (lower_val + upper_val)
            else:
                logger.warning("    Missing value in z axis: taking slice value")
                # All z values on a planar contour are equal; use the minimum
                # of the remaining z entries.
                temp = contour_data[2::3].tolist()
                temp.remove("")
                contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
    return contour_data
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
    """
    This function is used to generate a binary mask from a set of vertices.
    This allows us to convert from DICOM-RTStruct format to any imaging format.

    Args:
        image ([SimpleITK.Image]): The image, used to copy imaging information
            (e.g. resolution, spacing)
        dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
        spacing_override (bool | tuple, optional): Overwrite the spacing.
            Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.

    Returns:
        list, list : final_struct_name_sequence, structure_list
    """
    if spacing_override:
        # Keep existing spacing wherever the override entry is zero
        current_spacing = list(image.GetSpacing())
        new_spacing = tuple(
            [
                current_spacing[k] if spacing_override[k] == 0 else spacing_override[k]
                for k in range(3)
            ]
        )
        image.SetSpacing(new_spacing)

    struct_point_sequence = dicom_struct.ROIContourSequence
    # Replace whitespace in ROI names with underscores
    struct_name_sequence = [
        "_".join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence
    ]

    structure_list = []
    final_struct_name_sequence = []

    for structIndex, structure_name in enumerate(struct_name_sequence):
        # Blank mask in (z, y, x) voxel order, matching sitk.GetImageFromArray
        image_blank = np.zeros(image.GetSize()[::-1], dtype=np.uint8)
        logger.info(
            "  Converting structure {0} with name: {1}".format(structIndex, structure_name)
        )

        if structIndex >= len(struct_point_sequence):
            logger.warning("  Contour sequence is missing, skipping.")
            continue

        if not hasattr(struct_point_sequence[structIndex], "ContourSequence"):
            logger.warning("  No contour sequence found for this structure, skipping.")
            continue

        if len(struct_point_sequence[structIndex].ContourSequence) == 0:
            logger.warning("  Contour sequence is empty, skipping.")
            continue

        if (
            not struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType
            == "CLOSED_PLANAR"
        ):
            logger.warning("  This is not a closed planar structure, skipping.")
            continue

        for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
            # Repair any missing vertices before converting to physical points
            contour_data = fix_missing_data(
                struct_point_sequence[structIndex].ContourSequence[sl].ContourData
            )

            struct_slice_contour_data = np.array(contour_data, dtype=np.double)
            # Flat [x, y, z, ...] -> (N, 3) array of physical vertices
            vertexArr_physical = struct_slice_contour_data.reshape(
                struct_slice_contour_data.shape[0] // 3, 3
            )
            # Transform each physical vertex to image (voxel) coordinates
            point_arr = np.array(
                [image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]
            ).T
            [xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]
            zIndex = point_arr[2][0]

            # Every vertex of a closed planar contour must lie on one axial slice.
            # NOTE(review): quit() terminates the whole process here; raising an
            # exception would let callers recover — TODO confirm intended.
            if np.any(point_arr[2] != zIndex):
                logger.error("  Axial slice index varies in contour. Quitting now.")
                logger.error("  Structure: {0}".format(structure_name))
                logger.error("  Slice index: {0}".format(zIndex))
                quit()

            if zIndex >= image.GetSize()[2]:
                logger.warning("  Slice index greater than image size. Skipping slice.")
                logger.warning("  Structure: {0}".format(structure_name))
                logger.warning("  Slice index: {0}".format(zIndex))
                continue

            # Rasterise the polygon onto this slice and accumulate into the mask
            sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
            filledIndicesX, filledIndicesY = polygon(
                xVertexArr_image, yVertexArr_image, shape=sliceArr.shape
            )
            sliceArr[filledIndicesX, filledIndicesY] = 1
            image_blank[zIndex] += sliceArr.T

        # Binarise (overlapping contours may sum above 1) and copy geometry
        struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))
        struct_image.CopyInformation(image)
        structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
        # Clean the structure name for use in file names
        structure_name_clean = re.sub(r"[^\w]", "_", structure_name).upper()
        while "__" in structure_name_clean:
            structure_name_clean = structure_name_clean.replace("__", "_")
        final_struct_name_sequence.append(structure_name_clean)

    return final_struct_name_sequence, structure_list
def process_dicom_file_list(dicom_file_list, parent_sorting_field="PatientName", verbose=False):
    """
    Organise the DICOM files by the series UID

    Builds a two-level dictionary: the value of `parent_sorting_field`
    (e.g. PatientName) maps to a dictionary of series UID -> file names.
    """
    dicom_series_dict_parent = {}

    for i, dicom_file in enumerate(sorted(dicom_file_list)):
        if verbose is True:
            logger.debug(f"  Sorting file {i}")

        dicom_file = dicom_file.as_posix()

        # DICOMDIR index files cannot be converted; images are read directly.
        if "dicomdir" in dicom_file.lower():
            logger.warning(
                "DICOMDIR is not supported in this tool, images are read directly. Skipping."
            )
            continue

        dicom_object = pydicom.read_file(dicom_file, force=True)

        parent_key = dicom_object[parent_sorting_field].value
        series_dict = dicom_series_dict_parent.setdefault(parent_key, {})
        series_dict.setdefault(dicom_object.SeriesInstanceUID, []).append(dicom_file)

    return dicom_series_dict_parent
def process_dicom_series(
    dicom_series_dict,
    series_uid,
    parent_sorting_field="PatientName",
    return_extra=True,
    individual_file=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Process a single DICOM series (or one individual file) into SimpleITK objects.

    This is a generator. Depending on the SOP class of the series it yields:
        ("IMAGES", metadata_dict, initial_dicom, sitk_image)
        ("STRUCTURES", metadata_dict, dicom_object, structure_image_list)
        ("DOSES", metadata_dict, dicom_object, scaled_dose_image)

    Args:
        dicom_series_dict (dict): mapping of series UID -> list of DICOM files.
        series_uid (str): the series UID to process.
        parent_sorting_field (str, optional): DICOM field used for top-level
            sorting. Defaults to "PatientName".
        return_extra (bool, optional): passed through when reprocessing files
            individually. Defaults to True.
        individual_file (str | bool, optional): if given, process only this file.
            Defaults to False.
        initial_sop_class_name_default (str, optional): fallback SOP class name.
            Defaults to "UNKNOWN".
    """
    if not individual_file:
        logger.info(f"  Processing series UID: {series_uid}")
        dicom_file_list = dicom_series_dict[series_uid]
    else:
        logger.info(f"  Processing individual file: {individual_file}")
        dicom_file_list = [individual_file]

    logger.info(f"  Number of DICOM files: {len(dicom_file_list)}")

    initial_dicom = pydicom.read_file(dicom_file_list[0])

    # Get the data in the parent sorting field, clean with RegEx
    parent_sorting_data = re.sub(
        r"[^\w]", "_", str(initial_dicom[parent_sorting_field].value)
    ).upper()

    if parent_sorting_data == "":
        logger.error(
            f"Could not find any data in {parent_sorting_field}. This is very bad, the data cannot be sorted properly."
        )
        """
        ! TO DO
        Implement a routine to let a user correlate a root directory with a name
        """
        parent_sorting_data = "TEMP"

    try:
        initial_dicom_sop_class_name = initial_dicom.SOPClassUID.name
    except AttributeError:
        logger.warning(
            f"Could not find DICOM SOP Class UID, using {initial_sop_class_name_default}."
        )
        initial_dicom_sop_class_name = initial_sop_class_name_default

    try:
        study_uid = initial_dicom.StudyInstanceUID
    except AttributeError:
        study_uid = "00001"

    """
    ! TO DO
    Need to check for secondary capture image storage
    This can include JPEGs with written information on them
    This is typically not very useful
    We can dump it to file
    Or just save the DICOM file in the folder of interest
    Not a big problem, sort out another day
    """

    # Check the potential types of DICOM files
    if (
        "Image" in initial_dicom_sop_class_name
        and initial_dicom_sop_class_name != "Secondary Capture Image Storage"
    ):
        # Load as an primary image
        sorted_file_list = safe_sort_dicom_image_list(dicom_file_list)
        try:
            image = sitk.ReadImage(sorted_file_list)
        except RuntimeError:
            logger.warning("  Could not read image into SimpleITK.")
            logger.info("  Processing files individually.")

            # BUG FIX: inside a generator, `return process_dicom_series(...)`
            # discards the sub-generator (nothing was ever yielded and only the
            # first file was considered). Delegate with `yield from` for every
            # file instead, then stop.
            for dicom_file in dicom_file_list:
                yield from process_dicom_series(
                    dicom_series_dict,
                    series_uid,
                    parent_sorting_field=parent_sorting_field,
                    return_extra=return_extra,
                    individual_file=dicom_file,
                    initial_sop_class_name_default=initial_sop_class_name_default,
                )
            return

        dicom_file_metadata = {
            "parent_sorting_data": parent_sorting_data,
            "study_uid": study_uid,
        }

        """
        ! TO DO - integrity check
        Read in all the files here, check the slice location and determine if any are missing
        """
        if initial_dicom.Modality == "PT":
            # scaling_factor = get_suv_bw_scale_factor(initial_dicom)
            # image *= scaling_factor

            # !TO DO
            # Work on PET SUV conversion
            pass

        """
        ! CHECKPOINT
        Some DCE MRI sequences have the same series UID
        Here we check the sequence name, and split if necessary
        """
        if initial_dicom.Modality == "MR":
            try:
                sequence_names = np.unique(
                    [pydicom.read_file(x).SequenceName for x in dicom_file_list]
                )
                sequence_dict = {}
                for dcm_name in dicom_file_list:
                    dcm_obj = pydicom.read_file(dcm_name)
                    var = dcm_obj.SequenceName
                    if var not in sequence_dict.keys():
                        sequence_dict[var] = [dcm_name]
                    else:
                        sequence_dict[var].append(dcm_name)
            except AttributeError:
                try:
                    logger.warning(
                        "  MRI sequence name not found. The SeriesDescription will be used instead."
                    )
                    sequence_names = np.unique(
                        [pydicom.read_file(x).SeriesDescription for x in dicom_file_list]
                    )
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = dcm_obj.SeriesDescription
                        if var not in sequence_dict.keys():
                            sequence_dict[var] = [dcm_name]
                        else:
                            sequence_dict[var].append(dcm_name)
                except AttributeError:
                    logger.warning(
                        "  MRI SeriesDescription not found. The AcquisitionComments will be used instead."
                    )
                    sequence_names = np.unique(
                        [pydicom.read_file(x).AcquisitionComments for x in dicom_file_list]
                    )
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = dcm_obj.AcquisitionComments
                        if var not in sequence_dict.keys():
                            sequence_dict[var] = [dcm_name]
                        else:
                            sequence_dict[var].append(dcm_name)

            if initial_dicom.Manufacturer == "GE MEDICAL SYSTEMS":
                # GE use the DICOM tag (0019, 10a2) [Raw data run number]
                # in Diffusion weighted MRI sequences
                # We need to separate this out to get the difference sequences
                if initial_dicom.SeriesDescription == "Diffusion Weighted":
                    # num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) )
                    # number_of_images / images_per_seq
                    num_images_per_seq = initial_dicom[(0x0021, 0x104F)].value

                    # COMPATIBILITY FIX: these f-strings previously reused double
                    # quotes inside a double-quoted f-string, which is a syntax
                    # error before Python 3.12 (PEP 701).
                    sequence_names = np.unique(
                        [
                            f"DWI_{str((pydicom.read_file(x)['InstanceNumber'].value - 1) // num_images_per_seq)}"
                            for x in dicom_file_list
                        ]
                    )
                    sequence_name_index_dict = {
                        name: index for index, name in enumerate(sequence_names)
                    }
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = f"DWI_{str((dcm_obj['InstanceNumber'].value - 1) // num_images_per_seq)}"
                        var_to_index = sequence_name_index_dict[var]
                        if var_to_index not in sequence_dict.keys():
                            sequence_dict[var_to_index] = [dcm_name]
                        else:
                            sequence_dict[var_to_index].append(dcm_name)
                    sequence_names = sorted(sequence_dict.keys())

            # BUG FIX: np.alen was deprecated in NumPy 1.18 and removed in 1.23;
            # the built-in len() works for both lists and 1-D arrays here.
            if len(sequence_names) > 1:
                logger.warning("  Two MR sequences were found under a single series UID.")
                logger.warning("  These will be split into separate images.")
                # Split up the DICOM file list by sequence name
                for sequence_name in sequence_names:
                    dicom_file_list_by_sequence = sequence_dict[sequence_name]
                    logger.info(sequence_name)
                    logger.info(len(dicom_file_list_by_sequence))
                    sorted_file_list = safe_sort_dicom_image_list(dicom_file_list_by_sequence)
                    initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
                    image_by_sequence = sitk.ReadImage(sorted_file_list)
                    dicom_file_metadata_by_sequence = {
                        "parent_sorting_data": parent_sorting_data,
                        "study_uid": study_uid,
                    }
                    yield "IMAGES", dicom_file_metadata_by_sequence, initial_dicom, image_by_sequence
                return  # Stop iteration

        yield "IMAGES", dicom_file_metadata, initial_dicom, image

    if "Structure" in initial_dicom_sop_class_name:
        # Load as an RT structure set
        # This should be done individually for each file
        logger.info(f"  Number of files: {len(dicom_file_list)}")
        for index, dicom_file in enumerate(dicom_file_list):
            dicom_object = pydicom.read_file(dicom_file, force=True)
            # We must also read in the corresponding DICOM image
            # This can be found by matching the references series UID to the series UID
            """
            ! TO DO
            What happens if there is an RT structure set with different referenced sequences?
            """
            # Get the "ReferencedFrameOfReferenceSequence", first item
            referenced_frame_of_reference_item = dicom_object.ReferencedFrameOfReferenceSequence[0]
            # Get the "RTReferencedStudySequence", first item
            # This retrieves the study UID
            # This might be useful, but would typically match the actual StudyInstanceUID in the
            # DICOM object
            rt_referenced_series_item = (
                referenced_frame_of_reference_item.RTReferencedStudySequence[0]
            )
            # Get the "RTReferencedSeriesSequence", first item
            # This retreives the actual referenced series UID, which we need to match imaging
            # parameters
            rt_referenced_series_again_item = rt_referenced_series_item.RTReferencedSeriesSequence[
                0
            ]
            # Get the appropriate series instance UID
            image_series_uid = rt_referenced_series_again_item.SeriesInstanceUID
            logger.info(f"  Item {index}: Matched SeriesInstanceUID = {image_series_uid}")
            # Read in the corresponding image
            sorted_file_list = safe_sort_dicom_image_list(dicom_series_dict[image_series_uid])
            image = sitk.ReadImage(sorted_file_list)
            initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
            (
                structure_name_list,
                structure_image_list,
            ) = transform_point_set_from_dicom_struct(image, dicom_object)
            dicom_file_metadata = {
                "parent_sorting_data": parent_sorting_data,
                "study_uid": study_uid,
                "structure_name_list": structure_name_list,
            }
            yield "STRUCTURES", dicom_file_metadata, dicom_object, structure_image_list

    if "Dose" in initial_dicom_sop_class_name:
        # Load as an RT Dose distribution
        # This should be done individually for each file
        logger.info(f"  Number of files: {len(dicom_file_list)}")
        for index, dicom_file in enumerate(dicom_file_list):
            dicom_object = pydicom.read_file(dicom_file, force=True)
            """
            ! CHECKPOINT
            There should only be a single RT dose file (with each series UID)
            If there are more, yield each
            """
            initial_dicom = pydicom.read_file(dicom_file, force=True)
            dicom_file_metadata = {
                "parent_sorting_data": parent_sorting_data,
                "study_uid": study_uid,
            }
            # We must read in as a float otherwise when we multiply by one later it will not work!
            raw_dose_image = sitk.ReadImage(dicom_file, sitk.sitkFloat32)
            dose_grid_scaling = dicom_object.DoseGridScaling
            logger.debug(f"  Dose grid scaling: {dose_grid_scaling} Gy")
            scaled_dose_image = raw_dose_image * dose_grid_scaling
            yield "DOSES", dicom_file_metadata, dicom_object, scaled_dose_image

    """
    ! TO DO
    1. (DONE) Implement conversion of dose files (to NIFTI images)
    2. Implement conversion of RT plan files to text dump
    3. Do something with other files (e.g. Deformable Image Registration stuff)
    """
    return
def _clean_and_write_image(
    file_to_write,
    field_filename,
    field_dir,
    files_written_for_field,
    output_file_suffix,
    overwrite_existing_files,
):
    """Clean a filename stem, record the target path, and write one image.

    The path is recorded even when writing is skipped because the file
    already exists and overwriting is disabled (matching original behaviour).
    """
    # Some cleaning: collapse repeated underscores and strip trailing ones
    while "__" in field_filename:
        field_filename = field_filename.replace("__", "_")
    while field_filename[-1] == "_":
        field_filename = field_filename[:-1]

    output_name = field_dir / (field_filename + output_file_suffix)
    files_written_for_field.append(output_name)

    if output_name.is_file():
        logger.warning(f"  File exists: {output_name}")
        if overwrite_existing_files:
            logger.warning("  You have selected to overwrite existing files.")
        else:
            logger.info(
                "  You have selected to NOT overwrite existing files. Continuing."
            )
            return

    sitk.WriteImage(file_to_write, output_name.as_posix())


def write_output_data_to_disk(
    output_data_dict,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
):
    """
    Write output to disk

    Args:
        output_data_dict (dict): "parent_sorting_data" plus one key per output
            field; each field maps a filename stem to an image (or a possibly
            nested list of images, which are written with an index suffix).
        output_directory (str, optional): root output directory. Defaults to "./".
        output_file_suffix (str, optional): file suffix. Defaults to ".nii.gz".
        overwrite_existing_files (bool, optional): overwrite on name collision.
            Defaults to False.

    Returns:
        dict: field -> list of pathlib.Path targets written (or skipped),
            or None if output_data_dict is None.
    """
    if output_data_dict is None:
        return

    filename_fields = [i for i in output_data_dict.keys() if i != "parent_sorting_data"]
    parent_sorting_data = output_data_dict["parent_sorting_data"]

    files_written = {}

    """
    Write the converted images to disk

    ! CONSIDER
    We could simply write as we go?
    Pro: save memory, important if processing very large files
    Con: Reading as we go allows proper indexing
    """
    for field in filename_fields:
        logger.info(f"  Writing files for field: {field}")
        field_dir = pathlib.Path(output_directory) / parent_sorting_data / field
        field_dir.mkdir(parents=True, exist_ok=True)
        files_written[field] = []

        for field_filename_base, field_list in output_data_dict[field].items():
            # Check if there is a list of images with matching names
            # This will depend on the name format chosen
            # If there is a list, we append an index as we write to disk
            if isinstance(field_list, (tuple, list)):
                # Flatten any nesting, then write each with an index suffix
                for suffix, file_to_write in enumerate(flatten(field_list)):
                    _clean_and_write_image(
                        file_to_write,
                        field_filename_base + f"_{suffix}",
                        field_dir,
                        files_written[field],
                        output_file_suffix,
                        overwrite_existing_files,
                    )
            else:
                _clean_and_write_image(
                    field_list,
                    field_filename_base,
                    field_dir,
                    files_written[field],
                    output_file_suffix,
                    overwrite_existing_files,
                )

    return files_written
def process_dicom_directory(
    dicom_directory,
    parent_sorting_field="PatientName",
    output_image_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}",
    output_structure_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}",
    output_dose_name_format="{parent_sorting_data}_{study_uid_index}_{DoseSummationType}",
    return_extra=True,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
    write_to_disk=True,
    verbose=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Crawl a DICOM directory (or an iterable of directories), convert images, RT structure
    sets and RT doses to SimpleITK, and optionally write the converted data to disk.

    Args:
        dicom_directory (str|pathlib.Path|iterable): Directory (or iterable of
            directories) searched recursively for files ending in .dcm/.dc3.
        parent_sorting_field (str, optional): DICOM tag used as the top-level sorting
            key (e.g. PatientName, PatientID).
        output_image_name_format (str, optional): Name template for images. Fields in
            {braces} are filled from the special values (parent_sorting_data,
            study_uid_index, image_desc, structure_name) or read from the DICOM header.
        output_structure_name_format (str, optional): Name template for structures.
        output_dose_name_format (str, optional): Name template for doses.
        return_extra (bool, optional): Include extra information in image descriptions.
        output_directory (str, optional): Root directory for written files.
        output_file_suffix (str, optional): Output file extension (e.g. ".nii.gz").
        overwrite_existing_files (bool, optional): Overwrite files that already exist.
        write_to_disk (bool, optional): If False, return the converted data in memory
            instead of writing it.
        verbose (bool, optional): Verbose logging while sorting files.
        initial_sop_class_name_default (str, optional): Fallback SOP class name.

    Returns:
        dict|None: Maps str(parent sorting data) to either the files written
        (write_to_disk=True) or the in-memory converted data. None when no
        DICOM files are found.

    Raises:
        ValueError: If dicom_directory is neither a path nor an iterable of paths.
    """
    # Collect candidate DICOM files from the given directory/directories
    if isinstance(dicom_directory, (str, pathlib.Path)):
        root_path = pathlib.Path(dicom_directory)
        # Find files ending with .dcm, .dc3
        dicom_file_list = [
            p
            for p in root_path.glob("**/*")
            if p.name.lower().endswith((".dcm", ".dc3"))
        ]
    elif hasattr(dicom_directory, "__iter__"):
        dicom_file_list = []
        for dicom_dir in dicom_directory:
            root_path = pathlib.Path(dicom_dir)
            dicom_file_list += [
                p
                for p in root_path.glob("**/*")
                if p.name.lower().endswith((".dcm", ".dc3"))
            ]
    else:
        # Previously an unsupported type fell through to a NameError below
        raise ValueError("dicom_directory must be a path or an iterable of paths.")

    if len(dicom_file_list) == 0:
        logger.info("No DICOM files found in input directory. Exiting now.")
        return

    # Sort the DICOM files into a dictionary (of dictionaries):
    #   {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files],
    #                                     series_UID_2: [list_of_DICOM_files], ...},
    #    parent_data_2: {...}, ...}
    dicom_series_dict_parent = process_dicom_file_list(
        dicom_file_list, parent_sorting_field=parent_sorting_field, verbose=verbose
    )

    if dicom_series_dict_parent is None:
        logger.info("No valid DICOM files found. Ending.")
        return None

    output = {}

    for parent_data, dicom_series_dict in dicom_series_dict_parent.items():
        logger.info(f"Processing data for {parent_sorting_field} = {parent_data}.")
        logger.info(f" Number of DICOM series = {len(dicom_series_dict.keys())}")

        # Stores the converted SimpleITK data, keyed by output file name
        output_data_dict = {}

        # Maps study UID -> small integer index; used in file names and to match
        # structure sets / paired images (e.g. PET/CT) to the relevant study
        study_uid_dict = {}

        logger.debug(f" Output image name format: {output_image_name_format}")
        logger.debug(f" Output structure name format: {output_structure_name_format}")
        logger.debug(f" Output dose name format: {output_dose_name_format}")

        for series_uid in dicom_series_dict.keys():
            # process_dicom_series yields, per converted item:
            #   dicom_type           "IMAGES" / "STRUCTURES" / "DOSES"
            #   dicom_file_metadata  special metadata extracted from the DICOM header
            #   initial_dicom        the first DICOM of the series
            #   dicom_file_data      the SimpleITK image data
            for (
                dicom_type,
                dicom_file_metadata,
                initial_dicom,
                dicom_file_data,
            ) in process_dicom_series(
                dicom_series_dict=dicom_series_dict,
                series_uid=series_uid,
                parent_sorting_field=parent_sorting_field,
                return_extra=return_extra,
                initial_sop_class_name_default=initial_sop_class_name_default,
            ):
                # Step 1: check the parent sorting field (e.g. PatientName) is
                # consistent across the series of this parent
                parent_sorting_data = dicom_file_metadata["parent_sorting_data"]

                if "parent_sorting_data" not in output_data_dict:
                    output_data_dict["parent_sorting_data"] = parent_sorting_data
                elif parent_sorting_data != output_data_dict["parent_sorting_data"]:
                    logger.error(
                        f"A conflict was found for the parent sorting field "
                        f"({parent_sorting_field}): {parent_sorting_data}"
                    )
                    logger.error("Quitting now.")
                    print(dicom_series_dict_parent.keys())
                    sys.exit()
                else:
                    logger.info(
                        f" Parent sorting field ({parent_sorting_field}) match found: "
                        f"{parent_sorting_data}"
                    )

                # Step 2: index the study UID (used for naming DICOM series)
                study_uid = dicom_file_metadata["study_uid"]

                if study_uid not in study_uid_dict:
                    try:
                        study_uid_index = max(study_uid_dict.values()) + 1
                    except AttributeError:
                        study_uid_index = 0  # Study UID dict might not exist
                    except ValueError:
                        study_uid_index = 0  # Study UID dict might be empty

                    logger.info(f" Setting study instance UID index: {study_uid_index}")
                    study_uid_dict[study_uid] = study_uid_index
                else:
                    logger.info(
                        f" Study instance UID index already exists: {study_uid_dict[study_uid]}"
                    )

                # Step 3: generate output names.
                # These fields are filled in directly rather than read from the header.
                special_name_fields = [
                    "parent_sorting_data",
                    "study_uid_index",
                    "image_desc",
                    "structure_name",
                ]

                # Get the image description (other special names are defined above)
                image_desc = get_dicom_info_from_description(
                    initial_dicom, return_extra=return_extra
                )

                # Pick the name template for this data type, then extract every
                # {field} it references
                if dicom_type == "IMAGES":
                    name_format = output_image_name_format
                elif dicom_type == "STRUCTURES":
                    name_format = output_structure_name_format
                elif dicom_type == "DOSES":
                    name_format = output_dose_name_format
                all_naming_fields = [
                    i[i.find("{") + 1 :] for i in name_format.split("}") if len(i) > 0
                ]

                # Fields that are not special must come from the DICOM header
                dicom_header_tags = [
                    i for i in all_naming_fields if i not in special_name_fields
                ]

                naming_info_dict = {}
                for dicom_field in dicom_header_tags:
                    try:
                        dicom_field_value = initial_dicom[dicom_field].value
                    except (AttributeError, KeyError):
                        logger.warning(
                            f" Could not find DICOM header {dicom_field}. Setting as 0 to "
                            f"preserve naming convention."
                        )
                        dicom_field_value = 0
                    naming_info_dict[dicom_field] = dicom_field_value

                if dicom_type == "IMAGES":
                    output_name = output_image_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        image_desc=image_desc,
                        **naming_info_dict,
                    )

                    if "IMAGES" not in output_data_dict:
                        output_data_dict["IMAGES"] = {output_name: dicom_file_data}
                    elif output_name not in output_data_dict["IMAGES"]:
                        output_data_dict["IMAGES"][output_name] = dicom_file_data
                    else:
                        logger.info(" An image with this name exists, appending.")
                        # BUG FIX: wrap a bare image in a list before appending.
                        # The old test (hasattr(..., "__iter__")) was inverted: a
                        # single sitk.Image has no __iter__, so it was never
                        # wrapped and .append raised AttributeError. Use the same
                        # isinstance check as the DOSES branch.
                        if isinstance(output_data_dict["IMAGES"][output_name], sitk.Image):
                            output_data_dict["IMAGES"][output_name] = [
                                output_data_dict["IMAGES"][output_name]
                            ]
                        output_data_dict["IMAGES"][output_name].append(dicom_file_data)

                elif dicom_type == "STRUCTURES":
                    for structure_name, structure_image in zip(
                        dicom_file_metadata["structure_name_list"], dicom_file_data
                    ):
                        output_name = output_structure_name_format.format(
                            parent_sorting_data=parent_sorting_data,
                            study_uid_index=study_uid_dict[study_uid],
                            image_desc=image_desc,
                            structure_name=structure_name,
                            **naming_info_dict,
                        )

                        if "STRUCTURES" not in output_data_dict:
                            output_data_dict["STRUCTURES"] = {output_name: structure_image}
                        elif output_name not in output_data_dict["STRUCTURES"]:
                            output_data_dict["STRUCTURES"][output_name] = structure_image
                        else:
                            logger.info(" A structure with this name exists, appending.")
                            # BUG FIX: same inverted-iterability fix as IMAGES above
                            if isinstance(
                                output_data_dict["STRUCTURES"][output_name], sitk.Image
                            ):
                                output_data_dict["STRUCTURES"][output_name] = [
                                    output_data_dict["STRUCTURES"][output_name]
                                ]
                            output_data_dict["STRUCTURES"][output_name].append(
                                structure_image
                            )

                elif dicom_type == "DOSES":
                    output_name = output_dose_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        **naming_info_dict,
                    )

                    if "DOSES" not in output_data_dict:
                        output_data_dict["DOSES"] = {output_name: dicom_file_data}
                    elif output_name not in output_data_dict["DOSES"]:
                        output_data_dict["DOSES"][output_name] = dicom_file_data
                    else:
                        logger.info(" An image with this name exists, appending.")
                        if isinstance(output_data_dict["DOSES"][output_name], sitk.Image):
                            output_data_dict["DOSES"][output_name] = [
                                output_data_dict["DOSES"][output_name]
                            ]
                        output_data_dict["DOSES"][output_name].append(dicom_file_data)

        if write_to_disk:
            output[str(parent_data)] = write_output_data_to_disk(
                output_data_dict=output_data_dict,
                output_directory=output_directory,
                output_file_suffix=output_file_suffix,
                overwrite_existing_files=overwrite_existing_files,
            )
        else:
            output[str(parent_data)] = output_data_dict

    """
    TO DO!
    Memory issue with output_data_dict
    Use in inner loop, reset output_data_dict
    """

    return output
| # Copyright 2020 University of New South Wales, University of Sydney
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import pathlib
import pydicom
import numpy as np
import SimpleITK as sitk
from skimage.draw import polygon
from loguru import logger
from datetime import datetime
def flatten(itr):
    """Recursively yield the leaf items of an arbitrarily nested iterable.

    Strings, bytes and SimpleITK images are treated as atomic leaves and
    yielded whole rather than iterated over. Non-iterable items are yielded
    as-is.
    """
    atomic_types = (str, bytes, sitk.Image)
    if type(itr) in atomic_types:
        yield itr
        return
    for element in itr:
        try:
            # Descend into nested containers
            yield from flatten(element)
        except TypeError:
            # element is not iterable: it is a leaf
            yield element
def get_suv_bw_scale_factor(ds):
    """Return the body-weight SUV scale factor for a PET DICOM dataset.

    Modified from
    https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf

    Args:
        ds (pydicom.dataset.FileDataset): A PET image DICOM dataset.

    Returns:
        float: Multiplicative factor converting stored pixel values to SUV(bw).
    """
    if ds.Units == "CNTS":
        # Philips stores a private scale factor at tag (7053, 1000) which
        # converts counts directly to SUV(bw)
        return float(ds[0x7053, 0x1000].value)
    # Beyond this point only decay-corrected, attenuation-corrected PET data
    # stored in Bq/ml is handled; anything else fails the asserts below
    assert ds.Modality == "PT"
    assert "DECY" in ds.CorrectedImage
    assert "ATTN" in ds.CorrectedImage
    assert "START" in ds.DecayCorrection
    assert ds.Units == "BQML"
    half_life = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)
    # NOTE(review): both guards below test "SeriesTime" — the second looks like a
    # copy-paste of the first. If SeriesTime is absent, series_date_time and
    # start_time are never assigned and the decay_time computation below raises
    # NameError. Confirm the intended fallback behaviour before changing.
    if "SeriesTime" in ds:
        series_date_time = ds.SeriesDate + "_" + ds.SeriesTime
        if "." in series_date_time:
            # Strip fractional seconds so strptime with %H%M%S succeeds
            series_date_time = series_date_time[
                : -(len(series_date_time) - series_date_time.index("."))
            ]
        series_date_time = datetime.strptime(series_date_time, "%Y%m%d_%H%M%S")
    if "SeriesTime" in ds:
        start_time = (
            ds.SeriesDate
            + "_"
            + ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime
        )
        if "." in start_time:
            # Strip fractional seconds, as above
            start_time = start_time[: -(len(start_time) - start_time.index("."))]
        start_time = datetime.strptime(start_time, "%Y%m%d_%H%M%S")
    # Decay-correct the injected dose from injection to acquisition time
    decay_time = (series_date_time - start_time).seconds
    injected_dose = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
    decayed_dose = injected_dose * pow(2, -decay_time / half_life)
    patient_weight = float(ds.PatientWeight)
    # SUV(bw) = concentration [Bq/ml] * body weight [g] / decayed dose [Bq]
    suv_bw_scale_factor = patient_weight * 1000 / decayed_dose
    return suv_bw_scale_factor
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name="UNKNOWN"):
    """
    Attempts to return some information from a DICOM.
    This is typically used for naming converted NIFTI files.

    Args:
        dicom_object (pydicom.dataset.FileDataset): The DICOM object
        return_extra (bool, optional): return information that is usually not required
        sop_class_name (str, optional): fallback SOP class name used when the DICOM
            object does not define one

    Returns:
        info (str): Some extracted information. Implicitly returns None for
        non-image SOP classes and for image modalities other than CT/MR/PT.
    """
    try:
        dicom_sop_class_name = dicom_object.SOPClassUID.name
    except AttributeError:
        logger.warning(f"Could not find DICOM SOP Class UID, using {sop_class_name}.")
        dicom_sop_class_name = sop_class_name

    if "Image" in dicom_sop_class_name:
        # Get the modality
        image_modality = dicom_object.Modality
        logger.info(f" Image modality: {image_modality}")

        if image_modality == "CT":
            # There is typically not much extra information
            # At the moment, we do not return anything for CT imaging
            if return_extra:
                try:
                    protocol_name = dicom_object.ProtocolName
                    if protocol_name != "":
                        return re.sub(r"[^\w]", "_", protocol_name).upper()
                except AttributeError:
                    logger.warning(" Could not find ProtocolName")
            return ""

        elif image_modality == "MR":
            # Not much consistency, but we can get the protocol name
            try:
                protocol_name = re.sub(r"[^\w]", "_", dicom_object.ProtocolName).upper()
            except AttributeError:
                logger.warning(" Could not find ProtocolName")
                protocol_name = ""
            try:
                sequence_name = re.sub(r"[^\w]", "_", dicom_object.SequenceName).upper()
            except AttributeError:
                logger.warning(" Could not find SequenceName")
                sequence_name = ""
            try:
                series_description = re.sub(r"[^\w]", "_", dicom_object.SeriesDescription).upper()
            except AttributeError:
                # BUG FIX: this warning previously said "SequenceName" (copy-paste)
                logger.warning(" Could not find SeriesDescription")
                series_description = ""

            combined_name = "_".join([protocol_name, sequence_name, series_description])
            # Collapse runs of underscores produced by empty components
            while "__" in combined_name:
                combined_name = combined_name.replace("__", "_")

            if protocol_name != "" and not return_extra:
                return protocol_name
            else:
                return combined_name

        elif image_modality == "PT":
            # Not much experience with this
            # We can search through the corrections applied
            # Return whether or not attenuation is applied
            try:
                corrections = dicom_object.CorrectedImage
            except AttributeError:
                corrections = "NONE"
            if "ATTN" in corrections:
                return "AC"
            else:
                return "NAC"
def safe_sort_dicom_image_list(dicom_image_list):
    """
    Sort a list of DICOM image files by slice position.

    This is much safer than reading the optional SliceLocation tag: it uses the
    mandatory Image Position (Patient) and Image Orientation (Patient) tags. The
    slice normal is the cross product of the row/column direction cosines, and
    files are ordered by the position component along that normal, which keeps
    the ordering consistent across patient positions (e.g. HFS/FFS).

    Args:
        dicom_image_list (list): DICOM file paths belonging to one image series.

    Returns:
        list: The same file paths, sorted by slice position.
    """
    location_by_file = {}
    for dicom_file in dicom_image_list:
        dcm = pydicom.read_file(dicom_file, force=True)

        position = np.array(dcm.ImagePositionPatient, dtype=float)
        orientation = np.array(dcm.ImageOrientationPatient, dtype=float)
        normal = np.cross(orientation[:3], orientation[3:])

        # NOTE(review): this takes only the z-component of the element-wise
        # product rather than a full dot product — fine for axial slices where
        # the normal is (0, 0, +/-1); confirm for oblique acquisitions.
        location_by_file[dicom_file] = (position * normal)[2]

    return sorted(dicom_image_list, key=location_by_file.get)
def fix_missing_data(contour_data_list):
    """
    Fixes missing points in contouring using simple linear interpolation.

    Missing values arrive as empty strings, which forces the NumPy array to a
    string dtype; a single missing x or y value is repaired by averaging its
    neighbours (wrapping around the contour at the ends), and a missing z value
    is taken from the other points of the (planar) slice.

    Args:
        contour_data_list (list): The contour data for one slice, as a flat
            [x0, y0, z0, x1, y1, z1, ...] sequence.

    Returns:
        contour_data (numpy array): Contour data with at most one missing
            value interpolated. More than one missing value is only warned
            about (not fixed).
    """
    contour_data = np.array(contour_data_list)

    # BUG FIX: the original check was `contour_data.any() == ""`, which compares
    # a boolean reduction against a string and can never be True (and .any() is
    # not defined for string dtypes on modern NumPy). Detect empties element-wise,
    # and only on string/object dtypes — a purely numeric array has no gaps.
    has_missing = contour_data.dtype.kind in "OUS" and np.any(contour_data == "")

    if has_missing:
        logger.warning(" Missing values detected.")
        missing_values = np.where(contour_data == "")[0]
        if missing_values.shape[0] > 1:
            logger.warning(" More than one value missing, fixing this isn't implemented yet...")
        else:
            logger.warning(" Only one value missing.")
            missing_index = missing_values[0]
            missing_axis = missing_index % 3
            if missing_axis == 0:
                logger.warning(" Missing value in x axis: interpolating.")
                if missing_index > len(contour_data) - 3:
                    # Last point: wrap around to the first point
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[0]
                elif missing_index == 0:
                    # First point: wrap around to the last point
                    lower_val = contour_data[-3]
                    upper_val = contour_data[3]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                # BUG FIX: the values are numpy strings here; convert before
                # averaging (the original str arithmetic raised TypeError)
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            elif missing_axis == 1:
                logger.warning(" Missing value in y axis: interpolating.")
                if missing_index > len(contour_data) - 2:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[1]
                elif missing_index == 0:
                    lower_val = contour_data[-2]
                    upper_val = contour_data[4]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            else:
                logger.warning(" Missing value in z axis: taking slice value")
                # All z values on a closed planar contour should be equal;
                # take the minimum of the remaining ones
                temp = contour_data[2::3].tolist()
                temp.remove("")
                contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
    return contour_data
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
    """
    This function is used to generate a binary mask from a set of vertices.
    This allows us to convert from DICOM-RTStruct format to any imaging format.

    Args:
        image ([SimpleITK.Image]): The image, used to copy imaging information
            (e.g. resolution, spacing)
        dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
        spacing_override (bool | tuple, optional): Overwrite the spacing.
            Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.

    Returns:
        list, list : final_struct_name_sequence, structure_list — the cleaned
        structure names and the corresponding binary sitkUInt8 masks, in order.
    """
    if spacing_override:
        # A zero entry in spacing_override keeps the image's existing spacing
        # for that axis; note this mutates the spacing of the passed-in image
        current_spacing = list(image.GetSpacing())
        new_spacing = tuple(
            [
                current_spacing[k] if spacing_override[k] == 0 else spacing_override[k]
                for k in range(3)
            ]
        )
        image.SetSpacing(new_spacing)
    struct_point_sequence = dicom_struct.ROIContourSequence
    # Structure names with internal whitespace joined by underscores
    struct_name_sequence = [
        "_".join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence
    ]
    structure_list = []
    final_struct_name_sequence = []
    for structIndex, structure_name in enumerate(struct_name_sequence):
        # SimpleITK size is (x, y, z); NumPy arrays are indexed (z, y, x),
        # hence the reversal when allocating the mask volume
        image_blank = np.zeros(image.GetSize()[::-1], dtype=np.uint8)
        logger.info(
            " Converting structure {0} with name: {1}".format(structIndex, structure_name)
        )
        # Skip structures without usable closed-planar contour data
        if structIndex >= len(struct_point_sequence):
            logger.warning(" Contour sequence is missing, skipping.")
            continue
        if not hasattr(struct_point_sequence[structIndex], "ContourSequence"):
            logger.warning(" No contour sequence found for this structure, skipping.")
            continue
        if len(struct_point_sequence[structIndex].ContourSequence) == 0:
            logger.warning(" Contour sequence is empty, skipping.")
            continue
        if (
            not struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType
            == "CLOSED_PLANAR"
        ):
            logger.warning(" This is not a closed planar structure, skipping.")
            continue
        for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
            # Repair a single missing contour value, if any
            contour_data = fix_missing_data(
                struct_point_sequence[structIndex].ContourSequence[sl].ContourData
            )
            struct_slice_contour_data = np.array(contour_data, dtype=np.double)
            # Flat [x0, y0, z0, x1, ...] -> (n_points, 3)
            vertexArr_physical = struct_slice_contour_data.reshape(
                struct_slice_contour_data.shape[0] // 3, 3
            )
            # Physical (patient) coordinates -> integer voxel indices, transposed
            # so that point_arr[0] = all x indices, [1] = y, [2] = z
            point_arr = np.array(
                [image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]
            ).T
            [xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]
            zIndex = point_arr[2][0]
            # A closed planar contour must lie entirely on one axial slice
            # NOTE(review): quit() in library code — consider raising instead
            if np.any(point_arr[2] != zIndex):
                logger.error(" Axial slice index varies in contour. Quitting now.")
                logger.error(" Structure: {0}".format(structure_name))
                logger.error(" Slice index: {0}".format(zIndex))
                quit()
            if zIndex >= image.GetSize()[2]:
                logger.warning(" Slice index greater than image size. Skipping slice.")
                logger.warning(" Structure: {0}".format(structure_name))
                logger.warning(" Slice index: {0}".format(zIndex))
                continue
            # Rasterise the polygon on this slice, then accumulate; the
            # transpose maps the (x, y) fill indices into the (y, x) slice array
            sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
            filledIndicesX, filledIndicesY = polygon(
                xVertexArr_image, yVertexArr_image, shape=sliceArr.shape
            )
            sliceArr[filledIndicesX, filledIndicesY] = 1
            image_blank[zIndex] += sliceArr.T
        # Binarise (overlapping contours may have accumulated values > 1)
        # and copy origin/spacing/direction from the reference image
        struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))
        struct_image.CopyInformation(image)
        structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
        # Clean the name for use in file names
        structure_name_clean = re.sub(r"[^\w]", "_", structure_name).upper()
        while "__" in structure_name_clean:
            structure_name_clean = structure_name_clean.replace("__", "_")
        final_struct_name_sequence.append(structure_name_clean)
    return final_struct_name_sequence, structure_list
def process_dicom_file_list(dicom_file_list, parent_sorting_field="PatientName", verbose=False):
    """
    Organise DICOM files into a nested dictionary: first keyed by the parent
    sorting field (e.g. PatientName), then by series instance UID, with each
    leaf a list of file paths.
    """
    sorted_files = {}

    for file_index, dicom_file in enumerate(sorted(dicom_file_list)):
        if verbose is True:
            logger.debug(f" Sorting file {file_index}")

        dicom_file = dicom_file.as_posix()

        # DICOMDIR index files are skipped; images are read directly
        if "dicomdir" in dicom_file.lower():
            logger.warning(
                "DICOMDIR is not supported in this tool, images are read directly. Skipping."
            )
            continue

        dicom_object = pydicom.read_file(dicom_file, force=True)

        parent_value = dicom_object[parent_sorting_field].value
        series_uid = dicom_object.SeriesInstanceUID

        # Create the nested dicts/lists on first sight of each key
        sorted_files.setdefault(parent_value, {}).setdefault(series_uid, []).append(dicom_file)

    return sorted_files
def process_dicom_series(
    dicom_series_dict,
    series_uid,
    parent_sorting_field="PatientName",
    return_extra=True,
    individual_file=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Convert one DICOM series into SimpleITK data.

    This is a generator: depending on the SOP class of the series it yields one
    or more tuples of
        (dicom_type, dicom_file_metadata, initial_dicom, dicom_file_data)
    where dicom_type is "IMAGES", "STRUCTURES" or "DOSES".

    Args:
        dicom_series_dict (dict): Mapping of series UID -> list of DICOM files.
        series_uid (str): The series UID to process.
        parent_sorting_field (str, optional): DICOM tag used for sorting
            (e.g. PatientName).
        return_extra (bool, optional): Include extra information in image
            descriptions.
        individual_file (bool|str, optional): If set, process only this single
            file (used as a fallback when a series cannot be read whole).
        initial_sop_class_name_default (str, optional): Fallback SOP class name
            when the DICOM header does not define one.

    Yields:
        tuple: (dicom_type, dicom_file_metadata, initial_dicom, dicom_file_data)
    """
    if not individual_file:
        logger.info(f" Processing series UID: {series_uid}")
        dicom_file_list = dicom_series_dict[series_uid]
    else:
        logger.info(f" Processing individual file: {individual_file}")
        dicom_file_list = [individual_file]

    logger.info(f" Number of DICOM files: {len(dicom_file_list)}")

    initial_dicom = pydicom.read_file(dicom_file_list[0])

    # Get the data in the parent sorting field, clean with RegEx
    parent_sorting_data = re.sub(
        r"[^\w]", "_", str(initial_dicom[parent_sorting_field].value)
    ).upper()

    if parent_sorting_data == "":
        logger.error(
            f"Could not find any data in {parent_sorting_field}. This is very bad, the data cannot be sorted properly."
        )
        """
        ! TO DO
        Implement a routine to let a user correlate a root directory with a name
        """
        parent_sorting_data = "TEMP"

    try:
        initial_dicom_sop_class_name = initial_dicom.SOPClassUID.name
    except AttributeError:
        logger.warning(
            f"Could not find DICOM SOP Class UID, using {initial_sop_class_name_default}."
        )
        initial_dicom_sop_class_name = initial_sop_class_name_default

    try:
        study_uid = initial_dicom.StudyInstanceUID
    except AttributeError:
        study_uid = "00001"

    """
    ! TO DO
    Need to check for secondary capture image storage
    This can include JPEGs with written information on them
    This is typically not very useful
    We can dump it to file
    Or just save the DICOM file in the folder of interest
    Not a big problem, sort out another day
    """

    # Check the potential types of DICOM files
    if (
        "Image" in initial_dicom_sop_class_name
        and initial_dicom_sop_class_name != "Secondary Capture Image Storage"
    ):
        # Load as a primary image
        sorted_file_list = safe_sort_dicom_image_list(dicom_file_list)
        try:
            image = sitk.ReadImage(sorted_file_list)
        except RuntimeError:
            logger.warning(" Could not read image into SimpleITK.")
            logger.info(" Processing files individually.")
            # BUG FIX: this previously did "return process_dicom_series(...)"
            # inside the loop. A "return <value>" inside a generator discards
            # the recursive generator entirely (it becomes the StopIteration
            # value), so the fallback yielded nothing and only considered the
            # first file. Delegate with "yield from" over every file instead.
            for dicom_file in dicom_file_list:
                yield from process_dicom_series(
                    dicom_series_dict,
                    series_uid,
                    parent_sorting_field=parent_sorting_field,
                    return_extra=return_extra,
                    individual_file=dicom_file,
                    initial_sop_class_name_default=initial_sop_class_name_default,
                )
            return

        dicom_file_metadata = {
            "parent_sorting_data": parent_sorting_data,
            "study_uid": study_uid,
        }

        """
        ! TO DO - integrity check
        Read in all the files here, check the slice location and determine if any are missing
        """
        if initial_dicom.Modality == "PT":
            # scaling_factor = get_suv_bw_scale_factor(initial_dicom)
            # image *= scaling_factor
            # !TO DO
            # Work on PET SUV conversion
            pass

        """
        ! CHECKPOINT
        Some DCE MRI sequences have the same series UID
        Here we check the sequence name, and split if necessary
        """
        if initial_dicom.Modality == "MR":
            # Group the files by sequence name; if that tag is missing
            # (AttributeError), fall back to SeriesDescription, then to
            # AcquisitionComments
            try:
                sequence_names = np.unique(
                    [pydicom.read_file(x).SequenceName for x in dicom_file_list]
                )

                sequence_dict = {}
                for dcm_name in dicom_file_list:
                    dcm_obj = pydicom.read_file(dcm_name)
                    var = dcm_obj.SequenceName
                    if var not in sequence_dict.keys():
                        sequence_dict[var] = [dcm_name]
                    else:
                        sequence_dict[var].append(dcm_name)

            except AttributeError:
                try:
                    logger.warning(
                        " MRI sequence name not found. The SeriesDescription will be used instead."
                    )
                    sequence_names = np.unique(
                        [pydicom.read_file(x).SeriesDescription for x in dicom_file_list]
                    )
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = dcm_obj.SeriesDescription
                        if var not in sequence_dict.keys():
                            sequence_dict[var] = [dcm_name]
                        else:
                            sequence_dict[var].append(dcm_name)

                except AttributeError:
                    logger.warning(
                        " MRI SeriesDescription not found. The AcquisitionComments will be used instead."
                    )
                    sequence_names = np.unique(
                        [pydicom.read_file(x).AcquisitionComments for x in dicom_file_list]
                    )
                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = dcm_obj.AcquisitionComments
                        if var not in sequence_dict.keys():
                            sequence_dict[var] = [dcm_name]
                        else:
                            sequence_dict[var].append(dcm_name)

            if initial_dicom.Manufacturer == "GE MEDICAL SYSTEMS":
                # GE use the DICOM tag (0019, 10a2) [Raw data run number]
                # in Diffusion weighted MRI sequences
                # We need to separate this out to get the difference sequences
                if initial_dicom.SeriesDescription == "Diffusion Weighted":
                    # num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) )
                    # number_of_images / images_per_seq
                    num_images_per_seq = initial_dicom[(0x0021, 0x104F)].value

                    sequence_names = np.unique(
                        [
                            f"DWI_{str( ( pydicom.read_file(x)['InstanceNumber'].value - 1) // num_images_per_seq )}"
                            for x in dicom_file_list
                        ]
                    )

                    sequence_name_index_dict = {
                        name: index for index, name in enumerate(sequence_names)
                    }

                    sequence_dict = {}
                    for dcm_name in dicom_file_list:
                        dcm_obj = pydicom.read_file(dcm_name)
                        var = f"DWI_{str( ( dcm_obj['InstanceNumber'].value - 1) // num_images_per_seq )}"
                        var_to_index = sequence_name_index_dict[var]

                        if var_to_index not in sequence_dict.keys():
                            sequence_dict[var_to_index] = [dcm_name]
                        else:
                            sequence_dict[var_to_index].append(dcm_name)

            sequence_names = sorted(sequence_dict.keys())

            # BUG FIX: np.alen was removed in NumPy 1.23; len() is equivalent
            if len(sequence_names) > 1:
                logger.warning(" Two MR sequences were found under a single series UID.")
                logger.warning(" These will be split into separate images.")

                # Split up the DICOM file list by sequence name
                for sequence_name in sequence_names:
                    dicom_file_list_by_sequence = sequence_dict[sequence_name]

                    logger.info(sequence_name)
                    logger.info(len(dicom_file_list_by_sequence))

                    sorted_file_list = safe_sort_dicom_image_list(dicom_file_list_by_sequence)

                    initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)

                    image_by_sequence = sitk.ReadImage(sorted_file_list)

                    dicom_file_metadata_by_sequence = {
                        "parent_sorting_data": parent_sorting_data,
                        "study_uid": study_uid,
                    }

                    yield "IMAGES", dicom_file_metadata_by_sequence, initial_dicom, image_by_sequence
                return  # Stop iteration

        yield "IMAGES", dicom_file_metadata, initial_dicom, image

    if "Structure" in initial_dicom_sop_class_name:
        # Load as an RT structure set
        # This should be done individually for each file
        logger.info(f" Number of files: {len(dicom_file_list)}")
        for index, dicom_file in enumerate(dicom_file_list):
            dicom_object = pydicom.read_file(dicom_file, force=True)

            # We must also read in the corresponding DICOM image
            # This can be found by matching the references series UID to the series UID
            """
            ! TO DO
            What happens if there is an RT structure set with different referenced sequences?
            """
            # Get the "ReferencedFrameOfReferenceSequence", first item
            referenced_frame_of_reference_item = dicom_object.ReferencedFrameOfReferenceSequence[0]
            # Get the "RTReferencedStudySequence", first item
            # This retrieves the study UID
            # This might be useful, but would typically match the actual StudyInstanceUID in the
            # DICOM object
            rt_referenced_series_item = (
                referenced_frame_of_reference_item.RTReferencedStudySequence[0]
            )
            # Get the "RTReferencedSeriesSequence", first item
            # This retrieves the actual referenced series UID, which we need to match imaging
            # parameters
            rt_referenced_series_again_item = rt_referenced_series_item.RTReferencedSeriesSequence[
                0
            ]
            # Get the appropriate series instance UID
            image_series_uid = rt_referenced_series_again_item.SeriesInstanceUID
            logger.info(f" Item {index}: Matched SeriesInstanceUID = {image_series_uid}")

            # Read in the corresponding image
            sorted_file_list = safe_sort_dicom_image_list(dicom_series_dict[image_series_uid])
            image = sitk.ReadImage(sorted_file_list)

            initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)

            (
                structure_name_list,
                structure_image_list,
            ) = transform_point_set_from_dicom_struct(image, dicom_object)

            dicom_file_metadata = {
                "parent_sorting_data": parent_sorting_data,
                "study_uid": study_uid,
                "structure_name_list": structure_name_list,
            }

            yield "STRUCTURES", dicom_file_metadata, dicom_object, structure_image_list

    if "Dose" in initial_dicom_sop_class_name:
        # Load as an RT Dose distribution
        # This should be done individually for each file
        logger.info(f" Number of files: {len(dicom_file_list)}")
        for index, dicom_file in enumerate(dicom_file_list):
            dicom_object = pydicom.read_file(dicom_file, force=True)

            """
            ! CHECKPOINT
            There should only be a single RT dose file (with each series UID)
            If there are more, yield each
            """
            initial_dicom = pydicom.read_file(dicom_file, force=True)

            dicom_file_metadata = {
                "parent_sorting_data": parent_sorting_data,
                "study_uid": study_uid,
            }

            # We must read in as a float otherwise when we multiply by one later it will not work!
            raw_dose_image = sitk.ReadImage(dicom_file, sitk.sitkFloat32)

            dose_grid_scaling = dicom_object.DoseGridScaling

            logger.debug(f" Dose grid scaling: {dose_grid_scaling} Gy")

            scaled_dose_image = raw_dose_image * dose_grid_scaling

            yield "DOSES", dicom_file_metadata, dicom_object, scaled_dose_image

    """
    ! TO DO
    1. (DONE) Implement conversion of dose files (to NIFTI images)
    2. Implement conversion of RT plan files to text dump
    3. Do something with other files (e.g. Deformable Image Registration stuff)
    """

    return
def write_output_data_to_disk(
    output_data_dict,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
):
    """
    Write converted (SimpleITK) output data to disk.

    Args:
        output_data_dict (dict|None): Maps data-type fields (e.g. "IMAGES") to
            {file name base: image or list of images}, plus the special key
            "parent_sorting_data" used to build the output directory.
        output_directory (str, optional): Root directory to write into.
        output_file_suffix (str, optional): Output file extension (e.g. ".nii.gz").
        overwrite_existing_files (bool, optional): Overwrite files that already
            exist on disk; otherwise existing files are skipped.

    Returns:
        dict|None: Maps each field to the list of output paths (including paths
        that were skipped because they already existed), or None if there was
        nothing to write.
    """
    if output_data_dict is None:
        return None

    filename_fields = [i for i in output_data_dict.keys() if i != "parent_sorting_data"]
    parent_sorting_data = output_data_dict["parent_sorting_data"]

    files_written = {}

    def _clean_filename(name):
        # Collapse runs of underscores to one, and drop trailing underscores.
        # (rstrip also guards the empty-name case, which previously raised
        # IndexError on name[-1].)
        return re.sub(r"_{2,}", "_", name).rstrip("_")

    def _write_image(field, field_filename, file_to_write):
        # Build the output path, record it, and write unless the file already
        # exists and overwriting is disabled.
        output_name = (
            pathlib.Path(output_directory)
            / parent_sorting_data
            / field
            / (field_filename + output_file_suffix)
        )
        files_written[field].append(output_name)

        if output_name.is_file():
            logger.warning(f" File exists: {output_name}")
            if overwrite_existing_files:
                logger.warning(" You have selected to overwrite existing files.")
            else:
                logger.info(
                    " You have selected to NOT overwrite existing files. Continuing."
                )
                return
        sitk.WriteImage(file_to_write, output_name.as_posix())

    for field in filename_fields:
        logger.info(f" Writing files for field: {field}")
        p = pathlib.Path(output_directory) / parent_sorting_data / field
        p.mkdir(parents=True, exist_ok=True)
        files_written[field] = []

        for field_filename_base, field_list in output_data_dict[field].items():
            # A list of images means several items produced the same name
            # (depends on the chosen name format); append an index as we write.
            if isinstance(field_list, (tuple, list)):
                for suffix, file_to_write in enumerate(flatten(field_list)):
                    _write_image(
                        field,
                        _clean_filename(field_filename_base + f"_{suffix}"),
                        file_to_write,
                    )
            else:
                _write_image(field, _clean_filename(field_filename_base), field_list)

    return files_written
def process_dicom_directory(
    dicom_directory,
    parent_sorting_field="PatientName",
    output_image_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}",
    output_structure_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}",
    output_dose_name_format="{parent_sorting_data}_{study_uid_index}_{DoseSummationType}",
    return_extra=True,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
    write_to_disk=True,
    verbose=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """Sort and convert a directory (or directories) of DICOM files.

    Files ending in ``.dcm``/``.dc3`` are collected recursively, grouped by
    ``parent_sorting_field`` (e.g. PatientName) and series UID, converted into
    SimpleITK images (images, structures, doses) and optionally written to disk.

    Args:
        dicom_directory (str | pathlib.Path | iterable): A directory, or an
            iterable of directories, searched recursively for DICOM files.
        parent_sorting_field (str, optional): DICOM header field used for the
            top-level grouping. Defaults to "PatientName".
        output_image_name_format (str, optional): ``str.format`` template for
            image output names; fields are DICOM header tags plus the special
            fields parent_sorting_data/study_uid_index/image_desc.
        output_structure_name_format (str, optional): Template for structure
            output names (additionally supports ``structure_name``).
        output_dose_name_format (str, optional): Template for dose output names.
        return_extra (bool, optional): Passed through to the series/description
            helpers. Defaults to True.
        output_directory (str, optional): Root directory for written files.
        output_file_suffix (str, optional): Extension for written images.
        overwrite_existing_files (bool, optional): Overwrite outputs on disk.
        write_to_disk (bool, optional): If True, write images and return the
            written file names; otherwise return the in-memory image dicts.
        verbose (bool, optional): Verbose output during file-list processing.
        initial_sop_class_name_default (str, optional): Fallback SOP class name.

    Returns:
        dict | None: Mapping of parent sorting data (as str) to either the
        file-name dict (write_to_disk) or the image dict; None if no valid
        DICOM files were found.

    Raises:
        ValueError: If ``dicom_directory`` is neither path-like nor iterable.
    """
    # Check dicom_directory type
    if isinstance(dicom_directory, str) or isinstance(dicom_directory, pathlib.Path):
        # Get all the DICOM files in the given directory
        root_path = pathlib.Path(dicom_directory)
        # Find files ending with .dcm, .dc3
        dicom_file_list = [
            p
            for p in root_path.glob("**/*")
            if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
        ]

    elif hasattr(dicom_directory, "__iter__"):
        dicom_file_list = []
        for dicom_dir in dicom_directory:
            # Get all the DICOM files in each directory
            root_path = pathlib.Path(dicom_dir)
            # Find files ending with .dcm, .dc3
            dicom_file_list += [
                p
                for p in root_path.glob("**/*")
                if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
            ]

    else:
        # Previously an unsupported type fell through and raised a NameError
        # below; fail early with a clear message instead.
        raise ValueError(
            "dicom_directory must be a str, pathlib.Path, or an iterable of directories."
        )

    if len(dicom_file_list) == 0:
        logger.info("No DICOM files found in input directory. Exiting now.")
        return

    # Process the DICOM files
    # This returns a dictionary (of dictionaries):
    # {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files],
    #                                  {series_UID_2: [list_of_DICOM_files], ...
    #  parent_data_2                 : {series_UID_1: [list_of_DICOM_files],
    #                                  {series_UID_2: [list_of_DICOM_files], ...
    #  ... }
    dicom_series_dict_parent = process_dicom_file_list(
        dicom_file_list, parent_sorting_field=parent_sorting_field, verbose=verbose
    )

    if dicom_series_dict_parent is None:
        logger.info("No valid DICOM files found. Ending.")
        return None

    output = {}

    for parent_data, dicom_series_dict in dicom_series_dict_parent.items():
        logger.info(f"Processing data for {parent_sorting_field} = {parent_data}.")
        logger.info(f" Number of DICOM series = {len(dicom_series_dict.keys())}")

        # Set up the output data
        # This stores the SimpleITK images and file names
        output_data_dict = {}

        # Set up the study UID dict
        # This helps match structure sets to relevant images
        # And paired images to each other (e.g. PET/CT)
        study_uid_dict = {}

        # Give some user feedback
        logger.debug(f" Output image name format: {output_image_name_format}")
        logger.debug(f" Output structure name format: {output_structure_name_format}")
        logger.debug(f" Output dose name format: {output_dose_name_format}")

        # For each unique series UID, process the DICOM files
        for series_uid in dicom_series_dict.keys():

            # This function returns four values
            # 1. dicom_type: This is IMAGES, STRUCTURES, DOSES, etc
            # 2. dicom_file_metadata: Some special metadata extracted from the DICOM header
            # 3. initial_dicom: The first DICOM in the series. For doses and structures there is
            #    (usually) only one DICOM anyway
            # 4. dicom_file_data: The actual SimpleITK image data
            for (
                dicom_type,
                dicom_file_metadata,
                initial_dicom,
                dicom_file_data,
            ) in process_dicom_series(
                dicom_series_dict=dicom_series_dict,
                series_uid=series_uid,
                parent_sorting_field=parent_sorting_field,
                return_extra=return_extra,
                initial_sop_class_name_default=initial_sop_class_name_default,
            ):

                # Step 1
                # Check the parent sorting field is consistent
                # This would usually be the PatientName, PatientID, or similar
                # Occasionally these will both be blank
                parent_sorting_data = dicom_file_metadata["parent_sorting_data"]

                if "parent_sorting_data" not in output_data_dict.keys():
                    output_data_dict["parent_sorting_data"] = parent_sorting_data

                else:
                    if parent_sorting_data != output_data_dict["parent_sorting_data"]:
                        logger.error(
                            f"A conflict was found for the parent sorting field "
                            f"({parent_sorting_field}): {parent_sorting_data}"
                        )
                        logger.error("Quitting now.")
                        print(dicom_series_dict_parent.keys())
                        sys.exit()
                    else:
                        logger.info(
                            f" Parent sorting field ({parent_sorting_field}) match found: "
                            f"{parent_sorting_data}"
                        )

                # Step 2
                # Get the study UID
                # Used for indexing DICOM series
                study_uid = dicom_file_metadata["study_uid"]

                if study_uid not in study_uid_dict.keys():
                    try:
                        study_uid_index = max(study_uid_dict.values()) + 1
                    except AttributeError:
                        study_uid_index = 0  # Study UID dict might not exist
                    except ValueError:
                        study_uid_index = 0  # Study UID dict might be empty

                    logger.info(f" Setting study instance UID index: {study_uid_index}")

                    study_uid_dict[study_uid] = study_uid_index

                else:
                    logger.info(
                        f" Study instance UID index already exists: {study_uid_dict[study_uid]}"
                    )

                # Step 3
                # Generate names for output files

                # Special names
                # ! This can be defined once at the start of the function
                special_name_fields = [
                    "parent_sorting_data",
                    "study_uid_index",
                    "image_desc",
                    "structure_name",
                ]

                # Get the image description (other special names are already defined above)
                image_desc = get_dicom_info_from_description(
                    initial_dicom, return_extra=return_extra
                )

                # Get all the fields from the user-given name format
                if dicom_type == "IMAGES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_image_name_format.split("}")
                        if len(i) > 0
                    ]
                elif dicom_type == "STRUCTURES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_structure_name_format.split("}")
                        if len(i) > 0
                    ]
                elif dicom_type == "DOSES":
                    all_naming_fields = [
                        i[i.find("{") + 1 :]
                        for i in output_dose_name_format.split("}")
                        if len(i) > 0
                    ]

                # Now exclude those that aren't derived from the DICOM header
                dicom_header_tags = [i for i in all_naming_fields if i not in special_name_fields]

                naming_info_dict = {}
                for dicom_field in dicom_header_tags:
                    try:
                        dicom_field_value = initial_dicom[dicom_field].value
                    except (AttributeError, KeyError):
                        logger.warning(
                            f" Could not find DICOM header {dicom_field}. Setting as 0 to "
                            f"preserve naming convention."
                        )
                        dicom_field_value = 0
                    naming_info_dict[dicom_field] = dicom_field_value

                if dicom_type == "IMAGES":

                    output_name = output_image_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        image_desc=image_desc,
                        **naming_info_dict,
                    )

                    if "IMAGES" not in output_data_dict.keys():
                        # Make a new entry
                        output_data_dict["IMAGES"] = {output_name: dicom_file_data}

                    else:
                        # First check if there is another image of the same name
                        if output_name not in output_data_dict["IMAGES"].keys():
                            output_data_dict["IMAGES"][output_name] = dicom_file_data

                        else:
                            logger.info(" An image with this name exists, appending.")

                            # NOTE(review): this wraps the entry only when it is already
                            # iterable, unlike the DOSES branch which wraps single
                            # sitk.Image entries — confirm intended behavior.
                            if hasattr(output_data_dict["IMAGES"][output_name], "__iter__"):
                                output_data_dict["IMAGES"][output_name] = list(
                                    [output_data_dict["IMAGES"][output_name]]
                                )
                            output_data_dict["IMAGES"][output_name].append(dicom_file_data)

                elif dicom_type == "STRUCTURES":

                    for structure_name, structure_image in zip(
                        dicom_file_metadata["structure_name_list"], dicom_file_data
                    ):
                        output_name = output_structure_name_format.format(
                            parent_sorting_data=parent_sorting_data,
                            study_uid_index=study_uid_dict[study_uid],
                            image_desc=image_desc,
                            structure_name=structure_name,
                            **naming_info_dict,
                        )

                        if "STRUCTURES" not in output_data_dict.keys():
                            # Make a new entry
                            output_data_dict["STRUCTURES"] = {output_name: structure_image}

                        else:
                            # First check if there is another structure of the same name
                            if output_name not in output_data_dict["STRUCTURES"].keys():
                                output_data_dict["STRUCTURES"][output_name] = structure_image

                            else:
                                logger.info(" A structure with this name exists, appending.")

                                # NOTE(review): same iterable-wrap pattern as IMAGES above.
                                if hasattr(
                                    output_data_dict["STRUCTURES"][output_name], "__iter__"
                                ):
                                    output_data_dict["STRUCTURES"][output_name] = list(
                                        [output_data_dict["STRUCTURES"][output_name]]
                                    )
                                output_data_dict["STRUCTURES"][output_name].append(structure_image)

                elif dicom_type == "DOSES":

                    output_name = output_dose_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        **naming_info_dict,
                    )

                    if "DOSES" not in output_data_dict.keys():
                        # Make a new entry
                        output_data_dict["DOSES"] = {output_name: dicom_file_data}

                    else:
                        # First check if there is another image of the same name
                        if output_name not in output_data_dict["DOSES"].keys():
                            output_data_dict["DOSES"][output_name] = dicom_file_data

                        else:
                            logger.info(" An image with this name exists, appending.")

                            # A single dose image is promoted to a list before appending.
                            if isinstance(output_data_dict["DOSES"][output_name], sitk.Image):
                                output_data_dict["DOSES"][output_name] = list(
                                    [output_data_dict["DOSES"][output_name]]
                                )
                            output_data_dict["DOSES"][output_name].append(dicom_file_data)

        if write_to_disk:
            output[str(parent_data)] = write_output_data_to_disk(
                output_data_dict=output_data_dict,
                output_directory=output_directory,
                output_file_suffix=output_file_suffix,
                overwrite_existing_files=overwrite_existing_files,
            )
        else:
            output[str(parent_data)] = output_data_dict

    """
    TO DO!
    Memory issue with output_data_dict
    Use in inner loop, reset output_data_dict
    """

    return output
|
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import SimpleITK as sitk
import numpy as np
from loguru import logger
from platipy.imaging.registration.utils import apply_transform, convert_mask_to_reg_structure
from platipy.imaging.registration.linear import (
linear_registration,
)
from platipy.imaging.registration.deformable import (
fast_symmetric_forces_demons_registration,
)
from platipy.imaging.label.fusion import (
process_probability_image,
compute_weight_map,
combine_labels,
)
from platipy.imaging.label.iar import run_iar
from platipy.imaging.utils.vessel import vessel_spline_generation
from platipy.imaging.utils.valve import (
generate_valve_from_great_vessel,
generate_valve_using_cylinder,
)
from platipy.imaging.utils.conduction import (
geometric_sinoatrialnode,
geometric_atrioventricularnode,
)
from platipy.imaging.utils.crop import label_to_roi, crop_to_roi
from platipy.imaging.generation.mask import extend_mask
from platipy.imaging.label.utils import binary_encode_structure_list, correct_volume_overlap
# Root directory of the atlas image/structure files; can be overridden through
# the ATLAS_PATH environment variable.
ATLAS_PATH = os.environ.get("ATLAS_PATH", "/atlas")
# Default configuration for the atlas-based cardiac segmentation pipeline
# (consumed by run_cardiac_segmentation). Each top-level key configures one
# stage of the pipeline.
CARDIAC_SETTINGS_DEFAULTS = {
    # Which atlas cases are used, which structures they provide, and where
    # their image/label files live on disk.
    "atlas_settings": {
        "atlas_id_list": [
            "03",
            "05",
            "08",
            "10",
            "11",
            "12",
            "13",
            "16",
            "24",
            "35",
        ],
        "atlas_structure_list": [
            "AORTICVALVE",
            "ASCENDINGAORTA",
            "LANTDESCARTERY",
            "LCIRCUMFLEXARTERY",
            "LCORONARYARTERY",
            "LEFTATRIUM",
            "LEFTVENTRICLE",
            "MITRALVALVE",
            "PULMONARYARTERY",
            "PULMONICVALVE",
            "RCORONARYARTERY",
            "RIGHTATRIUM",
            "RIGHTVENTRICLE",
            "SVC",
            "TRICUSPIDVALVE",
            "WHOLEHEART",
        ],
        "atlas_path": ATLAS_PATH,
        # Format templates: {0} -> atlas case ID, {1} -> structure name.
        "atlas_image_format": "Case_{0}/Images/Case_{0}_CROP.nii.gz",
        "atlas_label_format": "Case_{0}/Structures/Case_{0}_{1}_CROP.nii.gz",
        "crop_atlas_to_structures": False,
        "crop_atlas_expansion_mm": (20, 20, 40),
        # Structure used to guide registration/cropping when supplied.
        "guide_structure_name": "WHOLEHEART",
        "superior_extension": 30,
    },
    # Expansion (mm) applied when automatically cropping the target image.
    "auto_crop_target_image_settings": {
        "expansion_mm": [20, 20, 40],
    },
    # Linear registration of each atlas image to the target.
    "linear_registration_settings": {
        "reg_method": "affine",
        "shrink_factors": [16, 8, 4],
        "smooth_sigmas": [0, 0, 0],
        "sampling_rate": 0.75,
        "default_value": -1000,
        "number_of_iterations": 50,
        "metric": "mean_squares",
        "optimiser": "gradient_descent_line_search",
        "verbose": False,
    },
    # Structure-guided deformable registration (used when a guide structure
    # is passed to run_cardiac_segmentation).
    "structure_guided_registration_settings": {
        "isotropic_resample": True,
        "resolution_staging": [
            16,
            8,
            2,
        ],  # specify voxel size (mm) since isotropic_resample is set
        "iteration_staging": [50, 50, 50],
        "smoothing_sigmas": [0, 0, 0],
        "ncores": 8,
        "default_value": 0,
        "verbose": False,
    },
    # Deformable (demons) registration refining each atlas alignment.
    "deformable_registration_settings": {
        "isotropic_resample": True,
        "resolution_staging": [
            6,
            3,
            1.5,
        ],  # specify voxel size (mm) since isotropic_resample is set
        "iteration_staging": [200, 150, 100],
        "smoothing_sigmas": [0, 0, 0],
        "ncores": 8,
        "default_value": 0,
        "verbose": False,
    },
    # Iterative atlas removal; skipped unless "reference_structure" is set.
    "iar_settings": {
        "reference_structure": False,
        "smooth_distance_maps": True,
        "smooth_sigma": 1,
        "z_score_statistic": "mad",
        "outlier_method": "iqr",
        "outlier_factor": 1.5,
        "min_best_atlases": 5,
        "project_on_sphere": False,
    },
    # Label fusion; per-structure thresholds applied to probability maps.
    "label_fusion_settings": {
        "vote_type": "unweighted",
        "vote_params": None,
        "optimal_threshold": {
            "AORTICVALVE": 0.5,
            "ASCENDINGAORTA": 0.44,
            "LEFTATRIUM": 0.40,
            "LEFTVENTRICLE": 0.45,
            "MITRALVALVE": 0.5,
            "PULMONARYARTERY": 0.46,
            "PULMONICVALVE": 0.5,
            "RIGHTATRIUM": 0.38,
            "RIGHTVENTRICLE": 0.42,
            "SVC": 0.44,
            "TRICUSPIDVALVE": 0.5,
            "WHOLEHEART": 0.5,
        },
    },
    # Coronary vessel splining: per-vessel radius, scan direction and
    # stopping rule used by vessel_spline_generation.
    "vessel_spline_settings": {
        "vessel_name_list": [
            "LANTDESCARTERY",
            "LCIRCUMFLEXARTERY",
            "LCORONARYARTERY",
            "RCORONARYARTERY",
        ],
        "vessel_radius_mm_dict": {
            "LANTDESCARTERY": 2,
            "LCIRCUMFLEXARTERY": 2,
            "LCORONARYARTERY": 2,
            "RCORONARYARTERY": 2,
        },
        "scan_direction_dict": {
            "LANTDESCARTERY": "z",
            "LCIRCUMFLEXARTERY": "z",
            "LCORONARYARTERY": "x",
            "RCORONARYARTERY": "z",
        },
        "stop_condition_type_dict": {
            "LANTDESCARTERY": "count",
            "LCIRCUMFLEXARTERY": "count",
            "LCORONARYARTERY": "count",
            "RCORONARYARTERY": "count",
        },
        "stop_condition_value_dict": {
            "LANTDESCARTERY": 2,
            "LCIRCUMFLEXARTERY": 2,
            "LCORONARYARTERY": 2,
            "RCORONARYARTERY": 2,
        },
    },
    # Geometric definitions of cardiac valves and conduction-system nodes,
    # derived from the atlas-based structures named below.
    "geometric_segmentation_settings": {
        "run_geometric_algorithms": True,
        "geometric_name_suffix": "_GEOMETRIC",
        "atlas_structure_names": {
            "atlas_left_ventricle": "LEFTVENTRICLE",
            "atlas_right_ventricle": "RIGHTVENTRICLE",
            "atlas_left_atrium": "LEFTATRIUM",
            "atlas_right_atrium": "RIGHTATRIUM",
            "atlas_ascending_aorta": "ASCENDINGAORTA",
            "atlas_pulmonary_artery": "PULMONARYARTERY",
            "atlas_superior_vena_cava": "SVC",
            "atlas_whole_heart": "WHOLEHEART",
        },
        "valve_definitions": {
            "mitral_valve_thickness_mm": 10,
            "mitral_valve_radius_mm": 15,
            "tricuspid_valve_thickness_mm": 10,
            "tricuspid_valve_radius_mm": 15,
            "pulmonic_valve_thickness_mm": 10,
            "aortic_valve_thickness_mm": 10,
        },
        "conduction_system_definitions": {
            "sinoatrial_node_radius_mm": 10,
            "atrioventricular_node_radius_mm": 10,
        },
    },
    # Post-processing: hole filling and inter-structure overlap correction.
    "postprocessing_settings": {
        "run_postprocessing": True,
        "binaryfillhole_mm": 3,
        "structures_for_binaryfillhole": [
            "ASCENDINGAORTA",
            "LEFTATRIUM",
            "LEFTVENTRICLE",
            "RIGHTATRIUM",
            "RIGHTVENTRICLE",
            "SVC",
            "AORTICVALVE",
            "MITRALVALVE",
            "PULMONICVALVE",
            "TRICUSPIDVALVE",
            "WHOLEHEART",
        ],
        "structures_for_overlap_correction": [
            "ASCENDINGAORTA",
            "LEFTATRIUM",
            "LEFTVENTRICLE",
            "RIGHTATRIUM",
            "RIGHTVENTRICLE",
            "PULMONARYARTERY",
            "SVC",
        ],
    },
    # Output options for run_cardiac_segmentation.
    "return_atlas_guide_structure": False,
    "return_as_cropped": False,
    "return_proba_as_contours": False,
}
def run_cardiac_segmentation(img, guide_structure=None, settings=CARDIAC_SETTINGS_DEFAULTS):
    """Runs the atlas-based cardiac segmentation.

    Args:
        img (sitk.Image): The target image to segment.
        guide_structure (sitk.Image, optional): Binary mask (e.g. whole heart) used to
            guide cropping and registration. Defaults to None.
        settings (dict, optional): Dictionary containing settings for algorithm.
            Defaults to CARDIAC_SETTINGS_DEFAULTS.

    Returns:
        tuple: (results, results_prob) — dictionaries keyed by structure name holding
        the binary segmentations and the probability / encoded-contour images.
    """

    results = {}
    results_prob = {}

    return_as_cropped = settings["return_as_cropped"]

    """
    Initialisation - Read in atlases
    - image files
    - structure files

    Atlas structure:
    'ID': 'Original': 'CT Image'    : sitk.Image
                      'Struct A'    : sitk.Image
                      'Struct B'    : sitk.Image
          'RIR'     : 'CT Image'    : sitk.Image
                      'Transform'   : transform parameter map
                      'Struct A'    : sitk.Image
                      'Struct B'    : sitk.Image
          'DIR'     : 'CT Image'    : sitk.Image
                      'Transform'   : displacement field transform
                      'Weight Map'  : sitk.Image
                      'Struct A'    : sitk.Image
                      'Struct B'    : sitk.Image
    """

    logger.info("")
    # Settings
    atlas_path = settings["atlas_settings"]["atlas_path"]
    atlas_id_list = settings["atlas_settings"]["atlas_id_list"]
    atlas_structure_list = settings["atlas_settings"]["atlas_structure_list"]

    atlas_image_format = settings["atlas_settings"]["atlas_image_format"]
    atlas_label_format = settings["atlas_settings"]["atlas_label_format"]

    crop_atlas_to_structures = settings["atlas_settings"]["crop_atlas_to_structures"]
    crop_atlas_expansion_mm = settings["atlas_settings"]["crop_atlas_expansion_mm"]

    atlas_set = {}
    for atlas_id in atlas_id_list:
        atlas_set[atlas_id] = {}
        atlas_set[atlas_id]["Original"] = {}

        image = sitk.ReadImage(f"{atlas_path}/{atlas_image_format.format(atlas_id)}")

        structures = {
            struct: sitk.ReadImage(f"{atlas_path}/{atlas_label_format.format(atlas_id, struct)}")
            for struct in atlas_structure_list
        }

        if crop_atlas_to_structures:
            logger.info(f"Automatically cropping atlas: {atlas_id}")

            # np.product was removed in NumPy 2.0 — use np.prod
            original_volume = np.prod(image.GetSize())

            crop_box_size, crop_box_index = label_to_roi(
                structures.values(), expansion_mm=crop_atlas_expansion_mm
            )

            image = crop_to_roi(image, size=crop_box_size, index=crop_box_index)

            final_volume = np.prod(image.GetSize())

            logger.info(f" > Volume reduced by factor {original_volume/final_volume:.2f}")

            for struct in atlas_structure_list:
                structures[struct] = crop_to_roi(
                    structures[struct], size=crop_box_size, index=crop_box_index
                )

        atlas_set[atlas_id]["Original"]["CT Image"] = image

        for struct in atlas_structure_list:
            atlas_set[atlas_id]["Original"][struct] = structures[struct]

    """
    Step 1 - Automatic cropping
    If we have a guide structure:
        - use structure to crop target image

    Otherwise:
        - using a quick registration to register each atlas
        - expansion of the bounding box to ensure entire volume of interest is enclosed
        - target image is cropped
    """
    expansion_mm = settings["auto_crop_target_image_settings"]["expansion_mm"]

    if guide_structure:
        crop_box_size, crop_box_index = label_to_roi(guide_structure, expansion_mm=expansion_mm)
        img_crop = crop_to_roi(img, crop_box_size, crop_box_index)

        guide_structure = crop_to_roi(guide_structure, crop_box_size, crop_box_index)
        target_reg_structure = convert_mask_to_reg_structure(guide_structure, expansion=2)

    else:
        quick_reg_settings = {
            "reg_method": "similarity",
            "shrink_factors": [8],
            "smooth_sigmas": [0],
            "sampling_rate": 0.75,
            "default_value": -1000,
            "number_of_iterations": 25,
            "final_interp": sitk.sitkLinear,
            "metric": "mean_squares",
            "optimiser": "gradient_descent_line_search",
        }

        registered_crop_images = []

        logger.info("Running initial Translation tranform to crop image volume")

        # Use at most 8 atlases for the quick crop-box estimation
        for atlas_id in atlas_id_list[: min([8, len(atlas_id_list)])]:

            logger.info(f" > atlas {atlas_id}")

            # Register the atlases
            atlas_set[atlas_id]["RIR"] = {}
            atlas_image = atlas_set[atlas_id]["Original"]["CT Image"]

            reg_image, _ = linear_registration(
                img,
                atlas_image,
                **quick_reg_settings,
            )

            registered_crop_images.append(sitk.Cast(reg_image, sitk.sitkFloat32))

            del reg_image

        # Union (mean > -1000) of the quickly-registered atlases defines the crop region
        combined_image = sum(registered_crop_images) / len(registered_crop_images) > -1000

        crop_box_size, crop_box_index = label_to_roi(combined_image, expansion_mm=expansion_mm)

        img_crop = crop_to_roi(img, crop_box_size, crop_box_index)

    logger.info("Calculated crop box:")
    logger.info(f" > {crop_box_index}")
    logger.info(f" > {crop_box_size}")
    logger.info(f" > Vol reduction = {np.prod(img.GetSize())/np.prod(crop_box_size):.2f}")

    """
    Step 2 - Rigid registration of target images
    - Individual atlas images are registered to the target
    - The transformation is used to propagate the labels onto the target
    """
    linear_registration_settings = settings["linear_registration_settings"]

    # FIX: inner subscript used double quotes inside a double-quoted f-string,
    # which is a SyntaxError on Python < 3.12.
    logger.info(
        f"Running {linear_registration_settings['reg_method']} tranform to align atlas images"
    )

    for atlas_id in atlas_id_list:
        # Register the atlases
        logger.info(f" > atlas {atlas_id}")
        atlas_set[atlas_id]["RIR"] = {}

        if guide_structure:
            guide_structure_name = settings["atlas_settings"]["guide_structure_name"]
            target_reg_image = target_reg_structure
            atlas_reg_image = convert_mask_to_reg_structure(
                atlas_set[atlas_id]["Original"][guide_structure_name], expansion=2
            )

        else:
            target_reg_image = img_crop
            atlas_reg_image = atlas_set[atlas_id]["Original"]["CT Image"]

        _, initial_tfm = linear_registration(
            target_reg_image,
            atlas_reg_image,
            **linear_registration_settings,
        )

        # Save in the atlas dict
        atlas_set[atlas_id]["RIR"]["Transform"] = initial_tfm

        if guide_structure:
            atlas_set[atlas_id]["RIR"]["Reg Mask"] = apply_transform(
                input_image=atlas_reg_image,
                reference_image=img_crop,
                transform=initial_tfm,
                default_value=0,
                interpolator=sitk.sitkLinear,
            )

            expanded_atlas_guide_structure = extend_mask(
                atlas_set[atlas_id]["Original"][guide_structure_name],
                direction=("ax", "sup"),
                extension_mm=settings["atlas_settings"]["superior_extension"],
                interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
            )

            atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"] = apply_transform(
                input_image=expanded_atlas_guide_structure,
                reference_image=img_crop,
                transform=initial_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )

        atlas_set[atlas_id]["RIR"]["CT Image"] = apply_transform(
            input_image=atlas_set[atlas_id]["Original"]["CT Image"],
            reference_image=img_crop,
            transform=initial_tfm,
            default_value=-1000,
            interpolator=sitk.sitkLinear,
        )

        # sitk.WriteImage(rigid_image, f"./RR_{atlas_id}.nii.gz")

        for struct in atlas_structure_list:
            input_struct = atlas_set[atlas_id]["Original"][struct]
            atlas_set[atlas_id]["RIR"][struct] = apply_transform(
                input_image=input_struct,
                reference_image=img_crop,
                transform=initial_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )

        # Drop the original (uncropped) data, presumably to reduce memory use
        atlas_set[atlas_id]["Original"] = None

    """
    Step 3 - Deformable image registration
    - Using Fast Symmetric Diffeomorphic Demons
    """
    if guide_structure:
        structure_guided_registration_settings = settings["structure_guided_registration_settings"]

        logger.info("Running structure-guided deformable registration on atlas labels")

        for atlas_id in atlas_id_list:

            logger.info(f" > atlas {atlas_id}")

            # Register the atlases
            atlas_set[atlas_id]["DIR_STRUCT"] = {}

            deform_image, struct_guided_tfm, _ = fast_symmetric_forces_demons_registration(
                target_reg_structure,
                atlas_set[atlas_id]["RIR"]["Reg Mask"],
                **structure_guided_registration_settings,
            )

            # Save in the atlas dict
            atlas_set[atlas_id]["DIR_STRUCT"]["Reg Mask"] = deform_image
            atlas_set[atlas_id]["DIR_STRUCT"]["Transform"] = struct_guided_tfm

            atlas_set[atlas_id]["DIR_STRUCT"]["CT Image"] = apply_transform(
                input_image=atlas_set[atlas_id]["RIR"]["CT Image"],
                transform=struct_guided_tfm,
                default_value=-1000,
                interpolator=sitk.sitkLinear,
            )

            atlas_set[atlas_id]["DIR_STRUCT"][guide_structure_name + "EXPANDED"] = apply_transform(
                input_image=atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"],
                reference_image=img_crop,
                transform=struct_guided_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )

            # sitk.WriteImage(deform_image, f"./DIR_STRUCT_{atlas_id}.nii.gz")

            for struct in atlas_structure_list:
                input_struct = atlas_set[atlas_id]["RIR"][struct]
                atlas_set[atlas_id]["DIR_STRUCT"][struct] = apply_transform(
                    input_image=input_struct,
                    transform=struct_guided_tfm,
                    default_value=0,
                    interpolator=sitk.sitkNearestNeighbor,
                )

            # Drop the rigid-stage data once the structure-guided stage is saved
            atlas_set[atlas_id]["RIR"] = None

    # Settings
    deformable_registration_settings = settings["deformable_registration_settings"]

    logger.info("Running DIR to refine atlas image registration")

    for atlas_id in atlas_id_list:

        logger.info(f" > atlas {atlas_id}")

        # Register the atlases
        atlas_set[atlas_id]["DIR"] = {}

        # The DIR starts from the structure-guided stage when available
        if guide_structure:
            label = "DIR_STRUCT"
        else:
            label = "RIR"

        atlas_reg_image = atlas_set[atlas_id][label]["CT Image"]
        target_reg_image = img_crop

        if guide_structure:
            expanded_atlas_mask = atlas_set[atlas_id]["DIR_STRUCT"][
                guide_structure_name + "EXPANDED"
            ]
            expanded_target_mask = extend_mask(
                guide_structure,
                direction=("ax", "sup"),
                extension_mm=settings["atlas_settings"]["superior_extension"],
                interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
            )

            combined_mask = sitk.Maximum(expanded_atlas_mask, expanded_target_mask)

            atlas_reg_image = sitk.Mask(atlas_reg_image, combined_mask, outsideValue=-1000)
            atlas_reg_image = sitk.Mask(
                atlas_reg_image, atlas_reg_image > -400, outsideValue=-1000
            )

            target_reg_image = sitk.Mask(target_reg_image, combined_mask, outsideValue=-1000)
            target_reg_image = sitk.Mask(
                target_reg_image, atlas_reg_image > -400, outsideValue=-1000
            )

        deform_image, dir_tfm, _ = fast_symmetric_forces_demons_registration(
            target_reg_image,
            atlas_reg_image,
            **deformable_registration_settings,
        )

        # Save in the atlas dict
        atlas_set[atlas_id]["DIR"]["Transform"] = dir_tfm

        atlas_set[atlas_id]["DIR"]["CT Image"] = apply_transform(
            input_image=atlas_set[atlas_id][label]["CT Image"],
            transform=dir_tfm,
            default_value=-1000,
            interpolator=sitk.sitkLinear,
        )

        for struct in atlas_structure_list:
            input_struct = atlas_set[atlas_id][label][struct]
            atlas_set[atlas_id]["DIR"][struct] = apply_transform(
                input_image=input_struct,
                transform=dir_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )

        atlas_set[atlas_id][label] = None

    """
    Step 4 - Iterative atlas removal
    - This is an automatic process that will attempt to remove inconsistent atlases from the entire set

    """
    # Compute weight maps
    # Here we use simple GWV as this minises the potentially negative influence of mis-registered
    # atlases
    iar_settings = settings["iar_settings"]

    if iar_settings["reference_structure"]:
        for atlas_id in atlas_id_list:
            atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
            weight_map = compute_weight_map(img_crop, atlas_image, vote_type="global")
            atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map

        atlas_set = run_iar(atlas_set=atlas_set, **iar_settings)

    else:
        logger.info("IAR: No reference structure, skipping iterative atlas removal.")

    """
    Step 4 - Vessel Splining
    """
    vessel_spline_settings = settings["vessel_spline_settings"]

    if len(vessel_spline_settings["vessel_name_list"]) > 0:

        segmented_vessel_dict = vessel_spline_generation(
            img_crop, atlas_set, **vessel_spline_settings
        )
    else:
        logger.info("No vessel splining required, continue.")

    """
    Step 5 - Label Fusion
    """
    # Compute weight maps
    vote_type = settings["label_fusion_settings"]["vote_type"]
    vote_params = settings["label_fusion_settings"]["vote_params"]

    # Compute weight maps
    for atlas_id in list(atlas_set.keys()):
        atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
        weight_map = compute_weight_map(
            img_crop, atlas_image, vote_type=vote_type, vote_params=vote_params
        )
        atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map

    combined_label_dict = combine_labels(atlas_set, atlas_structure_list)

    """
    Step 6 - Paste the cropped structure into the original image space
    """
    logger.info("Generating binary segmentations.")
    template_img_binary = sitk.Cast((img * 0), sitk.sitkUInt8)
    template_img_prob = sitk.Cast((img * 0), sitk.sitkFloat64)

    # Only threshold structures for which an optimal threshold is defined
    vote_structures = settings["label_fusion_settings"]["optimal_threshold"].keys()
    vote_structures = [i for i in vote_structures if i in atlas_structure_list]

    for structure_name in vote_structures:

        probability_map = combined_label_dict[structure_name]

        optimal_threshold = settings["label_fusion_settings"]["optimal_threshold"][structure_name]

        binary_struct = process_probability_image(probability_map, optimal_threshold)

        if return_as_cropped:
            results[structure_name] = binary_struct

            if settings["return_proba_as_contours"]:
                atlas_contours = [
                    atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
                ]
                results_prob[structure_name] = binary_encode_structure_list(atlas_contours)

            else:
                results_prob[structure_name] = probability_map

            # We also generate another version of the guide_structure using the atlas contours
            # We *can* return this, but probably don't want to
            # Here this check is performed
            if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
                results[guide_structure_name] = guide_structure
                results_prob[guide_structure_name] = guide_structure

        else:
            if settings["return_proba_as_contours"]:
                atlas_contours = [
                    atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
                ]
                probability_img = binary_encode_structure_list(atlas_contours)
                template_img_prob = sitk.Cast((img * 0), sitk.sitkUInt32)

            else:
                probability_img = probability_map

            # Un-crop binary structure
            paste_img_binary = sitk.Paste(
                template_img_binary,
                binary_struct,
                binary_struct.GetSize(),
                (0, 0, 0),
                crop_box_index,
            )
            results[structure_name] = paste_img_binary

            # Un-crop probability map
            paste_prob_img = sitk.Paste(
                template_img_prob,
                probability_img,
                probability_img.GetSize(),
                (0, 0, 0),
                crop_box_index,
            )
            results_prob[structure_name] = paste_prob_img

            # Un-crop the guide structure
            if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
                new_guide_structure = sitk.Paste(
                    template_img_binary,
                    guide_structure,
                    guide_structure.GetSize(),
                    (0, 0, 0),
                    crop_box_index,
                )
                results[guide_structure_name] = new_guide_structure
                results_prob[guide_structure_name] = new_guide_structure

    for structure_name in vessel_spline_settings["vessel_name_list"]:
        binary_struct = segmented_vessel_dict[structure_name]

        if return_as_cropped:
            results[structure_name] = binary_struct

            vessel_list = [
                atlas_set[atlas_id]["DIR"][structure_name] for atlas_id in list(atlas_set.keys())
            ]

        else:
            # Un-crop binary vessel
            paste_img_binary = sitk.Paste(
                template_img_binary,
                binary_struct,
                binary_struct.GetSize(),
                (0, 0, 0),
                crop_box_index,
            )
            results[structure_name] = paste_img_binary

            vessel_list = []
            for atlas_id in list(atlas_set.keys()):
                paste_img_binary = sitk.Paste(
                    template_img_binary,
                    atlas_set[atlas_id]["DIR"][structure_name],
                    atlas_set[atlas_id]["DIR"][structure_name].GetSize(),
                    (0, 0, 0),
                    crop_box_index,
                )
                vessel_list.append(paste_img_binary)

        # Encode list of vessels
        encoded_vessels = binary_encode_structure_list(vessel_list)
        results_prob[structure_name] = encoded_vessels

    """
    Step 7 - Geometric definitions of cardiac valves and conduction system nodes
    """
    geometric_segmentation_settings = settings["geometric_segmentation_settings"]

    if geometric_segmentation_settings["run_geometric_algorithms"]:

        logger.info("Computing geometric definitions for valves and conduction system.")

        geom_atlas_names = geometric_segmentation_settings["atlas_structure_names"]
        geom_valve_defs = geometric_segmentation_settings["valve_definitions"]
        geom_conduction_defs = geometric_segmentation_settings["conduction_system_definitions"]

        # 1 - MITRAL VALVE
        mv_name = "MITRALVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[mv_name] = generate_valve_using_cylinder(
            label_atrium=results[geom_atlas_names["atlas_left_atrium"]],
            label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
            radius_mm=geom_valve_defs["mitral_valve_radius_mm"],
            height_mm=geom_valve_defs["mitral_valve_thickness_mm"],
        )

        # 2 - TRICUSPID VALVE
        tv_name = "TRICUSPIDVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[tv_name] = generate_valve_using_cylinder(
            label_atrium=results[geom_atlas_names["atlas_right_atrium"]],
            label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
            radius_mm=geom_valve_defs["tricuspid_valve_radius_mm"],
            height_mm=geom_valve_defs["tricuspid_valve_thickness_mm"],
        )

        # 3 - AORTIC VALVE
        av_name = "AORTICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[av_name] = generate_valve_from_great_vessel(
            label_great_vessel=results[geom_atlas_names["atlas_ascending_aorta"]],
            label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
            valve_thickness_mm=geom_valve_defs["aortic_valve_thickness_mm"],
        )

        # 4 - PULMONIC VALVE
        pv_name = "PULMONICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[pv_name] = generate_valve_from_great_vessel(
            label_great_vessel=results[geom_atlas_names["atlas_pulmonary_artery"]],
            label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
            valve_thickness_mm=geom_valve_defs["pulmonic_valve_thickness_mm"],
        )

        # 5 - SINOATRIAL NODE
        san_name = "SAN" + geometric_segmentation_settings["geometric_name_suffix"]
        results[san_name] = geometric_sinoatrialnode(
            label_svc=results[geom_atlas_names["atlas_superior_vena_cava"]],
            label_ra=results[geom_atlas_names["atlas_right_atrium"]],
            label_wholeheart=results[geom_atlas_names["atlas_whole_heart"]],
            radius_mm=geom_conduction_defs["sinoatrial_node_radius_mm"],
        )

        # 6 - ATRIOVENTRICULAR NODE
        avn_name = "AVN" + geometric_segmentation_settings["geometric_name_suffix"]
        results[avn_name] = geometric_atrioventricularnode(
            label_la=results[geom_atlas_names["atlas_left_atrium"]],
            label_lv=results[geom_atlas_names["atlas_left_ventricle"]],
            label_ra=results[geom_atlas_names["atlas_right_atrium"]],
            label_rv=results[geom_atlas_names["atlas_right_ventricle"]],
            radius_mm=geom_conduction_defs["atrioventricular_node_radius_mm"],
        )

    """
    Step 8 - Post-processing
    """
    postprocessing_settings = settings["postprocessing_settings"]

    if postprocessing_settings["run_postprocessing"]:
        logger.info("Running post-processing.")

        # Remove any smaller components and perform morphological closing (hole filling)
        binaryfillhole_img = [
            int(postprocessing_settings["binaryfillhole_mm"] / sp) for sp in img.GetSpacing()
        ]

        for structure_name in postprocessing_settings["structures_for_binaryfillhole"]:

            if structure_name not in results.keys():
                continue

            contour_s = results[structure_name]
            # Keep only the largest connected component before closing
            contour_s = sitk.RelabelComponent(sitk.ConnectedComponent(contour_s)) == 1
            contour_s = sitk.BinaryMorphologicalClosing(contour_s, binaryfillhole_img)
            results[structure_name] = contour_s

        # Remove any overlaps
        input_overlap = {
            s: results[s] for s in postprocessing_settings["structures_for_overlap_correction"]
        }
        output_overlap = correct_volume_overlap(input_overlap)

        for s in postprocessing_settings["structures_for_overlap_correction"]:
            results[s] = output_overlap[s]

    if return_as_cropped:
        results["CROP_IMAGE"] = img_crop

    logger.info("Done!")

    return results, results_prob
| # Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import SimpleITK as sitk
import numpy as np
from loguru import logger
from platipy.imaging.registration.utils import apply_transform, convert_mask_to_reg_structure
from platipy.imaging.registration.linear import (
linear_registration,
)
from platipy.imaging.registration.deformable import (
fast_symmetric_forces_demons_registration,
)
from platipy.imaging.label.fusion import (
process_probability_image,
compute_weight_map,
combine_labels,
)
from platipy.imaging.label.iar import run_iar
from platipy.imaging.utils.vessel import vessel_spline_generation
from platipy.imaging.utils.valve import (
generate_valve_from_great_vessel,
generate_valve_using_cylinder,
)
from platipy.imaging.utils.conduction import (
geometric_sinoatrialnode,
geometric_atrioventricularnode,
)
from platipy.imaging.utils.crop import label_to_roi, crop_to_roi
from platipy.imaging.generation.mask import extend_mask
from platipy.imaging.label.utils import binary_encode_structure_list, correct_volume_overlap
# Location of the atlas data on disk; may be overridden with the
# ATLAS_PATH environment variable.
ATLAS_PATH = os.environ.get("ATLAS_PATH", "/atlas")
# Default configuration for `run_cardiac_segmentation`. Pass a modified copy
# via the `settings` argument to override any of these values.
CARDIAC_SETTINGS_DEFAULTS = {
    # Which atlas cases to use, and where/how to read their images and labels.
    "atlas_settings": {
        "atlas_id_list": [
            "03",
            "05",
            "08",
            "10",
            "11",
            "12",
            "13",
            "16",
            "24",
            "35",
        ],
        "atlas_structure_list": [
            "AORTICVALVE",
            "ASCENDINGAORTA",
            "LANTDESCARTERY",
            "LCIRCUMFLEXARTERY",
            "LCORONARYARTERY",
            "LEFTATRIUM",
            "LEFTVENTRICLE",
            "MITRALVALVE",
            "PULMONARYARTERY",
            "PULMONICVALVE",
            "RCORONARYARTERY",
            "RIGHTATRIUM",
            "RIGHTVENTRICLE",
            "SVC",
            "TRICUSPIDVALVE",
            "WHOLEHEART",
        ],
        "atlas_path": ATLAS_PATH,
        # Format strings filled with (atlas_id) and (atlas_id, structure name).
        "atlas_image_format": "Case_{0}/Images/Case_{0}_CROP.nii.gz",
        "atlas_label_format": "Case_{0}/Structures/Case_{0}_{1}_CROP.nii.gz",
        "crop_atlas_to_structures": False,
        "crop_atlas_expansion_mm": (20, 20, 40),
        "guide_structure_name": "WHOLEHEART",
        "superior_extension": 30,
    },
    # Expansion applied when automatically cropping the target image.
    "auto_crop_target_image_settings": {
        "expansion_mm": [20, 20, 40],
    },
    # Step 2 - linear registration of each atlas image to the target.
    "linear_registration_settings": {
        "reg_method": "affine",
        "shrink_factors": [16, 8, 4],
        "smooth_sigmas": [0, 0, 0],
        "sampling_rate": 0.75,
        "default_value": -1000,
        "number_of_iterations": 50,
        "metric": "mean_squares",
        "optimiser": "gradient_descent_line_search",
        "verbose": False,
    },
    # Deformable registration driven by the guide structure (only used when a
    # guide structure is supplied to `run_cardiac_segmentation`).
    "structure_guided_registration_settings": {
        "isotropic_resample": True,
        "resolution_staging": [
            16,
            8,
            2,
        ],  # specify voxel size (mm) since isotropic_resample is set
        "iteration_staging": [50, 50, 50],
        "smoothing_sigmas": [0, 0, 0],
        "ncores": 8,
        "default_value": 0,
        "verbose": False,
    },
    # Step 3 - deformable (demons) registration refining the alignment.
    "deformable_registration_settings": {
        "isotropic_resample": True,
        "resolution_staging": [
            6,
            3,
            1.5,
        ],  # specify voxel size (mm) since isotropic_resample is set
        "iteration_staging": [200, 150, 100],
        "smoothing_sigmas": [0, 0, 0],
        "ncores": 8,
        "default_value": 0,
        "verbose": False,
    },
    # Step 4 - iterative atlas removal; skipped unless `reference_structure`
    # is set to something truthy.
    "iar_settings": {
        "reference_structure": False,
        "smooth_distance_maps": True,
        "smooth_sigma": 1,
        "z_score_statistic": "mad",
        "outlier_method": "iqr",
        "outlier_factor": 1.5,
        "min_best_atlases": 5,
        "project_on_sphere": False,
    },
    # Step 5 - label fusion; per-structure thresholds convert the fused
    # probability maps into binary masks.
    "label_fusion_settings": {
        "vote_type": "unweighted",
        "vote_params": None,
        "optimal_threshold": {
            "AORTICVALVE": 0.5,
            "ASCENDINGAORTA": 0.44,
            "LEFTATRIUM": 0.40,
            "LEFTVENTRICLE": 0.45,
            "MITRALVALVE": 0.5,
            "PULMONARYARTERY": 0.46,
            "PULMONICVALVE": 0.5,
            "RIGHTATRIUM": 0.38,
            "RIGHTVENTRICLE": 0.42,
            "SVC": 0.44,
            "TRICUSPIDVALVE": 0.5,
            "WHOLEHEART": 0.5,
        },
    },
    # Coronary-vessel spline generation parameters, keyed by vessel name.
    "vessel_spline_settings": {
        "vessel_name_list": [
            "LANTDESCARTERY",
            "LCIRCUMFLEXARTERY",
            "LCORONARYARTERY",
            "RCORONARYARTERY",
        ],
        "vessel_radius_mm_dict": {
            "LANTDESCARTERY": 2,
            "LCIRCUMFLEXARTERY": 2,
            "LCORONARYARTERY": 2,
            "RCORONARYARTERY": 2,
        },
        "scan_direction_dict": {
            "LANTDESCARTERY": "z",
            "LCIRCUMFLEXARTERY": "z",
            "LCORONARYARTERY": "x",
            "RCORONARYARTERY": "z",
        },
        "stop_condition_type_dict": {
            "LANTDESCARTERY": "count",
            "LCIRCUMFLEXARTERY": "count",
            "LCORONARYARTERY": "count",
            "RCORONARYARTERY": "count",
        },
        "stop_condition_value_dict": {
            "LANTDESCARTERY": 2,
            "LCIRCUMFLEXARTERY": 2,
            "LCORONARYARTERY": 2,
            "RCORONARYARTERY": 2,
        },
    },
    # Step 7 - geometric valve / conduction-node definitions built from the
    # fused atlas masks named below.
    "geometric_segmentation_settings": {
        "run_geometric_algorithms": True,
        "geometric_name_suffix": "_GEOMETRIC",
        "atlas_structure_names": {
            "atlas_left_ventricle": "LEFTVENTRICLE",
            "atlas_right_ventricle": "RIGHTVENTRICLE",
            "atlas_left_atrium": "LEFTATRIUM",
            "atlas_right_atrium": "RIGHTATRIUM",
            "atlas_ascending_aorta": "ASCENDINGAORTA",
            "atlas_pulmonary_artery": "PULMONARYARTERY",
            "atlas_superior_vena_cava": "SVC",
            "atlas_whole_heart": "WHOLEHEART",
        },
        "valve_definitions": {
            "mitral_valve_thickness_mm": 10,
            "mitral_valve_radius_mm": 15,
            "tricuspid_valve_thickness_mm": 10,
            "tricuspid_valve_radius_mm": 15,
            "pulmonic_valve_thickness_mm": 10,
            "aortic_valve_thickness_mm": 10,
        },
        "conduction_system_definitions": {
            "sinoatrial_node_radius_mm": 10,
            "atrioventricular_node_radius_mm": 10,
        },
    },
    # Step 8 - hole filling, small-component removal and overlap correction.
    "postprocessing_settings": {
        "run_postprocessing": True,
        "binaryfillhole_mm": 3,
        "structures_for_binaryfillhole": [
            "ASCENDINGAORTA",
            "LEFTATRIUM",
            "LEFTVENTRICLE",
            "RIGHTATRIUM",
            "RIGHTVENTRICLE",
            "SVC",
            "AORTICVALVE",
            "MITRALVALVE",
            "PULMONICVALVE",
            "TRICUSPIDVALVE",
            "WHOLEHEART",
        ],
        "structures_for_overlap_correction": [
            "ASCENDINGAORTA",
            "LEFTATRIUM",
            "LEFTVENTRICLE",
            "RIGHTATRIUM",
            "RIGHTVENTRICLE",
            "PULMONARYARTERY",
            "SVC",
        ],
    },
    # Output options.
    "return_atlas_guide_structure": False,
    "return_as_cropped": False,
    "return_proba_as_contours": False,
}
def run_cardiac_segmentation(img, guide_structure=None, settings=CARDIAC_SETTINGS_DEFAULTS):
    """Runs the atlas-based cardiac segmentation.

    Args:
        img (sitk.Image): The target image to segment.
        guide_structure (sitk.Image, optional): A whole-heart (or similar) mask
            used to crop the target and to guide registration. Defaults to None.
        settings (dict, optional): Dictionary containing settings for algorithm.
            Defaults to CARDIAC_SETTINGS_DEFAULTS.

    Returns:
        tuple: (results, results_prob) — dictionaries mapping structure name to
        binary segmentation and to probability/encoded-contour image, respectively.
    """
    results = {}
    results_prob = {}
    return_as_cropped = settings["return_as_cropped"]
    """
    Initialisation - Read in atlases
    - image files
    - structure files
    Atlas structure:
    'ID': 'Original': 'CT Image' : sitk.Image
                      'Struct A' : sitk.Image
                      'Struct B' : sitk.Image
          'RIR'     : 'CT Image' : sitk.Image
                      'Transform' : transform parameter map
                      'Struct A' : sitk.Image
                      'Struct B' : sitk.Image
          'DIR'     : 'CT Image' : sitk.Image
                      'Transform' : displacement field transform
                      'Weight Map' : sitk.Image
                      'Struct A' : sitk.Image
                      'Struct B' : sitk.Image
    """
    logger.info("")
    # Settings
    atlas_path = settings["atlas_settings"]["atlas_path"]
    atlas_id_list = settings["atlas_settings"]["atlas_id_list"]
    atlas_structure_list = settings["atlas_settings"]["atlas_structure_list"]
    atlas_image_format = settings["atlas_settings"]["atlas_image_format"]
    atlas_label_format = settings["atlas_settings"]["atlas_label_format"]
    crop_atlas_to_structures = settings["atlas_settings"]["crop_atlas_to_structures"]
    crop_atlas_expansion_mm = settings["atlas_settings"]["crop_atlas_expansion_mm"]
    atlas_set = {}
    for atlas_id in atlas_id_list:
        atlas_set[atlas_id] = {}
        atlas_set[atlas_id]["Original"] = {}
        image = sitk.ReadImage(f"{atlas_path}/{atlas_image_format.format(atlas_id)}")
        structures = {
            struct: sitk.ReadImage(f"{atlas_path}/{atlas_label_format.format(atlas_id, struct)}")
            for struct in atlas_structure_list
        }
        if crop_atlas_to_structures:
            logger.info(f"Automatically cropping atlas: {atlas_id}")
            # NOTE: np.prod replaces the deprecated np.product (removed in NumPy 2.0).
            original_volume = np.prod(image.GetSize())
            crop_box_size, crop_box_index = label_to_roi(
                structures.values(), expansion_mm=crop_atlas_expansion_mm
            )
            image = crop_to_roi(image, size=crop_box_size, index=crop_box_index)
            final_volume = np.prod(image.GetSize())
            logger.info(f" > Volume reduced by factor {original_volume/final_volume:.2f}")
            for struct in atlas_structure_list:
                structures[struct] = crop_to_roi(
                    structures[struct], size=crop_box_size, index=crop_box_index
                )
        atlas_set[atlas_id]["Original"]["CT Image"] = image
        for struct in atlas_structure_list:
            atlas_set[atlas_id]["Original"][struct] = structures[struct]
    """
    Step 1 - Automatic cropping
    If we have a guide structure:
    - use structure to crop target image
    Otherwise:
    - using a quick registration to register each atlas
    - expansion of the bounding box to ensure entire volume of interest is enclosed
    - target image is cropped
    """
    expansion_mm = settings["auto_crop_target_image_settings"]["expansion_mm"]
    if guide_structure:
        crop_box_size, crop_box_index = label_to_roi(guide_structure, expansion_mm=expansion_mm)
        img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
        guide_structure = crop_to_roi(guide_structure, crop_box_size, crop_box_index)
        target_reg_structure = convert_mask_to_reg_structure(guide_structure, expansion=2)
    else:
        quick_reg_settings = {
            "reg_method": "similarity",
            "shrink_factors": [8],
            "smooth_sigmas": [0],
            "sampling_rate": 0.75,
            "default_value": -1000,
            "number_of_iterations": 25,
            "final_interp": sitk.sitkLinear,
            "metric": "mean_squares",
            "optimiser": "gradient_descent_line_search",
        }
        registered_crop_images = []
        logger.info("Running initial Translation tranform to crop image volume")
        # Use at most 8 atlases for the quick crop-box estimate.
        for atlas_id in atlas_id_list[: min([8, len(atlas_id_list)])]:
            logger.info(f" > atlas {atlas_id}")
            # Register the atlases
            atlas_set[atlas_id]["RIR"] = {}
            atlas_image = atlas_set[atlas_id]["Original"]["CT Image"]
            reg_image, _ = linear_registration(
                img,
                atlas_image,
                **quick_reg_settings,
            )
            registered_crop_images.append(sitk.Cast(reg_image, sitk.sitkFloat32))
            del reg_image
        # Any voxel covered by the (averaged) registered atlases defines the ROI.
        combined_image = sum(registered_crop_images) / len(registered_crop_images) > -1000
        crop_box_size, crop_box_index = label_to_roi(combined_image, expansion_mm=expansion_mm)
        img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
    logger.info("Calculated crop box:")
    logger.info(f" > {crop_box_index}")
    logger.info(f" > {crop_box_size}")
    logger.info(f" > Vol reduction = {np.prod(img.GetSize())/np.prod(crop_box_size):.2f}")
    """
    Step 2 - Rigid registration of target images
    - Individual atlas images are registered to the target
    - The transformation is used to propagate the labels onto the target
    """
    linear_registration_settings = settings["linear_registration_settings"]
    logger.info(
        f"Running {linear_registration_settings['reg_method']} tranform to align atlas images"
    )
    for atlas_id in atlas_id_list:
        # Register the atlases
        logger.info(f" > atlas {atlas_id}")
        atlas_set[atlas_id]["RIR"] = {}
        if guide_structure:
            # Register guide-structure distance maps rather than the raw CT.
            guide_structure_name = settings["atlas_settings"]["guide_structure_name"]
            target_reg_image = target_reg_structure
            atlas_reg_image = convert_mask_to_reg_structure(
                atlas_set[atlas_id]["Original"][guide_structure_name], expansion=2
            )
        else:
            target_reg_image = img_crop
            atlas_reg_image = atlas_set[atlas_id]["Original"]["CT Image"]
        _, initial_tfm = linear_registration(
            target_reg_image,
            atlas_reg_image,
            **linear_registration_settings,
        )
        # Save in the atlas dict
        atlas_set[atlas_id]["RIR"]["Transform"] = initial_tfm
        if guide_structure:
            atlas_set[atlas_id]["RIR"]["Reg Mask"] = apply_transform(
                input_image=atlas_reg_image,
                reference_image=img_crop,
                transform=initial_tfm,
                default_value=0,
                interpolator=sitk.sitkLinear,
            )
            expanded_atlas_guide_structure = extend_mask(
                atlas_set[atlas_id]["Original"][guide_structure_name],
                direction=("ax", "sup"),
                extension_mm=settings["atlas_settings"]["superior_extension"],
                interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
            )
            atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"] = apply_transform(
                input_image=expanded_atlas_guide_structure,
                reference_image=img_crop,
                transform=initial_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )
        atlas_set[atlas_id]["RIR"]["CT Image"] = apply_transform(
            input_image=atlas_set[atlas_id]["Original"]["CT Image"],
            reference_image=img_crop,
            transform=initial_tfm,
            default_value=-1000,
            interpolator=sitk.sitkLinear,
        )
        # sitk.WriteImage(rigid_image, f"./RR_{atlas_id}.nii.gz")
        for struct in atlas_structure_list:
            input_struct = atlas_set[atlas_id]["Original"][struct]
            atlas_set[atlas_id]["RIR"][struct] = apply_transform(
                input_image=input_struct,
                reference_image=img_crop,
                transform=initial_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )
        # Release the originals to free memory.
        atlas_set[atlas_id]["Original"] = None
    """
    Step 3 - Deformable image registration
    - Using Fast Symmetric Diffeomorphic Demons
    """
    if guide_structure:
        structure_guided_registration_settings = settings["structure_guided_registration_settings"]
        logger.info("Running structure-guided deformable registration on atlas labels")
        for atlas_id in atlas_id_list:
            logger.info(f" > atlas {atlas_id}")
            # Register the atlases
            atlas_set[atlas_id]["DIR_STRUCT"] = {}
            deform_image, struct_guided_tfm, _ = fast_symmetric_forces_demons_registration(
                target_reg_structure,
                atlas_set[atlas_id]["RIR"]["Reg Mask"],
                **structure_guided_registration_settings,
            )
            # Save in the atlas dict
            atlas_set[atlas_id]["DIR_STRUCT"]["Reg Mask"] = deform_image
            atlas_set[atlas_id]["DIR_STRUCT"]["Transform"] = struct_guided_tfm
            atlas_set[atlas_id]["DIR_STRUCT"]["CT Image"] = apply_transform(
                input_image=atlas_set[atlas_id]["RIR"]["CT Image"],
                transform=struct_guided_tfm,
                default_value=-1000,
                interpolator=sitk.sitkLinear,
            )
            atlas_set[atlas_id]["DIR_STRUCT"][guide_structure_name + "EXPANDED"] = apply_transform(
                input_image=atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"],
                reference_image=img_crop,
                transform=struct_guided_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )
            # sitk.WriteImage(deform_image, f"./DIR_STRUCT_{atlas_id}.nii.gz")
            for struct in atlas_structure_list:
                input_struct = atlas_set[atlas_id]["RIR"][struct]
                atlas_set[atlas_id]["DIR_STRUCT"][struct] = apply_transform(
                    input_image=input_struct,
                    transform=struct_guided_tfm,
                    default_value=0,
                    interpolator=sitk.sitkNearestNeighbor,
                )
            # Release the rigid results to free memory.
            atlas_set[atlas_id]["RIR"] = None
    # Settings
    deformable_registration_settings = settings["deformable_registration_settings"]
    logger.info("Running DIR to refine atlas image registration")
    for atlas_id in atlas_id_list:
        logger.info(f" > atlas {atlas_id}")
        # Register the atlases
        atlas_set[atlas_id]["DIR"] = {}
        # Start from the structure-guided result if available, otherwise the rigid one.
        if guide_structure:
            label = "DIR_STRUCT"
        else:
            label = "RIR"
        atlas_reg_image = atlas_set[atlas_id][label]["CT Image"]
        target_reg_image = img_crop
        if guide_structure:
            # Mask both images to the (expanded) guide structure so the demons
            # registration focuses on the region of interest.
            expanded_atlas_mask = atlas_set[atlas_id]["DIR_STRUCT"][
                guide_structure_name + "EXPANDED"
            ]
            expanded_target_mask = extend_mask(
                guide_structure,
                direction=("ax", "sup"),
                extension_mm=settings["atlas_settings"]["superior_extension"],
                interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
            )
            combined_mask = sitk.Maximum(expanded_atlas_mask, expanded_target_mask)
            atlas_reg_image = sitk.Mask(atlas_reg_image, combined_mask, outsideValue=-1000)
            atlas_reg_image = sitk.Mask(
                atlas_reg_image, atlas_reg_image > -400, outsideValue=-1000
            )
            target_reg_image = sitk.Mask(target_reg_image, combined_mask, outsideValue=-1000)
            target_reg_image = sitk.Mask(
                target_reg_image, atlas_reg_image > -400, outsideValue=-1000
            )
        deform_image, dir_tfm, _ = fast_symmetric_forces_demons_registration(
            target_reg_image,
            atlas_reg_image,
            **deformable_registration_settings,
        )
        # Save in the atlas dict
        atlas_set[atlas_id]["DIR"]["Transform"] = dir_tfm
        atlas_set[atlas_id]["DIR"]["CT Image"] = apply_transform(
            input_image=atlas_set[atlas_id][label]["CT Image"],
            transform=dir_tfm,
            default_value=-1000,
            interpolator=sitk.sitkLinear,
        )
        for struct in atlas_structure_list:
            input_struct = atlas_set[atlas_id][label][struct]
            atlas_set[atlas_id]["DIR"][struct] = apply_transform(
                input_image=input_struct,
                transform=dir_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )
        # Release the intermediate registration results to free memory.
        atlas_set[atlas_id][label] = None
    """
    Step 4 - Iterative atlas removal
    - This is an automatic process that will attempt to remove inconsistent atlases from the entire set
    """
    # Compute weight maps
    # Here we use simple GWV as this minises the potentially negative influence of mis-registered
    # atlases
    iar_settings = settings["iar_settings"]
    if iar_settings["reference_structure"]:
        for atlas_id in atlas_id_list:
            atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
            weight_map = compute_weight_map(img_crop, atlas_image, vote_type="global")
            atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map
        atlas_set = run_iar(atlas_set=atlas_set, **iar_settings)
    else:
        logger.info("IAR: No reference structure, skipping iterative atlas removal.")
    """
    Step 4 - Vessel Splining
    """
    vessel_spline_settings = settings["vessel_spline_settings"]
    if len(vessel_spline_settings["vessel_name_list"]) > 0:
        segmented_vessel_dict = vessel_spline_generation(
            img_crop, atlas_set, **vessel_spline_settings
        )
    else:
        logger.info("No vessel splining required, continue.")
    """
    Step 5 - Label Fusion
    """
    # Compute weight maps
    vote_type = settings["label_fusion_settings"]["vote_type"]
    vote_params = settings["label_fusion_settings"]["vote_params"]
    # Compute weight maps
    for atlas_id in list(atlas_set.keys()):
        atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
        weight_map = compute_weight_map(
            img_crop, atlas_image, vote_type=vote_type, vote_params=vote_params
        )
        atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map
    combined_label_dict = combine_labels(atlas_set, atlas_structure_list)
    """
    Step 6 - Paste the cropped structure into the original image space
    """
    logger.info("Generating binary segmentations.")
    template_img_binary = sitk.Cast((img * 0), sitk.sitkUInt8)
    template_img_prob = sitk.Cast((img * 0), sitk.sitkFloat64)
    vote_structures = settings["label_fusion_settings"]["optimal_threshold"].keys()
    vote_structures = [i for i in vote_structures if i in atlas_structure_list]
    for structure_name in vote_structures:
        probability_map = combined_label_dict[structure_name]
        optimal_threshold = settings["label_fusion_settings"]["optimal_threshold"][structure_name]
        binary_struct = process_probability_image(probability_map, optimal_threshold)
        if return_as_cropped:
            results[structure_name] = binary_struct
            if settings["return_proba_as_contours"]:
                atlas_contours = [
                    atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
                ]
                results_prob[structure_name] = binary_encode_structure_list(atlas_contours)
            else:
                results_prob[structure_name] = probability_map
            # We also generate another version of the guide_structure using the atlas contours
            # We *can* return this, but probably don't want to
            # Here this check is performed
            if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
                results[guide_structure_name] = guide_structure
                results_prob[guide_structure_name] = guide_structure
        else:
            if settings["return_proba_as_contours"]:
                atlas_contours = [
                    atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
                ]
                probability_img = binary_encode_structure_list(atlas_contours)
                template_img_prob = sitk.Cast((img * 0), sitk.sitkUInt32)
            else:
                probability_img = probability_map
            # Un-crop binary structure
            paste_img_binary = sitk.Paste(
                template_img_binary,
                binary_struct,
                binary_struct.GetSize(),
                (0, 0, 0),
                crop_box_index,
            )
            results[structure_name] = paste_img_binary
            # Un-crop probability map
            paste_prob_img = sitk.Paste(
                template_img_prob,
                probability_img,
                probability_img.GetSize(),
                (0, 0, 0),
                crop_box_index,
            )
            results_prob[structure_name] = paste_prob_img
            # Un-crop the guide structure
            if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
                new_guide_structure = sitk.Paste(
                    template_img_binary,
                    guide_structure,
                    guide_structure.GetSize(),
                    (0, 0, 0),
                    crop_box_index,
                )
                results[guide_structure_name] = new_guide_structure
                results_prob[guide_structure_name] = new_guide_structure
    for structure_name in vessel_spline_settings["vessel_name_list"]:
        binary_struct = segmented_vessel_dict[structure_name]
        if return_as_cropped:
            results[structure_name] = binary_struct
            vessel_list = [
                atlas_set[atlas_id]["DIR"][structure_name] for atlas_id in list(atlas_set.keys())
            ]
        else:
            # Un-crop binary vessel
            paste_img_binary = sitk.Paste(
                template_img_binary,
                binary_struct,
                binary_struct.GetSize(),
                (0, 0, 0),
                crop_box_index,
            )
            results[structure_name] = paste_img_binary
            vessel_list = []
            for atlas_id in list(atlas_set.keys()):
                paste_img_binary = sitk.Paste(
                    template_img_binary,
                    atlas_set[atlas_id]["DIR"][structure_name],
                    atlas_set[atlas_id]["DIR"][structure_name].GetSize(),
                    (0, 0, 0),
                    crop_box_index,
                )
                vessel_list.append(paste_img_binary)
        # Encode list of vessels
        encoded_vessels = binary_encode_structure_list(vessel_list)
        results_prob[structure_name] = encoded_vessels
    """
    Step 7 - Geometric definitions of cardiac valves and conduction system nodes
    """
    geometric_segmentation_settings = settings["geometric_segmentation_settings"]
    if geometric_segmentation_settings["run_geometric_algorithms"]:
        logger.info("Computing geometric definitions for valves and conduction system.")
        geom_atlas_names = geometric_segmentation_settings["atlas_structure_names"]
        geom_valve_defs = geometric_segmentation_settings["valve_definitions"]
        geom_conduction_defs = geometric_segmentation_settings["conduction_system_definitions"]
        # 1 - MITRAL VALVE
        mv_name = "MITRALVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[mv_name] = generate_valve_using_cylinder(
            label_atrium=results[geom_atlas_names["atlas_left_atrium"]],
            label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
            radius_mm=geom_valve_defs["mitral_valve_radius_mm"],
            height_mm=geom_valve_defs["mitral_valve_thickness_mm"],
        )
        # 2 - TRICUSPID VALVE
        tv_name = "TRICUSPIDVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[tv_name] = generate_valve_using_cylinder(
            label_atrium=results[geom_atlas_names["atlas_right_atrium"]],
            label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
            radius_mm=geom_valve_defs["tricuspid_valve_radius_mm"],
            height_mm=geom_valve_defs["tricuspid_valve_thickness_mm"],
        )
        # 3 - AORTIC VALVE
        av_name = "AORTICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[av_name] = generate_valve_from_great_vessel(
            label_great_vessel=results[geom_atlas_names["atlas_ascending_aorta"]],
            label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
            valve_thickness_mm=geom_valve_defs["aortic_valve_thickness_mm"],
        )
        # 4 - PULMONIC VALVE
        pv_name = "PULMONICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[pv_name] = generate_valve_from_great_vessel(
            label_great_vessel=results[geom_atlas_names["atlas_pulmonary_artery"]],
            label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
            valve_thickness_mm=geom_valve_defs["pulmonic_valve_thickness_mm"],
        )
        # 5 - SINOATRIAL NODE
        san_name = "SAN" + geometric_segmentation_settings["geometric_name_suffix"]
        results[san_name] = geometric_sinoatrialnode(
            label_svc=results[geom_atlas_names["atlas_superior_vena_cava"]],
            label_ra=results[geom_atlas_names["atlas_right_atrium"]],
            label_wholeheart=results[geom_atlas_names["atlas_whole_heart"]],
            radius_mm=geom_conduction_defs["sinoatrial_node_radius_mm"],
        )
        # 6 - ATRIOVENTRICULAR NODE
        avn_name = "AVN" + geometric_segmentation_settings["geometric_name_suffix"]
        results[avn_name] = geometric_atrioventricularnode(
            label_la=results[geom_atlas_names["atlas_left_atrium"]],
            label_lv=results[geom_atlas_names["atlas_left_ventricle"]],
            label_ra=results[geom_atlas_names["atlas_right_atrium"]],
            label_rv=results[geom_atlas_names["atlas_right_ventricle"]],
            radius_mm=geom_conduction_defs["atrioventricular_node_radius_mm"],
        )
    """
    Step 8 - Post-processing
    """
    postprocessing_settings = settings["postprocessing_settings"]
    if postprocessing_settings["run_postprocessing"]:
        logger.info("Running post-processing.")
        # Remove any smaller components and perform morphological closing (hole filling)
        binaryfillhole_img = [
            int(postprocessing_settings["binaryfillhole_mm"] / sp) for sp in img.GetSpacing()
        ]
        for structure_name in postprocessing_settings["structures_for_binaryfillhole"]:
            if structure_name not in results.keys():
                continue
            contour_s = results[structure_name]
            # Keep only the largest connected component, then close holes.
            contour_s = sitk.RelabelComponent(sitk.ConnectedComponent(contour_s)) == 1
            contour_s = sitk.BinaryMorphologicalClosing(contour_s, binaryfillhole_img)
            results[structure_name] = contour_s
        # Remove any overlaps
        input_overlap = {
            s: results[s] for s in postprocessing_settings["structures_for_overlap_correction"]
        }
        output_overlap = correct_volume_overlap(input_overlap)
        for s in postprocessing_settings["structures_for_overlap_correction"]:
            results[s] = output_overlap[s]
    if return_as_cropped:
        results["CROP_IMAGE"] = img_crop
    logger.info("Done!")
    return results, results_prob
|
import abc
import collections.abc
import contextlib
import dataclasses
import itertools
import math
import operator
import re
import sys
import time
from collections import defaultdict
from datetime import datetime, timedelta
from functools import lru_cache
from hashlib import md5
from typing import Any, Optional
import dateutil.tz
import msgpack
import orjson
import pydantic
from fastapi import Depends, HTTPException, Query, Request, Response
from starlette.responses import JSONResponse, Send, StreamingResponse
# These modules are not directly used, but they register things on import.
from .. import queries
from ..media_type_registration import (
serialization_registry as default_serialization_registry,
)
from ..queries import KeyLookup, QueryValueError
from ..query_registration import query_registry as default_query_registry
from ..trees.in_memory import Tree as TreeInMemory
from ..utils import (
APACHE_ARROW_FILE_MIME_TYPE,
SerializationError,
UnsupportedShape,
modules_available,
)
from . import models
from .authentication import get_current_user
from .etag import tokenize
# `queries` was imported only for its import-time registration side effects;
# remove the name from the module namespace once that has happened.
del queries
# Conditionally register support for each structure family, depending on which
# optional dependencies are installed. These imports also act purely through
# their registration side effects, so the module objects are deleted afterwards.
if modules_available("numpy", "dask.array"):
    from ..structures import array as _array  # noqa: F401

    del _array
if modules_available("pandas", "pyarrow", "dask.dataframe"):
    from ..structures import dataframe as _dataframe  # noqa: F401

    del _dataframe
if modules_available("xarray"):
    from ..structures import xarray as _xarray  # noqa: F401

    del _xarray
# Matches filter query parameters of the form "filter___<name>___<field>".
_FILTER_PARAM_PATTERN = re.compile(r"filter___(?P<name>.*)___(?P<field>[^\d\W][\w\d]+)")
# Local timezone of the server process.
_LOCAL_TZINFO = dateutil.tz.gettz()
@lru_cache(maxsize=1)
def get_query_registry():
    """Return the registry of supported query types.

    This may be overridden via dependency_overrides.
    """
    return default_query_registry
@lru_cache(maxsize=1)
def get_serialization_registry():
    """Return the registry of serializers.

    This may be overridden via dependency_overrides.
    """
    return default_serialization_registry
def get_root_tree():
    """Placeholder dependency that always raises.

    The real root tree must be injected via dependency_overrides.
    """
    message = (
        "This should be overridden via dependency_overrides. "
        "See tiled.server.app.serve_tree()."
    )
    raise NotImplementedError(message)
def entry(
    path: str,
    request: Request,
    current_user: str = Depends(get_current_user),
    root_tree: pydantic.BaseSettings = Depends(get_root_tree),
):
    """Resolve a URL path to an entry in the tree, applying access control.

    Walks the tree one path segment at a time, re-authenticating at each
    sub-tree level, and returns the final entry. Raises HTTPException(404)
    if any segment cannot be found.
    """
    path_parts = [segment for segment in path.split("/") if segment]
    entry = root_tree.authenticated_as(current_user)
    try:
        # Traverse into sub-tree(s).
        for segment in path_parts:
            try:
                with record_timing(request.state.metrics, "acl"):
                    unauthenticated_entry = entry[segment]
            except (KeyError, TypeError):
                # TypeError covers entries that are not subscriptable at all.
                raise NoEntry(path_parts)
            # TODO Update this when Tree has structure_family == "tree".
            if not hasattr(unauthenticated_entry, "structure_family"):
                with record_timing(request.state.metrics, "acl"):
                    entry = unauthenticated_entry.authenticated_as(current_user)
            else:
                entry = unauthenticated_entry
        return entry
    except NoEntry:
        raise HTTPException(status_code=404, detail=f"No such entry: {path_parts}")
def reader(
    entry: Any = Depends(entry),
):
    """Resolve the path to an entry and require that it quacks like a Reader."""
    if isinstance(entry, DuckReader):
        return entry
    raise HTTPException(status_code=404, detail="This is not a Reader.")
def block(
    # Ellipsis as the "default" tells FastAPI to make this parameter required.
    block: str = Query(..., regex="^[0-9]*(,[0-9]+)*$"),
):
    """Parse a comma-separated block index into a tuple of ints."""
    if block:
        return tuple(int(part) for part in block.split(","))
    return ()
def expected_shape(
    expected_shape: Optional[str] = Query(
        None, min_length=1, regex="^[0-9]+(,[0-9]+)*$|^scalar$"
    ),
):
    """Parse the optional expected_shape query parameter.

    Returns None when absent, () for "scalar", otherwise a tuple of ints.
    """
    if expected_shape == "scalar":
        return ()
    if expected_shape is None:
        return None
    return tuple(int(dim) for dim in expected_shape.split(","))
def slice_(
    slice: str = Query(None, regex="^[0-9,:]*$"),
):
    """Parse a slice query parameter into a tuple of numpy slice objects."""
    import numpy

    # IMPORTANT We are eval-ing a user-provider string here so we need to be
    # very careful about locking down what can be in it. The regex above
    # excludes any letters or operators, so it is not possible to execute
    # functions or expensive arithmetic.
    dims = [dim for dim in (slice or "").split(",") if dim]
    return tuple(eval(f"numpy.s_[{dim!s}]", {"numpy": numpy}) for dim in dims)
def len_or_approx(tree):
    """Return a length hint for *tree* if it provides one, else its exact length.

    Prefer approximate length if implemented. (It's cheaper.)
    """
    try:
        return operator.length_hint(tree)
    except TypeError:
        pass
    return len(tree)
def pagination_links(route, path_parts, offset, limit, length_hint):
    """Build JSON-API-style pagination links for a paged listing.

    The "first"/"last"/"next"/"prev" links remain None when they do not apply
    (e.g. no limit, first page, or last page).
    """
    path_str = "/".join(path_parts)
    base = f"{route}/{path_str}"
    links = {
        "self": f"{base}?page[offset]={offset}&page[limit]={limit}",
        # These are conditionally overwritten below.
        "first": None,
        "last": None,
        "next": None,
        "prev": None,
    }
    if limit:
        # Offset of the final page, rounded down to a multiple of the limit.
        last_page = (length_hint // limit) * limit
        links["first"] = f"{base}?page[offset]={0}&page[limit]={limit}"
        links["last"] = f"{base}?page[offset]={last_page}&page[limit]={limit}"
        if offset + limit < length_hint:
            links["next"] = f"{base}?page[offset]={offset + limit}&page[limit]={limit}"
        if offset > 0:
            links["prev"] = f"{base}?page[offset]={max(0, offset - limit)}&page[limit]={limit}"
    return links
class DuckReader(metaclass=abc.ABCMeta):
    """
    Used for isinstance(obj, DuckReader):
    """

    @classmethod
    def __subclasshook__(cls, candidate):
        # A candidate "quacks" like a Reader if it exposes the full
        # read/structure protocol.
        for attr in ("read", "macrostructure", "microstructure"):
            if not hasattr(candidate, attr):
                return False
        return True
class DuckTree(metaclass=abc.ABCMeta):
    """
    Used for isinstance(obj, DuckTree):
    """

    @classmethod
    def __subclasshook__(cls, candidate):
        # A candidate "quacks" like a Tree if it is subscriptable and iterable.
        required = ("__getitem__", "__iter__")
        return all(hasattr(candidate, name) for name in required)
def construct_entries_response(
    query_registry, tree, route, path, offset, limit, fields, filters, sort, base_url
):
    """Build the paginated response for a listing of entries.

    Applies filters, sorting, key lookups, and pagination to *tree*, and
    returns a tuple of (response model, metadata_stale_at, must_revalidate)
    that the caller can use to set caching headers.
    """
    path_parts = [segment for segment in path.split("/") if segment]
    if not isinstance(tree, DuckTree):
        raise WrongTypeForRoute("This is not a Tree.")
    queries = defaultdict(
        dict
    )  # e.g. {"text": {"text": "dog"}, "lookup": {"key": "..."}}
    # Group the parameters by query type.
    for key, value in filters.items():
        if value is None:
            continue
        name, field = _FILTER_PARAM_PATTERN.match(key).groups()
        queries[name][field] = value
    # Parse the sort parameter: a comma-separated list of keys, where a
    # leading "-" means descending order.
    sorting = []
    if sort is not None:
        for item in sort.split(","):
            if item:
                if item.startswith("-"):
                    sorting.append((item[1:], -1))
                else:
                    sorting.append((item, 1))
    if sorting:
        if not hasattr(tree, "sort"):
            raise HTTPException(
                status_code=400, detail="This Tree does not support sorting."
            )
        tree = tree.sort(sorting)
    # Apply the queries and obtain a narrowed tree.
    key_lookups = []
    for query_name, parameters_dict_of_lists in queries.items():
        # Each query type may be given multiple times; iterate until the
        # parameter lists are exhausted.
        for i in itertools.count(0):
            try:
                parameters = {
                    field_name: parameters_list[i]
                    for field_name, parameters_list in parameters_dict_of_lists.items()
                }
            except IndexError:
                break
            query_class = query_registry.name_to_query_type[query_name]
            # Special case:
            # List fields are serialized as comma-separated strings.
            for field in dataclasses.fields(query_class):
                if getattr(field.type, "__origin__", None) is list:
                    (inner_type,) = field.type.__args__
                    parameters[field.name] = [
                        inner_type(item) for item in parameters[field.name].split(",")
                    ]
            try:
                query = query_class(**parameters)
                # Special case: Do key-lookups at the end after all other filtering.
                # We do not require trees to implement this query; we implement it
                # directly here by just calling __getitem__.
                if isinstance(query, KeyLookup):
                    key_lookups.append(query.key)
                    continue
                tree = tree.search(query)
            except QueryValueError as err:
                raise HTTPException(status_code=400, detail=err.args[0])
    if key_lookups:
        # Duplicates are technically legal because *any* query can be given
        # with multiple parameters.
        unique_key_lookups = set(key_lookups)
        (key_lookup), *others = unique_key_lookups
        if others:
            # Two non-equal KeyLookup queries must return no results.
            tree = TreeInMemory({})
        else:
            try:
                tree = TreeInMemory(
                    {key_lookup: tree[key_lookup]}, must_revalidate=False
                )
            except KeyError:
                tree = TreeInMemory({})
    count = len_or_approx(tree)
    links = pagination_links(route, path_parts, offset, limit, count)
    data = []
    if fields != [models.EntryFields.none]:
        # Pull a page of items into memory.
        items = tree.items_indexer[offset : offset + limit]  # noqa: E203
    else:
        # Pull a page of just the keys, which is cheaper.
        items = (
            (key, None)
            for key in tree.keys_indexer[offset : offset + limit]  # noqa: E203
        )
    # This value will not leak out. It just used to seed comparisons.
    metadata_stale_at = datetime.utcnow() + timedelta(days=1_000_000)
    must_revalidate = getattr(tree, "must_revalidate", True)
    for key, entry in items:
        resource = construct_resource(base_url, path_parts + [key], entry, fields)
        data.append(resource)
        # If any entry has entry.metadata_stale_at = None, then there will
        # be no 'Expires' header. We will pessimistically assume the values
        # are immediately stale.
        if metadata_stale_at is not None:
            if getattr(entry, "metadata_stale_at", None) is None:
                metadata_stale_at = None
            else:
                metadata_stale_at = min(metadata_stale_at, entry.metadata_stale_at)
    return (
        models.Response(data=data, links=links, meta={"count": count}),
        metadata_stale_at,
        must_revalidate,
    )
# Fallback media type per structure family, used when the client specifies no
# format and sends no Accept header (and to resolve the "*/*" Accept value).
DEFAULT_MEDIA_TYPES = {
    "array": "application/octet-stream",
    "dataframe": APACHE_ARROW_FILE_MIME_TYPE,
    "structured_array_tabular": "application/octet-stream",
    "structured_array_generic": "application/octet-stream",
    "variable": "application/octet-stream",
    "data_array": "application/octet-stream",
    "dataset": "application/netcdf",
}
def construct_data_response(
    structure_family,
    serialization_registry,
    payload,
    metadata,
    request,
    format=None,
    specs=None,
    expires=None,
):
    """
    Negotiate a media type and serialize ``payload`` into an HTTP response.

    The ``format`` query parameter (comma-separated media types or aliases)
    takes priority; otherwise the Accept header is consulted. Raises
    UnsupportedMediaTypes when nothing requested can be produced, and
    returns 304 when the client's If-None-Match ETag already matches.
    """
    request.state.endpoint = "data"
    if specs is None:
        specs = []
    default_media_type = DEFAULT_MEDIA_TYPES[structure_family]
    # Give priority to the `format` query parameter. Otherwise, consult Accept
    # header.
    if format is not None:
        media_types_or_aliases = format.split(",")
        # Resolve aliases, like "csv" -> "text/csv".
        media_types = [
            serialization_registry.resolve_alias(t) for t in media_types_or_aliases
        ]
    else:
        # The HTTP spec says these should be separated by ", " but some
        # browsers separate with just "," (no space).
        # https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation/List_of_default_Accept_values#default_values # noqa
        # That variation is what we are handling below with lstrip.
        media_types = [
            s.lstrip(" ")
            for s in request.headers.get("Accept", default_media_type).split(",")
        ]
    # The client may give us a choice of media types. Find the first one
    # that we support.
    supported = set()
    for media_type in media_types:
        if media_type == "*/*":
            media_type = default_media_type
        # fall back to generic dataframe serializer if no specs present
        for spec in specs + [structure_family]:
            media_types_for_spec = serialization_registry.media_types(spec)
            if media_type in media_types_for_spec:
                break
            supported.update(media_types_for_spec)
        else:
            # None of the specs or the structure_family can serialize to this
            # media_type. Try the next one.
            continue
        # We found a match above. We have our media_type.
        break
    else:
        # We have checked each of the media_types, and we cannot serialize
        # to any of them.
        # FIX: the join expressions must use single quotes inside this
        # double-quoted f-string; reusing the delimiter is a SyntaxError on
        # Python < 3.12.
        raise UnsupportedMediaTypes(
            f"None of the media types requested by the client are supported. "
            f"Supported: {', '.join(supported)}. Requested: {', '.join(media_types)}.",
        )
    with record_timing(request.state.metrics, "tok"):
        # Create an ETag that uniquely identifies this content and the media
        # type that it will be encoded as.
        etag = tokenize((payload, media_type))
    headers = {"ETag": etag}
    if expires is not None:
        headers["Expires"] = expires.strftime(HTTP_EXPIRES_HEADER_FORMAT)
    if request.headers.get("If-None-Match", "") == etag:
        # If the client already has this content, confirm that.
        return Response(status_code=304, headers=headers)
    # This is the expensive step: actually serialize.
    try:
        content = serialization_registry(
            structure_family, media_type, payload, metadata
        )
    except UnsupportedShape as err:
        raise UnsupportedMediaTypes(
            f"The shape of this data {err.args[0]} is incompatible with the requested format ({media_type}). "
            f"Slice it or choose a different format.",
        )
    except SerializationError:
        raise UnsupportedMediaTypes(
            "This type is supported in general but there was an unknown error packing this specific data.",
        )
    return PatchedResponse(
        content=content,
        media_type=media_type,
        headers=headers,
    )
def construct_resource(base_url, path_parts, entry, fields):
    """
    Build the JSON:API resource object for one tree node or reader entry.

    ``entry`` may be None when only keys (not values) were fetched; then a
    minimal resource with just a self link is produced. ``fields`` selects
    which attribute groups (metadata, specs, structure, ...) to include.
    """
    path_str = "/".join(path_parts)
    attributes = {}
    if models.EntryFields.metadata in fields:
        attributes["metadata"] = entry.metadata
    if models.EntryFields.specs in fields:
        attributes["specs"] = getattr(entry, "specs", None)
    if isinstance(entry, DuckTree):
        # Tree (container) resource: expose count/sorting and search links.
        if models.EntryFields.count in fields:
            attributes["count"] = len_or_approx(entry)
        if hasattr(entry, "sorting"):
            attributes["sorting"] = entry.sorting
        resource = models.TreeResource(
            **{
                "id": path_parts[-1] if path_parts else "",
                "attributes": models.TreeAttributes(**attributes),
                "type": models.EntryType.tree,
                "links": {
                    "self": f"{base_url}metadata/{path_str}",
                    "search": f"{base_url}search/{path_str}",
                },
            }
        )
    else:
        # Reader (leaf) resource.
        links = {"self": f"{base_url}metadata/{path_str}"}
        structure = {}
        if entry is not None:
            # entry is None when we are pulling just *keys* from the
            # Tree and not values.
            links.update(
                {
                    link: template.format(base_url=base_url, path=path_str)
                    for link, template in FULL_LINKS[entry.structure_family].items()
                }
            )
            if models.EntryFields.structure_family in fields:
                attributes["structure_family"] = entry.structure_family
            if models.EntryFields.macrostructure in fields:
                macrostructure = entry.macrostructure()
                if macrostructure is not None:
                    structure["macro"] = dataclasses.asdict(macrostructure)
            if models.EntryFields.microstructure in fields:
                if entry.structure_family == "dataframe":
                    # Special case: its microstructure cannot be JSON-serialized
                    # and is therefore available from separate routes. Sends links
                    # instead of the actual payload.
                    structure["micro"] = {
                        "links": {
                            "meta": f"{base_url}dataframe/meta/{path_str}",
                            "divisions": f"{base_url}dataframe/divisions/{path_str}",
                        }
                    }
                else:
                    microstructure = entry.microstructure()
                    if microstructure is not None:
                        structure["micro"] = dataclasses.asdict(microstructure)
            # Per-family block/partition link templates. The f-strings emit
            # literal "{index_N}" placeholders for the client to fill in.
            if entry.structure_family == "array":
                block_template = ",".join(
                    f"{{index_{index}}}"
                    for index in range(len(structure["macro"]["shape"]))
                )
                links[
                    "block"
                ] = f"{base_url}array/block/{path_str}?block={block_template}"
            elif entry.structure_family == "dataframe":
                links[
                    "partition"
                ] = f"{base_url}dataframe/partition/{path_str}?partition={{index}}"
            elif entry.structure_family == "variable":
                block_template = ",".join(
                    f"{{index_{index}}}"
                    for index in range(
                        len(structure["macro"]["data"]["macro"]["shape"])
                    )
                )
                links[
                    "block"
                ] = f"{base_url}variable/block/{path_str}?block={block_template}"
            elif entry.structure_family == "data_array":
                block_template = ",".join(
                    f"{{index_{index}}}"
                    for index in range(
                        len(structure["macro"]["variable"]["macro"]["data"])
                    )
                )
                links[
                    "block"
                ] = f"{base_url}data_array/block/{path_str}?block={block_template}"
            elif entry.structure_family == "dataset":
                links[
                    "block"
                ] = f"{base_url}dataset/block/{path_str}?variable={{variable}}&block={{block_indexes}}"
                # NOTE(review): this result is never used — confirm whether
                # the call can be removed (it may have no side effects).
                microstructure = entry.microstructure()
        attributes["structure"] = structure
        resource = models.ReaderResource(
            **{
                "id": path_parts[-1],
                "attributes": models.ReaderAttributes(**attributes),
                "type": models.EntryType.reader,
                "links": links,
            }
        )
    return resource
class PatchedResponse(Response):
    """Response subclass whose ``render`` also accepts a memoryview."""

    def render(self, content: Any) -> bytes:
        # Anything that is not a memoryview is rendered by the stock
        # implementation; a memoryview is passed through zero-copy, recast
        # to unsigned bytes ("B").
        if not isinstance(content, memoryview):
            return super().render(content)
        return content.cast("B")
class PatchedStreamingResponse(StreamingResponse):
    "Patch the stream_response method to accept memoryview."
    async def stream_response(self, send: Send) -> None:
        # Standard ASGI start message, then one body message per chunk.
        await send(
            {
                "type": "http.response.start",
                "status": self.status_code,
                "headers": self.raw_headers,
            }
        )
        async for chunk in self.body_iterator:
            # BEGIN ALTERATION
            # The markers delimit the local change relative to the upstream
            # method: memoryview chunks are presumably sent as-is rather
            # than being str-encoded — confirm against starlette's original.
            if not isinstance(chunk, (bytes, memoryview)):
                # END ALTERATION
                chunk = chunk.encode(self.charset)
            await send({"type": "http.response.body", "body": chunk, "more_body": True})
        # Empty terminal message signals end-of-body.
        await send({"type": "http.response.body", "body": b"", "more_body": False})
class NumpySafeJSONResponse(JSONResponse):
    """JSON response rendered with orjson, which can serialize numpy types."""
    def __init__(self, *args, metrics, **kwargs):
        # Keyword-only `metrics` collects serialization timings.
        self.__metrics = metrics
        super().__init__(*args, **kwargs)
    def render(self, content: Any) -> bytes:
        with record_timing(self.__metrics, "pack"):
            # OPT_SERIALIZE_NUMPY lets orjson handle numpy scalars/arrays.
            return orjson.dumps(content, option=orjson.OPT_SERIALIZE_NUMPY)
def _numpy_safe_msgpack_encoder(obj):
    """
    msgpack ``default`` hook that converts numpy scalars/arrays to builtins.

    Only consults numpy when it is already imported; otherwise ``obj``
    cannot be a numpy object and we avoid importing numpy at all (the
    server has no hard numpy dependency).
    """
    if "numpy" not in sys.modules:
        return obj
    import numpy

    if not isinstance(obj, (numpy.generic, numpy.ndarray)):
        return obj
    # Scalars become plain Python scalars; arrays become (nested) lists.
    return obj.item() if numpy.isscalar(obj) else obj.tolist()
def _patch_naive_datetimes(obj):
    """
    Recursively copy ``obj``, attaching the local timezone to naive datetimes.

    Msgpack can only serialize datetimes with tzinfo. Mappings come back as
    dicts and non-string iterables as lists; everything else passes through.
    """
    if hasattr(obj, "items"):
        return {key: _patch_naive_datetimes(value) for key, value in obj.items()}
    if (not isinstance(obj, str)) and isinstance(obj, collections.abc.Iterable):
        return [_patch_naive_datetimes(item) for item in obj]
    if isinstance(obj, datetime) and obj.tzinfo is None:
        # astimezone() on a naive datetime interprets it as local time.
        return obj.astimezone(_LOCAL_TZINFO)
    return obj
class MsgpackResponse(Response):
    """Response that renders its content as msgpack."""
    media_type = "application/x-msgpack"
    def __init__(self, *args, metrics, **kwargs):
        # Keyword-only `metrics` collects serialization timings.
        self.__metrics = metrics
        super().__init__(*args, **kwargs)
    def render(self, content: Any, _reentered=False) -> bytes:
        # _reentered guards against infinite recursion on the retry path.
        try:
            with record_timing(self.__metrics, "pack"):
                return msgpack.packb(
                    content, default=_numpy_safe_msgpack_encoder, datetime=True
                )
        except TypeError as err:
            # msgpack tries to handle all datetimes, but if it
            # received a naive one (tzinfo=None) then it fails.
            # We cannot use the default hook to handle this because
            # it is not called.
            if err.args == ("can not serialize 'datetime.datetime' object",) and (
                not _reentered
            ):
                patched_content = _patch_naive_datetimes(content)
                return self.render(patched_content, _reentered=True)
            raise
# Media types supported for metadata (non-data) responses below.
JSON_MIME_TYPE = "application/json"
MSGPACK_MIME_TYPE = "application/x-msgpack"
# This is a silly time format, but it is the HTTP standard.
HTTP_EXPIRES_HEADER_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
def json_or_msgpack(request, content, expires=None, headers=None):
    """
    Render ``content`` (a pydantic model) as JSON or msgpack per Accept.

    Falls back to JSON when no requested media type is supported. Sets an
    ETag and optional Expires header, and returns 304 when If-None-Match
    matches.
    """
    # The HTTP spec says entries are separated by ", " but some clients use
    # just "," (no space), so split on "," and strip optional whitespace.
    # This mirrors the Accept handling in construct_data_response.
    media_types = [
        s.lstrip(" ")
        for s in request.headers.get("Accept", JSON_MIME_TYPE).split(",")
    ]
    for media_type in media_types:
        if media_type == "*/*":
            media_type = JSON_MIME_TYPE
            break
        if media_type == MSGPACK_MIME_TYPE:
            break
        if media_type == JSON_MIME_TYPE:
            break
    else:
        # It is common in HTTP to fall back on a default representation if
        # none of the requested ones are available. We do not do this for
        # data payloads, but it makes some sense to do it for these metadata
        # messages.
        media_type = JSON_MIME_TYPE
    assert media_type in {JSON_MIME_TYPE, MSGPACK_MIME_TYPE}
    content_as_dict = content.dict()
    with record_timing(request.state.metrics, "tok"):
        # Cheap content-derived ETag for client-side caching.
        etag = md5(str(content_as_dict).encode()).hexdigest()
    headers = headers or {}
    headers["ETag"] = etag
    if expires is not None:
        headers["Expires"] = expires.strftime(HTTP_EXPIRES_HEADER_FORMAT)
    if request.headers.get("If-None-Match", "") == etag:
        # If the client already has this content, confirm that.
        return Response(status_code=304, headers=headers)
    # Use the module constant rather than a duplicated string literal.
    if media_type == MSGPACK_MIME_TYPE:
        return MsgpackResponse(
            content_as_dict, headers=headers, metrics=request.state.metrics
        )
    return NumpySafeJSONResponse(
        content_as_dict, headers=headers, metrics=request.state.metrics
    )
class UnsupportedMediaTypes(Exception):
    """Raised when none of the client-requested media types can be produced."""
    pass
class NoEntry(KeyError):
    """Raised internally when a requested path does not exist in the tree."""
    pass
class WrongTypeForRoute(Exception):
    """Raised when the entry at a path is not the kind this route serves."""
    pass
# Templates for the "full" data links advertised per structure family,
# filled in via str.format(base_url=..., path=...) in construct_resource.
FULL_LINKS = {
    "array": {"full": "{base_url}array/full/{path}"},
    "structured_array_generic": {
        "full": "{base_url}structured_array_generic/full/{path}"
    },
    "structured_array_tabular": {
        "full": "{base_url}structured_array_tabular/full/{path}"
    },
    "dataframe": {"full": "{base_url}dataframe/full/{path}"},
    "variable": {"full": "{base_url}variable/full/{path}"},
    "data_array": {"full_variable": "{base_url}data_array/variable/full/{path}"},
    "dataset": {
        "full_variable": "{base_url}dataset/data_var/full/{path}?variable={{variable}}",
        "full_coordinate": "{base_url}dataset/coord/full/{path}?variable={{variable}}",
        "full_dataset": "{base_url}dataset/full/{path}",
    },
}
@contextlib.contextmanager
def record_timing(metrics, key):
    """
    Add the run time (in seconds) of the context body to metrics[key]["dur"].
    """
    t0 = time.perf_counter()
    yield
    metrics[key]["dur"] += time.perf_counter() - t0 # Units: seconds
| import abc
import collections.abc
import contextlib
import dataclasses
import itertools
import math
import operator
import re
import sys
import time
from collections import defaultdict
from datetime import datetime, timedelta
from functools import lru_cache
from hashlib import md5
from typing import Any, Optional
import dateutil.tz
import msgpack
import orjson
import pydantic
from fastapi import Depends, HTTPException, Query, Request, Response
from starlette.responses import JSONResponse, Send, StreamingResponse
# These modules are not directly used, but they register things on import.
from .. import queries
from ..media_type_registration import (
serialization_registry as default_serialization_registry,
)
from ..queries import KeyLookup, QueryValueError
from ..query_registration import query_registry as default_query_registry
from ..trees.in_memory import Tree as TreeInMemory
from ..utils import (
APACHE_ARROW_FILE_MIME_TYPE,
SerializationError,
UnsupportedShape,
modules_available,
)
from . import models
from .authentication import get_current_user
from .etag import tokenize
del queries
if modules_available("numpy", "dask.array"):
from ..structures import array as _array # noqa: F401
del _array
if modules_available("pandas", "pyarrow", "dask.dataframe"):
from ..structures import dataframe as _dataframe # noqa: F401
del _dataframe
if modules_available("xarray"):
from ..structures import xarray as _xarray # noqa: F401
del _xarray
# Matches filter query parameters of the form "filter___<name>___<field>".
_FILTER_PARAM_PATTERN = re.compile(r"filter___(?P<name>.*)___(?P<field>[^\d\W][\w\d]+)")
# Local timezone, used to patch naive datetimes for msgpack serialization.
_LOCAL_TZINFO = dateutil.tz.gettz()
# lru_cache(1) makes this a process-wide singleton accessor.
@lru_cache(1)
def get_query_registry():
    "This may be overridden via dependency_overrides."
    return default_query_registry
# lru_cache(1) makes this a process-wide singleton accessor.
@lru_cache(1)
def get_serialization_registry():
    "This may be overridden via dependency_overrides."
    return default_serialization_registry
def get_root_tree():
    """Placeholder dependency; the app must override it before serving."""
    raise NotImplementedError(
        "This should be overridden via dependency_overrides. "
        "See tiled.server.app.serve_tree()."
    )
def entry(
    path: str,
    request: Request,
    current_user: str = Depends(get_current_user),
    root_tree: pydantic.BaseSettings = Depends(get_root_tree),
):
    """
    FastAPI dependency: walk ``path`` down the access-controlled tree.

    Each traversal step is timed under the "acl" metric. Raises a 404
    HTTPException when any segment is missing or not subscriptable.
    """
    path_parts = [segment for segment in path.split("/") if segment]
    # NOTE: this local deliberately shadows the function's own name below.
    entry = root_tree.authenticated_as(current_user)
    try:
        # Traverse into sub-tree(s).
        for segment in path_parts:
            try:
                with record_timing(request.state.metrics, "acl"):
                    unauthenticated_entry = entry[segment]
            except (KeyError, TypeError):
                raise NoEntry(path_parts)
            # TODO Update this when Tree has structure_family == "tree".
            if not hasattr(unauthenticated_entry, "structure_family"):
                # Sub-trees (no structure_family) are re-authenticated at
                # each level; leaf readers are used as-is.
                with record_timing(request.state.metrics, "acl"):
                    entry = unauthenticated_entry.authenticated_as(current_user)
            else:
                entry = unauthenticated_entry
        return entry
    except NoEntry:
        raise HTTPException(status_code=404, detail=f"No such entry: {path_parts}")
def reader(
    entry: Any = Depends(entry),
):
    "Specify a path parameter and use it to look up a reader."
    # 404 (not 400): from the client's view, a non-Reader path simply does
    # not exist as a Reader resource.
    if not isinstance(entry, DuckReader):
        raise HTTPException(status_code=404, detail="This is not a Reader.")
    return entry
def block(
    # Ellipsis as the "default" tells FastAPI to make this parameter required.
    block: str = Query(..., regex="^[0-9]*(,[0-9]+)*$"),
):
    "Parse the required ?block=i,j,... query parameter into a tuple of ints."
    if not block:
        return ()
    return tuple(int(index) for index in block.split(","))
def expected_shape(
    expected_shape: Optional[str] = Query(
        None, min_length=1, regex="^[0-9]+(,[0-9]+)*$|^scalar$"
    ),
):
    "Parse the optional ?expected_shape= parameter into a tuple of ints."
    if expected_shape is None:
        return None
    # The literal "scalar" means a zero-dimensional (shapeless) value.
    if expected_shape == "scalar":
        return ()
    return tuple(int(dim) for dim in expected_shape.split(","))
def slice_(
    slice: str = Query(None, regex="^[0-9,:]*$"),
):
    "Specify and parse a slice parameter (comma-separated numpy-style slices)."
    import numpy
    # IMPORTANT We are eval-ing a user-provided string here so we need to be
    # very careful about locking down what can be in it. The regex above
    # excludes any letters or operators, so it is not possible to execute
    # functions or expensive arithmetic.
    return tuple(
        [
            eval(f"numpy.s_[{dim!s}]", {"numpy": numpy})
            for dim in (slice or "").split(",")
            if dim
        ]
    )
def len_or_approx(tree):
    """
    Return an approximate length when the container offers one (cheaper),
    falling back to an exact len() otherwise.
    """
    try:
        return operator.length_hint(tree)
    except TypeError:
        pass
    return len(tree)
def pagination_links(route, path_parts, offset, limit, length_hint):
    """
    Build JSON-API-style pagination links (self/first/last/next/prev)
    for one page of search results.
    """
    path_str = "/".join(path_parts)
    base = f"{route}/{path_str}"
    # Links that do not apply stay None so the client can tell "no such
    # page" apart from a missing key.
    links = {
        "self": f"{base}?page[offset]={offset}&page[limit]={limit}",
        "first": None,
        "last": None,
        "next": None,
        "prev": None,
    }
    if limit:
        last_page = math.floor(length_hint / limit) * limit
        links["first"] = f"{base}?page[offset]={0}&page[limit]={limit}"
        links["last"] = f"{base}?page[offset]={last_page}&page[limit]={limit}"
        if offset + limit < length_hint:
            links["next"] = f"{base}?page[offset]={offset + limit}&page[limit]={limit}"
        if offset > 0:
            links["prev"] = f"{base}?page[offset]={max(0, offset - limit)}&page[limit]={limit}"
    return links
class DuckReader(metaclass=abc.ABCMeta):
    """
    Virtual base class for duck-typed Readers.

    isinstance(obj, DuckReader) holds for any object exposing the Reader
    interface, regardless of its actual base classes.
    """

    @classmethod
    def __subclasshook__(cls, candidate):
        # A candidate "quacks" like a Reader if it exposes all of these.
        required = ("read", "macrostructure", "microstructure")
        for attr in required:
            if not hasattr(candidate, attr):
                return False
        return True
class DuckTree(metaclass=abc.ABCMeta):
    """
    Virtual base class for duck-typed Trees.

    isinstance(obj, DuckTree) holds for any object that is subscriptable
    and iterable, regardless of its actual base classes.
    """

    @classmethod
    def __subclasshook__(cls, candidate):
        # A candidate "quacks" like a Tree if it exposes both of these.
        required = ("__getitem__", "__iter__")
        for attr in required:
            if not hasattr(candidate, attr):
                return False
        return True
def construct_entries_response(
    query_registry, tree, route, path, offset, limit, fields, filters, sort, base_url
):
    """
    Search, sort, and paginate ``tree``; build one page of entry resources.

    Returns (response_model, metadata_stale_at, must_revalidate); the last
    two values drive the caching headers set by the caller.
    """
    path_parts = [segment for segment in path.split("/") if segment]
    if not isinstance(tree, DuckTree):
        raise WrongTypeForRoute("This is not a Tree.")
    queries = defaultdict(
        dict
    ) # e.g. {"text": {"text": "dog"}, "lookup": {"key": "..."}}
    # Group the parameters by query type.
    for key, value in filters.items():
        if value is None:
            continue
        name, field = _FILTER_PARAM_PATTERN.match(key).groups()
        queries[name][field] = value
    # Parse "sort=a,-b" into [("a", 1), ("b", -1)]; "-" means descending.
    sorting = []
    if sort is not None:
        for item in sort.split(","):
            if item:
                if item.startswith("-"):
                    sorting.append((item[1:], -1))
                else:
                    sorting.append((item, 1))
    if sorting:
        if not hasattr(tree, "sort"):
            raise HTTPException(
                status_code=400, detail="This Tree does not support sorting."
            )
        tree = tree.sort(sorting)
    # Apply the queries and obtain a narrowed tree.
    key_lookups = []
    for query_name, parameters_dict_of_lists in queries.items():
        # Each field may be given multiple times; iterate by position until
        # one of the lists runs out.
        for i in itertools.count(0):
            try:
                parameters = {
                    field_name: parameters_list[i]
                    for field_name, parameters_list in parameters_dict_of_lists.items()
                }
            except IndexError:
                break
            query_class = query_registry.name_to_query_type[query_name]
            # Special case:
            # List fields are serialized as comma-separated strings.
            for field in dataclasses.fields(query_class):
                if getattr(field.type, "__origin__", None) is list:
                    (inner_type,) = field.type.__args__
                    parameters[field.name] = [
                        inner_type(item) for item in parameters[field.name].split(",")
                    ]
            try:
                query = query_class(**parameters)
                # Special case: Do key-lookups at the end after all other filtering.
                # We do not require trees to implement this query; we implement it
                # directly here by just calling __getitem__.
                if isinstance(query, KeyLookup):
                    key_lookups.append(query.key)
                    continue
                tree = tree.search(query)
            except QueryValueError as err:
                raise HTTPException(status_code=400, detail=err.args[0])
    if key_lookups:
        # Duplicates are technically legal because *any* query can be given
        # with multiple parameters.
        unique_key_lookups = set(key_lookups)
        (key_lookup), *others = unique_key_lookups
        if others:
            # Two non-equal KeyLookup queries must return no results.
            tree = TreeInMemory({})
        else:
            try:
                tree = TreeInMemory(
                    {key_lookup: tree[key_lookup]}, must_revalidate=False
                )
            except KeyError:
                tree = TreeInMemory({})
    count = len_or_approx(tree)
    links = pagination_links(route, path_parts, offset, limit, count)
    data = []
    if fields != [models.EntryFields.none]:
        # Pull a page of items into memory.
        items = tree.items_indexer[offset : offset + limit] # noqa: E203
    else:
        # Pull a page of just the keys, which is cheaper.
        items = (
            (key, None)
            for key in tree.keys_indexer[offset : offset + limit] # noqa: E203
        )
    # This value will not leak out. It is just used to seed comparisons.
    metadata_stale_at = datetime.utcnow() + timedelta(days=1_000_000)
    must_revalidate = getattr(tree, "must_revalidate", True)
    for key, entry in items:
        resource = construct_resource(base_url, path_parts + [key], entry, fields)
        data.append(resource)
        # If any entry has entry.metadata_stale_at = None, then there will
        # be no 'Expires' header. We will pessimistically assume the values
        # are immediately stale.
        if metadata_stale_at is not None:
            if getattr(entry, "metadata_stale_at", None) is None:
                metadata_stale_at = None
            else:
                metadata_stale_at = min(metadata_stale_at, entry.metadata_stale_at)
    return (
        models.Response(data=data, links=links, meta={"count": count}),
        metadata_stale_at,
        must_revalidate,
    )
# Fallback media type per structure family, used when the client sends no
# explicit ``format`` parameter and its Accept header is absent or ``*/*``.
DEFAULT_MEDIA_TYPES = {
    "array": "application/octet-stream",
    "dataframe": APACHE_ARROW_FILE_MIME_TYPE,
    "structured_array_tabular": "application/octet-stream",
    "structured_array_generic": "application/octet-stream",
    "variable": "application/octet-stream",
    "data_array": "application/octet-stream",
    "dataset": "application/netcdf",
}
def construct_data_response(
    structure_family,
    serialization_registry,
    payload,
    metadata,
    request,
    format=None,
    specs=None,
    expires=None,
):
    """
    Negotiate a media type and serialize ``payload`` into an HTTP response.

    The ``format`` query parameter (comma-separated media types or aliases)
    takes priority; otherwise the Accept header is consulted. Raises
    UnsupportedMediaTypes when nothing requested can be produced, and
    returns 304 when the client's If-None-Match ETag already matches.
    """
    request.state.endpoint = "data"
    if specs is None:
        specs = []
    default_media_type = DEFAULT_MEDIA_TYPES[structure_family]
    # Give priority to the `format` query parameter. Otherwise, consult Accept
    # header.
    if format is not None:
        media_types_or_aliases = format.split(",")
        # Resolve aliases, like "csv" -> "text/csv".
        media_types = [
            serialization_registry.resolve_alias(t) for t in media_types_or_aliases
        ]
    else:
        # The HTTP spec says these should be separated by ", " but some
        # browsers separate with just "," (no space).
        # https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation/List_of_default_Accept_values#default_values # noqa
        # That variation is what we are handling below with lstrip.
        media_types = [
            s.lstrip(" ")
            for s in request.headers.get("Accept", default_media_type).split(",")
        ]
    # The client may give us a choice of media types. Find the first one
    # that we support.
    supported = set()
    for media_type in media_types:
        if media_type == "*/*":
            media_type = default_media_type
        # fall back to generic dataframe serializer if no specs present
        for spec in specs + [structure_family]:
            media_types_for_spec = serialization_registry.media_types(spec)
            if media_type in media_types_for_spec:
                break
            supported.update(media_types_for_spec)
        else:
            # None of the specs or the structure_family can serialize to this
            # media_type. Try the next one.
            continue
        # We found a match above. We have our media_type.
        break
    else:
        # We have checked each of the media_types, and we cannot serialize
        # to any of them.
        raise UnsupportedMediaTypes(
            f"None of the media types requested by the client are supported. "
            f"Supported: {', '.join(supported)}. Requested: {', '.join(media_types)}.",
        )
    with record_timing(request.state.metrics, "tok"):
        # Create an ETag that uniquely identifies this content and the media
        # type that it will be encoded as.
        etag = tokenize((payload, media_type))
    headers = {"ETag": etag}
    if expires is not None:
        headers["Expires"] = expires.strftime(HTTP_EXPIRES_HEADER_FORMAT)
    if request.headers.get("If-None-Match", "") == etag:
        # If the client already has this content, confirm that.
        return Response(status_code=304, headers=headers)
    # This is the expensive step: actually serialize.
    try:
        content = serialization_registry(
            structure_family, media_type, payload, metadata
        )
    except UnsupportedShape as err:
        raise UnsupportedMediaTypes(
            f"The shape of this data {err.args[0]} is incompatible with the requested format ({media_type}). "
            f"Slice it or choose a different format.",
        )
    except SerializationError:
        raise UnsupportedMediaTypes(
            "This type is supported in general but there was an unknown error packing this specific data.",
        )
    return PatchedResponse(
        content=content,
        media_type=media_type,
        headers=headers,
    )
def construct_resource(base_url, path_parts, entry, fields):
    """
    Build the JSON:API resource object for one tree node or reader entry.

    ``entry`` may be None when only keys (not values) were fetched; then a
    minimal resource with just a self link is produced. ``fields`` selects
    which attribute groups (metadata, specs, structure, ...) to include.
    """
    path_str = "/".join(path_parts)
    attributes = {}
    if models.EntryFields.metadata in fields:
        attributes["metadata"] = entry.metadata
    if models.EntryFields.specs in fields:
        attributes["specs"] = getattr(entry, "specs", None)
    if isinstance(entry, DuckTree):
        # Tree (container) resource: expose count/sorting and search links.
        if models.EntryFields.count in fields:
            attributes["count"] = len_or_approx(entry)
        if hasattr(entry, "sorting"):
            attributes["sorting"] = entry.sorting
        resource = models.TreeResource(
            **{
                "id": path_parts[-1] if path_parts else "",
                "attributes": models.TreeAttributes(**attributes),
                "type": models.EntryType.tree,
                "links": {
                    "self": f"{base_url}metadata/{path_str}",
                    "search": f"{base_url}search/{path_str}",
                },
            }
        )
    else:
        # Reader (leaf) resource.
        links = {"self": f"{base_url}metadata/{path_str}"}
        structure = {}
        if entry is not None:
            # entry is None when we are pulling just *keys* from the
            # Tree and not values.
            links.update(
                {
                    link: template.format(base_url=base_url, path=path_str)
                    for link, template in FULL_LINKS[entry.structure_family].items()
                }
            )
            if models.EntryFields.structure_family in fields:
                attributes["structure_family"] = entry.structure_family
            if models.EntryFields.macrostructure in fields:
                macrostructure = entry.macrostructure()
                if macrostructure is not None:
                    structure["macro"] = dataclasses.asdict(macrostructure)
            if models.EntryFields.microstructure in fields:
                if entry.structure_family == "dataframe":
                    # Special case: its microstructure cannot be JSON-serialized
                    # and is therefore available from separate routes. Sends links
                    # instead of the actual payload.
                    structure["micro"] = {
                        "links": {
                            "meta": f"{base_url}dataframe/meta/{path_str}",
                            "divisions": f"{base_url}dataframe/divisions/{path_str}",
                        }
                    }
                else:
                    microstructure = entry.microstructure()
                    if microstructure is not None:
                        structure["micro"] = dataclasses.asdict(microstructure)
            # Per-family block/partition link templates. The f-strings emit
            # literal "{index_N}" placeholders for the client to fill in.
            if entry.structure_family == "array":
                block_template = ",".join(
                    f"{{index_{index}}}"
                    for index in range(len(structure["macro"]["shape"]))
                )
                links[
                    "block"
                ] = f"{base_url}array/block/{path_str}?block={block_template}"
            elif entry.structure_family == "dataframe":
                links[
                    "partition"
                ] = f"{base_url}dataframe/partition/{path_str}?partition={{index}}"
            elif entry.structure_family == "variable":
                block_template = ",".join(
                    f"{{index_{index}}}"
                    for index in range(
                        len(structure["macro"]["data"]["macro"]["shape"])
                    )
                )
                links[
                    "block"
                ] = f"{base_url}variable/block/{path_str}?block={block_template}"
            elif entry.structure_family == "data_array":
                block_template = ",".join(
                    f"{{index_{index}}}"
                    for index in range(
                        len(structure["macro"]["variable"]["macro"]["data"])
                    )
                )
                links[
                    "block"
                ] = f"{base_url}data_array/block/{path_str}?block={block_template}"
            elif entry.structure_family == "dataset":
                links[
                    "block"
                ] = f"{base_url}dataset/block/{path_str}?variable={{variable}}&block={{block_indexes}}"
                # NOTE(review): this result is never used — confirm whether
                # the call can be removed (it may have no side effects).
                microstructure = entry.microstructure()
        attributes["structure"] = structure
        resource = models.ReaderResource(
            **{
                "id": path_parts[-1],
                "attributes": models.ReaderAttributes(**attributes),
                "type": models.EntryType.reader,
                "links": links,
            }
        )
    return resource
class PatchedResponse(Response):
    "Patch the render method to accept memoryview."
    def render(self, content: Any) -> bytes:
        # A memoryview is passed through zero-copy, recast to unsigned
        # bytes ("B"); anything else uses the stock implementation.
        if isinstance(content, memoryview):
            return content.cast("B")
        return super().render(content)
class PatchedStreamingResponse(StreamingResponse):
    "Patch the stream_response method to accept memoryview."
    async def stream_response(self, send: Send) -> None:
        # Standard ASGI start message, then one body message per chunk.
        await send(
            {
                "type": "http.response.start",
                "status": self.status_code,
                "headers": self.raw_headers,
            }
        )
        async for chunk in self.body_iterator:
            # BEGIN ALTERATION
            # The markers delimit the local change relative to the upstream
            # method: memoryview chunks are presumably sent as-is rather
            # than being str-encoded — confirm against starlette's original.
            if not isinstance(chunk, (bytes, memoryview)):
                # END ALTERATION
                chunk = chunk.encode(self.charset)
            await send({"type": "http.response.body", "body": chunk, "more_body": True})
        # Empty terminal message signals end-of-body.
        await send({"type": "http.response.body", "body": b"", "more_body": False})
class NumpySafeJSONResponse(JSONResponse):
    """JSON response rendered with orjson, which can serialize numpy types."""
    def __init__(self, *args, metrics, **kwargs):
        # Keyword-only `metrics` collects serialization timings.
        self.__metrics = metrics
        super().__init__(*args, **kwargs)
    def render(self, content: Any) -> bytes:
        with record_timing(self.__metrics, "pack"):
            # OPT_SERIALIZE_NUMPY lets orjson handle numpy scalars/arrays.
            return orjson.dumps(content, option=orjson.OPT_SERIALIZE_NUMPY)
def _numpy_safe_msgpack_encoder(obj):
    """msgpack ``default`` hook: convert numpy scalars/arrays to builtins."""
    # If numpy has not been imported yet, then we can be sure that obj
    # is not a numpy object, and we want to avoid triggering a numpy
    # import. (The server does not have a hard numpy dependency.)
    if "numpy" in sys.modules:
        import numpy
        if isinstance(obj, (numpy.generic, numpy.ndarray)):
            if numpy.isscalar(obj):
                return obj.item()
            return obj.tolist()
    return obj
def _patch_naive_datetimes(obj):
    """
    Recursively copy ``obj``, attaching the local timezone to naive datetimes.

    Msgpack can only serialize datetimes with tzinfo. Mappings come back as
    dicts and non-string iterables as lists; everything else passes through.
    """
    if hasattr(obj, "items"):
        patched_obj = {}
        for k, v in obj.items():
            patched_obj[k] = _patch_naive_datetimes(v)
    elif (not isinstance(obj, str)) and isinstance(obj, collections.abc.Iterable):
        # NOTE: any non-string iterable (tuples, generators) becomes a list.
        patched_obj = []
        for item in obj:
            patched_obj.append(_patch_naive_datetimes(item))
    elif isinstance(obj, datetime) and obj.tzinfo is None:
        # astimezone() on a naive datetime interprets it as local time.
        patched_obj = obj.astimezone(_LOCAL_TZINFO)
    else:
        patched_obj = obj
    return patched_obj
class MsgpackResponse(Response):
    """Response that renders its content as msgpack."""
    media_type = "application/x-msgpack"
    def __init__(self, *args, metrics, **kwargs):
        # Keyword-only `metrics` collects serialization timings.
        self.__metrics = metrics
        super().__init__(*args, **kwargs)
    def render(self, content: Any, _reentered=False) -> bytes:
        # _reentered guards against infinite recursion on the retry path.
        try:
            with record_timing(self.__metrics, "pack"):
                return msgpack.packb(
                    content, default=_numpy_safe_msgpack_encoder, datetime=True
                )
        except TypeError as err:
            # msgpack tries to handle all datetimes, but if it
            # received a naive one (tzinfo=None) then it fails.
            # We cannot use the default hook to handle this because
            # it is not called.
            if err.args == ("can not serialize 'datetime.datetime' object",) and (
                not _reentered
            ):
                patched_content = _patch_naive_datetimes(content)
                return self.render(patched_content, _reentered=True)
            raise
# Media types supported for metadata (non-data) responses below.
JSON_MIME_TYPE = "application/json"
MSGPACK_MIME_TYPE = "application/x-msgpack"
# This is a silly time format, but it is the HTTP standard.
HTTP_EXPIRES_HEADER_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
def json_or_msgpack(request, content, expires=None, headers=None):
    """
    Render ``content`` (a pydantic model) as JSON or msgpack per Accept.

    Falls back to JSON when no requested media type is supported. Sets an
    ETag and optional Expires header, and returns 304 when If-None-Match
    matches.
    """
    # The HTTP spec says entries are separated by ", " but some clients use
    # just "," (no space), so split on "," and strip optional whitespace.
    # This mirrors the Accept handling in construct_data_response.
    media_types = [
        s.lstrip(" ")
        for s in request.headers.get("Accept", JSON_MIME_TYPE).split(",")
    ]
    for media_type in media_types:
        if media_type == "*/*":
            media_type = JSON_MIME_TYPE
            break
        if media_type == MSGPACK_MIME_TYPE:
            break
        if media_type == JSON_MIME_TYPE:
            break
    else:
        # It is common in HTTP to fall back on a default representation if
        # none of the requested ones are available. We do not do this for
        # data payloads, but it makes some sense to do it for these metadata
        # messages.
        media_type = JSON_MIME_TYPE
    assert media_type in {JSON_MIME_TYPE, MSGPACK_MIME_TYPE}
    content_as_dict = content.dict()
    with record_timing(request.state.metrics, "tok"):
        # Cheap content-derived ETag for client-side caching.
        etag = md5(str(content_as_dict).encode()).hexdigest()
    headers = headers or {}
    headers["ETag"] = etag
    if expires is not None:
        headers["Expires"] = expires.strftime(HTTP_EXPIRES_HEADER_FORMAT)
    if request.headers.get("If-None-Match", "") == etag:
        # If the client already has this content, confirm that.
        return Response(status_code=304, headers=headers)
    # Use the module constant rather than a duplicated string literal.
    if media_type == MSGPACK_MIME_TYPE:
        return MsgpackResponse(
            content_as_dict, headers=headers, metrics=request.state.metrics
        )
    return NumpySafeJSONResponse(
        content_as_dict, headers=headers, metrics=request.state.metrics
    )
class UnsupportedMediaTypes(Exception):
    """Raised when none of the client-requested media types can be produced."""
    pass
class NoEntry(KeyError):
    """Raised internally when a requested path does not exist in the tree."""
    pass
class WrongTypeForRoute(Exception):
    """Raised when the entry at a path is not the kind this route serves."""
    pass
# Templates for the "full" data links advertised per structure family,
# filled in via str.format(base_url=..., path=...) in construct_resource.
FULL_LINKS = {
    "array": {"full": "{base_url}array/full/{path}"},
    "structured_array_generic": {
        "full": "{base_url}structured_array_generic/full/{path}"
    },
    "structured_array_tabular": {
        "full": "{base_url}structured_array_tabular/full/{path}"
    },
    "dataframe": {"full": "{base_url}dataframe/full/{path}"},
    "variable": {"full": "{base_url}variable/full/{path}"},
    "data_array": {"full_variable": "{base_url}data_array/variable/full/{path}"},
    "dataset": {
        "full_variable": "{base_url}dataset/data_var/full/{path}?variable={{variable}}",
        "full_coordinate": "{base_url}dataset/coord/full/{path}?variable={{variable}}",
        "full_dataset": "{base_url}dataset/full/{path}",
    },
}
@contextlib.contextmanager
def record_timing(metrics, key):
    """
    Add the run time (in seconds) of the context body to metrics[key]["dur"].
    """
    t0 = time.perf_counter()
    yield
    metrics[key]["dur"] += time.perf_counter() - t0 # Units: seconds
|
import os
from spirl.models.closed_loop_spirl_mdl import GoalClSPiRLMdl
from spirl.components.logger import Logger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.kitchen import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.kitchen.src.kitchen_data_loader import KitchenStateSeqDataset
# Directory containing this config file.
current_dir = os.path.dirname(os.path.realpath(__file__))
# Few-shot demonstration data: a single kitchen demo trajectory, served as
# 10-step state subsequences.
fewshot_dataset = KitchenStateSeqDataset(
    data_path='data/kitchen/kitchen-demo-topknob_bottomknob_hinge_slide.hdf5',
    num_demo=1,
    subseq_len=10,
)
# Target task sequence for the kitchen environment.
env = AttrDict(
    task_list = ['top burner', 'bottom burner', 'hinge cabinet', 'slide cabinet']
)
# Contrastive encoder configuration.
# NOTE(review): hidden/feature sizes presumably must match the checkpoint
# referenced by 'contra_ckpt' below -- confirm against the saved model.
contra_model_cf = AttrDict(
    state_dimension=data_spec.state_dim,
    hidden_size=128,
    feature_size=32,
)
# Top-level training configuration.
configuration = {
    'model': GoalClSPiRLMdl,
    'logger': Logger,
    'data_dir': '.',
    'epoch_cycles_train': 1,
    'evaluator': TopOfNSequenceEvaluator,
    'top_of_n_eval': 100,
    'top_comp_metric': 'mse',
    'batch_size': 128,
    'num_epochs': 50,
    'fewshot_data': fewshot_dataset,
    'fewshot_batch_size': 128,
    'contra_config': contra_model_cf,
    'contra_ckpt': './experiments/contrastive/kitchen/exact-mixed-all/exact_model.pt',
    'finetune_vae': True,
}
# Wrap for attribute-style access.
configuration = AttrDict(configuration)
# Skill-prior model hyperparameters for the goal-conditioned closed-loop
# SPiRL model.
model_config = AttrDict(
    state_dim=data_spec.state_dim,
    action_dim=data_spec.n_actions,
    n_rollout_steps=10,
    kl_div_weight=5e-4,
    nz_enc=128,
    nz_mid=128,
    n_processing_layers=5,
    cond_decode=True,
    # BUG FIX: the expression previously reused single quotes inside a
    # single-quoted f-string (f'{os.environ['EXP_DIR']}...'), which is a
    # SyntaxError on Python < 3.12. Requires $EXP_DIR to be set.
    checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/kitchen/hierarchical_cl_gc_top_bot_excluded'
)
# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config.dataset_spec['dataset_path'] = './data/kitchen/kitchen-mixed-top-bot-excluded.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1  # +1: the last action of each subsequence gets cropped
import os
from spirl.models.closed_loop_spirl_mdl import GoalClSPiRLMdl
from spirl.components.logger import Logger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.kitchen import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.kitchen.src.kitchen_data_loader import KitchenStateSeqDataset
# Directory containing this config file.
current_dir = os.path.dirname(os.path.realpath(__file__))
# Few-shot demonstration data: a single kitchen demo trajectory, served as
# 10-step state subsequences.
fewshot_dataset = KitchenStateSeqDataset(
    data_path='data/kitchen/kitchen-demo-topknob_bottomknob_hinge_slide.hdf5',
    num_demo=1,
    subseq_len=10,
)
# Target task sequence for the kitchen environment.
env = AttrDict(
    task_list = ['top burner', 'bottom burner', 'hinge cabinet', 'slide cabinet']
)
# Contrastive encoder configuration.
# NOTE(review): sizes presumably must match the 'contra_ckpt' checkpoint
# below -- confirm against the saved model.
contra_model_cf = AttrDict(
    state_dimension=data_spec.state_dim,
    hidden_size=128,
    feature_size=32,
)
# Top-level training configuration.
configuration = {
    'model': GoalClSPiRLMdl,
    'logger': Logger,
    'data_dir': '.',
    'epoch_cycles_train': 1,
    'evaluator': TopOfNSequenceEvaluator,
    'top_of_n_eval': 100,
    'top_comp_metric': 'mse',
    'batch_size': 128,
    'num_epochs': 50,
    'fewshot_data': fewshot_dataset,
    'fewshot_batch_size': 128,
    'contra_config': contra_model_cf,
    'contra_ckpt': './experiments/contrastive/kitchen/exact-mixed-all/exact_model.pt',
    'finetune_vae': True,
}
# Wrap for attribute-style access.
configuration = AttrDict(configuration)
# Skill-prior model hyperparameters.
model_config = AttrDict(
    state_dim=data_spec.state_dim,
    action_dim=data_spec.n_actions,
    n_rollout_steps=10,
    kl_div_weight=5e-4,
    nz_enc=128,
    nz_mid=128,
    n_processing_layers=5,
    cond_decode=True,
    # Checkpoint location under the experiment root ($EXP_DIR must be set).
    checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/kitchen/hierarchical_cl_gc_top_bot_excluded'
)
# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config.dataset_spec['dataset_path'] = './data/kitchen/kitchen-mixed-top-bot-excluded.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1  # +1: the last action of each subsequence gets cropped
# flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Configuration of Airflow Docs"""
import os
import sys
from glob import glob
from typing import List
import airflow
from airflow.configuration import default_config_yaml
# Feature-detect the official Airflow Sphinx theme; fall back to the
# default RTD theme (see html_theme below) when it is not installed.
try:
    import sphinx_airflow_theme # pylint: disable=unused-import
    airflow_theme_is_available = True
except ImportError:
    airflow_theme_is_available = False
# Hack that lets pieces of the code behave differently while the docs are
# being built. The main objective is to alter the behavior of
# utils.apply_default, which was hiding function headers.
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
# == Sphinx configuration ======================================================
# -- Project information -------------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
# General information about the project.
project = 'Airflow'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0.0'
version = airflow.__version__
# The full version, including alpha/beta/rc tags.
# release = '1.0.0'
release = airflow.__version__
# -- General configuration -----------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'provider_init_hack',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinxarg.ext',
'sphinxcontrib.httpdomain',
'sphinxcontrib.jinja',
'sphinx.ext.intersphinx',
'autoapi.extension',
'exampleinclude',
'docroles',
'removemarktransform',
'sphinx_copybutton',
'redirects',
'providers_packages_ref',
# First, generate redoc
'sphinxcontrib.redoc',
# Second, update redoc script
"sphinx_script_update",
"sphinxcontrib.spelling",
]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: List[str] = [
# We only link to selected subpackages.
'_api/airflow/index.rst',
# We have custom page - operators-and-hooks-ref.rst
'_api/airflow/providers/index.rst',
# Packages with subpackages
"_api/airflow/providers/microsoft/index.rst",
"_api/airflow/providers/apache/index.rst",
"_api/airflow/providers/cncf/index.rst",
# Templates or partials
'autoapi_templates',
'howto/operator/google/_partials',
'howto/operator/microsoft/_partials',
]
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def _get_rst_filepath_from_path(filepath: str):
if os.path.isdir(filepath):
result = filepath
elif os.path.isfile(filepath) and filepath.endswith('/__init__.py'):
result = filepath.rpartition("/")[0]
else:
result = filepath.rpartition(".")[0]
result += "/index.rst"
result = f"_api/{os.path.relpath(result, ROOT_DIR)}"
return result
# Exclude top-level airflow modules from the API docs (except a small
# allow-list) and exclude non-browsable top-level packages.
# do not exclude these top-level modules from the doc build:
_allowed_top_level = ("exceptions.py",)
# Hoisted out of the loop: this list is invariant across iterations.
browsable_packages = ["operators", "hooks", "sensors", "providers", "executors", "models", "secrets"]
for path in glob(f"{ROOT_DIR}/airflow/*"):
    name = os.path.basename(path)
    if os.path.isfile(path) and not path.endswith(_allowed_top_level):
        # BUG FIX: drop the ".py" extension. The inner quotes must differ
        # from the outer f-string quotes -- reusing '"' here is a
        # SyntaxError on Python < 3.12.
        exclude_patterns.append(f"_api/airflow/{name.rpartition('.')[0]}")
    if os.path.isdir(path) and name not in browsable_packages:
        exclude_patterns.append(f"_api/airflow/{name}")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# -- Options for HTML output ---------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
if airflow_theme_is_available:
html_theme = 'sphinx_airflow_theme'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers
# use this as the icon for tabs, windows and bookmarks. It should be a
# Windows-style icon file (.ico), which is 16x16 or 32x32 pixels large.
html_favicon = "../airflow/www/static/pin_32.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# A list of JavaScript filename. The entry must be a filename string or a
# tuple containing the filename string and the attributes dictionary. The
# filename must be relative to the html_static_path, or a full URI with
# scheme like http://example.org/script.js.
html_js_files = ['jira-links.js']
# Custom sidebar templates, maps document names to template names.
if airflow_theme_is_available:
html_sidebars = {
'**': [
'version-selector.html',
'searchbox.html',
'globaltoc.html',
]
}
# If false, no index is generated.
html_use_index = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
# Google Analytics ID.
# For more information look at:
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232
'theme_analytics_id': 'UA-140539454-1',
}
if airflow_theme_is_available:
html_context = {
# Variables used to build a button for editing the source code
#
# The path is created according to the following template:
#
# https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/
# {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }}
# {{ pagename }}{{ suffix }}
#
# More information:
# https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45
# https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40
#
'theme_vcs_pageview_mode': 'edit',
'conf_py_path': '/docs/',
'github_user': 'apache',
'github_repo': 'airflow',
'github_version': 'master',
'display_github': 'master',
'suffix': '.rst',
}
# == Extensions configuration ==================================================
# -- Options for sphinxcontrib.jinjac ------------------------------------------
# See: https://github.com/tardyp/sphinx-jinja
# Jinja context
jinja_contexts = {'config_ctx': {"configs": default_config_yaml()}}
# -- Options for sphinx.ext.autodoc --------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
# This value contains a list of modules to be mocked up. This is useful when some external dependencies
# are not met at build time and break the building process.
autodoc_mock_imports = [
'MySQLdb',
'adal',
'analytics',
'azure',
'azure.cosmos',
'azure.datalake',
'azure.kusto',
'azure.mgmt',
'boto3',
'botocore',
'bson',
'cassandra',
'celery',
'cloudant',
'cryptography',
'cx_Oracle',
'datadog',
'distributed',
'docker',
'google',
'google_auth_httplib2',
'googleapiclient',
'grpc',
'hdfs',
'httplib2',
'jaydebeapi',
'jenkins',
'jira',
'kubernetes',
'msrestazure',
'pandas',
'pandas_gbq',
'paramiko',
'pinotdb',
'psycopg2',
'pydruid',
'pyhive',
'pyhive',
'pymongo',
'pymssql',
'pysftp',
'qds_sdk',
'redis',
'simple_salesforce',
'slackclient',
'smbclient',
'snowflake',
'sshtunnel',
'tenacity',
'vertica_python',
'winrm',
'zdesk',
]
# The default options for autodoc directives. They are applied to all autodoc directives automatically.
autodoc_default_options = {'show-inheritance': True, 'members': True}
# -- Options for sphinx.ext.intersphinx ----------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
# This config value contains the locations and names of other projects that should
# be linked to in this documentation.
intersphinx_mapping = {
'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
'celery': ('https://docs.celeryproject.org/en/stable/', None),
'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None),
'jinja2': ('https://jinja.palletsprojects.com/en/master/', None),
'mongodb': ('https://api.mongodb.com/python/current/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'python': ('https://docs.python.org/3/', None),
'requests': ('https://requests.readthedocs.io/en/master/', None),
'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
# google-api
'google-api-core': ('https://googleapis.dev/python/google-api-core/latest', None),
'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None),
'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None),
'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None),
'google-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None),
'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None),
'google-cloud-container': ('https://googleapis.dev/python/container/latest', None),
'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None),
'google-cloud-datacatalog': ('https://googleapis.dev/python/datacatalog/latest', None),
'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None),
'google-cloud-dlp': ('https://googleapis.dev/python/dlp/latest', None),
'google-cloud-kms': ('https://googleapis.dev/python/cloudkms/latest', None),
'google-cloud-language': ('https://googleapis.dev/python/language/latest', None),
'google-cloud-monitoring': ('https://googleapis.dev/python/monitoring/latest', None),
'google-cloud-pubsub': ('https://googleapis.dev/python/pubsub/latest', None),
'google-cloud-redis': ('https://googleapis.dev/python/redis/latest', None),
'google-cloud-spanner': ('https://googleapis.dev/python/spanner/latest', None),
'google-cloud-speech': ('https://googleapis.dev/python/speech/latest', None),
'google-cloud-storage': ('https://googleapis.dev/python/storage/latest', None),
'google-cloud-tasks': ('https://googleapis.dev/python/cloudtasks/latest', None),
'google-cloud-texttospeech': ('https://googleapis.dev/python/texttospeech/latest', None),
'google-cloud-translate': ('https://googleapis.dev/python/translation/latest', None),
'google-cloud-videointelligence': ('https://googleapis.dev/python/videointelligence/latest', None),
'google-cloud-vision': ('https://googleapis.dev/python/vision/latest', None),
}
# -- Options for sphinx.ext.viewcode -------------------------------------------
# See: https://www.sphinx-doc.org/es/master/usage/extensions/viewcode.html
# If this is True, viewcode extension will emit viewcode-follow-imported event to resolve the name of
# the module by other extensions. The default is True.
viewcode_follow_imported_members = True
# -- Options for sphinx-autoapi ------------------------------------------------
# See: https://sphinx-autoapi.readthedocs.io/en/latest/config.html
# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
autoapi_dirs = [
os.path.abspath('../airflow'),
]
# A directory that has user-defined templates to override our default templates.
autoapi_template_dir = 'autoapi_templates'
# A list of patterns to ignore when finding files
autoapi_ignore = [
'*/airflow/kubernetes/kubernetes_request_factory/*',
'*/_internal*',
'*/airflow/**/providers/**/utils/*',
'*/node_modules/*',
'*/example_dags/*',
'*/migrations/*',
]
# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = True
# Relative path to output the AutoAPI files into. This can also be used to place the generated documentation
# anywhere in your documentation hierarchy.
autoapi_root = '_api'
# -- Options for ext.exampleinclude --------------------------------------------
exampleinclude_sourceroot = os.path.abspath('..')
# -- Options for ext.redirects -------------------------------------------------
redirects_file = 'redirects.txt'
# -- Options for sphinxcontrib.redoc -------------------------------------------
# See: https://sphinxcontrib-redoc.readthedocs.io/en/stable/
OPENAPI_FILE = os.path.join(os.path.dirname(__file__), "..", "airflow", "api_connexion", "openapi", "v1.yaml")
redoc = [
{
'name': 'Airflow REST API',
'page': 'stable-rest-api-ref',
'spec': OPENAPI_FILE,
'opts': {
'hide-hostname': True,
'no-auto-auth': True,
},
},
]
# Options for script updater
redoc_script_url = "https://cdn.jsdelivr.net/npm/redoc@2.0.0-rc.30/bundles/redoc.standalone.js"
# flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Configuration of Airflow Docs"""
import os
import sys
from glob import glob
from typing import List
import airflow
from airflow.configuration import default_config_yaml
try:
import sphinx_airflow_theme # pylint: disable=unused-import
airflow_theme_is_available = True
except ImportError:
airflow_theme_is_available = False
# Hack to allow changing for piece of the code to behave differently while
# the docs are being built. The main objective was to alter the
# behavior of the utils.apply_default that was hiding function headers
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
# == Sphinx configuration ======================================================
# -- Project information -------------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
# General information about the project.
project = 'Airflow'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0.0'
version = airflow.__version__
# The full version, including alpha/beta/rc tags.
# release = '1.0.0'
release = airflow.__version__
# -- General configuration -----------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'provider_init_hack',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinxarg.ext',
'sphinxcontrib.httpdomain',
'sphinxcontrib.jinja',
'sphinx.ext.intersphinx',
'autoapi.extension',
'exampleinclude',
'docroles',
'removemarktransform',
'sphinx_copybutton',
'redirects',
'providers_packages_ref',
# First, generate redoc
'sphinxcontrib.redoc',
# Second, update redoc script
"sphinx_script_update",
"sphinxcontrib.spelling",
]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: List[str] = [
# We only link to selected subpackages.
'_api/airflow/index.rst',
# We have custom page - operators-and-hooks-ref.rst
'_api/airflow/providers/index.rst',
# Packages with subpackages
"_api/airflow/providers/microsoft/index.rst",
"_api/airflow/providers/apache/index.rst",
"_api/airflow/providers/cncf/index.rst",
# Templates or partials
'autoapi_templates',
'howto/operator/google/_partials',
'howto/operator/microsoft/_partials',
]
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def _get_rst_filepath_from_path(filepath: str):
if os.path.isdir(filepath):
result = filepath
elif os.path.isfile(filepath) and filepath.endswith('/__init__.py'):
result = filepath.rpartition("/")[0]
else:
result = filepath.rpartition(".")[0]
result += "/index.rst"
result = f"_api/{os.path.relpath(result, ROOT_DIR)}"
return result
# Exclude top-level airflow modules from the API docs (except the
# allow-list below) and exclude non-browsable top-level packages.
# do not exclude these top-level modules from the doc build:
_allowed_top_level = ("exceptions.py",)
for path in glob(f"{ROOT_DIR}/airflow/*"):
    name = os.path.basename(path)
    if os.path.isfile(path) and not path.endswith(_allowed_top_level):
        # Strip the ".py" extension before building the exclude pattern.
        exclude_patterns.append(f"_api/airflow/{name.rpartition('.')[0]}")
    # NOTE(review): this list is loop-invariant and could be hoisted.
    browsable_packages = ["operators", "hooks", "sensors", "providers", "executors", "models", "secrets"]
    if os.path.isdir(path) and name not in browsable_packages:
        exclude_patterns.append(f"_api/airflow/{name}")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# -- Options for HTML output ---------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
if airflow_theme_is_available:
html_theme = 'sphinx_airflow_theme'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers
# use this as the icon for tabs, windows and bookmarks. It should be a
# Windows-style icon file (.ico), which is 16x16 or 32x32 pixels large.
html_favicon = "../airflow/www/static/pin_32.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# A list of JavaScript filename. The entry must be a filename string or a
# tuple containing the filename string and the attributes dictionary. The
# filename must be relative to the html_static_path, or a full URI with
# scheme like http://example.org/script.js.
html_js_files = ['jira-links.js']
# Custom sidebar templates, maps document names to template names.
if airflow_theme_is_available:
html_sidebars = {
'**': [
'version-selector.html',
'searchbox.html',
'globaltoc.html',
]
}
# If false, no index is generated.
html_use_index = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
# Google Analytics ID.
# For more information look at:
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232
'theme_analytics_id': 'UA-140539454-1',
}
if airflow_theme_is_available:
html_context = {
# Variables used to build a button for editing the source code
#
# The path is created according to the following template:
#
# https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/
# {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }}
# {{ pagename }}{{ suffix }}
#
# More information:
# https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45
# https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40
#
'theme_vcs_pageview_mode': 'edit',
'conf_py_path': '/docs/',
'github_user': 'apache',
'github_repo': 'airflow',
'github_version': 'master',
'display_github': 'master',
'suffix': '.rst',
}
# == Extensions configuration ==================================================
# -- Options for sphinxcontrib.jinjac ------------------------------------------
# See: https://github.com/tardyp/sphinx-jinja
# Jinja context
jinja_contexts = {'config_ctx': {"configs": default_config_yaml()}}
# -- Options for sphinx.ext.autodoc --------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
# This value contains a list of modules to be mocked up. This is useful when some external dependencies
# are not met at build time and break the building process.
autodoc_mock_imports = [
'MySQLdb',
'adal',
'analytics',
'azure',
'azure.cosmos',
'azure.datalake',
'azure.kusto',
'azure.mgmt',
'boto3',
'botocore',
'bson',
'cassandra',
'celery',
'cloudant',
'cryptography',
'cx_Oracle',
'datadog',
'distributed',
'docker',
'google',
'google_auth_httplib2',
'googleapiclient',
'grpc',
'hdfs',
'httplib2',
'jaydebeapi',
'jenkins',
'jira',
'kubernetes',
'msrestazure',
'pandas',
'pandas_gbq',
'paramiko',
'pinotdb',
'psycopg2',
'pydruid',
'pyhive',
'pyhive',
'pymongo',
'pymssql',
'pysftp',
'qds_sdk',
'redis',
'simple_salesforce',
'slackclient',
'smbclient',
'snowflake',
'sshtunnel',
'tenacity',
'vertica_python',
'winrm',
'zdesk',
]
# The default options for autodoc directives. They are applied to all autodoc directives automatically.
autodoc_default_options = {'show-inheritance': True, 'members': True}
# -- Options for sphinx.ext.intersphinx ----------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
# This config value contains the locations and names of other projects that should
# be linked to in this documentation.
intersphinx_mapping = {
'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
'celery': ('https://docs.celeryproject.org/en/stable/', None),
'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None),
'jinja2': ('https://jinja.palletsprojects.com/en/master/', None),
'mongodb': ('https://api.mongodb.com/python/current/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'python': ('https://docs.python.org/3/', None),
'requests': ('https://requests.readthedocs.io/en/master/', None),
'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
# google-api
'google-api-core': ('https://googleapis.dev/python/google-api-core/latest', None),
'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None),
'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None),
'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None),
'google-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None),
'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None),
'google-cloud-container': ('https://googleapis.dev/python/container/latest', None),
'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None),
'google-cloud-datacatalog': ('https://googleapis.dev/python/datacatalog/latest', None),
'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None),
'google-cloud-dlp': ('https://googleapis.dev/python/dlp/latest', None),
'google-cloud-kms': ('https://googleapis.dev/python/cloudkms/latest', None),
'google-cloud-language': ('https://googleapis.dev/python/language/latest', None),
'google-cloud-monitoring': ('https://googleapis.dev/python/monitoring/latest', None),
'google-cloud-pubsub': ('https://googleapis.dev/python/pubsub/latest', None),
'google-cloud-redis': ('https://googleapis.dev/python/redis/latest', None),
'google-cloud-spanner': ('https://googleapis.dev/python/spanner/latest', None),
'google-cloud-speech': ('https://googleapis.dev/python/speech/latest', None),
'google-cloud-storage': ('https://googleapis.dev/python/storage/latest', None),
'google-cloud-tasks': ('https://googleapis.dev/python/cloudtasks/latest', None),
'google-cloud-texttospeech': ('https://googleapis.dev/python/texttospeech/latest', None),
'google-cloud-translate': ('https://googleapis.dev/python/translation/latest', None),
'google-cloud-videointelligence': ('https://googleapis.dev/python/videointelligence/latest', None),
'google-cloud-vision': ('https://googleapis.dev/python/vision/latest', None),
}
# -- Options for sphinx.ext.viewcode -------------------------------------------
# See: https://www.sphinx-doc.org/es/master/usage/extensions/viewcode.html
# If this is True, viewcode extension will emit viewcode-follow-imported event to resolve the name of
# the module by other extensions. The default is True.
viewcode_follow_imported_members = True
# -- Options for sphinx-autoapi ------------------------------------------------
# See: https://sphinx-autoapi.readthedocs.io/en/latest/config.html
# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
autoapi_dirs = [
os.path.abspath('../airflow'),
]
# A directory that has user-defined templates to override our default templates.
autoapi_template_dir = 'autoapi_templates'
# A list of patterns to ignore when finding files
autoapi_ignore = [
'*/airflow/kubernetes/kubernetes_request_factory/*',
'*/_internal*',
'*/airflow/**/providers/**/utils/*',
'*/node_modules/*',
'*/example_dags/*',
'*/migrations/*',
]
# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = True
# Relative path to output the AutoAPI files into. This can also be used to place the generated documentation
# anywhere in your documentation hierarchy.
autoapi_root = '_api'
# -- Options for ext.exampleinclude --------------------------------------------
exampleinclude_sourceroot = os.path.abspath('..')
# -- Options for ext.redirects -------------------------------------------------
redirects_file = 'redirects.txt'
# -- Options for sphinxcontrib.redoc -------------------------------------------
# See: https://sphinxcontrib-redoc.readthedocs.io/en/stable/
OPENAPI_FILE = os.path.join(os.path.dirname(__file__), "..", "airflow", "api_connexion", "openapi", "v1.yaml")
redoc = [
{
'name': 'Airflow REST API',
'page': 'stable-rest-api-ref',
'spec': OPENAPI_FILE,
'opts': {
'hide-hostname': True,
'no-auto-auth': True,
},
},
]
# Options for script updater
redoc_script_url = "https://cdn.jsdelivr.net/npm/redoc@2.0.0-rc.30/bundles/redoc.standalone.js"
import json
from girder.constants import AccessType
from girder_client import HttpError
import pytest
from .conftest import getClient, getTestFolder, localDataRoot, users, wait_for_jobs
@pytest.mark.integration
@pytest.mark.parametrize("user", users.values())
@pytest.mark.run(order=3)
def test_reset_integration_env(user: dict):
    """Delete the user's private test folder so later ordered tests start clean."""
    client = getClient(user['login'])
    privateFolder = getTestFolder(client)
    # Fix: inner subscript quotes must differ from the f-string delimiter.
    # f"...{d["k"]}..." is a SyntaxError on every Python before 3.12 (PEP 701).
    client.delete(f"folder/{privateFolder['_id']}")
@pytest.mark.integration
@pytest.mark.parametrize("user", users.values())
@pytest.mark.run(order=4)
def test_upload_user_data(user: dict):
    """Upload each of the user's datasets, run postprocessing, and verify results.

    For every dataset declared in the fixture: create a folder with fps/type
    metadata, validate and upload the local files, then trigger postprocessing.
    Datasets marked ``sharedWith`` additionally exercise access control: the
    other user must be denied before the ACL is written and allowed afterwards.
    Finally, the stored dataset metadata is checked against the fixture.

    Fix applied: five f-strings reused their own quote character inside a
    replacement field (e.g. ``f'...{d['_id']}'``), which is a SyntaxError on
    every Python before 3.12 (PEP 701); inner quotes now differ from the
    delimiter.
    """
    client = getClient(user['login'])
    createdDatasets = []
    for dataset in user['data']:
        dsPath = localDataRoot / str(dataset['path'])
        privateFolder = getTestFolder(client)
        newDatasetFolder = client.createFolder(
            privateFolder['_id'],
            dataset['name'],
            metadata={
                'fps': dataset['fps'],
                'type': dataset['type'],
            },
        )
        createdDatasets.append(newDatasetFolder)
        # Validate the fileset before uploading anything.
        filenames = [file.name for file in dsPath.iterdir()]
        valid = client.post('dive_dataset/validate_files', json=filenames)
        assert valid['ok'], 'File validation failed'
        for file in dsPath.iterdir():
            if file.is_file():
                client.uploadFileToFolder(newDatasetFolder['_id'], str(file))
        client.post(f'dive_rpc/postprocess/{newDatasetFolder["_id"]}')
        if dataset.get('sharedWith', False):
            me = client.get('user/me')
            otherClient = getClient(dataset['sharedWith'])
            otherUser = otherClient.get('user/me')
            # Before the ACL is written the other user must be rejected.
            with pytest.raises(HttpError):
                otherClient.get(f'dive_dataset/{newDatasetFolder["_id"]}')
            client.put(
                f'folder/{newDatasetFolder["_id"]}/access',
                data={
                    'public': False,
                    'recurse': False,
                    'progress': False,
                    'access': json.dumps(
                        {
                            'users': [
                                {'id': me['_id'], 'level': AccessType.ADMIN, 'flags': []},
                                {'id': otherUser['_id'], 'level': AccessType.READ, 'flags': []},
                            ],
                            'groups': [],
                        }
                    ),
                },
            )
            # After granting READ the other user can fetch the dataset.
            assert (
                otherClient.get(
                    f'dive_dataset/{newDatasetFolder["_id"]}', jsonResp=False
                ).status_code
                == 200
            )
    wait_for_jobs(client)
    # Confirm that the new dataset looks like it should.
    for created, expected in zip(createdDatasets, user['data']):
        created = client.get(f'dive_dataset/{created["_id"]}')
        if expected['type'] == 'video':
            assert created['fps'] == expected['originalFps'] or created['fps'] == expected['fps']
            assert created['annotate']
            assert created['originalFps'] == expected['originalFps']
| import json
from girder.constants import AccessType
from girder_client import HttpError
import pytest
from .conftest import getClient, getTestFolder, localDataRoot, users, wait_for_jobs
@pytest.mark.integration
@pytest.mark.parametrize("user", users.values())
@pytest.mark.run(order=3)
def test_reset_integration_env(user: dict):
    """Remove the user's private test folder so subsequent tests start from scratch."""
    api = getClient(user['login'])
    folder = getTestFolder(api)
    api.delete('folder/{}'.format(folder['_id']))
@pytest.mark.integration
@pytest.mark.parametrize("user", users.values())
@pytest.mark.run(order=4)
def test_upload_user_data(user: dict):
    """Upload each of the user's datasets, run postprocessing, and verify results.

    For every dataset declared in the fixture: create a folder carrying
    fps/type metadata, validate the local fileset, upload the files, and
    trigger postprocessing. Datasets marked ``sharedWith`` additionally
    exercise access control: the other user must be denied before the ACL is
    written and allowed afterwards. Finally, stored metadata is compared
    against the fixture expectations.
    """
    client = getClient(user['login'])
    createdDatasets = []
    for dataset in user['data']:
        dsPath = localDataRoot / str(dataset['path'])
        privateFolder = getTestFolder(client)
        newDatasetFolder = client.createFolder(
            privateFolder['_id'],
            dataset['name'],
            metadata={
                'fps': dataset['fps'],
                'type': dataset['type'],
            },
        )
        createdDatasets.append(newDatasetFolder)
        # Validate the fileset
        filenames = [file.name for file in dsPath.iterdir()]
        valid = client.post('dive_dataset/validate_files', json=filenames)
        assert valid['ok'], 'File validation failed'
        # Upload only regular files; subdirectories are skipped.
        for file in dsPath.iterdir():
            if file.is_file():
                client.uploadFileToFolder(newDatasetFolder['_id'], str(file))
        client.post(f'dive_rpc/postprocess/{newDatasetFolder["_id"]}')
        if dataset.get('sharedWith', False):
            me = client.get('user/me')
            otherClient = getClient(dataset['sharedWith'])
            otherUser = otherClient.get('user/me')
            # Before the ACL is written, the other user must be rejected.
            with pytest.raises(HttpError):
                otherClient.get(f'dive_dataset/{newDatasetFolder["_id"]}')
            client.put(
                f'folder/{newDatasetFolder["_id"]}/access',
                data={
                    'public': False,
                    'recurse': False,
                    'progress': False,
                    # Grant ADMIN to the owner and READ to the shared user.
                    'access': json.dumps(
                        {
                            'users': [
                                {'id': me['_id'], 'level': AccessType.ADMIN, 'flags': []},
                                {'id': otherUser['_id'], 'level': AccessType.READ, 'flags': []},
                            ],
                            'groups': [],
                        }
                    ),
                },
            )
            # After granting READ, the other user can fetch the dataset.
            assert (
                otherClient.get(
                    f'dive_dataset/{newDatasetFolder["_id"]}', jsonResp=False
                ).status_code
                == 200
            )
    wait_for_jobs(client)
    # Confirm that the new dataset looks like it should.
    for created, expected in zip(createdDatasets, user['data']):
        created = client.get(f'dive_dataset/{created["_id"]}')
        if expected['type'] == 'video':
            assert created['fps'] == expected['originalFps'] or created['fps'] == expected['fps']
        assert created['annotate']
        assert created['originalFps'] == expected['originalFps']
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['HostGroupAccountUserGroupAttachmentArgs', 'HostGroupAccountUserGroupAttachment']
# NOTE(review): tfgen-generated input type. The pulumi.set/pulumi.get key names
# presumably must match the @pulumi.getter(name=...) camelCase names consumed by
# the @pulumi.input_type machinery — confirm before renaming anything here.
@pulumi.input_type
class HostGroupAccountUserGroupAttachmentArgs:
    def __init__(__self__, *,
                 host_account_names: pulumi.Input[Sequence[pulumi.Input[str]]],
                 host_group_id: pulumi.Input[str],
                 instance_id: pulumi.Input[str],
                 user_group_id: pulumi.Input[str]):
        """
        The set of arguments for constructing a HostGroupAccountUserGroupAttachment resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
        :param pulumi.Input[str] host_group_id: The ID of the host group.
        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        # All four inputs are required and stored unconditionally.
        pulumi.set(__self__, "host_account_names", host_account_names)
        pulumi.set(__self__, "host_group_id", host_group_id)
        pulumi.set(__self__, "instance_id", instance_id)
        pulumi.set(__self__, "user_group_id", user_group_id)
    @property
    @pulumi.getter(name="hostAccountNames")
    def host_account_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        A list names of the host account.
        """
        return pulumi.get(self, "host_account_names")
    @host_account_names.setter
    def host_account_names(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "host_account_names", value)
    @property
    @pulumi.getter(name="hostGroupId")
    def host_group_id(self) -> pulumi.Input[str]:
        """
        The ID of the host group.
        """
        return pulumi.get(self, "host_group_id")
    @host_group_id.setter
    def host_group_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "host_group_id", value)
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Input[str]:
        """
        The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "instance_id")
    @instance_id.setter
    def instance_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_id", value)
    @property
    @pulumi.getter(name="userGroupId")
    def user_group_id(self) -> pulumi.Input[str]:
        """
        The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "user_group_id")
    @user_group_id.setter
    def user_group_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_group_id", value)
# NOTE(review): tfgen-generated state type; mirrors the Args type above but with
# every field optional, for use when looking up existing resources.
@pulumi.input_type
class _HostGroupAccountUserGroupAttachmentState:
    def __init__(__self__, *,
                 host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 host_group_id: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 user_group_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering HostGroupAccountUserGroupAttachment resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
        :param pulumi.Input[str] host_group_id: The ID of the host group.
        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        # Only store values that were actually supplied, so unspecified fields
        # remain unset rather than becoming explicit Nones.
        if host_account_names is not None:
            pulumi.set(__self__, "host_account_names", host_account_names)
        if host_group_id is not None:
            pulumi.set(__self__, "host_group_id", host_group_id)
        if instance_id is not None:
            pulumi.set(__self__, "instance_id", instance_id)
        if user_group_id is not None:
            pulumi.set(__self__, "user_group_id", user_group_id)
    @property
    @pulumi.getter(name="hostAccountNames")
    def host_account_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list names of the host account.
        """
        return pulumi.get(self, "host_account_names")
    @host_account_names.setter
    def host_account_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "host_account_names", value)
    @property
    @pulumi.getter(name="hostGroupId")
    def host_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the host group.
        """
        return pulumi.get(self, "host_group_id")
    @host_group_id.setter
    def host_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host_group_id", value)
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "instance_id")
    @instance_id.setter
    def instance_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_id", value)
    @property
    @pulumi.getter(name="userGroupId")
    def user_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "user_group_id")
    @user_group_id.setter
    def user_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_group_id", value)
class HostGroupAccountUserGroupAttachment(pulumi.CustomResource):
    # Docstring fix: the usage examples previously reused the f-string's own
    # quote character inside replacement fields (f"...{range["value"]}..."),
    # which is a SyntaxError on every Python before 3.12 — the inner quotes
    # now differ so the examples are copy-pasteable.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 host_group_id: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 user_group_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Bastion Host Host Account Attachment resource to add list host accounts into one user group and one host group.
        > **NOTE:** Available in v1.135.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        default_host = alicloud.bastionhost.Host("defaultHost",
            instance_id="bastionhost-cn-tl3xxxxxxx",
            host_name=var["name"],
            active_address_type="Private",
            host_private_address="172.16.0.10",
            os_type="Linux",
            source="Local")
        default_host_account = []
        for range in [{"value": i} for i in range(0, 3)]:
            default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
                instance_id=default_host.instance_id,
                host_account_name=f"example_value-{range['value']}",
                host_id=default_host.host_id,
                protocol_name="SSH",
                password="YourPassword12345"))
        default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
            instance_id=default_host.instance_id,
            user_group_name="my-local-user")
        default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",
            host_group_name="example_value",
            instance_id="bastionhost-cn-tl3xxxxxxx")
        default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",
            instance_id=default_host.instance_id,
            user_group_id=default_user_group.user_group_id,
            host_group_id=default_host_group.host_group_id,
            host_account_names=[__item.host_account_name for __item in default_host_account])
        ```
        ## Import
        Bastion Host Host Account can be imported using the id, e.g.
        ```sh
         $ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
        :param pulumi.Input[str] host_group_id: The ID of the host group.
        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: HostGroupAccountUserGroupAttachmentArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Bastion Host Host Account Attachment resource to add list host accounts into one user group and one host group.
        > **NOTE:** Available in v1.135.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        default_host = alicloud.bastionhost.Host("defaultHost",
            instance_id="bastionhost-cn-tl3xxxxxxx",
            host_name=var["name"],
            active_address_type="Private",
            host_private_address="172.16.0.10",
            os_type="Linux",
            source="Local")
        default_host_account = []
        for range in [{"value": i} for i in range(0, 3)]:
            default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
                instance_id=default_host.instance_id,
                host_account_name=f"example_value-{range['value']}",
                host_id=default_host.host_id,
                protocol_name="SSH",
                password="YourPassword12345"))
        default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
            instance_id=default_host.instance_id,
            user_group_name="my-local-user")
        default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",
            host_group_name="example_value",
            instance_id="bastionhost-cn-tl3xxxxxxx")
        default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",
            instance_id=default_host.instance_id,
            user_group_id=default_user_group.user_group_id,
            host_group_id=default_host_group.host_group_id,
            host_account_names=[__item.host_account_name for __item in default_host_account])
        ```
        ## Import
        Bastion Host Host Account can be imported using the id, e.g.
        ```sh
         $ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>
        ```
        :param str resource_name: The name of the resource.
        :param HostGroupAccountUserGroupAttachmentArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatch between the two typed overloads above: callers may
        # pass an Args object or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(HostGroupAccountUserGroupAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       host_group_id: Optional[pulumi.Input[str]] = None,
                       instance_id: Optional[pulumi.Input[str]] = None,
                       user_group_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = HostGroupAccountUserGroupAttachmentArgs.__new__(HostGroupAccountUserGroupAttachmentArgs)
            # Required properties may only be omitted when rehydrating from an
            # existing URN.
            if host_account_names is None and not opts.urn:
                raise TypeError("Missing required property 'host_account_names'")
            __props__.__dict__["host_account_names"] = host_account_names
            if host_group_id is None and not opts.urn:
                raise TypeError("Missing required property 'host_group_id'")
            __props__.__dict__["host_group_id"] = host_group_id
            if instance_id is None and not opts.urn:
                raise TypeError("Missing required property 'instance_id'")
            __props__.__dict__["instance_id"] = instance_id
            if user_group_id is None and not opts.urn:
                raise TypeError("Missing required property 'user_group_id'")
            __props__.__dict__["user_group_id"] = user_group_id
        super(HostGroupAccountUserGroupAttachment, __self__).__init__(
            'alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            host_group_id: Optional[pulumi.Input[str]] = None,
            instance_id: Optional[pulumi.Input[str]] = None,
            user_group_id: Optional[pulumi.Input[str]] = None) -> 'HostGroupAccountUserGroupAttachment':
        """
        Get an existing HostGroupAccountUserGroupAttachment resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
        :param pulumi.Input[str] host_group_id: The ID of the host group.
        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _HostGroupAccountUserGroupAttachmentState.__new__(_HostGroupAccountUserGroupAttachmentState)
        __props__.__dict__["host_account_names"] = host_account_names
        __props__.__dict__["host_group_id"] = host_group_id
        __props__.__dict__["instance_id"] = instance_id
        __props__.__dict__["user_group_id"] = user_group_id
        return HostGroupAccountUserGroupAttachment(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="hostAccountNames")
    def host_account_names(self) -> pulumi.Output[Sequence[str]]:
        """
        A list names of the host account.
        """
        return pulumi.get(self, "host_account_names")
    @property
    @pulumi.getter(name="hostGroupId")
    def host_group_id(self) -> pulumi.Output[str]:
        """
        The ID of the host group.
        """
        return pulumi.get(self, "host_group_id")
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Output[str]:
        """
        The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "instance_id")
    @property
    @pulumi.getter(name="userGroupId")
    def user_group_id(self) -> pulumi.Output[str]:
        """
        The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "user_group_id")
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['HostGroupAccountUserGroupAttachmentArgs', 'HostGroupAccountUserGroupAttachment']
# NOTE(review): tfgen-generated input type. The pulumi.set/pulumi.get key names
# presumably must match the @pulumi.getter(name=...) camelCase names consumed by
# the @pulumi.input_type machinery — confirm before renaming anything here.
@pulumi.input_type
class HostGroupAccountUserGroupAttachmentArgs:
    def __init__(__self__, *,
                 host_account_names: pulumi.Input[Sequence[pulumi.Input[str]]],
                 host_group_id: pulumi.Input[str],
                 instance_id: pulumi.Input[str],
                 user_group_id: pulumi.Input[str]):
        """
        The set of arguments for constructing a HostGroupAccountUserGroupAttachment resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
        :param pulumi.Input[str] host_group_id: The ID of the host group.
        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        # All four inputs are required and stored unconditionally.
        pulumi.set(__self__, "host_account_names", host_account_names)
        pulumi.set(__self__, "host_group_id", host_group_id)
        pulumi.set(__self__, "instance_id", instance_id)
        pulumi.set(__self__, "user_group_id", user_group_id)
    @property
    @pulumi.getter(name="hostAccountNames")
    def host_account_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        A list names of the host account.
        """
        return pulumi.get(self, "host_account_names")
    @host_account_names.setter
    def host_account_names(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "host_account_names", value)
    @property
    @pulumi.getter(name="hostGroupId")
    def host_group_id(self) -> pulumi.Input[str]:
        """
        The ID of the host group.
        """
        return pulumi.get(self, "host_group_id")
    @host_group_id.setter
    def host_group_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "host_group_id", value)
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Input[str]:
        """
        The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "instance_id")
    @instance_id.setter
    def instance_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_id", value)
    @property
    @pulumi.getter(name="userGroupId")
    def user_group_id(self) -> pulumi.Input[str]:
        """
        The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "user_group_id")
    @user_group_id.setter
    def user_group_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_group_id", value)
# NOTE(review): tfgen-generated state type; mirrors the Args type above but with
# every field optional, for use when looking up existing resources.
@pulumi.input_type
class _HostGroupAccountUserGroupAttachmentState:
    def __init__(__self__, *,
                 host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 host_group_id: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 user_group_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering HostGroupAccountUserGroupAttachment resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
        :param pulumi.Input[str] host_group_id: The ID of the host group.
        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        # Only store values that were actually supplied, so unspecified fields
        # remain unset rather than becoming explicit Nones.
        if host_account_names is not None:
            pulumi.set(__self__, "host_account_names", host_account_names)
        if host_group_id is not None:
            pulumi.set(__self__, "host_group_id", host_group_id)
        if instance_id is not None:
            pulumi.set(__self__, "instance_id", instance_id)
        if user_group_id is not None:
            pulumi.set(__self__, "user_group_id", user_group_id)
    @property
    @pulumi.getter(name="hostAccountNames")
    def host_account_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list names of the host account.
        """
        return pulumi.get(self, "host_account_names")
    @host_account_names.setter
    def host_account_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "host_account_names", value)
    @property
    @pulumi.getter(name="hostGroupId")
    def host_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the host group.
        """
        return pulumi.get(self, "host_group_id")
    @host_group_id.setter
    def host_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host_group_id", value)
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "instance_id")
    @instance_id.setter
    def instance_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_id", value)
    @property
    @pulumi.getter(name="userGroupId")
    def user_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "user_group_id")
    @user_group_id.setter
    def user_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_group_id", value)
class HostGroupAccountUserGroupAttachment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Bastion Host Host Account Attachment resource to add list host accounts into one user group and one host group.
> **NOTE:** Available in v1.135.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
default_host = alicloud.bastionhost.Host("defaultHost",
instance_id="bastionhost-cn-tl3xxxxxxx",
host_name=var["name"],
active_address_type="Private",
host_private_address="172.16.0.10",
os_type="Linux",
source="Local")
default_host_account = []
for range in [{"value": i} for i in range(0, 3)]:
default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
instance_id=default_host.instance_id,
host_account_name=f"example_value-{range['value']}",
host_id=default_host.host_id,
protocol_name="SSH",
password="YourPassword12345"))
default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
instance_id=default_host.instance_id,
user_group_name="my-local-user")
default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",
host_group_name="example_value",
instance_id="bastionhost-cn-tl3xxxxxxx")
default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",
instance_id=default_host.instance_id,
user_group_id=default_user_group.user_group_id,
host_group_id=default_host_group.host_group_id,
host_account_names=[__item.host_account_name for __item in default_host_account])
```
## Import
Bastion Host Host Account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HostGroupAccountUserGroupAttachmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Bastion Host Host Account Attachment resource to add list host accounts into one user group and one host group.
> **NOTE:** Available in v1.135.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
default_host = alicloud.bastionhost.Host("defaultHost",
instance_id="bastionhost-cn-tl3xxxxxxx",
host_name=var["name"],
active_address_type="Private",
host_private_address="172.16.0.10",
os_type="Linux",
source="Local")
default_host_account = []
for range in [{"value": i} for i in range(0, 3)]:
default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
instance_id=default_host.instance_id,
host_account_name=f"example_value-{range['value']}",
host_id=default_host.host_id,
protocol_name="SSH",
password="YourPassword12345"))
default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
instance_id=default_host.instance_id,
user_group_name="my-local-user")
default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",
host_group_name="example_value",
instance_id="bastionhost-cn-tl3xxxxxxx")
default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",
instance_id=default_host.instance_id,
user_group_id=default_user_group.user_group_id,
host_group_id=default_host_group.host_group_id,
host_account_names=[__item.host_account_name for __item in default_host_account])
```
## Import
Bastion Host Host Account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>
```
:param str resource_name: The name of the resource.
:param HostGroupAccountUserGroupAttachmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HostGroupAccountUserGroupAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HostGroupAccountUserGroupAttachmentArgs.__new__(HostGroupAccountUserGroupAttachmentArgs)
if host_account_names is None and not opts.urn:
raise TypeError("Missing required property 'host_account_names'")
__props__.__dict__["host_account_names"] = host_account_names
if host_group_id is None and not opts.urn:
raise TypeError("Missing required property 'host_group_id'")
__props__.__dict__["host_group_id"] = host_group_id
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
if user_group_id is None and not opts.urn:
raise TypeError("Missing required property 'user_group_id'")
__props__.__dict__["user_group_id"] = user_group_id
super(HostGroupAccountUserGroupAttachment, __self__).__init__(
'alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        host_group_id: Optional[pulumi.Input[str]] = None,
        instance_id: Optional[pulumi.Input[str]] = None,
        user_group_id: Optional[pulumi.Input[str]] = None) -> 'HostGroupAccountUserGroupAttachment':
    """
    Look up an existing HostGroupAccountUserGroupAttachment by name and provider id.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
    :param pulumi.Input[str] host_group_id: The ID of the host group.
    :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
    :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
    """
    # Force the lookup id into the resource options.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = _HostGroupAccountUserGroupAttachmentState.__new__(_HostGroupAccountUserGroupAttachmentState)
    # Seed the state bag with whatever qualifiers the caller supplied.
    state = {
        "host_account_names": host_account_names,
        "host_group_id": host_group_id,
        "instance_id": instance_id,
        "user_group_id": user_group_id,
    }
    __props__.__dict__.update(state)
    return HostGroupAccountUserGroupAttachment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="hostAccountNames")
def host_account_names(self) -> pulumi.Output[Sequence[str]]:
    """
    A list of names of the host accounts.
    """
    # Resolved output value of the `hostAccountNames` input property.
    return pulumi.get(self, "host_account_names")
@property
@pulumi.getter(name="hostGroupId")
def host_group_id(self) -> pulumi.Output[str]:
    """
    The ID of the host group.
    """
    # Resolved output value of the `hostGroupId` input property.
    return pulumi.get(self, "host_group_id")
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Output[str]:
    """
    The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
    """
    # Resolved output value of the `instanceId` input property.
    return pulumi.get(self, "instance_id")
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> pulumi.Output[str]:
    """
    The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
    """
    # Resolved output value of the `userGroupId` input property.
    return pulumi.get(self, "user_group_id")
|
import random
from time import sleep
def Guess():
    """Read guesses from stdin until one is an integer in [0, 10]; return it.

    Every read (valid or not) increments the module-global `attempts` counter.
    """
    global attempts
    while True:
        attempts += 1  # This will count every attempt made by the user
        try:
            user_number = int(input().replace(' ', ''))
        except ValueError:
            # BUGFIX: was a bare `except:`, which also swallowed EOFError and
            # KeyboardInterrupt (making the loop inescapable on closed stdin).
            # Only non-numeric input should be re-prompted.
            print("You should put a number between 0 and 10 <3")
        else:
            if user_number > 10 or user_number < 0:
                print("I told you a number between 0 and 10 <3")
            else:
                break
    return user_number
def NextGame():
    """Ask whether to play again; return the accepted answer.

    Returns one of "S", "N", "[S]" or "[N]" (uppercased, spaces removed).
    Callers test the result with `choice not in "[S]"`, which all four
    return values satisfy correctly.
    """
    while True:
        choice = input(
            "Do you want to play again? [S]/[N] ").upper().replace(' ', '')
        # BUGFIX: the old substring test (`choice in "[S]"`) also accepted
        # partial junk such as "S]" or "[N"; require an exact answer instead.
        if choice in ("S", "N", "[S]", "[N]"):
            break
        else:
            print("I didn't understand your choice.", end=' ')
    return choice
# Introduction banner.
print("\033[1;36m=-"*20, "\033[m")
# BUGFIX: the centered title previously reused the f-string's own single
# quotes, which is a SyntaxError before Python 3.12 (PEP 701).
print(f'\033[1;36m {"Lets play Number Guesser!":^40}\033[m')
print("\033[1;36m=-"*20, "\033[m")
sleep(2)
# Outer game loop: one iteration per mode selection / play-again round.
while True:
    mode = input(
        "\nFirst of all, choose a mode: \n[1] Normal mode \n[2] Hide the thimble\n").replace(' ', '')
    # Re-prompt until the user types 1 or 2.  Non-numeric input short-circuits
    # the `or` before int() is ever evaluated.
    while True:
        if not mode.isnumeric() or int(mode) != 1 and int(mode) != 2:
            # Strip spaces here too, for consistency with the first prompt.
            mode = input("I said to you to choose 1 or 2.\n").replace(' ', '')
        else:
            break
    if int(mode) == 1:  # Normal mode: a fresh secret number after every miss.
        while True:
            attempts = 0  # reset the attempt counter for this round
            print("I chose a number between 0 and 10, try to guess it! ")
            while True:
                pc_number = random.randint(0, 10)
                # Guess() loops internally until the input is a number in 0..10.
                user_number = Guess()
                if user_number != pc_number:
                    print(
                        "Oops! You are wrong, let me chose another number... Guess it!")
                else:
                    break
            print(f"Yes! You are right! You made it with {attempts} attempts!")
            # The user decides whether to play again.
            choice = NextGame()
            break
        if choice not in "[S]":
            break
    elif int(mode) == 2:  # "Hide the thimble": fixed secret with hot/cold hints.
        attempts = 0  # reset the attempt counter for this round
        pc_number = random.randint(0, 10)
        print("I chose a number between 0 and 10, try to guess it!")
        while True:
            user_number = Guess()
            if pc_number == user_number:
                break
            # Within 2 of the secret number: the player is getting close.
            elif pc_number > user_number >= pc_number-2 or pc_number < user_number <= pc_number+2:
                print("Hot.")
            else:
                print("Cold.")
        print(f"Yes! You are right! You made it with {attempts} attempts!")
        choice = NextGame()
        if choice not in "[S]":
            break
# Goodbye
print("\nBye, bye! I'll miss you <3")
print("\033[1;34;107mBy: Kaique Apolinário\033[m")
| import random
from time import sleep
def Guess():
    """Keep reading stdin until the user enters an integer in [0, 10].

    Every read — valid or not — bumps the module-global `attempts` counter.
    """
    global attempts
    while True:
        attempts += 1  # count this try, even if it turns out invalid
        try:
            guess = int(input().replace(' ', ''))
        except:  # deliberately broad: any unparsable input re-prompts
            print("You should put a number between 0 and 10 <3")
            continue
        if 0 <= guess <= 10:
            return guess
        print("I told you a number between 0 and 10 <3")
def NextGame():
    """Prompt for a replay answer until it passes the substring-based check.

    Accepts answers that are substrings of "[S]" or "[N]" but not of "[]"
    (e.g. "S", "N", "[S]", "[N]"), uppercased and with spaces removed.
    """
    while True:
        answer = input(
            "Do you want to play again? [S]/[N] ").upper().replace(' ', '')
        accepted = (answer in "[S]" or answer in "[N]") and answer not in "[]"
        if accepted:
            return answer
        print("I didn't understand your choice.", end=' ')
# Introduction
print("\033[1;36m=-"*20, "\033[m")
print(f'\033[1;36m {"Lets play Number Guesser!":^40}\033[m')
print("\033[1;36m=-"*20, "\033[m")
sleep(2)
# The user will choose a mode or will get stuck in a loop until they do so.
while True:
    mode = input(
        "\nFirst of all, choose a mode: \n[1] Normal mode \n[2] Hide the thimble\n").replace(' ', '')
    while True:
        # Non-numeric input short-circuits the `or` before int() is evaluated.
        if mode.isnumeric() == False or int(mode) != 1 and int(mode) != 2:
            mode = input("I said to you to choose 1 or 2.\n")
        else:
            break
    # If the user choose the "normal mode"
    if int(mode) == 1:
        while True:
            # It will reset the amount of attempts every time the player choose to play it.
            attempts = 0
            # The computer will choose a random number
            print("I chose a number between 0 and 10, try to guess it! ")
            while True:
                # A fresh secret number is drawn after every wrong guess in this mode.
                pc_number = random.randint(0, 10)
                # The user will type a number between 0 and 10 or will get stuck in a loop until they do so.
                user_number = Guess()
                if user_number != pc_number:
                    print(
                        "Oops! You are wrong, let me chose another number... Guess it!")
                # When the user win
                else:
                    break
            print(f"Yes! You are right! You made it with {attempts} attempts!")
            # The user choices if they want to play again or not.
            choice = NextGame()
            break
        if choice not in "[S]":
            break
    elif int(mode) == 2:  # If the user choose the "Hide the thimble mode"
        # It will reset the amount of attempts every time the player choose to play it.
        attempts = 0
        # The computer will choose a random number (fixed for the whole round).
        pc_number = random.randint(0, 10)
        print("I chose a number between 0 and 10, try to guess it!")
        # The user will choose a number between 0 and 10, otherwise they will get stuck in a loop.
        while True:
            user_number = Guess()
            if pc_number == user_number:  # If the user number is the same as the computer one, the user wins!
                break
            # If the user's choice is 2 numbers or less apart from the computer one, the user will know they are getting close.
            elif pc_number > user_number >= pc_number-2 or pc_number < user_number <= pc_number+2:
                print("Hot.")
            # Else, they know they aren't close to the computer's number.
            else:
                print("Cold.")
        # When the user win
        print(f"Yes! You are right! You made it with {attempts} attempts!")
        choice = NextGame()
        if choice not in "[S]":
            break
# Goodbye
print(f"\nBye, bye! I'll miss you <3")
print("\033[1;34;107mBy: Kaique Apolinário\033[m")
|
"""
Conversion of length units.
Available Units: Metre, Kilometre, Feet, Inch, Centimeter, Yard, Foot, Mile, Millimeter
USAGE :
-> Import this file into their respective project.
-> Use the function length_conversion() for conversion of length units.
-> Parameters :
-> value : The number of from units you want to convert
-> from_type : From which type you want to convert
-> to_type : To which type you want to convert
REFERENCES :
-> Wikipedia reference: https://en.wikipedia.org/wiki/Meter
-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilometer
-> Wikipedia reference: https://en.wikipedia.org/wiki/Feet
-> Wikipedia reference: https://en.wikipedia.org/wiki/Inch
-> Wikipedia reference: https://en.wikipedia.org/wiki/Centimeter
-> Wikipedia reference: https://en.wikipedia.org/wiki/Yard
-> Wikipedia reference: https://en.wikipedia.org/wiki/Foot
-> Wikipedia reference: https://en.wikipedia.org/wiki/Mile
-> Wikipedia reference: https://en.wikipedia.org/wiki/Millimeter
"""
from collections import namedtuple

# (from_, to) = (factor converting the unit into metres,
#                factor converting metres into the unit)
from_to = namedtuple("from_to", "from_ to")

# Maps full unit names (singular, lowercase) to their abbreviations.
TYPE_CONVERSION = {
    "millimeter": "mm",
    "centimeter": "cm",
    "meter": "m",
    "kilometer": "km",
    "inch": "in",
    "inche": "in",  # Trailing 's' has been stripped off
    "feet": "ft",
    "foot": "ft",
    "yard": "yd",
    "mile": "mi",
}

# Conversion factors to and from metres, keyed by abbreviation.
METRIC_CONVERSION = {
    "mm": from_to(0.001, 1000),
    "cm": from_to(0.01, 100),
    "m": from_to(1, 1),
    "km": from_to(1000, 0.001),
    "in": from_to(0.0254, 39.3701),
    "ft": from_to(0.3048, 3.28084),
    "yd": from_to(0.9144, 1.09361),
    "mi": from_to(1609.34, 0.000621371),
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Conversion between length units.
    >>> length_conversion(4, "METER", "FEET")
    13.12336
    >>> length_conversion(4, "M", "FT")
    13.12336
    >>> length_conversion(1, "meter", "kilometer")
    0.001
    >>> length_conversion(1, "kilometer", "inch")
    39370.1
    >>> length_conversion(3, "kilometer", "mile")
    1.8641130000000001
    >>> length_conversion(2, "feet", "meter")
    0.6096
    >>> length_conversion(4, "feet", "yard")
    1.333329312
    >>> length_conversion(1, "inch", "meter")
    0.0254
    >>> length_conversion(2, "inch", "mile")
    3.15656468e-05
    >>> length_conversion(2, "centimeter", "millimeter")
    20.0
    >>> length_conversion(2, "centimeter", "yard")
    0.0218722
    >>> length_conversion(4, "yard", "meter")
    3.6576
    >>> length_conversion(4, "yard", "kilometer")
    0.0036576
    >>> length_conversion(3, "foot", "meter")
    0.9144000000000001
    >>> length_conversion(3, "foot", "inch")
    36.00001944
    >>> length_conversion(4, "mile", "kilometer")
    6.43736
    >>> length_conversion(2, "miles", "InChEs")
    126719.753468
    >>> length_conversion(3, "millimeter", "centimeter")
    0.3
    >>> length_conversion(3, "mm", "in")
    0.1181103
    >>> length_conversion(4, "wrongUnit", "inch")
    Traceback (most recent call last):
    ...
    ValueError: Invalid 'from_type' value: 'wrongUnit'.
    Conversion abbreviations are: mm, cm, m, km, in, ft, yd, mi
    """
    # Normalise: lowercase, strip a trailing 's', then map full names to abbreviations.
    new_from = from_type.lower().rstrip("s")
    new_from = TYPE_CONVERSION.get(new_from, new_from)
    new_to = to_type.lower().rstrip("s")
    new_to = TYPE_CONVERSION.get(new_to, new_to)
    if new_from not in METRIC_CONVERSION:
        # BUGFIX: the join below previously reused the f-string's own double
        # quotes ({", ".join(...)}), a SyntaxError on Python < 3.12 (PEP 701).
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    if new_to not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    # Convert to metres, then from metres to the target unit.
    return value * METRIC_CONVERSION[new_from].from_ * METRIC_CONVERSION[new_to].to
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # run the doctest examples embedded in length_conversion
| """
Conversion of length units.
Available Units:- Metre,Kilometre,Feet,Inch,Centimeter,Yard,Foot,Mile,Millimeter
USAGE :
-> Import this file into their respective project.
-> Use the function length_conversion() for conversion of length units.
-> Parameters :
-> value : The number of from units you want to convert
-> from_type : From which type you want to convert
-> to_type : To which type you want to convert
REFERENCES :
-> Wikipedia reference: https://en.wikipedia.org/wiki/Meter
-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilometer
-> Wikipedia reference: https://en.wikipedia.org/wiki/Feet
-> Wikipedia reference: https://en.wikipedia.org/wiki/Inch
-> Wikipedia reference: https://en.wikipedia.org/wiki/Centimeter
-> Wikipedia reference: https://en.wikipedia.org/wiki/Yard
-> Wikipedia reference: https://en.wikipedia.org/wiki/Foot
-> Wikipedia reference: https://en.wikipedia.org/wiki/Mile
-> Wikipedia reference: https://en.wikipedia.org/wiki/Millimeter
"""
from collections import namedtuple

# (from_, to) = (factor into metres, factor out of metres)
from_to = namedtuple("from_to", "from_ to")

# Full unit names (singular, lowercase) -> abbreviation.
TYPE_CONVERSION = {
    "millimeter": "mm",
    "centimeter": "cm",
    "meter": "m",
    "kilometer": "km",
    "inch": "in",
    "inche": "in",  # Trailing 's' has been stripped off
    "feet": "ft",
    "foot": "ft",
    "yard": "yd",
    "mile": "mi",
}

# Abbreviation -> conversion factors to and from metres.
METRIC_CONVERSION = {
    "mm": from_to(0.001, 1000),
    "cm": from_to(0.01, 100),
    "m": from_to(1, 1),
    "km": from_to(1000, 0.001),
    "in": from_to(0.0254, 39.3701),
    "ft": from_to(0.3048, 3.28084),
    "yd": from_to(0.9144, 1.09361),
    "mi": from_to(1609.34, 0.000621371),
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Conversion between length units.
    >>> length_conversion(4, "METER", "FEET")
    13.12336
    >>> length_conversion(4, "M", "FT")
    13.12336
    >>> length_conversion(1, "meter", "kilometer")
    0.001
    >>> length_conversion(1, "kilometer", "inch")
    39370.1
    >>> length_conversion(3, "kilometer", "mile")
    1.8641130000000001
    >>> length_conversion(2, "feet", "meter")
    0.6096
    >>> length_conversion(4, "feet", "yard")
    1.333329312
    >>> length_conversion(1, "inch", "meter")
    0.0254
    >>> length_conversion(2, "inch", "mile")
    3.15656468e-05
    >>> length_conversion(2, "centimeter", "millimeter")
    20.0
    >>> length_conversion(2, "centimeter", "yard")
    0.0218722
    >>> length_conversion(4, "yard", "meter")
    3.6576
    >>> length_conversion(4, "yard", "kilometer")
    0.0036576
    >>> length_conversion(3, "foot", "meter")
    0.9144000000000001
    >>> length_conversion(3, "foot", "inch")
    36.00001944
    >>> length_conversion(4, "mile", "kilometer")
    6.43736
    >>> length_conversion(2, "miles", "InChEs")
    126719.753468
    >>> length_conversion(3, "millimeter", "centimeter")
    0.3
    >>> length_conversion(3, "mm", "in")
    0.1181103
    >>> length_conversion(4, "wrongUnit", "inch")
    Traceback (most recent call last):
    ...
    ValueError: Invalid 'from_type' value: 'wrongUnit'.
    Conversion abbreviations are: mm, cm, m, km, in, ft, yd, mi
    """
    def _abbrev(unit: str, role: str) -> str:
        # Lowercase, strip a trailing 's', then map a full name to its
        # abbreviation; reject anything not in the conversion table.
        key = unit.lower().rstrip("s")
        key = TYPE_CONVERSION.get(key, key)
        if key not in METRIC_CONVERSION:
            raise ValueError(
                f"Invalid {role!r} value: {unit!r}.\n"
                f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
            )
        return key

    source = _abbrev(from_type, "from_type")
    target = _abbrev(to_type, "to_type")
    # Into metres, then out of metres.
    return value * METRIC_CONVERSION[source].from_ * METRIC_CONVERSION[target].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
import os
import csv
import shutil
from datetime import datetime
from numpy import logspace
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torch.optim import Adam
from dataset.e_piano import create_epiano_datasets, create_pop909_datasets
from model.music_transformer import MusicTransformer
from model.discriminator import MusicDiscriminator
from model.classifier import CNNDiscriminator
from model.loss import SmoothCrossEntropyLoss
from utilities.constants import *
from utilities.WGAN_GP import WassersteinLoss
from utilities.device import get_device, use_cuda
from utilities.lr_scheduling import LrStepTracker, get_lr
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params
from utilities.run_model import train_epoch, eval_model
# Column headers for the per-epoch results CSV written during training.
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"]

# CNN discriminator filter configuration; not referenced in this file's
# visible code (presumably consumed by CNNDiscriminator callers — TODO confirm).
dis_filter_sizes = [2, 3, 4, 5]
dis_num_filters = [300, 300, 300, 300]

# Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy
BASELINE_EPOCH = -1
# main
def _split_pop909(pop909_dataset):
    """Split the POP909 dataset 80/10/10 into train/valid/test subsets.

    The seeded generator makes the split reproducible across runs, so every
    training session evaluates against the same validation and test sets.
    """
    n_train = int(len(pop909_dataset) * 0.8)
    n_valid = int(len(pop909_dataset) * 0.1)
    n_test = len(pop909_dataset) - n_train - n_valid
    return torch.utils.data.random_split(
        pop909_dataset, [n_train, n_valid, n_test],
        generator=torch.Generator().manual_seed(42))


def main():
    """
    ----------
    Author: Damon Gwinn
    ----------
    Entry point. Trains a model specified by command line arguments
    ----------
    """

    args = parse_train_args()
    print_train_args(args)

    if(args.force_cpu):
        use_cuda(False)
        print("WARNING: Forced CPU usage, expect model to perform slower")
        print("")

    # BUGFIX: the strftime format previously reused the f-string's double
    # quotes, which is a SyntaxError on Python < 3.12 (PEP 701).
    eventid = f"{datetime.now().strftime('MusicTransformer-%Y.%m.%d')}_gan_{args.gan}_creative_{args.creative}_ce_{args.ce_smoothing}"
    args.output_dir = args.output_dir + "/" + eventid
    os.makedirs(args.output_dir, exist_ok=True)

    ##### Output prep #####
    params_file = os.path.join(args.output_dir, "model_params.txt")
    write_model_params(args, params_file)

    weights_folder = os.path.join(args.output_dir, "weights")
    os.makedirs(weights_folder, exist_ok=True)

    results_folder = os.path.join(args.output_dir, "results")
    os.makedirs(results_folder, exist_ok=True)

    # Checkpoint and report destinations.
    results_file = os.path.join(results_folder, "results.csv")
    best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle")
    best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle")
    best_loss_critic_file = os.path.join(results_folder, "best_loss_critic_weights.pickle")
    best_acc_critic_file = os.path.join(results_folder, "best_acc_critic_weights.pickle")
    best_loss_classifier_file = os.path.join(
        results_folder, "best_loss_classifier_weights.pickle")
    best_acc_classifier_file = os.path.join(
        results_folder, "best_acc_classifier_weights.pickle")
    best_text = os.path.join(results_folder, "best_epochs.txt")

    ##### Tensorboard #####
    if(args.no_tensorboard):
        tensorboard_summary = None
    else:
        from torch.utils.tensorboard import SummaryWriter

        tensorboad_dir = os.path.join(args.output_dir, "tensorboard/" + eventid)
        tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir)

    ##### Datasets #####
    # The encoding flags select which pre-processed dataset directory to load.
    # (Original Korean comment: "the dataset changes, so select it as below".)
    if args.interval and args.octave:
        print("octave interval dataset!!")
        classic_train, classic_val, classic_test = create_epiano_datasets(
            './dataset/octave_interval_e_piano', args.max_sequence,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave)
        pop909_dataset = create_pop909_datasets(
            './dataset/logscale_pop909', args.max_sequence,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave)
    elif args.octave and args.fusion_encoding and args.absolute:
        print("absolute dataset!!")
        classic_train, classic_val, classic_test = create_epiano_datasets(
            './dataset/octave_fusion_absolute_e_piano', args.max_sequence,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave,
            fusion=args.fusion_encoding, absolute=args.absolute)
        pop909_dataset = create_pop909_datasets(
            './dataset/pop909_absolute', args.max_sequence,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave,
            fusion=args.fusion_encoding, absolute=args.absolute)
    elif args.interval and not args.octave:
        print("interval dataset!!")
        classic_train, classic_val, classic_test = create_epiano_datasets(
            './dataset/logscale_e_piano', args.max_sequence,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave)
        pop909_dataset = create_pop909_datasets(
            './dataset/logscale_pop909', args.max_sequence,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave)
    elif args.octave and args.fusion_encoding:
        print("Octave_fusion dataset!!")
        classic_train, classic_val, classic_test = create_epiano_datasets(
            './dataset/octave_fusion_e_piano', args.max_sequence,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave,
            fusion=args.fusion_encoding)
        pop909_dataset = create_pop909_datasets(
            './dataset/logscale_pop909', args.max_sequence,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave,
            fusion=args.fusion_encoding)
    elif not args.interval and args.octave and not args.fusion_encoding:
        print("Octave dataset!!")
        classic_train, classic_val, classic_test = create_epiano_datasets(
            './dataset/octave_e_piano', args.max_sequence,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave)
        pop909_dataset = create_pop909_datasets(
            './dataset/pop909_octave', args.max_sequence,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave)
    elif args.logscale:
        # BUGFIX: log message typo corrected ("logscvale" -> "logscale").
        print("logscale dataset")
        classic_train, classic_val, classic_test = create_epiano_datasets(
            './dataset/logscale_epiano0420', args.max_sequence, random_seq=True,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave,
            logscale=args.logscale, absolute=args.absolute)
        pop909_dataset = create_pop909_datasets(
            './dataset/logscale_pop0420', args.max_sequence, random_seq=True,
            condition_token=args.condition_token, interval=args.interval, octave=args.octave,
            logscale=args.logscale, absolute=args.absolute)
    else:
        classic_train, classic_val, classic_test = create_epiano_datasets(
            args.classic_input_dir, args.max_sequence,
            condition_token=args.condition_token, octave=args.octave)
        pop909_dataset = create_pop909_datasets(
            'dataset/pop_pickle/', args.max_sequence,
            condition_token=args.condition_token, octave=args.octave)

    # Every branch used the identical seeded 80/10/10 split; factored out.
    pop_train, pop_valid, pop_test = _split_pop909(pop909_dataset)

    # Select which corpora feed training/validation.
    if args.data == 'both':
        print("Dataset: both")
        train_dataset = torch.utils.data.ConcatDataset([classic_train, pop_train])
        val_dataset = torch.utils.data.ConcatDataset([classic_val, pop_valid])
    elif args.data == 'classic':
        print("Dataset: classic")
        train_dataset = torch.utils.data.ConcatDataset([classic_train])
        val_dataset = torch.utils.data.ConcatDataset([classic_val])
    else:
        print("Dataset: pop")
        train_dataset = torch.utils.data.ConcatDataset([pop_train])
        val_dataset = torch.utils.data.ConcatDataset([pop_valid])

    test_dataset = torch.utils.data.ConcatDataset([classic_test, pop_test])

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers)

    model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
                             d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,
                             max_sequence=args.max_sequence, rpr=args.rpr,
                             condition_token=args.condition_token, interval=args.interval, octave=args.octave,
                             fusion=args.fusion_encoding, absolute=args.absolute, logscale=args.logscale).to(get_device())

    # EY critic: half-sized discriminators for the WGAN objective.
    critic = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
                                d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
                                max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())

    classifier = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
                                    d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
                                    max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())

    if args.creative:
        # Pre-trained genre classifier used for the creativity objective.
        classifier.load_state_dict(torch.load('best_classifier_acc_0.9883.pickle'))

    ##### Continuing from previous training session #####
    start_epoch = BASELINE_EPOCH
    if(args.continue_weights is not None):
        if(args.continue_epoch is None):
            print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights")
            return
        else:
            model.load_state_dict(torch.load(args.continue_weights))
            start_epoch = args.continue_epoch
    elif(args.continue_epoch is not None):
        print("ERROR: Need continue weights (-continue_weights) when using continue_epoch")
        return

    ##### Lr Scheduler vs static lr #####
    if(args.lr is None):
        if(args.continue_epoch is None):
            init_step = 0
        else:
            init_step = args.continue_epoch * len(train_loader)
        lr = LR_DEFAULT_START
        lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
    else:
        lr = args.lr

    ##### Not smoothing evaluation loss #####
    # Pick the pad token matching the chosen encoding so padding never
    # contributes to the loss.
    if args.interval and args.octave:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_INTERVAL)
    elif args.interval and not args.octave:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_INTERVAL)
    elif args.octave and args.fusion_encoding and args.absolute:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
    elif args.octave and args.fusion_encoding:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION)
    elif not args.interval and args.octave and not args.fusion_encoding:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE)
    elif args.logscale:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_RELATIVE)
    else:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)

    ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training #####
    if(args.ce_smoothing is None):
        train_loss_func = eval_loss_func
    else:
        if args.interval and args.octave:
            # NOTE(review): pairs the octave-interval vocab with
            # TOKEN_PAD_INTERVAL while eval uses TOKEN_PAD_OCTAVE_INTERVAL —
            # looks inconsistent; confirm before changing. Kept as-is.
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)
        elif args.interval and not args.octave:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)
        elif not args.interval and args.octave and args.fusion_encoding and args.absolute:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION_ABSOLUTE, ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
        elif not args.interval and args.octave and args.fusion_encoding:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION, ignore_index=TOKEN_PAD_OCTAVE_FUSION)
        elif not args.interval and args.octave and not args.fusion_encoding:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE, ignore_index=TOKEN_PAD_OCTAVE)
        elif args.logscale:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_RELATIVE, ignore_index=TOKEN_PAD_RELATIVE)
        else:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)

    ##### EY - WGAN Loss #####
    classifier_loss_func = nn.MSELoss()

    ##### Optimizer #####
    opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
    critic_opt = Adam(critic.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
    classifier_opt = Adam(classifier.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)

    if(args.lr is None):
        lr_scheduler = LambdaLR(opt, lr_stepper.step)
        critic_lr_scheduler = LambdaLR(critic_opt, lr_stepper.step)
        classifier_lr_scheduler = LambdaLR(classifier_opt, lr_stepper.step)
    else:
        # BUGFIX: previously only lr_scheduler was set here, so passing a
        # fixed --lr crashed train_epoch with a NameError on
        # critic_lr_scheduler / classifier_lr_scheduler.
        lr_scheduler = None
        critic_lr_scheduler = None
        classifier_lr_scheduler = None

    ##### Tracking best evaluation accuracy #####
    best_eval_acc = 0.0
    best_eval_acc_epoch = -1
    best_eval_loss = float("inf")
    best_eval_loss_epoch = -1

    ##### Results reporting #####
    if(not os.path.isfile(results_file)):
        with open(results_file, "w", newline="") as o_stream:
            writer = csv.writer(o_stream)
            writer.writerow(CSV_HEADER)

    ##### TRAIN LOOP #####
    for epoch in range(start_epoch, args.epochs):
        # Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense)
        # NOTE(review): with >= and BASELINE_EPOCH == -1 every epoch trains and
        # the baseline branch is unreachable; switching to > would leave
        # train_loss undefined because the baseline eval_model call below is
        # commented out. Kept as-is — confirm intended behavior.
        if(epoch >= BASELINE_EPOCH):
            print(SEPERATOR)
            print("NEW EPOCH:", epoch+1)
            print(SEPERATOR)
            print("")

            # Train (original Korean comment: "EY: start of the section to fix")
            train_loss, train_acc, dis_loss, gen_loss, cre_loss, gan_accuracy, class_accuracy, creativity = train_epoch(
                epoch+1, model, critic, classifier, train_loader, train_loss_func, classifier_loss_func,
                opt, critic_opt, classifier_opt, lr_scheduler, critic_lr_scheduler, classifier_lr_scheduler, args)

            print(SEPERATOR)
            print("Evaluating:")
        else:
            print(SEPERATOR)
            print("Baseline model evaluation (Epoch 0):")

        # Eval
        # train_loss, train_acc = eval_model(model, train_loader, train_loss_func)
        eval_loss, eval_acc = eval_model(model, val_loader, eval_loss_func, args)

        # Learn rate
        lr = get_lr(opt)

        print("Epoch:", epoch+1)
        print("Avg train loss:", train_loss)
        print("Avg train acc:", train_acc)
        print("Avg eval loss:", eval_loss)
        print("Avg eval acc:", eval_acc)
        print(SEPERATOR)
        print("")

        new_best = False

        if(eval_acc > best_eval_acc):
            best_eval_acc = eval_acc
            best_eval_acc_epoch = epoch+1
            torch.save(model.state_dict(), best_acc_file)
            torch.save(critic.state_dict(), best_acc_critic_file)
            torch.save(classifier.state_dict(), best_acc_classifier_file)
            new_best = True

        if(eval_loss < best_eval_loss):
            best_eval_loss = eval_loss
            best_eval_loss_epoch = epoch+1
            torch.save(model.state_dict(), best_loss_file)
            torch.save(critic.state_dict(), best_loss_critic_file)
            torch.save(classifier.state_dict(), best_loss_classifier_file)
            new_best = True

        # Writing out new bests
        if(new_best):
            with open(best_text, "w") as o_stream:
                print("Best eval acc epoch:", best_eval_acc_epoch, file=o_stream)
                print("Best eval acc:", best_eval_acc, file=o_stream)
                print("")
                print("Best eval loss epoch:", best_eval_loss_epoch, file=o_stream)
                print("Best eval loss:", best_eval_loss, file=o_stream)

        if(not args.no_tensorboard):
            tensorboard_summary.add_scalar("Avg_CE_loss/train", train_loss, global_step=epoch+1)
            tensorboard_summary.add_scalar("Avg_CE_loss/eval", eval_loss, global_step=epoch+1)
            tensorboard_summary.add_scalar("Accuracy/train", train_acc, global_step=epoch+1)
            tensorboard_summary.add_scalar("Accuracy/eval", eval_acc, global_step=epoch+1)
            tensorboard_summary.add_scalar("Learn_rate/train", lr, global_step=epoch+1)
            tensorboard_summary.add_scalar("Critic_loss/train", dis_loss, global_step=epoch+1)
            tensorboard_summary.add_scalar("Gen_loss/train", gen_loss, global_step=epoch+1)
            tensorboard_summary.add_scalar("Creativity_loss/train", cre_loss, global_step=epoch+1)
            tensorboard_summary.add_scalar("GAN_accuracy/train", gan_accuracy, global_step=epoch+1)
            tensorboard_summary.add_scalar("Class_accuracy/train", class_accuracy, global_step=epoch+1)
            tensorboard_summary.add_scalar("Creativity/train", creativity, global_step=epoch+1)
            tensorboard_summary.flush()

        if((epoch+1) % args.weight_modulus == 0):
            epoch_str = str(epoch+1).zfill(PREPEND_ZEROS_WIDTH)
            path = os.path.join(weights_folder, "epoch_" + epoch_str + ".pickle")
            torch.save(model.state_dict(), path)

        with open(results_file, "a", newline="") as o_stream:
            writer = csv.writer(o_stream)
            writer.writerow([epoch+1, lr, train_loss, train_acc, eval_loss, eval_acc])

    # Sanity check just to make sure everything is gone
    if(not args.no_tensorboard):
        tensorboard_summary.flush()

    return
if __name__ == "__main__":
    main()  # script entry point: parse args and start training
| import os
import csv
import shutil
from datetime import datetime
from numpy import logspace
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torch.optim import Adam
from dataset.e_piano import create_epiano_datasets, create_pop909_datasets
from model.music_transformer import MusicTransformer
from model.discriminator import MusicDiscriminator
from model.classifier import CNNDiscriminator
from model.loss import SmoothCrossEntropyLoss
from utilities.constants import *
from utilities.WGAN_GP import WassersteinLoss
from utilities.device import get_device, use_cuda
from utilities.lr_scheduling import LrStepTracker, get_lr
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params
from utilities.run_model import train_epoch, eval_model
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"]
dis_filter_sizes = [2, 3, 4, 5]
dis_num_filters = [300, 300, 300, 300]
# Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy
BASELINE_EPOCH = -1
# main
def main():
"""
----------
Author: Damon Gwinn
----------
Entry point. Trains a model specified by command line arguments
----------
"""
args = parse_train_args()
print_train_args(args)
if(args.force_cpu):
use_cuda(False)
print("WARNING: Forced CPU usage, expect model to perform slower")
print("")
eventid = f"{datetime.now().strftime('MusicTransformer-%Y.%m.%d')}_gan_{args.gan}_creative_{args.creative}_ce_{args.ce_smoothing}"
args.output_dir = args.output_dir + "/" + eventid
os.makedirs(args.output_dir, exist_ok=True)
##### Output prep #####
params_file = os.path.join(args.output_dir, "model_params.txt")
write_model_params(args, params_file)
weights_folder = os.path.join(args.output_dir, "weights")
os.makedirs(weights_folder, exist_ok=True)
results_folder = os.path.join(args.output_dir, "results")
os.makedirs(results_folder, exist_ok=True)
results_file = os.path.join(results_folder, "results.csv")
best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle")
best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle")
best_loss_critic_file = os.path.join(results_folder, "best_loss_critic_weights.pickle")
best_acc_critic_file = os.path.join(results_folder, "best_acc_critic_weights.pickle")
best_loss_classifier_file = os.path.join(
results_folder, "best_loss_classifier_weights.pickle")
best_acc_classifier_file = os.path.join(
results_folder, "best_acc_classifier_weights.pickle")
best_text = os.path.join(results_folder, "best_epochs.txt")
##### Tensorboard #####
if(args.no_tensorboard):
tensorboard_summary = None
else:
from torch.utils.tensorboard import SummaryWriter
tensorboad_dir = os.path.join(args.output_dir, "tensorboard/" + eventid)
tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir)
##### Datasets #####
# 데이터셋이 바뀌기 때문에 아래와같이 해주어야함
if args.interval and args.octave:
print("octave interval dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_interval_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.octave and args.fusion_encoding and args.absolute:
print("absolute dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_absolute_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)
pop909_dataset = create_pop909_datasets('./dataset/pop909_absolute', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.interval and not args.octave:
print("interval dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.octave and args.fusion_encoding:
print("Octave_fusion dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif not args.interval and args.octave and not args.fusion_encoding:
print("Octave dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/pop909_octave', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.logscale:
print("logscvale dataset")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_epiano0420', args.max_sequence, random_seq=True,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop0420', args.max_sequence, random_seq=True, condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
else:
classic_train, classic_val, classic_test = create_epiano_datasets(args.classic_input_dir, args.max_sequence,
condition_token = args.condition_token, octave = args.octave)
pop909_dataset = create_pop909_datasets('dataset/pop_pickle/', args.max_sequence, condition_token = args.condition_token, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1), len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
if args.data == 'both':
print("Dataset: both")
train_dataset = torch.utils.data.ConcatDataset([ classic_train, pop_train])
val_dataset = torch.utils.data.ConcatDataset([ classic_val, pop_valid])
elif args.data == 'classic':
print("Dataset: classic")
train_dataset = torch.utils.data.ConcatDataset([classic_train])
val_dataset = torch.utils.data.ConcatDataset([classic_val])
else:
print("Dataset: pop")
train_dataset = torch.utils.data.ConcatDataset([pop_train])
val_dataset = torch.utils.data.ConcatDataset([pop_valid])
test_dataset = torch.utils.data.ConcatDataset([classic_test, pop_test])
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr,
condition_token = args.condition_token, interval = args.interval, octave = args.octave,
fusion = args.fusion_encoding, absolute = args.absolute, logscale=args.logscale).to(get_device())
# EY critic
# num_prime = args.num_prime
critic = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
classifier = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
if args.creative:
classifier.load_state_dict(torch.load('best_classifier_acc_0.9883.pickle'))
##### Continuing from previous training session #####
start_epoch = BASELINE_EPOCH
if(args.continue_weights is not None):
if(args.continue_epoch is None):
print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights")
return
else:
model.load_state_dict(torch.load(args.continue_weights))
start_epoch = args.continue_epoch
elif(args.continue_epoch is not None):
print("ERROR: Need continue weights (-continue_weights) when using continue_epoch")
return
##### Lr Scheduler vs static lr #####
if(args.lr is None):
if(args.continue_epoch is None):
init_step = 0
else:
init_step = args.continue_epoch * len(train_loader)
lr = LR_DEFAULT_START
lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
else:
lr = args.lr
##### Not smoothing evaluation loss #####
if args.interval and args.octave:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_INTERVAL)
elif args.interval and not args.octave:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_INTERVAL)
elif args.octave and args.fusion_encoding and args.absolute:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
elif args.octave and args.fusion_encoding:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION)
elif not args.interval and args.octave and not args.fusion_encoding:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE)
elif args.logscale:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_RELATIVE)
else:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)
##### SmoothCrossEntropyLoss or CrossEntropyLoss for training #####
if(args.ce_smoothing is None):
train_loss_func = eval_loss_func
else:
if args.interval and args.octave:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)
elif args.interval and not args.octave:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)
elif not args.interval and args.octave and args.fusion_encoding and args.absolute:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION_ABSOLUTE, ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
elif not args.interval and args.octave and args.fusion_encoding:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION, ignore_index=TOKEN_PAD_OCTAVE_FUSION)
elif not args.interval and args.octave and not args.fusion_encoding:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE, ignore_index=TOKEN_PAD_OCTAVE)
elif args.logscale:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_RELATIVE, ignore_index=TOKEN_PAD_RELATIVE)
else:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)
##### EY - WGAN Loss #####
classifier_loss_func = nn.MSELoss()
##### Optimizer #####
opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
critic_opt = Adam(critic.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
classifier_opt = Adam(classifier.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
if(args.lr is None):
lr_scheduler = LambdaLR(opt, lr_stepper.step)
critic_lr_scheduler = LambdaLR(critic_opt, lr_stepper.step)
classifier_lr_scheduler = LambdaLR(classifier_opt, lr_stepper.step)
else:
lr_scheduler = None
##### Tracking best evaluation accuracy #####
best_eval_acc = 0.0
best_eval_acc_epoch = -1
best_eval_loss = float("inf")
best_eval_loss_epoch = -1
##### Results reporting #####
if(not os.path.isfile(results_file)):
with open(results_file, "w", newline="") as o_stream:
writer = csv.writer(o_stream)
writer.writerow(CSV_HEADER)
##### TRAIN LOOP #####
for epoch in range(start_epoch, args.epochs):
# Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense)
if(epoch >= BASELINE_EPOCH):
print(SEPERATOR)
print("NEW EPOCH:", epoch+1)
print(SEPERATOR)
print("")
# Train
# EY 고쳐야 할 부분의 시작
train_loss, train_acc, dis_loss, gen_loss, cre_loss, gan_accuracy, class_accuracy, creativity = train_epoch(epoch+1, model, critic, classifier, train_loader, train_loss_func, classifier_loss_func, opt, critic_opt, classifier_opt, lr_scheduler, critic_lr_scheduler, classifier_lr_scheduler, args)
print(SEPERATOR)
print("Evaluating:")
else:
print(SEPERATOR)
print("Baseline model evaluation (Epoch 0):")
# Eval
# train_loss, train_acc = eval_model(model, train_loader, train_loss_func)
eval_loss, eval_acc = eval_model(model, val_loader, eval_loss_func, args)
# Learn rate
lr = get_lr(opt)
print("Epoch:", epoch+1)
print("Avg train loss:", train_loss)
print("Avg train acc:", train_acc)
print("Avg eval loss:", eval_loss)
print("Avg eval acc:", eval_acc)
print(SEPERATOR)
print("")
new_best = False
if(eval_acc > best_eval_acc):
best_eval_acc = eval_acc
best_eval_acc_epoch = epoch+1
torch.save(model.state_dict(), best_acc_file)
torch.save(critic.state_dict(), best_acc_critic_file)
torch.save(classifier.state_dict(), best_acc_classifier_file)
new_best = True
if(eval_loss < best_eval_loss):
best_eval_loss = eval_loss
best_eval_loss_epoch = epoch+1
torch.save(model.state_dict(), best_loss_file)
torch.save(critic.state_dict(), best_loss_critic_file)
torch.save(classifier.state_dict(), best_loss_classifier_file)
new_best = True
# Writing out new bests
if(new_best):
with open(best_text, "w") as o_stream:
print("Best eval acc epoch:", best_eval_acc_epoch, file=o_stream)
print("Best eval acc:", best_eval_acc, file=o_stream)
print("")
print("Best eval loss epoch:", best_eval_loss_epoch, file=o_stream)
print("Best eval loss:", best_eval_loss, file=o_stream)
if(not args.no_tensorboard):
tensorboard_summary.add_scalar("Avg_CE_loss/train", train_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Avg_CE_loss/eval", eval_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Accuracy/train", train_acc, global_step=epoch+1)
tensorboard_summary.add_scalar("Accuracy/eval", eval_acc, global_step=epoch+1)
tensorboard_summary.add_scalar("Learn_rate/train", lr, global_step=epoch+1)
tensorboard_summary.add_scalar("Critic_loss/train", dis_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Gen_loss/train", gen_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Creativity_loss/train", cre_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("GAN_accuracy/train", gan_accuracy, global_step=epoch+1)
tensorboard_summary.add_scalar("Class_accuracy/train", class_accuracy, global_step=epoch+1)
tensorboard_summary.add_scalar("Creativity/train", creativity, global_step=epoch+1)
tensorboard_summary.flush()
if((epoch+1) % args.weight_modulus == 0):
epoch_str = str(epoch+1).zfill(PREPEND_ZEROS_WIDTH)
path = os.path.join(weights_folder, "epoch_" + epoch_str + ".pickle")
torch.save(model.state_dict(), path)
with open(results_file, "a", newline="") as o_stream:
writer = csv.writer(o_stream)
writer.writerow([epoch+1, lr, train_loss, train_acc, eval_loss, eval_acc])
# Sanity check just to make sure everything is gone
if(not args.no_tensorboard):
tensorboard_summary.flush()
return
if __name__ == "__main__":
main()
|
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import inspect
from time import time, perf_counter
from eggroll.utils.log_utils import get_logger
L = get_logger(filename='profile')
def _method_profile_logger(func):
def wrapper(*args, **kwargs):
start_wall_time = time()
start_cpu_time = perf_counter()
result = func(*args, **kwargs)
end_wall_time = time()
end_cpu_time = perf_counter()
code = func.__code__
try:
outerframes = inspect.getouterframes(inspect.currentframe(), 2)
real_caller = outerframes[1]
L.trace(f'{{"metric_type": "func_profile", '
f'"qualname": "{func.__qualname__}", '
f'"caller": "{real_caller.filename.rsplit('/', 1)[-1]}:{real_caller.lineno}", '
f'"cpu_time": {end_cpu_time - start_cpu_time}, '
f'"wall_time": {end_wall_time - start_wall_time}}}')
return result
except Exception as e:
L.trace(f'{{"metric_type": "func_profile", '
f'"qualname": "{func.__qualname__}", '
f'"caller": "unknown", '
f'"cpu_time": {end_cpu_time - start_cpu_time}, '
f'"wall_time": {end_wall_time - start_wall_time}}}')
return wrapper
| # Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import inspect
from time import time, perf_counter
from eggroll.utils.log_utils import get_logger
L = get_logger(filename='profile')
def _method_profile_logger(func):
def wrapper(*args, **kwargs):
start_wall_time = time()
start_cpu_time = perf_counter()
result = func(*args, **kwargs)
end_wall_time = time()
end_cpu_time = perf_counter()
code = func.__code__
try:
outerframes = inspect.getouterframes(inspect.currentframe(), 2)
real_caller = outerframes[1]
L.trace(f'{{"metric_type": "func_profile", '
f'"qualname": "{func.__qualname__}", '
f'"caller": "{real_caller.filename.rsplit("/", 1)[-1]}:{real_caller.lineno}", '
f'"cpu_time": {end_cpu_time - start_cpu_time}, '
f'"wall_time": {end_wall_time - start_wall_time}}}')
return result
except Exception as e:
L.trace(f'{{"metric_type": "func_profile", '
f'"qualname": "{func.__qualname__}", '
f'"caller": "unknown", '
f'"cpu_time": {end_cpu_time - start_cpu_time}, '
f'"wall_time": {end_wall_time - start_wall_time}}}')
return wrapper
|
import glob
import json
import os
import shutil
import subprocess
from .helpers import *
def constructor(*args, default_channel=True, no_rc=True, no_dry_run=False):
umamba = get_umamba()
cmd = [umamba, "constructor"] + [arg for arg in args if arg]
try:
res = subprocess.check_output(cmd)
if "--json" in args:
try:
j = json.loads(res)
return j
except json.decoder.JSONDecodeError as e:
print(f"Error when loading JSON output from {res}")
raise (e)
print(f"Error when executing '{" ".join(cmd)}'")
return res.decode()
except subprocess.CalledProcessError as e:
print(f"Error when executing '{" ".join(cmd)}'")
raise (e)
class TestInstall:
current_root_prefix = os.environ["MAMBA_ROOT_PREFIX"]
current_prefix = os.environ["CONDA_PREFIX"]
cache = os.path.join(current_root_prefix, "pkgs")
env_name = random_string()
root_prefix = os.path.expanduser(os.path.join("~", "tmproot" + random_string()))
prefix = os.path.join(root_prefix, "envs", env_name)
new_cache = os.path.join(root_prefix, "pkgs")
@classmethod
def setup_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.prefix
# speed-up the tests
os.environ["CONDA_PKGS_DIRS"] = TestInstall.new_cache
os.makedirs(TestInstall.new_cache, exist_ok=True)
root_pkgs = glob.glob(
os.path.join(TestInstall.current_root_prefix, "pkgs", "x*.tar.bz2")
)
urls = []
for pkg in root_pkgs:
shutil.copy(pkg, TestInstall.new_cache)
urls.append(
"http://testurl.com/conda-forge/linux-64/"
+ os.path.basename(pkg)
+ "#123412341234"
)
cls.pkgs = [os.path.basename(pkg) for pkg in root_pkgs]
with open(os.path.join(TestInstall.new_cache, "urls"), "w") as furls:
furls.write("\n".join(urls))
@classmethod
def teardown_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.current_root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.current_prefix
shutil.rmtree(TestInstall.root_prefix)
@classmethod
def teardown(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.prefix
def test_extract_pkgs(self):
constructor("--prefix", TestInstall.root_prefix, "--extract-conda-pkgs")
for pkg in self.pkgs:
extracted_pkg = os.path.join(
TestInstall.root_prefix, "pkgs", pkg.rsplit(".tar.bz2")[0]
)
with open(
os.path.join(extracted_pkg, "info", "repodata_record.json")
) as rr:
repodata_record = json.load(rr)
with open(os.path.join(extracted_pkg, "info", "index.json")) as ri:
index = json.load(ri)
assert repodata_record["fn"] == pkg
assert repodata_record["md5"] == "123412341234"
assert (
repodata_record["url"]
== "http://testurl.com/conda-forge/linux-64/" + pkg
)
assert repodata_record["depends"] == index["depends"]
| import glob
import json
import os
import shutil
import subprocess
from .helpers import *
def constructor(*args, default_channel=True, no_rc=True, no_dry_run=False):
umamba = get_umamba()
cmd = [umamba, "constructor"] + [arg for arg in args if arg]
try:
res = subprocess.check_output(cmd)
if "--json" in args:
try:
j = json.loads(res)
return j
except json.decoder.JSONDecodeError as e:
print(f"Error when loading JSON output from {res}")
raise (e)
print(f"Error when executing '{' '.join(cmd)}'")
return res.decode()
except subprocess.CalledProcessError as e:
print(f"Error when executing '{' '.join(cmd)}'")
raise (e)
class TestInstall:
current_root_prefix = os.environ["MAMBA_ROOT_PREFIX"]
current_prefix = os.environ["CONDA_PREFIX"]
cache = os.path.join(current_root_prefix, "pkgs")
env_name = random_string()
root_prefix = os.path.expanduser(os.path.join("~", "tmproot" + random_string()))
prefix = os.path.join(root_prefix, "envs", env_name)
new_cache = os.path.join(root_prefix, "pkgs")
@classmethod
def setup_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.prefix
# speed-up the tests
os.environ["CONDA_PKGS_DIRS"] = TestInstall.new_cache
os.makedirs(TestInstall.new_cache, exist_ok=True)
root_pkgs = glob.glob(
os.path.join(TestInstall.current_root_prefix, "pkgs", "x*.tar.bz2")
)
urls = []
for pkg in root_pkgs:
shutil.copy(pkg, TestInstall.new_cache)
urls.append(
"http://testurl.com/conda-forge/linux-64/"
+ os.path.basename(pkg)
+ "#123412341234"
)
cls.pkgs = [os.path.basename(pkg) for pkg in root_pkgs]
with open(os.path.join(TestInstall.new_cache, "urls"), "w") as furls:
furls.write("\n".join(urls))
@classmethod
def teardown_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.current_root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.current_prefix
shutil.rmtree(TestInstall.root_prefix)
@classmethod
def teardown(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.prefix
def test_extract_pkgs(self):
constructor("--prefix", TestInstall.root_prefix, "--extract-conda-pkgs")
for pkg in self.pkgs:
extracted_pkg = os.path.join(
TestInstall.root_prefix, "pkgs", pkg.rsplit(".tar.bz2")[0]
)
with open(
os.path.join(extracted_pkg, "info", "repodata_record.json")
) as rr:
repodata_record = json.load(rr)
with open(os.path.join(extracted_pkg, "info", "index.json")) as ri:
index = json.load(ri)
assert repodata_record["fn"] == pkg
assert repodata_record["md5"] == "123412341234"
assert (
repodata_record["url"]
== "http://testurl.com/conda-forge/linux-64/" + pkg
)
assert repodata_record["depends"] == index["depends"]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import os
import sys
def update_allure_feature_name(results_dir: str, prefix: str):
"""Make Allure JSON results unique by pre-pending a prefix to: name, historyId & uuid.
Use it when not all of the test results show up in the Allure report.
This is because tests from different workers can actually have the same: historyId & uuid values.
You can use e.g. browser name as the prefix.
"""
results_dir_path = os.path.join(".", results_dir)
update_count = 0
for filename in os.listdir(results_dir_path):
if filename.endswith(".json"):
result_file = os.path.join(results_dir_path, filename)
with open(result_file, "r") as json_file:
report = json.loads(json_file.read())
report["name"] = f"{prefix} - {report["name"]}"
report["historyId"] = f"{prefix}{report["historyId"]}"
report["uuid"] = f"{prefix}{report["uuid"]}"
with open(result_file, "w") as json_file:
json.dump(report, json_file, indent=2, ensure_ascii=False)
update_count += 1
print(f"Updated {update_count} JSON reports")
if __name__ == "__main__":
update_allure_feature_name(results_dir=sys.argv[1], prefix=sys.argv[2])
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import os
import sys
def update_allure_feature_name(results_dir: str, prefix: str):
"""Make Allure JSON results unique by pre-pending a prefix to: name, historyId & uuid.
Use it when not all of the test results show up in the Allure report.
This is because tests from different workers can actually have the same: historyId & uuid values.
You can use e.g. browser name as the prefix.
"""
results_dir_path = os.path.join(".", results_dir)
update_count = 0
for filename in os.listdir(results_dir_path):
if filename.endswith(".json"):
result_file = os.path.join(results_dir_path, filename)
with open(result_file, "r") as json_file:
report = json.loads(json_file.read())
report["name"] = f"{prefix} - {report['name']}"
report["historyId"] = f"{prefix}{report['historyId']}"
report["uuid"] = f"{prefix}{report['uuid']}"
with open(result_file, "w") as json_file:
json.dump(report, json_file, indent=2, ensure_ascii=False)
update_count += 1
print(f"Updated {update_count} JSON reports")
if __name__ == "__main__":
update_allure_feature_name(results_dir=sys.argv[1], prefix=sys.argv[2])
|
from typing import Dict, Iterator, List, Optional, Tuple, Union
from ..constant.util import Amount, ItemPointer, Number
from .item_wrapper import item_type
from .other_wrapper import *
__all__ = [
'ItemType', 'Item', 'Empty',
'Accessory', 'EnchantedBook', 'ReforgeStone', 'TravelScroll',
'Bow', 'Sword',
'Axe', 'Pickaxe', 'Drill', 'Hoe', 'FishingRod', 'Armor', 'Pet', 'Minion',
'Resource',
'Crop', 'Mineral', 'Log', 'Mob',
'Recipe', 'RecipeGroup', 'Collection',
'load_item',
]
class ItemType:
pass
@item_type
class Item(ItemType):
name: str
count: int = 1
# common | uncommon | rare | epic | legendary |
# mythic | supreme | special | very_special
rarity: str = 'common'
abilities: List[str] = []
@item_type
class Empty(ItemType):
def __repr__(self):
return '{}'
@item_type
class Accessory(ItemType):
name: str
rarity: str = 'common'
modifier: Optional[str] = None
abilities: List[str] = []
@item_type
class Armor(ItemType):
name: str
rarity: str
# helmet | chestplate | leggings | boots
part: str
strength: int = 0
crit_chance: int = 0
crit_damage: int = 0
health: int = 0
defense: int = 0
intelligence: int = 0
speed: int = 0
magic_find: int = 0
mining_speed: int = 0
mining_fortune: int = 0
true_defense: int = 0
ferocity: int = 0
sea_creature_chance: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
hot_potato: int = 0
stars: Optional[int] = None
combat_skill_req: Optional[int] = None
dungeon_skill_req: Optional[int] = None
dungeon_completion_req: Optional[int] = None
abilities: List[str] = []
@item_type
class Axe(ItemType):
name: str
rarity: str
tool_speed: int
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
abilities: List[str] = []
@item_type
class Bow(ItemType):
name: str
rarity: str
damage: int
count: int = 1
strength: int = 0
crit_chance: int = 0
crit_damage: int = 0
attack_speed: int = 0
intelligence: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
hot_potato: int = 0
stars: Optional[int] = None
combat_skill_req: Optional[int] = None
dungeon_skill_req: Optional[int] = None
dungeon_completion_req: Optional[int] = None
abilities: List[str] = []
@item_type
class Drill(ItemType):
name: str
rarity: str
breaking_power: int
mining_speed: int
mining_fortune: int = 0
damage: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
abilities: List[str] = []
@enchanted_book_type
@item_type
class EnchantedBook(ItemType):
enchantments: Dict[str, int] = {}
name: str = 'enchanted_book'
rarity: str = 'common'
@item_type
class FishingRod(ItemType):
name: str
rarity: str
damage: int = 0
strength: int = 0
ferocity: int = 0
fishing_speed: int = 0
sea_creature_chance: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
hot_potato: int = 0
stars: Optional[int] = None
fishing_skill_req: Optional[int] = None
abilities: List[str] = []
@item_type
class Hoe(ItemType):
name: str
rarity: str
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
@item_type
class Minion(ItemType):
name: str
tier: str
cooldown: Number
slots: int
@item_type
class Pet(ItemType):
name: str
rarity: str
category: str = None
exp: float = 0.0
candy_used: int = 0
active: bool = False
health: int = 0
defense: int = 0
speed: int = 0
true_defense: int = 0
intelligence: int = 0
strength: int = 0
crit_chance: int = 0
crit_damage: int = 0
damage: int = 0
magic_find: int = 0
attack_speed: int = 0
ferocity: int = 0
sea_creature_chance: int = 0
abilities: List = []
@item_type
class Pickaxe(ItemType):
name: str
rarity: str
breaking_power: int
mining_speed: int
damage: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
abilities: List[str] = []
@item_type
class ReforgeStone(ItemType):
name: str
modifier: Optional[str] = None
category: Optional[str] = None
rarity: str = 'common'
cost: Tuple[int] = (0, 0, 0, 0, 0, 0)
mining_skill_req: Optional[int] = None
@item_type
class Sword(ItemType):
name: str
rarity: str
count: int = 1
damage: int = 0
strength: int = 0
crit_chance: int = 0
crit_damage: int = 0
attack_speed: int = 0
defense: int = 0
intelligence: int = 0
true_defense: int = 0
ferocity: int = 0
speed: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
hot_potato: int = 0
stars: Optional[int] = None
combat_skill_req: Optional[int] = None
dungeon_skill_req: Optional[int] = None
dungeon_completion_req: Optional[int] = None
abilities: List[str] = []
@item_type
class TravelScroll(ItemType):
name: str
island: str
zone: Optional[str] = None
rarity: str = 'rare'
OBJECT_NAMES = {
'item': Item,
'empty': Empty,
'accessory': Accessory,
'armor': Armor,
'axe': Axe,
'bow': Bow,
'drill': Drill,
'enchanted_book': EnchantedBook,
'fishing_rod': FishingRod,
'hoe': Hoe,
'minion': Minion,
'pet': Pet,
'pickaxe': Pickaxe,
'reforge_stone': ReforgeStone,
'sword': Sword,
'travel_scroll': TravelScroll,
}
class Resource:
def type(self):
return type(self).__name__
@resource_type
class Crop(Resource):
name: str
amount: int = 1
farming_exp: Number = 1
@resource_type
class Log(Resource):
name: str
hardness: int = 2
foraging_exp: Number = 1
@resource_type
class Mineral(Resource):
name: str
drop: str
amount: int = 1
breaking_power: int = 0
hardness: Number = 2
exp: Amount = 1
mining_exp: Number = 1
mithril_powder: Amount = 0
@mob_type
class Mob:
name: str
level: int
health: int
defense: int = 0
damage: int = 0
true_damage: int = 0
coins: int = 0
exp: int = 0
farming_exp: int = 0
combat_exp: int = 0
fishing_exp: int = 0
drops: List[Tuple[ItemPointer, Amount, str, Number]] = []
@recipe_type
class Recipe:
name: str
category: str
ingredients: List[ItemPointer]
result: ItemPointer
collection_req: Optional[Tuple[str, int]] = None
# slayer_req: Optional[Tuple[str, int]] = None
@recipe_group_type
class RecipeGroup:
name: str
category: str
recipes: List[str]
collection_req: Optional[Tuple[str, int]] = None
# slayer_req: Optional[Tuple[str, int]] = None
@collection_type
class Collection:
name: str
category: str
levels: List[Tuple[int, Union[str, Tuple[str], Number]]]
def __iter__(self, /) -> Iterator:
return iter(self.levels)
def load_item(obj, /):
if isinstance(obj, ItemType):
return obj
elif 'type' not in obj:
return Empty()
for name, cls in OBJECT_NAMES.items():
if obj['type'] == name:
return cls.from_obj(obj)
else:
raise ValueError(f"invalid item obj type: {obj["type"]!r}")
| from typing import Dict, Iterator, List, Optional, Tuple, Union
from ..constant.util import Amount, ItemPointer, Number
from .item_wrapper import item_type
from .other_wrapper import *
__all__ = [
'ItemType', 'Item', 'Empty',
'Accessory', 'EnchantedBook', 'ReforgeStone', 'TravelScroll',
'Bow', 'Sword',
'Axe', 'Pickaxe', 'Drill', 'Hoe', 'FishingRod', 'Armor', 'Pet', 'Minion',
'Resource',
'Crop', 'Mineral', 'Log', 'Mob',
'Recipe', 'RecipeGroup', 'Collection',
'load_item',
]
class ItemType:
pass
@item_type
class Item(ItemType):
name: str
count: int = 1
# common | uncommon | rare | epic | legendary |
# mythic | supreme | special | very_special
rarity: str = 'common'
abilities: List[str] = []
@item_type
class Empty(ItemType):
def __repr__(self):
return '{}'
@item_type
class Accessory(ItemType):
name: str
rarity: str = 'common'
modifier: Optional[str] = None
abilities: List[str] = []
@item_type
class Armor(ItemType):
name: str
rarity: str
# helmet | chestplate | leggings | boots
part: str
strength: int = 0
crit_chance: int = 0
crit_damage: int = 0
health: int = 0
defense: int = 0
intelligence: int = 0
speed: int = 0
magic_find: int = 0
mining_speed: int = 0
mining_fortune: int = 0
true_defense: int = 0
ferocity: int = 0
sea_creature_chance: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
hot_potato: int = 0
stars: Optional[int] = None
combat_skill_req: Optional[int] = None
dungeon_skill_req: Optional[int] = None
dungeon_completion_req: Optional[int] = None
abilities: List[str] = []
@item_type
class Axe(ItemType):
name: str
rarity: str
tool_speed: int
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
abilities: List[str] = []
@item_type
class Bow(ItemType):
name: str
rarity: str
damage: int
count: int = 1
strength: int = 0
crit_chance: int = 0
crit_damage: int = 0
attack_speed: int = 0
intelligence: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
hot_potato: int = 0
stars: Optional[int] = None
combat_skill_req: Optional[int] = None
dungeon_skill_req: Optional[int] = None
dungeon_completion_req: Optional[int] = None
abilities: List[str] = []
@item_type
class Drill(ItemType):
name: str
rarity: str
breaking_power: int
mining_speed: int
mining_fortune: int = 0
damage: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
abilities: List[str] = []
@enchanted_book_type
@item_type
class EnchantedBook(ItemType):
enchantments: Dict[str, int] = {}
name: str = 'enchanted_book'
rarity: str = 'common'
@item_type
class FishingRod(ItemType):
name: str
rarity: str
damage: int = 0
strength: int = 0
ferocity: int = 0
fishing_speed: int = 0
sea_creature_chance: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
hot_potato: int = 0
stars: Optional[int] = None
fishing_skill_req: Optional[int] = None
abilities: List[str] = []
@item_type
class Hoe(ItemType):
name: str
rarity: str
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
@item_type
class Minion(ItemType):
name: str
tier: str
cooldown: Number
slots: int
@item_type
class Pet(ItemType):
name: str
rarity: str
category: str = None
exp: float = 0.0
candy_used: int = 0
active: bool = False
health: int = 0
defense: int = 0
speed: int = 0
true_defense: int = 0
intelligence: int = 0
strength: int = 0
crit_chance: int = 0
crit_damage: int = 0
damage: int = 0
magic_find: int = 0
attack_speed: int = 0
ferocity: int = 0
sea_creature_chance: int = 0
abilities: List = []
@item_type
class Pickaxe(ItemType):
name: str
rarity: str
breaking_power: int
mining_speed: int
damage: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
abilities: List[str] = []
@item_type
class ReforgeStone(ItemType):
name: str
modifier: Optional[str] = None
category: Optional[str] = None
rarity: str = 'common'
cost: Tuple[int] = (0, 0, 0, 0, 0, 0)
mining_skill_req: Optional[int] = None
@item_type
class Sword(ItemType):
name: str
rarity: str
count: int = 1
damage: int = 0
strength: int = 0
crit_chance: int = 0
crit_damage: int = 0
attack_speed: int = 0
defense: int = 0
intelligence: int = 0
true_defense: int = 0
ferocity: int = 0
speed: int = 0
modifier: Optional[str] = None
enchantments: Dict[str, int] = {}
hot_potato: int = 0
stars: Optional[int] = None
combat_skill_req: Optional[int] = None
dungeon_skill_req: Optional[int] = None
dungeon_completion_req: Optional[int] = None
abilities: List[str] = []
@item_type
class TravelScroll(ItemType):
name: str
island: str
zone: Optional[str] = None
rarity: str = 'rare'
OBJECT_NAMES = {
'item': Item,
'empty': Empty,
'accessory': Accessory,
'armor': Armor,
'axe': Axe,
'bow': Bow,
'drill': Drill,
'enchanted_book': EnchantedBook,
'fishing_rod': FishingRod,
'hoe': Hoe,
'minion': Minion,
'pet': Pet,
'pickaxe': Pickaxe,
'reforge_stone': ReforgeStone,
'sword': Sword,
'travel_scroll': TravelScroll,
}
class Resource:
def type(self):
return type(self).__name__
@resource_type
class Crop(Resource):
name: str
amount: int = 1
farming_exp: Number = 1
@resource_type
class Log(Resource):
name: str
hardness: int = 2
foraging_exp: Number = 1
@resource_type
class Mineral(Resource):
name: str
drop: str
amount: int = 1
breaking_power: int = 0
hardness: Number = 2
exp: Amount = 1
mining_exp: Number = 1
mithril_powder: Amount = 0
@mob_type
class Mob:
name: str
level: int
health: int
defense: int = 0
damage: int = 0
true_damage: int = 0
coins: int = 0
exp: int = 0
farming_exp: int = 0
combat_exp: int = 0
fishing_exp: int = 0
drops: List[Tuple[ItemPointer, Amount, str, Number]] = []
@recipe_type
class Recipe:
name: str
category: str
ingredients: List[ItemPointer]
result: ItemPointer
collection_req: Optional[Tuple[str, int]] = None
# slayer_req: Optional[Tuple[str, int]] = None
@recipe_group_type
class RecipeGroup:
name: str
category: str
recipes: List[str]
collection_req: Optional[Tuple[str, int]] = None
# slayer_req: Optional[Tuple[str, int]] = None
@collection_type
class Collection:
name: str
category: str
levels: List[Tuple[int, Union[str, Tuple[str], Number]]]
def __iter__(self, /) -> Iterator:
return iter(self.levels)
def load_item(obj, /):
if isinstance(obj, ItemType):
return obj
elif 'type' not in obj:
return Empty()
for name, cls in OBJECT_NAMES.items():
if obj['type'] == name:
return cls.from_obj(obj)
else:
raise ValueError(f"invalid item obj type: {obj['type']!r}")
|
import click
import subprocess
import os
@click.group()
def cli():
...
@cli.command()
def deploy():
click.echo("Running chalice deploy")
output = subprocess.check_output(f"source {os.environ["VIRTUAL_ENV"]}/bin/activate && chalice deploy",shell=True)
click.echo(output)
click.echo(os.environ["VIRTUAL_ENV"])
| import click
import subprocess
import os
@click.group()
def cli():
...
@cli.command()
def deploy():
click.echo("Running chalice deploy")
output = subprocess.check_output(f"source {os.environ['VIRTUAL_ENV']}/bin/activate && chalice deploy",shell=True)
click.echo(output)
click.echo(os.environ["VIRTUAL_ENV"])
|
from typing import *
T = TypeVar('T')
MAGIC_ATTR = "__cxxpy_s13s__"
def template(cls: T) -> T:
s13s = {}
setattr(cls, MAGIC_ATTR, s13s)
def __class_getitem__(args):
if not isinstance(args, tuple):
args = (args,)
if args not in s13s:
name = cls.__name__ + ", ".join(map(str, args))
class s12n(cls):
...
s12n.__name__ = name
s12n.__qualname__ = name
s13s[args] = s12n
return s13s[args]
cls.__class_getitem__ = __class_getitem__
return cls
NOCOPY = ("__dict__", "__doc__", "__module__", "__weakref__")
def implement(actual):
def decorator(cls: Type[T]) -> None:
for k, v in cls.__dict__.items():
if k not in NOCOPY:
setattr(actual, k, v)
return decorator
@template
class Ops(Generic[T]):
def add(a: T, b: T) -> T:
...
@implement(Ops[int])
class _:
def add(a: int, b: int) -> int:
return a + b
@implement(Ops[str])
class _:
def add(a: str, b: str) -> str:
return f"{a} {b}"
print(f"{Ops[int].add(1, 2) = }")
print(f"{Ops[str].add("hello", "world") = }")
| from typing import *
T = TypeVar('T')
MAGIC_ATTR = "__cxxpy_s13s__"
def template(cls: T) -> T:
s13s = {}
setattr(cls, MAGIC_ATTR, s13s)
def __class_getitem__(args):
if not isinstance(args, tuple):
args = (args,)
if args not in s13s:
name = cls.__name__ + ", ".join(map(str, args))
class s12n(cls):
...
s12n.__name__ = name
s12n.__qualname__ = name
s13s[args] = s12n
return s13s[args]
cls.__class_getitem__ = __class_getitem__
return cls
NOCOPY = ("__dict__", "__doc__", "__module__", "__weakref__")
def implement(actual):
def decorator(cls: Type[T]) -> None:
for k, v in cls.__dict__.items():
if k not in NOCOPY:
setattr(actual, k, v)
return decorator
@template
class Ops(Generic[T]):
def add(a: T, b: T) -> T:
...
@implement(Ops[int])
class _:
def add(a: int, b: int) -> int:
return a + b
@implement(Ops[str])
class _:
def add(a: str, b: str) -> str:
return f"{a} {b}"
print(f"{Ops[int].add(1, 2) = }")
print(f"{Ops[str].add('hello', 'world') = }")
|
#crie um tupla com o nome dos produtos, seguidos do preço.
#mostre uma listagem de preços, de forma tabular.
lista = ('Lápis', 1.5, 'Borracha', 2.5, 'Caderno', 10.8,
'Estojo', 20, 'Mochila', 100.5)
print('\033[31m--'*20)
print(f'{'LISTAGEM DE PREÇOS':^40}')
print('--'*20, '\033[m')
for i in range(0, len(lista), 2):
print(f'{lista[i]:.<30}R${lista[i+1]:>5.2f}')
print('\033[31m--\033[m'*20)
''' Formatação:
print(f'{'LISTAGEM DE PREÇOS':^40}')
centralizado = {elemento:^quantidade}
à direita = {:<quantidade} > preenche com espaço
à direita = {:.<quantidade} > preenche com ponto
à esquerda = {:>quantidade} > preenche com espaço
à esquerda = {:->quantidade} > preenche com -
'''
| #crie um tupla com o nome dos produtos, seguidos do preço.
#mostre uma listagem de preços, de forma tabular.
lista = ('Lápis', 1.5, 'Borracha', 2.5, 'Caderno', 10.8,
'Estojo', 20, 'Mochila', 100.5)
print('\033[31m--'*20)
print(f'{"LISTAGEM DE PREÇOS":^40}')
print('--'*20, '\033[m')
for i in range(0, len(lista), 2):
print(f'{lista[i]:.<30}R${lista[i+1]:>5.2f}')
print('\033[31m--\033[m'*20)
''' Formatação:
print(f'{"LISTAGEM DE PREÇOS":^40}')
centralizado = {elemento:^quantidade}
à direita = {:<quantidade} > preenche com espaço
à direita = {:.<quantidade} > preenche com ponto
à esquerda = {:>quantidade} > preenche com espaço
à esquerda = {:->quantidade} > preenche com -
'''
|
"""
A module that contains utility functions to load the 'classical' workspace configuration.
This configuration may have three meaningful files:
.remote (required) - information about the connection options
.remoteindex (optional) - information about which connection from options above to use
.remoteignore (optional) - information about files that should be ignore when syncing files
"""
import os
import re
from collections import defaultdict
from dataclasses import asdict
from pathlib import Path
from typing import Dict, List, Tuple
from remote.exceptions import ConfigurationError
from . import ConfigurationMedium, RemoteConfig, SyncRules, WorkspaceConfig
from .shared import DEFAULT_REMOTE_ROOT, hash_path
CONFIG_FILE_NAME = ".remote"
INDEX_FILE_NAME = ".remoteindex"
IGNORE_FILE_NAME = ".remoteignore"
IGNORE_SECTION_REGEX = re.compile(r"^(push|pull|both)\s*:$")
BASE_IGNORES = (CONFIG_FILE_NAME, INDEX_FILE_NAME, IGNORE_FILE_NAME)
DEFAULT_SHELL = "sh"
DEFAULT_SHELL_OPTIONS = ""
def _extract_shell_info(line: str, env_vars: List[str]) -> Tuple[str, str]:
if not env_vars:
return DEFAULT_SHELL, DEFAULT_SHELL_OPTIONS
vars_string = env_vars[0]
env = {}
items = vars_string.split()
index = 0
while index < len(items):
key, value = items[index].split("=")
if value.startswith("'") or value.startswith('"'):
control_character = value[0]
while index < len(items) - 1:
if value[-1] == control_character:
break
index += 1
value += " " + items[index]
if not value[-1] == control_character:
raise ConfigurationError(f"Config line {line} is corrupted. Cannot parse end {key}={value}")
env[key] = value.strip("\"'")
index += 1
print(env)
# TODO: these shell types are not used in new implementation, need to remove them
shell = env.pop("RSHELL", DEFAULT_SHELL)
shell_options = env.pop("RSHELL_OPTS", DEFAULT_SHELL_OPTIONS)
if env:
raise ConfigurationError(
f"Config line {line} contains unexpected env variables: {env}. Only RSHELL and RSHELL_OPTS can be used"
)
return shell, shell_options
def parse_config_line(line: str) -> RemoteConfig:
# The line should look like this:
# sdas-ld2:.remotes/814f27f15f4e7a0842cada353dfc765a RSHELL=zsh
entry, *env_items = line.split(maxsplit=1)
shell, shell_options = _extract_shell_info(line, env_items)
parts = entry.split(":")
if len(parts) != 2:
raise ConfigurationError(
f"The configuration string is malformed: {parts}. Please use host-name:remote_dir format"
)
host, directory = parts
return RemoteConfig(host=host, directory=Path(directory), shell=shell, shell_options=shell_options)
def load_configurations(workspace_root: Path) -> List[RemoteConfig]:
config_file = workspace_root / CONFIG_FILE_NAME
configurations = []
for line in config_file.read_text().splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
configurations.append(parse_config_line(line))
return configurations
def load_default_configuration_num(workspace_root: Path) -> int:
# If REMOTE_HOST_INDEX is set, that overrides settings in .remoteindex
env_index = os.environ.get("REMOTE_HOST_INDEX")
if env_index:
try:
return int(env_index)
except ValueError:
raise ConfigurationError(
f"REMOTE_HOST_INDEX env variable contains symbols other than numbers: '{env_index}'. "
"Please set the coorect index value to continue"
)
index_file = workspace_root / INDEX_FILE_NAME
if not index_file.exists():
return 0
# Configuration uses 1-base index and we need to have 0-based
text = index_file.read_text().strip()
try:
return int(text) - 1
except ValueError:
raise ConfigurationError(
f"File {index_file} contains symbols other than numbers: '{text}'. "
"Please remove it or replace the value to continue"
)
def _postprocess(ignores):
pull = ignores.pop("pull", [])
push = ignores.pop("push", [])
both = ignores.pop("both", [])
if ignores:
raise ConfigurationError(
f"{IGNORE_FILE_NAME} file has unexpected sections: {", ".join(ignores.keys())}. Please remove them"
)
return SyncRules(pull=pull, push=push, both=both)
def load_ignores(workspace_root: Path) -> SyncRules:
ignores: Dict[str, List[str]] = defaultdict(list)
ignores["both"].extend(BASE_IGNORES)
ignore_file = workspace_root / IGNORE_FILE_NAME
if not ignore_file.exists():
return _postprocess(ignores)
active_section = "both"
is_new_format = None
for line in ignore_file.read_text().splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
matcher = IGNORE_SECTION_REGEX.match(line)
if matcher is None:
if is_new_format is None:
is_new_format = False
ignores[active_section].append(line)
else:
if is_new_format is None:
is_new_format = True
elif not is_new_format:
raise ConfigurationError(
f"Few ignore patters were listed in {IGNORE_FILE_NAME} before the first section {matcher.group(1)} appeared. "
"Please list all ignored files after a section declaration if you use new ignore format"
)
active_section = matcher.group(1)
return _postprocess(ignores)
def save_general_config(config_file: Path, configurations: List[RemoteConfig]):
with config_file.open("w") as f:
for item in configurations:
f.write(f"{item.host}:{item.directory}")
if item.shell != "sh":
f.write(f" RSHELL={item.shell}")
if item.shell_options:
f.write(f" RSHELL_OPTS='{item.shell_options}'")
f.write("\n")
def save_ignores(config_file: Path, ignores: SyncRules):
ignores.both.extend(BASE_IGNORES)
ignores.trim()
if ignores.is_empty():
if config_file.exists():
config_file.unlink()
return
with config_file.open("w") as f:
for key, value in asdict(ignores).items():
f.write(f"{key}:\n")
for item in value:
f.write(f"{item}\n")
def save_index(config_file: Path, index: int):
if index == 0:
# We delete file when index is default
if config_file.exists():
config_file.unlink()
else:
config_file.write_text(f"{index + 1}\n")
class ClassicConfigurationMedium(ConfigurationMedium):
"""A medium class that knows how to load and save the 'classical' workspace configuration.
This configuration may have three meaningful files:
.remote (required) - information about the connection options
.remoteindex (optional) - information about which connection from options above to use
.remoteignore (optional) - information about files that should be ignore when syncing files
"""
def load_config(self, workspace_root: Path) -> WorkspaceConfig:
configurations = load_configurations(workspace_root)
configuration_index = load_default_configuration_num(workspace_root)
if configuration_index > len(configurations) - 1:
raise ConfigurationError(
f"Configuration #{configuration_index + 1} requested but there are only {len(configurations)} declared"
)
ignores = load_ignores(workspace_root)
return WorkspaceConfig(
root=workspace_root,
configurations=configurations,
default_configuration=configuration_index,
ignores=ignores,
includes=SyncRules.new(),
)
def save_config(self, config: WorkspaceConfig) -> None:
save_general_config(config.root / CONFIG_FILE_NAME, config.configurations)
save_ignores(config.root / IGNORE_FILE_NAME, config.ignores)
save_index(config.root / INDEX_FILE_NAME, config.default_configuration)
def is_workspace_root(self, path: Path) -> bool:
return (path / CONFIG_FILE_NAME).exists()
def generate_remote_directory(self, config: WorkspaceConfig) -> Path:
md5 = hash_path(config.root)
return Path(f"{DEFAULT_REMOTE_ROOT}/{config.root.name}_{md5}")
| """
A module that contains utility functions to load the 'classical' workspace configuration.
This configuration may have three meaningful files:
.remote (required) - information about the connection options
.remoteindex (optional) - information about which connection from options above to use
.remoteignore (optional) - information about files that should be ignore when syncing files
"""
import os
import re
from collections import defaultdict
from dataclasses import asdict
from pathlib import Path
from typing import Dict, List, Tuple
from remote.exceptions import ConfigurationError
from . import ConfigurationMedium, RemoteConfig, SyncRules, WorkspaceConfig
from .shared import DEFAULT_REMOTE_ROOT, hash_path
CONFIG_FILE_NAME = ".remote"
INDEX_FILE_NAME = ".remoteindex"
IGNORE_FILE_NAME = ".remoteignore"
IGNORE_SECTION_REGEX = re.compile(r"^(push|pull|both)\s*:$")
BASE_IGNORES = (CONFIG_FILE_NAME, INDEX_FILE_NAME, IGNORE_FILE_NAME)
DEFAULT_SHELL = "sh"
DEFAULT_SHELL_OPTIONS = ""
def _extract_shell_info(line: str, env_vars: List[str]) -> Tuple[str, str]:
if not env_vars:
return DEFAULT_SHELL, DEFAULT_SHELL_OPTIONS
vars_string = env_vars[0]
env = {}
items = vars_string.split()
index = 0
while index < len(items):
key, value = items[index].split("=")
if value.startswith("'") or value.startswith('"'):
control_character = value[0]
while index < len(items) - 1:
if value[-1] == control_character:
break
index += 1
value += " " + items[index]
if not value[-1] == control_character:
raise ConfigurationError(f"Config line {line} is corrupted. Cannot parse end {key}={value}")
env[key] = value.strip("\"'")
index += 1
print(env)
# TODO: these shell types are not used in new implementation, need to remove them
shell = env.pop("RSHELL", DEFAULT_SHELL)
shell_options = env.pop("RSHELL_OPTS", DEFAULT_SHELL_OPTIONS)
if env:
raise ConfigurationError(
f"Config line {line} contains unexpected env variables: {env}. Only RSHELL and RSHELL_OPTS can be used"
)
return shell, shell_options
def parse_config_line(line: str) -> RemoteConfig:
# The line should look like this:
# sdas-ld2:.remotes/814f27f15f4e7a0842cada353dfc765a RSHELL=zsh
entry, *env_items = line.split(maxsplit=1)
shell, shell_options = _extract_shell_info(line, env_items)
parts = entry.split(":")
if len(parts) != 2:
raise ConfigurationError(
f"The configuration string is malformed: {parts}. Please use host-name:remote_dir format"
)
host, directory = parts
return RemoteConfig(host=host, directory=Path(directory), shell=shell, shell_options=shell_options)
def load_configurations(workspace_root: Path) -> List[RemoteConfig]:
config_file = workspace_root / CONFIG_FILE_NAME
configurations = []
for line in config_file.read_text().splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
configurations.append(parse_config_line(line))
return configurations
def load_default_configuration_num(workspace_root: Path) -> int:
# If REMOTE_HOST_INDEX is set, that overrides settings in .remoteindex
env_index = os.environ.get("REMOTE_HOST_INDEX")
if env_index:
try:
return int(env_index)
except ValueError:
raise ConfigurationError(
f"REMOTE_HOST_INDEX env variable contains symbols other than numbers: '{env_index}'. "
"Please set the coorect index value to continue"
)
index_file = workspace_root / INDEX_FILE_NAME
if not index_file.exists():
return 0
# Configuration uses 1-base index and we need to have 0-based
text = index_file.read_text().strip()
try:
return int(text) - 1
except ValueError:
raise ConfigurationError(
f"File {index_file} contains symbols other than numbers: '{text}'. "
"Please remove it or replace the value to continue"
)
def _postprocess(ignores):
pull = ignores.pop("pull", [])
push = ignores.pop("push", [])
both = ignores.pop("both", [])
if ignores:
raise ConfigurationError(
f"{IGNORE_FILE_NAME} file has unexpected sections: {', '.join(ignores.keys())}. Please remove them"
)
return SyncRules(pull=pull, push=push, both=both)
def load_ignores(workspace_root: Path) -> SyncRules:
ignores: Dict[str, List[str]] = defaultdict(list)
ignores["both"].extend(BASE_IGNORES)
ignore_file = workspace_root / IGNORE_FILE_NAME
if not ignore_file.exists():
return _postprocess(ignores)
active_section = "both"
is_new_format = None
for line in ignore_file.read_text().splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
matcher = IGNORE_SECTION_REGEX.match(line)
if matcher is None:
if is_new_format is None:
is_new_format = False
ignores[active_section].append(line)
else:
if is_new_format is None:
is_new_format = True
elif not is_new_format:
raise ConfigurationError(
f"Few ignore patters were listed in {IGNORE_FILE_NAME} before the first section {matcher.group(1)} appeared. "
"Please list all ignored files after a section declaration if you use new ignore format"
)
active_section = matcher.group(1)
return _postprocess(ignores)
def save_general_config(config_file: Path, configurations: List[RemoteConfig]):
with config_file.open("w") as f:
for item in configurations:
f.write(f"{item.host}:{item.directory}")
if item.shell != "sh":
f.write(f" RSHELL={item.shell}")
if item.shell_options:
f.write(f" RSHELL_OPTS='{item.shell_options}'")
f.write("\n")
def save_ignores(config_file: Path, ignores: SyncRules):
ignores.both.extend(BASE_IGNORES)
ignores.trim()
if ignores.is_empty():
if config_file.exists():
config_file.unlink()
return
with config_file.open("w") as f:
for key, value in asdict(ignores).items():
f.write(f"{key}:\n")
for item in value:
f.write(f"{item}\n")
def save_index(config_file: Path, index: int):
if index == 0:
# We delete file when index is default
if config_file.exists():
config_file.unlink()
else:
config_file.write_text(f"{index + 1}\n")
class ClassicConfigurationMedium(ConfigurationMedium):
"""A medium class that knows how to load and save the 'classical' workspace configuration.
This configuration may have three meaningful files:
.remote (required) - information about the connection options
.remoteindex (optional) - information about which connection from options above to use
.remoteignore (optional) - information about files that should be ignore when syncing files
"""
def load_config(self, workspace_root: Path) -> WorkspaceConfig:
configurations = load_configurations(workspace_root)
configuration_index = load_default_configuration_num(workspace_root)
if configuration_index > len(configurations) - 1:
raise ConfigurationError(
f"Configuration #{configuration_index + 1} requested but there are only {len(configurations)} declared"
)
ignores = load_ignores(workspace_root)
return WorkspaceConfig(
root=workspace_root,
configurations=configurations,
default_configuration=configuration_index,
ignores=ignores,
includes=SyncRules.new(),
)
def save_config(self, config: WorkspaceConfig) -> None:
save_general_config(config.root / CONFIG_FILE_NAME, config.configurations)
save_ignores(config.root / IGNORE_FILE_NAME, config.ignores)
save_index(config.root / INDEX_FILE_NAME, config.default_configuration)
def is_workspace_root(self, path: Path) -> bool:
return (path / CONFIG_FILE_NAME).exists()
def generate_remote_directory(self, config: WorkspaceConfig) -> Path:
md5 = hash_path(config.root)
return Path(f"{DEFAULT_REMOTE_ROOT}/{config.root.name}_{md5}")
|
import logging
from pyrogram.errors import InputUserDeactivated, UserNotParticipant, FloodWait, UserIsBlocked, PeerIdInvalid
from info import AUTH_CHANNEL, LONG_IMDB_DESCRIPTION, MAX_LIST_ELM
from imdb import IMDb
import asyncio
from pyrogram.types import Message
from typing import Union
import re
import os
from datetime import datetime
from typing import List
from pyrogram.types import InlineKeyboardButton
from database.users_chats_db import db
from bs4 import BeautifulSoup
import requests
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
BTN_URL_REGEX = re.compile(
r"(\[([^\[]+?)\]\((buttonurl|buttonalert):(?:/{0,2})(.+?)(:same)?\))"
)
imdb = IMDb()
BANNED = {}
SMART_OPEN = '“'
SMART_CLOSE = '”'
START_CHAR = ('\'', '"', SMART_OPEN)
# temp db for banned
class temp(object):
BANNED_USERS = []
BANNED_CHATS = []
ME = None
CURRENT=int(os.environ.get("SKIP", 2))
CANCEL = False
MELCOW = {}
U_NAME = None
B_NAME = None
async def is_subscribed(bot, query):
try:
user = await bot.get_chat_member(AUTH_CHANNEL, query.from_user.id)
except UserNotParticipant:
pass
except Exception as e:
logger.exception(e)
else:
if user.status != 'kicked':
return True
return False
async def get_poster(query, bulk=False, id=False, file=None):
if not id:
# https://t.me/GetTGLink/4183
query = (query.strip()).lower()
title = query
year = re.findall(r'[1-2]\d{3}$', query, re.IGNORECASE)
if year:
year = list_to_str(year[:1])
title = (query.replace(year, "")).strip()
elif file is not None:
year = re.findall(r'[1-2]\d{3}', file, re.IGNORECASE)
if year:
year = list_to_str(year[:1])
else:
year = None
movieid = imdb.search_movie(title.lower(), results=10)
if not movieid:
return None
if year:
filtered=list(filter(lambda k: str(k.get('year')) == str(year), movieid))
if not filtered:
filtered = movieid
else:
filtered = movieid
movieid=list(filter(lambda k: k.get('kind') in ['movie', 'tv series'], filtered))
if not movieid:
movieid = filtered
if bulk:
return movieid
movieid = movieid[0].movieID
else:
movieid = int(query)
movie = imdb.get_movie(movieid)
if movie.get("original air date"):
date = movie["original air date"]
elif movie.get("year"):
date = movie.get("year")
else:
date = "N/A"
plot = ""
if not LONG_IMDB_DESCRIPTION:
plot = movie.get('plot')
if plot and len(plot) > 0:
plot = plot[0]
else:
plot = movie.get('plot outline')
if plot and len(plot) > 800:
plot = plot[0:800] + "..."
return {
'title': movie.get('title'),
'votes': movie.get('votes'),
"aka": list_to_str(movie.get("akas")),
"seasons": movie.get("number of seasons"),
"box_office": movie.get('box office'),
'localized_title': movie.get('localized title'),
'kind': movie.get("kind"),
"imdb_id": f"tt{movie.get("imdbID")}",
"cast": list_to_str(movie.get("cast")),
"runtime": list_to_str(movie.get("runtimes")),
"countries": list_to_str(movie.get("countries")),
"certificates": list_to_str(movie.get("certificates")),
"languages": list_to_str(movie.get("languages")),
"director": list_to_str(movie.get("director")),
"writer":list_to_str(movie.get("writer")),
"producer":list_to_str(movie.get("producer")),
"composer":list_to_str(movie.get("composer")) ,
"cinematographer":list_to_str(movie.get("cinematographer")),
"music_team": list_to_str(movie.get("music department")),
"distributors": list_to_str(movie.get("distributors")),
'release_date': date,
'year': movie.get('year'),
'genres': list_to_str(movie.get("genres")),
'poster': movie.get('full-size cover url'),
'plot': plot,
'rating': str(movie.get("rating")),
'url':f'https://www.imdb.com/title/tt{movieid}'
}
# https://github.com/odysseusmax/animated-lamp/blob/2ef4730eb2b5f0596ed6d03e7b05243d93e3415b/bot/utils/broadcast.py#L37
async def broadcast_messages(user_id, message):
try:
await message.copy(chat_id=user_id)
return True, "Succes"
except FloodWait as e:
await asyncio.sleep(e.x)
return await broadcast_messages(user_id, message)
except InputUserDeactivated:
await db.delete_user(int(user_id))
logging.info(f"{user_id}-Removed from Database, since deleted account.")
return False, "Deleted"
except UserIsBlocked:
logging.info(f"{user_id} -Blocked the bot.")
return False, "Blocked"
except PeerIdInvalid:
await db.delete_user(int(user_id))
logging.info(f"{user_id} - PeerIdInvalid")
return False, "Error"
except Exception as e:
return False, "Error"
async def search_gagala(text):
usr_agent = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/61.0.3163.100 Safari/537.36'
}
text = text.replace(" ", '+')
url = f'https://www.google.com/search?q={text}'
response = requests.get(url, headers=usr_agent)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
titles = soup.find_all( 'h3' )
return [title.getText() for title in titles]
def get_size(size):
    """Convert a byte count to a human-readable string, e.g. "1.50 MB".

    The unit index is capped at the last entry so very large inputs can
    no longer index past the end of ``units``.
    """
    units = ["Bytes", "KB", "MB", "GB", "TB", "PB", "EB"]
    size = float(size)
    i = 0
    # Stop at the largest unit instead of one step past it: the original
    # bound ``i < len(units)`` let i reach len(units) and raise IndexError.
    while size >= 1024.0 and i < len(units) - 1:
        i += 1
        size /= 1024.0
    return "%.2f %s" % (size, units[i])
def split_list(l, n):
    """Yield successive chunks of at most ``n`` items from list ``l``."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def get_file_id(msg: Message):
    """Return the media object attached to ``msg``, tagged with its type.

    The returned object gains a ``message_type`` attribute naming the media
    field it came from.  Returns None when the message carries no media.
    """
    if not msg.media:
        return None
    media_fields = (
        "photo",
        "animation",
        "audio",
        "document",
        "video",
        "video_note",
        "voice",
        "sticker",
    )
    for field in media_fields:
        media = getattr(msg, field)
        if media:
            setattr(media, "message_type", field)
            return media
def extract_user(message: Message) -> Union[int, str]:
    """Work out which user a command message refers to.

    Checks, in order: the replied-to message's author, a text-mention
    entity in the command, then a raw id/username argument; finally falls
    back to the sender.  Returns a ``(user_id, user_first_name)`` tuple.
    """
    # https://github.com/SpEcHiDe/PyroGramBot/blob/f30e2cca12002121bad1982f68cd0ff9814ce027/pyrobot/helper_functions/extract_user.py#L7
    if message.reply_to_message:
        replied = message.reply_to_message.from_user
        return (replied.id, replied.first_name)
    if len(message.command) > 1:
        entities = message.entities
        if len(entities) > 1 and entities[1].type == "text_mention":
            mentioned = entities[1].user
            return (mentioned.id, mentioned.first_name)
        raw = message.command[1]
        # don't want to make a request -_- so reuse the raw argument as
        # the display name as well.
        try:
            return (int(raw), raw)
        except ValueError:
            return (raw, raw)
    sender = message.from_user
    return (sender.id, sender.first_name)
def list_to_str(k):
    """Render a sequence as display text: "N/A" when empty, otherwise the
    elements joined as "a,  b, " (each element followed by a comma).

    When MAX_LIST_ELM is configured, only that many leading items are kept.
    """
    if not k:
        return "N/A"
    if len(k) == 1:
        return str(k[0])
    if MAX_LIST_ELM:
        k = k[:int(MAX_LIST_ELM)]
    return ' '.join(f'{elem}, ' for elem in k)
def last_online(from_user):
    """Describe when ``from_user`` was last seen, as human-readable text."""
    if from_user.is_bot:
        return "🤖 Bot :("
    status_text = {
        'recently': "Recently",
        'within_week': "Within the last week",
        'within_month': "Within the last month",
        'long_time_ago': "A long time ago :(",
        'online': "Currently Online",
    }
    status = from_user.status
    if status in status_text:
        return status_text[status]
    if status == 'offline':
        return datetime.fromtimestamp(from_user.last_online_date).strftime("%a, %d %b %Y, %H:%M:%S")
    # Unknown status -> empty string, matching the original behaviour.
    return ""
def split_quotes(text: str) -> List:
    """Split ``text`` into ``[key, rest]`` where a quoted key may contain spaces.

    Accepted opening quotes are those in START_CHAR; a smart-open quote is
    closed by SMART_CLOSE.  Backslash-escaped characters inside the key do
    not terminate it.  Unquoted input is split on the first whitespace run.
    """
    if not any(text.startswith(char) for char in START_CHAR):
        return text.split(None, 1)
    counter = 1  # ignore first char -> is some kind of quote
    while counter < len(text):
        if text[counter] == "\\":
            counter += 1  # skip over the escaped character too
        elif text[counter] == text[0] or (text[0] == SMART_OPEN and text[counter] == SMART_CLOSE):
            break
        counter += 1
    else:
        # No closing quote found: fall back to a plain whitespace split.
        return text.split(None, 1)
    # 1 to avoid starting quote, and counter is exclusive so avoids ending
    key = remove_escapes(text[1:counter].strip())
    # index will be in range, or `else` would have been executed and returned
    rest = text[counter + 1:].strip()
    if not key:
        # Empty quoted key: represent it as a doubled quote character.
        key = text[0] + text[0]
    return list(filter(None, [key, rest]))
def parser(text, keyword):
    """Parse buttonurl:/buttonalert: markup out of ``text``.

    Returns ``(note_data, buttons, alerts)`` where ``note_data`` is the text
    with button markup removed, ``buttons`` is a list of keyboard rows
    (``:same`` appends to the previous row), and ``alerts`` holds the alert
    texts referenced by "alertmessage:<i>:<keyword>" callback data.
    """
    if "buttonalert" in text:
        text = (text.replace("\n", "\\n").replace("\t", "\\t"))
    buttons = []
    note_data = ""
    prev = 0
    i = 0
    alerts = []
    for match in BTN_URL_REGEX.finditer(text):
        # Check if btnurl is escaped
        n_escapes = 0
        to_check = match.start(1) - 1
        while to_check > 0 and text[to_check] == "\\":
            n_escapes += 1
            to_check -= 1
        # if even, not escaped -> create button
        if n_escapes % 2 == 0:
            note_data += text[prev:match.start(1)]
            prev = match.end(1)
            if match.group(3) == "buttonalert":
                # Alert buttons carry callback data; group(4) is the alert text.
                if bool(match.group(5)) and buttons:
                    # ":same" suffix -> add to the current row.
                    buttons[-1].append(InlineKeyboardButton(
                        text=match.group(2),
                        callback_data=f"alertmessage:{i}:{keyword}"
                    ))
                else:
                    buttons.append([InlineKeyboardButton(
                        text=match.group(2),
                        callback_data=f"alertmessage:{i}:{keyword}"
                    )])
                i += 1
                alerts.append(match.group(4))
            elif bool(match.group(5)) and buttons:
                # URL button on the same row as the previous one.
                buttons[-1].append(InlineKeyboardButton(
                    text=match.group(2),
                    url=match.group(4).replace(" ", "")
                ))
            else:
                buttons.append([InlineKeyboardButton(
                    text=match.group(2),
                    url=match.group(4).replace(" ", "")
                )])
        else:
            # Odd number of backslashes: the markup is escaped; keep the
            # text up to (but not including) the escaping backslash.
            note_data += text[prev:to_check]
            prev = match.start(1) - 1
    else:
        # for/else without break: always appends the trailing text.
        note_data += text[prev:]
    try:
        return note_data, buttons, alerts
    except:
        # NOTE(review): a bare `return` cannot raise and `alerts` is always
        # bound, so this branch is effectively dead code.
        return note_data, buttons, None
def remove_escapes(text: str) -> str:
    """Strip single backslash escapes, keeping the escaped characters."""
    out = []
    escaped = False
    for ch in text:
        if escaped:
            # Previous char was a backslash: keep this char verbatim.
            out.append(ch)
            escaped = False
        elif ch == "\\":
            escaped = True
        else:
            out.append(ch)
    return "".join(out)
def humanbytes(size):
    """Format a byte count using binary prefixes (KiB, MiB, ...).

    Returns "" for falsy input, mirroring the original behaviour.
    """
    if not size:
        return ""
    power = 2**10
    n = 0
    Dic_powerN = {0: ' ', 1: 'Ki', 2: 'Mi', 3: 'Gi', 4: 'Ti'}
    # ``n += 1`` was missing its operand in the original (a SyntaxError);
    # also cap n at the largest prefix so huge sizes cannot raise KeyError.
    while size > power and n < len(Dic_powerN) - 1:
        size /= power
        n += 1
    return str(round(size, 2)) + " " + Dic_powerN[n] + 'B'
| import logging
from pyrogram.errors import InputUserDeactivated, UserNotParticipant, FloodWait, UserIsBlocked, PeerIdInvalid
from info import AUTH_CHANNEL, LONG_IMDB_DESCRIPTION, MAX_LIST_ELM
from imdb import IMDb
import asyncio
from pyrogram.types import Message
from typing import Union
import re
import os
from datetime import datetime
from typing import List
from pyrogram.types import InlineKeyboardButton
from database.users_chats_db import db
from bs4 import BeautifulSoup
import requests
# Module-level logger for this utilities module.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Matches markdown-style button markup: [label](buttonurl:...) or
# [label](buttonalert:...), with an optional ":same" suffix (group 5)
# meaning "place on the same row as the previous button".
BTN_URL_REGEX = re.compile(
    r"(\[([^\[]+?)\]\((buttonurl|buttonalert):(?:/{0,2})(.+?)(:same)?\))"
)
# Shared IMDb client used by get_poster().
imdb = IMDb()
BANNED = {}  # NOTE(review): appears unused within this file -- confirm before removing
# Quote characters recognised by split_quotes(); SMART_* are the curly
# ("smart") double quotes.
SMART_OPEN = '“'
SMART_CLOSE = '”'
START_CHAR = ('\'', '"', SMART_OPEN)
# temp db for banned
class temp(object):
    """In-memory runtime state shared across handlers (not persisted)."""
    BANNED_USERS = []  # presumably user ids barred from the bot -- populated elsewhere
    BANNED_CHATS = []  # presumably chat ids barred from the bot -- populated elsewhere
    ME = None  # filled in at startup (likely the bot's own id) -- TODO confirm
    CURRENT=int(os.environ.get("SKIP", 2))  # starting offset, from the SKIP env var
    CANCEL = False  # flag checked to abort long-running work
    MELCOW = {}  # per-chat bookkeeping (name suggests welcome messages) -- TODO confirm
    U_NAME = None  # filled in at startup -- TODO confirm semantics
    B_NAME = None  # filled in at startup -- TODO confirm semantics
async def is_subscribed(bot, query):
    """Return True when the querying user is a (non-kicked) member of AUTH_CHANNEL."""
    try:
        member = await bot.get_chat_member(AUTH_CHANNEL, query.from_user.id)
    except UserNotParticipant:
        # Not in the channel at all.
        pass
    except Exception as e:
        logger.exception(e)
    else:
        if member.status != 'kicked':
            return True
    return False
async def get_poster(query, bulk=False, id=False, file=None):
    """Look up a movie/series on IMDb and return its details as a dict.

    @param query: title text (optionally ending in a 4-digit year), or an
        IMDb id when ``id`` is true.
    @param bulk: when true, return the raw list of search results instead
        of the first match's details.
    @param id: when true, ``query`` is treated as a numeric IMDb id.
    @param file: optional filename used as a fallback source for the year.
    Returns None when the search yields nothing.
    """
    if not id:
        # https://t.me/GetTGLink/4183
        query = (query.strip()).lower()
        title = query
        # A trailing 4-digit number starting with 1 or 2 is taken as the year.
        year = re.findall(r'[1-2]\d{3}$', query, re.IGNORECASE)
        if year:
            year = list_to_str(year[:1])
            title = (query.replace(year, "")).strip()
        elif file is not None:
            # No year in the query; try to find one anywhere in the file name.
            year = re.findall(r'[1-2]\d{3}', file, re.IGNORECASE)
            if year:
                year = list_to_str(year[:1])
        else:
            year = None
        movieid = imdb.search_movie(title.lower(), results=10)
        if not movieid:
            return None
        if year:
            # Prefer results matching the year, but keep all results if none do.
            filtered=list(filter(lambda k: str(k.get('year')) == str(year), movieid))
            if not filtered:
                filtered = movieid
        else:
            filtered = movieid
        # Prefer movies and TV series over other kinds (video games, etc.).
        movieid=list(filter(lambda k: k.get('kind') in ['movie', 'tv series'], filtered))
        if not movieid:
            movieid = filtered
        if bulk:
            return movieid
        movieid = movieid[0].movieID
    else:
        movieid = int(query)
    movie = imdb.get_movie(movieid)
    # Pick the best available release date string.
    if movie.get("original air date"):
        date = movie["original air date"]
    elif movie.get("year"):
        date = movie.get("year")
    else:
        date = "N/A"
    plot = ""
    # Short description: first entry of 'plot'; long: the 'plot outline'.
    if not LONG_IMDB_DESCRIPTION:
        plot = movie.get('plot')
        if plot and len(plot) > 0:
            plot = plot[0]
    else:
        plot = movie.get('plot outline')
    if plot and len(plot) > 800:
        plot = plot[0:800] + "..."
    return {
        'title': movie.get('title'),
        'votes': movie.get('votes'),
        "aka": list_to_str(movie.get("akas")),
        "seasons": movie.get("number of seasons"),
        "box_office": movie.get('box office'),
        'localized_title': movie.get('localized title'),
        'kind': movie.get("kind"),
        "imdb_id": f"tt{movie.get('imdbID')}",
        "cast": list_to_str(movie.get("cast")),
        "runtime": list_to_str(movie.get("runtimes")),
        "countries": list_to_str(movie.get("countries")),
        "certificates": list_to_str(movie.get("certificates")),
        "languages": list_to_str(movie.get("languages")),
        "director": list_to_str(movie.get("director")),
        "writer":list_to_str(movie.get("writer")),
        "producer":list_to_str(movie.get("producer")),
        "composer":list_to_str(movie.get("composer")) ,
        "cinematographer":list_to_str(movie.get("cinematographer")),
        "music_team": list_to_str(movie.get("music department")),
        "distributors": list_to_str(movie.get("distributors")),
        'release_date': date,
        'year': movie.get('year'),
        'genres': list_to_str(movie.get("genres")),
        'poster': movie.get('full-size cover url'),
        'plot': plot,
        'rating': str(movie.get("rating")),
        'url':f'https://www.imdb.com/title/tt{movieid}'
    }
# https://github.com/odysseusmax/animated-lamp/blob/2ef4730eb2b5f0596ed6d03e7b05243d93e3415b/bot/utils/broadcast.py#L37
async def broadcast_messages(user_id, message):
    """Copy ``message`` to ``user_id`` and report the outcome.

    Returns a ``(success, status)`` tuple where ``status`` is one of
    "Succes", "Deleted", "Blocked" or "Error".  Users whose accounts are
    deleted or unreachable are removed from the database.
    """
    try:
        await message.copy(chat_id=user_id)
        # NOTE: "Succes" spelling kept as-is; callers may compare this string.
        return True, "Succes"
    except FloodWait as e:
        # Telegram asked us to slow down; wait, then retry the same user.
        await asyncio.sleep(e.x)
        return await broadcast_messages(user_id, message)
    except InputUserDeactivated:
        await db.delete_user(int(user_id))
        logging.info(f"{user_id}-Removed from Database, since deleted account.")
        return False, "Deleted"
    except UserIsBlocked:
        logging.info(f"{user_id} -Blocked the bot.")
        return False, "Blocked"
    except PeerIdInvalid:
        await db.delete_user(int(user_id))
        logging.info(f"{user_id} - PeerIdInvalid")
        return False, "Error"
    except Exception:
        # Don't let one bad recipient abort the whole broadcast, but record
        # what went wrong instead of swallowing the exception silently.
        logging.exception(f"{user_id} - broadcast failed")
        return False, "Error"
async def search_gagala(text):
    """Scrape Google search result titles (the <h3> headings) for ``text``."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/61.0.3163.100 Safari/537.36'
    }
    query = text.replace(" ", '+')
    page = requests.get(f'https://www.google.com/search?q={query}', headers=headers)
    page.raise_for_status()
    parsed = BeautifulSoup(page.text, 'html.parser')
    return [heading.getText() for heading in parsed.find_all('h3')]
def get_size(size):
    """Convert a byte count to a human-readable string, e.g. "1.50 MB".

    The unit index is capped at the last entry so very large inputs can
    no longer index past the end of ``units``.
    """
    units = ["Bytes", "KB", "MB", "GB", "TB", "PB", "EB"]
    size = float(size)
    i = 0
    # Stop at the largest unit instead of one step past it: the original
    # bound ``i < len(units)`` let i reach len(units) and raise IndexError.
    while size >= 1024.0 and i < len(units) - 1:
        i += 1
        size /= 1024.0
    return "%.2f %s" % (size, units[i])
def split_list(l, n):
    """Yield successive chunks of at most ``n`` items from list ``l``."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def get_file_id(msg: Message):
    """Return the media object attached to ``msg``, tagged with its type.

    The returned object gains a ``message_type`` attribute naming the media
    field it came from.  Returns None when the message carries no media.
    """
    if not msg.media:
        return None
    media_fields = (
        "photo",
        "animation",
        "audio",
        "document",
        "video",
        "video_note",
        "voice",
        "sticker",
    )
    for field in media_fields:
        media = getattr(msg, field)
        if media:
            setattr(media, "message_type", field)
            return media
def extract_user(message: Message) -> Union[int, str]:
    """Work out which user a command message refers to.

    Checks, in order: the replied-to message's author, a text-mention
    entity in the command, then a raw id/username argument; finally falls
    back to the sender.  Returns a ``(user_id, user_first_name)`` tuple.
    """
    # https://github.com/SpEcHiDe/PyroGramBot/blob/f30e2cca12002121bad1982f68cd0ff9814ce027/pyrobot/helper_functions/extract_user.py#L7
    if message.reply_to_message:
        replied = message.reply_to_message.from_user
        return (replied.id, replied.first_name)
    if len(message.command) > 1:
        entities = message.entities
        if len(entities) > 1 and entities[1].type == "text_mention":
            mentioned = entities[1].user
            return (mentioned.id, mentioned.first_name)
        raw = message.command[1]
        # don't want to make a request -_- so reuse the raw argument as
        # the display name as well.
        try:
            return (int(raw), raw)
        except ValueError:
            return (raw, raw)
    sender = message.from_user
    return (sender.id, sender.first_name)
def list_to_str(k):
    """Render a sequence as display text: "N/A" when empty, otherwise the
    elements joined as "a,  b, " (each element followed by a comma).

    When MAX_LIST_ELM is configured, only that many leading items are kept.
    """
    if not k:
        return "N/A"
    if len(k) == 1:
        return str(k[0])
    if MAX_LIST_ELM:
        k = k[:int(MAX_LIST_ELM)]
    return ' '.join(f'{elem}, ' for elem in k)
def last_online(from_user):
    """Describe when ``from_user`` was last seen, as human-readable text."""
    if from_user.is_bot:
        return "🤖 Bot :("
    status_text = {
        'recently': "Recently",
        'within_week': "Within the last week",
        'within_month': "Within the last month",
        'long_time_ago': "A long time ago :(",
        'online': "Currently Online",
    }
    status = from_user.status
    if status in status_text:
        return status_text[status]
    if status == 'offline':
        return datetime.fromtimestamp(from_user.last_online_date).strftime("%a, %d %b %Y, %H:%M:%S")
    # Unknown status -> empty string, matching the original behaviour.
    return ""
def split_quotes(text: str) -> List:
    """Split ``text`` into ``[key, rest]`` where a quoted key may contain spaces.

    Accepted opening quotes are those in START_CHAR; a smart-open quote is
    closed by SMART_CLOSE.  Backslash-escaped characters inside the key do
    not terminate it.  Unquoted input is split on the first whitespace run.
    """
    if not any(text.startswith(char) for char in START_CHAR):
        return text.split(None, 1)
    counter = 1  # ignore first char -> is some kind of quote
    while counter < len(text):
        if text[counter] == "\\":
            counter += 1  # skip over the escaped character too
        elif text[counter] == text[0] or (text[0] == SMART_OPEN and text[counter] == SMART_CLOSE):
            break
        counter += 1
    else:
        # No closing quote found: fall back to a plain whitespace split.
        return text.split(None, 1)
    # 1 to avoid starting quote, and counter is exclusive so avoids ending
    key = remove_escapes(text[1:counter].strip())
    # index will be in range, or `else` would have been executed and returned
    rest = text[counter + 1:].strip()
    if not key:
        # Empty quoted key: represent it as a doubled quote character.
        key = text[0] + text[0]
    return list(filter(None, [key, rest]))
def parser(text, keyword):
    """Parse buttonurl:/buttonalert: markup out of ``text``.

    Returns ``(note_data, buttons, alerts)`` where ``note_data`` is the text
    with button markup removed, ``buttons`` is a list of keyboard rows
    (``:same`` appends to the previous row), and ``alerts`` holds the alert
    texts referenced by "alertmessage:<i>:<keyword>" callback data.
    """
    if "buttonalert" in text:
        text = (text.replace("\n", "\\n").replace("\t", "\\t"))
    buttons = []
    note_data = ""
    prev = 0
    i = 0
    alerts = []
    for match in BTN_URL_REGEX.finditer(text):
        # Check if btnurl is escaped
        n_escapes = 0
        to_check = match.start(1) - 1
        while to_check > 0 and text[to_check] == "\\":
            n_escapes += 1
            to_check -= 1
        # if even, not escaped -> create button
        if n_escapes % 2 == 0:
            note_data += text[prev:match.start(1)]
            prev = match.end(1)
            if match.group(3) == "buttonalert":
                # Alert buttons carry callback data; group(4) is the alert text.
                if bool(match.group(5)) and buttons:
                    # ":same" suffix -> add to the current row.
                    buttons[-1].append(InlineKeyboardButton(
                        text=match.group(2),
                        callback_data=f"alertmessage:{i}:{keyword}"
                    ))
                else:
                    buttons.append([InlineKeyboardButton(
                        text=match.group(2),
                        callback_data=f"alertmessage:{i}:{keyword}"
                    )])
                i += 1
                alerts.append(match.group(4))
            elif bool(match.group(5)) and buttons:
                # URL button on the same row as the previous one.
                buttons[-1].append(InlineKeyboardButton(
                    text=match.group(2),
                    url=match.group(4).replace(" ", "")
                ))
            else:
                buttons.append([InlineKeyboardButton(
                    text=match.group(2),
                    url=match.group(4).replace(" ", "")
                )])
        else:
            # Odd number of backslashes: the markup is escaped; keep the
            # text up to (but not including) the escaping backslash.
            note_data += text[prev:to_check]
            prev = match.start(1) - 1
    else:
        # for/else without break: always appends the trailing text.
        note_data += text[prev:]
    try:
        return note_data, buttons, alerts
    except:
        # NOTE(review): a bare `return` cannot raise and `alerts` is always
        # bound, so this branch is effectively dead code.
        return note_data, buttons, None
def remove_escapes(text: str) -> str:
    """Strip single backslash escapes, keeping the escaped characters."""
    out = []
    escaped = False
    for ch in text:
        if escaped:
            # Previous char was a backslash: keep this char verbatim.
            out.append(ch)
            escaped = False
        elif ch == "\\":
            escaped = True
        else:
            out.append(ch)
    return "".join(out)
def humanbytes(size):
    """Format a byte count using binary prefixes (KiB, MiB, ...).

    Returns "" for falsy input, mirroring the original behaviour.
    """
    if not size:
        return ""
    power = 2**10
    n = 0
    Dic_powerN = {0: ' ', 1: 'Ki', 2: 'Mi', 3: 'Gi', 4: 'Ti'}
    # ``n += 1`` was missing its operand in the original (a SyntaxError);
    # also cap n at the largest prefix so huge sizes cannot raise KeyError.
    while size > power and n < len(Dic_powerN) - 1:
        size /= power
        n += 1
    return str(round(size, 2)) + " " + Dic_powerN[n] + 'B'
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.logger._json}.
"""
from io import BytesIO, StringIO
from typing import IO, Any, List, Optional, Sequence, cast
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
from zope.interface.verify import verifyObject
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase
from .._flatten import extractField
from .._format import formatEvent
from .._global import globalLogPublisher
from .._interfaces import ILogObserver, LogEvent
from .._json import (
eventAsJSON,
eventFromJSON,
eventsFromJSONLogFile,
jsonFileLogObserver,
log as jsonLog,
)
from .._levels import LogLevel
from .._logger import Logger
from .._observer import LogPublisher
def savedJSONInvariants(testCase: TestCase, savedJSON: str) -> str:
    """
    Verify invariants of L{eventAsJSON} output and pass the value through.

    @param testCase: the L{TestCase} used to perform the assertions.
    @param savedJSON: the result of L{eventAsJSON}.
    @return: C{savedJSON}, unchanged.
    @raise AssertionError: if any invariant does not hold.
    """
    result = savedJSON
    testCase.assertIsInstance(result, str)
    # Serialized events must be single-line so that they can be framed by a
    # record separator in a log file.
    testCase.assertEqual(result.count("\n"), 0)
    return result
class SaveLoadTests(TestCase):
    """
    Tests for loading and saving log events.
    """
    def savedEventJSON(self, event: LogEvent) -> str:
        """
        Serialize an event, assert some things about it, and return the
        JSON.
        @param event: An event.
        @return: JSON.
        """
        return savedJSONInvariants(self, eventAsJSON(event))
    def test_simpleSaveLoad(self) -> None:
        """
        Saving and loading an empty dictionary results in an empty dictionary.
        """
        self.assertEqual(eventFromJSON(self.savedEventJSON({})), {})
    def test_saveLoad(self) -> None:
        """
        Saving and loading a dictionary with some simple values in it results
        in those same simple values in the output; according to JSON's rules,
        though, all dictionary keys must be L{str} and any non-L{str}
        keys will be converted.
        """
        self.assertEqual(
            eventFromJSON(self.savedEventJSON({1: 2, "3": "4"})),  # type: ignore[dict-item]
            {"1": 2, "3": "4"},
        )
    def test_saveUnPersistable(self) -> None:
        """
        Saving and loading an object which cannot be represented in JSON will
        result in a placeholder.
        """
        self.assertEqual(
            eventFromJSON(self.savedEventJSON({"1": 2, "3": object()})),
            {"1": 2, "3": {"unpersistable": True}},
        )
    def test_saveNonASCII(self) -> None:
        """
        Non-ASCII keys and values can be saved and loaded.
        """
        self.assertEqual(
            eventFromJSON(self.savedEventJSON({"\u1234": "\u4321", "3": object()})),
            {"\u1234": "\u4321", "3": {"unpersistable": True}},
        )
    def test_saveBytes(self) -> None:
        """
        Any L{bytes} objects will be saved as if they are latin-1 so they can
        be faithfully re-loaded.
        """
        inputEvent = {"hello": bytes(range(255))}
        # On Python 3, bytes keys will be skipped by the JSON encoder. Not
        # much we can do about that.  Let's make sure that we don't get an
        # error, though.
        inputEvent.update({b"skipped": "okay"})  # type: ignore[dict-item]
        self.assertEqual(
            eventFromJSON(self.savedEventJSON(inputEvent)),
            {"hello": bytes(range(255)).decode("charmap")},
        )
    def test_saveUnPersistableThenFormat(self) -> None:
        """
        Saving and loading an object which cannot be represented in JSON, but
        has a string representation which I{can} be saved as JSON, will result
        in the same string formatting; any extractable fields will retain their
        data types.
        """
        class Reprable:
            def __init__(self, value: object) -> None:
                self.value = value
            def __repr__(self) -> str:
                return "reprable"
        inputEvent = {"log_format": "{object} {object.value}", "object": Reprable(7)}
        outputEvent = eventFromJSON(self.savedEventJSON(inputEvent))
        self.assertEqual(formatEvent(outputEvent), "reprable 7")
    def test_extractingFieldsPostLoad(self) -> None:
        """
        L{extractField} can extract fields from an object that's been saved and
        loaded from JSON.
        """
        class Obj:
            def __init__(self) -> None:
                self.value = 345
        inputEvent = dict(log_format="{object.value}", object=Obj())
        loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent))
        self.assertEqual(extractField("object.value", loadedEvent), 345)
        # The behavior of extractField is consistent between pre-persistence
        # and post-persistence events, although looking up the key directly
        # won't be:
        self.assertRaises(KeyError, extractField, "object", loadedEvent)
        self.assertRaises(KeyError, extractField, "object", inputEvent)
    def test_failureStructurePreserved(self) -> None:
        """
        Round-tripping a failure through L{eventAsJSON} preserves its class and
        structure.
        """
        events: List[LogEvent] = []
        log = Logger(observer=cast(ILogObserver, events.append))
        try:
            1 / 0
        except ZeroDivisionError:
            f = Failure()
            log.failure("a message about failure", f)
        self.assertEqual(len(events), 1)
        loaded = eventFromJSON(self.savedEventJSON(events[0]))["log_failure"]
        self.assertIsInstance(loaded, Failure)
        self.assertTrue(loaded.check(ZeroDivisionError))
        self.assertIsInstance(loaded.getTraceback(), str)
    def test_saveLoadLevel(self) -> None:
        """
        It's important that the C{log_level} key remain a
        L{constantly.NamedConstant} object.
        """
        inputEvent = dict(log_level=LogLevel.warn)
        loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent))
        self.assertIs(loadedEvent["log_level"], LogLevel.warn)
    def test_saveLoadUnknownLevel(self) -> None:
        """
        If a saved bit of JSON (let's say, from a future version of Twisted)
        were to persist a different log_level, it will resolve as None.
        """
        loadedEvent = eventFromJSON(
            '{"log_level": {"name": "other", '
            '"__class_uuid__": "02E59486-F24D-46AD-8224-3ACDF2A5732A"}}'
        )
        self.assertEqual(loadedEvent, dict(log_level=None))
class FileLogObserverTests(TestCase):
    """
    Tests for L{jsonFileLogObserver}.
    """
    def test_interface(self) -> None:
        """
        A L{FileLogObserver} returned by L{jsonFileLogObserver} is an
        L{ILogObserver}.
        """
        with StringIO() as fileHandle:
            observer = jsonFileLogObserver(fileHandle)
            try:
                verifyObject(ILogObserver, observer)
            except BrokenMethodImplementation as e:
                self.fail(e)
    def assertObserverWritesJSON(self, recordSeparator: str = "\x1e") -> None:
        """
        Asserts that an observer created by L{jsonFileLogObserver} with the
        given arguments writes events serialized as JSON text, using the given
        record separator.
        @param recordSeparator: C{recordSeparator} argument to
            L{jsonFileLogObserver}
        """
        with StringIO() as fileHandle:
            observer = jsonFileLogObserver(fileHandle, recordSeparator)
            event = dict(x=1)
            observer(event)
            # The observer frames each event as: separator + JSON + "\n".
            # (The previous expression nested single quotes inside a
            # single-quoted f-string -- a SyntaxError -- and expected
            # Python-repr quoting {'x': 1} rather than JSON {"x": 1}.)
            self.assertEqual(
                fileHandle.getvalue(), f'{recordSeparator}{{"x": 1}}\n'
            )
    def test_observeWritesDefaultRecordSeparator(self) -> None:
        """
        A L{FileLogObserver} created by L{jsonFileLogObserver} writes events
        serialized as JSON text to a file when it observes events.
        By default, the record separator is C{"\\x1e"}.
        """
        self.assertObserverWritesJSON()
    def test_observeWritesEmptyRecordSeparator(self) -> None:
        """
        A L{FileLogObserver} created by L{jsonFileLogObserver} writes events
        serialized as JSON text to a file when it observes events.
        This test sets the record separator to C{""}.
        """
        self.assertObserverWritesJSON(recordSeparator="")
    def test_failureFormatting(self) -> None:
        """
        A L{FileLogObserver} created by L{jsonFileLogObserver} writes failures
        serialized as JSON text to a file when it observes events.
        """
        io = StringIO()
        publisher = LogPublisher()
        logged: List[LogEvent] = []
        publisher.addObserver(cast(ILogObserver, logged.append))
        publisher.addObserver(jsonFileLogObserver(io))
        logger = Logger(observer=publisher)
        try:
            1 / 0
        except BaseException:
            logger.failure("failed as expected")
        reader = StringIO(io.getvalue())
        deserialized = list(eventsFromJSONLogFile(reader))
        def checkEvents(logEvents: Sequence[LogEvent]) -> None:
            # Both the directly-observed and the round-tripped event must
            # carry a real Failure raised from this very file.
            self.assertEqual(len(logEvents), 1)
            [failureEvent] = logEvents
            self.assertIn("log_failure", failureEvent)
            failureObject = failureEvent["log_failure"]
            self.assertIsInstance(failureObject, Failure)
            tracebackObject = failureObject.getTracebackObject()
            self.assertEqual(
                tracebackObject.tb_frame.f_code.co_filename.rstrip("co"),
                __file__.rstrip("co"),
            )
        checkEvents(logged)
        checkEvents(deserialized)
class LogFileReaderTests(TestCase):
    """
    Tests for L{eventsFromJSONLogFile}.
    """
    def setUp(self) -> None:
        # Capture the error events that the JSON log module itself emits
        # (e.g. about truncated or invalid records) so each test can
        # assert on them.
        self.errorEvents: List[LogEvent] = []
        @implementer(ILogObserver)
        def observer(event: LogEvent) -> None:
            if event["log_namespace"] == jsonLog.namespace and "record" in event:
                self.errorEvents.append(event)
        self.logObserver = observer
        globalLogPublisher.addObserver(observer)
    def tearDown(self) -> None:
        globalLogPublisher.removeObserver(self.logObserver)
    def _readEvents(
        self,
        inFile: IO[Any],
        recordSeparator: Optional[str] = None,
        bufferSize: int = 4096,
    ) -> None:
        """
        Test that L{eventsFromJSONLogFile} reads two pre-defined events from a
        file: C{{"x": 1}} and C{{"y": 2}}.
        @param inFile: C{inFile} argument to L{eventsFromJSONLogFile}
        @param recordSeparator: C{recordSeparator} argument to
            L{eventsFromJSONLogFile}
        @param bufferSize: C{bufferSize} argument to L{eventsFromJSONLogFile}
        """
        events = iter(eventsFromJSONLogFile(inFile, recordSeparator, bufferSize))
        self.assertEqual(next(events), {"x": 1})
        self.assertEqual(next(events), {"y": 2})
        self.assertRaises(StopIteration, next, events)  # No more events
    def test_readEventsAutoWithRecordSeparator(self) -> None:
        """
        L{eventsFromJSONLogFile} reads events from a file and automatically
        detects use of C{"\\x1e"} as the record separator.
        """
        with StringIO('\x1e{"x": 1}\n' '\x1e{"y": 2}\n') as fileHandle:
            self._readEvents(fileHandle)
            self.assertEqual(len(self.errorEvents), 0)
    def test_readEventsAutoEmptyRecordSeparator(self) -> None:
        """
        L{eventsFromJSONLogFile} reads events from a file and automatically
        detects use of C{""} as the record separator.
        """
        with StringIO('{"x": 1}\n' '{"y": 2}\n') as fileHandle:
            self._readEvents(fileHandle)
            self.assertEqual(len(self.errorEvents), 0)
    def test_readEventsExplicitRecordSeparator(self) -> None:
        """
        L{eventsFromJSONLogFile} reads events from a file and is told to use
        a specific record separator.
        """
        # Use "\x08" (backspace)... because that seems weird enough.
        with StringIO('\x08{"x": 1}\n' '\x08{"y": 2}\n') as fileHandle:
            self._readEvents(fileHandle, recordSeparator="\x08")
            self.assertEqual(len(self.errorEvents), 0)
    def test_readEventsPartialBuffer(self) -> None:
        """
        L{eventsFromJSONLogFile} handles buffering a partial event.
        """
        with StringIO('\x1e{"x": 1}\n' '\x1e{"y": 2}\n') as fileHandle:
            # Use a buffer size smaller than the event text.
            self._readEvents(fileHandle, bufferSize=1)
            self.assertEqual(len(self.errorEvents), 0)
    def test_readTruncated(self) -> None:
        """
        If the JSON text for a record is truncated, skip it.
        """
        with StringIO('\x1e{"x": 1' '\x1e{"y": 2}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle))
            self.assertEqual(next(events), {"y": 2})
            self.assertRaises(StopIteration, next, events)  # No more events
            # We should have logged the lost record
            self.assertEqual(len(self.errorEvents), 1)
            self.assertEqual(
                self.errorEvents[0]["log_format"],
                "Unable to read truncated JSON record: {record!r}",
            )
            self.assertEqual(self.errorEvents[0]["record"], b'{"x": 1')
    def test_readUnicode(self) -> None:
        """
        If the file being read from vends L{str}, strings decode from JSON
        as-is.
        """
        # The Euro currency sign is "\u20ac"
        with StringIO('\x1e{"currency": "\u20ac"}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle))
            self.assertEqual(next(events), {"currency": "\u20ac"})
            self.assertRaises(StopIteration, next, events)  # No more events
            self.assertEqual(len(self.errorEvents), 0)
    def test_readUTF8Bytes(self) -> None:
        """
        If the file being read from vends L{bytes}, strings decode from JSON as
        UTF-8.
        """
        # The Euro currency sign is b"\xe2\x82\xac" in UTF-8
        with BytesIO(b'\x1e{"currency": "\xe2\x82\xac"}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle))
            # The Euro currency sign is "\u20ac"
            self.assertEqual(next(events), {"currency": "\u20ac"})
            self.assertRaises(StopIteration, next, events)  # No more events
            self.assertEqual(len(self.errorEvents), 0)
    def test_readTruncatedUTF8Bytes(self) -> None:
        """
        If the JSON text for a record is truncated in the middle of a two-byte
        Unicode codepoint, we don't want to see a codec exception and the
        stream is read properly when the additional data arrives.
        """
        # The Euro currency sign is "\u20ac" and encodes in UTF-8 as three
        # bytes: b"\xe2\x82\xac".
        with BytesIO(b'\x1e{"x": "\xe2\x82\xac"}\n') as fileHandle:
            # bufferSize=8 splits the read inside the multi-byte codepoint.
            events = iter(eventsFromJSONLogFile(fileHandle, bufferSize=8))
            self.assertEqual(next(events), {"x": "\u20ac"})  # Got text
            self.assertRaises(StopIteration, next, events)  # No more events
            self.assertEqual(len(self.errorEvents), 0)
    def test_readInvalidUTF8Bytes(self) -> None:
        """
        If the JSON text for a record contains invalid UTF-8 text, ignore that
        record.
        """
        # The string b"\xe2\xac" is bogus
        with BytesIO(b'\x1e{"x": "\xe2\xac"}\n' b'\x1e{"y": 2}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle))
            self.assertEqual(next(events), {"y": 2})
            self.assertRaises(StopIteration, next, events)  # No more events
            # We should have logged the lost record
            self.assertEqual(len(self.errorEvents), 1)
            self.assertEqual(
                self.errorEvents[0]["log_format"],
                "Unable to decode UTF-8 for JSON record: {record!r}",
            )
            self.assertEqual(self.errorEvents[0]["record"], b'{"x": "\xe2\xac"}\n')
    def test_readInvalidJSON(self) -> None:
        """
        If the JSON text for a record is invalid, skip it.
        """
        with StringIO('\x1e{"x": }\n' '\x1e{"y": 2}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle))
            self.assertEqual(next(events), {"y": 2})
            self.assertRaises(StopIteration, next, events)  # No more events
            # We should have logged the lost record
            self.assertEqual(len(self.errorEvents), 1)
            self.assertEqual(
                self.errorEvents[0]["log_format"],
                "Unable to read JSON record: {record!r}",
            )
            self.assertEqual(self.errorEvents[0]["record"], b'{"x": }\n')
    def test_readUnseparated(self) -> None:
        """
        Multiple events without a record separator are skipped.
        """
        with StringIO('\x1e{"x": 1}\n' '{"y": 2}\n') as fileHandle:
            events = eventsFromJSONLogFile(fileHandle)
            self.assertRaises(StopIteration, next, events)  # No more events
            # We should have logged the lost record
            self.assertEqual(len(self.errorEvents), 1)
            self.assertEqual(
                self.errorEvents[0]["log_format"],
                "Unable to read JSON record: {record!r}",
            )
            self.assertEqual(self.errorEvents[0]["record"], b'{"x": 1}\n{"y": 2}\n')
    def test_roundTrip(self) -> None:
        """
        Data written by a L{FileLogObserver} returned by L{jsonFileLogObserver}
        and read by L{eventsFromJSONLogFile} is reconstructed properly.
        """
        event = dict(x=1)
        with StringIO() as fileHandle:
            observer = jsonFileLogObserver(fileHandle)
            observer(event)
            fileHandle.seek(0)
            events = eventsFromJSONLogFile(fileHandle)
            self.assertEqual(tuple(events), (event,))
            self.assertEqual(len(self.errorEvents), 0)
| # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.logger._json}.
"""
from io import BytesIO, StringIO
from typing import IO, Any, List, Optional, Sequence, cast
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
from zope.interface.verify import verifyObject
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase
from .._flatten import extractField
from .._format import formatEvent
from .._global import globalLogPublisher
from .._interfaces import ILogObserver, LogEvent
from .._json import (
eventAsJSON,
eventFromJSON,
eventsFromJSONLogFile,
jsonFileLogObserver,
log as jsonLog,
)
from .._levels import LogLevel
from .._logger import Logger
from .._observer import LogPublisher
def savedJSONInvariants(testCase: TestCase, savedJSON: str) -> str:
    """
    Verify invariants of L{eventAsJSON} output and pass the value through.

    @param testCase: the L{TestCase} used to perform the assertions.
    @param savedJSON: the result of L{eventAsJSON}.
    @return: C{savedJSON}, unchanged.
    @raise AssertionError: if any invariant does not hold.
    """
    result = savedJSON
    testCase.assertIsInstance(result, str)
    # Serialized events must be single-line so that they can be framed by a
    # record separator in a log file.
    testCase.assertEqual(result.count("\n"), 0)
    return result
class SaveLoadTests(TestCase):
    """
    Tests for loading and saving log events.
    """

    def savedEventJSON(self, event: LogEvent) -> str:
        """
        Serialize an event, assert some things about it, and return the
        JSON.

        @param event: An event.

        @return: JSON.
        """
        return savedJSONInvariants(self, eventAsJSON(event))

    def test_simpleSaveLoad(self) -> None:
        """
        Saving and loading an empty dictionary results in an empty dictionary.
        """
        self.assertEqual(eventFromJSON(self.savedEventJSON({})), {})

    def test_saveLoad(self) -> None:
        """
        Saving and loading a dictionary with some simple values in it results
        in those same simple values in the output; according to JSON's rules,
        though, all dictionary keys must be L{str} and any non-L{str}
        keys will be converted.
        """
        self.assertEqual(
            eventFromJSON(self.savedEventJSON({1: 2, "3": "4"})),  # type: ignore[dict-item]
            {"1": 2, "3": "4"},
        )

    def test_saveUnPersistable(self) -> None:
        """
        Saving and loading an object which cannot be represented in JSON will
        result in a placeholder.
        """
        self.assertEqual(
            eventFromJSON(self.savedEventJSON({"1": 2, "3": object()})),
            {"1": 2, "3": {"unpersistable": True}},
        )

    def test_saveNonASCII(self) -> None:
        """
        Non-ASCII keys and values can be saved and loaded.
        """
        self.assertEqual(
            eventFromJSON(self.savedEventJSON({"\u1234": "\u4321", "3": object()})),
            {"\u1234": "\u4321", "3": {"unpersistable": True}},
        )

    def test_saveBytes(self) -> None:
        """
        Any L{bytes} objects will be saved as if they are latin-1 so they can
        be faithfully re-loaded.
        """
        inputEvent = {"hello": bytes(range(255))}
        # On Python 3, bytes keys will be skipped by the JSON encoder. Not
        # much we can do about that. Let's make sure that we don't get an
        # error, though.
        inputEvent.update({b"skipped": "okay"})  # type: ignore[dict-item]
        self.assertEqual(
            eventFromJSON(self.savedEventJSON(inputEvent)),
            {"hello": bytes(range(255)).decode("charmap")},
        )

    def test_saveUnPersistableThenFormat(self) -> None:
        """
        Saving and loading an object which cannot be represented in JSON, but
        has a string representation which I{can} be saved as JSON, will result
        in the same string formatting; any extractable fields will retain their
        data types.
        """

        class Reprable:
            def __init__(self, value: object) -> None:
                self.value = value

            def __repr__(self) -> str:
                return "reprable"

        inputEvent = {"log_format": "{object} {object.value}", "object": Reprable(7)}
        outputEvent = eventFromJSON(self.savedEventJSON(inputEvent))
        self.assertEqual(formatEvent(outputEvent), "reprable 7")

    def test_extractingFieldsPostLoad(self) -> None:
        """
        L{extractField} can extract fields from an object that's been saved and
        loaded from JSON.
        """

        class Obj:
            def __init__(self) -> None:
                self.value = 345

        inputEvent = dict(log_format="{object.value}", object=Obj())
        loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent))
        self.assertEqual(extractField("object.value", loadedEvent), 345)
        # The behavior of extractField is consistent between pre-persistence
        # and post-persistence events, although looking up the key directly
        # won't be:
        self.assertRaises(KeyError, extractField, "object", loadedEvent)
        self.assertRaises(KeyError, extractField, "object", inputEvent)

    def test_failureStructurePreserved(self) -> None:
        """
        Round-tripping a failure through L{eventAsJSON} preserves its class and
        structure.
        """
        events: List[LogEvent] = []
        log = Logger(observer=cast(ILogObserver, events.append))
        try:
            # Deliberately raise so a real traceback is attached to the
            # Failure being round-tripped.
            1 / 0
        except ZeroDivisionError:
            f = Failure()
            log.failure("a message about failure", f)
        self.assertEqual(len(events), 1)
        loaded = eventFromJSON(self.savedEventJSON(events[0]))["log_failure"]
        self.assertIsInstance(loaded, Failure)
        self.assertTrue(loaded.check(ZeroDivisionError))
        self.assertIsInstance(loaded.getTraceback(), str)

    def test_saveLoadLevel(self) -> None:
        """
        It's important that the C{log_level} key remain a
        L{constantly.NamedConstant} object.
        """
        inputEvent = dict(log_level=LogLevel.warn)
        loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent))
        self.assertIs(loadedEvent["log_level"], LogLevel.warn)

    def test_saveLoadUnknownLevel(self) -> None:
        """
        If a saved bit of JSON (let's say, from a future version of Twisted)
        were to persist a different log_level, it will resolve as None.
        """
        # NOTE(review): the UUID appears to select the NamedConstant
        # (de)serializer, and "other" is not a known level name — confirm
        # against the serializer registry if this fixture is changed.
        loadedEvent = eventFromJSON(
            '{"log_level": {"name": "other", '
            '"__class_uuid__": "02E59486-F24D-46AD-8224-3ACDF2A5732A"}}'
        )
        self.assertEqual(loadedEvent, dict(log_level=None))
class FileLogObserverTests(TestCase):
    """
    Tests for L{jsonFileLogObserver}.
    """

    def test_interface(self) -> None:
        """
        A L{FileLogObserver} returned by L{jsonFileLogObserver} is an
        L{ILogObserver}.
        """
        with StringIO() as fileHandle:
            observer = jsonFileLogObserver(fileHandle)
            try:
                verifyObject(ILogObserver, observer)
            except BrokenMethodImplementation as e:
                self.fail(e)

    def assertObserverWritesJSON(self, recordSeparator: str = "\x1e") -> None:
        """
        Asserts that an observer created by L{jsonFileLogObserver} with the
        given arguments writes events serialized as JSON text, using the given
        record separator.

        @param recordSeparator: C{recordSeparator} argument to
            L{jsonFileLogObserver}
        """
        with StringIO() as fileHandle:
            observer = jsonFileLogObserver(fileHandle, recordSeparator)
            event = dict(x=1)
            observer(event)
            self.assertEqual(fileHandle.getvalue(), f'{recordSeparator}{{"x": 1}}\n')

    def test_observeWritesDefaultRecordSeparator(self) -> None:
        """
        A L{FileLogObserver} created by L{jsonFileLogObserver} writes events
        serialized as JSON text to a file when it observes events.
        By default, the record separator is C{"\\x1e"}.
        """
        self.assertObserverWritesJSON()

    def test_observeWritesEmptyRecordSeparator(self) -> None:
        """
        A L{FileLogObserver} created by L{jsonFileLogObserver} writes events
        serialized as JSON text to a file when it observes events.
        This test sets the record separator to C{""}.
        """
        self.assertObserverWritesJSON(recordSeparator="")

    def test_failureFormatting(self) -> None:
        """
        A L{FileLogObserver} created by L{jsonFileLogObserver} writes failures
        serialized as JSON text to a file when it observes events.
        """
        io = StringIO()
        publisher = LogPublisher()
        logged: List[LogEvent] = []
        # Capture the event both directly and after a JSON round-trip so the
        # two can be compared with the same assertions below.
        publisher.addObserver(cast(ILogObserver, logged.append))
        publisher.addObserver(jsonFileLogObserver(io))
        logger = Logger(observer=publisher)
        try:
            1 / 0
        except BaseException:
            logger.failure("failed as expected")
        reader = StringIO(io.getvalue())
        deserialized = list(eventsFromJSONLogFile(reader))

        def checkEvents(logEvents: Sequence[LogEvent]) -> None:
            self.assertEqual(len(logEvents), 1)
            [failureEvent] = logEvents
            self.assertIn("log_failure", failureEvent)
            failureObject = failureEvent["log_failure"]
            self.assertIsInstance(failureObject, Failure)
            tracebackObject = failureObject.getTracebackObject()
            # rstrip("co") tolerates ".py" vs ".pyc"/".pyo" filename variants.
            self.assertEqual(
                tracebackObject.tb_frame.f_code.co_filename.rstrip("co"),
                __file__.rstrip("co"),
            )

        checkEvents(logged)
        checkEvents(deserialized)
class LogFileReaderTests(TestCase):
    """
    Tests for L{eventsFromJSONLogFile}.
    """

    def setUp(self) -> None:
        # Collect error events emitted by the JSON log module itself; it
        # attaches a "record" field whenever it has to skip a bad record,
        # which is what these tests assert on.
        self.errorEvents: List[LogEvent] = []

        @implementer(ILogObserver)
        def observer(event: LogEvent) -> None:
            if event["log_namespace"] == jsonLog.namespace and "record" in event:
                self.errorEvents.append(event)

        self.logObserver = observer
        globalLogPublisher.addObserver(observer)

    def tearDown(self) -> None:
        # Undo the global observer registration from setUp().
        globalLogPublisher.removeObserver(self.logObserver)

    def _readEvents(
        self,
        inFile: IO[Any],
        recordSeparator: Optional[str] = None,
        bufferSize: int = 4096,
    ) -> None:
        """
        Test that L{eventsFromJSONLogFile} reads two pre-defined events from a
        file: C{{"x": 1}} and C{{"y": 2}}.

        @param inFile: C{inFile} argument to L{eventsFromJSONLogFile}
        @param recordSeparator: C{recordSeparator} argument to
            L{eventsFromJSONLogFile}
        @param bufferSize: C{bufferSize} argument to L{eventsFromJSONLogFile}
        """
        events = iter(eventsFromJSONLogFile(inFile, recordSeparator, bufferSize))
        self.assertEqual(next(events), {"x": 1})
        self.assertEqual(next(events), {"y": 2})
        self.assertRaises(StopIteration, next, events)  # No more events

    def test_readEventsAutoWithRecordSeparator(self) -> None:
        """
        L{eventsFromJSONLogFile} reads events from a file and automatically
        detects use of C{"\\x1e"} as the record separator.
        """
        with StringIO('\x1e{"x": 1}\n' '\x1e{"y": 2}\n') as fileHandle:
            self._readEvents(fileHandle)
            self.assertEqual(len(self.errorEvents), 0)

    def test_readEventsAutoEmptyRecordSeparator(self) -> None:
        """
        L{eventsFromJSONLogFile} reads events from a file and automatically
        detects use of C{""} as the record separator.
        """
        with StringIO('{"x": 1}\n' '{"y": 2}\n') as fileHandle:
            self._readEvents(fileHandle)
            self.assertEqual(len(self.errorEvents), 0)

    def test_readEventsExplicitRecordSeparator(self) -> None:
        """
        L{eventsFromJSONLogFile} reads events from a file and is told to use
        a specific record separator.
        """
        # Use "\x08" (backspace)... because that seems weird enough.
        with StringIO('\x08{"x": 1}\n' '\x08{"y": 2}\n') as fileHandle:
            self._readEvents(fileHandle, recordSeparator="\x08")
            self.assertEqual(len(self.errorEvents), 0)

    def test_readEventsPartialBuffer(self) -> None:
        """
        L{eventsFromJSONLogFile} handles buffering a partial event.
        """
        with StringIO('\x1e{"x": 1}\n' '\x1e{"y": 2}\n') as fileHandle:
            # Use a buffer size smaller than the event text.
            self._readEvents(fileHandle, bufferSize=1)
            self.assertEqual(len(self.errorEvents), 0)

    def test_readTruncated(self) -> None:
        """
        If the JSON text for a record is truncated, skip it.
        """
        with StringIO('\x1e{"x": 1' '\x1e{"y": 2}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle))
            self.assertEqual(next(events), {"y": 2})
            self.assertRaises(StopIteration, next, events)  # No more events

            # We should have logged the lost record
            self.assertEqual(len(self.errorEvents), 1)
            self.assertEqual(
                self.errorEvents[0]["log_format"],
                "Unable to read truncated JSON record: {record!r}",
            )
            self.assertEqual(self.errorEvents[0]["record"], b'{"x": 1')

    def test_readUnicode(self) -> None:
        """
        If the file being read from vends L{str}, strings decode from JSON
        as-is.
        """
        # The Euro currency sign is "\u20ac"
        with StringIO('\x1e{"currency": "\u20ac"}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle))
            self.assertEqual(next(events), {"currency": "\u20ac"})
            self.assertRaises(StopIteration, next, events)  # No more events
            self.assertEqual(len(self.errorEvents), 0)

    def test_readUTF8Bytes(self) -> None:
        """
        If the file being read from vends L{bytes}, strings decode from JSON as
        UTF-8.
        """
        # The Euro currency sign is b"\xe2\x82\xac" in UTF-8
        with BytesIO(b'\x1e{"currency": "\xe2\x82\xac"}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle))
            # The Euro currency sign is "\u20ac"
            self.assertEqual(next(events), {"currency": "\u20ac"})
            self.assertRaises(StopIteration, next, events)  # No more events
            self.assertEqual(len(self.errorEvents), 0)

    def test_readTruncatedUTF8Bytes(self) -> None:
        """
        If the JSON text for a record is truncated in the middle of a two-byte
        Unicode codepoint, we don't want to see a codec exception and the
        stream is read properly when the additional data arrives.
        """
        # The Euro currency sign is "\u20ac" and encodes in UTF-8 as three
        # bytes: b"\xe2\x82\xac".  bufferSize=8 splits the read inside it.
        with BytesIO(b'\x1e{"x": "\xe2\x82\xac"}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle, bufferSize=8))
            self.assertEqual(next(events), {"x": "\u20ac"})  # Got text
            self.assertRaises(StopIteration, next, events)  # No more events
            self.assertEqual(len(self.errorEvents), 0)

    def test_readInvalidUTF8Bytes(self) -> None:
        """
        If the JSON text for a record contains invalid UTF-8 text, ignore that
        record.
        """
        # The string b"\xe2\xac" is bogus
        with BytesIO(b'\x1e{"x": "\xe2\xac"}\n' b'\x1e{"y": 2}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle))
            self.assertEqual(next(events), {"y": 2})
            self.assertRaises(StopIteration, next, events)  # No more events

            # We should have logged the lost record
            self.assertEqual(len(self.errorEvents), 1)
            self.assertEqual(
                self.errorEvents[0]["log_format"],
                "Unable to decode UTF-8 for JSON record: {record!r}",
            )
            self.assertEqual(self.errorEvents[0]["record"], b'{"x": "\xe2\xac"}\n')

    def test_readInvalidJSON(self) -> None:
        """
        If the JSON text for a record is invalid, skip it.
        """
        with StringIO('\x1e{"x": }\n' '\x1e{"y": 2}\n') as fileHandle:
            events = iter(eventsFromJSONLogFile(fileHandle))
            self.assertEqual(next(events), {"y": 2})
            self.assertRaises(StopIteration, next, events)  # No more events

            # We should have logged the lost record
            self.assertEqual(len(self.errorEvents), 1)
            self.assertEqual(
                self.errorEvents[0]["log_format"],
                "Unable to read JSON record: {record!r}",
            )
            self.assertEqual(self.errorEvents[0]["record"], b'{"x": }\n')

    def test_readUnseparated(self) -> None:
        """
        Multiple events without a record separator are skipped.
        """
        with StringIO('\x1e{"x": 1}\n' '{"y": 2}\n') as fileHandle:
            events = eventsFromJSONLogFile(fileHandle)
            self.assertRaises(StopIteration, next, events)  # No more events

            # We should have logged the lost record
            self.assertEqual(len(self.errorEvents), 1)
            self.assertEqual(
                self.errorEvents[0]["log_format"],
                "Unable to read JSON record: {record!r}",
            )
            self.assertEqual(self.errorEvents[0]["record"], b'{"x": 1}\n{"y": 2}\n')

    def test_roundTrip(self) -> None:
        """
        Data written by a L{FileLogObserver} returned by L{jsonFileLogObserver}
        and read by L{eventsFromJSONLogFile} is reconstructed properly.
        """
        event = dict(x=1)
        with StringIO() as fileHandle:
            observer = jsonFileLogObserver(fileHandle)
            observer(event)
            fileHandle.seek(0)
            events = eventsFromJSONLogFile(fileHandle)
            self.assertEqual(tuple(events), (event,))
            self.assertEqual(len(self.errorEvents), 0)
|
import click
from typing import Sequence, Tuple
from click.formatting import measure_table, iter_rows
class OrderedCommand(click.Command):
    """A click Command whose help output lists required options first."""

    def get_params(self, ctx):
        """Return this command's parameters, required ones first, then by name.

        The key (not required, name) works because False sorts before True,
        so required options lead; ties break alphabetically.
        """
        rv = super().get_params(ctx)
        rv.sort(key=lambda o: (not o.required, o.name))
        return rv

    def format_options(self, ctx, formatter) -> None:
        """Writes all the options into the formatter if they exist."""
        opts = []
        for param in self.get_params(ctx):
            rv = param.get_help_record(ctx)
            if rv is not None:
                opts.append(rv)
        if opts:
            with formatter.section("Options"):
                self.write_dl(formatter, opts)

    @staticmethod
    def write_dl(formatter, rows: Sequence[Tuple[str, str]], col_max: int = 30, col_spacing: int = 2) -> None:
        """Write a two-column (term, help text) definition list to the formatter.

        Mirrors click's ``HelpFormatter.write_dl``, except that any
        ``[...]`` metadata in the help text is moved to the front of the line.

        :param rows: (term, help text) pairs.
        :param col_max: maximum width of the first (term) column.
        :param col_spacing: gap between the two columns.
        """
        rows = list(rows)
        widths = measure_table(rows)
        if len(widths) != 2:
            raise TypeError("Expected two columns for definition list")
        first_col = min(widths[0], col_max) + col_spacing
        for first, second in iter_rows(rows, len(widths)):
            # BUG FIX: use single quotes for the empty-string literal inside
            # this double-quoted f-string; reusing double quotes is a
            # SyntaxError on Python < 3.12 (quote reuse arrived with PEP 701).
            formatter.write(f"{'':>{formatter.current_indent}}{first}")
            if not second:
                formatter.write("\n")
                continue
            if len(first) <= first_col - col_spacing:
                # Term fits: pad out to the help-text column.
                formatter.write(" " * (first_col - len(first)))
            else:
                # Term too wide: continue the help text on its own line.
                formatter.write("\n")
                formatter.write(" " * (first_col + formatter.current_indent))
            if "[" in second:
                # Hoist "[...]"-style metadata (e.g. "[required]") to the front.
                # NOTE(review): assumes at most one '[' in the help text; a
                # second '[' would make this unpacking raise ValueError.
                text, meta = second.split("[")
                formatter.write(f"[{meta} {text}\n")
            else:
                formatter.write(f"{second}\n")
def add_options(options):
    """Return a decorator that attaches each click option in ``options``.

    The options are applied last-to-first so that they render on the
    resulting command in the order they were declared.
    """
    def _add_options(func):
        wrapped = func
        for apply_option in options[::-1]:
            wrapped = apply_option(wrapped)
        return wrapped
    return _add_options
class Defaults:
    """Default values for the shared command-line options declared below."""

    # Worker image executed on provisioned compute (``--docker_image``).
    DOCKER_IMAGE = "docker.io/yellowdogco/virtual-screening-worker-public:3.3.0"
    # Platform API endpoint (``--platform_api_url``).
    PORTAL_API_URL = "https://portal.yellowdog.co/api"
    # Namespace under which work and compute are created (``--namespace``).
    NAMESPACE = "virtual-screening"
    # Retry count for failed tasks (``--retries``).
    RETRIES = 10
# Command-line options common to every sub-command; attach them with the
# add_options() decorator defined above.
shared_options = [
    click.option("--api_key_id", envvar="API_KEY_ID", required=True,
                 help="The application's API key ID for authenticating with the platform API. It is recommended to "
                      "supply this via the environment variable API_KEY_ID"),
    click.option("--api_key_secret", envvar="API_KEY_SECRET", required=True,
                 help="The application's API key secret for authenticating with the platform API. It is recommended to "
                      "supply this via the environment variable API_KEY_SECRET"),
    click.option("--template_id", envvar="TEMPLATE_ID", required=True,
                 help="The compute requirement template ID to use for provisioning compute"),
    click.option("--platform_api_url", envvar="PLATFORM_API_URL", default=Defaults.PORTAL_API_URL,
                 help="The URL of the platform API"),
    click.option("--namespace", envvar="NAMESPACE", default=Defaults.NAMESPACE,
                 help="The namespace within which all work and compute will be created"),
    click.option("--docker_image", envvar="DOCKER_IMAGE", default=Defaults.DOCKER_IMAGE,
                 help="The docker image that will be executed by the workers"),
    click.option("--retries", envvar="RETRIES", type=int, default=Defaults.RETRIES,
                 help="The number of times each failed task should be retried"),
]
| import click
from typing import Sequence, Tuple
from click.formatting import measure_table, iter_rows
class OrderedCommand(click.Command):
    """A click Command whose help output lists required options first."""

    def get_params(self, ctx):
        """Return this command's parameters, required ones first, then by name."""
        rv = super().get_params(ctx)
        # (not required, name): False sorts before True, so required options
        # lead; ties break alphabetically.
        rv.sort(key=lambda o: (not o.required, o.name))
        return rv

    def format_options(self, ctx, formatter) -> None:
        """Writes all the options into the formatter if they exist."""
        opts = []
        for param in self.get_params(ctx):
            rv = param.get_help_record(ctx)
            if rv is not None:
                opts.append(rv)
        if opts:
            with formatter.section("Options"):
                self.write_dl(formatter, opts)

    @staticmethod
    def write_dl(formatter, rows: Sequence[Tuple[str, str]], col_max: int = 30, col_spacing: int = 2) -> None:
        """Write a two-column (term, help text) definition list to the formatter.

        Mirrors click's ``HelpFormatter.write_dl``, except that any
        ``[...]`` metadata in the help text is moved to the front of the line.

        :param rows: (term, help text) pairs.
        :param col_max: maximum width of the first (term) column.
        :param col_spacing: gap between the two columns.
        """
        rows = list(rows)
        widths = measure_table(rows)
        if len(widths) != 2:
            raise TypeError("Expected two columns for definition list")
        first_col = min(widths[0], col_max) + col_spacing
        for first, second in iter_rows(rows, len(widths)):
            formatter.write(f"{'':>{formatter.current_indent}}{first}")
            if not second:
                formatter.write("\n")
                continue
            if len(first) <= first_col - col_spacing:
                # Term fits: pad out to the help-text column.
                formatter.write(" " * (first_col - len(first)))
            else:
                # Term too wide: continue the help text on its own line.
                formatter.write("\n")
                formatter.write(" " * (first_col + formatter.current_indent))
            if "[" in second:
                # Hoist "[...]"-style metadata (e.g. "[required]") to the front.
                # NOTE(review): assumes at most one '[' in the help text; a
                # second '[' would make this unpacking raise ValueError.
                text, meta = second.split("[")
                formatter.write(f"[{meta} {text}\n")
            else:
                formatter.write(f"{second}\n")
def add_options(options):
    """Return a decorator that attaches each click option in ``options``.

    The options are applied last-to-first so that they render on the
    resulting command in the order they were declared.
    """
    def _add_options(func):
        wrapped = func
        for apply_option in options[::-1]:
            wrapped = apply_option(wrapped)
        return wrapped
    return _add_options
class Defaults:
    """Default values for the shared command-line options declared below."""

    # Worker image executed on provisioned compute (``--docker_image``).
    DOCKER_IMAGE = "docker.io/yellowdogco/virtual-screening-worker-public:3.3.0"
    # Platform API endpoint (``--platform_api_url``).
    PORTAL_API_URL = "https://portal.yellowdog.co/api"
    # Namespace under which work and compute are created (``--namespace``).
    NAMESPACE = "virtual-screening"
    # Retry count for failed tasks (``--retries``).
    RETRIES = 10
# Command-line options common to every sub-command; attach them with the
# add_options() decorator defined above.
shared_options = [
    click.option("--api_key_id", envvar="API_KEY_ID", required=True,
                 help="The application's API key ID for authenticating with the platform API. It is recommended to "
                      "supply this via the environment variable API_KEY_ID"),
    click.option("--api_key_secret", envvar="API_KEY_SECRET", required=True,
                 help="The application's API key secret for authenticating with the platform API. It is recommended to "
                      "supply this via the environment variable API_KEY_SECRET"),
    click.option("--template_id", envvar="TEMPLATE_ID", required=True,
                 help="The compute requirement template ID to use for provisioning compute"),
    click.option("--platform_api_url", envvar="PLATFORM_API_URL", default=Defaults.PORTAL_API_URL,
                 help="The URL of the platform API"),
    click.option("--namespace", envvar="NAMESPACE", default=Defaults.NAMESPACE,
                 help="The namespace within which all work and compute will be created"),
    click.option("--docker_image", envvar="DOCKER_IMAGE", default=Defaults.DOCKER_IMAGE,
                 help="The docker image that will be executed by the workers"),
    click.option("--retries", envvar="RETRIES", type=int, default=Defaults.RETRIES,
                 help="The number of times each failed task should be retried"),
]
|
# Copyright 2012-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from .. import mparser
from .. import environment
from .. import coredata
from .. import dependencies
from .. import mlog
from .. import build
from .. import optinterpreter
from .. import compilers
from .. import envconfig
from ..wrap import wrap, WrapMode
from .. import mesonlib
from ..mesonlib import MesonBugException, HoldableObject, FileMode, MachineChoice, OptionKey, listify, extract_as_list, has_path_sep
from ..programs import ExternalProgram, NonExistingExternalProgram
from ..dependencies import Dependency
from ..depfile import DepFile
from ..interpreterbase import ContainerTypeInfo, InterpreterBase, KwargInfo, typed_kwargs, typed_pos_args
from ..interpreterbase import noPosargs, noKwargs, permittedKwargs, noArgsFlattening, noSecondLevelHolderResolving, unholder_return
from ..interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest
from ..interpreterbase import Disabler, disablerIfNotFound
from ..interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs, FeatureDeprecatedKwargs
from ..interpreterbase import ObjectHolder
from ..modules import ExtensionModule, ModuleObject, MutableModuleObject, NewExtensionModule, NotFoundExtensionModule
from ..cmake import CMakeInterpreter
from ..backend.backends import ExecutableSerialisation
from . import interpreterobjects as OBJ
from . import compiler as compilerOBJ
from .mesonmain import MesonMain
from .dependencyfallbacks import DependencyFallbacksHolder
from .interpreterobjects import (
SubprojectHolder,
Test,
RunProcess,
extract_required_kwarg,
extract_search_dirs,
NullSubprojectInterpreter,
)
from .type_checking import (
COMMAND_KW,
CT_BUILD_ALWAYS,
CT_BUILD_ALWAYS_STALE,
CT_BUILD_BY_DEFAULT,
CT_INPUT_KW,
CT_INSTALL_DIR_KW,
CT_OUTPUT_KW,
DEFAULT_OPTIONS,
DEPENDS_KW,
DEPEND_FILES_KW,
DEPFILE_KW,
DISABLER_KW,
ENV_KW,
ENV_METHOD_KW,
ENV_SEPARATOR_KW,
INSTALL_KW,
INSTALL_MODE_KW,
CT_INSTALL_TAG_KW,
INSTALL_TAG_KW,
LANGUAGE_KW,
NATIVE_KW, OVERRIDE_OPTIONS_KW,
REQUIRED_KW,
NoneType,
in_set_validator,
env_convertor_with_method
)
from . import primitives as P_OBJ
from pathlib import Path
import os
import shutil
import uuid
import re
import stat
import collections
import typing as T
import textwrap
import importlib
if T.TYPE_CHECKING:
import argparse
from typing_extensions import Literal
from . import kwargs
from ..backend.backends import Backend
from ..interpreterbase.baseobjects import InterpreterObject, TYPE_var, TYPE_kwargs
from ..programs import OverrideProgram
# Input source types passed to Targets
# (build.GeneratedList was previously listed twice in each of these unions;
# T.Union de-duplicates its members, so dropping the repeats is a no-op.)
SourceInputs = T.Union[mesonlib.File, build.GeneratedList, build.BuildTarget, build.BothLibraries,
                       build.CustomTargetIndex, build.CustomTarget,
                       build.ExtractedObjects, str]
# Input source types passed to the build.Target classes
SourceOutputs = T.Union[mesonlib.File, build.GeneratedList,
                        build.BuildTarget, build.CustomTargetIndex, build.CustomTarget,
                        build.ExtractedObjects, build.StructuredSources]
def _project_version_validator(value: T.Union[T.List, str, mesonlib.File, None]) -> T.Optional[str]:
if isinstance(value, list):
if len(value) != 1:
return 'when passed as array must have a length of 1'
elif not isinstance(value[0], mesonlib.File):
return 'when passed as array must contain a File'
return None
def stringifyUserArguments(args: T.List[T.Any], quote: bool = False) -> str:
    """Render a user-supplied value as meson-language source text.

    Containers are rendered recursively with their elements quoted;
    a top-level string is only quoted when ``quote`` is True.

    :raises InvalidArguments: for any unsupported value type.
    """
    if isinstance(args, list):
        body = ', '.join(stringifyUserArguments(item, True) for item in args)
        return f'[{body}]'
    if isinstance(args, dict):
        pairs = ('{} : {}'.format(stringifyUserArguments(key, True),
                                  stringifyUserArguments(val, True))
                 for key, val in args.items())
        return '{%s}' % ', '.join(pairs)
    if isinstance(args, bool):
        # bool is tested before int because bool is a subclass of int.
        return 'true' if args else 'false'
    if isinstance(args, int):
        return str(args)
    if isinstance(args, str):
        return f"'{args}'" if quote else args
    raise InvalidArguments('Function accepts only strings, integers, bools, lists, dictionaries and lists thereof.')
class Summary:
    """Collects summary() output and pretty-prints it when configure finishes.

    Data layout: sections maps section name to
    {key: (formatted value list, list separator)}.
    """

    def __init__(self, project_name: str, project_version: str):
        self.project_name = project_name
        self.project_version = project_version
        # section name -> {key -> (formatted values, list_sep)}
        self.sections = collections.defaultdict(dict)
        # Longest key seen so far; used to align the ':' column in dump().
        self.max_key_len = 0

    def add_section(self, section: str, values: T.Dict[str, T.Any], bool_yn: bool,
                    list_sep: T.Optional[str], subproject: str) -> None:
        """Add key/value pairs to a section, formatting each value for display.

        Raises InterpreterException on a duplicate key within the section or
        on an unsupported value type.
        """
        for k, v in values.items():
            if k in self.sections[section]:
                raise InterpreterException(f'Summary section {section!r} already have key {k!r}')
            formatted_values = []
            for i in listify(v):
                if isinstance(i, bool) and bool_yn:
                    # Checked before the (str, int, bool) branch below so the
                    # coloured YES/NO wins when bool_yn is requested.
                    formatted_values.append(mlog.green('YES') if i else mlog.red('NO'))
                elif isinstance(i, (str, int, bool)):
                    formatted_values.append(str(i))
                elif isinstance(i, (ExternalProgram, Dependency)):
                    FeatureNew.single_use('dependency or external program in summary', '0.57.0', subproject)
                    formatted_values.append(i.summary_value())
                elif isinstance(i, coredata.UserOption):
                    FeatureNew.single_use('feature option in summary', '0.58.0', subproject)
                    formatted_values.append(i.printable_value())
                else:
                    m = 'Summary value in section {!r}, key {!r}, must be string, integer, boolean, dependency or external program'
                    raise InterpreterException(m.format(section, k))
            self.sections[section][k] = (formatted_values, list_sep)
            self.max_key_len = max(self.max_key_len, len(k))

    def dump(self):
        """Print the whole summary via mlog, one section at a time."""
        mlog.log(self.project_name, mlog.normal_cyan(self.project_version))
        for section, values in self.sections.items():
            mlog.log('')  # newline
            if section:
                mlog.log(' ', mlog.bold(section))
            for k, v in values.items():
                v, list_sep = v
                padding = self.max_key_len - len(k)
                end = ' ' if v else ''
                mlog.log(' ' * 3, k + ' ' * padding + ':', end=end)
                # Continuation lines line up under the first value.
                indent = self.max_key_len + 6
                self.dump_value(v, list_sep, indent)
        mlog.log('')  # newline

    def dump_value(self, arr, list_sep, indent):
        """Print one value list, wrapping at the terminal width.

        When list_sep is None, every value goes on its own line; otherwise
        values are joined with list_sep and wrapped to the terminal width,
        continuing at column ``indent``.
        """
        lines_sep = '\n' + ' ' * indent
        if list_sep is None:
            mlog.log(*arr, sep=lines_sep)
            return
        max_len = shutil.get_terminal_size().columns
        line = []
        line_len = indent
        lines_sep = list_sep.rstrip() + lines_sep
        for v in arr:
            v_len = len(v) + len(list_sep)
            if line and line_len + v_len > max_len:
                # Current line is full: flush it and start a new one.
                mlog.log(*line, sep=list_sep, end=lines_sep)
                line_len = indent
                line = []
            line.append(v)
            line_len += v_len
        mlog.log(*line, sep=list_sep)
# Keyword arguments accepted by both shared_library() and static_library().
known_library_kwargs = (
    build.known_shlib_kwargs |
    build.known_stlib_kwargs
)

# Keyword arguments accepted by build_target() (superset of all target kinds).
known_build_target_kwargs = (
    known_library_kwargs |
    build.known_exe_kwargs |
    build.known_jar_kwargs |
    {'target_type'}
)

# Typed keyword arguments shared by test() and benchmark().
TEST_KWARGS: T.List[KwargInfo] = [
    KwargInfo('args', ContainerTypeInfo(list, (str, mesonlib.File, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex)),
              listify=True, default=[]),
    KwargInfo('should_fail', bool, default=False),
    KwargInfo('timeout', int, default=30),
    KwargInfo('workdir', (str, NoneType), default=None,
              validator=lambda x: 'must be an absolute path' if not os.path.isabs(x) else None),
    KwargInfo('protocol', str,
              default='exitcode',
              validator=in_set_validator({'exitcode', 'tap', 'gtest', 'rust'}),
              since_values={'gtest': '0.55.0', 'rust': '0.57.0'}),
    KwargInfo('priority', int, default=0, since='0.52.0'),
    # TODO: env needs reworks of the way the environment variable holder itself works probably
    ENV_KW,
    DEPENDS_KW.evolve(since='0.46.0'),
    KwargInfo('suite', ContainerTypeInfo(list, str), listify=True, default=['']),  # yes, a list of empty string
    KwargInfo('verbose', bool, default=False, since='0.62.0'),
]

# Keyword arguments that dependency() accepts; validated by hand rather
# than through typed_kwargs.
permitted_dependency_kwargs = {
    'allow_fallback',
    'cmake_args',
    'cmake_module_path',
    'cmake_package_version',
    'components',
    'default_options',
    'fallback',
    'include_type',
    'language',
    'main',
    'method',
    'modules',
    'native',
    'not_found_message',
    'optional_modules',
    'private_headers',
    'required',
    'static',
    'version',
}

# Warning shown when run_command() is called without an explicit check= kwarg.
implicit_check_false_warning = """You should add the boolean check kwarg to the run_command call.
It currently defaults to false,
but it will default to true in future releases of meson.
See also: https://github.com/mesonbuild/meson/issues/9300"""
class Interpreter(InterpreterBase, HoldableObject):
    def __init__(
            self,
            _build: build.Build,
            backend: T.Optional[Backend] = None,
            subproject: str = '',
            subdir: str = '',
            subproject_dir: str = 'subprojects',
            default_project_options: T.Optional[T.Dict[OptionKey, str]] = None,
            mock: bool = False,
            ast: T.Optional[mparser.CodeBlockNode] = None,
            is_translated: bool = False,
            user_defined_options: T.Optional['argparse.Namespace'] = None,
    ) -> None:
        """Create an interpreter for one (sub)project's build definition.

        When ``mock`` is True, no build file is loaded or parsed; when
        ``ast`` is given, it is used instead of reading the meson.build
        from disk.
        """
        super().__init__(_build.environment.get_source_dir(), subdir, subproject)
        self.active_projectname = ''
        self.build = _build
        self.environment = self.build.environment
        self.coredata = self.environment.get_coredata()
        self.backend = backend
        self.summary: T.Dict[str, 'Summary'] = {}
        self.modules: T.Dict[str, NewExtensionModule] = {}
        # Subproject directory is usually the name of the subproject, but can
        # be different for dependencies provided by wrap files.
        self.subproject_directory_name = subdir.split(os.path.sep)[-1]
        self.subproject_dir = subproject_dir
        self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt')
        # Load the AST: from disk unless a pre-parsed AST (or a mock
        # interpreter) was requested.
        if not mock and ast is None:
            self.load_root_meson_file()
            self.sanity_check_ast()
        elif ast is not None:
            self.ast = ast
            self.sanity_check_ast()
        self.builtin.update({'meson': MesonMain(self.build, self)})
        self.generators: T.List[build.Generator] = []
        self.processed_buildfiles = set() # type: T.Set[str]
        self.project_args_frozen = False
        self.global_args_frozen = False  # implies self.project_args_frozen
        self.subprojects: T.Dict[str, SubprojectHolder] = {}
        self.subproject_stack: T.List[str] = []
        self.configure_file_outputs: T.Dict[str, int] = {}
        # Passed from the outside, only used in subprojects.
        if default_project_options:
            self.default_project_options = default_project_options.copy()
        else:
            self.default_project_options = {}
        self.project_default_options: T.Dict[OptionKey, str] = {}
        self.build_func_dict()
        self.build_holder_map()
        self.user_defined_options = user_defined_options
        # build_def_files needs to be defined before parse_project is called
        #
        # For non-meson subprojects, we'll be using the ast. Even if it does
        # exist we don't want to add a dependency on it, it's autogenerated
        # from the actual build files, and is just for reference.
        self.build_def_files: mesonlib.OrderedSet[str] = mesonlib.OrderedSet()
        build_filename = os.path.join(self.subdir, environment.build_filename)
        if not is_translated:
            self.build_def_files.add(build_filename)
        if not mock:
            self.parse_project()
        self._redetect_machines()
    def __getnewargs_ex__(self) -> T.Tuple[T.Tuple[object], T.Dict[str, object]]:
        # Interpreters hold live build state; refuse pickling outright so any
        # accidental attempt fails loudly as a meson bug.
        raise MesonBugException('This class is unpicklable')
def _redetect_machines(self) -> None:
# Re-initialize machine descriptions. We can do a better job now because we
# have the compilers needed to gain more knowledge, so wipe out old
# inference and start over.
machines = self.build.environment.machines.miss_defaulting()
machines.build = environment.detect_machine_info(self.coredata.compilers.build)
self.build.environment.machines = machines.default_missing()
assert self.build.environment.machines.build.cpu is not None
assert self.build.environment.machines.host.cpu is not None
assert self.build.environment.machines.target.cpu is not None
self.builtin['build_machine'] = \
OBJ.MachineHolder(self.build.environment.machines.build, self)
self.builtin['host_machine'] = \
OBJ.MachineHolder(self.build.environment.machines.host, self)
self.builtin['target_machine'] = \
OBJ.MachineHolder(self.build.environment.machines.target, self)
    def build_func_dict(self) -> None:
        """Populate self.funcs with every function callable from meson.build,
        mapping the meson-language name to its implementing method."""
        self.funcs.update({'add_global_arguments': self.func_add_global_arguments,
                           'add_global_link_arguments': self.func_add_global_link_arguments,
                           'add_languages': self.func_add_languages,
                           'add_project_arguments': self.func_add_project_arguments,
                           'add_project_link_arguments': self.func_add_project_link_arguments,
                           'add_test_setup': self.func_add_test_setup,
                           'alias_target': self.func_alias_target,
                           'assert': self.func_assert,
                           'benchmark': self.func_benchmark,
                           'both_libraries': self.func_both_lib,
                           'build_target': self.func_build_target,
                           'configuration_data': self.func_configuration_data,
                           'configure_file': self.func_configure_file,
                           'custom_target': self.func_custom_target,
                           'declare_dependency': self.func_declare_dependency,
                           'dependency': self.func_dependency,
                           'disabler': self.func_disabler,
                           'environment': self.func_environment,
                           'error': self.func_error,
                           'executable': self.func_executable,
                           'files': self.func_files,
                           'find_library': self.func_find_library,
                           'find_program': self.func_find_program,
                           'generator': self.func_generator,
                           'get_option': self.func_get_option,
                           'get_variable': self.func_get_variable,
                           'gettext': self.func_gettext,
                           'import': self.func_import,
                           'include_directories': self.func_include_directories,
                           'install_data': self.func_install_data,
                           'install_emptydir': self.func_install_emptydir,
                           'install_headers': self.func_install_headers,
                           'install_man': self.func_install_man,
                           'install_subdir': self.func_install_subdir,
                           'install_symlink': self.func_install_symlink,
                           'is_disabler': self.func_is_disabler,
                           'is_variable': self.func_is_variable,
                           'jar': self.func_jar,
                           'join_paths': self.func_join_paths,
                           'library': self.func_library,
                           'message': self.func_message,
                           'option': self.func_option,
                           'project': self.func_project,
                           'range': self.func_range,
                           'run_command': self.func_run_command,
                           'run_target': self.func_run_target,
                           'set_variable': self.func_set_variable,
                           'structured_sources': self.func_structured_sources,
                           'subdir': self.func_subdir,
                           'shared_library': self.func_shared_lib,
                           'shared_module': self.func_shared_module,
                           'static_library': self.func_static_lib,
                           'subdir_done': self.func_subdir_done,
                           'subproject': self.func_subproject,
                           'summary': self.func_summary,
                           'test': self.func_test,
                           'unset_variable': self.func_unset_variable,
                           'vcs_tag': self.func_vcs_tag,
                           'warning': self.func_warning,
                           })
        if 'MESON_UNIT_TEST' in os.environ:
            # 'exception' is only exposed while running meson's own unit tests.
            self.funcs.update({'exception': self.func_exception})
    def build_holder_map(self) -> None:
        '''
        Build a mapping of `HoldableObject` types to their corresponding
        `ObjectHolder`s. This mapping is used in `InterpreterBase` to automatically
        holderify all returned values from methods and functions.
        '''
        # Exact-type lookups: a returned value's type() must match a key here.
        self.holder_map.update({
            # Primitives
            list: P_OBJ.ArrayHolder,
            dict: P_OBJ.DictHolder,
            int: P_OBJ.IntegerHolder,
            bool: P_OBJ.BooleanHolder,
            str: P_OBJ.StringHolder,
            P_OBJ.MesonVersionString: P_OBJ.MesonVersionStringHolder,
            # Meson types
            mesonlib.File: OBJ.FileHolder,
            build.SharedLibrary: OBJ.SharedLibraryHolder,
            build.StaticLibrary: OBJ.StaticLibraryHolder,
            build.BothLibraries: OBJ.BothLibrariesHolder,
            build.SharedModule: OBJ.SharedModuleHolder,
            build.Executable: OBJ.ExecutableHolder,
            build.Jar: OBJ.JarHolder,
            build.CustomTarget: OBJ.CustomTargetHolder,
            build.CustomTargetIndex: OBJ.CustomTargetIndexHolder,
            build.Generator: OBJ.GeneratorHolder,
            build.GeneratedList: OBJ.GeneratedListHolder,
            build.ExtractedObjects: OBJ.GeneratedObjectsHolder,
            build.RunTarget: OBJ.RunTargetHolder,
            build.AliasTarget: OBJ.AliasTargetHolder,
            build.Headers: OBJ.HeadersHolder,
            build.Man: OBJ.ManHolder,
            build.EmptyDir: OBJ.EmptyDirHolder,
            build.Data: OBJ.DataHolder,
            build.SymlinkData: OBJ.SymlinkDataHolder,
            build.InstallDir: OBJ.InstallDirHolder,
            build.IncludeDirs: OBJ.IncludeDirsHolder,
            build.EnvironmentVariables: OBJ.EnvironmentVariablesHolder,
            build.StructuredSources: OBJ.StructuredSourcesHolder,
            compilers.RunResult: compilerOBJ.TryRunResultHolder,
            dependencies.ExternalLibrary: OBJ.ExternalLibraryHolder,
            coredata.UserFeatureOption: OBJ.FeatureOptionHolder,
            envconfig.MachineInfo: OBJ.MachineHolder,
            build.ConfigurationData: OBJ.ConfigurationDataHolder,
        })
        '''
        Build a mapping of `HoldableObject` base classes to their
        corresponding `ObjectHolder`s. The difference to `self.holder_map`
        is that the keys here define an upper bound instead of requiring an
        exact match.
        The mappings defined here are only used when there was no direct hit
        found in `self.holder_map`.
        '''
        # Fallback lookups matched by subclass relationship, not exact type.
        self.bound_holder_map.update({
            dependencies.Dependency: OBJ.DependencyHolder,
            ExternalProgram: OBJ.ExternalProgramHolder,
            compilers.Compiler: compilerOBJ.CompilerHolder,
            ModuleObject: OBJ.ModuleObjectHolder,
            MutableModuleObject: OBJ.MutableModuleObjectHolder,
        })
def append_holder_map(self, held_type: T.Type[mesonlib.HoldableObject], holder_type: T.Type[ObjectHolder]) -> None:
'''
Adds one additional mapping to the `holder_map`.
The intended use for this function is in the `initialize` method of
modules to register custom object holders.
'''
self.holder_map.update({
held_type: holder_type
})
def process_new_values(self, invalues: T.List[T.Union[TYPE_var, ExecutableSerialisation]]) -> None:
invalues = listify(invalues)
for v in invalues:
if isinstance(v, ObjectHolder):
raise InterpreterException('Modules must not return ObjectHolders')
if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)):
self.add_target(v.name, v)
elif isinstance(v, list):
self.process_new_values(v)
elif isinstance(v, ExecutableSerialisation):
v.subproject = self.subproject
self.build.install_scripts.append(v)
elif isinstance(v, build.Data):
self.build.data.append(v)
elif isinstance(v, build.SymlinkData):
self.build.symlinks.append(v)
elif isinstance(v, dependencies.InternalDependency):
# FIXME: This is special cased and not ideal:
# The first source is our new VapiTarget, the rest are deps
self.process_new_values(v.sources[0])
elif isinstance(v, build.InstallDir):
self.build.install_dirs.append(v)
elif isinstance(v, Test):
self.build.tests.append(v)
elif isinstance(v, (int, str, bool, Disabler, ObjectHolder, build.GeneratedList,
ExternalProgram, build.ConfigurationData)):
pass
else:
raise InterpreterException(f'Module returned a value of unknown type {v!r}.')
    def get_build_def_files(self) -> mesonlib.OrderedSet[str]:
        # Files that, when modified, should trigger re-running configuration.
        return self.build_def_files
    def add_build_def_file(self, f: mesonlib.FileOrString) -> None:
        '''Record *f* as a file whose change must trigger reconfiguration.'''
        # Use relative path for files within source directory, and absolute path
        # for system files. Skip files within build directory. Also skip not regular
        # files (e.g. /dev/stdout) Normalize the path to avoid duplicates, this
        # is especially important to convert '/' to '\' on Windows.
        if isinstance(f, mesonlib.File):
            if f.is_built:
                # Generated files live in the build dir and are never tracked.
                return
            f = os.path.normpath(f.relative_name())
        elif os.path.isfile(f) and not f.startswith('/dev'):
            srcdir = Path(self.environment.get_source_dir())
            builddir = Path(self.environment.get_build_dir())
            try:
                f_ = Path(f).resolve()
            except OSError:
                f_ = Path(f)
                s = f_.stat()
                if (hasattr(s, 'st_file_attributes') and
                        s.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT != 0 and
                        s.st_reparse_tag == stat.IO_REPARSE_TAG_APPEXECLINK):
                    # This is a Windows Store link which we can't
                    # resolve, so just do our best otherwise.
                    f_ = f_.parent.resolve() / f_.name
                else:
                    raise
            if builddir in f_.parents:
                return
            if srcdir in f_.parents:
                f_ = f_.relative_to(srcdir)
            f = str(f_)
        else:
            # Not a File object and not a regular on-disk file: ignore.
            return
        if f not in self.build_def_files:
            self.build_def_files.add(f)
    def get_variables(self) -> T.Dict[str, InterpreterObject]:
        # Mapping of currently defined Meson variables for this interpreter.
        return self.variables
    def check_stdlibs(self) -> None:
        '''Resolve per-language stdlib overrides from the machine files and
        store the resulting dependencies in ``self.build.stdlibs``.'''
        machine_choices = [MachineChoice.HOST]
        if self.coredata.is_cross_build():
            machine_choices.append(MachineChoice.BUILD)
        for for_machine in machine_choices:
            props = self.build.environment.properties[for_machine]
            for l in self.coredata.compilers[for_machine].keys():
                try:
                    di = mesonlib.stringlistify(props.get_stdlib(l))
                except KeyError:
                    # No stdlib declared for this language; nothing to do.
                    continue
                if len(di) == 1:
                    FeatureNew.single_use('stdlib without variable name', '0.56.0', self.subproject, location=self.current_node)
                kwargs = {'native': for_machine is MachineChoice.BUILD,
                          }
                name = l + '_stdlib'
                # Always resolve through the subproject fallback machinery.
                df = DependencyFallbacksHolder(self, [name])
                df.set_fallback(di)
                dep = df.lookup(kwargs, force_fallback=True)
                self.build.stdlibs[for_machine][l] = dep
def _import_module(self, modname: str, required: bool) -> NewExtensionModule:
if modname in self.modules:
return self.modules[modname]
try:
module = importlib.import_module('mesonbuild.modules.' + modname)
except ImportError:
if required:
raise InvalidArguments(f'Module "{modname}" does not exist')
ext_module = NotFoundExtensionModule()
else:
ext_module = module.initialize(self)
assert isinstance(ext_module, (ExtensionModule, NewExtensionModule))
self.build.modules.append(modname)
self.modules[modname] = ext_module
return ext_module
    @typed_pos_args('import', str)
    @typed_kwargs(
        'import',
        REQUIRED_KW.evolve(since='0.59.0'),
        DISABLER_KW.evolve(since='0.59.0'),
    )
    @disablerIfNotFound
    def func_import(self, node: mparser.BaseNode, args: T.Tuple[str],
                    kwargs: 'kwargs.FuncImportModule') -> T.Union[ExtensionModule, NewExtensionModule, NotFoundExtensionModule]:
        '''Implementation of `import()`: load a Meson module, honouring the
        required/disabler kwargs and the legacy `unstable-` name prefix.'''
        modname = args[0]
        disabled, required, _ = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            return NotFoundExtensionModule()
        if modname.startswith('unstable-'):
            plainname = modname.split('-', 1)[1]
            try:
                # check if stable module exists
                mod = self._import_module(plainname, required)
                # XXX: this is actually not helpful, since it doesn't do a version check
                mlog.warning(f'Module {modname} is now stable, please use the {plainname} module instead.')
                return mod
            except InvalidArguments:
                mlog.warning(f'Module {modname} has no backwards or forwards compatibility and might not exist in future releases.', location=node)
                modname = 'unstable_' + plainname
        return self._import_module(modname, required)
@typed_pos_args('files', varargs=str)
@noKwargs
def func_files(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'TYPE_kwargs') -> T.List[mesonlib.File]:
return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args[0]]
# Used by declare_dependency() and pkgconfig.generate()
def extract_variables(self, kwargs, argname='variables', list_new=False, dict_new=False):
variables = kwargs.get(argname, {})
if isinstance(variables, dict):
if dict_new and variables:
FeatureNew.single_use(f'{argname} as dictionary', '0.56.0', self.subproject, location=self.current_node)
else:
varlist = mesonlib.stringlistify(variables)
if list_new:
FeatureNew.single_use(f'{argname} as list of strings', '0.56.0', self.subproject, location=self.current_node)
variables = collections.OrderedDict()
for v in varlist:
try:
(key, value) = v.split('=', 1)
except ValueError:
raise InterpreterException(f'Variable {v!r} must have a value separated by equals sign.')
variables[key.strip()] = value.strip()
for k, v in variables.items():
if not k or not v:
raise InterpreterException('Empty variable name or value')
if any(c.isspace() for c in k):
raise InterpreterException(f'Invalid whitespace in variable name "{k}"')
if not isinstance(v, str):
raise InterpreterException('variables values must be strings.')
return variables
    @FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole'])
    @FeatureNewKwargs('declare_dependency', '0.54.0', ['variables'])
    @FeatureNewKwargs('declare_dependency', '0.62.0', ['d_module_versions', 'd_import_dirs'])
    @permittedKwargs({'include_directories', 'link_with', 'sources', 'dependencies',
                      'compile_args', 'link_args', 'link_whole', 'version',
                      'variables', 'd_module_versions', 'd_import_dirs'})
    @noPosargs
    def func_declare_dependency(self, node, args, kwargs):
        '''Implementation of `declare_dependency()`: bundle sources, libraries,
        flags and variables into an InternalDependency consumable by targets.'''
        version = kwargs.get('version', self.project_version)
        if not isinstance(version, str):
            raise InterpreterException('Version must be a string.')
        incs = self.extract_incdirs(kwargs)
        libs = extract_as_list(kwargs, 'link_with')
        libs_whole = extract_as_list(kwargs, 'link_whole')
        sources = extract_as_list(kwargs, 'sources')
        sources = listify(self.source_strings_to_files(sources))
        deps = extract_as_list(kwargs, 'dependencies')
        compile_args = mesonlib.stringlistify(kwargs.get('compile_args', []))
        link_args = mesonlib.stringlistify(kwargs.get('link_args', []))
        variables = self.extract_variables(kwargs, list_new=True)
        d_module_versions = extract_as_list(kwargs, 'd_module_versions')
        d_import_dirs = self.extract_incdirs(kwargs, 'd_import_dirs')
        final_deps = []
        for d in deps:
            # Only dependency-like objects are allowed; build targets go in link_with.
            if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)):
                raise InterpreterException('Dependencies must be external deps')
            final_deps.append(d)
        for l in libs:
            if isinstance(l, dependencies.Dependency):
                raise InterpreterException('''Entries in "link_with" may only be self-built targets,
external dependencies (including libraries) must go to "dependencies".''')
        dep = dependencies.InternalDependency(version, incs, compile_args,
                                              link_args, libs, libs_whole, sources, final_deps,
                                              variables, d_module_versions, d_import_dirs)
        return dep
@typed_pos_args('assert', bool, optargs=[str])
@noKwargs
def func_assert(self, node: mparser.FunctionNode, args: T.Tuple[bool, T.Optional[str]],
kwargs: 'TYPE_kwargs') -> None:
value, message = args
if message is None:
FeatureNew.single_use('assert function without message argument', '0.53.0', self.subproject, location=node)
if not value:
if message is None:
from ..ast import AstPrinter
printer = AstPrinter()
node.args.arguments[0].accept(printer)
message = printer.result
raise InterpreterException('Assert failed: ' + message)
def validate_arguments(self, args, argcount, arg_types):
if argcount is not None:
if argcount != len(args):
raise InvalidArguments(f'Expected {argcount} arguments, got {len(args)}.')
for actual, wanted in zip(args, arg_types):
if wanted is not None:
if not isinstance(actual, wanted):
raise InvalidArguments('Incorrect argument type.')
    # Executables aren't actually accepted, but we allow them here to allow for
    # better error messages when overridden
    @typed_pos_args(
        'run_command',
        (build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str),
        varargs=(build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str))
    @typed_kwargs(
        'run_command',
        KwargInfo('check', (bool, NoneType), since='0.47.0'),
        KwargInfo('capture', bool, default=True, since='0.47.0'),
        ENV_KW.evolve(since='0.50.0'),
    )
    def func_run_command(self, node: mparser.BaseNode,
                         args: T.Tuple[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str],
                                       T.List[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str]]],
                         kwargs: 'kwargs.RunCommand') -> RunProcess:
        '''Implementation of `run_command()`; all work happens in run_command_impl().'''
        return self.run_command_impl(node, args, kwargs)
    def run_command_impl(self,
                         node: mparser.BaseNode,
                         args: T.Tuple[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str],
                                       T.List[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str]]],
                         kwargs: 'kwargs.RunCommand',
                         in_builddir: bool = False) -> RunProcess:
        '''Resolve the command and its arguments to concrete paths, record them
        as build-definition files, and run the process at configure time.

        Raises InterpreterException for overridden/compiled executables (which
        cannot run during configuration) and for unresolvable programs.
        '''
        cmd, cargs = args
        capture = kwargs['capture']
        env = kwargs['env']
        srcdir = self.environment.get_source_dir()
        builddir = self.environment.get_build_dir()
        check = kwargs['check']
        if check is None:
            # 'check' was historically implicitly false; warn once about it.
            mlog.warning(implicit_check_false_warning, once=True)
            check = False
        overridden_msg = ('Program {!r} was overridden with the compiled '
                          'executable {!r} and therefore cannot be used during '
                          'configuration')
        expanded_args: T.List[str] = []
        if isinstance(cmd, build.Executable):
            progname = node.args.arguments[0].value
            raise InterpreterException(overridden_msg.format(progname, cmd.description()))
        if isinstance(cmd, ExternalProgram):
            if not cmd.found():
                raise InterpreterException(f'command {cmd.get_name()!r} not found or not executable')
        elif isinstance(cmd, compilers.Compiler):
            # Run the compiler binary itself; remaining exelist entries become args.
            exelist = cmd.get_exelist()
            cmd = exelist[0]
            prog = ExternalProgram(cmd, silent=True)
            if not prog.found():
                raise InterpreterException(f'Program {cmd!r} not found or not executable')
            cmd = prog
            expanded_args = exelist[1:]
        else:
            if isinstance(cmd, mesonlib.File):
                cmd = cmd.absolute_path(srcdir, builddir)
            # Prefer scripts in the current source directory
            search_dir = os.path.join(srcdir, self.subdir)
            prog = ExternalProgram(cmd, silent=True, search_dir=search_dir)
            if not prog.found():
                raise InterpreterException(f'Program or command {cmd!r} not found or not executable')
            cmd = prog
        for a in cargs:
            if isinstance(a, str):
                expanded_args.append(a)
            elif isinstance(a, mesonlib.File):
                expanded_args.append(a.absolute_path(srcdir, builddir))
            elif isinstance(a, ExternalProgram):
                expanded_args.append(a.get_path())
            elif isinstance(a, compilers.Compiler):
                FeatureNew.single_use('Compiler object as a variadic argument to `run_command`', '0.61.0', self.subproject, location=node)
                prog = ExternalProgram(a.exelist[0], silent=True)
                if not prog.found():
                    raise InterpreterException(f'Program {cmd!r} not found or not executable')
                expanded_args.append(prog.get_path())
            else:
                raise InterpreterException(overridden_msg.format(a.name, cmd.description()))
        # If any file that was used as an argument to the command
        # changes, we must re-run the configuration step.
        self.add_build_def_file(cmd.get_path())
        for a in expanded_args:
            if not os.path.isabs(a):
                a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a)
            self.add_build_def_file(a)
        return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir,
                          self.environment.get_build_command() + ['introspect'],
                          in_builddir=in_builddir, check=check, capture=capture)
    def func_gettext(self, nodes, args, kwargs):
        # Removed builtin kept only to point users at the i18n module.
        raise InterpreterException('Gettext() function has been moved to module i18n. Import it and use i18n.gettext() instead')
    def func_option(self, nodes, args, kwargs):
        # option() is only valid in the separate options file, never in meson.build.
        raise InterpreterException('Tried to call option() in build description file. All options must be in the option file.')
    @typed_pos_args('subproject', str)
    @typed_kwargs(
        'subproject',
        REQUIRED_KW,
        DEFAULT_OPTIONS.evolve(since='0.38.0'),
        KwargInfo('version', ContainerTypeInfo(list, str), default=[], listify=True),
    )
    def func_subproject(self, nodes: mparser.BaseNode, args: T.Tuple[str], kwargs_: kwargs.Subproject) -> SubprojectHolder:
        '''Implementation of `subproject()`: normalize kwargs and delegate to
        do_subproject() with the meson build method.'''
        kw: kwargs.DoSubproject = {
            'required': kwargs_['required'],
            'default_options': kwargs_['default_options'],
            'version': kwargs_['version'],
            'options': None,
            'cmake_options': [],
        }
        return self.do_subproject(args[0], 'meson', kw)
def disabled_subproject(self, subp_name: str, disabled_feature: T.Optional[str] = None,
exception: T.Optional[Exception] = None) -> SubprojectHolder:
sub = SubprojectHolder(NullSubprojectInterpreter(), os.path.join(self.subproject_dir, subp_name),
disabled_feature=disabled_feature, exception=exception)
self.subprojects[subp_name] = sub
self.coredata.initialized_subprojects.add(subp_name)
return sub
    def do_subproject(self, subp_name: str, method: Literal['meson', 'cmake'], kwargs: kwargs.DoSubproject) -> SubprojectHolder:
        '''Validate, resolve and execute the subproject *subp_name*.

        Handles the disabled-feature shortcut, name validation, recursion
        detection, version checking of already-configured subprojects, wrap
        resolution, and graceful disabling of non-required failures.
        '''
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            mlog.log('Subproject', mlog.bold(subp_name), ':', 'skipped: feature', mlog.bold(feature), 'disabled')
            return self.disabled_subproject(subp_name, disabled_feature=feature)
        default_options = coredata.create_options_dict(kwargs['default_options'], subp_name)
        if subp_name == '':
            raise InterpreterException('Subproject name must not be empty.')
        if subp_name[0] == '.':
            raise InterpreterException('Subproject name must not start with a period.')
        if '..' in subp_name:
            raise InterpreterException('Subproject name must not contain a ".." path segment.')
        if os.path.isabs(subp_name):
            raise InterpreterException('Subproject name must not be an absolute path.')
        if has_path_sep(subp_name):
            mlog.warning('Subproject name has a path separator. This may cause unexpected behaviour.',
                         location=self.current_node)
        if subp_name in self.subproject_stack:
            fullstack = self.subproject_stack + [subp_name]
            incpath = ' => '.join(fullstack)
            raise InvalidCode(f'Recursive include of subprojects: {incpath}.')
        if subp_name in self.subprojects:
            # Already configured in this run: only re-check found/version status.
            subproject = self.subprojects[subp_name]
            if required and not subproject.found():
                raise InterpreterException(f'Subproject "{subproject.subdir}" required but not found.')
            if kwargs['version']:
                pv = self.build.subprojects[subp_name]
                wanted = kwargs['version']
                if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
                    raise InterpreterException(f'Subproject {subp_name} version is {pv} but {wanted} required.')
            return subproject
        r = self.environment.wrap_resolver
        try:
            subdir = r.resolve(subp_name, method)
        except wrap.WrapException as e:
            if not required:
                mlog.log(e)
                mlog.log('Subproject ', mlog.bold(subp_name), 'is buildable:', mlog.red('NO'), '(disabling)')
                return self.disabled_subproject(subp_name, exception=e)
            raise e
        subdir_abs = os.path.join(self.environment.get_source_dir(), subdir)
        os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True)
        # Global arguments must not change once any subproject has been configured.
        self.global_args_frozen = True
        stack = ':'.join(self.subproject_stack + [subp_name])
        m = ['\nExecuting subproject', mlog.bold(stack)]
        if method != 'meson':
            m += ['method', mlog.bold(method)]
        mlog.log(*m, '\n', nested=False)
        try:
            if method == 'meson':
                return self._do_subproject_meson(subp_name, subdir, default_options, kwargs)
            elif method == 'cmake':
                return self._do_subproject_cmake(subp_name, subdir, subdir_abs, default_options, kwargs)
            else:
                raise mesonlib.MesonBugException(f'The method {method} is invalid for the subproject {subp_name}')
        # Invalid code is always an error
        except InvalidCode:
            raise
        except Exception as e:
            if not required:
                with mlog.nested(subp_name):
                    # Suppress the 'ERROR:' prefix because this exception is not
                    # fatal and VS CI treat any logs with "ERROR:" as fatal.
                    mlog.exception(e, prefix=mlog.yellow('Exception:'))
                mlog.log('\nSubproject', mlog.bold(subdir), 'is buildable:', mlog.red('NO'), '(disabling)')
                return self.disabled_subproject(subp_name, exception=e)
            raise e
    def _do_subproject_meson(self, subp_name: str, subdir: str,
                             default_options: T.Dict[OptionKey, str],
                             kwargs: kwargs.DoSubproject,
                             ast: T.Optional[mparser.CodeBlockNode] = None,
                             build_def_files: T.Optional[T.List[str]] = None,
                             is_translated: bool = False) -> SubprojectHolder:
        '''Configure a Meson subproject with a nested Interpreter and merge its
        results (build data, subprojects, definition files) into this one.'''
        with mlog.nested(subp_name):
            new_build = self.build.copy()
            subi = Interpreter(new_build, self.backend, subp_name, subdir, self.subproject_dir,
                               default_options, ast=ast, is_translated=is_translated,
                               user_defined_options=self.user_defined_options)
            # Those lists are shared by all interpreters. That means that
            # even if the subproject fails, any modification that the subproject
            # made to those lists will affect the parent project.
            subi.subprojects = self.subprojects
            subi.modules = self.modules
            subi.holder_map = self.holder_map
            subi.bound_holder_map = self.bound_holder_map
            subi.summary = self.summary
            subi.subproject_stack = self.subproject_stack + [subp_name]
            current_active = self.active_projectname
            # Count warnings emitted by the subproject separately so they can be
            # attached to its SubprojectHolder.
            current_warnings_counter = mlog.log_warnings_counter
            mlog.log_warnings_counter = 0
            subi.run()
            subi_warnings = mlog.log_warnings_counter
            mlog.log_warnings_counter = current_warnings_counter
            mlog.log('Subproject', mlog.bold(subp_name), 'finished.')
        mlog.log()
        if kwargs['version']:
            pv = subi.project_version
            wanted = kwargs['version']
            if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
                raise InterpreterException(f'Subproject {subp_name} version is {pv} but {wanted} required.')
        self.active_projectname = current_active
        self.subprojects.update(subi.subprojects)
        self.subprojects[subp_name] = SubprojectHolder(subi, subdir, warnings=subi_warnings)
        # Duplicates are possible when subproject uses files from project root
        if build_def_files:
            self.build_def_files.update(build_def_files)
        # We always need the subi.build_def_files, to propagate sub-sub-projects
        self.build_def_files.update(subi.build_def_files)
        self.build.merge(subi.build)
        self.build.subprojects[subp_name] = subi.project_version
        self.coredata.initialized_subprojects.add(subp_name)
        return self.subprojects[subp_name]
    def _do_subproject_cmake(self, subp_name: str, subdir: str, subdir_abs: str,
                             default_options: T.Dict[OptionKey, str],
                             kwargs: kwargs.DoSubproject) -> SubprojectHolder:
        '''Configure a CMake subproject by translating it to a Meson AST and
        running that through _do_subproject_meson().'''
        with mlog.nested(subp_name):
            new_build = self.build.copy()
            prefix = self.coredata.options[OptionKey('prefix')].value
            from ..modules.cmake import CMakeSubprojectOptions
            options = kwargs['options'] or CMakeSubprojectOptions()
            cmake_options = kwargs['cmake_options'] + options.cmake_options
            cm_int = CMakeInterpreter(new_build, Path(subdir), Path(subdir_abs), Path(prefix), new_build.environment, self.backend)
            cm_int.initialise(cmake_options)
            cm_int.analyse()
            # Generate a meson ast and execute it with the normal do_subproject_meson
            ast = cm_int.pretend_to_be_meson(options.target_options)
            mlog.log()
            with mlog.nested('cmake-ast'):
                mlog.log('Processing generated meson AST')
                # Debug print the generated meson file
                from ..ast import AstIndentationGenerator, AstPrinter
                printer = AstPrinter()
                ast.accept(AstIndentationGenerator())
                ast.accept(printer)
                printer.post_process()
                meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build')
                with open(meson_filename, "w", encoding='utf-8') as f:
                    f.write(printer.result)
                mlog.log('Build file:', meson_filename)
                mlog.cmd_ci_include(meson_filename)
                mlog.log()
            result = self._do_subproject_meson(subp_name, subdir, default_options, kwargs, ast, [str(f) for f in cm_int.bs_files], is_translated=True)
            result.cm_interpreter = cm_int
        mlog.log()
        return result
    def get_option_internal(self, optname: str) -> coredata.UserOption:
        '''Look up the UserOption object for *optname* in this subproject,
        resolving yielding options against the parent project where possible.'''
        key = OptionKey.from_string(optname).evolve(subproject=self.subproject)
        if not key.is_project():
            # Builtin/compiler options: check subproject value first, falling
            # back to the top-level value when absent or yielding.
            for opts in [self.coredata.options, compilers.base_options]:
                v = opts.get(key)
                if v is None or v.yielding:
                    v = opts.get(key.as_root())
                if v is not None:
                    assert isinstance(v, coredata.UserOption), 'for mypy'
                    return v
        try:
            opt = self.coredata.options[key]
            if opt.yielding and key.subproject and key.as_root() in self.coredata.options:
                popt = self.coredata.options[key.as_root()]
                if type(opt) is type(popt):
                    opt = popt
                else:
                    # Get class name, then option type as a string
                    opt_type = opt.__class__.__name__[4:][:-6].lower()
                    popt_type = popt.__class__.__name__[4:][:-6].lower()
                    # This is not a hard error to avoid dependency hell, the workaround
                    # when this happens is to simply set the subproject's option directly.
                    mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield '
                                 'to parent option of type {3!r}, ignoring parent value. '
                                 'Use -D{2}:{0}=value to set the value for this option manually'
                                 '.'.format(optname, opt_type, self.subproject, popt_type),
                                 location=self.current_node)
            return opt
        except KeyError:
            pass
        raise InterpreterException(f'Tried to access unknown option {optname!r}.')
@typed_pos_args('get_option', str)
@noKwargs
def func_get_option(self, nodes: mparser.BaseNode, args: T.Tuple[str],
kwargs: 'TYPE_kwargs') -> T.Union[coredata.UserOption, 'TYPE_var']:
optname = args[0]
if ':' in optname:
raise InterpreterException('Having a colon in option name is forbidden, '
'projects are not allowed to directly access '
'options of other subprojects.')
opt = self.get_option_internal(optname)
if isinstance(opt, coredata.UserFeatureOption):
opt.name = optname
return opt
elif isinstance(opt, coredata.UserOption):
return opt.value
return opt
@typed_pos_args('configuration_data', optargs=[dict])
@noKwargs
def func_configuration_data(self, node: mparser.BaseNode, args: T.Tuple[T.Optional[T.Dict[str, T.Any]]],
kwargs: 'TYPE_kwargs') -> build.ConfigurationData:
initial_values = args[0]
if initial_values is not None:
FeatureNew.single_use('configuration_data dictionary', '0.49.0', self.subproject, location=node)
for k, v in initial_values.items():
if not isinstance(v, (str, int, bool)):
raise InvalidArguments(
f'"configuration_data": initial value dictionary key "{k!r}"" must be "str | int | bool", not "{v!r}"')
return build.ConfigurationData(initial_values)
    def set_backend(self) -> None:
        '''Instantiate the build backend from the 'backend' option (no-op when
        one is already set, e.g. while parsing subprojects).'''
        # The backend is already set when parsing subprojects
        if self.backend is not None:
            return
        backend = self.coredata.get_option(OptionKey('backend'))
        from ..backend import backends
        self.backend = backends.get_backend_from_name(backend, self.build, self)
        if self.backend is None:
            raise InterpreterException(f'Unknown backend "{backend}".')
        if backend != self.backend.name:
            # The backend was auto-adjusted (e.g. generic 'vs' resolved to a
            # concrete version); persist the real name in the option store.
            if self.backend.name.startswith('vs'):
                mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name))
            self.coredata.set_option(OptionKey('backend'), self.backend.name)
        # Only init backend options on first invocation otherwise it would
        # override values previously set from command line.
        if self.environment.first_invocation:
            self.coredata.init_backend_options(backend)
        options = {k: v for k, v in self.environment.options.items() if k.is_backend()}
        self.coredata.set_options(options)
    @typed_pos_args('project', str, varargs=str)
    @typed_kwargs(
        'project',
        DEFAULT_OPTIONS,
        KwargInfo('meson_version', (str, NoneType)),
        KwargInfo(
            'version',
            (str, mesonlib.File, NoneType, list),
            default='undefined',
            validator=_project_version_validator,
            convertor=lambda x: x[0] if isinstance(x, list) else x,
        ),
        KwargInfo('license', ContainerTypeInfo(list, str), default=['unknown'], listify=True),
        KwargInfo('subproject_dir', str, default='subprojects'),
    )
    def func_project(self, node: mparser.FunctionNode, args: T.Tuple[str, T.List[str]], kwargs: 'kwargs.Project') -> None:
        '''Implementation of `project()`: validate project metadata, set up
        options, version, wrap resolver and backend, and add languages.'''
        proj_name, proj_langs = args
        if ':' in proj_name:
            raise InvalidArguments(f"Project name {proj_name!r} must not contain ':'")
        # This needs to be evaluated as early as possible, as meson uses this
        # for things like deprecation testing.
        if kwargs['meson_version']:
            cv = coredata.version
            pv = kwargs['meson_version']
            if not mesonlib.version_compare(cv, pv):
                raise InterpreterException(f'Meson version is {cv} but project requires {pv}')
            mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version']
        # Process the project's option file, if any.
        if os.path.exists(self.option_file):
            oi = optinterpreter.OptionInterpreter(self.subproject)
            oi.process(self.option_file)
            self.coredata.update_project_options(oi.options)
            self.add_build_def_file(self.option_file)
        # Do not set default_options on reconfigure otherwise it would override
        # values previously set from command line. That means that changing
        # default_options in a project will trigger a reconfigure but won't
        # have any effect.
        self.project_default_options = coredata.create_options_dict(
            kwargs['default_options'], self.subproject)
        # If this is the first invocation we always need to initialize
        # builtins, if this is a subproject that is new in a re-invocation we
        # need to initialize builtins for that
        if self.environment.first_invocation or (self.subproject != '' and self.subproject not in self.coredata.initialized_subprojects):
            default_options = self.project_default_options.copy()
            default_options.update(self.default_project_options)
            self.coredata.init_builtins(self.subproject)
        else:
            default_options = {}
        self.coredata.set_default_options(default_options, self.subproject, self.environment)
        if not self.is_subproject():
            self.build.project_name = proj_name
        self.active_projectname = proj_name
        # 'version' may be a string or a File containing a single version line.
        version = kwargs['version']
        if isinstance(version, mesonlib.File):
            FeatureNew.single_use('version from file', '0.57.0', self.subproject, location=node)
            self.add_build_def_file(version)
            ifname = version.absolute_path(self.environment.source_dir,
                                           self.environment.build_dir)
            try:
                ver_data = Path(ifname).read_text(encoding='utf-8').split('\n')
            except FileNotFoundError:
                raise InterpreterException('Version file not found.')
            if len(ver_data) == 2 and ver_data[1] == '':
                # Tolerate a single trailing newline.
                ver_data = ver_data[0:1]
            if len(ver_data) != 1:
                raise InterpreterException('Version file must contain exactly one line of text.')
            self.project_version = ver_data[0]
        else:
            self.project_version = version
        if self.build.project_version is None:
            self.build.project_version = self.project_version
        proj_license = kwargs['license']
        self.build.dep_manifest[proj_name] = build.DepManifest(self.project_version, proj_license)
        if self.subproject in self.build.projects:
            raise InvalidCode('Second call to project().')
        # spdirname is the subproject_dir for this project, relative to self.subdir.
        # self.subproject_dir is the subproject_dir for the main project, relative to top source dir.
        spdirname = kwargs['subproject_dir']
        if not isinstance(spdirname, str):
            raise InterpreterException('Subproject_dir must be a string')
        if os.path.isabs(spdirname):
            raise InterpreterException('Subproject_dir must not be an absolute path.')
        if spdirname.startswith('.'):
            raise InterpreterException('Subproject_dir must not begin with a period.')
        if '..' in spdirname:
            raise InterpreterException('Subproject_dir must not contain a ".." segment.')
        if not self.is_subproject():
            self.subproject_dir = spdirname
        self.build.subproject_dir = self.subproject_dir
        # Load wrap files from this (sub)project.
        wrap_mode = self.coredata.get_option(OptionKey('wrap_mode'))
        if not self.is_subproject() or wrap_mode != WrapMode.nopromote:
            subdir = os.path.join(self.subdir, spdirname)
            r = wrap.Resolver(self.environment.get_source_dir(), subdir, self.subproject, wrap_mode)
            if self.is_subproject():
                self.environment.wrap_resolver.merge_wraps(r)
            else:
                self.environment.wrap_resolver = r
        self.build.projects[self.subproject] = proj_name
        mlog.log('Project name:', mlog.bold(proj_name))
        mlog.log('Project version:', mlog.bold(self.project_version))
        if not self.is_subproject():
            # We have to activate VS before adding languages and before calling
            # self.set_backend() otherwise it wouldn't be able to detect which
            # vs backend version we need. But after setting default_options in case
            # the project sets vs backend by default.
            backend = self.coredata.get_option(OptionKey('backend'))
            force_vsenv = self.user_defined_options.vsenv or backend.startswith('vs')
            if mesonlib.setup_vsenv(force_vsenv):
                self.build.need_vsenv = True
        self.add_languages(proj_langs, True, MachineChoice.HOST)
        self.add_languages(proj_langs, False, MachineChoice.BUILD)
        self.set_backend()
        if not self.is_subproject():
            self.check_stdlibs()
    @typed_kwargs('add_languages', KwargInfo('native', (bool, NoneType), since='0.54.0'), REQUIRED_KW)
    @typed_pos_args('add_languages', varargs=str)
    def func_add_languages(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddLanguages') -> bool:
        '''Implementation of `add_languages()`: detect compilers for the given
        languages for host and/or build machine; returns whether all succeeded.'''
        langs = args[0]
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        native = kwargs['native']
        if disabled:
            for lang in sorted(langs, key=compilers.sort_clink):
                mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled')
            return False
        if native is not None:
            return self.add_languages(langs, required, self.machine_from_native_kwarg(kwargs))
        else:
            # absent 'native' means 'both' for backwards compatibility
            tv = FeatureNew.get_target_version(self.subproject)
            if FeatureNew.check_version(tv, '0.54.0'):
                mlog.warning('add_languages is missing native:, assuming languages are wanted for both host and build.',
                             location=node)
            success = self.add_languages(langs, False, MachineChoice.BUILD)
            success &= self.add_languages(langs, required, MachineChoice.HOST)
            return success
@noArgsFlattening
@noKwargs
def func_message(self, node, args, kwargs):
    """Implement the message() meson function: log all arguments."""
    if len(args) > 1:
        FeatureNew.single_use('message with more than one argument', '0.54.0', self.subproject, location=node)
    self.message_impl([stringifyUserArguments(arg) for arg in args])
def message_impl(self, args):
    """Write *args* to the log behind a bold 'Message:' prefix."""
    prefix = mlog.bold('Message:')
    mlog.log(prefix, *args)
@noArgsFlattening
@FeatureNew('summary', '0.53.0')
@typed_pos_args('summary', (str, dict), optargs=[object])
@typed_kwargs(
    'summary',
    KwargInfo('section', str, default=''),
    KwargInfo('bool_yn', bool, default=False),
    KwargInfo('list_sep', (str, NoneType), since='0.54.0')
)
def func_summary(self, node: mparser.BaseNode, args: T.Tuple[T.Union[str, T.Dict[str, T.Any]], T.Optional[T.Any]],
                 kwargs: 'kwargs.Summary') -> None:
    """Implement summary(): either a single dict, or a key/value pair."""
    first, second = args
    if second is None:
        # One-argument form: the whole section is given as a dictionary.
        if not isinstance(first, dict):
            raise InterpreterException('Summary first argument must be dictionary.')
        values = first
    else:
        # Two-argument form: a single key/value entry.
        if not isinstance(first, str):
            raise InterpreterException('Summary first argument must be string.')
        values = {first: second}
    self.summary_impl(kwargs['section'], values, kwargs)
def summary_impl(self, section: str, values, kwargs: 'kwargs.Summary') -> None:
    """Record a summary section for the current subproject, lazily creating
    that subproject's Summary object on first use."""
    summary = self.summary.get(self.subproject)
    if summary is None:
        summary = Summary(self.active_projectname, self.project_version)
        self.summary[self.subproject] = summary
    summary.add_section(section, values, kwargs['bool_yn'], kwargs['list_sep'], self.subproject)
def _print_summary(self) -> None:
    """Print all collected summary() sections, plus two automatic sections
    (subproject status and user-defined options), main project last."""
    # Add automatic 'Subprojects' section in main project.
    all_subprojects = collections.OrderedDict()
    for name, subp in sorted(self.subprojects.items()):
        value = subp.found()
        # Attach a human-readable reason when the subproject is unusable.
        if subp.disabled_feature:
            value = [value, f'Feature {subp.disabled_feature!r} disabled']
        elif subp.exception:
            value = [value, str(subp.exception)]
        elif subp.warnings > 0:
            value = [value, f'{subp.warnings} warnings']
        all_subprojects[name] = value
    if all_subprojects:
        self.summary_impl('Subprojects', all_subprojects,
                          {'bool_yn': True,
                           'list_sep': ' ',
                           })
    # Add automatic section with all user defined options
    if self.user_defined_options:
        values = collections.OrderedDict()
        if self.user_defined_options.cross_file:
            values['Cross files'] = self.user_defined_options.cross_file
        if self.user_defined_options.native_file:
            values['Native files'] = self.user_defined_options.native_file
        sorted_options = sorted(self.user_defined_options.cmd_line_options.items())
        values.update({str(k): v for k, v in sorted_options})
        if values:
            self.summary_impl('User defined options', values, {'bool_yn': False, 'list_sep': None})
    # Print all summaries, main project last.
    mlog.log('')  # newline
    # The main project is keyed by the empty subproject name.
    main_summary = self.summary.pop('', None)
    for subp_name, summary in sorted(self.summary.items()):
        # Only print sections of subprojects that were actually configured.
        if self.subprojects[subp_name].found():
            summary.dump()
    if main_summary:
        main_summary.dump()
@noArgsFlattening
@FeatureNew('warning', '0.44.0')
@noKwargs
def func_warning(self, node, args, kwargs):
    """Implement the warning() meson function: log all arguments as a warning."""
    if len(args) > 1:
        FeatureNew.single_use('warning with more than one argument', '0.54.0', self.subproject, location=node)
    mlog.warning(*[stringifyUserArguments(arg) for arg in args], location=node)
@noArgsFlattening
@noKwargs
def func_error(self, node, args, kwargs):
    """Implement the error() meson function: abort interpretation with a message."""
    if len(args) > 1:
        FeatureNew.single_use('error with more than one argument', '0.58.0', self.subproject, location=node)
    message = ' '.join(stringifyUserArguments(arg) for arg in args)
    raise InterpreterException('Problem encountered: ' + message)
@noKwargs
@noPosargs
def func_exception(self, node, args, kwargs):
    # Debugging helper: deliberately raise a plain Exception so interpreter
    # error/backtrace handling can be exercised from a meson.build file.
    raise Exception()
def add_languages(self, args: T.Sequence[str], required: bool, for_machine: MachineChoice) -> bool:
    """Add compilers for the given languages on one machine.

    Returns True when every language is usable; False when a non-required
    language could not be found.
    """
    success = self.add_languages_for(args, required, for_machine)
    if not self.coredata.is_cross_build():
        # In a native build, build-machine options mirror the regular ones.
        self.coredata.copy_build_options_from_regular_ones()
    self._redetect_machines()
    return success
def should_skip_sanity_check(self, for_machine: MachineChoice) -> bool:
    """Return True when compiler sanity checks were disabled via the
    machine file's 'skip_sanity_check' property."""
    should = self.environment.properties.host.get('skip_sanity_check', False)
    if not isinstance(should, bool):
        raise InterpreterException('Option skip_sanity_check must be a boolean.')
    # The original guard clauses only ever returned False when 'should'
    # was already False, so the property value alone decides the result.
    return should
def add_languages_for(self, args: T.List[str], required: bool, for_machine: MachineChoice) -> bool:
    """Detect and register a compiler for each requested language on one machine.

    Returns False (instead of raising) for languages that are missing but
    not required; already-detected languages are reused from coredata.
    """
    args = [a.lower() for a in args]
    langs = set(self.coredata.compilers[for_machine].keys())
    langs.update(args)
    # We'd really like to add cython's default language here, but it can't
    # actually be done because the cython compiler hasn't been initialized,
    # so we can't actually get the option yet. Because we can't know what
    # compiler to add by default, and we don't want to add unnecessary
    # compilers we don't add anything for cython here, and instead do it
    # When the first cython target using a particular language is used.
    if 'vala' in langs and 'c' not in langs:
        FeatureNew.single_use('Adding Vala language without C', '0.59.0', self.subproject, location=self.current_node)
        # Vala compiles through C, so implicitly pull the C compiler in.
        args.append('c')
    success = True
    for lang in sorted(args, key=compilers.sort_clink):
        clist = self.coredata.compilers[for_machine]
        machine_name = for_machine.get_lower_case_name()
        if lang in clist:
            # Already detected earlier in this (or a parent) project.
            comp = clist[lang]
        else:
            try:
                comp = compilers.detect_compiler_for(self.environment, lang, for_machine)
                if comp is None:
                    raise InvalidArguments(f'Tried to use unknown language "{lang}".')
                if self.should_skip_sanity_check(for_machine):
                    mlog.log_once('Cross compiler sanity tests disabled via the cross file.')
                else:
                    comp.sanity_check(self.environment.get_scratch_dir(), self.environment)
            except Exception:
                if not required:
                    # Optional language: record the failure and keep going.
                    mlog.log('Compiler for language',
                             mlog.bold(lang), 'for the', machine_name,
                             'machine not found.')
                    success = False
                    continue
                else:
                    raise
        # Build-machine compilers of a native build are only debug-logged
        # to keep the configure output short.
        if for_machine == MachineChoice.HOST or self.environment.is_cross_build():
            logger_fun = mlog.log
        else:
            logger_fun = mlog.debug
        logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:',
                   mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string())
        if comp.linker is not None:
            logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:',
                       mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version)
        self.build.ensure_static_linker(comp)
    return success
def program_from_file_for(self, for_machine: MachineChoice, prognames: T.List[mesonlib.FileOrString]
                          ) -> T.Optional[ExternalProgram]:
    """Look each candidate name up in the machine file's binary entries;
    return the first program found there, or None."""
    for candidate in prognames:
        if isinstance(candidate, mesonlib.File):
            continue  # Always points to a local (i.e. self generated) file.
        if not isinstance(candidate, str):
            raise InterpreterException('Executable name must be a string')
        prog = ExternalProgram.from_bin_list(self.environment, for_machine, candidate)
        if prog.found():
            return prog
    return None
def program_from_system(self, args: T.List[mesonlib.FileOrString], search_dirs: T.List[str],
                        extra_info: T.List[mlog.TV_Loggable]) -> T.Optional[ExternalProgram]:
    """Search the system (PATH plus explicit dirs) for the first found program.

    File arguments are resolved relative to the source or build tree;
    string arguments relative to the current subdir. Returns None if no
    candidate is found; on success appends the resolved command line to
    *extra_info* for logging.
    """
    # Search for scripts relative to current subdir.
    # Do not cache found programs because find_program('foobar')
    # might give different results when run from different source dirs.
    source_dir = os.path.join(self.environment.get_source_dir(), self.subdir)
    for exename in args:
        if isinstance(exename, mesonlib.File):
            if exename.is_built:
                search_dir = os.path.join(self.environment.get_build_dir(),
                                          exename.subdir)
            else:
                search_dir = os.path.join(self.environment.get_source_dir(),
                                          exename.subdir)
            exename = exename.fname
            # A File argument is an exact path: extra dirs would be meaningless.
            extra_search_dirs = []
        elif isinstance(exename, str):
            search_dir = source_dir
            extra_search_dirs = search_dirs
        else:
            raise InvalidArguments(f'find_program only accepts strings and files, not {exename!r}')
        extprog = ExternalProgram(exename, search_dir=search_dir,
                                  extra_search_dirs=extra_search_dirs,
                                  silent=True)
        if extprog.found():
            # BUG FIX: the f-string reused double quotes inside double quotes,
            # which is a SyntaxError on Python < 3.12.
            extra_info.append(f'({" ".join(extprog.get_command())})')
            return extprog
    return None
def program_from_overrides(self, command_names: T.List[mesonlib.FileOrString],
                           extra_info: T.List['mlog.TV_Loggable']
                           ) -> T.Optional[T.Union[ExternalProgram, OverrideProgram, build.Executable]]:
    """Return the first meson.override_find_program() entry matching one of
    the candidate string names, or None."""
    for candidate in command_names:
        if not isinstance(candidate, str):
            continue
        override = self.build.find_overrides.get(candidate)
        if override is not None:
            extra_info.append(mlog.blue('(overridden)'))
            return override
    return None
def store_name_lookups(self, command_names: T.List[mesonlib.FileOrString]) -> None:
    """Remember every string name that was looked up, so later override
    attempts for an already-found program can be rejected."""
    self.build.searched_programs.update(
        name for name in command_names if isinstance(name, str))
def add_find_program_override(self, name: str, exe: T.Union[build.Executable, ExternalProgram, 'OverrideProgram']) -> None:
    """Register *exe* as the result of any future find_program(name).

    Raises if the name was already searched for (override would be
    inconsistent) or already overridden.
    """
    if name in self.build.searched_programs:
        raise InterpreterException(f'Tried to override finding of executable "{name}" which has already been found.')
    if name in self.build.find_overrides:
        raise InterpreterException(f'Tried to override executable "{name}" which has already been overridden.')
    self.build.find_overrides[name] = exe
def notfound_program(self, args: T.List[mesonlib.FileOrString]) -> ExternalProgram:
    """Build a not-found ExternalProgram placeholder named after the args."""
    parts = []
    for a in args:
        if isinstance(a, str):
            parts.append(a)
        else:
            parts.append(a.absolute_path(self.environment.source_dir, self.environment.build_dir))
    return NonExistingExternalProgram(' '.join(parts))
# TODO update modules to always pass `for_machine`. It is bad-form to assume
# the host machine.
def find_program_impl(self, args: T.List[mesonlib.FileOrString],
                      for_machine: MachineChoice = MachineChoice.HOST,
                      required: bool = True, silent: bool = True,
                      wanted: T.Union[str, T.List[str]] = '',
                      search_dirs: T.Optional[T.List[str]] = None,
                      version_func: T.Optional[T.Callable[[T.Union['ExternalProgram', 'build.Executable', 'OverrideProgram']], str]] = None
                      ) -> T.Union['ExternalProgram', 'build.Executable', 'OverrideProgram']:
    """Core find_program() logic: look the program up, enforce `required`,
    and check the found version against `wanted` if given.

    Always returns a program object; on failure it is a not-found
    placeholder (unless `required`, in which case this raises).
    """
    args = mesonlib.listify(args)
    extra_info: T.List[mlog.TV_Loggable] = []
    progobj = self.program_lookup(args, for_machine, required, search_dirs, extra_info)
    if progobj is None:
        progobj = self.notfound_program(args)
    if isinstance(progobj, ExternalProgram) and not progobj.found():
        # Lookup failed entirely: log/raise and return the placeholder.
        if not silent:
            mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'))
        if required:
            m = 'Program {!r} not found or not executable'
            raise InterpreterException(m.format(progobj.get_name()))
        return progobj
    if wanted:
        # Determine the program's version: explicit callback, project
        # version for an executable built by (a sub)project, or ask the
        # program itself.
        if version_func:
            version = version_func(progobj)
        elif isinstance(progobj, build.Executable):
            if progobj.subproject:
                interp = self.subprojects[progobj.subproject].held_object
            else:
                interp = self
            assert isinstance(interp, Interpreter)
            version = interp.project_version
        else:
            version = progobj.get_version(self)
        is_found, not_found, _ = mesonlib.version_compare_many(version, wanted)
        if not is_found:
            mlog.log('Program', mlog.bold(progobj.name), 'found:', mlog.red('NO'),
                     'found', mlog.normal_cyan(version), 'but need:',
                     mlog.bold(', '.join([f"'{e}'" for e in not_found])), *extra_info)
            if required:
                m = 'Invalid version of program, need {!r} {!r} found {!r}.'
                raise InterpreterException(m.format(progobj.name, not_found, version))
            return self.notfound_program(args)
        extra_info.insert(0, mlog.normal_cyan(version))
    # Only store successful lookups
    self.store_name_lookups(args)
    if not silent:
        mlog.log('Program', mlog.bold(progobj.name), 'found:', mlog.green('YES'), *extra_info)
    if isinstance(progobj, build.Executable):
        progobj.was_returned_by_find_program = True
    return progobj
def program_lookup(self, args: T.List[mesonlib.FileOrString], for_machine: MachineChoice,
                   required: bool, search_dirs: T.List[str], extra_info: T.List[mlog.TV_Loggable]
                   ) -> T.Optional[T.Union[ExternalProgram, build.Executable, OverrideProgram]]:
    """Find a program, trying sources in priority order: explicit overrides,
    forced wrap fallback, machine-file binaries, system PATH, the bundled
    python3 special case, and finally an optional subproject fallback."""
    progobj = self.program_from_overrides(args, extra_info)
    if progobj:
        return progobj
    fallback = None
    wrap_mode = self.coredata.get_option(OptionKey('wrap_mode'))
    if wrap_mode != WrapMode.nofallback and self.environment.wrap_resolver:
        fallback = self.environment.wrap_resolver.find_program_provider(args)
    if fallback and wrap_mode == WrapMode.forcefallback:
        # forcefallback skips the system search entirely.
        return self.find_program_fallback(fallback, args, required, extra_info)
    progobj = self.program_from_file_for(for_machine, args)
    if progobj is None:
        progobj = self.program_from_system(args, search_dirs, extra_info)
    if progobj is None and args[0].endswith('python3'):
        # Fall back to the interpreter running meson itself.
        prog = ExternalProgram('python3', mesonlib.python_command, silent=True)
        progobj = prog if prog.found() else None
    if progobj is None and fallback and required:
        progobj = self.find_program_fallback(fallback, args, required, extra_info)
    return progobj
def find_program_fallback(self, fallback: str, args: T.List[mesonlib.FileOrString],
                          required: bool, extra_info: T.List[mlog.TV_Loggable]
                          ) -> T.Optional[T.Union[ExternalProgram, build.Executable, OverrideProgram]]:
    """Configure the *fallback* subproject and return the program override
    it is expected to register for one of *args*, if any."""
    mlog.log('Fallback to subproject', mlog.bold(fallback), 'which provides program',
             mlog.bold(' '.join(args)))
    sp_kwargs: kwargs.DoSubproject = {
        'required': required,
        'version': [],
        'default_options': [],
        'cmake_options': [],
        'options': None,
    }
    self.do_subproject(fallback, 'meson', sp_kwargs)
    return self.program_from_overrides(args, extra_info)
@typed_pos_args('find_program', varargs=(str, mesonlib.File), min_varargs=1)
@typed_kwargs(
    'find_program',
    DISABLER_KW.evolve(since='0.49.0'),
    NATIVE_KW,
    REQUIRED_KW,
    KwargInfo('dirs', ContainerTypeInfo(list, str), default=[], listify=True, since='0.53.0'),
    KwargInfo('version', ContainerTypeInfo(list, str), default=[], listify=True, since='0.52.0'),
)
@disablerIfNotFound
def func_find_program(self, node: mparser.BaseNode, args: T.Tuple[T.List[mesonlib.FileOrString]],
                      kwargs: 'kwargs.FindProgram',
                      ) -> T.Union['build.Executable', ExternalProgram, 'OverrideProgram']:
    """Implement the find_program() meson function.

    Thin wrapper over find_program_impl() that handles the feature-option
    'disabled' short-circuit and kwarg extraction.
    """
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        mlog.log('Program', mlog.bold(' '.join(args[0])), 'skipped: feature', mlog.bold(feature), 'disabled')
        return self.notfound_program(args[0])
    search_dirs = extract_search_dirs(kwargs)
    return self.find_program_impl(args[0], kwargs['native'], required=required,
                                  silent=False, wanted=kwargs['version'],
                                  search_dirs=search_dirs)
def func_find_library(self, node, args, kwargs):
    """find_library() was removed from the global namespace; direct users
    to the compiler-object method instead."""
    msg = ('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n'
           'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n'
           'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n')
    raise InvalidCode(msg)
# When adding kwargs, please check if they make sense in dependencies.get_dep_identifier()
@FeatureNewKwargs('dependency', '0.57.0', ['cmake_package_version'])
@FeatureNewKwargs('dependency', '0.56.0', ['allow_fallback'])
@FeatureNewKwargs('dependency', '0.54.0', ['components'])
@FeatureNewKwargs('dependency', '0.52.0', ['include_type'])
@FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args'])
@FeatureNewKwargs('dependency', '0.49.0', ['disabler'])
@FeatureNewKwargs('dependency', '0.40.0', ['method'])
@FeatureNewKwargs('dependency', '0.38.0', ['default_options'])
@disablerIfNotFound
@permittedKwargs(permitted_dependency_kwargs)
@typed_pos_args('dependency', varargs=str, min_varargs=1)
def func_dependency(self, node, args, kwargs):
    """Implement the dependency() meson function.

    Looks the dependency up (possibly via a wrap fallback), optionally
    prints `not_found_message`, and converts the include type if requested.
    """
    # Replace '' by empty list of names
    names = [n for n in args[0] if n]
    if len(names) > 1:
        FeatureNew('dependency with more than one name', '0.60.0').use(self.subproject)
    allow_fallback = kwargs.get('allow_fallback')
    if allow_fallback is not None and not isinstance(allow_fallback, bool):
        raise InvalidArguments('"allow_fallback" argument must be boolean')
    fallback = kwargs.get('fallback')
    default_options = kwargs.get('default_options')
    df = DependencyFallbacksHolder(self, names, allow_fallback, default_options)
    df.set_fallback(fallback)
    not_found_message = kwargs.get('not_found_message', '')
    if not isinstance(not_found_message, str):
        raise InvalidArguments('The not_found_message must be a string.')
    try:
        d = df.lookup(kwargs)
    except Exception:
        # Print the user's message even when the lookup itself raised.
        if not_found_message:
            self.message_impl([not_found_message])
        raise
    assert isinstance(d, Dependency)
    if not d.found() and not_found_message:
        # BUG FIX: the message was also emitted unconditionally a second
        # time below; print it only once, and only when not found.
        self.message_impl([not_found_message])
    # Ensure the correct include type
    if 'include_type' in kwargs:
        wanted = kwargs['include_type']
        if not isinstance(wanted, str):
            raise InvalidArguments('The `include_type` kwarg must be a string')
        actual = d.get_include_type()
        if wanted != actual:
            mlog.debug(f'Current include type of {args[0]} is {actual}. Converting to requested {wanted}')
            d = d.generate_system_dependency(wanted)
    if d.feature_since is not None:
        version, extra_msg = d.feature_since
        FeatureNew.single_use(f'dep {d.name!r} custom lookup', version, self.subproject, extra_msg, node)
    for f in d.featurechecks:
        f.use(self.subproject, node)
    return d
@FeatureNew('disabler', '0.44.0')
@noKwargs
@noPosargs
def func_disabler(self, node, args, kwargs):
    # Implement disabler(): return a fresh Disabler object.
    return Disabler()
@FeatureNewKwargs('executable', '0.42.0', ['implib'])
@FeatureNewKwargs('executable', '0.56.0', ['win_subsystem'])
@FeatureDeprecatedKwargs('executable', '0.56.0', ['gui_app'], extra_message="Use 'win_subsystem' instead.")
@permittedKwargs(build.known_exe_kwargs)
def func_executable(self, node, args, kwargs):
    # Implement executable(): delegate to the common build-target machinery.
    return self.build_target(node, args, kwargs, build.Executable)
@permittedKwargs(build.known_stlib_kwargs)
def func_static_lib(self, node, args, kwargs):
    # Implement static_library(): delegate to the common build-target machinery.
    return self.build_target(node, args, kwargs, build.StaticLibrary)
@permittedKwargs(build.known_shlib_kwargs)
def func_shared_lib(self, node, args, kwargs):
    """Implement shared_library(): build the target and flag that a shared
    library (as opposed to library()/both_libraries()) was requested."""
    target = self.build_target(node, args, kwargs, build.SharedLibrary)
    target.shared_library_only = True
    return target
@permittedKwargs(known_library_kwargs)
def func_both_lib(self, node, args, kwargs):
    # Implement both_libraries(): build static and shared variants.
    return self.build_both_libraries(node, args, kwargs)
@FeatureNew('shared_module', '0.37.0')
@permittedKwargs(build.known_shmod_kwargs)
def func_shared_module(self, node, args, kwargs):
    # Implement shared_module(): delegate to the common build-target machinery.
    return self.build_target(node, args, kwargs, build.SharedModule)
@permittedKwargs(known_library_kwargs)
def func_library(self, node, args, kwargs):
    # Implement library(): static, shared or both, per project options.
    return self.build_library(node, args, kwargs)
@permittedKwargs(build.known_jar_kwargs)
def func_jar(self, node, args, kwargs):
    # Implement jar(): delegate to the common build-target machinery.
    return self.build_target(node, args, kwargs, build.Jar)
@FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options'])
@permittedKwargs(known_build_target_kwargs)
def func_build_target(self, node, args, kwargs):
    """Implement build_target(): dispatch on the 'target_type' keyword."""
    if 'target_type' not in kwargs:
        raise InterpreterException('Missing target_type keyword argument')
    target_type = kwargs.pop('target_type')
    # The three types with extra behavior are handled explicitly.
    if target_type == 'shared_module':
        FeatureNew('build_target(target_type: \'shared_module\')',
                   '0.51.0').use(self.subproject)
        return self.build_target(node, args, kwargs, build.SharedModule)
    if target_type == 'both_libraries':
        return self.build_both_libraries(node, args, kwargs)
    if target_type == 'library':
        return self.build_library(node, args, kwargs)
    # The rest map directly onto a build-target class.
    direct = {
        'executable': build.Executable,
        'shared_library': build.SharedLibrary,
        'static_library': build.StaticLibrary,
        'jar': build.Jar,
    }
    target_class = direct.get(target_type)
    if target_class is None:
        raise InterpreterException('Unknown target_type.')
    return self.build_target(node, args, kwargs, target_class)
@noPosargs
@typed_kwargs(
    'vcs_tag',
    CT_INPUT_KW.evolve(required=True),
    CT_OUTPUT_KW,
    # Cannot use the COMMAND_KW because command is allowed to be empty
    KwargInfo(
        'command',
        ContainerTypeInfo(list, (str, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex, ExternalProgram, mesonlib.File)),
        listify=True,
        default=[],
    ),
    KwargInfo('fallback', (str, NoneType)),
    KwargInfo('replace_string', str, default='@VCS_TAG@'),
)
def func_vcs_tag(self, node: mparser.BaseNode, args: T.List['TYPE_var'], kwargs: 'kwargs.VcsTag') -> build.CustomTarget:
    """Implement vcs_tag(): a custom target that substitutes the VCS
    revision (or a fallback string) into the input file via vcstagger.py."""
    if kwargs['fallback'] is None:
        FeatureNew.single_use('Optional fallback in vcs_tag', '0.41.0', self.subproject, location=node)
    fallback = kwargs['fallback'] or self.project_version
    replace_string = kwargs['replace_string']
    regex_selector = '(.*)' # default regex selector for custom command: use complete output
    vcs_cmd = kwargs['command']
    source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir))
    if vcs_cmd:
        # User supplied a command; resolve a string/File program if possible.
        if isinstance(vcs_cmd[0], mesonlib.File):
            FeatureNew.single_use('vcs_tag with file as the first argument', '0.62.0', self.subproject, location=node)
        maincmd = self.find_program_impl(vcs_cmd[0], required=False)
        if maincmd.found():
            vcs_cmd[0] = maincmd
    else:
        # No command given: auto-detect the VCS from the source tree.
        vcs = mesonlib.detect_vcs(source_dir)
        if vcs:
            mlog.log('Found {} repository at {}'.format(vcs['name'], vcs['wc_dir']))
            vcs_cmd = vcs['get_rev'].split()
            regex_selector = vcs['rev_regex']
        else:
            vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force to use the fallback string
    # vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command...
    self._validate_custom_target_outputs(len(kwargs['input']) > 1, kwargs['output'], "vcs_tag")
    tg = build.CustomTarget(
        kwargs['output'][0],
        self.subdir,
        self.subproject,
        self.environment.get_build_command() +
        ['--internal',
         'vcstagger',
         '@INPUT0@',
         '@OUTPUT0@',
         fallback,
         source_dir,
         replace_string,
         regex_selector] + vcs_cmd,
        self.source_strings_to_files(kwargs['input']),
        kwargs['output'],
        # Always stale: the revision can change without any file changing.
        build_by_default=True,
        build_always_stale=True,
    )
    self.add_target(tg.name, tg)
    return tg
@FeatureNew('subdir_done', '0.46.0')
@noPosargs
@noKwargs
def func_subdir_done(self, node, args, kwargs):
    # Implement subdir_done(): unwind interpretation of the current
    # meson.build file via a control-flow exception.
    raise SubdirDoneRequest()
@staticmethod
def _validate_custom_target_outputs(has_multi_in: bool, outputs: T.Iterable[str], name: str) -> None:
"""Checks for additional invalid values in a custom_target output.
This cannot be done with typed_kwargs because it requires the number of
inputs.
"""
for out in outputs:
if has_multi_in and ('@PLAINNAME@' in out or '@BASENAME@' in out):
raise InvalidArguments(f'{name}: output cannot containe "@PLAINNAME@" or "@BASENAME@" '
'when there is more than one input (we can\'t know which to use)')
@typed_pos_args('custom_target', optargs=[str])
@typed_kwargs(
    'custom_target',
    COMMAND_KW,
    CT_BUILD_ALWAYS,
    CT_BUILD_ALWAYS_STALE,
    CT_BUILD_BY_DEFAULT,
    CT_INPUT_KW,
    CT_INSTALL_DIR_KW,
    CT_INSTALL_TAG_KW,
    CT_OUTPUT_KW,
    DEPENDS_KW,
    DEPEND_FILES_KW,
    DEPFILE_KW,
    ENV_KW.evolve(since='0.57.0'),
    INSTALL_KW,
    INSTALL_MODE_KW.evolve(since='0.47.0'),
    OVERRIDE_OPTIONS_KW,
    KwargInfo('feed', bool, default=False, since='0.59.0'),
    KwargInfo('capture', bool, default=False),
    KwargInfo('console', bool, default=False, since='0.48.0'),
)
def func_custom_target(self, node: mparser.FunctionNode, args: T.Tuple[str],
                       kwargs: 'kwargs.CustomTarget') -> build.CustomTarget:
    """Implement custom_target(): validate the keyword combinations and
    construct the CustomTarget object."""
    if kwargs['depfile'] and ('@BASENAME@' in kwargs['depfile'] or '@PLAINNAME@' in kwargs['depfile']):
        FeatureNew.single_use('substitutions in custom_target depfile', '0.47.0', self.subproject, location=node)

    # Don't mutate the kwargs
    build_by_default = kwargs['build_by_default']
    build_always_stale = kwargs['build_always_stale']
    # Remap build_always to build_by_default and build_always_stale
    if kwargs['build_always'] is not None and kwargs['build_always_stale'] is not None:
        raise InterpreterException('CustomTarget: "build_always" and "build_always_stale" are mutually exclusive')

    if build_by_default is None and kwargs['install']:
        build_by_default = True
    elif kwargs['build_always'] is not None:
        if build_by_default is None:
            build_by_default = kwargs['build_always']
        # BUG FIX: the deprecated 'build_always' kwarg maps onto
        # build_always_stale; this previously read 'build_by_default'.
        build_always_stale = kwargs['build_always']

    # These are nullable so that we can know whether they're explicitly
    # set or not. If they haven't been overwritten, set them to their true
    # default
    if build_by_default is None:
        build_by_default = False
    if build_always_stale is None:
        build_always_stale = False

    name = args[0]
    if name is None:
        # name will default to first output, but we cannot do that yet because
        # they could need substitutions (e.g. @BASENAME@) first. CustomTarget()
        # will take care of setting a proper default but name must be an empty
        # string in the meantime.
        FeatureNew.single_use('custom_target() with no name argument', '0.60.0', self.subproject, location=node)
        name = ''
    inputs = self.source_strings_to_files(kwargs['input'], strict=False)
    command = kwargs['command']
    if command and isinstance(command[0], str):
        command[0] = self.find_program_impl([command[0]])

    if len(inputs) > 1 and kwargs['feed']:
        raise InvalidArguments('custom_target: "feed" keyword argument can only be used used with a single input')
    if len(kwargs['output']) > 1 and kwargs['capture']:
        raise InvalidArguments('custom_target: "capture" keyword argument can only be used used with a single output')
    if kwargs['capture'] and kwargs['console']:
        raise InvalidArguments('custom_target: "capture" and "console" keyword arguments are mutually exclusive')
    for c in command:
        # capture redirects stdout and feed redirects stdin, so the command
        # line must not also reference the corresponding file lists.
        if kwargs['capture'] and isinstance(c, str) and '@OUTPUT@' in c:
            raise InvalidArguments('custom_target: "capture" keyword argument cannot be used with "@OUTPUT@"')
        if kwargs['feed'] and isinstance(c, str) and '@INPUT@' in c:
            raise InvalidArguments('custom_target: "feed" keyword argument cannot be used with "@INPUT@"')
    if kwargs['install'] and not kwargs['install_dir']:
        raise InvalidArguments('custom_target: "install_dir" keyword argument must be set when "install" is true.')
    if len(kwargs['install_dir']) > 1:
        FeatureNew.single_use('multiple install_dir for custom_target', '0.40.0', self.subproject, location=node)
    if len(kwargs['install_tag']) not in {0, 1, len(kwargs['output'])}:
        # NOTE: the original f-strings reused single quotes inside a
        # single-quoted f-string, a SyntaxError before Python 3.12.
        raise InvalidArguments('custom_target: install_tag argument must have 0 or 1 outputs, '
                               'or the same number of elements as the output keyword argument. '
                               f'(there are {len(kwargs["install_tag"])} install_tags, '
                               f'and {len(kwargs["output"])} outputs)')

    self._validate_custom_target_outputs(len(inputs) > 1, kwargs['output'], "custom_target")

    tg = build.CustomTarget(
        name,
        self.subdir,
        self.subproject,
        command,
        inputs,
        kwargs['output'],
        build_always_stale=build_always_stale,
        build_by_default=build_by_default,
        capture=kwargs['capture'],
        console=kwargs['console'],
        depend_files=kwargs['depend_files'],
        depfile=kwargs['depfile'],
        extra_depends=kwargs['depends'],
        env=kwargs['env'],
        feed=kwargs['feed'],
        install=kwargs['install'],
        install_dir=kwargs['install_dir'],
        install_mode=kwargs['install_mode'],
        install_tag=kwargs['install_tag'],
        override_options=kwargs['override_options'],
        backend=self.backend)
    self.add_target(tg.name, tg)
    return tg
@typed_pos_args('run_target', str)
@typed_kwargs(
    'run_target',
    COMMAND_KW,
    DEPENDS_KW,
    ENV_KW.evolve(since='0.57.0'),
)
def func_run_target(self, node: mparser.FunctionNode, args: T.Tuple[str],
                    kwargs: 'kwargs.RunTarget') -> build.RunTarget:
    """Implement run_target(): a target that executes a command on demand."""
    all_args = kwargs['command'].copy()
    for i in listify(all_args):
        if isinstance(i, ExternalProgram) and not i.found():
            raise InterpreterException(f'Tried to use non-existing executable {i.name!r}')
    if isinstance(all_args[0], str):
        # Resolve a string program name into an actual program.
        all_args[0] = self.find_program_impl([all_args[0]])
    name = args[0]
    tg = build.RunTarget(name, all_args, kwargs['depends'], self.subdir, self.subproject, kwargs['env'])
    self.add_target(name, tg)
    # Record the (subproject, name) pair; add_target has already rejected
    # duplicates, hence the assert rather than a user-facing error.
    full_name = (self.subproject, name)
    assert full_name not in self.build.run_target_names
    self.build.run_target_names.add(full_name)
    return tg
@FeatureNew('alias_target', '0.52.0')
@typed_pos_args('alias_target', str, varargs=build.Target, min_varargs=1)
@noKwargs
def func_alias_target(self, node: mparser.BaseNode, args: T.Tuple[str, T.List[build.Target]],
                      kwargs: 'TYPE_kwargs') -> build.AliasTarget:
    """Implement alias_target(): a meta-target that only depends on others."""
    name = args[0]
    dependencies = args[1]
    alias = build.AliasTarget(name, dependencies, self.subdir, self.subproject)
    self.add_target(name, alias)
    return alias
@typed_pos_args('generator', (build.Executable, ExternalProgram))
@typed_kwargs(
    'generator',
    KwargInfo('arguments', ContainerTypeInfo(list, str, allow_empty=False), required=True, listify=True),
    KwargInfo('output', ContainerTypeInfo(list, str, allow_empty=False), required=True, listify=True),
    DEPFILE_KW,
    DEPENDS_KW,
    KwargInfo('capture', bool, default=False, since='0.43.0'),
)
def func_generator(self, node: mparser.FunctionNode,
                   args: T.Tuple[T.Union[build.Executable, ExternalProgram]],
                   kwargs: 'kwargs.FuncGenerator') -> build.Generator:
    """Implement generator(): validate output patterns and register it."""
    outputs = kwargs['output']
    for pattern in outputs:
        # Every output must be derived from the input file's name and must
        # stay in the output directory.
        if '@BASENAME@' not in pattern and '@PLAINNAME@' not in pattern:
            raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
        if has_path_sep(pattern):
            raise InvalidArguments('"output" must not contain a directory separator.')
    if len(outputs) > 1 and any('@OUTPUT@' in pattern for pattern in outputs):
        raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
    gen = build.Generator(args[0], **kwargs)
    self.generators.append(gen)
    return gen
@typed_pos_args('benchmark', str, (build.Executable, build.Jar, ExternalProgram, mesonlib.File))
@typed_kwargs('benchmark', *TEST_KWARGS)
def func_benchmark(self, node: mparser.BaseNode,
                   args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
                   kwargs: 'kwargs.FuncBenchmark') -> None:
    # Implement benchmark(): identical to test() but registered in the
    # benchmarks list (is_base_test=False).
    self.add_test(node, args, kwargs, False)
@typed_pos_args('test', str, (build.Executable, build.Jar, ExternalProgram, mesonlib.File))
@typed_kwargs('test', *TEST_KWARGS, KwargInfo('is_parallel', bool, default=True))
def func_test(self, node: mparser.BaseNode,
              args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
              kwargs: 'kwargs.FuncTest') -> None:
    # Implement test(): register the executable in the tests list.
    self.add_test(node, args, kwargs, True)
def unpack_env_kwarg(self, kwargs: T.Union[build.EnvironmentVariables, T.Dict[str, 'TYPE_var'], T.List['TYPE_var'], str]) -> build.EnvironmentVariables:
    """Validate and convert an 'env' kwarg into EnvironmentVariables,
    defaulting to an empty set when absent."""
    envlist = kwargs.get('env')
    if envlist is None:
        return build.EnvironmentVariables()
    error = ENV_KW.validator(envlist)
    if error:
        raise InvalidArguments(f'"env": {error}')
    return ENV_KW.convertor(envlist)
def make_test(self, node: mparser.BaseNode,
              args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
              kwargs: 'kwargs.BaseTest') -> Test:
    """Build a Test object from test()/benchmark() interpreter arguments
    (shared by both; 'is_parallel' only exists for test())."""
    name = args[0]
    if ':' in name:
        # ':' is the suite separator, so it cannot appear in a test name.
        mlog.deprecation(f'":" is not allowed in test name "{name}", it has been replaced with "_"',
                         location=node)
        name = name.replace(':', '_')
    exe = args[1]
    if isinstance(exe, ExternalProgram):
        if not exe.found():
            raise InvalidArguments('Tried to use not-found external program as test exe')
    elif isinstance(exe, mesonlib.File):
        # A File needs to be resolved into a runnable program first.
        exe = self.find_program_impl([exe])
    env = self.unpack_env_kwarg(kwargs)
    if kwargs['timeout'] <= 0:
        FeatureNew.single_use('test() timeout <= 0', '0.57.0', self.subproject, location=node)
    prj = self.subproject if self.is_subproject() else self.build.project_name
    # Prefix every suite with the (sanitized) project name.
    suite: T.List[str] = []
    for s in kwargs['suite']:
        if s:
            s = ':' + s
        suite.append(prj.replace(' ', '_').replace(':', '_') + s)
    return Test(name,
                prj,
                suite,
                exe,
                kwargs['depends'],
                kwargs.get('is_parallel', False),
                kwargs['args'],
                env,
                kwargs['should_fail'],
                kwargs['timeout'],
                kwargs['workdir'],
                kwargs['protocol'],
                kwargs['priority'],
                kwargs['verbose'])
def add_test(self, node: mparser.BaseNode, args: T.List, kwargs: T.Dict[str, T.Any], is_base_test: bool):
t = self.make_test(node, args, kwargs)
if is_base_test:
self.build.tests.append(t)
mlog.debug('Adding test', mlog.bold(t.name, True))
else:
self.build.benchmarks.append(t)
mlog.debug('Adding benchmark', mlog.bold(t.name, True))
    @typed_pos_args('install_headers', varargs=(str, mesonlib.File))
    @typed_kwargs(
        'install_headers',
        KwargInfo('install_dir', (str, NoneType)),
        KwargInfo('subdir', (str, NoneType)),
        INSTALL_MODE_KW.evolve(since='0.47.0'),
    )
    def func_install_headers(self, node: mparser.BaseNode,
                             args: T.Tuple[T.List['mesonlib.FileOrString']],
                             kwargs: 'kwargs.FuncInstallHeaders') -> build.Headers:
        """Implementation of install_headers(): register header files for installation.

        'subdir' and 'install_dir' are mutually exclusive; absolute 'subdir'
        values are deprecated.
        """
        source_files = self.source_strings_to_files(args[0])
        install_subdir = kwargs['subdir']
        if install_subdir is not None:
            if kwargs['install_dir'] is not None:
                raise InterpreterException('install_headers: cannot specify both "install_dir" and "subdir". Use only "install_dir".')
            if os.path.isabs(install_subdir):
                mlog.deprecation('Subdir keyword must not be an absolute path. This will be a hard error in the next release.')
        h = build.Headers(source_files, install_subdir, kwargs['install_dir'],
                          kwargs['install_mode'], self.subproject)
        self.build.headers.append(h)
        return h
    @typed_pos_args('install_man', varargs=(str, mesonlib.File))
    @typed_kwargs(
        'install_man',
        KwargInfo('install_dir', (str, NoneType)),
        KwargInfo('locale', (str, NoneType), since='0.58.0'),
        INSTALL_MODE_KW.evolve(since='0.47.0')
    )
    def func_install_man(self, node: mparser.BaseNode,
                         args: T.Tuple[T.List['mesonlib.FileOrString']],
                         kwargs: 'kwargs.FuncInstallMan') -> build.Man:
        """Implementation of install_man(): register man pages for installation.

        Each source's file extension must be a man section number 1-9.
        """
        # We just need to narrow this, because the input is limited to files and
        # Strings as inputs, so only Files will be returned
        sources = self.source_strings_to_files(args[0])
        for s in sources:
            try:
                # NOTE(review): s is a mesonlib.File here, so this presumably
                # relies on File forwarding rsplit() to its filename — confirm.
                num = int(s.rsplit('.', 1)[-1])
            except (IndexError, ValueError):
                # No numeric extension at all -> force the range check to fail.
                num = 0
            if not 1 <= num <= 9:
                raise InvalidArguments('Man file must have a file extension of a number between 1 and 9')
        m = build.Man(sources, kwargs['install_dir'], kwargs['install_mode'],
                      self.subproject, kwargs['locale'])
        self.build.man.append(m)
        return m
    @FeatureNew('install_emptydir', '0.60.0')
    @typed_kwargs(
        'install_emptydir',
        INSTALL_MODE_KW,
        KwargInfo('install_tag', (str, NoneType), since='0.62.0')
    )
    def func_install_emptydir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs) -> build.EmptyDir:
        """Implementation of install_emptydir(): install an (empty) directory.

        Return annotation corrected from None: the created EmptyDir object is
        returned to the caller.
        """
        d = build.EmptyDir(args[0], kwargs['install_mode'], self.subproject, kwargs['install_tag'])
        self.build.emptydir.append(d)
        return d
    @FeatureNew('install_symlink', '0.61.0')
    @typed_pos_args('symlink_name', str)
    @typed_kwargs(
        'install_symlink',
        KwargInfo('pointing_to', str, required=True),
        KwargInfo('install_dir', str, required=True),
        INSTALL_TAG_KW,
    )
    def func_install_symlink(self, node: mparser.BaseNode,
                             args: T.Tuple[str],
                             kwargs) -> build.SymlinkData:
        """Implementation of install_symlink(): install a symlink to 'pointing_to'.

        Annotation corrected: typed_pos_args declares a single str positional,
        so args is T.Tuple[str] (not a tuple of a list).
        """
        name = args[0]  # Validation while creating the SymlinkData object
        target = kwargs['pointing_to']
        l = build.SymlinkData(target, name, kwargs['install_dir'],
                              self.subproject, kwargs['install_tag'])
        self.build.symlinks.append(l)
        return l
@FeatureNew('structured_sources', '0.62.0')
@typed_pos_args('structured_sources', object, optargs=[dict])
@noKwargs
@noArgsFlattening
def func_structured_sources(
self, node: mparser.BaseNode,
args: T.Tuple[object, T.Optional[T.Dict[str, object]]],
kwargs: 'TYPE_kwargs') -> build.StructuredSources:
valid_types = (str, mesonlib.File, build.GeneratedList, build.CustomTarget, build.CustomTargetIndex, build.GeneratedList)
sources: T.Dict[str, T.List[T.Union[mesonlib.File, 'build.GeneratedTypes']]] = collections.defaultdict(list)
for arg in mesonlib.listify(args[0]):
if not isinstance(arg, valid_types):
raise InvalidArguments(f'structured_sources: type "{type(arg)}" is not valid')
if isinstance(arg, str):
arg = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, arg)
sources[''].append(arg)
if args[1]:
if '' in args[1]:
raise InvalidArguments('structured_sources: keys to dictionary argument may not be an empty string.')
for k, v in args[1].items():
for arg in mesonlib.listify(v):
if not isinstance(arg, valid_types):
raise InvalidArguments(f'structured_sources: type "{type(arg)}" is not valid')
if isinstance(arg, str):
arg = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, arg)
sources[k].append(arg)
return build.StructuredSources(sources)
    @typed_pos_args('subdir', str)
    @typed_kwargs(
        'subdir',
        KwargInfo(
            'if_found',
            ContainerTypeInfo(list, object),
            validator=lambda a: 'Objects must have a found() method' if not all(hasattr(x, 'found') for x in a) else None,
            since='0.44.0',
            default=[],
            listify=True,
        ),
    )
    def func_subdir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.Subdir') -> None:
        """Implementation of subdir(): descend into a subdirectory and evaluate its meson.build.

        Validates the directory name, returns early (skipping the subdir) if
        any 'if_found' dependency is missing, then parses and evaluates the
        subdirectory's build file with self.subdir temporarily pushed.
        """
        mesonlib.check_direntry_issues(args)
        if '..' in args[0]:
            raise InvalidArguments('Subdir contains ..')
        if self.subdir == '' and args[0] == self.subproject_dir:
            raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.')
        if self.subdir == '' and args[0].startswith('meson-'):
            raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().')
        if args[0] == '':
            raise InvalidArguments("The argument given to subdir() is the empty string ''. This is prohibited.")
        for i in kwargs['if_found']:
            if not i.found():
                # A required dependency is missing: silently skip this subdir.
                return
        prev_subdir = self.subdir
        subdir = os.path.join(prev_subdir, args[0])
        if os.path.isabs(subdir):
            raise InvalidArguments('Subdir argument must be a relative path.')
        absdir = os.path.join(self.environment.get_source_dir(), subdir)
        # Resolve symlinks so the same physical directory cannot be entered
        # twice under different path spellings.
        symlinkless_dir = os.path.realpath(absdir)
        build_file = os.path.join(symlinkless_dir, 'meson.build')
        if build_file in self.processed_buildfiles:
            raise InvalidArguments(f'Tried to enter directory "{subdir}", which has already been visited.')
        self.processed_buildfiles.add(build_file)
        self.subdir = subdir
        os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True)
        buildfilename = os.path.join(self.subdir, environment.build_filename)
        self.build_def_files.add(buildfilename)
        absname = os.path.join(self.environment.get_source_dir(), buildfilename)
        if not os.path.isfile(absname):
            # Restore state before raising so callers see a consistent subdir.
            self.subdir = prev_subdir
            raise InterpreterException(f"Non-existent build file '{buildfilename!s}'")
        with open(absname, encoding='utf-8') as f:
            code = f.read()
        assert isinstance(code, str)
        try:
            codeblock = mparser.Parser(code, absname).parse()
        except mesonlib.MesonException as me:
            # Attach the filename so parse errors point at the right file.
            me.file = absname
            raise me
        try:
            self.evaluate_codeblock(codeblock)
        except SubdirDoneRequest:
            # subdir_done() only unwinds evaluation of this build file.
            pass
        self.subdir = prev_subdir
    def _get_kwarg_install_mode(self, kwargs: T.Dict[str, T.Any]) -> T.Optional[FileMode]:
        """Convert the raw 'install_mode' kwarg into a FileMode, or None if unset.

        An existing FileMode passes through unchanged.  Otherwise the value is
        up to three entries (permissions string, owner, group); a literal
        `false` entry means "leave that field unset".
        """
        if kwargs.get('install_mode', None) is None:
            return None
        if isinstance(kwargs['install_mode'], FileMode):
            return kwargs['install_mode']
        install_mode: T.List[str] = []
        mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int))
        for m in mode:
            # A `false` entry is normalized to None — it is kept in place (not
            # skipped) so the positional meaning of later entries is preserved.
            if m is False:
                m = None
            install_mode.append(m)
        if len(install_mode) > 3:
            raise InvalidArguments('Keyword argument install_mode takes at '
                                   'most 3 arguments.')
        if len(install_mode) > 0 and install_mode[0] is not None and \
            not isinstance(install_mode[0], str):
            raise InvalidArguments('Keyword argument install_mode requires the '
                                   'permissions arg to be a string or false')
        return FileMode(*install_mode)
    @typed_pos_args('install_data', varargs=(str, mesonlib.File))
    @typed_kwargs(
        'install_data',
        KwargInfo('install_dir', (str, NoneType)),
        KwargInfo('sources', ContainerTypeInfo(list, (str, mesonlib.File)), listify=True, default=[]),
        KwargInfo('rename', ContainerTypeInfo(list, str), default=[], listify=True, since='0.46.0'),
        INSTALL_MODE_KW.evolve(since='0.38.0'),
        INSTALL_TAG_KW.evolve(since='0.60.0'),
    )
    def func_install_data(self, node: mparser.BaseNode,
                          args: T.Tuple[T.List['mesonlib.FileOrString']],
                          kwargs: 'kwargs.FuncInstallData') -> build.Data:
        """Implementation of install_data(): register data files for installation.

        Positional sources and the 'sources' kwarg are merged; 'rename', when
        given, must match the combined source list one-to-one.
        """
        sources = self.source_strings_to_files(args[0] + kwargs['sources'])
        rename = kwargs['rename'] or None
        if rename:
            if len(rename) != len(sources):
                raise InvalidArguments(
                    '"rename" and "sources" argument lists must be the same length if "rename" is given. '
                    f'Rename has {len(rename)} elements and sources has {len(sources)}.')
        install_dir_name = kwargs['install_dir']
        if install_dir_name:
            if not os.path.isabs(install_dir_name):
                # Relative install dirs are reported as rooted at {datadir}.
                install_dir_name = os.path.join('{datadir}', install_dir_name)
        else:
            install_dir_name = '{datadir}'
        return self.install_data_impl(sources, kwargs['install_dir'], kwargs['install_mode'],
                                      rename, kwargs['install_tag'], install_dir_name)
def install_data_impl(self, sources: T.List[mesonlib.File], install_dir: str,
install_mode: FileMode, rename: T.Optional[str],
tag: T.Optional[str],
install_dir_name: T.Optional[str] = None,
install_data_type: T.Optional[str] = None) -> build.Data:
"""Just the implementation with no validation."""
data = build.Data(sources, install_dir, install_dir_name or install_dir, install_mode,
self.subproject, rename, tag, install_data_type)
self.build.data.append(data)
return data
@typed_pos_args('install_subdir', str)
@typed_kwargs(
'install_subdir',
KwargInfo('install_dir', str, required=True),
KwargInfo('strip_directory', bool, default=False),
KwargInfo('exclude_files', ContainerTypeInfo(list, str),
default=[], listify=True, since='0.42.0',
validator=lambda x: 'cannot be absolute' if any(os.path.isabs(d) for d in x) else None),
KwargInfo('exclude_directories', ContainerTypeInfo(list, str),
default=[], listify=True, since='0.42.0',
validator=lambda x: 'cannot be absolute' if any(os.path.isabs(d) for d in x) else None),
INSTALL_MODE_KW.evolve(since='0.38.0'),
INSTALL_TAG_KW.evolve(since='0.60.0'),
)
def func_install_subdir(self, node: mparser.BaseNode, args: T.Tuple[str],
kwargs: 'kwargs.FuncInstallSubdir') -> build.InstallDir:
exclude = (set(kwargs['exclude_files']), set(kwargs['exclude_directories']))
idir = build.InstallDir(
self.subdir,
args[0],
kwargs['install_dir'],
kwargs['install_mode'],
exclude,
kwargs['strip_directory'],
self.subproject,
install_tag=kwargs['install_tag'])
self.build.install_dirs.append(idir)
return idir
@noPosargs
@typed_kwargs(
'configure_file',
DEPFILE_KW.evolve(since='0.52.0'),
INSTALL_MODE_KW.evolve(since='0.47.0,'),
INSTALL_TAG_KW.evolve(since='0.60.0'),
KwargInfo('capture', bool, default=False, since='0.41.0'),
KwargInfo(
'command',
(ContainerTypeInfo(list, (build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str), allow_empty=False), NoneType),
listify=True,
),
KwargInfo(
'configuration',
(ContainerTypeInfo(dict, (str, int, bool)), build.ConfigurationData, NoneType),
),
KwargInfo('copy', bool, default=False, since='0.47.0'),
KwargInfo('encoding', str, default='utf-8', since='0.47.0'),
KwargInfo('format', str, default='meson', since='0.46.0',
validator=in_set_validator({'meson', 'cmake', 'cmake@'})),
KwargInfo(
'input',
ContainerTypeInfo(list, (mesonlib.File, str)),
listify=True,
default=[],
),
# Cannot use shared implementation until None backwards compat is dropped
KwargInfo('install', (bool, NoneType), since='0.50.0'),
KwargInfo('install_dir', (str, bool), default='',
validator=lambda x: 'must be `false` if boolean' if x is True else None),
KwargInfo('output', str, required=True),
KwargInfo('output_format', str, default='c', since='0.47.0',
validator=in_set_validator({'c', 'nasm'})),
)
def func_configure_file(self, node: mparser.BaseNode, args: T.List[TYPE_var],
kwargs: kwargs.ConfigureFile):
actions = sorted(x for x in {'configuration', 'command', 'copy'}
if kwargs[x] not in [None, False])
num_actions = len(actions)
if num_actions == 0:
raise InterpreterException('Must specify an action with one of these '
'keyword arguments: \'configuration\', '
'\'command\', or \'copy\'.')
elif num_actions == 2:
raise InterpreterException('Must not specify both {!r} and {!r} '
'keyword arguments since they are '
'mutually exclusive.'.format(*actions))
elif num_actions == 3:
raise InterpreterException('Must specify one of {!r}, {!r}, and '
'{!r} keyword arguments since they are '
'mutually exclusive.'.format(*actions))
if kwargs['capture'] and not kwargs['command']:
raise InvalidArguments('configure_file: "capture" keyword requires "command" keyword.')
fmt = kwargs['format']
output_format = kwargs['output_format']
depfile = kwargs['depfile']
# Validate input
inputs = self.source_strings_to_files(kwargs['input'])
inputs_abs = []
for f in inputs:
if isinstance(f, mesonlib.File):
inputs_abs.append(f.absolute_path(self.environment.source_dir,
self.environment.build_dir))
self.add_build_def_file(f)
else:
raise InterpreterException('Inputs can only be strings or file objects')
# Validate output
output = kwargs['output']
if inputs_abs:
values = mesonlib.get_filenames_templates_dict(inputs_abs, None)
outputs = mesonlib.substitute_values([output], values)
output = outputs[0]
if depfile:
depfile = mesonlib.substitute_values([depfile], values)[0]
ofile_rpath = os.path.join(self.subdir, output)
if ofile_rpath in self.configure_file_outputs:
mesonbuildfile = os.path.join(self.subdir, 'meson.build')
current_call = f"{mesonbuildfile}:{self.current_lineno}"
first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath])
mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call)
else:
self.configure_file_outputs[ofile_rpath] = self.current_lineno
if os.path.dirname(output) != '':
raise InterpreterException('Output file name must not contain a subdirectory.')
(ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output))
ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname)
# Perform the appropriate action
if kwargs['configuration'] is not None:
conf = kwargs['configuration']
if isinstance(conf, dict):
FeatureNew.single_use('configure_file.configuration dictionary', '0.49.0', self.subproject, location=node)
for k, v in conf.items():
if not isinstance(v, (str, int, bool)):
raise InvalidArguments(
f'"configuration_data": initial value dictionary key "{k!r}"" must be "str | int | bool", not "{v!r}"')
conf = build.ConfigurationData(conf)
mlog.log('Configuring', mlog.bold(output), 'using configuration')
if len(inputs) > 1:
raise InterpreterException('At most one input file can given in configuration mode')
if inputs:
os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
file_encoding = kwargs['encoding']
missing_variables, confdata_useless = \
mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf,
fmt, file_encoding)
if missing_variables:
var_list = ", ".join(map(repr, sorted(missing_variables)))
mlog.warning(
f"The variable(s) {var_list} in the input file '{inputs[0]}' are not "
"present in the given configuration data.", location=node)
if confdata_useless:
ifbase = os.path.basename(inputs_abs[0])
tv = FeatureNew.get_target_version(self.subproject)
if FeatureNew.check_version(tv, '0.47.0'):
mlog.warning('Got an empty configuration_data() object and found no '
f'substitutions in the input file {ifbase!r}. If you want to '
'copy a file to the build dir, use the \'copy:\' keyword '
'argument added in 0.47.0', location=node)
else:
mesonlib.dump_conf_header(ofile_abs, conf, output_format)
conf.used = True
elif kwargs['command'] is not None:
if len(inputs) > 1:
FeatureNew.single_use('multiple inputs in configure_file()', '0.52.0', self.subproject, location=node)
# We use absolute paths for input and output here because the cwd
# that the command is run from is 'unspecified', so it could change.
# Currently it's builddir/subdir for in_builddir else srcdir/subdir.
values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs])
if depfile:
depfile = os.path.join(self.environment.get_scratch_dir(), depfile)
values['@DEPFILE@'] = depfile
# Substitute @INPUT@, @OUTPUT@, etc here.
_cmd = mesonlib.substitute_values(kwargs['command'], values)
mlog.log('Configuring', mlog.bold(output), 'with command')
cmd, *args = _cmd
res = self.run_command_impl(node, (cmd, args),
{'capture': True, 'check': True, 'env': build.EnvironmentVariables()},
True)
if kwargs['capture']:
dst_tmp = ofile_abs + '~'
file_encoding = kwargs['encoding']
with open(dst_tmp, 'w', encoding=file_encoding) as f:
f.writelines(res.stdout)
if inputs_abs:
shutil.copymode(inputs_abs[0], dst_tmp)
mesonlib.replace_if_different(ofile_abs, dst_tmp)
if depfile:
mlog.log('Reading depfile:', mlog.bold(depfile))
with open(depfile, encoding='utf-8') as f:
df = DepFile(f.readlines())
deps = df.get_all_dependencies(ofile_fname)
for dep in deps:
self.add_build_def_file(dep)
elif kwargs['copy']:
if len(inputs_abs) != 1:
raise InterpreterException('Exactly one input file must be given in copy mode')
os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
shutil.copy2(inputs_abs[0], ofile_abs)
# Install file if requested, we check for the empty string
# for backwards compatibility. That was the behaviour before
# 0.45.0 so preserve it.
idir = kwargs['install_dir']
if idir is False:
idir = ''
FeatureDeprecated.single_use('configure_file install_dir: false', '0.50.0',
self.subproject, 'Use the `install:` kwarg instead', location=node)
install = kwargs['install'] if kwargs['install'] is not None else idir != ''
if install:
if not idir:
raise InterpreterException(
'"install_dir" must be specified when "install" in a configure_file is true')
cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname)
install_mode = kwargs['install_mode']
install_tag = kwargs['install_tag']
self.build.data.append(build.Data([cfile], idir, idir, install_mode, self.subproject,
install_tag=install_tag, data_type='configure'))
return mesonlib.File.from_built_file(self.subdir, output)
def extract_incdirs(self, kwargs, key: str = 'include_directories'):
prospectives = extract_as_list(kwargs, key)
result = []
for p in prospectives:
if isinstance(p, build.IncludeDirs):
result.append(p)
elif isinstance(p, str):
result.append(self.build_incdir_object([p]))
else:
raise InterpreterException('Include directory objects can only be created from strings or include directories.')
return result
    @typed_pos_args('include_directories', varargs=str)
    @typed_kwargs('include_directories', KwargInfo('is_system', bool, default=False))
    def func_include_directories(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]],
                                 kwargs: 'kwargs.FuncIncludeDirectories') -> build.IncludeDirs:
        """Implementation of include_directories(): thin wrapper over build_incdir_object()."""
        return self.build_incdir_object(args[0], kwargs['is_system'])
    def build_incdir_object(self, incdir_strings: T.List[str], is_system: bool = False) -> build.IncludeDirs:
        """Create an IncludeDirs object from directory strings relative to the current subdir.

        Rejects absolute paths into the source tree, warns (soon errors) on
        sandbox violations into other subprojects, and requires each directory
        to exist in either the source or the build tree.
        """
        if not isinstance(is_system, bool):
            # Re-checked here because this method is also called directly,
            # bypassing the typed_kwargs validation of the DSL entry point.
            raise InvalidArguments('Is_system must be boolean.')
        src_root = self.environment.get_source_dir()
        build_root = self.environment.get_build_dir()
        absbase_src = os.path.join(src_root, self.subdir)
        absbase_build = os.path.join(build_root, self.subdir)
        for a in incdir_strings:
            if a.startswith(src_root):
                raise InvalidArguments(textwrap.dedent('''\
                    Tried to form an absolute path to a source dir.
                    You should not do that but use relative paths instead.
                    To get include path to any directory relative to the current dir do
                    incdir = include_directories(dirname)
                    After this incdir will contain both the current source dir as well as the
                    corresponding build dir. It can then be used in any subdirectory and
                    Meson will take care of all the busywork to make paths work.
                    Dirname can even be '.' to mark the current directory. Though you should
                    remember that the current source and build directories are always
                    put in the include directories by default so you only need to do
                    include_directories('.') if you intend to use the result in a
                    different subdirectory.
                    '''))
            else:
                try:
                    self.validate_within_subproject(self.subdir, a)
                except InterpreterException:
                    # Currently only a warning; scheduled to become an error.
                    mlog.warning('include_directories sandbox violation!', location=self.current_node)
                    print(textwrap.dedent(f'''\
                        The project is trying to access the directory {a!r} which belongs to a different
                        subproject. This is a problem as it hardcodes the relative paths of these two projects.
                        This makes it impossible to compile the project in any other directory layout and also
                        prevents the subproject from changing its own directory layout.
                        Instead of poking directly at the internals the subproject should be executed and
                        it should set a variable that the caller can then use. Something like:
                        # In subproject
                        some_dep = declare_dependency(include_directories: include_directories('include'))
                        # In subproject wrap file
                        [provide]
                        some = some_dep
                        # In parent project
                        some_dep = dependency('some')
                        executable(..., dependencies: [some_dep])
                        This warning will become a hard error in a future Meson release.
                        '''))
            absdir_src = os.path.join(absbase_src, a)
            absdir_build = os.path.join(absbase_build, a)
            if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build):
                raise InvalidArguments(f'Include dir {a} does not exist.')
        i = build.IncludeDirs(self.subdir, incdir_strings, is_system)
        return i
    @typed_pos_args('add_test_setup', str)
    @typed_kwargs(
        'add_test_setup',
        KwargInfo('exe_wrapper', ContainerTypeInfo(list, (str, ExternalProgram)), listify=True, default=[]),
        KwargInfo('gdb', bool, default=False),
        KwargInfo('timeout_multiplier', int, default=1),
        KwargInfo('exclude_suites', ContainerTypeInfo(list, str), listify=True, default=[], since='0.57.0'),
        KwargInfo('is_default', bool, default=False, since='0.49.0'),
        ENV_KW,
    )
    def func_add_test_setup(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.AddTestSetup') -> None:
        """Implementation of add_test_setup(): register a named test setup.

        Setup names are namespaced as 'project:name'; only one setup may be
        marked as the default.
        """
        setup_name = args[0]
        if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None:
            raise InterpreterException('Setup name may only contain alphanumeric characters.')
        if ":" not in setup_name:
            # Qualify un-namespaced names with the (sub)project name.
            setup_name = f'{(self.subproject if self.subproject else self.build.project_name)}:{setup_name}'
        exe_wrapper: T.List[str] = []
        for i in kwargs['exe_wrapper']:
            if isinstance(i, str):
                exe_wrapper.append(i)
            else:
                if not i.found():
                    raise InterpreterException('Tried to use non-found executable.')
                exe_wrapper += i.get_command()
        timeout_multiplier = kwargs['timeout_multiplier']
        if timeout_multiplier <= 0:
            FeatureNew('add_test_setup() timeout_multiplier <= 0', '0.57.0').use(self.subproject)
        if kwargs['is_default']:
            if self.build.test_setup_default_name is not None:
                raise InterpreterException(f'{self.build.test_setup_default_name!r} is already set as default. '
                                           'is_default can be set to true only once')
            self.build.test_setup_default_name = setup_name
        self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, kwargs['gdb'], timeout_multiplier, kwargs['env'],
                                                             kwargs['exclude_suites'])
    @typed_pos_args('add_global_arguments', varargs=str)
    @typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW)
    def func_add_global_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
        """Implementation of add_global_arguments(): compile args applied to all targets."""
        self._add_global_arguments(node, self.build.global_args[kwargs['native']], args[0], kwargs)
@typed_pos_args('add_global_link_arguments', varargs=str)
@typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_global_link_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
self._add_global_arguments(node, self.build.global_link_args[kwargs['native']], args[0], kwargs)
    @typed_pos_args('add_project_arguments', varargs=str)
    @typed_kwargs('add_project_arguments', NATIVE_KW, LANGUAGE_KW)
    def func_add_project_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
        """Implementation of add_project_arguments(): compile args scoped to this (sub)project."""
        self._add_project_arguments(node, self.build.projects_args[kwargs['native']], args[0], kwargs)
@typed_pos_args('add_project_link_arguments', varargs=str)
@typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_project_link_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
self._add_project_arguments(node, self.build.projects_link_args[kwargs['native']], args[0], kwargs)
def _warn_about_builtin_args(self, args: T.List[str]) -> None:
# -Wpedantic is deliberately not included, since some people want to use it but not use -Wextra
# see e.g.
# https://github.com/mesonbuild/meson/issues/3275#issuecomment-641354956
# https://github.com/mesonbuild/meson/issues/3742
warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra')
optargs = ('-O0', '-O2', '-O3', '-Os', '-Oz', '/O1', '/O2', '/Os')
for arg in args:
if arg in warnargs:
mlog.warning(f'Consider using the built-in warning_level option instead of using "{arg}".',
location=self.current_node)
elif arg in optargs:
mlog.warning(f'Consider using the built-in optimization level instead of using "{arg}".',
location=self.current_node)
elif arg == '-Werror':
mlog.warning(f'Consider using the built-in werror option instead of using "{arg}".',
location=self.current_node)
elif arg == '-g':
mlog.warning(f'Consider using the built-in debug option instead of using "{arg}".',
location=self.current_node)
elif arg.startswith('-fsanitize'):
mlog.warning(f'Consider using the built-in option for sanitizers instead of using "{arg}".',
location=self.current_node)
elif arg.startswith('-std=') or arg.startswith('/std:'):
mlog.warning(f'Consider using the built-in option for language standard version instead of using "{arg}".',
location=self.current_node)
    def _add_global_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.List[str]],
                              args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
        """Record global (all-projects) arguments; forbidden inside subprojects."""
        if self.is_subproject():
            msg = f'Function \'{node.func_name}\' cannot be used in subprojects because ' \
                  'there is no way to make that reliable.\nPlease only call ' \
                  'this if is_subproject() returns false. Alternatively, ' \
                  'define a variable that\ncontains your language-specific ' \
                  'arguments and add it to the appropriate *_args kwarg ' \
                  'in each target.'
            raise InvalidCode(msg)
        # Global args are also frozen once project args are frozen.
        frozen = self.project_args_frozen or self.global_args_frozen
        self._add_arguments(node, argsdict, frozen, args, kwargs)
def _add_project_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.Dict[str, T.List[str]]],
args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
if self.subproject not in argsdict:
argsdict[self.subproject] = {}
self._add_arguments(node, argsdict[self.subproject],
self.project_args_frozen, args, kwargs)
    def _add_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.List[str]],
                       args_frozen: bool, args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
        """Append args to argsdict per language, unless arguments are frozen.

        Raises InvalidCode if called after a build target was declared.
        """
        if args_frozen:
            msg = f'Tried to use \'{node.func_name}\' after a build target has been declared.\n' \
                  'This is not permitted. Please declare all arguments before your targets.'
            raise InvalidCode(msg)
        self._warn_about_builtin_args(args)
        for lang in kwargs['language']:
            # Rebinding (rather than mutating in place) keeps previously
            # captured lists unchanged.
            argsdict[lang] = argsdict.get(lang, []) + args
    @noArgsFlattening
    @typed_pos_args('environment', optargs=[(str, list, dict)])
    @typed_kwargs('environment', ENV_METHOD_KW, ENV_SEPARATOR_KW.evolve(since='0.62.0'))
    def func_environment(self, node: mparser.FunctionNode, args: T.Tuple[T.Union[None, str, T.List['TYPE_var'], T.Dict[str, 'TYPE_var']]],
                         kwargs: 'TYPE_kwargs') -> build.EnvironmentVariables:
        """Implementation of environment(): create an EnvironmentVariables object.

        The optional positional initializer (str/list/dict) is validated with
        the shared env validator and converted honoring 'method'/'separator'.
        """
        init = args[0]
        if init is not None:
            FeatureNew.single_use('environment positional arguments', '0.52.0', self.subproject, location=node)
            msg = ENV_KW.validator(init)
            if msg:
                raise InvalidArguments(f'"environment": {msg}')
            if isinstance(init, dict) and any(i for i in init.values() if isinstance(i, list)):
                FeatureNew.single_use('List of string in dictionary value', '0.62.0', self.subproject, location=node)
            return env_convertor_with_method(init, kwargs['method'], kwargs['separator'])
        return build.EnvironmentVariables()
@typed_pos_args('join_paths', varargs=str, min_varargs=1)
@noKwargs
def func_join_paths(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]], kwargs: 'TYPE_kwargs') -> str:
return os.path.join(*args[0]).replace('\\', '/')
    def run(self) -> None:
        """Run the interpreter, then emit summary output and feature reports.

        Extra warnings and the project summary are printed only for the
        top-level project, not for subprojects.
        """
        super().run()
        mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets))))
        FeatureNew.report(self.subproject)
        FeatureDeprecated.report(self.subproject)
        if not self.is_subproject():
            self.print_extra_warnings()
        if self.subproject == '':
            self._print_summary()
def print_extra_warnings(self) -> None:
# TODO cross compilation
for c in self.coredata.compilers.host.values():
if c.get_id() == 'clang':
self.check_clang_asan_lundef()
break
    def check_clang_asan_lundef(self) -> None:
        """Warn about combining a sanitizer with b_lundef on Clang.

        No-op if either option is absent from coredata.
        """
        if OptionKey('b_lundef') not in self.coredata.options:
            return
        if OptionKey('b_sanitize') not in self.coredata.options:
            return
        if (self.coredata.options[OptionKey('b_lundef')].value and
                self.coredata.options[OptionKey('b_sanitize')].value != 'none'):
            mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef.
This will probably not work.
Try setting b_lundef to false instead.'''.format(self.coredata.options[OptionKey('b_sanitize')].value),
                         location=self.current_node)
    # Check that the indicated file is within the same subproject
    # as we currently are. This is to stop people doing
    # nasty things like:
    #
    # f = files('../../master_src/file.c')
    #
    # Note that this is validated only when the file
    # object is generated. The result can be used in a different
    # subproject than it is defined in (due to e.g. a
    # declare_dependency).
    def validate_within_subproject(self, subdir, fname):
        """Raise InterpreterException if subdir/fname escapes the current (sub)project.

        Paths entirely outside the source tree are allowed (vendor SDKs etc.);
        paths inside the tree must stay within this project's root and must
        not reach into a nested subproject.
        """
        srcdir = Path(self.environment.source_dir)
        # resolve() follows symlinks, so symlinked escapes are caught too.
        norm = Path(srcdir, subdir, fname).resolve()
        if os.path.isdir(norm):
            inputtype = 'directory'
        else:
            inputtype = 'file'
        if srcdir not in norm.parents:
            # Grabbing files outside the source tree is ok.
            # This is for vendor stuff like:
            #
            # /opt/vendorsdk/src/file_with_license_restrictions.c
            return
        project_root = Path(srcdir, self.root_subdir)
        subproject_dir = project_root / self.subproject_dir
        if norm == project_root:
            return
        if project_root not in norm.parents:
            raise InterpreterException(f'Sandbox violation: Tried to grab {inputtype} {norm.name} outside current (sub)project.')
        if subproject_dir == norm or subproject_dir in norm.parents:
            raise InterpreterException(f'Sandbox violation: Tried to grab {inputtype} {norm.name} from a nested subproject.')
    @T.overload
    def source_strings_to_files(self, sources: T.List['mesonlib.FileOrString'], strict: bool = True) -> T.List['mesonlib.File']: ...
    @T.overload
    def source_strings_to_files(self, sources: T.List['mesonlib.FileOrString'], strict: bool = False) -> T.List['mesonlib.FileOrString']: ... # noqa: F811
    @T.overload
    def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool = True) -> T.List['SourceOutputs']: ... # noqa: F811
    def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool = True) -> T.List['SourceOutputs']: # noqa: F811
        """Lower inputs to a list of Targets and Files, replacing any strings.
        :param sources: A raw (Meson DSL) list of inputs (targets, files, and
            strings)
        :raises InterpreterException: if any of the inputs are of an invalid type
        :return: A list of Targets and Files
        """
        mesonlib.check_direntry_issues(sources)
        if not isinstance(sources, list):
            # Accept a single item as a one-element list.
            sources = [sources]
        results: T.List['SourceOutputs'] = []
        for s in sources:
            if isinstance(s, str):
                if not strict and s.startswith(self.environment.get_build_dir()):
                    # Generated files cannot be represented as source Files;
                    # pass the raw string through (deprecated behavior).
                    results.append(s)
                    mlog.warning(f'Source item {s!r} cannot be converted to File object, because it is a generated file. '
                                 'This will become a hard error in the future.', location=self.current_node)
                else:
                    self.validate_within_subproject(self.subdir, s)
                    results.append(mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s))
            elif isinstance(s, mesonlib.File):
                results.append(s)
            elif isinstance(s, (build.GeneratedList, build.BuildTarget,
                                build.CustomTargetIndex, build.CustomTarget,
                                build.ExtractedObjects, build.StructuredSources)):
                # Already-lowered source-producing objects pass through as-is.
                results.append(s)
            else:
                raise InterpreterException(f'Source item is {s!r} instead of '
                                           'string or File-type object')
        return results
    def add_target(self, name, tobj):
        # Validate the user-supplied target name and register the target
        # object in the build, assigning a stable GUID for backends
        # (e.g. Visual Studio) that need one.
        if name == '':
            raise InterpreterException('Target name must not be empty.')
        if name.strip() == '':
            raise InterpreterException('Target name must not consist only of whitespace.')
        if has_path_sep(name):
            # A path separator in the name is only an error when the path
            # segment refers to an existing source directory; targets must be
            # defined by the meson.build inside that directory instead.
            pathseg = os.path.join(self.subdir, os.path.split(name)[0])
            if os.path.exists(os.path.join(self.source_root, pathseg)):
                raise InvalidArguments(textwrap.dedent(f'''\
                    Target "{name}" has a path segment pointing to directory "{pathseg}". This is an error.
                    To define a target that builds in that directory you must define it
                    in the meson.build file in that directory.
                '''))
        if name.startswith('meson-'):
            raise InvalidArguments("Target names starting with 'meson-' are reserved "
                                   "for Meson's internal use. Please rename.")
        if name in coredata.FORBIDDEN_TARGET_NAMES:
            raise InvalidArguments(f"Target name '{name}' is reserved for Meson's "
                                   "internal use. Please rename.")
        # To permit an executable and a shared library to have the
        # same name, such as "foo.exe" and "libfoo.a".
        idname = tobj.get_id()
        if idname in self.build.targets:
            raise InvalidCode(f'Tried to create target "{name}", but a target of that name already exists.')
        self.build.targets[idname] = tobj
        if idname not in self.coredata.target_guids:
            self.coredata.target_guids[idname] = str(uuid.uuid4()).upper()
@FeatureNew('both_libraries', '0.46.0')
def build_both_libraries(self, node, args, kwargs):
shared_lib = self.build_target(node, args, kwargs, build.SharedLibrary)
# Check if user forces non-PIC static library.
pic = True
key = OptionKey('b_staticpic')
if 'pic' in kwargs:
pic = kwargs['pic']
elif key in self.environment.coredata.options:
pic = self.environment.coredata.options[key].value
if self.backend.name == 'xcode':
# Xcode is a bit special in that you can't (at least for the moment)
# form a library only from object file inputs. The simple but inefficient
# solution is to use the sources directly. This will lead to them being
# built twice. This is unfortunate and slow, but at least it works.
# Feel free to submit patches to get this fixed if it is an
# issue for you.
reuse_object_files = False
else:
reuse_object_files = pic
if reuse_object_files:
# Exclude sources from args and kwargs to avoid building them twice
static_args = [args[0]]
static_kwargs = kwargs.copy()
static_kwargs['sources'] = []
static_kwargs['objects'] = shared_lib.extract_all_objects()
else:
static_args = args
static_kwargs = kwargs
static_lib = self.build_target(node, static_args, static_kwargs, build.StaticLibrary)
return build.BothLibraries(shared_lib, static_lib)
def build_library(self, node, args, kwargs):
default_library = self.coredata.get_option(OptionKey('default_library', subproject=self.subproject))
if default_library == 'shared':
return self.build_target(node, args, kwargs, build.SharedLibrary)
elif default_library == 'static':
return self.build_target(node, args, kwargs, build.StaticLibrary)
elif default_library == 'both':
return self.build_both_libraries(node, args, kwargs)
else:
raise InterpreterException(f'Unknown default_library value: {default_library}.')
def build_target(self, node: mparser.BaseNode, args, kwargs, targetclass):
@FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories'])
@FeatureNewKwargs('build target', '0.41.0', ['rust_args'])
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility'])
def build_target_decorator_caller(self, node, args, kwargs):
return True
build_target_decorator_caller(self, node, args, kwargs)
if not args:
raise InterpreterException('Target does not have a name.')
name, *sources = args
for_machine = self.machine_from_native_kwarg(kwargs)
if 'sources' in kwargs:
sources += listify(kwargs['sources'])
sources = self.source_strings_to_files(sources)
objs = extract_as_list(kwargs, 'objects')
kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies')
kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
if 'extra_files' in kwargs:
ef = extract_as_list(kwargs, 'extra_files')
kwargs['extra_files'] = self.source_strings_to_files(ef)
self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources)
if targetclass not in {build.Executable, build.SharedLibrary, build.SharedModule, build.StaticLibrary, build.Jar}:
mlog.debug('Unknown target type:', str(targetclass))
raise RuntimeError('Unreachable code')
self.kwarg_strings_to_includedirs(kwargs)
# Filter out kwargs from other target types. For example 'soversion'
# passed to library() when default_library == 'static'.
kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs}
srcs: T.List['SourceInputs'] = []
struct: T.Optional[build.StructuredSources] = build.StructuredSources()
for s in sources:
if isinstance(s, build.StructuredSources):
struct = struct + s
else:
srcs.append(s)
if not struct:
struct = None
else:
# Validate that we won't end up with two outputs with the same name.
# i.e, don't allow:
# [structured_sources('foo/bar.rs'), structured_sources('bar/bar.rs')]
for v in struct.sources.values():
outputs: T.Set[str] = set()
for f in v:
o: T.List[str]
if isinstance(f, str):
o = [os.path.basename(f)]
elif isinstance(f, mesonlib.File):
o = [f.fname]
else:
o = f.get_outputs()
conflicts = outputs.intersection(o)
if conflicts:
raise InvalidArguments.from_node(
f"Conflicting sources in structured sources: {", ".join(sorted(conflicts))}",
node=node)
outputs.update(o)
kwargs['include_directories'] = self.extract_incdirs(kwargs)
target = targetclass(name, self.subdir, self.subproject, for_machine, srcs, struct, objs, self.environment, kwargs)
target.project_version = self.project_version
self.add_stdlib_info(target)
self.add_target(name, target)
self.project_args_frozen = True
return target
    def kwarg_strings_to_includedirs(self, kwargs):
        # Convert plain-string entries of the 'd_import_dirs' kwarg into
        # IncludeDirs objects, mutating ``kwargs`` in place.
        if 'd_import_dirs' in kwargs:
            items = mesonlib.extract_as_list(kwargs, 'd_import_dirs')
            cleaned_items = []
            for i in items:
                if isinstance(i, str):
                    # BW compatibility. This was permitted so we must support it
                    # for a few releases so people can transition to "correct"
                    # path declarations.
                    if os.path.normpath(i).startswith(self.environment.get_source_dir()):
                        mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead.
                            This will become a hard error in the future.''', location=self.current_node)
                        i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir))
                    i = self.build_incdir_object([i])
                cleaned_items.append(i)
            kwargs['d_import_dirs'] = cleaned_items
def get_used_languages(self, target):
result = set()
for i in target.sources:
for lang, c in self.coredata.compilers[target.for_machine].items():
if c.can_compile(i):
result.add(lang)
break
return result
def add_stdlib_info(self, target):
for l in self.get_used_languages(target):
dep = self.build.stdlibs[target.for_machine].get(l, None)
if dep:
target.add_deps(dep)
def check_sources_exist(self, subdir, sources):
for s in sources:
if not isinstance(s, str):
continue # This means a generated source and they always exist.
fname = os.path.join(subdir, s)
if not os.path.isfile(fname):
raise InterpreterException(f'Tried to add non-existing source file {s}.')
# Only permit object extraction from the same subproject
def validate_extraction(self, buildtarget: mesonlib.HoldableObject) -> None:
if self.subproject != buildtarget.subproject:
raise InterpreterException('Tried to extract objects from a different subproject.')
def is_subproject(self) -> bool:
return self.subproject != ''
@typed_pos_args('set_variable', str, object)
@noKwargs
@noArgsFlattening
@noSecondLevelHolderResolving
def func_set_variable(self, node: mparser.BaseNode, args: T.Tuple[str, object], kwargs: 'TYPE_kwargs') -> None:
varname, value = args
self.set_variable(varname, value, holderify=True)
@typed_pos_args('get_variable', (str, Disabler), optargs=[object])
@noKwargs
@noArgsFlattening
@unholder_return
def func_get_variable(self, node: mparser.BaseNode, args: T.Tuple[T.Union[str, Disabler], T.Optional[object]],
kwargs: 'TYPE_kwargs') -> 'TYPE_var':
varname, fallback = args
if isinstance(varname, Disabler):
return varname
try:
return self.variables[varname]
except KeyError:
if fallback is not None:
return self._holderify(fallback)
raise InterpreterException(f'Tried to get unknown variable "{varname}".')
@typed_pos_args('is_variable', str)
@noKwargs
def func_is_variable(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> bool:
return args[0] in self.variables
@FeatureNew('unset_variable', '0.60.0')
@typed_pos_args('unset_variable', str)
@noKwargs
def func_unset_variable(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> None:
varname = args[0]
try:
del self.variables[varname]
except KeyError:
raise InterpreterException(f'Tried to unset unknown variable "{varname}".')
@staticmethod
def machine_from_native_kwarg(kwargs: T.Dict[str, T.Any]) -> MachineChoice:
native = kwargs.get('native', False)
if not isinstance(native, bool):
raise InvalidArguments('Argument to "native" must be a boolean.')
return MachineChoice.BUILD if native else MachineChoice.HOST
@FeatureNew('is_disabler', '0.52.0')
@typed_pos_args('is_disabler', object)
@noKwargs
def func_is_disabler(self, node: mparser.BaseNode, args: T.Tuple[object], kwargs: 'TYPE_kwargs') -> bool:
return isinstance(args[0], Disabler)
@noKwargs
@FeatureNew('range', '0.58.0')
@typed_pos_args('range', int, optargs=[int, int])
def func_range(self, node, args: T.Tuple[int, T.Optional[int], T.Optional[int]], kwargs: T.Dict[str, T.Any]) -> P_OBJ.RangeHolder:
start, stop, step = args
# Just like Python's range, we allow range(stop), range(start, stop), or
# range(start, stop, step)
if stop is None:
stop = start
start = 0
if step is None:
step = 1
# This is more strict than Python's range()
if start < 0:
raise InterpreterException('start cannot be negative')
if stop < start:
raise InterpreterException('stop cannot be less than start')
if step < 1:
raise InterpreterException('step must be >=1')
return P_OBJ.RangeHolder(start, stop, step, subproject=self.subproject)
# Copyright 2012-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from .. import mparser
from .. import environment
from .. import coredata
from .. import dependencies
from .. import mlog
from .. import build
from .. import optinterpreter
from .. import compilers
from .. import envconfig
from ..wrap import wrap, WrapMode
from .. import mesonlib
from ..mesonlib import MesonBugException, HoldableObject, FileMode, MachineChoice, OptionKey, listify, extract_as_list, has_path_sep
from ..programs import ExternalProgram, NonExistingExternalProgram
from ..dependencies import Dependency
from ..depfile import DepFile
from ..interpreterbase import ContainerTypeInfo, InterpreterBase, KwargInfo, typed_kwargs, typed_pos_args
from ..interpreterbase import noPosargs, noKwargs, permittedKwargs, noArgsFlattening, noSecondLevelHolderResolving, unholder_return
from ..interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest
from ..interpreterbase import Disabler, disablerIfNotFound
from ..interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs, FeatureDeprecatedKwargs
from ..interpreterbase import ObjectHolder
from ..modules import ExtensionModule, ModuleObject, MutableModuleObject, NewExtensionModule, NotFoundExtensionModule
from ..cmake import CMakeInterpreter
from ..backend.backends import ExecutableSerialisation
from . import interpreterobjects as OBJ
from . import compiler as compilerOBJ
from .mesonmain import MesonMain
from .dependencyfallbacks import DependencyFallbacksHolder
from .interpreterobjects import (
SubprojectHolder,
Test,
RunProcess,
extract_required_kwarg,
extract_search_dirs,
NullSubprojectInterpreter,
)
from .type_checking import (
COMMAND_KW,
CT_BUILD_ALWAYS,
CT_BUILD_ALWAYS_STALE,
CT_BUILD_BY_DEFAULT,
CT_INPUT_KW,
CT_INSTALL_DIR_KW,
CT_OUTPUT_KW,
DEFAULT_OPTIONS,
DEPENDS_KW,
DEPEND_FILES_KW,
DEPFILE_KW,
DISABLER_KW,
ENV_KW,
ENV_METHOD_KW,
ENV_SEPARATOR_KW,
INSTALL_KW,
INSTALL_MODE_KW,
CT_INSTALL_TAG_KW,
INSTALL_TAG_KW,
LANGUAGE_KW,
NATIVE_KW, OVERRIDE_OPTIONS_KW,
REQUIRED_KW,
NoneType,
in_set_validator,
env_convertor_with_method
)
from . import primitives as P_OBJ
from pathlib import Path
import os
import shutil
import uuid
import re
import stat
import collections
import typing as T
import textwrap
import importlib
if T.TYPE_CHECKING:
import argparse
from typing_extensions import Literal
from . import kwargs
from ..backend.backends import Backend
from ..interpreterbase.baseobjects import InterpreterObject, TYPE_var, TYPE_kwargs
from ..programs import OverrideProgram
# Input source types passed to Targets
SourceInputs = T.Union[mesonlib.File, build.GeneratedList, build.BuildTarget, build.BothLibraries,
build.CustomTargetIndex, build.CustomTarget, build.GeneratedList,
build.ExtractedObjects, str]
# Input source types passed to the build.Target classes
SourceOutputs = T.Union[mesonlib.File, build.GeneratedList,
build.BuildTarget, build.CustomTargetIndex, build.CustomTarget,
build.ExtractedObjects, build.GeneratedList, build.StructuredSources]
def _project_version_validator(value: T.Union[T.List, str, mesonlib.File, None]) -> T.Optional[str]:
if isinstance(value, list):
if len(value) != 1:
return 'when passed as array must have a length of 1'
elif not isinstance(value[0], mesonlib.File):
return 'when passed as array must contain a File'
return None
def stringifyUserArguments(args: T.List[T.Any], quote: bool = False) -> str:
    """Render a Meson DSL value back into meson.build source syntax.

    :param quote: wrap bare strings in single quotes (always done for
        elements nested inside containers)
    :raises InvalidArguments: for values with no DSL representation
    """
    if isinstance(args, str):
        return f"'{args}'" if quote else args
    if isinstance(args, bool):
        # Must be tested before int: bool is an int subclass.
        return 'true' if args else 'false'
    if isinstance(args, int):
        return str(args)
    if isinstance(args, list):
        return '[%s]' % ', '.join([stringifyUserArguments(x, True) for x in args])
    if isinstance(args, dict):
        return '{%s}' % ', '.join(['{} : {}'.format(stringifyUserArguments(k, True), stringifyUserArguments(v, True)) for k, v in args.items()])
    raise InvalidArguments('Function accepts only strings, integers, bools, lists, dictionaries and lists thereof.')
class Summary:
    """Collects and pretty-prints the configuration summary shown at the end
    of a setup run (populated by the summary() DSL function)."""
    def __init__(self, project_name: str, project_version: str):
        self.project_name = project_name
        self.project_version = project_version
        # section name -> {key -> (formatted value list, list separator)}
        self.sections = collections.defaultdict(dict)
        # longest key seen so far; used to align the ':' column when dumping
        self.max_key_len = 0
    def add_section(self, section: str, values: T.Dict[str, T.Any], bool_yn: bool,
                    list_sep: T.Optional[str], subproject: str) -> None:
        """Add one summary section, stringifying every value.

        :param bool_yn: render booleans as colored YES/NO instead of true/false
        :param list_sep: separator used when dumping list values, or None for
            one value per line
        :raises InterpreterException: on duplicate keys or unsupported types
        """
        for k, v in values.items():
            if k in self.sections[section]:
                raise InterpreterException(f'Summary section {section!r} already have key {k!r}')
            formatted_values = []
            for i in listify(v):
                if isinstance(i, bool) and bool_yn:
                    formatted_values.append(mlog.green('YES') if i else mlog.red('NO'))
                elif isinstance(i, (str, int, bool)):
                    formatted_values.append(str(i))
                elif isinstance(i, (ExternalProgram, Dependency)):
                    FeatureNew.single_use('dependency or external program in summary', '0.57.0', subproject)
                    formatted_values.append(i.summary_value())
                elif isinstance(i, coredata.UserOption):
                    FeatureNew.single_use('feature option in summary', '0.58.0', subproject)
                    formatted_values.append(i.printable_value())
                else:
                    m = 'Summary value in section {!r}, key {!r}, must be string, integer, boolean, dependency or external program'
                    raise InterpreterException(m.format(section, k))
            self.sections[section][k] = (formatted_values, list_sep)
            self.max_key_len = max(self.max_key_len, len(k))
    def dump(self):
        """Print every section with keys aligned on the ':' column."""
        mlog.log(self.project_name, mlog.normal_cyan(self.project_version))
        for section, values in self.sections.items():
            mlog.log('') # newline
            if section:
                mlog.log(' ', mlog.bold(section))
            for k, v in values.items():
                v, list_sep = v
                padding = self.max_key_len - len(k)
                end = ' ' if v else ''
                mlog.log(' ' * 3, k + ' ' * padding + ':', end=end)
                # continuation lines line up after the ':' column
                indent = self.max_key_len + 6
                self.dump_value(v, list_sep, indent)
        mlog.log('') # newline
    def dump_value(self, arr, list_sep, indent):
        # Print the value list either one item per line (list_sep is None) or
        # word-wrapped to the terminal width with list_sep between items.
        lines_sep = '\n' + ' ' * indent
        if list_sep is None:
            mlog.log(*arr, sep=lines_sep)
            return
        max_len = shutil.get_terminal_size().columns
        line = []
        line_len = indent
        lines_sep = list_sep.rstrip() + lines_sep
        for v in arr:
            v_len = len(v) + len(list_sep)
            if line and line_len + v_len > max_len:
                # current line would overflow: flush it and start a new one
                mlog.log(*line, sep=list_sep, end=lines_sep)
                line_len = indent
                line = []
            line.append(v)
            line_len += v_len
        mlog.log(*line, sep=list_sep)
known_library_kwargs = (
build.known_shlib_kwargs |
build.known_stlib_kwargs
)
known_build_target_kwargs = (
known_library_kwargs |
build.known_exe_kwargs |
build.known_jar_kwargs |
{'target_type'}
)
TEST_KWARGS: T.List[KwargInfo] = [
KwargInfo('args', ContainerTypeInfo(list, (str, mesonlib.File, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex)),
listify=True, default=[]),
KwargInfo('should_fail', bool, default=False),
KwargInfo('timeout', int, default=30),
KwargInfo('workdir', (str, NoneType), default=None,
validator=lambda x: 'must be an absolute path' if not os.path.isabs(x) else None),
KwargInfo('protocol', str,
default='exitcode',
validator=in_set_validator({'exitcode', 'tap', 'gtest', 'rust'}),
since_values={'gtest': '0.55.0', 'rust': '0.57.0'}),
KwargInfo('priority', int, default=0, since='0.52.0'),
# TODO: env needs reworks of the way the environment variable holder itself works probably
ENV_KW,
DEPENDS_KW.evolve(since='0.46.0'),
KwargInfo('suite', ContainerTypeInfo(list, str), listify=True, default=['']), # yes, a list of empty string
KwargInfo('verbose', bool, default=False, since='0.62.0'),
]
permitted_dependency_kwargs = {
'allow_fallback',
'cmake_args',
'cmake_module_path',
'cmake_package_version',
'components',
'default_options',
'fallback',
'include_type',
'language',
'main',
'method',
'modules',
'native',
'not_found_message',
'optional_modules',
'private_headers',
'required',
'static',
'version',
}
implicit_check_false_warning = """You should add the boolean check kwarg to the run_command call.
It currently defaults to false,
but it will default to true in future releases of meson.
See also: https://github.com/mesonbuild/meson/issues/9300"""
class Interpreter(InterpreterBase, HoldableObject):
    def __init__(
                self,
                _build: build.Build,
                backend: T.Optional[Backend] = None,
                subproject: str = '',
                subdir: str = '',
                subproject_dir: str = 'subprojects',
                default_project_options: T.Optional[T.Dict[OptionKey, str]] = None,
                mock: bool = False,
                ast: T.Optional[mparser.CodeBlockNode] = None,
                is_translated: bool = False,
                user_defined_options: T.Optional['argparse.Namespace'] = None,
            ) -> None:
        """Create an interpreter for one (sub)project.

        :param _build: the shared Build object this interpreter populates
        :param backend: the chosen backend, if already known
        :param mock: if True, do not load or parse any build files
        :param ast: pre-parsed AST to interpret instead of reading meson.build
        :param is_translated: True for subprojects translated from another
            build system; their generated files are not regen dependencies
        """
        super().__init__(_build.environment.get_source_dir(), subdir, subproject)
        self.active_projectname = ''
        self.build = _build
        self.environment = self.build.environment
        self.coredata = self.environment.get_coredata()
        self.backend = backend
        self.summary: T.Dict[str, 'Summary'] = {}
        self.modules: T.Dict[str, NewExtensionModule] = {}
        # Subproject directory is usually the name of the subproject, but can
        # be different for dependencies provided by wrap files.
        self.subproject_directory_name = subdir.split(os.path.sep)[-1]
        self.subproject_dir = subproject_dir
        self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt')
        if not mock and ast is None:
            self.load_root_meson_file()
            self.sanity_check_ast()
        elif ast is not None:
            self.ast = ast
            self.sanity_check_ast()
        self.builtin.update({'meson': MesonMain(self.build, self)})
        self.generators: T.List[build.Generator] = []
        self.processed_buildfiles = set() # type: T.Set[str]
        self.project_args_frozen = False
        self.global_args_frozen = False  # implies self.project_args_frozen
        self.subprojects: T.Dict[str, SubprojectHolder] = {}
        self.subproject_stack: T.List[str] = []
        self.configure_file_outputs: T.Dict[str, int] = {}
        # Passed from the outside, only used in subprojects.
        if default_project_options:
            self.default_project_options = default_project_options.copy()
        else:
            self.default_project_options = {}
        self.project_default_options: T.Dict[OptionKey, str] = {}
        self.build_func_dict()
        self.build_holder_map()
        self.user_defined_options = user_defined_options
        # build_def_files needs to be defined before parse_project is called
        #
        # For non-meson subprojects, we'll be using the ast. Even if it does
        # exist we don't want to add a dependency on it, it's autogenerated
        # from the actual build files, and is just for reference.
        self.build_def_files: mesonlib.OrderedSet[str] = mesonlib.OrderedSet()
        build_filename = os.path.join(self.subdir, environment.build_filename)
        if not is_translated:
            self.build_def_files.add(build_filename)
        if not mock:
            self.parse_project()
        self._redetect_machines()
def __getnewargs_ex__(self) -> T.Tuple[T.Tuple[object], T.Dict[str, object]]:
raise MesonBugException('This class is unpicklable')
    def _redetect_machines(self) -> None:
        # Re-initialize machine descriptions. We can do a better job now because we
        # have the compilers needed to gain more knowledge, so wipe out old
        # inference and start over.
        machines = self.build.environment.machines.miss_defaulting()
        machines.build = environment.detect_machine_info(self.coredata.compilers.build)
        self.build.environment.machines = machines.default_missing()
        assert self.build.environment.machines.build.cpu is not None
        assert self.build.environment.machines.host.cpu is not None
        assert self.build.environment.machines.target.cpu is not None
        # Expose the (possibly updated) machine objects to the DSL as the
        # build_machine/host_machine/target_machine builtins.
        self.builtin['build_machine'] = \
            OBJ.MachineHolder(self.build.environment.machines.build, self)
        self.builtin['host_machine'] = \
            OBJ.MachineHolder(self.build.environment.machines.host, self)
        self.builtin['target_machine'] = \
            OBJ.MachineHolder(self.build.environment.machines.target, self)
    def build_func_dict(self) -> None:
        # Register every DSL-level function name with its implementation.
        # Kept sorted alphabetically for easy scanning.
        self.funcs.update({'add_global_arguments': self.func_add_global_arguments,
                           'add_global_link_arguments': self.func_add_global_link_arguments,
                           'add_languages': self.func_add_languages,
                           'add_project_arguments': self.func_add_project_arguments,
                           'add_project_link_arguments': self.func_add_project_link_arguments,
                           'add_test_setup': self.func_add_test_setup,
                           'alias_target': self.func_alias_target,
                           'assert': self.func_assert,
                           'benchmark': self.func_benchmark,
                           'both_libraries': self.func_both_lib,
                           'build_target': self.func_build_target,
                           'configuration_data': self.func_configuration_data,
                           'configure_file': self.func_configure_file,
                           'custom_target': self.func_custom_target,
                           'declare_dependency': self.func_declare_dependency,
                           'dependency': self.func_dependency,
                           'disabler': self.func_disabler,
                           'environment': self.func_environment,
                           'error': self.func_error,
                           'executable': self.func_executable,
                           'files': self.func_files,
                           'find_library': self.func_find_library,
                           'find_program': self.func_find_program,
                           'generator': self.func_generator,
                           'get_option': self.func_get_option,
                           'get_variable': self.func_get_variable,
                           'gettext': self.func_gettext,
                           'import': self.func_import,
                           'include_directories': self.func_include_directories,
                           'install_data': self.func_install_data,
                           'install_emptydir': self.func_install_emptydir,
                           'install_headers': self.func_install_headers,
                           'install_man': self.func_install_man,
                           'install_subdir': self.func_install_subdir,
                           'install_symlink': self.func_install_symlink,
                           'is_disabler': self.func_is_disabler,
                           'is_variable': self.func_is_variable,
                           'jar': self.func_jar,
                           'join_paths': self.func_join_paths,
                           'library': self.func_library,
                           'message': self.func_message,
                           'option': self.func_option,
                           'project': self.func_project,
                           'range': self.func_range,
                           'run_command': self.func_run_command,
                           'run_target': self.func_run_target,
                           'set_variable': self.func_set_variable,
                           'structured_sources': self.func_structured_sources,
                           'subdir': self.func_subdir,
                           'shared_library': self.func_shared_lib,
                           'shared_module': self.func_shared_module,
                           'static_library': self.func_static_lib,
                           'subdir_done': self.func_subdir_done,
                           'subproject': self.func_subproject,
                           'summary': self.func_summary,
                           'test': self.func_test,
                           'unset_variable': self.func_unset_variable,
                           'vcs_tag': self.func_vcs_tag,
                           'warning': self.func_warning,
                           })
        # The exception() function exists only for Meson's own test suite.
        if 'MESON_UNIT_TEST' in os.environ:
            self.funcs.update({'exception': self.func_exception})
    def build_holder_map(self) -> None:
        '''
        Build a mapping of `HoldableObject` types to their corresponding
        `ObjectHolder`s. This mapping is used in `InterpreterBase` to automatically
        holderify all returned values from methods and functions.
        '''
        self.holder_map.update({
            # Primitives
            list: P_OBJ.ArrayHolder,
            dict: P_OBJ.DictHolder,
            int: P_OBJ.IntegerHolder,
            bool: P_OBJ.BooleanHolder,
            str: P_OBJ.StringHolder,
            P_OBJ.MesonVersionString: P_OBJ.MesonVersionStringHolder,
            # Meson types
            mesonlib.File: OBJ.FileHolder,
            build.SharedLibrary: OBJ.SharedLibraryHolder,
            build.StaticLibrary: OBJ.StaticLibraryHolder,
            build.BothLibraries: OBJ.BothLibrariesHolder,
            build.SharedModule: OBJ.SharedModuleHolder,
            build.Executable: OBJ.ExecutableHolder,
            build.Jar: OBJ.JarHolder,
            build.CustomTarget: OBJ.CustomTargetHolder,
            build.CustomTargetIndex: OBJ.CustomTargetIndexHolder,
            build.Generator: OBJ.GeneratorHolder,
            build.GeneratedList: OBJ.GeneratedListHolder,
            build.ExtractedObjects: OBJ.GeneratedObjectsHolder,
            build.RunTarget: OBJ.RunTargetHolder,
            build.AliasTarget: OBJ.AliasTargetHolder,
            build.Headers: OBJ.HeadersHolder,
            build.Man: OBJ.ManHolder,
            build.EmptyDir: OBJ.EmptyDirHolder,
            build.Data: OBJ.DataHolder,
            build.SymlinkData: OBJ.SymlinkDataHolder,
            build.InstallDir: OBJ.InstallDirHolder,
            build.IncludeDirs: OBJ.IncludeDirsHolder,
            build.EnvironmentVariables: OBJ.EnvironmentVariablesHolder,
            build.StructuredSources: OBJ.StructuredSourcesHolder,
            compilers.RunResult: compilerOBJ.TryRunResultHolder,
            dependencies.ExternalLibrary: OBJ.ExternalLibraryHolder,
            coredata.UserFeatureOption: OBJ.FeatureOptionHolder,
            envconfig.MachineInfo: OBJ.MachineHolder,
            build.ConfigurationData: OBJ.ConfigurationDataHolder,
        })
        '''
        Build a mapping of `HoldableObject` base classes to their
        corresponding `ObjectHolder`s. The difference to `self.holder_map`
        is that the keys here define an upper bound instead of requiring an
        exact match.

        The mappings defined here are only used when there was no direct hit
        found in `self.holder_map`.
        '''
        self.bound_holder_map.update({
            dependencies.Dependency: OBJ.DependencyHolder,
            ExternalProgram: OBJ.ExternalProgramHolder,
            compilers.Compiler: compilerOBJ.CompilerHolder,
            ModuleObject: OBJ.ModuleObjectHolder,
            MutableModuleObject: OBJ.MutableModuleObjectHolder,
        })
def append_holder_map(self, held_type: T.Type[mesonlib.HoldableObject], holder_type: T.Type[ObjectHolder]) -> None:
'''
Adds one additional mapping to the `holder_map`.
The intended use for this function is in the `initialize` method of
modules to register custom object holders.
'''
self.holder_map.update({
held_type: holder_type
})
def process_new_values(self, invalues: T.List[T.Union[TYPE_var, ExecutableSerialisation]]) -> None:
invalues = listify(invalues)
for v in invalues:
if isinstance(v, ObjectHolder):
raise InterpreterException('Modules must not return ObjectHolders')
if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)):
self.add_target(v.name, v)
elif isinstance(v, list):
self.process_new_values(v)
elif isinstance(v, ExecutableSerialisation):
v.subproject = self.subproject
self.build.install_scripts.append(v)
elif isinstance(v, build.Data):
self.build.data.append(v)
elif isinstance(v, build.SymlinkData):
self.build.symlinks.append(v)
elif isinstance(v, dependencies.InternalDependency):
# FIXME: This is special cased and not ideal:
# The first source is our new VapiTarget, the rest are deps
self.process_new_values(v.sources[0])
elif isinstance(v, build.InstallDir):
self.build.install_dirs.append(v)
elif isinstance(v, Test):
self.build.tests.append(v)
elif isinstance(v, (int, str, bool, Disabler, ObjectHolder, build.GeneratedList,
ExternalProgram, build.ConfigurationData)):
pass
else:
raise InterpreterException(f'Module returned a value of unknown type {v!r}.')
    def get_build_def_files(self) -> mesonlib.OrderedSet[str]:
        # Files whose modification must trigger a reconfigure.
        return self.build_def_files
    def add_build_def_file(self, f: mesonlib.FileOrString) -> None:
        """Track *f* as a file whose change must trigger reconfiguration.

        Generated files and files inside the build directory are skipped.
        """
        # Use relative path for files within source directory, and absolute path
        # for system files. Skip files within build directory. Also skip not regular
        # files (e.g. /dev/stdout) Normalize the path to avoid duplicates, this
        # is especially important to convert '/' to '\' on Windows.
        if isinstance(f, mesonlib.File):
            if f.is_built:
                return
            f = os.path.normpath(f.relative_name())
        elif os.path.isfile(f) and not f.startswith('/dev'):
            srcdir = Path(self.environment.get_source_dir())
            builddir = Path(self.environment.get_build_dir())
            try:
                f_ = Path(f).resolve()
            except OSError:
                f_ = Path(f)
                s = f_.stat()
                if (hasattr(s, 'st_file_attributes') and
                        s.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT != 0 and
                        s.st_reparse_tag == stat.IO_REPARSE_TAG_APPEXECLINK):
                    # This is a Windows Store link which we can't
                    # resolve, so just do our best otherwise.
                    f_ = f_.parent.resolve() / f_.name
                else:
                    raise
            if builddir in f_.parents:
                return
            if srcdir in f_.parents:
                f_ = f_.relative_to(srcdir)
            f = str(f_)
        else:
            # Not a regular on-disk file (or a /dev pseudo-file): ignore.
            return
        if f not in self.build_def_files:
            self.build_def_files.add(f)
    def get_variables(self) -> T.Dict[str, InterpreterObject]:
        # All variables currently defined in the interpreted meson.build scope.
        return self.variables
def check_stdlibs(self) -> None:
machine_choices = [MachineChoice.HOST]
if self.coredata.is_cross_build():
machine_choices.append(MachineChoice.BUILD)
for for_machine in machine_choices:
props = self.build.environment.properties[for_machine]
for l in self.coredata.compilers[for_machine].keys():
try:
di = mesonlib.stringlistify(props.get_stdlib(l))
except KeyError:
continue
if len(di) == 1:
FeatureNew.single_use('stdlib without variable name', '0.56.0', self.subproject, location=self.current_node)
kwargs = {'native': for_machine is MachineChoice.BUILD,
}
name = l + '_stdlib'
df = DependencyFallbacksHolder(self, [name])
df.set_fallback(di)
dep = df.lookup(kwargs, force_fallback=True)
self.build.stdlibs[for_machine][l] = dep
def _import_module(self, modname: str, required: bool) -> NewExtensionModule:
if modname in self.modules:
return self.modules[modname]
try:
module = importlib.import_module('mesonbuild.modules.' + modname)
except ImportError:
if required:
raise InvalidArguments(f'Module "{modname}" does not exist')
ext_module = NotFoundExtensionModule()
else:
ext_module = module.initialize(self)
assert isinstance(ext_module, (ExtensionModule, NewExtensionModule))
self.build.modules.append(modname)
self.modules[modname] = ext_module
return ext_module
    @typed_pos_args('import', str)
    @typed_kwargs(
        'import',
        REQUIRED_KW.evolve(since='0.59.0'),
        DISABLER_KW.evolve(since='0.59.0'),
    )
    @disablerIfNotFound
    def func_import(self, node: mparser.BaseNode, args: T.Tuple[str],
                    kwargs: 'kwargs.FuncImportModule') -> T.Union[ExtensionModule, NewExtensionModule, NotFoundExtensionModule]:
        """Implementation of the DSL import() function.

        For 'unstable-' prefixed names, prefers the stable module when one
        exists, otherwise falls back to the 'unstable_' internal name.
        """
        modname = args[0]
        disabled, required, _ = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            return NotFoundExtensionModule()
        if modname.startswith('unstable-'):
            plainname = modname.split('-', 1)[1]
            try:
                # check if stable module exists
                mod = self._import_module(plainname, required)
                # XXX: this is actually not helpful, since it doesn't do a version check
                mlog.warning(f'Module {modname} is now stable, please use the {plainname} module instead.')
                return mod
            except InvalidArguments:
                mlog.warning(f'Module {modname} has no backwards or forwards compatibility and might not exist in future releases.', location=node)
                modname = 'unstable_' + plainname
        return self._import_module(modname, required)
@typed_pos_args('files', varargs=str)
@noKwargs
def func_files(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'TYPE_kwargs') -> T.List[mesonlib.File]:
    """Implementation of ``files()``: wrap each name in a source-relative File."""
    make_file = mesonlib.File.from_source_file
    src_root = self.environment.source_dir
    return [make_file(src_root, self.subdir, fname) for fname in args[0]]
# Used by declare_dependency() and pkgconfig.generate()
def extract_variables(self, kwargs, argname='variables', list_new=False, dict_new=False):
    """Normalize the *argname* kwarg into a mapping of variable name -> value.

    Accepts either a dict or a list of 'KEY=VALUE' strings.  Validates that
    every name is non-empty and whitespace-free and every value is a
    non-empty string; raises InterpreterException otherwise.
    """
    variables = kwargs.get(argname, {})
    if isinstance(variables, dict):
        if dict_new and variables:
            FeatureNew.single_use(f'{argname} as dictionary', '0.56.0', self.subproject, location=self.current_node)
    else:
        varlist = mesonlib.stringlistify(variables)
        if list_new:
            FeatureNew.single_use(f'{argname} as list of strings', '0.56.0', self.subproject, location=self.current_node)
        variables = collections.OrderedDict()
        for pair in varlist:
            # partition() cleanly distinguishes 'missing =' from 'empty value'.
            key, sep, value = pair.partition('=')
            if not sep:
                raise InterpreterException(f'Variable {pair!r} must have a value separated by equals sign.')
            variables[key.strip()] = value.strip()
    for name, value in variables.items():
        if not name or not value:
            raise InterpreterException('Empty variable name or value')
        if any(c.isspace() for c in name):
            raise InterpreterException(f'Invalid whitespace in variable name "{name}"')
        if not isinstance(value, str):
            raise InterpreterException('variables values must be strings.')
    return variables
@FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole'])
@FeatureNewKwargs('declare_dependency', '0.54.0', ['variables'])
@FeatureNewKwargs('declare_dependency', '0.62.0', ['d_module_versions', 'd_import_dirs'])
@permittedKwargs({'include_directories', 'link_with', 'sources', 'dependencies',
                  'compile_args', 'link_args', 'link_whole', 'version',
                  'variables', 'd_module_versions', 'd_import_dirs'})
@noPosargs
def func_declare_dependency(self, node, args, kwargs):
    """Implementation of ``declare_dependency()``: build an InternalDependency."""
    version = kwargs.get('version', self.project_version)
    if not isinstance(version, str):
        raise InterpreterException('Version must be a string.')
    incs = self.extract_incdirs(kwargs)
    libs = extract_as_list(kwargs, 'link_with')
    libs_whole = extract_as_list(kwargs, 'link_whole')
    sources = listify(self.source_strings_to_files(extract_as_list(kwargs, 'sources')))
    compile_args = mesonlib.stringlistify(kwargs.get('compile_args', []))
    link_args = mesonlib.stringlistify(kwargs.get('link_args', []))
    variables = self.extract_variables(kwargs, list_new=True)
    d_module_versions = extract_as_list(kwargs, 'd_module_versions')
    d_import_dirs = self.extract_incdirs(kwargs, 'd_import_dirs')
    # 'dependencies' entries must be real dependency objects ...
    final_deps = []
    for dep in extract_as_list(kwargs, 'dependencies'):
        if not isinstance(dep, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)):
            raise InterpreterException('Dependencies must be external deps')
        final_deps.append(dep)
    # ... while 'link_with' entries must NOT be dependency objects.
    for lib in libs:
        if isinstance(lib, dependencies.Dependency):
            raise InterpreterException('''Entries in "link_with" may only be self-built targets,
external dependencies (including libraries) must go to "dependencies".''')
    return dependencies.InternalDependency(version, incs, compile_args,
                                           link_args, libs, libs_whole, sources, final_deps,
                                           variables, d_module_versions, d_import_dirs)
@typed_pos_args('assert', bool, optargs=[str])
@noKwargs
def func_assert(self, node: mparser.FunctionNode, args: T.Tuple[bool, T.Optional[str]],
                kwargs: 'TYPE_kwargs') -> None:
    """Implementation of ``assert()``: raise when the condition is false."""
    value, message = args
    if message is None:
        FeatureNew.single_use('assert function without message argument', '0.53.0', self.subproject, location=node)
    if value:
        return
    if message is None:
        # No message given: reconstruct the source text of the failing
        # condition so the error is still informative.
        from ..ast import AstPrinter
        printer = AstPrinter()
        node.args.arguments[0].accept(printer)
        message = printer.result
    raise InterpreterException('Assert failed: ' + message)
def validate_arguments(self, args, argcount, arg_types):
    """Check positional-argument count and, optionally, per-position types.

    An *argcount* of None skips the count check; a None entry in
    *arg_types* skips the type check for that position.  Extra arguments
    beyond len(arg_types) are not type-checked.  Raises InvalidArguments
    on any mismatch.
    """
    if argcount is not None and argcount != len(args):
        raise InvalidArguments(f'Expected {argcount} arguments, got {len(args)}.')
    for actual, wanted in zip(args, arg_types):
        if wanted is None:
            continue
        if not isinstance(actual, wanted):
            raise InvalidArguments('Incorrect argument type.')
# Executables aren't actually accepted, but we allow them here to allow for
# better error messages when overridden
@typed_pos_args(
    'run_command',
    (build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str),
    varargs=(build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str))
@typed_kwargs(
    'run_command',
    KwargInfo('check', (bool, NoneType), since='0.47.0'),
    KwargInfo('capture', bool, default=True, since='0.47.0'),
    ENV_KW.evolve(since='0.50.0'),
)
def func_run_command(self, node: mparser.BaseNode,
                     args: T.Tuple[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str],
                                   T.List[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str]]],
                     kwargs: 'kwargs.RunCommand') -> RunProcess:
    """Implementation of ``run_command()``: thin forwarder to run_command_impl()."""
    return self.run_command_impl(node, args, kwargs)
def run_command_impl(self,
                     node: mparser.BaseNode,
                     args: T.Tuple[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str],
                                   T.List[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str]]],
                     kwargs: 'kwargs.RunCommand',
                     in_builddir: bool = False) -> RunProcess:
    """Resolve the command and its arguments, then run it at configure time.

    The first positional element is resolved to an ExternalProgram; the
    remaining elements are expanded to string arguments.  Every file used
    is registered as a build-definition file so configuration re-runs when
    it changes.  Raises InterpreterException for overridden executables or
    programs that cannot be found.
    """
    cmd, cargs = args
    capture = kwargs['capture']
    env = kwargs['env']
    srcdir = self.environment.get_source_dir()
    builddir = self.environment.get_build_dir()
    check = kwargs['check']
    if check is None:
        # 'check' was not given explicitly: warn once, then default to False.
        mlog.warning(implicit_check_false_warning, once=True)
        check = False
    overridden_msg = ('Program {!r} was overridden with the compiled '
                      'executable {!r} and therefore cannot be used during '
                      'configuration')
    expanded_args: T.List[str] = []
    if isinstance(cmd, build.Executable):
        # A compiled target cannot be executed at configure time.
        progname = node.args.arguments[0].value
        raise InterpreterException(overridden_msg.format(progname, cmd.description()))
    if isinstance(cmd, ExternalProgram):
        if not cmd.found():
            raise InterpreterException(f'command {cmd.get_name()!r} not found or not executable')
    elif isinstance(cmd, compilers.Compiler):
        # Run the compiler binary itself; its extra exelist entries become
        # leading arguments.
        exelist = cmd.get_exelist()
        cmd = exelist[0]
        prog = ExternalProgram(cmd, silent=True)
        if not prog.found():
            raise InterpreterException(f'Program {cmd!r} not found or not executable')
        cmd = prog
        expanded_args = exelist[1:]
    else:
        if isinstance(cmd, mesonlib.File):
            cmd = cmd.absolute_path(srcdir, builddir)
        # Prefer scripts in the current source directory
        search_dir = os.path.join(srcdir, self.subdir)
        prog = ExternalProgram(cmd, silent=True, search_dir=search_dir)
        if not prog.found():
            raise InterpreterException(f'Program or command {cmd!r} not found or not executable')
        cmd = prog
    for a in cargs:
        if isinstance(a, str):
            expanded_args.append(a)
        elif isinstance(a, mesonlib.File):
            expanded_args.append(a.absolute_path(srcdir, builddir))
        elif isinstance(a, ExternalProgram):
            expanded_args.append(a.get_path())
        elif isinstance(a, compilers.Compiler):
            FeatureNew.single_use('Compiler object as a variadic argument to `run_command`', '0.61.0', self.subproject, location=node)
            prog = ExternalProgram(a.exelist[0], silent=True)
            if not prog.found():
                raise InterpreterException(f'Program {cmd!r} not found or not executable')
            expanded_args.append(prog.get_path())
        else:
            # Anything else here is a build.Executable from an override.
            raise InterpreterException(overridden_msg.format(a.name, cmd.description()))
    # If any file that was used as an argument to the command
    # changes, we must re-run the configuration step.
    self.add_build_def_file(cmd.get_path())
    for a in expanded_args:
        if not os.path.isabs(a):
            a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a)
        self.add_build_def_file(a)
    return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir,
                      self.environment.get_build_command() + ['introspect'],
                      in_builddir=in_builddir, check=check, capture=capture)
def func_gettext(self, nodes, args, kwargs):
    """Removed builtin: gettext() now lives in the i18n module."""
    raise InterpreterException('Gettext() function has been moved to module i18n. Import it and use i18n.gettext() instead')
def func_option(self, nodes, args, kwargs):
    """Reject option() calls outside the dedicated option file."""
    raise InterpreterException('Tried to call option() in build description file. All options must be in the option file.')
@typed_pos_args('subproject', str)
@typed_kwargs(
    'subproject',
    REQUIRED_KW,
    DEFAULT_OPTIONS.evolve(since='0.38.0'),
    KwargInfo('version', ContainerTypeInfo(list, str), default=[], listify=True),
)
def func_subproject(self, nodes: mparser.BaseNode, args: T.Tuple[str], kwargs_: kwargs.Subproject) -> SubprojectHolder:
    """Implementation of ``subproject()``: delegate to do_subproject()."""
    # Build the DoSubproject kwargs; 'options'/'cmake_options' only apply
    # to the cmake method and are therefore left empty here.
    subp_kwargs: kwargs.DoSubproject = {
        'required': kwargs_['required'],
        'default_options': kwargs_['default_options'],
        'version': kwargs_['version'],
        'options': None,
        'cmake_options': [],
    }
    return self.do_subproject(args[0], 'meson', subp_kwargs)
def disabled_subproject(self, subp_name: str, disabled_feature: T.Optional[str] = None,
                        exception: T.Optional[Exception] = None) -> SubprojectHolder:
    """Register *subp_name* as a disabled subproject and return its holder."""
    subdir = os.path.join(self.subproject_dir, subp_name)
    holder = SubprojectHolder(NullSubprojectInterpreter(), subdir,
                              disabled_feature=disabled_feature, exception=exception)
    self.subprojects[subp_name] = holder
    self.coredata.initialized_subprojects.add(subp_name)
    return holder
def do_subproject(self, subp_name: str, method: Literal['meson', 'cmake'], kwargs: kwargs.DoSubproject) -> SubprojectHolder:
    """Configure (or return the already-configured) subproject *subp_name*.

    Validates the name, detects recursive inclusion, resolves the wrap,
    and dispatches to the meson or cmake implementation.  When the
    subproject is not required, failures return a disabled holder instead
    of raising.
    """
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        mlog.log('Subproject', mlog.bold(subp_name), ':', 'skipped: feature', mlog.bold(feature), 'disabled')
        return self.disabled_subproject(subp_name, disabled_feature=feature)
    default_options = coredata.create_options_dict(kwargs['default_options'], subp_name)
    # Reject names that could escape the subprojects directory.
    if subp_name == '':
        raise InterpreterException('Subproject name must not be empty.')
    if subp_name[0] == '.':
        raise InterpreterException('Subproject name must not start with a period.')
    if '..' in subp_name:
        raise InterpreterException('Subproject name must not contain a ".." path segment.')
    if os.path.isabs(subp_name):
        raise InterpreterException('Subproject name must not be an absolute path.')
    if has_path_sep(subp_name):
        mlog.warning('Subproject name has a path separator. This may cause unexpected behaviour.',
                     location=self.current_node)
    if subp_name in self.subproject_stack:
        fullstack = self.subproject_stack + [subp_name]
        incpath = ' => '.join(fullstack)
        raise InvalidCode(f'Recursive include of subprojects: {incpath}.')
    if subp_name in self.subprojects:
        # Already configured earlier in this run: re-check required/version
        # constraints against the existing holder and reuse it.
        subproject = self.subprojects[subp_name]
        if required and not subproject.found():
            raise InterpreterException(f'Subproject "{subproject.subdir}" required but not found.')
        if kwargs['version']:
            pv = self.build.subprojects[subp_name]
            wanted = kwargs['version']
            if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
                raise InterpreterException(f'Subproject {subp_name} version is {pv} but {wanted} required.')
        return subproject
    r = self.environment.wrap_resolver
    try:
        subdir = r.resolve(subp_name, method)
    except wrap.WrapException as e:
        if not required:
            mlog.log(e)
            mlog.log('Subproject ', mlog.bold(subp_name), 'is buildable:', mlog.red('NO'), '(disabling)')
            return self.disabled_subproject(subp_name, exception=e)
        raise e
    subdir_abs = os.path.join(self.environment.get_source_dir(), subdir)
    os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True)
    # Global args may no longer change once a subproject is configured.
    self.global_args_frozen = True
    stack = ':'.join(self.subproject_stack + [subp_name])
    m = ['\nExecuting subproject', mlog.bold(stack)]
    if method != 'meson':
        m += ['method', mlog.bold(method)]
    mlog.log(*m, '\n', nested=False)
    try:
        if method == 'meson':
            return self._do_subproject_meson(subp_name, subdir, default_options, kwargs)
        elif method == 'cmake':
            return self._do_subproject_cmake(subp_name, subdir, subdir_abs, default_options, kwargs)
        else:
            raise mesonlib.MesonBugException(f'The method {method} is invalid for the subproject {subp_name}')
    # Invalid code is always an error
    except InvalidCode:
        raise
    except Exception as e:
        if not required:
            with mlog.nested(subp_name):
                # Suppress the 'ERROR:' prefix because this exception is not
                # fatal and VS CI treat any logs with "ERROR:" as fatal.
                mlog.exception(e, prefix=mlog.yellow('Exception:'))
            mlog.log('\nSubproject', mlog.bold(subdir), 'is buildable:', mlog.red('NO'), '(disabling)')
            return self.disabled_subproject(subp_name, exception=e)
        raise e
def _do_subproject_meson(self, subp_name: str, subdir: str,
                         default_options: T.Dict[OptionKey, str],
                         kwargs: kwargs.DoSubproject,
                         ast: T.Optional[mparser.CodeBlockNode] = None,
                         build_def_files: T.Optional[T.List[str]] = None,
                         is_translated: bool = False) -> SubprojectHolder:
    """Run a nested Interpreter over a meson subproject and merge its results.

    *ast*/*build_def_files*/*is_translated* support subprojects translated
    from other build systems (e.g. cmake).  Returns the SubprojectHolder
    recorded in self.subprojects.
    """
    with mlog.nested(subp_name):
        new_build = self.build.copy()
        subi = Interpreter(new_build, self.backend, subp_name, subdir, self.subproject_dir,
                           default_options, ast=ast, is_translated=is_translated,
                           user_defined_options=self.user_defined_options)
        # Those lists are shared by all interpreters. That means that
        # even if the subproject fails, any modification that the subproject
        # made to those lists will affect the parent project.
        subi.subprojects = self.subprojects
        subi.modules = self.modules
        subi.holder_map = self.holder_map
        subi.bound_holder_map = self.bound_holder_map
        subi.summary = self.summary
        subi.subproject_stack = self.subproject_stack + [subp_name]
        current_active = self.active_projectname
        # Count warnings emitted only by the subproject, then restore the
        # parent's counter.
        current_warnings_counter = mlog.log_warnings_counter
        mlog.log_warnings_counter = 0
        subi.run()
        subi_warnings = mlog.log_warnings_counter
        mlog.log_warnings_counter = current_warnings_counter
        mlog.log('Subproject', mlog.bold(subp_name), 'finished.')
    mlog.log()
    if kwargs['version']:
        pv = subi.project_version
        wanted = kwargs['version']
        if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
            raise InterpreterException(f'Subproject {subp_name} version is {pv} but {wanted} required.')
    self.active_projectname = current_active
    self.subprojects.update(subi.subprojects)
    self.subprojects[subp_name] = SubprojectHolder(subi, subdir, warnings=subi_warnings)
    # Duplicates are possible when subproject uses files from project root
    if build_def_files:
        self.build_def_files.update(build_def_files)
    # We always need the subi.build_def_files, to propagate sub-sub-projects
    self.build_def_files.update(subi.build_def_files)
    self.build.merge(subi.build)
    self.build.subprojects[subp_name] = subi.project_version
    self.coredata.initialized_subprojects.add(subp_name)
    return self.subprojects[subp_name]
def _do_subproject_cmake(self, subp_name: str, subdir: str, subdir_abs: str,
                         default_options: T.Dict[OptionKey, str],
                         kwargs: kwargs.DoSubproject) -> SubprojectHolder:
    """Configure a cmake subproject by translating it to a meson AST.

    Runs the CMakeInterpreter, converts the result to meson code, writes a
    debug copy of the generated meson.build into the build dir, and then
    delegates to _do_subproject_meson().
    """
    with mlog.nested(subp_name):
        new_build = self.build.copy()
        prefix = self.coredata.options[OptionKey('prefix')].value
        from ..modules.cmake import CMakeSubprojectOptions
        options = kwargs['options'] or CMakeSubprojectOptions()
        cmake_options = kwargs['cmake_options'] + options.cmake_options
        cm_int = CMakeInterpreter(new_build, Path(subdir), Path(subdir_abs), Path(prefix), new_build.environment, self.backend)
        cm_int.initialise(cmake_options)
        cm_int.analyse()
        # Generate a meson ast and execute it with the normal do_subproject_meson
        ast = cm_int.pretend_to_be_meson(options.target_options)
        mlog.log()
        with mlog.nested('cmake-ast'):
            mlog.log('Processing generated meson AST')
            # Debug print the generated meson file
            from ..ast import AstIndentationGenerator, AstPrinter
            printer = AstPrinter()
            ast.accept(AstIndentationGenerator())
            ast.accept(printer)
            printer.post_process()
            meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build')
            with open(meson_filename, "w", encoding='utf-8') as f:
                f.write(printer.result)
            mlog.log('Build file:', meson_filename)
            mlog.cmd_ci_include(meson_filename)
            mlog.log()
    result = self._do_subproject_meson(subp_name, subdir, default_options, kwargs, ast, [str(f) for f in cm_int.bs_files], is_translated=True)
    # Keep the cmake interpreter around for the cmake module's introspection.
    result.cm_interpreter = cm_int
    mlog.log()
    return result
def get_option_internal(self, optname: str) -> coredata.UserOption:
    """Look up option *optname* for the current subproject.

    Non-project (builtin/base/compiler) options fall back to the global
    value when missing or yielding.  Project options that are marked
    yielding resolve to the parent project's option of the same name when
    the types match; a type mismatch only warns and keeps the subproject
    value.  Raises InterpreterException for unknown options.
    """
    key = OptionKey.from_string(optname).evolve(subproject=self.subproject)
    if not key.is_project():
        for opts in [self.coredata.options, compilers.base_options]:
            v = opts.get(key)
            if v is None or v.yielding:
                # Fall back to the top-level (root) version of the option.
                v = opts.get(key.as_root())
            if v is not None:
                assert isinstance(v, coredata.UserOption), 'for mypy'
                return v
    try:
        opt = self.coredata.options[key]
        if opt.yielding and key.subproject and key.as_root() in self.coredata.options:
            popt = self.coredata.options[key.as_root()]
            if type(opt) is type(popt):
                opt = popt
            else:
                # Get class name, then option type as a string
                opt_type = opt.__class__.__name__[4:][:-6].lower()
                popt_type = popt.__class__.__name__[4:][:-6].lower()
                # This is not a hard error to avoid dependency hell, the workaround
                # when this happens is to simply set the subproject's option directly.
                mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield '
                             'to parent option of type {3!r}, ignoring parent value. '
                             'Use -D{2}:{0}=value to set the value for this option manually'
                             '.'.format(optname, opt_type, self.subproject, popt_type),
                             location=self.current_node)
        return opt
    except KeyError:
        pass
    raise InterpreterException(f'Tried to access unknown option {optname!r}.')
@typed_pos_args('get_option', str)
@noKwargs
def func_get_option(self, nodes: mparser.BaseNode, args: T.Tuple[str],
                    kwargs: 'TYPE_kwargs') -> T.Union[coredata.UserOption, 'TYPE_var']:
    """Implementation of ``get_option()``."""
    optname = args[0]
    if ':' in optname:
        raise InterpreterException('Having a colon in option name is forbidden, '
                                   'projects are not allowed to directly access '
                                   'options of other subprojects.')
    opt = self.get_option_internal(optname)
    # Feature options are handed back as objects so .enabled()/.disabled()
    # work; every other UserOption is unwrapped to its raw value.
    if isinstance(opt, coredata.UserFeatureOption):
        opt.name = optname
        return opt
    if isinstance(opt, coredata.UserOption):
        return opt.value
    return opt
@typed_pos_args('configuration_data', optargs=[dict])
@noKwargs
def func_configuration_data(self, node: mparser.BaseNode, args: T.Tuple[T.Optional[T.Dict[str, T.Any]]],
                            kwargs: 'TYPE_kwargs') -> build.ConfigurationData:
    """Implementation of ``configuration_data()``.

    The optional positional argument is a dict of initial values; each
    value must be a str, int or bool.  Raises InvalidArguments for any
    other value type.
    """
    initial_values = args[0]
    if initial_values is not None:
        FeatureNew.single_use('configuration_data dictionary', '0.49.0', self.subproject, location=node)
        for k, v in initial_values.items():
            if not isinstance(v, (str, int, bool)):
                # Fixed message: the original had a stray doubled quote and
                # complained about the key when the *value* is what is checked.
                raise InvalidArguments(
                    f'"configuration_data": initial value dictionary key "{k!r}" must have a value of type "str | int | bool", not "{v!r}"')
    return build.ConfigurationData(initial_values)
def set_backend(self) -> None:
    """Instantiate self.backend from the 'backend' option.

    No-op when a backend is already set (subproject parsing).  May rewrite
    the option value when a 'vs' backend auto-detects a concrete version.
    """
    # The backend is already set when parsing subprojects
    if self.backend is not None:
        return
    from ..backend import backends
    requested = self.coredata.get_option(OptionKey('backend'))
    self.backend = backends.get_backend_from_name(requested, self.build, self)
    if self.backend is None:
        raise InterpreterException(f'Unknown backend "{requested}".')
    if requested != self.backend.name:
        if self.backend.name.startswith('vs'):
            mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name))
        self.coredata.set_option(OptionKey('backend'), self.backend.name)
    # Only init backend options on first invocation otherwise it would
    # override values previously set from command line.
    if self.environment.first_invocation:
        self.coredata.init_backend_options(requested)
    backend_options = {k: v for k, v in self.environment.options.items() if k.is_backend()}
    self.coredata.set_options(backend_options)
@typed_pos_args('project', str, varargs=str)
@typed_kwargs(
    'project',
    DEFAULT_OPTIONS,
    KwargInfo('meson_version', (str, NoneType)),
    KwargInfo(
        'version',
        (str, mesonlib.File, NoneType, list),
        default='undefined',
        validator=_project_version_validator,
        convertor=lambda x: x[0] if isinstance(x, list) else x,
    ),
    KwargInfo('license', ContainerTypeInfo(list, str), default=['unknown'], listify=True),
    KwargInfo('subproject_dir', str, default='subprojects'),
)
def func_project(self, node: mparser.FunctionNode, args: T.Tuple[str, T.List[str]], kwargs: 'kwargs.Project') -> None:
    """Implementation of ``project()``.

    Validates the meson_version requirement, processes the option file,
    initializes option defaults, resolves the project version (possibly
    from a file), sets up the wrap resolver, activates the VS environment
    when needed, adds the project languages and selects the backend.
    NOTE: statement order here is load-bearing; see inline comments.
    """
    proj_name, proj_langs = args
    if ':' in proj_name:
        raise InvalidArguments(f"Project name {proj_name!r} must not contain ':'")
    # This needs to be evaluated as early as possible, as meson uses this
    # for things like deprecation testing.
    if kwargs['meson_version']:
        cv = coredata.version
        pv = kwargs['meson_version']
        if not mesonlib.version_compare(cv, pv):
            raise InterpreterException(f'Meson version is {cv} but project requires {pv}')
        mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version']
    # Process this project's option file, if present.
    if os.path.exists(self.option_file):
        oi = optinterpreter.OptionInterpreter(self.subproject)
        oi.process(self.option_file)
        self.coredata.update_project_options(oi.options)
        self.add_build_def_file(self.option_file)
    # Do not set default_options on reconfigure otherwise it would override
    # values previously set from command line. That means that changing
    # default_options in a project will trigger a reconfigure but won't
    # have any effect.
    self.project_default_options = coredata.create_options_dict(
        kwargs['default_options'], self.subproject)
    # If this is the first invocation we always need to initialize
    # builtins, if this is a subproject that is new in a re-invocation we
    # need to initialize builtins for that
    if self.environment.first_invocation or (self.subproject != '' and self.subproject not in self.coredata.initialized_subprojects):
        default_options = self.project_default_options.copy()
        default_options.update(self.default_project_options)
        self.coredata.init_builtins(self.subproject)
    else:
        default_options = {}
    self.coredata.set_default_options(default_options, self.subproject, self.environment)
    if not self.is_subproject():
        self.build.project_name = proj_name
    self.active_projectname = proj_name
    version = kwargs['version']
    if isinstance(version, mesonlib.File):
        # version-from-file: the file must contain exactly one line of text
        # (a trailing newline is tolerated).
        FeatureNew.single_use('version from file', '0.57.0', self.subproject, location=node)
        self.add_build_def_file(version)
        ifname = version.absolute_path(self.environment.source_dir,
                                       self.environment.build_dir)
        try:
            ver_data = Path(ifname).read_text(encoding='utf-8').split('\n')
        except FileNotFoundError:
            raise InterpreterException('Version file not found.')
        if len(ver_data) == 2 and ver_data[1] == '':
            ver_data = ver_data[0:1]
        if len(ver_data) != 1:
            raise InterpreterException('Version file must contain exactly one line of text.')
        self.project_version = ver_data[0]
    else:
        self.project_version = version
    if self.build.project_version is None:
        self.build.project_version = self.project_version
    proj_license = kwargs['license']
    self.build.dep_manifest[proj_name] = build.DepManifest(self.project_version, proj_license)
    if self.subproject in self.build.projects:
        raise InvalidCode('Second call to project().')
    # spdirname is the subproject_dir for this project, relative to self.subdir.
    # self.subproject_dir is the subproject_dir for the main project, relative to top source dir.
    spdirname = kwargs['subproject_dir']
    if not isinstance(spdirname, str):
        raise InterpreterException('Subproject_dir must be a string')
    if os.path.isabs(spdirname):
        raise InterpreterException('Subproject_dir must not be an absolute path.')
    if spdirname.startswith('.'):
        raise InterpreterException('Subproject_dir must not begin with a period.')
    if '..' in spdirname:
        raise InterpreterException('Subproject_dir must not contain a ".." segment.')
    if not self.is_subproject():
        self.subproject_dir = spdirname
    self.build.subproject_dir = self.subproject_dir
    # Load wrap files from this (sub)project.
    wrap_mode = self.coredata.get_option(OptionKey('wrap_mode'))
    if not self.is_subproject() or wrap_mode != WrapMode.nopromote:
        subdir = os.path.join(self.subdir, spdirname)
        r = wrap.Resolver(self.environment.get_source_dir(), subdir, self.subproject, wrap_mode)
        if self.is_subproject():
            self.environment.wrap_resolver.merge_wraps(r)
        else:
            self.environment.wrap_resolver = r
    self.build.projects[self.subproject] = proj_name
    mlog.log('Project name:', mlog.bold(proj_name))
    mlog.log('Project version:', mlog.bold(self.project_version))
    if not self.is_subproject():
        # We have to activate VS before adding languages and before calling
        # self.set_backend() otherwise it wouldn't be able to detect which
        # vs backend version we need. But after setting default_options in case
        # the project sets vs backend by default.
        backend = self.coredata.get_option(OptionKey('backend'))
        force_vsenv = self.user_defined_options.vsenv or backend.startswith('vs')
        if mesonlib.setup_vsenv(force_vsenv):
            self.build.need_vsenv = True
    self.add_languages(proj_langs, True, MachineChoice.HOST)
    self.add_languages(proj_langs, False, MachineChoice.BUILD)
    self.set_backend()
    if not self.is_subproject():
        self.check_stdlibs()
@typed_kwargs('add_languages', KwargInfo('native', (bool, NoneType), since='0.54.0'), REQUIRED_KW)
@typed_pos_args('add_languages', varargs=str)
def func_add_languages(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddLanguages') -> bool:
    """Implementation of ``add_languages()``."""
    langs = args[0]
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    native = kwargs['native']
    if disabled:
        for lang in sorted(langs, key=compilers.sort_clink):
            mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled')
        return False
    if native is not None:
        return self.add_languages(langs, required, self.machine_from_native_kwarg(kwargs))
    # absent 'native' means 'both' for backwards compatibility
    tv = FeatureNew.get_target_version(self.subproject)
    if FeatureNew.check_version(tv, '0.54.0'):
        mlog.warning('add_languages is missing native:, assuming languages are wanted for both host and build.',
                     location=node)
    build_ok = self.add_languages(langs, False, MachineChoice.BUILD)
    host_ok = self.add_languages(langs, required, MachineChoice.HOST)
    return build_ok and host_ok
@noArgsFlattening
@noKwargs
def func_message(self, node, args, kwargs):
    """Implementation of ``message()``: stringify arguments and log them."""
    if len(args) > 1:
        FeatureNew.single_use('message with more than one argument', '0.54.0', self.subproject, location=node)
    self.message_impl([stringifyUserArguments(a) for a in args])
def message_impl(self, args):
    # Shared helper: emit a bold 'Message:' prefix followed by the
    # already-stringified arguments.
    mlog.log(mlog.bold('Message:'), *args)
@noArgsFlattening
@FeatureNew('summary', '0.53.0')
@typed_pos_args('summary', (str, dict), optargs=[object])
@typed_kwargs(
    'summary',
    KwargInfo('section', str, default=''),
    KwargInfo('bool_yn', bool, default=False),
    KwargInfo('list_sep', (str, NoneType), since='0.54.0')
)
def func_summary(self, node: mparser.BaseNode, args: T.Tuple[T.Union[str, T.Dict[str, T.Any]], T.Optional[T.Any]],
                 kwargs: 'kwargs.Summary') -> None:
    """Implementation of ``summary()``: accepts dict form or key/value form."""
    first, second = args
    if second is None:
        # One-argument form: a dictionary of entries.
        if not isinstance(first, dict):
            raise InterpreterException('Summary first argument must be dictionary.')
        values = first
    else:
        # Two-argument form: a single key and its value.
        if not isinstance(first, str):
            raise InterpreterException('Summary first argument must be string.')
        values = {first: second}
    self.summary_impl(kwargs['section'], values, kwargs)
def summary_impl(self, section: str, values, kwargs: 'kwargs.Summary') -> None:
    """Record *values* under *section* in the current subproject's summary."""
    subp = self.subproject
    if subp not in self.summary:
        self.summary[subp] = Summary(self.active_projectname, self.project_version)
    self.summary[subp].add_section(
        section, values, kwargs['bool_yn'], kwargs['list_sep'], subp)
def _print_summary(self) -> None:
    """Print all recorded summaries, with the main project's last.

    Also synthesizes two automatic sections for the main project:
    'Subprojects' (found/disabled state per subproject) and 'User defined
    options' (command-line and machine-file options).
    """
    # Add automatic 'Subprojects' section in main project.
    all_subprojects = collections.OrderedDict()
    for name, subp in sorted(self.subprojects.items()):
        value = subp.found()
        # Append a short reason next to the found/not-found flag.
        if subp.disabled_feature:
            value = [value, f'Feature {subp.disabled_feature!r} disabled']
        elif subp.exception:
            value = [value, str(subp.exception)]
        elif subp.warnings > 0:
            value = [value, f'{subp.warnings} warnings']
        all_subprojects[name] = value
    if all_subprojects:
        self.summary_impl('Subprojects', all_subprojects,
                          {'bool_yn': True,
                           'list_sep': ' ',
                           })
    # Add automatic section with all user defined options
    if self.user_defined_options:
        values = collections.OrderedDict()
        if self.user_defined_options.cross_file:
            values['Cross files'] = self.user_defined_options.cross_file
        if self.user_defined_options.native_file:
            values['Native files'] = self.user_defined_options.native_file
        sorted_options = sorted(self.user_defined_options.cmd_line_options.items())
        values.update({str(k): v for k, v in sorted_options})
        if values:
            self.summary_impl('User defined options', values, {'bool_yn': False, 'list_sep': None})
    # Print all summaries, main project last.
    mlog.log('')  # newline
    main_summary = self.summary.pop('', None)
    for subp_name, summary in sorted(self.summary.items()):
        if self.subprojects[subp_name].found():
            summary.dump()
    if main_summary:
        main_summary.dump()
@noArgsFlattening
@FeatureNew('warning', '0.44.0')
@noKwargs
def func_warning(self, node, args, kwargs):
    """Implementation of ``warning()``: stringify arguments and warn."""
    if len(args) > 1:
        FeatureNew.single_use('warning with more than one argument', '0.54.0', self.subproject, location=node)
    mlog.warning(*[stringifyUserArguments(a) for a in args], location=node)
@noArgsFlattening
@noKwargs
def func_error(self, node, args, kwargs):
    """Implementation of ``error()``: abort configuration with a message."""
    if len(args) > 1:
        FeatureNew.single_use('error with more than one argument', '0.58.0', self.subproject, location=node)
    joined = ' '.join(stringifyUserArguments(a) for a in args)
    raise InterpreterException('Problem encountered: ' + joined)
@noKwargs
@noPosargs
def func_exception(self, node, args, kwargs):
    # Debugging aid: deliberately raise a plain Exception so meson's
    # own error handling can be exercised from a build file.
    raise Exception()
def add_languages(self, args: T.Sequence[str], required: bool, for_machine: MachineChoice) -> bool:
    """Add compilers for each language in *args* on *for_machine*.

    Returns True when every language was added (see add_languages_for).
    """
    success = self.add_languages_for(args, required, for_machine)
    if not self.coredata.is_cross_build():
        # Native build: keep build-machine options in sync with the host
        # ones, then re-detect machine info with the new compilers.
        self.coredata.copy_build_options_from_regular_ones()
        self._redetect_machines()
    return success
def should_skip_sanity_check(self, for_machine: MachineChoice) -> bool:
    """Whether the machine file requested skipping compiler sanity checks.

    Reads the 'skip_sanity_check' property from the host machine file
    (must be a boolean).
    """
    should = self.environment.properties.host.get('skip_sanity_check', False)
    if not isinstance(should, bool):
        raise InterpreterException('Option skip_sanity_check must be a boolean.')
    # NOTE(review): both early returns below only fire when 'should' is
    # already False, so as written this is equivalent to 'return should'
    # regardless of machine/cross state — confirm whether the intent was
    # to honor the property only for the host machine of a cross build.
    if for_machine != MachineChoice.HOST and not should:
        return False
    if not self.environment.is_cross_build() and not should:
        return False
    return should
def add_languages_for(self, args: T.List[str], required: bool, for_machine: MachineChoice) -> bool:
    """Detect and sanity-check a compiler for each language in *args*.

    Returns False (instead of raising) for languages that could not be
    detected when *required* is False.  Already-detected languages are
    logged but not re-detected.
    """
    args = [a.lower() for a in args]
    langs = set(self.coredata.compilers[for_machine].keys())
    langs.update(args)
    # We'd really like to add cython's default language here, but it can't
    # actually be done because the cython compiler hasn't been initialized,
    # so we can't actually get the option yet. Because we can't know what
    # compiler to add by default, and we don't want to add unnecessary
    # compilers we don't add anything for cython here, and instead do it
    # When the first cython target using a particular language is used.
    if 'vala' in langs and 'c' not in langs:
        # Vala compiles through C, so C is pulled in implicitly.
        FeatureNew.single_use('Adding Vala language without C', '0.59.0', self.subproject, location=self.current_node)
        args.append('c')
    success = True
    for lang in sorted(args, key=compilers.sort_clink):
        clist = self.coredata.compilers[for_machine]
        machine_name = for_machine.get_lower_case_name()
        if lang in clist:
            comp = clist[lang]
        else:
            try:
                comp = compilers.detect_compiler_for(self.environment, lang, for_machine)
                if comp is None:
                    raise InvalidArguments(f'Tried to use unknown language "{lang}".')
                if self.should_skip_sanity_check(for_machine):
                    mlog.log_once('Cross compiler sanity tests disabled via the cross file.')
                else:
                    comp.sanity_check(self.environment.get_scratch_dir(), self.environment)
            except Exception:
                if not required:
                    mlog.log('Compiler for language',
                             mlog.bold(lang), 'for the', machine_name,
                             'machine not found.')
                    success = False
                    continue
                else:
                    raise
        # Build-machine compiler info for a native build is debug-only noise.
        if for_machine == MachineChoice.HOST or self.environment.is_cross_build():
            logger_fun = mlog.log
        else:
            logger_fun = mlog.debug
        logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:',
                   mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string())
        if comp.linker is not None:
            logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:',
                       mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version)
        self.build.ensure_static_linker(comp)
    return success
def program_from_file_for(self, for_machine: MachineChoice, prognames: T.List[mesonlib.FileOrString]
                          ) -> T.Optional[ExternalProgram]:
    """Return the first of *prognames* defined in the machine file's binaries."""
    for candidate in prognames:
        if isinstance(candidate, mesonlib.File):
            continue # Always points to a local (i.e. self generated) file.
        if not isinstance(candidate, str):
            raise InterpreterException('Executable name must be a string')
        prog = ExternalProgram.from_bin_list(self.environment, for_machine, candidate)
        if prog.found():
            return prog
    return None
def program_from_system(self, args: T.List[mesonlib.FileOrString], search_dirs: T.List[str],
                        extra_info: T.List[mlog.TV_Loggable]) -> T.Optional[ExternalProgram]:
    """Search the system (PATH plus extra dirs) for the first findable program.

    Strings are searched relative to the current subdir plus *search_dirs*;
    File arguments are resolved relative to the build or source tree
    depending on whether they are built.  On success the resolved command
    line is appended to *extra_info* (for logging) and the program is
    returned; otherwise returns None.
    """
    # Search for scripts relative to current subdir.
    # Do not cache found programs because find_program('foobar')
    # might give different results when run from different source dirs.
    source_dir = os.path.join(self.environment.get_source_dir(), self.subdir)
    for exename in args:
        if isinstance(exename, mesonlib.File):
            if exename.is_built:
                search_dir = os.path.join(self.environment.get_build_dir(),
                                          exename.subdir)
            else:
                search_dir = os.path.join(self.environment.get_source_dir(),
                                          exename.subdir)
            exename = exename.fname
            # A File pins an exact location, so no extra dirs are consulted.
            extra_search_dirs = []
        elif isinstance(exename, str):
            search_dir = source_dir
            extra_search_dirs = search_dirs
        else:
            raise InvalidArguments(f'find_program only accepts strings and files, not {exename!r}')
        extprog = ExternalProgram(exename, search_dir=search_dir,
                                  extra_search_dirs=extra_search_dirs,
                                  silent=True)
        if extprog.found():
            extra_info.append(f"({' '.join(extprog.get_command())})")
            return extprog
    return None
def program_from_overrides(self, command_names: T.List[mesonlib.FileOrString],
                           extra_info: T.List['mlog.TV_Loggable']
                           ) -> T.Optional[T.Union[ExternalProgram, OverrideProgram, build.Executable]]:
    """Return the registered find_program() override for the first matching
    string name, appending an '(overridden)' marker to *extra_info*.
    Non-string names are ignored; returns None if nothing is overridden.
    """
    for candidate in command_names:
        if not isinstance(candidate, str):
            continue
        if candidate in self.build.find_overrides:
            override = self.build.find_overrides[candidate]
            extra_info.append(mlog.blue('(overridden)'))
            return override
    return None
def store_name_lookups(self, command_names: T.List[mesonlib.FileOrString]) -> None:
    """Record every string program name that was looked up, so later
    attempts to override it can be rejected."""
    self.build.searched_programs.update(
        name for name in command_names if isinstance(name, str))
def add_find_program_override(self, name: str, exe: T.Union[build.Executable, ExternalProgram, 'OverrideProgram']) -> None:
    """Register *exe* as the forced result of find_program(name).

    Raises if the name was already looked up (the override would be
    inconsistent) or already overridden.
    """
    if name in self.build.searched_programs:
        raise InterpreterException(
            f'Tried to override finding of executable "{name}" which has already been found.')
    if name in self.build.find_overrides:
        raise InterpreterException(
            f'Tried to override executable "{name}" which has already been overridden.')
    self.build.find_overrides[name] = exe
def notfound_program(self, args: T.List[mesonlib.FileOrString]) -> ExternalProgram:
    """Build a placeholder program object representing a failed lookup."""
    parts = []
    for a in args:
        if isinstance(a, str):
            parts.append(a)
        else:
            parts.append(a.absolute_path(self.environment.source_dir, self.environment.build_dir))
    return NonExistingExternalProgram(' '.join(parts))
# TODO update modules to always pass `for_machine`. It is bad-form to assume
# the host machine.
def find_program_impl(self, args: T.List[mesonlib.FileOrString],
                      for_machine: MachineChoice = MachineChoice.HOST,
                      required: bool = True, silent: bool = True,
                      wanted: T.Union[str, T.List[str]] = '',
                      search_dirs: T.Optional[T.List[str]] = None,
                      version_func: T.Optional[T.Callable[[T.Union['ExternalProgram', 'build.Executable', 'OverrideProgram']], str]] = None
                      ) -> T.Union['ExternalProgram', 'build.Executable', 'OverrideProgram']:
    """Core of find_program(): look the program up, optionally enforce a
    version constraint, log the outcome and record successful lookups.

    Returns a not-found placeholder (or raises if *required*) when the
    lookup or the version check fails.
    """
    args = mesonlib.listify(args)
    extra_info: T.List[mlog.TV_Loggable] = []
    progobj = self.program_lookup(args, for_machine, required, search_dirs, extra_info)
    if progobj is None:
        progobj = self.notfound_program(args)

    if isinstance(progobj, ExternalProgram) and not progobj.found():
        if not silent:
            mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'))
        if required:
            m = 'Program {!r} not found or not executable'
            raise InterpreterException(m.format(progobj.get_name()))
        return progobj

    if wanted:
        # Determine the version: caller-supplied hook, project version for
        # in-tree executables, or the program's own reported version.
        if version_func:
            version = version_func(progobj)
        elif isinstance(progobj, build.Executable):
            if progobj.subproject:
                interp = self.subprojects[progobj.subproject].held_object
            else:
                interp = self
            assert isinstance(interp, Interpreter)
            version = interp.project_version
        else:
            version = progobj.get_version(self)
        is_found, not_found, _ = mesonlib.version_compare_many(version, wanted)
        if not is_found:
            mlog.log('Program', mlog.bold(progobj.name), 'found:', mlog.red('NO'),
                     'found', mlog.normal_cyan(version), 'but need:',
                     mlog.bold(', '.join([f"'{e}'" for e in not_found])), *extra_info)
            if required:
                m = 'Invalid version of program, need {!r} {!r} found {!r}.'
                raise InterpreterException(m.format(progobj.name, not_found, version))
            return self.notfound_program(args)
        # Prepend the version so it leads the success log line below.
        extra_info.insert(0, mlog.normal_cyan(version))

    # Only store successful lookups
    self.store_name_lookups(args)
    if not silent:
        mlog.log('Program', mlog.bold(progobj.name), 'found:', mlog.green('YES'), *extra_info)
    if isinstance(progobj, build.Executable):
        progobj.was_returned_by_find_program = True
    return progobj
def program_lookup(self, args: T.List[mesonlib.FileOrString], for_machine: MachineChoice,
                   required: bool, search_dirs: T.List[str], extra_info: T.List[mlog.TV_Loggable]
                   ) -> T.Optional[T.Union[ExternalProgram, build.Executable, OverrideProgram]]:
    """Resolve a program through, in order: meson overrides, (forced) wrap
    fallbacks, machine-file entries, the system, a python3 special case,
    and finally a subproject fallback when required.  Returns None when
    nothing is found.
    """
    progobj = self.program_from_overrides(args, extra_info)
    if progobj:
        return progobj

    fallback = None
    wrap_mode = self.coredata.get_option(OptionKey('wrap_mode'))
    if wrap_mode != WrapMode.nofallback and self.environment.wrap_resolver:
        fallback = self.environment.wrap_resolver.find_program_provider(args)
    if fallback and wrap_mode == WrapMode.forcefallback:
        # forcefallback skips the system entirely and goes straight to the wrap.
        return self.find_program_fallback(fallback, args, required, extra_info)

    progobj = self.program_from_file_for(for_machine, args)
    if progobj is None:
        progobj = self.program_from_system(args, search_dirs, extra_info)
    if progobj is None and args[0].endswith('python3'):
        # Fall back to the interpreter running Meson when 'python3' itself
        # is not on PATH.
        prog = ExternalProgram('python3', mesonlib.python_command, silent=True)
        progobj = prog if prog.found() else None
    if progobj is None and fallback and required:
        progobj = self.find_program_fallback(fallback, args, required, extra_info)

    return progobj
def find_program_fallback(self, fallback: str, args: T.List[mesonlib.FileOrString],
                          required: bool, extra_info: T.List[mlog.TV_Loggable]
                          ) -> T.Optional[T.Union[ExternalProgram, build.Executable, OverrideProgram]]:
    """Configure the wrap subproject *fallback* and then re-check the
    find_program overrides it is expected to have registered."""
    mlog.log('Fallback to subproject', mlog.bold(fallback), 'which provides program',
             mlog.bold(' '.join(args)))
    subproject_kwargs: kwargs.DoSubproject = {
        'required': required,
        'default_options': [],
        'version': [],
        'cmake_options': [],
        'options': None,
    }
    self.do_subproject(fallback, 'meson', subproject_kwargs)
    return self.program_from_overrides(args, extra_info)
@typed_pos_args('find_program', varargs=(str, mesonlib.File), min_varargs=1)
@typed_kwargs(
    'find_program',
    DISABLER_KW.evolve(since='0.49.0'),
    NATIVE_KW,
    REQUIRED_KW,
    KwargInfo('dirs', ContainerTypeInfo(list, str), default=[], listify=True, since='0.53.0'),
    KwargInfo('version', ContainerTypeInfo(list, str), default=[], listify=True, since='0.52.0'),
)
@disablerIfNotFound
def func_find_program(self, node: mparser.BaseNode, args: T.Tuple[T.List[mesonlib.FileOrString]],
                      kwargs: 'kwargs.FindProgram',
                      ) -> T.Union['build.Executable', ExternalProgram, 'OverrideProgram']:
    """Implement find_program(): short-circuit when disabled via a feature
    option, otherwise delegate to find_program_impl with the requested
    machine, version constraint and extra search dirs."""
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        mlog.log('Program', mlog.bold(' '.join(args[0])), 'skipped: feature', mlog.bold(feature), 'disabled')
        return self.notfound_program(args[0])

    search_dirs = extract_search_dirs(kwargs)
    return self.find_program_impl(args[0], kwargs['native'], required=required,
                                  silent=False, wanted=kwargs['version'],
                                  search_dirs=search_dirs)
def func_find_library(self, node, args, kwargs):
    """find_library() no longer exists as a top-level function; direct
    users to the compiler-object method that replaced it."""
    raise InvalidCode('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n'
                      'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n'
                      'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n'
                      )
# When adding kwargs, please check if they make sense in dependencies.get_dep_identifier()
@FeatureNewKwargs('dependency', '0.57.0', ['cmake_package_version'])
@FeatureNewKwargs('dependency', '0.56.0', ['allow_fallback'])
@FeatureNewKwargs('dependency', '0.54.0', ['components'])
@FeatureNewKwargs('dependency', '0.52.0', ['include_type'])
@FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args'])
@FeatureNewKwargs('dependency', '0.49.0', ['disabler'])
@FeatureNewKwargs('dependency', '0.40.0', ['method'])
@FeatureNewKwargs('dependency', '0.38.0', ['default_options'])
@disablerIfNotFound
@permittedKwargs(permitted_dependency_kwargs)
@typed_pos_args('dependency', varargs=str, min_varargs=1)
def func_dependency(self, node, args, kwargs):
    """Implement dependency(): look up an external dependency (with
    optional subproject fallback), emit not_found_message when the lookup
    fails, and convert the include type on request.

    Bug fix: the not-found message was emitted twice when the dependency
    was cleanly not found (the self.message_impl call was duplicated);
    it is now emitted once, matching the exception path.
    """
    # Replace '' by empty list of names
    names = [n for n in args[0] if n]
    if len(names) > 1:
        FeatureNew('dependency with more than one name', '0.60.0').use(self.subproject)
    allow_fallback = kwargs.get('allow_fallback')
    if allow_fallback is not None and not isinstance(allow_fallback, bool):
        raise InvalidArguments('"allow_fallback" argument must be boolean')
    fallback = kwargs.get('fallback')
    default_options = kwargs.get('default_options')
    df = DependencyFallbacksHolder(self, names, allow_fallback, default_options)
    df.set_fallback(fallback)
    not_found_message = kwargs.get('not_found_message', '')
    if not isinstance(not_found_message, str):
        raise InvalidArguments('The not_found_message must be a string.')
    try:
        d = df.lookup(kwargs)
    except Exception:
        # Show the project's custom message even when the lookup raises.
        if not_found_message:
            self.message_impl([not_found_message])
        raise
    assert isinstance(d, Dependency)
    if not d.found() and not_found_message:
        self.message_impl([not_found_message])
    # Ensure the correct include type
    if 'include_type' in kwargs:
        wanted = kwargs['include_type']
        if not isinstance(wanted, str):
            raise InvalidArguments('The `include_type` kwarg must be a string')
        actual = d.get_include_type()
        if wanted != actual:
            mlog.debug(f'Current include type of {args[0]} is {actual}. Converting to requested {wanted}')
            d = d.generate_system_dependency(wanted)
    if d.feature_since is not None:
        version, extra_msg = d.feature_since
        FeatureNew.single_use(f'dep {d.name!r} custom lookup', version, self.subproject, extra_msg, node)
    for f in d.featurechecks:
        f.use(self.subproject, node)
    return d
@FeatureNew('disabler', '0.44.0')
@noKwargs
@noPosargs
def func_disabler(self, node, args, kwargs):
    """disabler(): return a fresh Disabler instance."""
    return Disabler()
@FeatureNewKwargs('executable', '0.42.0', ['implib'])
@FeatureNewKwargs('executable', '0.56.0', ['win_subsystem'])
@FeatureDeprecatedKwargs('executable', '0.56.0', ['gui_app'], extra_message="Use 'win_subsystem' instead.")
@permittedKwargs(build.known_exe_kwargs)
def func_executable(self, node, args, kwargs):
    """executable(): delegate to the generic build_target machinery."""
    target_class = build.Executable
    return self.build_target(node, args, kwargs, target_class)
@permittedKwargs(build.known_stlib_kwargs)
def func_static_lib(self, node, args, kwargs):
    """static_library(): delegate to the generic build_target machinery."""
    target_class = build.StaticLibrary
    return self.build_target(node, args, kwargs, target_class)
@permittedKwargs(build.known_shlib_kwargs)
def func_shared_lib(self, node, args, kwargs):
    """shared_library(): build a SharedLibrary and flag it as shared-only."""
    target = self.build_target(node, args, kwargs, build.SharedLibrary)
    target.shared_library_only = True
    return target
@permittedKwargs(known_library_kwargs)
def func_both_lib(self, node, args, kwargs):
    """both_libraries(): build the shared and static variants together."""
    return self.build_both_libraries(node, args, kwargs)
@FeatureNew('shared_module', '0.37.0')
@permittedKwargs(build.known_shmod_kwargs)
def func_shared_module(self, node, args, kwargs):
    """shared_module(): delegate to the generic build_target machinery."""
    target_class = build.SharedModule
    return self.build_target(node, args, kwargs, target_class)
@permittedKwargs(known_library_kwargs)
def func_library(self, node, args, kwargs):
    """library(): build whichever library kind the configuration selects."""
    return self.build_library(node, args, kwargs)
@permittedKwargs(build.known_jar_kwargs)
def func_jar(self, node, args, kwargs):
    """jar(): delegate to the generic build_target machinery."""
    target_class = build.Jar
    return self.build_target(node, args, kwargs, target_class)
@FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options'])
@permittedKwargs(known_build_target_kwargs)
def func_build_target(self, node, args, kwargs):
    """build_target(): dispatch to the concrete builder selected by the
    mandatory 'target_type' keyword argument."""
    if 'target_type' not in kwargs:
        raise InterpreterException('Missing target_type keyword argument')
    target_type = kwargs.pop('target_type')

    # These two have dedicated builder entry points.
    if target_type == 'both_libraries':
        return self.build_both_libraries(node, args, kwargs)
    if target_type == 'library':
        return self.build_library(node, args, kwargs)

    target_classes = {
        'executable': build.Executable,
        'shared_library': build.SharedLibrary,
        'shared_module': build.SharedModule,
        'static_library': build.StaticLibrary,
        'jar': build.Jar,
    }
    if target_type not in target_classes:
        raise InterpreterException('Unknown target_type.')
    if target_type == 'shared_module':
        # shared_module via build_target() arrived later than the rest.
        FeatureNew('build_target(target_type: \'shared_module\')',
                   '0.51.0').use(self.subproject)
    return self.build_target(node, args, kwargs, target_classes[target_type])
@noPosargs
@typed_kwargs(
    'vcs_tag',
    CT_INPUT_KW.evolve(required=True),
    CT_OUTPUT_KW,
    # Cannot use the COMMAND_KW because command is allowed to be empty
    KwargInfo(
        'command',
        ContainerTypeInfo(list, (str, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex, ExternalProgram, mesonlib.File)),
        listify=True,
        default=[],
    ),
    KwargInfo('fallback', (str, NoneType)),
    KwargInfo('replace_string', str, default='@VCS_TAG@'),
)
def func_vcs_tag(self, node: mparser.BaseNode, args: T.List['TYPE_var'], kwargs: 'kwargs.VcsTag') -> build.CustomTarget:
    """Implement vcs_tag(): create a CustomTarget that runs the vcstagger
    helper to substitute the VCS revision into the output file.

    With an explicit 'command' that command is used (its first element
    resolved via find_program when possible); otherwise the VCS is
    auto-detected from the source dir.  The fallback string is used when
    no revision can be obtained.
    """
    if kwargs['fallback'] is None:
        FeatureNew.single_use('Optional fallback in vcs_tag', '0.41.0', self.subproject, location=node)
    fallback = kwargs['fallback'] or self.project_version
    replace_string = kwargs['replace_string']
    regex_selector = '(.*)' # default regex selector for custom command: use complete output
    vcs_cmd = kwargs['command']
    source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir))
    if vcs_cmd:
        if isinstance(vcs_cmd[0], mesonlib.File):
            FeatureNew.single_use('vcs_tag with file as the first argument', '0.62.0', self.subproject, location=node)
        maincmd = self.find_program_impl(vcs_cmd[0], required=False)
        if maincmd.found():
            vcs_cmd[0] = maincmd
    else:
        vcs = mesonlib.detect_vcs(source_dir)
        if vcs:
            mlog.log('Found {} repository at {}'.format(vcs['name'], vcs['wc_dir']))
            vcs_cmd = vcs['get_rev'].split()
            regex_selector = vcs['rev_regex']
        else:
            vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force to use the fallback string
    # vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command...
    self._validate_custom_target_outputs(len(kwargs['input']) > 1, kwargs['output'], "vcs_tag")
    tg = build.CustomTarget(
        kwargs['output'][0],
        self.subdir,
        self.subproject,
        self.environment.get_build_command() +
        ['--internal',
         'vcstagger',
         '@INPUT0@',
         '@OUTPUT0@',
         fallback,
         source_dir,
         replace_string,
         regex_selector] + vcs_cmd,
        self.source_strings_to_files(kwargs['input']),
        kwargs['output'],
        # Always stale so the revision is re-checked on every build.
        build_by_default=True,
        build_always_stale=True,
    )
    self.add_target(tg.name, tg)
    return tg
@FeatureNew('subdir_done', '0.46.0')
@noPosargs
@noKwargs
def func_subdir_done(self, node, args, kwargs):
    """subdir_done(): stop evaluating the current build file early.

    The request is caught by the build-file evaluation loop (see
    func_subdir), which simply stops processing further statements.
    """
    raise SubdirDoneRequest()
@staticmethod
def _validate_custom_target_outputs(has_multi_in: bool, outputs: T.Iterable[str], name: str) -> None:
    """Checks for additional invalid values in a custom_target output.

    This cannot be done with typed_kwargs because it requires the number of
    inputs.

    Fix: corrected the "containe" typo in the user-facing error message.
    """
    for out in outputs:
        if has_multi_in and ('@PLAINNAME@' in out or '@BASENAME@' in out):
            # With several inputs these per-input placeholders are ambiguous.
            raise InvalidArguments(f'{name}: output cannot contain "@PLAINNAME@" or "@BASENAME@" '
                                   'when there is more than one input (we can\'t know which to use)')
@typed_pos_args('custom_target', optargs=[str])
@typed_kwargs(
    'custom_target',
    COMMAND_KW,
    CT_BUILD_ALWAYS,
    CT_BUILD_ALWAYS_STALE,
    CT_BUILD_BY_DEFAULT,
    CT_INPUT_KW,
    CT_INSTALL_DIR_KW,
    CT_INSTALL_TAG_KW,
    CT_OUTPUT_KW,
    DEPENDS_KW,
    DEPEND_FILES_KW,
    DEPFILE_KW,
    ENV_KW.evolve(since='0.57.0'),
    INSTALL_KW,
    INSTALL_MODE_KW.evolve(since='0.47.0'),
    OVERRIDE_OPTIONS_KW,
    KwargInfo('feed', bool, default=False, since='0.59.0'),
    KwargInfo('capture', bool, default=False),
    KwargInfo('console', bool, default=False, since='0.48.0'),
)
def func_custom_target(self, node: mparser.FunctionNode, args: T.Tuple[str],
                       kwargs: 'kwargs.CustomTarget') -> build.CustomTarget:
    """Implement custom_target(): validate the keyword combinations,
    normalize the build_always/build_by_default/build_always_stale
    triple, resolve the command program, and register the target.

    Bug fix: when only 'build_always' was given, build_always_stale was
    mistakenly seeded from kwargs['build_by_default']; it now takes the
    value of kwargs['build_always'], matching the documented remapping.
    Also fixed duplicated "used used" in two error messages.
    """
    if kwargs['depfile'] and ('@BASENAME@' in kwargs['depfile'] or '@PLAINNAME@' in kwargs['depfile']):
        FeatureNew.single_use('substitutions in custom_target depfile', '0.47.0', self.subproject, location=node)

    # Don't mutate the kwargs
    build_by_default = kwargs['build_by_default']
    build_always_stale = kwargs['build_always_stale']
    # Remap build_always to build_by_default and build_always_stale
    if kwargs['build_always'] is not None and kwargs['build_always_stale'] is not None:
        raise InterpreterException('CustomTarget: "build_always" and "build_always_stale" are mutually exclusive')

    if build_by_default is None and kwargs['install']:
        build_by_default = True
    elif kwargs['build_always'] is not None:
        if build_by_default is None:
            build_by_default = kwargs['build_always']
        build_always_stale = kwargs['build_always']

    # These are nullable so that we can know whether they're explicitly
    # set or not. If they haven't been overwritten, set them to their true
    # default
    if build_by_default is None:
        build_by_default = False
    if build_always_stale is None:
        build_always_stale = False

    name = args[0]
    if name is None:
        # name will default to first output, but we cannot do that yet because
        # they could need substitutions (e.g. @BASENAME@) first. CustomTarget()
        # will take care of setting a proper default but name must be an empty
        # string in the meantime.
        FeatureNew.single_use('custom_target() with no name argument', '0.60.0', self.subproject, location=node)
        name = ''
    inputs = self.source_strings_to_files(kwargs['input'], strict=False)
    command = kwargs['command']
    if command and isinstance(command[0], str):
        command[0] = self.find_program_impl([command[0]])

    if len(inputs) > 1 and kwargs['feed']:
        raise InvalidArguments('custom_target: "feed" keyword argument can only be used with a single input')
    if len(kwargs['output']) > 1 and kwargs['capture']:
        raise InvalidArguments('custom_target: "capture" keyword argument can only be used with a single output')
    if kwargs['capture'] and kwargs['console']:
        raise InvalidArguments('custom_target: "capture" and "console" keyword arguments are mutually exclusive')
    for c in command:
        if kwargs['capture'] and isinstance(c, str) and '@OUTPUT@' in c:
            raise InvalidArguments('custom_target: "capture" keyword argument cannot be used with "@OUTPUT@"')
        if kwargs['feed'] and isinstance(c, str) and '@INPUT@' in c:
            raise InvalidArguments('custom_target: "feed" keyword argument cannot be used with "@INPUT@"')
    if kwargs['install'] and not kwargs['install_dir']:
        raise InvalidArguments('custom_target: "install_dir" keyword argument must be set when "install" is true.')
    if len(kwargs['install_dir']) > 1:
        FeatureNew.single_use('multiple install_dir for custom_target', '0.40.0', self.subproject, location=node)
    if len(kwargs['install_tag']) not in {0, 1, len(kwargs['output'])}:
        raise InvalidArguments('custom_target: install_tag argument must have 0 or 1 outputs, '
                               'or the same number of elements as the output keyword argument. '
                               f'(there are {len(kwargs["install_tag"])} install_tags, '
                               f'and {len(kwargs["output"])} outputs)')

    self._validate_custom_target_outputs(len(inputs) > 1, kwargs['output'], "custom_target")

    tg = build.CustomTarget(
        name,
        self.subdir,
        self.subproject,
        command,
        inputs,
        kwargs['output'],
        build_always_stale=build_always_stale,
        build_by_default=build_by_default,
        capture=kwargs['capture'],
        console=kwargs['console'],
        depend_files=kwargs['depend_files'],
        depfile=kwargs['depfile'],
        extra_depends=kwargs['depends'],
        env=kwargs['env'],
        feed=kwargs['feed'],
        install=kwargs['install'],
        install_dir=kwargs['install_dir'],
        install_mode=kwargs['install_mode'],
        install_tag=kwargs['install_tag'],
        override_options=kwargs['override_options'],
        backend=self.backend)
    self.add_target(tg.name, tg)
    return tg
@typed_pos_args('run_target', str)
@typed_kwargs(
    'run_target',
    COMMAND_KW,
    DEPENDS_KW,
    ENV_KW.evolve(since='0.57.0'),
)
def func_run_target(self, node: mparser.FunctionNode, args: T.Tuple[str],
                    kwargs: 'kwargs.RunTarget') -> build.RunTarget:
    """Implement run_target(): validate the command, resolve a string
    program name via find_program, and register the RunTarget (also
    recording its (subproject, name) pair for uniqueness tracking)."""
    all_args = kwargs['command'].copy()

    for i in listify(all_args):
        if isinstance(i, ExternalProgram) and not i.found():
            raise InterpreterException(f'Tried to use non-existing executable {i.name!r}')
    if isinstance(all_args[0], str):
        all_args[0] = self.find_program_impl([all_args[0]])
    name = args[0]
    tg = build.RunTarget(name, all_args, kwargs['depends'], self.subdir, self.subproject, kwargs['env'])
    self.add_target(name, tg)
    full_name = (self.subproject, name)
    # add_target is expected to have rejected duplicates already.
    assert full_name not in self.build.run_target_names
    self.build.run_target_names.add(full_name)
    return tg
@FeatureNew('alias_target', '0.52.0')
@typed_pos_args('alias_target', str, varargs=build.Target, min_varargs=1)
@noKwargs
def func_alias_target(self, node: mparser.BaseNode, args: T.Tuple[str, T.List[build.Target]],
                      kwargs: 'TYPE_kwargs') -> build.AliasTarget:
    """alias_target(): register a named target that only depends on others."""
    alias_name, dependencies = args
    alias = build.AliasTarget(alias_name, dependencies, self.subdir, self.subproject)
    self.add_target(alias_name, alias)
    return alias
@typed_pos_args('generator', (build.Executable, ExternalProgram))
@typed_kwargs(
    'generator',
    KwargInfo('arguments', ContainerTypeInfo(list, str, allow_empty=False), required=True, listify=True),
    KwargInfo('output', ContainerTypeInfo(list, str, allow_empty=False), required=True, listify=True),
    DEPFILE_KW,
    DEPENDS_KW,
    KwargInfo('capture', bool, default=False, since='0.43.0'),
)
def func_generator(self, node: mparser.FunctionNode,
                   args: T.Tuple[T.Union[build.Executable, ExternalProgram]],
                   kwargs: 'kwargs.FuncGenerator') -> build.Generator:
    """generator(): validate the output name patterns and register a
    Generator wrapping the given program."""
    outputs = kwargs['output']
    for pattern in outputs:
        if '@BASENAME@' not in pattern and '@PLAINNAME@' not in pattern:
            raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
        if has_path_sep(pattern):
            raise InvalidArguments('"output" must not contain a directory separator.')
    # @OUTPUT@ is ambiguous as soon as there is more than one output file.
    if len(outputs) > 1 and any('@OUTPUT@' in o for o in outputs):
        raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
    gen = build.Generator(args[0], **kwargs)
    self.generators.append(gen)
    return gen
@typed_pos_args('benchmark', str, (build.Executable, build.Jar, ExternalProgram, mesonlib.File))
@typed_kwargs('benchmark', *TEST_KWARGS)
def func_benchmark(self, node: mparser.BaseNode,
                   args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
                   kwargs: 'kwargs.FuncBenchmark') -> None:
    """benchmark(): register a benchmark via the shared test machinery."""
    self.add_test(node, args, kwargs, False)
@typed_pos_args('test', str, (build.Executable, build.Jar, ExternalProgram, mesonlib.File))
@typed_kwargs('test', *TEST_KWARGS, KwargInfo('is_parallel', bool, default=True))
def func_test(self, node: mparser.BaseNode,
              args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
              kwargs: 'kwargs.FuncTest') -> None:
    """test(): register a test via the shared test machinery."""
    self.add_test(node, args, kwargs, True)
def unpack_env_kwarg(self, kwargs: T.Union[build.EnvironmentVariables, T.Dict[str, 'TYPE_var'], T.List['TYPE_var'], str]) -> build.EnvironmentVariables:
    """Normalize the 'env' keyword argument into an EnvironmentVariables
    object, validating it through the shared ENV_KW machinery."""
    raw_env = kwargs.get('env')
    if raw_env is None:
        return build.EnvironmentVariables()
    error = ENV_KW.validator(raw_env)
    if error:
        raise InvalidArguments(f'"env": {error}')
    return ENV_KW.convertor(raw_env)
def make_test(self, node: mparser.BaseNode,
              args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
              kwargs: 'kwargs.BaseTest') -> Test:
    """Build a Test object from test()/benchmark() arguments: sanitize the
    name, resolve the executable, normalize env, and prefix suite names
    with the (sanitized) project name."""
    name = args[0]
    if ':' in name:
        # ':' is the suite separator, so it cannot appear in test names.
        mlog.deprecation(f'":" is not allowed in test name "{name}", it has been replaced with "_"',
                         location=node)
        name = name.replace(':', '_')
    exe = args[1]
    if isinstance(exe, ExternalProgram):
        if not exe.found():
            raise InvalidArguments('Tried to use not-found external program as test exe')
    elif isinstance(exe, mesonlib.File):
        exe = self.find_program_impl([exe])
    env = self.unpack_env_kwarg(kwargs)

    if kwargs['timeout'] <= 0:
        FeatureNew.single_use('test() timeout <= 0', '0.57.0', self.subproject, location=node)

    prj = self.subproject if self.is_subproject() else self.build.project_name

    suite: T.List[str] = []
    for s in kwargs['suite']:
        if s:
            s = ':' + s
        suite.append(prj.replace(' ', '_').replace(':', '_') + s)

    # 'is_parallel' only exists for test(), not benchmark(); default False.
    return Test(name,
                prj,
                suite,
                exe,
                kwargs['depends'],
                kwargs.get('is_parallel', False),
                kwargs['args'],
                env,
                kwargs['should_fail'],
                kwargs['timeout'],
                kwargs['workdir'],
                kwargs['protocol'],
                kwargs['priority'],
                kwargs['verbose'])
def add_test(self, node: mparser.BaseNode, args: T.List, kwargs: T.Dict[str, T.Any], is_base_test: bool):
    """Create a Test from the arguments and record it as either a test or
    a benchmark, depending on *is_base_test*."""
    test_obj = self.make_test(node, args, kwargs)
    if is_base_test:
        self.build.tests.append(test_obj)
        mlog.debug('Adding test', mlog.bold(test_obj.name, True))
    else:
        self.build.benchmarks.append(test_obj)
        mlog.debug('Adding benchmark', mlog.bold(test_obj.name, True))
@typed_pos_args('install_headers', varargs=(str, mesonlib.File))
@typed_kwargs(
    'install_headers',
    KwargInfo('install_dir', (str, NoneType)),
    KwargInfo('subdir', (str, NoneType)),
    INSTALL_MODE_KW.evolve(since='0.47.0'),
)
def func_install_headers(self, node: mparser.BaseNode,
                         args: T.Tuple[T.List['mesonlib.FileOrString']],
                         kwargs: 'kwargs.FuncInstallHeaders') -> build.Headers:
    """Implement install_headers(): register header files for installation.

    'subdir' and 'install_dir' are mutually exclusive; an absolute
    'subdir' is deprecated (currently only warned about).
    """
    source_files = self.source_strings_to_files(args[0])
    install_subdir = kwargs['subdir']
    if install_subdir is not None:
        if kwargs['install_dir'] is not None:
            raise InterpreterException('install_headers: cannot specify both "install_dir" and "subdir". Use only "install_dir".')
        if os.path.isabs(install_subdir):
            mlog.deprecation('Subdir keyword must not be an absolute path. This will be a hard error in the next release.')

    h = build.Headers(source_files, install_subdir, kwargs['install_dir'],
                      kwargs['install_mode'], self.subproject)
    self.build.headers.append(h)

    return h
@typed_pos_args('install_man', varargs=(str, mesonlib.File))
@typed_kwargs(
    'install_man',
    KwargInfo('install_dir', (str, NoneType)),
    KwargInfo('locale', (str, NoneType), since='0.58.0'),
    INSTALL_MODE_KW.evolve(since='0.47.0')
)
def func_install_man(self, node: mparser.BaseNode,
                     args: T.Tuple[T.List['mesonlib.FileOrString']],
                     kwargs: 'kwargs.FuncInstallMan') -> build.Man:
    """Implement install_man(): register man pages for installation,
    validating that each file's extension is a section number 1-9."""
    # We just need to narrow this, because the input is limited to files and
    # Strings as inputs, so only Files will be returned
    sources = self.source_strings_to_files(args[0])
    for s in sources:
        try:
            # NOTE(review): this calls rsplit on a File object — presumably
            # File exposes a str-like rsplit over its name; confirm.
            num = int(s.rsplit('.', 1)[-1])
        except (IndexError, ValueError):
            num = 0
        if not 1 <= num <= 9:
            raise InvalidArguments('Man file must have a file extension of a number between 1 and 9')

    m = build.Man(sources, kwargs['install_dir'], kwargs['install_mode'],
                  self.subproject, kwargs['locale'])
    self.build.man.append(m)

    return m
@FeatureNew('install_emptydir', '0.60.0')
@typed_kwargs(
    'install_emptydir',
    INSTALL_MODE_KW,
    KwargInfo('install_tag', (str, NoneType), since='0.62.0')
)
def func_install_emptydir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs) -> None:
    """install_emptydir(): schedule creation of an empty directory at
    install time.

    NOTE(review): the annotation says -> None but the object is returned;
    kept as-is for interface compatibility.
    """
    emptydir = build.EmptyDir(args[0], kwargs['install_mode'], self.subproject, kwargs['install_tag'])
    self.build.emptydir.append(emptydir)
    return emptydir
@FeatureNew('install_symlink', '0.61.0')
@typed_pos_args('symlink_name', str)
@typed_kwargs(
    'install_symlink',
    KwargInfo('pointing_to', str, required=True),
    KwargInfo('install_dir', str, required=True),
    INSTALL_TAG_KW,
)
def func_install_symlink(self, node: mparser.BaseNode,
                         args: T.Tuple[T.List[str]],
                         kwargs) -> build.SymlinkData:
    """install_symlink(): schedule installation of a symlink pointing at
    'pointing_to' inside 'install_dir'."""
    link_name = args[0]  # Validation happens inside the SymlinkData object.
    link = build.SymlinkData(kwargs['pointing_to'], link_name, kwargs['install_dir'],
                             self.subproject, kwargs['install_tag'])
    self.build.symlinks.append(link)
    return link
@FeatureNew('structured_sources', '0.62.0')
@typed_pos_args('structured_sources', object, optargs=[dict])
@noKwargs
@noArgsFlattening
def func_structured_sources(
        self, node: mparser.BaseNode,
        args: T.Tuple[object, T.Optional[T.Dict[str, object]]],
        kwargs: 'TYPE_kwargs') -> build.StructuredSources:
    """Implement structured_sources(): collect sources into a mapping of
    destination subdirectory -> source list (the positional list maps to '').

    Fix: valid_types listed build.GeneratedList twice; the duplicate entry
    is removed (isinstance semantics are unchanged).  The identical
    validate/convert logic for the positional and dict arguments is now
    shared in one helper.
    """
    valid_types = (str, mesonlib.File, build.GeneratedList, build.CustomTarget, build.CustomTargetIndex)
    sources: T.Dict[str, T.List[T.Union[mesonlib.File, 'build.GeneratedTypes']]] = collections.defaultdict(list)

    def _normalize(arg: object) -> T.Union[mesonlib.File, 'build.GeneratedTypes']:
        # Reject unsupported types and turn plain strings into File objects.
        if not isinstance(arg, valid_types):
            raise InvalidArguments(f'structured_sources: type "{type(arg)}" is not valid')
        if isinstance(arg, str):
            arg = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, arg)
        return arg

    for arg in mesonlib.listify(args[0]):
        sources[''].append(_normalize(arg))
    if args[1]:
        if '' in args[1]:
            raise InvalidArguments('structured_sources: keys to dictionary argument may not be an empty string.')
        for k, v in args[1].items():
            for arg in mesonlib.listify(v):
                sources[k].append(_normalize(arg))
    return build.StructuredSources(sources)
@typed_pos_args('subdir', str)
@typed_kwargs(
    'subdir',
    KwargInfo(
        'if_found',
        ContainerTypeInfo(list, object),
        validator=lambda a: 'Objects must have a found() method' if not all(hasattr(x, 'found') for x in a) else None,
        since='0.44.0',
        default=[],
        listify=True,
    ),
)
def func_subdir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.Subdir') -> None:
    """Implement subdir(): descend into a subdirectory, parse and evaluate
    its meson.build, then restore the previous subdir.

    Rejects '..', absolute paths, the subprojects dir, re-entering an
    already-visited directory, and (at top level) 'meson-' prefixed
    names.  'if_found' skips the descent if any dependency is not found.
    """
    mesonlib.check_direntry_issues(args)
    if '..' in args[0]:
        raise InvalidArguments('Subdir contains ..')
    if self.subdir == '' and args[0] == self.subproject_dir:
        raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.')
    if self.subdir == '' and args[0].startswith('meson-'):
        raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().')
    if args[0] == '':
        raise InvalidArguments("The argument given to subdir() is the empty string ''. This is prohibited.")
    for i in kwargs['if_found']:
        if not i.found():
            return

    prev_subdir = self.subdir
    subdir = os.path.join(prev_subdir, args[0])
    if os.path.isabs(subdir):
        raise InvalidArguments('Subdir argument must be a relative path.')
    absdir = os.path.join(self.environment.get_source_dir(), subdir)
    # Resolve symlinks so the same directory reached by two paths is
    # detected as already visited.
    symlinkless_dir = os.path.realpath(absdir)
    build_file = os.path.join(symlinkless_dir, 'meson.build')
    if build_file in self.processed_buildfiles:
        raise InvalidArguments(f'Tried to enter directory "{subdir}", which has already been visited.')
    self.processed_buildfiles.add(build_file)
    self.subdir = subdir
    os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True)
    buildfilename = os.path.join(self.subdir, environment.build_filename)
    self.build_def_files.add(buildfilename)
    absname = os.path.join(self.environment.get_source_dir(), buildfilename)
    if not os.path.isfile(absname):
        # Restore state before reporting so later evaluation continues
        # from the right place.
        self.subdir = prev_subdir
        raise InterpreterException(f"Non-existent build file '{buildfilename!s}'")
    with open(absname, encoding='utf-8') as f:
        code = f.read()
    assert isinstance(code, str)
    try:
        codeblock = mparser.Parser(code, absname).parse()
    except mesonlib.MesonException as me:
        me.file = absname
        raise me
    try:
        self.evaluate_codeblock(codeblock)
    except SubdirDoneRequest:
        # subdir_done() was called: stop evaluating this file early.
        pass
    self.subdir = prev_subdir
def _get_kwarg_install_mode(self, kwargs: T.Dict[str, T.Any]) -> T.Optional[FileMode]:
    """Convert the 'install_mode' kwarg into a FileMode.

    Accepts an existing FileMode, or a list of up to three entries
    (permissions string, owner, group); entries given as `false` are
    replaced with None so FileMode treats them as "use default".
    Returns None when the kwarg is absent.
    """
    if kwargs.get('install_mode', None) is None:
        return None
    if isinstance(kwargs['install_mode'], FileMode):
        return kwargs['install_mode']
    install_mode: T.List[str] = []
    mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int))
    for m in mode:
        # We skip any arguments that are set to `false`
        if m is False:
            m = None
        install_mode.append(m)
    if len(install_mode) > 3:
        raise InvalidArguments('Keyword argument install_mode takes at '
                               'most 3 arguments.')
    if len(install_mode) > 0 and install_mode[0] is not None and \
            not isinstance(install_mode[0], str):
        raise InvalidArguments('Keyword argument install_mode requires the '
                               'permissions arg to be a string or false')
    return FileMode(*install_mode)
@typed_pos_args('install_data', varargs=(str, mesonlib.File))
@typed_kwargs(
    'install_data',
    KwargInfo('install_dir', (str, NoneType)),
    KwargInfo('sources', ContainerTypeInfo(list, (str, mesonlib.File)), listify=True, default=[]),
    KwargInfo('rename', ContainerTypeInfo(list, str), default=[], listify=True, since='0.46.0'),
    INSTALL_MODE_KW.evolve(since='0.38.0'),
    INSTALL_TAG_KW.evolve(since='0.60.0'),
)
def func_install_data(self, node: mparser.BaseNode,
                      args: T.Tuple[T.List['mesonlib.FileOrString']],
                      kwargs: 'kwargs.FuncInstallData') -> build.Data:
    """Implement install_data(): merge positional and 'sources' files,
    validate 'rename' length, compute a display name for the install dir
    (relative dirs are shown under {datadir}), and register the Data."""
    sources = self.source_strings_to_files(args[0] + kwargs['sources'])
    rename = kwargs['rename'] or None
    if rename:
        if len(rename) != len(sources):
            raise InvalidArguments(
                '"rename" and "sources" argument lists must be the same length if "rename" is given. '
                f'Rename has {len(rename)} elements and sources has {len(sources)}.')

    install_dir_name = kwargs['install_dir']
    if install_dir_name:
        if not os.path.isabs(install_dir_name):
            install_dir_name = os.path.join('{datadir}', install_dir_name)
    else:
        install_dir_name = '{datadir}'
    return self.install_data_impl(sources, kwargs['install_dir'], kwargs['install_mode'],
                                  rename, kwargs['install_tag'], install_dir_name)
def install_data_impl(self, sources: T.List[mesonlib.File], install_dir: str,
                      install_mode: FileMode, rename: T.Optional[T.List[str]],
                      tag: T.Optional[str],
                      install_dir_name: T.Optional[str] = None,
                      install_data_type: T.Optional[str] = None) -> build.Data:
    """Just the implementation with no validation.

    :param sources: the files to install
    :param install_dir: destination directory (may be falsy for the default)
    :param install_mode: permissions for the installed files
    :param rename: optional new names, one per source, or None. (Annotation
        corrected: func_install_data always passes a list of str or None,
        never a bare str.)
    :param tag: optional install tag
    :param install_dir_name: display name of the destination, e.g. '{datadir}/foo'
    :param install_data_type: marker for special data (e.g. 'configure')
    :return: the build.Data object that was appended to self.build.data
    """
    data = build.Data(sources, install_dir, install_dir_name or install_dir, install_mode,
                      self.subproject, rename, tag, install_data_type)
    self.build.data.append(data)
    return data
@typed_pos_args('install_subdir', str)
@typed_kwargs(
    'install_subdir',
    KwargInfo('install_dir', str, required=True),
    KwargInfo('strip_directory', bool, default=False),
    KwargInfo('exclude_files', ContainerTypeInfo(list, str),
              default=[], listify=True, since='0.42.0',
              validator=lambda x: 'cannot be absolute' if any(os.path.isabs(d) for d in x) else None),
    KwargInfo('exclude_directories', ContainerTypeInfo(list, str),
              default=[], listify=True, since='0.42.0',
              validator=lambda x: 'cannot be absolute' if any(os.path.isabs(d) for d in x) else None),
    INSTALL_MODE_KW.evolve(since='0.38.0'),
    INSTALL_TAG_KW.evolve(since='0.60.0'),
)
def func_install_subdir(self, node: mparser.BaseNode, args: T.Tuple[str],
                        kwargs: 'kwargs.FuncInstallSubdir') -> build.InstallDir:
    """Implementation of the install_subdir() function."""
    # Exclusions are de-duplicated into sets of files and directories.
    exclusions = (set(kwargs['exclude_files']), set(kwargs['exclude_directories']))
    installed_dir = build.InstallDir(
        self.subdir,
        args[0],
        kwargs['install_dir'],
        kwargs['install_mode'],
        exclusions,
        kwargs['strip_directory'],
        self.subproject,
        install_tag=kwargs['install_tag'])
    self.build.install_dirs.append(installed_dir)
    return installed_dir
@noPosargs
@typed_kwargs(
    'configure_file',
    DEPFILE_KW.evolve(since='0.52.0'),
    # NOTE: fixed stray comma inside the version string ('0.47.0,').
    INSTALL_MODE_KW.evolve(since='0.47.0'),
    INSTALL_TAG_KW.evolve(since='0.60.0'),
    KwargInfo('capture', bool, default=False, since='0.41.0'),
    KwargInfo(
        'command',
        (ContainerTypeInfo(list, (build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str), allow_empty=False), NoneType),
        listify=True,
    ),
    KwargInfo(
        'configuration',
        (ContainerTypeInfo(dict, (str, int, bool)), build.ConfigurationData, NoneType),
    ),
    KwargInfo('copy', bool, default=False, since='0.47.0'),
    KwargInfo('encoding', str, default='utf-8', since='0.47.0'),
    KwargInfo('format', str, default='meson', since='0.46.0',
              validator=in_set_validator({'meson', 'cmake', 'cmake@'})),
    KwargInfo(
        'input',
        ContainerTypeInfo(list, (mesonlib.File, str)),
        listify=True,
        default=[],
    ),
    # Cannot use shared implementation until None backwards compat is dropped
    KwargInfo('install', (bool, NoneType), since='0.50.0'),
    KwargInfo('install_dir', (str, bool), default='',
              validator=lambda x: 'must be `false` if boolean' if x is True else None),
    KwargInfo('output', str, required=True),
    KwargInfo('output_format', str, default='c', since='0.47.0',
              validator=in_set_validator({'c', 'nasm'})),
)
def func_configure_file(self, node: mparser.BaseNode, args: T.List[TYPE_var],
                        kwargs: kwargs.ConfigureFile):
    """Implementation of configure_file().

    Exactly one of the mutually exclusive actions 'configuration',
    'command' or 'copy' must be supplied. The produced file can
    optionally be installed.
    """
    actions = sorted(x for x in {'configuration', 'command', 'copy'}
                     if kwargs[x] not in [None, False])
    num_actions = len(actions)
    if num_actions == 0:
        raise InterpreterException('Must specify an action with one of these '
                                   'keyword arguments: \'configuration\', '
                                   '\'command\', or \'copy\'.')
    elif num_actions == 2:
        raise InterpreterException('Must not specify both {!r} and {!r} '
                                   'keyword arguments since they are '
                                   'mutually exclusive.'.format(*actions))
    elif num_actions == 3:
        raise InterpreterException('Must specify one of {!r}, {!r}, and '
                                   '{!r} keyword arguments since they are '
                                   'mutually exclusive.'.format(*actions))
    if kwargs['capture'] and not kwargs['command']:
        raise InvalidArguments('configure_file: "capture" keyword requires "command" keyword.')
    fmt = kwargs['format']
    output_format = kwargs['output_format']
    depfile = kwargs['depfile']
    # Validate input
    inputs = self.source_strings_to_files(kwargs['input'])
    inputs_abs = []
    for f in inputs:
        if isinstance(f, mesonlib.File):
            inputs_abs.append(f.absolute_path(self.environment.source_dir,
                                              self.environment.build_dir))
            self.add_build_def_file(f)
        else:
            raise InterpreterException('Inputs can only be strings or file objects')
    # Validate output: @PLAINNAME@ etc. in the output name are expanded
    # from the first input.
    output = kwargs['output']
    if inputs_abs:
        values = mesonlib.get_filenames_templates_dict(inputs_abs, None)
        outputs = mesonlib.substitute_values([output], values)
        output = outputs[0]
        if depfile:
            depfile = mesonlib.substitute_values([depfile], values)[0]
    ofile_rpath = os.path.join(self.subdir, output)
    if ofile_rpath in self.configure_file_outputs:
        # Two configure_file() calls writing the same output: warn with the
        # location of both calls.
        mesonbuildfile = os.path.join(self.subdir, 'meson.build')
        current_call = f"{mesonbuildfile}:{self.current_lineno}"
        first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath])
        mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call)
    else:
        self.configure_file_outputs[ofile_rpath] = self.current_lineno
    if os.path.dirname(output) != '':
        raise InterpreterException('Output file name must not contain a subdirectory.')
    (ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output))
    ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname)
    # Perform the appropriate action
    if kwargs['configuration'] is not None:
        conf = kwargs['configuration']
        if isinstance(conf, dict):
            FeatureNew.single_use('configure_file.configuration dictionary', '0.49.0', self.subproject, location=node)
            for k, v in conf.items():
                if not isinstance(v, (str, int, bool)):
                    # NOTE: fixed a doubled quote after {k!r} in this message.
                    raise InvalidArguments(
                        f'"configuration_data": initial value dictionary key "{k!r}" must be "str | int | bool", not "{v!r}"')
            conf = build.ConfigurationData(conf)
        mlog.log('Configuring', mlog.bold(output), 'using configuration')
        if len(inputs) > 1:
            # NOTE: fixed message typo ("can given" -> "can be given").
            raise InterpreterException('At most one input file can be given in configuration mode')
        if inputs:
            os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
            file_encoding = kwargs['encoding']
            missing_variables, confdata_useless = \
                mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf,
                                      fmt, file_encoding)
            if missing_variables:
                var_list = ", ".join(map(repr, sorted(missing_variables)))
                mlog.warning(
                    f"The variable(s) {var_list} in the input file '{inputs[0]}' are not "
                    "present in the given configuration data.", location=node)
            if confdata_useless:
                ifbase = os.path.basename(inputs_abs[0])
                tv = FeatureNew.get_target_version(self.subproject)
                if FeatureNew.check_version(tv, '0.47.0'):
                    mlog.warning('Got an empty configuration_data() object and found no '
                                 f'substitutions in the input file {ifbase!r}. If you want to '
                                 'copy a file to the build dir, use the \'copy:\' keyword '
                                 'argument added in 0.47.0', location=node)
        else:
            # No input: emit a header containing just the configuration data.
            mesonlib.dump_conf_header(ofile_abs, conf, output_format)
        conf.used = True
    elif kwargs['command'] is not None:
        if len(inputs) > 1:
            FeatureNew.single_use('multiple inputs in configure_file()', '0.52.0', self.subproject, location=node)
        # We use absolute paths for input and output here because the cwd
        # that the command is run from is 'unspecified', so it could change.
        # Currently it's builddir/subdir for in_builddir else srcdir/subdir.
        values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs])
        if depfile:
            depfile = os.path.join(self.environment.get_scratch_dir(), depfile)
            values['@DEPFILE@'] = depfile
        # Substitute @INPUT@, @OUTPUT@, etc here.
        _cmd = mesonlib.substitute_values(kwargs['command'], values)
        mlog.log('Configuring', mlog.bold(output), 'with command')
        # Renamed from "args" to avoid shadowing the function parameter.
        cmd, *cmd_args = _cmd
        res = self.run_command_impl(node, (cmd, cmd_args),
                                    {'capture': True, 'check': True, 'env': build.EnvironmentVariables()},
                                    True)
        if kwargs['capture']:
            # Write to a temp file and only replace the real output if it
            # changed, to avoid spurious rebuilds.
            dst_tmp = ofile_abs + '~'
            file_encoding = kwargs['encoding']
            with open(dst_tmp, 'w', encoding=file_encoding) as f:
                f.writelines(res.stdout)
            if inputs_abs:
                shutil.copymode(inputs_abs[0], dst_tmp)
            mesonlib.replace_if_different(ofile_abs, dst_tmp)
        if depfile:
            mlog.log('Reading depfile:', mlog.bold(depfile))
            with open(depfile, encoding='utf-8') as f:
                df = DepFile(f.readlines())
                deps = df.get_all_dependencies(ofile_fname)
                for dep in deps:
                    self.add_build_def_file(dep)
    elif kwargs['copy']:
        if len(inputs_abs) != 1:
            raise InterpreterException('Exactly one input file must be given in copy mode')
        os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
        shutil.copy2(inputs_abs[0], ofile_abs)
    # Install file if requested, we check for the empty string
    # for backwards compatibility. That was the behaviour before
    # 0.45.0 so preserve it.
    idir = kwargs['install_dir']
    if idir is False:
        idir = ''
        FeatureDeprecated.single_use('configure_file install_dir: false', '0.50.0',
                                     self.subproject, 'Use the `install:` kwarg instead', location=node)
    install = kwargs['install'] if kwargs['install'] is not None else idir != ''
    if install:
        if not idir:
            raise InterpreterException(
                '"install_dir" must be specified when "install" in a configure_file is true')
        cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname)
        install_mode = kwargs['install_mode']
        install_tag = kwargs['install_tag']
        self.build.data.append(build.Data([cfile], idir, idir, install_mode, self.subproject,
                                          install_tag=install_tag, data_type='configure'))
    return mesonlib.File.from_built_file(self.subdir, output)
def extract_incdirs(self, kwargs, key: str = 'include_directories'):
    """Normalize the *key* kwarg to a list of build.IncludeDirs objects."""
    incdirs = []
    for item in extract_as_list(kwargs, key):
        if isinstance(item, str):
            incdirs.append(self.build_incdir_object([item]))
        elif isinstance(item, build.IncludeDirs):
            incdirs.append(item)
        else:
            raise InterpreterException('Include directory objects can only be created from strings or include directories.')
    return incdirs
@typed_pos_args('include_directories', varargs=str)
@typed_kwargs('include_directories', KwargInfo('is_system', bool, default=False))
def func_include_directories(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]],
                             kwargs: 'kwargs.FuncIncludeDirectories') -> build.IncludeDirs:
    """Implementation of the include_directories() function."""
    dir_strings = args[0]
    return self.build_incdir_object(dir_strings, kwargs['is_system'])
def build_incdir_object(self, incdir_strings: T.List[str], is_system: bool = False) -> build.IncludeDirs:
    """Create an IncludeDirs object from relative directory strings.

    Rejects absolute paths into the source tree, warns (for now) about
    paths that escape the current subproject, and verifies each directory
    exists in the source tree or the build tree.
    """
    if not isinstance(is_system, bool):
        raise InvalidArguments('Is_system must be boolean.')
    src_root = self.environment.get_source_dir()
    build_root = self.environment.get_build_dir()
    absbase_src = os.path.join(src_root, self.subdir)
    absbase_build = os.path.join(build_root, self.subdir)
    for a in incdir_strings:
        # Absolute paths into the source tree defeat out-of-tree builds.
        if a.startswith(src_root):
            raise InvalidArguments(textwrap.dedent('''\
                Tried to form an absolute path to a source dir.
                You should not do that but use relative paths instead.

                To get include path to any directory relative to the current dir do

                incdir = include_directories(dirname)

                After this incdir will contain both the current source dir as well as the
                corresponding build dir. It can then be used in any subdirectory and
                Meson will take care of all the busywork to make paths work.

                Dirname can even be '.' to mark the current directory. Though you should
                remember that the current source and build directories are always
                put in the include directories by default so you only need to do
                include_directories('.') if you intend to use the result in a
                different subdirectory.
                '''))
        else:
            try:
                self.validate_within_subproject(self.subdir, a)
            except InterpreterException:
                # Cross-subproject include dirs are deprecated, not yet fatal.
                mlog.warning('include_directories sandbox violation!', location=self.current_node)
                print(textwrap.dedent(f'''\
                    The project is trying to access the directory {a!r} which belongs to a different
                    subproject. This is a problem as it hardcodes the relative paths of these two projects.
                    This makes it impossible to compile the project in any other directory layout and also
                    prevents the subproject from changing its own directory layout.

                    Instead of poking directly at the internals the subproject should be executed and
                    it should set a variable that the caller can then use. Something like:

                    # In subproject
                    some_dep = declare_dependency(include_directories: include_directories('include'))

                    # In subproject wrap file
                    [provide]
                    some = some_dep

                    # In parent project
                    some_dep = dependency('some')
                    executable(..., dependencies: [some_dep])

                    This warning will become a hard error in a future Meson release.
                    '''))
        # The directory may exist only in the source tree, only in the build
        # tree (generated headers), or both.
        absdir_src = os.path.join(absbase_src, a)
        absdir_build = os.path.join(absbase_build, a)
        if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build):
            raise InvalidArguments(f'Include dir {a} does not exist.')
    i = build.IncludeDirs(self.subdir, incdir_strings, is_system)
    return i
@typed_pos_args('add_test_setup', str)
@typed_kwargs(
    'add_test_setup',
    KwargInfo('exe_wrapper', ContainerTypeInfo(list, (str, ExternalProgram)), listify=True, default=[]),
    KwargInfo('gdb', bool, default=False),
    KwargInfo('timeout_multiplier', int, default=1),
    KwargInfo('exclude_suites', ContainerTypeInfo(list, str), listify=True, default=[], since='0.57.0'),
    KwargInfo('is_default', bool, default=False, since='0.49.0'),
    ENV_KW,
)
def func_add_test_setup(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.AddTestSetup') -> None:
    """Implementation of the add_test_setup() function."""
    setup_name = args[0]
    if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None:
        raise InterpreterException('Setup name may only contain alphanumeric characters.')
    if ":" not in setup_name:
        # Namespace the setup under the (sub)project that defined it.
        setup_name = f'{(self.subproject if self.subproject else self.build.project_name)}:{setup_name}'
    wrapper_cmd: T.List[str] = []
    for item in kwargs['exe_wrapper']:
        if isinstance(item, str):
            wrapper_cmd.append(item)
            continue
        if not item.found():
            raise InterpreterException('Tried to use non-found executable.')
        wrapper_cmd += item.get_command()
    multiplier = kwargs['timeout_multiplier']
    if multiplier <= 0:
        FeatureNew('add_test_setup() timeout_multiplier <= 0', '0.57.0').use(self.subproject)
    if kwargs['is_default']:
        if self.build.test_setup_default_name is not None:
            raise InterpreterException(f'{self.build.test_setup_default_name!r} is already set as default. '
                                       'is_default can be set to true only once')
        self.build.test_setup_default_name = setup_name
    self.build.test_setups[setup_name] = build.TestSetup(wrapper_cmd, kwargs['gdb'], multiplier, kwargs['env'],
                                                         kwargs['exclude_suites'])
@typed_pos_args('add_global_arguments', varargs=str)
@typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_global_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
    """Implementation of the add_global_arguments() function."""
    machine_args = self.build.global_args[kwargs['native']]
    self._add_global_arguments(node, machine_args, args[0], kwargs)
@typed_pos_args('add_global_link_arguments', varargs=str)
# Fixed: typed_kwargs previously named 'add_global_arguments', so kwarg
# diagnostics blamed the wrong function.
@typed_kwargs('add_global_link_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_global_link_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
    """Implementation of the add_global_link_arguments() function."""
    self._add_global_arguments(node, self.build.global_link_args[kwargs['native']], args[0], kwargs)
@typed_pos_args('add_project_arguments', varargs=str)
@typed_kwargs('add_project_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_project_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
    """Implementation of the add_project_arguments() function."""
    machine_args = self.build.projects_args[kwargs['native']]
    self._add_project_arguments(node, machine_args, args[0], kwargs)
@typed_pos_args('add_project_link_arguments', varargs=str)
# Fixed: typed_kwargs previously named 'add_global_arguments', so kwarg
# diagnostics blamed the wrong function.
@typed_kwargs('add_project_link_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_project_link_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
    """Implementation of the add_project_link_arguments() function."""
    self._add_project_arguments(node, self.build.projects_link_args[kwargs['native']], args[0], kwargs)
def _warn_about_builtin_args(self, args: T.List[str]) -> None:
# -Wpedantic is deliberately not included, since some people want to use it but not use -Wextra
# see e.g.
# https://github.com/mesonbuild/meson/issues/3275#issuecomment-641354956
# https://github.com/mesonbuild/meson/issues/3742
warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra')
optargs = ('-O0', '-O2', '-O3', '-Os', '-Oz', '/O1', '/O2', '/Os')
for arg in args:
if arg in warnargs:
mlog.warning(f'Consider using the built-in warning_level option instead of using "{arg}".',
location=self.current_node)
elif arg in optargs:
mlog.warning(f'Consider using the built-in optimization level instead of using "{arg}".',
location=self.current_node)
elif arg == '-Werror':
mlog.warning(f'Consider using the built-in werror option instead of using "{arg}".',
location=self.current_node)
elif arg == '-g':
mlog.warning(f'Consider using the built-in debug option instead of using "{arg}".',
location=self.current_node)
elif arg.startswith('-fsanitize'):
mlog.warning(f'Consider using the built-in option for sanitizers instead of using "{arg}".',
location=self.current_node)
elif arg.startswith('-std=') or arg.startswith('/std:'):
mlog.warning(f'Consider using the built-in option for language standard version instead of using "{arg}".',
location=self.current_node)
def _add_global_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.List[str]],
                          args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
    """Record cross-subproject arguments; only legal in the main project."""
    if not self.is_subproject():
        # Global args freeze once either kind of args has been frozen.
        frozen = self.project_args_frozen or self.global_args_frozen
        self._add_arguments(node, argsdict, frozen, args, kwargs)
        return
    raise InvalidCode(
        f'Function \'{node.func_name}\' cannot be used in subprojects because '
        'there is no way to make that reliable.\nPlease only call '
        'this if is_subproject() returns false. Alternatively, '
        'define a variable that\ncontains your language-specific '
        'arguments and add it to the appropriate *_args kwarg '
        'in each target.')
def _add_project_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.Dict[str, T.List[str]]],
                           args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
    """Record per-subproject arguments, creating this subproject's slot on first use."""
    per_project = argsdict.setdefault(self.subproject, {})
    self._add_arguments(node, per_project, self.project_args_frozen, args, kwargs)
def _add_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.List[str]],
                   args_frozen: bool, args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
    """Append *args* to every requested language's list, unless frozen."""
    if args_frozen:
        raise InvalidCode(f'Tried to use \'{node.func_name}\' after a build target has been declared.\n'
                          'This is not permitted. Please declare all arguments before your targets.')
    self._warn_about_builtin_args(args)
    for lang in kwargs['language']:
        # Build a fresh list rather than mutating, so aliased lists stay intact.
        argsdict[lang] = argsdict.get(lang, []) + args
@noArgsFlattening
@typed_pos_args('environment', optargs=[(str, list, dict)])
@typed_kwargs('environment', ENV_METHOD_KW, ENV_SEPARATOR_KW.evolve(since='0.62.0'))
def func_environment(self, node: mparser.FunctionNode, args: T.Tuple[T.Union[None, str, T.List['TYPE_var'], T.Dict[str, 'TYPE_var']]],
                     kwargs: 'TYPE_kwargs') -> build.EnvironmentVariables:
    """Implementation of the environment() function."""
    initial = args[0]
    if initial is None:
        # No positional argument: start from an empty environment.
        return build.EnvironmentVariables()
    FeatureNew.single_use('environment positional arguments', '0.52.0', self.subproject, location=node)
    msg = ENV_KW.validator(initial)
    if msg:
        raise InvalidArguments(f'"environment": {msg}')
    # Note: deliberately only flags non-empty list values (truthiness filter).
    if isinstance(initial, dict) and any(i for i in initial.values() if isinstance(i, list)):
        FeatureNew.single_use('List of string in dictionary value', '0.62.0', self.subproject, location=node)
    return env_convertor_with_method(initial, kwargs['method'], kwargs['separator'])
@typed_pos_args('join_paths', varargs=str, min_varargs=1)
@noKwargs
def func_join_paths(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]], kwargs: 'TYPE_kwargs') -> str:
    """Implementation of join_paths(); always returns forward-slash paths."""
    joined = os.path.join(*args[0])
    return joined.replace('\\', '/')
def run(self) -> None:
    """Interpret the whole project, then emit end-of-run reports.

    Ordering matters: targets are counted after evaluation, feature-use
    reports come next, and the extra warnings/summary are only printed
    for the top-level project.
    """
    super().run()
    mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets))))
    FeatureNew.report(self.subproject)
    FeatureDeprecated.report(self.subproject)
    if not self.is_subproject():
        self.print_extra_warnings()
    if self.subproject == '':
        self._print_summary()
def print_extra_warnings(self) -> None:
    """Emit extra toolchain-specific warnings (currently clang+asan only)."""
    # TODO cross compilation
    if any(comp.get_id() == 'clang' for comp in self.coredata.compilers.host.values()):
        self.check_clang_asan_lundef()
def check_clang_asan_lundef(self) -> None:
    """Warn that clang + b_lundef + a sanitizer is a known-bad combination."""
    opts = self.coredata.options
    lundef_key = OptionKey('b_lundef')
    sanitize_key = OptionKey('b_sanitize')
    if lundef_key not in opts or sanitize_key not in opts:
        return
    sanitizer = opts[sanitize_key].value
    if opts[lundef_key].value and sanitizer != 'none':
        mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef.
This will probably not work.
Try setting b_lundef to false instead.'''.format(sanitizer),
                     location=self.current_node)
# Check that the indicated file is within the same subproject
# as we currently are. This is to stop people doing
# nasty things like:
#
# f = files('../../master_src/file.c')
#
# Note that this is validated only when the file
# object is generated. The result can be used in a different
# subproject than it is defined in (due to e.g. a
# declare_dependency).
def validate_within_subproject(self, subdir, fname):
    """Sandbox check: raise if subdir/fname reaches into another (sub)project.

    Files fully outside the source tree are allowed; files inside it must
    belong to the current (sub)project and must not live inside a nested
    subproject directory.
    """
    srcdir = Path(self.environment.source_dir)
    # resolve() canonicalizes symlinks/.. so prefix checks are meaningful.
    norm = Path(srcdir, subdir, fname).resolve()
    if os.path.isdir(norm):
        inputtype = 'directory'
    else:
        inputtype = 'file'
    if srcdir not in norm.parents:
        # Grabbing files outside the source tree is ok.
        # This is for vendor stuff like:
        #
        # /opt/vendorsdk/src/file_with_license_restrictions.c
        return
    project_root = Path(srcdir, self.root_subdir)
    subproject_dir = project_root / self.subproject_dir
    if norm == project_root:
        return
    if project_root not in norm.parents:
        raise InterpreterException(f'Sandbox violation: Tried to grab {inputtype} {norm.name} outside current (sub)project.')
    if subproject_dir == norm or subproject_dir in norm.parents:
        raise InterpreterException(f'Sandbox violation: Tried to grab {inputtype} {norm.name} from a nested subproject.')
@T.overload
def source_strings_to_files(self, sources: T.List['mesonlib.FileOrString'], strict: bool = True) -> T.List['mesonlib.File']: ...
@T.overload
def source_strings_to_files(self, sources: T.List['mesonlib.FileOrString'], strict: bool = False) -> T.List['mesonlib.FileOrString']: ... # noqa: F811
@T.overload
def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool = True) -> T.List['SourceOutputs']: ... # noqa: F811
def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool = True) -> T.List['SourceOutputs']: # noqa: F811
    """Lower inputs to a list of Targets and Files, replacing any strings.

    :param sources: A raw (Meson DSL) list of inputs (targets, files, and
        strings)
    :raises InterpreterException: if any of the inputs are of an invalid type
    :return: A list of Targets and Files
    """
    mesonlib.check_direntry_issues(sources)
    if not isinstance(sources, list):
        sources = [sources]
    results: T.List['SourceOutputs'] = []
    for s in sources:
        if isinstance(s, str):
            if not strict and s.startswith(self.environment.get_build_dir()):
                # Legacy behaviour: build-dir strings pass through unconverted.
                results.append(s)
                mlog.warning(f'Source item {s!r} cannot be converted to File object, because it is a generated file. '
                             'This will become a hard error in the future.', location=self.current_node)
            else:
                # Source-tree strings must respect the subproject sandbox.
                self.validate_within_subproject(self.subdir, s)
                results.append(mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s))
        elif isinstance(s, mesonlib.File):
            results.append(s)
        elif isinstance(s, (build.GeneratedList, build.BuildTarget,
                            build.CustomTargetIndex, build.CustomTarget,
                            build.ExtractedObjects, build.StructuredSources)):
            # Already a build-system object; passed through untouched.
            results.append(s)
        else:
            raise InterpreterException(f'Source item is {s!r} instead of '
                                       'string or File-type object')
    return results
def add_target(self, name, tobj):
    """Validate a target name and register target object *tobj* with the build.

    :param name: the user-visible target name
    :param tobj: the constructed build target object
    :raises InterpreterException: for empty or whitespace-only names
    :raises InvalidArguments: for path-like or reserved names
    :raises InvalidCode: when a target with the same id already exists
    """
    if name == '':
        raise InterpreterException('Target name must not be empty.')
    if name.strip() == '':
        raise InterpreterException('Target name must not consist only of whitespace.')
    if has_path_sep(name):
        # A path-like name is only an error when it points at a directory
        # that actually exists; the target must be declared there instead.
        pathseg = os.path.join(self.subdir, os.path.split(name)[0])
        if os.path.exists(os.path.join(self.source_root, pathseg)):
            raise InvalidArguments(textwrap.dedent(f'''\
                Target "{name}" has a path segment pointing to directory "{pathseg}". This is an error.
                To define a target that builds in that directory you must define it
                in the meson.build file in that directory.
            '''))
    if name.startswith('meson-'):
        raise InvalidArguments("Target names starting with 'meson-' are reserved "
                               "for Meson's internal use. Please rename.")
    if name in coredata.FORBIDDEN_TARGET_NAMES:
        raise InvalidArguments(f"Target name '{name}' is reserved for Meson's "
                               "internal use. Please rename.")
    # To permit an executable and a shared library to have the
    # same name, such as "foo.exe" and "libfoo.a".
    idname = tobj.get_id()
    if idname in self.build.targets:
        raise InvalidCode(f'Tried to create target "{name}", but a target of that name already exists.')
    self.build.targets[idname] = tobj
    # GUIDs are stable per id; used by project-file backends (e.g. Visual Studio).
    if idname not in self.coredata.target_guids:
        self.coredata.target_guids[idname] = str(uuid.uuid4()).upper()
@FeatureNew('both_libraries', '0.46.0')
def build_both_libraries(self, node, args, kwargs):
    """Build a shared library plus a static twin, sharing objects when PIC allows."""
    shared_lib = self.build_target(node, args, kwargs, build.SharedLibrary)
    # Check if user forces non-PIC static library.
    if 'pic' in kwargs:
        pic = kwargs['pic']
    else:
        key = OptionKey('b_staticpic')
        pic = self.environment.coredata.options[key].value if key in self.environment.coredata.options else True
    # Xcode is a bit special in that you can't (at least for the moment)
    # form a library only from object file inputs. The simple but inefficient
    # solution is to use the sources directly. This will lead to them being
    # built twice. This is unfortunate and slow, but at least it works.
    # Feel free to submit patches to get this fixed if it is an
    # issue for you.
    reuse_object_files = pic and self.backend.name != 'xcode'
    if reuse_object_files:
        # Exclude sources from args and kwargs to avoid building them twice
        static_args = [args[0]]
        static_kwargs = kwargs.copy()
        static_kwargs['sources'] = []
        static_kwargs['objects'] = shared_lib.extract_all_objects()
    else:
        static_args = args
        static_kwargs = kwargs
    static_lib = self.build_target(node, static_args, static_kwargs, build.StaticLibrary)
    return build.BothLibraries(shared_lib, static_lib)
def build_library(self, node, args, kwargs):
    """Dispatch library() to shared/static/both per the default_library option."""
    default_library = self.coredata.get_option(OptionKey('default_library', subproject=self.subproject))
    if default_library == 'both':
        return self.build_both_libraries(node, args, kwargs)
    if default_library == 'shared':
        return self.build_target(node, args, kwargs, build.SharedLibrary)
    if default_library == 'static':
        return self.build_target(node, args, kwargs, build.StaticLibrary)
    raise InterpreterException(f'Unknown default_library value: {default_library}.')
def build_target(self, node: mparser.BaseNode, args, kwargs, targetclass):
    """Shared implementation behind executable(), *library(), jar(), etc.

    Normalizes sources, objects and kwargs, validates them, constructs an
    instance of *targetclass* and registers it with the build.
    """
    @FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories'])
    @FeatureNewKwargs('build target', '0.41.0', ['rust_args'])
    @FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
    @FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility'])
    def build_target_decorator_caller(self, node, args, kwargs):
        return True
    # Invoked purely so the FeatureNewKwargs decorators above can flag
    # kwargs newer than the project's target meson version.
    build_target_decorator_caller(self, node, args, kwargs)
    if not args:
        raise InterpreterException('Target does not have a name.')
    name, *sources = args
    for_machine = self.machine_from_native_kwarg(kwargs)
    if 'sources' in kwargs:
        sources += listify(kwargs['sources'])
    sources = self.source_strings_to_files(sources)
    objs = extract_as_list(kwargs, 'objects')
    kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies')
    kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
    if 'extra_files' in kwargs:
        ef = extract_as_list(kwargs, 'extra_files')
        kwargs['extra_files'] = self.source_strings_to_files(ef)
    self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources)
    if targetclass not in {build.Executable, build.SharedLibrary, build.SharedModule, build.StaticLibrary, build.Jar}:
        mlog.debug('Unknown target type:', str(targetclass))
        raise RuntimeError('Unreachable code')
    self.kwarg_strings_to_includedirs(kwargs)
    # Filter out kwargs from other target types. For example 'soversion'
    # passed to library() when default_library == 'static'.
    kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs}
    # Split plain sources from structured sources; structured ones are merged
    # into a single StructuredSources object.
    srcs: T.List['SourceInputs'] = []
    struct: T.Optional[build.StructuredSources] = build.StructuredSources()
    for s in sources:
        if isinstance(s, build.StructuredSources):
            struct = struct + s
        else:
            srcs.append(s)
    if not struct:
        struct = None
    else:
        # Validate that we won't end up with two outputs with the same name.
        # i.e, don't allow:
        # [structured_sources('foo/bar.rs'), structured_sources('bar/bar.rs')]
        for v in struct.sources.values():
            outputs: T.Set[str] = set()
            for f in v:
                o: T.List[str]
                if isinstance(f, str):
                    o = [os.path.basename(f)]
                elif isinstance(f, mesonlib.File):
                    o = [f.fname]
                else:
                    o = f.get_outputs()
                conflicts = outputs.intersection(o)
                if conflicts:
                    raise InvalidArguments.from_node(
                        f"Conflicting sources in structured sources: {', '.join(sorted(conflicts))}",
                        node=node)
                outputs.update(o)
    kwargs['include_directories'] = self.extract_incdirs(kwargs)
    target = targetclass(name, self.subdir, self.subproject, for_machine, srcs, struct, objs, self.environment, kwargs)
    target.project_version = self.project_version
    self.add_stdlib_info(target)
    self.add_target(name, target)
    # Once a target exists, add_project_arguments() and friends are frozen.
    self.project_args_frozen = True
    return target
def kwarg_strings_to_includedirs(self, kwargs):
    """Convert legacy string entries of d_import_dirs into IncludeDirs objects."""
    if 'd_import_dirs' not in kwargs:
        return
    converted = []
    for entry in mesonlib.extract_as_list(kwargs, 'd_import_dirs'):
        if isinstance(entry, str):
            # BW compatibility. This was permitted so we must support it
            # for a few releases so people can transition to "correct"
            # path declarations.
            if os.path.normpath(entry).startswith(self.environment.get_source_dir()):
                mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead.
This will become a hard error in the future.''', location=self.current_node)
                entry = os.path.relpath(entry, os.path.join(self.environment.get_source_dir(), self.subdir))
            entry = self.build_incdir_object([entry])
        converted.append(entry)
    kwargs['d_import_dirs'] = converted
def get_used_languages(self, target):
    """Return the set of languages needed to compile *target*'s sources.

    For each source, only the first compiler (in coredata order) that can
    compile it contributes its language.
    """
    used = set()
    compilers = self.coredata.compilers[target.for_machine]
    for src in target.sources:
        for lang, comp in compilers.items():
            if comp.can_compile(src):
                used.add(lang)
                break
    return used
def add_stdlib_info(self, target):
    """Attach any configured per-language stdlib dependencies to *target*."""
    machine_stdlibs = self.build.stdlibs[target.for_machine]
    for lang in self.get_used_languages(target):
        lang_dep = machine_stdlibs.get(lang, None)
        if lang_dep:
            target.add_deps(lang_dep)
def check_sources_exist(self, subdir, sources):
    """Raise if any plain-string source does not exist on disk under *subdir*."""
    for s in sources:
        # Non-strings are generated sources (File/target objects) and always exist.
        if isinstance(s, str):
            candidate = os.path.join(subdir, s)
            if not os.path.isfile(candidate):
                raise InterpreterException(f'Tried to add non-existing source file {s}.')
# Only permit object extraction from the same subproject
def validate_extraction(self, buildtarget: mesonlib.HoldableObject) -> None:
    """Raise unless *buildtarget* belongs to the current subproject."""
    if buildtarget.subproject != self.subproject:
        raise InterpreterException('Tried to extract objects from a different subproject.')
def is_subproject(self) -> bool:
    # The top-level project has '' as its subproject name; anything else
    # means we are evaluating a subproject.
    return self.subproject != ''
@typed_pos_args('set_variable', str, object)
@noKwargs
@noArgsFlattening
@noSecondLevelHolderResolving
def func_set_variable(self, node: mparser.BaseNode, args: T.Tuple[str, object], kwargs: 'TYPE_kwargs') -> None:
    """Implementation of the set_variable() function."""
    name, value = args
    self.set_variable(name, value, holderify=True)
@typed_pos_args('get_variable', (str, Disabler), optargs=[object])
@noKwargs
@noArgsFlattening
@unholder_return
def func_get_variable(self, node: mparser.BaseNode, args: T.Tuple[T.Union[str, Disabler], T.Optional[object]],
                      kwargs: 'TYPE_kwargs') -> 'TYPE_var':
    """Implementation of `get_variable(name[, fallback])`.

    A Disabler given as the name propagates unchanged.  Unknown names
    return the fallback when one was supplied, otherwise raise.
    """
    varname, fallback = args
    if isinstance(varname, Disabler):
        return varname
    try:
        return self.variables[varname]
    except KeyError:
        if fallback is not None:
            return self._holderify(fallback)
    # No such variable and no fallback supplied.
    raise InterpreterException(f'Tried to get unknown variable "{varname}".')
@typed_pos_args('is_variable', str)
@noKwargs
def func_is_variable(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> bool:
    """Implementation of `is_variable(name)`: True if the name is defined."""
    return args[0] in self.variables
@FeatureNew('unset_variable', '0.60.0')
@typed_pos_args('unset_variable', str)
@noKwargs
def func_unset_variable(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> None:
    """Implementation of `unset_variable(name)`; unknown names are an error."""
    varname = args[0]
    try:
        del self.variables[varname]
    except KeyError:
        raise InterpreterException(f'Tried to unset unknown variable "{varname}".')
@staticmethod
def machine_from_native_kwarg(kwargs: T.Dict[str, T.Any]) -> MachineChoice:
    """Map the boolean 'native' kwarg to the machine it selects."""
    native = kwargs.get('native', False)
    if not isinstance(native, bool):
        raise InvalidArguments('Argument to "native" must be a boolean.')
    if native:
        return MachineChoice.BUILD
    return MachineChoice.HOST
@FeatureNew('is_disabler', '0.52.0')
@typed_pos_args('is_disabler', object)
@noKwargs
def func_is_disabler(self, node: mparser.BaseNode, args: T.Tuple[object], kwargs: 'TYPE_kwargs') -> bool:
    """Implementation of `is_disabler(value)`."""
    return isinstance(args[0], Disabler)
@noKwargs
@FeatureNew('range', '0.58.0')
@typed_pos_args('range', int, optargs=[int, int])
def func_range(self, node, args: T.Tuple[int, T.Optional[int], T.Optional[int]], kwargs: T.Dict[str, T.Any]) -> P_OBJ.RangeHolder:
    """Implementation of `range([start, ]stop[, step])`."""
    start, stop, step = args
    # Just like Python's range, we allow range(stop), range(start, stop), or
    # range(start, stop, step)
    if stop is None:
        stop = start
        start = 0
    if step is None:
        step = 1
    # This is more strict than Python's range()
    if start < 0:
        raise InterpreterException('start cannot be negative')
    if stop < start:
        raise InterpreterException('stop cannot be less than start')
    if step < 1:
        raise InterpreterException('step must be >=1')
    return P_OBJ.RangeHolder(start, stop, step, subproject=self.subproject)
|
# Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import logging
from time import sleep
from streamsets.testframework.markers import sdc_min_version
from streamsets.sdk.sdc_models import Metrics
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def sdc_common_hook():
    """Return a hook that installs the Groovy stage library on the SDC."""
    def configure(data_collector):
        # The Groovy Evaluator used by these tests lives in this stage lib.
        data_collector.add_stage_lib('streamsets-datacollector-groovy_2_4-lib')
    return configure
# SDC-11777: provide way to easily see where a pipeline is when it is stuck in STARTING
@sdc_min_version('3.15.0')
def test_runner_metrics_for_init_and_destroy(sdc_builder, sdc_executor):
    """Ensure that we properly update metrics when the runner is in starting phase."""
    builder = sdc_builder.get_pipeline_builder()
    SLEEP_SCRIPT = "sleep(5*1000)"
    # Super simple cluster pipeline
    source = builder.add_stage('Dev Data Generator')
    groovy = builder.add_stage('Groovy Evaluator', type='processor')
    groovy.init_script = SLEEP_SCRIPT
    groovy.destroy_script = SLEEP_SCRIPT
    groovy.script = SLEEP_SCRIPT
    trash = builder.add_stage('Trash')
    source >> groovy >> trash
    pipeline = builder.build()
    sdc_executor.add_pipeline(pipeline)
    # Start the pipeline, it should take at least 5 seconds (since the sleep) and we check that at least once
    # we have seen the metrics we're looking for.
    sdc_executor.start_pipeline(pipeline, wait=False)
    count = 0
    while True:
        # TLKT-468: SDC object doesn't expose get_pipeline_metrics method
        metrics_json = sdc_executor.api_client.get_pipeline_metrics(pipeline.id)
        if metrics_json:
            metrics = Metrics(metrics_json)
            # BUG FIX: the f-string nested double quotes inside a double-quoted
            # literal (a SyntaxError before Python 3.12); also hoist the gauge
            # lookup so it is done once per poll.
            state = metrics.gauge('runner.0.gauge').value['state']
            logger.info('Detected runtime gauge state %s', state)
            if state == 'Starting':
                count += 1
        status = sdc_executor.get_pipeline_status(pipeline).response.json()
        sleep(0.5)
        if status.get('status') == 'RUNNING':
            break
    assert count > 0
    sdc_executor.stop_pipeline(pipeline)
| # Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import logging
from time import sleep
from streamsets.testframework.markers import sdc_min_version
from streamsets.sdk.sdc_models import Metrics
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def sdc_common_hook():
    """Return a hook that installs the Groovy stage library on the SDC."""
    def configure(data_collector):
        # The Groovy Evaluator used by these tests lives in this stage lib.
        data_collector.add_stage_lib('streamsets-datacollector-groovy_2_4-lib')
    return configure
# SDC-11777: provide way to easily see where a pipeline is when it is stuck in STARTING
@sdc_min_version('3.15.0')
def test_runner_metrics_for_init_and_destroy(sdc_builder, sdc_executor):
    """Ensure that we properly update metrics when the runner is in starting phase."""
    builder = sdc_builder.get_pipeline_builder()
    SLEEP_SCRIPT = "sleep(5*1000)"
    # Super simple cluster pipeline
    source = builder.add_stage('Dev Data Generator')
    groovy = builder.add_stage('Groovy Evaluator', type='processor')
    # The sleeps keep the pipeline in the STARTING phase long enough for the
    # polling loop below to observe the runner gauge at least once.
    groovy.init_script = SLEEP_SCRIPT
    groovy.destroy_script = SLEEP_SCRIPT
    groovy.script = SLEEP_SCRIPT
    trash = builder.add_stage('Trash')
    source >> groovy >> trash
    pipeline = builder.build()
    sdc_executor.add_pipeline(pipeline)
    # Start the pipeline, it should take at least 5 seconds (since the sleep) and we check that at least once
    # we have seen the metrics we're looking for.
    sdc_executor.start_pipeline(pipeline, wait=False)
    count = 0
    while True:
        # TLKT-468: SDC object doesn't expose get_pipeline_metrics method
        metrics_json = sdc_executor.api_client.get_pipeline_metrics(pipeline.id)
        if metrics_json:
            metrics = Metrics(metrics_json)
            logger.info(f"Detected runtime gauge state {metrics.gauge('runner.0.gauge').value['state']}")
            if metrics.gauge('runner.0.gauge').value['state'] == 'Starting':
                count += 1
        status = sdc_executor.get_pipeline_status(pipeline).response.json()
        sleep(0.5)
        # Stop polling once the pipeline has fully started.
        if status.get('status') == 'RUNNING':
            break
    assert count > 0
    sdc_executor.stop_pipeline(pipeline)
|
# https://github.com/theeko74/pdfc
# modified by brio50 on 2022/01/23, working with gs version 9.54.0
"""
Simple python wrapper script to use ghoscript function to compress PDF files.
Compression levels:
0: default
1: prepress
2: printer
3: ebook
4: screen
Dependency: Ghostscript.
On MacOSX install via command line `brew install ghostscript`.
"""
import argparse
import subprocess
import os.path
import sys
import shutil
def compress(input_file_path, output_file_path, level=0, method=1):
    """Compress a PDF via the Ghostscript command line interface.

    *level* selects a Ghostscript /PDFSETTINGS quality preset (0-4, see the
    module docstring) and *method* selects one of two argument sets.  Returns
    [initial_size, final_size, ratio] with sizes in MB.  If method 1 grows
    the file, the compression is retried once with method 2 at 'screen'
    quality.  Exits the process on an invalid input path or extension.
    """
    # Ghostscript -dPDFSETTINGS quality presets, indexed by level.
    quality = {
        0: '/default',
        1: '/prepress',
        2: '/printer',
        3: '/ebook',
        4: '/screen'
    }
    # Check if valid path
    if not os.path.isfile(input_file_path):
        print(f"Error: invalid path for input file: {input_file_path}")
        sys.exit(1)
    # Check if file is a PDF by extension
    if input_file_path.split('.')[-1].lower() != 'pdf':  # not sure this is the most robust solution
        print(f"Error: input file is not a PDF: {input_file_path}")
        sys.exit(1)
    gs = get_ghostscript_path()
    file_name = input_file_path.split('/')[-1]  # everything after last '/'
    print("Compressing PDF \"{}\"...".format(file_name))
    if method == 1:
        # https://gist.github.com/lkraider/f0888da30bc352f9d167dfa4f4fc8213
        cmd = [gs, '-sDEVICE=pdfwrite',
               '-dNumRenderingThreads=2',
               '-dPDFSETTINGS={}'.format(quality[level]),
               '-dCompatibilityLevel=1.5',
               '-dNOPAUSE', '-dQUIET', '-dBATCH', '-dSAFER',
               # font settings
               '-dSubsetFonts=true',
               '-dCompressFonts=true',
               '-dEmbedAllFonts=true',
               # color format
               '-sProcessColorModel=DeviceRGB',
               '-sColorConversionStrategy=RGB',
               '-sColorConversionStrategyForImages=RGB',
               '-dConvertCMYKImagesToRGB=true',
               # image resample
               '-dDetectDuplicateImages=true',
               '-dColorImageDownsampleType=/Bicubic',
               '-dColorImageResolution=300',
               '-dGrayImageDownsampleType=/Bicubic',
               '-dGrayImageResolution=300',
               '-dMonoImageDownsampleType=/Subsample',
               '-dMonoImageResolution=300',
               '-dDownsampleColorImages=true',
               # preset overrides
               '-dDoThumbnails=false',
               '-dCreateJobTicket=false',
               '-dPreserveEPSInfo=false',
               '-dPreserveOPIComments=false',
               '-dPreserveOverprintSettings=false',
               '-dUCRandBGInfo=/Remove',
               '-sOutputFile={}'.format(output_file_path),
               input_file_path]
    elif method == 2:
        cmd = [gs, '-sDEVICE=pdfwrite',
               '-dNumRenderingThreads=2',
               '-dPDFSETTINGS={}'.format(quality[level]),
               '-dCompatibilityLevel=1.4',
               '-dNOPAUSE', '-dQUIET', '-dBATCH', '-dSAFER',
               '-dDetectDuplicateImages=true',
               '-sOutputFile={}'.format(output_file_path),
               input_file_path]
    else:
        # BUG FIX (robustness): an unknown method previously fell through and
        # crashed later with UnboundLocalError on `cmd`.
        raise ValueError(f"invalid compression method {method}, expected 1 or 2")
    try:
        # execute
        subprocess.call(cmd, stderr=sys.stdout)
    except Exception:
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behaviour, but only for
        # ordinary exceptions.  Print the ghostscript command for debug.
        print(" ".join(cmd))
    if not os.path.exists(output_file_path):
        raise Exception(f"Ghostscript failed to create {output_file_path}, time to debug...\n",
                        " ".join(cmd))
    initial_size = round(os.path.getsize(input_file_path) / (1024 * 1024), 2)
    final_size = round(os.path.getsize(output_file_path) / (1024 * 1024), 2)
    ratio = round(100 - ((final_size / initial_size) * 100), 1)
    print(f"Initial file size is {initial_size}MB",
          f"; Final file size is {final_size}MB",
          f"; Compression Ratio = {ratio}%\n")
    if final_size > initial_size and method == 1:
        print('-' * 100)
        print('Compression Failed\nTrying another ghostscript compression method...')
        print('-' * 100)
        # Retry once with the simpler argument set at maximum ('screen') quality.
        info = compress(input_file_path, output_file_path, 4, 2)
        initial_size = info[0]
        final_size = info[1]
        ratio = info[2]
    return [initial_size, final_size, ratio]
def get_ghostscript_path():
    """Return the full path of the first Ghostscript executable on PATH.

    Raises FileNotFoundError when none of the known binary names is found.
    """
    gs_names = ['gs', 'gswin32', 'gswin64']
    for name in gs_names:
        # BUG FIX: shutil.which was previously called twice per hit.
        path = shutil.which(name)
        if path:
            return path
    # BUG FIX: the original f-string nested single quotes inside a
    # single-quoted literal, a SyntaxError before Python 3.12.
    raise FileNotFoundError(f'No GhostScript executable was found on path ({"/".join(gs_names)})')
def main():
    """Command line entry point: parse arguments and compress the input PDF."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('input', help='Relative or absolute path of the input PDF file')
    parser.add_argument('-o', '--out', help='Relative or absolute path of the output PDF file')
    parser.add_argument('-c', '--compress', type=int, help='Compression level from 0 to 4')
    parser.add_argument('-b', '--backup', action='store_true', help="Backup the old PDF file")
    parser.add_argument('--open', action='store_true', default=False,
                        help='Open PDF after compression')
    args = parser.parse_args()
    # In case no compression level is specified, default is 2 '/printer'.
    # BUG FIX: `if not args.compress` also overrode an explicit `-c 0`;
    # compare against None so level 0 is honoured.
    if args.compress is None:
        args.compress = 2
    # In case no output file is specified, store in temp file
    if not args.out:
        args.out = 'temp.pdf'
    # Run
    # BUG FIX: compress() has no `power` parameter (the keyword is `level`),
    # so the previous call raised TypeError.
    compress(args.input, args.out, level=args.compress)
    # In case no output file is specified, erase original file
    if args.out == 'temp.pdf':
        if args.backup:
            shutil.copyfile(args.input, args.input.replace(".pdf", "_BACKUP.pdf"))
        shutil.copyfile(args.out, args.input)
        os.remove(args.out)
    # In case we want to open the file after compression
    if args.open:
        if args.out == 'temp.pdf' and args.backup:
            subprocess.call(['open', args.input])
        else:
            subprocess.call(['open', args.out])
if __name__ == '__main__':
    main()
| # https://github.com/theeko74/pdfc
# modified by brio50 on 2022/01/23, working with gs version 9.54.0
"""
Simple python wrapper script to use ghoscript function to compress PDF files.
Compression levels:
0: default
1: prepress
2: printer
3: ebook
4: screen
Dependency: Ghostscript.
On MacOSX install via command line `brew install ghostscript`.
"""
import argparse
import subprocess
import os.path
import sys
import shutil
def compress(input_file_path, output_file_path, level=0, method=1):
    """Compress a PDF via the Ghostscript command line interface.

    *level* selects a Ghostscript /PDFSETTINGS quality preset (0-4, see the
    module docstring) and *method* selects one of two argument sets.  Returns
    [initial_size, final_size, ratio] with sizes in MB.  If method 1 grows
    the file, the compression is retried once with method 2 at 'screen'
    quality.  Exits the process on an invalid input path or extension.
    """
    # Ghostscript -dPDFSETTINGS quality presets, indexed by level.
    quality = {
        0: '/default',
        1: '/prepress',
        2: '/printer',
        3: '/ebook',
        4: '/screen'
    }
    # Check if valid path
    if not os.path.isfile(input_file_path):
        print(f"Error: invalid path for input file: {input_file_path}")
        sys.exit(1)
    # Check if file is a PDF by extension
    if input_file_path.split('.')[-1].lower() != 'pdf':  # not sure this is the most robust solution
        print(f"Error: input file is not a PDF: {input_file_path}")
        sys.exit(1)
    gs = get_ghostscript_path()
    file_name = input_file_path.split('/')[-1]  # everything after last '/'
    print("Compressing PDF \"{}\"...".format(file_name))
    if method == 1:
        # https://gist.github.com/lkraider/f0888da30bc352f9d167dfa4f4fc8213
        cmd = [gs, '-sDEVICE=pdfwrite',
               '-dNumRenderingThreads=2',
               '-dPDFSETTINGS={}'.format(quality[level]),
               '-dCompatibilityLevel=1.5',
               '-dNOPAUSE', '-dQUIET', '-dBATCH', '-dSAFER',
               # font settings
               '-dSubsetFonts=true',
               '-dCompressFonts=true',
               '-dEmbedAllFonts=true',
               # color format
               '-sProcessColorModel=DeviceRGB',
               '-sColorConversionStrategy=RGB',
               '-sColorConversionStrategyForImages=RGB',
               '-dConvertCMYKImagesToRGB=true',
               # image resample
               '-dDetectDuplicateImages=true',
               '-dColorImageDownsampleType=/Bicubic',
               '-dColorImageResolution=300',
               '-dGrayImageDownsampleType=/Bicubic',
               '-dGrayImageResolution=300',
               '-dMonoImageDownsampleType=/Subsample',
               '-dMonoImageResolution=300',
               '-dDownsampleColorImages=true',
               # preset overrides
               '-dDoThumbnails=false',
               '-dCreateJobTicket=false',
               '-dPreserveEPSInfo=false',
               '-dPreserveOPIComments=false',
               '-dPreserveOverprintSettings=false',
               '-dUCRandBGInfo=/Remove',
               '-sOutputFile={}'.format(output_file_path),
               input_file_path]
    elif method == 2:
        cmd = [gs, '-sDEVICE=pdfwrite',
               '-dNumRenderingThreads=2',
               '-dPDFSETTINGS={}'.format(quality[level]),
               '-dCompatibilityLevel=1.4',
               '-dNOPAUSE', '-dQUIET', '-dBATCH', '-dSAFER',
               '-dDetectDuplicateImages=true',
               '-sOutputFile={}'.format(output_file_path),
               input_file_path]
    else:
        # BUG FIX (robustness): an unknown method previously fell through and
        # crashed later with UnboundLocalError on `cmd`.
        raise ValueError(f"invalid compression method {method}, expected 1 or 2")
    try:
        # execute
        subprocess.call(cmd, stderr=sys.stdout)
    except Exception:
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behaviour, but only for
        # ordinary exceptions.  Print the ghostscript command for debug.
        print(" ".join(cmd))
    if not os.path.exists(output_file_path):
        raise Exception(f"Ghostscript failed to create {output_file_path}, time to debug...\n",
                        " ".join(cmd))
    initial_size = round(os.path.getsize(input_file_path) / (1024 * 1024), 2)
    final_size = round(os.path.getsize(output_file_path) / (1024 * 1024), 2)
    ratio = round(100 - ((final_size / initial_size) * 100), 1)
    print(f"Initial file size is {initial_size}MB",
          f"; Final file size is {final_size}MB",
          f"; Compression Ratio = {ratio}%\n")
    if final_size > initial_size and method == 1:
        print('-' * 100)
        print('Compression Failed\nTrying another ghostscript compression method...')
        print('-' * 100)
        # Retry once with the simpler argument set at maximum ('screen') quality.
        info = compress(input_file_path, output_file_path, 4, 2)
        initial_size = info[0]
        final_size = info[1]
        ratio = info[2]
    return [initial_size, final_size, ratio]
def get_ghostscript_path():
    """Return the full path of the first Ghostscript executable on PATH.

    Raises FileNotFoundError when none of the known binary names is found.
    """
    gs_names = ['gs', 'gswin32', 'gswin64']
    for name in gs_names:
        # BUG FIX: shutil.which was previously called twice per hit.
        path = shutil.which(name)
        if path:
            return path
    raise FileNotFoundError(f'No GhostScript executable was found on path ({"/".join(gs_names)})')
def main():
    """Command line entry point: parse arguments and compress the input PDF."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('input', help='Relative or absolute path of the input PDF file')
    parser.add_argument('-o', '--out', help='Relative or absolute path of the output PDF file')
    parser.add_argument('-c', '--compress', type=int, help='Compression level from 0 to 4')
    parser.add_argument('-b', '--backup', action='store_true', help="Backup the old PDF file")
    parser.add_argument('--open', action='store_true', default=False,
                        help='Open PDF after compression')
    args = parser.parse_args()
    # In case no compression level is specified, default is 2 '/printer'.
    # BUG FIX: `if not args.compress` also overrode an explicit `-c 0`;
    # compare against None so level 0 is honoured.
    if args.compress is None:
        args.compress = 2
    # In case no output file is specified, store in temp file
    if not args.out:
        args.out = 'temp.pdf'
    # Run
    # BUG FIX: compress() has no `power` parameter (the keyword is `level`),
    # so the previous call raised TypeError.
    compress(args.input, args.out, level=args.compress)
    # In case no output file is specified, erase original file
    if args.out == 'temp.pdf':
        if args.backup:
            shutil.copyfile(args.input, args.input.replace(".pdf", "_BACKUP.pdf"))
        shutil.copyfile(args.out, args.input)
        os.remove(args.out)
    # In case we want to open the file after compression
    if args.open:
        if args.out == 'temp.pdf' and args.backup:
            subprocess.call(['open', args.input])
        else:
            subprocess.call(['open', args.out])
if __name__ == '__main__':
    main()
|
from recyclus import Client
import time
def load(job):
    """Fetch the cyclus.sqlite output of *job* via the module-level client."""
    output_name = 'cyclus.sqlite'
    print('saving cyclus.sqlite...')
    client.save(output_name, job.jobid)
def wait_for_completion(job):
    """Poll *job* every two seconds until it reaches a terminal state.

    On 'done' the cyclus.sqlite output is downloaded via load(); the other
    terminal states ('error', 'failed', 'unknown job') just end the wait.
    """
    while True:
        time.sleep(2)
        resp = job.status()
        if resp['status'] != 'ok':
            # BUG FIX: was `print(f'Error:', ...)` — an f-string with no
            # placeholders; a plain string prints the same text.
            print('Error:', resp['message'])
            return
        info = resp['info']
        # BUG FIX: the f-string nested double quotes inside a double-quoted
        # literal, a SyntaxError before Python 3.12.
        print(f"\tStatus: {info['status']}")
        if info['status'] in ['done', 'error', 'failed', 'unknown job']:
            if info['status'] == 'done':
                load(job)
                # job.delete()
            print('done')
            return
# Submit a scenario run to the remote service, wait for it to finish, then
# report the job's files and listing.
client = Client()
job = client.run(scenario='./scenario.xml', project='demo')
print('job submitted:', job.jobid)
wait_for_completion(job)
print('files:',job.files())
print('list:')
job.list()
| from recyclus import Client
import time
def load(job):
    """Fetch the cyclus.sqlite output of *job* via the module-level client."""
    output_name = 'cyclus.sqlite'
    print('saving cyclus.sqlite...')
    client.save(output_name, job.jobid)
def wait_for_completion(job):
    """Poll *job* every two seconds until it reaches a terminal state.

    On 'done' the cyclus.sqlite output is downloaded via load(); the other
    terminal states ('error', 'failed', 'unknown job') just end the wait.
    """
    while True:
        time.sleep(2)
        resp = job.status()
        if resp['status'] != 'ok':
            print(f'Error:', resp['message'])
            return
        info = resp['info']
        print(f"\tStatus: {info['status']}")
        # Terminal states end the polling loop.
        if info['status'] in ['done', 'error', 'failed', 'unknown job']:
            if info['status'] == 'done':
                load(job)
                # job.delete()
            print('done')
            return
# Submit a scenario run to the remote service, wait for it to finish, then
# report the job's files and listing.
client = Client()
job = client.run(scenario='./scenario.xml', project='demo')
print('job submitted:', job.jobid)
wait_for_completion(job)
print('files:',job.files())
print('list:')
job.list()
|
import importlib
import os
from datasets.hdf5 import get_test_loaders
from unet3d import utils
from unet3d.config import load_config
from unet3d.model import get_model
logger = utils.get_logger('UNet3DPredictor')
def _get_predictor(model, loader, output_file, config):
    """Build the predictor named by config['predictor']['name'] (default
    'StandardPredictor') from the unet3d.predictor module."""
    predictor_config = config.get('predictor', {})
    class_name = predictor_config.get('name', 'StandardPredictor')
    predictor_module = importlib.import_module('unet3d.predictor')
    predictor_class = getattr(predictor_module, class_name)
    # model: UNet3D, loader: test_loader, output_file: data.h5, config: config.yaml
    return predictor_class(model, loader, output_file, config, **predictor_config)
def main():
    """Load the trained model and run one prediction pass per test loader,
    writing predict_<i>.h5 for each."""
    # Load configuration
    config = load_config()
    # Create the model
    model = get_model(config)
    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    # BUG FIX: the f-string nested double quotes inside a double-quoted
    # literal, a SyntaxError before Python 3.12.
    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])
    logger.info('Loading HDF5 datasets...')
    test_loader = get_test_loaders(config)['test']
    for i, data_pair in enumerate(test_loader):
        output_file = 'predict_' + str(i) + '.h5'
        predictor = _get_predictor(model, data_pair, output_file, config)
        predictor.predict()
if __name__ == '__main__':
    main()
| import importlib
import os
from datasets.hdf5 import get_test_loaders
from unet3d import utils
from unet3d.config import load_config
from unet3d.model import get_model
logger = utils.get_logger('UNet3DPredictor')
def _get_predictor(model, loader, output_file, config):
    """Instantiate the predictor class named in config['predictor']['name']
    (default 'StandardPredictor') from the unet3d.predictor module."""
    predictor_config = config.get('predictor', {})
    class_name = predictor_config.get('name', 'StandardPredictor')
    m = importlib.import_module('unet3d.predictor')
    predictor_class = getattr(m, class_name)
    # model: UNet3D, loader: test_loader, output_file: data.h5, config: config.yaml
    return predictor_class(model, loader, output_file, config, **predictor_config)
def main():
    """Load the trained model and run one prediction pass per test loader,
    writing predict_<i>.h5 for each."""
    # Load configuration
    config = load_config()
    # Create the model
    model = get_model(config)
    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])
    logger.info('Loading HDF5 datasets...')
    test_loader = get_test_loaders(config)['test']
    # One output file per loader entry.
    for i, data_pair in enumerate(test_loader):
        output_file = 'predict_' + str(i) + '.h5'
        predictor = _get_predictor(model, data_pair, output_file, config)
        predictor.predict()
if __name__ == '__main__':
    main()
|
"""fix_parser.py - parse V1.0 fixprotocol sbe xml files described
by xsd https://github.com/FIXTradingCommunity/
fix-simple-binary-encoding/blob/master/v1-0-STANDARD/resources/sbe.xsd
"""
import xml.etree.ElementTree as etree
from pysbe.schema.constants import (
SBE_TYPES_TYPE,
STRING_ENUM_MAP,
VALID_TYPE_PRIMITIVE_TYPE,
TYPE_PRIMITIVE_TYPE_MAP,
PRESENCE_MAP,
QUALIFIED_NAME_RE,
SYMBOLIC_NAME_RE,
)
from pysbe.schema.builder import createMessageSchema
from pysbe.schema.types import (
createType,
createComposite,
createEnum,
createValidValue,
TypeCollection,
createRef,
createSet,
createChoice,
createMessage,
createField,
FieldCollection,
createGroup,
)
from pysbe.schema.exceptions import UnknownReference
# XML namespace of the SBE schema elements.
SBE_NS = "http://fixprotocol.io/2016/sbe"
# Each *_ATTRIBUTES dict maps an attribute key to its parsing spec: expected
# Python "type" callable, optional "default", "minimumValue", validation
# "pattern"/"map", "use": "optional" flag, and "attribute_name" when the XML
# attribute name differs from the Python keyword used for it.
SEMANTIC_ATTRIBUTES = {
    "semanticType": {"type": str, "use": "optional"},
    "description": {"type": str, "use": "optional"},
}
VERSION_ATTRIBUTES = {
    "sinceVersion": {"type": int, "default": 0, "minimumValue": 0, "use": "optional"},
    # deprecated is itself deprecated in RC4
    "deprecated": {"type": int, "minimumValue": 0, "use": "optional"},
}
ALIGNMENT_ATTRIBUTES = {"offset": {"type": int, "minimumValue": 0, "use": "optional"}}
PRESENCE_ATTRIBUTES = {
    "presence": {"type": str, "default": "required", "map": PRESENCE_MAP},
    "valueRef": {"type": str, "use": "optional", "pattern": QUALIFIED_NAME_RE},
}
TYPE_ATTRIBUTES = {
    "name": {"type": str, "pattern": SYMBOLIC_NAME_RE},
    "primitiveType": {"type": str, "map": TYPE_PRIMITIVE_TYPE_MAP},
    "nullValue": {"type": str, "use": "optional"},
    "minValue": {"type": str, "use": "optional"},
    "maxValue": {"type": str, "use": "optional"},
    "characterEncoding": {"type": str, "use": "optional"},
    "length": {"type": int, "minimumValue": 0, "use": "optional", "default": 1},
}
ENUM_ATTRIBUTES = {"encodingType": {"type": str, "pattern": SYMBOLIC_NAME_RE}}
REF_ATTRIBUTES = {"type": {"type": str}}
MESSAGE_ATTRIBUTES = {
    "blockLength": {"type": int, "use": "optional"},
    "message_id": {"type": int, "attribute_name": "id"},
}
FIELD_ATTRIBUTES = {
    "field_id": {"type": int, "attribute_name": "id"},
    "field_type": {"type": str, "pattern": SYMBOLIC_NAME_RE, "attribute_name": "type"},
}
GROUP_ATTRIBUTES = {
    "group_id": {"type": int, "attribute_name": "id"},
    "dimensionType": {"type": str, "pattern": SYMBOLIC_NAME_RE, "use": "optional"},
}
# Union of every attribute spec; parse_common_attributes indexes into this.
ALL_ATTRIBUTES_MAP = {
    **SEMANTIC_ATTRIBUTES,
    **VERSION_ATTRIBUTES,
    **ALIGNMENT_ATTRIBUTES,
    **PRESENCE_ATTRIBUTES,
    **TYPE_ATTRIBUTES,
    **ENUM_ATTRIBUTES,
    **REF_ATTRIBUTES,
    **MESSAGE_ATTRIBUTES,
    **FIELD_ATTRIBUTES,
    **GROUP_ATTRIBUTES,
}
# Per-element lists of which attribute keys apply to each schema element.
TYPE_ATTRIBUTES_LIST = list(SEMANTIC_ATTRIBUTES) + list(VERSION_ATTRIBUTES) + list(
    ALIGNMENT_ATTRIBUTES
) + list(
    PRESENCE_ATTRIBUTES
) + list(
    TYPE_ATTRIBUTES
)
COMPOSITE_ATTRIBUTES_LIST = ["name"] + list(SEMANTIC_ATTRIBUTES) + list(
    ALIGNMENT_ATTRIBUTES
) + list(
    VERSION_ATTRIBUTES
)
ENUM_ATTRIBUTES_LIST = ["name"] + list(ENUM_ATTRIBUTES) + list(
    ALIGNMENT_ATTRIBUTES
) + list(
    SEMANTIC_ATTRIBUTES
) + list(
    VERSION_ATTRIBUTES
)
ENUM_VALID_VALUES_ATTRIBUTES_LIST = (
    "name", "description", "sinceVersion", "deprecated"
)
REF_ATTRIBUTES_LIST = ("name", "type", "offset", "sinceVersion", "deprecated")
SET_ATTRIBUTES_LIST = (
    "name", "description", "encodingType", "sinceVersion", "deprecated", "offset"
)
SET_CHOICE_ATTRIBUTES_LIST = ("name", "description", "sinceVersion", "deprecated")
VALID_COMPOSITE_CHILD_ELEMENTS = ("type", "enum", "set", "composite", "ref")
MESSAGE_ATTRIBUTES_LIST = (
    "name",
    "message_id",
    "description",
    "blockLength",
    "semanticType",
    "sinceVersion",
    "deprecated",
)
FIELD_ATTRIBUTES_LIST = (
    "name",
    "field_id",
    "field_type",
    "description",
    "offset",
    "presence",
    "valueRef",
    "sinceVersion",
    "deprecated",
)
GROUP_ATTRIBUTES_LIST = (
    "name",
    "group_id",
    "description",
    "blockLength",
    "semanticType",
    "sinceVersion",
    "deprecated",
    "dimensionType",
)
# Sentinel distinguishing "no value supplied" from legitimate falsy values.
MISSING = object()
class BaseParser:
    """Contains shared functionality for the schema element parsers."""

    # Namespace map used for namespaced find/findall lookups.
    NS = {"sbe": SBE_NS}

    def parse_common_attributes(self, element, attributes):
        """Parse and return a dict of validated attribute values of *element*.

        *attributes* lists keys into ALL_ATTRIBUTES_MAP; each spec drives
        default handling, type conversion, minimum/pattern/map validation
        and optional renaming.  Raises ValueError on missing or invalid
        values.
        """
        result_attributes = {}
        for attribute in attributes:
            attrib_info = ALL_ATTRIBUTES_MAP[attribute]
            # Use the declared default when one exists; MISSING otherwise.
            if attrib_info.get("default", MISSING) is not MISSING:
                default_value = attrib_info["default"]
            else:
                default_value = MISSING
            attribute_name = attrib_info.get("attribute_name", attribute)
            value = element.attrib.get(attribute_name, default_value)
            if value is MISSING or value == "":
                if attrib_info.get("use") == "optional":
                    continue
                else:
                    raise ValueError(
                        f"element {element.tag} missing required "
                        f"attribute {attribute_name}"
                    )
            if attrib_info.get("type"):
                try:
                    value = attrib_info["type"](value)
                except ValueError as exc:
                    raise ValueError(
                        f"element {element.tag} invalid value "
                        f"{repr(value)} for attribute {attribute_name}"
                    ) from exc
            # BUG FIX: a plain truthiness test skipped the minimum check
            # whenever the declared minimum was 0 (as it is for every spec
            # in this module); compare against None instead.
            if attrib_info.get("minimumValue") is not None:
                if value < attrib_info["minimumValue"]:
                    # BUG FIX (here and below): the f-strings nested double
                    # quotes inside double-quoted literals, a SyntaxError
                    # before Python 3.12; hoist the lookups into locals.
                    minimum = attrib_info["minimumValue"]
                    raise ValueError(
                        f"element {element.tag} invalid value {repr(value)}"
                        f" for attribute {attribute_name},"
                        "less than allowed minimum "
                        f"{repr(minimum)}"
                    )
            if attrib_info.get("pattern"):
                if not attrib_info["pattern"].match(value):
                    pattern = attrib_info["pattern"]
                    raise ValueError(
                        f"element {element.tag} invalid value {repr(value)} "
                        f"for attribute {attribute_name},"
                        "does not match expected pattern "
                        f"{repr(pattern)}"
                    )
            if attrib_info.get("map"):
                try:
                    value = attrib_info["map"][value]
                except (KeyError, IndexError) as exc:
                    valid_keys = attrib_info["map"].keys()
                    raise ValueError(
                        f"element {element.tag} invalid value {repr(value)} "
                        f"for attribute {attribute_name}"
                        f", must be one of {repr(valid_keys)}"
                    ) from exc
            # NOTE(review): no spec in this module declares "rename"; kept
            # for compatibility with external attribute specs.
            if attrib_info.get("rename"):
                attribute = attrib_info["rename"]
            result_attributes[attribute] = value
        return result_attributes
class SBESpecParser(BaseParser):
    """Parser for VFIX: entry point for a whole sbe:messageSchema document."""

    def __init__(self):
        pass

    def parseFile(self, file_or_object):
        """Parse an XML file (path or file object) into a message schema."""
        root = etree.parse(file_or_object)
        element_name = "{%s}messageSchema" % SBE_NS
        # for some reason root.find('sbe:messageSchema') returns None
        # work around that
        messageSchema_element = root.getroot()
        if messageSchema_element.tag != element_name:
            # BUG FIX: the second literal was missing its f prefix, so
            # "{repr(messageSchema_element)}" appeared verbatim in the error.
            raise ValueError(
                "root element is not sbe:messageSchema,"
                f" found {repr(messageSchema_element)} instead"
            )
        return self.processSchema(messageSchema_element)

    def processSchema(self, messageSchema_element):
        """Process xml elements beginning with root messageSchema_element."""
        attrib = messageSchema_element.attrib
        version = parse_version(attrib.get("version"))
        byteOrder = parse_byteOrder(attrib.get("byteOrder") or "littleEndian")
        package = parse_optionalString(attrib.get("package"))
        semanticVersion = parse_optionalString(attrib.get("semanticVersion"))
        description = parse_optionalString(attrib.get("description"))
        headerType = parse_optionalString(attrib.get("headerType") or "messageHeader")
        messageSchema = createMessageSchema(
            version=version,
            byteOrder=byteOrder,
            package=package,
            semanticVersion=semanticVersion,
            description=description,
            headerType=headerType,
        )
        # Parse all <types> sections, then all namespaced <sbe:message>
        # definitions, into the schema object.
        types_elements = messageSchema_element.findall("types")
        types_parser = TypesParser()
        for element in types_elements:
            types_parser.parse_types(messageSchema, element)
        message_elements = messageSchema_element.findall(
            "sbe:message", namespaces=self.NS
        )
        message_parser = MessageParser()
        for element in message_elements:
            message_parser.parse_message(messageSchema, element)
        return messageSchema
class TypesParser(BaseParser):
    """Parse <types> definitions: type, composite, enum, set (and ref
    inside composites)."""

    # which child elements may appear in types
    VALID_TYPES_ELEMENTS = ("type", "composite", "enum", "set")

    def parse_types(self, messageSchema, element):
        """Parse one <types> section; the section can be repeated."""
        for child_element in element:
            if child_element.tag not in self.VALID_TYPES_ELEMENTS:
                raise ValueError(
                    f"invalid types child element {repr(child_element.tag)}"
                )
            # Dispatch to parse_types_<tag>.
            parser = getattr(self, f"parse_types_{child_element.tag}", None)
            if not parser:
                raise RuntimeError(
                    f"unsupported types parser {repr(child_element.tag)}"
                )
            parser(messageSchema, child_element)

    def parse_types_type(self, parent: TypeCollection, element):
        """Parse types/type."""
        attributes = self.parse_common_attributes(
            element, attributes=TYPE_ATTRIBUTES_LIST
        )
        sbe_type = createType(**attributes)
        parent.addType(sbe_type)

    def parse_types_ref(self, parent: TypeCollection, element):
        """Parse composite/ref; the referenced type must already exist."""
        attributes = self.parse_common_attributes(
            element, attributes=REF_ATTRIBUTES_LIST
        )
        sbe_ref = createRef(**attributes)
        reference_type = parent.lookupName(sbe_ref.type)
        if not reference_type:
            raise UnknownReference(
                f"composite {parent.name} ref {sbe_ref.name}"
                f" references unknown encodingType {sbe_ref.type}"
            )
        parent.addType(sbe_ref)

    def parse_types_composite(self, parent: TypeCollection, element):
        """Parse types/composite and recurse into its children."""
        attributes = self.parse_common_attributes(
            element, attributes=COMPOSITE_ATTRIBUTES_LIST
        )
        sbe_composite = createComposite(**attributes)
        parent.addType(sbe_composite)
        # now iterate over composite children
        for child_element in element:
            tag = child_element.tag
            if tag not in VALID_COMPOSITE_CHILD_ELEMENTS:
                raise ValueError(
                    f"invalid child element {repr(tag)} in "
                    f"composite element {repr(sbe_composite.name)}"
                )
            parser = getattr(self, f"parse_types_{tag}", None)
            if not parser:
                raise RuntimeError(
                    f"unsupported types parser {repr(child_element.tag)}"
                )
            parser(sbe_composite, child_element)

    def parse_types_set(self, parent: TypeCollection, element):
        """Parse types/set and its <choice> children."""
        attributes = self.parse_common_attributes(
            element, attributes=SET_ATTRIBUTES_LIST
        )
        sbe_set = createSet(**attributes)
        parent.addType(sbe_set)
        for child_element in element.findall("choice"):
            choice = self.parse_set_choice(sbe_set=sbe_set, element=child_element)
            sbe_set.addChoice(choice)

    def parse_set_choice(self, sbe_set, element):
        """Parse and return a set choice (its value is the element text)."""
        attributes = self.parse_common_attributes(
            element, attributes=SET_CHOICE_ATTRIBUTES_LIST
        )
        value = element.text
        try:
            value = int(element.text)
        except ValueError as exc:
            # BUG FIX: the f-string nested double quotes inside a
            # double-quoted literal, a SyntaxError before Python 3.12.
            raise ValueError(
                f"invalid value for set {sbe_set.name} choice "
                f"{attributes.get('name')}"
            ) from exc
        choice = createChoice(value=value, **attributes)
        return choice

    def parse_types_enum(self, parent: TypeCollection, element):
        """Parse types/enum and its <validValue> children."""
        attributes = self.parse_common_attributes(
            element, attributes=ENUM_ATTRIBUTES_LIST
        )
        sbe_enum = createEnum(**attributes)
        parent.addType(sbe_enum)
        for child_element in element.findall("validValue"):
            valid_value = self.parse_enum_valid_value(
                sbe_enum=sbe_enum, element=child_element
            )
            sbe_enum.addValidValue(valid_value)

    def parse_enum_valid_value(self, sbe_enum, element):
        """Parse and return an enum validValue (value is the element text)."""
        attributes = self.parse_common_attributes(
            element, attributes=ENUM_VALID_VALUES_ATTRIBUTES_LIST
        )
        value = element.text
        enum_valid_value = createValidValue(value=value, **attributes)
        return enum_valid_value
class MessageParser(BaseParser):
    """parse message definitions"""
    # which child elements may appear in message
    # NOTE(review): "data" is accepted here but no parse_message_data method
    # exists, so a data child currently raises RuntimeError below — confirm
    # whether that is intended.
    VALID_MESSAGE_TYPES = ("field", "group", "data")
    def parse_message(self, messageSchema, element):
        """parse one sbe:message element and register it on the schema;
        can be repeated"""
        attributes = self.parse_common_attributes(
            element, attributes=MESSAGE_ATTRIBUTES_LIST
        )
        message = createMessage(**attributes)
        messageSchema.addMessage(message)
        self.parse_field_children(messageSchema, message, element)
    def parse_field_children(self, messageSchema, parent: FieldCollection, element):
        """parse child elements that fit in a fieldCollection, dispatching
        to parse_message_<tag>"""
        for child_element in element:
            if child_element.tag not in self.VALID_MESSAGE_TYPES:
                raise ValueError(
                    f"invalid message/group child element {repr(child_element.tag)}"
                )
            parser = getattr(self, f"parse_message_{child_element.tag}", None)
            if not parser:
                raise RuntimeError(
                    f"unsupported message parser {repr(child_element.tag)}"
                )
            parser(messageSchema, parent, child_element)
    def parse_message_field(
        self, messageSchema, parent: FieldCollection, element
    ) -> None:
        """parse a field element, validate it against the schema and add it
        to the parent collection"""
        attributes = self.parse_common_attributes(
            element, attributes=FIELD_ATTRIBUTES_LIST
        )
        field = createField(**attributes)
        field.validate(messageSchema)
        parent.addField(field)
    def parse_message_group(
        self, messageSchema, parent: FieldCollection, element
    ) -> None:
        """parse a repeating group element, then recurse into its children"""
        attributes = self.parse_common_attributes(
            element, attributes=GROUP_ATTRIBUTES_LIST
        )
        group = createGroup(**attributes)
        group.validate(messageSchema)
        parent.addField(group)
        self.parse_field_children(messageSchema, group, element)
def parse_byteOrder(byteOrder):
    """Convert a byteOrder attribute string to its enum value.

    Returns None for a missing/empty value; raises ValueError for an
    unrecognised byte order name.
    """
    if byteOrder is None or byteOrder == "":
        return None
    value = STRING_ENUM_MAP.get(byteOrder)
    if value is None:
        # Fixes: report the offending input (the original printed
        # repr(value), which is always None here), add the missing f prefix
        # on the second literal so the keys are actually interpolated, and
        # reference STRING_ENUM_MAP (SBE_STRING_ENUM_MAP is undefined).
        raise ValueError(
            f"invalid byteOrder {repr(byteOrder)},"
            f"expected one of {STRING_ENUM_MAP.keys()}"
        )
    return value
def parse_version(version):
    """Return *version* coerced to int.

    Raises ValueError when the attribute is absent, since
    sbe:messageSchema/@version is mandatory.
    """
    if version is not None:
        return int(version)
    raise ValueError("sbe:messageSchema/@version is required")
def parse_optionalString(value):
    """Return *value* unchanged, normalising empty/falsy inputs to None."""
    return value if value else None
| """fix_parser.py - parse V1.0 fixprotocol sbe xml files described
by xsd https://github.com/FIXTradingCommunity/
fix-simple-binary-encoding/blob/master/v1-0-STANDARD/resources/sbe.xsd
"""
import xml.etree.ElementTree as etree
from pysbe.schema.constants import (
SBE_TYPES_TYPE,
STRING_ENUM_MAP,
VALID_TYPE_PRIMITIVE_TYPE,
TYPE_PRIMITIVE_TYPE_MAP,
PRESENCE_MAP,
QUALIFIED_NAME_RE,
SYMBOLIC_NAME_RE,
)
from pysbe.schema.builder import createMessageSchema
from pysbe.schema.types import (
createType,
createComposite,
createEnum,
createValidValue,
TypeCollection,
createRef,
createSet,
createChoice,
createMessage,
createField,
FieldCollection,
createGroup,
)
from pysbe.schema.exceptions import UnknownReference
# XML namespace of the SBE schema elements.
SBE_NS = "http://fixprotocol.io/2016/sbe"

# Attribute rule tables: each entry maps an attribute name to the rules
# applied by BaseParser.parse_common_attributes (type conversion, default,
# optional/required, regex pattern, value map, xml attribute_name alias).
SEMANTIC_ATTRIBUTES = {
    "semanticType": {"type": str, "use": "optional"},
    "description": {"type": str, "use": "optional"},
}
VERSION_ATTRIBUTES = {
    "sinceVersion": {"type": int, "default": 0, "minimumValue": 0, "use": "optional"},
    # deprecated is itself deprecated in RC4
    "deprecated": {"type": int, "minimumValue": 0, "use": "optional"},
}
ALIGNMENT_ATTRIBUTES = {"offset": {"type": int, "minimumValue": 0, "use": "optional"}}
PRESENCE_ATTRIBUTES = {
    "presence": {"type": str, "default": "required", "map": PRESENCE_MAP},
    "valueRef": {"type": str, "use": "optional", "pattern": QUALIFIED_NAME_RE},
}
TYPE_ATTRIBUTES = {
    "name": {"type": str, "pattern": SYMBOLIC_NAME_RE},
    "primitiveType": {"type": str, "map": TYPE_PRIMITIVE_TYPE_MAP},
    "nullValue": {"type": str, "use": "optional"},
    "minValue": {"type": str, "use": "optional"},
    "maxValue": {"type": str, "use": "optional"},
    "characterEncoding": {"type": str, "use": "optional"},
    "length": {"type": int, "minimumValue": 0, "use": "optional", "default": 1},
}
ENUM_ATTRIBUTES = {"encodingType": {"type": str, "pattern": SYMBOLIC_NAME_RE}}
REF_ATTRIBUTES = {"type": {"type": str}}
# "attribute_name" aliases map python-safe kwarg names (message_id, field_id,
# group_id, field_type) back to the xml attribute actually read (id / type).
MESSAGE_ATTRIBUTES = {
    "blockLength": {"type": int, "use": "optional"},
    "message_id": {"type": int, "attribute_name": "id"},
}
FIELD_ATTRIBUTES = {
    "field_id": {"type": int, "attribute_name": "id"},
    "field_type": {"type": str, "pattern": SYMBOLIC_NAME_RE, "attribute_name": "type"},
}
GROUP_ATTRIBUTES = {
    "group_id": {"type": int, "attribute_name": "id"},
    "dimensionType": {"type": str, "pattern": SYMBOLIC_NAME_RE, "use": "optional"},
}
# Merged lookup used by parse_common_attributes; later tables win on
# duplicate keys.
ALL_ATTRIBUTES_MAP = {
    **SEMANTIC_ATTRIBUTES,
    **VERSION_ATTRIBUTES,
    **ALIGNMENT_ATTRIBUTES,
    **PRESENCE_ATTRIBUTES,
    **TYPE_ATTRIBUTES,
    **ENUM_ATTRIBUTES,
    **REF_ATTRIBUTES,
    **MESSAGE_ATTRIBUTES,
    **FIELD_ATTRIBUTES,
    **GROUP_ATTRIBUTES,
}
# Per-element attribute name lists: which attributes each element kind
# accepts, in parse order.
TYPE_ATTRIBUTES_LIST = list(SEMANTIC_ATTRIBUTES) + list(VERSION_ATTRIBUTES) + list(
    ALIGNMENT_ATTRIBUTES
) + list(
    PRESENCE_ATTRIBUTES
) + list(
    TYPE_ATTRIBUTES
)
COMPOSITE_ATTRIBUTES_LIST = ["name"] + list(SEMANTIC_ATTRIBUTES) + list(
    ALIGNMENT_ATTRIBUTES
) + list(
    VERSION_ATTRIBUTES
)
ENUM_ATTRIBUTES_LIST = ["name"] + list(ENUM_ATTRIBUTES) + list(
    ALIGNMENT_ATTRIBUTES
) + list(
    SEMANTIC_ATTRIBUTES
) + list(
    VERSION_ATTRIBUTES
)
ENUM_VALID_VALUES_ATTRIBUTES_LIST = (
    "name", "description", "sinceVersion", "deprecated"
)
REF_ATTRIBUTES_LIST = ("name", "type", "offset", "sinceVersion", "deprecated")
SET_ATTRIBUTES_LIST = (
    "name", "description", "encodingType", "sinceVersion", "deprecated", "offset"
)
SET_CHOICE_ATTRIBUTES_LIST = ("name", "description", "sinceVersion", "deprecated")
VALID_COMPOSITE_CHILD_ELEMENTS = ("type", "enum", "set", "composite", "ref")
MESSAGE_ATTRIBUTES_LIST = (
    "name",
    "message_id",
    "description",
    "blockLength",
    "semanticType",
    "sinceVersion",
    "deprecated",
)
FIELD_ATTRIBUTES_LIST = (
    "name",
    "field_id",
    "field_type",
    "description",
    "offset",
    "presence",
    "valueRef",
    "sinceVersion",
    "deprecated",
)
GROUP_ATTRIBUTES_LIST = (
    "name",
    "group_id",
    "description",
    "blockLength",
    "semanticType",
    "sinceVersion",
    "deprecated",
    "dimensionType",
)
# Sentinel distinguishing "attribute absent" from a legitimate None value.
MISSING = object()
class BaseParser:
    """contains shared functionality"""

    # namespace map used for namespaced find/findall lookups
    NS = {"sbe": SBE_NS}

    def parse_common_attributes(self, element, attributes):
        """parse and return dict of common attributes

        For each requested attribute name, looks up its rules in
        ALL_ATTRIBUTES_MAP and applies them in order: default value,
        required/optional check, type conversion, minimum-value check,
        regex pattern check and value mapping.  Raises ValueError on any
        violation; optional missing attributes are silently skipped.
        """
        result_attributes = {}
        for attribute in attributes:
            attrib_info = ALL_ATTRIBUTES_MAP[attribute]
            # MISSING sentinel lets us distinguish "no default" from a
            # default of None/0/""
            if attrib_info.get("default", MISSING) is not MISSING:
                default_value = attrib_info["default"]
            else:
                default_value = MISSING
            # xml attribute may use a different name than the python kwarg
            attribute_name = attrib_info.get("attribute_name", attribute)
            value = element.attrib.get(attribute_name, default_value)
            if value is MISSING or value == "":
                if attrib_info.get("use") == "optional":
                    continue
                else:
                    raise ValueError(
                        f"element {element.tag} missing required "
                        f"attribute {attribute_name}"
                    )
            if attrib_info.get("type"):
                try:
                    value = attrib_info["type"](value)
                except ValueError as exc:
                    raise ValueError(
                        f"element {element.tag} invalid value "
                        f"{repr(value)} for attribute {attribute_name}"
                    ) from exc
            if attrib_info.get("minimumValue"):
                if value < attrib_info["minimumValue"]:
                    raise ValueError(
                        f"element {element.tag} invalid value {repr(value)}"
                        f" for attribute {attribute_name},"
                        "less than allowed minimum "
                        f"{repr(attrib_info['minimumValue'])}"
                    )
            if attrib_info.get("pattern"):
                if not attrib_info["pattern"].match(value):
                    raise ValueError(
                        f"element {element.tag} invalid value {repr(value)} "
                        f"for attribute {attribute_name},"
                        "does not match expected pattern "
                        f"{repr(attrib_info['pattern'])}"
                    )
            if attrib_info.get("map"):
                try:
                    value = attrib_info["map"][value]
                except (KeyError, IndexError) as exc:
                    raise ValueError(
                        f"element {element.tag} invalid value {repr(value)} "
                        f"for attribute {attribute_name}"
                        f", must be one of {repr(attrib_info['map'].keys())}"
                    ) from exc
            # NOTE(review): no table in this file defines a "rename" key;
            # this branch appears unused here — confirm before removing.
            if attrib_info.get("rename"):
                attribute = attrib_info["rename"]
            result_attributes[attribute] = value
        return result_attributes
class SBESpecParser(BaseParser):
    """Parser for VFIX

    Entry point: parseFile() reads an SBE XML schema and returns the
    resulting messageSchema object.
    """

    def __init__(self):
        pass

    def parseFile(self, file_or_object):
        """parse a file (path or file-like object) into a messageSchema"""
        root = etree.parse(file_or_object)
        element_name = "{%s}messageSchema" % SBE_NS
        # for some reason root.find('sbe:messageSchema') returns None
        # work around that
        messageSchema_element = root.getroot()
        if messageSchema_element.tag != element_name:
            # Fix: the second literal was missing the f prefix, so
            # "{repr(messageSchema_element)}" was emitted verbatim instead
            # of being interpolated into the error message.
            raise ValueError(
                "root element is not sbe:messageSchema,"
                f" found {repr(messageSchema_element)} instead"
            )
        return self.processSchema(messageSchema_element)

    def processSchema(self, messageSchema_element):
        """process xml elements beginning with root messageSchema_element"""
        attrib = messageSchema_element.attrib
        version = parse_version(attrib.get("version"))
        byteOrder = parse_byteOrder(attrib.get("byteOrder") or "littleEndian")
        package = parse_optionalString(attrib.get("package"))
        semanticVersion = parse_optionalString(attrib.get("semanticVersion"))
        description = parse_optionalString(attrib.get("description"))
        headerType = parse_optionalString(attrib.get("headerType") or "messageHeader")
        messageSchema = createMessageSchema(
            version=version,
            byteOrder=byteOrder,
            package=package,
            semanticVersion=semanticVersion,
            description=description,
            headerType=headerType,
        )
        # <types> sections are unqualified; <sbe:message> elements are
        # namespaced, hence the namespaces= lookup below.
        types_elements = messageSchema_element.findall("types")
        types_parser = TypesParser()
        for element in types_elements:
            types_parser.parse_types(messageSchema, element)
        message_elements = messageSchema_element.findall(
            "sbe:message", namespaces=self.NS
        )
        message_parser = MessageParser()
        for element in message_elements:
            message_parser.parse_message(messageSchema, element)
        return messageSchema
class TypesParser(BaseParser):
    """parse type definitions"""

    # which child elements may appear in types
    VALID_TYPES_ELEMENTS = ("type", "composite", "enum", "set")

    def parse_types(self, messageSchema, element):
        """parse type, can be repeated"""
        for child_element in element:
            if child_element.tag not in self.VALID_TYPES_ELEMENTS:
                raise ValueError(
                    f"invalid types child element {repr(child_element.tag)}"
                )
            # dispatch to the parse_types_<tag> handler
            parser = getattr(self, f"parse_types_{child_element.tag}", None)
            if not parser:
                raise RuntimeError(
                    f"unsupported types parser {repr(child_element.tag)}"
                )
            parser(messageSchema, child_element)

    def parse_types_type(self, parent: TypeCollection, element):
        """parse types/type"""
        attributes = self.parse_common_attributes(
            element, attributes=TYPE_ATTRIBUTES_LIST
        )
        sbe_type = createType(**attributes)
        parent.addType(sbe_type)

    def parse_types_ref(self, parent: TypeCollection, element):
        """parse composite / ref"""
        attributes = self.parse_common_attributes(
            element, attributes=REF_ATTRIBUTES_LIST
        )
        sbe_ref = createRef(**attributes)
        # the referenced encoding type must already be defined on the parent
        reference_type = parent.lookupName(sbe_ref.type)
        if not reference_type:
            raise UnknownReference(
                f"composite {parent.name} ref {sbe_ref.name}"
                f" references unknown encodingType {sbe_ref.type}"
            )
        parent.addType(sbe_ref)

    def parse_types_composite(self, parent: TypeCollection, element):
        """parse types/composite"""
        attributes = self.parse_common_attributes(
            element, attributes=COMPOSITE_ATTRIBUTES_LIST
        )
        sbe_composite = createComposite(**attributes)
        parent.addType(sbe_composite)
        # now iterate over composite children; composites may nest
        # (type/enum/set/composite/ref), dispatched via parse_types_<tag>
        for child_element in element:
            tag = child_element.tag
            if tag not in VALID_COMPOSITE_CHILD_ELEMENTS:
                raise ValueError(
                    f"invalid child element {repr(tag)} in "
                    f"composite element {repr(sbe_composite.name)}"
                )
            parser = getattr(self, f"parse_types_{tag}", None)
            if not parser:
                raise RuntimeError(
                    f"unsupported types parser {repr(child_element.tag)}"
                )
            parser(sbe_composite, child_element)

    def parse_types_set(self, parent: TypeCollection, element):
        """parse types/set"""
        attributes = self.parse_common_attributes(
            element, attributes=SET_ATTRIBUTES_LIST
        )
        sbe_set = createSet(**attributes)
        parent.addType(sbe_set)
        for child_element in element.findall("choice"):
            choice = self.parse_set_choice(sbe_set=sbe_set, element=child_element)
            sbe_set.addChoice(choice)

    def parse_set_choice(self, sbe_set, element):
        """parse and return a set <choice> element"""
        attributes = self.parse_common_attributes(
            element, attributes=SET_CHOICE_ATTRIBUTES_LIST
        )
        # the element text is the choice's value and must be an integer
        value = element.text
        try:
            value = int(element.text)
        except ValueError as exc:
            raise ValueError(
                f"invalid value for set {sbe_set.name} choice "
                f"{attributes.get('name')}"
            ) from exc
        choice = createChoice(value=value, **attributes)
        return choice

    def parse_types_enum(self, parent: TypeCollection, element):
        """parse types/enum"""
        attributes = self.parse_common_attributes(
            element, attributes=ENUM_ATTRIBUTES_LIST
        )
        sbe_enum = createEnum(**attributes)
        parent.addType(sbe_enum)
        for child_element in element.findall("validValue"):
            valid_value = self.parse_enum_valid_value(
                sbe_enum=sbe_enum, element=child_element
            )
            sbe_enum.addValidValue(valid_value)

    def parse_enum_valid_value(self, sbe_enum, element):
        """parse and return an enum validvalue"""
        attributes = self.parse_common_attributes(
            element, attributes=ENUM_VALID_VALUES_ATTRIBUTES_LIST
        )
        # kept as a string (no int conversion, unlike set choices)
        value = element.text
        enum_valid_value = createValidValue(value=value, **attributes)
        return enum_valid_value
class MessageParser(BaseParser):
    """parse message definitions"""

    # which child elements may appear in message
    VALID_MESSAGE_TYPES = ("field", "group", "data")

    def parse_message(self, messageSchema, element):
        """parse message, can be repeated"""
        attributes = self.parse_common_attributes(
            element, attributes=MESSAGE_ATTRIBUTES_LIST
        )
        message = createMessage(**attributes)
        messageSchema.addMessage(message)
        # message children (field/group/data) share group parsing logic
        self.parse_field_children(messageSchema, message, element)

    def parse_field_children(self, messageSchema, parent: FieldCollection, element):
        """parse child elements that fit in a fieldCollection"""
        for child_element in element:
            if child_element.tag not in self.VALID_MESSAGE_TYPES:
                raise ValueError(
                    f"invalid message/group child element {repr(child_element.tag)}"
                )
            # dispatch to parse_message_<tag>; a tag listed in
            # VALID_MESSAGE_TYPES but lacking a handler (e.g. "data" here)
            # raises RuntimeError
            parser = getattr(self, f"parse_message_{child_element.tag}", None)
            if not parser:
                raise RuntimeError(
                    f"unsupported message parser {repr(child_element.tag)}"
                )
            parser(messageSchema, parent, child_element)

    def parse_message_field(
        self, messageSchema, parent: FieldCollection, element
    ) -> None:
        """parse a message/group <field> element"""
        attributes = self.parse_common_attributes(
            element, attributes=FIELD_ATTRIBUTES_LIST
        )
        field = createField(**attributes)
        field.validate(messageSchema)
        parent.addField(field)

    def parse_message_group(
        self, messageSchema, parent: FieldCollection, element
    ) -> None:
        """parse a repeating <group> element (recurses into its children)"""
        attributes = self.parse_common_attributes(
            element, attributes=GROUP_ATTRIBUTES_LIST
        )
        group = createGroup(**attributes)
        group.validate(messageSchema)
        parent.addField(group)
        self.parse_field_children(messageSchema, group, element)
def parse_byteOrder(byteOrder):
    """Convert a byteOrder attribute string to its enum value.

    Returns None for a missing/empty value; raises ValueError for an
    unrecognised byte order name.
    """
    if byteOrder is None or byteOrder == "":
        return None
    value = STRING_ENUM_MAP.get(byteOrder)
    if value is None:
        # Fixes: report the offending input (the original printed
        # repr(value), which is always None here), add the missing f prefix
        # on the second literal so the keys are actually interpolated, and
        # reference STRING_ENUM_MAP (SBE_STRING_ENUM_MAP is undefined).
        raise ValueError(
            f"invalid byteOrder {repr(byteOrder)},"
            f"expected one of {STRING_ENUM_MAP.keys()}"
        )
    return value
def parse_version(version):
    """Convert the mandatory sbe:messageSchema/@version attribute to int.

    Raises ValueError when the attribute is absent.
    """
    if version is None:
        raise ValueError("sbe:messageSchema/@version is required")
    return int(version)
def parse_optionalString(value):
    """Return *value* unchanged, normalising empty/falsy inputs to None."""
    if not value:
        return None
    return value
|
# -*- coding: utf-8 -*-
"""
Benchmark Results
Updated: 18.02.2022 (6618fa3c36b0c9f3a9d7a21bcdb00bf4fd258ee8)
------------------------------------------------------------------------------------------
| Model | Batch Size | Epochs | KNN Test Accuracy | Time | Peak GPU Usage |
------------------------------------------------------------------------------------------
| BarlowTwins | 128 | 200 | 0.835 | 193.4 Min | 2.2 GByte |
| BYOL | 128 | 200 | 0.872 | 217.0 Min | 2.3 GByte |
| DINO | 128 | 200 | 0.868 | 220.7 Min | 2.3 GByte |
| Moco | 128 | 200 | 0.838 | 229.5 Min | 2.3 GByte |
| NNCLR | 128 | 200 | 0.838 | 198.7 Min | 2.2 GByte |
| SimCLR | 128 | 200 | 0.822 | 182.7 Min | 2.2 GByte |
| SimSiam | 128 | 200 | 0.779 | 182.6 Min | 2.3 GByte |
| SwaV | 128 | 200 | 0.806 | 182.4 Min | 2.2 GByte |
------------------------------------------------------------------------------------------
| BarlowTwins | 512 | 200 | 0.827 | 160.7 Min | 7.5 GByte |
| BYOL | 512 | 200 | 0.872 | 188.5 Min | 7.7 GByte |
| DINO | 512 | 200 | 0.862 | 191.1 Min | 7.5 GByte |
| Moco (*) | 512 | 200 | 0.850 | 196.8 Min | 7.8 GByte |
| NNCLR (*) | 512 | 200 | 0.836 | 164.7 Min | 7.6 GByte |
| SimCLR | 512 | 200 | 0.828 | 158.2 Min | 7.5 GByte |
| SimSiam | 512 | 200 | 0.814 | 159.0 Min | 7.6 GByte |
| SwaV | 512 | 200 | 0.833 | 158.4 Min | 7.5 GByte |
------------------------------------------------------------------------------------------
| BarlowTwins | 512 | 800 | 0.857 | 641.5 Min | 7.5 GByte |
| BYOL | 512 | 800 | 0.911 | 754.2 Min | 7.8 GByte |
| DINO | 512 | 800 | 0.884 | 765.5 Min | 7.6 GByte |
| Moco (*) | 512 | 800 | 0.900 | 787.7 Min | 7.8 GByte |
| NNCLR (*) | 512 | 800 | 0.896 | 659.2 Min | 7.6 GByte |
| SimCLR | 512 | 800 | 0.875 | 632.5 Min | 7.5 GByte |
| SimSiam | 512 | 800 | 0.906 | 636.5 Min | 7.6 GByte |
| SwaV | 512 | 800 | 0.881 | 634.9 Min | 7.5 GByte |
------------------------------------------------------------------------------------------
(*): Increased size of memory bank from 4096 to 8192 to avoid too quickly
changing memory bank due to larger batch size.
The benchmarks were created on a single NVIDIA RTX A6000.
Note that this benchmark also supports a multi-GPU setup. If you run it on
a system with multiple GPUs make sure that you kill all the processes when
killing the application. Due to the way we setup this benchmark the distributed
processes might continue the benchmark if one of the nodes is killed.
If you know how to fix this don't hesitate to create an issue or PR :)
"""
import copy
import os
import time
import lightly
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision
from lightly.models import modules
from lightly.models.modules import heads
from lightly.models import utils
from lightly.utils import BenchmarkModule
from pytorch_lightning.loggers import TensorBoardLogger
# Benchmark configuration knobs (edit these before running).
logs_root_dir = os.path.join(os.getcwd(), 'benchmark_logs')

# set max_epochs to 800 for long run (takes around 10h on a single V100)
max_epochs = 1
num_workers = 8
knn_k = 200  # number of neighbours for the kNN evaluation
knn_t = 0.1  # temperature for the kNN evaluation
classes = 10  # CIFAR-10

# Set to True to enable Distributed Data Parallel training.
distributed = True

# Set to True to enable Synchronized Batch Norm (requires distributed=True).
# If enabled the batch norm is calculated over all gpus, otherwise the batch
# norm is only calculated from samples on the same gpu.
sync_batchnorm = False

# Set to True to gather features from all gpus before calculating
# the loss (requires distributed=True).
# If enabled then the loss on every gpu is calculated with features from all
# gpus, otherwise only features from the same gpu are used.
gather_distributed = True

# benchmark
n_runs = 1 # optional, increase to create multiple runs and report mean + std
batch_size = 512
lr_factor = batch_size / 128 # scales the learning rate linearly with batch size

# use a GPU if available
#gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
gpus = 4 if torch.cuda.is_available() else 0
print(gpus)

if distributed:
    distributed_backend = 'ddp'
    # reduce batch size for distributed training
    # NOTE(review): with distributed=True and no CUDA device, gpus is 0 and
    # this divides by zero — confirm distributed is only used with GPUs.
    batch_size = batch_size // gpus
else:
    distributed_backend = None
    # limit to single gpu if not using distributed training
    gpus = min(gpus, 1)
# Adapted from our MoCo Tutorial on CIFAR-10
#
# Replace the path with the location of your CIFAR-10 dataset.
# We assume we have a train folder with subfolders
# for each class and .png images inside.
#
# You can download `CIFAR-10 in folders from kaggle
# <https://www.kaggle.com/swaroopkml/cifar10-pngs-in-folders>`_.

# The dataset structure should be like this:
# cifar10/train/
#  L airplane/
#    L 10008_airplane.png
#    L ...
#  L automobile/
#  L bird/
#  L cat/
#  L deer/
#  L dog/
#  L frog/
#  L horse/
#  L ship/
#  L truck/
path_to_train = './data/cifar10/train/'
path_to_test = './data/cifar10/test/'

# Use SimCLR augmentations, additionally, disable blur for cifar10
collate_fn = lightly.data.SimCLRCollateFunction(
    input_size=32,
    gaussian_blur=0.,
)

# Multi crop augmentation for SwAV, additionally, disable blur for cifar10
swav_collate_fn = lightly.data.SwaVCollateFunction(
    crop_sizes=[32],
    crop_counts=[2], # 2 crops @ 32x32px
    crop_min_scales=[0.14],
    gaussian_blur=0,
)

# Multi crop augmentation for DINO, additionally, disable blur for cifar10
dino_collate_fn = lightly.data.DINOCollateFunction(
    global_crop_size=32,
    n_local_views=0,
    gaussian_blur=(0, 0, 0),
)

# No additional augmentations for the test set
test_transforms = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(
        mean=lightly.data.collate.imagenet_normalize['mean'],
        std=lightly.data.collate.imagenet_normalize['std'],
    )
])

# SSL training set: augmentation happens in the collate function, not here
dataset_train_ssl = lightly.data.LightlyDataset(
    input_dir=path_to_train
)

# we use test transformations for getting the feature for kNN on train data
dataset_train_kNN = lightly.data.LightlyDataset(
    input_dir=path_to_train,
    transform=test_transforms
)

dataset_test = lightly.data.LightlyDataset(
    input_dir=path_to_test,
    transform=test_transforms
)
def get_data_loaders(batch_size: int, model):
    """Helper method to create dataloaders for ssl, kNN train and kNN test

    Args:
        batch_size: Desired batch size for all dataloaders
        model: benchmark model instance; only used to select the matching
            multi-crop collate function (SwaV and DINO need their own)

    Returns:
        Tuple (dataloader_train_ssl, dataloader_train_kNN, dataloader_test)
    """
    col_fn = collate_fn
    if isinstance(model, SwaVModel):
        col_fn = swav_collate_fn
    elif isinstance(model, DINOModel):
        col_fn = dino_collate_fn
    # drop_last=True keeps every SSL batch full-sized (loss assumes it)
    dataloader_train_ssl = torch.utils.data.DataLoader(
        dataset_train_ssl,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=col_fn,
        drop_last=True,
        num_workers=num_workers
    )
    dataloader_train_kNN = torch.utils.data.DataLoader(
        dataset_train_kNN,
        batch_size=batch_size,
        shuffle=False,
        drop_last=False,
        num_workers=num_workers
    )
    dataloader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=batch_size,
        shuffle=False,
        drop_last=False,
        num_workers=num_workers
    )
    return dataloader_train_ssl, dataloader_train_kNN, dataloader_test
class MocoModel(BenchmarkModule):
    """MoCo benchmark model: momentum encoder + memory-bank NTXent loss."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        # num_splits presumably enables split batch norm when not using
        # sync batchnorm — confirm against ResNetGenerator docs
        num_splits = 0 if sync_batchnorm else 8
        resnet = lightly.models.ResNetGenerator('resnet-18', num_splits=num_splits)
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        # create a moco model based on ResNet
        self.projection_head = heads.MoCoProjectionHead(512, 512, 128)
        # momentum copies receive EMA updates only, never gradients
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)
        # create our loss with the optional memory bank
        self.criterion = lightly.loss.NTXentLoss(
            temperature=0.1,
            memory_bank_size=4096,
        )

    def forward(self, x):
        x = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(x)

    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch
        # update momentum
        utils.update_momentum(self.backbone, self.backbone_momentum, 0.99)
        utils.update_momentum(self.projection_head, self.projection_head_momentum, 0.99)

        def step(x0_, x1_):
            # query goes through the online encoder, key through the
            # momentum encoder; the key batch is shuffled/unshuffled
            x1_, shuffle = utils.batch_shuffle(x1_, distributed=distributed)
            x0_ = self.backbone(x0_).flatten(start_dim=1)
            x0_ = self.projection_head(x0_)
            x1_ = self.backbone_momentum(x1_).flatten(start_dim=1)
            x1_ = self.projection_head_momentum(x1_)
            x1_ = utils.batch_unshuffle(x1_, shuffle, distributed=distributed)
            return x0_, x1_

        # We use a symmetric loss (model trains faster at little compute overhead)
        # https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb
        loss_1 = self.criterion(*step(x0, x1))
        loss_2 = self.criterion(*step(x1, x0))
        loss = 0.5 * (loss_1 + loss_2)
        self.log('train_loss_ssl', loss)
        return loss

    def configure_optimizers(self):
        params = list(self.backbone.parameters()) + list(self.projection_head.parameters())
        optim = torch.optim.SGD(
            params,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class SimCLRModel(BenchmarkModule):
    """SimCLR benchmark model: ResNet-18 backbone with a SimCLR projection
    head, trained with the NTXent contrastive loss."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 feature extractor without its classification layer;
        # attribute names are kept because BenchmarkModule uses self.backbone
        feature_layers = list(
            lightly.models.ResNetGenerator('resnet-18').children()
        )[:-1]
        feature_layers.append(nn.AdaptiveAvgPool2d(1))
        self.backbone = nn.Sequential(*feature_layers)
        self.projection_head = heads.SimCLRProjectionHead(512, 512, 128)
        self.criterion = lightly.loss.NTXentLoss()

    def forward(self, x):
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log('train_loss_ssl', loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class SimSiamModel(BenchmarkModule):
    """SimSiam benchmark model: stop-gradient siamese network, no negatives."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = lightly.models.ResNetGenerator('resnet-18')
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        self.prediction_head = heads.SimSiamPredictionHead(2048, 512, 2048)
        # use a 2-layer projection head for cifar10 as described in the paper
        self.projection_head = heads.ProjectionHead([
            (
                512,
                2048,
                nn.BatchNorm1d(2048),
                nn.ReLU(inplace=True)
            ),
            (
                2048,
                2048,
                nn.BatchNorm1d(2048),
                None
            )
        ])
        self.criterion = lightly.loss.NegativeCosineSimilarity()

    def forward(self, x):
        f = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(f)
        p = self.prediction_head(z)
        # stop-gradient on the projection branch
        z = z.detach()
        return z, p

    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch
        z0, p0 = self.forward(x0)
        z1, p1 = self.forward(x1)
        # symmetric negative cosine similarity between crossed branches
        loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))
        self.log('train_loss_ssl', loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(),
            lr=6e-2, # no lr-scaling, results in better training stability
            momentum=0.9,
            weight_decay=5e-4
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class BarlowTwinsModel(BenchmarkModule):
    """Barlow Twins benchmark model: redundancy-reduction loss on embeddings."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = lightly.models.ResNetGenerator('resnet-18')
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        # use a 2-layer projection head for cifar10 as described in the paper
        self.projection_head = heads.ProjectionHead([
            (
                512,
                2048,
                nn.BatchNorm1d(2048),
                nn.ReLU(inplace=True)
            ),
            (
                2048,
                2048,
                None,
                None
            )
        ])
        # optionally gathers embeddings from all gpus before the loss
        self.criterion = lightly.loss.BarlowTwinsLoss(gather_distributed=gather_distributed)

    def forward(self, x):
        x = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(x)
        return z

    def training_step(self, batch, batch_index):
        (x0, x1), _, _ = batch
        z0 = self.forward(x0)
        z1 = self.forward(x1)
        loss = self.criterion(z0, z1)
        self.log('train_loss_ssl', loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class BYOLModel(BenchmarkModule):
    """BYOL benchmark model: online network predicts momentum target outputs."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = lightly.models.ResNetGenerator('resnet-18')
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        # create a byol model based on ResNet
        self.projection_head = heads.BYOLProjectionHead(512, 1024, 256)
        self.prediction_head = heads.BYOLProjectionHead(256, 1024, 256)
        # target network: EMA copies with gradients disabled
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)
        self.criterion = lightly.loss.NegativeCosineSimilarity()

    def forward(self, x):
        y = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(y)
        p = self.prediction_head(z)
        return p

    def forward_momentum(self, x):
        y = self.backbone_momentum(x).flatten(start_dim=1)
        z = self.projection_head_momentum(y)
        # targets never propagate gradients
        z = z.detach()
        return z

    def training_step(self, batch, batch_idx):
        utils.update_momentum(self.backbone, self.backbone_momentum, m=0.99)
        utils.update_momentum(self.projection_head, self.projection_head_momentum, m=0.99)
        (x0, x1), _, _ = batch
        p0 = self.forward(x0)
        z0 = self.forward_momentum(x0)
        p1 = self.forward(x1)
        z1 = self.forward_momentum(x1)
        # symmetric loss: each view's prediction matches the other's target
        loss = 0.5 * (self.criterion(p0, z1) + self.criterion(p1, z0))
        self.log('train_loss_ssl', loss)
        return loss

    def configure_optimizers(self):
        params = list(self.backbone.parameters()) \
            + list(self.projection_head.parameters()) \
            + list(self.prediction_head.parameters())
        optim = torch.optim.SGD(
            params,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class SwaVModel(BenchmarkModule):
    """SwaV benchmark model: online clustering with swapped prototype
    assignments over multi-crop views."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = lightly.models.ResNetGenerator('resnet-18')
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        self.projection_head = heads.SwaVProjectionHead(512, 512, 128)
        self.prototypes = heads.SwaVPrototypes(128, 512) # use 512 prototypes
        self.criterion = lightly.loss.SwaVLoss(sinkhorn_gather_distributed=gather_distributed)

    def forward(self, x):
        x = self.backbone(x).flatten(start_dim=1)
        x = self.projection_head(x)
        # L2-normalize before projecting onto the prototypes
        x = nn.functional.normalize(x, dim=1, p=2)
        return self.prototypes(x)

    def training_step(self, batch, batch_idx):
        # normalize the prototypes so they are on the unit sphere
        self.prototypes.normalize()
        # the multi-crop dataloader returns a list of image crops where the
        # first two items are the high resolution crops and the rest are low
        # resolution crops
        multi_crops, _, _ = batch
        multi_crop_features = [self.forward(x) for x in multi_crops]
        # split list of crop features into high and low resolution
        high_resolution_features = multi_crop_features[:2]
        low_resolution_features = multi_crop_features[2:]
        # calculate the SwaV loss
        loss = self.criterion(
            high_resolution_features,
            low_resolution_features
        )
        self.log('train_loss_ssl', loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.Adam(
            self.parameters(),
            lr=1e-3 * lr_factor,
            weight_decay=1e-6,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class NNCLRModel(BenchmarkModule):
    """NNCLR benchmark model: contrastive learning against nearest
    neighbours drawn from a feature memory bank."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = lightly.models.ResNetGenerator('resnet-18')
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        self.prediction_head = heads.NNCLRPredictionHead(256, 4096, 256)
        # use only a 2-layer projection head for cifar10
        self.projection_head = heads.ProjectionHead([
            (
                512,
                2048,
                nn.BatchNorm1d(2048),
                nn.ReLU(inplace=True)
            ),
            (
                2048,
                256,
                nn.BatchNorm1d(256),
                None
            )
        ])
        self.criterion = lightly.loss.NTXentLoss()
        self.memory_bank = modules.NNMemoryBankModule(size=4096)

    def forward(self, x):
        """Return (detached projection, prediction) for a batch of images."""
        y = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(y)
        p = self.prediction_head(z)
        z = z.detach()
        return z, p

    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch
        z0, p0 = self.forward(x0)
        z1, p1 = self.forward(x1)
        # swap each projection for its nearest neighbour in the memory bank;
        # only the second call updates the bank
        z0 = self.memory_bank(z0, update=False)
        z1 = self.memory_bank(z1, update=True)
        loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))
        # Fix: log the training loss like every other benchmark model so
        # NNCLR's curve appears in TensorBoard alongside the rest.
        self.log('train_loss_ssl', loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class DINOModel(BenchmarkModule):
    """DINO benchmark model: self-distillation between a student and an
    EMA teacher over multi-crop views."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = lightly.models.ResNetGenerator('resnet-18')
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        self.head = self._build_projection_head()
        # teacher: EMA copies of backbone and head, gradients disabled
        self.teacher_backbone = copy.deepcopy(self.backbone)
        self.teacher_head = self._build_projection_head()
        utils.deactivate_requires_grad(self.teacher_backbone)
        utils.deactivate_requires_grad(self.teacher_head)
        self.criterion = lightly.loss.DINOLoss(output_dim=2048)

    def _build_projection_head(self):
        head = heads.DINOProjectionHead(512, 2048, 256, 2048, batch_norm=True)
        # use only 2 layers for cifar10
        head.layers = heads.ProjectionHead([
            (512, 2048, nn.BatchNorm1d(2048), nn.GELU()),
            (2048, 256, None, None),
        ]).layers
        return head

    def forward(self, x):
        y = self.backbone(x).flatten(start_dim=1)
        z = self.head(y)
        return z

    def forward_teacher(self, x):
        y = self.teacher_backbone(x).flatten(start_dim=1)
        z = self.teacher_head(y)
        return z

    def training_step(self, batch, batch_idx):
        utils.update_momentum(self.backbone, self.teacher_backbone, m=0.99)
        utils.update_momentum(self.head, self.teacher_head, m=0.99)
        views, _, _ = batch
        views = [view.to(self.device) for view in views]
        # teacher only sees the global (high-resolution) crops
        global_views = views[:2]
        teacher_out = [self.forward_teacher(view) for view in global_views]
        student_out = [self.forward(view) for view in views]
        loss = self.criterion(teacher_out, student_out, epoch=self.current_epoch)
        self.log('train_loss_ssl', loss)
        return loss

    def configure_optimizers(self):
        param = list(self.backbone.parameters()) \
            + list(self.head.parameters())
        optim = torch.optim.SGD(
            param,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
# All benchmark model classes to run, in alphabetical order.
models = [
    BarlowTwinsModel,
    BYOLModel,
    DINOModel,
    MocoModel,
    NNCLRModel,
    SimCLRModel,
    SimSiamModel,
    SwaVModel,
]
bench_results = dict()
experiment_version = None
# loop through configurations and train models
for BenchmarkModel in models:
    runs = []
    model_name = BenchmarkModel.__name__.replace('Model', '')
    for seed in range(n_runs):
        # Reseed everything so repeated runs differ only by `seed`.
        pl.seed_everything(seed)
        dataloader_train_ssl, dataloader_train_kNN, dataloader_test = get_data_loaders(
            batch_size=batch_size,
            model=BenchmarkModel,
        )
        benchmark_model = BenchmarkModel(dataloader_train_kNN, classes)
        # Save logs to: {CWD}/benchmark_logs/cifar10/{experiment_version}/{model_name}/
        # If multiple runs are specified a subdirectory for each run is created.
        sub_dir = model_name if n_runs <= 1 else f'{model_name}/run{seed}'
        logger = TensorBoardLogger(
            save_dir=os.path.join(logs_root_dir, 'cifar10'),
            name='',
            sub_dir=sub_dir,
            version=experiment_version,
        )
        if experiment_version is None:
            # Save results of all models under same version directory
            experiment_version = logger.version
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            dirpath=os.path.join(logger.log_dir, 'checkpoints')
        )
        trainer = pl.Trainer(
            max_epochs=max_epochs,
            gpus=gpus,
            default_root_dir=logs_root_dir,
            strategy=distributed_backend,
            sync_batchnorm=sync_batchnorm,
            logger=logger,
            callbacks=[checkpoint_callback]
        )
        start = time.time()
        trainer.fit(
            benchmark_model,
            train_dataloaders=dataloader_train_ssl,
            val_dataloaders=dataloader_test
        )
        end = time.time()
        # Per-run metrics; max_accuracy is tracked by the BenchmarkModule base.
        run = {
            'model': model_name,
            'batch_size': batch_size,
            'epochs': max_epochs,
            'max_accuracy': benchmark_model.max_accuracy,
            'runtime': end - start,
            'gpu_memory_usage': torch.cuda.max_memory_allocated(),
            'seed': seed,
        }
        runs.append(run)
        print(run)
        # delete model and trainer + free up cuda memory
        del benchmark_model
        del trainer
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.empty_cache()
    bench_results[model_name] = runs
# print results table
# Fix: the header f-strings nested the same quote type (f"{"Model"}"),
# which is a SyntaxError on every Python before 3.12 (PEP 701).
# Single quotes inside double-quoted f-strings work everywhere and match
# the rest of the file's style.
header = (
    f"| {'Model':<13} | {'Batch Size':>10} | {'Epochs':>6} "
    f"| {'KNN Test Accuracy':>18} | {'Time':>10} | {'Peak GPU Usage':>14} |"
)
print('-' * len(header))
print(header)
print('-' * len(header))
for model, results in bench_results.items():
    runtime = np.array([result['runtime'] for result in results])
    runtime = runtime.mean() / 60  # convert to min
    accuracy = np.array([result['max_accuracy'] for result in results])
    gpu_memory_usage = np.array([result['gpu_memory_usage'] for result in results])
    gpu_memory_usage = gpu_memory_usage.max() / (1024**3)  # convert to gbyte
    # Mean +- std only makes sense when there is more than one run per model.
    if len(accuracy) > 1:
        accuracy_msg = f"{accuracy.mean():>8.3f} +- {accuracy.std():>4.3f}"
    else:
        accuracy_msg = f"{accuracy.mean():>18.3f}"
    print(
        f"| {model:<13} | {batch_size:>10} | {max_epochs:>6} "
        f"| {accuracy_msg} | {runtime:>6.1f} Min "
        f"| {gpu_memory_usage:>8.1f} GByte |",
        flush=True
    )
print('-' * len(header))
# -*- coding: utf-8 -*-
"""
Benchmark Results
Updated: 18.02.2022 (6618fa3c36b0c9f3a9d7a21bcdb00bf4fd258ee8)
------------------------------------------------------------------------------------------
| Model | Batch Size | Epochs | KNN Test Accuracy | Time | Peak GPU Usage |
------------------------------------------------------------------------------------------
| BarlowTwins | 128 | 200 | 0.835 | 193.4 Min | 2.2 GByte |
| BYOL | 128 | 200 | 0.872 | 217.0 Min | 2.3 GByte |
| DINO | 128 | 200 | 0.868 | 220.7 Min | 2.3 GByte |
| Moco | 128 | 200 | 0.838 | 229.5 Min | 2.3 GByte |
| NNCLR | 128 | 200 | 0.838 | 198.7 Min | 2.2 GByte |
| SimCLR | 128 | 200 | 0.822 | 182.7 Min | 2.2 GByte |
| SimSiam | 128 | 200 | 0.779 | 182.6 Min | 2.3 GByte |
| SwaV | 128 | 200 | 0.806 | 182.4 Min | 2.2 GByte |
------------------------------------------------------------------------------------------
| BarlowTwins | 512 | 200 | 0.827 | 160.7 Min | 7.5 GByte |
| BYOL | 512 | 200 | 0.872 | 188.5 Min | 7.7 GByte |
| DINO | 512 | 200 | 0.862 | 191.1 Min | 7.5 GByte |
| Moco (*) | 512 | 200 | 0.850 | 196.8 Min | 7.8 GByte |
| NNCLR (*) | 512 | 200 | 0.836 | 164.7 Min | 7.6 GByte |
| SimCLR | 512 | 200 | 0.828 | 158.2 Min | 7.5 GByte |
| SimSiam | 512 | 200 | 0.814 | 159.0 Min | 7.6 GByte |
| SwaV | 512 | 200 | 0.833 | 158.4 Min | 7.5 GByte |
------------------------------------------------------------------------------------------
| BarlowTwins | 512 | 800 | 0.857 | 641.5 Min | 7.5 GByte |
| BYOL | 512 | 800 | 0.911 | 754.2 Min | 7.8 GByte |
| DINO | 512 | 800 | 0.884 | 765.5 Min | 7.6 GByte |
| Moco (*) | 512 | 800 | 0.900 | 787.7 Min | 7.8 GByte |
| NNCLR (*) | 512 | 800 | 0.896 | 659.2 Min | 7.6 GByte |
| SimCLR | 512 | 800 | 0.875 | 632.5 Min | 7.5 GByte |
| SimSiam | 512 | 800 | 0.906 | 636.5 Min | 7.6 GByte |
| SwaV | 512 | 800 | 0.881 | 634.9 Min | 7.5 GByte |
------------------------------------------------------------------------------------------
(*): Increased size of memory bank from 4096 to 8192 to avoid too quickly
changing memory bank due to larger batch size.
The benchmarks were created on a single NVIDIA RTX A6000.
Note that this benchmark also supports a multi-GPU setup. If you run it on
a system with multiple GPUs make sure that you kill all the processes when
killing the application. Due to the way we setup this benchmark the distributed
processes might continue the benchmark if one of the nodes is killed.
If you know how to fix this don't hesitate to create an issue or PR :)
"""
import copy
import os
import time
import lightly
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision
from lightly.models import modules
from lightly.models.modules import heads
from lightly.models import utils
from lightly.utils import BenchmarkModule
from pytorch_lightning.loggers import TensorBoardLogger
logs_root_dir = os.path.join(os.getcwd(), 'benchmark_logs')
# set max_epochs to 800 for long run (takes around 10h on a single V100)
max_epochs = 1
num_workers = 8
knn_k = 200   # neighbours used by the kNN evaluation
knn_t = 0.1   # temperature for the kNN soft voting
classes = 10
# Set to True to enable Distributed Data Parallel training.
distributed = True
# Set to True to enable Synchronized Batch Norm (requires distributed=True).
# If enabled the batch norm is calculated over all gpus, otherwise the batch
# norm is only calculated from samples on the same gpu.
sync_batchnorm = False
# Set to True to gather features from all gpus before calculating
# the loss (requires distributed=True).
# If enabled then the loss on every gpu is calculated with features from all
# gpus, otherwise only features from the same gpu are used.
gather_distributed = True
# benchmark
n_runs = 1 # optional, increase to create multiple runs and report mean + std
batch_size = 512
lr_factor = batch_size / 128 # scales the learning rate linearly with batch size
# use a GPU if available
#gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
gpus = 4 if torch.cuda.is_available() else 0
print(gpus)
# Fix: only enable DDP when at least one GPU exists; the original divided
# by `gpus` whenever distributed was requested, which raised
# ZeroDivisionError on CPU-only machines (gpus == 0).
if distributed and gpus > 0:
    distributed_backend = 'ddp'
    # reduce batch size for distributed training
    batch_size = batch_size // gpus
else:
    distributed_backend = None
    # limit to single gpu if not using distributed training
    gpus = min(gpus, 1)
# Adapted from our MoCo Tutorial on CIFAR-10
#
# Replace the path with the location of your CIFAR-10 dataset.
# We assume we have a train folder with subfolders
# for each class and .png images inside.
#
# You can download `CIFAR-10 in folders from kaggle
# <https://www.kaggle.com/swaroopkml/cifar10-pngs-in-folders>`_.
# The dataset structure should be like this:
# cifar10/train/
#  L airplane/
#    L 10008_airplane.png
#    L ...
#  L automobile/
#  L bird/
#  L cat/
#  L deer/
#  L dog/
#  L frog/
#  L horse/
#  L ship/
#  L truck/
path_to_train = './data/cifar10/train/'
path_to_test = './data/cifar10/test/'
# Use SimCLR augmentations, additionally, disable blur for cifar10
collate_fn = lightly.data.SimCLRCollateFunction(
    input_size=32,
    gaussian_blur=0.,
)
# Multi crop augmentation for SwAV, additionally, disable blur for cifar10
swav_collate_fn = lightly.data.SwaVCollateFunction(
    crop_sizes=[32],
    crop_counts=[2], # 2 crops @ 32x32px
    crop_min_scales=[0.14],
    gaussian_blur=0,
)
# Multi crop augmentation for DINO, additionally, disable blur for cifar10
dino_collate_fn = lightly.data.DINOCollateFunction(
    global_crop_size=32,
    n_local_views=0,
    gaussian_blur=(0, 0, 0),
)
# No additional augmentations for the test set
test_transforms = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    # ImageNet normalization statistics shipped with lightly.
    torchvision.transforms.Normalize(
        mean=lightly.data.collate.imagenet_normalize['mean'],
        std=lightly.data.collate.imagenet_normalize['std'],
    )
])
# SSL training set: raw images, augmented by the collate function at load time.
dataset_train_ssl = lightly.data.LightlyDataset(
    input_dir=path_to_train
)
# we use test transformations for getting the feature for kNN on train data
dataset_train_kNN = lightly.data.LightlyDataset(
    input_dir=path_to_train,
    transform=test_transforms
)
dataset_test = lightly.data.LightlyDataset(
    input_dir=path_to_test,
    transform=test_transforms
)
def get_data_loaders(batch_size: int, model):
    """Helper method to create dataloaders for ssl, kNN train and kNN test.

    Args:
        batch_size: Desired batch size for all dataloaders.
        model: The benchmark model *class* (not an instance) about to be
            trained; used to pick the matching multi-crop collate function.
    """
    col_fn = collate_fn
    # BUG FIX: callers pass the class itself, so the previous
    # `isinstance(model, SwaVModel)` was always False and SwaV/DINO
    # silently trained with the default SimCLR augmentations.
    if issubclass(model, SwaVModel):
        col_fn = swav_collate_fn
    elif issubclass(model, DINOModel):
        col_fn = dino_collate_fn
    dataloader_train_ssl = torch.utils.data.DataLoader(
        dataset_train_ssl,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=col_fn,
        drop_last=True,
        num_workers=num_workers
    )
    dataloader_train_kNN = torch.utils.data.DataLoader(
        dataset_train_kNN,
        batch_size=batch_size,
        shuffle=False,
        drop_last=False,
        num_workers=num_workers
    )
    dataloader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=batch_size,
        shuffle=False,
        drop_last=False,
        num_workers=num_workers
    )
    return dataloader_train_ssl, dataloader_train_kNN, dataloader_test
class MocoModel(BenchmarkModule):
    """MoCo benchmark model: momentum key encoder + memory-bank NT-Xent loss."""
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        # Split batch norm emulates per-device statistics of a multi-GPU run;
        # unnecessary when sync_batchnorm already synchronizes them globally.
        num_splits = 0 if sync_batchnorm else 8
        resnet = lightly.models.ResNetGenerator('resnet-18', num_splits=num_splits)
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        # create a moco model based on ResNet
        self.projection_head = heads.MoCoProjectionHead(512, 512, 128)
        # Momentum (key) encoder: EMA copy that receives no gradients.
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)
        # create our loss with the optional memory bank
        self.criterion = lightly.loss.NTXentLoss(
            temperature=0.1,
            memory_bank_size=4096,
        )
    def forward(self, x):
        # Query-encoder features projected to 128 dimensions.
        x = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(x)
    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch
        # update momentum
        utils.update_momentum(self.backbone, self.backbone_momentum, 0.99)
        utils.update_momentum(self.projection_head, self.projection_head_momentum, 0.99)
        def step(x0_, x1_):
            # Shuffle the key batch to prevent information leakage through
            # per-device batch-norm statistics, then undo the shuffle.
            x1_, shuffle = utils.batch_shuffle(x1_, distributed=distributed)
            x0_ = self.backbone(x0_).flatten(start_dim=1)
            x0_ = self.projection_head(x0_)
            x1_ = self.backbone_momentum(x1_).flatten(start_dim=1)
            x1_ = self.projection_head_momentum(x1_)
            x1_ = utils.batch_unshuffle(x1_, shuffle, distributed=distributed)
            return x0_, x1_
        # We use a symmetric loss (model trains faster at little compute overhead)
        # https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb
        loss_1 = self.criterion(*step(x0, x1))
        loss_2 = self.criterion(*step(x1, x0))
        loss = 0.5 * (loss_1 + loss_2)
        self.log('train_loss_ssl', loss)
        return loss
    def configure_optimizers(self):
        params = list(self.backbone.parameters()) + list(self.projection_head.parameters())
        optim = torch.optim.SGD(
            params,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class SimCLRModel(BenchmarkModule):
    """SimCLR benchmark: ResNet-18 backbone + projection head, NT-Xent loss."""
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 trunk without its classification layer.
        resnet = lightly.models.ResNetGenerator('resnet-18')
        trunk = list(resnet.children())[:-1]
        trunk.append(nn.AdaptiveAvgPool2d(1))
        self.backbone = nn.Sequential(*trunk)
        self.projection_head = heads.SimCLRProjectionHead(512, 512, 128)
        self.criterion = lightly.loss.NTXentLoss()
    def forward(self, x):
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)
    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log('train_loss_ssl', loss)
        return loss
    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4
        )
        cosine = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [cosine]
class SimSiamModel(BenchmarkModule):
    """SimSiam benchmark: stop-gradient siamese network with a prediction MLP."""
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 trunk without its classification layer.
        resnet = lightly.models.ResNetGenerator('resnet-18')
        trunk = list(resnet.children())[:-1]
        trunk.append(nn.AdaptiveAvgPool2d(1))
        self.backbone = nn.Sequential(*trunk)
        self.prediction_head = heads.SimSiamPredictionHead(2048, 512, 2048)
        # use a 2-layer projection head for cifar10 as described in the paper
        self.projection_head = heads.ProjectionHead([
            (512, 2048, nn.BatchNorm1d(2048), nn.ReLU(inplace=True)),
            (2048, 2048, nn.BatchNorm1d(2048), None),
        ])
        self.criterion = lightly.loss.NegativeCosineSimilarity()
    def forward(self, x):
        features = self.backbone(x).flatten(start_dim=1)
        projection = self.projection_head(features)
        prediction = self.prediction_head(projection)
        # Stop-gradient on the projection branch, as in the paper.
        return projection.detach(), prediction
    def training_step(self, batch, batch_idx):
        (view_a, view_b), _, _ = batch
        z_a, p_a = self.forward(view_a)
        z_b, p_b = self.forward(view_b)
        loss = 0.5 * (self.criterion(z_a, p_b) + self.criterion(z_b, p_a))
        self.log('train_loss_ssl', loss)
        return loss
    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=6e-2, # no lr-scaling, results in better training stability
            momentum=0.9,
            weight_decay=5e-4
        )
        cosine = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [cosine]
class BarlowTwinsModel(BenchmarkModule):
    """Barlow Twins benchmark: redundancy-reduction loss on twin embeddings."""
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 trunk without its classification layer.
        resnet = lightly.models.ResNetGenerator('resnet-18')
        trunk = list(resnet.children())[:-1]
        trunk.append(nn.AdaptiveAvgPool2d(1))
        self.backbone = nn.Sequential(*trunk)
        # use a 2-layer projection head for cifar10 as described in the paper
        self.projection_head = heads.ProjectionHead([
            (512, 2048, nn.BatchNorm1d(2048), nn.ReLU(inplace=True)),
            (2048, 2048, None, None),
        ])
        self.criterion = lightly.loss.BarlowTwinsLoss(gather_distributed=gather_distributed)
    def forward(self, x):
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)
    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log('train_loss_ssl', loss)
        return loss
    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4
        )
        cosine = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [cosine]
class BYOLModel(BenchmarkModule):
    """BYOL benchmark: online network with predictor vs. momentum target network."""
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 trunk without its classification layer.
        resnet = lightly.models.ResNetGenerator('resnet-18')
        trunk = list(resnet.children())[:-1]
        trunk.append(nn.AdaptiveAvgPool2d(1))
        self.backbone = nn.Sequential(*trunk)
        # create a byol model based on ResNet
        self.projection_head = heads.BYOLProjectionHead(512, 1024, 256)
        self.prediction_head = heads.BYOLProjectionHead(256, 1024, 256)
        # Target (momentum) network: EMA copy, never updated by gradients.
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)
        self.criterion = lightly.loss.NegativeCosineSimilarity()
    def forward(self, x):
        embedding = self.backbone(x).flatten(start_dim=1)
        projection = self.projection_head(embedding)
        return self.prediction_head(projection)
    def forward_momentum(self, x):
        embedding = self.backbone_momentum(x).flatten(start_dim=1)
        projection = self.projection_head_momentum(embedding)
        return projection.detach()
    def training_step(self, batch, batch_idx):
        # EMA update of the target network before this step's forward passes.
        utils.update_momentum(self.backbone, self.backbone_momentum, m=0.99)
        utils.update_momentum(self.projection_head, self.projection_head_momentum, m=0.99)
        (view_a, view_b), _, _ = batch
        p_a = self.forward(view_a)
        z_a = self.forward_momentum(view_a)
        p_b = self.forward(view_b)
        z_b = self.forward_momentum(view_b)
        loss = 0.5 * (self.criterion(p_a, z_b) + self.criterion(p_b, z_a))
        self.log('train_loss_ssl', loss)
        return loss
    def configure_optimizers(self):
        trainable = (
            list(self.backbone.parameters())
            + list(self.projection_head.parameters())
            + list(self.prediction_head.parameters())
        )
        optimizer = torch.optim.SGD(
            trainable,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        cosine = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [cosine]
class SwaVModel(BenchmarkModule):
    """SwaV benchmark: online clustering with swapped prototype assignments."""
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = lightly.models.ResNetGenerator('resnet-18')
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        self.projection_head = heads.SwaVProjectionHead(512, 512, 128)
        self.prototypes = heads.SwaVPrototypes(128, 512) # use 512 prototypes
        self.criterion = lightly.loss.SwaVLoss(sinkhorn_gather_distributed=gather_distributed)
    def forward(self, x):
        # Project, L2-normalize, then score against the prototype vectors.
        x = self.backbone(x).flatten(start_dim=1)
        x = self.projection_head(x)
        x = nn.functional.normalize(x, dim=1, p=2)
        return self.prototypes(x)
    def training_step(self, batch, batch_idx):
        # normalize the prototypes so they are on the unit sphere
        # (must happen before the forward passes below).
        self.prototypes.normalize()
        # the multi-crop dataloader returns a list of image crops where the
        # first two items are the high resolution crops and the rest are low
        # resolution crops
        multi_crops, _, _ = batch
        multi_crop_features = [self.forward(x) for x in multi_crops]
        # split list of crop features into high and low resolution
        high_resolution_features = multi_crop_features[:2]
        low_resolution_features = multi_crop_features[2:]
        # calculate the SwaV loss
        loss = self.criterion(
            high_resolution_features,
            low_resolution_features
        )
        self.log('train_loss_ssl', loss)
        return loss
    def configure_optimizers(self):
        optim = torch.optim.Adam(
            self.parameters(),
            lr=1e-3 * lr_factor,
            weight_decay=1e-6,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class NNCLRModel(BenchmarkModule):
    """NNCLR benchmark: SimCLR-style training where the positives are
    replaced by nearest neighbours drawn from a feature memory bank.

    Fix: training_step now logs 'train_loss_ssl' like every other
    benchmark model, so the loss curve appears in TensorBoard.
    """
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = lightly.models.ResNetGenerator('resnet-18')
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        self.prediction_head = heads.NNCLRPredictionHead(256, 4096, 256)
        # use only a 2-layer projection head for cifar10
        self.projection_head = heads.ProjectionHead([
            (
                512,
                2048,
                nn.BatchNorm1d(2048),
                nn.ReLU(inplace=True)
            ),
            (
                2048,
                256,
                nn.BatchNorm1d(256),
                None
            )
        ])
        self.criterion = lightly.loss.NTXentLoss()
        self.memory_bank = modules.NNMemoryBankModule(size=4096)
    def forward(self, x):
        # Returns (detached projection, prediction) for one augmented view.
        y = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(y)
        p = self.prediction_head(z)
        z = z.detach()
        return z, p
    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch
        z0, p0 = self.forward(x0)
        z1, p1 = self.forward(x1)
        # Swap each projection for its nearest neighbour in the memory bank;
        # only the second call updates the bank to avoid double insertion.
        z0 = self.memory_bank(z0, update=False)
        z1 = self.memory_bank(z1, update=True)
        loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))
        # Log the SSL loss for consistency with the other benchmark models.
        self.log('train_loss_ssl', loss)
        return loss
    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class DINOModel(BenchmarkModule):
    """DINO benchmark: self-distillation between a student and an EMA teacher."""
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = lightly.models.ResNetGenerator('resnet-18')
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1)
        )
        self.head = self._build_projection_head()
        # Teacher network is an EMA copy of the student; it receives no
        # gradients and is updated only in training_step.
        self.teacher_backbone = copy.deepcopy(self.backbone)
        self.teacher_head = self._build_projection_head()
        utils.deactivate_requires_grad(self.teacher_backbone)
        utils.deactivate_requires_grad(self.teacher_head)
        self.criterion = lightly.loss.DINOLoss(output_dim=2048)
    def _build_projection_head(self):
        # Build the standard DINO head, then shrink it to 2 layers for CIFAR-10.
        head = heads.DINOProjectionHead(512, 2048, 256, 2048, batch_norm=True)
        # use only 2 layers for cifar10
        head.layers = heads.ProjectionHead([
            (512, 2048, nn.BatchNorm1d(2048), nn.GELU()),
            (2048, 256, None, None),
        ]).layers
        return head
    def forward(self, x):
        # Student forward pass.
        y = self.backbone(x).flatten(start_dim=1)
        z = self.head(y)
        return z
    def forward_teacher(self, x):
        # Teacher forward pass (weights frozen in __init__).
        y = self.teacher_backbone(x).flatten(start_dim=1)
        z = self.teacher_head(y)
        return z
    def training_step(self, batch, batch_idx):
        # EMA update must run before this step's forward passes.
        utils.update_momentum(self.backbone, self.teacher_backbone, m=0.99)
        utils.update_momentum(self.head, self.teacher_head, m=0.99)
        views, _, _ = batch
        views = [view.to(self.device) for view in views]
        # Teacher only sees the first two (global) crops; student sees all.
        global_views = views[:2]
        teacher_out = [self.forward_teacher(view) for view in global_views]
        student_out = [self.forward(view) for view in views]
        loss = self.criterion(teacher_out, student_out, epoch=self.current_epoch)
        self.log('train_loss_ssl', loss)
        return loss
    def configure_optimizers(self):
        param = list(self.backbone.parameters()) \
            + list(self.head.parameters())
        optim = torch.optim.SGD(
            param,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
# All benchmark model classes to run, in alphabetical order.
models = [
    BarlowTwinsModel,
    BYOLModel,
    DINOModel,
    MocoModel,
    NNCLRModel,
    SimCLRModel,
    SimSiamModel,
    SwaVModel,
]
bench_results = dict()
experiment_version = None
# loop through configurations and train models
for BenchmarkModel in models:
    runs = []
    model_name = BenchmarkModel.__name__.replace('Model', '')
    for seed in range(n_runs):
        # Reseed everything so repeated runs differ only by `seed`.
        pl.seed_everything(seed)
        dataloader_train_ssl, dataloader_train_kNN, dataloader_test = get_data_loaders(
            batch_size=batch_size,
            model=BenchmarkModel,
        )
        benchmark_model = BenchmarkModel(dataloader_train_kNN, classes)
        # Save logs to: {CWD}/benchmark_logs/cifar10/{experiment_version}/{model_name}/
        # If multiple runs are specified a subdirectory for each run is created.
        sub_dir = model_name if n_runs <= 1 else f'{model_name}/run{seed}'
        logger = TensorBoardLogger(
            save_dir=os.path.join(logs_root_dir, 'cifar10'),
            name='',
            sub_dir=sub_dir,
            version=experiment_version,
        )
        if experiment_version is None:
            # Save results of all models under same version directory
            experiment_version = logger.version
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            dirpath=os.path.join(logger.log_dir, 'checkpoints')
        )
        trainer = pl.Trainer(
            max_epochs=max_epochs,
            gpus=gpus,
            default_root_dir=logs_root_dir,
            strategy=distributed_backend,
            sync_batchnorm=sync_batchnorm,
            logger=logger,
            callbacks=[checkpoint_callback]
        )
        start = time.time()
        trainer.fit(
            benchmark_model,
            train_dataloaders=dataloader_train_ssl,
            val_dataloaders=dataloader_test
        )
        end = time.time()
        # Per-run metrics; max_accuracy is tracked by the BenchmarkModule base.
        run = {
            'model': model_name,
            'batch_size': batch_size,
            'epochs': max_epochs,
            'max_accuracy': benchmark_model.max_accuracy,
            'runtime': end - start,
            'gpu_memory_usage': torch.cuda.max_memory_allocated(),
            'seed': seed,
        }
        runs.append(run)
        print(run)
        # delete model and trainer + free up cuda memory
        del benchmark_model
        del trainer
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.empty_cache()
    bench_results[model_name] = runs
# print results table
header = (
    f"| {'Model':<13} | {'Batch Size':>10} | {'Epochs':>6} "
    f"| {'KNN Test Accuracy':>18} | {'Time':>10} | {'Peak GPU Usage':>14} |"
)
print('-' * len(header))
print(header)
print('-' * len(header))
for model, results in bench_results.items():
    runtime = np.array([result['runtime'] for result in results])
    runtime = runtime.mean() / 60 # convert to min
    accuracy = np.array([result['max_accuracy'] for result in results])
    gpu_memory_usage = np.array([result['gpu_memory_usage'] for result in results])
    gpu_memory_usage = gpu_memory_usage.max() / (1024**3) # convert to gbyte
    # Mean +- std only makes sense when there is more than one run per model.
    if len(accuracy) > 1:
        accuracy_msg = f"{accuracy.mean():>8.3f} +- {accuracy.std():>4.3f}"
    else:
        accuracy_msg = f"{accuracy.mean():>18.3f}"
    print(
        f"| {model:<13} | {batch_size:>10} | {max_epochs:>6} "
        f"| {accuracy_msg} | {runtime:>6.1f} Min "
        f"| {gpu_memory_usage:>8.1f} GByte |",
        flush=True
    )
print('-' * len(header))
|
#%% First
import numpy as np
import json
import os
import pandas as pd
import requests
from contextlib import closing
import time
from datetime import datetime
from requests.models import HTTPBasicAuth
import seaborn as sns
from matplotlib import pyplot as plt
from requests import get
from requests_futures.sessions import FuturesSession
from bs4 import BeautifulSoup
from dotenv import load_dotenv, dotenv_values
from requests_oauthlib import OAuth2, OAuth2Session
#%%
# Work relative to this script's directory so the .env files resolve.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
# OAuth client credentials and the one-time authorization code.
env_vars = dotenv_values('config.env')
client_id = env_vars['id']
client_secret = env_vars['secret']
code = env_vars['code']
callback_uri = "http://localhost:8080"
authorize_url = "https://www.warcraftlogs.com/oauth/authorize"
token_url = "https://www.warcraftlogs.com/oauth/token"
# One-time interactive flow used to obtain the initial refresh token:
# warcraftlogs = OAuth2Session(client_id, redirect_uri=callback_uri)
# authorization_url, state = warcraftlogs.authorization_url(authorize_url,
#    access_type="offline")
# token = warcraftlogs.fetch_token(token_url = token_url,
#    auth = HTTPBasicAuth(client_id, client_secret),
#    code = code)
# access_token = token['access_token']
# refresh_token = token['refresh_token']
# with open('refresh_token.env', 'w') as f:
#    f.write('refresh_token = '+str(refresh_token)+'\nacces_token = '+str(access_token))
if os.path.isfile('refresh_token.env'):
    env_vars = dotenv_values('refresh_token.env')
    refresh_token = env_vars['refresh_token']
    access_token = env_vars['access_token']
else:
    # BUG FIX: `raise 'string'` is a TypeError in Python 3 — the intended
    # message never reached the user. Raise a real exception instead.
    raise RuntimeError('Get your fresh token dumby')
# print(refresh_token)
# Probe the API with the saved access token; if anything fails, refresh the
# token via OAuth and retry the same query once.
# NOTE(review): the bare `except` below catches *everything* (including
# KeyboardInterrupt); narrowing it to request/auth errors would be safer.
try:
    warcraftlogs = OAuth2Session(client_id = client_id)
    graphql_endpoint = "https://www.warcraftlogs.com/api/v2/client"
    headers = {"Authorization": f"Bearer {access_token}"}
    query = """{
        reportData{
            reports(guildID: 95321, endTime: 1622872800000.0, startTime: 1605855600000.0){
                data{
                    fights(difficulty: 5){
                        name
                        averageItemLevel
                        # friendlyPlayers
                        id
                    }
                }
            }
        }
    }"""
    r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
except:
    # Refresh the access token and persist the new token pair for next run.
    token = warcraftlogs.refresh_token(token_url = token_url,
        auth = HTTPBasicAuth(client_id, client_secret),
        refresh_token = refresh_token)
    access_token = token['access_token']
    refresh_token = token['refresh_token']
    with open('refresh_token.env', 'w') as f:
        f.write('refresh_token = '+str(refresh_token)+'\naccess_token = '+str(access_token))
    # Retry the probe query with the fresh token.
    warcraftlogs = OAuth2Session(client_id = client_id)
    graphql_endpoint = "https://www.warcraftlogs.com/api/v2/client"
    headers = {"Authorization": f"Bearer {access_token}"}
    query = """{
        reportData{
            reports(guildID: 95321, endTime: 1622872800000.0, startTime: 1605855600000.0){
                data{
                    fights(difficulty: 5){
                        name
                        averageItemLevel
                        # friendlyPlayers
                        id
                    }
                }
            }
        }
    }"""
    r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
# Guild roster scraped earlier by the get_guild_list step.
with open('..//get_guild_list/guild_list_hungering.json', encoding='utf-8') as f:
    guilds = json.load(f)
#%%
def is_good_response_json(resp):
    """Return True if *resp* is an HTTP 200 response whose Content-Type
    header advertises JSON, False otherwise.

    (Fixes the original docstring, which incorrectly said "HTML".)
    """
    content_type = resp.headers['Content-Type'].lower()
    # `.lower()` never returns None, so the original `is not None` guard
    # was dead code; `'json' in ...` replaces the `.find(...) > -1` idiom.
    return resp.status_code == 200 and 'json' in content_type
def get_guild_id(guild):
    """Return the numeric WarcraftLogs id for *guild*.

    Uses guild['id'] when present and convertible to int; otherwise
    resolves the id via the guildData GraphQL query from the guild's
    name, realm slug and region.
    """
    try:
        guild_id = int(guild['id'])
    except (KeyError, TypeError, ValueError):
        # No usable cached id — look it up remotely. (Narrowed from a bare
        # `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
        query = """
        {
            guildData{
                guild(name: "%s", serverSlug: "%s", serverRegion: "%s"){
                    id
                }
            }
        }
        """ % (guild['name'], guild['realm'].replace(' ', '-'), guild['region'])
        r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
        guild_id = r.json()['data']['guildData']['guild']['id']
    return guild_id
def get_log_list(guild):
    """Return Castle Nathria (zoneID 26) report stubs for *guild* via the v2 GraphQL API.

    Side effect: caches the resolved numeric id back onto guild['id'].
    """
    guild['id'] = get_guild_id(guild)
    # Fix: the original nested double quotes inside a double-quoted f-string
    # ({guild["id"]}), which is a SyntaxError before Python 3.12 (PEP 701).
    query = (
        "{"
        "reportData{"
        f" reports(guildID: {guild['id']}, zoneID: 26){{"
        " data{"
        " code"
        " startTime"
        " endTime"
        " }"
        " }"
        "}"
        "}"
    )
    r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
    log_list = r.json()['data']['reportData']['reports']['data']
    return log_list
def get_log_list_apiv1(guild):
    """Fetch the guild's Castle Nathria (zone 26) reports via the v1 REST API,
    returning them with v2-style field names (code/startTime/endTime)."""
    # NOTE: api key file path has a trailing dot — presumably intentional;
    # verify the file actually exists under that name.
    with open('..//..//Warcraftlogs//api_key.txt.') as f:
        api_key = f.readlines()[0]
    realm_slug = guild['realm'].replace(' ', '-').replace("'", "")
    link = (
        "https://www.warcraftlogs.com:443/v1/reports/guild/"
        + guild['name'] + "/" + realm_slug + "/"
        + guild['region'] + "?api_key=" + api_key
    )
    guild_logs = requests.get(link)
    log_list = guild_logs.json()
    # Keep only Castle Nathria reports, renamed to the v2 field names.
    return [
        {'code': item['id'], 'startTime': item['start'], 'endTime': item['end']}
        for item in log_list
        if item['zone'] == 26
    ]
def get_pulls(log, guild):
    """Return every mythic (difficulty 5) fight of one report, each entry
    tagged with its parent report code under 'log_code'."""
    log_id = log['code']
    query = """
    {
        reportData{
            report(code: "%s"){
                fights(difficulty: 5){
                    name
                    id
                    averageItemLevel
                    bossPercentage
                    kill
                    startTime
                    endTime
                }
            }
        }
    }
    """ % (log_id)
    r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
    fight_list = r.json()['data']['reportData']['report']['fights']
    # Tag each fight with its parent report code so later lookups can
    # reconstruct the table query.
    for fight in fight_list:
        fight['log_code'] = log_id
    return fight_list
def get_fight_info(fight, guild, unique_id):
code = fight['log_code']
fight_ID = fight['id']
start_time = fight['start_time']
end_time = fight['end_time']
query = """
{
reportData{
report(code: "%s"){
table(fightIDs: %s, startTime: %s, endTime: %s)
}
}
}
""" % (code, fight_ID, str(start_time), str(end_time))
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
table = r.json()['data']['reportData']['report']['table']['data']
comp = table['composition']
roles = table['playerDetails']
player_list = []
for role in roles:
players = roles[role]
for player in players:
try:
gear_ilvl = [piece['itemLevel'] for piece in player['combatantInfo']['gear']]
ilvl = np.mean(gear_ilvl)
except:
try:
ilvl = player['minItemLevel']
except:
ilvl = np.NaN
try:
covenant = player['combatantInfo']['covenantID']
except:
covenant = np.NaN
try:
spec = player['specs'][0]
except:
spec = np.NaN
try:
stats = player['combatantInfo']['stats']
primaries = ['Agility','Intellect','Strength']
for primary in primaries:
if primary in stats.keys():
break
primary= stats[primary]['min']
mastery= stats['Mastery']['min']
crit= stats['Crit']['min']
haste= stats['Haste']['min']
vers= stats['Versatility']['min']
stamina= stats['Stamina']['min']
except:
primary = np.NaN
mastery = np.NaN
crit = np.NaN
haste = np.NaN
vers = np.NaN
stamina = np.NaN
player_info= {'unique_id': unique_id,
'class': player['type'],
'spec': spec,
'role': role,
'ilvl': ilvl,
'covenant': covenant,
'primary': primary,
'mastery': mastery,
'crit': crit,
'haste': haste,
'vers': vers,
'stamina': stamina,
'boss_name': fight['name']}
player_list.append(player_info)
return player_list
# %% Setup the SQL Stuff
from sqlalchemy import create_engine
import psycopg2
server = 'localhost'
database = 'nathria_prog'
username = 'postgres'
password = 'postgres'
if 'conn' in locals():
conn.close()
engine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog')
conn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password)
curs = conn.cursor()
curs.execute("select exists(select * from information_schema.tables where table_name=%s)",\
('nathria_prog_v2',))
if curs.fetchone()[0]:
curs.execute('select distinct guild_name from nathria_prog_v2')
already_added_guilds = [item[0] for item in curs.fetchall()]
already_added_length = len(already_added_guilds)
else:
already_added_guilds = []
already_added_length = 0
def check_in_sql(fight):
unique_id = fight['unique_id']
curs.execute("select * from nathria_prog_v2 where unique_id = '%s'" % (unique_id))
if curs.fetchone() is None:
check_one = False
else:
check_one = True
curs.execute("select * from nathria_prog_v2 where start_time > %s and end_time < %s and guild_name = '%s';" \
% (fight['start_time']-60, fight['end_time']+60, fight['guild_name']))
if curs.fetchone() is None:
check_two = False
else:
check_two = True
check = check_one or check_two
return check
def add_to_sql(curs, table, info):
placeholders = ', '.join(['%s'] * len(info))
columns = ', '.join(info.keys())
sql = "INSERT INTO %s ( %s ) VALUES ( %s )" % (str(table), columns, placeholders)
curs.execute(sql, list(info.values()))
#%% This is for futures use
def make_logs_query(log):
log_id = log['code']
query = """
{
reportData{
report(code: "%s"){
fights(difficulty: 5){
name
id
averageItemLevel
bossPercentage
kill
startTime
endTime
}
}
}
}
""" % (log_id)
return query
def get_log_args(log, graphql_endpoint, headers):
args = {'url': graphql_endpoint,
'json': {'query': make_logs_query(log)},
'headers': headers}
return args
def get_fight_list(log_list, graphql_endpoint, headers):
session = FuturesSession(max_workers = 2)
futures = [session.post(**get_log_args(log, graphql_endpoint, headers)) for log in log_list]
fights_list = []
for q, item in enumerate(futures):
result = item.result()
if result.status_code!=200:
print(result.status_code)
fights = result.json()['data']['reportData']['report']['fights']
for k, fight in enumerate(fights):
fight['log_code'] = log_list[q]['code']
fight['log_start'] = log_list[q]['startTime']
fight['log_end'] = log_list[q]['endTime']
fight['unique_id'] = log_list[q]['code'] + '_' + str(fight['id'])
fights_list.extend([fight])
return fights_list
def get_prog_pulls(df, boss_name):
if type(df.iloc[0]['start_time']) != 'int':
df['start_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['start_time']]
df['end_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['end_time']]
kills_df = df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5').query('kill == True')
first_kill_time = min(kills_df['start_time'])
return df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5').query('start_time <= '+str(first_kill_time))
def add_pull_num(df):
df = df.sort_values(by = ['start_time'])
df.insert(loc = 0, column = 'pull_num', value = np.arange(len(df))+1)
return df
def combine_boss_df(df):
boss_names = [
'Shriekwing', \
'Huntsman Altimor',
'Hungering Destroyer', \
"Sun King's Salvation",
"Artificer Xy'mox", \
'Lady Inerva Darkvein', \
'The Council of Blood', \
'Sludgefist', \
'Stone Legion Generals', \
'Sire Denathrius']
combine_df = pd.DataFrame()
for k, boss_name in enumerate(np.unique(df['name'])):
if boss_name in boss_names and boss_name in np.unique(df['name']):
combine_df = combine_df.append(add_pull_num(df.copy(deep = True).query('name == "'+boss_name+'"')))
combine_df = combine_df.reset_index().drop(columns = 'index')
return combine_df
n_start = 3500
for gnum, guild in enumerate(guilds[n_start:]):
if guild['name'] in already_added_guilds:
continue
# log_list = get_log_list(guild)
try:
log_list = get_log_list_apiv1(guild)
if len(log_list) == 0:
print(f'Log list empty for {guild['name']}')
fightdf = pd.DataFrame()
playerdf = pd.DataFrame()
print(f'Parsing guild {guild['name']} (#{gnum+1+n_start} of {len(guilds)})')
fight_list = get_fight_list(log_list, graphql_endpoint, headers)
fightdf = pd.DataFrame()
for q, fight in enumerate(fight_list):
fight['boss_perc'] = fight.pop('bossPercentage')
fight['average_item_level'] = fight.pop('averageItemLevel')
fight['unique_id'] = fight['log_code'] + '_' + str(fight['id'])
fight['start_time'] = fight.pop('startTime')
fight['end_time'] = fight.pop('endTime')
fight['guild_name'] = guild['name']
fight['guild_realm'] = guild['realm']
fight['guild_region'] = guild['region']
fightdf = fightdf.append(pd.DataFrame(fight, index=['i',]))
fightdf = combine_boss_df(fightdf.copy(deep = True))
fightdf.to_sql('nathria_prog_v2', engine, if_exists='append')
if len(fightdf)>1:
print(f'Adding to SQL guild {guild['name']}')
time.sleep(3)
except:
continue
#%%
asdfasdf
from sqlalchemy import create_engine
import psycopg2
server = 'localhost'
database = 'nathria_prog'
username = 'postgres'
password = 'postgres'
if 'conn' in locals():
conn.close()
engine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog')
conn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password)
curs = conn.cursor()
curs.execute("select exists(select * from information_schema.tables where table_name=%s)",\
('nathria_prog_v2',))
if curs.fetchone()[0]:
curs.execute('select distinct guild_name from nathria_prog_v2')
logged_guilds = [item[0] for item in curs.fetchall()]
else:
logged_guilds = []
def make_fights_query(fight):
code = fight['log_code']
fight_ID = fight['id']
start_time = fight['start_time']
end_time = fight['end_time']
query = """
{
reportData{
report(code: "%s"){
table(fightIDs: %s, startTime: %s, endTime: %s)
}
}
}
""" % (code, fight_ID, str(start_time), str(end_time))
return query
def get_fight_args(log, graphql_endpoint, headers):
args = {'url': graphql_endpoint,
'json': {'query': make_fights_query(log)},
'headers': headers}
return args
def get_fight_table(fights_list, graphql_endpoint, headers):
session = FuturesSession(max_workers = 2)
futures = [session.post(**get_fight_args(fight, graphql_endpoint, headers)) for fight in fights_list]
fights_tables = []
for k, item in enumerate(futures):
result = item.result()
if result.status_code!=200:
print(result.status_code)
# if is_good_response_json(item.result()):
try:
fights_tables.append(result.json()['data']['reportData']['report']['table']['data'])
except:
pass
return fights_tables
def parse_fight_table(table, boss_name, unique_id, guild_name):
comp = table['composition']
roles = table['playerDetails']
player_list = []
for role in roles:
players = roles[role]
for player in players:
try:
gear_ilvl = [piece['itemLevel'] for piece in player['combatantInfo']['gear']]
ilvl = np.mean(gear_ilvl)
except:
try:
ilvl = player['minItemLevel']
except:
ilvl = np.NaN
try:
covenant = player['combatantInfo']['covenantID']
except:
covenant = np.NaN
try:
spec = player['specs'][0]
except:
spec = np.NaN
try:
stats = player['combatantInfo']['stats']
primaries = ['Agility','Intellect','Strength']
for primary in primaries:
if primary in stats.keys():
break
primary= stats[primary]['min']
mastery= stats['Mastery']['min']
crit= stats['Crit']['min']
haste= stats['Haste']['min']
vers= stats['Versatility']['min']
stamina= stats['Stamina']['min']
except:
primary = np.NaN
mastery = np.NaN
crit = np.NaN
haste = np.NaN
vers = np.NaN
stamina = np.NaN
player_info= {'unique_id': unique_id,
'name': player['name'],
'guild_name': guild_name,
'server': player['server'],
'class': player['type'],
'spec': spec,
'role': role,
'ilvl': ilvl,
'covenant': covenant,
'primary': primary,
'mastery': mastery,
'crit': crit,
'haste': haste,
'vers': vers,
'stamina': stamina,
'boss_name': boss_name}
player_list.append(player_info)
return player_list
for guild_name in logged_guilds:
curs.execute(f"select * from nathria_prog_v2 where guild_name = '{guild_name}'")
pulls = pd.DataFrame(curs.fetchall())
pulls.columns = [desc[0] for desc in curs.description]
fights_list = pulls.to_dict('records')
curs.execute(f"select distinct unique_id from nathria_prog_v2_players where guild_name = '{guild_name}'")
added_fights = [item[0] for item in curs.fetchall()]
fight_list = [fight for fight in fights_list if fight['unique_id'] not in added_fights]
if len(fight_list)>1:
fights_tables = get_fight_table(fights_list, graphql_endpoint, headers)
playerdf = pd.DataFrame()
for q, table in enumerate(fights_tables):
unique_id = fights_list[q]['unique_id']
guild_name = guild_name
player_info = parse_fight_table(table, fights_list[q]['name'], unique_id, guild_name)
for player in player_info:
for player in player_info:
playerdf = playerdf.append(pd.DataFrame(player, index=['i',]))
if len(playerdf)>1:
print(f'Adding to SQL guild player info {guild['name']}')
playerdf.to_sql('nathria_prog_v2_players', engine, if_exists='append') | #%% First
import numpy as np
import json
import os
import pandas as pd
import requests
from contextlib import closing
import time
from datetime import datetime
from requests.models import HTTPBasicAuth
import seaborn as sns
from matplotlib import pyplot as plt
from requests import get
from requests_futures.sessions import FuturesSession
from bs4 import BeautifulSoup
from dotenv import load_dotenv, dotenv_values
from requests_oauthlib import OAuth2, OAuth2Session
#%%
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
env_vars = dotenv_values('config.env')
client_id = env_vars['id']
client_secret = env_vars['secret']
code = env_vars['code']
callback_uri = "http://localhost:8080"
authorize_url = "https://www.warcraftlogs.com/oauth/authorize"
token_url = "https://www.warcraftlogs.com/oauth/token"
# warcraftlogs = OAuth2Session(client_id, redirect_uri=callback_uri)
# authorization_url, state = warcraftlogs.authorization_url(authorize_url,
# access_type="offline")
# token = warcraftlogs.fetch_token(token_url = token_url,
# auth = HTTPBasicAuth(client_id, client_secret),
# code = code)
# access_token = token['access_token']
# refresh_token = token['refresh_token']
# with open('refresh_token.env', 'w') as f:
# f.write('refresh_token = '+str(refresh_token)+'\nacces_token = '+str(access_token))
if os.path.isfile('refresh_token.env'):
env_vars = dotenv_values('refresh_token.env')
refresh_token = env_vars['refresh_token']
access_token = env_vars['access_token']
else:
raise 'Get your fresh token dumby'
# print(refresh_token)
try:
warcraftlogs = OAuth2Session(client_id = client_id)
graphql_endpoint = "https://www.warcraftlogs.com/api/v2/client"
headers = {"Authorization": f"Bearer {access_token}"}
query = """{
reportData{
reports(guildID: 95321, endTime: 1622872800000.0, startTime: 1605855600000.0){
data{
fights(difficulty: 5){
name
averageItemLevel
# friendlyPlayers
id
}
}
}
}
}"""
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
except:
token = warcraftlogs.refresh_token(token_url = token_url,
auth = HTTPBasicAuth(client_id, client_secret),
refresh_token = refresh_token)
access_token = token['access_token']
refresh_token = token['refresh_token']
with open('refresh_token.env', 'w') as f:
f.write('refresh_token = '+str(refresh_token)+'\naccess_token = '+str(access_token))
warcraftlogs = OAuth2Session(client_id = client_id)
graphql_endpoint = "https://www.warcraftlogs.com/api/v2/client"
headers = {"Authorization": f"Bearer {access_token}"}
query = """{
reportData{
reports(guildID: 95321, endTime: 1622872800000.0, startTime: 1605855600000.0){
data{
fights(difficulty: 5){
name
averageItemLevel
# friendlyPlayers
id
}
}
}
}
}"""
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
with open('..//get_guild_list/guild_list_hungering.json', encoding='utf-8') as f:
guilds = json.load(f)
#%%
def is_good_response_json(resp):
"""
Returns True if the response seems to be HTML, False otherwise.
"""
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200
and content_type is not None
and content_type.find('json') > -1)
def get_guild_id(guild):
try:
guild_id = int(guild['id'])
except:
query = """
{
guildData{
guild(name: "%s", serverSlug: "%s", serverRegion: "%s"){
id
}
}
}
""" % (guild['name'], guild['realm'].replace(' ', '-'), guild['region'])
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
guild_id = r.json()['data']['guildData']['guild']['id']
return guild_id
def get_log_list(guild):
guild['id'] = get_guild_id(guild)
query = ("{"
f"reportData{{"
f" reports(guildID: {guild['id']}, zoneID: 26){{"
f" data{{"
f" code"
f" startTime"
f" endTime"
f" }}"
f" }}"
f"}}"
f"}}")
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
log_list = r.json()['data']['reportData']['reports']['data']
return log_list
def get_log_list_apiv1(guild):
with open('..//..//Warcraftlogs//api_key.txt.') as f:
api_key = f.readlines()[0]
link = "https://www.warcraftlogs.com:443/v1/reports/guild/" + \
guild['name'] + "/" + guild['realm'].replace(' ', '-').replace("'","")+ "/" + \
guild['region'] + "?api_key=" + api_key
guild_logs = requests.get(link)
log_list = guild_logs.json()
log_list_new = []
for item in log_list:
if item['zone'] == 26:
log_list_new.append({'code': item['id'],
'startTime': item['start'],
'endTime': item['end']})
return log_list_new
def get_pulls(log, guild):
log_id = log['code']
query = """
{
reportData{
report(code: "%s"){
fights(difficulty: 5){
name
id
averageItemLevel
bossPercentage
kill
startTime
endTime
}
}
}
}
""" % (log_id)
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
fight_list = r.json()['data']['reportData']['report']['fights']
for k in range(len(fight_list)):
fight_list[k].update({'log_code': log_id})
return fight_list
def get_fight_info(fight, guild, unique_id):
code = fight['log_code']
fight_ID = fight['id']
start_time = fight['start_time']
end_time = fight['end_time']
query = """
{
reportData{
report(code: "%s"){
table(fightIDs: %s, startTime: %s, endTime: %s)
}
}
}
""" % (code, fight_ID, str(start_time), str(end_time))
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
table = r.json()['data']['reportData']['report']['table']['data']
comp = table['composition']
roles = table['playerDetails']
player_list = []
for role in roles:
players = roles[role]
for player in players:
try:
gear_ilvl = [piece['itemLevel'] for piece in player['combatantInfo']['gear']]
ilvl = np.mean(gear_ilvl)
except:
try:
ilvl = player['minItemLevel']
except:
ilvl = np.NaN
try:
covenant = player['combatantInfo']['covenantID']
except:
covenant = np.NaN
try:
spec = player['specs'][0]
except:
spec = np.NaN
try:
stats = player['combatantInfo']['stats']
primaries = ['Agility','Intellect','Strength']
for primary in primaries:
if primary in stats.keys():
break
primary= stats[primary]['min']
mastery= stats['Mastery']['min']
crit= stats['Crit']['min']
haste= stats['Haste']['min']
vers= stats['Versatility']['min']
stamina= stats['Stamina']['min']
except:
primary = np.NaN
mastery = np.NaN
crit = np.NaN
haste = np.NaN
vers = np.NaN
stamina = np.NaN
player_info= {'unique_id': unique_id,
'class': player['type'],
'spec': spec,
'role': role,
'ilvl': ilvl,
'covenant': covenant,
'primary': primary,
'mastery': mastery,
'crit': crit,
'haste': haste,
'vers': vers,
'stamina': stamina,
'boss_name': fight['name']}
player_list.append(player_info)
return player_list
# %% Setup the SQL Stuff
from sqlalchemy import create_engine
import psycopg2
server = 'localhost'
database = 'nathria_prog'
username = 'postgres'
password = 'postgres'
if 'conn' in locals():
conn.close()
engine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog')
conn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password)
curs = conn.cursor()
curs.execute("select exists(select * from information_schema.tables where table_name=%s)",\
('nathria_prog_v2',))
if curs.fetchone()[0]:
curs.execute('select distinct guild_name from nathria_prog_v2')
already_added_guilds = [item[0] for item in curs.fetchall()]
already_added_length = len(already_added_guilds)
else:
already_added_guilds = []
already_added_length = 0
def check_in_sql(fight):
unique_id = fight['unique_id']
curs.execute("select * from nathria_prog_v2 where unique_id = '%s'" % (unique_id))
if curs.fetchone() is None:
check_one = False
else:
check_one = True
curs.execute("select * from nathria_prog_v2 where start_time > %s and end_time < %s and guild_name = '%s';" \
% (fight['start_time']-60, fight['end_time']+60, fight['guild_name']))
if curs.fetchone() is None:
check_two = False
else:
check_two = True
check = check_one or check_two
return check
def add_to_sql(curs, table, info):
placeholders = ', '.join(['%s'] * len(info))
columns = ', '.join(info.keys())
sql = "INSERT INTO %s ( %s ) VALUES ( %s )" % (str(table), columns, placeholders)
curs.execute(sql, list(info.values()))
#%% This is for futures use
def make_logs_query(log):
log_id = log['code']
query = """
{
reportData{
report(code: "%s"){
fights(difficulty: 5){
name
id
averageItemLevel
bossPercentage
kill
startTime
endTime
}
}
}
}
""" % (log_id)
return query
def get_log_args(log, graphql_endpoint, headers):
args = {'url': graphql_endpoint,
'json': {'query': make_logs_query(log)},
'headers': headers}
return args
def get_fight_list(log_list, graphql_endpoint, headers):
session = FuturesSession(max_workers = 2)
futures = [session.post(**get_log_args(log, graphql_endpoint, headers)) for log in log_list]
fights_list = []
for q, item in enumerate(futures):
result = item.result()
if result.status_code!=200:
print(result.status_code)
fights = result.json()['data']['reportData']['report']['fights']
for k, fight in enumerate(fights):
fight['log_code'] = log_list[q]['code']
fight['log_start'] = log_list[q]['startTime']
fight['log_end'] = log_list[q]['endTime']
fight['unique_id'] = log_list[q]['code'] + '_' + str(fight['id'])
fights_list.extend([fight])
return fights_list
def get_prog_pulls(df, boss_name):
if type(df.iloc[0]['start_time']) != 'int':
df['start_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['start_time']]
df['end_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['end_time']]
kills_df = df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5').query('kill == True')
first_kill_time = min(kills_df['start_time'])
return df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5').query('start_time <= '+str(first_kill_time))
def add_pull_num(df):
df = df.sort_values(by = ['start_time'])
df.insert(loc = 0, column = 'pull_num', value = np.arange(len(df))+1)
return df
def combine_boss_df(df):
boss_names = [
'Shriekwing', \
'Huntsman Altimor',
'Hungering Destroyer', \
"Sun King's Salvation",
"Artificer Xy'mox", \
'Lady Inerva Darkvein', \
'The Council of Blood', \
'Sludgefist', \
'Stone Legion Generals', \
'Sire Denathrius']
combine_df = pd.DataFrame()
for k, boss_name in enumerate(np.unique(df['name'])):
if boss_name in boss_names and boss_name in np.unique(df['name']):
combine_df = combine_df.append(add_pull_num(df.copy(deep = True).query('name == "'+boss_name+'"')))
combine_df = combine_df.reset_index().drop(columns = 'index')
return combine_df
n_start = 3500
for gnum, guild in enumerate(guilds[n_start:]):
if guild['name'] in already_added_guilds:
continue
# log_list = get_log_list(guild)
try:
log_list = get_log_list_apiv1(guild)
if len(log_list) == 0:
print(f'Log list empty for {guild["name"]}')
fightdf = pd.DataFrame()
playerdf = pd.DataFrame()
print(f'Parsing guild {guild["name"]} (#{gnum+1+n_start} of {len(guilds)})')
fight_list = get_fight_list(log_list, graphql_endpoint, headers)
fightdf = pd.DataFrame()
for q, fight in enumerate(fight_list):
fight['boss_perc'] = fight.pop('bossPercentage')
fight['average_item_level'] = fight.pop('averageItemLevel')
fight['unique_id'] = fight['log_code'] + '_' + str(fight['id'])
fight['start_time'] = fight.pop('startTime')
fight['end_time'] = fight.pop('endTime')
fight['guild_name'] = guild['name']
fight['guild_realm'] = guild['realm']
fight['guild_region'] = guild['region']
fightdf = fightdf.append(pd.DataFrame(fight, index=['i',]))
fightdf = combine_boss_df(fightdf.copy(deep = True))
fightdf.to_sql('nathria_prog_v2', engine, if_exists='append')
if len(fightdf)>1:
print(f'Adding to SQL guild {guild["name"]}')
time.sleep(3)
except:
continue
#%%
asdfasdf
from sqlalchemy import create_engine
import psycopg2
server = 'localhost'
database = 'nathria_prog'
username = 'postgres'
password = 'postgres'
if 'conn' in locals():
conn.close()
engine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog')
conn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password)
curs = conn.cursor()
curs.execute("select exists(select * from information_schema.tables where table_name=%s)",\
('nathria_prog_v2',))
if curs.fetchone()[0]:
curs.execute('select distinct guild_name from nathria_prog_v2')
logged_guilds = [item[0] for item in curs.fetchall()]
else:
logged_guilds = []
def make_fights_query(fight):
code = fight['log_code']
fight_ID = fight['id']
start_time = fight['start_time']
end_time = fight['end_time']
query = """
{
reportData{
report(code: "%s"){
table(fightIDs: %s, startTime: %s, endTime: %s)
}
}
}
""" % (code, fight_ID, str(start_time), str(end_time))
return query
def get_fight_args(log, graphql_endpoint, headers):
args = {'url': graphql_endpoint,
'json': {'query': make_fights_query(log)},
'headers': headers}
return args
def get_fight_table(fights_list, graphql_endpoint, headers):
session = FuturesSession(max_workers = 2)
futures = [session.post(**get_fight_args(fight, graphql_endpoint, headers)) for fight in fights_list]
fights_tables = []
for k, item in enumerate(futures):
result = item.result()
if result.status_code!=200:
print(result.status_code)
# if is_good_response_json(item.result()):
try:
fights_tables.append(result.json()['data']['reportData']['report']['table']['data'])
except:
pass
return fights_tables
def parse_fight_table(table, boss_name, unique_id, guild_name):
comp = table['composition']
roles = table['playerDetails']
player_list = []
for role in roles:
players = roles[role]
for player in players:
try:
gear_ilvl = [piece['itemLevel'] for piece in player['combatantInfo']['gear']]
ilvl = np.mean(gear_ilvl)
except:
try:
ilvl = player['minItemLevel']
except:
ilvl = np.NaN
try:
covenant = player['combatantInfo']['covenantID']
except:
covenant = np.NaN
try:
spec = player['specs'][0]
except:
spec = np.NaN
try:
stats = player['combatantInfo']['stats']
primaries = ['Agility','Intellect','Strength']
for primary in primaries:
if primary in stats.keys():
break
primary= stats[primary]['min']
mastery= stats['Mastery']['min']
crit= stats['Crit']['min']
haste= stats['Haste']['min']
vers= stats['Versatility']['min']
stamina= stats['Stamina']['min']
except:
primary = np.NaN
mastery = np.NaN
crit = np.NaN
haste = np.NaN
vers = np.NaN
stamina = np.NaN
player_info= {'unique_id': unique_id,
'name': player['name'],
'guild_name': guild_name,
'server': player['server'],
'class': player['type'],
'spec': spec,
'role': role,
'ilvl': ilvl,
'covenant': covenant,
'primary': primary,
'mastery': mastery,
'crit': crit,
'haste': haste,
'vers': vers,
'stamina': stamina,
'boss_name': boss_name}
player_list.append(player_info)
return player_list
for guild_name in logged_guilds:
curs.execute(f"select * from nathria_prog_v2 where guild_name = '{guild_name}'")
pulls = pd.DataFrame(curs.fetchall())
pulls.columns = [desc[0] for desc in curs.description]
fights_list = pulls.to_dict('records')
curs.execute(f"select distinct unique_id from nathria_prog_v2_players where guild_name = '{guild_name}'")
added_fights = [item[0] for item in curs.fetchall()]
fight_list = [fight for fight in fights_list if fight['unique_id'] not in added_fights]
if len(fight_list)>1:
fights_tables = get_fight_table(fights_list, graphql_endpoint, headers)
playerdf = pd.DataFrame()
for q, table in enumerate(fights_tables):
unique_id = fights_list[q]['unique_id']
guild_name = guild_name
player_info = parse_fight_table(table, fights_list[q]['name'], unique_id, guild_name)
for player in player_info:
for player in player_info:
playerdf = playerdf.append(pd.DataFrame(player, index=['i',]))
if len(playerdf)>1:
print(f'Adding to SQL guild player info {guild["name"]}')
playerdf.to_sql('nathria_prog_v2_players', engine, if_exists='append') |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/31 0031 18:55
# @Author : Hadrianl
# @File : realtime_data_server
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.const import COMMISSION_TYPE
from spapi.spAPI import *
from spapi.sp_struct import *
import zmq
import datetime as dt
from rqalpha.api import logger
from queue import Queue, Empty
import pymongo as pmg
from threading import Thread
from collections import deque
import pandas as pd
from rqalpha.events import EVENT
import time
from rqalpha.environment import Environment
from rqalpha.model.instrument import Instrument
from .util import _convert_from_ctype
class RealtimeDataServer:
def __init__(self, sp_info, db_info, socket_info):
mongo_cli = pmg.MongoClient(db_info['host'])
admin_db = mongo_cli.get_database('admin')
admin_db.authenticate(db_info['user'], db_info['pwd'])
self._db = mongo_cli.get_database(db_info['db'])
self._col = self._db.get_collection('realtime_future_1min_')
self._col.create_index([('datetime', pmg.DESCENDING), ('code', pmg.ASCENDING)], unique=True)
self._col.create_index([('code', pmg.ASCENDING)])
self.ctx = zmq.Context()
self.trigger_socket = self.ctx.socket(zmq.PUB)
self.trigger_socket.bind(f'tcp://*: {socket_info['trigger_port']}')
self.prod_codes = {}
initialize()
set_login_info(**sp_info)
self._init_callback()
login()
time.sleep(3)
self._init_subscribe()
def _init_callback(self):
self._ticker_queues = {}
self._price_queues = {}
self._trigger_queue = Queue()
self._resample_thread = {}
@on_login_reply # 登录成功时候调用
def login_reply(user_id, ret_code, ret_msg):
if ret_code == 0:
api_logger.info(f'@{user_id.decode()}登录成功')
self._init_subscribe()
else:
api_logger.error(f'@{user_id.decode()}登录失败--errcode:{ret_code}--errmsg:{ret_msg.decode()}')
@on_instrument_list_reply # 产品系列信息的回调推送,用load_instrument_list()触发
def inst_list_reply(req_id, is_ready, ret_msg):
if is_ready:
api_logger.info('<产品>' + f'信息加载成功 req_id:{req_id}-msg:{ret_msg.decode()}')
else:
api_logger.error('<产品>' + f'信息正在加载......req_id{req_id}-msg:{ret_msg.decode()}')
@on_product_list_by_code_reply # 根据产品系列名返回合约信息
def product_list_by_code_reply(req_id, inst_code, is_ready, ret_msg):
if is_ready:
if inst_code == '':
api_logger.info('<合约>' + f'该产品系列没有合约信息 req_id:{req_id}-msg:{ret_msg.decode()}')
else:
api_logger.info('<合约>' + f'产品:{inst_code.decode()}合约信息加载成功 req_id:{req_id}-msg:{ret_msg.decode()}')
else:
api_logger.error('<合约>' + f'产品:{inst_code.decode()}合约信息正在加载......req_id:{req_id}-msg:{ret_msg.decode()}')
#
@on_business_date_reply # 登录成功后会返回一个交易日期
def business_date_reply(business_date):
self.trade_date = dt.datetime.fromtimestamp(business_date)
api_logger.info('<日期>' + f'当前交易日--{self.trade_date}')
@on_ticker_update # ticker数据推送
def ticker_update(ticker: SPApiTicker):
ticker_dict = _convert_from_ctype(ticker)
self._ticker_queues[ticker_dict['ProdCode']].put(ticker_dict)
api_logger.info(f'{ticker_dict}')
@on_api_price_update # price数据推送
def price_update(price: SPApiPrice):
price_dict = _convert_from_ctype(price)
self._price_queues[price_dict['ProdCode']].append(price_dict)
api_logger.info(f'{price_dict}')
@on_connecting_reply # 连接状态改变时调用
def connecting_reply(host_id, con_status):
api_logger.info(f'<连接>{HOST_TYPE[host_id]}状态改变--{HOST_CON_STATUS[con_status]}')
# global login_flag
self.on_login_reply = login_reply
self.inst_list_reply = inst_list_reply
self.product_list_by_code_reply = product_list_by_code_reply
self.business_date_reply = business_date_reply
self.ticker_update = ticker_update
self.price_update = price_update
self.connecting_reply = connecting_reply
def _init_subscribe(self):
contract_col = self._db.get_collection('realtime_future_contract_info')
code = contract_col.find()
self.prod_codes = {c['Filler']: c['CODE'] for c in code}
for p in self.prod_codes:
self.subscribe_ticker(p)
for p in self.prod_codes:
self.subscribe_price(p)
def _resample_ticker(self, prod_code):
tickers = []
q = self._ticker_queues[prod_code]
code = self.prod_codes[prod_code]
time_diff = 0
while True:
try:
tick = q.get(timeout=1)
time_diff = tick['TickerTime'] - time.time()
print(time_diff)
except Empty:
if tickers and time.time() % (tickers[-1]['TickerTime'] // 60) >= 61 + time_diff: # 在没有新的一分钟tick数据时,跨过下分钟超过3秒会自动生成bar
price_list = []
vol_list = []
d = dt.datetime.fromtimestamp(tickers[-1]['TickerTime']).replace(second=0)
for t in tickers:
price_list.append(t['Price'])
vol_list.append(t['Qty'])
o, h, l, c, v = price_list[0], max(price_list), min(price_list), price_list[-1], sum(vol_list)
self._col.update_one({'datetime': d, 'code': code},
{'$set': {'datetime': d, 'code': code, 'open': o,
'high': h, 'low': l, 'close': c, 'volume': v,
'trade_date': self.trade_date}}, upsert=True)
self._trigger_queue.put(d)
tickers.clear()
continue
if tick is None:
break
if tickers and tickers[-1]['TickerTime'] // 60 != tick['TickerTime'] // 60:
price_list = []
vol_list = []
d = dt.datetime.fromtimestamp(tickers[-1]['TickerTime']).replace(second=0)
for t in tickers:
price_list.append(t['Price'])
vol_list.append(t['Qty'])
o, h, l, c, v = price_list[0], max(price_list), min(price_list), price_list[-1], sum(vol_list)
self._col.update_one({'datetime': d, 'code': code}, {'$set': {'datetime': d, 'code': code, 'open': o,
'high': h, 'low': l, 'close': c, 'volume': v,
'trade_date': self.trade_date}}, upsert=True)
self._trigger_queue.put(d)
tickers.clear()
tickers.append(tick)
def subscribe_ticker(self, prod_code):
self._ticker_queues.setdefault(prod_code, Queue())
subscribe_ticker(prod_code, 1)
t = self._resample_thread.setdefault(prod_code, Thread(target=self._resample_ticker, args=(prod_code, )))
if not t.isAlive():
t.setDaemon(True)
t.start()
def unsubscribe_ticker(self, prod_code):
subscribe_ticker(prod_code, 0)
q = self._ticker_queues.pop(prod_code)
t = self._resample_thread.pop(prod_code)
q.put(None)
t.join()
def subscribe_price(self, prod_code):
self._price_queues.setdefault(prod_code, deque(maxlen=1))
subscribe_price(prod_code, 1)
def unsubscribe_price(self, prod_code):
try:
self._price_queues.pop(prod_code)
finally:
subscribe_price(prod_code, 0)
    def publish_bar_signal(self):
        """Broadcast bar-close datetimes over the ZMQ PUB socket.

        Publishes once every resampling thread has reported the bar, or once
        2 seconds have passed since the bar time. Blocking infinite loop --
        intended to run on its own thread.
        """
        dt_list = []
        while True:
            d = self._trigger_queue.get()
            dt_list.append(d)
            print(d)
            # Either all workers have emitted this bar, or we are past the grace period.
            if len(dt_list) >= len(self._resample_thread) or dt.datetime.now() > d + dt.timedelta(seconds=2):
                self.trigger_socket.send_pyobj(d)
                dt_list.clear()
def add_contract(db_info, code):
    """Ensure the contract/product info collections exist with their indexes.

    ``code`` is currently unused; it is kept for interface compatibility.
    """
    client = pmg.MongoClient(db_info['host'])
    client.get_database('admin').authenticate(db_info['user'], db_info['pwd'])
    database = client.get_database(db_info['db'])
    contracts = database.get_collection('realtime_future_contract_info')
    products = database.get_collection('realtime_future_product_info')
    contracts.create_index([('DATE', pmg.DESCENDING), ('CODE', pmg.ASCENDING)], unique=True)
    contracts.create_index([('CODE', pmg.ASCENDING)])
    products.create_index([('DATE', pmg.DESCENDING), ('CLASS_CODE', pmg.ASCENDING)], unique=True)
    products.create_index([('CLASS_CODE', pmg.ASCENDING)])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/31 0031 18:55
# @Author : Hadrianl
# @File : realtime_data_server
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.const import COMMISSION_TYPE
from spapi.spAPI import *
from spapi.sp_struct import *
import zmq
import datetime as dt
from rqalpha.api import logger
from queue import Queue, Empty
import pymongo as pmg
from threading import Thread
from collections import deque
import pandas as pd
from rqalpha.events import EVENT
import time
from rqalpha.environment import Environment
from rqalpha.model.instrument import Instrument
from .util import _convert_from_ctype
class RealtimeDataServer:
    """Real-time market-data bridge.

    Wires SP trading API callbacks to MongoDB and ZeroMQ: raw ticks are
    resampled into 1-minute OHLCV bars (one worker thread per product code),
    bars are upserted into the 'realtime_future_1min_' collection, and each
    bar-close datetime is broadcast on a ZMQ PUB socket.
    """
    def __init__(self, sp_info, db_info, socket_info):
        # --- MongoDB: authenticate and prepare the 1-minute bar collection ---
        mongo_cli = pmg.MongoClient(db_info['host'])
        admin_db = mongo_cli.get_database('admin')
        admin_db.authenticate(db_info['user'], db_info['pwd'])
        self._db = mongo_cli.get_database(db_info['db'])
        self._col = self._db.get_collection('realtime_future_1min_')
        self._col.create_index([('datetime', pmg.DESCENDING), ('code', pmg.ASCENDING)], unique=True)
        self._col.create_index([('code', pmg.ASCENDING)])
        # --- ZMQ PUB socket used to broadcast bar-close signals ---
        self.ctx = zmq.Context()
        self.trigger_socket = self.ctx.socket(zmq.PUB)
        # NOTE(review): the endpoint contains a space after the colon
        # ('tcp://*: <port>'); this looks like a typo -- confirm bind() accepts it.
        self.trigger_socket.bind(f'tcp://*: {socket_info["trigger_port"]}')
        self.prod_codes = {}  # Filler (prod code) -> CODE; filled by _init_subscribe()
        # --- SP API bootstrap: register callbacks before logging in ---
        initialize()
        set_login_info(**sp_info)
        self._init_callback()
        login()
        time.sleep(3)  # crude wait for the asynchronous login to settle
        self._init_subscribe()
    def _init_callback(self):
        """Create the shared queues/threads maps and register all SP API callbacks."""
        self._ticker_queues = {}       # prod_code -> Queue of raw tick dicts
        self._price_queues = {}        # prod_code -> deque(maxlen=1) holding the latest price
        self._trigger_queue = Queue()  # bar datetimes pending publication
        self._resample_thread = {}     # prod_code -> resampling worker Thread
        # NOTE(review): api_logger is not defined in the visible imports --
        # presumably exported by 'from spapi.spAPI import *'; confirm.
        @on_login_reply  # called when the login attempt is answered
        def login_reply(user_id, ret_code, ret_msg):
            if ret_code == 0:
                api_logger.info(f'@{user_id.decode()}登录成功')
                self._init_subscribe()
            else:
                api_logger.error(f'@{user_id.decode()}登录失败--errcode:{ret_code}--errmsg:{ret_msg.decode()}')
        @on_instrument_list_reply  # product-series info pushed back; triggered by load_instrument_list()
        def inst_list_reply(req_id, is_ready, ret_msg):
            if is_ready:
                api_logger.info('<产品>' + f'信息加载成功 req_id:{req_id}-msg:{ret_msg.decode()}')
            else:
                api_logger.error('<产品>' + f'信息正在加载......req_id{req_id}-msg:{ret_msg.decode()}')
        @on_product_list_by_code_reply  # returns contract info for a given product-series name
        def product_list_by_code_reply(req_id, inst_code, is_ready, ret_msg):
            if is_ready:
                if inst_code == '':
                    api_logger.info('<合约>' + f'该产品系列没有合约信息 req_id:{req_id}-msg:{ret_msg.decode()}')
                else:
                    api_logger.info('<合约>' + f'产品:{inst_code.decode()}合约信息加载成功 req_id:{req_id}-msg:{ret_msg.decode()}')
            else:
                api_logger.error('<合约>' + f'产品:{inst_code.decode()}合约信息正在加载......req_id:{req_id}-msg:{ret_msg.decode()}')
        #
        @on_business_date_reply  # a trading date is returned after a successful login
        def business_date_reply(business_date):
            self.trade_date = dt.datetime.fromtimestamp(business_date)
            api_logger.info('<日期>' + f'当前交易日--{self.trade_date}')
        @on_ticker_update  # tick data push
        def ticker_update(ticker: SPApiTicker):
            ticker_dict = _convert_from_ctype(ticker)
            self._ticker_queues[ticker_dict['ProdCode']].put(ticker_dict)
            api_logger.info(f'{ticker_dict}')
        @on_api_price_update  # price data push
        def price_update(price: SPApiPrice):
            price_dict = _convert_from_ctype(price)
            self._price_queues[price_dict['ProdCode']].append(price_dict)
            api_logger.info(f'{price_dict}')
        @on_connecting_reply  # called whenever the connection status changes
        def connecting_reply(host_id, con_status):
            api_logger.info(f'<连接>{HOST_TYPE[host_id]}状态改变--{HOST_CON_STATUS[con_status]}')
        # global login_flag
        # Keep references on self so the callback closures are not garbage-collected.
        self.on_login_reply = login_reply
        self.inst_list_reply = inst_list_reply
        self.product_list_by_code_reply = product_list_by_code_reply
        self.business_date_reply = business_date_reply
        self.ticker_update = ticker_update
        self.price_update = price_update
        self.connecting_reply = connecting_reply
    def _init_subscribe(self):
        # Load the contract table and subscribe to every product's ticker and price feeds.
        contract_col = self._db.get_collection('realtime_future_contract_info')
        code = contract_col.find()
        self.prod_codes = {c['Filler']: c['CODE'] for c in code}
        for p in self.prod_codes:
            self.subscribe_ticker(p)
        for p in self.prod_codes:
            self.subscribe_price(p)
    def _resample_ticker(self, prod_code):
        """Worker loop: fold raw ticks of *prod_code* into 1-minute OHLCV bars.

        Bars are upserted into MongoDB and their datetime pushed onto the
        trigger queue. Exits when a None sentinel arrives on the queue.
        """
        tickers = []
        q = self._ticker_queues[prod_code]
        code = self.prod_codes[prod_code]
        time_diff = 0  # feed clock minus local clock (skew estimate)
        while True:
            try:
                tick = q.get(timeout=1)
                time_diff = tick['TickerTime'] - time.time()
                print(time_diff)
            except Empty:
                # When no new tick arrives and we are past the minute boundary, close the pending bar anyway.
                # NOTE(review): 'time.time() % (minute-count)' looks like it was meant
                # to be a subtraction -- confirm this timeout-flush condition.
                if tickers and time.time() % (tickers[-1]['TickerTime'] // 60) >= 61 + time_diff:
                    price_list = []
                    vol_list = []
                    d = dt.datetime.fromtimestamp(tickers[-1]['TickerTime']).replace(second=0)
                    for t in tickers:
                        price_list.append(t['Price'])
                        vol_list.append(t['Qty'])
                    o, h, l, c, v = price_list[0], max(price_list), min(price_list), price_list[-1], sum(vol_list)
                    self._col.update_one({'datetime': d, 'code': code},
                                         {'$set': {'datetime': d, 'code': code, 'open': o,
                                                   'high': h, 'low': l, 'close': c, 'volume': v,
                                                   'trade_date': self.trade_date}}, upsert=True)
                    self._trigger_queue.put(d)
                    tickers.clear()
                continue
            if tick is None:  # shutdown sentinel from unsubscribe_ticker()
                break
            if tickers and tickers[-1]['TickerTime'] // 60 != tick['TickerTime'] // 60:
                # Tick starts a new minute: flush the previous minute's bar first.
                price_list = []
                vol_list = []
                d = dt.datetime.fromtimestamp(tickers[-1]['TickerTime']).replace(second=0)
                for t in tickers:
                    price_list.append(t['Price'])
                    vol_list.append(t['Qty'])
                o, h, l, c, v = price_list[0], max(price_list), min(price_list), price_list[-1], sum(vol_list)
                self._col.update_one({'datetime': d, 'code': code}, {'$set': {'datetime': d, 'code': code, 'open': o,
                                                                              'high': h, 'low': l, 'close': c, 'volume': v,
                                                                              'trade_date': self.trade_date}}, upsert=True)
                self._trigger_queue.put(d)
                tickers.clear()
            tickers.append(tick)
    def subscribe_ticker(self, prod_code):
        # Ensure a tick queue exists, subscribe via the SP API, and start the worker thread.
        self._ticker_queues.setdefault(prod_code, Queue())
        subscribe_ticker(prod_code, 1)
        t = self._resample_thread.setdefault(prod_code, Thread(target=self._resample_ticker, args=(prod_code, )))
        # NOTE(review): Thread.isAlive()/setDaemon() were removed in Python 3.9;
        # is_alive()/daemon are required on modern interpreters.
        if not t.isAlive():
            t.setDaemon(True)
            t.start()
    def unsubscribe_ticker(self, prod_code):
        # Unsubscribe, then stop the worker with a None sentinel and join it.
        subscribe_ticker(prod_code, 0)
        q = self._ticker_queues.pop(prod_code)
        t = self._resample_thread.pop(prod_code)
        q.put(None)
        t.join()
    def subscribe_price(self, prod_code):
        # One-slot buffer: only the latest price is retained.
        self._price_queues.setdefault(prod_code, deque(maxlen=1))
        subscribe_price(prod_code, 1)
    def unsubscribe_price(self, prod_code):
        # NOTE(review): pop() raises KeyError for unknown products; the finally
        # still unsubscribes but the exception propagates -- confirm intended.
        try:
            self._price_queues.pop(prod_code)
        finally:
            subscribe_price(prod_code, 0)
    def publish_bar_signal(self):
        """Broadcast each bar-close datetime once over the PUB socket.

        Publishes when every worker has reported the bar, or 2 seconds after
        the bar time. Blocking loop; run on a dedicated thread.
        """
        dt_list = []
        while True:
            d = self._trigger_queue.get()
            dt_list.append(d)
            print(d)
            if len(dt_list) >= len(self._resample_thread) or dt.datetime.now() > d + dt.timedelta(seconds=2):
                self.trigger_socket.send_pyobj(d)
                dt_list.clear()
def add_contract(db_info, code):
    """Ensure the contract/product info collections exist with their indexes.

    :param db_info: dict with 'host', 'user', 'pwd' and 'db' keys
    :param code: unused -- NOTE(review): parameter is never referenced; confirm intent
    """
    mongo_cli = pmg.MongoClient(db_info['host'])
    admin_db = mongo_cli.get_database('admin')
    admin_db.authenticate(db_info['user'], db_info['pwd'])
    db = mongo_cli.get_database(db_info['db'])
    contract_col = db.get_collection('realtime_future_contract_info')
    product_info = db.get_collection('realtime_future_product_info')
    contract_col.create_index([('DATE', pmg.DESCENDING), ('CODE', pmg.ASCENDING)], unique=True)
    contract_col.create_index([('CODE', pmg.ASCENDING)])
    product_info.create_index([('DATE', pmg.DESCENDING), ('CLASS_CODE', pmg.ASCENDING)], unique=True)
    product_info.create_index([('CLASS_CODE', pmg.ASCENDING)])
|
import itertools
import logging
import warnings
from abc import abstractmethod
from collections import Counter
from pathlib import Path
from typing import Union, List, Tuple, Dict, Optional
import torch.nn
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
import flair
from flair import file_utils
from flair.data import DataPoint, Sentence, Dictionary
from flair.datasets import DataLoader, SentenceDataset
from flair.training_utils import Result, store_embeddings
log = logging.getLogger("flair")
class Model(torch.nn.Module):
    """Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.
    Every new type of model must implement these methods."""

    @property
    @abstractmethod
    def label_type(self):
        """Each model predicts labels of a certain type. TODO: can we find a better name for this?"""
        raise NotImplementedError

    @abstractmethod
    def forward_loss(self, data_points: Union[List[DataPoint], DataPoint]) -> torch.tensor:
        """Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training."""
        raise NotImplementedError

    @abstractmethod
    def evaluate(
            self,
            sentences: Union[List[Sentence], Dataset],
            gold_label_type: str,
            out_path: Union[str, Path] = None,
            embedding_storage_mode: str = "none",
            mini_batch_size: int = 32,
            num_workers: int = 8,
            main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
            exclude_labels: List[str] = [],
            gold_label_dictionary: Optional[Dictionary] = None,
    ) -> Result:
        """Evaluates the model. Returns a Result object containing evaluation
        results and a loss value. Implement this to enable evaluation.
        :param data_loader: DataLoader that iterates over dataset to be evaluated
        :param out_path: Optional output path to store predictions
        :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and
        freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU
        :return: Returns a Tuple consisting of a Result object and a loss float value
        """
        raise NotImplementedError

    @abstractmethod
    def _get_state_dict(self):
        """Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()
        functionality."""
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def _init_model_with_state_dict(state):
        """Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()
        functionality."""
        raise NotImplementedError

    @staticmethod
    def _fetch_model(model_name) -> str:
        """Resolve a model name to a local file path; the base implementation is the identity."""
        return model_name

    def save(self, model_file: Union[str, Path], checkpoint: bool = False):
        """
        Saves the current model to the provided file.
        :param model_file: the model file
        :param checkpoint: if True, also persist optimizer/scheduler state dicts for resuming training
        """
        model_state = self._get_state_dict()
        # in Flair <0.9.1, optimizer and scheduler used to train model are not saved
        optimizer = scheduler = None
        # write out a "model card" if one is set
        if hasattr(self, 'model_card'):
            # special handling for optimizer: remember optimizer class and state dictionary
            if 'training_parameters' in self.model_card:
                training_parameters = self.model_card['training_parameters']
                if 'optimizer' in training_parameters:
                    optimizer = training_parameters['optimizer']
                    if checkpoint:
                        training_parameters['optimizer_state_dict'] = optimizer.state_dict()
                    # store only the class so the pickle does not drag the full optimizer along
                    training_parameters['optimizer'] = optimizer.__class__
                if 'scheduler' in training_parameters:
                    scheduler = training_parameters['scheduler']
                    if checkpoint:
                        with warnings.catch_warnings():
                            warnings.simplefilter("ignore")
                            training_parameters['scheduler_state_dict'] = scheduler.state_dict()
                    training_parameters['scheduler'] = scheduler.__class__
            model_state['model_card'] = self.model_card
        # save model
        torch.save(model_state, str(model_file), pickle_protocol=4)
        # restore optimizer and scheduler to model card if set
        if optimizer:
            self.model_card['training_parameters']['optimizer'] = optimizer
        if scheduler:
            self.model_card['training_parameters']['scheduler'] = scheduler

    @classmethod
    def load(cls, model: Union[str, Path]):
        """
        Loads the model from the given file.
        :param model: the model file
        :return: the loaded text classifier model
        """
        model_file = cls._fetch_model(str(model))
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
            # see https://github.com/zalandoresearch/flair/issues/351
            f = file_utils.load_big_file(str(model_file))
            state = torch.load(f, map_location='cpu')
        model = cls._init_model_with_state_dict(state)
        if 'model_card' in state:
            model.model_card = state['model_card']
        model.eval()
        model.to(flair.device)
        return model

    def print_model_card(self):
        """Log the training metadata ("model card") stored with this model, if any."""
        if hasattr(self, 'model_card'):
            param_out = "\n------------------------------------\n"
            param_out += "--------- Flair Model Card ---------\n"
            param_out += "------------------------------------\n"
            param_out += "- this Flair model was trained with:\n"
            # the dict keys use the opposite quote character: reusing the f-string's
            # own quote inside a replacement field is a SyntaxError before Python 3.12
            param_out += f"-- Flair version {self.model_card['flair_version']}\n"
            param_out += f"-- PyTorch version {self.model_card['pytorch_version']}\n"
            if 'transformers_version' in self.model_card:
                param_out += f"-- Transformers version {self.model_card['transformers_version']}\n"
            param_out += "------------------------------------\n"
            param_out += "------- Training Parameters: -------\n"
            param_out += "------------------------------------\n"
            training_params = '\n'.join(f"-- {param} = {self.model_card['training_parameters'][param]}"
                                        for param in self.model_card['training_parameters'])
            param_out += training_params + "\n"
            param_out += "------------------------------------\n"
            log.info(param_out)
        else:
            log.info(
                "This model has no model card (likely because it is not yet trained or was trained with Flair version < 0.9.1)")
class Classifier(Model):
    """Abstract base class for all Flair models that do classification, both single- and multi-label.
    It inherits from flair.nn.Model and adds a unified evaluate() function so that all classification models
    use the same evaluation routines and compute the same numbers.
    Currently, the SequenceTagger implements this class directly, while all other classifiers in Flair
    implement the DefaultClassifier base class which implements Classifier."""

    def evaluate(
            self,
            data_points: Union[List[DataPoint], Dataset],
            gold_label_type: str,
            out_path: Union[str, Path] = None,
            embedding_storage_mode: str = "none",
            mini_batch_size: int = 32,
            num_workers: int = 8,
            main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
            exclude_labels: List[str] = [],
            gold_label_dictionary: Optional[Dictionary] = None,
    ) -> Result:
        """Evaluate the model on *data_points* against labels of *gold_label_type*.

        Predicts with label name 'predicted', collects gold and predicted values
        per span, and computes micro/macro precision/recall/F1 and accuracy via
        sklearn. The Result's main_score is chosen by *main_evaluation_metric*.
        """
        import numpy as np
        import sklearn
        # read Dataset into data loader (if list of sentences passed, make Dataset first)
        if not isinstance(data_points, Dataset):
            data_points = SentenceDataset(data_points)
        data_loader = DataLoader(data_points, batch_size=mini_batch_size, num_workers=num_workers)
        with torch.no_grad():
            # loss calculation
            eval_loss = 0
            average_over = 0
            # variables for printing
            lines: List[str] = []
            # variables for computing scores
            all_spans: List[str] = []
            all_true_values = {}
            all_predicted_values = {}
            sentence_id = 0
            for batch in data_loader:
                # remove any previously predicted labels
                for datapoint in batch:
                    datapoint.remove_labels('predicted')
                # predict for batch
                loss_and_count = self.predict(batch,
                                              embedding_storage_mode=embedding_storage_mode,
                                              mini_batch_size=mini_batch_size,
                                              label_name='predicted',
                                              return_loss=True)
                # use the builtin 'tuple' here -- isinstance against typing.Tuple is deprecated
                if isinstance(loss_and_count, tuple):
                    average_over += loss_and_count[1]
                    eval_loss += loss_and_count[0]
                else:
                    eval_loss += loss_and_count
                # get the gold labels
                for datapoint in batch:
                    for gold_label in datapoint.get_labels(gold_label_type):
                        representation = str(sentence_id) + ': ' + gold_label.identifier
                        value = gold_label.value
                        # map gold labels unknown to the provided dictionary to '<unk>'
                        if gold_label_dictionary and gold_label_dictionary.get_idx_for_item(value) == 0:
                            value = '<unk>'
                        if representation not in all_true_values:
                            all_true_values[representation] = [value]
                        else:
                            all_true_values[representation].append(value)
                        if representation not in all_spans:
                            all_spans.append(representation)
                    for predicted_span in datapoint.get_labels("predicted"):
                        representation = str(sentence_id) + ': ' + predicted_span.identifier
                        # add to all_predicted_values
                        if representation not in all_predicted_values:
                            all_predicted_values[representation] = [predicted_span.value]
                        else:
                            all_predicted_values[representation].append(predicted_span.value)
                        if representation not in all_spans:
                            all_spans.append(representation)
                    sentence_id += 1
                store_embeddings(batch, embedding_storage_mode)
                # make printout lines
                if out_path:
                    lines.extend(self._print_predictions(batch, gold_label_type))
            # write all_predicted_values to out_file if set
            if out_path:
                with open(Path(out_path), "w", encoding="utf-8") as outfile:
                    outfile.write("".join(lines))
            # make the evaluation dictionary
            evaluation_label_dictionary = Dictionary(add_unk=False)
            evaluation_label_dictionary.add_item("O")
            for true_values in all_true_values.values():
                for label in true_values:
                    evaluation_label_dictionary.add_item(label)
            for predicted_values in all_predicted_values.values():
                for label in predicted_values:
                    evaluation_label_dictionary.add_item(label)
            # finally, compute numbers
            y_true = []
            y_pred = []
            for span in all_spans:
                true_values = all_true_values[span] if span in all_true_values else ['O']
                predicted_values = all_predicted_values[span] if span in all_predicted_values else ['O']
                y_true_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
                for true_value in true_values:
                    y_true_instance[evaluation_label_dictionary.get_idx_for_item(true_value)] = 1
                y_true.append(y_true_instance.tolist())
                y_pred_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
                for predicted_value in predicted_values:
                    y_pred_instance[evaluation_label_dictionary.get_idx_for_item(predicted_value)] = 1
                y_pred.append(y_pred_instance.tolist())
            # now, calculate evaluation numbers
            target_names = []
            labels = []
            counter = Counter()
            counter.update(list(itertools.chain.from_iterable(all_true_values.values())))
            counter.update(list(itertools.chain.from_iterable(all_predicted_values.values())))
            for label_name, count in counter.most_common():
                if label_name == 'O': continue
                if label_name in exclude_labels: continue
                target_names.append(label_name)
                labels.append(evaluation_label_dictionary.get_idx_for_item(label_name))
            # there is at least one gold label or one prediction (default)
            if len(all_true_values) + len(all_predicted_values) > 1:
                classification_report = sklearn.metrics.classification_report(
                    y_true, y_pred, digits=4, target_names=target_names, zero_division=0, labels=labels,
                )
                classification_report_dict = sklearn.metrics.classification_report(
                    y_true, y_pred, target_names=target_names, zero_division=0, output_dict=True, labels=labels,
                )
                accuracy_score = round(sklearn.metrics.accuracy_score(y_true, y_pred), 4)
                precision_score = round(classification_report_dict["micro avg"]["precision"], 4)
                recall_score = round(classification_report_dict["micro avg"]["recall"], 4)
                micro_f_score = round(classification_report_dict["micro avg"]["f1-score"], 4)
                macro_f_score = round(classification_report_dict["macro avg"]["f1-score"], 4)
                main_score = classification_report_dict[main_evaluation_metric[0]][main_evaluation_metric[1]]
            else:
                # issue error and default all evaluation numbers to 0.
                log.error(
                    "ACHTUNG! No gold labels and no all_predicted_values found! Could be an error in your corpus or how you "
                    "initialize the trainer!")
                accuracy_score = precision_score = recall_score = micro_f_score = macro_f_score = main_score = 0.
                classification_report = ""
                classification_report_dict = {}
            detailed_result = (
                    "\nResults:"
                    f"\n- F-score (micro) {micro_f_score}"
                    f"\n- F-score (macro) {macro_f_score}"
                    f"\n- Accuracy {accuracy_score}"
                    "\n\nBy class:\n" + classification_report
            )
            # line for log file
            log_header = "PRECISION\tRECALL\tF1\tACCURACY"
            log_line = f"{precision_score}\t" f"{recall_score}\t" f"{micro_f_score}\t" f"{accuracy_score}"
            if average_over > 0:
                eval_loss /= average_over
            result = Result(
                main_score=main_score,
                log_line=log_line,
                log_header=log_header,
                detailed_results=detailed_result,
                classification_report=classification_report_dict,
                loss=eval_loss
            )
        return result

    def _print_predictions(self, batch, gold_label_type):
        """Format one line per datapoint comparing gold and predicted labels (for the out_path file)."""
        lines = []
        for datapoint in batch:
            # check if there is a label mismatch
            g = [label.identifier + label.value for label in datapoint.get_labels(gold_label_type)]
            p = [label.identifier + label.value for label in datapoint.get_labels('predicted')]
            g.sort()
            p.sort()
            correct_string = " -> MISMATCH!\n" if g != p else ""
            # print info -- note the single quotes inside the f-string: reusing the
            # f-string's own quote char is a SyntaxError before Python 3.12
            eval_line = f"{datapoint.to_original_text()}\n" \
                        f" - Gold: {datapoint.get_labels(gold_label_type)}\n" \
                        f" - Pred: {datapoint.get_labels('predicted')}\n{correct_string}\n"
            lines.append(eval_line)
        return lines
class DefaultClassifier(Classifier):
    """Default base class for all Flair models that do classification, both single- and multi-label.
    It inherits from flair.nn.Classifier and thus from flair.nn.Model. All features shared by all classifiers
    are implemented here, including the loss calculation and the predict() method.
    Currently, the TextClassifier, RelationExtractor, TextPairClassifier and SimpleSequenceTagger implement
    this class. You only need to implement the forward_pass() method to implement this base class.
    """
    def forward_pass(self,
                     sentences: Union[List[DataPoint], DataPoint],
                     return_label_candidates: bool = False,
                     ):
        """This method does a forward pass through the model given a list of data points as input.
        Returns the tuple (scores, labels) if return_label_candidates = False, where scores are a tensor of logits
        produced by the decoder and labels are the string labels for each data point.
        Returns the tuple (scores, labels, data_points, candidate_labels) if return_label_candidates = True,
        where data_points are the data points to which labels are added (commonly either Sentence or Token objects)
        and candidate_labels are empty Label objects for each prediction (depending on the task Label,
        SpanLabel or RelationLabel)."""
        raise NotImplementedError
    def __init__(self,
                 label_dictionary: Dictionary,
                 multi_label: bool = False,
                 multi_label_threshold: float = 0.5,
                 loss_weights: Dict[str, float] = None,
                 ):
        """
        :param label_dictionary: dictionary of all labels the model can predict
        :param multi_label: if True, use sigmoid + BCE (multiple labels per point); else softmax + CE
        :param multi_label_threshold: score threshold for multi-label prediction; a dict with a
            'default' key allows per-label thresholds
        :param loss_weights: optional per-label loss weights; unlisted labels default to 1.0
        """
        super().__init__()
        # initialize the label dictionary
        self.label_dictionary: Dictionary = label_dictionary
        # set up multi-label logic
        self.multi_label = multi_label
        self.multi_label_threshold = multi_label_threshold
        # loss weights and loss function
        self.weight_dict = loss_weights
        # Initialize the weight tensor
        if loss_weights is not None:
            n_classes = len(self.label_dictionary)
            weight_list = [1.0 for i in range(n_classes)]
            for i, tag in enumerate(self.label_dictionary.get_items()):
                if tag in loss_weights.keys():
                    weight_list[i] = loss_weights[tag]
            self.loss_weights = torch.FloatTensor(weight_list).to(flair.device)
        else:
            self.loss_weights = None
        # choose the loss matching the prediction mode
        if self.multi_label:
            self.loss_function = torch.nn.BCEWithLogitsLoss(weight=self.loss_weights)
        else:
            self.loss_function = torch.nn.CrossEntropyLoss(weight=self.loss_weights)
    @property
    def multi_label_threshold(self):
        # always stored internally as a dict with at least a 'default' key
        return self._multi_label_threshold
    @multi_label_threshold.setter
    def multi_label_threshold(self, x):  # setter method
        if type(x) is dict:
            if 'default' in x:
                self._multi_label_threshold = x
            else:
                raise Exception('multi_label_threshold dict should have a "default" key')
        else:
            # scalar threshold: wrap it in the canonical dict form
            self._multi_label_threshold = {'default': x}
    def forward_loss(self, sentences: Union[List[DataPoint], DataPoint]) -> torch.tensor:
        """Forward pass plus loss; returns (loss_tensor, number_of_labels)."""
        scores, labels = self.forward_pass(sentences)
        return self._calculate_loss(scores, labels)
    def _calculate_loss(self, scores, labels):
        # no labels at all in this batch: zero loss, count 1 (avoids division by zero upstream)
        if not any(labels): return torch.tensor(0., requires_grad=True, device=flair.device), 1
        if self.multi_label:
            # multi-hot encode each data point's label set
            labels = torch.tensor([[1 if l in all_labels_for_point else 0 for l in self.label_dictionary.get_items()]
                                   for all_labels_for_point in labels], dtype=torch.float, device=flair.device)
        else:
            # single-label: use the first label's index, fall back to 'O' if the point has none
            labels = torch.tensor([self.label_dictionary.get_idx_for_item(label[0]) if len(label) > 0
                                   else self.label_dictionary.get_idx_for_item('O')
                                   for label in labels], dtype=torch.long, device=flair.device)
        return self.loss_function(scores, labels), len(labels)
    def predict(
            self,
            sentences: Union[List[Sentence], Sentence],
            mini_batch_size: int = 32,
            return_probabilities_for_all_classes: bool = False,
            verbose: bool = False,
            label_name: Optional[str] = None,
            return_loss=False,
            embedding_storage_mode="none",
    ):
        """
        Predicts the class labels for the given sentences. The labels are directly added to the sentences.
        :param sentences: list of sentences
        :param mini_batch_size: mini batch size to use
        :param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted
        :param verbose: set to True to display a progress bar
        :param return_loss: set to True to return loss
        :param label_name: set this to change the name of the label type that is predicted
        :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
        you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
        'gpu' to store embeddings in GPU memory.
        """
        if label_name is None:
            label_name = self.label_type if self.label_type is not None else "label"
        with torch.no_grad():
            if not sentences:
                return sentences
            if isinstance(sentences, DataPoint):
                sentences = [sentences]
            # filter empty sentences
            if isinstance(sentences[0], DataPoint):
                sentences = [sentence for sentence in sentences if len(sentence) > 0]
            if len(sentences) == 0:
                return sentences
            # reverse sort all sequences by their length
            rev_order_len_index = sorted(range(len(sentences)), key=lambda k: len(sentences[k]), reverse=True)
            reordered_sentences: List[Union[DataPoint, str]] = [sentences[index] for index in rev_order_len_index]
            dataloader = DataLoader(dataset=SentenceDataset(reordered_sentences), batch_size=mini_batch_size)
            # progress bar for verbosity
            if verbose:
                dataloader = tqdm(dataloader)
            overall_loss = 0
            batch_no = 0
            label_count = 0
            for batch in dataloader:
                batch_no += 1
                if verbose:
                    dataloader.set_description(f"Inferencing on batch {batch_no}")
                # stop if all sentences are empty
                if not batch:
                    continue
                scores, gold_labels, data_points, label_candidates = self.forward_pass(batch,
                                                                                      return_label_candidates=True)
                # remove previously predicted labels of this type
                for sentence in data_points:
                    sentence.remove_labels(label_name)
                if return_loss:
                    overall_loss += self._calculate_loss(scores, gold_labels)[0]
                    label_count += len(label_candidates)
                # if anything could possibly be predicted
                if len(label_candidates) > 0:
                    if self.multi_label:
                        # independent per-label sigmoid; emit every label above its threshold
                        sigmoided = torch.sigmoid(scores)  # size: (n_sentences, n_classes)
                        n_labels = sigmoided.size(1)
                        for s_idx, (data_point, label_candidate) in enumerate(zip(data_points, label_candidates)):
                            for l_idx in range(n_labels):
                                label_value = self.label_dictionary.get_item_for_index(l_idx)
                                if label_value == 'O': continue
                                label_threshold = self._get_label_threshold(label_value)
                                label_score = sigmoided[s_idx, l_idx].item()
                                if label_score > label_threshold or return_probabilities_for_all_classes:
                                    label = label_candidate.spawn(value=label_value, score=label_score)
                                    data_point.add_complex_label(label_name, label)
                    else:
                        # single-label: softmax over classes
                        softmax = torch.nn.functional.softmax(scores, dim=-1)
                        if return_probabilities_for_all_classes:
                            n_labels = softmax.size(1)
                            for s_idx, (data_point, label_candidate) in enumerate(zip(data_points, label_candidates)):
                                for l_idx in range(n_labels):
                                    label_value = self.label_dictionary.get_item_for_index(l_idx)
                                    if label_value == 'O': continue
                                    label_score = softmax[s_idx, l_idx].item()
                                    label = label_candidate.spawn(value=label_value, score=label_score)
                                    data_point.add_complex_label(label_name, label)
                        else:
                            # only the argmax label is attached
                            conf, idx = torch.max(softmax, dim=-1)
                            for data_point, label_candidate, c, i in zip(data_points, label_candidates, conf, idx):
                                label_value = self.label_dictionary.get_item_for_index(i.item())
                                if label_value == 'O': continue
                                label = label_candidate.spawn(value=label_value, score=c.item())
                                data_point.add_complex_label(label_name, label)
                store_embeddings(batch, storage_mode=embedding_storage_mode)
            if return_loss:
                return overall_loss, label_count
    def _get_label_threshold(self, label_value):
        # per-label threshold if configured, otherwise the 'default' entry
        label_threshold = self.multi_label_threshold['default']
        if label_value in self.multi_label_threshold:
            label_threshold = self.multi_label_threshold[label_value]
        return label_threshold
    def __str__(self):
        # append loss-weight configuration to the torch.nn.Module repr
        return super(flair.nn.Model, self).__str__().rstrip(')') + \
               f' (weights): {self.weight_dict}\n' + \
               f' (weight_tensor) {self.loss_weights}\n)'
import itertools
import logging
import warnings
from abc import abstractmethod
from collections import Counter
from pathlib import Path
from typing import Union, List, Tuple, Dict, Optional
import torch.nn
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
import flair
from flair import file_utils
from flair.data import DataPoint, Sentence, Dictionary
from flair.datasets import DataLoader, SentenceDataset
from flair.training_utils import Result, store_embeddings
log = logging.getLogger("flair")
class Model(torch.nn.Module):
    """Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.
    Every new type of model must implement these methods."""

    @property
    @abstractmethod
    def label_type(self):
        """Each model predicts labels of a certain type. TODO: can we find a better name for this?"""
        raise NotImplementedError

    @abstractmethod
    def forward_loss(self, data_points: Union[List[DataPoint], DataPoint]) -> torch.tensor:
        """Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training."""
        raise NotImplementedError

    @abstractmethod
    def evaluate(
        self,
        sentences: Union[List[Sentence], Dataset],
        gold_label_type: str,
        out_path: Union[str, Path] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        num_workers: int = 8,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],
        gold_label_dictionary: Optional[Dictionary] = None,
    ) -> Result:
        """Evaluates the model. Returns a Result object containing evaluation
        results and a loss value. Implement this to enable evaluation.
        :param data_loader: DataLoader that iterates over dataset to be evaluated
        :param out_path: Optional output path to store predictions
        :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and
        freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU
        :return: Returns a Tuple consisting of a Result object and a loss float value
        """
        raise NotImplementedError

    @abstractmethod
    def _get_state_dict(self):
        """Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()
        functionality."""
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def _init_model_with_state_dict(state):
        """Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()
        functionality."""
        raise NotImplementedError

    @staticmethod
    def _fetch_model(model_name) -> str:
        # Default: treat the given name as a local file path. Subclasses may
        # override this to resolve hosted model names to downloaded files.
        return model_name

    def save(self, model_file: Union[str, Path], checkpoint: bool = False):
        """
        Saves the current model to the provided file.
        :param model_file: the model file
        :param checkpoint: if True, also serialize optimizer/scheduler state dicts
            into the model card so training can be resumed later
        """
        model_state = self._get_state_dict()

        # in Flair <0.9.1, optimizer and scheduler used to train model are not saved
        optimizer = scheduler = None

        # write out a "model card" if one is set
        if hasattr(self, 'model_card'):

            # special handling for optimizer: remember optimizer class and state dictionary
            if 'training_parameters' in self.model_card:
                training_parameters = self.model_card['training_parameters']

                if 'optimizer' in training_parameters:
                    optimizer = training_parameters['optimizer']
                    if checkpoint:
                        training_parameters['optimizer_state_dict'] = optimizer.state_dict()
                    # store only the class (not the live object) to keep the file picklable
                    training_parameters['optimizer'] = optimizer.__class__

                if 'scheduler' in training_parameters:
                    scheduler = training_parameters['scheduler']
                    if checkpoint:
                        # some schedulers warn when their state dict is queried; suppress that
                        with warnings.catch_warnings():
                            warnings.simplefilter("ignore")
                            training_parameters['scheduler_state_dict'] = scheduler.state_dict()
                    training_parameters['scheduler'] = scheduler.__class__

            model_state['model_card'] = self.model_card

        # save model
        torch.save(model_state, str(model_file), pickle_protocol=4)

        # restore optimizer and scheduler to model card if set (the live objects
        # were replaced by their classes above, only for serialization)
        if optimizer:
            self.model_card['training_parameters']['optimizer'] = optimizer
        if scheduler:
            self.model_card['training_parameters']['scheduler'] = scheduler

    @classmethod
    def load(cls, model: Union[str, Path]):
        """
        Loads the model from the given file.
        :param model: the model file
        :return: the loaded text classifier model
        """
        model_file = cls._fetch_model(str(model))

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
            # see https://github.com/zalandoresearch/flair/issues/351
            f = file_utils.load_big_file(str(model_file))
            state = torch.load(f, map_location='cpu')

        model = cls._init_model_with_state_dict(state)

        if 'model_card' in state:
            model.model_card = state['model_card']

        # always load in eval mode, on the globally configured flair device
        model.eval()
        model.to(flair.device)

        return model

    def print_model_card(self):
        """Log the model card (Flair/PyTorch versions and training parameters)
        if one was attached during training; otherwise log an explanatory note."""
        if hasattr(self, 'model_card'):
            param_out = "\n------------------------------------\n"
            param_out += "--------- Flair Model Card ---------\n"
            param_out += "------------------------------------\n"
            param_out += "- this Flair model was trained with:\n"
            param_out += f"-- Flair version {self.model_card['flair_version']}\n"
            param_out += f"-- PyTorch version {self.model_card['pytorch_version']}\n"
            if 'transformers_version' in self.model_card:
                param_out += f"-- Transformers version {self.model_card['transformers_version']}\n"
            param_out += "------------------------------------\n"

            param_out += "------- Training Parameters: -------\n"
            param_out += "------------------------------------\n"
            training_params = '\n'.join(f'-- {param} = {self.model_card["training_parameters"][param]}'
                                        for param in self.model_card['training_parameters'])
            param_out += training_params + "\n"
            param_out += "------------------------------------\n"

            log.info(param_out)
        else:
            log.info(
                "This model has no model card (likely because it is not yet trained or was trained with Flair version < 0.9.1)")
class Classifier(Model):
    """Abstract base class for all Flair models that do classification, both single- and multi-label.
    It inherits from flair.nn.Model and adds a unified evaluate() function so that all classification models
    use the same evaluation routines and compute the same numbers.
    Currently, the SequenceTagger implements this class directly, while all other classifiers in Flair
    implement the DefaultClassifier base class which implements Classifier."""

    def evaluate(
        self,
        data_points: Union[List[DataPoint], Dataset],
        gold_label_type: str,
        out_path: Union[str, Path] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        num_workers: int = 8,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],
        gold_label_dictionary: Optional[Dictionary] = None,
    ) -> Result:
        """Unified classification evaluation: predicts labels for all data points,
        collects gold and predicted label sets per annotated span, and computes
        micro/macro precision, recall and F1 via sklearn.

        :param data_points: sentences (or a Dataset of them) to evaluate on
        :param gold_label_type: label type under which the gold labels are stored
        :param out_path: optional file to which per-datapoint predictions are written
        :param gold_label_dictionary: if given, gold labels unknown to this
            dictionary are mapped to '<unk>'
        :return: a Result holding the main score, a detailed report and the loss
        """
        # imported lazily so the base module does not hard-require them at import time
        import numpy as np
        import sklearn

        # read Dataset into data loader (if list of sentences passed, make Dataset first)
        if not isinstance(data_points, Dataset):
            data_points = SentenceDataset(data_points)
        data_loader = DataLoader(data_points, batch_size=mini_batch_size, num_workers=num_workers)

        with torch.no_grad():

            # loss calculation
            eval_loss = 0
            average_over = 0

            # variables for printing
            lines: List[str] = []

            # variables for computing scores
            all_spans: List[str] = []
            all_true_values = {}
            all_predicted_values = {}

            sentence_id = 0
            for batch in data_loader:

                # remove any previously predicted labels
                for datapoint in batch:
                    datapoint.remove_labels('predicted')

                # predict for batch
                loss_and_count = self.predict(batch,
                                              embedding_storage_mode=embedding_storage_mode,
                                              mini_batch_size=mini_batch_size,
                                              label_name='predicted',
                                              return_loss=True)

                # predict() may return either (loss, count) or a bare loss value
                if isinstance(loss_and_count, Tuple):
                    average_over += loss_and_count[1]
                    eval_loss += loss_and_count[0]
                else:
                    eval_loss += loss_and_count

                # get the gold labels; spans are keyed by "<sentence_id>: <identifier>"
                for datapoint in batch:

                    for gold_label in datapoint.get_labels(gold_label_type):
                        representation = str(sentence_id) + ': ' + gold_label.identifier

                        value = gold_label.value
                        # gold labels not covered by the dictionary count as unknown
                        if gold_label_dictionary and gold_label_dictionary.get_idx_for_item(value) == 0:
                            value = '<unk>'

                        if representation not in all_true_values:
                            all_true_values[representation] = [value]
                        else:
                            all_true_values[representation].append(value)

                        if representation not in all_spans:
                            all_spans.append(representation)

                    for predicted_span in datapoint.get_labels("predicted"):
                        representation = str(sentence_id) + ': ' + predicted_span.identifier

                        # add to all_predicted_values
                        if representation not in all_predicted_values:
                            all_predicted_values[representation] = [predicted_span.value]
                        else:
                            all_predicted_values[representation].append(predicted_span.value)

                        if representation not in all_spans:
                            all_spans.append(representation)

                    sentence_id += 1

                store_embeddings(batch, embedding_storage_mode)

                # make printout lines
                if out_path:
                    lines.extend(self._print_predictions(batch, gold_label_type))

            # write all_predicted_values to out_file if set
            if out_path:
                with open(Path(out_path), "w", encoding="utf-8") as outfile:
                    outfile.write("".join(lines))

            # make the evaluation dictionary; "O" is the no-label class
            evaluation_label_dictionary = Dictionary(add_unk=False)
            evaluation_label_dictionary.add_item("O")
            for true_values in all_true_values.values():
                for label in true_values:
                    evaluation_label_dictionary.add_item(label)
            for predicted_values in all_predicted_values.values():
                for label in predicted_values:
                    evaluation_label_dictionary.add_item(label)

            # finally, compute numbers: one multi-hot row per span
            y_true = []
            y_pred = []

            for span in all_spans:

                # spans with no gold (or no predicted) labels default to "O"
                true_values = all_true_values[span] if span in all_true_values else ['O']
                predicted_values = all_predicted_values[span] if span in all_predicted_values else ['O']

                y_true_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
                for true_value in true_values:
                    y_true_instance[evaluation_label_dictionary.get_idx_for_item(true_value)] = 1
                y_true.append(y_true_instance.tolist())

                y_pred_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
                for predicted_value in predicted_values:
                    y_pred_instance[evaluation_label_dictionary.get_idx_for_item(predicted_value)] = 1
                y_pred.append(y_pred_instance.tolist())

            # now, calculate evaluation numbers; report classes ordered by frequency
            target_names = []
            labels = []

            counter = Counter()
            counter.update(list(itertools.chain.from_iterable(all_true_values.values())))
            counter.update(list(itertools.chain.from_iterable(all_predicted_values.values())))

            for label_name, count in counter.most_common():
                if label_name == 'O': continue
                if label_name in exclude_labels: continue
                target_names.append(label_name)
                labels.append(evaluation_label_dictionary.get_idx_for_item(label_name))

            # there is at least one gold label or one prediction (default)
            if len(all_true_values) + len(all_predicted_values) > 1:
                classification_report = sklearn.metrics.classification_report(
                    y_true, y_pred, digits=4, target_names=target_names, zero_division=0, labels=labels,
                )

                classification_report_dict = sklearn.metrics.classification_report(
                    y_true, y_pred, target_names=target_names, zero_division=0, output_dict=True, labels=labels,
                )

                accuracy_score = round(sklearn.metrics.accuracy_score(y_true, y_pred), 4)

                precision_score = round(classification_report_dict["micro avg"]["precision"], 4)
                recall_score = round(classification_report_dict["micro avg"]["recall"], 4)
                micro_f_score = round(classification_report_dict["micro avg"]["f1-score"], 4)
                macro_f_score = round(classification_report_dict["macro avg"]["f1-score"], 4)

                main_score = classification_report_dict[main_evaluation_metric[0]][main_evaluation_metric[1]]

            else:
                # issue error and default all evaluation numbers to 0.
                log.error(
                    "ACHTUNG! No gold labels and no all_predicted_values found! Could be an error in your corpus or how you "
                    "initialize the trainer!")
                accuracy_score = precision_score = recall_score = micro_f_score = macro_f_score = main_score = 0.
                classification_report = ""
                classification_report_dict = {}

            detailed_result = (
                "\nResults:"
                f"\n- F-score (micro) {micro_f_score}"
                f"\n- F-score (macro) {macro_f_score}"
                f"\n- Accuracy {accuracy_score}"
                "\n\nBy class:\n" + classification_report
            )

            # line for log file
            log_header = "PRECISION\tRECALL\tF1\tACCURACY"
            log_line = f"{precision_score}\t" f"{recall_score}\t" f"{micro_f_score}\t" f"{accuracy_score}"

            if average_over > 0:
                eval_loss /= average_over

            result = Result(
                main_score=main_score,
                log_line=log_line,
                log_header=log_header,
                detailed_results=detailed_result,
                classification_report=classification_report_dict,
                loss=eval_loss
            )

            return result

    def _print_predictions(self, batch, gold_label_type):
        """Render one text block per datapoint showing gold vs. predicted labels,
        flagging datapoints whose label sets differ with '-> MISMATCH!'."""
        lines = []
        for datapoint in batch:
            # check if there is a label mismatch (compare as sorted multisets)
            g = [label.identifier + label.value for label in datapoint.get_labels(gold_label_type)]
            p = [label.identifier + label.value for label in datapoint.get_labels('predicted')]
            g.sort()
            p.sort()
            correct_string = " -> MISMATCH!\n" if g != p else ""
            # print info
            eval_line = f"{datapoint.to_original_text()}\n" \
                        f" - Gold: {datapoint.get_labels(gold_label_type)}\n" \
                        f" - Pred: {datapoint.get_labels('predicted')}\n{correct_string}\n"
            lines.append(eval_line)
        return lines
class DefaultClassifier(Classifier):
    """Default base class for all Flair models that do classification, both single- and multi-label.
    It inherits from flair.nn.Classifier and thus from flair.nn.Model. All features shared by all classifiers
    are implemented here, including the loss calculation and the predict() method.
    Currently, the TextClassifier, RelationExtractor, TextPairClassifier and SimpleSequenceTagger implement
    this class. You only need to implement the forward_pass() method to implement this base class.
    """

    def forward_pass(self,
                     sentences: Union[List[DataPoint], DataPoint],
                     return_label_candidates: bool = False,
                     ):
        """This method does a forward pass through the model given a list of data points as input.
        Returns the tuple (scores, labels) if return_label_candidates = False, where scores are a tensor of logits
        produced by the decoder and labels are the string labels for each data point.
        Returns the tuple (scores, labels, data_points, candidate_labels) if return_label_candidates = True,
        where data_points are the data points to which labels are added (commonly either Sentence or Token objects)
        and candidate_labels are empty Label objects for each prediction (depending on the task Label,
        SpanLabel or RelationLabel)."""
        raise NotImplementedError

    def __init__(self,
                 label_dictionary: Dictionary,
                 multi_label: bool = False,
                 multi_label_threshold: float = 0.5,
                 loss_weights: Dict[str, float] = None,
                 ):
        """
        :param label_dictionary: dictionary of all labels this classifier can predict
        :param multi_label: if True, a data point may receive several labels (sigmoid
            + BCE loss); otherwise exactly one (softmax + cross-entropy loss)
        :param multi_label_threshold: sigmoid cutoff for multi-label prediction; may
            also be set later to a dict with per-label thresholds (see setter)
        :param loss_weights: optional per-label weights for the loss function
        """
        super().__init__()

        # initialize the label dictionary
        self.label_dictionary: Dictionary = label_dictionary

        # set up multi-label logic
        self.multi_label = multi_label
        self.multi_label_threshold = multi_label_threshold

        # loss weights and loss function
        self.weight_dict = loss_weights
        # Initialize the weight tensor: weight 1.0 for every label unless overridden
        if loss_weights is not None:
            n_classes = len(self.label_dictionary)
            weight_list = [1.0 for i in range(n_classes)]
            for i, tag in enumerate(self.label_dictionary.get_items()):
                if tag in loss_weights.keys():
                    weight_list[i] = loss_weights[tag]
            self.loss_weights = torch.FloatTensor(weight_list).to(flair.device)
        else:
            self.loss_weights = None

        if self.multi_label:
            self.loss_function = torch.nn.BCEWithLogitsLoss(weight=self.loss_weights)
        else:
            self.loss_function = torch.nn.CrossEntropyLoss(weight=self.loss_weights)

    @property
    def multi_label_threshold(self):
        # always stored internally as a dict with at least a 'default' key
        return self._multi_label_threshold

    @multi_label_threshold.setter
    def multi_label_threshold(self, x):  # setter method
        # accept either a scalar (applied to all labels) or a per-label dict
        if type(x) is dict:
            if 'default' in x:
                self._multi_label_threshold = x
            else:
                raise Exception('multi_label_threshold dict should have a "default" key')
        else:
            self._multi_label_threshold = {'default': x}

    def forward_loss(self, sentences: Union[List[DataPoint], DataPoint]) -> torch.tensor:
        scores, labels = self.forward_pass(sentences)
        return self._calculate_loss(scores, labels)

    def _calculate_loss(self, scores, labels):
        """Compute the loss over a batch. Returns (loss, count) where count is the
        number of labeled data points the loss averages over."""
        # no gold labels at all: contribute a zero loss (kept on the graph)
        if not any(labels): return torch.tensor(0., requires_grad=True, device=flair.device), 1

        if self.multi_label:
            # multi-hot target vector per data point
            labels = torch.tensor([[1 if l in all_labels_for_point else 0 for l in self.label_dictionary.get_items()]
                                   for all_labels_for_point in labels], dtype=torch.float, device=flair.device)
        else:
            # single class index per data point; unlabeled points map to 'O'
            labels = torch.tensor([self.label_dictionary.get_idx_for_item(label[0]) if len(label) > 0
                                   else self.label_dictionary.get_idx_for_item('O')
                                   for label in labels], dtype=torch.long, device=flair.device)

        return self.loss_function(scores, labels), len(labels)

    def predict(
        self,
        sentences: Union[List[Sentence], Sentence],
        mini_batch_size: int = 32,
        return_probabilities_for_all_classes: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
    ):
        """
        Predicts the class labels for the given sentences. The labels are directly added to the sentences.
        :param sentences: list of sentences
        :param mini_batch_size: mini batch size to use
        :param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted
        :param verbose: set to True to display a progress bar
        :param return_loss: set to True to return loss
        :param label_name: set this to change the name of the label type that is predicted
        :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
        you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
        'gpu' to store embeddings in GPU memory.
        """
        if label_name is None:
            label_name = self.label_type if self.label_type is not None else "label"

        with torch.no_grad():
            if not sentences:
                return sentences

            if isinstance(sentences, DataPoint):
                sentences = [sentences]

            # filter empty sentences
            if isinstance(sentences[0], DataPoint):
                sentences = [sentence for sentence in sentences if len(sentence) > 0]
            if len(sentences) == 0:
                return sentences

            # reverse sort all sequences by their length (speeds up batched processing)
            rev_order_len_index = sorted(range(len(sentences)), key=lambda k: len(sentences[k]), reverse=True)

            reordered_sentences: List[Union[DataPoint, str]] = [sentences[index] for index in rev_order_len_index]

            dataloader = DataLoader(dataset=SentenceDataset(reordered_sentences), batch_size=mini_batch_size)
            # progress bar for verbosity
            if verbose:
                dataloader = tqdm(dataloader)

            overall_loss = 0
            batch_no = 0
            label_count = 0
            for batch in dataloader:

                batch_no += 1

                if verbose:
                    dataloader.set_description(f"Inferencing on batch {batch_no}")

                # stop if all sentences are empty
                if not batch:
                    continue

                scores, gold_labels, data_points, label_candidates = self.forward_pass(batch,
                                                                                       return_label_candidates=True)
                # remove previously predicted labels of this type
                for sentence in data_points:
                    sentence.remove_labels(label_name)

                if return_loss:
                    overall_loss += self._calculate_loss(scores, gold_labels)[0]
                    label_count += len(label_candidates)

                # if anything could possibly be predicted
                if len(label_candidates) > 0:
                    if self.multi_label:
                        sigmoided = torch.sigmoid(scores)  # size: (n_sentences, n_classes)
                        n_labels = sigmoided.size(1)
                        for s_idx, (data_point, label_candidate) in enumerate(zip(data_points, label_candidates)):
                            for l_idx in range(n_labels):
                                label_value = self.label_dictionary.get_item_for_index(l_idx)
                                # the 'O' (no-label) class is never emitted as a prediction
                                if label_value == 'O': continue
                                label_threshold = self._get_label_threshold(label_value)
                                label_score = sigmoided[s_idx, l_idx].item()
                                if label_score > label_threshold or return_probabilities_for_all_classes:
                                    label = label_candidate.spawn(value=label_value, score=label_score)
                                    data_point.add_complex_label(label_name, label)
                    else:
                        softmax = torch.nn.functional.softmax(scores, dim=-1)

                        if return_probabilities_for_all_classes:
                            n_labels = softmax.size(1)
                            for s_idx, (data_point, label_candidate) in enumerate(zip(data_points, label_candidates)):
                                for l_idx in range(n_labels):
                                    label_value = self.label_dictionary.get_item_for_index(l_idx)
                                    if label_value == 'O': continue
                                    label_score = softmax[s_idx, l_idx].item()
                                    label = label_candidate.spawn(value=label_value, score=label_score)
                                    data_point.add_complex_label(label_name, label)
                        else:
                            # only the argmax class is attached as a label
                            conf, idx = torch.max(softmax, dim=-1)
                            for data_point, label_candidate, c, i in zip(data_points, label_candidates, conf, idx):
                                label_value = self.label_dictionary.get_item_for_index(i.item())
                                if label_value == 'O': continue
                                label = label_candidate.spawn(value=label_value, score=c.item())
                                data_point.add_complex_label(label_name, label)

                store_embeddings(batch, storage_mode=embedding_storage_mode)

            if return_loss:
                return overall_loss, label_count

    def _get_label_threshold(self, label_value):
        # per-label threshold if configured, otherwise the 'default' entry
        label_threshold = self.multi_label_threshold['default']
        if label_value in self.multi_label_threshold:
            label_threshold = self.multi_label_threshold[label_value]

        return label_threshold

    def __str__(self):
        return super(flair.nn.Model, self).__str__().rstrip(')') + \
               f'  (weights): {self.weight_dict}\n' + \
               f'  (weight_tensor) {self.loss_weights}\n)'
|
from pathlib import Path
from typing import List, Optional, Dict, Union, Tuple, Literal, Sequence, Any
import fsspec
import numpy as np
from xarray import DataArray
from dataclasses import asdict, dataclass
import json
from ..io.mrc import mrc_to_dask
from ..io import read
import dask.array as da
import dacite
from xarray_multiscale.metadata.util import SpatialTransform
# Recognized storage container formats.
CONTAINER_TYPES = {'mrc', 'n5', 'precomputed'}
# Default container format to use for a given sample dtype.
DTYPE_FORMATS = {"uint16": "n5", "uint8": "precomputed", "uint64": "n5"}
# Allowed semantic categories for a volume.
CONTENT_TYPES = {"em", "lm", "prediction", "segmentation", "analysis"}
ContainerTypes = Literal['n5', 'precomputed', 'mrc']


@dataclass
class VolumeStorageSpec:
    """Where a volume is stored: a key-value store scheme, a container
    (e.g. an .n5 directory tree) and the path of the data inside it.

    Raises ValueError from __post_init__ if containerType is unsupported.
    """
    kvStore: str
    containerType: ContainerTypes
    containerPath: str
    dataPath: str

    def toURI(self) -> str:
        """Return the full URI, e.g. 's3://bucket/vol.n5/em/raw'.

        BUGFIX: the suffix expression was previously written inline in the
        f-string using the same (single) quote character, which is a
        SyntaxError on Python < 3.12 (PEP 701 only allows quote reuse from
        3.12 onward). It is hoisted into a local variable instead.
        """
        container = Path(self.containerPath).with_suffix("." + self.containerType)
        return f'{self.kvStore}://{container.joinpath(self.dataPath)}'

    def __post_init__(self):
        # Fail fast on unsupported container formats.
        if self.containerType not in CONTAINER_TYPES:
            raise ValueError(
                f"containerType must be one of {CONTAINER_TYPES}"
            )
@dataclass
class ContrastLimits:
    """Inclusive display range used when rendering a volume."""
    # Lower bound of the range.
    min: float
    # Upper bound of the range; must not be below `min`.
    max: float

    def __post_init__(self):
        # Reject ranges whose endpoints are out of order.
        ordered = self.min <= self.max
        if not ordered:
            raise ValueError('min must be less than or equal to max.')
@dataclass
class DisplaySettings:
    """Rendering options for a single volume."""
    # Intensity window applied when displaying the data.
    contrastLimits: ContrastLimits
    # Display color name.
    color: str = 'white'
    # Whether the colormap is inverted.
    invertColormap: bool = False

    @classmethod
    def fromDict(cls, d: Dict[str, Any]):
        """Construct an instance (including the nested ContrastLimits) from a plain mapping."""
        return dacite.from_dict(data_class=cls, data=d)
@dataclass
class DatasetView:
    """A named view into one dataset, referencing a subset of its volumes."""
    # Dataset this view belongs to.
    datasetName: str
    # Human-readable name of the view.
    name: str
    # Free-text description of what the view shows.
    description: str
    # Optional spatial position -- presumably a camera/focus point; TODO confirm against consumer.
    position: Optional[Sequence[float]]
    # Optional zoom/scale value; None means use a default.
    scale: Optional[float]
    # Keys of the volumes included in this view.
    volumeKeys: Sequence[str]

    @classmethod
    def fromDict(cls, d: Dict[str, Any]):
        """Construct an instance from a plain mapping."""
        return dacite.from_dict(data_class=cls, data=d)
@dataclass
class MultiscaleSpec:
    """How to build a multiscale pyramid from a source volume."""
    # Name of the downsampling reduction -- presumably consumed by
    # xarray_multiscale; TODO confirm.
    reduction: str
    # Number of pyramid levels to generate.
    depth: int
    # Uniform or per-axis downsampling factors.
    factors: Union[int, Sequence[int]]
@dataclass
class MeshSource:
    """A mesh belonging to a dataset."""
    # Location of the mesh data.
    path: str
    # Human-readable name of the mesh.
    name: str
    # Dataset this mesh belongs to.
    datasetName: str
    # Mesh file format identifier.
    format: str
@dataclass
class VolumeSource:
    """A single source volume plus the metadata needed to load and display it."""
    # Location of the raw data (.mrc file or a readable container).
    path: str
    name: str
    datasetName: str
    dataType: str
    dimensions: Sequence[float]
    # Spatial metadata (axes, scale, translate, units per axis).
    transform: SpatialTransform
    # One of CONTENT_TYPES, validated in __post_init__.
    contentType: str
    containerType: Optional[ContainerTypes]
    displaySettings: DisplaySettings
    description: str = ''
    version: str = "0"
    tags: Optional[Sequence[str]] = None

    def __post_init__(self):
        # NOTE(review): assert-based validation is stripped under `python -O`;
        # raising ValueError would be safer -- left unchanged here.
        assert self.contentType in CONTENT_TYPES
        assert len(self.version) > 0

    def toDataArray(self):
        """Lazily load the volume as an xarray DataArray with coordinate
        vectors derived from the spatial transform."""
        if Path(self.path).suffix == ".mrc":
            array = mrc_to_dask(self.path, chunks=(1, -1, -1))
        else:
            r = read(self.path)
            array = da.from_array(r, chunks=r.chunks)
        # One coordinate vector per axis: translate + index * scale, annotated
        # with the units recorded on the transform.
        coords = [
            DataArray(
                self.transform.translate[idx] + np.arange(array.shape[idx]) * self.transform.scale[idx],
                dims=ax,
                attrs={'units': self.transform.units[idx]}
            )
            for idx, ax in enumerate(self.transform.axes)
        ]
        return DataArray(array, coords=coords, name=self.name)

    @classmethod
    def fromDict(cls, d: Dict[str, Any]):
        """Construct an instance (including nested dataclasses) from a plain mapping."""
        return dacite.from_dict(cls, d)
@dataclass
class DatasetIndex:
    """Index of all sources and views belonging to one dataset."""
    name: str
    volumes: Sequence[VolumeSource]
    meshes: Sequence[MeshSource]
    views: Sequence[DatasetView]

    @classmethod
    def from_json(cls, fname: Union[str, Path], open_kwargs: Optional[dict] = None):
        """Load an index from a JSON file at any fsspec-compatible URL.

        :param fname: path or URL of the JSON file
        :param open_kwargs: extra keyword arguments forwarded to fsspec.open
        """
        # BUGFIX: replaced the shared mutable default ({}) with None to avoid
        # the mutable-default-argument pitfall; behavior is unchanged.
        open_kwargs = {} if open_kwargs is None else open_kwargs
        with fsspec.open(str(fname), mode='rt', **open_kwargs) as fh:
            jblob = json.loads(fh.read())
        # NOTE(review): nested entries stay plain dicts here (no dacite) --
        # confirm callers expect that.
        return cls(**jblob)

    def to_json(self, fname: Union[str, Path], open_kwargs: Optional[dict] = None) -> int:
        """Serialize this index to JSON at any fsspec-compatible URL.

        :param open_kwargs: extra keyword arguments forwarded to fsspec.open
        :return: number of characters written
        """
        # BUGFIX: same mutable-default replacement as in from_json.
        open_kwargs = {} if open_kwargs is None else open_kwargs
        jblob = json.dumps(asdict(self))
        with fsspec.open(str(fname), mode='wt', **open_kwargs) as fh:
            result = fh.write(jblob)
        return result
@dataclass
class VolumeIngest:
    """Everything needed to ingest one volume: where it comes from, how to
    build its pyramid, and where to store the result."""
    source: VolumeSource
    multiscaleSpec: MultiscaleSpec
    storageSpec: VolumeStorageSpec
    # Optional name of a mutation applied during ingest -- semantics defined
    # by the consumer; TODO confirm.
    mutation: Optional[str] = None
@dataclass
class COSEMArrayAttrs:
    """COSEM-style per-array attributes: a name plus a spatial transform."""
    name: str
    transform: SpatialTransform

    @classmethod
    def fromDataArray(cls, data: DataArray) -> "COSEMArrayAttrs":
        """Derive the attributes from a named DataArray.

        Raises ValueError if the array carries no name.
        """
        if data.name is None:
            raise ValueError('DataArray argument must have a valid name')
        return cls(str(data.name), SpatialTransform.fromDataArray(data))
@dataclass
class OMEScaleAttrs:
    """One pyramid level: its path inside the group and its spatial transform."""
    path: str
    transform: SpatialTransform


@dataclass
class OMEMultiscaleAttrs:
    """OME-style multiscale entry: the ordered list of pyramid levels."""
    datasets: Sequence[OMEScaleAttrs]


@dataclass
class COSEMGroupAttrs:
    """COSEM-style group attributes: a name plus its multiscale entries."""
    name: str
    multiscales: Sequence[OMEMultiscaleAttrs]


@dataclass
class N5PixelResolution:
    """N5 'pixelResolution' attribute: per-axis sample spacing and its unit."""
    dimensions: Sequence[float]
    unit: str
@dataclass
class NeuroglancerGroupAttrs:
    # see https://github.com/google/neuroglancer/issues/176#issuecomment-553027775
    axes: Sequence[str]
    units: Sequence[str]
    scales: Sequence[Sequence[int]]
    pixelResolution: N5PixelResolution


@dataclass
class MultiscaleGroupAttrs:
    """Union of COSEM and neuroglancer group attributes, so one attribute
    blob satisfies both consumers."""
    name: str
    multiscales: Sequence[OMEMultiscaleAttrs]
    # Neuroglancer-specific fields (see NeuroglancerGroupAttrs above).
    axes: Sequence[str]
    units: Sequence[str]
    scales: Sequence[Sequence[int]]
    pixelResolution: N5PixelResolution
def makeN5ArrayAttrs(dimensions: Sequence[float], unit: str) -> Dict[str, N5PixelResolution]:
    """Wrap per-axis sample spacing in the N5 'pixelResolution' attribute dict."""
    resolution = N5PixelResolution(dimensions, unit)
    return {'pixelResolution': resolution}
def makeMultiscaleGroupAttrs(name: str,
                             arrays: Sequence[DataArray],
                             array_paths: Sequence[str],
                             axis_order: str = "F") -> MultiscaleGroupAttrs:
    """Build combined COSEM + neuroglancer group attributes for a pyramid.

    :param name: group name
    :param arrays: pyramid levels, finest first; each must be a named DataArray.
        NOTE(review): each array is assumed to carry a `scale_factors`
        attribute (presumably set by xarray_multiscale) -- confirm.
    :param array_paths: path of each level inside the group, aligned with arrays
    :param axis_order: "F" reports axes/scales/resolution reversed (fortran
        order), which is what neuroglancer expects; anything else keeps them as-is
    """
    assert len(arrays) == len(array_paths)
    cosemArrayAttrs = tuple(COSEMArrayAttrs.fromDataArray(a) for a in arrays)

    axis_indexer = slice(None)
    # neuroglancer wants the axes reported in fortran order
    if axis_order == "F":
        axis_indexer = slice(-1, None, -1)

    axes: Tuple[str] = arrays[0].dims[axis_indexer]
    scales = tuple(tuple(s.scale_factors)[axis_indexer] for s in arrays)
    # units are read from the (reordered) coordinate attributes of the finest level
    coords_reordered = tuple(arrays[0].coords[k] for k in axes)
    units = tuple(d.units for d in coords_reordered)

    # we need this for neuroglancer; spacing of the finest level, first axis' unit
    pixelResolution = N5PixelResolution(dimensions=cosemArrayAttrs[0].transform.scale[axis_indexer], unit=units[0])
    multiscales = OMEMultiscaleAttrs(datasets=[OMEScaleAttrs(path=ap, transform=attr.transform) for ap, attr in zip(array_paths, cosemArrayAttrs)])

    result = MultiscaleGroupAttrs(name=name,
                                  multiscales=[multiscales],
                                  axes=axes,
                                  units=units,
                                  scales=scales,
                                  pixelResolution=pixelResolution)
    return result
@dataclass
class CompositeArrayAttrs:
name: str
transform: SpatialTransform
pixelResolution: N5PixelResolution
@classmethod
def fromDataArray(cls, data: DataArray):
cosemAttrs = COSEMArrayAttrs.fromDataArray(data)
pixelResolution = N5PixelResolution(cosemAttrs.transform.scale[::-1], unit=cosemAttrs.transform.units[0])
return cls(cosemAttrs.name, cosemAttrs.transform, pixelResolution) | from pathlib import Path
from typing import List, Optional, Dict, Union, Tuple, Literal, Sequence, Any
import fsspec
import numpy as np
from xarray import DataArray
from dataclasses import asdict, dataclass
import json
from ..io.mrc import mrc_to_dask
from ..io import read
import dask.array as da
import dacite
from xarray_multiscale.metadata.util import SpatialTransform
CONTAINER_TYPES ={'mrc', 'n5', 'precomputed'}
DTYPE_FORMATS = {"uint16": "n5", "uint8": "precomputed", "uint64": "n5"}
CONTENT_TYPES = {"em", "lm", "prediction", "segmentation", "analysis"}
ContainerTypes = Literal['n5', 'precomputed', 'mrc']
@dataclass
class VolumeStorageSpec:
kvStore: str
containerType: ContainerTypes
containerPath: str
dataPath: str
def toURI(self):
return f'{self.kvStore}://{Path(self.containerPath).with_suffix("." + self.containerType).joinpath(self.dataPath)}'
def __post_init__(self):
if self.containerType not in CONTAINER_TYPES:
raise ValueError(
f"containerType must be one of {CONTAINER_TYPES}"
)
@dataclass
class ContrastLimits:
min: float
max: float
def __post_init__(self):
if not self.min <= self.max:
raise ValueError('min must be less than or equal to max.')
@dataclass
class DisplaySettings:
contrastLimits: ContrastLimits
color: str = 'white'
invertColormap: bool = False
@classmethod
def fromDict(cls, d: Dict[str, Any]):
return dacite.from_dict(cls, d)
@dataclass
class DatasetView:
datasetName: str
name: str
description: str
position: Optional[Sequence[float]]
scale: Optional[float]
volumeKeys: Sequence[str]
@classmethod
def fromDict(cls, d: Dict[str, Any]):
return dacite.from_dict(cls, d)
@dataclass
class MultiscaleSpec:
reduction: str
depth: int
factors: Union[int, Sequence[int]]
@dataclass
class MeshSource:
path: str
name: str
datasetName: str
format: str
@dataclass
class VolumeSource:
path: str
name: str
datasetName: str
dataType: str
dimensions: Sequence[float]
transform: SpatialTransform
contentType: str
containerType: Optional[ContainerTypes]
displaySettings: DisplaySettings
description: str = ''
version: str="0"
tags: Optional[Sequence[str]] = None
def __post_init__(self):
assert self.contentType in CONTENT_TYPES
assert len(self.version) > 0
def toDataArray(self):
if Path(self.path).suffix == ".mrc":
array = mrc_to_dask(self.path, chunks=(1, -1, -1))
else:
r = read(self.path)
array = da.from_array(r, chunks=r.chunks)
coords = [
DataArray(
self.transform.translate[idx] + np.arange(array.shape[idx]) * self.transform.scale[idx],
dims=ax,
attrs= {'units': self.transform.units[idx]}
)
for idx, ax in enumerate(self.transform.axes)
]
return DataArray(array, coords=coords, name=self.name)
@classmethod
def fromDict(cls, d: Dict[str, Any]):
return dacite.from_dict(cls, d)
@dataclass
class DatasetIndex:
name: str
volumes: Sequence[VolumeSource]
meshes: Sequence[MeshSource]
views: Sequence[DatasetView]
@classmethod
def from_json(cls, fname: Union[str, Path], open_kwargs: dict = {}):
with fsspec.open(str(fname), mode='rt', **open_kwargs) as fh:
jblob = json.loads(fh.read())
return cls(**jblob)
def to_json(self, fname: Union[str, Path], open_kwargs: dict = {}) -> int:
jblob = json.dumps(asdict(self))
with fsspec.open(str(fname), mode='wt', **open_kwargs) as fh:
result = fh.write(jblob)
return result
@dataclass
class VolumeIngest:
source: VolumeSource
multiscaleSpec: MultiscaleSpec
storageSpec: VolumeStorageSpec
mutation: Optional[str] = None
@dataclass
class COSEMArrayAttrs:
name: str
transform: SpatialTransform
@classmethod
def fromDataArray(cls, data: DataArray) -> "COSEMArrayAttrs":
name = data.name
if name is not None:
return cls(str(name), SpatialTransform.fromDataArray((data)))
else:
raise ValueError('DataArray argument must have a valid name')
@dataclass
class OMEScaleAttrs:
path: str
transform: SpatialTransform
@dataclass
class OMEMultiscaleAttrs:
datasets: Sequence[OMEScaleAttrs]
@dataclass
class COSEMGroupAttrs:
name: str
multiscales: Sequence[OMEMultiscaleAttrs]
@dataclass
class N5PixelResolution:
dimensions: Sequence[float]
unit: str
@dataclass
class NeuroglancerGroupAttrs:
# see https://github.com/google/neuroglancer/issues/176#issuecomment-553027775
axes: Sequence[str]
units: Sequence[str]
scales: Sequence[Sequence[int]]
pixelResolution: N5PixelResolution
@dataclass
class MultiscaleGroupAttrs:
name: str
multiscales: Sequence[OMEMultiscaleAttrs]
axes: Sequence[str]
units: Sequence[str]
scales: Sequence[Sequence[int]]
pixelResolution: N5PixelResolution
def makeN5ArrayAttrs(dimensions: Sequence[float], unit: str) -> Dict[str, N5PixelResolution]:
return {'pixelResolution': N5PixelResolution(dimensions, unit)}
def makeMultiscaleGroupAttrs(name: str,
arrays: Sequence[DataArray],
array_paths: Sequence[str],
axis_order: str="F") -> MultiscaleGroupAttrs:
assert len(arrays) == len(array_paths)
cosemArrayAttrs = tuple(COSEMArrayAttrs.fromDataArray(a) for a in arrays)
axis_indexer = slice(None)
# neuroglancer wants the axes reported in fortran order
if axis_order == "F":
axis_indexer = slice(-1, None, -1)
axes: Tuple[str] = arrays[0].dims[axis_indexer]
scales = tuple(tuple(s.scale_factors)[axis_indexer] for s in arrays)
coords_reordered = tuple(arrays[0].coords[k] for k in axes)
units = tuple(d.units for d in coords_reordered)
# we need this for neuroglancer
pixelResolution = N5PixelResolution(dimensions=cosemArrayAttrs[0].transform.scale[axis_indexer], unit=units[0])
multiscales = OMEMultiscaleAttrs(datasets=[OMEScaleAttrs(path=ap, transform=attr.transform) for ap, attr in zip(array_paths, cosemArrayAttrs)])
result = MultiscaleGroupAttrs(name=name,
multiscales=[multiscales],
axes=axes,
units=units,
scales=scales,
pixelResolution=pixelResolution)
return result
@dataclass
class CompositeArrayAttrs:
name: str
transform: SpatialTransform
pixelResolution: N5PixelResolution
@classmethod
def fromDataArray(cls, data: DataArray):
cosemAttrs = COSEMArrayAttrs.fromDataArray(data)
pixelResolution = N5PixelResolution(cosemAttrs.transform.scale[::-1], unit=cosemAttrs.transform.units[0])
return cls(cosemAttrs.name, cosemAttrs.transform, pixelResolution) |
'''
This is to fetch the tip table data for a telegram_id
Error Handling
==============
- /withdrawmemo tipuser11111 0.0001 TLOS pay_bill
- /withdrawmemo tipuser11111 0.00001 EOS pay_bill
{"code": 3050003, "name": "eosio_assert_message_exception", "what": "eosio_assert_message assertion failure"
, "details": [{"message": "assertion failure with message: there is no balances available corresponding to t
he parsed quantity symbol for the given from_id.", "file": "cf_system.cpp", "line_number": 14, "method": "eo
sio_assert"}, {"message": "pending console output: ", "file": "apply_context.cpp", "line_number": 143, "meth
od": "exec_one"}]}
- /withdrawmemo tipuser11117 0.0001 EOS pay_bill
{"code": 3010001, "name": "name_type_exception", "what": "Invalid name", "details": [{"message": "Name conta
ins invalid character: (7) ", "file": "name.hpp", "line_number": 26, "method": "char_to_symbol"}, {"message"
: "", "file": "abi_serializer.cpp", "line_number": 570, "method": "_variant_to_binary"}, {"message": "", "fi
le": "abi_serializer.cpp", "line_number": 570, "method": "_variant_to_binary"}, {"message": "", "file": "abi
_serializer.cpp", "line_number": 584, "method": "_variant_to_binary"}, {"message": "\"{"from_id":410894301,"
from_username":"abhi3700","to_ac":"tipuser11117","quantity":"0.0001 EOS","memo":"pay_bill"}\" is invalid arg
s for action \"withdraw\" code \"tippertipper\". expected \"[{"name":"from_id","type":"uint64"},{"name":"fro
m_username","type":"string"},{"name":"to_ac","type":"name"},{"name":"quantity","type":"asset"},{"name":"memo
","type":"string"}]\"", "file": "chain_plugin.cpp", "line_number": 3396, "method": "abi_json_to_bin"}, {"mes
sage": "code: tippertipper, action: withdraw, args: {"from_id":410894301,"from_username":"abhi3700","to_ac":
"tipuser11117","quantity":"0.0001 EOS","memo":"pay_bill"}", "file": "chain_plugin.cpp", "line_number": 3402,
"method": "abi_json_to_bin"}]}
'''
import json
import asyncio
from aioeos import EosAccount, EosJsonRpc, EosTransaction
from aioeos import types
from aioeos.exceptions import EosAccountDoesntExistException
from aioeos.exceptions import EosAssertMessageException
from aioeos.exceptions import EosDeadlineException
from aioeos.exceptions import EosRamUsageExceededException
from aioeos.exceptions import EosTxCpuUsageExceededException
from aioeos.exceptions import EosTxNetUsageExceededException
from input import *
# def validate(j):
# try:
# return json.load(j) # put JSON-data to a variable
# except json.decoder.JSONDecodeError:
# print("Invalid JSON") # in case json is invalid
# else:
# print("Valid JSON") # in case json is valid
async def balance(
    from_id,
    # chat
    ):
    """Print every token balance stored in the tip table for ``from_id``.

    Queries ``tip_table`` on the ``tip_eosio_ac`` contract (both imported
    from ``input``) for the single row keyed by the Telegram user id, then
    prints each balance as e.g. ``2.0 EOS``.
    """
    rpc = EosJsonRpc(url=Chain_URL)
    # lower_bound == upper_bound restricts the query to exactly this user's row.
    table_response = await rpc.get_table_rows(
        code=tip_eosio_ac,
        scope=tip_eosio_ac,
        table=tip_table,
        lower_bound=from_id,
        upper_bound=from_id,
    )
    # Fix 1: get_table_rows already returns a parsed dict, so use it directly.
    # The old str()/replace("'", '"')/replace("False", "false")/json.loads
    # round-trip breaks as soon as any field contains a quote or the
    # substring "False".
    rows = table_response.get("rows") or []
    # Fix 2: guard against a missing row instead of crashing with IndexError.
    if not rows:
        print(f"No tip-table row found for id {from_id}")
        return
    for r in rows[0]["balances"]:
        # "sym" packs precision and symbol as "<prec>,<SYM>", e.g. "4,EOS".
        prec, sym_name = r["key"]["sym"].split(",")
        # Fix 3: the original f-string reused single quotes inside a
        # single-quoted f-string (f'{r['value']}...'), which is a SyntaxError
        # on every Python before 3.12 (PEP 701).
        # The raw amount is an integer scaled by 10**precision.
        print(f'{r["value"] / 10 ** int(prec)} {sym_name}')  # e.g. 2.0 EOS
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(balance(410894301)) | '''
This is to fetch the tip table data for a telegram_id
Error Handling
==============
- /withdrawmemo tipuser11111 0.0001 TLOS pay_bill
- /withdrawmemo tipuser11111 0.00001 EOS pay_bill
{"code": 3050003, "name": "eosio_assert_message_exception", "what": "eosio_assert_message assertion failure"
, "details": [{"message": "assertion failure with message: there is no balances available corresponding to t
he parsed quantity symbol for the given from_id.", "file": "cf_system.cpp", "line_number": 14, "method": "eo
sio_assert"}, {"message": "pending console output: ", "file": "apply_context.cpp", "line_number": 143, "meth
od": "exec_one"}]}
- /withdrawmemo tipuser11117 0.0001 EOS pay_bill
{"code": 3010001, "name": "name_type_exception", "what": "Invalid name", "details": [{"message": "Name conta
ins invalid character: (7) ", "file": "name.hpp", "line_number": 26, "method": "char_to_symbol"}, {"message"
: "", "file": "abi_serializer.cpp", "line_number": 570, "method": "_variant_to_binary"}, {"message": "", "fi
le": "abi_serializer.cpp", "line_number": 570, "method": "_variant_to_binary"}, {"message": "", "file": "abi
_serializer.cpp", "line_number": 584, "method": "_variant_to_binary"}, {"message": "\"{"from_id":410894301,"
from_username":"abhi3700","to_ac":"tipuser11117","quantity":"0.0001 EOS","memo":"pay_bill"}\" is invalid arg
s for action \"withdraw\" code \"tippertipper\". expected \"[{"name":"from_id","type":"uint64"},{"name":"fro
m_username","type":"string"},{"name":"to_ac","type":"name"},{"name":"quantity","type":"asset"},{"name":"memo
","type":"string"}]\"", "file": "chain_plugin.cpp", "line_number": 3396, "method": "abi_json_to_bin"}, {"mes
sage": "code: tippertipper, action: withdraw, args: {"from_id":410894301,"from_username":"abhi3700","to_ac":
"tipuser11117","quantity":"0.0001 EOS","memo":"pay_bill"}", "file": "chain_plugin.cpp", "line_number": 3402,
"method": "abi_json_to_bin"}]}
'''
import json
import asyncio
from aioeos import EosAccount, EosJsonRpc, EosTransaction
from aioeos import types
from aioeos.exceptions import EosAccountDoesntExistException
from aioeos.exceptions import EosAssertMessageException
from aioeos.exceptions import EosDeadlineException
from aioeos.exceptions import EosRamUsageExceededException
from aioeos.exceptions import EosTxCpuUsageExceededException
from aioeos.exceptions import EosTxNetUsageExceededException
from input import *
# def validate(j):
# try:
# return json.load(j) # put JSON-data to a variable
# except json.decoder.JSONDecodeError:
# print("Invalid JSON") # in case json is invalid
# else:
# print("Valid JSON") # in case json is valid
async def balance(
    from_id,
    # chat
    ):
    """Print each token balance held in the tip table for the given telegram id."""
    rpc = EosJsonRpc(url=Chain_URL)
    raw = await rpc.get_table_rows(
        code=tip_eosio_ac,
        scope=tip_eosio_ac,
        table=tip_table,
        lower_bound=from_id,
        upper_bound=from_id,
    )
    # Turn the response dict's repr into JSON text: single quotes become
    # double quotes, and Python's False becomes JSON's false.
    as_json = str(raw).replace("'", '"').replace("False", "false")
    balances = json.loads(as_json)["rows"][0]["balances"]
    for entry in balances:
        # "sym" packs precision and symbol as "<prec>,<SYM>".
        precision, symbol = entry["key"]["sym"].split(",")
        amount = entry["value"] / 10 ** int(precision)
        print(f"{amount} {symbol}")  # result e.g. 2.0 EOS
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(balance(410894301)) |
"""
Code for understanding type annotations.
This file contains functions that turn various representations of
Python type annotations into :class:`pyanalyze.value.Value` objects.
There are three major functions:
- :func:`type_from_runtime` takes a runtime Python object, for example
``type_from_value(int)`` -> ``TypedValue(int)``.
- :func:`type_from_value` takes an existing :class:`pyanalyze.value.Value`
object. For example, evaluating the expression ``int`` will produce
``KnownValue(int)``, and calling :func:`type_from_value` on that value
will produce ``TypedValue(int)``.
- :func:`type_from_ast` takes an AST node and evaluates it into a type.
These functions all rely on each other. For example, when a forward
reference is found in a runtime annotation, the code parses it and calls
:func:`type_from_ast` to evaluate it.
These functions all use :class:`Context` objects to resolve names and
show errors.
"""
import contextlib
from dataclasses import dataclass, InitVar, field
import typing
import typing_inspect
import qcore
import ast
import builtins
from collections.abc import Callable, Iterable, Hashable
import sys
from typing import (
Any,
Container,
NamedTuple,
cast,
TypeVar,
ContextManager,
Mapping,
NewType,
Sequence,
Optional,
Tuple,
Union,
TYPE_CHECKING,
)
from typing_extensions import ParamSpec, TypedDict
from .error_code import ErrorCode
from .extensions import (
AsynqCallable,
CustomCheck,
ExternalType,
HasAttrGuard,
NoReturnGuard,
ParameterTypeGuard,
TypeGuard,
)
from .find_unused import used
from .functions import FunctionDefNode
from .node_visitor import ErrorContext
from .signature import ELLIPSIS_PARAM, SigParameter, Signature, ParameterKind
from .safe import is_typing_name, is_instance_of_typing_name
from . import type_evaluation
from .value import (
AnnotatedValue,
AnySource,
AnyValue,
CallableValue,
CustomCheckExtension,
Extension,
HasAttrGuardExtension,
KnownValue,
MultiValuedValue,
NO_RETURN_VALUE,
NoReturnGuardExtension,
ParamSpecArgsValue,
ParamSpecKwargsValue,
ParameterTypeGuardExtension,
SelfTVV,
TypeGuardExtension,
TypedValue,
SequenceIncompleteValue,
annotate_value,
unite_values,
Value,
GenericValue,
SubclassValue,
TypedDictValue,
NewTypeValue,
TypeVarValue,
_HashableValue,
)
if TYPE_CHECKING:
    from .name_check_visitor import NameCheckVisitor
# typing.get_origin/get_args and types.GenericAlias are only available on
# newer Pythons; on older versions fall back to stubs so origin/args lookups
# degrade to "no origin, no args" instead of raising.
try:
    from typing import get_origin, get_args  # Python 3.9
    from types import GenericAlias
except ImportError:
    GenericAlias = None
    def get_origin(obj: object) -> Any:
        # Fallback stub: report no origin for anything.
        return None
    def get_args(obj: object) -> Tuple[Any, ...]:
        # Fallback stub: report no type arguments for anything.
        return ()
# The typing aliases and the contextlib ABCs can both appear in annotations;
# these tuples let the code treat them interchangeably.
CONTEXT_MANAGER_TYPES = (typing.ContextManager, contextlib.AbstractContextManager)
if sys.version_info >= (3, 7):
    ASYNC_CONTEXT_MANAGER_TYPES = (
        typing.AsyncContextManager,
        # Doesn't exist on 3.6
        # static analysis: ignore[undefined_attribute]
        contextlib.AbstractAsyncContextManager,
    )
else:
    ASYNC_CONTEXT_MANAGER_TYPES = (typing.AsyncContextManager,)
@dataclass
class Context:
    """A context for evaluating annotations.

    The base implementation does very little. Subclass this to do something more useful.
    """

    should_suppress_undefined_names: bool = field(default=False, init=False)
    """While this is True, no errors are shown for undefined names."""

    def suppress_undefined_names(self) -> ContextManager[None]:
        """Temporarily suppress errors about undefined names."""
        return qcore.override(self, "should_suppress_undefined_names", True)

    def show_error(
        self,
        message: str,
        error_code: ErrorCode = ErrorCode.invalid_annotation,
        node: Optional[ast.AST] = None,
    ) -> None:
        """Show an error found while evaluating an annotation."""
        # Base implementation ignores errors; subclasses report them.
        pass

    def get_name(self, node: ast.Name) -> Value:
        """Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name."""
        return AnyValue(AnySource.inference)

    def handle_undefined_name(self, name: str) -> Value:
        """Report an unresolvable name, unless suppression is active."""
        if self.should_suppress_undefined_names:
            return AnyValue(AnySource.inference)
        self.show_error(
            f"Undefined name {name!r} used in annotation", ErrorCode.undefined_name
        )
        return AnyValue(AnySource.error)

    def get_name_from_globals(self, name: str, globals: Mapping[str, Any]) -> Value:
        """Resolve a name against a globals mapping, falling back to builtins."""
        if name in globals:
            return KnownValue(globals[name])
        elif hasattr(builtins, name):
            return KnownValue(getattr(builtins, name))
        return self.handle_undefined_name(name)
@dataclass
class RuntimeEvaluator(type_evaluation.Evaluator, Context):
    """Evaluator that resolves names against a runtime function's globals."""

    globals: Mapping[str, object] = field(repr=False)  # globals of the evaluated function
    func: typing.Callable[..., Any]  # the runtime function being evaluated

    def evaluate_type(self, node: ast.AST) -> Value:
        """Interpret an AST node as a type annotation."""
        return type_from_ast(node, ctx=self)

    def evaluate_value(self, node: ast.AST) -> Value:
        """Interpret an AST node as a value; unrecognized nodes are not errors."""
        return value_from_ast(node, ctx=self, error_on_unrecognized=False)

    def get_name(self, node: ast.Name) -> Value:
        """Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name."""
        return self.get_name_from_globals(node.id, self.globals)
@dataclass
class SyntheticEvaluator(type_evaluation.Evaluator):
    """Evaluator that delegates errors and name lookups to explicit contexts."""

    error_ctx: ErrorContext  # where errors are reported
    annotations_context: Context  # used to resolve names and evaluate types

    def show_error(
        self,
        message: str,
        error_code: ErrorCode = ErrorCode.invalid_annotation,
        node: Optional[ast.AST] = None,
    ) -> None:
        """Report an error through the error context, defaulting to self.node."""
        self.error_ctx.show_error(node or self.node, message, error_code=error_code)

    def evaluate_type(self, node: ast.AST) -> Value:
        """Interpret an AST node as a type annotation."""
        return type_from_ast(node, ctx=self.annotations_context)

    def evaluate_value(self, node: ast.AST) -> Value:
        """Interpret an AST node as a value; unrecognized nodes are not errors."""
        return value_from_ast(
            node, ctx=self.annotations_context, error_on_unrecognized=False
        )

    def get_name(self, node: ast.Name) -> Value:
        """Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name."""
        return self.annotations_context.get_name(node)

    @classmethod
    def from_visitor(
        cls,
        node: FunctionDefNode,
        visitor: "NameCheckVisitor",
        return_annotation: Value,
    ) -> "SyntheticEvaluator":
        """Build an evaluator wired to a NameCheckVisitor's error reporting."""
        return cls(
            node,
            return_annotation,
            visitor,
            _DefaultContext(visitor, node, use_name_node_for_error=True),
        )
@used  # part of an API
def type_from_ast(
    ast_node: ast.AST,
    visitor: Optional["NameCheckVisitor"] = None,
    ctx: Optional[Context] = None,
) -> Value:
    """Given an AST node representing an annotation, return a
    :class:`Value <pyanalyze.value.Value>`.

    :param ast_node: AST node to evaluate.
    :param visitor: Visitor class to use. This is used in the default
                    :class:`Context` to resolve names and show errors.
                    This is ignored if `ctx` is given.
    :param ctx: :class:`Context` to use for evaluation.
    """
    effective_ctx = _DefaultContext(visitor, ast_node) if ctx is None else ctx
    return _type_from_ast(ast_node, effective_ctx)
def type_from_annotations(
    annotations: Mapping[str, object],
    key: str,
    *,
    globals: Optional[Mapping[str, object]] = None,
    ctx: Optional[Context] = None,
) -> Optional[Value]:
    """Resolve one entry of an ``__annotations__`` mapping to a type.

    Returns None if the entry is missing, the mapping is malformed, or the
    annotation resolves to an incomplete Any.
    """
    try:
        annotation = annotations[key]
    except Exception:
        # Malformed __annotations__
        return None
    resolved = type_from_runtime(annotation, globals=globals, ctx=ctx)
    if resolved == AnyValue(AnySource.incomplete_annotation):
        return None
    return resolved
def type_from_runtime(
    val: object,
    visitor: Optional["NameCheckVisitor"] = None,
    node: Optional[ast.AST] = None,
    globals: Optional[Mapping[str, object]] = None,
    ctx: Optional[Context] = None,
) -> Value:
    """Given a runtime annotation object, return a
    :class:`Value <pyanalyze.value.Value>`.

    :param val: Object to evaluate. This will usually come from an
                ``__annotations__`` dictionary.
    :param visitor: Visitor class to use. This is used in the default
                    :class:`Context` to resolve names and show errors.
                    This is ignored if `ctx` is given.
    :param node: AST node that the annotation derives from. This is
                 used for showing errors. Ignored if `ctx` is given.
    :param globals: Dictionary of global variables that can be used
                    to resolve names. Ignored if `ctx` is given.
    :param ctx: :class:`Context` to use for evaluation.
    """
    evaluation_ctx = _DefaultContext(visitor, node, globals) if ctx is None else ctx
    return _type_from_runtime(val, evaluation_ctx)
def type_from_value(
    value: Value,
    visitor: Optional["NameCheckVisitor"] = None,
    node: Optional[ast.AST] = None,
    ctx: Optional[Context] = None,
    is_typeddict: bool = False,
) -> Value:
    """Given a :class:`Value <pyanalyze.value.Value` representing an annotation,
    return a :class:`Value <pyanalyze.value.Value>` representing the type.

    The input value represents an expression, the output value represents
    a type. For example, the :term:`impl` of ``typing.cast(typ, val)``
    calls :func:`type_from_value` on the value it receives for its
    `typ` argument and returns the result.

    :param value: :class:`Value <pyanalyze.value.Value` to evaluate.
    :param visitor: Visitor class to use. This is used in the default
                    :class:`Context` to resolve names and show errors.
                    This is ignored if `ctx` is given.
    :param node: AST node that the annotation derives from. This is
                 used for showing errors. Ignored if `ctx` is given.
    :param ctx: :class:`Context` to use for evaluation.
    :param is_typeddict: Whether we are at the top level of a `TypedDict`
                         definition.
    """
    evaluation_ctx = _DefaultContext(visitor, node) if ctx is None else ctx
    return _type_from_value(value, evaluation_ctx, is_typeddict=is_typeddict)
def value_from_ast(
    ast_node: ast.AST, ctx: Context, *, error_on_unrecognized: bool = True
) -> Value:
    """Evaluate an annotation AST node into a Value via _Visitor.

    Unrecognized nodes produce AnyValue(error); an error is shown only when
    error_on_unrecognized is True.
    """
    result = _Visitor(ctx).visit(ast_node)
    if result is not None:
        return result
    if error_on_unrecognized:
        ctx.show_error("Invalid type annotation", node=ast_node)
    return AnyValue(AnySource.error)
def _type_from_ast(node: ast.AST, ctx: Context, is_typeddict: bool = False) -> Value:
    """Evaluate an AST node to a Value, then interpret that Value as a type."""
    intermediate = value_from_ast(node, ctx)
    return _type_from_value(intermediate, ctx, is_typeddict=is_typeddict)
def _type_from_runtime(val: Any, ctx: Context, is_typeddict: bool = False) -> Value:
    """Convert a runtime annotation object into a Value.

    Dispatches on the many concrete forms an annotation can take at runtime
    (strings, typing aliases, TypedDicts, ForwardRefs, TypeVars, ...).
    Branch order matters: several of the typing_inspect checks overlap.
    """
    if isinstance(val, str):
        return _eval_forward_ref(val, ctx, is_typeddict=is_typeddict)
    elif isinstance(val, tuple):
        # This happens under some Python versions for types
        # nested in tuples, e.g. on 3.6:
        # > typing_inspect.get_args(Union[Set[int], List[str]])
        # ((typing.Set, int), (typing.List, str))
        if not val:
            # from Tuple[()]
            return KnownValue(())
        origin = val[0]
        if len(val) == 2:
            args = (val[1],)
        else:
            args = val[1:]
        return _value_of_origin_args(origin, args, val, ctx)
    elif GenericAlias is not None and isinstance(val, GenericAlias):
        origin = get_origin(val)
        args = get_args(val)
        if origin is tuple and not args:
            return SequenceIncompleteValue(tuple, [])
        return _value_of_origin_args(origin, args, val, ctx)
    elif typing_inspect.is_literal_type(val):
        args = typing_inspect.get_args(val)
        # Bug fix: a single-argument Literal has len(args) == 1; the original
        # checked len(args) == 0 and then indexed args[0], which would always
        # raise IndexError for an empty args tuple.
        if len(args) == 1:
            return KnownValue(args[0])
        else:
            return unite_values(*[KnownValue(arg) for arg in args])
    elif typing_inspect.is_union_type(val):
        args = typing_inspect.get_args(val)
        return unite_values(*[_type_from_runtime(arg, ctx) for arg in args])
    elif typing_inspect.is_tuple_type(val):
        args = typing_inspect.get_args(val)
        if not args:
            return TypedValue(tuple)
        elif len(args) == 2 and args[1] is Ellipsis:
            # Tuple[T, ...]: homogeneous tuple.
            return GenericValue(tuple, [_type_from_runtime(args[0], ctx)])
        elif len(args) == 1 and args[0] == ():
            return SequenceIncompleteValue(tuple, [])  # empty tuple
        else:
            args_vals = [_type_from_runtime(arg, ctx) for arg in args]
            return SequenceIncompleteValue(tuple, args_vals)
    elif is_instance_of_typing_name(val, "_TypedDictMeta"):
        required_keys = getattr(val, "__required_keys__", None)
        # 3.8's typing.TypedDict doesn't have __required_keys__. With
        # inheritance, this makes it apparently impossible to figure out which
        # keys are required at runtime.
        total = getattr(val, "__total__", True)
        return TypedDictValue(
            {
                key: _get_typeddict_value(value, ctx, key, required_keys, total)
                for key, value in val.__annotations__.items()
            }
        )
    elif val is InitVar:
        # On 3.6 and 3.7, InitVar[T] just returns InitVar at runtime, so we can't
        # get the actual type out.
        return AnyValue(AnySource.inference)
    elif isinstance(val, InitVar):
        # val.type exists only on 3.8+, but on earlier versions
        # InitVar instances aren't being created
        # static analysis: ignore[undefined_attribute]
        return type_from_runtime(val.type)
    elif is_instance_of_typing_name(val, "AnnotatedMeta"):
        # Annotated in 3.6's typing_extensions
        origin, metadata = val.__args__
        return _make_annotated(
            _type_from_runtime(origin, ctx), [KnownValue(v) for v in metadata], ctx
        )
    elif is_instance_of_typing_name(val, "_AnnotatedAlias"):
        # Annotated in typing and newer typing_extensions
        return _make_annotated(
            _type_from_runtime(val.__origin__, ctx),
            [KnownValue(v) for v in val.__metadata__],
            ctx,
        )
    elif typing_inspect.is_generic_type(val):
        origin = typing_inspect.get_origin(val)
        args = typing_inspect.get_args(val)
        if getattr(val, "_special", False):
            args = []  # distinguish List from List[T] on 3.7 and 3.8
        return _value_of_origin_args(origin, args, val, ctx, is_typeddict=is_typeddict)
    elif typing_inspect.is_callable_type(val):
        args = typing_inspect.get_args(val)
        return _value_of_origin_args(Callable, args, val, ctx)
    elif val is AsynqCallable:
        # Bare AsynqCallable: accept any arguments.
        return CallableValue(Signature.make([ELLIPSIS_PARAM], is_asynq=True))
    elif isinstance(val, type):
        return _maybe_typed_value(val)
    elif val is None:
        return KnownValue(None)
    elif is_typing_name(val, "NoReturn") or is_typing_name(val, "Never"):
        return NO_RETURN_VALUE
    elif is_typing_name(val, "Self"):
        return SelfTVV
    elif val is typing.Any:
        return AnyValue(AnySource.explicit)
    elif hasattr(val, "__supertype__"):
        if isinstance(val.__supertype__, type):
            # NewType
            return NewTypeValue(val)
        elif typing_inspect.is_tuple_type(val.__supertype__):
            # TODO figure out how to make NewTypes over tuples work
            return AnyValue(AnySource.inference)
        else:
            ctx.show_error(f"Invalid NewType {val}")
            return AnyValue(AnySource.error)
    elif typing_inspect.is_typevar(val):
        tv = cast(TypeVar, val)
        return make_type_var_value(tv, ctx)
    elif is_instance_of_typing_name(val, "ParamSpec"):
        return TypeVarValue(val, is_paramspec=True)
    elif is_instance_of_typing_name(val, "ParamSpecArgs"):
        return ParamSpecArgsValue(val.__origin__)
    elif is_instance_of_typing_name(val, "ParamSpecKwargs"):
        return ParamSpecKwargsValue(val.__origin__)
    elif is_typing_name(val, "Final") or is_typing_name(val, "ClassVar"):
        # Bare Final/ClassVar carry no type information.
        return AnyValue(AnySource.incomplete_annotation)
    elif typing_inspect.is_classvar(val) or typing_inspect.is_final_type(val):
        if hasattr(val, "__type__"):
            # 3.6
            typ = val.__type__
        else:
            # 3.7+
            typ = val.__args__[0]
        return _type_from_runtime(typ, ctx)
    elif is_instance_of_typing_name(val, "_ForwardRef") or is_instance_of_typing_name(
        val, "ForwardRef"
    ):
        # This has issues because the forward ref may be defined in a different file, in
        # which case we don't know which names are valid in it.
        with ctx.suppress_undefined_names():
            try:
                code = ast.parse(val.__forward_arg__)
            except SyntaxError:
                ctx.show_error(
                    f"Syntax error in forward reference: {val.__forward_arg__}"
                )
                return AnyValue(AnySource.error)
            return _type_from_ast(code.body[0], ctx, is_typeddict=is_typeddict)
    elif val is Ellipsis:
        # valid in Callable[..., ]
        return AnyValue(AnySource.explicit)
    elif is_instance_of_typing_name(val, "_TypeAlias"):
        # typing.Pattern and Match, which are not normal generic types for some reason
        return GenericValue(val.impl_type, [_type_from_runtime(val.type_var, ctx)])
    elif isinstance(val, TypeGuard):
        return AnnotatedValue(
            TypedValue(bool),
            [TypeGuardExtension(_type_from_runtime(val.guarded_type, ctx))],
        )
    elif is_instance_of_typing_name(val, "_TypeGuard"):
        # 3.6 only
        return AnnotatedValue(
            TypedValue(bool),
            [TypeGuardExtension(_type_from_runtime(val.__type__, ctx))],
        )
    elif isinstance(val, AsynqCallable):
        params = _callable_args_from_runtime(val.args, "AsynqCallable", ctx)
        sig = Signature.make(
            params, _type_from_runtime(val.return_type, ctx), is_asynq=True
        )
        return CallableValue(sig)
    elif isinstance(val, ExternalType):
        try:
            typ = qcore.helpers.object_from_string(val.type_path)
        except Exception:
            ctx.show_error(f"Cannot resolve type {val.type_path!r}")
            return AnyValue(AnySource.error)
        return _type_from_runtime(typ, ctx)
    # Python 3.6 only (on later versions Required/NotRequired match
    # is_generic_type).
    elif is_instance_of_typing_name(val, "_MaybeRequired"):
        required = is_instance_of_typing_name(val, "_Required")
        if is_typeddict:
            return Pep655Value(required, _type_from_runtime(val.__type__, ctx))
        else:
            cls = "Required" if required else "NotRequired"
            ctx.show_error(f"{cls}[] used in unsupported context")
            return AnyValue(AnySource.error)
    elif is_typing_name(val, "TypeAlias"):
        return AnyValue(AnySource.incomplete_annotation)
    elif is_typing_name(val, "TypedDict"):
        return KnownValue(TypedDict)
    else:
        origin = get_origin(val)
        if isinstance(origin, type):
            return _maybe_typed_value(origin)
        elif val is NamedTuple:
            return TypedValue(tuple)
        ctx.show_error(f"Invalid type annotation {val}")
        return AnyValue(AnySource.error)
def make_type_var_value(tv: TypeVar, ctx: Context) -> TypeVarValue:
    """Wrap a TypeVar in a TypeVarValue, resolving its bound and constraints."""
    bound = None if tv.__bound__ is None else _type_from_runtime(tv.__bound__, ctx)
    # tuple() of an empty sequence is (), matching the unconstrained case.
    constraints = tuple(
        _type_from_runtime(constraint, ctx) for constraint in tv.__constraints__
    )
    return TypeVarValue(tv, bound=bound, constraints=constraints)
def _callable_args_from_runtime(
    arg_types: Any, label: str, ctx: Context
) -> Sequence[SigParameter]:
    """Turn the argument spec of a Callable/AsynqCallable annotation into parameters.

    Handles ``...``, a list/tuple of types, a bare ParamSpec, and
    ``Concatenate[...]``; `label` is only used in error messages.
    """
    if arg_types is Ellipsis or arg_types == [Ellipsis]:
        # Callable[..., X]: accept any arguments.
        return [ELLIPSIS_PARAM]
    elif type(arg_types) in (tuple, list):
        if len(arg_types) == 1:
            (arg,) = arg_types
            # A single element may itself be ..., Concatenate, or a ParamSpec.
            if arg is Ellipsis:
                return [ELLIPSIS_PARAM]
            elif is_typing_name(getattr(arg, "__origin__", None), "Concatenate"):
                return _args_from_concatenate(arg, ctx)
            elif is_instance_of_typing_name(arg, "ParamSpec"):
                param_spec = TypeVarValue(arg, is_paramspec=True)
                param = SigParameter(
                    "__P", kind=ParameterKind.PARAM_SPEC, annotation=param_spec
                )
                return [param]
        # General case: each element is a positional-only parameter type,
        # except ParamSpecs, which keep their PARAM_SPEC kind.
        types = [_type_from_runtime(arg, ctx) for arg in arg_types]
        params = [
            SigParameter(
                f"__arg{i}",
                kind=ParameterKind.PARAM_SPEC
                if isinstance(typ, TypeVarValue) and typ.is_paramspec
                else ParameterKind.POSITIONAL_ONLY,
                annotation=typ,
            )
            for i, typ in enumerate(types)
        ]
        return params
    elif is_instance_of_typing_name(arg_types, "ParamSpec"):
        # Callable[P, X] with a bare ParamSpec.
        param_spec = TypeVarValue(arg_types, is_paramspec=True)
        param = SigParameter(
            "__P", kind=ParameterKind.PARAM_SPEC, annotation=param_spec
        )
        return [param]
    elif is_typing_name(getattr(arg_types, "__origin__", None), "Concatenate"):
        return _args_from_concatenate(arg_types, ctx)
    else:
        ctx.show_error(f"Invalid arguments to {label}: {arg_types!r}")
        # Fall back to accepting anything rather than failing hard.
        return [ELLIPSIS_PARAM]
def _args_from_concatenate(concatenate: Any, ctx: Context) -> Sequence[SigParameter]:
    """Expand Concatenate[A, B, P] into positional params plus a trailing ParamSpec."""
    annotations = [_type_from_runtime(arg, ctx) for arg in concatenate.__args__]
    last_index = len(annotations) - 1
    params = []
    for i, annotation in enumerate(annotations):
        # Only the final argument of Concatenate is the ParamSpec.
        if i == last_index:
            kind = ParameterKind.PARAM_SPEC
        else:
            kind = ParameterKind.POSITIONAL_ONLY
        params.append(SigParameter(f"__arg{i}", kind=kind, annotation=annotation))
    return params
def _get_typeddict_value(
    value: Value,
    ctx: Context,
    key: str,
    required_keys: Optional[Container[str]],
    total: bool,
) -> Tuple[bool, Value]:
    """Resolve one TypedDict entry to a (required, value) pair."""
    resolved = _type_from_runtime(value, ctx, is_typeddict=True)
    # Required[]/NotRequired[] in the annotation override class-level totality.
    if isinstance(resolved, Pep655Value):
        return resolved.required, resolved.value
    # Without __required_keys__ info, fall back to the class's __total__ flag.
    required = total if required_keys is None else key in required_keys
    return required, resolved
def _eval_forward_ref(val: str, ctx: Context, is_typeddict: bool = False) -> Value:
    """Parse a string annotation and evaluate it as a type."""
    try:
        tree = ast.parse(val, mode="eval")
    except SyntaxError:
        ctx.show_error(f"Syntax error in type annotation: {val}")
        return AnyValue(AnySource.error)
    return _type_from_ast(tree.body, ctx, is_typeddict=is_typeddict)
def _type_from_value(value: Value, ctx: Context, is_typeddict: bool = False) -> Value:
    """Interpret a Value representing an annotation expression as a type.

    KnownValues are delegated to the runtime path; subscripted expressions
    go through _type_from_subscripted_value.
    """
    if isinstance(value, KnownValue):
        return _type_from_runtime(value.val, ctx, is_typeddict=is_typeddict)
    elif isinstance(value, TypeVarValue):
        return value
    elif isinstance(value, MultiValuedValue):
        # A union of annotations becomes a union of types.
        return unite_values(*[_type_from_value(val, ctx) for val in value.vals])
    elif isinstance(value, AnnotatedValue):
        # Drop the annotation metadata and interpret the inner value.
        return _type_from_value(value.value, ctx)
    elif isinstance(value, _SubscriptedValue):
        return _type_from_subscripted_value(
            value.root, value.members, ctx, is_typeddict=is_typeddict
        )
    elif isinstance(value, AnyValue):
        return value
    elif isinstance(value, SubclassValue) and value.exactly:
        # Type[X] used as an annotation means X itself.
        return value.typ
    elif isinstance(value, TypedValue) and isinstance(value.typ, str):
        # Synthetic type
        return value
    else:
        ctx.show_error(f"Unrecognized annotation {value}")
        return AnyValue(AnySource.error)
def _type_from_subscripted_value(
    root: Optional[Value],
    members: Sequence[Value],
    ctx: Context,
    is_typeddict: bool = False,
) -> Value:
    """Interpret ``root[members]`` as a type.

    First handles non-KnownValue roots (generics, nested subscripts, unions,
    synthetic types), then dispatches on the runtime object inside a
    KnownValue root (Union, Literal, Tuple, Optional, Type, Annotated, ...).
    """
    if isinstance(root, GenericValue):
        # Re-parameterize an already-generic root when arities match.
        if len(root.args) == len(members):
            return GenericValue(
                root.typ, [_type_from_value(member, ctx) for member in members]
            )
    if isinstance(root, _SubscriptedValue):
        # Nested subscripts: resolve the inner subscript first.
        root_type = _type_from_value(root, ctx)
        return _type_from_subscripted_value(root_type, members, ctx)
    elif isinstance(root, MultiValuedValue):
        # Distribute the subscript over a union of roots.
        return unite_values(
            *[
                _type_from_subscripted_value(subval, members, ctx, is_typeddict)
                for subval in root.vals
            ]
        )
    if (
        isinstance(root, SubclassValue)
        and root.exactly
        and isinstance(root.typ, TypedValue)
    ):
        return GenericValue(
            root.typ.typ, [_type_from_value(elt, ctx) for elt in members]
        )
    if isinstance(root, TypedValue) and isinstance(root.typ, str):
        # Synthetic (string-named) type.
        return GenericValue(root.typ, [_type_from_value(elt, ctx) for elt in members])
    if not isinstance(root, KnownValue):
        # Avoid a duplicate error if the root already failed to resolve.
        if root != AnyValue(AnySource.error):
            ctx.show_error(f"Cannot resolve subscripted annotation: {root}")
        return AnyValue(AnySource.error)
    root = root.val
    if root is typing.Union:
        return unite_values(*[_type_from_value(elt, ctx) for elt in members])
    elif is_typing_name(root, "Literal"):
        # Note that in Python 3.8, the way typing's internal cache works means that
        # Literal[1] and Literal[True] are cached to the same value, so if you use
        # both, you'll get whichever one was used first in later calls. There's nothing
        # we can do about that.
        if all(isinstance(elt, KnownValue) for elt in members):
            return unite_values(*members)
        else:
            ctx.show_error(f"Arguments to Literal[] must be literals, not {members}")
            return AnyValue(AnySource.error)
    elif root is typing.Tuple or root is tuple:
        if len(members) == 2 and members[1] == KnownValue(Ellipsis):
            # Tuple[T, ...]: homogeneous tuple.
            return GenericValue(tuple, [_type_from_value(members[0], ctx)])
        elif len(members) == 1 and members[0] == KnownValue(()):
            # Tuple[()]: the empty tuple.
            return SequenceIncompleteValue(tuple, [])
        else:
            return SequenceIncompleteValue(
                tuple, [_type_from_value(arg, ctx) for arg in members]
            )
    elif root is typing.Optional:
        if len(members) != 1:
            ctx.show_error("Optional[] takes only one argument")
            return AnyValue(AnySource.error)
        return unite_values(KnownValue(None), _type_from_value(members[0], ctx))
    elif root is typing.Type or root is type:
        if len(members) != 1:
            ctx.show_error("Type[] takes only one argument")
            return AnyValue(AnySource.error)
        argument = _type_from_value(members[0], ctx)
        return SubclassValue.make(argument)
    elif is_typing_name(root, "Annotated"):
        # First member is the type; the rest is metadata.
        origin, *metadata = members
        return _make_annotated(_type_from_value(origin, ctx), metadata, ctx)
    elif is_typing_name(root, "TypeGuard"):
        if len(members) != 1:
            ctx.show_error("TypeGuard requires a single argument")
            return AnyValue(AnySource.error)
        return AnnotatedValue(
            TypedValue(bool), [TypeGuardExtension(_type_from_value(members[0], ctx))]
        )
    elif is_typing_name(root, "Required"):
        # Required[]/NotRequired[] are only meaningful inside a TypedDict.
        if not is_typeddict:
            ctx.show_error("Required[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(members) != 1:
            ctx.show_error("Required[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(True, _type_from_value(members[0], ctx))
    elif is_typing_name(root, "NotRequired"):
        if not is_typeddict:
            ctx.show_error("NotRequired[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(members) != 1:
            ctx.show_error("NotRequired[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(False, _type_from_value(members[0], ctx))
    elif root is Callable or root is typing.Callable:
        if len(members) == 2:
            args, return_value = members
            return _make_callable_from_value(args, return_value, ctx)
        ctx.show_error("Callable requires exactly two arguments")
        return AnyValue(AnySource.error)
    elif root is AsynqCallable:
        if len(members) == 2:
            args, return_value = members
            return _make_callable_from_value(args, return_value, ctx, is_asynq=True)
        ctx.show_error("AsynqCallable requires exactly two arguments")
        return AnyValue(AnySource.error)
    elif typing_inspect.is_generic_type(root):
        origin = typing_inspect.get_origin(root)
        if origin is None:
            # On Python 3.9 at least, get_origin() of a class that inherits
            # from Generic[T] is None.
            origin = root
        origin = _maybe_get_extra(origin)
        return GenericValue(origin, [_type_from_value(elt, ctx) for elt in members])
    elif isinstance(root, type):
        return GenericValue(root, [_type_from_value(elt, ctx) for elt in members])
    else:
        # In Python 3.9, generics are implemented differently and typing.get_origin
        # can help.
        origin = get_origin(root)
        if isinstance(origin, type):
            return GenericValue(origin, [_type_from_value(elt, ctx) for elt in members])
        ctx.show_error(f"Unrecognized subscripted annotation: {root}")
        return AnyValue(AnySource.error)
def _maybe_get_extra(origin: type) -> Union[type, str]:
    """Map context-manager classes to synthetic names and unwrap __extra__."""
    # ContextManager is defined oddly and we lose the Protocol if we don't use
    # synthetic types.
    for candidate in CONTEXT_MANAGER_TYPES:
        if origin is candidate:
            return "typing.ContextManager"
    for candidate in ASYNC_CONTEXT_MANAGER_TYPES:
        if origin is candidate:
            return "typing.AsyncContextManager"
    # turn typing.List into list in some Python versions
    # compare https://github.com/ilevkivskyi/typing_inspect/issues/36
    extra = getattr(origin, "__extra__", None)
    return origin if extra is None else extra
class _DefaultContext(Context):
    """Context that resolves names via a NameCheckVisitor or a globals dict."""

    def __init__(
        self,
        visitor: "NameCheckVisitor",
        node: Optional[ast.AST],
        globals: Optional[Mapping[str, object]] = None,
        use_name_node_for_error: bool = False,
    ) -> None:
        super().__init__()
        self.visitor = visitor  # may be None; then errors are dropped
        self.node = node  # default node for error reporting
        self.globals = globals  # fallback name source when there is no visitor
        self.use_name_node_for_error = use_name_node_for_error

    def show_error(
        self,
        message: str,
        error_code: ErrorCode = ErrorCode.invalid_annotation,
        node: Optional[ast.AST] = None,
    ) -> None:
        """Report through the visitor; silently ignored without visitor or node."""
        if node is None:
            node = self.node
        if self.visitor is not None and node is not None:
            self.visitor.show_error(node, message, error_code)

    def get_name(self, node: ast.Name) -> Value:
        """Resolve a name via the visitor, then globals, then builtins."""
        if self.visitor is not None:
            val, _ = self.visitor.resolve_name(
                node,
                error_node=node if self.use_name_node_for_error else self.node,
                suppress_errors=self.should_suppress_undefined_names,
            )
            return val
        elif self.globals is not None:
            if node.id in self.globals:
                return KnownValue(self.globals[node.id])
            elif hasattr(builtins, node.id):
                return KnownValue(getattr(builtins, node.id))
        if self.should_suppress_undefined_names:
            return AnyValue(AnySource.inference)
        self.show_error(
            f"Undefined name {node.id!r} used in annotation",
            ErrorCode.undefined_name,
            node=node,
        )
        return AnyValue(AnySource.error)
@dataclass(frozen=True)
class _SubscriptedValue(Value):
    """Intermediate representation of a subscripted annotation (root[members])."""

    root: Optional[Value]  # the value being subscripted; None if unresolvable
    members: Tuple[Value, ...]  # the subscript arguments
@dataclass
class Pep655Value(Value):
    """Result of a Required[]/NotRequired[] annotation inside a TypedDict (PEP 655)."""

    required: bool  # True for Required[], False for NotRequired[]
    value: Value  # the wrapped type
class _Visitor(ast.NodeVisitor):
    def __init__(self, ctx: Context) -> None:
        # Context used for name resolution and error reporting.
        self.ctx = ctx
    def generic_visit(self, node: ast.AST) -> None:
        """Fail loudly on AST node types without a dedicated visitor."""
        raise NotImplementedError(f"no visitor implemented for {node!r}")
def visit_Name(self, node: ast.Name) -> Value:
return self.ctx.get_name(node)
def visit_Subscript(self, node: ast.Subscript) -> Value:
value = self.visit(node.value)
index = self.visit(node.slice)
if isinstance(index, SequenceIncompleteValue):
members = index.members
else:
members = (index,)
return _SubscriptedValue(value, members)
def visit_Attribute(self, node: ast.Attribute) -> Optional[Value]:
root_value = self.visit(node.value)
if isinstance(root_value, KnownValue):
try:
return KnownValue(getattr(root_value.val, node.attr))
except AttributeError:
self.ctx.show_error(
f"{root_value.val!r} has no attribute {node.attr!r}", node=node
)
return AnyValue(AnySource.error)
elif not isinstance(root_value, AnyValue):
self.ctx.show_error(f"Cannot resolve annotation {root_value}", node=node)
return AnyValue(AnySource.error)
def visit_Tuple(self, node: ast.Tuple) -> Value:
elts = [self.visit(elt) for elt in node.elts]
return SequenceIncompleteValue(tuple, elts)
def visit_List(self, node: ast.List) -> Value:
elts = [self.visit(elt) for elt in node.elts]
return SequenceIncompleteValue(list, elts)
def visit_Index(self, node: ast.Index) -> Value:
# class is unused in 3.9
return self.visit(node.value) # static analysis: ignore[undefined_attribute]
def visit_Ellipsis(self, node: ast.Ellipsis) -> Value:
return KnownValue(Ellipsis)
def visit_Constant(self, node: ast.Constant) -> Value:
return KnownValue(node.value)
def visit_NameConstant(self, node: ast.NameConstant) -> Value:
return KnownValue(node.value)
def visit_Num(self, node: ast.Num) -> Value:
return KnownValue(node.n)
def visit_Str(self, node: ast.Str) -> Value:
return KnownValue(node.s)
def visit_Bytes(self, node: ast.Bytes) -> Value:
return KnownValue(node.s)
def visit_Expr(self, node: ast.Expr) -> Value:
return self.visit(node.value)
def visit_BinOp(self, node: ast.BinOp) -> Optional[Value]:
if isinstance(node.op, ast.BitOr):
return _SubscriptedValue(
KnownValue(Union), (self.visit(node.left), self.visit(node.right))
)
else:
return None
def visit_UnaryOp(self, node: ast.UnaryOp) -> Optional[Value]:
# Only int and float negation on literals are supported.
if isinstance(node.op, ast.USub):
operand = self.visit(node.operand)
if isinstance(operand, KnownValue) and isinstance(
operand.val, (int, float)
):
return KnownValue(-operand.val)
return None
def visit_Call(self, node: ast.Call) -> Optional[Value]:
func = self.visit(node.func)
if not isinstance(func, KnownValue):
return None
if func.val == NewType:
arg_values = [self.visit(arg) for arg in node.args]
kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
args = []
kwargs = {}
for arg_value in arg_values:
if isinstance(arg_value, KnownValue):
args.append(arg_value.val)
else:
return None
for name, kwarg_value in kwarg_values:
if name is None:
if isinstance(kwarg_value, KnownValue) and isinstance(
kwarg_value.val, dict
):
kwargs.update(kwarg_value.val)
else:
return None
else:
if isinstance(kwarg_value, KnownValue):
kwargs[name] = kwarg_value.val
else:
return None
return KnownValue(func.val(*args, **kwargs))
elif func.val == TypeVar:
arg_values = [self.visit(arg) for arg in node.args]
kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
if not arg_values:
self.ctx.show_error(
"TypeVar() requires at least one argument", node=node
)
return None
name_val = arg_values[0]
if not isinstance(name_val, KnownValue):
self.ctx.show_error("TypeVar name must be a literal", node=node.args[0])
return None
constraints = []
for arg_value in arg_values[1:]:
constraints.append(_type_from_value(arg_value, self.ctx))
bound = None
for name, kwarg_value in kwarg_values:
if name in ("covariant", "contravariant"):
continue
elif name == "bound":
bound = _type_from_value(kwarg_value, self.ctx)
else:
self.ctx.show_error(f"Unrecognized TypeVar kwarg {name}", node=node)
return None
tv = TypeVar(name_val.val)
return TypeVarValue(tv, bound, tuple(constraints))
elif is_typing_name(func.val, "ParamSpec"):
arg_values = [self.visit(arg) for arg in node.args]
kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
if not arg_values:
self.ctx.show_error(
"ParamSpec() requires at least one argument", node=node
)
return None
name_val = arg_values[0]
if not isinstance(name_val, KnownValue):
self.ctx.show_error(
"ParamSpec name must be a literal", node=node.args[0]
)
return None
for name, _ in kwarg_values:
self.ctx.show_error(f"Unrecognized ParamSpec kwarg {name}", node=node)
return None
tv = ParamSpec(name_val.val)
return TypeVarValue(tv, is_paramspec=True)
elif isinstance(func.val, type):
if func.val is object:
return AnyValue(AnySource.inference)
return TypedValue(func.val)
else:
return None
def _value_of_origin_args(
    origin: object,
    args: Sequence[object],
    val: object,
    ctx: Context,
    is_typeddict: bool = False,
) -> Value:
    """Interpret a parameterized annotation from its origin and arguments.

    :param origin: The unsubscripted construct (e.g. ``list`` for ``List[int]``).
    :param args: The subscript arguments.
    :param val: The original annotation object; used for error messages and a
        3.7 ``SupportsInt``-style fallback.
    :param ctx: :class:`Context` for name resolution and error reporting.
    :param is_typeddict: True when evaluating a TypedDict entry, which is the
        only place Required[]/NotRequired[] are allowed.
    """
    if origin is typing.Type or origin is type:
        if not args:
            return TypedValue(type)
        return SubclassValue.make(_type_from_runtime(args[0], ctx))
    elif origin is typing.Tuple or origin is tuple:
        if not args:
            return TypedValue(tuple)
        elif len(args) == 2 and args[1] is Ellipsis:
            # Tuple[T, ...]: homogeneous tuple of unknown length.
            return GenericValue(tuple, [_type_from_runtime(args[0], ctx)])
        elif len(args) == 1 and args[0] == ():
            # Tuple[()]: the empty tuple.
            return SequenceIncompleteValue(tuple, [])
        else:
            args_vals = [_type_from_runtime(arg, ctx) for arg in args]
            return SequenceIncompleteValue(tuple, args_vals)
    elif origin is typing.Union:
        return unite_values(*[_type_from_runtime(arg, ctx) for arg in args])
    elif origin is Callable or origin is typing.Callable:
        if len(args) == 0:
            return TypedValue(Callable)
        *arg_types, return_type = args
        if len(arg_types) == 1 and isinstance(arg_types[0], list):
            arg_types = arg_types[0]
        params = _callable_args_from_runtime(arg_types, "Callable", ctx)
        sig = Signature.make(params, _type_from_runtime(return_type, ctx))
        return CallableValue(sig)
    elif is_typing_name(origin, "Annotated"):
        origin, metadata = args
        # This should never happen
        if not isinstance(metadata, Iterable):
            ctx.show_error("Unexpected format in Annotated")
            return AnyValue(AnySource.error)
        return _make_annotated(
            _type_from_runtime(origin, ctx),
            [KnownValue(data) for data in metadata],
            ctx,
        )
    elif isinstance(origin, type):
        origin = _maybe_get_extra(origin)
        if args:
            args_vals = [_type_from_runtime(val, ctx) for val in args]
            return GenericValue(origin, args_vals)
        else:
            return _maybe_typed_value(origin)
    elif is_typing_name(origin, "TypeGuard"):
        if len(args) != 1:
            ctx.show_error("TypeGuard requires a single argument")
            return AnyValue(AnySource.error)
        return AnnotatedValue(
            TypedValue(bool), [TypeGuardExtension(_type_from_runtime(args[0], ctx))]
        )
    elif is_typing_name(origin, "Final"):
        if len(args) != 1:
            ctx.show_error("Final requires a single argument")
            return AnyValue(AnySource.error)
        # TODO(#160): properly support Final
        return _type_from_runtime(args[0], ctx)
    elif is_typing_name(origin, "ClassVar"):
        if len(args) != 1:
            ctx.show_error("ClassVar requires a single argument")
            return AnyValue(AnySource.error)
        return _type_from_runtime(args[0], ctx)
    elif is_typing_name(origin, "Required"):
        if not is_typeddict:
            ctx.show_error("Required[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(args) != 1:
            ctx.show_error("Required[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(True, _type_from_runtime(args[0], ctx))
    elif is_typing_name(origin, "NotRequired"):
        if not is_typeddict:
            ctx.show_error("NotRequired[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(args) != 1:
            ctx.show_error("NotRequired[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(False, _type_from_runtime(args[0], ctx))
    elif origin is None and isinstance(val, type):
        # This happens for SupportsInt in 3.7.
        return _maybe_typed_value(val)
    else:
        # Bug fix: the error f-string previously nested double quotes inside
        # a double-quoted f-string, which is a SyntaxError before Python 3.12.
        ctx.show_error(
            f"Unrecognized annotation {origin}[{', '.join(map(repr, args))}]"
        )
        return AnyValue(AnySource.error)
def _maybe_typed_value(val: Union[type, str]) -> Value:
    """Map a runtime type (or synthetic type name) onto the matching Value."""
    if val is type(None):
        # An annotation of NoneType means the value None itself.
        return KnownValue(None)
    if val is Hashable:
        # Hashable gets a dedicated Value subclass.
        return _HashableValue(val)
    return TypedValue(val)
def _make_callable_from_value(
    args: Value, return_value: Value, ctx: Context, is_asynq: bool = False
) -> Value:
    """Build a CallableValue from the pieces of ``Callable[args, return]``.

    Supports ``...``, a list of argument types, a ParamSpec, and
    ``Concatenate[...]``; anything else is an error.
    """
    return_annotation = _type_from_value(return_value, ctx)
    if args == KnownValue(Ellipsis):
        # Callable[..., R]: any arguments accepted.
        return CallableValue(
            Signature.make(
                [ELLIPSIS_PARAM], return_annotation=return_annotation, is_asynq=is_asynq
            )
        )
    elif isinstance(args, SequenceIncompleteValue):
        # Callable[[T1, T2], R]: synthetic positional-only parameters.
        params = [
            SigParameter(
                f"__arg{i}",
                kind=ParameterKind.POSITIONAL_ONLY,
                annotation=_type_from_value(arg, ctx),
            )
            for i, arg in enumerate(args.members)
        ]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    elif isinstance(args, KnownValue) and is_instance_of_typing_name(
        args.val, "ParamSpec"
    ):
        # Callable[P, R] where P is a runtime ParamSpec object.
        annotation = TypeVarValue(args.val, is_paramspec=True)
        params = [
            SigParameter("__P", kind=ParameterKind.PARAM_SPEC, annotation=annotation)
        ]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    elif isinstance(args, TypeVarValue) and args.is_paramspec:
        # Callable[P, R] where P was already evaluated to a TypeVarValue.
        params = [SigParameter("__P", kind=ParameterKind.PARAM_SPEC, annotation=args)]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    elif (
        isinstance(args, _SubscriptedValue)
        and isinstance(args.root, KnownValue)
        and is_typing_name(args.root.val, "Concatenate")
    ):
        # Concatenate[T1, ..., P]: the last member is the ParamSpec.
        annotations = [_type_from_value(arg, ctx) for arg in args.members]
        params = [
            SigParameter(
                f"__arg{i}",
                kind=ParameterKind.PARAM_SPEC
                if i == len(annotations) - 1
                else ParameterKind.POSITIONAL_ONLY,
                annotation=annotation,
            )
            for i, annotation in enumerate(annotations)
        ]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    else:
        ctx.show_error(f"Unrecognized Callable type argument {args}")
        return AnyValue(AnySource.error)
def _make_annotated(origin: Value, metadata: Sequence[Value], ctx: Context) -> Value:
    """Attach Annotated[] metadata to *origin*, converting known guard objects."""
    converted = []
    for entry in metadata:
        converted.append(_value_from_metadata(entry, ctx))
    return annotate_value(origin, converted)
def _value_from_metadata(entry: Value, ctx: Context) -> Union[Value, Extension]:
    """Convert one Annotated[] metadata item into an Extension where recognized.

    Known pyanalyze guard objects (ParameterTypeGuard, NoReturnGuard,
    HasAttrGuard, CustomCheck) become the corresponding Extension; anything
    else is passed through unchanged.
    """
    if isinstance(entry, KnownValue):
        if isinstance(entry.val, ParameterTypeGuard):
            return ParameterTypeGuardExtension(
                entry.val.varname, _type_from_runtime(entry.val.guarded_type, ctx)
            )
        elif isinstance(entry.val, NoReturnGuard):
            return NoReturnGuardExtension(
                entry.val.varname, _type_from_runtime(entry.val.guarded_type, ctx)
            )
        elif isinstance(entry.val, HasAttrGuard):
            return HasAttrGuardExtension(
                entry.val.varname,
                _type_from_runtime(entry.val.attribute_name, ctx),
                _type_from_runtime(entry.val.attribute_type, ctx),
            )
        elif isinstance(entry.val, CustomCheck):
            return CustomCheckExtension(entry.val)
    return entry
| """
Code for understanding type annotations.
This file contains functions that turn various representations of
Python type annotations into :class:`pyanalyze.value.Value` objects.
There are three major functions:
- :func:`type_from_runtime` takes a runtime Python object, for example
``type_from_runtime(int)`` -> ``TypedValue(int)``.
- :func:`type_from_value` takes an existing :class:`pyanalyze.value.Value`
object. For example, evaluating the expression ``int`` will produce
``KnownValue(int)``, and calling :func:`type_from_value` on that value
will produce ``TypedValue(int)``.
- :func:`type_from_ast` takes an AST node and evaluates it into a type.
These functions all rely on each other. For example, when a forward
reference is found in a runtime annotation, the code parses it and calls
:func:`type_from_ast` to evaluate it.
These functions all use :class:`Context` objects to resolve names and
show errors.
"""
import contextlib
from dataclasses import dataclass, InitVar, field
import typing
import typing_inspect
import qcore
import ast
import builtins
from collections.abc import Callable, Iterable, Hashable
import sys
from typing import (
Any,
Container,
NamedTuple,
cast,
TypeVar,
ContextManager,
Mapping,
NewType,
Sequence,
Optional,
Tuple,
Union,
TYPE_CHECKING,
)
from typing_extensions import ParamSpec, TypedDict
from .error_code import ErrorCode
from .extensions import (
AsynqCallable,
CustomCheck,
ExternalType,
HasAttrGuard,
NoReturnGuard,
ParameterTypeGuard,
TypeGuard,
)
from .find_unused import used
from .functions import FunctionDefNode
from .node_visitor import ErrorContext
from .signature import ELLIPSIS_PARAM, SigParameter, Signature, ParameterKind
from .safe import is_typing_name, is_instance_of_typing_name
from . import type_evaluation
from .value import (
AnnotatedValue,
AnySource,
AnyValue,
CallableValue,
CustomCheckExtension,
Extension,
HasAttrGuardExtension,
KnownValue,
MultiValuedValue,
NO_RETURN_VALUE,
NoReturnGuardExtension,
ParamSpecArgsValue,
ParamSpecKwargsValue,
ParameterTypeGuardExtension,
SelfTVV,
TypeGuardExtension,
TypedValue,
SequenceIncompleteValue,
annotate_value,
unite_values,
Value,
GenericValue,
SubclassValue,
TypedDictValue,
NewTypeValue,
TypeVarValue,
_HashableValue,
)
if TYPE_CHECKING:
from .name_check_visitor import NameCheckVisitor
try:
    from typing import get_origin, get_args  # Python 3.9
    from types import GenericAlias
except ImportError:
    # Pre-3.9 fallback: types.GenericAlias does not exist and the typing
    # helpers are unavailable, so substitute no-op versions.
    GenericAlias = None
    def get_origin(obj: object) -> Any:
        return None
    def get_args(obj: object) -> Tuple[Any, ...]:
        return ()
# Runtime objects that should be recognized as the context-manager types:
# both the typing alias and the contextlib ABC.
CONTEXT_MANAGER_TYPES = (typing.ContextManager, contextlib.AbstractContextManager)
if sys.version_info >= (3, 7):
    ASYNC_CONTEXT_MANAGER_TYPES = (
        typing.AsyncContextManager,
        # Doesn't exist on 3.6
        # static analysis: ignore[undefined_attribute]
        contextlib.AbstractAsyncContextManager,
    )
else:
    ASYNC_CONTEXT_MANAGER_TYPES = (typing.AsyncContextManager,)
@dataclass
class Context:
    """A context for evaluating annotations.

    The base implementation does very little. Subclass this to do something more useful.
    """

    should_suppress_undefined_names: bool = field(default=False, init=False)
    """While this is True, no errors are shown for undefined names."""

    def suppress_undefined_names(self) -> ContextManager[None]:
        """Temporarily suppress errors about undefined names."""
        return qcore.override(self, "should_suppress_undefined_names", True)
    def show_error(
        self,
        message: str,
        error_code: ErrorCode = ErrorCode.invalid_annotation,
        node: Optional[ast.AST] = None,
    ) -> None:
        """Show an error found while evaluating an annotation."""
        pass
    def get_name(self, node: ast.Name) -> Value:
        """Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name."""
        return AnyValue(AnySource.inference)
    def handle_undefined_name(self, name: str) -> Value:
        """Report an undefined name, unless suppression is active."""
        if self.should_suppress_undefined_names:
            return AnyValue(AnySource.inference)
        self.show_error(
            f"Undefined name {name!r} used in annotation", ErrorCode.undefined_name
        )
        return AnyValue(AnySource.error)
    def get_name_from_globals(self, name: str, globals: Mapping[str, Any]) -> Value:
        """Resolve *name* in *globals*, falling back to builtins."""
        if name in globals:
            return KnownValue(globals[name])
        elif hasattr(builtins, name):
            return KnownValue(getattr(builtins, name))
        return self.handle_undefined_name(name)
@dataclass
class RuntimeEvaluator(type_evaluation.Evaluator, Context):
    """Type evaluator whose names resolve against a runtime function's globals."""

    # Namespace used to resolve names appearing in evaluated annotations.
    globals: Mapping[str, object] = field(repr=False)
    # Function associated with this evaluator (used by the Evaluator base —
    # not referenced in the methods below).
    func: typing.Callable[..., Any]
    def evaluate_type(self, node: ast.AST) -> Value:
        """Evaluate *node* as a type annotation."""
        return type_from_ast(node, ctx=self)
    def evaluate_value(self, node: ast.AST) -> Value:
        """Evaluate *node* as a value expression; unknown constructs are not errors."""
        return value_from_ast(node, ctx=self, error_on_unrecognized=False)
    def get_name(self, node: ast.Name) -> Value:
        """Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name."""
        return self.get_name_from_globals(node.id, self.globals)
@dataclass
class SyntheticEvaluator(type_evaluation.Evaluator):
    """Type evaluator that reports errors through an ErrorContext and resolves
    names through a separate annotations Context."""

    error_ctx: ErrorContext  # receives errors, attached to AST nodes
    annotations_context: Context  # resolves names and evaluates annotations
    def show_error(
        self,
        message: str,
        error_code: ErrorCode = ErrorCode.invalid_annotation,
        node: Optional[ast.AST] = None,
    ) -> None:
        # ``self.node`` comes from the Evaluator base (passed first in
        # from_visitor); fall back to it when no node is given.
        self.error_ctx.show_error(node or self.node, message, error_code=error_code)
    def evaluate_type(self, node: ast.AST) -> Value:
        """Evaluate *node* as a type annotation."""
        return type_from_ast(node, ctx=self.annotations_context)
    def evaluate_value(self, node: ast.AST) -> Value:
        """Evaluate *node* as a value expression; unknown constructs are not errors."""
        return value_from_ast(
            node, ctx=self.annotations_context, error_on_unrecognized=False
        )
    def get_name(self, node: ast.Name) -> Value:
        """Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name."""
        return self.annotations_context.get_name(node)
    @classmethod
    def from_visitor(
        cls,
        node: FunctionDefNode,
        visitor: "NameCheckVisitor",
        return_annotation: Value,
    ) -> "SyntheticEvaluator":
        """Build an evaluator for a function node being checked by *visitor*."""
        return cls(
            node,
            return_annotation,
            visitor,
            _DefaultContext(visitor, node, use_name_node_for_error=True),
        )
@used  # part of an API
def type_from_ast(
    ast_node: ast.AST,
    visitor: Optional["NameCheckVisitor"] = None,
    ctx: Optional[Context] = None,
) -> Value:
    """Evaluate an annotation AST node into a :class:`Value <pyanalyze.value.Value>`.

    :param ast_node: AST node to evaluate.
    :param visitor: Visitor used by the default :class:`Context` to resolve
        names and show errors; ignored when *ctx* is provided.
    :param ctx: :class:`Context` to use for evaluation.
    """
    context = ctx if ctx is not None else _DefaultContext(visitor, ast_node)
    return _type_from_ast(ast_node, context)
def type_from_annotations(
    annotations: Mapping[str, object],
    key: str,
    *,
    globals: Optional[Mapping[str, object]] = None,
    ctx: Optional[Context] = None,
) -> Optional[Value]:
    """Evaluate the annotation for *key* in an ``__annotations__`` mapping.

    Returns None when the key is missing, the mapping is malformed, or the
    annotation evaluates to an incomplete Any.
    """
    try:
        annotation = annotations[key]
    except Exception:
        # Malformed __annotations__ (it may be an arbitrary object).
        return None
    val = type_from_runtime(annotation, globals=globals, ctx=ctx)
    if val == AnyValue(AnySource.incomplete_annotation):
        return None
    return val
def type_from_runtime(
    val: object,
    visitor: Optional["NameCheckVisitor"] = None,
    node: Optional[ast.AST] = None,
    globals: Optional[Mapping[str, object]] = None,
    ctx: Optional[Context] = None,
) -> Value:
    """Evaluate a runtime annotation object into a :class:`Value <pyanalyze.value.Value>`.

    :param val: Object to evaluate, usually from an ``__annotations__``
        dictionary.
    :param visitor: Visitor used by the default :class:`Context` to resolve
        names and show errors; ignored when *ctx* is given.
    :param node: AST node the annotation derives from, used for error
        reporting; ignored when *ctx* is given.
    :param globals: Globals mapping for name resolution; ignored when *ctx*
        is given.
    :param ctx: :class:`Context` to use for evaluation.
    """
    context = _DefaultContext(visitor, node, globals) if ctx is None else ctx
    return _type_from_runtime(val, context)
def type_from_value(
    value: Value,
    visitor: Optional["NameCheckVisitor"] = None,
    node: Optional[ast.AST] = None,
    ctx: Optional[Context] = None,
    is_typeddict: bool = False,
) -> Value:
    """Given a :class:`Value <pyanalyze.value.Value>` representing an annotation,
    return a :class:`Value <pyanalyze.value.Value>` representing the type.

    The input value represents an expression, the output value represents
    a type. For example, the :term:`impl` of ``typing.cast(typ, val)``
    calls :func:`type_from_value` on the value it receives for its
    `typ` argument and returns the result.

    :param value: :class:`Value <pyanalyze.value.Value>` to evaluate.
    :param visitor: Visitor class to use. This is used in the default
        :class:`Context` to resolve names and show errors.
        This is ignored if `ctx` is given.
    :param node: AST node that the annotation derives from. This is
        used for showing errors. Ignored if `ctx` is given.
    :param ctx: :class:`Context` to use for evaluation.
    :param is_typeddict: Whether we are at the top level of a `TypedDict`
        definition.
    """
    if ctx is None:
        ctx = _DefaultContext(visitor, node)
    return _type_from_value(value, ctx, is_typeddict=is_typeddict)
def value_from_ast(
    ast_node: ast.AST, ctx: Context, *, error_on_unrecognized: bool = True
) -> Value:
    """Evaluate an annotation expression AST into a Value.

    Unsupported constructs produce ``AnyValue(AnySource.error)``; an error is
    reported unless *error_on_unrecognized* is false.
    """
    result = _Visitor(ctx).visit(ast_node)
    if result is not None:
        return result
    if error_on_unrecognized:
        ctx.show_error("Invalid type annotation", node=ast_node)
    return AnyValue(AnySource.error)
def _type_from_ast(node: ast.AST, ctx: Context, is_typeddict: bool = False) -> Value:
    """Evaluate an annotation AST node, then interpret the result as a type."""
    return _type_from_value(value_from_ast(node, ctx), ctx, is_typeddict=is_typeddict)
def _type_from_runtime(val: Any, ctx: Context, is_typeddict: bool = False) -> Value:
    """Convert a runtime annotation object into a Value.

    :param val: The runtime object: a type, typing construct, string forward
        reference, etc.
    :param ctx: :class:`Context` for name resolution and error reporting.
    :param is_typeddict: True when evaluating the value type of a TypedDict
        entry, which permits Required[]/NotRequired[].
    """
    if isinstance(val, str):
        # String annotations are forward references.
        return _eval_forward_ref(val, ctx, is_typeddict=is_typeddict)
    elif isinstance(val, tuple):
        # This happens under some Python versions for types
        # nested in tuples, e.g. on 3.6:
        # > typing_inspect.get_args(Union[Set[int], List[str]])
        # ((typing.Set, int), (typing.List, str))
        if not val:
            # from Tuple[()]
            return KnownValue(())
        origin = val[0]
        if len(val) == 2:
            args = (val[1],)
        else:
            args = val[1:]
        return _value_of_origin_args(origin, args, val, ctx)
    elif GenericAlias is not None and isinstance(val, GenericAlias):
        origin = get_origin(val)
        args = get_args(val)
        if origin is tuple and not args:
            return SequenceIncompleteValue(tuple, [])
        return _value_of_origin_args(origin, args, val, ctx)
    elif typing_inspect.is_literal_type(val):
        args = typing_inspect.get_args(val)
        # Bug fix: this fast path must trigger for a single argument;
        # checking for zero arguments crashed with IndexError on args[0].
        if len(args) == 1:
            return KnownValue(args[0])
        else:
            return unite_values(*[KnownValue(arg) for arg in args])
    elif typing_inspect.is_union_type(val):
        args = typing_inspect.get_args(val)
        return unite_values(*[_type_from_runtime(arg, ctx) for arg in args])
    elif typing_inspect.is_tuple_type(val):
        args = typing_inspect.get_args(val)
        if not args:
            return TypedValue(tuple)
        elif len(args) == 2 and args[1] is Ellipsis:
            # Tuple[T, ...]: homogeneous tuple.
            return GenericValue(tuple, [_type_from_runtime(args[0], ctx)])
        elif len(args) == 1 and args[0] == ():
            return SequenceIncompleteValue(tuple, [])  # empty tuple
        else:
            args_vals = [_type_from_runtime(arg, ctx) for arg in args]
            return SequenceIncompleteValue(tuple, args_vals)
    elif is_instance_of_typing_name(val, "_TypedDictMeta"):
        required_keys = getattr(val, "__required_keys__", None)
        # 3.8's typing.TypedDict doesn't have __required_keys__. With
        # inheritance, this makes it apparently impossible to figure out which
        # keys are required at runtime.
        total = getattr(val, "__total__", True)
        return TypedDictValue(
            {
                key: _get_typeddict_value(value, ctx, key, required_keys, total)
                for key, value in val.__annotations__.items()
            }
        )
    elif val is InitVar:
        # On 3.6 and 3.7, InitVar[T] just returns InitVar at runtime, so we can't
        # get the actual type out.
        return AnyValue(AnySource.inference)
    elif isinstance(val, InitVar):
        # val.type exists only on 3.8+, but on earlier versions
        # InitVar instances aren't being created
        # static analysis: ignore[undefined_attribute]
        return type_from_runtime(val.type)
    elif is_instance_of_typing_name(val, "AnnotatedMeta"):
        # Annotated in 3.6's typing_extensions
        origin, metadata = val.__args__
        return _make_annotated(
            _type_from_runtime(origin, ctx), [KnownValue(v) for v in metadata], ctx
        )
    elif is_instance_of_typing_name(val, "_AnnotatedAlias"):
        # Annotated in typing and newer typing_extensions
        return _make_annotated(
            _type_from_runtime(val.__origin__, ctx),
            [KnownValue(v) for v in val.__metadata__],
            ctx,
        )
    elif typing_inspect.is_generic_type(val):
        origin = typing_inspect.get_origin(val)
        args = typing_inspect.get_args(val)
        if getattr(val, "_special", False):
            args = []  # distinguish List from List[T] on 3.7 and 3.8
        return _value_of_origin_args(origin, args, val, ctx, is_typeddict=is_typeddict)
    elif typing_inspect.is_callable_type(val):
        args = typing_inspect.get_args(val)
        return _value_of_origin_args(Callable, args, val, ctx)
    elif val is AsynqCallable:
        # Bare AsynqCallable: accept any arguments.
        return CallableValue(Signature.make([ELLIPSIS_PARAM], is_asynq=True))
    elif isinstance(val, type):
        return _maybe_typed_value(val)
    elif val is None:
        return KnownValue(None)
    elif is_typing_name(val, "NoReturn") or is_typing_name(val, "Never"):
        return NO_RETURN_VALUE
    elif is_typing_name(val, "Self"):
        return SelfTVV
    elif val is typing.Any:
        return AnyValue(AnySource.explicit)
    elif hasattr(val, "__supertype__"):
        if isinstance(val.__supertype__, type):
            # NewType
            return NewTypeValue(val)
        elif typing_inspect.is_tuple_type(val.__supertype__):
            # TODO figure out how to make NewTypes over tuples work
            return AnyValue(AnySource.inference)
        else:
            ctx.show_error(f"Invalid NewType {val}")
            return AnyValue(AnySource.error)
    elif typing_inspect.is_typevar(val):
        tv = cast(TypeVar, val)
        return make_type_var_value(tv, ctx)
    elif is_instance_of_typing_name(val, "ParamSpec"):
        return TypeVarValue(val, is_paramspec=True)
    elif is_instance_of_typing_name(val, "ParamSpecArgs"):
        return ParamSpecArgsValue(val.__origin__)
    elif is_instance_of_typing_name(val, "ParamSpecKwargs"):
        return ParamSpecKwargsValue(val.__origin__)
    elif is_typing_name(val, "Final") or is_typing_name(val, "ClassVar"):
        # Bare Final/ClassVar without an argument.
        return AnyValue(AnySource.incomplete_annotation)
    elif typing_inspect.is_classvar(val) or typing_inspect.is_final_type(val):
        if hasattr(val, "__type__"):
            # 3.6
            typ = val.__type__
        else:
            # 3.7+
            typ = val.__args__[0]
        return _type_from_runtime(typ, ctx)
    elif is_instance_of_typing_name(val, "_ForwardRef") or is_instance_of_typing_name(
        val, "ForwardRef"
    ):
        # This has issues because the forward ref may be defined in a different file, in
        # which case we don't know which names are valid in it.
        with ctx.suppress_undefined_names():
            try:
                code = ast.parse(val.__forward_arg__)
            except SyntaxError:
                ctx.show_error(
                    f"Syntax error in forward reference: {val.__forward_arg__}"
                )
                return AnyValue(AnySource.error)
            return _type_from_ast(code.body[0], ctx, is_typeddict=is_typeddict)
    elif val is Ellipsis:
        # valid in Callable[..., ]
        return AnyValue(AnySource.explicit)
    elif is_instance_of_typing_name(val, "_TypeAlias"):
        # typing.Pattern and Match, which are not normal generic types for some reason
        return GenericValue(val.impl_type, [_type_from_runtime(val.type_var, ctx)])
    elif isinstance(val, TypeGuard):
        return AnnotatedValue(
            TypedValue(bool),
            [TypeGuardExtension(_type_from_runtime(val.guarded_type, ctx))],
        )
    elif is_instance_of_typing_name(val, "_TypeGuard"):
        # 3.6 only
        return AnnotatedValue(
            TypedValue(bool),
            [TypeGuardExtension(_type_from_runtime(val.__type__, ctx))],
        )
    elif isinstance(val, AsynqCallable):
        params = _callable_args_from_runtime(val.args, "AsynqCallable", ctx)
        sig = Signature.make(
            params, _type_from_runtime(val.return_type, ctx), is_asynq=True
        )
        return CallableValue(sig)
    elif isinstance(val, ExternalType):
        try:
            typ = qcore.helpers.object_from_string(val.type_path)
        except Exception:
            ctx.show_error(f"Cannot resolve type {val.type_path!r}")
            return AnyValue(AnySource.error)
        return _type_from_runtime(typ, ctx)
    # Python 3.6 only (on later versions Required/NotRequired match
    # is_generic_type).
    elif is_instance_of_typing_name(val, "_MaybeRequired"):
        required = is_instance_of_typing_name(val, "_Required")
        if is_typeddict:
            return Pep655Value(required, _type_from_runtime(val.__type__, ctx))
        else:
            cls = "Required" if required else "NotRequired"
            ctx.show_error(f"{cls}[] used in unsupported context")
            return AnyValue(AnySource.error)
    elif is_typing_name(val, "TypeAlias"):
        return AnyValue(AnySource.incomplete_annotation)
    elif is_typing_name(val, "TypedDict"):
        return KnownValue(TypedDict)
    else:
        origin = get_origin(val)
        if isinstance(origin, type):
            return _maybe_typed_value(origin)
        elif val is NamedTuple:
            return TypedValue(tuple)
        ctx.show_error(f"Invalid type annotation {val}")
        return AnyValue(AnySource.error)
def make_type_var_value(tv: TypeVar, ctx: Context) -> TypeVarValue:
    """Wrap a runtime TypeVar, evaluating its bound and constraints."""
    bound = None if tv.__bound__ is None else _type_from_runtime(tv.__bound__, ctx)
    # __constraints__ is () for an unconstrained TypeVar, so the comprehension
    # naturally yields an empty tuple in that case.
    constraints = tuple(
        _type_from_runtime(constraint, ctx) for constraint in tv.__constraints__
    )
    return TypeVarValue(tv, bound=bound, constraints=constraints)
def _callable_args_from_runtime(
    arg_types: Any, label: str, ctx: Context
) -> Sequence[SigParameter]:
    """Turn the argument piece of Callable[...]/AsynqCallable[...] into parameters.

    Accepts ``...``, a list/tuple of types (possibly a single ParamSpec or
    Concatenate), a bare ParamSpec, or Concatenate[...]. Anything else is
    reported as an error and treated like ``...``.
    """
    if arg_types is Ellipsis or arg_types == [Ellipsis]:
        return [ELLIPSIS_PARAM]
    elif type(arg_types) in (tuple, list):
        if len(arg_types) == 1:
            # A single element may be one of the special forms.
            (arg,) = arg_types
            if arg is Ellipsis:
                return [ELLIPSIS_PARAM]
            elif is_typing_name(getattr(arg, "__origin__", None), "Concatenate"):
                return _args_from_concatenate(arg, ctx)
            elif is_instance_of_typing_name(arg, "ParamSpec"):
                param_spec = TypeVarValue(arg, is_paramspec=True)
                param = SigParameter(
                    "__P", kind=ParameterKind.PARAM_SPEC, annotation=param_spec
                )
                return [param]
        # Plain argument list: synthetic positional-only parameters (a
        # ParamSpec among them keeps its PARAM_SPEC kind).
        types = [_type_from_runtime(arg, ctx) for arg in arg_types]
        params = [
            SigParameter(
                f"__arg{i}",
                kind=ParameterKind.PARAM_SPEC
                if isinstance(typ, TypeVarValue) and typ.is_paramspec
                else ParameterKind.POSITIONAL_ONLY,
                annotation=typ,
            )
            for i, typ in enumerate(types)
        ]
        return params
    elif is_instance_of_typing_name(arg_types, "ParamSpec"):
        param_spec = TypeVarValue(arg_types, is_paramspec=True)
        param = SigParameter(
            "__P", kind=ParameterKind.PARAM_SPEC, annotation=param_spec
        )
        return [param]
    elif is_typing_name(getattr(arg_types, "__origin__", None), "Concatenate"):
        return _args_from_concatenate(arg_types, ctx)
    else:
        ctx.show_error(f"Invalid arguments to {label}: {arg_types!r}")
        return [ELLIPSIS_PARAM]
def _args_from_concatenate(concatenate: Any, ctx: Context) -> Sequence[SigParameter]:
    """Build parameters from ``Concatenate[...]``: positionals, then a ParamSpec."""
    annotations = [_type_from_runtime(arg, ctx) for arg in concatenate.__args__]
    last_index = len(annotations) - 1
    params = []
    for i, annotation in enumerate(annotations):
        # The final Concatenate argument is the ParamSpec; everything before
        # it is a positional-only parameter.
        if i == last_index:
            kind = ParameterKind.PARAM_SPEC
        else:
            kind = ParameterKind.POSITIONAL_ONLY
        params.append(SigParameter(f"__arg{i}", kind=kind, annotation=annotation))
    return params
def _get_typeddict_value(
    value: Value,
    ctx: Context,
    key: str,
    required_keys: Optional[Container[str]],
    total: bool,
) -> Tuple[bool, Value]:
    """Evaluate one TypedDict entry, returning ``(required, value)``."""
    evaluated = _type_from_runtime(value, ctx, is_typeddict=True)
    # An explicit Required[]/NotRequired[] wrapper wins over totality.
    if isinstance(evaluated, Pep655Value):
        return evaluated.required, evaluated.value
    # Without __required_keys__ (e.g. 3.8's typing.TypedDict) fall back to
    # the class's __total__ flag.
    required = total if required_keys is None else key in required_keys
    return required, evaluated
def _eval_forward_ref(val: str, ctx: Context, is_typeddict: bool = False) -> Value:
    """Parse a string annotation and evaluate it as a type."""
    try:
        parsed = ast.parse(val, mode="eval")
    except SyntaxError:
        ctx.show_error(f"Syntax error in type annotation: {val}")
        return AnyValue(AnySource.error)
    return _type_from_ast(parsed.body, ctx, is_typeddict=is_typeddict)
def _type_from_value(value: Value, ctx: Context, is_typeddict: bool = False) -> Value:
    """Interpret a Value representing an annotation expression as a type.

    KnownValues are resolved through _type_from_runtime; _SubscriptedValue
    (produced by _Visitor) through _type_from_subscripted_value.
    """
    if isinstance(value, KnownValue):
        return _type_from_runtime(value.val, ctx, is_typeddict=is_typeddict)
    elif isinstance(value, TypeVarValue):
        return value
    elif isinstance(value, MultiValuedValue):
        return unite_values(*[_type_from_value(val, ctx) for val in value.vals])
    elif isinstance(value, AnnotatedValue):
        return _type_from_value(value.value, ctx)
    elif isinstance(value, _SubscriptedValue):
        return _type_from_subscripted_value(
            value.root, value.members, ctx, is_typeddict=is_typeddict
        )
    elif isinstance(value, AnyValue):
        return value
    elif isinstance(value, SubclassValue) and value.exactly:
        # type[X] used as an annotation means the type X itself.
        return value.typ
    elif isinstance(value, TypedValue) and isinstance(value.typ, str):
        # Synthetic type
        return value
    else:
        ctx.show_error(f"Unrecognized annotation {value}")
        return AnyValue(AnySource.error)
def _type_from_subscripted_value(
    root: Optional[Value],
    members: Sequence[Value],
    ctx: Context,
    is_typeddict: bool = False,
) -> Value:
    """Resolve a subscripted annotation ``root[members]`` into a Value.

    Handles the typing special forms (Union, Literal, Tuple, Optional, Type,
    Annotated, TypeGuard, Required/NotRequired, Callable, AsynqCallable) as
    well as ordinary generic classes.
    """
    if isinstance(root, GenericValue):
        # Re-subscripting an already-generic value: replace its args if the
        # arity matches.
        if len(root.args) == len(members):
            return GenericValue(
                root.typ, [_type_from_value(member, ctx) for member in members]
            )
    if isinstance(root, _SubscriptedValue):
        # Resolve the inner subscript first, then apply the outer one.
        root_type = _type_from_value(root, ctx)
        return _type_from_subscripted_value(root_type, members, ctx)
    elif isinstance(root, MultiValuedValue):
        # Distribute the subscript over each union member.
        return unite_values(
            *[
                _type_from_subscripted_value(subval, members, ctx, is_typeddict)
                for subval in root.vals
            ]
        )
    if (
        isinstance(root, SubclassValue)
        and root.exactly
        and isinstance(root.typ, TypedValue)
    ):
        return GenericValue(
            root.typ.typ, [_type_from_value(elt, ctx) for elt in members]
        )
    if isinstance(root, TypedValue) and isinstance(root.typ, str):
        # Synthetic type identified by name rather than a runtime class.
        return GenericValue(root.typ, [_type_from_value(elt, ctx) for elt in members])
    if not isinstance(root, KnownValue):
        if root != AnyValue(AnySource.error):
            # Avoid cascading errors when the root already failed to resolve.
            ctx.show_error(f"Cannot resolve subscripted annotation: {root}")
        return AnyValue(AnySource.error)
    root = root.val
    if root is typing.Union:
        return unite_values(*[_type_from_value(elt, ctx) for elt in members])
    elif is_typing_name(root, "Literal"):
        # Note that in Python 3.8, the way typing's internal cache works means that
        # Literal[1] and Literal[True] are cached to the same value, so if you use
        # both, you'll get whichever one was used first in later calls. There's nothing
        # we can do about that.
        if all(isinstance(elt, KnownValue) for elt in members):
            return unite_values(*members)
        else:
            ctx.show_error(f"Arguments to Literal[] must be literals, not {members}")
            return AnyValue(AnySource.error)
    elif root is typing.Tuple or root is tuple:
        if len(members) == 2 and members[1] == KnownValue(Ellipsis):
            # Tuple[X, ...]: homogeneous tuple of arbitrary length.
            return GenericValue(tuple, [_type_from_value(members[0], ctx)])
        elif len(members) == 1 and members[0] == KnownValue(()):
            # Tuple[()]: the empty tuple.
            return SequenceIncompleteValue(tuple, [])
        else:
            return SequenceIncompleteValue(
                tuple, [_type_from_value(arg, ctx) for arg in members]
            )
    elif root is typing.Optional:
        if len(members) != 1:
            ctx.show_error("Optional[] takes only one argument")
            return AnyValue(AnySource.error)
        return unite_values(KnownValue(None), _type_from_value(members[0], ctx))
    elif root is typing.Type or root is type:
        if len(members) != 1:
            ctx.show_error("Type[] takes only one argument")
            return AnyValue(AnySource.error)
        argument = _type_from_value(members[0], ctx)
        return SubclassValue.make(argument)
    elif is_typing_name(root, "Annotated"):
        origin, *metadata = members
        return _make_annotated(_type_from_value(origin, ctx), metadata, ctx)
    elif is_typing_name(root, "TypeGuard"):
        if len(members) != 1:
            ctx.show_error("TypeGuard requires a single argument")
            return AnyValue(AnySource.error)
        return AnnotatedValue(
            TypedValue(bool), [TypeGuardExtension(_type_from_value(members[0], ctx))]
        )
    elif is_typing_name(root, "Required"):
        # PEP 655; only meaningful inside a TypedDict definition.
        if not is_typeddict:
            ctx.show_error("Required[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(members) != 1:
            ctx.show_error("Required[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(True, _type_from_value(members[0], ctx))
    elif is_typing_name(root, "NotRequired"):
        if not is_typeddict:
            ctx.show_error("NotRequired[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(members) != 1:
            ctx.show_error("NotRequired[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(False, _type_from_value(members[0], ctx))
    elif root is Callable or root is typing.Callable:
        if len(members) == 2:
            args, return_value = members
            return _make_callable_from_value(args, return_value, ctx)
        ctx.show_error("Callable requires exactly two arguments")
        return AnyValue(AnySource.error)
    elif root is AsynqCallable:
        if len(members) == 2:
            args, return_value = members
            return _make_callable_from_value(args, return_value, ctx, is_asynq=True)
        ctx.show_error("AsynqCallable requires exactly two arguments")
        return AnyValue(AnySource.error)
    elif typing_inspect.is_generic_type(root):
        origin = typing_inspect.get_origin(root)
        if origin is None:
            # On Python 3.9 at least, get_origin() of a class that inherits
            # from Generic[T] is None.
            origin = root
        origin = _maybe_get_extra(origin)
        return GenericValue(origin, [_type_from_value(elt, ctx) for elt in members])
    elif isinstance(root, type):
        return GenericValue(root, [_type_from_value(elt, ctx) for elt in members])
    else:
        # In Python 3.9, generics are implemented differently and typing.get_origin
        # can help.
        origin = get_origin(root)
        if isinstance(origin, type):
            return GenericValue(origin, [_type_from_value(elt, ctx) for elt in members])
        ctx.show_error(f"Unrecognized subscripted annotation: {root}")
        return AnyValue(AnySource.error)
def _maybe_get_extra(origin: type) -> Union[type, str]:
    """Normalize a generic origin to the type (or synthetic name) we track.

    ContextManager is defined oddly and we lose the Protocol if we don't use
    synthetic types.
    """
    if any(origin is cls for cls in CONTEXT_MANAGER_TYPES):
        return "typing.ContextManager"
    if any(origin is cls for cls in ASYNC_CONTEXT_MANAGER_TYPES):
        return "typing.AsyncContextManager"
    # turn typing.List into list in some Python versions
    # compare https://github.com/ilevkivskyi/typing_inspect/issues/36
    extra = getattr(origin, "__extra__", None)
    return origin if extra is None else extra
class _DefaultContext(Context):
    """Context backed by a NameCheckVisitor and/or a plain globals mapping.

    Errors are reported through the visitor (when present) at ``node``;
    names are resolved through the visitor's scope machinery, falling back
    to the given globals mapping and then to builtins.
    """

    def __init__(
        self,
        visitor: "NameCheckVisitor",
        node: Optional[ast.AST],
        globals: Optional[Mapping[str, object]] = None,
        use_name_node_for_error: bool = False,
    ) -> None:
        super().__init__()
        self.visitor = visitor
        self.node = node
        self.globals = globals
        self.use_name_node_for_error = use_name_node_for_error

    def show_error(
        self,
        message: str,
        error_code: ErrorCode = ErrorCode.invalid_annotation,
        node: Optional[ast.AST] = None,
    ) -> None:
        """Report an error at ``node`` (defaulting to this context's node).

        Errors are dropped when there is no visitor or no node to attach to.
        """
        if node is None:
            node = self.node
        if self.visitor is not None and node is not None:
            self.visitor.show_error(node, message, error_code)

    def get_name(self, node: ast.Name) -> Value:
        """Resolve a Name node to a Value, reporting undefined names."""
        if self.visitor is not None:
            val, _ = self.visitor.resolve_name(
                node,
                error_node=node if self.use_name_node_for_error else self.node,
                suppress_errors=self.should_suppress_undefined_names,
            )
            return val
        elif self.globals is not None:
            # Fall back to the explicit globals mapping, then builtins.
            if node.id in self.globals:
                return KnownValue(self.globals[node.id])
            elif hasattr(builtins, node.id):
                return KnownValue(getattr(builtins, node.id))
        if self.should_suppress_undefined_names:
            return AnyValue(AnySource.inference)
        self.show_error(
            f"Undefined name {node.id!r} used in annotation",
            ErrorCode.undefined_name,
            node=node,
        )
        return AnyValue(AnySource.error)
@dataclass(frozen=True)
class _SubscriptedValue(Value):
    """Intermediate representation of a subscripted annotation ``root[members]``."""

    # The value being subscripted; None when it could not be resolved.
    root: Optional[Value]
    # The subscript arguments, already visited.
    members: Tuple[Value, ...]
@dataclass
class Pep655Value(Value):
    """Result of a PEP 655 Required[]/NotRequired[] TypedDict field annotation."""

    # True for Required[], False for NotRequired[].
    required: bool
    # The wrapped field type.
    value: Value
class _Visitor(ast.NodeVisitor):
    """Evaluate an annotation AST into a Value.

    Visit methods return a Value (or None when the node cannot be
    interpreted); node types without a handler raise NotImplementedError
    via generic_visit.
    """

    def __init__(self, ctx: Context) -> None:
        self.ctx = ctx

    def generic_visit(self, node: ast.AST) -> None:
        # Any node type without an explicit handler is unsupported.
        raise NotImplementedError(f"no visitor implemented for {node!r}")

    def visit_Name(self, node: ast.Name) -> Value:
        return self.ctx.get_name(node)

    def visit_Subscript(self, node: ast.Subscript) -> Value:
        value = self.visit(node.value)
        index = self.visit(node.slice)
        if isinstance(index, SequenceIncompleteValue):
            # X[a, b] parses as a tuple subscript; unpack its members.
            members = index.members
        else:
            members = (index,)
        return _SubscriptedValue(value, members)

    def visit_Attribute(self, node: ast.Attribute) -> Optional[Value]:
        root_value = self.visit(node.value)
        if isinstance(root_value, KnownValue):
            try:
                return KnownValue(getattr(root_value.val, node.attr))
            except AttributeError:
                self.ctx.show_error(
                    f"{root_value.val!r} has no attribute {node.attr!r}", node=node
                )
                return AnyValue(AnySource.error)
        elif not isinstance(root_value, AnyValue):
            self.ctx.show_error(f"Cannot resolve annotation {root_value}", node=node)
        return AnyValue(AnySource.error)

    def visit_Tuple(self, node: ast.Tuple) -> Value:
        elts = [self.visit(elt) for elt in node.elts]
        return SequenceIncompleteValue(tuple, elts)

    def visit_List(self, node: ast.List) -> Value:
        elts = [self.visit(elt) for elt in node.elts]
        return SequenceIncompleteValue(list, elts)

    def visit_Index(self, node: ast.Index) -> Value:
        # class is unused in 3.9
        return self.visit(node.value)  # static analysis: ignore[undefined_attribute]

    def visit_Ellipsis(self, node: ast.Ellipsis) -> Value:
        return KnownValue(Ellipsis)

    def visit_Constant(self, node: ast.Constant) -> Value:
        return KnownValue(node.value)

    def visit_NameConstant(self, node: ast.NameConstant) -> Value:
        return KnownValue(node.value)

    def visit_Num(self, node: ast.Num) -> Value:
        return KnownValue(node.n)

    def visit_Str(self, node: ast.Str) -> Value:
        return KnownValue(node.s)

    def visit_Bytes(self, node: ast.Bytes) -> Value:
        return KnownValue(node.s)

    def visit_Expr(self, node: ast.Expr) -> Value:
        return self.visit(node.value)

    def visit_BinOp(self, node: ast.BinOp) -> Optional[Value]:
        if isinstance(node.op, ast.BitOr):
            # PEP 604: X | Y is treated as Union[X, Y].
            return _SubscriptedValue(
                KnownValue(Union), (self.visit(node.left), self.visit(node.right))
            )
        else:
            return None

    def visit_UnaryOp(self, node: ast.UnaryOp) -> Optional[Value]:
        # Only int and float negation on literals are supported.
        if isinstance(node.op, ast.USub):
            operand = self.visit(node.operand)
            if isinstance(operand, KnownValue) and isinstance(
                operand.val, (int, float)
            ):
                return KnownValue(-operand.val)
        return None

    def visit_Call(self, node: ast.Call) -> Optional[Value]:
        """Handle calls in annotations: NewType, TypeVar, ParamSpec, and
        calls to a class (treated as an instance of that class); anything
        else yields None."""
        func = self.visit(node.func)
        if not isinstance(func, KnownValue):
            return None
        if func.val == NewType:
            arg_values = [self.visit(arg) for arg in node.args]
            kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
            args = []
            kwargs = {}
            for arg_value in arg_values:
                if isinstance(arg_value, KnownValue):
                    args.append(arg_value.val)
                else:
                    return None
            for name, kwarg_value in kwarg_values:
                if name is None:
                    # **kwargs in the call; only a literal dict can be expanded.
                    if isinstance(kwarg_value, KnownValue) and isinstance(
                        kwarg_value.val, dict
                    ):
                        kwargs.update(kwarg_value.val)
                    else:
                        return None
                else:
                    if isinstance(kwarg_value, KnownValue):
                        kwargs[name] = kwarg_value.val
                    else:
                        return None
            return KnownValue(func.val(*args, **kwargs))
        elif func.val == TypeVar:
            arg_values = [self.visit(arg) for arg in node.args]
            kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
            if not arg_values:
                self.ctx.show_error(
                    "TypeVar() requires at least one argument", node=node
                )
                return None
            name_val = arg_values[0]
            if not isinstance(name_val, KnownValue):
                self.ctx.show_error("TypeVar name must be a literal", node=node.args[0])
                return None
            constraints = []
            for arg_value in arg_values[1:]:
                constraints.append(_type_from_value(arg_value, self.ctx))
            bound = None
            for name, kwarg_value in kwarg_values:
                if name in ("covariant", "contravariant"):
                    # Variance kwargs are accepted but ignored here.
                    continue
                elif name == "bound":
                    bound = _type_from_value(kwarg_value, self.ctx)
                else:
                    self.ctx.show_error(f"Unrecognized TypeVar kwarg {name}", node=node)
                    return None
            tv = TypeVar(name_val.val)
            return TypeVarValue(tv, bound, tuple(constraints))
        elif is_typing_name(func.val, "ParamSpec"):
            arg_values = [self.visit(arg) for arg in node.args]
            kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
            if not arg_values:
                self.ctx.show_error(
                    "ParamSpec() requires at least one argument", node=node
                )
                return None
            name_val = arg_values[0]
            if not isinstance(name_val, KnownValue):
                self.ctx.show_error(
                    "ParamSpec name must be a literal", node=node.args[0]
                )
                return None
            for name, _ in kwarg_values:
                # ParamSpec takes no supported kwargs.
                self.ctx.show_error(f"Unrecognized ParamSpec kwarg {name}", node=node)
                return None
            tv = ParamSpec(name_val.val)
            return TypeVarValue(tv, is_paramspec=True)
        elif isinstance(func.val, type):
            if func.val is object:
                # object() carries no type information; treat as Any.
                return AnyValue(AnySource.inference)
            return TypedValue(func.val)
        else:
            return None
def _value_of_origin_args(
    origin: object,
    args: Sequence[object],
    val: object,
    ctx: Context,
    is_typeddict: bool = False,
) -> Value:
    """Resolve a runtime generic alias from its introspected origin and args.

    ``val`` is the original runtime annotation object; ``origin`` and
    ``args`` are typically the results of typing introspection on it.
    """
    if origin is typing.Type or origin is type:
        if not args:
            return TypedValue(type)
        return SubclassValue.make(_type_from_runtime(args[0], ctx))
    elif origin is typing.Tuple or origin is tuple:
        if not args:
            return TypedValue(tuple)
        elif len(args) == 2 and args[1] is Ellipsis:
            # Tuple[X, ...]: homogeneous tuple of arbitrary length.
            return GenericValue(tuple, [_type_from_runtime(args[0], ctx)])
        elif len(args) == 1 and args[0] == ():
            # Tuple[()]: the empty tuple.
            return SequenceIncompleteValue(tuple, [])
        else:
            args_vals = [_type_from_runtime(arg, ctx) for arg in args]
            return SequenceIncompleteValue(tuple, args_vals)
    elif origin is typing.Union:
        return unite_values(*[_type_from_runtime(arg, ctx) for arg in args])
    elif origin is Callable or origin is typing.Callable:
        if len(args) == 0:
            return TypedValue(Callable)
        *arg_types, return_type = args
        if len(arg_types) == 1 and isinstance(arg_types[0], list):
            # Callable[[X, Y], Z] stores its parameter list as a single list.
            arg_types = arg_types[0]
        params = _callable_args_from_runtime(arg_types, "Callable", ctx)
        sig = Signature.make(params, _type_from_runtime(return_type, ctx))
        return CallableValue(sig)
    elif is_typing_name(origin, "Annotated"):
        origin, metadata = args
        # This should never happen
        if not isinstance(metadata, Iterable):
            ctx.show_error("Unexpected format in Annotated")
            return AnyValue(AnySource.error)
        return _make_annotated(
            _type_from_runtime(origin, ctx),
            [KnownValue(data) for data in metadata],
            ctx,
        )
    elif isinstance(origin, type):
        origin = _maybe_get_extra(origin)
        if args:
            args_vals = [_type_from_runtime(val, ctx) for val in args]
            return GenericValue(origin, args_vals)
        else:
            return _maybe_typed_value(origin)
    elif is_typing_name(origin, "TypeGuard"):
        if len(args) != 1:
            ctx.show_error("TypeGuard requires a single argument")
            return AnyValue(AnySource.error)
        return AnnotatedValue(
            TypedValue(bool), [TypeGuardExtension(_type_from_runtime(args[0], ctx))]
        )
    elif is_typing_name(origin, "Final"):
        if len(args) != 1:
            ctx.show_error("Final requires a single argument")
            return AnyValue(AnySource.error)
        # TODO(#160): properly support Final
        return _type_from_runtime(args[0], ctx)
    elif is_typing_name(origin, "ClassVar"):
        if len(args) != 1:
            ctx.show_error("ClassVar requires a single argument")
            return AnyValue(AnySource.error)
        return _type_from_runtime(args[0], ctx)
    elif is_typing_name(origin, "Required"):
        # PEP 655; only meaningful inside a TypedDict definition.
        if not is_typeddict:
            ctx.show_error("Required[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(args) != 1:
            ctx.show_error("Required[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(True, _type_from_runtime(args[0], ctx))
    elif is_typing_name(origin, "NotRequired"):
        if not is_typeddict:
            ctx.show_error("NotRequired[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(args) != 1:
            ctx.show_error("NotRequired[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(False, _type_from_runtime(args[0], ctx))
    elif origin is None and isinstance(val, type):
        # This happens for SupportsInt in 3.7.
        return _maybe_typed_value(val)
    else:
        ctx.show_error(
            f"Unrecognized annotation {origin}[{', '.join(map(repr, args))}]"
        )
        return AnyValue(AnySource.error)
def _maybe_typed_value(val: Union[type, str]) -> Value:
    """Wrap a runtime type (or synthetic type name) in the appropriate Value."""
    if val is type(None):
        # None in an annotation means the literal None.
        return KnownValue(None)
    if val is Hashable:
        return _HashableValue(val)
    return TypedValue(val)
def _make_callable_from_value(
    args: Value, return_value: Value, ctx: Context, is_asynq: bool = False
) -> Value:
    """Build a CallableValue for ``Callable[args, return_value]``.

    ``args`` may be ``...``, a list of parameter types, a ParamSpec, or
    ``Concatenate[...]``; anything else is an error.
    """
    return_annotation = _type_from_value(return_value, ctx)
    if args == KnownValue(Ellipsis):
        # Callable[..., X]: arbitrary parameters.
        return CallableValue(
            Signature.make(
                [ELLIPSIS_PARAM], return_annotation=return_annotation, is_asynq=is_asynq
            )
        )
    elif isinstance(args, SequenceIncompleteValue):
        # Callable[[X, Y], Z]: positional-only parameters.
        params = [
            SigParameter(
                f"__arg{i}",
                kind=ParameterKind.POSITIONAL_ONLY,
                annotation=_type_from_value(arg, ctx),
            )
            for i, arg in enumerate(args.members)
        ]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    elif isinstance(args, KnownValue) and is_instance_of_typing_name(
        args.val, "ParamSpec"
    ):
        # Callable[P, Z] with a runtime ParamSpec object.
        annotation = TypeVarValue(args.val, is_paramspec=True)
        params = [
            SigParameter("__P", kind=ParameterKind.PARAM_SPEC, annotation=annotation)
        ]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    elif isinstance(args, TypeVarValue) and args.is_paramspec:
        # Callable[P, Z] with an already-resolved ParamSpec value.
        params = [SigParameter("__P", kind=ParameterKind.PARAM_SPEC, annotation=args)]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    elif (
        isinstance(args, _SubscriptedValue)
        and isinstance(args.root, KnownValue)
        and is_typing_name(args.root.val, "Concatenate")
    ):
        # Callable[Concatenate[X, P], Z]: leading positional-only parameters
        # followed by a ParamSpec as the last element.
        annotations = [_type_from_value(arg, ctx) for arg in args.members]
        params = [
            SigParameter(
                f"__arg{i}",
                kind=ParameterKind.PARAM_SPEC
                if i == len(annotations) - 1
                else ParameterKind.POSITIONAL_ONLY,
                annotation=annotation,
            )
            for i, annotation in enumerate(annotations)
        ]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    else:
        ctx.show_error(f"Unrecognized Callable type argument {args}")
        return AnyValue(AnySource.error)
def _make_annotated(origin: Value, metadata: Sequence[Value], ctx: Context) -> Value:
    """Attach Annotated[] metadata (converted to extensions) to a value."""
    converted = [_value_from_metadata(entry, ctx) for entry in metadata]
    return annotate_value(origin, converted)
def _value_from_metadata(entry: Value, ctx: Context) -> Union[Value, Extension]:
    """Convert one Annotated[] metadata entry into an Extension if recognized.

    Unrecognized entries are returned unchanged.
    """
    if not isinstance(entry, KnownValue):
        return entry
    val = entry.val
    if isinstance(val, ParameterTypeGuard):
        return ParameterTypeGuardExtension(
            val.varname, _type_from_runtime(val.guarded_type, ctx)
        )
    if isinstance(val, NoReturnGuard):
        return NoReturnGuardExtension(
            val.varname, _type_from_runtime(val.guarded_type, ctx)
        )
    if isinstance(val, HasAttrGuard):
        return HasAttrGuardExtension(
            val.varname,
            _type_from_runtime(val.attribute_name, ctx),
            _type_from_runtime(val.attribute_type, ctx),
        )
    if isinstance(val, CustomCheck):
        return CustomCheckExtension(val)
    return entry
|
# -*- coding: utf-8 -*-
"""Functions to make simple plots with M/EEG data."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import base64
import copy
from glob import glob
from io import BytesIO
from itertools import cycle
import os.path as op
import warnings
from distutils.version import LooseVersion
from collections import defaultdict
import numpy as np
from scipy import linalg
from ..defaults import DEFAULTS
from ..fixes import _get_img_fdata
from ..rank import compute_rank
from ..source_space import _mri_orientation
from ..surface import read_surface
from ..io.constants import FIFF
from ..io.proj import make_projector
from ..io.pick import (_DATA_CH_TYPES_SPLIT, pick_types, pick_info,
pick_channels)
from ..source_space import (read_source_spaces, SourceSpaces, _read_mri_info,
_check_mri, _ensure_src)
from ..transforms import invert_transform, apply_trans, _frame_to_str
from ..utils import (logger, verbose, warn, _check_option, get_subjects_dir,
_mask_to_onsets_offsets, _pl, _on_missing)
from ..io.pick import _picks_by_type
from ..filter import estimate_ringing_samples
from .utils import tight_layout, _get_color_list, _prepare_trellis, plt_show
def _index_info_cov(info, cov, exclude):
    """Restrict info and covariance to their common channels.

    Returns the restricted ``info``, the covariance submatrix ``C``, the
    kept channel names, and ``idx_names``: one
    (indices, title, unit, scaling, ch_type) tuple per data channel type
    that has at least one channel.
    """
    if exclude == 'bads':
        exclude = info['bads']
    info = pick_info(info, pick_channels(info['ch_names'], cov['names'],
                                         exclude))
    del exclude

    picks_list = \
        _picks_by_type(info, meg_combined=False, ref_meg=False,
                       exclude=())
    picks_by_type = dict(picks_list)

    # Map covariance rows/cols onto the restricted info channel order.
    ch_names = [n for n in cov.ch_names if n in info['ch_names']]
    ch_idx = [cov.ch_names.index(n) for n in ch_names]

    info_ch_names = info['ch_names']
    idx_by_type = defaultdict(list)
    for ch_type, sel in picks_by_type.items():
        idx_by_type[ch_type] = [ch_names.index(info_ch_names[c])
                                for c in sel if info_ch_names[c] in ch_names]
    idx_names = [(idx_by_type[key],
                  '%s covariance' % DEFAULTS['titles'][key],
                  DEFAULTS['units'][key],
                  DEFAULTS['scalings'][key],
                  key)
                 for key in _DATA_CH_TYPES_SPLIT
                 if len(idx_by_type[key]) > 0]
    C = cov.data[ch_idx][:, ch_idx]
    return info, C, ch_names, idx_names
@verbose
def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True,
             show=True, verbose=None):
    """Plot Covariance data.

    Parameters
    ----------
    cov : instance of Covariance
        The covariance matrix.
    info : dict
        Measurement info.
    exclude : list of str | str
        List of channels to exclude. If empty do not exclude any channel.
        If 'bads', exclude info['bads'].
    colorbar : bool
        Show colorbar or not.
    proj : bool
        Apply projections or not.
    show_svd : bool
        Plot also singular values of the noise covariance for each sensor
        type. We show square roots ie. standard deviations.
    show : bool
        Show figure if True.
    %(verbose)s

    Returns
    -------
    fig_cov : instance of matplotlib.figure.Figure
        The covariance plot.
    fig_svd : instance of matplotlib.figure.Figure | None
        The SVD spectra plot of the covariance.

    See Also
    --------
    mne.compute_rank

    Notes
    -----
    For each channel type, the rank is estimated using
    :func:`mne.compute_rank`.

    .. versionchanged:: 0.19
       Approximate ranks for each channel type are shown with red dashed lines.
    """
    from ..cov import Covariance
    import matplotlib.pyplot as plt
    from matplotlib.colors import Normalize

    info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude)
    del cov, exclude

    projs = []
    if proj:
        projs = copy.deepcopy(info['projs'])
        # Activate the projection items
        for p in projs:
            p['active'] = True
        P, ncomp, _ = make_projector(projs, ch_names)
        if ncomp > 0:
            logger.info(' Created an SSP operator (subspace dimension'
                        ' = %d)' % ncomp)
            # Apply the SSP operator on both sides of the covariance.
            C = np.dot(P, np.dot(C, P.T))
        else:
            logger.info(' The projection vectors do not apply to these '
                        'channels.')

    # One image panel per channel type.
    fig_cov, axes = plt.subplots(1, len(idx_names), squeeze=False,
                                 figsize=(3.8 * len(idx_names), 3.7))
    for k, (idx, name, _, _, _) in enumerate(idx_names):
        # Symmetric color limits around zero.
        vlim = np.max(np.abs(C[idx][:, idx]))
        im = axes[0, k].imshow(C[idx][:, idx], interpolation="nearest",
                               norm=Normalize(vmin=-vlim, vmax=vlim),
                               cmap='RdBu_r')
        axes[0, k].set(title=name)
        if colorbar:
            from mpl_toolkits.axes_grid1 import make_axes_locatable
            divider = make_axes_locatable(axes[0, k])
            cax = divider.append_axes("right", size="5.5%", pad=0.05)
            plt.colorbar(im, cax=cax, format='%.0e')

    fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
    tight_layout(fig=fig_cov)

    fig_svd = None
    if show_svd:
        fig_svd, axes = plt.subplots(1, len(idx_names), squeeze=False,
                                     figsize=(3.8 * len(idx_names), 3.7))
        for k, (idx, name, unit, scaling, key) in enumerate(idx_names):
            this_C = C[idx][:, idx]
            s = linalg.svd(this_C, compute_uv=False)
            this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx],
                                [], [], 0)
            this_info = pick_info(info, idx)
            this_info['projs'] = []
            this_rank = compute_rank(this_C, info=this_info)
            # Protect against true zero singular values
            s[s <= 0] = 1e-10 * s[s > 0].min()
            # Show standard deviations (sqrt of eigenvalues), scaled to units.
            s = np.sqrt(s) * scaling
            axes[0, k].plot(s, color='k', zorder=3)
            this_rank = this_rank[key]
            # Mark the estimated rank with a dashed red line.
            axes[0, k].axvline(this_rank - 1, ls='--', color='r',
                               alpha=0.5, zorder=4, clip_on=False)
            axes[0, k].text(this_rank - 1, axes[0, k].get_ylim()[1],
                            'rank ≈ %d' % (this_rank,), ha='right', va='top',
                            color='r', alpha=0.5, zorder=4)
            axes[0, k].set(ylabel=u'Noise σ (%s)' % unit, yscale='log',
                           xlabel='Eigenvalue index', title=name,
                           xlim=[0, len(s) - 1])
        tight_layout(fig=fig_svd)

    plt_show(show)
    return fig_cov, fig_svd
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
                            source_index=None, colorbar=False, show=True):
    """Plot source power in time-frequency grid.

    Parameters
    ----------
    stcs : list of SourceEstimate
        Source power for consecutive time windows, one SourceEstimate object
        should be provided for each frequency bin.
    freq_bins : list of tuples of float
        Start and end points of frequency bins of interest.
    tmin : float
        Minimum time instant to show.
    tmax : float
        Maximum time instant to show.
    source_index : int | None
        Index of source for which the spectrogram will be plotted. If None,
        the source with the largest activation will be selected.
    colorbar : bool
        If true, a colorbar will be added to the plot.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of Figure
        The figure.
    """
    import matplotlib.pyplot as plt

    # Input checks
    if len(stcs) == 0:
        raise ValueError('cannot plot spectrogram if len(stcs) == 0')
    stc = stcs[0]
    if tmin is not None and tmin < stc.times[0]:
        raise ValueError('tmin cannot be smaller than the first time point '
                         'provided in stcs')
    if tmax is not None and tmax > stc.times[-1] + stc.tstep:
        raise ValueError('tmax cannot be larger than the sum of the last time '
                         'point and the time step, which are provided in stcs')

    # Preparing time-frequency cell boundaries for plotting
    if tmin is None:
        tmin = stc.times[0]
    if tmax is None:
        tmax = stc.times[-1] + stc.tstep
    time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
    freq_bounds = sorted(set(np.ravel(freq_bins)))
    freq_ticks = copy.deepcopy(freq_bounds)

    # Reject time points that will not be plotted and gather results
    source_power = []
    for stc in stcs:
        stc = stc.copy()  # copy since crop modifies inplace
        stc.crop(tmin, tmax - stc.tstep)
        source_power.append(stc.data)
    source_power = np.array(source_power)

    # Finding the source with maximum source power
    if source_index is None:
        source_index = np.unravel_index(source_power.argmax(),
                                        source_power.shape)[1]

    # If there is a gap in the frequency bins record its locations so that it
    # can be covered with a gray horizontal bar
    gap_bounds = []
    for i in range(len(freq_bins) - 1):
        lower_bound = freq_bins[i][1]
        upper_bound = freq_bins[i + 1][0]
        if lower_bound != upper_bound:
            freq_bounds.remove(lower_bound)
            gap_bounds.append((lower_bound, upper_bound))

    # Preparing time-frequency grid for plotting
    time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)

    # Plotting the results
    fig = plt.figure(figsize=(9, 6))
    plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
               cmap='Reds')
    ax = plt.gca()
    ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')

    # Thin out the time tick labels so at most ~10 remain visible.
    time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
    n_skip = 1 + len(time_bounds) // 10
    for i in range(len(time_bounds)):
        if i % n_skip != 0:
            time_tick_labels[i] = ''
    ax.set_xticks(time_bounds)
    ax.set_xticklabels(time_tick_labels)
    plt.xlim(time_bounds[0], time_bounds[-1])
    plt.yscale('log')
    ax.set_yticks(freq_ticks)
    ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
    plt.ylim(freq_bounds[0], freq_bounds[-1])
    plt.grid(True, ls='-')
    if colorbar:
        plt.colorbar()
    tight_layout(fig=fig)

    # Covering frequency gaps with horizontal bars
    for lower_bound, upper_bound in gap_bounds:
        plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
                 lower_bound, time_bounds[0], color='#666666')

    plt_show(show)
    return fig
def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal',
                       slices=None, show=True, show_indices=False,
                       show_orientation=False, img_output=False):
    """Plot BEM contours on anatomical slices.

    Parameters
    ----------
    mri_fname : str
        Path to the MRI volume to plot (e.g. a T1).
    surfaces : list of (str, color)
        Surface file names and colors to draw their contours with.
    src : SourceSpaces | None
        If given, active source points are scattered on the nearest slice.
        Must be in MRI (surface RAS) coordinates.
    orientation : str
        'coronal', 'axial' or 'sagittal'.
    slices : array-like of int | None
        Slice indices to plot; defaults to 12 evenly spaced slices.
    show : bool
        Show the figure at the end.
    show_indices : bool
        Annotate each slice with its index.
    show_orientation : bool
        Annotate outer axes with L/R, P/A, I/S labels.
    img_output : bool
        If True, return a list of base64-encoded PNG strings (one per
        slice) instead of the figure.

    Returns
    -------
    fig | list of str
        The figure, or base64-encoded images if ``img_output``.
    """
    import matplotlib.pyplot as plt
    from matplotlib import patheffects
    # For ease of plotting, we will do everything in voxel coordinates.
    _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
    # Load the T1 data
    _, vox_mri_t, _, _, _, nim = _read_mri_info(
        mri_fname, units='mm', return_img=True)
    mri_vox_t = invert_transform(vox_mri_t)['trans']
    del vox_mri_t
    # plot axes (x, y, z) as data axes
    (x, y, z), (flip_x, flip_y, flip_z), order = _mri_orientation(
        nim, orientation)
    transpose = x < y
    data = _get_img_fdata(nim)
    shift_x = data.shape[x] if flip_x < 0 else 0
    shift_y = data.shape[y] if flip_y < 0 else 0
    n_slices = data.shape[z]
    if slices is None:
        slices = np.round(np.linspace(0, n_slices - 1, 14)).astype(int)[1:-1]
    slices = np.atleast_1d(slices).copy()
    slices[slices < 0] += n_slices  # allow negative indexing
    if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \
            slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \
            slices.dtype.kind not in 'iu':
        raise ValueError('slices must be a sorted 1D array of int with unique '
                         'elements, at least one element, and no elements '
                         'greater than %d, got %s' % (n_slices - 1, slices))
    if flip_z < 0:
        # Proceed in the opposite order to maintain left-to-right / orientation
        slices = slices[::-1]
    # create of list of surfaces
    surfs = list()
    for file_name, color in surfaces:
        surf = dict()
        surf['rr'], surf['tris'] = read_surface(file_name)
        # move surface to voxel coordinate system
        surf['rr'] = apply_trans(mri_vox_t, surf['rr'])
        surfs.append((surf, color))
    sources = list()
    if src is not None:
        _ensure_src(src, extra=' or None')
        # Eventually we can relax this by allowing ``trans`` if need be
        if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:
            # BUGFIX: the inner subscript must use double quotes; nesting
            # single quotes inside a single-quoted f-string is a SyntaxError
            # on Python < 3.12 (PEP 701).
            raise ValueError(
                'Source space must be in MRI coordinates, got '
                f'{_frame_to_str[src[0]["coord_frame"]]}')
        for src_ in src:
            points = src_['rr'][src_['inuse'].astype(bool)]
            sources.append(apply_trans(mri_vox_t, points * 1e3))
        sources = np.concatenate(sources, axis=0)
    if img_output:
        # One reusable axes; each slice is saved to its own PNG.
        n_col = n_axes = 1
        fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0))
        axs = [ax] * len(slices)
        w = fig.get_size_inches()[0]
        fig.set_size_inches([w, w / data.shape[x] * data.shape[y]])
        plt.close(fig)
    else:
        n_col = 4
        fig, axs, _, _ = _prepare_trellis(len(slices), n_col)
        n_axes = len(axs)
        fig.set_facecolor('k')
    # Midpoints between consecutive slices: each source point is assigned to
    # the nearest slice.
    bounds = np.concatenate(
        [[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]])  # float
    slicer = [slice(None)] * 3
    ori_labels = dict(R='LR', A='PA', S='IS')
    xlabels, ylabels = ori_labels[order[0]], ori_labels[order[1]]
    path_effects = [patheffects.withStroke(linewidth=4, foreground="k",
                                           alpha=0.75)]
    out = list() if img_output else fig
    for ai, (ax, sl, lower, upper) in enumerate(zip(
            axs, slices, bounds[:-1], bounds[1:])):
        # adjust the orientations for good view
        slicer[z] = sl
        dat = data[tuple(slicer)]
        dat = dat.T if transpose else dat
        dat = dat[::flip_y, ::flip_x]
        # First plot the anatomical data
        if img_output:
            ax.clear()
        ax.imshow(dat, cmap=plt.cm.gray, origin='lower')
        ax.set_autoscale_on(False)
        ax.axis('off')
        ax.set_aspect('equal')  # XXX eventually could deal with zooms
        # and then plot the contours on top
        for surf, color in surfs:
            with warnings.catch_warnings(record=True):  # ignore contour warn
                warnings.simplefilter('ignore')
                ax.tricontour(flip_x * surf['rr'][:, x] + shift_x,
                              flip_y * surf['rr'][:, y] + shift_y,
                              surf['tris'], surf['rr'][:, z],
                              levels=[sl], colors=color, linewidths=1.0,
                              zorder=1)
        if len(sources):
            # Show the source points whose nearest slice is this one.
            in_slice = (sources[:, z] >= lower) & (sources[:, z] < upper)
            ax.scatter(flip_x * sources[in_slice, x] + shift_x,
                       flip_y * sources[in_slice, y] + shift_y,
                       marker='.', color='#FF00FF', s=1, zorder=2)
        if show_indices:
            ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl),
                    color='w', fontsize='x-small', va='bottom', ha='left')
        # label the axes
        kwargs = dict(
            color='#66CCEE', fontsize='medium', path_effects=path_effects,
            family='monospace', clip_on=False, zorder=5, weight='bold')
        if show_orientation:
            if ai % n_col == 0:  # left
                ax.text(0, dat.shape[0] / 2., xlabels[0],
                        va='center', ha='left', **kwargs)
            if ai % n_col == n_col - 1 or ai == n_axes - 1:  # right
                ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1],
                        va='center', ha='right', **kwargs)
            if ai >= n_axes - n_col:  # bottom
                ax.text(dat.shape[1] / 2., 0, ylabels[0],
                        ha='center', va='bottom', **kwargs)
            if ai < n_col or n_col == 1:  # top
                ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1],
                        ha='center', va='top', **kwargs)
        if img_output:
            output = BytesIO()
            fig.savefig(output, bbox_inches='tight',
                        pad_inches=0, format='png')
            out.append(base64.b64encode(output.getvalue()).decode('ascii'))
    fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
                        hspace=0.)
    plt_show(show, fig=fig)
    return out
def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
             slices=None, brain_surfaces=None, src=None, show=True,
             show_indices=True, mri='T1.mgz', show_orientation=True):
    """Plot BEM contours on anatomical slices.

    Parameters
    ----------
    subject : str
        Subject name.
    subjects_dir : str | None
        Path to the SUBJECTS_DIR. If None, the path is obtained by using
        the environment variable SUBJECTS_DIR.
    orientation : str
        'coronal' or 'axial' or 'sagittal'.
    slices : list of int
        Slice indices.
    brain_surfaces : None | str | list of str
        One or more brain surface to plot (optional). Entries should correspond
        to files in the subject's ``surf`` directory (e.g. ``"white"``).
    src : None | SourceSpaces | str
        SourceSpaces instance or path to a source space to plot individual
        sources as scatter-plot. Sources will be shown on exactly one slice
        (whichever slice is closest to each source in the given orientation
        plane). Path can be absolute or relative to the subject's ``bem``
        folder.

        .. versionchanged:: 0.20
           All sources are shown on the nearest slice rather than some
           being omitted.
    show : bool
        Show figure if True.
    show_indices : bool
        Show slice indices if True.

        .. versionadded:: 0.20
    mri : str
        The name of the MRI to use. Can be a standard FreeSurfer MRI such as
        ``'T1.mgz'``, or a full path to a custom MRI file.

        .. versionadded:: 0.21
    show_orientation : str
        Show the orientation (L/R, P/A, I/S) of the data slices.

        .. versionadded:: 0.21

    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure.

    See Also
    --------
    mne.viz.plot_alignment

    Notes
    -----
    Images are plotted in MRI voxel coordinates.

    If ``src`` is not None, for a given slice index, all source points are
    shown that are halfway between the previous slice and the given slice,
    and halfway between the given slice and the next slice.
    For large slice decimations, this can
    make some source points appear outside the BEM contour, which is shown
    for the given slice index. For example, in the case where the single
    midpoint slice is used ``slices=[128]``, all source points will be shown
    on top of the midpoint MRI slice with the BEM boundary drawn for that
    slice.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    mri_fname = _check_mri(mri, subject, subjects_dir)

    # Get the BEM surface filenames
    bem_path = op.join(subjects_dir, subject, 'bem')

    if not op.isdir(bem_path):
        raise IOError('Subject bem directory "%s" does not exist' % bem_path)

    surfaces = _get_bem_plotting_surfaces(bem_path)
    if brain_surfaces is not None:
        # accept a single surface name as shorthand for a one-element list
        if isinstance(brain_surfaces, str):
            brain_surfaces = (brain_surfaces,)
        for surf_name in brain_surfaces:
            for hemi in ('lh', 'rh'):
                surf_fname = op.join(subjects_dir, subject, 'surf',
                                     hemi + '.' + surf_name)
                if op.exists(surf_fname):
                    surfaces.append((surf_fname, '#00DD00'))
                else:
                    raise IOError("Surface %s does not exist." % surf_fname)

    if isinstance(src, str):
        # resolve a relative path against the subject's bem folder
        if not op.exists(src):
            src_ = op.join(subjects_dir, subject, 'bem', src)
            if op.exists(src_):
                src = src_
            else:
                raise IOError("%s does not exist" % src)
        src = read_source_spaces(src)
    elif src is not None and not isinstance(src, SourceSpaces):
        raise TypeError("src needs to be None, str or SourceSpaces instance, "
                        "not %s" % repr(src))

    if len(surfaces) == 0:
        raise IOError('No surface files found. Surface files must end with '
                      'inner_skull.surf, outer_skull.surf or outer_skin.surf')

    # Plot the contours
    return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices,
                              show, show_indices, show_orientation)
def _get_bem_plotting_surfaces(bem_path):
    """Collect ``(filename, color)`` pairs for BEM surfaces in *bem_path*.

    Looks for the standard inner-skull/outer-skull/outer-skin surface files
    and silently skips any layer that is not present.
    """
    surf_specs = (('*inner_skull', '#FF0000'),
                  ('*outer_skull', '#FFFF00'),
                  ('*outer_skin', '#FFAA80'))
    found = []
    for pattern, color in surf_specs:
        matches = glob(op.join(bem_path, pattern + '.surf'))
        if matches:
            # use the first match only (as with the original glob-based code)
            logger.info("Using surface: %s" % matches[0])
            found.append((matches[0], color))
    return found
@verbose
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
                axes=None, equal_spacing=True, show=True, on_missing='raise',
                verbose=None):
    """Plot events to get a visual display of the paradigm.

    Parameters
    ----------
    events : array, shape (n_events, 3)
        The events.
    sfreq : float | None
        The sample frequency. If None, data will be displayed in samples (not
        seconds).
    first_samp : int
        The index of the first sample. Recordings made on Neuromag systems
        number samples relative to the system start (not relative to the
        beginning of the recording). In such cases the ``raw.first_samp``
        attribute can be passed here. Default is 0.
    color : dict | None
        Dictionary of event_id integers as keys and colors as values. If None,
        colors are automatically drawn from a default list (cycled through if
        number of events longer than list of default colors). Color can be any
        valid :doc:`matplotlib color <tutorials/colors/colors>`.
    event_id : dict | None
        Dictionary of event labels (e.g. 'aud_l') as keys and their associated
        event_id values. Labels are used to plot a legend. If None, no legend
        is drawn.
    axes : instance of Axes
        The subplot handle.
    equal_spacing : bool
        Use equal spacing between events in y-axis.
    show : bool
        Show figure if True.
    %(on_missing_events)s
    %(verbose)s

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object containing the plot.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    if sfreq is None:
        # no sampling rate: plot in raw sample numbers
        sfreq = 1.0
        xlabel = 'Samples'
    else:
        xlabel = 'Time (s)'

    events = np.asarray(events)
    if len(events) == 0:
        raise ValueError('No events in events array, cannot plot.')
    unique_events = np.unique(events[:, 2])
    if event_id is not None:
        # get labels and unique event ids from event_id dict,
        # sorted by value
        event_id_rev = {v: k for k, v in event_id.items()}
        conditions, unique_events_id = zip(*sorted(event_id.items(),
                                                   key=lambda x: x[1]))
        # drop event_id entries that never occur in ``events`` (behavior
        # controlled by on_missing: raise / warn / ignore)
        keep = np.ones(len(unique_events_id), bool)
        for ii, this_event in enumerate(unique_events_id):
            if this_event not in unique_events:
                msg = f'{this_event} from event_id is not present in events.'
                _on_missing(on_missing, msg)
                keep[ii] = False
        conditions = [cond for cond, k in zip(conditions, keep) if k]
        unique_events_id = [id_ for id_, k in zip(unique_events_id, keep) if k]
        if len(unique_events_id) == 0:
            raise RuntimeError('No usable event IDs found')

        # events present in the data but absent from event_id are skipped
        for this_event in unique_events:
            if this_event not in unique_events_id:
                warn('event %s missing from event_id will be ignored'
                     % this_event)
    else:
        unique_events_id = unique_events

    color = _handle_event_colors(color, unique_events, event_id)
    import matplotlib.pyplot as plt

    fig = None
    if axes is None:
        fig = plt.figure()
    ax = axes if axes else plt.gca()

    unique_events_id = np.array(unique_events_id)
    min_event = np.min(unique_events_id)
    max_event = np.max(unique_events_id)
    max_x = (events[np.in1d(events[:, 2], unique_events_id), 0].max() -
             first_samp) / sfreq

    handles, labels = list(), list()
    for idx, ev in enumerate(unique_events_id):
        ev_mask = events[:, 2] == ev
        count = ev_mask.sum()
        if count == 0:
            continue
        # with equal_spacing events are stacked on consecutive integer rows,
        # otherwise on their literal trigger value
        y = np.full(count, idx + 1 if equal_spacing else events[ev_mask, 2][0])
        if event_id is not None:
            event_label = '%s (%s)' % (event_id_rev[ev], count)
        else:
            event_label = 'N=%d' % (count,)
        labels.append(event_label)
        kwargs = {}
        if ev in color:
            kwargs['color'] = color[ev]
        handles.append(
            ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
                    y, '.', clip_on=False, **kwargs)[0])

    if equal_spacing:
        ax.set_ylim(0, unique_events_id.size + 1)
        ax.set_yticks(1 + np.arange(unique_events_id.size))
        ax.set_yticklabels(unique_events_id)
    else:
        ax.set_ylim([min_event - 1, max_event + 1])

    ax.set(xlabel=xlabel, ylabel='Events id', xlim=[0, max_x])

    ax.grid(True)

    fig = fig if fig is not None else plt.gcf()
    # reverse order so that the highest numbers are at the top
    # (match plot order)
    handles, labels = handles[::-1], labels[::-1]
    box = ax.get_position()
    # leave room at the right for the legend (a bit more when labels are long)
    factor = 0.8 if event_id is not None else 0.9
    ax.set_position([box.x0, box.y0, box.width * factor, box.height])
    ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5),
              fontsize='small')
    fig.canvas.draw()
    plt_show(show)
    return fig
def _get_presser(fig):
    """Get our press callback."""
    import matplotlib
    callbacks = fig.canvas.callbacks.callbacks['button_press_event']
    # matplotlib >= 3 stores weak references; older versions store the
    # callback wrapper directly
    use_weakref = LooseVersion(matplotlib.__version__) >= '3'
    func = None
    for candidate in callbacks.values():
        resolved = candidate() if use_weakref else candidate.func
        if resolved.__class__.__name__ == 'partial':
            func = resolved
            break
    assert func is not None
    return func
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
    """Plot the amplitude traces of a set of dipoles.

    Parameters
    ----------
    dipoles : list of instance of Dipole
        The dipoles whose amplitudes should be shown.
    colors : list of color | None
        Color to plot with each dipole. If None default colors are used.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object containing the plot.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    import matplotlib.pyplot as plt
    if colors is None:
        colors = cycle(_get_color_list())
    fig, ax = plt.subplots(1, 1)
    xlim = [np.inf, -np.inf]
    for dip, color in zip(dipoles, colors):
        # amplitude converted from Am to nAm for display
        ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5)
        # grow the x-range to cover every dipole's time span
        xlim[0] = min(xlim[0], dip.times[0])
        xlim[1] = max(xlim[1], dip.times[-1])
    ax.set(xlim=xlim, xlabel='Time (s)', ylabel='Amplitude (nAm)')
    if show:
        # NOTE(review): unlike the other plotters here, this calls
        # fig.show(warn=False) instead of plt_show(); the ``warn`` kwarg is
        # matplotlib-version dependent — confirm against supported versions
        fig.show(warn=False)
    return fig
def adjust_axes(axes, remove_spines=('top', 'right'), grid=True):
    """Adjust some properties of axes.

    Parameters
    ----------
    axes : list
        List of axes to process.
    remove_spines : list of str
        Which axis spines to remove.
    grid : bool
        Turn grid on (True) or off (False).
    """
    # Accept a single Axes object as well as any sequence of them.
    if not isinstance(axes, (list, tuple, np.ndarray)):
        axes = [axes]
    for this_ax in axes:
        if grid:
            this_ax.grid(zorder=0)
        for spine in remove_spines:
            this_ax.spines[spine].set_visible(False)
def _filter_ticks(lims, fscale):
"""Create approximately spaced ticks between lims."""
if fscale == 'linear':
return None, None # let matplotlib handle it
lims = np.array(lims)
ticks = list()
if lims[1] > 20 * lims[0]:
base = np.array([1, 2, 4])
else:
base = np.arange(1, 11)
for exp in range(int(np.floor(np.log10(lims[0]))),
int(np.floor(np.log10(lims[1]))) + 1):
ticks += (base * (10 ** exp)).tolist()
ticks = np.array(ticks)
ticks = ticks[(ticks >= lims[0]) & (ticks <= lims[1])]
ticklabels = [('%g' if t < 1 else '%d') % t for t in ticks]
return ticks, ticklabels
def _get_flim(flim, fscale, freq, sfreq=None):
"""Get reasonable frequency limits."""
if flim is None:
if freq is None:
flim = [0.1 if fscale == 'log' else 0., sfreq / 2.]
else:
if fscale == 'linear':
flim = [freq[0]]
else:
flim = [freq[0] if freq[0] > 0 else 0.1 * freq[1]]
flim += [freq[-1]]
if fscale == 'log':
if flim[0] <= 0:
raise ValueError('flim[0] must be positive, got %s' % flim[0])
elif flim[0] < 0:
raise ValueError('flim[0] must be non-negative, got %s' % flim[0])
return flim
def _check_fscale(fscale):
"""Check for valid fscale."""
if not isinstance(fscale, str) or fscale not in ('log', 'linear'):
raise ValueError('fscale must be "log" or "linear", got %s'
% (fscale,))
# Default y-axis amplitude limits (dB) shared by plot_filter/plot_ideal_filter
_DEFAULT_ALIM = (-80, 10)
def plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4',
                flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True,
                compensate=False, plot=('time', 'magnitude', 'delay'),
                axes=None):
    """Plot properties of a filter.

    Parameters
    ----------
    h : dict or ndarray
        An IIR dict or 1D ndarray of coefficients (for FIR filter).
    sfreq : float
        Sample rate of the data (Hz).
    freq : array-like or None
        The ideal response frequencies to plot (must be in ascending order).
        If None (default), do not plot the ideal response.
    gain : array-like or None
        The ideal response gains to plot.
        If None (default), do not plot the ideal response.
    title : str | None
        The title to use. If None (default), determine the title based
        on the type of the system.
    color : color object
        The color to use (default '#1f77b4').
    flim : tuple or None
        If not None, the x-axis frequency limits (Hz) to use.
        If None, freq will be used. If None (default) and freq is None,
        ``(0.1, sfreq / 2.)`` will be used.
    fscale : str
        Frequency scaling to use, can be "log" (default) or "linear".
    alim : tuple
        The y-axis amplitude limits (dB) to use (default: (-80, 10)).
    show : bool
        Show figure if True (default).
    compensate : bool
        If True, compensate for the filter delay (phase will not be shown).

        - For linear-phase FIR filters, this visualizes the filter coefficients
          assuming that the output will be shifted by ``N // 2``.
        - For IIR filters, this changes the filter coefficient display
          by filtering backward and forward, and the frequency response
          by squaring it.

        .. versionadded:: 0.18
    plot : list | tuple | str
        A list of the requested plots from ``time``, ``magnitude`` and
        ``delay``. Default is to plot all three filter properties
        ('time', 'magnitude', 'delay').

        .. versionadded:: 0.21.0
    axes : instance of Axes | list | None
        The axes to plot to. If list, the list must be a list of Axes of
        the same length as the number of requested plot types. If instance of
        Axes, there must be only one filter property plotted.
        Defaults to ``None``.

        .. versionadded:: 0.21.0

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure containing the plots.

    See Also
    --------
    mne.filter.create_filter
    plot_ideal_filter

    Notes
    -----
    .. versionadded:: 0.14
    """
    from scipy.signal import (
        freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt)
    import matplotlib.pyplot as plt
    sfreq = float(sfreq)
    _check_option('fscale', fscale, ['log', 'linear'])
    if isinstance(plot, str):
        plot = [plot]
    for xi, x in enumerate(plot):
        _check_option('plot[%d]' % xi, x, ('magnitude', 'delay', 'time'))

    flim = _get_flim(flim, fscale, freq, sfreq)
    if fscale == 'log':
        omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)
    else:
        omega = np.linspace(flim[0], flim[1], 1000)
    xticks, xticklabels = _filter_ticks(flim, fscale)
    # convert Hz to normalized angular frequency (rad/sample)
    omega /= sfreq / (2 * np.pi)
    if isinstance(h, dict):  # IIR filter given as a dict ('sos' or 'b'/'a')
        if 'sos' in h:
            # accumulate the response section by section
            H = np.ones(len(omega), np.complex128)
            gd = np.zeros(len(omega))
            for section in h['sos']:
                this_H = freqz(section[:3], section[3:], omega)[1]
                H *= this_H
                if compensate:
                    H *= this_H.conj()  # time reversal is freq conj
                else:
                    # Assume the forward-backward delay zeros out, which it
                    # mostly should
                    with warnings.catch_warnings(record=True):  # singular GD
                        warnings.simplefilter('ignore')
                        gd += group_delay((section[:3], section[3:]), omega)[1]
            n = estimate_ringing_samples(h['sos'])
            delta = np.zeros(n)
            delta[0] = 1
            if compensate:
                delta = np.pad(delta, [(n - 1, 0)], 'constant')
                func = sosfiltfilt
                gd += (len(delta) - 1) // 2
            else:
                func = sosfilt
            # replace h by the impulse response for the time-domain plot
            h = func(h['sos'], delta)
        else:
            H = freqz(h['b'], h['a'], omega)[1]
            if compensate:
                H *= H.conj()
            with warnings.catch_warnings(record=True):  # singular GD
                warnings.simplefilter('ignore')
                gd = group_delay((h['b'], h['a']), omega)[1]
                if compensate:
                    gd += group_delay(h['b'].conj(), h['a'].conj(), omega)[1]
            n = estimate_ringing_samples((h['b'], h['a']))
            delta = np.zeros(n)
            delta[0] = 1
            if compensate:
                delta = np.pad(delta, [(n - 1, 0)], 'constant')
                func = filtfilt
            else:
                func = lfilter
            h = func(h['b'], h['a'], delta)
        if title is None:
            title = 'SOS (IIR) filter'
        if compensate:
            title += ' (forward-backward)'
    else:
        # FIR: h is the coefficient array itself
        H = freqz(h, worN=omega)[1]
        with warnings.catch_warnings(record=True):  # singular GD
            warnings.simplefilter('ignore')
            gd = group_delay((h, [1.]), omega)[1]
        title = 'FIR filter' if title is None else title
        if compensate:
            title += ' (delay-compensated)'

    fig = None
    if axes is None:
        fig, axes = plt.subplots(len(plot), 1)
    if isinstance(axes, plt.Axes):
        axes = [axes]
    elif isinstance(axes, np.ndarray):
        axes = list(axes)
    if fig is None:
        fig = axes[0].get_figure()
    if len(axes) != len(plot):
        raise ValueError('Length of axes (%d) must be the same as number of '
                         'requested filter properties (%d)'
                         % (len(axes), len(plot)))

    t = np.arange(len(h))
    dlim = np.abs(t).max() / 2.
    dlim = [-dlim, dlim]
    if compensate:
        # center the impulse response around t=0
        n_shift = (len(h) - 1) // 2
        t -= n_shift
        assert t[0] == -t[-1]
        gd -= n_shift
    t = t / sfreq
    gd = gd / sfreq
    f = omega * sfreq / (2 * np.pi)
    # on a log axis the first (DC) sample cannot be drawn
    sl = slice(0 if fscale == 'linear' else 1, None, None)
    # power in dB, floored to avoid log10(0)
    mag = 10 * np.log10(np.maximum((H * H.conj()).real, 1e-20))

    if 'time' in plot:
        ax_time_idx = np.where([p == 'time' for p in plot])[0][0]
        axes[ax_time_idx].plot(t, h, color=color)
        axes[ax_time_idx].set(xlim=t[[0, -1]], xlabel='Time (s)',
                              ylabel='Amplitude', title=title)
    # Magnitude
    if 'magnitude' in plot:
        ax_mag_idx = np.where([p == 'magnitude' for p in plot])[0][0]
        axes[ax_mag_idx].plot(f[sl], mag[sl], color=color,
                              linewidth=2, zorder=4)
        if freq is not None and gain is not None:
            plot_ideal_filter(freq, gain, axes[ax_mag_idx],
                              fscale=fscale, show=False)
        axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)
        if xticks is not None:
            axes[ax_mag_idx].set(xticks=xticks)
            axes[ax_mag_idx].set(xticklabels=xticklabels)
        axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)',
                             ylabel='Amplitude (dB)')
    # Delay
    if 'delay' in plot:
        ax_delay_idx = np.where([p == 'delay' for p in plot])[0][0]
        axes[ax_delay_idx].plot(f[sl], gd[sl], color=color,
                                linewidth=2, zorder=4)
        # shade nulled regions
        for start, stop in zip(*_mask_to_onsets_offsets(mag <= -39.9)):
            axes[ax_delay_idx].axvspan(f[start], f[stop - 1],
                                       facecolor='k', alpha=0.05,
                                       zorder=5)
        axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)',
                               xlabel='Frequency (Hz)',
                               xscale=fscale)
        if xticks is not None:
            axes[ax_delay_idx].set(xticks=xticks)
            axes[ax_delay_idx].set(xticklabels=xticklabels)
        axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)',
                               ylabel='Delay (s)')

    adjust_axes(axes)
    tight_layout()
    plt_show(show)
    return fig
def plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log',
                      alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--',
                      show=True):
    """Plot an ideal filter response.

    Parameters
    ----------
    freq : array-like
        The ideal response frequencies to plot (must be in ascending order).
    gain : array-like or None
        The ideal response gains to plot.
    axes : instance of Axes | None
        The subplot handle. With None (default), axes are created.
    title : str
        The title to use, (default: '').
    flim : tuple or None
        If not None, the x-axis frequency limits (Hz) to use.
        If None (default), freq used.
    fscale : str
        Frequency scaling to use, can be "log" (default) or "linear".
    alim : tuple
        If not None (default), the y-axis limits (dB) to use.
    color : color object
        The color to use (default: 'r').
    alpha : float
        The alpha to use (default: 0.5).
    linestyle : str
        The line style to use (default: '--').
    show : bool
        Show figure if True (default).

    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure.

    See Also
    --------
    plot_filter

    Notes
    -----
    .. versionadded:: 0.14

    Examples
    --------
    Plot a simple ideal band-pass filter::

        >>> from mne.viz import plot_ideal_filter
        >>> freq = [0, 1, 40, 50]
        >>> gain = [0, 1, 1, 0]
        >>> plot_ideal_filter(freq, gain, flim=(0.1, 100))  #doctest: +ELLIPSIS
        <...Figure...>
    """
    import matplotlib.pyplot as plt
    my_freq, my_gain = list(), list()
    if freq[0] != 0:
        raise ValueError('freq should start with DC (zero) and end with '
                         'Nyquist, but got %s for DC' % (freq[0],))
    freq = np.array(freq)
    # deal with semilogx problems @ x=0
    _check_option('fscale', fscale, ['log', 'linear'])
    if fscale == 'log':
        freq[0] = 0.1 * freq[1] if flim is None else min(flim[0], freq[1])
    flim = _get_flim(flim, fscale, freq)
    transitions = list()
    for ii in range(len(freq)):
        if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:
            # transition band: interpolate so the dB curve is smooth
            transitions += [[freq[ii], freq[ii + 1]]]
            my_freq += np.linspace(freq[ii], freq[ii + 1], 20,
                                   endpoint=False).tolist()
            my_gain += np.linspace(gain[ii], gain[ii + 1], 20,
                                   endpoint=False).tolist()
        else:
            my_freq.append(freq[ii])
            my_gain.append(gain[ii])
    # convert to dB, clipping at the bottom of the amplitude axis
    my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.)))
    if axes is None:
        axes = plt.subplots(1)[1]
    for transition in transitions:
        axes.axvspan(*transition, color=color, alpha=0.1)
    # bugfix: honor the ``alpha`` parameter (was hard-coded to 0.5, making
    # the documented parameter a silent no-op); default behavior unchanged
    axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=alpha,
              linewidth=4, zorder=3)
    xticks, xticklabels = _filter_ticks(flim, fscale)
    axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)',
             xscale=fscale)
    if xticks is not None:
        axes.set(xticks=xticks)
        axes.set(xticklabels=xticklabels)
    axes.set(xlim=flim)
    if title:
        axes.set(title=title)
    adjust_axes(axes)
    tight_layout()
    plt_show(show)
    return axes.figure
def _handle_event_colors(color_dict, unique_events, event_id):
    """Create event-integer-to-color mapping, assigning defaults as needed."""
    palette = _get_color_list()
    default_colors = dict(zip(sorted(unique_events), cycle(palette)))
    if color_dict is None:
        # no custom colors: warn if the default palette must repeat
        if len(unique_events) > len(_get_color_list()):
            warn('More events than default colors available. You should pass '
                 'a list of unique colors.')
        return default_colors
    custom_colors = dict()
    for key, this_color in color_dict.items():
        if key in unique_events:  # key was a valid event integer
            custom_colors[key] = this_color
        elif key in event_id:  # key was an event label
            custom_colors[event_id[key]] = this_color
        else:  # key not a valid event, warn and ignore
            warn('Event ID %s is in the color dict but is not '
                 'present in events or event_id.' % str(key))
    # warn if color_dict is missing any entries
    unassigned = sorted(set(unique_events) - set(custom_colors))
    if len(unassigned):
        unassigned_str = ', '.join(str(e) for e in unassigned)
        warn('Color was not assigned for event%s %s. Default colors will '
             'be used.' % (_pl(unassigned), unassigned_str))
    default_colors.update(custom_colors)
    return default_colors
def plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None,
             n_cols=None, show=True):
    """Plot CSD matrices.

    A sub-plot is created for each frequency. If an info object is passed to
    the function, different channel types are plotted in different figures.

    Parameters
    ----------
    csd : instance of CrossSpectralDensity
        The CSD matrix to plot.
    info : instance of Info | None
        To split the figure by channel-type, provide the measurement info.
        By default, the CSD matrix is plotted as a whole.
    mode : 'csd' | 'coh'
        Whether to plot the cross-spectral density ('csd', the default), or
        the coherence ('coh') between the channels.
    colorbar : bool
        Whether to show a colorbar. Defaults to ``True``.
    cmap : str | None
        The matplotlib colormap to use. Defaults to None, which means the
        colormap will default to matplotlib's default.
    n_cols : int | None
        CSD matrices are plotted in a grid. This parameter controls how
        many matrix to plot side by side before starting a new row. By
        default, a number will be chosen to make the grid as square as
        possible.
    show : bool
        Whether to show the figure. Defaults to ``True``.

    Returns
    -------
    fig : list of Figure
        The figures created by this function.
    """
    import matplotlib.pyplot as plt

    if mode not in ['csd', 'coh']:
        raise ValueError('"mode" should be either "csd" or "coh".')

    if info is not None:
        # split the CSD channels by type (EEG / magnetometer / gradiometer)
        info_ch_names = info['ch_names']
        sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
                             exclude=[])
        sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
                             exclude=[])
        sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
                              exclude=[])
        idx_eeg = [csd.ch_names.index(info_ch_names[c])
                   for c in sel_eeg if info_ch_names[c] in csd.ch_names]
        idx_mag = [csd.ch_names.index(info_ch_names[c])
                   for c in sel_mag if info_ch_names[c] in csd.ch_names]
        idx_grad = [csd.ch_names.index(info_ch_names[c])
                    for c in sel_grad if info_ch_names[c] in csd.ch_names]
        indices = [idx_eeg, idx_mag, idx_grad]
        titles = ['EEG', 'Magnetometers', 'Gradiometers']

        if mode == 'csd':
            # The units in which to plot the CSD
            units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')
            scalings = dict(eeg=1e12, grad=1e26, mag=1e30)
    else:
        # no info: treat all channels as one group
        indices = [np.arange(len(csd.ch_names))]
        if mode == 'csd':
            titles = ['Cross-spectral density']
            # Units and scaling unknown
            units = dict()
            scalings = dict()
        elif mode == 'coh':
            titles = ['Coherence']

    n_freqs = len(csd.frequencies)

    if n_cols is None:
        n_cols = int(np.ceil(np.sqrt(n_freqs)))
    n_rows = int(np.ceil(n_freqs / float(n_cols)))

    figs = []
    for ind, title, ch_type in zip(indices, titles, ['eeg', 'mag', 'grad']):
        if len(ind) == 0:
            continue

        fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,
                                 figsize=(2 * n_cols + 1, 2.2 * n_rows))

        csd_mats = []
        for i in range(len(csd.frequencies)):
            cm = csd.get_data(index=i)[ind][:, ind]
            if mode == 'csd':
                cm = np.abs(cm) * scalings.get(ch_type, 1)
            elif mode == 'coh':
                # Compute coherence from the CSD matrix
                psd = np.diag(cm).real
                cm = np.abs(cm) ** 2 / psd[np.newaxis, :] / psd[:, np.newaxis]
            csd_mats.append(cm)

        # shared color scale across all frequencies of this channel type
        vmax = np.max(csd_mats)

        for i, (freq, mat) in enumerate(zip(csd.frequencies, csd_mats)):
            ax = axes[i // n_cols][i % n_cols]
            im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0,
                           vmax=vmax)
            ax.set_xticks([])
            ax.set_yticks([])
            if csd._is_sum:
                ax.set_title('%.1f-%.1f Hz.' % (np.min(freq),
                                                np.max(freq)))
            else:
                ax.set_title('%.1f Hz.' % freq)

        plt.suptitle(title)
        plt.subplots_adjust(top=0.8)

        if colorbar:
            cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])
            if mode == 'csd':
                label = u'CSD'
                if ch_type in units:
                    label += u' (%s)' % units[ch_type]
                cb.set_label(label)
            elif mode == 'coh':
                cb.set_label('Coherence')

        figs.append(fig)

    plt_show(show)
    return figs
# -*- coding: utf-8 -*-
"""Functions to make simple plots with M/EEG data."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import base64
import copy
from glob import glob
from io import BytesIO
from itertools import cycle
import os.path as op
import warnings
from distutils.version import LooseVersion
from collections import defaultdict
import numpy as np
from scipy import linalg
from ..defaults import DEFAULTS
from ..fixes import _get_img_fdata
from ..rank import compute_rank
from ..source_space import _mri_orientation
from ..surface import read_surface
from ..io.constants import FIFF
from ..io.proj import make_projector
from ..io.pick import (_DATA_CH_TYPES_SPLIT, pick_types, pick_info,
pick_channels)
from ..source_space import (read_source_spaces, SourceSpaces, _read_mri_info,
_check_mri, _ensure_src)
from ..transforms import invert_transform, apply_trans, _frame_to_str
from ..utils import (logger, verbose, warn, _check_option, get_subjects_dir,
_mask_to_onsets_offsets, _pl, _on_missing)
from ..io.pick import _picks_by_type
from ..filter import estimate_ringing_samples
from .utils import tight_layout, _get_color_list, _prepare_trellis, plt_show
def _index_info_cov(info, cov, exclude):
    """Pick the channels shared by *info* and *cov* and group them by type.

    Returns the restricted info, the matching covariance sub-matrix, the
    ordered channel names, and per-type ``(indices, title, unit, scaling,
    key)`` tuples used for plotting.
    """
    if exclude == 'bads':
        exclude = info['bads']
    picked = pick_channels(info['ch_names'], cov['names'], exclude)
    info = pick_info(info, picked)
    del exclude
    picks_by_type = dict(_picks_by_type(info, meg_combined=False,
                                        ref_meg=False, exclude=()))

    # channels present in both info and cov, in cov order
    ch_names = [n for n in cov.ch_names if n in info['ch_names']]
    ch_idx = [cov.ch_names.index(n) for n in ch_names]

    info_ch_names = info['ch_names']
    idx_by_type = defaultdict(list)
    for ch_type, sel in picks_by_type.items():
        idx_by_type[ch_type] = [ch_names.index(info_ch_names[c])
                                for c in sel if info_ch_names[c] in ch_names]
    idx_names = [(idx_by_type[key],
                  '%s covariance' % DEFAULTS['titles'][key],
                  DEFAULTS['units'][key],
                  DEFAULTS['scalings'][key],
                  key)
                 for key in _DATA_CH_TYPES_SPLIT
                 if len(idx_by_type[key]) > 0]
    C = cov.data[ch_idx][:, ch_idx]
    return info, C, ch_names, idx_names
@verbose
def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True,
             show=True, verbose=None):
    """Plot Covariance data.

    Parameters
    ----------
    cov : instance of Covariance
        The covariance matrix.
    info : dict
        Measurement info.
    exclude : list of str | str
        List of channels to exclude. If empty do not exclude any channel.
        If 'bads', exclude info['bads'].
    colorbar : bool
        Show colorbar or not.
    proj : bool
        Apply projections or not.
    show_svd : bool
        Plot also singular values of the noise covariance for each sensor
        type. We show square roots ie. standard deviations.
    show : bool
        Show figure if True.
    %(verbose)s

    Returns
    -------
    fig_cov : instance of matplotlib.figure.Figure
        The covariance plot.
    fig_svd : instance of matplotlib.figure.Figure | None
        The SVD spectra plot of the covariance.

    See Also
    --------
    mne.compute_rank

    Notes
    -----
    For each channel type, the rank is estimated using
    :func:`mne.compute_rank`.

    .. versionchanged:: 0.19
       Approximate ranks for each channel type are shown with red dashed lines.
    """
    from ..cov import Covariance
    import matplotlib.pyplot as plt
    from matplotlib.colors import Normalize

    info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude)
    del cov, exclude

    projs = []
    if proj:
        projs = copy.deepcopy(info['projs'])

        # Activate the projection items
        for p in projs:
            p['active'] = True

        P, ncomp, _ = make_projector(projs, ch_names)
        if ncomp > 0:
            logger.info('    Created an SSP operator (subspace dimension'
                        ' = %d)' % ncomp)
            # project the covariance: C <- P C Pᵀ
            C = np.dot(P, np.dot(C, P.T))
        else:
            logger.info('    The projection vectors do not apply to these '
                        'channels.')

    fig_cov, axes = plt.subplots(1, len(idx_names), squeeze=False,
                                 figsize=(3.8 * len(idx_names), 3.7))
    for k, (idx, name, _, _, _) in enumerate(idx_names):
        # symmetric color scale around zero per channel type
        vlim = np.max(np.abs(C[idx][:, idx]))
        im = axes[0, k].imshow(C[idx][:, idx], interpolation="nearest",
                               norm=Normalize(vmin=-vlim, vmax=vlim),
                               cmap='RdBu_r')
        axes[0, k].set(title=name)

        if colorbar:
            from mpl_toolkits.axes_grid1 import make_axes_locatable
            divider = make_axes_locatable(axes[0, k])
            cax = divider.append_axes("right", size="5.5%", pad=0.05)
            plt.colorbar(im, cax=cax, format='%.0e')

    fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
    tight_layout(fig=fig_cov)

    fig_svd = None
    if show_svd:
        fig_svd, axes = plt.subplots(1, len(idx_names), squeeze=False,
                                     figsize=(3.8 * len(idx_names), 3.7))
        for k, (idx, name, unit, scaling, key) in enumerate(idx_names):
            this_C = C[idx][:, idx]
            s = linalg.svd(this_C, compute_uv=False)
            this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx],
                                [], [], 0)
            this_info = pick_info(info, idx)
            this_info['projs'] = []
            this_rank = compute_rank(this_C, info=this_info)
            # Protect against true zero singular values
            s[s <= 0] = 1e-10 * s[s > 0].min()
            # plot standard deviations (sqrt of eigenvalues), scaled to
            # the channel type's display units
            s = np.sqrt(s) * scaling
            axes[0, k].plot(s, color='k', zorder=3)
            this_rank = this_rank[key]
            axes[0, k].axvline(this_rank - 1, ls='--', color='r',
                               alpha=0.5, zorder=4, clip_on=False)
            axes[0, k].text(this_rank - 1, axes[0, k].get_ylim()[1],
                            'rank ≈ %d' % (this_rank,), ha='right', va='top',
                            color='r', alpha=0.5, zorder=4)
            axes[0, k].set(ylabel=u'Noise σ (%s)' % unit, yscale='log',
                           xlabel='Eigenvalue index', title=name,
                           xlim=[0, len(s) - 1])
        tight_layout(fig=fig_svd)

    plt_show(show)
    return fig_cov, fig_svd
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
                            source_index=None, colorbar=False, show=True):
    """Plot source power in time-frequency grid.

    Parameters
    ----------
    stcs : list of SourceEstimate
        Source power for consecutive time windows, one SourceEstimate object
        should be provided for each frequency bin.
    freq_bins : list of tuples of float
        Start and end points of frequency bins of interest.
    tmin : float
        Minimum time instant to show.
    tmax : float
        Maximum time instant to show.
    source_index : int | None
        Index of source for which the spectrogram will be plotted. If None,
        the source with the largest activation will be selected.
    colorbar : bool
        If true, a colorbar will be added to the plot.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of Figure
        The figure.
    """
    import matplotlib.pyplot as plt
    # Input checks
    if len(stcs) == 0:
        raise ValueError('cannot plot spectrogram if len(stcs) == 0')
    stc = stcs[0]
    if tmin is not None and tmin < stc.times[0]:
        raise ValueError('tmin cannot be smaller than the first time point '
                         'provided in stcs')
    if tmax is not None and tmax > stc.times[-1] + stc.tstep:
        raise ValueError('tmax cannot be larger than the sum of the last time '
                         'point and the time step, which are provided in stcs')
    # Preparing time-frequency cell boundaries for plotting
    if tmin is None:
        tmin = stc.times[0]
    if tmax is None:
        tmax = stc.times[-1] + stc.tstep
    # These are cell *edges* (not centers): pcolor below expects boundaries
    time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
    freq_bounds = sorted(set(np.ravel(freq_bins)))
    freq_ticks = copy.deepcopy(freq_bounds)
    # Reject time points that will not be plotted and gather results
    source_power = []
    for stc in stcs:
        stc = stc.copy()  # copy since crop modifies inplace
        stc.crop(tmin, tmax - stc.tstep)
        source_power.append(stc.data)
    # stacked shape: (n_freq_bins, n_sources, n_times)
    source_power = np.array(source_power)
    # Finding the source with maximum source power
    if source_index is None:
        source_index = np.unravel_index(source_power.argmax(),
                                        source_power.shape)[1]
    # If there is a gap in the frequency bins record its locations so that it
    # can be covered with a gray horizontal bar
    gap_bounds = []
    for i in range(len(freq_bins) - 1):
        lower_bound = freq_bins[i][1]
        upper_bound = freq_bins[i + 1][0]
        if lower_bound != upper_bound:
            freq_bounds.remove(lower_bound)
            gap_bounds.append((lower_bound, upper_bound))
    # Preparing time-frequency grid for plotting
    time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
    # Plotting the results
    fig = plt.figure(figsize=(9, 6))
    plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
               cmap='Reds')
    ax = plt.gca()
    ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')
    # Thin out the time tick labels so that at most ~10 remain visible
    time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
    n_skip = 1 + len(time_bounds) // 10
    for i in range(len(time_bounds)):
        if i % n_skip != 0:
            time_tick_labels[i] = ''
    ax.set_xticks(time_bounds)
    ax.set_xticklabels(time_tick_labels)
    plt.xlim(time_bounds[0], time_bounds[-1])
    plt.yscale('log')
    ax.set_yticks(freq_ticks)
    ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
    plt.ylim(freq_bounds[0], freq_bounds[-1])
    plt.grid(True, ls='-')
    if colorbar:
        plt.colorbar()
    tight_layout(fig=fig)
    # Covering frequency gaps with horizontal bars
    # NOTE(review): barh is called with the legacy positional signature
    # (bottom, width, height, left) -- confirm against the minimum
    # supported matplotlib version
    for lower_bound, upper_bound in gap_bounds:
        plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
                 lower_bound, time_bounds[0], color='#666666')
    plt_show(show)
    return fig
def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal',
                       slices=None, show=True, show_indices=False,
                       show_orientation=False, img_output=False):
    """Plot BEM contours on anatomical slices.

    Parameters
    ----------
    mri_fname : str
        Path of the MRI volume to slice.
    surfaces : list of (str, color)
        Pairs of surface filename and contour color.
    src : instance of SourceSpaces | None
        If given, in-use source points (must be in MRI coordinates) are
        drawn on the nearest slice.
    orientation : str
        One of 'coronal', 'axial' or 'sagittal'.
    slices : array-like of int | None
        Slice indices to plot; ``None`` picks 12 evenly spaced interior
        slices. Negative indices count from the end.
    show : bool
        Show the figure if True.
    show_indices : bool
        Annotate each slice with its index.
    show_orientation : bool
        Draw L/R, P/A, I/S labels on the outer plot edges.
    img_output : bool
        If True, return a list of base64-encoded PNG strings (one per
        slice) instead of the figure.

    Returns
    -------
    fig | list of str
        The figure, or the encoded per-slice images if ``img_output``.
    """
    import matplotlib.pyplot as plt
    from matplotlib import patheffects
    # For ease of plotting, we will do everything in voxel coordinates.
    _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
    # Load the T1 data
    _, vox_mri_t, _, _, _, nim = _read_mri_info(
        mri_fname, units='mm', return_img=True)
    mri_vox_t = invert_transform(vox_mri_t)['trans']
    del vox_mri_t
    # plot axes (x, y, z) as data axes
    (x, y, z), (flip_x, flip_y, flip_z), order = _mri_orientation(
        nim, orientation)
    transpose = x < y
    data = _get_img_fdata(nim)
    # a negative flip mirrors the data, so coordinates must be shifted
    # by the axis length to stay aligned with the mirrored image
    shift_x = data.shape[x] if flip_x < 0 else 0
    shift_y = data.shape[y] if flip_y < 0 else 0
    n_slices = data.shape[z]
    if slices is None:
        # 12 evenly spaced interior slices (both endpoints dropped)
        slices = np.round(np.linspace(0, n_slices - 1, 14)).astype(int)[1:-1]
    slices = np.atleast_1d(slices).copy()
    slices[slices < 0] += n_slices  # allow negative indexing
    if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \
            slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \
            slices.dtype.kind not in 'iu':
        raise ValueError('slices must be a sorted 1D array of int with unique '
                         'elements, at least one element, and no elements '
                         'greater than %d, got %s' % (n_slices - 1, slices))
    if flip_z < 0:
        # Proceed in the opposite order to maintain left-to-right / orientation
        slices = slices[::-1]
    # create of list of surfaces
    surfs = list()
    for file_name, color in surfaces:
        surf = dict()
        surf['rr'], surf['tris'] = read_surface(file_name)
        # move surface to voxel coordinate system
        surf['rr'] = apply_trans(mri_vox_t, surf['rr'])
        surfs.append((surf, color))
    sources = list()
    if src is not None:
        _ensure_src(src, extra=' or None')
        # Eventually we can relax this by allowing ``trans`` if need be
        if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:
            raise ValueError(
                'Source space must be in MRI coordinates, got '
                f'{_frame_to_str[src[0]["coord_frame"]]}')
        for src_ in src:
            points = src_['rr'][src_['inuse'].astype(bool)]
            # source points are in m; the MRI transform was read in mm
            sources.append(apply_trans(mri_vox_t, points * 1e3))
        sources = np.concatenate(sources, axis=0)
    if img_output:
        # single reusable axes: each slice is rendered, saved, then cleared
        n_col = n_axes = 1
        fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0))
        axs = [ax] * len(slices)
        w = fig.get_size_inches()[0]
        fig.set_size_inches([w, w / data.shape[x] * data.shape[y]])
        plt.close(fig)
    else:
        n_col = 4
        fig, axs, _, _ = _prepare_trellis(len(slices), n_col)
        n_axes = len(axs)
        fig.set_facecolor('k')
    # midpoints between consecutive slices: every source point is drawn on
    # the slice whose (lower, upper) interval contains it
    bounds = np.concatenate(
        [[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]])  # float
    slicer = [slice(None)] * 3
    ori_labels = dict(R='LR', A='PA', S='IS')
    xlabels, ylabels = ori_labels[order[0]], ori_labels[order[1]]
    path_effects = [patheffects.withStroke(linewidth=4, foreground="k",
                                           alpha=0.75)]
    out = list() if img_output else fig
    for ai, (ax, sl, lower, upper) in enumerate(zip(
            axs, slices, bounds[:-1], bounds[1:])):
        # adjust the orientations for good view
        slicer[z] = sl
        dat = data[tuple(slicer)]
        dat = dat.T if transpose else dat
        dat = dat[::flip_y, ::flip_x]
        # First plot the anatomical data
        if img_output:
            ax.clear()
        ax.imshow(dat, cmap=plt.cm.gray, origin='lower')
        ax.set_autoscale_on(False)
        ax.axis('off')
        ax.set_aspect('equal')  # XXX eventually could deal with zooms
        # and then plot the contours on top
        for surf, color in surfs:
            with warnings.catch_warnings(record=True):  # ignore contour warn
                warnings.simplefilter('ignore')
                ax.tricontour(flip_x * surf['rr'][:, x] + shift_x,
                              flip_y * surf['rr'][:, y] + shift_y,
                              surf['tris'], surf['rr'][:, z],
                              levels=[sl], colors=color, linewidths=1.0,
                              zorder=1)
        if len(sources):
            in_slice = (sources[:, z] >= lower) & (sources[:, z] < upper)
            ax.scatter(flip_x * sources[in_slice, x] + shift_x,
                       flip_y * sources[in_slice, y] + shift_y,
                       marker='.', color='#FF00FF', s=1, zorder=2)
        if show_indices:
            ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl),
                    color='w', fontsize='x-small', va='bottom', ha='left')
        # label the axes
        kwargs = dict(
            color='#66CCEE', fontsize='medium', path_effects=path_effects,
            family='monospace', clip_on=False, zorder=5, weight='bold')
        if show_orientation:
            if ai % n_col == 0:  # left
                ax.text(0, dat.shape[0] / 2., xlabels[0],
                        va='center', ha='left', **kwargs)
            if ai % n_col == n_col - 1 or ai == n_axes - 1:  # right
                ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1],
                        va='center', ha='right', **kwargs)
            if ai >= n_axes - n_col:  # bottom
                ax.text(dat.shape[1] / 2., 0, ylabels[0],
                        ha='center', va='bottom', **kwargs)
            if ai < n_col or n_col == 1:  # top
                ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1],
                        ha='center', va='top', **kwargs)
        if img_output:
            output = BytesIO()
            fig.savefig(output, bbox_inches='tight',
                        pad_inches=0, format='png')
            out.append(base64.b64encode(output.getvalue()).decode('ascii'))
    fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
                        hspace=0.)
    plt_show(show, fig=fig)
    return out
def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
             slices=None, brain_surfaces=None, src=None, show=True,
             show_indices=True, mri='T1.mgz', show_orientation=True):
    """Plot BEM contours on anatomical slices.

    Parameters
    ----------
    subject : str
        Subject name.
    subjects_dir : str | None
        Path to the SUBJECTS_DIR. If None, the path is obtained by using
        the environment variable SUBJECTS_DIR.
    orientation : str
        'coronal' or 'axial' or 'sagittal'.
    slices : list of int
        Slice indices.
    brain_surfaces : None | str | list of str
        One or more brain surface to plot (optional). Entries should correspond
        to files in the subject's ``surf`` directory (e.g. ``"white"``).
    src : None | SourceSpaces | str
        SourceSpaces instance or path to a source space to plot individual
        sources as scatter-plot. Sources will be shown on exactly one slice
        (whichever slice is closest to each source in the given orientation
        plane). Path can be absolute or relative to the subject's ``bem``
        folder.

        .. versionchanged:: 0.20
           All sources are shown on the nearest slice rather than some
           being omitted.
    show : bool
        Show figure if True.
    show_indices : bool
        Show slice indices if True.

        .. versionadded:: 0.20
    mri : str
        The name of the MRI to use. Can be a standard FreeSurfer MRI such as
        ``'T1.mgz'``, or a full path to a custom MRI file.

        .. versionadded:: 0.21
    show_orientation : str
        Show the orientation (L/R, P/A, I/S) of the data slices.

        .. versionadded:: 0.21

    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure.

    See Also
    --------
    mne.viz.plot_alignment

    Notes
    -----
    Images are plotted in MRI voxel coordinates.

    If ``src`` is not None, for a given slice index, all source points are
    shown that are halfway between the previous slice and the given slice,
    and halfway between the given slice and the next slice.
    For large slice decimations, this can
    make some source points appear outside the BEM contour, which is shown
    for the given slice index. For example, in the case where the single
    midpoint slice is used ``slices=[128]``, all source points will be shown
    on top of the midpoint MRI slice with the BEM boundary drawn for that
    slice.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    mri_fname = _check_mri(mri, subject, subjects_dir)
    # Get the BEM surface filenames
    bem_path = op.join(subjects_dir, subject, 'bem')
    if not op.isdir(bem_path):
        raise IOError('Subject bem directory "%s" does not exist' % bem_path)
    surfaces = _get_bem_plotting_surfaces(bem_path)
    if brain_surfaces is not None:
        if isinstance(brain_surfaces, str):
            brain_surfaces = (brain_surfaces,)
        for surf_name in brain_surfaces:
            # both hemispheres are required to exist
            for hemi in ('lh', 'rh'):
                surf_fname = op.join(subjects_dir, subject, 'surf',
                                     hemi + '.' + surf_name)
                if op.exists(surf_fname):
                    surfaces.append((surf_fname, '#00DD00'))
                else:
                    raise IOError("Surface %s does not exist." % surf_fname)
    if isinstance(src, str):
        # a path may be absolute or relative to the subject's bem folder
        if not op.exists(src):
            src_ = op.join(subjects_dir, subject, 'bem', src)
            if op.exists(src_):
                src = src_
            else:
                raise IOError("%s does not exist" % src)
        src = read_source_spaces(src)
    elif src is not None and not isinstance(src, SourceSpaces):
        raise TypeError("src needs to be None, str or SourceSpaces instance, "
                        "not %s" % repr(src))
    if len(surfaces) == 0:
        raise IOError('No surface files found. Surface files must end with '
                      'inner_skull.surf, outer_skull.surf or outer_skin.surf')
    # Plot the contours
    return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices,
                              show, show_indices, show_orientation)
def _get_bem_plotting_surfaces(bem_path):
surfaces = []
for surf_name, color in (('*inner_skull', '#FF0000'),
('*outer_skull', '#FFFF00'),
('*outer_skin', '#FFAA80')):
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
logger.info("Using surface: %s" % surf_fname)
surfaces.append((surf_fname, color))
return surfaces
@verbose
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
                axes=None, equal_spacing=True, show=True, on_missing='raise',
                verbose=None):
    """Plot events to get a visual display of the paradigm.

    Parameters
    ----------
    events : array, shape (n_events, 3)
        The events.
    sfreq : float | None
        The sample frequency. If None, data will be displayed in samples (not
        seconds).
    first_samp : int
        The index of the first sample. Recordings made on Neuromag systems
        number samples relative to the system start (not relative to the
        beginning of the recording). In such cases the ``raw.first_samp``
        attribute can be passed here. Default is 0.
    color : dict | None
        Dictionary of event_id integers as keys and colors as values. If None,
        colors are automatically drawn from a default list (cycled through if
        number of events longer than list of default colors). Color can be any
        valid :doc:`matplotlib color <tutorials/colors/colors>`.
    event_id : dict | None
        Dictionary of event labels (e.g. 'aud_l') as keys and their associated
        event_id values. Labels are used to plot a legend. If None, no legend
        is drawn.
    axes : instance of Axes
        The subplot handle.
    equal_spacing : bool
        Use equal spacing between events in y-axis.
    show : bool
        Show figure if True.
    %(on_missing_events)s
    %(verbose)s

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object containing the plot.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    if sfreq is None:
        sfreq = 1.0
        xlabel = 'Samples'
    else:
        xlabel = 'Time (s)'
    events = np.asarray(events)
    if len(events) == 0:
        raise ValueError('No events in events array, cannot plot.')
    unique_events = np.unique(events[:, 2])
    if event_id is not None:
        # get labels and unique event ids from event_id dict,
        # sorted by value
        event_id_rev = {v: k for k, v in event_id.items()}
        conditions, unique_events_id = zip(*sorted(event_id.items(),
                                                   key=lambda x: x[1]))
        # drop event_id entries that never occur in the events array,
        # handling them per the on_missing policy
        keep = np.ones(len(unique_events_id), bool)
        for ii, this_event in enumerate(unique_events_id):
            if this_event not in unique_events:
                msg = f'{this_event} from event_id is not present in events.'
                _on_missing(on_missing, msg)
                keep[ii] = False
        conditions = [cond for cond, k in zip(conditions, keep) if k]
        unique_events_id = [id_ for id_, k in zip(unique_events_id, keep) if k]
        if len(unique_events_id) == 0:
            raise RuntimeError('No usable event IDs found')
        # conversely, warn about events that event_id does not describe
        for this_event in unique_events:
            if this_event not in unique_events_id:
                warn('event %s missing from event_id will be ignored'
                     % this_event)
    else:
        unique_events_id = unique_events
    color = _handle_event_colors(color, unique_events, event_id)
    import matplotlib.pyplot as plt
    fig = None
    if axes is None:
        fig = plt.figure()
    ax = axes if axes else plt.gca()
    unique_events_id = np.array(unique_events_id)
    min_event = np.min(unique_events_id)
    max_event = np.max(unique_events_id)
    max_x = (events[np.in1d(events[:, 2], unique_events_id), 0].max() -
             first_samp) / sfreq
    handles, labels = list(), list()
    for idx, ev in enumerate(unique_events_id):
        ev_mask = events[:, 2] == ev
        count = ev_mask.sum()
        if count == 0:
            continue
        # one horizontal row per event type (or at the raw id value)
        y = np.full(count, idx + 1 if equal_spacing else events[ev_mask, 2][0])
        if event_id is not None:
            event_label = '%s (%s)' % (event_id_rev[ev], count)
        else:
            event_label = 'N=%d' % (count,)
        labels.append(event_label)
        kwargs = {}
        if ev in color:
            kwargs['color'] = color[ev]
        handles.append(
            ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
                    y, '.', clip_on=False, **kwargs)[0])
    if equal_spacing:
        ax.set_ylim(0, unique_events_id.size + 1)
        ax.set_yticks(1 + np.arange(unique_events_id.size))
        ax.set_yticklabels(unique_events_id)
    else:
        ax.set_ylim([min_event - 1, max_event + 1])
    ax.set(xlabel=xlabel, ylabel='Events id', xlim=[0, max_x])
    ax.grid(True)
    fig = fig if fig is not None else plt.gcf()
    # reverse order so that the highest numbers are at the top
    # (match plot order)
    handles, labels = handles[::-1], labels[::-1]
    # shrink the axes to leave room for the legend on the right
    box = ax.get_position()
    factor = 0.8 if event_id is not None else 0.9
    ax.set_position([box.x0, box.y0, box.width * factor, box.height])
    ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5),
              fontsize='small')
    fig.canvas.draw()
    plt_show(show)
    return fig
def _get_presser(fig):
    """Return the registered 'partial' button-press callback of *fig*."""
    import matplotlib
    registry = fig.canvas.callbacks.callbacks['button_press_event']
    is_mpl3 = LooseVersion(matplotlib.__version__) >= '3'
    found = None
    for entry in registry.values():
        # mpl >= 3 stores weak references that must be called to resolve;
        # older versions store proxy objects exposing ``.func``
        candidate = entry() if is_mpl3 else entry.func
        if candidate.__class__.__name__ == 'partial':
            found = candidate
            break
    assert found is not None
    return found
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
    """Plot the amplitude time course of each dipole.

    Parameters
    ----------
    dipoles : list of instance of Dipole
        The dipoles whose amplitudes should be shown.
    colors : list of color | None
        Color to plot with each dipole. If None default colors are used.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object containing the plot.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    import matplotlib.pyplot as plt
    color_iter = cycle(_get_color_list()) if colors is None else colors
    fig, ax = plt.subplots(1, 1)
    t_min, t_max = np.inf, -np.inf
    for dip, color in zip(dipoles, color_iter):
        # amplitudes are stored in Am; displayed in nAm
        ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5)
        t_min = min(t_min, dip.times[0])
        t_max = max(t_max, dip.times[-1])
    ax.set(xlim=[t_min, t_max], xlabel='Time (s)', ylabel='Amplitude (nAm)')
    if show:
        fig.show(warn=False)
    return fig
def adjust_axes(axes, remove_spines=('top', 'right'), grid=True):
    """Adjust some properties of axes.

    Parameters
    ----------
    axes : list
        List of axes to process (a single axes instance is also accepted).
    remove_spines : list of str
        Which axis spines to remove.
    grid : bool
        Turn grid on (True) or off (False).
    """
    if not isinstance(axes, (list, tuple, np.ndarray)):
        axes = [axes]
    for ax in axes:
        if grid:
            ax.grid(zorder=0)
        for spine in remove_spines:
            ax.spines[spine].set_visible(False)
def _filter_ticks(lims, fscale):
"""Create approximately spaced ticks between lims."""
if fscale == 'linear':
return None, None # let matplotlib handle it
lims = np.array(lims)
ticks = list()
if lims[1] > 20 * lims[0]:
base = np.array([1, 2, 4])
else:
base = np.arange(1, 11)
for exp in range(int(np.floor(np.log10(lims[0]))),
int(np.floor(np.log10(lims[1]))) + 1):
ticks += (base * (10 ** exp)).tolist()
ticks = np.array(ticks)
ticks = ticks[(ticks >= lims[0]) & (ticks <= lims[1])]
ticklabels = [('%g' if t < 1 else '%d') % t for t in ticks]
return ticks, ticklabels
def _get_flim(flim, fscale, freq, sfreq=None):
"""Get reasonable frequency limits."""
if flim is None:
if freq is None:
flim = [0.1 if fscale == 'log' else 0., sfreq / 2.]
else:
if fscale == 'linear':
flim = [freq[0]]
else:
flim = [freq[0] if freq[0] > 0 else 0.1 * freq[1]]
flim += [freq[-1]]
if fscale == 'log':
if flim[0] <= 0:
raise ValueError('flim[0] must be positive, got %s' % flim[0])
elif flim[0] < 0:
raise ValueError('flim[0] must be non-negative, got %s' % flim[0])
return flim
def _check_fscale(fscale):
"""Check for valid fscale."""
if not isinstance(fscale, str) or fscale not in ('log', 'linear'):
raise ValueError('fscale must be "log" or "linear", got %s'
% (fscale,))
# Default amplitude (dB) limits shared by plot_filter and plot_ideal_filter
_DEFAULT_ALIM = (-80, 10)
def plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4',
                flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True,
                compensate=False, plot=('time', 'magnitude', 'delay'),
                axes=None):
    """Plot properties of a filter.

    Parameters
    ----------
    h : dict or ndarray
        An IIR dict or 1D ndarray of coefficients (for FIR filter).
    sfreq : float
        Sample rate of the data (Hz).
    freq : array-like or None
        The ideal response frequencies to plot (must be in ascending order).
        If None (default), do not plot the ideal response.
    gain : array-like or None
        The ideal response gains to plot.
        If None (default), do not plot the ideal response.
    title : str | None
        The title to use. If None (default), determine the title based
        on the type of the system.
    color : color object
        The color to use (default '#1f77b4').
    flim : tuple or None
        If not None, the x-axis frequency limits (Hz) to use.
        If None, freq will be used. If None (default) and freq is None,
        ``(0.1, sfreq / 2.)`` will be used.
    fscale : str
        Frequency scaling to use, can be "log" (default) or "linear".
    alim : tuple
        The y-axis amplitude limits (dB) to use (default: (-80, 10)).
    show : bool
        Show figure if True (default).
    compensate : bool
        If True, compensate for the filter delay (phase will not be shown).

        - For linear-phase FIR filters, this visualizes the filter
          coefficients assuming that the output will be shifted by ``N // 2``.
        - For IIR filters, this changes the filter coefficient display
          by filtering backward and forward, and the frequency response
          by squaring it.

        .. versionadded:: 0.18
    plot : list | tuple | str
        A list of the requested plots from ``time``, ``magnitude`` and
        ``delay``. Default is to plot all three filter properties
        ('time', 'magnitude', 'delay').

        .. versionadded:: 0.21.0
    axes : instance of Axes | list | None
        The axes to plot to. If list, the list must be a list of Axes of
        the same length as the number of requested plot types. If instance of
        Axes, there must be only one filter property plotted.
        Defaults to ``None``.

        .. versionadded:: 0.21.0

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure containing the plots.

    See Also
    --------
    mne.filter.create_filter
    plot_ideal_filter

    Notes
    -----
    .. versionadded:: 0.14
    """
    from scipy.signal import (
        freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt)
    import matplotlib.pyplot as plt
    sfreq = float(sfreq)
    _check_option('fscale', fscale, ['log', 'linear'])
    if isinstance(plot, str):
        plot = [plot]
    for xi, x in enumerate(plot):
        _check_option('plot[%d]' % xi, x, ('magnitude', 'delay', 'time'))
    flim = _get_flim(flim, fscale, freq, sfreq)
    if fscale == 'log':
        omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)
    else:
        omega = np.linspace(flim[0], flim[1], 1000)
    xticks, xticklabels = _filter_ticks(flim, fscale)
    # convert Hz to normalized angular frequency (radians/sample)
    omega /= sfreq / (2 * np.pi)
    if isinstance(h, dict):  # IIR filter (dict with 'sos' or 'b'/'a' keys)
        if 'sos' in h:  # second-order sections
            H = np.ones(len(omega), np.complex128)
            gd = np.zeros(len(omega))
            for section in h['sos']:
                this_H = freqz(section[:3], section[3:], omega)[1]
                H *= this_H
                if compensate:
                    H *= this_H.conj()  # time reversal is freq conj
                else:
                    # Assume the forward-backward delay zeros out, which it
                    # mostly should
                    with warnings.catch_warnings(record=True):  # singular GD
                        warnings.simplefilter('ignore')
                        gd += group_delay((section[:3], section[3:]), omega)[1]
            n = estimate_ringing_samples(h['sos'])
            delta = np.zeros(n)
            delta[0] = 1
            if compensate:
                delta = np.pad(delta, [(n - 1, 0)], 'constant')
                func = sosfiltfilt
                gd += (len(delta) - 1) // 2
            else:
                func = sosfilt
            h = func(h['sos'], delta)
        else:
            H = freqz(h['b'], h['a'], omega)[1]
            if compensate:
                H *= H.conj()
            with warnings.catch_warnings(record=True):  # singular GD
                warnings.simplefilter('ignore')
                gd = group_delay((h['b'], h['a']), omega)[1]
                if compensate:
                    # group_delay takes the system as a single (b, a) tuple;
                    # passing the conjugate arrays separately made scipy
                    # misread a.conj() as the frequency argument
                    gd += group_delay((h['b'].conj(), h['a'].conj()),
                                      omega)[1]
            n = estimate_ringing_samples((h['b'], h['a']))
            delta = np.zeros(n)
            delta[0] = 1
            if compensate:
                delta = np.pad(delta, [(n - 1, 0)], 'constant')
                func = filtfilt
            else:
                func = lfilter
            h = func(h['b'], h['a'], delta)
        if title is None:
            title = 'SOS (IIR) filter'
        if compensate:
            title += ' (forward-backward)'
    else:
        H = freqz(h, worN=omega)[1]
        with warnings.catch_warnings(record=True):  # singular GD
            warnings.simplefilter('ignore')
            gd = group_delay((h, [1.]), omega)[1]
        title = 'FIR filter' if title is None else title
        if compensate:
            title += ' (delay-compensated)'
    fig = None
    if axes is None:
        fig, axes = plt.subplots(len(plot), 1)
    if isinstance(axes, plt.Axes):
        axes = [axes]
    elif isinstance(axes, np.ndarray):
        axes = list(axes)
    if fig is None:
        fig = axes[0].get_figure()
    if len(axes) != len(plot):
        raise ValueError('Length of axes (%d) must be the same as number of '
                         'requested filter properties (%d)'
                         % (len(axes), len(plot)))
    t = np.arange(len(h))
    dlim = np.abs(t).max() / 2.
    dlim = [-dlim, dlim]
    if compensate:
        # center the impulse response / group delay around zero
        n_shift = (len(h) - 1) // 2
        t -= n_shift
        assert t[0] == -t[-1]
        gd -= n_shift
    t = t / sfreq
    gd = gd / sfreq
    f = omega * sfreq / (2 * np.pi)
    # on a log scale the first (DC) sample cannot be drawn
    sl = slice(0 if fscale == 'linear' else 1, None, None)
    # power in dB, floored to avoid log10(0)
    mag = 10 * np.log10(np.maximum((H * H.conj()).real, 1e-20))
    if 'time' in plot:
        ax_time_idx = np.where([p == 'time' for p in plot])[0][0]
        axes[ax_time_idx].plot(t, h, color=color)
        axes[ax_time_idx].set(xlim=t[[0, -1]], xlabel='Time (s)',
                              ylabel='Amplitude', title=title)
    # Magnitude
    if 'magnitude' in plot:
        ax_mag_idx = np.where([p == 'magnitude' for p in plot])[0][0]
        axes[ax_mag_idx].plot(f[sl], mag[sl], color=color,
                              linewidth=2, zorder=4)
        if freq is not None and gain is not None:
            plot_ideal_filter(freq, gain, axes[ax_mag_idx],
                              fscale=fscale, show=False)
        axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)
        if xticks is not None:
            axes[ax_mag_idx].set(xticks=xticks)
            axes[ax_mag_idx].set(xticklabels=xticklabels)
        axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)',
                             ylabel='Amplitude (dB)')
    # Delay
    if 'delay' in plot:
        ax_delay_idx = np.where([p == 'delay' for p in plot])[0][0]
        axes[ax_delay_idx].plot(f[sl], gd[sl], color=color,
                                linewidth=2, zorder=4)
        # shade nulled regions
        for start, stop in zip(*_mask_to_onsets_offsets(mag <= -39.9)):
            axes[ax_delay_idx].axvspan(f[start], f[stop - 1],
                                       facecolor='k', alpha=0.05,
                                       zorder=5)
        axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)',
                               xlabel='Frequency (Hz)',
                               xscale=fscale)
        if xticks is not None:
            axes[ax_delay_idx].set(xticks=xticks)
            axes[ax_delay_idx].set(xticklabels=xticklabels)
        axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)',
                               ylabel='Delay (s)')
    adjust_axes(axes)
    tight_layout()
    plt_show(show)
    return fig
def plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log',
                      alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--',
                      show=True):
    """Plot an ideal filter response.

    Parameters
    ----------
    freq : array-like
        The ideal response frequencies to plot (must be in ascending order).
    gain : array-like or None
        The ideal response gains to plot.
    axes : instance of Axes | None
        The subplot handle. With None (default), axes are created.
    title : str
        The title to use, (default: '').
    flim : tuple or None
        If not None, the x-axis frequency limits (Hz) to use.
        If None (default), freq used.
    fscale : str
        Frequency scaling to use, can be "log" (default) or "linear".
    alim : tuple
        If not None (default), the y-axis limits (dB) to use.
    color : color object
        The color to use (default: 'r').
    alpha : float
        The alpha to use (default: 0.5).
    linestyle : str
        The line style to use (default: '--').
    show : bool
        Show figure if True (default).

    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure.

    See Also
    --------
    plot_filter

    Notes
    -----
    .. versionadded:: 0.14

    Examples
    --------
    Plot a simple ideal band-pass filter::

        >>> from mne.viz import plot_ideal_filter
        >>> freq = [0, 1, 40, 50]
        >>> gain = [0, 1, 1, 0]
        >>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS
        <...Figure...>
    """
    import matplotlib.pyplot as plt
    my_freq, my_gain = list(), list()
    if freq[0] != 0:
        raise ValueError('freq should start with DC (zero) and end with '
                         'Nyquist, but got %s for DC' % (freq[0],))
    # float dtype: assigning the fractional replacement value below must not
    # be truncated for integer input (e.g. freq=[0, 1, 40, 50])
    freq = np.array(freq, float)
    # deal with semilogx problems @ x=0
    _check_option('fscale', fscale, ['log', 'linear'])
    if fscale == 'log':
        freq[0] = 0.1 * freq[1] if flim is None else min(flim[0], freq[1])
    flim = _get_flim(flim, fscale, freq)
    transitions = list()
    for ii in range(len(freq)):
        if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:
            # sample transition bands so they render as sloped segments
            transitions += [[freq[ii], freq[ii + 1]]]
            my_freq += np.linspace(freq[ii], freq[ii + 1], 20,
                                   endpoint=False).tolist()
            my_gain += np.linspace(gain[ii], gain[ii + 1], 20,
                                   endpoint=False).tolist()
        else:
            my_freq.append(freq[ii])
            my_gain.append(gain[ii])
    # convert to dB, clipped at the bottom of the amplitude limits
    my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.)))
    if axes is None:
        axes = plt.subplots(1)[1]
    for transition in transitions:
        axes.axvspan(*transition, color=color, alpha=0.1)
    # honor the documented ``alpha`` parameter (was hard-coded to 0.5)
    axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=alpha,
              linewidth=4, zorder=3)
    xticks, xticklabels = _filter_ticks(flim, fscale)
    axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)',
             xscale=fscale)
    if xticks is not None:
        axes.set(xticks=xticks)
        axes.set(xticklabels=xticklabels)
    axes.set(xlim=flim)
    if title:
        axes.set(title=title)
    adjust_axes(axes)
    tight_layout()
    plt_show(show)
    return axes.figure
def _handle_event_colors(color_dict, unique_events, event_id):
    """Map each event integer to a color, falling back to defaults."""
    palette = _get_color_list()
    colors = dict(zip(sorted(unique_events), cycle(palette)))
    if color_dict is None:
        # warn if not enough colors
        if len(unique_events) > len(palette):
            warn('More events than default colors available. You should pass '
                 'a list of unique colors.')
        return colors
    custom_colors = dict()
    for key, color in color_dict.items():
        if key in unique_events:  # key was a valid event integer
            custom_colors[key] = color
        elif key in event_id:  # key was an event label
            custom_colors[event_id[key]] = color
        else:  # key not a valid event, warn and ignore
            warn('Event ID %s is in the color dict but is not '
                 'present in events or event_id.' % str(key))
    # warn if color_dict is missing any entries
    unassigned = sorted(set(unique_events) - set(custom_colors))
    if len(unassigned):
        unassigned_str = ', '.join(str(e) for e in unassigned)
        warn('Color was not assigned for event%s %s. Default colors will '
             'be used.' % (_pl(unassigned), unassigned_str))
    colors.update(custom_colors)
    return colors
def plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None,
             n_cols=None, show=True):
    """Plot CSD matrices.

    A sub-plot is created for each frequency. If an info object is passed to
    the function, different channel types are plotted in different figures.

    Parameters
    ----------
    csd : instance of CrossSpectralDensity
        The CSD matrix to plot.
    info : instance of Info | None
        To split the figure by channel-type, provide the measurement info.
        By default, the CSD matrix is plotted as a whole.
    mode : 'csd' | 'coh'
        Whether to plot the cross-spectral density ('csd', the default), or
        the coherence ('coh') between the channels.
    colorbar : bool
        Whether to show a colorbar. Defaults to ``True``.
    cmap : str | None
        The matplotlib colormap to use. Defaults to None, which means the
        colormap will default to matplotlib's default.
    n_cols : int | None
        CSD matrices are plotted in a grid. This parameter controls how
        many matrix to plot side by side before starting a new row. By
        default, a number will be chosen to make the grid as square as
        possible.
    show : bool
        Whether to show the figure. Defaults to ``True``.

    Returns
    -------
    fig : list of Figure
        The figures created by this function.
    """
    import matplotlib.pyplot as plt
    if mode not in ['csd', 'coh']:
        raise ValueError('"mode" should be either "csd" or "coh".')
    if info is not None:
        # split channels by type; each non-empty type gets its own figure
        info_ch_names = info['ch_names']
        sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
                             exclude=[])
        sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
                             exclude=[])
        sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
                              exclude=[])
        idx_eeg = [csd.ch_names.index(info_ch_names[c])
                   for c in sel_eeg if info_ch_names[c] in csd.ch_names]
        idx_mag = [csd.ch_names.index(info_ch_names[c])
                   for c in sel_mag if info_ch_names[c] in csd.ch_names]
        idx_grad = [csd.ch_names.index(info_ch_names[c])
                    for c in sel_grad if info_ch_names[c] in csd.ch_names]
        indices = [idx_eeg, idx_mag, idx_grad]
        titles = ['EEG', 'Magnetometers', 'Gradiometers']
        if mode == 'csd':
            # The units in which to plot the CSD
            units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')
            scalings = dict(eeg=1e12, grad=1e26, mag=1e30)
    else:
        # no info: plot the whole matrix in one figure
        indices = [np.arange(len(csd.ch_names))]
        if mode == 'csd':
            titles = ['Cross-spectral density']
            # Units and scaling unknown
            units = dict()
            scalings = dict()
        elif mode == 'coh':
            titles = ['Coherence']
    n_freqs = len(csd.frequencies)
    if n_cols is None:
        # near-square grid by default
        n_cols = int(np.ceil(np.sqrt(n_freqs)))
    n_rows = int(np.ceil(n_freqs / float(n_cols)))
    figs = []
    for ind, title, ch_type in zip(indices, titles, ['eeg', 'mag', 'grad']):
        if len(ind) == 0:
            continue
        fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,
                                 figsize=(2 * n_cols + 1, 2.2 * n_rows))
        csd_mats = []
        for i in range(len(csd.frequencies)):
            cm = csd.get_data(index=i)[ind][:, ind]
            if mode == 'csd':
                cm = np.abs(cm) * scalings.get(ch_type, 1)
            elif mode == 'coh':
                # Compute coherence from the CSD matrix
                psd = np.diag(cm).real
                cm = np.abs(cm) ** 2 / psd[np.newaxis, :] / psd[:, np.newaxis]
            csd_mats.append(cm)
        # shared color scale across all frequencies of this figure
        vmax = np.max(csd_mats)
        for i, (freq, mat) in enumerate(zip(csd.frequencies, csd_mats)):
            ax = axes[i // n_cols][i % n_cols]
            im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0,
                           vmax=vmax)
            ax.set_xticks([])
            ax.set_yticks([])
            if csd._is_sum:
                # summed CSDs cover a band rather than a single frequency
                ax.set_title('%.1f-%.1f Hz.' % (np.min(freq),
                                                np.max(freq)))
            else:
                ax.set_title('%.1f Hz.' % freq)
        plt.suptitle(title)
        plt.subplots_adjust(top=0.8)
        if colorbar:
            cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])
            if mode == 'csd':
                label = u'CSD'
                if ch_type in units:
                    label += u' (%s)' % units[ch_type]
                cb.set_label(label)
            elif mode == 'coh':
                cb.set_label('Coherence')
        figs.append(fig)
    plt_show(show)
    return figs
|
import os
import copy
import pytest
import time
import shutil
import tempfile
import logging
from _pytest.logging import caplog as _caplog
from contextlib import suppress
from panoptes.utils.logging import logger
from panoptes.utils.database import PanDB
from panoptes.utils.config.client import get_config
from panoptes.utils.config.client import set_config
from panoptes.utils.config.server import config_server
# Doctest modules
import numpy as np
from matplotlib import pyplot as plt
# Database backends the test-suite knows how to exercise.
_all_databases = ['file', 'memory']
logger.enable('panoptes')
# Custom loguru level for harness chatter; numeric level 15 sits between
# DEBUG (10) and INFO (20).
logger.level("testing", no=15, icon="🤖", color="<YELLOW><black>")
# Log file lives under $PANLOG (falling back to the default panoptes log dir).
log_file_path = os.path.join(
    os.getenv('PANLOG', '/var/panoptes/logs'),
    'panoptes-testing.log'
)
log_fmt = "<lvl>{level:.1s}</lvl> " \
          "<light-blue>{time:MM-DD HH:mm:ss.ss!UTC}</>" \
          "<blue>({time:HH:mm:ss.ss})</> " \
          "| <c>{name} {function}:{line}</c> | " \
          "<lvl>{message}</lvl>\n"
startup_message = ' STARTING NEW PYTEST RUN '
logger.add(log_file_path,
           enqueue=True,  # multiprocessing
           format=log_fmt,
           colorize=True,
           backtrace=True,
           diagnose=True,
           catch=True,
           # Start new log file for each testing run.
           rotation=lambda msg, _: startup_message in msg,
           level='TRACE')
# Emitting the banner triggers the rotation lambda above, so each pytest run
# begins a fresh log file.
logger.log('testing', '*' * 25 + startup_message + '*' * 25)
def pytest_addoption(parser):
    """Register the PANOPTES-specific command line options with pytest."""
    db_names = ",".join(_all_databases) + ' (or all for all databases)'
    group = parser.getgroup("PANOPTES pytest options")
    group.addoption(
        "--astrometry", action="store_true", default=False,
        help="If tests that require solving should be run")
    group.addoption(
        "--theskyx", action="store_true", default=False,
        help="If running tests alongside a running TheSkyX program.")
    group.addoption(
        "--test-databases", nargs="+", default=['file'],
        help=("Test databases in the list. List items can include: " + db_names +
              ". Note that travis-ci will test all of them by default."))
@pytest.fixture(scope='session')
def db_name():
    """Database name shared by the whole test session."""
    name = 'panoptes_testing'
    return name
@pytest.fixture(scope='session')
def images_dir(tmpdir_factory):
    """Session-scoped temporary directory for image output."""
    return str(tmpdir_factory.mktemp('images'))
@pytest.fixture(scope='session')
def config_path():
    """Path to the testing config file, with ``$PANDIR`` expanded."""
    raw_path = '${PANDIR}/panoptes-utils/tests/panoptes_utils_testing.yaml'
    return os.path.expandvars(raw_path)
@pytest.fixture(scope='session', autouse=True)
def static_config_server(config_path, images_dir, db_name):
    """Start a session-wide config server and seed it with testing values.

    Runs automatically for the whole session; yields once the server answers
    queries and terminates the server process on teardown.
    """
    logger.log('testing', 'Starting static_config_server for testing session')
    proc = config_server(
        config_file=config_path,
        ignore_local=True,
        auto_save=False
    )
    logger.log('testing', f'static_config_server started with {proc.pid=}')
    # Give server time to start
    while get_config('name') is None:  # pragma: no cover
        logger.log('testing', f'Waiting for static_config_server {proc.pid=}, sleeping 1 second.')
        time.sleep(1)
    # BUG FIX: the original reused single quotes inside a single-quoted
    # f-string ({get_config('name')}), a SyntaxError before Python 3.12.
    logger.log('testing', f'Startup config_server name=[{get_config("name")}]')
    # Adjust various config items for testing
    unit_id = 'PAN000'
    logger.log('testing', f'Setting testing name and unit_id to {unit_id}')
    set_config('pan_id', unit_id)
    logger.log('testing', f'Setting testing database to {db_name}')
    set_config('db.name', db_name)
    fields_file = 'simulator.yaml'
    logger.log('testing', f'Setting testing scheduler fields_file to {fields_file}')
    set_config('scheduler.fields_file', fields_file)
    logger.log('testing', 'Setting temporary image directory for testing')
    set_config('directories.images', images_dir)
    yield
    logger.log('testing', f'Killing static_config_server started with PID={proc.pid}')
    proc.terminate()
@pytest.fixture(scope='function', params=_all_databases)
def db_type(request):
    """Parametrize over DB backends, wiping the testing database first."""
    requested = request.config.option.test_databases
    if not {request.param, 'all'} & set(requested):  # pragma: no cover
        pytest.skip(f"Skipping {request.param} DB, set --test-all-databases=True")
    PanDB.permanently_erase_database(
        request.param, 'panoptes_testing', really='Yes', dangerous='Totally')
    return request.param
@pytest.fixture(scope='function')
def db(db_type):
    """A connected ``PanDB`` instance for the current backend."""
    database = PanDB(db_type=db_type, db_name='panoptes_testing', connect=True)
    return database
@pytest.fixture(scope='function')
def save_environ():
    # Snapshot the process environment and restore it after the test.
    old_env = copy.deepcopy(os.environ)
    yield
    # NOTE(review): this rebinds the ``os.environ`` module attribute rather
    # than mutating the mapping in place — code holding a reference to the
    # old mapping keeps the mutated copy; confirm that is acceptable.
    os.environ = old_env
@pytest.fixture(scope='session')
def data_dir():
    """Directory containing the static test data files."""
    path = '/var/panoptes/panoptes-utils/tests/data'
    return os.path.expandvars(path)
@pytest.fixture(scope='function')
def unsolved_fits_file(data_dir):
    """Yield a throwaway copy of ``unsolved.fits`` in a temp directory."""
    source = os.path.join(data_dir, 'unsolved.fits')
    with tempfile.TemporaryDirectory() as scratch:
        yield shutil.copy2(source, scratch)
@pytest.fixture(scope='function')
def solved_fits_file(data_dir):
    """Yield a throwaway copy of ``solved.fits.fz`` in a temp directory."""
    source = os.path.join(data_dir, 'solved.fits.fz')
    with tempfile.TemporaryDirectory() as scratch:
        yield shutil.copy2(source, scratch)
@pytest.fixture(scope='function')
def tiny_fits_file(data_dir):
    """Yield a throwaway copy of ``tiny.fits`` in a temp directory."""
    source = os.path.join(data_dir, 'tiny.fits')
    with tempfile.TemporaryDirectory() as scratch:
        yield shutil.copy2(source, scratch)
@pytest.fixture(scope='function')
def noheader_fits_file(data_dir):
    """Yield a throwaway copy of ``noheader.fits`` in a temp directory."""
    source = os.path.join(data_dir, 'noheader.fits')
    with tempfile.TemporaryDirectory() as scratch:
        yield shutil.copy2(source, scratch)
@pytest.fixture(scope='function')
def cr2_file(data_dir):
    """Yield a disposable copy of the sample CR2, skipping when absent."""
    source = os.path.join(data_dir, 'canon.cr2')
    if not os.path.exists(source):
        pytest.skip("No CR2 file found, skipping test.")
    with tempfile.TemporaryDirectory() as scratch:
        yield shutil.copy2(source, scratch)
@pytest.fixture(autouse=True)
def add_doctest_dependencies(doctest_namespace):
    """Expose numpy and pyplot to every collected doctest."""
    doctest_namespace.update(np=np, plt=plt)
@pytest.fixture
def caplog(_caplog):
    """Bridge loguru output into stdlib logging so pytest's ``caplog``
    captures panoptes records."""
    class _ForwardToStdlib(logging.Handler):
        def emit(self, record):
            logging.getLogger(record.name).handle(record)

    logger.enable('panoptes')
    sink_id = logger.add(_ForwardToStdlib(), format="{message}")
    yield _caplog
    with suppress(ValueError):
        logger.remove(sink_id)
| import os
import copy
import pytest
import time
import shutil
import tempfile
import logging
from _pytest.logging import caplog as _caplog
from contextlib import suppress
from panoptes.utils.logging import logger
from panoptes.utils.database import PanDB
from panoptes.utils.config.client import get_config
from panoptes.utils.config.client import set_config
from panoptes.utils.config.server import config_server
# Doctest modules
import numpy as np
from matplotlib import pyplot as plt
# Database backends the test-suite knows how to exercise.
_all_databases = ['file', 'memory']
logger.enable('panoptes')
# Custom loguru level for harness chatter (numeric level 15, between DEBUG and INFO).
logger.level("testing", no=15, icon="🤖", color="<YELLOW><black>")
log_file_path = os.path.join(
    os.getenv('PANLOG', '/var/panoptes/logs'),
    'panoptes-testing.log'
)
log_fmt = "<lvl>{level:.1s}</lvl> " \
          "<light-blue>{time:MM-DD HH:mm:ss.ss!UTC}</>" \
          "<blue>({time:HH:mm:ss.ss})</> " \
          "| <c>{name} {function}:{line}</c> | " \
          "<lvl>{message}</lvl>\n"
startup_message = ' STARTING NEW PYTEST RUN '
logger.add(log_file_path,
           enqueue=True,  # multiprocessing
           format=log_fmt,
           colorize=True,
           backtrace=True,
           diagnose=True,
           catch=True,
           # Start new log file for each testing run.
           rotation=lambda msg, _: startup_message in msg,
           level='TRACE')
# Logging this banner matches the rotation lambda above, starting a fresh file.
logger.log('testing', '*' * 25 + startup_message + '*' * 25)
def pytest_addoption(parser):
    """Register the PANOPTES-specific command line options with pytest."""
    db_names = ",".join(_all_databases) + ' (or all for all databases)'
    group = parser.getgroup("PANOPTES pytest options")
    group.addoption(
        "--astrometry",
        action="store_true",
        default=False,
        help="If tests that require solving should be run")
    group.addoption(
        "--theskyx",
        action="store_true",
        default=False,
        help="If running tests alongside a running TheSkyX program.")
    group.addoption(
        "--test-databases",
        nargs="+",
        default=['file'],
        help=("Test databases in the list. List items can include: " + db_names +
              ". Note that travis-ci will test all of them by default."))
@pytest.fixture(scope='session')
def db_name():
    """Database name shared by the whole test session."""
    return 'panoptes_testing'
@pytest.fixture(scope='session')
def images_dir(tmpdir_factory):
    """Session-scoped temporary directory for image output."""
    directory = tmpdir_factory.mktemp('images')
    return str(directory)
@pytest.fixture(scope='session')
def config_path():
    """Path to the testing config file, with ``$PANDIR`` expanded."""
    return os.path.expandvars('${PANDIR}/panoptes-utils/tests/panoptes_utils_testing.yaml')
@pytest.fixture(scope='session', autouse=True)
def static_config_server(config_path, images_dir, db_name):
    """Start a session-wide config server and seed it with testing values.

    Runs automatically for the whole session; yields once the server answers
    queries and terminates the server process on teardown.
    """
    # Dropped pointless f-prefixes on strings without placeholders (flake8 F541).
    logger.log('testing', 'Starting static_config_server for testing session')
    proc = config_server(
        config_file=config_path,
        ignore_local=True,
        auto_save=False
    )
    logger.log('testing', f'static_config_server started with {proc.pid=}')
    # Give server time to start
    while get_config('name') is None:  # pragma: no cover
        logger.log('testing', f'Waiting for static_config_server {proc.pid=}, sleeping 1 second.')
        time.sleep(1)
    logger.log('testing', f'Startup config_server name=[{get_config("name")}]')
    # Adjust various config items for testing
    unit_id = 'PAN000'
    logger.log('testing', f'Setting testing name and unit_id to {unit_id}')
    set_config('pan_id', unit_id)
    logger.log('testing', f'Setting testing database to {db_name}')
    set_config('db.name', db_name)
    fields_file = 'simulator.yaml'
    logger.log('testing', f'Setting testing scheduler fields_file to {fields_file}')
    set_config('scheduler.fields_file', fields_file)
    logger.log('testing', 'Setting temporary image directory for testing')
    set_config('directories.images', images_dir)
    yield
    logger.log('testing', f'Killing static_config_server started with PID={proc.pid}')
    proc.terminate()
@pytest.fixture(scope='function', params=_all_databases)
def db_type(request):
    """Yield each configured DB backend after wiping its testing database."""
    db_list = request.config.option.test_databases
    if request.param not in db_list and 'all' not in db_list: # pragma: no cover
        pytest.skip(f"Skipping {request.param} DB, set --test-all-databases=True")
    # Destructive reset so each test starts from an empty database.
    PanDB.permanently_erase_database(
        request.param, 'panoptes_testing', really='Yes', dangerous='Totally')
    return request.param
@pytest.fixture(scope='function')
def db(db_type):
    """A connected ``PanDB`` instance for the currently parametrized backend."""
    return PanDB(db_type=db_type, db_name='panoptes_testing', connect=True)
@pytest.fixture(scope='function')
def save_environ():
    """Snapshot ``os.environ`` and restore it after the test."""
    old_env = copy.deepcopy(os.environ)
    yield
    os.environ = old_env
@pytest.fixture(scope='session')
def data_dir():
    """Directory containing the static test data files."""
    return os.path.expandvars('/var/panoptes/panoptes-utils/tests/data')
@pytest.fixture(scope='function')
def unsolved_fits_file(data_dir):
    """Yield a throwaway copy of ``unsolved.fits`` in a temp directory."""
    orig_file = os.path.join(data_dir, 'unsolved.fits')
    with tempfile.TemporaryDirectory() as tmpdirname:
        copy_file = shutil.copy2(orig_file, tmpdirname)
        yield copy_file
@pytest.fixture(scope='function')
def solved_fits_file(data_dir):
    """Yield a throwaway copy of ``solved.fits.fz`` in a temp directory."""
    orig_file = os.path.join(data_dir, 'solved.fits.fz')
    with tempfile.TemporaryDirectory() as tmpdirname:
        copy_file = shutil.copy2(orig_file, tmpdirname)
        yield copy_file
@pytest.fixture(scope='function')
def tiny_fits_file(data_dir):
    """Yield a throwaway copy of ``tiny.fits`` in a temp directory."""
    orig_file = os.path.join(data_dir, 'tiny.fits')
    with tempfile.TemporaryDirectory() as tmpdirname:
        copy_file = shutil.copy2(orig_file, tmpdirname)
        yield copy_file
@pytest.fixture(scope='function')
def noheader_fits_file(data_dir):
    """Yield a throwaway copy of ``noheader.fits`` in a temp directory."""
    orig_file = os.path.join(data_dir, 'noheader.fits')
    with tempfile.TemporaryDirectory() as tmpdirname:
        copy_file = shutil.copy2(orig_file, tmpdirname)
        yield copy_file
@pytest.fixture(scope='function')
def cr2_file(data_dir):
    """Yield a disposable copy of the sample CR2, skipping when absent."""
    cr2_path = os.path.join(data_dir, 'canon.cr2')
    if not os.path.exists(cr2_path):
        pytest.skip("No CR2 file found, skipping test.")
    with tempfile.TemporaryDirectory() as tmpdirname:
        copy_file = shutil.copy2(cr2_path, tmpdirname)
        yield copy_file
@pytest.fixture(autouse=True)
def add_doctest_dependencies(doctest_namespace):
    """Expose numpy and pyplot to every collected doctest."""
    doctest_namespace['np'] = np
    doctest_namespace['plt'] = plt
@pytest.fixture
def caplog(_caplog):
    """Bridge loguru output into stdlib logging so pytest's ``caplog``
    captures panoptes records."""
    class PropogateHandler(logging.Handler):
        def emit(self, record):
            # Re-dispatch each loguru record through the stdlib logging tree.
            logging.getLogger(record.name).handle(record)
    logger.enable('panoptes')
    handler_id = logger.add(PropogateHandler(), format="{message}")
    yield _caplog
    # The sink may already be gone on teardown; ValueError is expected then.
    with suppress(ValueError):
        logger.remove(handler_id)
|
from datetime import datetime
from typing import Dict, List, Union
import numpy as np
from pydantic import Field, PyObject
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.sets import VaspInputSet
from emmet.core.settings import EmmetSettings
from emmet.core.base import EmmetBaseModel
from emmet.core.mpid import MPID
from emmet.core.utils import DocEnum
from emmet.core.vasp.task import TaskDocument
# Module-wide Emmet settings (tolerances, default input sets), read once at import.
SETTINGS = EmmetSettings()
class DeprecationMessage(DocEnum):
    """Coded reasons a VASP task can be deprecated or flagged invalid."""
    MANUAL = "M", "manual deprecation"
    KPTS = "C001", "Too few KPoints"
    KSPACING = "C002", "KSpacing not high enough"
    # NOTE(review): ENCUT reuses code "C002" (same as KSPACING); presumably it
    # was meant to get its own code — confirm before keying on codes downstream.
    ENCUT = "C002", "ENCUT too low"
    FORCES = "C003", "Forces too large"
    CONVERGENCE = "E001", "Calculation did not converge"
    MAX_SCF = "E002", "Max SCF gradient too large"
    LDAU = "I001", "LDAU Parameters don't match the inputset"
class ValidationDoc(EmmetBaseModel):
    """
    Validation document for a VASP calculation
    """
    task_id: MPID = Field(..., description="The task_id for this validation document")
    valid: bool = Field(False, description="Whether this task is valid or not")
    last_updated: datetime = Field(
        description="Last updated date for this document",
        default_factory=datetime.utcnow,
    )
    reasons: List[Union[DeprecationMessage, str]] = Field(
        None, description="List of deprecation tags detailing why this task isn't valid"
    )
    warnings: List[str] = Field(
        [], description="List of potential warnings about this calculation"
    )
    # Typo fix: "Dictioary" -> "Dictionary" in the field description.
    data: Dict = Field(
        description="Dictionary of data used to perform validation."
        " Useful for post-mortem analysis"
    )
    class Config:
        extra = "allow"
    @classmethod
    def from_task_doc(
        cls,
        task_doc: TaskDocument,
        kpts_tolerance: float = SETTINGS.VASP_KPTS_TOLERANCE,
        kspacing_tolerance: float = SETTINGS.VASP_KSPACING_TOLERANCE,
        input_sets: Dict[str, PyObject] = SETTINGS.VASP_DEFAULT_INPUT_SETS,
        LDAU_fields: List[str] = SETTINGS.VASP_CHECKED_LDAU_FIELDS,
        max_allowed_scf_gradient: float = SETTINGS.VASP_MAX_SCF_GRADIENT,
    ) -> "ValidationDoc":
        """
        Determines if a calculation is valid based on expected input parameters from a pymatgen inputset
        Args:
            task_doc: the task document to process
            kpts_tolerance: the tolerance to allow kpts to lag behind the input set settings
            kspacing_tolerance: the tolerance to allow kspacing to lag behind the input set settings
            input_sets: a dictionary of task_types -> pymatgen input set for validation
            LDAU_fields: LDAU fields to check for consistency
            max_allowed_scf_gradient: maximum uphill gradient allowed for SCF steps after the
                initial equillibriation period
        """
        structure = task_doc.output.structure
        calc_type = task_doc.calc_type
        inputs = task_doc.orig_inputs
        bandgap = task_doc.output.bandgap
        chemsys = task_doc.chemsys
        reasons = []
        data = {}
        warnings = []
        if str(calc_type) in input_sets:
            # Ensure inputsets that need the bandgap get it
            try:
                valid_input_set: VaspInputSet = input_sets[str(calc_type)](
                    structure, bandgap=bandgap
                )
            except TypeError:
                valid_input_set = input_sets[str(calc_type)](structure)
            # Checking K-Points
            # Calculations that use KSPACING will not have a .kpoints attr
            if valid_input_set.kpoints is not None:
                valid_num_kpts = valid_input_set.kpoints.num_kpts or np.prod(
                    valid_input_set.kpoints.kpts[0]
                )
                num_kpts = inputs.get("kpoints", {}).get("nkpoints", 0) or np.prod(
                    inputs.get("kpoints", {}).get("kpoints", [1, 1, 1])
                )
                data["kpts_ratio"] = num_kpts / valid_num_kpts
                if data["kpts_ratio"] < kpts_tolerance:
                    reasons.append(DeprecationMessage.KPTS)
            else:
                valid_kspacing = valid_input_set.incar.get("KSPACING", 0)
                if inputs.get("incar", {}).get("KSPACING"):
                    data["kspacing_delta"] = (
                        inputs["incar"].get("KSPACING") - valid_kspacing
                    )
                    # larger KSPACING means fewer k-points
                    # BUG FIX: the f-strings below reused double quotes inside a
                    # double-quoted f-string ({data["kspacing_delta"]}), which is a
                    # SyntaxError before Python 3.12 — switched to single quotes.
                    if data["kspacing_delta"] > kspacing_tolerance:
                        warnings.append(
                            f"KSPACING is greater than input set: {data['kspacing_delta']}"
                            f" lower than {kspacing_tolerance} ",
                        )
                    elif data["kspacing_delta"] < kspacing_tolerance:
                        warnings.append(
                            f"KSPACING is lower than input set: {data['kspacing_delta']}"
                            f" lower than {kspacing_tolerance} ",
                        )
            # warn, but don't invalidate if wrong ISMEAR
            valid_ismear = valid_input_set.incar.get("ISMEAR", 1)
            curr_ismear = inputs.get("incar", {}).get("ISMEAR", 1)
            if curr_ismear != valid_ismear:
                warnings.append(
                    f"Inappropriate smearing settings. Set to {curr_ismear},"
                    f" but should be {valid_ismear}"
                )
            # Checking ENCUT
            encut = inputs.get("incar", {}).get("ENCUT")
            valid_encut = valid_input_set.incar["ENCUT"]
            data["encut_ratio"] = float(encut) / valid_encut  # type: ignore
            if data["encut_ratio"] < 1:
                reasons.append(DeprecationMessage.ENCUT)
            # U-value checks
            # NOTE: Reverting to old method of just using input.hubbards which is wrong in many instances
            input_hubbards = task_doc.input.hubbards
            if valid_input_set.incar.get("LDAU", False) or len(input_hubbards) > 0:
                # Assemble required input_set LDAU params into dictionary
                input_set_hubbards = dict(
                    zip(
                        valid_input_set.poscar.site_symbols,
                        valid_input_set.incar.get("LDAUU", []),
                    )
                )
                all_elements = list(
                    set(input_set_hubbards.keys()) | set(input_hubbards.keys())
                )
                diff_ldau_params = {
                    el: (input_set_hubbards.get(el, 0), input_hubbards.get(el, 0))
                    for el in all_elements
                    if not np.allclose(
                        input_set_hubbards.get(el, 0), input_hubbards.get(el, 0)
                    )
                }
                if len(diff_ldau_params) > 0:
                    reasons.append(DeprecationMessage.LDAU)
                    warnings.extend(
                        [
                            f"U-value for {el} should be {good} but was {bad}"
                            for el, (good, bad) in diff_ldau_params.items()
                        ]
                    )
        # Check the max upwards SCF step
        skip = abs(inputs.get("incar", {}).get("NLEMDL", -5)) - 1
        energies = [
            d["e_fr_energy"]
            for d in task_doc.calcs_reversed[0]["output"]["ionic_steps"][-1][
                "electronic_steps"
            ]
        ]
        if len(energies) > skip:
            max_gradient = np.max(np.gradient(energies)[skip:])
            data["max_gradient"] = max_gradient
            if max_gradient > max_allowed_scf_gradient:
                reasons.append(DeprecationMessage.MAX_SCF)
        else:
            warnings.append(
                "Not enough electronic steps to compute valid gradient"
                " and compare with max SCF gradient tolerance"
            )
        # Check for Am and Po elements. These currently do not have proper elemental entries
        # and will not get treated properly by the thermo builder.
        if ("Am" in chemsys) or ("Po" in chemsys):
            reasons.append(DeprecationMessage.MANUAL)
        doc = ValidationDoc(
            task_id=task_doc.task_id,
            calc_type=calc_type,
            run_type=task_doc.run_type,
            valid=len(reasons) == 0,
            reasons=reasons,
            data=data,
            warnings=warnings,
        )
        return doc
def _get_unsorted_symbol_set(structure: Structure):
    """Return element symbols in first-seen order — pymatgen's ``symbol_set``
    is sorted, which would lose the original ordering."""
    seen = {}
    for site in structure:
        for species, amount in site.species.items():
            if amount != 0:
                seen[str(species)] = 1
    return list(seen.keys())
| from datetime import datetime
from typing import Dict, List, Union
import numpy as np
from pydantic import Field, PyObject
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.sets import VaspInputSet
from emmet.core.settings import EmmetSettings
from emmet.core.base import EmmetBaseModel
from emmet.core.mpid import MPID
from emmet.core.utils import DocEnum
from emmet.core.vasp.task import TaskDocument
# Module-wide Emmet settings (tolerances, default input sets), read once at import.
SETTINGS = EmmetSettings()
class DeprecationMessage(DocEnum):
    """Coded reasons a VASP task can be deprecated or flagged invalid."""
    MANUAL = "M", "manual deprecation"
    KPTS = "C001", "Too few KPoints"
    KSPACING = "C002", "KSpacing not high enough"
    # NOTE(review): ENCUT reuses code "C002" (same as KSPACING); confirm this
    # is intentional before keying on codes downstream.
    ENCUT = "C002", "ENCUT too low"
    FORCES = "C003", "Forces too large"
    CONVERGENCE = "E001", "Calculation did not converge"
    MAX_SCF = "E002", "Max SCF gradient too large"
    LDAU = "I001", "LDAU Parameters don't match the inputset"
class ValidationDoc(EmmetBaseModel):
    """
    Validation document for a VASP calculation
    """
    task_id: MPID = Field(..., description="The task_id for this validation document")
    valid: bool = Field(False, description="Whether this task is valid or not")
    last_updated: datetime = Field(
        description="Last updated date for this document",
        default_factory=datetime.utcnow,
    )
    reasons: List[Union[DeprecationMessage, str]] = Field(
        None, description="List of deprecation tags detailing why this task isn't valid"
    )
    warnings: List[str] = Field(
        [], description="List of potential warnings about this calculation"
    )
    data: Dict = Field(
        description="Dictioary of data used to perform validation."
        " Useful for post-mortem analysis"
    )
    class Config:
        extra = "allow"
    @classmethod
    def from_task_doc(
        cls,
        task_doc: TaskDocument,
        kpts_tolerance: float = SETTINGS.VASP_KPTS_TOLERANCE,
        kspacing_tolerance: float = SETTINGS.VASP_KSPACING_TOLERANCE,
        input_sets: Dict[str, PyObject] = SETTINGS.VASP_DEFAULT_INPUT_SETS,
        LDAU_fields: List[str] = SETTINGS.VASP_CHECKED_LDAU_FIELDS,
        max_allowed_scf_gradient: float = SETTINGS.VASP_MAX_SCF_GRADIENT,
    ) -> "ValidationDoc":
        """
        Determines if a calculation is valid based on expected input parameters from a pymatgen inputset
        Args:
            task_doc: the task document to process
            kpts_tolerance: the tolerance to allow kpts to lag behind the input set settings
            kspacing_tolerance: the tolerance to allow kspacing to lag behind the input set settings
            input_sets: a dictionary of task_types -> pymatgen input set for validation
            LDAU_fields: LDAU fields to check for consistency
            max_allowed_scf_gradient: maximum uphill gradient allowed for SCF steps after the
                initial equillibriation period
        """
        structure = task_doc.output.structure
        calc_type = task_doc.calc_type
        inputs = task_doc.orig_inputs
        bandgap = task_doc.output.bandgap
        chemsys = task_doc.chemsys
        reasons = []
        data = {}
        warnings = []
        # Input-set comparisons only run when a reference input set is registered
        # for this calculation type.
        if str(calc_type) in input_sets:
            # Ensure inputsets that need the bandgap get it
            try:
                valid_input_set: VaspInputSet = input_sets[str(calc_type)](
                    structure, bandgap=bandgap
                )
            except TypeError:
                valid_input_set = input_sets[str(calc_type)](structure)
            # Checking K-Points
            # Calculations that use KSPACING will not have a .kpoints attr
            if valid_input_set.kpoints is not None:
                valid_num_kpts = valid_input_set.kpoints.num_kpts or np.prod(
                    valid_input_set.kpoints.kpts[0]
                )
                num_kpts = inputs.get("kpoints", {}).get("nkpoints", 0) or np.prod(
                    inputs.get("kpoints", {}).get("kpoints", [1, 1, 1])
                )
                data["kpts_ratio"] = num_kpts / valid_num_kpts
                if data["kpts_ratio"] < kpts_tolerance:
                    reasons.append(DeprecationMessage.KPTS)
            else:
                valid_kspacing = valid_input_set.incar.get("KSPACING", 0)
                if inputs.get("incar", {}).get("KSPACING"):
                    data["kspacing_delta"] = (
                        inputs["incar"].get("KSPACING") - valid_kspacing
                    )
                    # larger KSPACING means fewer k-points
                    # NOTE(review): both branches phrase the message "... lower
                    # than {kspacing_tolerance}", and the elif fires for any
                    # delta <= tolerance (even within tolerance) — confirm intent.
                    if data["kspacing_delta"] > kspacing_tolerance:
                        warnings.append(
                            f"KSPACING is greater than input set: {data['kspacing_delta']}"
                            f" lower than {kspacing_tolerance} ",
                        )
                    elif data["kspacing_delta"] < kspacing_tolerance:
                        warnings.append(
                            f"KSPACING is lower than input set: {data['kspacing_delta']}"
                            f" lower than {kspacing_tolerance} ",
                        )
            # warn, but don't invalidate if wrong ISMEAR
            valid_ismear = valid_input_set.incar.get("ISMEAR", 1)
            curr_ismear = inputs.get("incar", {}).get("ISMEAR", 1)
            if curr_ismear != valid_ismear:
                warnings.append(
                    f"Inappropriate smearing settings. Set to {curr_ismear},"
                    f" but should be {valid_ismear}"
                )
            # Checking ENCUT
            encut = inputs.get("incar", {}).get("ENCUT")
            valid_encut = valid_input_set.incar["ENCUT"]
            data["encut_ratio"] = float(encut) / valid_encut # type: ignore
            if data["encut_ratio"] < 1:
                reasons.append(DeprecationMessage.ENCUT)
            # U-value checks
            # NOTE: Reverting to old method of just using input.hubbards which is wrong in many instances
            input_hubbards = task_doc.input.hubbards
            if valid_input_set.incar.get("LDAU", False) or len(input_hubbards) > 0:
                # Assemble required input_set LDAU params into dictionary
                input_set_hubbards = dict(
                    zip(
                        valid_input_set.poscar.site_symbols,
                        valid_input_set.incar.get("LDAUU", []),
                    )
                )
                all_elements = list(
                    set(input_set_hubbards.keys()) | set(input_hubbards.keys())
                )
                diff_ldau_params = {
                    el: (input_set_hubbards.get(el, 0), input_hubbards.get(el, 0))
                    for el in all_elements
                    if not np.allclose(
                        input_set_hubbards.get(el, 0), input_hubbards.get(el, 0)
                    )
                }
                if len(diff_ldau_params) > 0:
                    reasons.append(DeprecationMessage.LDAU)
                    warnings.extend(
                        [
                            f"U-value for {el} should be {good} but was {bad}"
                            for el, (good, bad) in diff_ldau_params.items()
                        ]
                    )
        # Check the max upwards SCF step
        skip = abs(inputs.get("incar", {}).get("NLEMDL", -5)) - 1
        energies = [
            d["e_fr_energy"]
            for d in task_doc.calcs_reversed[0]["output"]["ionic_steps"][-1][
                "electronic_steps"
            ]
        ]
        if len(energies) > skip:
            max_gradient = np.max(np.gradient(energies)[skip:])
            data["max_gradient"] = max_gradient
            if max_gradient > max_allowed_scf_gradient:
                reasons.append(DeprecationMessage.MAX_SCF)
        else:
            warnings.append(
                "Not enough electronic steps to compute valid gradient"
                " and compare with max SCF gradient tolerance"
            )
        # Check for Am and Po elements. These currently do not have proper elemental entries
        # and will not get treated properly by the thermo builder.
        if ("Am" in chemsys) or ("Po" in chemsys):
            reasons.append(DeprecationMessage.MANUAL)
        doc = ValidationDoc(
            task_id=task_doc.task_id,
            calc_type=calc_type,
            run_type=task_doc.run_type,
            valid=len(reasons) == 0,
            reasons=reasons,
            data=data,
            warnings=warnings,
        )
        return doc
def _get_unsorted_symbol_set(structure: Structure):
    """Return element symbols in first-seen order — pymatgen's ``symbol_set``
    is sorted, which would lose the original ordering."""
    symbols = [
        str(sp)
        for site in structure
        for sp, v in site.species.items()
        if v != 0
    ]
    # dict.fromkeys de-duplicates while preserving insertion order.
    return list(dict.fromkeys(symbols))
|
import os
import json
import numpy as np
import itertools
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from mpl_toolkits import mplot3d
def liver_dump_init(env, name = None):
    """Create an empty per-quantity recording dict for liver state.

    ``vtx`` holds a copy of the initial vertex positions; ``name`` defaults to
    a tag built from the env's timestep and the liver's gamma.
    """
    record = {key: [] for key in ('x', 'Fes', 'Fis', 'Ficp', 'volume', 'col_p_n', 'crash')}
    record['vtx'] = env.liver.x.copy()
    record['name'] = name if name is not None else f"_dt{env.timestep}_down_gm{env.liver.gamma}"
    return record
def liver_dump_step(liver, env):
    """Append the current liver state to the recording dict and return it."""
    state = env.liver
    liver['x'].append(state.x)
    liver['Fes'].append(state.Fes)
    liver['Fis'].append(state.Fis)
    liver['Ficp'].append(state.Ficp)
    # Volume ratio relative to the initial configuration, rounded to 3 places.
    liver['volume'].append(np.round(state.volumes6.sum() / state.init_volume6.sum(), 3))
    liver['col_p_n'].append(len(state.check_tet_aabb_collision(env.sg.x)))
    liver['crash'].append(state.crash_flag)
    return liver
def liver_dump(liver, ep=None):
    """Serialise a liver recording dict to JSON in the ``liver_json`` directory.

    Args:
        liver: recording dict from ``liver_dump_init``/``liver_dump_step``.
        ep: optional episode number; when given the file is named by episode,
            otherwise the recording's ``name`` tag is used.
    """
    liver_save = {}
    liver_save['vtx'] = liver['vtx'].tolist()
    liver_save['x'] = np.array(liver['x']).tolist()
    liver_save['Fes'] = np.array(liver['Fes']).tolist()
    liver_save['Fis'] = np.array(liver['Fis']).tolist()
    liver_save['Ficp'] = np.array(liver['Ficp']).tolist()
    liver_save['volume'] = np.array(liver['volume']).tolist()
    liver_save['col_p_n'] = np.array(liver['col_p_n']).tolist()
    liver_save['crash'] = np.array(liver['crash']).tolist()
    if ep is None:
        # BUG FIX: the original nested double quotes inside a double-quoted
        # f-string (f"...{liver["name"]}...") — a SyntaxError before Python 3.12.
        with open(os.path.join('liver_json', f"liver_record{liver['name']}.json"), 'w') as f:
            json.dump(liver_save, f)
    else:
        with open(os.path.join('liver_json', f"liver_record_{int(ep)}.json"), 'w') as f:
            json.dump(liver_save, f)
def liver_dump_load(liver):
    """Convert the lists stored in a liver recording dict back to ndarrays.

    Returns a tuple ``(vtx, x, Fes, Fis, Ficp, volume, col_p_n, crash)``.
    """
    keys = ('vtx', 'x', 'Fes', 'Fis', 'Ficp', 'volume', 'col_p_n', 'crash')
    return tuple(np.array(liver[key]) for key in keys)
'''
temp:
1. collision_response_cotin
2. collision_response_self
'''
def collision_response_cotin(pair,liver,past_p,current_p):
    """Ray-cast tool points against the liver surface and collect vertex
    displacements that push penetrated triangles back out.

    Args:
        pair: mapping whose values are lists of colliding tool-point indices.
        liver: deformable body exposing ``x``, ``tri_elements``,
            ``tri_normal_vec`` and ``n_tri`` (shapes inferred from usage below;
            ``x`` indexed by ``tri_elements[:, k]`` — assumed (n_vtx, 3)).
        past_p / current_p: tool point positions before/after the step,
            indexed by the flattened pair indices.

    Returns:
        dict mapping liver vertex index -> 3-vector displacement.
    """
    # check bc_co for all surface tri_element
    # add dn to decide
    move_v_disp_dict = {}
    move_tri_indexs = []
    flat_list = [item for sublist in list(pair.values()) for item in sublist]
    p_indexs = np.array(flat_list).reshape(-1)
    p_n = p_indexs.shape[0]
    # Unit ray along each point's motion over the step.
    ray = current_p[p_indexs]-past_p[p_indexs]
    ray = ray*(1/np.linalg.norm(ray,axis=-1))[:,None] # p_n x3
    # compute ray and normal vector, d= ray,n=normal_vec
    dn = ray@liver.tri_normal_vec.T # p_n x n_tri
    ap = liver.x[liver.tri_elements[:,0]][None,:] - past_p[p_indexs][:,None] # p_n x n_tri x 3 #choose first point as a
    apn = (ap * liver.tri_normal_vec[None,:]).sum(axis=-1) # p_n x n_tri x 3 -> p_n x n_tri
    # Ray parameter t of each ray/triangle-plane intersection.
    ts = apn * (1/dn) # p_n x n_tri
    int_p = ts[:,:,None]*ray[:,None]+past_p[p_indexs][:,None] # p_n x n_tri x3 <- p_n x n_tri x1 * p_n x1 x3 + p_n x1 x3
    # compute barycentric coordinates of intersection points
    v1 = liver.x[liver.tri_elements[:,1]]-liver.x[liver.tri_elements[:,0]] # n_tri x3
    v2 = liver.x[liver.tri_elements[:,2]]-liver.x[liver.tri_elements[:,0]]
    tri_areax2 = np.linalg.norm(np.cross(v1,v2,axis=-1),axis=-1) # n_tri
    bc_temp = np.zeros((p_n,liver.n_tri,3,3,3))
    bc_temp[:] = np.tile(liver.x[liver.tri_elements], 3).reshape(-1, 3, 3, 3).transpose(0, 2, 1, 3) # p_n x n_tri x 3area x 3ps x 3
    for itemp in range(p_n):
        # Substitute the intersection point for one triangle corner at a time
        # to form the three sub-triangles used for barycentric areas.
        bc_temp[itemp, :, [0, 1, 2], [0, 1, 2]] = int_p[itemp]
    v1 = bc_temp[:, :, :, 1] - bc_temp[:, :, :, 0] # p_n x n_tri x 3area x 3xyz
    v2 = bc_temp[:, :, :, 2] - bc_temp[:, :, :, 0]
    areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # p_n x n_tri x 3area
    bc_co = areax2 * (1.0 / tri_areax2)[np.newaxis, :,
            np.newaxis] # p_n x n_tri x 3area<- p_n x n_tri x 3area * 1 x n_tri x 3area
    for itemp in range(p_n):
        # check bc_co
        # A point lies inside a triangle when its barycentric areas sum to ~1.
        check1 = np.argwhere(abs(bc_co[itemp].sum(axis=-1) - 1) < 1e-3).flatten() # each p should have at least 1
        check2 = np.argwhere(dn[itemp] < 0).flatten()
        psb_tri_index = np.intersect1d(check1,check2) # all possible tri_elements satisfies the bc_co and the negative normal vector
        if psb_tri_index.size!=0:
            psb_ts = ts[itemp,psb_tri_index] # n_psb_tri_index
            # if np.any(psb_ts<0):
            #     raise ValueError("liver shape error")
            move_tri_index = psb_tri_index[psb_ts.argmin()] # only 1 the tri_elements should move
            move_t = current_p[p_indexs[itemp]] - int_p[itemp,move_tri_index]
            move_v_index_p = liver.tri_elements[move_tri_index]
            for ividx in move_v_index_p: # same points may move multiple times.
                if ividx not in move_v_disp_dict.keys():
                    move_v_disp_dict[ividx] = move_t # move_t put in for new vindex
                else:# compare move_t for old vindex
                    # Keep whichever displacement is larger in magnitude.
                    if np.linalg.norm(np.c_[move_v_disp_dict[ividx],move_t].T,axis=-1).argmax() == 1 : # older move closer than new
                        move_v_disp_dict[ividx] = move_t
            move_tri_indexs.append(move_tri_index.tolist())
    # NOTE(review): leftover debug print — consider removing or routing to a logger.
    print(move_tri_indexs)
    return move_v_disp_dict
def collision_response_self(pair, liver, tool):
# not so good when the deform is bigger
# change to old fixed to test, problem still, try cotin methods
new_vtx_delta = None
move_tris = {}
nv_aves = {}
new_vtx_deltas = {}
for key, value in pair.items():
new_vtx_delta = np.zeros(liver.x.shape)
i_tet, p_index = int(key), np.array(value)
p_n = p_index.shape[0]
# find potential collpaision surface tri_element
col_tri_index = np.argwhere(liver.tri_tet[:, 0] == i_tet).flatten()
if col_tri_index.size == 0: raise ValueError(
"Update time step too big, vertices skip the surface tetrahedron elements")
col_tri_n = col_tri_index.shape[0]
col_tri_nv = liver.tri_normal_vec[col_tri_index]
col_tri_p = liver.x[liver.tri_elements[col_tri_index].T[0]] # chose the first points
# compute nv_ave
nv_ave = tool.vtx_normal_vec[p_index].sum(axis=0)
nv_ave = nv_ave / np.linalg.norm(nv_ave)
nv_aves[key] = nv_ave
# compute ts and intersection points
dn = nv_ave.dot(col_tri_nv.T) # col_tri_n
ap = col_tri_p[np.newaxis, :] - tool.x[p_index, np.newaxis] # p_n x col_tri_n x 3
dotn = np.tile(col_tri_nv, p_n).reshape(-1, p_n, 3).transpose(1, 0, 2)
apn = (ap * dotn).sum(axis=-1) # p_n x col_tri_n
ts = apn * (1 / dn) # p_n x col_tri_n
int_col_p = ts[:, :, np.newaxis] * nv_ave[np.newaxis, np.newaxis, :] \
+ tool.vertices[p_index][:, np.newaxis, :] # p_n x col_tri_n x 1 * 1 x 1 x 3 + p_n x 1 x 3
# compute barycentric coordinates of intersection points
tri_vertices = liver.x[liver.tri_elements[col_tri_index]] # n_tri x 3 x 3
v1 = tri_vertices[:, 1] - tri_vertices[:, 0]
v2 = tri_vertices[:, 2] - tri_vertices[:, 0]
tri_areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # n_tri
bc_temp = np.zeros((p_n, col_tri_n, 3, 3, 3))
bc_temp[:] = np.tile(tri_vertices, 3).reshape(-1, 3, 3, 3).transpose(0, 2, 1, 3) # p_n x col_tri_n x 3 x 3 x 3
for itemp in range(p_n):
bc_temp[itemp, :, [0, 1, 2], [0, 1, 2]] = int_col_p[itemp]
v1 = bc_temp[:, :, :, 1] - bc_temp[:, :, :, 0] # p_n x col_tri_n x 3area x 3xyz
v2 = bc_temp[:, :, :, 2] - bc_temp[:, :, :, 0]
areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # p_n x col_tri_n x 3area
bc_co = areax2 * (1.0 / tri_areax2)[np.newaxis, :,
np.newaxis] # p_n x col_tri_n x 3area * 1 x col_tri_n x 3area = p_n x col_tri_n x 3area
# Move tri to point with tmax
check1 = np.argwhere(abs(bc_co.sum(axis=-1) - 1) < 1e-3)
check2 = np.argwhere(dn < 0)
inter_tri_index = np.intersect1d(check1[:, 1], check2) # find colliable surface tri_elements index
# no colliable tri_elements
if inter_tri_index.size == 0:
the_best_tri = dn.argmin() # chose one of most collidable tri
move_tri = liver.tri_elements[col_tri_index[the_best_tri]]
tri_nv = liver.tri_normal_vec[col_tri_index[the_best_tri]].flatten()
tri_vtx = liver.x[move_tri].reshape(3, 3)
v = nv_ave - tri_nv # find a new direction, not so sharp as nv_ave
v = v / np.linalg.norm(v)
dn_t = v.dot(tri_nv) # 1
ap_t = tri_vtx[0] - tool.x[p_index]
t_t = ap_t.dot(tri_nv) / dn_t
move_t = t_t.min()
new_vtx_delta[move_tri] += - move_t * v
new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
move_tris.setdefault(key, []).append(move_tri.flatten())
print(' None ',end='')
else:
# more than 1 colliable tri_elements
if len(inter_tri_index) > 1:
temp_delta = np.zeros((liver.x.shape[0], len(inter_tri_index))) # n_v * n_inter
itemp = 0
for inter_tri_i in inter_tri_index:
part_p_index = check1[ check1[:, 1] == inter_tri_i, 0] # p index of each tri_element that satisfies bc_co condition
move_t = ts[part_p_index, inter_tri_i].min()
move_tri = liver.tri_elements[col_tri_index[inter_tri_i]]
temp_delta[move_tri, itemp] = - move_t # collect all possible move_t for all vertices
move_tris.setdefault(key, []).append(move_tri.flatten())
itemp += 1
new_vtx_delta += temp_delta.max(axis=-1)[:, np.newaxis] * nv_ave[np.newaxis,:] # move with the maximal move_t
new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
print(' Multi ',end='')
else:
# only 1 colliable tri_elements
move_t = ts[:, inter_tri_index].min()
move_tri = liver.tri_elements[col_tri_index[inter_tri_index]]
new_vtx_delta[move_tri] += -move_t * nv_ave
new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
move_tris.setdefault(key, []).append(move_tri.flatten())
print(' Single ',end='')
return new_vtx_delta, move_tris, nv_aves, new_vtx_deltas
'''
static methods:
1. lame_param
2. tri_mid_vec
3. rotation_matrix
4. flatten_list
'''
def lame_param(E, v):
la = E * v / (1 + v) / (1 - 2 * v)
mu = E / 2 / (1 + v)
return la, mu
def tri_mid_vec(vertices, tri_elements):
tri_vtx = vertices[tri_elements]
tri_mid = tri_vtx.mean(axis=1)
tri_normal_vec = np.cross(tri_vtx[:, 1] - tri_vtx[:, 0], tri_vtx[:, 2] - tri_vtx[:, 0])
tri_normal_vec = tri_normal_vec * (1.0 / np.linalg.norm(tri_normal_vec, axis=1))[:, np.newaxis]
return tri_mid, tri_normal_vec
def rotation_matrix(deg,axis='x'):
rad = np.deg2rad(deg)
s,c = np.sin(rad),np.cos(rad)
if axis=='x':
return np.array([ 1, 0, 0,
0, c, -s,
0, s, c]).reshape(-1,3)
elif axis=='y':
return np.array([ c, 0, s,
0, 1, 0,
-s, 0, c]).reshape(-1,3)
elif axis=='z':
return np.array([ c, -s, 0,
s, c, 0,
0, 0, 1]).reshape(-1,3)
else:
return np.ones((3,3))
# def flatten_list(l):
# # not work well
# for el in l:
# if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
# return flatten_list(el)
# else:
# return el
'''
matplotlibe subplot
1. create_axs
2. draw_liver
3. draw_liver_tool
'''
def create_axs(subplot_n,block=False,return_fig=False):
r = int(np.floor(np.sqrt(subplot_n)))
c = int(subplot_n/r)
fig = plt.figure(figsize=plt.figaspect(0.5))
axs = {}
for i in range(subplot_n):
axs[i] = fig.add_subplot(r, c, i+1, projection='3d')
if return_fig:
return axs,fig
return axs
def draw_liver(liver,ax):
ax.cla()
ax = liver.plt_vtx(ax=ax)
ax = liver.plt_x(ax=ax)
plt_equal(ax)
return ax
def draw_liver_F(liver,axs,f_scl = 5e0):
# Fes, Ficp, Fis+ displacement
axs[0].cla()
axs[0] = liver.plt_x(ax=axs[0])
axs[0] = liver.plt_Fes(vec_to_scl=f_scl,ax=axs[0])
plt_equal(axs[0])
axs[1].cla()
axs[1] = liver.plt_x(ax=axs[1])
axs[1] = liver.plt_Ficp(vec_to_scl=f_scl,ax=axs[1])
plt_equal(axs[1])
axs[2].cla()
axs[2] = liver.plt_vtx(ax=axs[2])
axs[2] = liver.plt_x(ax=axs[2])
axs[2] = liver.plt_Fis(vec_to_scl=f_scl,ax=axs[2])
plt_equal(axs[2])
return axs
def draw_liver_tool(liver,sg,axs,f_scl=5e0):
axs[0].cla()
axs[0] = liver.plt_x(ax=axs[0])
axs[0] = liver.plt_tri_normal_vec(vec_scl=f_scl/2,ax=axs[0])
plt_equal(axs[0])
axs[1].cla()
axs[1] = sg.plt_sg_x(ax=axs[1])
axs[1] = sg._plt_vtx_normal_vec(sg.x,vec_scl=f_scl/2,ax=axs[1])
plt_equal(axs[1])
axs[2].cla()
axs[2] = liver.plt_x(ax=axs[2])
axs[2] = sg.plt_sg_x(ax=axs[2])
plt_equal(axs[2])
axs_l = {axs[3],axs[4],axs[5]}
axs_l = draw_liver(liver,axs_l,f_scl=f_scl)
axs[3],axs[4],axs[5] = axs_l[0],axs_l[1],axs_l[2]
plt.draw()#plt.show(block=False)
return axs
'''
aabb
1. xyzminmax
2. _plt_AABB
3. plt_aabb_p
'''
def xyzminmax(aabb):
# xmin, ymin, zmin, xmax, ymax, zmax = aabb[0], aabb[1], aabb[2], aabb[3], aabb[4], aabb[5]
return aabb[0], aabb[1], aabb[2], aabb[3], aabb[4], aabb[5]
def plt_AABB(aabb, **kwargs):
c_line = '#9467bd'
c_p = '#e377c2'
if 'c' in kwargs.keys():
colors = kwargs['c']
if type(colors) is list:
c_line = colors[0]
c_p = colors[1]
elif type(colors) is str:
c_line = colors
ax = ax3d_handle(**kwargs)
# aabb: 1x6, xmin, ymin, zmin, xmax, ymax, zmax
xmin, ymin, zmin, xmax, ymax, zmax = xyzminmax(aabb)
xyz = np.array([xmin, ymin, zmin, xmax, ymin, zmin, xmax, ymax, zmin, xmin, ymax, zmin,
xmin, ymin, zmax, xmax, ymin, zmax, xmax, ymax, zmax, xmin, ymax, zmax]).reshape(-1, 3)
line_segs = np.array([1, 2, 2, 3, 3, 4, 4, 1,
1, 5, 2, 6, 3, 7, 4, 8,
5, 6, 6, 7, 7, 8, 8, 5]).reshape(-1, 2) - 1
line_vt = np.hstack((xyz[line_segs[:, 0]], xyz[line_segs[:, 1]])).copy()
lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors=c_line, linestyles='--')
ax.add_collection(lc)
ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], marker='o', c=c_p)
return ax
def plt_aabb_p(aabb, p, **kwargs):
ax = ax3d_handle(**kwargs)
ax.scatter(p[0], p[1], p[2], c='#22D8C3')
plt_AABB(aabb, ax=ax)
return ax
'''
ax handle
1. 1) plt_equal
2) plt_show_equal
3) set_axes_equal
4) _set_axes_radius
2. ax3d_handle
3. plt_tet
4. plt_tet_ps
5. plt_normal_vecs
6. plt_tri
7. plt_tri_ps
'''
def plt_equal(ax,limits = None):
ax.set_box_aspect((1, 1, 1)) # IMPORTANT - this is the new, key line
set_axes_equal(ax,limits=limits) # IMPORTANT - this is also required
def plt_show_equal(ax,block=False,limits = None):
plt_equal(ax,limits=limits)
plt.show(block=block)
def set_axes_equal(ax: plt.Axes,limits = None):
"""Set 3D plot axes to equal scale.
Make axes of 3D plot have equal scale so that spheres appear as
spheres and cubes as cubes. Required since `ax.axis('equal')`
and `ax.set_aspect('equal')` don't work on 3D.
"""
if limits is None:
limits = np.array([
ax.get_xlim3d(),
ax.get_ylim3d(),
ax.get_zlim3d(),
])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
_set_axes_radius(ax, origin, radius)
def _set_axes_radius(ax, origin, radius):
x, y, z = origin
ax.set_xlim3d([x - radius, x + radius])
ax.set_ylim3d([y - radius, y + radius])
ax.set_zlim3d([z - radius, z + radius])
def ax3d_handle(return_fig=False,**kwargs):
if 'ax' in kwargs:
ax = kwargs['ax']
else:
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(projection='3d')
if return_fig:
return ax,fig
return ax
def plt_tet(vs, text_opt='off', **kwargs):
ax = ax3d_handle(**kwargs)
ax.scatter(vs[:, 0], vs[:, 1], vs[:, 2], c='#BCB6E3')
if text_opt == "on":
for i in range(4): ax.text(vs[i, 0], vs[i, 1], vs[i, 2], f'{i + 1}')
line_order = np.array([1, 2, 1, 3, 1, 4, 2, 3, 2, 4, 3, 4]).reshape(-1, 2) - 1
line_vt = np.hstack((vs[line_order[:, 0]], vs[line_order[:, 1]]))
lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors='#8A7BFB')
ax.add_collection(lc)
return ax
def plt_tet_ps(vs, p, text_opt='off', **kwargs):
p = np.array(p)
ax = ax3d_handle(**kwargs)
ax = plt_tet(vs, text_opt=text_opt, ax=ax)
if len(p.shape) == 1: p = p.reshape(1, -1)
ax.scatter(p[:, 0], p[:, 1], p[:, 2], c='#22D8C3')
return ax
def plt_normal_vecs(base_ps, vecs, scl=1, **kwargs):
vesc_scl = vecs * scl
ax = ax3d_handle(**kwargs)
ax.scatter(base_ps[:, 0], base_ps[:, 1], base_ps[:, 2], c='#1D1788')
ax.quiver(base_ps[:, 0], base_ps[:, 1], base_ps[:, 2],
vesc_scl[:, 0], vesc_scl[:, 1], vesc_scl[:, 2], color='#7D75FE')
return ax
def plt_tet_ps_vecs(vs, p, vec, scl=1, text_opt = 'off', **kwargs):
ax = ax3d_handle(**kwargs)
ax = plt_tet_ps(vs, p, ax=ax, text_opt = text_opt)
if len(p.shape) == 1: p = p.reshape(1, -1)
if len(vec.shape) == 1: vec = vec.reshape(1, -1)
ax = plt_normal_vecs(p, vec, scl=scl, ax=ax)
return ax
def plt_tri(vs, text_opt='off', **kwargs):
ax = ax3d_handle(**kwargs)
ax.scatter(vs[:, 0], vs[:, 1], vs[:, 2], c='#ff00ff')
if text_opt == "on":
for i in range(3): ax.text(vs[i, 0], vs[i, 1], vs[i, 2], f'{i + 1}')
line_order = np.array([1, 2, 1, 3, 2, 3]).reshape(-1, 2) - 1
line_vt = np.hstack((vs[line_order[:, 0]], vs[line_order[:, 1]]))
lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors='#9933ff')
ax.add_collection(lc)
return ax
def plt_tri_ps(vs, p, text_opt='off', **kwargs):
ax = ax3d_handle(**kwargs)
ax = plt_tri(vs, text_opt=text_opt, ax=ax)
if len(p.shape) == 1: p = p.reshape(1, -1)
ax.scatter(p[:, 0], p[:, 1], p[:, 2], c='#22D8C3')
return ax
| import os
import json
import numpy as np
import itertools
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from mpl_toolkits import mplot3d
def liver_dump_init(env, name = None):
liver = {'x':[],'Fes':[],'Fis':[],'Ficp':[],'volume':[],'col_p_n':[],'crash':[]}
liver['vtx'] = env.liver.x.copy()
if name is not None:
liver['name'] = name
else:
liver['name'] = f"_dt{env.timestep}_down_gm{env.liver.gamma}"
return liver
def liver_dump_step(liver,env):
liver['x'].append(env.liver.x)
liver['Fes'].append(env.liver.Fes)
liver['Fis'].append(env.liver.Fis)
liver['Ficp'].append(env.liver.Ficp)
liver['volume'].append(np.round(env.liver.volumes6.sum() / env.liver.init_volume6.sum(),3))
liver['col_p_n'].append(len(env.liver.check_tet_aabb_collision(env.sg.x)))
liver['crash'].append(env.liver.crash_flag)
return liver
def liver_dump(liver,ep = None):
liver_save ={}
liver_save['vtx'] = liver['vtx'].tolist()
liver_save['x'] = np.array(liver['x']).tolist()
liver_save['Fes'] = np.array(liver['Fes']).tolist()
liver_save['Fis'] = np.array(liver['Fis']).tolist()
liver_save['Ficp'] = np.array(liver['Ficp']).tolist()
liver_save['volume'] = np.array(liver['volume']).tolist()
liver_save['col_p_n']= np.array(liver['col_p_n']).tolist()
liver_save['crash'] = np.array(liver['crash']).tolist()
if ep is None:
with open(os.path.join('liver_json',f"liver_record{liver['name']}.json"),'w') as f:
json.dump(liver_save,f)
else:
with open(os.path.join('liver_json',f"liver_record_{int(ep)}.json"),'w') as f:
json.dump(liver_save,f)
def liver_dump_load(liver):
vtx = np.array(liver['vtx'])
x = np.array(liver['x'])
Fes = np.array(liver['Fes'])
Fis = np.array(liver['Fis'])
Ficp = np.array(liver['Ficp'])
volume = np.array(liver['volume'])
col_p_n = np.array(liver['col_p_n'])
crash = np.array(liver['crash'])
return vtx, x, Fes, Fis, Ficp, volume, col_p_n, crash
'''
temp:
1. collision_response_cotin
2. collision_response_self
'''
def collision_response_cotin(pair,liver,past_p,current_p):
# check bc_co for all surface tri_element
# add dn to decide
move_v_disp_dict = {}
move_tri_indexs = []
flat_list = [item for sublist in list(pair.values()) for item in sublist]
p_indexs = np.array(flat_list).reshape(-1)
p_n = p_indexs.shape[0]
ray = current_p[p_indexs]-past_p[p_indexs]
ray = ray*(1/np.linalg.norm(ray,axis=-1))[:,None] # p_n x3
# compute ray and normal vector, d= ray,n=normal_vec
dn = ray@liver.tri_normal_vec.T # p_n x n_tri
ap = liver.x[liver.tri_elements[:,0]][None,:] - past_p[p_indexs][:,None] # p_n x n_tri x 3 #choose first point as a
apn = (ap * liver.tri_normal_vec[None,:]).sum(axis=-1) # p_n x n_tri x 3 -> p_n x n_tri
ts = apn * (1/dn) # p_n x n_tri
int_p = ts[:,:,None]*ray[:,None]+past_p[p_indexs][:,None] # p_n x n_tri x3 <- p_n x n_tri x1 * p_n x1 x3 + p_n x1 x3
# compute barycentric coordinates of intersection points
v1 = liver.x[liver.tri_elements[:,1]]-liver.x[liver.tri_elements[:,0]] # n_tri x3
v2 = liver.x[liver.tri_elements[:,2]]-liver.x[liver.tri_elements[:,0]]
tri_areax2 = np.linalg.norm(np.cross(v1,v2,axis=-1),axis=-1) # n_tri
bc_temp = np.zeros((p_n,liver.n_tri,3,3,3))
bc_temp[:] = np.tile(liver.x[liver.tri_elements], 3).reshape(-1, 3, 3, 3).transpose(0, 2, 1, 3) # p_n x n_tri x 3area x 3ps x 3
for itemp in range(p_n):
bc_temp[itemp, :, [0, 1, 2], [0, 1, 2]] = int_p[itemp]
v1 = bc_temp[:, :, :, 1] - bc_temp[:, :, :, 0] # p_n x n_tri x 3area x 3xyz
v2 = bc_temp[:, :, :, 2] - bc_temp[:, :, :, 0]
areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # p_n x n_tri x 3area
bc_co = areax2 * (1.0 / tri_areax2)[np.newaxis, :,
np.newaxis] # p_n x n_tri x 3area<- p_n x n_tri x 3area * 1 x n_tri x 3area
for itemp in range(p_n):
# check bc_co
check1 = np.argwhere(abs(bc_co[itemp].sum(axis=-1) - 1) < 1e-3).flatten() # each p should have at least 1
check2 = np.argwhere(dn[itemp] < 0).flatten()
psb_tri_index = np.intersect1d(check1,check2) # all possible tri_elements satisfies the bc_co and the negative normal vector
if psb_tri_index.size!=0:
psb_ts = ts[itemp,psb_tri_index] # n_psb_tri_index
# if np.any(psb_ts<0):
# raise ValueError("liver shape error")
move_tri_index = psb_tri_index[psb_ts.argmin()] # only 1 the tri_elements should move
move_t = current_p[p_indexs[itemp]] - int_p[itemp,move_tri_index]
move_v_index_p = liver.tri_elements[move_tri_index]
for ividx in move_v_index_p: # same points may move multiple times.
if ividx not in move_v_disp_dict.keys():
move_v_disp_dict[ividx] = move_t # move_t put in for new vindex
else:# compare move_t for old vindex
if np.linalg.norm(np.c_[move_v_disp_dict[ividx],move_t].T,axis=-1).argmax() == 1 : # older move closer than new
move_v_disp_dict[ividx] = move_t
move_tri_indexs.append(move_tri_index.tolist())
print(move_tri_indexs)
return move_v_disp_dict
def collision_response_self(pair, liver, tool):
# not so good when the deform is bigger
# change to old fixed to test, problem still, try cotin methods
new_vtx_delta = None
move_tris = {}
nv_aves = {}
new_vtx_deltas = {}
for key, value in pair.items():
new_vtx_delta = np.zeros(liver.x.shape)
i_tet, p_index = int(key), np.array(value)
p_n = p_index.shape[0]
# find potential collpaision surface tri_element
col_tri_index = np.argwhere(liver.tri_tet[:, 0] == i_tet).flatten()
if col_tri_index.size == 0: raise ValueError(
"Update time step too big, vertices skip the surface tetrahedron elements")
col_tri_n = col_tri_index.shape[0]
col_tri_nv = liver.tri_normal_vec[col_tri_index]
col_tri_p = liver.x[liver.tri_elements[col_tri_index].T[0]] # chose the first points
# compute nv_ave
nv_ave = tool.vtx_normal_vec[p_index].sum(axis=0)
nv_ave = nv_ave / np.linalg.norm(nv_ave)
nv_aves[key] = nv_ave
# compute ts and intersection points
dn = nv_ave.dot(col_tri_nv.T) # col_tri_n
ap = col_tri_p[np.newaxis, :] - tool.x[p_index, np.newaxis] # p_n x col_tri_n x 3
dotn = np.tile(col_tri_nv, p_n).reshape(-1, p_n, 3).transpose(1, 0, 2)
apn = (ap * dotn).sum(axis=-1) # p_n x col_tri_n
ts = apn * (1 / dn) # p_n x col_tri_n
int_col_p = ts[:, :, np.newaxis] * nv_ave[np.newaxis, np.newaxis, :] \
+ tool.vertices[p_index][:, np.newaxis, :] # p_n x col_tri_n x 1 * 1 x 1 x 3 + p_n x 1 x 3
# compute barycentric coordinates of intersection points
tri_vertices = liver.x[liver.tri_elements[col_tri_index]] # n_tri x 3 x 3
v1 = tri_vertices[:, 1] - tri_vertices[:, 0]
v2 = tri_vertices[:, 2] - tri_vertices[:, 0]
tri_areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # n_tri
bc_temp = np.zeros((p_n, col_tri_n, 3, 3, 3))
bc_temp[:] = np.tile(tri_vertices, 3).reshape(-1, 3, 3, 3).transpose(0, 2, 1, 3) # p_n x col_tri_n x 3 x 3 x 3
for itemp in range(p_n):
bc_temp[itemp, :, [0, 1, 2], [0, 1, 2]] = int_col_p[itemp]
v1 = bc_temp[:, :, :, 1] - bc_temp[:, :, :, 0] # p_n x col_tri_n x 3area x 3xyz
v2 = bc_temp[:, :, :, 2] - bc_temp[:, :, :, 0]
areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # p_n x col_tri_n x 3area
bc_co = areax2 * (1.0 / tri_areax2)[np.newaxis, :,
np.newaxis] # p_n x col_tri_n x 3area * 1 x col_tri_n x 3area = p_n x col_tri_n x 3area
# Move tri to point with tmax
check1 = np.argwhere(abs(bc_co.sum(axis=-1) - 1) < 1e-3)
check2 = np.argwhere(dn < 0)
inter_tri_index = np.intersect1d(check1[:, 1], check2) # find colliable surface tri_elements index
# no colliable tri_elements
if inter_tri_index.size == 0:
the_best_tri = dn.argmin() # chose one of most collidable tri
move_tri = liver.tri_elements[col_tri_index[the_best_tri]]
tri_nv = liver.tri_normal_vec[col_tri_index[the_best_tri]].flatten()
tri_vtx = liver.x[move_tri].reshape(3, 3)
v = nv_ave - tri_nv # find a new direction, not so sharp as nv_ave
v = v / np.linalg.norm(v)
dn_t = v.dot(tri_nv) # 1
ap_t = tri_vtx[0] - tool.x[p_index]
t_t = ap_t.dot(tri_nv) / dn_t
move_t = t_t.min()
new_vtx_delta[move_tri] += - move_t * v
new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
move_tris.setdefault(key, []).append(move_tri.flatten())
print(' None ',end='')
else:
# more than 1 colliable tri_elements
if len(inter_tri_index) > 1:
temp_delta = np.zeros((liver.x.shape[0], len(inter_tri_index))) # n_v * n_inter
itemp = 0
for inter_tri_i in inter_tri_index:
part_p_index = check1[ check1[:, 1] == inter_tri_i, 0] # p index of each tri_element that satisfies bc_co condition
move_t = ts[part_p_index, inter_tri_i].min()
move_tri = liver.tri_elements[col_tri_index[inter_tri_i]]
temp_delta[move_tri, itemp] = - move_t # collect all possible move_t for all vertices
move_tris.setdefault(key, []).append(move_tri.flatten())
itemp += 1
new_vtx_delta += temp_delta.max(axis=-1)[:, np.newaxis] * nv_ave[np.newaxis,:] # move with the maximal move_t
new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
print(' Multi ',end='')
else:
# only 1 colliable tri_elements
move_t = ts[:, inter_tri_index].min()
move_tri = liver.tri_elements[col_tri_index[inter_tri_index]]
new_vtx_delta[move_tri] += -move_t * nv_ave
new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
move_tris.setdefault(key, []).append(move_tri.flatten())
print(' Single ',end='')
return new_vtx_delta, move_tris, nv_aves, new_vtx_deltas
'''
static methods:
1. lame_param
2. tri_mid_vec
3. rotation_matrix
4. flatten_list
'''
def lame_param(E, v):
la = E * v / (1 + v) / (1 - 2 * v)
mu = E / 2 / (1 + v)
return la, mu
def tri_mid_vec(vertices, tri_elements):
tri_vtx = vertices[tri_elements]
tri_mid = tri_vtx.mean(axis=1)
tri_normal_vec = np.cross(tri_vtx[:, 1] - tri_vtx[:, 0], tri_vtx[:, 2] - tri_vtx[:, 0])
tri_normal_vec = tri_normal_vec * (1.0 / np.linalg.norm(tri_normal_vec, axis=1))[:, np.newaxis]
return tri_mid, tri_normal_vec
def rotation_matrix(deg,axis='x'):
rad = np.deg2rad(deg)
s,c = np.sin(rad),np.cos(rad)
if axis=='x':
return np.array([ 1, 0, 0,
0, c, -s,
0, s, c]).reshape(-1,3)
elif axis=='y':
return np.array([ c, 0, s,
0, 1, 0,
-s, 0, c]).reshape(-1,3)
elif axis=='z':
return np.array([ c, -s, 0,
s, c, 0,
0, 0, 1]).reshape(-1,3)
else:
return np.ones((3,3))
# def flatten_list(l):
# # not work well
# for el in l:
# if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
# return flatten_list(el)
# else:
# return el
'''
matplotlibe subplot
1. create_axs
2. draw_liver
3. draw_liver_tool
'''
def create_axs(subplot_n,block=False,return_fig=False):
r = int(np.floor(np.sqrt(subplot_n)))
c = int(subplot_n/r)
fig = plt.figure(figsize=plt.figaspect(0.5))
axs = {}
for i in range(subplot_n):
axs[i] = fig.add_subplot(r, c, i+1, projection='3d')
if return_fig:
return axs,fig
return axs
def draw_liver(liver,ax):
ax.cla()
ax = liver.plt_vtx(ax=ax)
ax = liver.plt_x(ax=ax)
plt_equal(ax)
return ax
def draw_liver_F(liver,axs,f_scl = 5e0):
# Fes, Ficp, Fis+ displacement
axs[0].cla()
axs[0] = liver.plt_x(ax=axs[0])
axs[0] = liver.plt_Fes(vec_to_scl=f_scl,ax=axs[0])
plt_equal(axs[0])
axs[1].cla()
axs[1] = liver.plt_x(ax=axs[1])
axs[1] = liver.plt_Ficp(vec_to_scl=f_scl,ax=axs[1])
plt_equal(axs[1])
axs[2].cla()
axs[2] = liver.plt_vtx(ax=axs[2])
axs[2] = liver.plt_x(ax=axs[2])
axs[2] = liver.plt_Fis(vec_to_scl=f_scl,ax=axs[2])
plt_equal(axs[2])
return axs
def draw_liver_tool(liver,sg,axs,f_scl=5e0):
axs[0].cla()
axs[0] = liver.plt_x(ax=axs[0])
axs[0] = liver.plt_tri_normal_vec(vec_scl=f_scl/2,ax=axs[0])
plt_equal(axs[0])
axs[1].cla()
axs[1] = sg.plt_sg_x(ax=axs[1])
axs[1] = sg._plt_vtx_normal_vec(sg.x,vec_scl=f_scl/2,ax=axs[1])
plt_equal(axs[1])
axs[2].cla()
axs[2] = liver.plt_x(ax=axs[2])
axs[2] = sg.plt_sg_x(ax=axs[2])
plt_equal(axs[2])
axs_l = {axs[3],axs[4],axs[5]}
axs_l = draw_liver(liver,axs_l,f_scl=f_scl)
axs[3],axs[4],axs[5] = axs_l[0],axs_l[1],axs_l[2]
plt.draw()#plt.show(block=False)
return axs
'''
aabb
1. xyzminmax
2. _plt_AABB
3. plt_aabb_p
'''
def xyzminmax(aabb):
# xmin, ymin, zmin, xmax, ymax, zmax = aabb[0], aabb[1], aabb[2], aabb[3], aabb[4], aabb[5]
return aabb[0], aabb[1], aabb[2], aabb[3], aabb[4], aabb[5]
def plt_AABB(aabb, **kwargs):
c_line = '#9467bd'
c_p = '#e377c2'
if 'c' in kwargs.keys():
colors = kwargs['c']
if type(colors) is list:
c_line = colors[0]
c_p = colors[1]
elif type(colors) is str:
c_line = colors
ax = ax3d_handle(**kwargs)
# aabb: 1x6, xmin, ymin, zmin, xmax, ymax, zmax
xmin, ymin, zmin, xmax, ymax, zmax = xyzminmax(aabb)
xyz = np.array([xmin, ymin, zmin, xmax, ymin, zmin, xmax, ymax, zmin, xmin, ymax, zmin,
xmin, ymin, zmax, xmax, ymin, zmax, xmax, ymax, zmax, xmin, ymax, zmax]).reshape(-1, 3)
line_segs = np.array([1, 2, 2, 3, 3, 4, 4, 1,
1, 5, 2, 6, 3, 7, 4, 8,
5, 6, 6, 7, 7, 8, 8, 5]).reshape(-1, 2) - 1
line_vt = np.hstack((xyz[line_segs[:, 0]], xyz[line_segs[:, 1]])).copy()
lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors=c_line, linestyles='--')
ax.add_collection(lc)
ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], marker='o', c=c_p)
return ax
def plt_aabb_p(aabb, p, **kwargs):
ax = ax3d_handle(**kwargs)
ax.scatter(p[0], p[1], p[2], c='#22D8C3')
plt_AABB(aabb, ax=ax)
return ax
'''
ax handle
1. 1) plt_equal
2) plt_show_equal
3) set_axes_equal
4) _set_axes_radius
2. ax3d_handle
3. plt_tet
4. plt_tet_ps
5. plt_normal_vecs
6. plt_tri
7. plt_tri_ps
'''
def plt_equal(ax,limits = None):
ax.set_box_aspect((1, 1, 1)) # IMPORTANT - this is the new, key line
set_axes_equal(ax,limits=limits) # IMPORTANT - this is also required
def plt_show_equal(ax,block=False,limits = None):
plt_equal(ax,limits=limits)
plt.show(block=block)
def set_axes_equal(ax: plt.Axes,limits = None):
"""Set 3D plot axes to equal scale.
Make axes of 3D plot have equal scale so that spheres appear as
spheres and cubes as cubes. Required since `ax.axis('equal')`
and `ax.set_aspect('equal')` don't work on 3D.
"""
if limits is None:
limits = np.array([
ax.get_xlim3d(),
ax.get_ylim3d(),
ax.get_zlim3d(),
])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
_set_axes_radius(ax, origin, radius)
def _set_axes_radius(ax, origin, radius):
x, y, z = origin
ax.set_xlim3d([x - radius, x + radius])
ax.set_ylim3d([y - radius, y + radius])
ax.set_zlim3d([z - radius, z + radius])
def ax3d_handle(return_fig=False,**kwargs):
if 'ax' in kwargs:
ax = kwargs['ax']
else:
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(projection='3d')
if return_fig:
return ax,fig
return ax
def plt_tet(vs, text_opt='off', **kwargs):
ax = ax3d_handle(**kwargs)
ax.scatter(vs[:, 0], vs[:, 1], vs[:, 2], c='#BCB6E3')
if text_opt == "on":
for i in range(4): ax.text(vs[i, 0], vs[i, 1], vs[i, 2], f'{i + 1}')
line_order = np.array([1, 2, 1, 3, 1, 4, 2, 3, 2, 4, 3, 4]).reshape(-1, 2) - 1
line_vt = np.hstack((vs[line_order[:, 0]], vs[line_order[:, 1]]))
lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors='#8A7BFB')
ax.add_collection(lc)
return ax
def plt_tet_ps(vs, p, text_opt='off', **kwargs):
p = np.array(p)
ax = ax3d_handle(**kwargs)
ax = plt_tet(vs, text_opt=text_opt, ax=ax)
if len(p.shape) == 1: p = p.reshape(1, -1)
ax.scatter(p[:, 0], p[:, 1], p[:, 2], c='#22D8C3')
return ax
def plt_normal_vecs(base_ps, vecs, scl=1, **kwargs):
vesc_scl = vecs * scl
ax = ax3d_handle(**kwargs)
ax.scatter(base_ps[:, 0], base_ps[:, 1], base_ps[:, 2], c='#1D1788')
ax.quiver(base_ps[:, 0], base_ps[:, 1], base_ps[:, 2],
vesc_scl[:, 0], vesc_scl[:, 1], vesc_scl[:, 2], color='#7D75FE')
return ax
def plt_tet_ps_vecs(vs, p, vec, scl=1, text_opt = 'off', **kwargs):
ax = ax3d_handle(**kwargs)
ax = plt_tet_ps(vs, p, ax=ax, text_opt = text_opt)
if len(p.shape) == 1: p = p.reshape(1, -1)
if len(vec.shape) == 1: vec = vec.reshape(1, -1)
ax = plt_normal_vecs(p, vec, scl=scl, ax=ax)
return ax
def plt_tri(vs, text_opt='off', **kwargs):
ax = ax3d_handle(**kwargs)
ax.scatter(vs[:, 0], vs[:, 1], vs[:, 2], c='#ff00ff')
if text_opt == "on":
for i in range(3): ax.text(vs[i, 0], vs[i, 1], vs[i, 2], f'{i + 1}')
line_order = np.array([1, 2, 1, 3, 2, 3]).reshape(-1, 2) - 1
line_vt = np.hstack((vs[line_order[:, 0]], vs[line_order[:, 1]]))
lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors='#9933ff')
ax.add_collection(lc)
return ax
def plt_tri_ps(vs, p, text_opt='off', **kwargs):
ax = ax3d_handle(**kwargs)
ax = plt_tri(vs, text_opt=text_opt, ax=ax)
if len(p.shape) == 1: p = p.reshape(1, -1)
ax.scatter(p[:, 0], p[:, 1], p[:, 2], c='#22D8C3')
return ax
|
# Copyright (C) 2020 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
import html
import json
import re
import textwrap
from io import BytesIO, StringIO
import aiohttp
import bs4
import jikanpy
import pendulum
import requests
from jikanpy import Jikan
from jikanpy.exceptions import APIException
from telethon.errors.rpcerrorlist import FilePartsInvalidError
from telethon.tl.types import (
DocumentAttributeAnimated,
DocumentAttributeFilename,
MessageMediaDocument,
)
from telethon.utils import is_image, is_video
from userbot import CMD_HELP
from userbot.events import register
jikan = Jikan()
# Anime Helper
def getPosterLink(mal):
# grab poster from kitsu
kitsu = getKitsu(mal)
image = requests.get(f"https://kitsu.io/api/edge/anime/{kitsu}").json()
return image["data"]["attributes"]["posterImage"]["original"]
def getKitsu(mal):
# get kitsu id from mal id
link = f"https://kitsu.io/api/edge/mappings?filter[external_site]=myanimelist/anime&filter[external_id]={mal}"
result = requests.get(link).json()["data"][0]["id"]
link = f"https://kitsu.io/api/edge/mappings/{result}/item?fields[anime]=slug"
return requests.get(link).json()["data"]["id"]
def getBannerLink(mal, kitsu_search=True):
# try getting kitsu backdrop
if kitsu_search:
kitsu = getKitsu(mal)
image = f"http://media.kitsu.io/anime/cover_images/{kitsu}/original.jpg"
response = requests.get(image)
if response.status_code == 200:
return image
# try getting anilist banner
query = """
query ($idMal: Int){
Media(idMal: $idMal){
bannerImage
}
}
"""
data = {"query": query, "variables": {"idMal": int(mal)}}
image = requests.post("https://graphql.anilist.co", json=data).json()["data"][
"Media"
]["bannerImage"]
if image:
return image
return getPosterLink(mal)
def get_anime_manga(mal_id, search_type, _user_id):
jikan = jikanpy.jikan.Jikan()
if search_type == "anime_anime":
result = jikan.anime(mal_id)
trailer = result["trailer_url"]
if trailer:
LOL = f"<a href='{trailer}'>YouTube</a>"
else:
LOL = "<code>No Trailer Available</code>"
image = getBannerLink(mal_id)
studio_string = ", ".join(
studio_info["name"] for studio_info in result["studios"]
)
producer_string = ", ".join(
producer_info["name"] for producer_info in result["producers"]
)
elif search_type == "anime_manga":
result = jikan.manga(mal_id)
image = result["image_url"]
caption = f"<a href='{result["url"]}'>{result["title"]}</a>"
if result["title_japanese"]:
caption += f" ({result["title_japanese"]})\n"
else:
caption += "\n"
alternative_names = []
if result["title_english"] is not None:
alternative_names.append(result["title_english"])
alternative_names.extend(result["title_synonyms"])
if alternative_names:
alternative_names_string = ", ".join(alternative_names)
caption += f"\n<b>Also known as</b> : <code>{alternative_names_string}</code>\n"
genre_string = ", ".join(genre_info["name"] for genre_info in result["genres"])
if result["synopsis"] is not None:
synopsis = result["synopsis"].split(" ", 60)
try:
synopsis.pop(60)
except IndexError:
pass
synopsis_string = " ".join(synopsis) + "..."
else:
synopsis_string = "Unknown"
for entity in result:
if result[entity] is None:
result[entity] = "Unknown"
if search_type == "anime_anime":
caption += textwrap.dedent(
f"""
<b>Type :</b> <code>{result['type']}</code>
<b>Status :</b> <code>{result['status']}</code>
<b>Aired :</b> <code>{result['aired']['string']}</code>
<b>Episodes :</b> <code>{result['episodes']} ({result['duration']})</code>
<b>Rating :</b> <code>{result['score']}</code>
<b>Premiered :</b> <code>{result['premiered']}</code>
<b>Genres :</b> <code>{genre_string}</code>
<b>Studios :</b> <code>{studio_string}</code>
<b>Producers :</b> <code>{producer_string}</code>
<b>Trailer :</b> {LOL}
<b>📖 Synopsis :</b> <i>{synopsis_string}</i> <a href='{result['url']}'>Read More</a>
"""
)
elif search_type == "anime_manga":
caption += textwrap.dedent(
f"""
<b>Type :</b> <code>{result['type']}</code>
<b>Status :</b> <code>{result['status']}</code>
<b>Volumes :</b> <code>{result['volumes']}</code>
<b>Chapters :</b> <code>{result['chapters']}</code>
<b>Score :</b> <code>{result['score']}</code>
<b>Genres :</b> <code>{genre_string}</code>
📖 <b>Synopsis :</b> <i>{synopsis_string}</i> <a href='{result['url']}'>Read More</a>
"""
)
return caption, image
def get_poster(query):
    """Scrape IMDb search for *query* and return the first result's poster URL.

    Returns None implicitly when the result page has no "image_src" link.
    NOTE(review): assumes the search page has at least one "odd" result
    row — raises IndexError otherwise; confirm callers tolerate that.
    """
    url_enc_name = query.replace(" ", "+")
    # Searching for query list in imdb
    page = requests.get(
        f"https://www.imdb.com/find?ref_=nv_sr_fn&q={url_enc_name}&s=all"
    )
    soup = bs4.BeautifulSoup(page.content, "lxml")
    odds = soup.findAll("tr", "odd")
    # Fetching the first post from search
    page_link = "http://www.imdb.com/" + odds[0].findNext("td").findNext("td").a["href"]
    page1 = requests.get(page_link)
    soup = bs4.BeautifulSoup(page1.content, "lxml")
    # Poster Link
    image = soup.find("link", attrs={"rel": "image_src"}).get("href", None)
    if image is not None:
        # img_path = wget.download(image, os.path.join(Config.DOWNLOAD_LOCATION, 'imdb_poster.jpg'))
        return image
def replace_text(text):
    """Strip double quotes, literal ``\\r``/``\\n`` escape sequences and
    backslashes from *text* (applied in that order)."""
    for junk in ('"', "\\r", "\\n", "\\"):
        text = text.replace(junk, "")
    return text
async def callAPI(search_str):
    """Query the AniList GraphQL API for an anime matching *search_str*.

    Returns the raw JSON response body as text (parsed by formatJSON).
    NOTE(review): synchronous requests.post inside an async def blocks the
    event loop while the request is in flight — confirm that is acceptable.
    """
    query = """
    query ($id: Int,$search: String) {
        Media (id: $id, type: ANIME,search: $search) {
            id
            title {
                romaji
                english
            }
            description (asHtml: false)
            startDate{
                year
            }
            episodes
            chapters
            volumes
            season
            type
            format
            status
            duration
            averageScore
            genres
            bannerImage
        }
    }
    """
    variables = {"search": search_str}
    url = "https://graphql.anilist.co"
    response = requests.post(url, json={"query": query, "variables": variables})
    return response.text
async def formatJSON(outData):
    """Render an AniList GraphQL response (raw JSON text) as Markdown.

    Args:
        outData: JSON string returned by the AniList API (see callAPI).

    Returns:
        A Markdown summary of the anime, or an ``**Error**`` message when
        the API reported errors.

    Fix: the f-strings used double-quoted subscripts inside double-quoted
    literals, which is a SyntaxError on Python < 3.12 (PEP 701 only allows
    quote reuse from 3.12) — switched to single quotes inside.
    """
    msg = ""
    jsonData = json.loads(outData)
    if "errors" in jsonData:
        # Surface the first API error verbatim.
        msg += f"**Error** : `{jsonData['errors'][0]['message']}`"
    else:
        media = jsonData["data"]["Media"]
        if "bannerImage" in media:
            # Markdown link on the symbol gives Telegram a preview banner.
            msg += f"[〽️]({media['bannerImage']})"
        else:
            msg += "〽️"
        title = media["title"]["romaji"]
        link = f"https://anilist.co/anime/{media['id']}"
        msg += f"[{title}]({link})"
        msg += f"\n\n**Type** : {media['format']}"
        msg += "\n**Genres** : "
        for g in media["genres"]:
            msg += g + " "
        msg += f"\n**Status** : {media['status']}"
        msg += f"\n**Episode** : {media['episodes']}"
        msg += f"\n**Year** : {media['startDate']['year']}"
        msg += f"\n**Score** : {media['averageScore']}"
        msg += f"\n**Duration** : {media['duration']} min\n\n"
        cat = f"{media['description']}"
        # AniList descriptions embed <br> tags; turn them into newlines.
        msg += " __" + re.sub("<br>", "\n", cat) + "__"
    return msg
@register(outgoing=True, pattern=r"^\.anilist ?(.*)")
async def anilist(event):
    """Userbot command `.anilist <name>`: reply with AniList info for an anime."""
    if event.fwd_from:
        return
    input_str = event.pattern_match.group(1)
    result = await callAPI(input_str)
    msg = await formatJSON(result)
    await event.edit(msg, link_preview=True)
@register(outgoing=True, pattern=r"^\.anime ?(.*)")
async def search_anime(message):
    """Userbot command `.anime <name>`: send MAL info + banner for an anime."""
    search_query = message.pattern_match.group(1)
    await message.get_reply_message()
    await message.edit("`Searching Anime..`")
    jikan = jikanpy.jikan.Jikan()
    search_result = jikan.search("anime", search_query)
    first_mal_id = search_result["results"][0]["mal_id"]
    caption, image = get_anime_manga(first_mal_id, "anime_anime", message.chat_id)
    try:
        await message.delete()
        await message.client.send_file(
            message.chat_id, file=image, caption=caption, parse_mode="HTML"
        )
    except BaseException:
        # Retry with the non-Kitsu banner if the first upload fails.
        image = getBannerLink(first_mal_id, False)
        await message.client.send_file(
            message.chat_id, file=image, caption=caption, parse_mode="HTML"
        )
@register(outgoing=True, pattern=r"^\.manga ?(.*)")
async def search_manga(message):
    """Userbot command `.manga <name>`: send MAL info + cover for a manga."""
    search_query = message.pattern_match.group(1)
    await message.get_reply_message()
    await message.edit("`Searching Manga..`")
    jikan = jikanpy.jikan.Jikan()
    search_result = jikan.search("manga", search_query)
    first_mal_id = search_result["results"][0]["mal_id"]
    caption, image = get_anime_manga(first_mal_id, "anime_manga", message.chat_id)
    await message.delete()
    await message.client.send_file(
        message.chat_id, file=image, caption=caption, parse_mode="HTML"
    )
@register(outgoing=True, pattern=r"^\.a(kaizoku|kayo) ?(.*)")
async def site_search(event):
    """Userbot commands `.akaizoku`/`.akayo <name>`: search anime download sites.

    The query comes from the command argument or the replied-to message;
    results are posted as an HTML list of links.
    """
    message = await event.get_reply_message()
    search_query = event.pattern_match.group(2)
    site = event.pattern_match.group(1)
    if search_query:
        pass
    elif message:
        search_query = message.text
    else:
        await event.edit("`Uuf Bro.. Gib something to Search`")
        return
    if site == "kaizoku":
        search_url = f"https://animekaizoku.com/?s={search_query}"
        html_text = requests.get(search_url).text
        soup = bs4.BeautifulSoup(html_text, "html.parser")
        search_result = soup.find_all("h2", {"class": "post-title"})
        if search_result:
            result = f"<a href='{search_url}'>Click Here For More Results</a> <b>of</b> <code>{html.escape(search_query)}</code> <b>on</b> <code>AnimeKaizoku</code>: \n\n"
            for entry in search_result:
                post_link = entry.a["href"]
                post_name = html.escape(entry.text.strip())
                result += f"• <a href='{post_link}'>{post_name}</a>\n"
            await event.edit(result, parse_mode="HTML")
        else:
            result = f"<b>No result found for</b> <code>{html.escape(search_query)}</code> <b>on</b> <code>AnimeKaizoku</code>"
            await event.edit(result, parse_mode="HTML")
    elif site == "kayo":
        search_url = f"https://animekayo.com/?s={search_query}"
        html_text = requests.get(search_url).text
        soup = bs4.BeautifulSoup(html_text, "html.parser")
        search_result = soup.find_all("h2", {"class": "title"})
        result = f"<a href='{search_url}'>Click Here For More Results</a> <b>of</b> <code>{html.escape(search_query)}</code> <b>on</b> <code>AnimeKayo</code>: \n\n"
        for entry in search_result:
            # The site renders "Nothing Found" as a result heading.
            if entry.text.strip() == "Nothing Found":
                result = f"<b>No result found for</b> <code>{html.escape(search_query)}</code> <b>on</b> <code>AnimeKayo</code>"
                break
            post_link = entry.a["href"]
            post_name = html.escape(entry.text.strip())
            result += f"• <a href='{post_link}'>{post_name}</a>\n"
        await event.edit(result, parse_mode="HTML")
@register(outgoing=True, pattern=r"^\.char ?(.*)")
async def character(event):
    """Userbot command `.char <name>`: send a MAL character's photo and bio.

    The name is taken from the command argument or, failing that, from the
    replied-to message.

    Fixes: the f-strings used double-quoted subscripts inside double-quoted
    literals (SyntaxError before Python 3.12); the jikan result no longer
    shadows this function's own name; a missing "about" field no longer
    crashes the bio trimming.
    """
    message = await event.get_reply_message()
    search_query = event.pattern_match.group(1)
    if not search_query:
        if message:
            search_query = message.text
        else:
            await event.edit("Format: `.char <character name>`")
            return
    await event.edit("`Searching Character...`")
    try:
        search_result = jikan.search("character", search_query)
    except APIException:
        await event.edit("`Character not found.`")
        return
    first_result = search_result["results"][0]
    char_info = jikan.character(first_result["mal_id"])
    caption = f"[{char_info['name']}]({char_info['url']})"
    if char_info["name_kanji"] != "Japanese":
        caption += f" ({char_info['name_kanji']})\n"
    else:
        caption += "\n"
    if char_info["nicknames"]:
        nicknames_string = ", ".join(char_info["nicknames"])
        caption += f"\n**Nicknames** : `{nicknames_string}`"
    # Trim the bio to at most 60 words ("about" can be missing/None).
    about = (char_info["about"] or "").split(" ", 60)
    try:
        about.pop(60)
    except IndexError:
        pass
    about_string = " ".join(about)
    mal_url = first_result["url"]
    # Replace missing fields so the caption never prints None.
    for entity in char_info:
        if char_info[entity] is None:
            char_info[entity] = "Unknown"
    caption += f"\n🔰**Extracted Character Data**🔰\n\n{about_string}"
    caption += f" [Read More]({mal_url})..."
    await event.delete()
    await event.client.send_file(
        event.chat_id,
        file=char_info["image_url"],
        caption=replace_text(caption),
        reply_to=event,
    )
@register(outgoing=True, pattern=r"^\.upcoming$")
async def upcoming(message):
    """Userbot command `.upcoming`: list next season's anime (~1000 chars max)."""
    rep = "<b>Upcoming anime</b>\n"
    later = jikan.season_later()
    anime = later.get("anime")
    for new in anime:
        name = new.get("title")
        url = new.get("url")
        rep += f"• <a href='{url}'>{name}</a>\n"
        # Stop well below Telegram's message size limit.
        if len(rep) > 1000:
            break
    await message.edit(rep, parse_mode="html")
@register(outgoing=True, pattern=r"^\.whatanime$")
async def whatanime(e):
    """Userbot command `.whatanime`: identify an anime scene via trace.moe.

    Uses the message's own media or the replied-to media (image/gif/video)
    and replies with title, episode, similarity, timestamp and a short
    preview clip.

    Fix: the f-strings used single-quoted subscripts inside single-quoted
    literals, which is a SyntaxError on Python < 3.12 — switched to double
    quotes inside.
    """
    media = e.media
    if not media:
        r = await e.get_reply_message()
        media = getattr(r, "media", None)
    if not media:
        await e.edit("`Media required`")
        return
    ig = is_gif(media) or is_video(media)
    if not is_image(media) and not ig:
        await e.edit("`Media must be an image or gif or video`")
        return
    filename = "file.jpg"
    if not ig and isinstance(media, MessageMediaDocument):
        attribs = media.document.attributes
        for i in attribs:
            if isinstance(i, DocumentAttributeFilename):
                filename = i.file_name
                break
    await e.edit("`Downloading image..`")
    # For animated media download the thumbnail: trace.moe wants a still frame.
    content = await e.client.download_media(media, bytes, thumb=-1 if ig else None)
    await e.edit("`Searching for result..`")
    file = memory_file(filename, content)
    async with aiohttp.ClientSession() as session:
        url = "https://api.trace.moe/search?anilistInfo"
        async with session.post(url, data={"image": file}) as raw_resp0:
            resp0 = await raw_resp0.json()
        js0 = resp0.get("result")
        if not js0:
            await e.edit("`No results found.`")
            return
        js0 = js0[0]
        text = f'<b>{html.escape(js0["anilist"]["title"]["romaji"])}'
        if js0["anilist"]["title"]["native"]:
            text += f' ({html.escape(js0["anilist"]["title"]["native"])})'
        text += "</b>\n"
        if js0["episode"]:
            text += f'<b>Episode:</b> {html.escape(str(js0["episode"]))}\n'
        percent = round(js0["similarity"] * 100, 2)
        text += f"<b>Similarity:</b> {percent}%\n"
        # Pull the "t=<seconds>" query parameter out of the preview URL.
        at = re.findall(r"t=(.+)&", js0["video"])[0]
        dt = pendulum.from_timestamp(float(at))
        text += f"<b>At:</b> {html.escape(dt.to_time_string())}"
        await e.edit(text, parse_mode="html")
        dt0 = pendulum.from_timestamp(js0["from"])
        dt1 = pendulum.from_timestamp(js0["to"])
        ctext = (
            f"{html.escape(dt0.to_time_string())} - {html.escape(dt1.to_time_string())}"
        )
        async with session.get(js0["video"]) as raw_resp1:
            file = memory_file("preview.mp4", await raw_resp1.read())
        try:
            await e.reply(ctext, file=file, parse_mode="html")
        except FilePartsInvalidError:
            await e.reply("`Cannot send preview.`")
def memory_file(name=None, contents=None, *, _bytes=True):
    """Build an in-memory file object, optionally named and pre-filled.

    str *contents* are encoded to UTF-8 when a bytes buffer is requested.
    The buffer is rewound to position 0 before being returned.
    """
    if _bytes:
        buf = BytesIO()
        if isinstance(contents, str):
            contents = contents.encode()
    else:
        buf = StringIO()
    if name:
        buf.name = name
    if contents:
        buf.write(contents)
    buf.seek(0)
    return buf
def is_gif(file):
    """Return True when *file* is an animated (gif-style) video document.

    Local workaround: telethon.utils.is_gif misbehaves here, so check the
    document attributes directly.
    """
    if not is_video(file):
        return False
    doc = getattr(file, "document", file)
    return DocumentAttributeAnimated() in doc.attributes
CMD_HELP.update(
{
"anime": "**Plugin : **`anime`\
\n\n • **Syntax :** `.anilist` **<nama anime>**\
\n • **Function : **Mencari informasi anime dari anilist\
\n\n • **Syntax :** `.anime` **<nama anime>**\
\n • **Function : **Mencari infomasi anime.\
\n\n • **Syntax :** `.manga` **<manga name>**\
\n • **Function : **Menari informasi manga.\
\n\n • **Syntax :** `.akaizoku` atau `.akayo` **<nama anime>**\
\n • **Function : **Mencari anime dan memberikan link tautan Unduh Anime.\
\n\n • **Syntax :** `.char` **<nama character anime>**\
\n • **Function : **Mencari informasi karakter anime.\
\n\n • **Syntax :** `.upcoming`\
\n • **Function : **Mencari informasi Anime yang akan datang.\
\n\n • **Syntax :** `.scanime` **<nama anime>** atau `.sanime` **<nama anime>**\
\n • **Function : **Mencari anime\
\n\n • **Syntax :** `.smanga` **<manga>**\
\n • **Function : **Untuk mencari akun terhapus dalam grup\
\n\n • **Syntax :** `.whatanime` **<Reply Gambar scene Anime.>**\
\n • **Function : **Temukan anime dari file media.\
"
}
)
| # Copyright (C) 2020 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
import html
import json
import re
import textwrap
from io import BytesIO, StringIO
import aiohttp
import bs4
import jikanpy
import pendulum
import requests
from jikanpy import Jikan
from jikanpy.exceptions import APIException
from telethon.errors.rpcerrorlist import FilePartsInvalidError
from telethon.tl.types import (
DocumentAttributeAnimated,
DocumentAttributeFilename,
MessageMediaDocument,
)
from telethon.utils import is_image, is_video
from userbot import CMD_HELP
from userbot.events import register
jikan = Jikan()
# Anime Helper
def getPosterLink(mal):
    """Return the original-resolution Kitsu poster URL for a MAL anime id."""
    # grab poster from kitsu
    kitsu = getKitsu(mal)
    image = requests.get(f"https://kitsu.io/api/edge/anime/{kitsu}").json()
    return image["data"]["attributes"]["posterImage"]["original"]
def getKitsu(mal):
    """Map a MyAnimeList anime id to the corresponding Kitsu id."""
    # get kitsu id from mal id
    link = f"https://kitsu.io/api/edge/mappings?filter[external_site]=myanimelist/anime&filter[external_id]={mal}"
    result = requests.get(link).json()["data"][0]["id"]
    link = f"https://kitsu.io/api/edge/mappings/{result}/item?fields[anime]=slug"
    return requests.get(link).json()["data"]["id"]
def getBannerLink(mal, kitsu_search=True):
    """Return a banner/backdrop image URL for a MAL anime id.

    Tries the Kitsu cover image first (when *kitsu_search*), then the
    AniList banner, and finally falls back to the Kitsu poster.
    """
    # try getting kitsu backdrop
    if kitsu_search:
        kitsu = getKitsu(mal)
        image = f"http://media.kitsu.io/anime/cover_images/{kitsu}/original.jpg"
        response = requests.get(image)
        if response.status_code == 200:
            return image
    # try getting anilist banner
    query = """
    query ($idMal: Int){
        Media(idMal: $idMal){
            bannerImage
        }
    }
    """
    data = {"query": query, "variables": {"idMal": int(mal)}}
    image = requests.post("https://graphql.anilist.co", json=data).json()["data"][
        "Media"
    ]["bannerImage"]
    if image:
        return image
    return getPosterLink(mal)
def get_anime_manga(mal_id, search_type, _user_id):
    """Build an HTML caption and image URL for a MAL anime or manga.

    Args:
        mal_id: MyAnimeList id of the title.
        search_type: "anime_anime" or "anime_manga" — selects the Jikan
            endpoint and the caption template.
        _user_id: unused; kept for call-site compatibility.

    Returns:
        (caption, image): HTML caption text and an image URL.
    """
    jikan = jikanpy.jikan.Jikan()
    if search_type == "anime_anime":
        result = jikan.anime(mal_id)
        trailer = result["trailer_url"]
        if trailer:
            LOL = f"<a href='{trailer}'>YouTube</a>"
        else:
            LOL = "<code>No Trailer Available</code>"
        image = getBannerLink(mal_id)
        studio_string = ", ".join(
            studio_info["name"] for studio_info in result["studios"]
        )
        producer_string = ", ".join(
            producer_info["name"] for producer_info in result["producers"]
        )
    elif search_type == "anime_manga":
        result = jikan.manga(mal_id)
        image = result["image_url"]
    caption = f"<a href='{result['url']}'>{result['title']}</a>"
    if result["title_japanese"]:
        caption += f" ({result['title_japanese']})\n"
    else:
        caption += "\n"
    alternative_names = []
    if result["title_english"] is not None:
        alternative_names.append(result["title_english"])
    alternative_names.extend(result["title_synonyms"])
    if alternative_names:
        alternative_names_string = ", ".join(alternative_names)
        caption += f"\n<b>Also known as</b> : <code>{alternative_names_string}</code>\n"
    genre_string = ", ".join(genre_info["name"] for genre_info in result["genres"])
    if result["synopsis"] is not None:
        # Trim the synopsis to at most 60 words.
        synopsis = result["synopsis"].split(" ", 60)
        try:
            synopsis.pop(60)
        except IndexError:
            pass
        synopsis_string = " ".join(synopsis) + "..."
    else:
        synopsis_string = "Unknown"
    # Replace missing fields so the templates below never print None.
    for entity in result:
        if result[entity] is None:
            result[entity] = "Unknown"
    if search_type == "anime_anime":
        caption += textwrap.dedent(
            f"""
            <b>Type :</b> <code>{result['type']}</code>
            <b>Status :</b> <code>{result['status']}</code>
            <b>Aired :</b> <code>{result['aired']['string']}</code>
            <b>Episodes :</b> <code>{result['episodes']} ({result['duration']})</code>
            <b>Rating :</b> <code>{result['score']}</code>
            <b>Premiered :</b> <code>{result['premiered']}</code>
            <b>Genres :</b> <code>{genre_string}</code>
            <b>Studios :</b> <code>{studio_string}</code>
            <b>Producers :</b> <code>{producer_string}</code>
            <b>Trailer :</b> {LOL}
            <b>📖 Synopsis :</b> <i>{synopsis_string}</i> <a href='{result['url']}'>Read More</a>
            """
        )
    elif search_type == "anime_manga":
        caption += textwrap.dedent(
            f"""
            <b>Type :</b> <code>{result['type']}</code>
            <b>Status :</b> <code>{result['status']}</code>
            <b>Volumes :</b> <code>{result['volumes']}</code>
            <b>Chapters :</b> <code>{result['chapters']}</code>
            <b>Score :</b> <code>{result['score']}</code>
            <b>Genres :</b> <code>{genre_string}</code>
            📖 <b>Synopsis :</b> <i>{synopsis_string}</i> <a href='{result['url']}'>Read More</a>
            """
        )
    return caption, image
def get_poster(query):
    """Scrape IMDb search for *query* and return the first result's poster URL.

    Returns None implicitly when the result page has no "image_src" link.
    NOTE(review): assumes the search page has at least one "odd" result
    row — raises IndexError otherwise; confirm callers tolerate that.
    """
    url_enc_name = query.replace(" ", "+")
    # Searching for query list in imdb
    page = requests.get(
        f"https://www.imdb.com/find?ref_=nv_sr_fn&q={url_enc_name}&s=all"
    )
    soup = bs4.BeautifulSoup(page.content, "lxml")
    odds = soup.findAll("tr", "odd")
    # Fetching the first post from search
    page_link = "http://www.imdb.com/" + odds[0].findNext("td").findNext("td").a["href"]
    page1 = requests.get(page_link)
    soup = bs4.BeautifulSoup(page1.content, "lxml")
    # Poster Link
    image = soup.find("link", attrs={"rel": "image_src"}).get("href", None)
    if image is not None:
        # img_path = wget.download(image, os.path.join(Config.DOWNLOAD_LOCATION, 'imdb_poster.jpg'))
        return image
def replace_text(text):
    """Drop double quotes and literal ``\\r``/``\\n`` escapes, then strip
    any remaining backslashes (same order as chained ``str.replace``)."""
    for token in ('"', "\\r", "\\n"):
        text = text.replace(token, "")
    return text.replace("\\", "")
async def callAPI(search_str):
    """Query the AniList GraphQL API for an anime matching *search_str*.

    Returns the raw JSON response body as text (parsed by formatJSON).
    NOTE(review): synchronous requests.post inside an async def blocks the
    event loop while the request is in flight — confirm that is acceptable.
    """
    query = """
    query ($id: Int,$search: String) {
        Media (id: $id, type: ANIME,search: $search) {
            id
            title {
                romaji
                english
            }
            description (asHtml: false)
            startDate{
                year
            }
            episodes
            chapters
            volumes
            season
            type
            format
            status
            duration
            averageScore
            genres
            bannerImage
        }
    }
    """
    variables = {"search": search_str}
    url = "https://graphql.anilist.co"
    response = requests.post(url, json={"query": query, "variables": variables})
    return response.text
async def formatJSON(outData):
    """Format an AniList GraphQL response (raw JSON text) into a Markdown
    summary; returns an ``**Error**`` message when the API reported errors."""
    data = json.loads(outData)
    if "errors" in data:
        return f"**Error** : `{data['errors'][0]['message']}`"
    media = data["data"]["Media"]
    parts = []
    if "bannerImage" in media:
        parts.append(f"[〽️]({media['bannerImage']})")
    else:
        parts.append("〽️")
    parts.append(f"[{media['title']['romaji']}](https://anilist.co/anime/{media['id']})")
    parts.append(f"\n\n**Type** : {media['format']}")
    parts.append("\n**Genres** : ")
    for genre in media["genres"]:
        parts.append(genre + " ")
    parts.append(f"\n**Status** : {media['status']}")
    parts.append(f"\n**Episode** : {media['episodes']}")
    parts.append(f"\n**Year** : {media['startDate']['year']}")
    parts.append(f"\n**Score** : {media['averageScore']}")
    parts.append(f"\n**Duration** : {media['duration']} min\n\n")
    description = re.sub("<br>", "\n", f"{media['description']}")
    parts.append(" __" + description + "__")
    return "".join(parts)
@register(outgoing=True, pattern=r"^\.anilist ?(.*)")
async def anilist(event):
    """Userbot command `.anilist <name>`: reply with AniList info for an anime."""
    if event.fwd_from:
        return
    input_str = event.pattern_match.group(1)
    result = await callAPI(input_str)
    msg = await formatJSON(result)
    await event.edit(msg, link_preview=True)
@register(outgoing=True, pattern=r"^\.anime ?(.*)")
async def search_anime(message):
    """Userbot command `.anime <name>`: send MAL info + banner for an anime."""
    search_query = message.pattern_match.group(1)
    await message.get_reply_message()
    await message.edit("`Searching Anime..`")
    jikan = jikanpy.jikan.Jikan()
    search_result = jikan.search("anime", search_query)
    first_mal_id = search_result["results"][0]["mal_id"]
    caption, image = get_anime_manga(first_mal_id, "anime_anime", message.chat_id)
    try:
        await message.delete()
        await message.client.send_file(
            message.chat_id, file=image, caption=caption, parse_mode="HTML"
        )
    except BaseException:
        # Retry with the non-Kitsu banner if the first upload fails.
        image = getBannerLink(first_mal_id, False)
        await message.client.send_file(
            message.chat_id, file=image, caption=caption, parse_mode="HTML"
        )
@register(outgoing=True, pattern=r"^\.manga ?(.*)")
async def search_manga(message):
    """Userbot command `.manga <name>`: send MAL info + cover for a manga."""
    search_query = message.pattern_match.group(1)
    await message.get_reply_message()
    await message.edit("`Searching Manga..`")
    jikan = jikanpy.jikan.Jikan()
    search_result = jikan.search("manga", search_query)
    first_mal_id = search_result["results"][0]["mal_id"]
    caption, image = get_anime_manga(first_mal_id, "anime_manga", message.chat_id)
    await message.delete()
    await message.client.send_file(
        message.chat_id, file=image, caption=caption, parse_mode="HTML"
    )
@register(outgoing=True, pattern=r"^\.a(kaizoku|kayo) ?(.*)")
async def site_search(event):
    """Userbot commands `.akaizoku`/`.akayo <name>`: search anime download sites.

    The query comes from the command argument or the replied-to message;
    results are posted as an HTML list of links.
    """
    message = await event.get_reply_message()
    search_query = event.pattern_match.group(2)
    site = event.pattern_match.group(1)
    if search_query:
        pass
    elif message:
        search_query = message.text
    else:
        await event.edit("`Uuf Bro.. Gib something to Search`")
        return
    if site == "kaizoku":
        search_url = f"https://animekaizoku.com/?s={search_query}"
        html_text = requests.get(search_url).text
        soup = bs4.BeautifulSoup(html_text, "html.parser")
        search_result = soup.find_all("h2", {"class": "post-title"})
        if search_result:
            result = f"<a href='{search_url}'>Click Here For More Results</a> <b>of</b> <code>{html.escape(search_query)}</code> <b>on</b> <code>AnimeKaizoku</code>: \n\n"
            for entry in search_result:
                post_link = entry.a["href"]
                post_name = html.escape(entry.text.strip())
                result += f"• <a href='{post_link}'>{post_name}</a>\n"
            await event.edit(result, parse_mode="HTML")
        else:
            result = f"<b>No result found for</b> <code>{html.escape(search_query)}</code> <b>on</b> <code>AnimeKaizoku</code>"
            await event.edit(result, parse_mode="HTML")
    elif site == "kayo":
        search_url = f"https://animekayo.com/?s={search_query}"
        html_text = requests.get(search_url).text
        soup = bs4.BeautifulSoup(html_text, "html.parser")
        search_result = soup.find_all("h2", {"class": "title"})
        result = f"<a href='{search_url}'>Click Here For More Results</a> <b>of</b> <code>{html.escape(search_query)}</code> <b>on</b> <code>AnimeKayo</code>: \n\n"
        for entry in search_result:
            # The site renders "Nothing Found" as a result heading.
            if entry.text.strip() == "Nothing Found":
                result = f"<b>No result found for</b> <code>{html.escape(search_query)}</code> <b>on</b> <code>AnimeKayo</code>"
                break
            post_link = entry.a["href"]
            post_name = html.escape(entry.text.strip())
            result += f"• <a href='{post_link}'>{post_name}</a>\n"
        await event.edit(result, parse_mode="HTML")
@register(outgoing=True, pattern=r"^\.char ?(.*)")
async def character(event):
    """Userbot command `.char <name>`: send a MAL character's photo and bio."""
    message = await event.get_reply_message()
    search_query = event.pattern_match.group(1)
    if search_query:
        pass
    elif message:
        search_query = message.text
    else:
        await event.edit("Format: `.char <character name>`")
        return
    await event.edit("`Searching Character...`")
    try:
        search_result = jikan.search("character", search_query)
    except APIException:
        await event.edit("`Character not found.`")
        return
    first_mal_id = search_result["results"][0]["mal_id"]
    # NOTE(review): local `character` shadows this function's own name.
    character = jikan.character(first_mal_id)
    caption = f"[{character['name']}]({character['url']})"
    if character["name_kanji"] != "Japanese":
        caption += f" ({character['name_kanji']})\n"
    else:
        caption += "\n"
    if character["nicknames"]:
        nicknames_string = ", ".join(character["nicknames"])
        caption += f"\n**Nicknames** : `{nicknames_string}`"
    # Trim the bio to at most 60 words.
    about = character["about"].split(" ", 60)
    try:
        about.pop(60)
    except IndexError:
        pass
    about_string = " ".join(about)
    mal_url = search_result["results"][0]["url"]
    # Replace missing fields so the caption never prints None.
    for entity in character:
        if character[entity] is None:
            character[entity] = "Unknown"
    caption += f"\n🔰**Extracted Character Data**🔰\n\n{about_string}"
    caption += f" [Read More]({mal_url})..."
    await event.delete()
    await event.client.send_file(
        event.chat_id,
        file=character["image_url"],
        caption=replace_text(caption),
        reply_to=event,
    )
@register(outgoing=True, pattern=r"^\.upcoming$")
async def upcoming(message):
    """Userbot command `.upcoming`: list next season's anime (~1000 chars max)."""
    rep = "<b>Upcoming anime</b>\n"
    later = jikan.season_later()
    anime = later.get("anime")
    for new in anime:
        name = new.get("title")
        url = new.get("url")
        rep += f"• <a href='{url}'>{name}</a>\n"
        # Stop well below Telegram's message size limit.
        if len(rep) > 1000:
            break
    await message.edit(rep, parse_mode="html")
@register(outgoing=True, pattern=r"^\.whatanime$")
async def whatanime(e):
    """Userbot command `.whatanime`: identify an anime scene via trace.moe.

    Uses the message's own media or the replied-to media (image/gif/video)
    and replies with title, episode, similarity, timestamp and a short
    preview clip.
    """
    media = e.media
    if not media:
        r = await e.get_reply_message()
        media = getattr(r, "media", None)
    if not media:
        await e.edit("`Media required`")
        return
    ig = is_gif(media) or is_video(media)
    if not is_image(media) and not ig:
        await e.edit("`Media must be an image or gif or video`")
        return
    filename = "file.jpg"
    if not ig and isinstance(media, MessageMediaDocument):
        attribs = media.document.attributes
        for i in attribs:
            if isinstance(i, DocumentAttributeFilename):
                filename = i.file_name
                break
    await e.edit("`Downloading image..`")
    # For animated media download the thumbnail: trace.moe wants a still frame.
    content = await e.client.download_media(media, bytes, thumb=-1 if ig else None)
    await e.edit("`Searching for result..`")
    file = memory_file(filename, content)
    async with aiohttp.ClientSession() as session:
        url = "https://api.trace.moe/search?anilistInfo"
        async with session.post(url, data={"image": file}) as raw_resp0:
            resp0 = await raw_resp0.json()
        js0 = resp0.get("result")
        if not js0:
            await e.edit("`No results found.`")
            return
        js0 = js0[0]
        text = f'<b>{html.escape(js0["anilist"]["title"]["romaji"])}'
        if js0["anilist"]["title"]["native"]:
            text += f' ({html.escape(js0["anilist"]["title"]["native"])})'
        text += "</b>\n"
        if js0["episode"]:
            text += f'<b>Episode:</b> {html.escape(str(js0["episode"]))}\n'
        percent = round(js0["similarity"] * 100, 2)
        text += f"<b>Similarity:</b> {percent}%\n"
        # Pull the "t=<seconds>" query parameter out of the preview URL.
        at = re.findall(r"t=(.+)&", js0["video"])[0]
        dt = pendulum.from_timestamp(float(at))
        text += f"<b>At:</b> {html.escape(dt.to_time_string())}"
        await e.edit(text, parse_mode="html")
        dt0 = pendulum.from_timestamp(js0["from"])
        dt1 = pendulum.from_timestamp(js0["to"])
        ctext = (
            f"{html.escape(dt0.to_time_string())} - {html.escape(dt1.to_time_string())}"
        )
        async with session.get(js0["video"]) as raw_resp1:
            file = memory_file("preview.mp4", await raw_resp1.read())
        try:
            await e.reply(ctext, file=file, parse_mode="html")
        except FilePartsInvalidError:
            await e.reply("`Cannot send preview.`")
def memory_file(name=None, contents=None, *, _bytes=True):
    """Return an in-memory file object, rewound to offset 0.

    str *contents* are UTF-8 encoded when a bytes buffer is requested;
    *name* (if given) is attached as the buffer's ``name`` attribute.
    """
    payload = contents
    if _bytes and isinstance(payload, str):
        payload = payload.encode()
    stream = BytesIO() if _bytes else StringIO()
    if name:
        stream.name = name
    if payload:
        stream.write(payload)
    stream.seek(0)
    return stream
def is_gif(file):
    """Return True when *file* is an animated (gif-style) video document."""
    # ngl this should be fixed, telethon.utils.is_gif but working
    # lazy to go to github and make an issue kek
    if not is_video(file):
        return False
    return DocumentAttributeAnimated() in getattr(file, "document", file).attributes
CMD_HELP.update(
{
"anime": "**Plugin : **`anime`\
\n\n • **Syntax :** `.anilist` **<nama anime>**\
\n • **Function : **Mencari informasi anime dari anilist\
\n\n • **Syntax :** `.anime` **<nama anime>**\
\n • **Function : **Mencari infomasi anime.\
\n\n • **Syntax :** `.manga` **<manga name>**\
\n • **Function : **Menari informasi manga.\
\n\n • **Syntax :** `.akaizoku` atau `.akayo` **<nama anime>**\
\n • **Function : **Mencari anime dan memberikan link tautan Unduh Anime.\
\n\n • **Syntax :** `.char` **<nama character anime>**\
\n • **Function : **Mencari informasi karakter anime.\
\n\n • **Syntax :** `.upcoming`\
\n • **Function : **Mencari informasi Anime yang akan datang.\
\n\n • **Syntax :** `.scanime` **<nama anime>** atau `.sanime` **<nama anime>**\
\n • **Function : **Mencari anime\
\n\n • **Syntax :** `.smanga` **<manga>**\
\n • **Function : **Untuk mencari akun terhapus dalam grup\
\n\n • **Syntax :** `.whatanime` **<Reply Gambar scene Anime.>**\
\n • **Function : **Temukan anime dari file media.\
"
}
)
|
# -*- coding: utf-8 -*-
# # How long does a Computron take?
#
# - [build model of computron\-to\-wallclock relationship · Issue \#3459 · Agoric/agoric\-sdk](https://github.com/Agoric/agoric-sdk/issues/3459)
# ## Preface: Python Data Tools
#
# See also [shell.nix](shell.nix).
# +
import pandas as pd
import numpy as np
import sqlalchemy as sqla
import matplotlib.cm as cm
import dask
import dask.dataframe as dd
import dask.bag as db
dict(pandas=pd.__version__,
numpy=np.__version__,
sqlalchemy=sqla.__version__,
dask=dask.__version__)
# -
# ### Notebook / Scripting Authority
#
# As a nod to OCap discipline, we avoid ambient authority unless we're in a `TOP`-level scripting or notebook context.
TOP = __name__ == '__main__'
# Logging is a bit of an exception to OCap discipline, as is stderr.
# +
import logging
from sys import stderr
logging.basicConfig(level=logging.INFO, stream=stderr,
format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger(__name__)
if TOP:
log.info('notebook start')
# -
# ### Dask Parallel Scheduler UI
# +
from dask.distributed import Client, LocalCluster
if TOP:
cluster = LocalCluster(n_workers=8)
client = Client(cluster)
TOP and client
# -
# ## Result Store
# +
db4_uri = 'sqlite:///slog4.db'
if TOP:
db4 = sqla.create_engine(db4_uri)
# -
# ## SLog files
#
# [rclone support for Google drive](https://rclone.org/drive/)
#
# > This contains 564GB of data from 117 participants, spread across 172 slogfiles ...
#
# ```
# [nix-shell:~/t4]$ rclone sync --progress 'Engineering:/2021-07-04 testnet phase4-stress data/validator slogfiles' ./slogfiles/
# Transferred: 78.633G / 78.633 GBytes, 100%, 101.302 MBytes/s, ETA 0s
# Checks: 5 / 5, 100%
# Transferred: 182 / 182, 100%
# Elapsed time: 13m16.0s
# ```
#
# +
import importlib
import slogdata
importlib.reload(slogdata)
from slogdata import SlogAccess, CLI, show_times
if TOP:
def _dir(path):
import pathlib
return pathlib.Path(path)
def _cli(bin):
from subprocess import run, Popen
return CLI(bin, run, Popen, debug=True)
_sa4 = SlogAccess(_dir('/home/customer/t4/slogfiles'),
_cli('/home/customer/projects/gztool/gztool'))
TOP and show_times(_sa4.get_records('pathrocknetwork/chain-15.pathrocknetwork.slog.gz', 7721, 2))
# -
_bySize = _sa4.files_by_size()
_bySize
_bySize[_bySize.parent == 'KingSuper']
TOP and _bySize[::5].set_index('name')[['st_size']].plot.barh(
title='slogfile sizes (sample)',
figsize=(10, 8));
# ### random access with `gztool`
#
# [gztool](https://github.com/circulosmeos/gztool) `a03c5b4fd5b3` Jul 13 2021.
#
#
# ```
# ~/projects/gztool/gztool -C -e */*.slog.gz
# ...
# ERROR: Compressed data error in 'atlantean/atlantean-agorictest16-chain.slog.gz'.
# ...
# Index file 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi' already exists and will be used.
# Processing 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gz' ...
# Processing index to 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi'...
#
# 172 files processed
# 1 files processed with errors!
# ```
# +
# count lines on all slogfiles in parallel
# TODO: if it's already in the DB, don't compute it again.
if TOP:
_withLines = _bySize.assign(
lines=db.from_sequence(_bySize.values).map(
lambda v: _sa4.line_count(*v[1:3])).compute())
TOP and _withLines
# -
_withLines.to_sql('file_meta', db4, index=False, if_exists='replace')
# !sqlite3 slog4.db '.header on' '.mode column' 'select * from file_meta limit 3'
_withLines = pd.read_sql_table('file_meta', db4)
# +
def file_chart(slogdf, sample=5, **plotkw):
    """Horizontal bar chart of slogfile sizes and line counts.

    Plots every *sample*-th file; extra keyword args pass through to
    pandas' ``plot.barh``. Sizes are scaled down by 64 so both series
    share one axis.
    """
    df = slogdf[['name', 'st_size', 'lines']].copy()
    df['b64'] = df.st_size / 64
    df.drop('st_size', axis=1, inplace=True)
    df.set_index('name')[::sample].plot.barh(**plotkw)
TOP and file_chart(_withLines, title='slogfile sizes (sample)', figsize=(10, 8))
# -
# ## slogfile basics
pd.read_sql("""
select st_size, lines
from file_meta
order by st_size desc
""", db4).describe()
# ## Runs, Blocks, and Deliveries
#
# > split each slogfile into runs (each beginning with an import-kernel event)
# +
def partition_lines(lines, step=1000000):
    """Split a 1-based line range into chunks of at most *step* lines.

    Returns a DataFrame with columns ``start`` (1-based first line of each
    chunk), ``qty`` (lines in the chunk) and ``lines`` (the total, repeated
    on every row). Note: line numbers are **1-based**.
    """
    records = []
    for start in range(1, lines + 1, step):
        records.append(
            dict(start=start, qty=min(lines + 1 - start, step), lines=lines))
    return pd.DataFrame.from_records(records)
partition_lines(_withLines.lines.iloc[-1])
# +
#client.restart()
# +
# # !sqlite3 slog4.db 'drop table run'
# +
def provide_table(engine, table, todo, chunksize=None, index=True):
    """Memoize a DataFrame in SQL: read *table* if it already exists,
    otherwise build it via ``todo()`` and save it to *table* first."""
    if sqla.inspect(engine).has_table(table):
        return pd.read_sql_table(table, engine, chunksize=chunksize)
    df = todo()
    df.to_sql(table, engine, index=index)
    return df
def runs_todo(withLines):
    """Extract kernel-start ("run") events from every slogfile in parallel.

    Returns one row per run with file metadata joined in, plus derived
    ``time_end`` / ``line_end`` columns: the start of the next run in the
    same file, or the file's last line for the final run.
    """
    runs = dd.from_delayed([
        dask.delayed(_sa4.provide_runs)(f.parent, f['name'], part.start, part.qty)
        for fid, f in withLines.iterrows()
        for _, part in partition_lines(f.lines).iterrows()
    ]).compute().sort_values(['file_id', 'line'])
    withNames = pd.merge(runs, withLines[['file_id', 'parent', 'name', 'st_size', 'lines']],
                         on='file_id')
    # Compute end times
    byFile = withNames.groupby('file_id')
    runs = pd.concat([
        withNames,
        byFile.apply(lambda g: pd.DataFrame(dict(time_end=g.time.shift(-1)))),
        byFile.apply(lambda g: pd.DataFrame(dict(line_end=g.line.shift(-1)))),
    ], axis=1)
    # The last run in each file has no successor; it ends at the file's last line.
    runs.line_end = np.where(runs.line_end.isnull(), runs.lines, runs.line_end)
    return runs.sort_values(['st_size', 'file_id', 'line']).reset_index(drop=True)
_runs = provide_table(db4, 'run', lambda: runs_todo(_withLines))
# -
# !sqlite3 slog4.db '.schema run'
show_times(_runs, ['time', 'time_end'])[['st_size', 'line', 'line_end', 'parent', 'file_id', 'time', 'time_end']]
# ### runs per slogfile
df = _runs.groupby('file_id')[['line']].count()
df.describe()
# +
df = pd.read_sql("""
select file_id, count(*) runs, name, st_size, lines
from run r
-- join file_id s on s."index" = r.slogfile
group by file_id
order by 2
""", db4)
df.set_index('name')[['runs']][::5].plot.barh(
log=True,
title='slogfile runs (sample)',
figsize=(10, 8));
# -
# ## agorictest-16 genesis: `2021-07-01 19:00:00`
gen16 = show_times(pd.DataFrame(dict(blockHeight=64628, blockTime=[1625166000], ts=1625166000)), ['blockTime'])
gen16
# ## Block end start / finish events
# +
import importlib
import slogdata
from slogdata import SlogAccess
importlib.reload(slogdata)
_sa4 = SlogAccess(_dir('/home/customer/t4/slogfiles'),
_cli('/home/customer/projects/gztool/gztool'))
show_times(
_sa4.provide_blocks('ChainodeTech', 'agorictest-16_chain.slog.gz', 1, 1000000)
)
# -
# ## Separate runs by chain
# +
def first_block(sa, run,
                head=5000,
                ts=gen16.ts[0]):
    """Fetch the first block event(s) of a run and tag which chain it belongs to.

    Scans at most `head` lines from the run's start; a block at/after the
    agorictest-16 genesis time `ts` is chain 16, otherwise chain 15.
    Emits a placeholder row (heights -1, chain NaN) when no block event is found.
    """
    log.info('1st block: %s/%s', run.parent, run['name'])
    line_budget = min(int(run.line_end) - run.line + 1, head)
    blocks = sa.get_blocks(f"{run.parent}/{run['name']}", run.line, line_budget)[:2]
    if not len(blocks):
        return pd.DataFrame.from_records([dict(
            blockHeight=-1,
            blockTime=-1,
            run=run.name,
            chain=np.nan)], index=[run.name])
    return blocks.assign(run=run.name,
                         chain=16 if blocks.blockTime[0] >= ts else 15)
show_times(first_block(_sa4, _runs.loc[0]))
# +
def run2chain(sa, runs):
    """First-block height, time, and chain id for each run (one row per run)."""
    pick = lambda run: first_block(sa, run).iloc[0][['blockHeight', 'blockTime', 'chain']]
    return runs.apply(pick, axis=1)
_r2c = run2chain(_sa4, _runs)
_r2c
# -
_runchain = pd.concat([_runs.drop(columns=['index']), _r2c], axis=1)
_runchain.to_sql('runchain', db4)
_runchain.groupby('chain')[['line']].count()
# !sqlite3 slog4.db '.header on' '.mode column' 'select * from runchain limit 3'
_runchain = pd.read_sql('runchain', db4)
_runchain.groupby('chain')[['line']].count()
_runs['chain'] = _runchain.chain
_runs.groupby('chain')[['file_id', 'lines']].count()
# +
# # !sqlite3 slog4.db 'drop table blockval;'
# +
def blockval_todo(file_meta):
    """Collect block start/finish events from every slogfile, in parallel.

    file_meta: per-file rows with parent, name, and a `lines` count used to
    partition each file into chunks for dask workers.
    """
    return dd.from_delayed([
        dask.delayed(_sa4.provide_blocks)(f.parent, f['name'], part.start, part.qty)
        for fid, f in file_meta.iterrows()
        for _, part in partition_lines(f.lines).iterrows()
    ]).compute()
_blockval = provide_table(db4, 'blockval', lambda: blockval_todo(_withLines), index=True)
show_times(_blockval)
# -
# !sqlite3 slog4.db '.schema blockval'
pd.read_sql("""
select file_id, max(blockHeight)
from blockval
where blockTime >= 1625166000
group by file_id
order by 2 desc
""", db4)
# ### Consensus Block-to-Block Time
# +
# db4.execute("""drop table if exists block""")
# -
db4.execute("""
create table block as
select distinct
case when blockTime >= 1625166000 then 16 else 15 end chain
, blockHeight, blockTime
from blockval
order by blockTime
""")
pd.read_sql("""
select * from block limit 10
""", db4)
# ### What is the range of blocks in `agorictest-16`?
pd.read_sql("""
select lo, n, lo + n - 1, hi from (
select min(blockHeight) lo, max(blockHeight) hi, count(distinct blockHeight) n
from block
where chain = 16
)
""", db4)
# +
blk16 = pd.read_sql("""
select blockHeight, blockTime
from block
where chain = 16
""", db4, index_col='blockHeight')
show_times(blk16).describe(datetime_is_numeric=True)
# -
b16time = pd.read_sql("""
select * from block
where chain = 16
""", db4, index_col='blockHeight')
b16time['delta'] = b16time.shift(-1).blockTime - b16time.blockTime
b16time[['delta']].describe()
b16time[b16time.index < 90527].delta.max()
b16time[b16time.delta == 120]
b16time[['delta']].plot(
title='agorictest-16 consensus blockTime delta',
ylabel='sec',
figsize=(9, 6));
show_times(b16time, ['blockTime']).set_index('blockTime')[['delta']].plot(
title='agorictest-16 consensus blockTime delta',
ylabel='sec',
figsize=(9, 6));
# histogram of block-to-block time delta for agorictest-16. (_Note the log scale on the y axis._)
b16time[['delta']].hist(bins=20, log=True);
df = show_times(b16time, ['blockTime'])
df[df.blockTime <= '2021-07-02 19:00:00'][['delta']].hist(bins=20, log=True);
df[df.blockTime <= '2021-07-02 19:00:00'][['delta']].describe()
# ### How many validators logged each block in agorictest-16?
df = pd.read_sql("""
select blockHeight, count(distinct file_id) qty
from blockval
where sign = -1
and blockTime >= 1625166000
group by blockHeight
""", db4)
df.head()
df.set_index('blockHeight').plot(title='agorictest-16 validator coverage by block', figsize=(9, 6));
# !sqlite3 slog4.db '.schema run'
# +
# db4.execute('drop table if exists blockrun16')
db4.execute("""
create table blockrun16 as
with b as (
select *
from blockval
where blockTime >= 1625166000
)
select file_id
, (select r."index"
from run r
where r.file_id = b.file_id and r.line <= b.line and b.line < r.line_end) run
, b.line, b.time
, b.sign
, blockHeight, blockTime
from b
""")
df = pd.read_sql("""
select * from blockrun16
""", db4)
df.tail()
# -
x = df.groupby('blockHeight')[['run']].count()
x.plot();
x['blockHeight'].sort_values('max').reset_index(drop=True).plot();
# ## Slow Blocks
df = show_times(b16time, ['blockTime'])
df[(df.blockTime <= '2021-07-02 19:00:00') &
(df.delta >= 30)]
# Which runs include block 72712, which took 31 sec?
b33 = pd.read_sql("""
select lo.file_id, lo.run, lo.line, hi.line - lo.line + 1 range, lo.blockHeight
from blockrun16 lo
join blockrun16 hi on hi.run = lo.run and hi.blockHeight = lo.blockHeight
where lo.blockHeight in (72712)
and lo.sign = -1
and hi.sign = 1
""", db4)
b33
# ## Correlating block start with block end
# Pair each block-start event (sign == -1) with the event on the following
# row via shift(-1); keeping only pairs where the next row is a block-end
# (sign == 1) in the same file yields per-(run, block) wallclock durations.
_blockrun16 = df = pd.read_sql_table('blockrun16', db4)
df.tail()
lo = df[df.sign == -1]
hi = df.shift(-1)
hi = hi[hi.sign == 1]
dur = hi.time - lo.time
# show_times(df, ['time', 'time_end'])
lo['dur'] = dur
lo['s_hi'] = hi.file_id
lo['l_hi'] = hi.line
lo['t_hi'] = hi.time
# Keep only start/end pairs that come from the same slogfile.
dur = lo[lo.file_id == lo.s_hi]
show_times(dur, ['time', 'blockTime'])
show_times(
    dur.sort_values('dur').dropna().tail(),
    ['time', 'blockTime', 't_hi']
)
dur[dur.dur.abs() <= 120].plot.scatter(x='blockHeight', y='dur')
dur[['blockHeight', 'dur']].describe()
# ## Cranks in a Block
# +
def long_runs_including(runs, blockrun, blockHeight):
runs_matching = blockrun[blockrun.blockHeight == blockHeight].run
runs = runs.assign(length=runs.line_end - runs.line)
runs = runs[runs.index.isin(runs_matching)]
return runs.sort_values('length', ascending=False)
_long16 = long_runs_including(_runs, _blockrun16, 64628)
_long16.head()
# -
show_times(dur[dur.run == _long16.index[0]], ['time', 'blockTime', 't_hi'])
_blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64628)].iloc[:2]
# +
def blockrun_records(blockHeight, run, slogAccess, blockrun,
                     target=None, include=None):
    """Fetch the raw slog records spanning one block within one run."""
    ref = f"{run.parent}/{run['name']}"
    span = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]
    start_event = span.iloc[0]  # assert sign == -1?
    end_event = span.iloc[1]
    qty = end_event.line - start_event.line + 1
    records = slogAccess.get_records(ref, int(start_event.line), int(qty),
                                     target=target, include=include)
    return records.assign(file_id=run.file_id)
def get_vats(slogAccess, ref, start, qty):
    """Fetch only create-vat records from the given line range of `ref`."""
    return slogAccess.get_records(ref, start, qty,
                                  target='create-vat',
                                  include=['create-vat'])
def vats_in_blockrun(blockHeight, run, slogAccess, blockrun):
    """create-vat events logged within one block of one run."""
    span = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]
    start_event = span.iloc[0]  # assert sign == -1?
    end_event = span.iloc[1]
    qty = end_event.line - start_event.line + 1
    ref = f"{run.parent}/{run['name']}"
    vats = get_vats(slogAccess, ref, int(start_event.line), int(qty))
    return vats.assign(blockHeight=blockHeight, parent=run.parent)
# _sa4.get_records('Nodeasy.com/Nodeasy.com-agorictest15-chain.slog.gz', 1662497, 1671912 - 1662497)
vats_in_blockrun(_blockrun16.iloc[0].blockHeight, _runs.loc[_long16.index[0]],
_sa4, _blockrun16)
# -
vats_in_blockrun(64629, _runs.loc[_long16.index[0]],
_sa4, _blockrun16)
# Empty frame whose columns and dtypes serve as the schema ("meta") for
# delivery records: the one sample row pins each column's dtype, then
# .iloc[:0] drops it, leaving a zero-row typed DataFrame.
no_deliveries = pd.DataFrame.from_records([
    {'time': 1625198620.6265895,
     'type': 'deliver-result',
     'crankNum': 1291,
     'vatID': 'v11',
     'deliveryNum': 124,
     'kd': object(),
     'line': 1673077,
     'dr': object(),
     'syscalls': 2,
     'method': 'inbound',
     'compute': 119496.0,  # missing compute is possible... from replay.
     'dur': 0.1912224292755127,
     }]).iloc[:0]
no_deliveries.dtypes
# +
import json
import itertools
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
def block_cranks(records):
    """Pair deliver / deliver-result events into one record per crank.

    records: iterable of parsed slog entries (dicts) in file order.
    Returns a DataFrame with one row per completed delivery, carrying the
    deliver-result fields plus the syscall count, kd, method, compute, and
    wallclock dur; returns the empty `no_deliveries` schema frame when no
    deliver/deliver-result pair completes.
    """
    deliveries = []
    syscalls = 0
    deliver = None
    for record in records:
        ty = record['type']
        if ty == 'deliver':
            # Start of a crank: remember it and reset the syscall counter.
            deliver = record
            syscalls = 0
        elif ty == 'syscall-result':
            syscalls += 1
        elif ty == 'deliver-result':
            if not deliver:
                # Fix: log.warn is a deprecated alias of log.warning.
                log.warning('no deliver? %s', record)
                continue
            dur = record['time'] - deliver['time']
            # kd = [tag, ..., payload]; only 'message' deliveries carry a method.
            method = deliver['kd'][2]['method'] if deliver['kd'][0] == 'message' else None
            # Missing compute is possible (e.g. from replay) -> NaN.
            compute = record['dr'][2]['compute'] if type(record['dr'][2]) is type({}) else np.nan
            detail = dict(record,
                          syscalls=syscalls,
                          kd=deliver['kd'],
                          method=method,
                          compute=compute,
                          dur=dur)
            deliveries.append(detail)
    if deliveries:
        return pd.DataFrame.from_records(deliveries)
    else:
        return no_deliveries
def get_deliveries(slogAccess, ref, start, qty):
    """Fetch delivery-related records for one block and pair them into cranks.

    A block spanning only two lines is just the start/end events, so there
    are no deliveries to extract. Returns the `no_deliveries` schema frame
    when nothing delivery-shaped is found.
    """
    if qty <= 2:  # just block start, block end
        return no_deliveries
    df = slogAccess.get_records(
        ref, int(start), int(qty),
        target=None, include=['deliver', 'deliver-result', 'syscall-result'])
    if len(df) > 0 and 'syscallNum' in df.columns:
        # Drop syscall-level columns we don't need. (The original looped four
        # times dropping the same recomputed intersection; once suffices.)
        df = df.drop(columns=list({'syscallNum', 'ksr', 'vsr', 'vd'} & set(df.columns)))
        return block_cranks(df.to_dict('records'))
    else:
        return no_deliveries
_g16 = _blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64628)].iloc[:2]
_run1 = _runs.loc[_long16.index[0]]
get_deliveries(_sa4, f'{_run1.parent}/{_run1['name']}', _g16.iloc[0].line, _g16.iloc[1].line - _g16.iloc[0].line + 1)
# -
df = dur[dur.run == _long16.index[0]].assign(length=dur.l_hi - dur.line + 1)
# df[df.length > 2].head(10)
df[df.dur > 5].head(10)
# +
# https://avi.im/blag/2021/fast-sqlite-inserts/
def run_sql(script, engine):
    """Execute each statement of a ';'-and-newline separated SQL script on `engine`."""
    statements = script.strip().split(';\n')
    for statement in statements:
        engine.execute(statement)
run_sql('''
PRAGMA journal_mode = OFF;
PRAGMA synchronous = 0;
PRAGMA cache_size = 1000000;
PRAGMA locking_mode = NORMAL;
PRAGMA temp_store = MEMORY;
''', db4)
# -
len(dur)
dur.to_sql('blockrun16dur', db4, if_exists='replace', chunksize=25000, index=False)
# +
_br2 = _blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64632)].iloc[:2]
get_deliveries(_sa4, f'{_run1.parent}/{_run1['name']}',
_br2.iloc[0].line, _br2.iloc[1].line - _br2.iloc[0].line + 1)
# +
# chain_id, vatID, deliveryNum -> blockHeight, kd, compute
import inspect
def provide_deliveries(slogAccess, blockHeight, run, blockrun):
    """Deliveries within one block of one run, cached via SlogAccess.provide_data.

    blockrun: per-(run, block) start/end events; when it lacks a complete
    start/end pair for this block, returns the `no_deliveries` schema frame
    tagged with sentinel ids instead.
    Raises NotImplementedError when the (possibly cached) data looks malformed.
    """
    br = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]
    if len(br) < 2:
        # No complete start/end pair recorded for this block in this run.
        return no_deliveries.assign(file_id=-1, chain=-1, blockHeight=blockHeight, run=run.name)
    block_start = br.iloc[0]  # assert sign == -1?
    block_end = br.iloc[1]
    length = int(block_end.line - block_start.line + 1)
    df = slogAccess.provide_data(run.parent, run['name'], int(block_start.line), length,
                                 f'deliveries-{blockHeight}', no_deliveries,
                                 lambda ref, start, qty: get_deliveries(slogAccess, ref, start, qty),
                                 'gzip')
    df = df.assign(chain=run.chain, blockHeight=blockHeight, run=run.name)
    # Guard against stale or malformed cached data.
    if df.dtypes['chain'] not in ['int64', 'float64'] or 'vatID' not in df.columns or 'vd' in df.columns:
        raise NotImplementedError(f'cols: {df.columns} dtypes: {df.dtypes} block {blockHeight, int(block_start.line)}, run\n{run}')
    return df
df = provide_deliveries(_sa4, 66371, _run1, _blockrun16)
show_times(df)
# -
# Computron rate for just this one block?
df.compute.sum() / df.dur.sum()
# test empty
provide_deliveries(_sa4, 64629, _run1, _blockrun16)
_runs.loc[455:456]
# ## Cranks in one long run starting at agorictest-16 genesis
gen16
df = pd.read_sql("""
with lo as (
select *
, time - blockTime delta
from blockrun16
where blockHeight = 64628
and blockTime = 1625166000
and sign = -1
and run is not null
), hi as (
select run, max(blockHeight) hi, max(blockTime) t_hi
from blockrun16
where run is not null
and sign = -1
group by run
), agg as (
select lo.*, hi.hi, hi.t_hi
from lo join hi on lo.run = hi.run
where abs(delta) < 7
order by hi.t_hi desc
)
select agg.*, run.parent, run.name
from agg
join run on agg.run = run."index"
limit 5
""", db4)
show_times(df, ['time', 'blockTime', 't_hi'])
show_times(_runs).loc[445]
# +
import json
def run1_deliveries(con, sa, lo, hi, run, br,
                    json_cols=['kd', 'dr'],
                    table='run1'):
    """Persist deliveries for blocks [lo, hi) of one run into SQL table `table`.

    Resumable: when `table` already exists, restarts just past its max
    blockHeight and appends. Object-valued columns (json_cols) are
    JSON-encoded so they survive the SQL round trip.
    NOTE(review): mutable default json_cols is shared across calls —
    harmless here since it is never mutated.
    """
    if sqla.inspect(con).has_table(table):
        lo = pd.read_sql(f'select max(blockHeight) + 1 lo from {table}', con).iloc[0].lo
        if_exists = 'append'
    else:
        if_exists = 'replace'
    for blockHeight in range(lo, hi):
        df = provide_deliveries(sa, blockHeight, run, br)
        if not len(df):
            # log.info('block %d: no deliveries', blockHeight)
            continue
        for col in json_cols:
            df[col] = df[col].apply(json.dumps)
        log.info('block %d of %d: %s += %d rows', blockHeight, hi, table, len(df))
        df.to_sql(table, con, if_exists=if_exists, index=False)
        # After the first write, subsequent blocks always append.
        if_exists = 'append'
run1_deliveries(db4, _sa4, 64628, 75000, _runs.loc[445], _blockrun16)
# run1_deliveries(db4, _sa4, 75000, 90530, _runs.loc[445], _blockrun16, table='run1b')
# -
_run1 = df = pd.read_sql('select * from run1 union all select * from run1b', db4)
show_times(_run1.tail(3))
_run1.blockHeight.describe()
_run1[_run1.blockHeight >= 88296 - 2].sort_values('blockHeight').head(30).drop(columns=['kd', 'dr', 'file_id'])
df = _run1[_run1.blockHeight == 88295].sort_values('dur', ascending=False).drop(columns=['kd', 'dr', 'file_id'])
df.head(10)
df[df.dur >= 1]
# TODO: compare `getPayout` here (in 88295) vs something earlier... same computrons? same duration?
#
# e.g. if harden weakset grew, the duration could grow while keeping computrons constant
_run1[_run1.method == 'getPayout'][['compute', 'dur']].describe()
_run1[_run1.method == 'getPayout'].compute.hist()
_run1[(_run1.method == 'getPayout') & (_run1.compute == 31654)].plot.scatter(x='blockHeight', y='dur')
lg = _run1[_run1.blockHeight > 76000]
lg = lg[lg.dur < 1]
lg[(lg.method == 'getPayout') & (lg.compute == 31654)].plot.scatter(x='blockHeight', y='dur')
# Things got slower over time.
#
# Hypothesis: GC didn't happen -> weak set got big -> weakset access time got big
# So computron model should not be based on this range, but rather on pre-loadgen time.
# When looking at computron / wallclock, we should look at:
#
# - all getCurrentAmount calls
# - within a narrow range of blockHeight
# - that all use the same # of computrons
#
# (as above)
#
b16time[b16time.delta == 224]
_run1[['compute', 'dur']].describe()
# +
def drate(df):
    """Add a crude delivery-rate column: computrons per syscall-weighted second."""
    weighted = df.compute / (df.syscalls + 1)
    # alternative: rate = df.compute / df.dur
    return df.assign(rate=weighted / df.dur)
df = drate(_run1).groupby('method')[['rate']].aggregate(['count', 'mean', 'std', 'max'])
df = df.sort_values(('rate', 'mean'), ascending=False)
df
# -
common = _run1.groupby('method')[['line']].count()
common = common[common.line > 20]
common
drate(_run1[_run1.method.isin(common.index)])[['method', 'rate']].boxplot(by='method', rot=90, figsize=(20, 12))
common.sort_values('line', ascending=False).head()
_run1.blockHeight.describe()
_run1.sort_values('dur', ascending=False)
# This is an always-busy sim, but **TODO** we'd like to look at the arrival pattern that we have.
# +
def sim(df, c_eg, dur_eg, target):
    """Assign simulated block numbers by slicing a cumulative computron total.

    c_eg / dur_eg give an example computron rate; `target` seconds per block
    times that rate is the computron budget of one simulated block.
    """
    chain16 = df[df.chain == 16]
    chain16['running'] = chain16.compute.cumsum()  # try exp
    computrons_per_block = target * (c_eg / dur_eg)
    log.info('threshold: %s', computrons_per_block)
    chain16['sim_blk'] = (chain16.running / computrons_per_block).round()
    # chain16['adj'] = chain16.sim_blk - chain16.blockHeight
    return chain16.reset_index(drop=True)
df = _run1.drop(columns=['type', 'kd', 'dr', 'file_id', 'line', 'run'])
# df = df[df.method != 'executeContract']
# df = df[df.method == 'getCurrentAmount'] # getPayout
# df.blockHeight = df.blockHeight - df.blockHeight.iloc[0]
df = sim(df, 48390.0, 0.074363, 5)
df = df[df.sim_blk.notnull()]
df.sim_blk = df.sim_blk.astype('int64')
show_times(df)
# -
pd.read_sql('''
select count(distinct run)
from blockrun16
''', db4)
len(_runs)
# +
def nth_block(sa, blockHeight, run, blockrun,
ts=gen16.ts[0]):
log.info('%d th block: %s/%s', blockHeight, run.parent, run['name'])
br = blockrun[(blockrun.blockHeight == blockHeight) & (blockrun.run == run.name)]
df = provide_deliveries(sa, blockHeight, run, br)
if not len(df):
return df
df = df.assign(run=run.name, chain=run.chain)
return df
m1b1 = pd.concat(
df
for _, run in _runs.iterrows()
for df in [nth_block(_sa4, 80001, run, _blockrun16)]
if len(df)
)
m1b1
# -
m1b1[(m1b1.method == 'getCurrentAmount') & (m1b1.deliveryNum == 44721)][['compute', 'dur', 'run']]
df = m1b1[(m1b1.method == 'getCurrentAmount') & (m1b1.deliveryNum == 44721)][['compute', 'dur', 'run']]
df.describe()
# ## Validator speed: 2-4x spread for `getCurrentAmount`
df[['dur']].hist()
# +
# df.groupby('method')[['compute']].describe().loc['executeContract']
# -
df.compute.hist(log=True);
df.dur.hist(log=True);
df[df.dur < .1].dur.hist()
# #### Total delivery duration per block
x = pd.concat([
df.groupby('blockHeight')[['dur']].sum(),
df.groupby('sim_blk')[['dur']].sum().rename(columns=dict(dur='dur_sim')),
], axis=1)
x.hist(); # log=True);
x.describe()
x.dur.quantile(.9)
xx = df.groupby('sim_blk')[['dur']].sum().rename(columns=dict(dur='dur_sim'))
xx[xx.dur_sim > 25]
df[df.blockHeight == 88295].sort_values('dur', ascending=False)
df[df.sim_blk == 32607].sort_values('dur', ascending=False)
_run1[_run1.compute == 381240].dur.describe()
_run1[_run1.compute == 381240].plot.scatter(x='blockHeight', y='dur')
# This wasn't a big deal during most of the chain (.25sec 75th percentile).
#
# We could model this within 2x or 3x by ignoring the spike.
# **TODO**: what happened during that spike? is it consensus-observable? kernel-observable?
df = _run1[_run1.compute == 381240]
df[(df.blockHeight >= 88100) & (df.blockHeight < 88400)].plot.scatter(x='blockHeight', y='dur')
df[df.sim_blk == 32607].compute.sum()
df[df.sim_blk == 32607].dur.sum()
df[df.sim_blk == 32607].syscalls.sum()
df.groupby('blockHeight')[['syscalls']].sum().describe()
# #### Total compute per block
x = pd.concat([
df.groupby('blockHeight')[['compute']].sum(),
df.groupby('sim_blk')[['compute']].sum().rename(columns=dict(compute='cmp_sim')),
], axis=1)
x.hist(log=True);
x.describe()
cluster.scale(8)
client.restart()
f'{12:04}'
# +
def pick_chain(ht,
               gen=1625166000, hi=16, lo=15):
    """Chain id by block time: strictly after genesis `gen` -> `hi`, else `lo`.

    NOTE(review): uses strict `>`, while byChain uses `>=` — confirm intended.
    """
    past_genesis = ht > gen
    return np.where(past_genesis, hi, lo)
def run_deliveries(slogs, sa, run):
    """Yield one deliveries DataFrame per block of `run`.

    Reads the per-file *-blocks.csv indexes under slogs/<run.parent> to find
    which blocks fall inside the run's line range, then fetches deliveries
    block by block. Yields a sentinel-tagged empty frame if no block of the
    run had any deliveries, so downstream pd.concat never sees zero frames.
    """
    # NOTE(review): chain_id is computed but never used here — verify intent.
    chain_id = f'agorictest-{run.chain}'
    blocks = pd.concat(
        pd.read_csv(blockFile)
        for blockFile in (slogs / run.parent).glob('*-blocks.csv')
    )
    blocks = blocks[(blocks.line >= run.line) &
                    (blocks.line < run.line_end)]
    blocks = blocks.assign(run=run.name)
    heights = blocks.blockHeight.unique()
    log.info('run %s %-3d blocks %.16s %s', run.name, len(heights),
             pd.to_datetime(run.time, unit='s'), run['name'])
    tot = 0
    for blockHeight in heights:
        detail = provide_deliveries(sa, blockHeight, run, blocks)
        if not len(detail):
            continue
        tot += len(detail)
        yield detail
    if not tot:
        yield no_deliveries.assign(file_id=-1, chain=-1, blockHeight=-1, run=run.name)
def by_vat(dest, run, detail):
    """Write per-vat delivery detail and summary CSVs for one run.

    dest: destination root; files go under <dest>/<chain_id>/<vatID>/.
    run: row with .chain, .name, .parent, .file_id, .line attributes.
    detail: deliveries with at least vatID, deliveryNum, kd, syscalls,
            compute, dur columns.
    Returns per-(run, vatID) delivery counts.
    """
    chain_id = f'agorictest-{run.chain}'
    run_detail = f'{run.name:04}-{run.parent}-{run.file_id}-{run.line}'
    for vatID, g in detail.groupby('vatID'):
        vat_dir = dest / chain_id / vatID
        # exist_ok=True replaces the original bare `except: pass`, which
        # silently swallowed *all* mkdir errors, not just "already exists".
        vat_dir.mkdir(parents=True, exist_ok=True)
        f = vat_dir / f'delivery-detail-{run_detail}.csv.gz'
        log.info('saving to %s:\n%s', f, g.set_index(['vatID', 'deliveryNum'])[['compute', 'dur']].tail(3))
        g.to_csv(f, index=False)
        f = vat_dir / f'delivery-summary-{run_detail}.csv.gz'
        g[['vatID', 'deliveryNum', 'kd', 'syscalls', 'compute']].to_csv(f, index=False)
    return detail.assign(run=run.name).groupby(['run', 'vatID'])[['deliveryNum']].count()
#by_vat(_dir('slogfiles/'), _dir('vat-details/'), _sa4, _runs)
for df in run_deliveries(_dir('slogfiles/'), _sa4, _runs.loc[58]):
print(df)
print(by_vat(_dir('vat-details/'), _runs.loc[58], df))
break
# +
def run_deliveries_todo(sa, slogs, dest, runs):
    """Lazily build one dask task per run: collect its deliveries, write per-vat CSVs.

    Returns a generator of dask.delayed tasks (nothing runs until computed).
    """
    def do_run(run):
        # Gather every block's deliveries for this run into one frame.
        df = pd.concat(
            detail
            for detail in run_deliveries(slogs, sa, run)
        )
        return by_vat(dest, run, df)
    todo = (
        dask.delayed(do_run)(run)
        for _, run in runs.iterrows()
    )
    return todo
per_run = dd.from_delayed(run_deliveries_todo(_sa4, _dir('slogfiles/'), _dir('vat-details/'), _runs))
per_run.compute()
# -
pd.to_datetime(1625213913.1672082, unit='s')
# +
import inspect
from slogdata import show_times
db4.execute('drop table if exists crankrun') #@@
def deliveries_todo(sa, blockrun, runs):
    """Fan provide_deliveries out over every (run, block) pair via dask.

    The single-element `for ... in [...]` clauses are a generator-expression
    idiom: they bind `heights` once per run and emit a progress log line as
    a side effect while iterating.
    """
    todo = (
        dask.delayed(provide_deliveries)(sa, blockHeight, run,
                                         blockrun[(blockrun.run == run.name) &
                                                  (blockrun.blockHeight == blockHeight)])
        for run_ix, run in runs.iterrows()
        for heights in [blockrun[blockrun.run == run_ix].blockHeight.unique()]
        for _ in [log.info('run %s %-3d blocks %.16s %s', run_ix, len(heights),
                           pd.to_datetime(run.time, unit='s'), run['name'])]
        for blockHeight in heights
    )
    log.info('todo: %s', type(todo))
    # `meta` tells dask the output schema before any task runs.
    df = dd.from_delayed(todo,
                         meta=no_deliveries.assign(file_id=1, chain=1, blockHeight=1, run=1))
    return df.compute()
# _dr16 = provide_table(
# db4, 'crankrun',
# # 65517
# lambda: deliveries_todo(_sa4, _blockrun16[_blockrun16.blockHeight <= 65000], _runs.loc[200:275]))
_dr16 = deliveries_todo(_sa4, _blockrun16, # [_blockrun16.blockHeight <= 65000]
_runs[_runs.chain == 16])
_dr16
# -
# ## deliveries from batch
_delrun = pd.read_sql('select * from delrun', db4)
_delrun.groupby('chain')[['line']].count()
# ## Are compute meter values consistent?
# +
def compute_meter_consistent(df):
    """Find (vatID, deliveryNum) pairs whose compute meter disagrees across runs.

    Returns the conflicting pairs (compute_dup = number of distinct values)
    joined back to the individual run rows that produced them.
    """
    distinct = df.groupby(['vatID', 'deliveryNum'])[['compute']].nunique()
    conflicted = distinct[distinct['compute'] > 1].reset_index()
    return pd.merge(conflicted,
                    df[['run', 'vatID', 'deliveryNum', 'compute']],
                    how='left', suffixes=['_dup', ''],
                    on=['vatID', 'deliveryNum'])
# x = compute_meter_consistent(_alld16).compute()
x = compute_meter_consistent(_delrun[_delrun.chain == 16]).sort_values(['vatID', 'deliveryNum']) # .compute()
x
# -
compute_meter_consistent(_delrun[_delrun.chain == 15]).sort_values(['vatID', 'deliveryNum']) # .compute()
# ## Computrons per block
blockdel = _delrun[_delrun.method != 'executeContract']
key = ['chain', 'blockHeight', 'vatID', 'deliveryNum', 'compute']
blockdel = blockdel.sort_values(key).drop_duplicates()
df = blockdel.groupby(['chain', 'blockHeight'])[['deliveryNum']].count().sort_index()
df.plot()
_bkcomp = df = blockdel.groupby(['chain', 'blockHeight'])[['compute']].sum()
df
df.plot()
# +
def type2sign(df):
    """Add a sign column in place: -1 for block-start events, +1 otherwise."""
    is_start = df['type'] == 'cosmic-swingset-end-block-start'
    df['sign'] = np.where(is_start, -1, 1)
    return df
def byChain(df, gen=gen16.ts[0], hi=16, lo=15):
    """Tag each row with a chain id: `hi` at/after genesis time `gen`, else `lo`.

    Fix: removed an unreachable second `return df` that followed the return.
    """
    return df.assign(chain=np.where(df.blockTime >= gen, hi, lo))
def slog_blocks(slogfiles,
                pattern='**/*-blocks.csv'):
    """Consensus block list: distinct (chain, blockHeight, blockTime) triples
    gathered from every per-slogfile *-blocks.csv index under `slogfiles`.
    """
    df = pd.concat(type2sign(pd.read_csv(p)[['type', 'blockHeight', 'blockTime']])
                   for p in slogfiles.glob(pattern))
    df = byChain(df)
    key = ['chain', 'blockHeight', 'blockTime']
    # Many validators log the same block; keep each block once.
    df = df[key].sort_values(key).drop_duplicates()
    return df.reset_index(drop=True)
_blk = slog_blocks(_dir('slogfiles/'))
_blk.tail()
# -
_byChain = _blk.groupby('chain')
df = pd.merge(
_byChain[['blockHeight']].nunique(),
_byChain[['blockHeight']].aggregate(['min', 'max'])['blockHeight'],
left_index=True, right_index=True,
)
df['span'] = df['max'] - df['min'] + 1
df
# +
def blockdur(df):
    """Block-to-block time delta, indexed by (chain, blockHeight).

    The final block of each index has no successor, so its dur is NaN.
    """
    indexed = df.set_index(['chain', 'blockHeight'])
    indexed['dur'] = indexed['blockTime'].shift(-1) - indexed['blockTime']
    return indexed
_bkdur = blockdur(_blk)
_bkdur
# -
# compute by block with duration
_bkcmpdur = _bkcomp.join(_bkdur, lsuffix='_d', rsuffix='_b')
_bkcmpdur['rate'] = (_bkcmpdur.compute / _bkcmpdur.dur).astype(float)
_bkcmpdur
_bkcmpdur[_bkcmpdur.dur > _bkcmpdur.dur.quantile(0.99)]
df = _bkcmpdur.loc[16]
df[df.dur < 8][['rate']].hist(log=True)
_bkcmpdur[_bkcmpdur.dur < 8][['rate']].describe()
# ## simulation
_delrun.groupby('run')[['line']].count()
_delrun[['crankNum', 'run']].groupby(['crankNum'])[['run']].aggregate(['count']).plot()
# +
def sim(df, percentile):
    """Per-delivery duration stats for chain 16, excluding contract installs.

    Groups identical deliveries (same block, crank, vat, deliveryNum,
    compute) across validators and aggregates their wallclock durations.

    Fix: removed six unreachable lines after the return (an earlier draft
    that computed a `sim_block` column from `percentile`); `percentile` is
    retained for interface compatibility but is currently unused.
    """
    df = df[df.chain == 16]
    df = df[df.method != 'executeContract']
    key = ['blockHeight', 'crankNum', 'vatID', 'deliveryNum', 'compute']
    df = df.groupby(key)[['dur']].aggregate(['count', 'mean', 'median', 'sum'])
    return df
df = sim(_run1, .99)
df
# -
df[['blockHeight']].plot()
df.set_index('blockHeight')[['sim_block']].plot()
# ## Compute rate by vat
plt.cm.rainbow[1]
pd.Categorical(_delrun.method.dropna(), ordered=True)
# +
import matplotlib as plt
def cmap_of(df, color,
            cmap=plt.cm.get_cmap('hot')):
    """Map each distinct value of column `color` to an evenly spaced colormap
    color; returns one color per row of df (aligned via .loc fan-out).

    NOTE(review): the default `cmap` is evaluated at definition time and
    relies on the `import matplotlib as plt` alias above.
    """
    df = df.loc[:, [color]].fillna('???')
    byColor = df.groupby(color).count() #.set_index(color)
    # Spread the distinct values over [0, 1) and look up each in the colormap.
    byColor['unit'] = range(len(byColor))
    byColor.unit = byColor.unit / len(byColor)
    byColor['color'] = byColor.unit.apply(cmap)
    # Indexing byColor by the original column repeats each value's color per row.
    return byColor.loc[df[color]].color
cmap_of(_delrun, 'method')
# +
def vat_rate(df, vatID):
    """Per-delivery compute rate (computrons/sec) for one vat, indexed by deliveryNum."""
    cols = df[['vatID', 'deliveryNum', 'compute', 'dur']].dropna()
    cols = cols.assign(rate=cols['compute'] / cols['dur'])
    matching = cols[cols['vatID'] == vatID]
    return matching.set_index('deliveryNum').sort_index()
def show_rate(df, vatID, figsize=(8, 9)):
    """Plot per-delivery compute, dur, and rate for one vat as stacked subplots."""
    detail = vat_rate(df, vatID)
    detail.plot(subplots=True, figsize=figsize)
def fit_line(df, x, y, color=None, figsize=(9, 6)):
    """Scatter-plot column y vs x with a least-squares line overlaid.

    color: optional column name; rows are colored per distinct value via cmap_of.
    """
    # Keep only rows where both coordinates are present.
    df = df[~df[x].isnull() & ~df[y].isnull()]
    cs = np.polyfit(df[x], df[y], 1)
    f = np.poly1d(cs)
    if color:
        color = cmap_of(df, color)
    ax1 = df[[x, y]].plot.scatter(x=x, y=y, color=color, figsize=figsize)
    df['fit'] = f(df[x])
    df.plot(x=x, y='fit', color='Red', legend=False, ax=ax1);
# show_rate(start1, 'v10');
# vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur')
# fastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz']
# fit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur')
# len(fastSlog[fastSlog.vatID == 'v10'])
# fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) #.sort_values('compute', ascending=False)
#fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True)
fit_line(_delrun[_delrun.chain == 16], 'compute', 'dur', color='method')
# -
_r = _delrun[['compute', 'dur', 'method']].assign(rate=_delrun.compute / _delrun.dur)
_r.groupby('method')[['rate']].describe().sort_values(('rate', 'mean'))
df.sort_values(('compute', 'mean'))
df = fastSlog[fastSlog.vatID == 'v10']
df['rate'] = df.compute / df.dur
df[['deliveryNum', 'dur', 'compute', 'rate']].set_index('deliveryNum').plot(subplots=True)
df.rate.describe()
# ### exclude dynamic vat creation
fastSlog.groupby('method')[['compute']].mean().plot.barh(log=True, figsize=(12, 10))
noContract = df =fastSlog[fastSlog.method != 'executeContract'].copy()
df['rate'] = df.compute / df.dur
df[['dur', 'compute', 'rate']].plot(subplots=True)
fit_line(noContract, 'compute', 'dur')
fit_line(fastSlog, 'compute', 'dur')
# ## Add syscalls to the model
df = noContract
cs = np.polyfit(df[['compute', 'syscalls']], df['dur'], 1)
df = _dr16.assign(chain_id=16)
df = df[['chain_id', 'vatID', 'deliveryNum', 'blockHeight', 'kd', 'compute']].drop_duplicates()
df = df.set_index(['chain_id', 'vatID', 'deliveryNum']).sort_index()
df[df.index.duplicated()]
df
df.loc[16].loc['v1'].loc[0]
_dr16.query('(deliveryNum == 0) & (vatID == "v1")').groupby('compute')[['line']].count()
pd.merge(_dr16,
df[df.index.duplicated()].reset_index()[['vatID', 'deliveryNum']],
left_on=['vatID', 'deliveryNum'], right_on=['vatID', 'deliveryNum']
)[['vatID', 'deliveryNum', 'blockHeight', 'kd', 'compute']]
# _dr16.assign(chain_id=16).set_index(['chain_id', 'vatID', 'deliveryNum'])
dall = pd.concat(
pd.read_csv(f)
for f in _dir('slogfiles/').glob('**/*-deliveries-*.csv.gz')
)
dall
# +
def load_deliveries(files, con, table):
if_exists = 'replace'
for file in files:
df = pd.read_csv(file)
df.to_sql(table, con, if_exists=if_exists)
if_exists = 'append'
log.info('loaded %d records from %s', len(df), file)
load_deliveries(
_dir('slogfiles/').glob('**/*-deliveries-*.csv.gz'),
db4,
'delrun3')
# -
# ### Did we ever do more than 1000 cranks in a block?
#
# if not, current policy never fired
df = _dr16[['blockHeight', 'crankNum']].drop_duplicates()
df.groupby('blockHeight')[['crankNum']].count().sort_values('crankNum', ascending=False)
# ## @@ Older approaches
# ## Delivery statistics
#
# > For each delivery in the corpus, we want to get statistics on the range of wallclock times taken by these validators.
# +
import gzip
import itertools
def iter_cranks(path):
    """split each slogfile into runs (each beginning with an import-kernel event),
    process each run by finding sequential matching deliver+deliver-result pairs,
    turn each pair into a (crankNum, computrons, wallclock) triple
    """
    log.info('iter_cranks: %s', path)
    with gzip.open(path) as f:
        # State machine over the line stream:
        #   kernel   - latest import-kernel-finish event (current run)
        #   deliver  - pending deliver event awaiting its deliver-result
        #   block    - latest cosmic-swingset-end-block-start event
        #   syscalls - syscall-result count since the pending deliver
        kernel = None
        deliver = None
        block = None
        syscalls = None
        for (ix, line) in enumerate(f):
            try:
                data = json.loads(line)
            except json.JSONDecodeError:
                log.warning('%s:%d: bad JSON: %s', path.name, ix, repr(line))
                continue
            ty = data['type']
            # print(ix, data['type'], kernel, deliver)
            if ty == 'import-kernel-finish':
                # A new run begins; reset per-run state.
                kernel = data
                deliver = None
                syscalls = None
                yield dict(kernel,
                           slogfile=path.name, line=ix)
            elif ty == 'create-vat':
                yield dict(slogfile=path.name,
                           line=ix,
                           time=data['time'],
                           type=ty,
                           vatID=data['vatID'],
                           description=data['description'],
                           managerType=data['managerType'],
                           time_kernel=kernel['time'])
            # {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
            # {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
            elif ty == 'cosmic-swingset-end-block-start':
                block = data
            elif ty == 'cosmic-swingset-end-block-finish':
                time = data['time']
                time_start = block['time']
                dur = time - time_start
                if kernel:
                    time_kernel = kernel['time']
                else:
                    log.warning('%s:%d: missing kernel context', path.name, ix)
                    time_kernel = np.nan
                yield dict(slogfile=path.name,
                           line=ix,
                           time=time,
                           type=ty,
                           time_start=time_start,
                           dur=dur,
                           blockHeight=data['blockHeight'],
                           blockTime=data['blockTime'],
                           time_kernel=time_kernel)
                block = None
            elif deliver is None:
                # Not inside a crank: only a deliver event is interesting here.
                if ty == 'deliver':
                    deliver = data
                    syscalls = 0
            elif data['type'] == 'deliver-result':
                time = data['time']
                time_start = deliver['time']
                dur = time - time_start
                # kd = [tag, ..., payload]; only 'message' deliveries carry a method.
                method = deliver['kd'][2]['method'] if deliver['kd'][0] == 'message' else None
                # NOTE(review): missing compute yields None here, while
                # block_cranks uses np.nan — confirm this asymmetry is intended.
                compute = data['dr'][2]['compute'] if type(data['dr'][2]) is type({}) else None
                if block:
                    blockHeight = block['blockHeight']
                    blockTime=block['blockTime']
                else:
                    # odd... how do we get here without block info???
                    log.warning('%s:%d: missing block context', path.name, ix)
                    blockHeight = blockTime = np.nan
                if kernel:
                    time_kernel = kernel['time']
                else:
                    log.warning('%s:%d: missing kernel context', path.name, ix)
                    time_kernel = np.nan
                yield dict(slogfile=path.name,
                           line=ix,
                           time=time,
                           type=ty,
                           crankNum=data['crankNum'],
                           deliveryNum=data['deliveryNum'],
                           vatID=data['vatID'],
                           kd=deliver['kd'],
                           method=method,
                           syscalls=syscalls,
                           dr=data['dr'],
                           compute=compute,
                           time_start=time_start,
                           dur=dur,
                           blockHeight=blockHeight,
                           blockTime=blockTime,
                           time_kernel=time_kernel)
                deliver = None
            elif ty == 'syscall-result':
                syscalls += 1
            elif ty in ['clist', 'syscall']:
                continue
            else:
                # Any other event while a deliver is pending aborts the pairing.
                log.warning("%s:%d: expected deliver-result; got: %s", path.name, ix, ty)
                deliver = None
def sample(files=50, cranks=2000, slogdir=slogdir):
    """Take up to `cranks` crank records from each of up to `files` slogfiles."""
    some_files = itertools.islice(slogdir.glob('**/*.slog.gz'), files)
    records = (rec
               for path in some_files
               for rec in itertools.islice(iter_cranks(path), cranks))
    return pd.DataFrame.from_records(records)
# files_top = sample(200, 100)
c500 = sample()
# -
show_times(
files_top[files_top.crankNum == 1][[
'slogfile', 'line', 'time', 'vatID', 'deliveryNum', 'syscalls', 'compute', 'time_kernel', 'blockHeight']
].sort_values('blockHeight').set_index(['slogfile', 'line']),
['time'])
# +
def show_times(df, cols):
    """Copy of df with each named epoch-seconds column converted to datetimes."""
    converted = {col: pd.to_datetime(df[col], unit='s') for col in cols}
    return df.assign(**converted)
def slogfile_summary(df):
    """Per (slogfile, type) summary: record count plus time / blockHeight /
    crankNum extremes, with times rendered as datetimes."""
    grouped = df.groupby(['slogfile', 'type'])
    summary = grouped[['line']].count()
    summary['time_min'] = grouped[['time']].min().time
    summary['time_max'] = grouped[['time']].max().time
    summary['blockHeight_min'] = grouped[['blockHeight']].min().blockHeight
    summary['crankNum_min'] = grouped[['crankNum']].min().crankNum
    return show_times(summary, ['time_min', 'time_max'])
slogfile_summary(files_top) # [files_top.type == 'deliver-result']).sort_values('crankNum_min', ascending=False).head(15)
# +
def stuff(df, slogfile):
    """Deliver-result rows of *df* for one slogfile, trimmed to key columns."""
    mask = (df.slogfile == slogfile) & (df.type == 'deliver-result')
    columns = ['crankNum', 'vatID', 'deliveryNum', 'kd', 'line', 'blockHeight']
    return df.loc[mask, columns]
coolex = stuff(c500, 'coolex-agorictest16-chain.slog.gz').set_index('crankNum')
mym = stuff(c500, 'mymoniker-agorictest16-chain.slog.gz').set_index('crankNum')
xwalk = pd.merge(coolex, mym, left_index=True, right_index=True)
xwalk[xwalk.kd_x != xwalk.kd_y]
# -
xwalk[xwalk.deliveryNum_y == 2801].kd_y.iloc[0]
# warner says: suppose we have 2 deliverInboundAcks
#
# when swingset tells mb device, device consults state _in RAM_ for dup ack num...
# not durable... differs between run-from-start and restart
# ## global crankNum -> vatID, deliveryNum
cranks = c500[c500['type'] == 'deliver-result']
cranks = cranks[['chain_id', 'crankNum', 'vatID', 'deliveryNum']].set_index(['chain_id', 'crankNum']).drop_duplicates().sort_index()
cranks # .sort_values('deliveryNum')
c500 = c500[~c500.line.isnull()]
show_times(c500[c500.blockHeight == 64628], ['time', 'time_start', 'blockTime'])
cranks.pivot(columns='vatID', values='deliveryNum')
cranks.plot(subplots=True)
c500[['kd']].dropna()
c500[['compute']].dropna()
# +
## reduced data set
# chain-wide deliveries
# chain_id, crankNum -> blockHeight, vatID, deliveryNum, kd, compute
# chain_id, vatID, deliveryNum -> blockHeight, kd, compute
# except vatTP?
# per-validator data
# chain_id, crankNum, run (slogfile, kernel-start) -> dur
# +
# global crankNum -> vatID, deliveryNum
c500[['crankNum', 'vatID', 'deliveryNum']].set_index()
# ignore un-full blocks?
# histogram of block durations; interval between...
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
# "blockTime":1625059381 <- consensus block time is median of block times (?)
# vatID, deliveryNum -> args / syscalls
# watch out for GC esp.
# c.run(runPolicy)
# simple model: kernel says how many computrons
# refinement: computrons, syscalls
# fitness: block distribution... 10s blocks...
# blocks that aren't too big (latency, validator variance risk)
# cpu that isn't idle (throughput)
# an ideal: median block time 10s
# 80 20 %ile
# importing a contract is an outlier
# +
# median validator - existing distribution of deliveries / compute -> blocks
# supplement: study wallclock stuff
# -
show_times(c500[c500['type'] == 'deliver-result'].set_index(['crankNum', 'vatID', 'deliveryNum', 'slogfile'])
.drop(['type', 'kd', 'dr', 'time_dr', 'description', 'managerType'], axis=1).sort_index(),
['time', 'time_kernel', 'blockTime'])
# ### Missing `compute` meter info?
start1 = c500
start1[(start1['type'] == 'deliver-result') & start1.compute.isnull()]
compute_ref = start1[(start1.slogfile == 'coolex-agorictest16-chain.slog.gz') &
(start1['type'] == 'deliver-result')].set_index('crankNum')[['compute']]
compute_ref
compute_delta = start1[['slogfile', 'crankNum', 'compute']]
compute_delta = pd.merge(compute_delta, compute_ref,
left_on='crankNum', right_index=True, suffixes=['', '_ref'])
compute_delta['delta'] = (compute_delta.compute - compute_delta.compute_ref).abs()
compute_delta.sort_values('delta', ascending=False)
# +
df = start1
categories = df.vatID.apply(lambda v: int(v[1:]))
colors = cm.rainbow(np.linspace(0, 1, categories.max() + 1))
df.plot.scatter(x='compute', y='dur', c=colors[categories],
title='Deliveries (colored by vatID)',
figsize=(12, 9), ylabel="dur (sec)");
# -
start1[~start1.compute.isnull()].groupby('vatID')[['crankNum']].count().sort_values('crankNum', ascending=False)
# +
def vat_rate(df, vatID):
    """Per-delivery computrons/sec for one vat, indexed by deliveryNum."""
    trimmed = df[['vatID', 'deliveryNum', 'compute', 'dur']].dropna()
    trimmed = trimmed.assign(rate=trimmed['compute'] / trimmed['dur'])
    one_vat = trimmed[trimmed['vatID'] == vatID]
    return one_vat.set_index('deliveryNum').sort_index()
def show_rate(df, vatID, figsize=(8, 9)):
    """Plot compute, dur, and rate per delivery for one vat (returns None)."""
    per_delivery = vat_rate(df, vatID)
    per_delivery.plot(subplots=True, figsize=figsize)
def fit_line(df, x, y, figsize=(9, 6)):
    """Scatter *y* vs *x* with a degree-1 least-squares line overlaid.

    NOTE: adds a 'fit' column to *df* in place.
    """
    line = np.poly1d(np.polyfit(df[x], df[y], 1))
    axes = df[[x, y]].plot.scatter(x=x, y=y, figsize=figsize)
    df['fit'] = line(df[x])
    df.plot(x=x, y='fit', color='Red', legend=False, ax=axes)
# show_rate(start1, 'v10');
# vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur')
fastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz']
fit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur')
# len(fastSlog[fastSlog.vatID == 'v10'])
# fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) #.sort_values('compute', ascending=False)
#fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True)
# -
vat_rate(start1, 'v16');
df = start1.pivot(columns='vatID', values=['compute', 'dur'],
index=['vatID', 'deliveryNum', 'crankNum', 'slogfile', 'line'])
df.reset_index().set_index('deliveryNum').drop(['crankNum', 'line'], axis=1) #.plot(figsize=(12, 8));
df.reset_index().set_index('deliveryNum')[['v23']].sort_index().dropna() #.plot()
df.describe()
df[['v14']].dropna()
df.crankNum.hist();
df.deliveryNum.hist();
df.groupby('method')[['compute', 'rate']].describe()
df.groupby('method')[['rate', 'compute', 'dur']].mean().sort_values('rate').head(90).plot(
subplots=True, rot=90, figsize=(8, 6), title='Method Compute Cost, Rate: bottom 90');
df.groupby('method')[['rate', 'compute', 'dur']].mean().sort_values('rate').tail(8).plot(
subplots=True, rot=90, figsize=(8, 6), title='Method Compute Cost, Rate: top 8');
durByMethod.dur.sum()
# +
durByMethod = df.groupby('method')[['dur']].sum().sort_values('dur', ascending=False)
durByMethod.plot.pie(y='dur', figsize=(12, 9), autopct='%1.1f%%')
# -
df.groupby('vatID')[['rate']].describe().head(20)
df.groupby('slogfile')[['rate']].describe().head(20)
df.plot.scatter(x='deliveryNum', y='rate')
speed = df.groupby('slogfile')[['rate']].describe()[['rate'][0]][['count', 'mean', 'std']]
speed = speed.sort_values('mean', ascending=False)
speed['relative'] = speed['mean'] / speed['mean'][0]
speed
# +
def boxplot_sorted(df, by, column, **config):
    """Boxplot of *column* grouped by *by*, with groups ordered by median."""
    per_group = {key: grp[column] for key, grp in df.groupby(by)}
    wide = pd.DataFrame(per_group)
    order = wide.median().sort_values().index
    return wide[order].boxplot(**config)
ax = boxplot_sorted(df, by=["slogfile"], column="rate", rot=90, figsize=(12, 9))
ax.set_title('Validator Speed: Sample of 20 from Phase 4');
ax.set_ylabel('computrons / sec')
# -
ax = df.sort_values('crankNum').plot.scatter(x='crankNum', y='compute');
ax.set_yscale('log')
df[(df.dur < df.dur.mean() + df.dur.std()) &
(df.compute < df.compute.mean() + df.compute.std())][['compute', 'dur']].hist();
# +
df = crank_info(c500)
df = df[df.crankNum.isin(compute_ref.index)]
rate = np.polyfit(df.compute, df.dur, 1)
f = np.poly1d(rate)
df['rate'] = f(df.compute)
# df[['compute', 'dur', 'rate']].head()
print(f)
# -
ax1 = df[['compute', 'dur']].plot.scatter(x='compute', y='dur', figsize=(9, 6))
df.plot(x='compute', y='rate', color='Red', legend=False, ax=ax1);
ax1.set_title(f"{len(df)} cranks from w3m: Duration vs. Compute Meter");
ax1.set_xlabel("compute units")
ax1.set_ylabel("duration (sec)")
r = df.compute / df.dur
r.max() / r.min()
df.sort_values('rate', ascending=False).drop(['time', 'type', 'detail', 'detail_dr'], axis=1)
# ## Colophon: jupytext
#
# This is a jupyter notebook paired with a python script using [jupytext](https://jupytext.readthedocs.io/en/latest/).
#
# We use the [python38Packages.jupytext](https://search.nixos.org/packages?channel=21.05&from=0&size=50&buckets=%7B%22package_attr_set%22%3A%5B%22python38Packages%22%5D%2C%22package_license_set%22%3A%5B%5D%2C%22package_maintainers_set%22%3A%5B%5D%2C%22package_platforms%22%3A%5B%5D%7D&sort=relevance&query=jupytext) nix package; in particular, `/nix/store/a9911qj06dy0ah7fshl39x3w4cjs7bxk-python3.8-jupytext-1.11.2`.
#
| # -*- coding: utf-8 -*-
# # How long does a Computron take?
#
# - [build model of computron\-to\-wallclock relationship · Issue \#3459 · Agoric/agoric\-sdk](https://github.com/Agoric/agoric-sdk/issues/3459)
# ## Preface: Python Data Tools
#
# See also [shell.nix](shell.nix).
# +
import pandas as pd
import numpy as np
import sqlalchemy as sqla
import matplotlib.cm as cm
import dask
import dask.dataframe as dd
import dask.bag as db
dict(pandas=pd.__version__,
numpy=np.__version__,
sqlalchemy=sqla.__version__,
dask=dask.__version__)
# -
# ### Notebook / Scripting Authority
#
# As a nod to OCap discipline, we avoid ambient authority unless we're in a `TOP`-level scripting or notebook context.
TOP = __name__ == '__main__'
# Logging is a bit of an exception to OCap discipline, as is stderr.
# +
import logging
from sys import stderr
logging.basicConfig(level=logging.INFO, stream=stderr,
format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger(__name__)
if TOP:
log.info('notebook start')
# -
# ### Dask Parallel Scheduler UI
# +
from dask.distributed import Client, LocalCluster
if TOP:
cluster = LocalCluster(n_workers=8)
client = Client(cluster)
TOP and client
# -
# ## Result Store
# +
db4_uri = 'sqlite:///slog4.db'
if TOP:
db4 = sqla.create_engine(db4_uri)
# -
# ## SLog files
#
# [rclone support for Google drive](https://rclone.org/drive/)
#
# > This contains 564GB of data from 117 participants, spread across 172 slogfiles ...
#
# ```
# [nix-shell:~/t4]$ rclone sync --progress 'Engineering:/2021-07-04 testnet phase4-stress data/validator slogfiles' ./slogfiles/
# Transferred: 78.633G / 78.633 GBytes, 100%, 101.302 MBytes/s, ETA 0s
# Checks: 5 / 5, 100%
# Transferred: 182 / 182, 100%
# Elapsed time: 13m16.0s
# ```
#
# +
import importlib
import slogdata
importlib.reload(slogdata)
from slogdata import SlogAccess, CLI, show_times
if TOP:
def _dir(path):
import pathlib
return pathlib.Path(path)
def _cli(bin):
from subprocess import run, Popen
return CLI(bin, run, Popen, debug=True)
_sa4 = SlogAccess(_dir('/home/customer/t4/slogfiles'),
_cli('/home/customer/projects/gztool/gztool'))
TOP and show_times(_sa4.get_records('pathrocknetwork/chain-15.pathrocknetwork.slog.gz', 7721, 2))
# -
_bySize = _sa4.files_by_size()
_bySize
_bySize[_bySize.parent == 'KingSuper']
TOP and _bySize[::5].set_index('name')[['st_size']].plot.barh(
title='slogfile sizes (sample)',
figsize=(10, 8));
# ### random access with `gztool`
#
# [gztool](https://github.com/circulosmeos/gztool) `a03c5b4fd5b3` Jul 13 2021.
#
#
# ```
# ~/projects/gztool/gztool -C -e */*.slog.gz
# ...
# ERROR: Compressed data error in 'atlantean/atlantean-agorictest16-chain.slog.gz'.
# ...
# Index file 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi' already exists and will be used.
# Processing 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gz' ...
# Processing index to 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi'...
#
# 172 files processed
# 1 files processed with errors!
# ```
# +
# count lines on all slogfiles in parallel
# TODO: if it's already in the DB, don't compute it again.
if TOP:
_withLines = _bySize.assign(
lines=db.from_sequence(_bySize.values).map(
lambda v: _sa4.line_count(*v[1:3])).compute())
TOP and _withLines
# -
_withLines.to_sql('file_meta', db4, index=False, if_exists='replace')
# !sqlite3 slog4.db '.header on' '.mode column' 'select * from file_meta limit 3'
_withLines = pd.read_sql_table('file_meta', db4)
# +
def file_chart(slogdf, sample=5, **plotkw):
    """Horizontal bar chart of line count and size (in 64-byte units) for
    every *sample*-th slogfile."""
    chart = slogdf[['name', 'st_size', 'lines']].copy()
    chart['b64'] = chart['st_size'] / 64
    chart = chart.drop(columns=['st_size'])
    chart.set_index('name')[::sample].plot.barh(**plotkw)
TOP and file_chart(_withLines, title='slogfile sizes (sample)', figsize=(10, 8))
# -
# ## slogfile basics
pd.read_sql("""
select st_size, lines
from file_meta
order by st_size desc
""", db4).describe()
# ## Runs, Blocks, and Deliveries
#
# > split each slogfile into runs (each beginning with an import-kernel event)
# +
def partition_lines(lines, step=1000000):
    """Split a 1-based range of *lines* lines into chunks of at most *step*.

    Returns a DataFrame with columns start, qty, lines (one row per chunk).
    """
    chunks = []
    for start in range(1, lines + 1, step):
        chunks.append(dict(start=start,
                           qty=min(lines + 1 - start, step),
                           lines=lines))
    return pd.DataFrame.from_records(chunks)
partition_lines(_withLines.lines.iloc[-1])
# +
#client.restart()
# +
# # !sqlite3 slog4.db 'drop table run'
# +
def provide_table(engine, table, todo, chunksize=None, index=True):
    """Memoize a DataFrame in a SQL table: read it back when the table
    already exists, otherwise build it via todo() and persist it."""
    if not sqla.inspect(engine).has_table(table):
        built = todo()
        built.to_sql(table, engine, index=index)
        return built
    return pd.read_sql_table(table, engine, chunksize=chunksize)
def runs_todo(withLines):
    # Extract kernel-start "runs" from every slogfile in parallel via dask
    # (one delayed task per file/line-range partition), then join file
    # metadata and derive each run's end time / end line from the start of
    # the following run in the same file.
    # NOTE(review): relies on module-level _sa4 (SlogAccess) and partition_lines.
    runs = dd.from_delayed([
        dask.delayed(_sa4.provide_runs)(f.parent, f['name'], part.start, part.qty)
        for fid, f in withLines.iterrows()
        for _, part in partition_lines(f.lines).iterrows()
    ]).compute().sort_values(['file_id', 'line'])
    withNames = pd.merge(runs, withLines[['file_id', 'parent', 'name', 'st_size', 'lines']],
                         on='file_id')
    # Compute end times: shift(-1) pairs each run with its successor in the file.
    byFile = withNames.groupby('file_id')
    runs = pd.concat([
        withNames,
        byFile.apply(lambda g: pd.DataFrame(dict(time_end=g.time.shift(-1)))),
        byFile.apply(lambda g: pd.DataFrame(dict(line_end=g.line.shift(-1)))),
    ], axis=1)
    # The last run of a file has no successor; its end line is the file's last line.
    runs.line_end = np.where(runs.line_end.isnull(), runs.lines, runs.line_end)
    return runs.sort_values(['st_size', 'file_id', 'line']).reset_index(drop=True)
_runs = provide_table(db4, 'run', lambda: runs_todo(_withLines))
# -
# !sqlite3 slog4.db '.schema run'
show_times(_runs, ['time', 'time_end'])[['st_size', 'line', 'line_end', 'parent', 'file_id', 'time', 'time_end']]
# ### runs per slogfile
df = _runs.groupby('file_id')[['line']].count()
df.describe()
# +
df = pd.read_sql("""
select file_id, count(*) runs, name, st_size, lines
from run r
-- join file_id s on s."index" = r.slogfile
group by file_id
order by 2
""", db4)
df.set_index('name')[['runs']][::5].plot.barh(
log=True,
title='slogfile runs (sample)',
figsize=(10, 8));
# -
# ## agorictest-16 genesis: `2021-07-01 19:00:00`
gen16 = show_times(pd.DataFrame(dict(blockHeight=64628, blockTime=[1625166000], ts=1625166000)), ['blockTime'])
gen16
# ## Block end start / finish events
# +
import importlib
import slogdata
from slogdata import SlogAccess
importlib.reload(slogdata)
_sa4 = SlogAccess(_dir('/home/customer/t4/slogfiles'),
_cli('/home/customer/projects/gztool/gztool'))
show_times(
_sa4.provide_blocks('ChainodeTech', 'agorictest-16_chain.slog.gz', 1, 1000000)
)
# -
# ## Separate runs by chain
# +
def first_block(sa, run,
                head=5000,
                ts=gen16.ts[0]):
    """First couple of block events of a run, tagged with the chain (15/16).

    Returns a sentinel row (blockHeight == -1, chain == NaN) when no block
    event appears in the first *head* lines of the run.
    """
    log.info('1st block: %s/%s', run.parent, run['name'])
    span = min(int(run.line_end) - run.line + 1, head)
    ref = f'{run.parent}/{run["name"]}'
    blocks = sa.get_blocks(ref, run.line, span)[:2]
    if len(blocks) == 0:
        sentinel = dict(blockHeight=-1, blockTime=-1, run=run.name, chain=np.nan)
        return pd.DataFrame.from_records([sentinel], index=[run.name])
    chain = 16 if blocks.blockTime[0] >= ts else 15
    return blocks.assign(run=run.name, chain=chain)
show_times(first_block(_sa4, _runs.loc[0]))
# +
def run2chain(sa, runs):
    """For each run, the blockHeight/blockTime/chain of its first block event."""
    def peek(run):
        return first_block(sa, run).iloc[0][['blockHeight', 'blockTime', 'chain']]
    return runs.apply(peek, axis=1)
_r2c = run2chain(_sa4, _runs)
_r2c
# -
_runchain = pd.concat([_runs.drop(columns=['index']), _r2c], axis=1)
_runchain.to_sql('runchain', db4)
_runchain.groupby('chain')[['line']].count()
# !sqlite3 slog4.db '.header on' '.mode column' 'select * from runchain limit 3'
_runchain = pd.read_sql('runchain', db4)
_runchain.groupby('chain')[['line']].count()
_runs['chain'] = _runchain.chain
_runs.groupby('chain')[['file_id', 'lines']].count()
# +
# # !sqlite3 slog4.db 'drop table blockval;'
# +
def blockval_todo(file_meta):
    # Parse block begin/end events from every slogfile in parallel (dask),
    # one delayed task per (file, line-range) partition.
    # NOTE(review): relies on module-level _sa4 and partition_lines.
    return dd.from_delayed([
        dask.delayed(_sa4.provide_blocks)(f.parent, f['name'], part.start, part.qty)
        for fid, f in file_meta.iterrows()
        for _, part in partition_lines(f.lines).iterrows()
    ]).compute()
_blockval = provide_table(db4, 'blockval', lambda: blockval_todo(_withLines), index=True)
show_times(_blockval)
# -
# !sqlite3 slog4.db '.schema blockval'
pd.read_sql("""
select file_id, max(blockHeight)
from blockval
where blockTime >= 1625166000
group by file_id
order by 2 desc
""", db4)
# ### Consensus Block-to-Block Time
# +
# db4.execute("""drop table if exists block""")
# -
db4.execute("""
create table block as
select distinct
case when blockTime >= 1625166000 then 16 else 15 end chain
, blockHeight, blockTime
from blockval
order by blockTime
""")
pd.read_sql("""
select * from block limit 10
""", db4)
# ### What is the range of blocks in `agorictest-16`?
pd.read_sql("""
select lo, n, lo + n - 1, hi from (
select min(blockHeight) lo, max(blockHeight) hi, count(distinct blockHeight) n
from block
where chain = 16
)
""", db4)
# +
blk16 = pd.read_sql("""
select blockHeight, blockTime
from block
where chain = 16
""", db4, index_col='blockHeight')
show_times(blk16).describe(datetime_is_numeric=True)
# -
b16time = pd.read_sql("""
select * from block
where chain = 16
""", db4, index_col='blockHeight')
b16time['delta'] = b16time.shift(-1).blockTime - b16time.blockTime
b16time[['delta']].describe()
b16time[b16time.index < 90527].delta.max()
b16time[b16time.delta == 120]
b16time[['delta']].plot(
title='agorictest-16 consensus blockTime delta',
ylabel='sec',
figsize=(9, 6));
show_times(b16time, ['blockTime']).set_index('blockTime')[['delta']].plot(
title='agorictest-16 consensus blockTime delta',
ylabel='sec',
figsize=(9, 6));
# histogram of block-to-block time delta for agorictest-16. (_Note the log scale on the y axis._)
b16time[['delta']].hist(bins=20, log=True);
df = show_times(b16time, ['blockTime'])
df[df.blockTime <= '2021-07-02 19:00:00'][['delta']].hist(bins=20, log=True);
df[df.blockTime <= '2021-07-02 19:00:00'][['delta']].describe()
# ### How many validators logged each block in agorictest-16?
df = pd.read_sql("""
select blockHeight, count(distinct file_id) qty
from blockval
where sign = -1
and blockTime >= 1625166000
group by blockHeight
""", db4)
df.head()
df.set_index('blockHeight').plot(title='agorictest-16 validator coverage by block', figsize=(9, 6));
# !sqlite3 slog4.db '.schema run'
# +
# db4.execute('drop table if exists blockrun16')
db4.execute("""
create table blockrun16 as
with b as (
select *
from blockval
where blockTime >= 1625166000
)
select file_id
, (select r."index"
from run r
where r.file_id = b.file_id and r.line <= b.line and b.line < r.line_end) run
, b.line, b.time
, b.sign
, blockHeight, blockTime
from b
""")
df = pd.read_sql("""
select * from blockrun16
""", db4)
df.tail()
# -
x = df.groupby('blockHeight')[['run']].count()
x.plot();
x['blockHeight'].sort_values('max').reset_index(drop=True).plot();
# ## Slow Blocks
df = show_times(b16time, ['blockTime'])
df[(df.blockTime <= '2021-07-02 19:00:00') &
(df.delta >= 30)]
# Which runs include block 72712, which took 31 sec?
b33 = pd.read_sql("""
select lo.file_id, lo.run, lo.line, hi.line - lo.line + 1 range, lo.blockHeight
from blockrun16 lo
join blockrun16 hi on hi.run = lo.run and hi.blockHeight = lo.blockHeight
where lo.blockHeight in (72712)
and lo.sign = -1
and hi.sign = 1
""", db4)
b33
# ## Correlating block start with block end
_blockrun16 = df = pd.read_sql_table('blockrun16', db4)
df.tail()
lo = df[df.sign == -1]
hi = df.shift(-1)
hi = hi[hi.sign == 1]
dur = hi.time - lo.time
# show_times(df, ['time', 'time_end'])
lo['dur'] = dur
lo['s_hi'] = hi.file_id
lo['l_hi'] = hi.line
lo['t_hi'] = hi.time
dur = lo[lo.file_id == lo.s_hi]
show_times(dur, ['time', 'blockTime'])
show_times(
dur.sort_values('dur').dropna().tail(),
['time', 'blockTime', 't_hi']
)
dur[dur.dur.abs() <= 120].plot.scatter(x='blockHeight', y='dur')
dur[['blockHeight', 'dur']].describe()
# ## Cranks in a Block
# +
def long_runs_including(runs, blockrun, blockHeight):
    """Runs whose block events include *blockHeight*, longest (by lines) first."""
    matching = blockrun.loc[blockrun.blockHeight == blockHeight, 'run']
    with_length = runs.assign(length=runs.line_end - runs.line)
    included = with_length[with_length.index.isin(matching)]
    return included.sort_values('length', ascending=False)
_long16 = long_runs_including(_runs, _blockrun16, 64628)
_long16.head()
# -
show_times(dur[dur.run == _long16.index[0]], ['time', 'blockTime', 't_hi'])
_blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64628)].iloc[:2]
# +
def blockrun_records(blockHeight, run, slogAccess, blockrun,
                     target=None, include=None):
    """Slog records spanning one block of one run, tagged with file_id."""
    span = blockrun[(blockrun.run == run.name) &
                    (blockrun.blockHeight == blockHeight)]
    begin = span.iloc[0]  # presumably sign == -1 (block start) — TODO confirm
    end = span.iloc[1]
    qty = int(end.line - begin.line + 1)
    ref = f'{run.parent}/{run["name"]}'
    records = slogAccess.get_records(ref, int(begin.line), qty,
                                     target=target, include=include)
    return records.assign(file_id=run.file_id)
def get_vats(slogAccess, ref, start, qty):
    """create-vat events within a line range of one slogfile."""
    return slogAccess.get_records(ref, start, qty,
                                  target='create-vat',
                                  include=['create-vat'])
def vats_in_blockrun(blockHeight, run, slogAccess, blockrun):
    """create-vat events inside one block of one run."""
    span = blockrun[(blockrun.run == run.name) &
                    (blockrun.blockHeight == blockHeight)]
    begin = span.iloc[0]  # presumably sign == -1 (block start) — TODO confirm
    end = span.iloc[1]
    ref = f'{run.parent}/{run["name"]}'
    vats = get_vats(slogAccess, ref, int(begin.line), int(end.line - begin.line + 1))
    return vats.assign(blockHeight=blockHeight, parent=run.parent)
# _sa4.get_records('Nodeasy.com/Nodeasy.com-agorictest15-chain.slog.gz', 1662497, 1671912 - 1662497)
vats_in_blockrun(_blockrun16.iloc[0].blockHeight, _runs.loc[_long16.index[0]],
_sa4, _blockrun16)
# -
vats_in_blockrun(64629, _runs.loc[_long16.index[0]],
_sa4, _blockrun16)
# Empty template DataFrame: one representative delivery record sliced to zero
# rows with .iloc[:0], so empty results still carry the expected columns and
# dtypes through concat / to_sql.
no_deliveries = pd.DataFrame.from_records([
    {'time': 1625198620.6265895,
     'type': 'deliver-result',
     'crankNum': 1291,
     'vatID': 'v11',
     'deliveryNum': 124,
     'kd': object(),
     'line': 1673077,
     'dr': object(),
     'syscalls': 2,
     'method': 'inbound',
     'compute': 119496.0,  # missing compute is possible... from replay.
     'dur': 0.1912224292755127,
     }]).iloc[:0]
# show the dtypes the template fixes
no_deliveries.dtypes
# +
import json
import itertools
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
def block_cranks(records):
    """Pair deliver / deliver-result slog records into delivery rows.

    Each deliver-result record is augmented with the matching deliver's kd,
    the method name (for message deliveries), the number of syscall-results
    seen since the deliver, the compute meter value, and the wallclock
    duration. Returns the empty no_deliveries template when the input holds
    no complete deliveries.
    """
    deliveries = []
    syscalls = 0
    deliver = None
    for record in records:
        ty = record['type']
        if ty == 'deliver':
            deliver = record
            syscalls = 0
        elif ty == 'syscall-result':
            syscalls += 1
        elif ty == 'deliver-result':
            if not deliver:
                # deliver-result without a preceding deliver: log and skip.
                # (log.warning replaces the deprecated log.warn alias.)
                log.warning('no deliver? %s', record)
                continue
            dur = record['time'] - deliver['time']
            # kd = [tag, target, {method: ...}] for message deliveries
            method = deliver['kd'][2]['method'] if deliver['kd'][0] == 'message' else None
            # compute may be absent (e.g. replayed deliveries) — then NaN
            compute = record['dr'][2]['compute'] if isinstance(record['dr'][2], dict) else np.nan
            detail = dict(record,
                          syscalls=syscalls,
                          kd=deliver['kd'],
                          method=method,
                          compute=compute,
                          dur=dur)
            deliveries.append(detail)
    if deliveries:
        return pd.DataFrame.from_records(deliveries)
    else:
        return no_deliveries
def get_deliveries(slogAccess, ref, start, qty):
    """Delivery rows (via block_cranks) for a line span of one slogfile.

    Returns the empty no_deliveries template when the span holds only the
    block start/end events, or when no syscall-bearing records were found.
    """
    if qty <= 2:  # just block start, block end
        return no_deliveries
    df = slogAccess.get_records(
        ref, int(start), int(qty),
        target=None, include=['deliver', 'deliver-result', 'syscall-result'])
    if len(df) > 0 and 'syscallNum' in df.columns:
        # Drop whichever per-syscall detail columns are present, in one pass.
        # (The original looped four times, re-dropping an already-empty set.)
        detail_cols = set(df.columns) & {'syscallNum', 'ksr', 'vsr', 'vd'}
        df = df.drop(columns=list(detail_cols))
        return block_cranks(df.to_dict('records'))
    else:
        return no_deliveries
_g16 = _blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64628)].iloc[:2]
_run1 = _runs.loc[_long16.index[0]]
get_deliveries(_sa4, f'{_run1.parent}/{_run1["name"]}', _g16.iloc[0].line, _g16.iloc[1].line - _g16.iloc[0].line + 1)
# -
df = dur[dur.run == _long16.index[0]].assign(length=dur.l_hi - dur.line + 1)
# df[df.length > 2].head(10)
df[df.dur > 5].head(10)
# +
# https://avi.im/blag/2021/fast-sqlite-inserts/
def run_sql(script, engine):
    """Execute each ';'-plus-newline separated statement of *script* on *engine*."""
    statements = script.strip().split(';\n')
    for statement in statements:
        engine.execute(statement)
run_sql('''
PRAGMA journal_mode = OFF;
PRAGMA synchronous = 0;
PRAGMA cache_size = 1000000;
PRAGMA locking_mode = NORMAL;
PRAGMA temp_store = MEMORY;
''', db4)
# -
len(dur)
dur.to_sql('blockrun16dur', db4, if_exists='replace', chunksize=25000, index=False)
# +
_br2 = _blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64632)].iloc[:2]
get_deliveries(_sa4, f'{_run1.parent}/{_run1["name"]}',
_br2.iloc[0].line, _br2.iloc[1].line - _br2.iloc[0].line + 1)
# +
# chain_id, vatID, deliveryNum -> blockHeight, kd, compute
import inspect
def provide_deliveries(slogAccess, blockHeight, run, blockrun):
    """Cached delivery details for one block of one run.

    Looks up the block's start/end events in *blockrun*, then delegates to
    slogAccess.provide_data, which caches the extracted deliveries under a
    per-block name ('gzip' compressed).

    Returns a sentinel frame (file_id == -1, chain == -1) when the block's
    start/end pair is absent from *blockrun*.
    Raises NotImplementedError when the cached result looks malformed
    (wrong chain dtype, missing vatID, or stray 'vd' column).
    """
    br = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]
    if len(br) < 2:
        # no start+end pair for this block in this run
        return no_deliveries.assign(file_id=-1, chain=-1, blockHeight=blockHeight, run=run.name)
    block_start = br.iloc[0]  # assert sign == -1?
    block_end = br.iloc[1]
    length = int(block_end.line - block_start.line + 1)
    df = slogAccess.provide_data(run.parent, run['name'], int(block_start.line), length,
                                 f'deliveries-{blockHeight}', no_deliveries,
                                 lambda ref, start, qty: get_deliveries(slogAccess, ref, start, qty),
                                 'gzip')
    df = df.assign(chain=run.chain, blockHeight=blockHeight, run=run.name)
    # Sanity check: malformed cache entries surface here rather than downstream.
    if df.dtypes['chain'] not in ['int64', 'float64'] or 'vatID' not in df.columns or 'vd' in df.columns:
        raise NotImplementedError(f'cols: {df.columns} dtypes: {df.dtypes} block {blockHeight, int(block_start.line)}, run\n{run}')
    return df
df = provide_deliveries(_sa4, 66371, _run1, _blockrun16)
show_times(df)
# -
# Computron rate for just this one block?
df.compute.sum() / df.dur.sum()
# test empty
provide_deliveries(_sa4, 64629, _run1, _blockrun16)
_runs.loc[455:456]
# ## Cranks in one long run starting at agorictest-16 genesis
gen16
df = pd.read_sql("""
with lo as (
select *
, time - blockTime delta
from blockrun16
where blockHeight = 64628
and blockTime = 1625166000
and sign = -1
and run is not null
), hi as (
select run, max(blockHeight) hi, max(blockTime) t_hi
from blockrun16
where run is not null
and sign = -1
group by run
), agg as (
select lo.*, hi.hi, hi.t_hi
from lo join hi on lo.run = hi.run
where abs(delta) < 7
order by hi.t_hi desc
)
select agg.*, run.parent, run.name
from agg
join run on agg.run = run."index"
limit 5
""", db4)
show_times(df, ['time', 'blockTime', 't_hi'])
show_times(_runs).loc[445]
# +
import json
def run1_deliveries(con, sa, lo, hi, run, br,
                    json_cols=['kd', 'dr'],
                    table='run1'):
    # Persist deliveries for blocks [lo, hi) of one run into a SQL table,
    # resuming after the highest block already stored.
    # NOTE(review): mutable default json_cols is shared across calls;
    # harmless here since it is never mutated.
    if sqla.inspect(con).has_table(table):
        # resume: start just past the last block already saved
        lo = pd.read_sql(f'select max(blockHeight) + 1 lo from {table}', con).iloc[0].lo
        if_exists = 'append'
    else:
        if_exists = 'replace'
    for blockHeight in range(lo, hi):
        df = provide_deliveries(sa, blockHeight, run, br)
        if not len(df):
            # log.info('block %d: no deliveries', blockHeight)
            continue
        for col in json_cols:
            # serialize structured columns so sqlite can store them
            df[col] = df[col].apply(json.dumps)
        log.info('block %d of %d: %s += %d rows', blockHeight, hi, table, len(df))
        df.to_sql(table, con, if_exists=if_exists, index=False)
        if_exists = 'append'
run1_deliveries(db4, _sa4, 64628, 75000, _runs.loc[445], _blockrun16)
# run1_deliveries(db4, _sa4, 75000, 90530, _runs.loc[445], _blockrun16, table='run1b')
# -
_run1 = df = pd.read_sql('select * from run1 union all select * from run1b', db4)
show_times(_run1.tail(3))
_run1.blockHeight.describe()
_run1[_run1.blockHeight >= 88296 - 2].sort_values('blockHeight').head(30).drop(columns=['kd', 'dr', 'file_id'])
df = _run1[_run1.blockHeight == 88295].sort_values('dur', ascending=False).drop(columns=['kd', 'dr', 'file_id'])
df.head(10)
df[df.dur >= 1]
# TODO: compare `getPayout` here (in 88295) vs something earlier... same computrons? same duration?
#
# e.g. if harden weakset grew, the duration could grow while keeping computrons constant
_run1[_run1.method == 'getPayout'][['compute', 'dur']].describe()
_run1[_run1.method == 'getPayout'].compute.hist()
_run1[(_run1.method == 'getPayout') & (_run1.compute == 31654)].plot.scatter(x='blockHeight', y='dur')
lg = _run1[_run1.blockHeight > 76000]
lg = lg[lg.dur < 1]
lg[(lg.method == 'getPayout') & (lg.compute == 31654)].plot.scatter(x='blockHeight', y='dur')
# Things got slower over time.
#
# Hypothesis: GC didn't happen -> weak set got big -> weakset access time got big
# So computron model should not be based on this range, but rather on pre-loadgen time.
# When looking at comptron / wallclock, we should look at:
#
# - all getCurrentAmount calls
# - within a narrow range of blockHeight
# - that all use the same # of computrons
#
# (as above)
#
b16time[b16time.delta == 224]
_run1[['compute', 'dur']].describe()
# +
def drate(df):
    """Add a 'rate' column: computrons per (syscalls + 1) per second of wallclock."""
    per_syscall = df['compute'] / (df['syscalls'] + 1)
    return df.assign(rate=per_syscall / df['dur'])
df = drate(_run1).groupby('method')[['rate']].aggregate(['count', 'mean', 'std', 'max'])
df = df.sort_values(('rate', 'mean'), ascending=False)
df
# -
common = _run1.groupby('method')[['line']].count()
common = common[common.line > 20]
common
drate(_run1[_run1.method.isin(common.index)])[['method', 'rate']].boxplot(by='method', rot=90, figsize=(20, 12))
common.sort_values('line', ascending=False).head()
_run1.blockHeight.describe()
_run1.sort_values('dur', ascending=False)
# This is an always-busy sim, but **TODO** we'd like to look at the arrival pattern that we have.
# +
def sim(df, c_eg, dur_eg, target):
    """Assign simulated block numbers by accumulating computrons.

    threshold = target seconds * (c_eg computrons / dur_eg seconds) is the
    computron budget per simulated block; only chain-16 rows are kept.
    """
    chain16 = df[df.chain == 16]
    threshold = target * (c_eg / dur_eg)
    log.info('threshold: %s', threshold)
    chain16['running'] = chain16['compute'].cumsum()
    chain16['sim_blk'] = (chain16['running'] / threshold).round()
    return chain16.reset_index(drop=True)
df = _run1.drop(columns=['type', 'kd', 'dr', 'file_id', 'line', 'run'])
# df = df[df.method != 'executeContract']
# df = df[df.method == 'getCurrentAmount'] # getPayout
# df.blockHeight = df.blockHeight - df.blockHeight.iloc[0]
df = sim(df, 48390.0, 0.074363, 5)
df = df[df.sim_blk.notnull()]
df.sim_blk = df.sim_blk.astype('int64')
show_times(df)
# -
pd.read_sql('''
select count(distinct run)
from blockrun16
''', db4)
len(_runs)
# +
def nth_block(sa, blockHeight, run, blockrun,
              ts=gen16.ts[0]):
    """Deliveries of one block within one run, tagged with run and chain."""
    log.info('%d th block: %s/%s', blockHeight, run.parent, run['name'])
    span = blockrun[(blockrun.blockHeight == blockHeight) &
                    (blockrun.run == run.name)]
    deliveries = provide_deliveries(sa, blockHeight, run, span)
    if len(deliveries) == 0:
        return deliveries
    return deliveries.assign(run=run.name, chain=run.chain)
m1b1 = pd.concat(
df
for _, run in _runs.iterrows()
for df in [nth_block(_sa4, 80001, run, _blockrun16)]
if len(df)
)
m1b1
# -
m1b1[(m1b1.method == 'getCurrentAmount') & (m1b1.deliveryNum == 44721)][['compute', 'dur', 'run']]
df = m1b1[(m1b1.method == 'getCurrentAmount') & (m1b1.deliveryNum == 44721)][['compute', 'dur', 'run']]
df.describe()
# ## Validator speed: 2-4x spread for `getCurrentAmount`
df[['dur']].hist()
# +
# df.groupby('method')[['compute']].describe().loc['executeContract']
# -
df.compute.hist(log=True);
df.dur.hist(log=True);
df[df.dur < .1].dur.hist()
# #### Total delivery duration per block
x = pd.concat([
df.groupby('blockHeight')[['dur']].sum(),
df.groupby('sim_blk')[['dur']].sum().rename(columns=dict(dur='dur_sim')),
], axis=1)
x.hist(); # log=True);
x.describe()
x.dur.quantile(.9)
xx = df.groupby('sim_blk')[['dur']].sum().rename(columns=dict(dur='dur_sim'))
xx[xx.dur_sim > 25]
df[df.blockHeight == 88295].sort_values('dur', ascending=False)
df[df.sim_blk == 32607].sort_values('dur', ascending=False)
_run1[_run1.compute == 381240].dur.describe()
_run1[_run1.compute == 381240].plot.scatter(x='blockHeight', y='dur')
# This wasn't a big deal during most of the chain (.25sec 75th percentile).
#
# We could model this within 2x or 3x by ignoring the spike.
# **TODO**: what happened during that spike? is it consensus-observable? kernel-observable?
df = _run1[_run1.compute == 381240]
df[(df.blockHeight >= 88100) & (df.blockHeight < 88400)].plot.scatter(x='blockHeight', y='dur')
df[df.sim_blk == 32607].compute.sum()
df[df.sim_blk == 32607].dur.sum()
df[df.sim_blk == 32607].syscalls.sum()
df.groupby('blockHeight')[['syscalls']].sum().describe()
# #### Total compute per block
x = pd.concat([
df.groupby('blockHeight')[['compute']].sum(),
df.groupby('sim_blk')[['compute']].sum().rename(columns=dict(compute='cmp_sim')),
], axis=1)
x.hist(log=True);
x.describe()
cluster.scale(8)
client.restart()
f'{12:04}'
# +
def pick_chain(ht,
gen=1625166000, hi=16, lo=15):
return np.where(ht > gen, hi, lo)
def run_deliveries(slogs, sa, run):
chain_id = f'agorictest-{run.chain}'
blocks = pd.concat(
pd.read_csv(blockFile)
for blockFile in (slogs / run.parent).glob('*-blocks.csv')
)
blocks = blocks[(blocks.line >= run.line) &
(blocks.line < run.line_end)]
blocks = blocks.assign(run=run.name)
heights = blocks.blockHeight.unique()
log.info('run %s %-3d blocks %.16s %s', run.name, len(heights),
pd.to_datetime(run.time, unit='s'), run['name'])
tot = 0
for blockHeight in heights:
detail = provide_deliveries(sa, blockHeight, run, blocks)
if not len(detail):
continue
tot += len(detail)
yield detail
if not tot:
yield no_deliveries.assign(file_id=-1, chain=-1, blockHeight=-1, run=run.name)
def by_vat(dest, run, detail):
chain_id = f'agorictest-{run.chain}'
run_detail = f'{run.name:04}-{run.parent}-{run.file_id}-{run.line}'
for vatID, g in detail.groupby('vatID'):
try:
(dest / chain_id / vatID).mkdir(parents=True)
except:
pass
vat_dir = dest / chain_id / vatID
f = vat_dir / f'delivery-detail-{run_detail}.csv.gz'
log.info('saving to %s:\n%s', f, g.set_index(['vatID', 'deliveryNum'])[['compute', 'dur']].tail(3))
g.to_csv(f, index=False)
f = vat_dir / f'delivery-summary-{run_detail}.csv.gz'
g[['vatID', 'deliveryNum', 'kd', 'syscalls', 'compute']].to_csv(f, index=False)
return detail.assign(run=run.name).groupby(['run', 'vatID'])[['deliveryNum']].count()
#by_vat(_dir('slogfiles/'), _dir('vat-details/'), _sa4, _runs)
for df in run_deliveries(_dir('slogfiles/'), _sa4, _runs.loc[58]):
print(df)
print(by_vat(_dir('vat-details/'), _runs.loc[58], df))
break
# +
def run_deliveries_todo(sa, slogs, dest, runs):
def do_run(run):
df = pd.concat(
detail
for detail in run_deliveries(slogs, sa, run)
)
return by_vat(dest, run, df)
todo = (
dask.delayed(do_run)(run)
for _, run in runs.iterrows()
)
return todo
per_run = dd.from_delayed(run_deliveries_todo(_sa4, _dir('slogfiles/'), _dir('vat-details/'), _runs))
per_run.compute()
# -
pd.to_datetime(1625213913.1672082, unit='s')
# +
import inspect
from slogdata import show_times
db4.execute('drop table if exists crankrun') #@@
def deliveries_todo(sa, blockrun, runs):
todo = (
dask.delayed(provide_deliveries)(sa, blockHeight, run,
blockrun[(blockrun.run == run.name) &
(blockrun.blockHeight == blockHeight)])
for run_ix, run in runs.iterrows()
for heights in [blockrun[blockrun.run == run_ix].blockHeight.unique()]
for _ in [log.info('run %s %-3d blocks %.16s %s', run_ix, len(heights),
pd.to_datetime(run.time, unit='s'), run['name'])]
for blockHeight in heights
)
log.info('todo: %s', type(todo))
df = dd.from_delayed(todo,
meta=no_deliveries.assign(file_id=1, chain=1, blockHeight=1, run=1))
return df.compute()
# _dr16 = provide_table(
# db4, 'crankrun',
# # 65517
# lambda: deliveries_todo(_sa4, _blockrun16[_blockrun16.blockHeight <= 65000], _runs.loc[200:275]))
_dr16 = deliveries_todo(_sa4, _blockrun16, # [_blockrun16.blockHeight <= 65000]
_runs[_runs.chain == 16])
_dr16
# -
# ## deliveries from batch
_delrun = pd.read_sql('select * from delrun', db4)
_delrun.groupby('chain')[['line']].count()
# ## Are compute meter values consistent?
# +
def compute_meter_consistent(df):
compute_count = df.groupby(['vatID', 'deliveryNum'])[['compute']].nunique()
dups = compute_count[compute_count['compute'] > 1]
return pd.merge(dups.reset_index(),
df[['run', 'vatID', 'deliveryNum', 'compute']],
how='left', suffixes=['_dup', ''],
left_on=['vatID', 'deliveryNum'],
right_on=['vatID', 'deliveryNum'])
# x = compute_meter_consistent(_alld16).compute()
x = compute_meter_consistent(_delrun[_delrun.chain == 16]).sort_values(['vatID', 'deliveryNum']) # .compute()
x
# -
compute_meter_consistent(_delrun[_delrun.chain == 15]).sort_values(['vatID', 'deliveryNum']) # .compute()
# ## Computrons per block
blockdel = _delrun[_delrun.method != 'executeContract']
key = ['chain', 'blockHeight', 'vatID', 'deliveryNum', 'compute']
blockdel = blockdel.sort_values(key).drop_duplicates()
df = blockdel.groupby(['chain', 'blockHeight'])[['deliveryNum']].count().sort_index()
df.plot()
_bkcomp = df = blockdel.groupby(['chain', 'blockHeight'])[['compute']].sum()
df
df.plot()
# +
def type2sign(df):
df['sign'] = np.where(df.type == 'cosmic-swingset-end-block-start', -1, 1)
return df
def byChain(df, gen=gen16.ts[0], hi=16, lo=15):
return df.assign(chain=np.where(df.blockTime >= gen, hi, lo))
return df
def slog_blocks(slogfiles,
pattern='**/*-blocks.csv'):
df = pd.concat(type2sign(pd.read_csv(p)[['type', 'blockHeight', 'blockTime']])
for p in slogfiles.glob(pattern))
df = byChain(df)
key = ['chain', 'blockHeight', 'blockTime']
df = df[key].sort_values(key).drop_duplicates()
return df.reset_index(drop=True)
_blk = slog_blocks(_dir('slogfiles/'))
_blk.tail()
# -
_byChain = _blk.groupby('chain')
df = pd.merge(
_byChain[['blockHeight']].nunique(),
_byChain[['blockHeight']].aggregate(['min', 'max'])['blockHeight'],
left_index=True, right_index=True,
)
df['span'] = df['max'] - df['min'] + 1
df
# +
def blockdur(df):
df = df.set_index(['chain', 'blockHeight'])
df['dur'] = df.shift(-1).blockTime - df.blockTime
return df
_bkdur = blockdur(_blk)
_bkdur
# -
# compute by block with duration
_bkcmpdur = _bkcomp.join(_bkdur, lsuffix='_d', rsuffix='_b')
_bkcmpdur['rate'] = (_bkcmpdur.compute / _bkcmpdur.dur).astype(float)
_bkcmpdur
_bkcmpdur[_bkcmpdur.dur > _bkcmpdur.dur.quantile(0.99)]
df = _bkcmpdur.loc[16]
df[df.dur < 8][['rate']].hist(log=True)
_bkcmpdur[_bkcmpdur.dur < 8][['rate']].describe()
# ## simulation
_delrun.groupby('run')[['line']].count()
_delrun[['crankNum', 'run']].groupby(['crankNum'])[['run']].aggregate(['count']).plot()
# +
def sim(df, percentile):
df = df[df.chain == 16]
df = df[df.method != 'executeContract']
key = ['blockHeight', 'crankNum', 'vatID', 'deliveryNum', 'compute']
df = df.groupby(key)[['dur']].aggregate(['count', 'mean', 'median', 'sum'])
return df
df = df[['blockHeight', 'crankNum', 'vatID', 'deliveryNum', 'compute']].sort_values(
['blockHeight', 'crankNum', 'vatID', 'deliveryNum']).drop_duplicates()
threshold = df.compute.quantile(percentile)
df['running'] = df.compute.cumsum()
df['sim_block'] = (df.running / threshold).round()
return df.reset_index(drop=True)
df = sim(_run1, .99)
df
# -
df[['blockHeight']].plot()
df.set_index('blockHeight')[['sim_block']].plot()
# ## Compute rate by vat
plt.cm.rainbow[1]
pd.Categorical(_delrun.method.dropna(), ordered=True)
# +
import matplotlib as plt
def cmap_of(df, color,
cmap=plt.cm.get_cmap('hot')):
df = df.loc[:, [color]].fillna('???')
byColor = df.groupby(color).count() #.set_index(color)
byColor['unit'] = range(len(byColor))
byColor.unit = byColor.unit / len(byColor)
byColor['color'] = byColor.unit.apply(cmap)
return byColor.loc[df[color]].color
cmap_of(_delrun, 'method')
# +
def vat_rate(df, vatID):
df = df[['vatID', 'deliveryNum', 'compute', 'dur']].dropna()
df['rate'] = df.compute / df.dur
df = df[df.vatID == vatID]
# df = df.groupby('deliveryNum')[['compute', 'dur', 'rate']].mean()
#df.sort_values('dur', ascending=False)
#df
df = df.set_index('deliveryNum').sort_index()
return df
def show_rate(df, vatID, figsize=(8, 9)):
df = vat_rate(df, vatID)
ax = df.plot(subplots=True, figsize=figsize)
def fit_line(df, x, y, color=None, figsize=(9, 6)):
df = df[~df[x].isnull() & ~df[y].isnull()]
cs = np.polyfit(df[x], df[y], 1)
f = np.poly1d(cs)
if color:
color = cmap_of(df, color)
ax1 = df[[x, y]].plot.scatter(x=x, y=y, color=color, figsize=figsize)
df['fit'] = f(df[x])
df.plot(x=x, y='fit', color='Red', legend=False, ax=ax1);
# show_rate(start1, 'v10');
# vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur')
# fastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz']
# fit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur')
# len(fastSlog[fastSlog.vatID == 'v10'])
# fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) #.sort_values('compute', ascending=False)
#fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True)
fit_line(_delrun[_delrun.chain == 16], 'compute', 'dur', color='method')
# -
_r = _delrun[['compute', 'dur', 'method']].assign(rate=_delrun.compute / _delrun.dur)
_r.groupby('method')[['rate']].describe().sort_values(('rate', 'mean'))
df.sort_values(('compute', 'mean'))
df = fastSlog[fastSlog.vatID == 'v10']
df['rate'] = df.compute / df.dur
df[['deliveryNum', 'dur', 'compute', 'rate']].set_index('deliveryNum').plot(subplots=True)
df.rate.describe()
# ### exclude dynamic vat creation
fastSlog.groupby('method')[['compute']].mean().plot.barh(log=True, figsize=(12, 10))
noContract = df =fastSlog[fastSlog.method != 'executeContract'].copy()
df['rate'] = df.compute / df.dur
df[['dur', 'compute', 'rate']].plot(subplots=True)
fit_line(noContract, 'compute', 'dur')
fit_line(fastSlog, 'compute', 'dur')
# ## Add syscalls to the model
df = noContract
cs = np.polyfit(df[['compute', 'syscalls']], df['dur'], 1)
df = _dr16.assign(chain_id=16)
df = df[['chain_id', 'vatID', 'deliveryNum', 'blockHeight', 'kd', 'compute']].drop_duplicates()
df = df.set_index(['chain_id', 'vatID', 'deliveryNum']).sort_index()
df[df.index.duplicated()]
df
df.loc[16].loc['v1'].loc[0]
_dr16.query('(deliveryNum == 0) & (vatID == "v1")').groupby('compute')[['line']].count()
pd.merge(_dr16,
df[df.index.duplicated()].reset_index()[['vatID', 'deliveryNum']],
left_on=['vatID', 'deliveryNum'], right_on=['vatID', 'deliveryNum']
)[['vatID', 'deliveryNum', 'blockHeight', 'kd', 'compute']]
# _dr16.assign(chain_id=16).set_index(['chain_id', 'vatID', 'deliveryNum'])
dall = pd.concat(
pd.read_csv(f)
for f in _dir('slogfiles/').glob('**/*-deliveries-*.csv.gz')
)
dall
# +
def load_deliveries(files, con, table):
if_exists = 'replace'
for file in files:
df = pd.read_csv(file)
df.to_sql(table, con, if_exists=if_exists)
if_exists = 'append'
log.info('loaded %d records from %s', len(df), file)
load_deliveries(
_dir('slogfiles/').glob('**/*-deliveries-*.csv.gz'),
db4,
'delrun3')
# -
# ### Did we ever do more than 1000 cranks in a block?
#
# if not, current policy never fired
df = _dr16[['blockHeight', 'crankNum']].drop_duplicates()
df.groupby('blockHeight')[['crankNum']].count().sort_values('crankNum', ascending=False)
# ## @@ Older approaches
# ## Delivery statistics
#
# > For each delivery in the corpus, we want to get statistics on the range of wallclock times taken by these validators.
# +
import gzip
import itertools
def iter_cranks(path):
"""split each slogfile into runs (each beginning with an import-kernel event),
process each run by finding sequential matching deliver+deliver-result pairs,
turn each pair into a (crankNum, computrons, wallclock) triple
"""
log.info('iter_cranks: %s', path)
with gzip.open(path) as f:
kernel = None
deliver = None
block = None
syscalls = None
for (ix, line) in enumerate(f):
try:
data = json.loads(line)
except json.JSONDecodeError:
log.warning('%s:%d: bad JSON: %s', path.name, ix, repr(line))
continue
ty = data['type']
# print(ix, data['type'], kernel, deliver)
if ty == 'import-kernel-finish':
kernel = data
deliver = None
syscalls = None
yield dict(kernel,
slogfile=path.name, line=ix)
elif ty == 'create-vat':
yield dict(slogfile=path.name,
line=ix,
time=data['time'],
type=ty,
vatID=data['vatID'],
description=data['description'],
managerType=data['managerType'],
time_kernel=kernel['time'])
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
elif ty == 'cosmic-swingset-end-block-start':
block = data
elif ty == 'cosmic-swingset-end-block-finish':
time = data['time']
time_start = block['time']
dur = time - time_start
if kernel:
time_kernel = kernel['time']
else:
log.warning('%s:%d: missing kernel context', path.name, ix)
time_kernel = np.nan
yield dict(slogfile=path.name,
line=ix,
time=time,
type=ty,
time_start=time_start,
dur=dur,
blockHeight=data['blockHeight'],
blockTime=data['blockTime'],
time_kernel=time_kernel)
block = None
elif deliver is None:
if ty == 'deliver':
deliver = data
syscalls = 0
elif data['type'] == 'deliver-result':
time = data['time']
time_start = deliver['time']
dur = time - time_start
method = deliver['kd'][2]['method'] if deliver['kd'][0] == 'message' else None
compute = data['dr'][2]['compute'] if type(data['dr'][2]) is type({}) else None
if block:
blockHeight = block['blockHeight']
blockTime=block['blockTime']
else:
# odd... how do we get here without block info???
log.warning('%s:%d: missing block context', path.name, ix)
blockHeight = blockTime = np.nan
if kernel:
time_kernel = kernel['time']
else:
log.warning('%s:%d: missing kernel context', path.name, ix)
time_kernel = np.nan
yield dict(slogfile=path.name,
line=ix,
time=time,
type=ty,
crankNum=data['crankNum'],
deliveryNum=data['deliveryNum'],
vatID=data['vatID'],
kd=deliver['kd'],
method=method,
syscalls=syscalls,
dr=data['dr'],
compute=compute,
time_start=time_start,
dur=dur,
blockHeight=blockHeight,
blockTime=blockTime,
time_kernel=time_kernel)
deliver = None
elif ty == 'syscall-result':
syscalls += 1
elif ty in ['clist', 'syscall']:
continue
else:
log.warning("%s:%d: expected deliver-result; got: %s", path.name, ix, ty)
deliver = None
def sample(files=50, cranks=2000, slogdir=slogdir):
return pd.DataFrame.from_records(
r
for slogfile in itertools.islice(slogdir.glob('**/*.slog.gz'), files)
for r in itertools.islice(iter_cranks(slogfile), cranks))
# files_top = sample(200, 100)
c500 = sample()
# -
show_times(
files_top[files_top.crankNum == 1][[
'slogfile', 'line', 'time', 'vatID', 'deliveryNum', 'syscalls', 'compute', 'time_kernel', 'blockHeight']
].sort_values('blockHeight').set_index(['slogfile', 'line']),
['time'])
# +
def show_times(df, cols):
out = df.copy()
for col in cols:
out[col] = pd.to_datetime(out[col], unit='s')
return out
def slogfile_summary(df):
g = df.groupby(['slogfile', 'type'])
out = g[['line']].count()
out['time_min'] = g[['time']].min().time
out['time_max'] = g[['time']].max().time
out['blockHeight_min'] = g[['blockHeight']].min().blockHeight
# out['blockHeight_max'] = g[['blockHeight']].max().blockHeight
out['crankNum_min'] = g[['crankNum']].min().crankNum
return show_times(out, ['time_min', 'time_max'])
slogfile_summary(files_top) # [files_top.type == 'deliver-result']).sort_values('crankNum_min', ascending=False).head(15)
# +
def stuff(df, slogfile):
return df[(df.slogfile==slogfile) &
(df.type == 'deliver-result')][['crankNum', 'vatID', 'deliveryNum', 'kd', 'line', 'blockHeight' ]]
coolex = stuff(c500, 'coolex-agorictest16-chain.slog.gz').set_index('crankNum')
mym = stuff(c500, 'mymoniker-agorictest16-chain.slog.gz').set_index('crankNum')
xwalk = pd.merge(coolex, mym, left_index=True, right_index=True)
xwalk[xwalk.kd_x != xwalk.kd_y]
# -
xwalk[xwalk.deliveryNum_y == 2801].kd_y.iloc[0]
# warner says: suppose we have 2 deliverInboundAcks
#
# when swingset tells mb device, device consults state _in RAM_ for dup ack num...
# not durable... differs between run-from-start and restart
# ## global crankNum -> vatID, deliveryNum
cranks = c500[c500['type'] == 'deliver-result']
cranks = cranks[['chain_id', 'crankNum', 'vatID', 'deliveryNum']].set_index(['chain_id', 'crankNum']).drop_duplicates().sort_index()
cranks # .sort_values('deliveryNum')
c500 = c500[~c500.line.isnull()]
show_times(c500[c500.blockHeight == 64628], ['time', 'time_start', 'blockTime'])
cranks.pivot(columns='vatID', values='deliveryNum')
cranks.plot(subplots=True)
c500[['kd']].dropna()
c500[['compute']].dropna()
# +
## reduced data set
# chain-wide deliveries
# chain_id, crankNum -> blockHeight, vatID, deliveryNum, kd, compute
# chain_id, vatID, deliveryNum -> blockHeight, kd, compute
# except vatTP?
# per-validator data
# chain_id, crankNum, run (slogfile, kernel-start) -> dur
# +
# global crankNum -> vatID, deliveryNum
c500[['crankNum', 'vatID', 'deliveryNum']].set_index()
# ignore un-full blocks?
# histogram of block durations; interval between...
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
# "blockTime":1625059381 <- consensus block time is median of block times (?)
# vatID, deliveryNum -> args / syscalls
# watch out for GC esp.
# c.run(runPolicy)
# simple model: kernel says how many computrons
# refinement: computrons, syscalls
# fitness: block distribution... 10s blocks...
# blocks that aren't too big (latency, validator variance risk)
# cpu that isn't idle (throughput)
# an ideal: median block time 10s
# 80 20 %ile
# importing a contract is an outlier
# +
# median validator - existing distribution of deliveries / compute -> blocks
# supplement: study wallclock stuff
# -
show_times(c500[c500['type'] == 'deliver-result'].set_index(['crankNum', 'vatID', 'deliveryNum', 'slogfile'])
.drop(['type', 'kd', 'dr', 'time_dr', 'description', 'managerType'], axis=1).sort_index(),
['time', 'time_kernel', 'blockTime'])
# ### Missing `compute` meter info?
start1 = c500
start1[(start1['type'] == 'deliver-result') & start1.compute.isnull()]
compute_ref = start1[(start1.slogfile == 'coolex-agorictest16-chain.slog.gz') &
(start1['type'] == 'deliver-result')].set_index('crankNum')[['compute']]
compute_ref
compute_delta = start1[['slogfile', 'crankNum', 'compute']]
compute_delta = pd.merge(compute_delta, compute_ref,
left_on='crankNum', right_index=True, suffixes=['', '_ref'])
compute_delta['delta'] = (compute_delta.compute - compute_delta.compute_ref).abs()
compute_delta.sort_values('delta', ascending=False)
# +
df = start1
categories = df.vatID.apply(lambda v: int(v[1:]))
colors = cm.rainbow(np.linspace(0, 1, categories.max() + 1))
df.plot.scatter(x='compute', y='dur', c=colors[categories],
title='Deliveries (colored by vatID)',
figsize=(12, 9), ylabel="dur (sec)");
# -
start1[~start1.compute.isnull()].groupby('vatID')[['crankNum']].count().sort_values('crankNum', ascending=False)
# +
def vat_rate(df, vatID):
df = df[['vatID', 'deliveryNum', 'compute', 'dur']].dropna()
df['rate'] = df.compute / df.dur
df = df[df.vatID == vatID]
# df = df.groupby('deliveryNum')[['compute', 'dur', 'rate']].mean()
#df.sort_values('dur', ascending=False)
#df
df = df.set_index('deliveryNum').sort_index()
return df
def show_rate(df, vatID, figsize=(8, 9)):
df = vat_rate(df, vatID)
ax = df.plot(subplots=True, figsize=figsize)
def fit_line(df, x, y, figsize=(9, 6)):
cs = np.polyfit(df[x], df[y], 1)
f = np.poly1d(cs)
ax1 = df[[x, y]].plot.scatter(x=x, y=y, figsize=figsize)
df['fit'] = f(df[x])
df.plot(x=x, y='fit', color='Red', legend=False, ax=ax1);
# show_rate(start1, 'v10');
# vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur')
fastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz']
fit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur')
# len(fastSlog[fastSlog.vatID == 'v10'])
# fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) #.sort_values('compute', ascending=False)
#fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True)
# -
vat_rate(start1, 'v16');
df = start1.pivot(columns='vatID', values=['compute', 'dur'],
index=['vatID', 'deliveryNum', 'crankNum', 'slogfile', 'line'])
df.reset_index().set_index('deliveryNum').drop(['crankNum', 'line'], axis=1) #.plot(figsize=(12, 8));
df.reset_index().set_index('deliveryNum')[['v23']].sort_index().dropna() #.plot()
df.describe()
df[['v14']].dropna()
df.crankNum.hist();
df.deliveryNum.hist();
df.groupby('method')[['compute', 'rate']].describe()
df.groupby('method')[['rate', 'compute', 'dur']].mean().sort_values('rate').head(90).plot(
subplots=True, rot=90, figsize=(8, 6), title='Method Compute Cost, Rate: bottom 90');
df.groupby('method')[['rate', 'compute', 'dur']].mean().sort_values('rate').tail(8).plot(
subplots=True, rot=90, figsize=(8, 6), title='Method Compute Cost, Rate: top 8');
durByMethod.dur.sum()
# +
durByMethod = df.groupby('method')[['dur']].sum().sort_values('dur', ascending=False)
durByMethod.plot.pie(y='dur', figsize=(12, 9), autopct='%1.1f%%')
# -
df.groupby('vatID')[['rate']].describe().head(20)
df.groupby('slogfile')[['rate']].describe().head(20)
df.plot.scatter(x='deliveryNum', y='rate')
speed = df.groupby('slogfile')[['rate']].describe()[['rate'][0]][['count', 'mean', 'std']]
speed = speed.sort_values('mean', ascending=False)
speed['relative'] = speed['mean'] / speed['mean'][0]
speed
# +
def boxplot_sorted(df, by, column, **config):
df2 = pd.DataFrame({col:vals[column] for col, vals in df.groupby(by)})
meds = df2.median().sort_values()
return df2[meds.index].boxplot(**config)
ax = boxplot_sorted(df, by=["slogfile"], column="rate", rot=90, figsize=(12, 9))
ax.set_title('Validator Speed: Sample of 20 from Phase 4');
ax.set_ylabel('computrons / sec')
# -
ax = df.sort_values('crankNum').plot.scatter(x='crankNum', y='compute');
ax.set_yscale('log')
df[(df.dur < df.dur.mean() + df.dur.std()) &
(df.compute < df.compute.mean() + df.compute.std())][['compute', 'dur']].hist();
# +
df = crank_info(c500)
df = df[df.crankNum.isin(compute_ref.index)]
rate = np.polyfit(df.compute, df.dur, 1)
f = np.poly1d(rate)
df['rate'] = f(df.compute)
# df[['compute', 'dur', 'rate']].head()
print(f)
# -
ax1 = df[['compute', 'dur']].plot.scatter(x='compute', y='dur', figsize=(9, 6))
df.plot(x='compute', y='rate', color='Red', legend=False, ax=ax1);
ax1.set_title(f"{len(df)} cranks from w3m: Duration vs. Compute Meter");
ax1.set_xlabel("compute units")
ax1.set_ylabel("duration (sec)")
r = df.compute / df.dur
r.max() / r.min()
df.sort_values('rate', ascending=False).drop(['time', 'type', 'detail', 'detail_dr'], axis=1)
# ## Colophon: jupytext
#
# This is a jupyter notebook paired with a python script using [jupytext](https://jupytext.readthedocs.io/en/latest/).
#
# We use the [python38Packages.jupytext](https://search.nixos.org/packages?channel=21.05&from=0&size=50&buckets=%7B%22package_attr_set%22%3A%5B%22python38Packages%22%5D%2C%22package_license_set%22%3A%5B%5D%2C%22package_maintainers_set%22%3A%5B%5D%2C%22package_platforms%22%3A%5B%5D%7D&sort=relevance&query=jupytext) nix package; in particular, `/nix/store/a9911qj06dy0ah7fshl39x3w4cjs7bxk-python3.8-jupytext-1.11.2`.
#
|
from flask import Blueprint, request, jsonify
import subprocess
import json
import yamale
import yaml
import app_conf
import logging.handlers
import mydb
imageinfo = Blueprint('imageinfo', __name__)
# set logger
logger = logging.getLogger(__name__)
path = f'./logs/{__name__}.log'
fileHandler = logging.handlers.RotatingFileHandler(path,
maxBytes=app_conf.Log.log_max_size,
backupCount=app_conf.Log.log_backup_count)
fileHandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(filename)s:%(lineno)s %(message)s'))
logger.addHandler(fileHandler)
logger.setLevel(app_conf.Log.log_level)
# temp
logger.addHandler(logging.StreamHandler())
db_path = "data/imageinfo.db"
mydb.init(db_path)
schema_create = yamale.make_schema(content="""
name: str(required=True)
namespace: str(required=True)
""")
@imageinfo.route('/create', methods=['post'])
def create():
msg = {
'err': None,
'res': None
}
try:
# schema validation
# yamale.validate(schema_create, yamale.make_data(content=request.data.decode('utf-8')))
# name
body = yaml.load(request.data, Loader=yaml.Loader)
k = f"{body["namespace"]}/{body["name"]}"
v = json.dumps(body).encode()
mydb.upsert(db_path, k, v)
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
schema_delete = yamale.make_schema(content="""
name: str(required=True)
namespace: str(required=True)
""")
@imageinfo.route('/delete', methods=['delete'])
def delete():
msg = {
'err': None,
'res': None
}
try:
# schema validation
yamale.validate(schema_delete, yamale.make_data(content=request.data.decode('utf-8')))
body = yaml.load(request.data, Loader=yaml.Loader)
k = f"{body["namespace"]}/{body["name"]}"
mydb.delete(db_path, k)
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
@imageinfo.route('/list', methods=['get'])
def list_():
msg = {
'err': None,
'res': []
}
try:
namespace = request.args.get('namespace')
temp = mydb.keys(db_path)
for x in temp:
term = x.split('/')
if term[0] == namespace:
msg['res'].append(term[1])
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
@imageinfo.route('/get', methods=['get'])
def get():
msg = {
'err': None,
'res': None
}
try:
name = request.args.get('name')
namespace = request.args.get('namespace')
k = f"{namespace}/{name}"
v = mydb.get(db_path, k)
if v is not None:
msg['res'] = json.loads(v.decode())
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
| from flask import Blueprint, request, jsonify
import subprocess
import json
import yamale
import yaml
import app_conf
import logging.handlers
import mydb
imageinfo = Blueprint('imageinfo', __name__)
# set logger
logger = logging.getLogger(__name__)
path = f'./logs/{__name__}.log'
fileHandler = logging.handlers.RotatingFileHandler(path,
maxBytes=app_conf.Log.log_max_size,
backupCount=app_conf.Log.log_backup_count)
fileHandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(filename)s:%(lineno)s %(message)s'))
logger.addHandler(fileHandler)
logger.setLevel(app_conf.Log.log_level)
# temp
logger.addHandler(logging.StreamHandler())
db_path = "data/imageinfo.db"
mydb.init(db_path)
schema_create = yamale.make_schema(content="""
name: str(required=True)
namespace: str(required=True)
""")
@imageinfo.route('/create', methods=['post'])
def create():
msg = {
'err': None,
'res': None
}
try:
# schema validation
# yamale.validate(schema_create, yamale.make_data(content=request.data.decode('utf-8')))
# name
body = yaml.load(request.data, Loader=yaml.Loader)
k = f"{body['namespace']}/{body['name']}"
v = json.dumps(body).encode()
mydb.upsert(db_path, k, v)
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
schema_delete = yamale.make_schema(content="""
name: str(required=True)
namespace: str(required=True)
""")
@imageinfo.route('/delete', methods=['delete'])
def delete():
msg = {
'err': None,
'res': None
}
try:
# schema validation
yamale.validate(schema_delete, yamale.make_data(content=request.data.decode('utf-8')))
body = yaml.load(request.data, Loader=yaml.Loader)
k = f"{body['namespace']}/{body['name']}"
mydb.delete(db_path, k)
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
@imageinfo.route('/list', methods=['get'])
def list_():
msg = {
'err': None,
'res': []
}
try:
namespace = request.args.get('namespace')
temp = mydb.keys(db_path)
for x in temp:
term = x.split('/')
if term[0] == namespace:
msg['res'].append(term[1])
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
@imageinfo.route('/get', methods=['get'])
def get():
msg = {
'err': None,
'res': None
}
try:
name = request.args.get('name')
namespace = request.args.get('namespace')
k = f"{namespace}/{name}"
v = mydb.get(db_path, k)
if v is not None:
msg['res'] = json.loads(v.decode())
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# type: ignore
import copy
import os
import platform
from dataclasses import dataclass
from pathlib import Path
from typing import List
import nox
from nox.logger import logger
BASE = os.path.abspath(os.path.dirname(__file__))
DEFAULT_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
DEFAULT_OS_NAMES = ["Linux", "MacOS", "Windows"]
PYTHON_VERSIONS = os.environ.get(
"NOX_PYTHON_VERSIONS", ",".join(DEFAULT_PYTHON_VERSIONS)
).split(",")
INSTALL_EDITABLE_MODE = os.environ.get("INSTALL_EDITABLE_MODE", 0)
INSTALL_COMMAND = (
["pip", "install", "-e"] if INSTALL_EDITABLE_MODE else ["pip", "install"]
)
# Allow limiting testing to specific plugins
# The list ['ALL'] indicates all plugins
PLUGINS = os.environ.get("PLUGINS", "ALL").split(",")
SKIP_CORE_TESTS = "0"
SKIP_CORE_TESTS = os.environ.get("SKIP_CORE_TESTS", SKIP_CORE_TESTS) != "0"
FIX = os.environ.get("FIX", "0") == "1"
VERBOSE = os.environ.get("VERBOSE", "0")
SILENT = VERBOSE == "0"
@dataclass
class Plugin:
name: str
path: str
module: str
def get_current_os() -> str:
current_os = platform.system()
if current_os == "Darwin":
current_os = "MacOS"
return current_os
print(f"Operating system\t:\t{get_current_os()}")
print(f"NOX_PYTHON_VERSIONS\t:\t{PYTHON_VERSIONS}")
print(f"PLUGINS\t\t\t:\t{PLUGINS}")
print(f"SKIP_CORE_TESTS\t\t:\t{SKIP_CORE_TESTS}")
print(f"FIX\t\t\t:\t{FIX}")
print(f"VERBOSE\t\t\t:\t{VERBOSE}")
print(f"INSTALL_EDITABLE_MODE\t:\t{INSTALL_EDITABLE_MODE}")
def _upgrade_basic(session):
session.run(
"python",
"-m",
"pip",
"install",
"--upgrade",
"setuptools",
"pip",
silent=SILENT,
)
def find_dirs(path: str):
for file in os.listdir(path):
fullname = os.path.join(path, file)
if os.path.isdir(fullname):
yield fullname
def install_hydra(session, cmd):
# clean install hydra
session.chdir(BASE)
session.run(*cmd, ".", silent=SILENT)
if not SILENT:
session.install("pipdeptree", silent=SILENT)
session.run("pipdeptree", "-p", "hydra-core")
def pytest_args(*args):
ret = ["pytest", "-Werror"]
ret.extend(args)
return ret
def run_pytest(session, directory=".", *args):
pytest_cmd = pytest_args(directory, *args)
# silent=False to enable some output on CI
# (otherwise we risk no-output timeout)
session.run(*pytest_cmd, silent=False)
def get_setup_python_versions(classifiers):
pythons = filter(lambda line: "Programming Language :: Python" in line, classifiers)
return [p[len("Programming Language :: Python :: ") :] for p in pythons]
def get_plugin_os_names(classifiers: List[str]) -> List[str]:
oses = list(filter(lambda line: "Operating System" in line, classifiers))
if len(oses) == 0:
# No Os is specified so all oses are supported
return DEFAULT_OS_NAMES
if len(oses) == 1 and oses[0] == "Operating System :: OS Independent":
# All oses are supported
return DEFAULT_OS_NAMES
else:
return [p.split("::")[-1].strip() for p in oses]
def select_plugins(session, directory: str) -> List[Plugin]:
"""
Select all plugins that should be tested in this session.
Considers the current Python version and operating systems against the supported ones,
as well as the user plugins selection (via the PLUGINS environment variable).
"""
assert session.python is not None, "Session python version is not specified"
blacklist = [".isort.cfg", "examples"]
plugins = [
{"dir_name": x, "path": x}
for x in sorted(os.listdir(os.path.join(BASE, directory)))
if x not in blacklist
]
ret = []
skipped = []
for plugin in plugins:
if not (plugin["dir_name"] in PLUGINS or PLUGINS == ["ALL"]):
skipped.append(f"Deselecting {plugin["dir_name"]}: User request")
continue
setup_py = os.path.join(BASE, directory, plugin["path"], "setup.py")
classifiers = session.run(
"python", setup_py, "--name", "--classifiers", silent=True
).splitlines()
plugin_name = classifiers.pop(0)
plugin_python_versions = get_setup_python_versions(classifiers)
python_supported = session.python in plugin_python_versions
plugin_os_names = get_plugin_os_names(classifiers)
os_supported = get_current_os() in plugin_os_names
if not python_supported:
py_str = ", ".join(plugin_python_versions)
skipped.append(
f"Deselecting {plugin["dir_name"]} : Incompatible Python {session.python}. Supports [{py_str}]"
)
continue
# Verify this plugin supports the OS we are testing on, skip otherwise
if not os_supported:
os_str = ", ".join(plugin_os_names)
skipped.append(
f"Deselecting {plugin["dir_name"]}: Incompatible OS {get_current_os()}. Supports [{os_str}]"
)
continue
ret.append(
Plugin(
name=plugin_name,
path=plugin["path"],
module="hydra_plugins." + plugin["dir_name"],
)
)
for msg in skipped:
logger.warn(msg)
if len(ret) == 0:
logger.warn("No plugins selected")
return ret
def install_dev_deps(session):
    """Upgrade pip/setuptools, then install the development requirements."""
    _upgrade_basic(session)
    pip_cmd = ["pip", "install", "-r", "requirements/dev.txt"]
    session.run(*pip_cmd, silent=SILENT)
def _black_cmd():
    """Return the black invocation; check-only unless FIX mode is enabled."""
    cmd = ["black", "."]
    if not FIX:
        cmd.append("--check")
    return cmd
def _isort_cmd():
    """Return the isort invocation; check/diff-only unless FIX mode is enabled."""
    cmd = ["isort", "."]
    if not FIX:
        cmd.extend(["--check", "--diff"])
    return cmd
@nox.session(python=PYTHON_VERSIONS)
def lint(session):
    """Run all linters on Hydra core, standalone apps and example plugins.

    Runs black/isort (per standalone app and repo root), mypy, flake8,
    yamllint, mypy on the example dirs, the plugin linters and bandit.
    """
    install_dev_deps(session)
    install_hydra(session, ["pip", "install", "-e"])
    apps = _get_standalone_apps_dirs()
    session.log("Installing standalone apps")
    # black/isort are run from inside each standalone app directory.
    for subdir in apps:
        session.chdir(str(subdir))
        session.run(*_black_cmd(), silent=SILENT)
        session.run(*_isort_cmd(), silent=SILENT)
    session.chdir(BASE)
    session.run(*_black_cmd(), silent=SILENT)
    # Directories the repo-root isort run must not descend into.
    skiplist = apps + [
        ".git",
        "website",
        "plugins",
        "tools",
        ".nox",
        "hydra/grammar/gen",
        "tools/configen/example/gen",
        "tools/configen/tests/test_modules/expected",
        "temp",
    ]
    isort = _isort_cmd() + [f"--skip={skip}" for skip in skiplist]
    session.run(*isort, silent=SILENT)
    session.run("mypy", ".", "--strict", silent=SILENT)
    session.run("flake8", "--config", ".flake8")
    session.run("yamllint", ".")
    # Each example subdirectory is type-checked individually.
    example_dirs = [
        "examples/advanced/",
        "examples/configure_hydra",
        "examples/patterns",
        "examples/instantiate",
        "examples/tutorials/basic/your_first_hydra_app",
        "examples/tutorials/basic/running_your_hydra_app",
        "examples/tutorials/structured_configs/",
    ]
    for edir in example_dirs:
        dirs = find_dirs(path=edir)
        for d in dirs:
            session.run("mypy", d, "--strict", silent=SILENT)
    # lint example plugins
    lint_plugins_in_dir(session=session, directory="examples/plugins")
    # bandit static security analysis
    session.run("bandit", "--exclude", "./.nox/**", "-ll", "-r", ".", silent=SILENT)
@nox.session(python=PYTHON_VERSIONS)
def lint_plugins(session):
    """Lint all plugins found under the top-level ``plugins`` directory."""
    lint_plugins_in_dir(session=session, directory="plugins")
def lint_plugins_in_dir(session, directory: str) -> None:
    """Lint (black, isort, flake8, mypy) all selected plugins under *directory*."""
    install_cmd = ["pip", "install", "-e"]
    install_hydra(session, install_cmd)
    plugins = select_plugins(session=session, directory=directory)
    # plugin linting requires the plugins and their dependencies to be installed
    for plugin in plugins:
        cmd = install_cmd + [os.path.join(directory, plugin.path)]
        session.run(*cmd, silent=SILENT)
    install_dev_deps(session)
    session.run("flake8", "--config", ".flake8", directory)
    # Mypy for plugins
    for plugin in plugins:
        path = os.path.join(directory, plugin.path)
        # black/isort are run from inside each plugin directory.
        session.chdir(path)
        session.run(*_black_cmd(), silent=SILENT)
        session.run(*_isort_cmd(), silent=SILENT)
        session.chdir(BASE)
        # Also type-check the plugin's tests/example dirs when they exist.
        files = []
        for file in ["tests", "example"]:
            abs = os.path.join(path, file)
            if os.path.exists(abs):
                files.append(abs)
        session.run(
            "mypy",
            "--strict",
            f"{path}/hydra_plugins",
            "--config-file",
            f"{BASE}/.mypy.ini",
            silent=SILENT,
        )
        session.run(
            "mypy",
            "--strict",
            "--namespace-packages",
            "--config-file",
            f"{BASE}/.mypy.ini",
            *files,
            silent=SILENT,
        )
@nox.session(python=PYTHON_VERSIONS)
def test_tools(session):
    """Install and run pytest for every tool under ``tools/``."""
    install_cmd = ["pip", "install"]
    _upgrade_basic(session)
    session.install("pytest")
    install_hydra(session, install_cmd)
    tools = [
        x
        for x in sorted(os.listdir(os.path.join(BASE, "tools")))
        if not os.path.isfile(x)
    ]
    for tool in tools:
        tool_path = os.path.join("tools", tool)
        session.chdir(BASE)
        # Only tools that ship a setup.py are installed and tested.
        if (Path(tool_path) / "setup.py").exists():
            cmd = list(install_cmd) + ["-e", tool_path]
            session.run(*cmd, silent=SILENT)
            session.run("pytest", tool_path)
    session.chdir(BASE)
def _get_standalone_apps_dirs():
    """Return the standalone test app dirs plus the advanced example app.

    Note: returns Path objects plus one plain string; callers str() entries.
    """
    root = Path(BASE) / "tests" / "standalone_apps"
    dirs = [root / name for name in os.listdir(root)]
    dirs.append(f"{BASE}/examples/advanced/hydra_app_example")
    return dirs
@nox.session(python=PYTHON_VERSIONS)
def test_core(session):
    """Test Hydra core, the standalone apps and the example plugins."""
    _upgrade_basic(session)
    install_hydra(session, INSTALL_COMMAND)
    session.install("pytest")
    if not SKIP_CORE_TESTS:
        run_pytest(session, "build_helpers", "tests", *session.posargs)
    else:
        session.log("Skipping Hydra core tests")
    apps = _get_standalone_apps_dirs()
    session.log("Testing standalone apps")
    # Each standalone app is installed and tested from inside its own directory.
    for subdir in apps:
        session.chdir(subdir)
        session.run(*INSTALL_COMMAND, ".", silent=SILENT)
        run_pytest(session, ".")
    session.chdir(BASE)
    # Example plugins: core tests were already run above, so skip them here.
    test_plugins_in_directory(
        session,
        install_cmd=INSTALL_COMMAND,
        directory="examples/plugins",
        test_hydra_core=False,
    )
@nox.session(python=PYTHON_VERSIONS)
def test_plugins(session):
    """Test all plugins under ``plugins`` and re-run Hydra core tests."""
    test_plugins_in_directory(
        session,
        install_cmd=INSTALL_COMMAND,
        directory="plugins",
        test_hydra_core=True,
    )
def test_plugins_in_directory(
    session, install_cmd, directory: str, test_hydra_core: bool
):
    """Install all selected plugins under *directory* and run their tests.

    Args:
        session: The nox session.
        install_cmd: Base pip command used to install Hydra and the plugins.
        directory: Directory containing the plugins (e.g. ``plugins``).
        test_hydra_core: When True, also re-run Hydra's own tests to verify
            the installed plugins did not break anything.
    """
    _upgrade_basic(session)
    session.install("pytest")
    install_hydra(session, install_cmd)
    selected_plugin = select_plugins(session=session, directory=directory)
    # Install every selected plugin before any test runs.
    for plugin in selected_plugin:
        cmd = list(install_cmd) + [os.path.join(directory, plugin.path)]
        session.run(*cmd, silent=SILENT)
        if not SILENT:
            session.run("pipdeptree", "-p", plugin.name)
    # Test that we can import Hydra
    session.run("python", "-c", "from hydra import main", silent=SILENT)
    # Test that we can import all installed plugins
    for plugin in selected_plugin:
        session.run("python", "-c", f"import {plugin.module}")
    # Run Hydra tests to verify installed plugins did not break anything
    if test_hydra_core:
        if not SKIP_CORE_TESTS:
            # exclude test_completion for plugins tests.
            # 1. It's tested during normal core tests.
            # 2. it's somewhat fragile and tend to timeout in mac.
            # 3. it's expensive and it's not worth the cost to run it for plugins as well.
            run_pytest(session, "tests", "--ignore=tests/test_completion.py")
        else:
            session.log("Skipping Hydra core tests")
    # Run tests for all installed plugins
    for plugin in selected_plugin:
        # install all other plugins that are compatible with the current Python version
        session.chdir(os.path.join(BASE, directory, plugin.path))
        run_pytest(session)
@nox.session(python="3.8")
def coverage(session):
    """Measure combined test coverage for Hydra core and all plugins."""
    # All runs append into one coverage file at the repo root.
    coverage_env = {
        "COVERAGE_HOME": BASE,
        "COVERAGE_FILE": f"{BASE}/.coverage",
        "COVERAGE_RCFILE": f"{BASE}/.coveragerc",
    }
    _upgrade_basic(session)
    session.install("coverage", "pytest")
    install_hydra(session, ["pip", "install", "-e"])
    session.run("coverage", "erase", env=coverage_env)
    for directory in ["plugins", "examples/plugins"]:
        selected_plugins = select_plugins(session=session, directory=directory)
        for plugin in selected_plugins:
            session.run(
                "pip",
                "install",
                "-e",
                os.path.join(directory, plugin.path),
                silent=SILENT,
            )
        # run plugin coverage
        for plugin in selected_plugins:
            session.chdir(os.path.join(directory, plugin.path))
            cov_args = ["coverage", "run", "--append", "-m"]
            cov_args.extend(pytest_args())
            session.run(*cov_args, silent=SILENT, env=coverage_env)
            session.chdir(BASE)
    # run hydra-core coverage
    session.run(
        "coverage",
        "run",
        "--append",
        "-m",
        silent=SILENT,
        env=coverage_env,
        *pytest_args(),
    )
    # Increase the fail_under as coverage improves
    session.run("coverage", "report", "--fail-under=80", env=coverage_env)
    session.run("coverage", "erase", env=coverage_env)
@nox.session(python=PYTHON_VERSIONS)
def test_jupyter_notebooks(session):
    """Validate the compose example notebook and the notebooks under tests/jupyter."""
    versions = copy.copy(DEFAULT_PYTHON_VERSIONS)
    if session.python not in versions:
        # BUG FIX: the skip message nested double quotes inside a double-quoted
        # f-string ({",".join(versions)}) -- a SyntaxError on every
        # Python < 3.12 this noxfile targets.
        session.skip(
            f"Not testing Jupyter notebook on Python {session.python}, supports [{','.join(versions)}]"
        )
    session.install("jupyter", "nbval", "pyzmq")
    install_hydra(session, ["pip", "install", "-e"])
    args = pytest_args(
        "--nbval", "examples/jupyter_notebooks/compose_configs_in_notebook.ipynb"
    )
    # Jupyter notebook test on Windows yield warnings
    args = [x for x in args if x != "-Werror"]
    session.run(*args, silent=SILENT)
    notebooks_dir = Path("tests/jupyter")
    for notebook in [
        file for file in notebooks_dir.iterdir() if str(file).endswith(".ipynb")
    ]:
        args = pytest_args("--nbval", str(notebook))
        args = [x for x in args if x != "-Werror"]
        session.run(*args, silent=SILENT)
@nox.session(python=PYTHON_VERSIONS)
def benchmark(session):
    """Run the benchmark test suite (tests/benchmark.py)."""
    _upgrade_basic(session)
    install_dev_deps(session)
    install_hydra(session, INSTALL_COMMAND)
    session.install("pytest")
    run_pytest(session, "build_helpers", "tests/benchmark.py", *session.posargs)
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# type: ignore
import copy
import os
import platform
from dataclasses import dataclass
from pathlib import Path
from typing import List
import nox
from nox.logger import logger
BASE = os.path.abspath(os.path.dirname(__file__))
DEFAULT_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
DEFAULT_OS_NAMES = ["Linux", "MacOS", "Windows"]
PYTHON_VERSIONS = os.environ.get(
"NOX_PYTHON_VERSIONS", ",".join(DEFAULT_PYTHON_VERSIONS)
).split(",")
INSTALL_EDITABLE_MODE = os.environ.get("INSTALL_EDITABLE_MODE", 0)
INSTALL_COMMAND = (
["pip", "install", "-e"] if INSTALL_EDITABLE_MODE else ["pip", "install"]
)
# Allow limiting testing to specific plugins
# The list ['ALL'] indicates all plugins
PLUGINS = os.environ.get("PLUGINS", "ALL").split(",")
SKIP_CORE_TESTS = "0"
SKIP_CORE_TESTS = os.environ.get("SKIP_CORE_TESTS", SKIP_CORE_TESTS) != "0"
FIX = os.environ.get("FIX", "0") == "1"
VERBOSE = os.environ.get("VERBOSE", "0")
SILENT = VERBOSE == "0"
@dataclass
class Plugin:
name: str
path: str
module: str
def get_current_os() -> str:
current_os = platform.system()
if current_os == "Darwin":
current_os = "MacOS"
return current_os
print(f"Operating system\t:\t{get_current_os()}")
print(f"NOX_PYTHON_VERSIONS\t:\t{PYTHON_VERSIONS}")
print(f"PLUGINS\t\t\t:\t{PLUGINS}")
print(f"SKIP_CORE_TESTS\t\t:\t{SKIP_CORE_TESTS}")
print(f"FIX\t\t\t:\t{FIX}")
print(f"VERBOSE\t\t\t:\t{VERBOSE}")
print(f"INSTALL_EDITABLE_MODE\t:\t{INSTALL_EDITABLE_MODE}")
def _upgrade_basic(session):
session.run(
"python",
"-m",
"pip",
"install",
"--upgrade",
"setuptools",
"pip",
silent=SILENT,
)
def find_dirs(path: str):
for file in os.listdir(path):
fullname = os.path.join(path, file)
if os.path.isdir(fullname):
yield fullname
def install_hydra(session, cmd):
# clean install hydra
session.chdir(BASE)
session.run(*cmd, ".", silent=SILENT)
if not SILENT:
session.install("pipdeptree", silent=SILENT)
session.run("pipdeptree", "-p", "hydra-core")
def pytest_args(*args):
ret = ["pytest", "-Werror"]
ret.extend(args)
return ret
def run_pytest(session, directory=".", *args):
pytest_cmd = pytest_args(directory, *args)
# silent=False to enable some output on CI
# (otherwise we risk no-output timeout)
session.run(*pytest_cmd, silent=False)
def get_setup_python_versions(classifiers):
pythons = filter(lambda line: "Programming Language :: Python" in line, classifiers)
return [p[len("Programming Language :: Python :: ") :] for p in pythons]
def get_plugin_os_names(classifiers: List[str]) -> List[str]:
oses = list(filter(lambda line: "Operating System" in line, classifiers))
if len(oses) == 0:
# No Os is specified so all oses are supported
return DEFAULT_OS_NAMES
if len(oses) == 1 and oses[0] == "Operating System :: OS Independent":
# All oses are supported
return DEFAULT_OS_NAMES
else:
return [p.split("::")[-1].strip() for p in oses]
def select_plugins(session, directory: str) -> List[Plugin]:
"""
Select all plugins that should be tested in this session.
Considers the current Python version and operating systems against the supported ones,
as well as the user plugins selection (via the PLUGINS environment variable).
"""
assert session.python is not None, "Session python version is not specified"
blacklist = [".isort.cfg", "examples"]
plugins = [
{"dir_name": x, "path": x}
for x in sorted(os.listdir(os.path.join(BASE, directory)))
if x not in blacklist
]
ret = []
skipped = []
for plugin in plugins:
if not (plugin["dir_name"] in PLUGINS or PLUGINS == ["ALL"]):
skipped.append(f"Deselecting {plugin['dir_name']}: User request")
continue
setup_py = os.path.join(BASE, directory, plugin["path"], "setup.py")
classifiers = session.run(
"python", setup_py, "--name", "--classifiers", silent=True
).splitlines()
plugin_name = classifiers.pop(0)
plugin_python_versions = get_setup_python_versions(classifiers)
python_supported = session.python in plugin_python_versions
plugin_os_names = get_plugin_os_names(classifiers)
os_supported = get_current_os() in plugin_os_names
if not python_supported:
py_str = ", ".join(plugin_python_versions)
skipped.append(
f"Deselecting {plugin['dir_name']} : Incompatible Python {session.python}. Supports [{py_str}]"
)
continue
# Verify this plugin supports the OS we are testing on, skip otherwise
if not os_supported:
os_str = ", ".join(plugin_os_names)
skipped.append(
f"Deselecting {plugin['dir_name']}: Incompatible OS {get_current_os()}. Supports [{os_str}]"
)
continue
ret.append(
Plugin(
name=plugin_name,
path=plugin["path"],
module="hydra_plugins." + plugin["dir_name"],
)
)
for msg in skipped:
logger.warn(msg)
if len(ret) == 0:
logger.warn("No plugins selected")
return ret
def install_dev_deps(session):
_upgrade_basic(session)
session.run("pip", "install", "-r", "requirements/dev.txt", silent=SILENT)
def _black_cmd():
black = ["black", "."]
if not FIX:
black += ["--check"]
return black
def _isort_cmd():
isort = ["isort", "."]
if not FIX:
isort += ["--check", "--diff"]
return isort
@nox.session(python=PYTHON_VERSIONS)
def lint(session):
install_dev_deps(session)
install_hydra(session, ["pip", "install", "-e"])
apps = _get_standalone_apps_dirs()
session.log("Installing standalone apps")
for subdir in apps:
session.chdir(str(subdir))
session.run(*_black_cmd(), silent=SILENT)
session.run(*_isort_cmd(), silent=SILENT)
session.chdir(BASE)
session.run(*_black_cmd(), silent=SILENT)
skiplist = apps + [
".git",
"website",
"plugins",
"tools",
".nox",
"hydra/grammar/gen",
"tools/configen/example/gen",
"tools/configen/tests/test_modules/expected",
"temp",
]
isort = _isort_cmd() + [f"--skip={skip}" for skip in skiplist]
session.run(*isort, silent=SILENT)
session.run("mypy", ".", "--strict", silent=SILENT)
session.run("flake8", "--config", ".flake8")
session.run("yamllint", ".")
example_dirs = [
"examples/advanced/",
"examples/configure_hydra",
"examples/patterns",
"examples/instantiate",
"examples/tutorials/basic/your_first_hydra_app",
"examples/tutorials/basic/running_your_hydra_app",
"examples/tutorials/structured_configs/",
]
for edir in example_dirs:
dirs = find_dirs(path=edir)
for d in dirs:
session.run("mypy", d, "--strict", silent=SILENT)
# lint example plugins
lint_plugins_in_dir(session=session, directory="examples/plugins")
# bandit static security analysis
session.run("bandit", "--exclude", "./.nox/**", "-ll", "-r", ".", silent=SILENT)
@nox.session(python=PYTHON_VERSIONS)
def lint_plugins(session):
lint_plugins_in_dir(session, "plugins")
def lint_plugins_in_dir(session, directory: str) -> None:
install_cmd = ["pip", "install", "-e"]
install_hydra(session, install_cmd)
plugins = select_plugins(session=session, directory=directory)
# plugin linting requires the plugins and their dependencies to be installed
for plugin in plugins:
cmd = install_cmd + [os.path.join(directory, plugin.path)]
session.run(*cmd, silent=SILENT)
install_dev_deps(session)
session.run("flake8", "--config", ".flake8", directory)
# Mypy for plugins
for plugin in plugins:
path = os.path.join(directory, plugin.path)
session.chdir(path)
session.run(*_black_cmd(), silent=SILENT)
session.run(*_isort_cmd(), silent=SILENT)
session.chdir(BASE)
files = []
for file in ["tests", "example"]:
abs = os.path.join(path, file)
if os.path.exists(abs):
files.append(abs)
session.run(
"mypy",
"--strict",
f"{path}/hydra_plugins",
"--config-file",
f"{BASE}/.mypy.ini",
silent=SILENT,
)
session.run(
"mypy",
"--strict",
"--namespace-packages",
"--config-file",
f"{BASE}/.mypy.ini",
*files,
silent=SILENT,
)
@nox.session(python=PYTHON_VERSIONS)
def test_tools(session):
install_cmd = ["pip", "install"]
_upgrade_basic(session)
session.install("pytest")
install_hydra(session, install_cmd)
tools = [
x
for x in sorted(os.listdir(os.path.join(BASE, "tools")))
if not os.path.isfile(x)
]
for tool in tools:
tool_path = os.path.join("tools", tool)
session.chdir(BASE)
if (Path(tool_path) / "setup.py").exists():
cmd = list(install_cmd) + ["-e", tool_path]
session.run(*cmd, silent=SILENT)
session.run("pytest", tool_path)
session.chdir(BASE)
def _get_standalone_apps_dirs():
standalone_apps_dir = Path(f"{BASE}/tests/standalone_apps")
apps = [standalone_apps_dir / subdir for subdir in os.listdir(standalone_apps_dir)]
apps.append(f"{BASE}/examples/advanced/hydra_app_example")
return apps
@nox.session(python=PYTHON_VERSIONS)
def test_core(session):
_upgrade_basic(session)
install_hydra(session, INSTALL_COMMAND)
session.install("pytest")
if not SKIP_CORE_TESTS:
run_pytest(session, "build_helpers", "tests", *session.posargs)
else:
session.log("Skipping Hydra core tests")
apps = _get_standalone_apps_dirs()
session.log("Testing standalone apps")
for subdir in apps:
session.chdir(subdir)
session.run(*INSTALL_COMMAND, ".", silent=SILENT)
run_pytest(session, ".")
session.chdir(BASE)
test_plugins_in_directory(
session,
install_cmd=INSTALL_COMMAND,
directory="examples/plugins",
test_hydra_core=False,
)
@nox.session(python=PYTHON_VERSIONS)
def test_plugins(session):
test_plugins_in_directory(
session=session,
install_cmd=INSTALL_COMMAND,
directory="plugins",
test_hydra_core=True,
)
def test_plugins_in_directory(
session, install_cmd, directory: str, test_hydra_core: bool
):
_upgrade_basic(session)
session.install("pytest")
install_hydra(session, install_cmd)
selected_plugin = select_plugins(session=session, directory=directory)
for plugin in selected_plugin:
cmd = list(install_cmd) + [os.path.join(directory, plugin.path)]
session.run(*cmd, silent=SILENT)
if not SILENT:
session.run("pipdeptree", "-p", plugin.name)
# Test that we can import Hydra
session.run("python", "-c", "from hydra import main", silent=SILENT)
# Test that we can import all installed plugins
for plugin in selected_plugin:
session.run("python", "-c", f"import {plugin.module}")
# Run Hydra tests to verify installed plugins did not break anything
if test_hydra_core:
if not SKIP_CORE_TESTS:
# exclude test_completion for plugins tests.
# 1. It's tested during normal core tests.
# 2. it's somewhat fragile and tend to timeout in mac.
# 3. it's expensive and it's not worth the cost to run it for plugins as well.
run_pytest(session, "tests", "--ignore=tests/test_completion.py")
else:
session.log("Skipping Hydra core tests")
# Run tests for all installed plugins
for plugin in selected_plugin:
# install all other plugins that are compatible with the current Python version
session.chdir(os.path.join(BASE, directory, plugin.path))
run_pytest(session)
@nox.session(python="3.8")
def coverage(session):
coverage_env = {
"COVERAGE_HOME": BASE,
"COVERAGE_FILE": f"{BASE}/.coverage",
"COVERAGE_RCFILE": f"{BASE}/.coveragerc",
}
_upgrade_basic(session)
session.install("coverage", "pytest")
install_hydra(session, ["pip", "install", "-e"])
session.run("coverage", "erase", env=coverage_env)
for directory in ["plugins", "examples/plugins"]:
selected_plugins = select_plugins(session=session, directory=directory)
for plugin in selected_plugins:
session.run(
"pip",
"install",
"-e",
os.path.join(directory, plugin.path),
silent=SILENT,
)
# run plugin coverage
for plugin in selected_plugins:
session.chdir(os.path.join(directory, plugin.path))
cov_args = ["coverage", "run", "--append", "-m"]
cov_args.extend(pytest_args())
session.run(*cov_args, silent=SILENT, env=coverage_env)
session.chdir(BASE)
# run hydra-core coverage
session.run(
"coverage",
"run",
"--append",
"-m",
silent=SILENT,
env=coverage_env,
*pytest_args(),
)
# Increase the fail_under as coverage improves
session.run("coverage", "report", "--fail-under=80", env=coverage_env)
session.run("coverage", "erase", env=coverage_env)
@nox.session(python=PYTHON_VERSIONS)
def test_jupyter_notebooks(session):
versions = copy.copy(DEFAULT_PYTHON_VERSIONS)
if session.python not in versions:
session.skip(
f"Not testing Jupyter notebook on Python {session.python}, supports [{','.join(versions)}]"
)
session.install("jupyter", "nbval", "pyzmq")
install_hydra(session, ["pip", "install", "-e"])
args = pytest_args(
"--nbval", "examples/jupyter_notebooks/compose_configs_in_notebook.ipynb"
)
# Jupyter notebook test on Windows yield warnings
args = [x for x in args if x != "-Werror"]
session.run(*args, silent=SILENT)
notebooks_dir = Path("tests/jupyter")
for notebook in [
file for file in notebooks_dir.iterdir() if str(file).endswith(".ipynb")
]:
args = pytest_args("--nbval", str(notebook))
args = [x for x in args if x != "-Werror"]
session.run(*args, silent=SILENT)
@nox.session(python=PYTHON_VERSIONS)
def benchmark(session):
_upgrade_basic(session)
install_dev_deps(session)
install_hydra(session, INSTALL_COMMAND)
session.install("pytest")
run_pytest(session, "build_helpers", "tests/benchmark.py", *session.posargs)
|
"""Tasks to help Robot Framework packaging and other development.
Executed by Invoke <http://pyinvoke.org>. Install it with `pip install invoke`
and run `invoke --help` and `invoke --list` for details how to execute tasks.
See BUILD.rst for packaging and releasing instructions.
"""
from pathlib import Path
from urllib.request import urlretrieve
import os
import shutil
import sys
import tarfile
import tempfile
import zipfile
assert Path.cwd().resolve() == Path(__file__).resolve().parent
sys.path.insert(0, 'src')
from invoke import Exit, task
from rellu import initialize_labels, ReleaseNotesGenerator, Version
from rellu.tasks import clean
from robot.libdoc import libdoc
REPOSITORY = 'robotframework/robotframework'
VERSION_PATH = Path('src/robot/version.py')
VERSION_PATTERN = "VERSION = '(.*)'"
POM_PATH = Path('pom.xml')
POM_VERSION_PATTERN = '<version>(.*)</version>'
RELEASE_NOTES_PATH = Path('doc/releasenotes/rf-{version}.rst')
RELEASE_NOTES_TITLE = 'Robot Framework {version}'
RELEASE_NOTES_INTRO = '''
`Robot Framework`_ {version} is a new release with **UPDATE** enhancements
and bug fixes. **MORE intro stuff...**
**REMOVE reference to tracker if release notes contain all issues.**
All issues targeted for Robot Framework {version.milestone} can be found
from the `issue tracker milestone`_.
Questions and comments related to the release can be sent to the
`robotframework-users`_ mailing list or to `Robot Framework Slack`_,
and possible bugs submitted to the `issue tracker`_.
**REMOVE ``--pre`` from the next command with final releases.**
If you have pip_ installed, just run
::
pip install --pre --upgrade robotframework
to install the latest available release or use
::
pip install robotframework=={version}
to install exactly this version. Alternatively you can download the source
distribution from PyPI_ and install it manually. For more details and other
installation approaches, see the `installation instructions`_.
Robot Framework {version} was released on {date}.
.. _Robot Framework: http://robotframework.org
.. _pip: http://pip-installer.org
.. _PyPI: https://pypi.python.org/pypi/robotframework
.. _issue tracker milestone: https://github.com/robotframework/robotframework/issues?q=milestone%3A{version.milestone}
.. _issue tracker: https://github.com/robotframework/robotframework/issues
.. _robotframework-users: http://groups.google.com/group/robotframework-users
.. _Robot Framework Slack: https://robotframework-slack-invite.herokuapp.com
.. _installation instructions: ../../INSTALL.rst
'''
@task
def set_version(ctx, version):
    """Set project version in ``src/robot/version.py`` file.

    Args:
        version: Project version to set or ``dev`` to set development version.

    Following PEP-440 compatible version numbers are supported:
    - Final version like 3.0 or 3.1.2.
    - Alpha, beta or release candidate with ``a``, ``b`` or ``rc`` postfix,
      respectively, and an incremented number like 3.0a1 or 3.0.1rc1.
    - Development version with ``.dev`` postfix and an incremented number like
      3.0.dev1 or 3.1a1.dev2.

    When the given version is ``dev``, the existing version number is updated
    to the next suitable development version. For example, 3.0 -> 3.0.1.dev1,
    3.1.1 -> 3.1.2.dev1, 3.2a1 -> 3.2a2.dev1, 3.2.dev1 -> 3.2.dev2.
    """
    version = Version(version, VERSION_PATH, VERSION_PATTERN)
    version.write()
    # Keep the Maven pom.xml version in sync with the Python version.
    pom = Version(str(version), POM_PATH, POM_VERSION_PATTERN)
    pom.write()
    print(version)
@task
def print_version(ctx):
    """Print the current project version."""
    version = Version(path=VERSION_PATH, pattern=VERSION_PATTERN)
    print(version)
@task
def library_docs(ctx, name):
    """Generate standard library documentation.

    Args:
        name: Name of the library or ``all`` to generate docs for all libs.
            Name is case-insensitive and can be shortened as long as it
            is a unique prefix. For example, ``b`` is equivalent to
            ``BuiltIn`` and ``di`` equivalent to ``Dialogs``.
    """
    libraries = ['BuiltIn', 'Collections', 'DateTime', 'Dialogs',
                 'OperatingSystem', 'Process', 'Screenshot', 'String',
                 'Telnet', 'XML']
    name = name.lower()
    if name != 'all':
        # A prefix match must be unambiguous; otherwise abort the task.
        libraries = [lib for lib in libraries if lib.lower().startswith(name)]
        if len(libraries) != 1:
            raise Exit(f"'{name}' is not a unique library prefix.")
    for lib in libraries:
        libdoc(lib, str(Path(f'doc/libraries/{lib}.html')))
@task
def release_notes(ctx, version=None, username=None, password=None, write=False):
    """Generate release notes based on issues in the issue tracker.

    Args:
        version: Generate release notes for this version. If not given,
            generated them for the current version.
        username: GitHub username.
        password: GitHub password.
        write: When set to True, write release notes to a file overwriting
            possible existing file. Otherwise just print them to the
            terminal.

    Username and password can also be specified using ``GITHUB_USERNAME`` and
    ``GITHUB_PASSWORD`` environment variable, respectively. If they aren't
    specified at all, communication with GitHub is anonymous and typically
    pretty slow.
    """
    version = Version(version, VERSION_PATH, VERSION_PATTERN)
    # RELEASE_NOTES_PATH is a {version} template path; stdout when not writing.
    file = RELEASE_NOTES_PATH if write else sys.stdout
    generator = ReleaseNotesGenerator(REPOSITORY, RELEASE_NOTES_TITLE,
                                      RELEASE_NOTES_INTRO)
    generator.generate(version, username, password, file)
@task
def init_labels(ctx, username=None, password=None):
    """Initialize project by setting labels in the issue tracker.

    Args:
        username: GitHub username.
        password: GitHub password.

    Username and password can also be specified using ``GITHUB_USERNAME`` and
    ``GITHUB_PASSWORD`` environment variable, respectively.

    Should only be executed once when taking ``rellu`` tooling to use or
    when labels it uses have changed.
    """
    initialize_labels(REPOSITORY, username, password)
@task
def jar(ctx, jython_version='2.7.0', pyyaml_version='3.11', remove_dist=False):
    """Create JAR distribution.

    Downloads Jython JAR and PyYAML if needed.

    Args:
        jython_version: Jython version to use as a base. Must match version in
            `jython-standalone-<version>.jar` found from Maven central.
        pyyaml_version: Version of PyYAML that will be included in the
            standalone jar. The version must be available from PyPI.
        remove_dist: Control is 'dist' directory initially removed or not.
    """
    clean(ctx, remove_dist, create_dirs=True)
    # Fetch deps, compile Java sources, explode the Jython jar, add robot and
    # PyYAML sources, byte-compile with Jython, then package everything.
    jython_jar = get_jython_jar(jython_version)
    print(f"Using '{jython_jar}'.")
    compile_java_files(ctx, jython_jar)
    unzip_jar(jython_jar)
    copy_robot_files()
    pyaml_archive = get_pyyaml(pyyaml_version)
    extract_and_copy_pyyaml_files(pyyaml_version, pyaml_archive)
    compile_python_files(ctx, jython_jar)
    version = Version(path=VERSION_PATH, pattern=VERSION_PATTERN)
    create_robot_jar(ctx, str(version))
def get_jython_jar(version):
    """Return path to the Jython standalone jar, downloading it if needed.

    Args:
        version: Jython version available from Maven central.
    """
    filename = f'jython-standalone-{version}.jar'
    # BUG FIX: the download URL ended with a literal '(unknown)' placeholder
    # instead of the jar filename. Also unified .format() with the f-strings
    # used everywhere else in this file.
    url = (f'http://search.maven.org/remotecontent?filepath=org/python/'
           f'jython-standalone/{version}/{filename}')
    return get_extlib_file(filename, url)
def get_pyyaml(version):
    """Return path to the PyYAML source archive, downloading it if needed.

    Args:
        version: PyYAML version available on PyPI.
    """
    filename = f'PyYAML-{version}.tar.gz'
    # BUG FIX: the download URL ended with a literal '(unknown)' placeholder
    # instead of the archive filename.
    url = f'https://pypi.python.org/packages/source/P/PyYAML/{filename}'
    return get_extlib_file(filename, url)
def get_extlib_file(filename, url):
    """Return path to *filename* under ``ext-lib/``, downloading from *url* if missing."""
    lib = Path('ext-lib')
    path = Path(lib, filename)
    if path.exists():
        # Already downloaded on an earlier run; reuse the cached file.
        return path
    # BUG FIX: the message printed a literal '(unknown)' placeholder instead
    # of the missing filename.
    print(f"'{filename}' not found, downloading it from '{url}'.")
    lib.mkdir(exist_ok=True)
    urlretrieve(url, path)
    return path
def extract_and_copy_pyyaml_files(version, filename, build_dir='build'):
    """Extract the PyYAML archive and copy its ``yaml`` package into the build tree.

    Args:
        version: PyYAML version (locates the top-level directory in the archive).
        filename: Path to the PyYAML ``.tar.gz`` source archive.
        build_dir: Build directory receiving ``Lib/yaml``.
    """
    extracted = Path(tempfile.gettempdir(), 'pyyaml-for-robot')
    if extracted.is_dir():
        # Remove leftovers from a previous run so extraction starts clean.
        shutil.rmtree(str(extracted))
    # BUG FIX: the message printed a literal '(unknown)' placeholder instead
    # of the archive name.
    print(f"Extracting '{filename}' to '{extracted}'.")
    with tarfile.open(filename) as t:
        t.extractall(extracted)
    source = Path(extracted, f'PyYAML-{version}', 'lib', 'yaml')
    target = Path(build_dir, 'Lib', 'yaml')
    shutil.copytree(str(source), str(target),
                    ignore=shutil.ignore_patterns('*.pyc'))
def compile_java_files(ctx, jython_jar, build_dir='build'):
    """Compile the org.robotframework Java sources against the Jython jar.

    Args:
        ctx: Invoke context used to run the ``javac`` command.
        jython_jar: Path to the Jython standalone jar (compile classpath).
        build_dir: Directory where compiled classes are written.
    """
    root = Path('src/java/org/robotframework')
    files = [str(path) for path in root.iterdir() if path.suffix == '.java']
    print(f'Compiling {len(files)} Java files.')
    # BUG FIX: the original f-string nested double quotes ({" ".join(files)}),
    # which is a SyntaxError on every Python < 3.12.
    ctx.run(f"javac -d {build_dir} -target 1.7 -source 1.7 -cp {jython_jar} "
            f"{' '.join(files)}")
def unzip_jar(path, target='build'):
    """Extract the given jar/zip archive into *target*."""
    with zipfile.ZipFile(path) as archive:
        archive.extractall(target)
def copy_robot_files(build_dir='build'):
    """Copy robot sources into the build tree, skipping .pyc files and
    dropping the htmldata test data."""
    target = Path(build_dir, 'Lib', 'robot')
    shutil.copytree(str(Path('src', 'robot')), str(target),
                    ignore=shutil.ignore_patterns('*.pyc'))
    shutil.rmtree(str(Path(target, 'htmldata', 'testdata')))
def compile_python_files(ctx, jython_jar, build_dir='build'):
    """Byte-compile the build tree with Jython, then delete robot's .py sources."""
    ctx.run(f"java -jar {jython_jar} -m compileall -x '.*3.py' {build_dir}")
    # Jython will not work without its py-files, but robot will
    robot_lib = Path(build_dir, 'Lib', 'robot')
    for directory, _, files in os.walk(str(robot_lib)):
        for name in files:
            if name.endswith('.py'):
                Path(directory, name).unlink()
def create_robot_jar(ctx, version, source='build'):
    """Package the build tree into ``dist/robotframework-<version>.jar``."""
    write_manifest(version, source)
    jar_path = Path(f'dist/robotframework-{version}.jar')
    ctx.run(f'jar cvfM {jar_path} -C {source} .')
    print(f"Created '{jar_path}'.")
def write_manifest(version, build_dir='build'):
    """Write the JAR manifest into ``<build_dir>/META-INF/MANIFEST.MF``."""
    manifest = (
        'Manifest-Version: 1.0\n'
        'Main-Class: org.robotframework.RobotFramework\n'
        'Specification-Version: 2\n'
        f'Implementation-Version: {version}\n'
    )
    path = Path(build_dir, 'META-INF', 'MANIFEST.MF')
    with open(path, 'w') as mf:
        mf.write(manifest)
| """Tasks to help Robot Framework packaging and other development.
Executed by Invoke <http://pyinvoke.org>. Install it with `pip install invoke`
and run `invoke --help` and `invoke --list` for details how to execute tasks.
See BUILD.rst for packaging and releasing instructions.
"""
from pathlib import Path
from urllib.request import urlretrieve
import os
import shutil
import sys
import tarfile
import tempfile
import zipfile
assert Path.cwd().resolve() == Path(__file__).resolve().parent
sys.path.insert(0, 'src')
from invoke import Exit, task
from rellu import initialize_labels, ReleaseNotesGenerator, Version
from rellu.tasks import clean
from robot.libdoc import libdoc
REPOSITORY = 'robotframework/robotframework'
VERSION_PATH = Path('src/robot/version.py')
VERSION_PATTERN = "VERSION = '(.*)'"
POM_PATH = Path('pom.xml')
POM_VERSION_PATTERN = '<version>(.*)</version>'
RELEASE_NOTES_PATH = Path('doc/releasenotes/rf-{version}.rst')
RELEASE_NOTES_TITLE = 'Robot Framework {version}'
RELEASE_NOTES_INTRO = '''
`Robot Framework`_ {version} is a new release with **UPDATE** enhancements
and bug fixes. **MORE intro stuff...**
**REMOVE reference to tracker if release notes contain all issues.**
All issues targeted for Robot Framework {version.milestone} can be found
from the `issue tracker milestone`_.
Questions and comments related to the release can be sent to the
`robotframework-users`_ mailing list or to `Robot Framework Slack`_,
and possible bugs submitted to the `issue tracker`_.
**REMOVE ``--pre`` from the next command with final releases.**
If you have pip_ installed, just run
::
pip install --pre --upgrade robotframework
to install the latest available release or use
::
pip install robotframework=={version}
to install exactly this version. Alternatively you can download the source
distribution from PyPI_ and install it manually. For more details and other
installation approaches, see the `installation instructions`_.
Robot Framework {version} was released on {date}.
.. _Robot Framework: http://robotframework.org
.. _pip: http://pip-installer.org
.. _PyPI: https://pypi.python.org/pypi/robotframework
.. _issue tracker milestone: https://github.com/robotframework/robotframework/issues?q=milestone%3A{version.milestone}
.. _issue tracker: https://github.com/robotframework/robotframework/issues
.. _robotframework-users: http://groups.google.com/group/robotframework-users
.. _Robot Framework Slack: https://robotframework-slack-invite.herokuapp.com
.. _installation instructions: ../../INSTALL.rst
'''
@task
def set_version(ctx, version):
    """Set project version in ``src/robot/version.py`` file.

    Args:
        version: Project version to set or ``dev`` to set development version.

    Following PEP-440 compatible version numbers are supported:
    - Final version like 3.0 or 3.1.2.
    - Alpha, beta or release candidate with ``a``, ``b`` or ``rc`` postfix,
      respectively, and an incremented number like 3.0a1 or 3.0.1rc1.
    - Development version with ``.dev`` postfix and an incremented number like
      3.0.dev1 or 3.1a1.dev2.

    When the given version is ``dev``, the existing version number is updated
    to the next suitable development version. For example, 3.0 -> 3.0.1.dev1,
    3.1.1 -> 3.1.2.dev1, 3.2a1 -> 3.2a2.dev1, 3.2.dev1 -> 3.2.dev2.
    """
    version = Version(version, VERSION_PATH, VERSION_PATTERN)
    version.write()
    # Keep the Maven pom.xml version in sync with the Python version file.
    pom = Version(str(version), POM_PATH, POM_VERSION_PATTERN)
    pom.write()
    print(version)
@task
def print_version(ctx):
    """Print the current project version."""
    current = Version(path=VERSION_PATH, pattern=VERSION_PATTERN)
    print(current)
@task
def library_docs(ctx, name):
    """Generate standard library documentation.

    Args:
        name: Name of the library or ``all`` to generate docs for all libs.
            Name is case-insensitive and can be shortened as long as it
            is a unique prefix. For example, ``b`` is equivalent to
            ``BuiltIn`` and ``di`` equivalent to ``Dialogs``.
    """
    all_libraries = ['BuiltIn', 'Collections', 'DateTime', 'Dialogs',
                     'OperatingSystem', 'Process', 'Screenshot', 'String',
                     'Telnet', 'XML']
    prefix = name.lower()
    if prefix == 'all':
        selected = all_libraries
    else:
        # Case-insensitive prefix match; must identify exactly one library.
        selected = [library for library in all_libraries
                    if library.lower().startswith(prefix)]
        if len(selected) != 1:
            raise Exit(f"'{name}' is not a unique library prefix.")
    for library in selected:
        libdoc(library, str(Path(f'doc/libraries/{library}.html')))
@task
def release_notes(ctx, version=None, username=None, password=None, write=False):
    """Generate release notes based on issues in the issue tracker.

    Args:
        version: Generate release notes for this version. If not given,
            generated them for the current version.
        username: GitHub username.
        password: GitHub password.
        write: When set to True, write release notes to a file overwriting
            possible existing file. Otherwise just print them to the
            terminal.

    Username and password can also be specified using ``GITHUB_USERNAME`` and
    ``GITHUB_PASSWORD`` environment variable, respectively. If they aren't
    specified at all, communication with GitHub is anonymous and typically
    pretty slow.
    """
    version = Version(version, VERSION_PATH, VERSION_PATTERN)
    # NOTE(review): RELEASE_NOTES_PATH still contains the ``{version}``
    # placeholder at this point — presumably ReleaseNotesGenerator formats
    # it before writing; confirm against rellu's implementation.
    file = RELEASE_NOTES_PATH if write else sys.stdout
    generator = ReleaseNotesGenerator(REPOSITORY, RELEASE_NOTES_TITLE,
                                      RELEASE_NOTES_INTRO)
    generator.generate(version, username, password, file)
@task
def init_labels(ctx, username=None, password=None):
    """Initialize project by setting labels in the issue tracker.

    Args:
        username: GitHub username.
        password: GitHub password.

    Username and password can also be specified using ``GITHUB_USERNAME`` and
    ``GITHUB_PASSWORD`` environment variable, respectively.

    Should only be executed once when taking ``rellu`` tooling to use or
    when labels it uses have changed.
    """
    initialize_labels(REPOSITORY, username, password)
@task
def jar(ctx, jython_version='2.7.0', pyyaml_version='3.11', remove_dist=False):
    """Create JAR distribution.

    Downloads Jython JAR and PyYAML if needed.

    Args:
        jython_version: Jython version to use as a base. Must match version in
            `jython-standalone-<version>.jar` found from Maven central.
        pyyaml_version: Version of PyYAML that will be included in the
            standalone jar. The version must be available from PyPI.
        remove_dist: Control is 'dist' directory initially removed or not.
    """
    # Pipeline: clean -> fetch Jython -> compile Java -> unpack Jython ->
    # add robot + PyYAML sources -> byte-compile with Jython -> package JAR.
    clean(ctx, remove_dist, create_dirs=True)
    jython_jar = get_jython_jar(jython_version)
    print(f"Using '{jython_jar}'.")
    compile_java_files(ctx, jython_jar)
    unzip_jar(jython_jar)
    copy_robot_files()
    pyaml_archive = get_pyyaml(pyyaml_version)
    extract_and_copy_pyyaml_files(pyyaml_version, pyaml_archive)
    compile_python_files(ctx, jython_jar)
    version = Version(path=VERSION_PATH, pattern=VERSION_PATTERN)
    create_robot_jar(ctx, str(version))
def get_jython_jar(version):
    """Return a local path to the Jython standalone JAR, downloading if needed.

    Args:
        version: Jython version, e.g. ``2.7.0``; must exist in Maven central.
    """
    filename = f'jython-standalone-{version}.jar'
    # Maven central layout: <groupId as path>/<artifactId>/<version>/<artifact file>.
    # The URL previously ended with a corrupted '(unknown)' placeholder
    # instead of the actual jar filename.
    url = (f'http://search.maven.org/remotecontent?filepath=org/python/'
           f'jython-standalone/{version}/{filename}')
    return get_extlib_file(filename, url)
def get_pyyaml(version):
    """Return a local path to the PyYAML source archive, downloading if needed."""
    filename = f'PyYAML-{version}.tar.gz'
    # The URL previously ended with a corrupted '(unknown)' placeholder
    # instead of the archive filename.
    url = f'https://pypi.python.org/packages/source/P/PyYAML/{filename}'
    return get_extlib_file(filename, url)
def get_extlib_file(filename, url):
    """Return the path to ``ext-lib/<filename>``, downloading from ``url`` if missing.

    Args:
        filename: Name of the file inside the local ``ext-lib`` cache.
        url: Location to download from when the file is not cached yet.
    """
    lib = Path('ext-lib')
    path = Path(lib, filename)
    if path.exists():
        return path
    # The message previously printed a corrupted '(unknown)' placeholder
    # instead of the missing filename.
    print(f"'{filename}' not found, downloading it from '{url}'.")
    lib.mkdir(exist_ok=True)
    urlretrieve(url, path)
    return path
def extract_and_copy_pyyaml_files(version, filename, build_dir='build'):
    """Extract the PyYAML archive and copy its ``yaml`` package into the build tree.

    Args:
        version: PyYAML version (locates the top-level directory in the archive).
        filename: Path to the downloaded ``PyYAML-<version>.tar.gz`` archive.
        build_dir: Build root; files end up in ``<build_dir>/Lib/yaml``.
    """
    extracted = Path(tempfile.gettempdir(), 'pyyaml-for-robot')
    # Start from a clean extraction directory to avoid stale files.
    if extracted.is_dir():
        shutil.rmtree(str(extracted))
    # The message previously printed a corrupted '(unknown)' placeholder
    # instead of the archive filename.
    print(f"Extracting '{filename}' to '{extracted}'.")
    # NOTE(review): extractall trusts member paths in the archive; acceptable
    # here because the archive comes from PyPI, a trusted source.
    with tarfile.open(filename) as t:
        t.extractall(extracted)
    source = Path(extracted, f'PyYAML-{version}', 'lib', 'yaml')
    target = Path(build_dir, 'Lib', 'yaml')
    shutil.copytree(str(source), str(target),
                    ignore=shutil.ignore_patterns('*.pyc'))
def compile_java_files(ctx, jython_jar, build_dir='build'):
    """Compile the Java sources under ``src/java`` into ``build_dir``.

    The Jython standalone JAR is put on the classpath and the bytecode
    targets Java 1.7 for compatibility.
    """
    root = Path('src/java/org/robotframework')
    files = [str(path) for path in root.iterdir() if path.suffix == '.java']
    print(f'Compiling {len(files)} Java files.')
    ctx.run(f"javac -d {build_dir} -target 1.7 -source 1.7 -cp {jython_jar} "
            f"{' '.join(files)}")
def unzip_jar(path, target='build'):
    """Extract the JAR/ZIP archive at ``path`` into the ``target`` directory.

    Args:
        path: Path to the archive (str or Path).
        target: Directory to extract into.
    """
    # Use a context manager so the archive's file handle is always closed;
    # the previous one-liner leaked it.
    with zipfile.ZipFile(path) as archive:
        archive.extractall(target)
def copy_robot_files(build_dir='build'):
    """Copy ``src/robot`` into ``<build_dir>/Lib/robot`` for packaging.

    Compiled ``*.pyc`` files are skipped and the HTML test data directory is
    removed from the copy to keep the distribution small.
    """
    source = Path('src', 'robot')
    target = Path(build_dir, 'Lib', 'robot')
    shutil.copytree(str(source), str(target),
                    ignore=shutil.ignore_patterns('*.pyc'))
    shutil.rmtree(str(Path(target, 'htmldata', 'testdata')))
def compile_python_files(ctx, jython_jar, build_dir='build'):
    """Byte-compile the build tree with Jython and drop robot's ``.py`` sources.

    Files matching ``.*3.py`` are excluded from compilation.
    """
    ctx.run(f"java -jar {jython_jar} -m compileall -x '.*3.py' {build_dir}")
    # Jython will not work without its py-files, but robot will
    for directory, _, files in os.walk(str(Path(build_dir, 'Lib', 'robot'))):
        for name in files:
            if name.endswith('.py'):
                Path(directory, name).unlink()
def create_robot_jar(ctx, version, source='build'):
    """Package the ``source`` directory into ``dist/robotframework-<version>.jar``."""
    # The manifest must exist inside the tree before jarring (``jar cvfM``
    # does not generate one itself).
    write_manifest(version, source)
    target = Path(f'dist/robotframework-{version}.jar')
    ctx.run(f'jar cvfM {target} -C {source} .')
    print(f"Created '{target}'.")
def write_manifest(version, build_dir='build'):
    """Write ``META-INF/MANIFEST.MF`` declaring the JAR's main class and version."""
    with open(Path(build_dir, 'META-INF', 'MANIFEST.MF'), 'w') as mf:
        mf.write(f'''\
Manifest-Version: 1.0
Main-Class: org.robotframework.RobotFramework
Specification-Version: 2
Implementation-Version: {version}
''')
|
from __future__ import annotations
from typing import List, Tuple, Optional
from network_simulator.BloodType import BloodType
from network_simulator.compatibility_markers import OrganType
path_structure = Optional[List[Optional[int]]]
shortest_path_structure = Tuple[path_structure, float]
class Organ:
    """
    A class representing a given organ which is available for transplant.

    Each organ has a name, a unique ID, lifetime (a maximum out of body
    duration), type matching, and a location.
    """
    # Shared class-level counter used to hand out unique, increasing IDs.
    organ_count = 0

    def __init__(self, organ_type: OrganType, blood_type: BloodType,
                 location: int, organ_list: 'OrganList' = None) -> None:  # type: ignore
        Organ.organ_count = Organ.organ_count + 1
        self.organ_id: int = Organ.organ_count
        self.organ_type: OrganType = organ_type
        self.blood_type: BloodType = blood_type
        self.viability: float = Organ.get_viability(self.organ_type)
        self.origin_location: int = location
        self.current_location: int = location
        # Transit history starts at the origin node.
        self.path: path_structure = [location]
        # Optionally register this organ with a containing OrganList.
        if organ_list:
            organ_list.add_organ(self)

    def move_organ(self, new_location: int, cost: float,
                   shortest_path: shortest_path_structure) -> None:
        """
        This function allows an organ's attributes to be altered to represent
        its transportation across the network. This is intended to be used with
        Dijkstra.shortest_path (this will be the source of the cost parameter)

        :param int new_location: node id representing the destination location
        :param float cost: weight/cost associated with the most efficient path
        :param tuple shortest_path: (path, weight) pair; transit path taken
        """
        # A move that costs more than the remaining viability is refused.
        if self.viability < cost:
            print('ERROR: organ no longer viable!')
            return
        path, weight = shortest_path
        self.path = path
        self.current_location = new_location
        self.viability -= cost

    @staticmethod
    def get_viability(organ_type: OrganType) -> float:
        """
        Gets viability rating for each organ individually.
        Viability is represented by hours an organ can be out of body * 10

        :param OrganType organ_type: constant corresponding to an organ type
        :return: viability rating (used in __init__())
        """
        viability = {
            OrganType.Heart.value: 60,
            OrganType.Kidney.value: 300,
            OrganType.Liver.value: 120,
            OrganType.Lungs.value: 60,
            OrganType.Pancreas.value: 120,
            OrganType.Intestines.value: 80}
        return viability[organ_type.value]

    def __str__(self) -> str:
        """
        Builds an easily readable string representing an organ

        :return: str
        """
        # ``:05d`` zero-pads the ID directly in the f-string; the previous
        # nested same-quote str.format call was a SyntaxError before
        # Python 3.12 (PEP 701).
        return f'Organ:\n' \
               f'\tOrgan ID: {self.organ_id:05d}\n' \
               f'\tOrgan type: {OrganType(self.organ_type).name}\n' \
               f'\tBlood type: {self.blood_type}\n' \
               f'\tViability: {self.viability}\n' \
               f'\tOrigin location: {self.origin_location}\n' \
               f'\tCurrent location: {self.current_location}\n' \
               f'\tTransit path: {self.path}\n'
| from __future__ import annotations
from typing import List, Tuple, Optional
from network_simulator.BloodType import BloodType
from network_simulator.compatibility_markers import OrganType
path_structure = Optional[List[Optional[int]]]
shortest_path_structure = Tuple[path_structure, float]
class Organ:
    """
    A class representing a given organ which is available for transplant.

    Each organ has a name, a unique ID, lifetime (a maximum out of body duration),
    type matching, and a location.
    """
    # Shared class-level counter used to hand out unique, increasing IDs.
    organ_count = 0

    def __init__(self, organ_type: OrganType, blood_type: BloodType,
                 location: int, organ_list: 'OrganList' = None) -> None:  # type: ignore
        Organ.organ_count = Organ.organ_count + 1
        self.organ_id: int = Organ.organ_count
        self.organ_type: OrganType = organ_type
        self.blood_type: BloodType = blood_type
        self.viability: float = Organ.get_viability(self.organ_type)
        self.origin_location: int = location
        self.current_location: int = location
        # Transit history starts at the origin node.
        self.path: path_structure = [location]
        # Optionally register this organ with a containing OrganList.
        if organ_list:
            organ_list.add_organ(self)

    def move_organ(self, new_location: int, cost: float,
                   shortest_path: shortest_path_structure) -> None:
        """
        This function allows an organ's attributes to be altered to represent it's
        transportation across the network. This is intended to be used with
        Dijkstra.shortest_path (this will be the source of the cost parameter)

        :param int new_location: node id representing the destination location
        :param cost: weight/cost associated with then most efficient path
        :param list shortest_path: transit path taken when moving organ
        """
        # A move that costs more than the remaining viability is refused.
        if self.viability < cost:
            print('ERROR: organ no longer viable!')
            return
        path, weight = shortest_path
        self.path = path
        self.current_location = new_location
        self.viability -= cost

    @staticmethod
    def get_viability(organ_type: OrganType) -> float:
        """
        Gets viability rating for each organ individually
        Viability is represented by hours an organ can be out of body * 10

        :param int organ_type: constant corresponding to an organ type
        :return: int viability rating (used in __init__())
        """
        viability = {
            OrganType.Heart.value: 60,
            OrganType.Kidney.value: 300,
            OrganType.Liver.value: 120,
            OrganType.Lungs.value: 60,
            OrganType.Pancreas.value: 120,
            OrganType.Intestines.value: 80}
        return viability[organ_type.value]

    def __str__(self) -> str:
        """
        Builds an easily readable string representing an organ

        :return: str
        """
        return f'Organ:\n' \
               f'\tOrgan ID: {"{:05d}".format(self.organ_id)}\n' \
               f'\tOrgan type: {OrganType(self.organ_type).name}\n' \
               f'\tBlood type: {self.blood_type}\n' \
               f'\tViability: {self.viability}\n' \
               f'\tOrigin location: {self.origin_location}\n' \
               f'\tCurrent location: {self.current_location}\n' \
               f'\tTransit path: {self.path}\n'
|
import logging
import sys
from abc import ABC, abstractmethod
logger = logging.getLogger(__name__)
class PaddownException(Exception):
pass
class Paddown(ABC):
    """Generic CBC padding-oracle attack driver.

    Subclass and implement :meth:`has_valid_padding` against the target
    oracle, then call :meth:`decrypt` to recover the plaintext of
    ``self.ciphertext`` block by block.
    """

    @abstractmethod
    def has_valid_padding(self, ciphertext: bytes) -> bool:
        """
        Override this method and send off the ciphertext to check for valid padding.

        :param bytes ciphertext: The ciphertext to check, send this to your padding oracle.
        :rtype: True for valid padding, False otherwise.
        """
        raise PaddownException("Not implemented")

    def __init__(self, ciphertext: bytes, blocksize: int = 16):
        if not isinstance(ciphertext, bytes):
            raise Exception(f"Ciphertext {type(ciphertext)} not an instance of {bytes}")
        self.ciphertext = ciphertext
        self.blocksize = blocksize

    def find_c_prime_at_index(self, ciphertext: bytearray, index: int):
        """Brute-force the byte at ``index`` until the oracle accepts the padding.

        Note: ``ciphertext`` is mutated in place while guessing (it is an
        alias, not a copy).
        """
        if not isinstance(ciphertext, bytearray):
            raise PaddownException(f"ciphertext not an instance of {bytearray}")
        # Replace ciphertext at index with a guessed byte
        ciphertext_temp = ciphertext
        for c_prime in range(256):
            ciphertext_temp[index] = c_prime
            if self.has_valid_padding(ciphertext_temp):
                return c_prime
        # Plain literal: the previous f-string had no placeholders.
        raise PaddownException("No valid padding found, is .has_valid_padding(...) implemented correctly?")

    def decrypt_block(self, c_i):
        """Recover the block-cipher intermediate of block ``c_i``.

        Works right to left, forging padding values 0x01, 0x02, ... in a
        fake previous block.
        """
        if not isinstance(c_i, bytearray):
            raise PaddownException(f"block c_i not an instance of {bytearray}")
        c_previous = bytearray(b"\x00" * self.blocksize)
        intermediate = bytearray(b"\x00" * self.blocksize)
        for i in range(self.blocksize):
            self.progress_bar(i, self.blocksize - 1, "Decrypting ")
            # Re-forge already-recovered trailing bytes for padding (i + 1).
            for j in range(i):
                c_previous[(self.blocksize - 1) - j] = intermediate[(self.blocksize - 1) - j] ^ (i + 1)
            c_prime = self.find_c_prime_at_index(c_previous + c_i, (self.blocksize - 1) - i)
            intermediate[(self.blocksize - 1) - i] = c_prime ^ (i + 1)
        logger.debug(f"intermediate: {[hex(x)[2:] for x in intermediate]}")
        return intermediate

    def get_intermediate(self, ciphertext) -> bytes:
        """Concatenate the intermediates of every block of ``ciphertext``."""
        key = b""
        blocks = len(ciphertext) // self.blocksize
        # Iterate blocks last to first
        for i in range(blocks):
            block_start = len(ciphertext) - (i + 1) * self.blocksize
            block_end = len(ciphertext) - (i * self.blocksize)
            key = self.decrypt_block(ciphertext[block_start:block_end]) + key
        return key

    def decrypt(self) -> bytes:
        """Run the attack on ``self.ciphertext`` and return the plaintext.

        CBC: P[i] = C[i] XOR intermediate of the following block, so the
        first ciphertext block effectively serves as the IV.
        """
        logger.debug(f"Ciphertext length: {len(self.ciphertext)}")
        logger.debug(f"Blocks to decrypt: {len(self.ciphertext) // self.blocksize}")
        # Convert self.ciphertext to mutable bytearray
        self.ciphertext = bytearray(self.ciphertext)
        key = self.get_intermediate(self.ciphertext)
        plaintext = bytearray()
        for i in range(len(self.ciphertext) - self.blocksize):
            b = self.ciphertext[i] ^ key[i + self.blocksize]
            plaintext += (b).to_bytes(1, byteorder="big")
        print("\n")  # print variable on new line from progress bar
        return plaintext

    def progress_bar(self, i, total_length, post_text):
        """Draw a simple one-line textual progress bar on stdout."""
        n_bar = 100  # size of progress bar
        j = i / total_length
        sys.stdout.write("\r")
        # '#' in single quotes: nested double quotes inside this f-string
        # were a SyntaxError before Python 3.12 (PEP 701).
        sys.stdout.write(f"[{'#' * int(n_bar * j):{n_bar}s}] {int(100 * j)}% {post_text}")
        sys.stdout.flush()
| import logging
import sys
from abc import ABC, abstractmethod
logger = logging.getLogger(__name__)
class PaddownException(Exception):
pass
class Paddown(ABC):
    """Generic CBC padding-oracle attack driver; subclass and implement
    ``has_valid_padding`` against the target oracle, then call ``decrypt``."""

    @abstractmethod
    def has_valid_padding(self, ciphertext: bytes) -> bool:
        """
        Override this method and send off the ciphertext to check for valid padding.

        :param bytes ciphertext: The ciphertext to check, send this to your padding oracle.
        :rtype: True for valid padding, False otherwise.
        """
        raise PaddownException("Not implemented")

    def __init__(self, ciphertext: bytes, blocksize: int = 16):
        if not isinstance(ciphertext, bytes):
            raise Exception(f"Ciphertext {type(ciphertext)} not an instance of {bytes}")
        self.ciphertext = ciphertext
        self.blocksize = blocksize

    def find_c_prime_at_index(self, ciphertext: bytearray, index: int):
        # Brute-forces the byte at ``index`` until the oracle reports valid
        # padding.  NOTE: ``ciphertext`` is mutated in place while guessing
        # (the local name is an alias, not a copy).
        if not isinstance(ciphertext, bytearray):
            raise PaddownException(f"ciphertext not an instance of {bytearray}")
        # Replace ciphertext at index with a guessed byte
        ciphertext_temp = ciphertext
        for c_prime in range(256):
            ciphertext_temp[index] = c_prime
            if self.has_valid_padding(ciphertext_temp):
                return c_prime
        raise PaddownException(f"No valid padding found, is .has_valid_padding(...) implemented correctly?")

    def decrypt_block(self, c_i):
        # Recovers the block-cipher intermediate of ``c_i`` byte by byte,
        # right to left, by forging padding values 0x01, 0x02, ... in a
        # fake previous block.
        if not isinstance(c_i, bytearray):
            raise PaddownException(f"block c_i not an instance of {bytearray}")
        c_previous = bytearray(b"\x00" * self.blocksize)
        intermediate = bytearray(b"\x00" * self.blocksize)
        for i in range(self.blocksize):
            self.progress_bar(i, self.blocksize - 1, "Decrypting ")
            # Re-forge already-recovered trailing bytes for padding (i + 1).
            for j in range(i):
                c_previous[(self.blocksize - 1) - j] = intermediate[(self.blocksize - 1) - j] ^ (i + 1)
            c_prime = self.find_c_prime_at_index(c_previous + c_i, (self.blocksize - 1) - i)
            intermediate[(self.blocksize - 1) - i] = c_prime ^ (i + 1)
        logger.debug(f"intermediate: {[hex(x)[2:] for x in intermediate]}")
        return intermediate

    def get_intermediate(self, ciphertext) -> bytes:
        # Concatenates the intermediates of all blocks of ``ciphertext``.
        key = b""
        blocks = len(ciphertext) // self.blocksize
        # Iterate blocks last to first
        for i in range(blocks):
            block_start = len(ciphertext) - (i + 1) * self.blocksize
            block_end = len(ciphertext) - (i * self.blocksize)
            key = self.decrypt_block(ciphertext[block_start:block_end]) + key
        return key

    def decrypt(self) -> bytes:
        # Full attack: P[i] = C[i] XOR intermediate of the following block,
        # so the first ciphertext block effectively serves as the IV.
        logger.debug(f"Ciphertext length: {len(self.ciphertext)}")
        logger.debug(f"Blocks to decrypt: {len(self.ciphertext) // self.blocksize}")
        # Convert self.ciphertext to mutable bytearray
        self.ciphertext = bytearray(self.ciphertext)
        key = self.get_intermediate(self.ciphertext)
        plaintext = bytearray()
        for i in range(len(self.ciphertext) - self.blocksize):
            b = self.ciphertext[i] ^ key[i + self.blocksize]
            plaintext += (b).to_bytes(1, byteorder="big")
        print("\n")  # print variable on new line from progress bar
        return plaintext

    def progress_bar(self, i, total_length, post_text):
        # Draws a simple one-line progress bar on stdout.
        n_bar = 100  # size of progress bar
        j = i / total_length
        sys.stdout.write("\r")
        sys.stdout.write(f"[{'#' * int(n_bar * j):{n_bar}s}] {int(100 * j)}% {post_text}")
        sys.stdout.flush()
|
from typing import Tuple, Union
from discord import Embed, Member, PermissionOverwrite, TextChannel, VoiceChannel, VoiceState
from discord.ext.commands import bot_has_guild_permissions
from discord_slash import (
Button,
ComponentContext,
Modal,
ModalContext,
Select,
SelectOption,
SlashCommandOptionType,
SlashContext,
TextInput,
TextInputStyle,
)
from discord_slash.cog_ext import cog_subcommand as slash_subcommand
from discord_slash.utils.manage_commands import create_option
from utils import (
AsteroidBot,
Cog,
DiscordColors,
DontHavePrivateRoom,
GuildData,
GuildPrivateVoice,
bot_owner_or_permissions,
cog_is_enabled,
get_content,
is_enabled,
)
class PrivateRooms(Cog):
def __init__(self, bot: AsteroidBot) -> None:
self.bot = bot
self.emoji = "🔊"
self.name = "PrivateRooms"
    async def __check(
        self, ctx: SlashContext, *, return_guild_data: bool = False
    ) -> Union[Tuple[VoiceChannel, dict], Tuple[VoiceChannel, dict, GuildData]]:
        """Resolve the author's active private room and the localized strings.

        Raises ``DontHavePrivateRoom`` when the author owns no active room.
        When ``return_guild_data`` is True the guild data is returned as well.
        """
        guild_data = await self.bot.get_guild_data(ctx.guild_id)
        # active_channels maps str(owner_id) -> voice channel id.
        active_channels = guild_data.private_voice.active_channels
        content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
        if str(ctx.author_id) not in active_channels:
            raise DontHavePrivateRoom
        voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
        if return_guild_data:
            return voice_channel, content, guild_data
        return voice_channel, content
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="close",
description="Closes your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_close(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, connect=False)
await ctx.send(content["ROOM_CLOSED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="open",
description="Opens your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_open(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, connect=True)
await ctx.send(content["ROOM_OPENED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="hide",
description="Hides your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_hide(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=False)
await ctx.send(content["ROOM_HIDED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="unhide",
description="Unhides your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_unhide(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=True)
await ctx.send(content["ROOM_UNHIDED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="change_name",
description="Change room name",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_change__name(self, ctx: SlashContext, name: str):
voice_channel, content = await self.__check(ctx)
await voice_channel.edit(name=name)
await ctx.send(content["ROOM_NAME_WAS_SETUP"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="ban",
description="Bans member to room",
)
@is_enabled()
@bot_has_guild_permissions(move_members=True, manage_channels=True)
async def private__rooms_control_ban(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(member, connect=False)
if member.voice and member.voice.channel.id == voice_channel.id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_BANNED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="unban",
description="Unbans member from room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_unban(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(member, connect=True)
await ctx.send(content["MEMBER_WAS_UNBANNED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="kick",
description="Kicks a member from room",
)
@is_enabled()
@bot_has_guild_permissions(move_members=True, manage_channels=True)
async def private__rooms_control_kick(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
if member.voice and member.voice.channel.id == voice_channel.id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_KICKED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="transfer_ownership",
description="Transfer room ownership",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_transfer__ownership(self, ctx: SlashContext, member: Member):
voice_channel, content, guild_data = await self.__check(ctx, return_guild_data=True)
await guild_data.private_voice.set_private_voice_channel(member.id, voice_channel.id)
await voice_channel.set_permissions(
member, manage_channels=True, connect=True, move_members=True
)
await voice_channel.set_permissions(
ctx.author, manage_channels=False, connect=False, move_members=False
)
await ctx.send(content["OWNERSHIP_TRANSFERED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="set_limit",
description="Sets room limit",
options=[
create_option(
name="limit",
description="The limit of members in your room",
option_type=SlashCommandOptionType.INTEGER,
min_value=1,
max_value=99,
)
],
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_set__limit(self, ctx: SlashContext, limit: int):
voice_channel, content = await self.__check(ctx)
await voice_channel.edit(user_limit=limit)
await ctx.send(content["LIMIT_WAS_SETUP"], hidden=True)
@slash_subcommand(
base="private_rooms",
name="create_menu",
description="Creates a control menu",
)
@is_enabled()
@bot_has_guild_permissions(move_members=True, manage_channels=True)
@bot_owner_or_permissions(manage_guild=True)
async def private__rooms_create__menu(self, ctx: SlashContext):
await ctx.defer(hidden=True)
guild_data = await self.bot.get_guild_data(ctx.guild_id)
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
components = [
[
Button(emoji=self.bot.get_emoji(959124362840113182), custom_id="voice_close"),
Button(emoji=self.bot.get_emoji(959124362890461254), custom_id="voice_open"),
Button(emoji=self.bot.get_emoji(959124362890461325), custom_id="voice_hide"),
Button(emoji=self.bot.get_emoji(959124362890473582), custom_id="voice_unhide"),
Button(
emoji=self.bot.get_emoji(959124362798174319), custom_id="voice_change_room_name"
),
],
[
Button(emoji=self.bot.get_emoji(959124362882068550), custom_id="voice_ban"),
Button(emoji=self.bot.get_emoji(959124362835931236), custom_id="voice_unban"),
Button(emoji=self.bot.get_emoji(959124362974343169), custom_id="voice_kick"),
Button(emoji=self.bot.get_emoji(959124362823340052), custom_id="voice_transfer"),
Button(
emoji=self.bot.get_emoji(959124362835927080), custom_id="voice_set_room_limit"
),
],
]
category = await ctx.guild.create_category(content["PRIVATE_ROOMS"])
voice_channel = await category.create_voice_channel(content["CREATE_ROOM"])
overwrites = {
ctx.guild.default_role: PermissionOverwrite(
send_messages=False, use_slash_commands=False
)
}
text_channel: TextChannel = await category.create_text_channel(
content["ROOM_CONTROL"], overwrites=overwrites
)
await guild_data.create_private_voice(text_channel.id, voice_channel.id)
embed = Embed(
title=content["ROOM_CONTROL_TITLE"],
description="".join(content["ROOM_CONTROL_DESCRIPTION"]),
color=DiscordColors.EMBED_COLOR,
)
await text_channel.send(embed=embed, components=components)
await ctx.send(content["SUCCESSFULLY_CREATED"])
    @Cog.listener()
    @cog_is_enabled()
    async def on_voice_state_update(self, member: Member, before: VoiceState, after: VoiceState):
        """Create a personal room when a member joins the "create room" channel,
        and settle (delete or hand over) rooms the member just left."""
        guild_data = await self.bot.get_guild_data(member.guild.id)
        private_voice = guild_data.private_voice
        # voice_channel_id is the shared "join to create a room" channel.
        voice_channel_id = private_voice.voice_channel_id
        if after.channel and after.channel.id == voice_channel_id:
            if before.channel:
                # Member moved here from another channel: settle that one first.
                await self._check_channel(member, before, private_voice)
            # Creating a private voice channel
            overwrites = {
                member.guild.default_role: PermissionOverwrite(connect=False),
                member: PermissionOverwrite(manage_channels=True, connect=True, move_members=True),
            }
            channel: VoiceChannel = await after.channel.category.create_voice_channel(
                f"{member.display_name}'s channel", overwrites=overwrites
            )
            await member.move_to(channel)
            await private_voice.set_private_voice_channel(member.id, channel.id)
            return
        if before.channel:
            await self._check_channel(member, before, private_voice)
    async def _check_channel(
        self, member: Member, before: VoiceState, private_voice: GuildPrivateVoice
    ):
        """Delete the member's now-empty private room, or transfer ownership
        of a still-occupied room to its first remaining member."""
        # Only act if the member owns a room and actually left that room.
        if not (channel_id := private_voice.active_channels.get(str(member.id))):
            return
        if before.channel.id != channel_id:
            return
        if not before.channel.members:
            # Room is empty: remove it entirely.
            await before.channel.delete()
            await private_voice.delete_private_voice_channel(member.id)
            return
        # Room still occupied: hand ownership to the first remaining member.
        first_member = before.channel.members[0]
        await private_voice.set_private_voice_channel(first_member.id, before.channel.id)
        await before.channel.set_permissions(
            member, manage_channels=False, connect=False, move_members=False
        )
        await before.channel.set_permissions(
            first_member, manage_channels=True, connect=True, move_members=True
        )
@Cog.listener()
@cog_is_enabled()
async def on_button_click(self, ctx: ComponentContext):
if not ctx.custom_id.startswith("voice"):
return
guild_data = await self.bot.get_guild_data(ctx.guild_id)
active_channels = guild_data.private_voice.active_channels
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if str(ctx.author_id) not in active_channels:
return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
match ctx.custom_id:
case "voice_close":
await voice_channel.set_permissions(ctx.guild.default_role, connect=False)
await ctx.send(content["ROOM_CLOSED"], hidden=True)
case "voice_open":
await voice_channel.set_permissions(ctx.guild.default_role, connect=True)
await ctx.send(content["ROOM_OPENED"], hidden=True)
case "voice_hide":
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=False)
await ctx.send(content["ROOM_HIDED"], hidden=True)
case "voice_unhide":
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=True)
await ctx.send(content["ROOM_UNHIDED"], hidden=True)
case "voice_change_room_name":
modal = Modal(
custom_id="voice_modal_change_room_name",
title=content["PRIVATE_ROOM_CONTROL_MODAL"],
components=[
TextInput(
custom_id="channel_name",
label=content["ROOM_NAME"],
style=TextInputStyle.SHORT,
)
],
)
await ctx.popup(modal)
case "voice_ban" | "voice_unban" | "voice_kick" | "voice_transfer":
modal = Modal(
custom_id=f"voice_modal_{ctx.custom_id.replace("voice", "")}",
title=content["PRIVATE_ROOM_CONTROL_MODAL"],
components=[
TextInput(
custom_id="user_id",
label=content["MEMBER_ID"],
style=TextInputStyle.SHORT,
)
],
)
await ctx.popup(modal)
case "voice_set_room_limit":
select = Select(
custom_id="voice_select_set_room_limit",
options=[
SelectOption(label=content["REMOVE_LIMIT"], value=0),
SelectOption(label="2", value=2),
SelectOption(label="3", value=3),
SelectOption(label="4", value=4),
SelectOption(label="5", value=5),
SelectOption(label="10", value=10),
],
)
await ctx.send(content["SETUP_ROOM_LIMIT"], components=[select], hidden=True)
    @Cog.listener()
    @cog_is_enabled()
    async def on_select_option(self, ctx: ComponentContext):
        """Apply the room-limit choice made in the control-panel select menu."""
        if not ctx.custom_id.startswith("voice"):
            return
        guild_data = await self.bot.get_guild_data(ctx.guild_id)
        active_channels = guild_data.private_voice.active_channels
        content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
        if str(ctx.author_id) not in active_channels:
            return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
        voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
        # NOTE(review): ctx.values[0] comes from the select component and is
        # presumably a string — confirm the library coerces it for user_limit.
        await voice_channel.edit(user_limit=ctx.values[0])
        await ctx.send(content["LIMIT_WAS_SETUP"], hidden=True)
@Cog.listener(name="on_modal")
@cog_is_enabled()
async def on_voice_modal(self, ctx: ModalContext):
if not ctx.custom_id.startswith("voice"):
return
await ctx.defer(hidden=True)
guild_data = await self.bot.get_guild_data(ctx.guild_id)
voice_channel_id = guild_data.private_voice.active_channels.get(str(ctx.author_id))
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if voice_channel_id is None:
return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
voice_channel: VoiceChannel = ctx.guild.get_channel(voice_channel_id)
if channel_name := ctx.values.get("channel_name"):
await voice_channel.edit(name=channel_name)
return await ctx.send(content["ROOM_NAME_WAS_SETUP"], hidden=True)
user_id: str = ctx.values["user_id"]
if not user_id.isdigit():
return await ctx.send(content["NOT_ID"], hidden=True)
member: Member = ctx.guild.get_member(int(user_id))
if member is None:
return await ctx.send(content["NOT_MEMBER_ID"], hidden=True)
match ctx.custom_id:
case "voice_modal_ban":
await voice_channel.set_permissions(member, connect=False)
if member.voice and member.voice.channel.id == voice_channel_id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_BANNED"], hidden=True)
case "voice_modal_unban":
await voice_channel.set_permissions(member, connect=True)
await ctx.send(content["MEMBER_WAS_UNBANNED"], hidden=True)
case "voice_modal_kick":
if member.voice and member.voice.channel.id == voice_channel_id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_KICKED"], hidden=True)
case "voice_modal_transfer":
await guild_data.private_voice.set_private_voice_channel(user_id, voice_channel_id)
await voice_channel.set_permissions(
member, manage_channels=True, connect=True, move_members=True
)
await voice_channel.set_permissions(
ctx.author, manage_channels=False, connect=False, move_members=False
)
await ctx.send(content["OWNERSHIP_TRANSFERED"], hidden=True)
def setup(bot):
bot.add_cog(PrivateRooms(bot))
| from typing import Tuple, Union
from discord import Embed, Member, PermissionOverwrite, TextChannel, VoiceChannel, VoiceState
from discord.ext.commands import bot_has_guild_permissions
from discord_slash import (
Button,
ComponentContext,
Modal,
ModalContext,
Select,
SelectOption,
SlashCommandOptionType,
SlashContext,
TextInput,
TextInputStyle,
)
from discord_slash.cog_ext import cog_subcommand as slash_subcommand
from discord_slash.utils.manage_commands import create_option
from utils import (
AsteroidBot,
Cog,
DiscordColors,
DontHavePrivateRoom,
GuildData,
GuildPrivateVoice,
bot_owner_or_permissions,
cog_is_enabled,
get_content,
is_enabled,
)
class PrivateRooms(Cog):
def __init__(self, bot: AsteroidBot) -> None:
self.bot = bot
self.emoji = "🔊"
self.name = "PrivateRooms"
async def __check(
self, ctx: SlashContext, *, return_guild_data: bool = False
) -> Union[Tuple[VoiceChannel, dict], Tuple[VoiceChannel, dict, GuildData]]:
guild_data = await self.bot.get_guild_data(ctx.guild_id)
active_channels = guild_data.private_voice.active_channels
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if str(ctx.author_id) not in active_channels:
raise DontHavePrivateRoom
voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
if return_guild_data:
return voice_channel, content, guild_data
return voice_channel, content
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="close",
description="Closes your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_close(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, connect=False)
await ctx.send(content["ROOM_CLOSED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="open",
description="Opens your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_open(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, connect=True)
await ctx.send(content["ROOM_OPENED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="hide",
description="Hides your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_hide(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=False)
await ctx.send(content["ROOM_HIDED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="unhide",
description="Unhides your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_unhide(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=True)
await ctx.send(content["ROOM_UNHIDED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="change_name",
description="Change room name",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_change__name(self, ctx: SlashContext, name: str):
voice_channel, content = await self.__check(ctx)
await voice_channel.edit(name=name)
await ctx.send(content["ROOM_NAME_WAS_SETUP"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="ban",
description="Bans member to room",
)
@is_enabled()
@bot_has_guild_permissions(move_members=True, manage_channels=True)
async def private__rooms_control_ban(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(member, connect=False)
if member.voice and member.voice.channel.id == voice_channel.id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_BANNED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="unban",
description="Unbans member from room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_unban(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(member, connect=True)
await ctx.send(content["MEMBER_WAS_UNBANNED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="kick",
description="Kicks a member from room",
)
@is_enabled()
@bot_has_guild_permissions(move_members=True, manage_channels=True)
async def private__rooms_control_kick(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
if member.voice and member.voice.channel.id == voice_channel.id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_KICKED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="transfer_ownership",
description="Transfer room ownership",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_transfer__ownership(self, ctx: SlashContext, member: Member):
voice_channel, content, guild_data = await self.__check(ctx, return_guild_data=True)
await guild_data.private_voice.set_private_voice_channel(member.id, voice_channel.id)
await voice_channel.set_permissions(
member, manage_channels=True, connect=True, move_members=True
)
await voice_channel.set_permissions(
ctx.author, manage_channels=False, connect=False, move_members=False
)
await ctx.send(content["OWNERSHIP_TRANSFERED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="set_limit",
description="Sets room limit",
options=[
create_option(
name="limit",
description="The limit of members in your room",
option_type=SlashCommandOptionType.INTEGER,
min_value=1,
max_value=99,
)
],
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_set__limit(self, ctx: SlashContext, limit: int):
voice_channel, content = await self.__check(ctx)
await voice_channel.edit(user_limit=limit)
await ctx.send(content["LIMIT_WAS_SETUP"], hidden=True)
@slash_subcommand(
base="private_rooms",
name="create_menu",
description="Creates a control menu",
)
@is_enabled()
@bot_has_guild_permissions(move_members=True, manage_channels=True)
@bot_owner_or_permissions(manage_guild=True)
async def private__rooms_create__menu(self, ctx: SlashContext):
await ctx.defer(hidden=True)
guild_data = await self.bot.get_guild_data(ctx.guild_id)
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
components = [
[
Button(emoji=self.bot.get_emoji(959124362840113182), custom_id="voice_close"),
Button(emoji=self.bot.get_emoji(959124362890461254), custom_id="voice_open"),
Button(emoji=self.bot.get_emoji(959124362890461325), custom_id="voice_hide"),
Button(emoji=self.bot.get_emoji(959124362890473582), custom_id="voice_unhide"),
Button(
emoji=self.bot.get_emoji(959124362798174319), custom_id="voice_change_room_name"
),
],
[
Button(emoji=self.bot.get_emoji(959124362882068550), custom_id="voice_ban"),
Button(emoji=self.bot.get_emoji(959124362835931236), custom_id="voice_unban"),
Button(emoji=self.bot.get_emoji(959124362974343169), custom_id="voice_kick"),
Button(emoji=self.bot.get_emoji(959124362823340052), custom_id="voice_transfer"),
Button(
emoji=self.bot.get_emoji(959124362835927080), custom_id="voice_set_room_limit"
),
],
]
category = await ctx.guild.create_category(content["PRIVATE_ROOMS"])
voice_channel = await category.create_voice_channel(content["CREATE_ROOM"])
overwrites = {
ctx.guild.default_role: PermissionOverwrite(
send_messages=False, use_slash_commands=False
)
}
text_channel: TextChannel = await category.create_text_channel(
content["ROOM_CONTROL"], overwrites=overwrites
)
await guild_data.create_private_voice(text_channel.id, voice_channel.id)
embed = Embed(
title=content["ROOM_CONTROL_TITLE"],
description="".join(content["ROOM_CONTROL_DESCRIPTION"]),
color=DiscordColors.EMBED_COLOR,
)
await text_channel.send(embed=embed, components=components)
await ctx.send(content["SUCCESSFULLY_CREATED"])
@Cog.listener()
@cog_is_enabled()
async def on_voice_state_update(self, member: Member, before: VoiceState, after: VoiceState):
guild_data = await self.bot.get_guild_data(member.guild.id)
private_voice = guild_data.private_voice
voice_channel_id = private_voice.voice_channel_id
if after.channel and after.channel.id == voice_channel_id:
if before.channel:
await self._check_channel(member, before, private_voice)
# Creating a private voice channel
overwrites = {
member.guild.default_role: PermissionOverwrite(connect=False),
member: PermissionOverwrite(manage_channels=True, connect=True, move_members=True),
}
channel: VoiceChannel = await after.channel.category.create_voice_channel(
f"{member.display_name}'s channel", overwrites=overwrites
)
await member.move_to(channel)
await private_voice.set_private_voice_channel(member.id, channel.id)
return
if before.channel:
await self._check_channel(member, before, private_voice)
async def _check_channel(
self, member: Member, before: VoiceState, private_voice: GuildPrivateVoice
):
if not (channel_id := private_voice.active_channels.get(str(member.id))):
return
if before.channel.id != channel_id:
return
if not before.channel.members:
await before.channel.delete()
await private_voice.delete_private_voice_channel(member.id)
return
first_member = before.channel.members[0]
await private_voice.set_private_voice_channel(first_member.id, before.channel.id)
await before.channel.set_permissions(
member, manage_channels=False, connect=False, move_members=False
)
await before.channel.set_permissions(
first_member, manage_channels=True, connect=True, move_members=True
)
@Cog.listener()
@cog_is_enabled()
async def on_button_click(self, ctx: ComponentContext):
if not ctx.custom_id.startswith("voice"):
return
guild_data = await self.bot.get_guild_data(ctx.guild_id)
active_channels = guild_data.private_voice.active_channels
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if str(ctx.author_id) not in active_channels:
return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
match ctx.custom_id:
case "voice_close":
await voice_channel.set_permissions(ctx.guild.default_role, connect=False)
await ctx.send(content["ROOM_CLOSED"], hidden=True)
case "voice_open":
await voice_channel.set_permissions(ctx.guild.default_role, connect=True)
await ctx.send(content["ROOM_OPENED"], hidden=True)
case "voice_hide":
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=False)
await ctx.send(content["ROOM_HIDED"], hidden=True)
case "voice_unhide":
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=True)
await ctx.send(content["ROOM_UNHIDED"], hidden=True)
case "voice_change_room_name":
modal = Modal(
custom_id="voice_modal_change_room_name",
title=content["PRIVATE_ROOM_CONTROL_MODAL"],
components=[
TextInput(
custom_id="channel_name",
label=content["ROOM_NAME"],
style=TextInputStyle.SHORT,
)
],
)
await ctx.popup(modal)
case "voice_ban" | "voice_unban" | "voice_kick" | "voice_transfer":
modal = Modal(
custom_id=f"voice_modal_{ctx.custom_id.replace('voice', '')}",
title=content["PRIVATE_ROOM_CONTROL_MODAL"],
components=[
TextInput(
custom_id="user_id",
label=content["MEMBER_ID"],
style=TextInputStyle.SHORT,
)
],
)
await ctx.popup(modal)
case "voice_set_room_limit":
select = Select(
custom_id="voice_select_set_room_limit",
options=[
SelectOption(label=content["REMOVE_LIMIT"], value=0),
SelectOption(label="2", value=2),
SelectOption(label="3", value=3),
SelectOption(label="4", value=4),
SelectOption(label="5", value=5),
SelectOption(label="10", value=10),
],
)
await ctx.send(content["SETUP_ROOM_LIMIT"], components=[select], hidden=True)
@Cog.listener()
@cog_is_enabled()
async def on_select_option(self, ctx: ComponentContext):
if not ctx.custom_id.startswith("voice"):
return
guild_data = await self.bot.get_guild_data(ctx.guild_id)
active_channels = guild_data.private_voice.active_channels
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if str(ctx.author_id) not in active_channels:
return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
await voice_channel.edit(user_limit=ctx.values[0])
await ctx.send(content["LIMIT_WAS_SETUP"], hidden=True)
@Cog.listener(name="on_modal")
@cog_is_enabled()
async def on_voice_modal(self, ctx: ModalContext):
if not ctx.custom_id.startswith("voice"):
return
await ctx.defer(hidden=True)
guild_data = await self.bot.get_guild_data(ctx.guild_id)
voice_channel_id = guild_data.private_voice.active_channels.get(str(ctx.author_id))
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if voice_channel_id is None:
return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
voice_channel: VoiceChannel = ctx.guild.get_channel(voice_channel_id)
if channel_name := ctx.values.get("channel_name"):
await voice_channel.edit(name=channel_name)
return await ctx.send(content["ROOM_NAME_WAS_SETUP"], hidden=True)
user_id: str = ctx.values["user_id"]
if not user_id.isdigit():
return await ctx.send(content["NOT_ID"], hidden=True)
member: Member = ctx.guild.get_member(int(user_id))
if member is None:
return await ctx.send(content["NOT_MEMBER_ID"], hidden=True)
match ctx.custom_id:
case "voice_modal_ban":
await voice_channel.set_permissions(member, connect=False)
if member.voice and member.voice.channel.id == voice_channel_id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_BANNED"], hidden=True)
case "voice_modal_unban":
await voice_channel.set_permissions(member, connect=True)
await ctx.send(content["MEMBER_WAS_UNBANNED"], hidden=True)
case "voice_modal_kick":
if member.voice and member.voice.channel.id == voice_channel_id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_KICKED"], hidden=True)
case "voice_modal_transfer":
await guild_data.private_voice.set_private_voice_channel(user_id, voice_channel_id)
await voice_channel.set_permissions(
member, manage_channels=True, connect=True, move_members=True
)
await voice_channel.set_permissions(
ctx.author, manage_channels=False, connect=False, move_members=False
)
await ctx.send(content["OWNERSHIP_TRANSFERED"], hidden=True)
def setup(bot):
bot.add_cog(PrivateRooms(bot))
|
from string import ascii_lowercase
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from plotly.colors import hex_to_rgb
from src.timeit import timeit
@timeit
def plotOverTime(FSCPData: pd.DataFrame, FSCPDataSteel: pd.DataFrame, config: dict):
# select which lines to plot based on function argument
FSCPsCols, plotFSCP, plotLines = __selectPlotFSCPs(FSCPData, config['showFSCPs'], config['refFuelTop'],
config['n_samples'])
FSCPsCols, plotFSCPSteel, plotLinesSteel = __selectPlotFSCPs(FSCPDataSteel, config['showFSCPs'],
config['refFuelBottom'], config['n_samples'])
# produce figure
fig = __produceFigure(FSCPsCols, plotFSCP, plotFSCPSteel, plotLines, plotLinesSteel, config)
# styling figure
__styling(fig, config)
return {'fig3': fig}
def __selectPlotFSCPs(FSCPData: pd.DataFrame, showFSCPs: dict, refFuel: str, n_samples: int):
FSCPsCols = [None] * len(showFSCPs)
listOfFSCPs = pd.DataFrame(columns=(FSCPData.keys().tolist() + ['plotIndex']))
for index, args in enumerate(showFSCPs):
cols, fuel_x, fuel_y = args
if fuel_x == 'ref': fuel_x = refFuel
addFSCP = FSCPData.query(f"fuel_x=='{fuel_x}' & fuel_y=='{fuel_y}' & year_x==year_y").reset_index(drop=True)
if fuel_x == refFuel: addFSCP.loc[:, 'fuel_x'] = 'ref'
addFSCP.insert(1, 'plotIndex', len(addFSCP) * [index])
FSCPsCols[index] = cols
listOfFSCPs = pd.concat([listOfFSCPs, addFSCP], ignore_index=True)
# year_x == year_y, so we only need one of them from now on
listOfFSCPs['year'] = listOfFSCPs['year_x']
# return FSCPs for scatter plots
plotFSCP = listOfFSCPs[['plotIndex', 'fuel_x', 'fuel_y', 'year', 'fscp', 'fscp_uu', 'fscp_ul']]
# return costs and GHGIs for line plots
plotLines = listOfFSCPs[['plotIndex', 'fuel_x', 'fuel_y', 'year', 'cost_x', 'cost_y', 'ghgi_x', 'ghgi_y']]
# interpolation of plotLines
t = np.linspace(plotLines['year'].min(), plotLines['year'].max(), n_samples)
dtypes = {'year': float, 'cost_x': float, 'cost_y': float, 'ghgi_x': float, 'ghgi_y': float}
allEntries = []
for index in plotLines['plotIndex'].unique():
samples = plotLines.query(f"plotIndex=={index}").reset_index(drop=True).astype(dtypes)
fuel_x = samples.fuel_x.iloc[0]
fuel_y = samples.fuel_y.iloc[0]
new = dict(
plotIndex=n_samples * [int(index)],
fuel_x=n_samples * [fuel_x],
fuel_y=n_samples * [fuel_y],
year=t,
)
tmp = pd.DataFrame(new, columns=plotLines.keys())
tmp.index = np.arange(len(samples), len(tmp) + len(samples))
tmp = tmp.merge(samples, how='outer').sort_values(by=['year']).astype(dtypes)
allEntries.append(tmp.interpolate())
plotLinesInterpolated = pd.concat(allEntries, ignore_index=True)
plotLinesInterpolated['fscp'] = (plotLinesInterpolated['cost_x'] - plotLinesInterpolated['cost_y']) / (
plotLinesInterpolated['ghgi_y'] - plotLinesInterpolated['ghgi_x'])
return FSCPsCols, plotFSCP, plotLinesInterpolated
def __produceFigure(FSCPsCols: list, plotFSCP: pd.DataFrame, plotFSCPSteel: pd.DataFrame,
plotLines: pd.DataFrame, plotLinesSteel: pd.DataFrame, config: dict):
# plot
fig = make_subplots(
rows=2,
cols=2,
subplot_titles=ascii_lowercase,
shared_yaxes=True,
horizontal_spacing=0.025,
vertical_spacing=0.1,
)
# add FSCP traces for heating
traces = __addFSCPTraces(plotFSCP, plotLines, len(FSCPsCols), config['refFuelTop'], config)
for id, trace in traces:
for j, col in enumerate(FSCPsCols[id]):
if j: trace.showlegend = False
fig.add_trace(trace, row=1, col=col)
# add FSCP traces for steel
traces = __addFSCPTraces(plotFSCPSteel, plotLinesSteel, len(FSCPsCols), config['refFuelBottom'], config)
for id, trace in traces:
for j, col in enumerate(FSCPsCols[id]):
trace.showlegend = False
fig.add_trace(trace, row=2, col=col)
# compute and plot carbon price tracjetory
cpTrajData = __computeCPTraj(config['co2price_traj']['years'], config['co2price_traj']['values'], config['n_samples'])
traces = __addCPTraces(cpTrajData, config)
for trace in traces:
for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
if i or j: trace.showlegend = False
fig.add_trace(trace, row=i + 1, col=j + 1)
# zero y line
for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
fig.add_hline(0.0, line_width=config['global']['lw_thin'], line_color='black', row=i + 1, col=j + 1)
# add text annotations explaining figure content
annotationStyling = dict(xanchor='center', yanchor='middle', showarrow=False,
bordercolor='black', borderwidth=2, borderpad=3, bgcolor='white')
for i in range(2):
axisNumb = str(i+1) if i else ''
blueTech = config['annotationLabels']['blueTechs'][i]
fig.add_annotation(
x=0.50,
xref=f"x{axisNumb} domain",
y=1.15,
yref=f"y{axisNumb} domain",
text=f"Blue H<sub>2</sub> from {blueTech}",
**annotationStyling
)
for i in range(2):
axisNumb = str(i+2) if i else ''
application = config['annotationLabels']['applications'][i]
fig.add_annotation(
x=-0.17,
xref=f"x{axisNumb} domain",
y=0.5,
yref=f"y{axisNumb} domain",
text=f"{application}",
textangle=-90,
**annotationStyling
)
# add circles on intersects
__addAnnotations(fig, cpTrajData, plotLines, plotLinesSteel, config)
# add arrows in 2025
__addAnnotationArrows(fig, config)
# add legend for annotations
__addAnnotationsLegend(fig, config)
# update axes titles and ranges
fig.update_layout(
xaxis=dict(
title=config['labels']['time'],
range=[config['plotting']['t_min'], config['plotting']['t_max']]
),
xaxis2=dict(
title=config['labels']['time'],
range=[config['plotting']['t_min'], config['plotting']['t_max']]
),
xaxis3=dict(
title=config['labels']['time'],
range=[config['plotting']['t_min'], config['plotting']['t_max']]
),
xaxis4=dict(
title=config['labels']['time'],
range=[config['plotting']['t_min'], config['plotting']['t_max']]
),
yaxis=dict(
title=config['labels']['fscp'],
range=[config['plotting']['fscp_min'], config['plotting']['fscp_max']]
),
yaxis3=dict(
title=config['labels']['fscp_steel'],
range=[config['plotting']['fscp_min'], config['plotting']['fscp_max']]
),
margin_l=180.0,
margin_b=520.0,
)
return fig
def __addAnnotations(fig: go.Figure, cpTrajData: pd.DataFrame, plotLines: pd.DataFrame, plotLinesSteel: pd.DataFrame, config: dict):
traceArgs = [
dict(row=1, col=1, lines=plotLines, anno=config['annotationFuels']['left']),
dict(row=1, col=2, lines=plotLines, anno=config['annotationFuels']['right']),
dict(row=2, col=1, lines=plotLinesSteel, anno=config['annotationFuels']['left']),
dict(row=2, col=2, lines=plotLinesSteel, anno=config['annotationFuels']['right']),
]
for args in traceArgs:
points = __calcPoints(cpTrajData, args['lines'], args['anno'])
data = pd.DataFrame(points).T
fig.add_trace(go.Scatter(
x=data.year,
y=data.fscp,
text=data.index,
mode='markers+text',
marker=dict(symbol='circle-open', size=config['global']['highlight_marker'], line={'width': config['global']['lw_thin']}, color='Black'),
textposition='bottom center',
showlegend=False,
# hovertemplate = f"{name}<br>Carbon price: %{{x:.2f}}±%{{error_x.array:.2f}}<extra></extra>",
), row=args['row'], col=args['col'])
def __calcPoints(cpTrajData: pd.DataFrame, plotLines: pd.DataFrame, fuels: list) -> dict:
points = {}
fuelRef, fuelGreen, fuelBlue = fuels
dropCols = ['plotIndex', 'fuel_x', 'fuel_y', 'cost_x', 'cost_y', 'ghgi_x', 'ghgi_y']
greenLine = plotLines.query(f"fuel_x=='{fuelRef}' & fuel_y=='{fuelGreen}'").drop(columns=dropCols).reset_index(drop=True)
blueLine = plotLines.query(f"fuel_x=='{fuelRef}' & fuel_y=='{fuelBlue}'").drop(columns=dropCols).reset_index(drop=True)
redLine = plotLines.query(f"fuel_x=='{fuelBlue}' & fuel_y=='{fuelGreen}'").drop(columns=dropCols).reset_index(drop=True)
purpleLine = cpTrajData.drop(columns=['name', 'CP_u', 'CP_l'])
for i, line in enumerate([blueLine, greenLine, redLine]):
diffLines = pd.merge(line, purpleLine, on=['year'])
diffLines['delta'] = (diffLines['fscp'] - diffLines['CP']).abs()
points[i+2] = diffLines.nsmallest(1, 'delta').drop(columns=['CP', 'delta']).iloc[0]
diffLines = pd.merge(blueLine, greenLine, on=['year'], suffixes=('', '_right'))
diffLines['delta'] = (diffLines['fscp'] - diffLines['fscp_right']).abs()
points[5] = diffLines.nsmallest(1, 'delta').drop(columns=['fscp_right', 'delta']).iloc[0]
points[6] = redLine.abs().nsmallest(1, 'fscp').iloc[0]
return points
def __addAnnotationArrows(fig: go.Figure, config: dict):
__addArrow(fig, 2025.0, 150.0, 600.0, 1, 1, config)
__addArrow(fig, 2025.5, 150.0, 800.0, 1, 1, config)
fig.add_annotation(text='1', x=2024.5, y=200.0, row=1, col=1, showarrow=False)
__addArrow(fig, 2025.0, 150.0, 300.0, 1, 2, config)
__addArrow(fig, 2025.5, 150.0, 800.0, 1, 2, config)
fig.add_annotation(text='1', x=2024.5, y=200.0, row=1, col=2, showarrow=False)
__addArrow(fig, 2024.5, 90.0, 200.0, 2, 1, config)
fig.add_annotation(text='1', x=2024.0, y=150.0, row=2, col=1, showarrow=False)
__addArrow(fig, 2024.5, 90.0, 200.0, 2, 2, config)
fig.add_annotation(text='1', x=2024.0, y=150.0, row=2, col=2, showarrow=False)
def __addArrow(fig: go.Figure, x: float, y1: float, y2: float, row: int, col: int, config: dict):
xaxes = [['x', 'x2'], ['x3', 'x4']]
yaxes = [['y', 'y2'], ['y3', 'y4']]
for ay, y in [(y1, y2), (y2, y1)]:
fig.add_annotation(
axref=xaxes[row-1][col-1],
xref=xaxes[row-1][col-1],
ayref=yaxes[row-1][col-1],
yref=yaxes[row-1][col-1],
ax=x,
x=x,
ay=ay,
y=y,
arrowcolor='black',
arrowwidth=config['global']['lw_thin'],
#arrowsize=config['global']['highlight_marker_sm'],
arrowhead=2,
showarrow=True,
row=row,
col=col,
)
def __addAnnotationsLegend(fig: go.Figure, config: dict):
y0 = -0.40
fig.add_shape(
type='rect',
x0=0.0,
y0=y0,
x1=0.80,
y1=y0-0.2,
xref='paper',
yref='paper',
line_width=2,
fillcolor='white',
)
fig.add_annotation(
text=f"<b>{config["annotationTexts"]["heading1"]}:</b><br><br><br><b>{config["annotationTexts"]["heading2"]}:</b>",
align='left',
xanchor='left',
x=0.0,
yanchor='top',
y=y0,
xref='paper',
yref='paper',
showarrow=False,
)
for i in range(6):
fig.add_annotation(
text=f"{i+1}: "+config['annotationTexts'][f"point{i+1}"],
align='left',
xanchor='left',
x=0.0 + i%3 * 0.22,
yanchor='top',
y=y0-(0.03 if i<3 else 0.13),
xref='paper',
yref='paper',
showarrow=False,
)
def __addFSCPTraces(plotData: pd.DataFrame, plotLines: pd.DataFrame, n_lines: int, refFuel: str, config: dict, sensitivityNG: bool = False):
traces = []
for index in range(n_lines):
thisDataScatter = plotData.query(f"plotIndex=={index}").reset_index(drop=True)
thisDataLine = plotLines.query(f"plotIndex=={index}").reset_index(drop=True)
# styling of individual lines
truncated = (thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB' and thisDataScatter.loc[0, 'fuel_y'] == 'green RE') or \
thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB lowscco2'
dashed = thisDataScatter.loc[0, 'fuel_y'] in ['green pure RE', 'blue LEB lowscco2']
longdashed = thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB lowscco2'
shift = 0
if thisDataScatter.loc[0, 'fuel_y'] == 'green RE':
if thisDataScatter.loc[0, 'fuel_x'] == 'ref':
shift = -1
else:
shift = +1
elif thisDataScatter.loc[0, 'fuel_y'] == 'green pure RE':
shift = +2
thisDataScatter = thisDataScatter.query(f"year<=2035")
thisDataLine = thisDataLine.query(f"year<=2035")
# line properties
fuel_x = thisDataScatter.iloc[thisDataScatter.first_valid_index()]['fuel_x']
fuel_y = thisDataScatter.iloc[0]['fuel_y']
name = f"Fossil→{config["names"][fuel_y]}" if fuel_x == 'ref' else f"{config["names"][fuel_x]}→{config["names"][fuel_y]}"
col = config['fscp_colours'][f"{fuel_x} to {fuel_y}"] if f"{fuel_x} to {fuel_y}" in config['fscp_colours'] else \
config['colours'][fuel_y]
# do not plot awkward red line in sensitivity analysis row 2
if sensitivityNG and fuel_x == 'blue LEB':
continue
# scatter plot
traces.append((index, go.Scatter(
x=thisDataScatter['year'],
y=thisDataScatter['fscp'],
name=name,
legendgroup=0 if fuel_x == 'ref' else 1,
showlegend=False,
mode='markers',
line=dict(color=col, width=config['global']['lw_default'], dash='dot' if dashed else 'solid'),
marker=dict(symbol='x-thin', size=config['global']['highlight_marker_sm'], line={'width': config['global']['lw_thin'], 'color': col}, ),
hovertemplate=f"<b>{name}</b><br>Year: %{{x:d}}<br>FSCP: %{{y:.2f}}±%{{error_y.array:.2f}}<extra></extra>",
)))
# remove unphysical negative FSCPs
if truncated and not sensitivityNG:
thisDataLine = thisDataLine.query(f"(year>=2030 & fscp>0.0) | year>=2040")
# line plot
traces.append((index, go.Scatter(
x=thisDataLine['year'],
y=thisDataLine['fscp'],
legendgroup=0 if fuel_x == 'ref' else 1,
legendgrouptitle=dict(text=f"<b>{config["legendlabels"][0]}:</b>" if fuel_x=='ref' else f"<b>{config["legendlabels"][0]}:</b>"),
name=name,
mode='lines',
line=dict(color=col, width=config['global']['lw_default'], dash='dot' if dashed else 'dash' if longdashed else 'solid'),
)))
# error bars
thisDataScatter = thisDataScatter.query(f"year==[2030,2040,2050]")
thisDataScatter = thisDataScatter.query(f"fscp<={config["plotting"]["fscp_max"]} and (fscp>0.0 | year > 2040)")
traces.append((index, go.Scatter(
x=thisDataScatter['year'] + shift * 0.1,
y=thisDataScatter['fscp'],
error_y=dict(type='data', array=thisDataScatter['fscp_uu'], arrayminus=thisDataScatter['fscp_ul'],
thickness=config['global']['lw_thin']),
name=name,
legendgroup=0 if fuel_x == 'ref' else 1,
showlegend=False,
mode='markers',
marker=dict(symbol='x-thin', size=0.00001,),
line_color=('rgba({}, {}, {}, {})'.format(*hex_to_rgb(col), .4)),
hovertemplate=f"<b>{name}</b><br>Year: %{{x:d}}<br>FSCP: %{{y:.2f}}±%{{error_y.array:.2f}}<extra></extra>",
)))
return traces
# compute carbon price trajectories
def __computeCPTraj(years: list, values: dict, n_samples: int):
v_mean = []
v_upper = []
v_lower = []
for i, year in enumerate(years):
vals = [v[i] for v in values.values()]
mean = sum(vals)/len(vals)
v_mean.append(mean)
v_upper.append(max(vals)-mean)
v_lower.append(mean-min(vals))
# create data frame with time and cp values
cpData = pd.DataFrame({
'year': years,
'CP': v_mean,
'CP_u': v_upper,
'CP_l': v_lower,
})
# interpolate in between
samples = pd.DataFrame({'year': np.linspace(years[0], years[-1], n_samples)})
dtypes = {'year': float, 'CP': float, 'CP_u': float, 'CP_l': float}
cpData = cpData.merge(samples, how='outer').sort_values(by=['year']).astype(dtypes).interpolate()
# add name to dataframe
cpData['name'] = 'cp'
return cpData
# plot traces
def __addCPTraces(cpTrajData: pd.DataFrame, config: dict):
traces = []
name = config['carbon_price_config']['name']
colour = config['carbon_price_config']['colour']
# add main graphs (FSCP and CP)
traces.append(go.Scatter(
name=name,
legendgroup=1,
mode='lines',
x=cpTrajData['year'],
y=cpTrajData['CP'],
line_color=colour,
line_width=config['global']['lw_thin'],
showlegend=True,
hovertemplate=f"<b>{name}</b><br>Time: %{{x:.2f}}<br>Carbon price: %{{y:.2f}}<extra></extra>"
))
data_x = cpTrajData['year']
data_yu = cpTrajData['CP'] + cpTrajData['CP_u']
data_yl = cpTrajData['CP'] - cpTrajData['CP_l']
errorBand = go.Scatter(
name='Uncertainty Range',
legendgroup=1,
x=pd.concat([data_x, data_x[::-1]], ignore_index=True),
y=pd.concat([data_yl, data_yu[::-1]], ignore_index=True),
mode='lines',
marker=dict(color=colour),
fillcolor=("rgba({}, {}, {}, 0.1)".format(*hex_to_rgb(colour))),
fill='toself',
line=dict(width=config['global']['lw_ultrathin']),
showlegend=False,
hoverinfo='skip'
)
traces.append(errorBand)
return traces
def __styling(fig: go.Figure, config: dict):
# update legend styling
fig.update_layout(
legend=dict(
orientation='h',
xanchor='left',
x=0.0,
yanchor='top',
y=-0.1,
bgcolor='rgba(255,255,255,1.0)',
bordercolor='black',
borderwidth=2,
),
)
# update axis styling
for axis in ['xaxis', 'xaxis2', 'xaxis3', 'xaxis4', 'yaxis', 'yaxis2', 'yaxis3', 'yaxis4']:
update = {axis: dict(
showline=True,
linewidth=2,
linecolor='black',
showgrid=False,
zeroline=False,
mirror=True,
ticks='outside',
)}
fig.update_layout(**update)
# update figure background colour and font colour and type
fig.update_layout(
paper_bgcolor='rgba(255, 255, 255, 1.0)',
plot_bgcolor='rgba(255, 255, 255, 0.0)',
font_color='black',
font_family='Helvetica',
)
# move title annotations
for i, annotation in enumerate(fig['layout']['annotations'][:len(config['subplot_title_positions'])]):
x_pos, y_pos = config['subplot_title_positions'][i]
annotation['xanchor'] = 'left'
annotation['yanchor'] = 'top'
annotation['xref'] = 'paper'
annotation['yref'] = 'paper'
annotation['x'] = x_pos
annotation['y'] = y_pos
annotation['text'] = "<b>{0}</b>".format(annotation['text'])
| from string import ascii_lowercase
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from plotly.colors import hex_to_rgb
from src.timeit import timeit
@timeit
def plotOverTime(FSCPData: pd.DataFrame, FSCPDataSteel: pd.DataFrame, config: dict):
# select which lines to plot based on function argument
FSCPsCols, plotFSCP, plotLines = __selectPlotFSCPs(FSCPData, config['showFSCPs'], config['refFuelTop'],
config['n_samples'])
FSCPsCols, plotFSCPSteel, plotLinesSteel = __selectPlotFSCPs(FSCPDataSteel, config['showFSCPs'],
config['refFuelBottom'], config['n_samples'])
# produce figure
fig = __produceFigure(FSCPsCols, plotFSCP, plotFSCPSteel, plotLines, plotLinesSteel, config)
# styling figure
__styling(fig, config)
return {'fig3': fig}
def __selectPlotFSCPs(FSCPData: pd.DataFrame, showFSCPs: dict, refFuel: str, n_samples: int):
    """Select the FSCP rows to plot and build an interpolated line dataset.

    Returns a triple (FSCPsCols, plotFSCP, plotLinesInterpolated):
    per-entry subplot columns, scatter-point data, and line data resampled
    onto n_samples equidistant years.
    """
    FSCPsCols = [None] * len(showFSCPs)
    listOfFSCPs = pd.DataFrame(columns=(FSCPData.keys().tolist() + ['plotIndex']))
    for index, args in enumerate(showFSCPs):
        cols, fuel_x, fuel_y = args
        # 'ref' is a placeholder for the reference fuel of this panel
        if fuel_x == 'ref': fuel_x = refFuel
        addFSCP = FSCPData.query(f"fuel_x=='{fuel_x}' & fuel_y=='{fuel_y}' & year_x==year_y").reset_index(drop=True)
        # normalise back to the 'ref' label so downstream styling can key on it
        if fuel_x == refFuel: addFSCP.loc[:, 'fuel_x'] = 'ref'
        addFSCP.insert(1, 'plotIndex', len(addFSCP) * [index])
        FSCPsCols[index] = cols
        listOfFSCPs = pd.concat([listOfFSCPs, addFSCP], ignore_index=True)
    # year_x == year_y, so we only need one of them from now on
    listOfFSCPs['year'] = listOfFSCPs['year_x']
    # return FSCPs for scatter plots
    plotFSCP = listOfFSCPs[['plotIndex', 'fuel_x', 'fuel_y', 'year', 'fscp', 'fscp_uu', 'fscp_ul']]
    # return costs and GHGIs for line plots
    plotLines = listOfFSCPs[['plotIndex', 'fuel_x', 'fuel_y', 'year', 'cost_x', 'cost_y', 'ghgi_x', 'ghgi_y']]
    # interpolation of plotLines: resample each plotIndex onto a common year grid
    t = np.linspace(plotLines['year'].min(), plotLines['year'].max(), n_samples)
    dtypes = {'year': float, 'cost_x': float, 'cost_y': float, 'ghgi_x': float, 'ghgi_y': float}
    allEntries = []
    for index in plotLines['plotIndex'].unique():
        samples = plotLines.query(f"plotIndex=={index}").reset_index(drop=True).astype(dtypes)
        fuel_x = samples.fuel_x.iloc[0]
        fuel_y = samples.fuel_y.iloc[0]
        new = dict(
            plotIndex=n_samples * [int(index)],
            fuel_x=n_samples * [fuel_x],
            fuel_y=n_samples * [fuel_y],
            year=t,
        )
        tmp = pd.DataFrame(new, columns=plotLines.keys())
        # place the sample rows after the original rows so interpolate() fills them
        tmp.index = np.arange(len(samples), len(tmp) + len(samples))
        tmp = tmp.merge(samples, how='outer').sort_values(by=['year']).astype(dtypes)
        allEntries.append(tmp.interpolate())
    plotLinesInterpolated = pd.concat(allEntries, ignore_index=True)
    # recompute the FSCP from the interpolated cost and GHGI columns
    plotLinesInterpolated['fscp'] = (plotLinesInterpolated['cost_x'] - plotLinesInterpolated['cost_y']) / (
        plotLinesInterpolated['ghgi_y'] - plotLinesInterpolated['ghgi_x'])
    return FSCPsCols, plotFSCP, plotLinesInterpolated
def __produceFigure(FSCPsCols: list, plotFSCP: pd.DataFrame, plotFSCPSteel: pd.DataFrame,
                    plotLines: pd.DataFrame, plotLinesSteel: pd.DataFrame, config: dict):
    """Assemble the 2x2 subplot figure: heating FSCPs in row 1, steel FSCPs in
    row 2, plus the carbon-price trajectory, annotations, and axis layout."""
    # plot
    fig = make_subplots(
        rows=2,
        cols=2,
        subplot_titles=ascii_lowercase,
        shared_yaxes=True,
        horizontal_spacing=0.025,
        vertical_spacing=0.1,
    )
    # add FSCP traces for heating; only the first column of each entry keeps its legend
    traces = __addFSCPTraces(plotFSCP, plotLines, len(FSCPsCols), config['refFuelTop'], config)
    for id, trace in traces:
        for j, col in enumerate(FSCPsCols[id]):
            if j: trace.showlegend = False
            fig.add_trace(trace, row=1, col=col)
    # add FSCP traces for steel; legends are fully suppressed (row 1 already shows them)
    traces = __addFSCPTraces(plotFSCPSteel, plotLinesSteel, len(FSCPsCols), config['refFuelBottom'], config)
    for id, trace in traces:
        for j, col in enumerate(FSCPsCols[id]):
            trace.showlegend = False
            fig.add_trace(trace, row=2, col=col)
    # compute and plot carbon price trajectory in every subplot (legend only once)
    cpTrajData = __computeCPTraj(config['co2price_traj']['years'], config['co2price_traj']['values'], config['n_samples'])
    traces = __addCPTraces(cpTrajData, config)
    for trace in traces:
        for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
            if i or j: trace.showlegend = False
            fig.add_trace(trace, row=i + 1, col=j + 1)
    # zero y line
    for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        fig.add_hline(0.0, line_width=config['global']['lw_thin'], line_color='black', row=i + 1, col=j + 1)
    # add text annotations explaining figure content
    annotationStyling = dict(xanchor='center', yanchor='middle', showarrow=False,
                             bordercolor='black', borderwidth=2, borderpad=3, bgcolor='white')
    # column headers: blue-H2 technology above each column
    for i in range(2):
        axisNumb = str(i+1) if i else ''
        blueTech = config['annotationLabels']['blueTechs'][i]
        fig.add_annotation(
            x=0.50,
            xref=f"x{axisNumb} domain",
            y=1.15,
            yref=f"y{axisNumb} domain",
            text=f"Blue H<sub>2</sub> from {blueTech}",
            **annotationStyling
        )
    # row labels: application name left of each row (rotated)
    for i in range(2):
        axisNumb = str(i+2) if i else ''
        application = config['annotationLabels']['applications'][i]
        fig.add_annotation(
            x=-0.17,
            xref=f"x{axisNumb} domain",
            y=0.5,
            yref=f"y{axisNumb} domain",
            text=f"{application}",
            textangle=-90,
            **annotationStyling
        )
    # add circles on intersects
    __addAnnotations(fig, cpTrajData, plotLines, plotLinesSteel, config)
    # add arrows in 2025
    __addAnnotationArrows(fig, config)
    # add legend for annotations
    __addAnnotationsLegend(fig, config)
    # update axes titles and ranges
    fig.update_layout(
        xaxis=dict(
            title=config['labels']['time'],
            range=[config['plotting']['t_min'], config['plotting']['t_max']]
        ),
        xaxis2=dict(
            title=config['labels']['time'],
            range=[config['plotting']['t_min'], config['plotting']['t_max']]
        ),
        xaxis3=dict(
            title=config['labels']['time'],
            range=[config['plotting']['t_min'], config['plotting']['t_max']]
        ),
        xaxis4=dict(
            title=config['labels']['time'],
            range=[config['plotting']['t_min'], config['plotting']['t_max']]
        ),
        yaxis=dict(
            title=config['labels']['fscp'],
            range=[config['plotting']['fscp_min'], config['plotting']['fscp_max']]
        ),
        yaxis3=dict(
            title=config['labels']['fscp_steel'],
            range=[config['plotting']['fscp_min'], config['plotting']['fscp_max']]
        ),
        margin_l=180.0,
        margin_b=520.0,
    )
    return fig
def __addAnnotations(fig: go.Figure, cpTrajData: pd.DataFrame, plotLines: pd.DataFrame, plotLinesSteel: pd.DataFrame, config: dict):
    """Mark the milestone points (computed by __calcPoints) with numbered
    open circles in each of the four subplots."""
    traceArgs = [
        dict(row=1, col=1, lines=plotLines, anno=config['annotationFuels']['left']),
        dict(row=1, col=2, lines=plotLines, anno=config['annotationFuels']['right']),
        dict(row=2, col=1, lines=plotLinesSteel, anno=config['annotationFuels']['left']),
        dict(row=2, col=2, lines=plotLinesSteel, anno=config['annotationFuels']['right']),
    ]
    for args in traceArgs:
        points = __calcPoints(cpTrajData, args['lines'], args['anno'])
        # rows = point labels (dict keys), columns = year/fscp after transpose
        data = pd.DataFrame(points).T
        fig.add_trace(go.Scatter(
            x=data.year,
            y=data.fscp,
            text=data.index,
            mode='markers+text',
            marker=dict(symbol='circle-open', size=config['global']['highlight_marker'], line={'width': config['global']['lw_thin']}, color='Black'),
            textposition='bottom center',
            showlegend=False,
            # hovertemplate = f"{name}<br>Carbon price: %{{x:.2f}}±%{{error_x.array:.2f}}<extra></extra>",
        ), row=args['row'], col=args['col'])
def __calcPoints(cpTrajData: pd.DataFrame, plotLines: pd.DataFrame, fuels: list) -> dict:
    """Locate the milestone points for one subplot.

    Keys of the returned dict (each value is a row with year/fscp):
      2/3/4 - year where the blue/green/red FSCP line is closest to the
              carbon-price trajectory (minimum |fscp - CP|);
      5     - year where the blue and green FSCP lines are closest to each other;
      6     - row of the red line whose fscp is closest to zero.
    """
    points = {}
    fuelRef, fuelGreen, fuelBlue = fuels
    dropCols = ['plotIndex', 'fuel_x', 'fuel_y', 'cost_x', 'cost_y', 'ghgi_x', 'ghgi_y']
    greenLine = plotLines.query(f"fuel_x=='{fuelRef}' & fuel_y=='{fuelGreen}'").drop(columns=dropCols).reset_index(drop=True)
    blueLine = plotLines.query(f"fuel_x=='{fuelRef}' & fuel_y=='{fuelBlue}'").drop(columns=dropCols).reset_index(drop=True)
    redLine = plotLines.query(f"fuel_x=='{fuelBlue}' & fuel_y=='{fuelGreen}'").drop(columns=dropCols).reset_index(drop=True)
    purpleLine = cpTrajData.drop(columns=['name', 'CP_u', 'CP_l'])
    # intersections with the carbon-price trajectory (closest sampled year)
    for i, line in enumerate([blueLine, greenLine, redLine]):
        diffLines = pd.merge(line, purpleLine, on=['year'])
        diffLines['delta'] = (diffLines['fscp'] - diffLines['CP']).abs()
        points[i+2] = diffLines.nsmallest(1, 'delta').drop(columns=['CP', 'delta']).iloc[0]
    # blue-green intersection
    diffLines = pd.merge(blueLine, greenLine, on=['year'], suffixes=('', '_right'))
    diffLines['delta'] = (diffLines['fscp'] - diffLines['fscp_right']).abs()
    points[5] = diffLines.nsmallest(1, 'delta').drop(columns=['fscp_right', 'delta']).iloc[0]
    # red-line zero crossing (abs() is applied to the whole frame; years are positive)
    points[6] = redLine.abs().nsmallest(1, 'fscp').iloc[0]
    return points
def __addAnnotationArrows(fig: go.Figure, config: dict):
    """Draw the double-headed arrows near 2025 and the '1' marker label in each subplot."""
    # per subplot: (row, col, list of (x, y1, y2) arrow specs, position of the '1' label)
    subplot_arrows = [
        (1, 1, [(2025.0, 150.0, 600.0), (2025.5, 150.0, 800.0)], (2024.5, 200.0)),
        (1, 2, [(2025.0, 150.0, 300.0), (2025.5, 150.0, 800.0)], (2024.5, 200.0)),
        (2, 1, [(2024.5, 90.0, 200.0)], (2024.0, 150.0)),
        (2, 2, [(2024.5, 90.0, 200.0)], (2024.0, 150.0)),
    ]
    for row, col, arrows, (label_x, label_y) in subplot_arrows:
        for x, y1, y2 in arrows:
            __addArrow(fig, x, y1, y2, row, col, config)
        fig.add_annotation(text='1', x=label_x, y=label_y, row=row, col=col, showarrow=False)
def __addArrow(fig: go.Figure, x: float, y1: float, y2: float, row: int, col: int, config: dict):
    """Draw a vertical double-headed arrow at x between y1 and y2 in the given subplot.

    Two annotations are added with start/end swapped, so each end gets an arrowhead.
    """
    xaxes = [['x', 'x2'], ['x3', 'x4']]
    yaxes = [['y', 'y2'], ['y3', 'y4']]
    # (ay, y) swapped on the second pass -> arrowheads at both ends
    for ay, y in [(y1, y2), (y2, y1)]:
        fig.add_annotation(
            axref=xaxes[row-1][col-1],
            xref=xaxes[row-1][col-1],
            ayref=yaxes[row-1][col-1],
            yref=yaxes[row-1][col-1],
            ax=x,
            x=x,
            ay=ay,
            y=y,
            arrowcolor='black',
            arrowwidth=config['global']['lw_thin'],
            #arrowsize=config['global']['highlight_marker_sm'],
            arrowhead=2,
            showarrow=True,
            row=row,
            col=col,
        )
def __addAnnotationsLegend(fig: go.Figure, config: dict):
    """Add the boxed legend below the figure that explains the numbered
    annotation points (two headings plus six numbered explanations)."""
    # top edge of the legend box, in paper coordinates
    y0 = -0.40
    fig.add_shape(
        type='rect',
        x0=0.0,
        y0=y0,
        x1=0.80,
        y1=y0-0.2,
        xref='paper',
        yref='paper',
        line_width=2,
        fillcolor='white',
    )
    fig.add_annotation(
        text=f"<b>{config['annotationTexts']['heading1']}:</b><br><br><br><b>{config['annotationTexts']['heading2']}:</b>",
        align='left',
        xanchor='left',
        x=0.0,
        yanchor='top',
        y=y0,
        xref='paper',
        yref='paper',
        showarrow=False,
    )
    # six numbered items laid out in two rows of three columns
    for i in range(6):
        fig.add_annotation(
            text=f"{i+1}: "+config['annotationTexts'][f"point{i+1}"],
            align='left',
            xanchor='left',
            x=0.0 + i%3 * 0.22,
            yanchor='top',
            y=y0-(0.03 if i<3 else 0.13),
            xref='paper',
            yref='paper',
            showarrow=False,
        )
def __addFSCPTraces(plotData: pd.DataFrame, plotLines: pd.DataFrame, n_lines: int, refFuel: str, config: dict, sensitivityNG: bool = False):
    """Build the scatter, line, and error-bar traces for each FSCP entry.

    Returns a list of (plotIndex, trace) tuples; the caller decides which
    subplot column(s) each trace goes into.
    """
    traces = []
    for index in range(n_lines):
        thisDataScatter = plotData.query(f"plotIndex=={index}").reset_index(drop=True)
        thisDataLine = plotLines.query(f"plotIndex=={index}").reset_index(drop=True)
        # styling of individual lines, keyed on the fuel pair of this entry
        truncated = (thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB' and thisDataScatter.loc[0, 'fuel_y'] == 'green RE') or \
                    thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB lowscco2'
        dashed = thisDataScatter.loc[0, 'fuel_y'] in ['green pure RE', 'blue LEB lowscco2']
        longdashed = thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB lowscco2'
        # horizontal offset (in 0.1-year steps) so error bars of different entries don't overlap
        shift = 0
        if thisDataScatter.loc[0, 'fuel_y'] == 'green RE':
            if thisDataScatter.loc[0, 'fuel_x'] == 'ref':
                shift = -1
            else:
                shift = +1
        elif thisDataScatter.loc[0, 'fuel_y'] == 'green pure RE':
            shift = +2
        thisDataScatter = thisDataScatter.query(f"year<=2035")
        thisDataLine = thisDataLine.query(f"year<=2035")
        # line properties
        fuel_x = thisDataScatter.iloc[thisDataScatter.first_valid_index()]['fuel_x']
        fuel_y = thisDataScatter.iloc[0]['fuel_y']
        name = f"Fossil→{config['names'][fuel_y]}" if fuel_x == 'ref' else f"{config['names'][fuel_x]}→{config['names'][fuel_y]}"
        col = config['fscp_colours'][f"{fuel_x} to {fuel_y}"] if f"{fuel_x} to {fuel_y}" in config['fscp_colours'] else \
              config['colours'][fuel_y]
        # do not plot awkward red line in sensitivity analysis row 2
        if sensitivityNG and fuel_x == 'blue LEB':
            continue
        # scatter plot
        traces.append((index, go.Scatter(
            x=thisDataScatter['year'],
            y=thisDataScatter['fscp'],
            name=name,
            legendgroup=0 if fuel_x == 'ref' else 1,
            showlegend=False,
            mode='markers',
            line=dict(color=col, width=config['global']['lw_default'], dash='dot' if dashed else 'solid'),
            marker=dict(symbol='x-thin', size=config['global']['highlight_marker_sm'], line={'width': config['global']['lw_thin'], 'color': col}, ),
            hovertemplate=f"<b>{name}</b><br>Year: %{{x:d}}<br>FSCP: %{{y:.2f}}±%{{error_y.array:.2f}}<extra></extra>",
        )))
        # remove unphysical negative FSCPs
        if truncated and not sensitivityNG:
            thisDataLine = thisDataLine.query(f"(year>=2030 & fscp>0.0) | year>=2040")
        # line plot
        traces.append((index, go.Scatter(
            x=thisDataLine['year'],
            y=thisDataLine['fscp'],
            legendgroup=0 if fuel_x == 'ref' else 1,
            # NOTE(review): both ternary branches are identical (legendlabels[0]);
            # the else branch was possibly meant to use legendlabels[1] — confirm
            legendgrouptitle=dict(text=f"<b>{config['legendlabels'][0]}:</b>" if fuel_x=='ref' else f"<b>{config['legendlabels'][0]}:</b>"),
            name=name,
            mode='lines',
            line=dict(color=col, width=config['global']['lw_default'], dash='dot' if dashed else 'dash' if longdashed else 'solid'),
        )))
        # error bars, only at the milestone years
        thisDataScatter = thisDataScatter.query(f"year==[2030,2040,2050]")
        # NOTE(review): this query mixes `and` with `|`; check the operator
        # precedence pandas applies here matches the intended filter
        thisDataScatter = thisDataScatter.query(f"fscp<={config['plotting']['fscp_max']} and (fscp>0.0 | year > 2040)")
        traces.append((index, go.Scatter(
            x=thisDataScatter['year'] + shift * 0.1,
            y=thisDataScatter['fscp'],
            error_y=dict(type='data', array=thisDataScatter['fscp_uu'], arrayminus=thisDataScatter['fscp_ul'],
                         thickness=config['global']['lw_thin']),
            name=name,
            legendgroup=0 if fuel_x == 'ref' else 1,
            showlegend=False,
            mode='markers',
            marker=dict(symbol='x-thin', size=0.00001,),
            line_color=('rgba({}, {}, {}, {})'.format(*hex_to_rgb(col), .4)),
            hovertemplate=f"<b>{name}</b><br>Year: %{{x:d}}<br>FSCP: %{{y:.2f}}±%{{error_y.array:.2f}}<extra></extra>",
        )))
    return traces
# carbon-price trajectory: mean over scenarios plus asymmetric uncertainty band
def __computeCPTraj(years: list, values: dict, n_samples: int):
    """Compute the mean carbon-price trajectory with uncertainty bands.

    For every year, CP is the mean over all scenario series in *values*;
    CP_u / CP_l are the distances from that mean to the scenario maximum /
    minimum. The yearly anchors are then linearly interpolated onto
    *n_samples* equidistant points between the first and last year, and every
    row is tagged with name='cp'.
    """
    records = []
    for idx, year in enumerate(years):
        scenario_vals = [series[idx] for series in values.values()]
        mean = sum(scenario_vals) / len(scenario_vals)
        records.append({
            'year': year,
            'CP': mean,
            'CP_u': max(scenario_vals) - mean,
            'CP_l': mean - min(scenario_vals),
        })
    cpData = pd.DataFrame.from_records(records, columns=['year', 'CP', 'CP_u', 'CP_l'])
    # resample onto the fine year grid and fill the gaps linearly
    sampleYears = pd.DataFrame({'year': np.linspace(years[0], years[-1], n_samples)})
    colTypes = {'year': float, 'CP': float, 'CP_u': float, 'CP_l': float}
    cpData = cpData.merge(sampleYears, how='outer').sort_values(by=['year']).astype(colTypes).interpolate()
    # tag rows so downstream code can identify the carbon-price series
    cpData['name'] = 'cp'
    return cpData
# plot traces
def __addCPTraces(cpTrajData: pd.DataFrame, config: dict):
    """Build the carbon-price traces: the mean trajectory line plus a filled
    uncertainty band (CP ± CP_u/CP_l) as a closed polygon."""
    traces = []
    name = config['carbon_price_config']['name']
    colour = config['carbon_price_config']['colour']
    # add main graphs (FSCP and CP)
    traces.append(go.Scatter(
        name=name,
        legendgroup=1,
        mode='lines',
        x=cpTrajData['year'],
        y=cpTrajData['CP'],
        line_color=colour,
        line_width=config['global']['lw_thin'],
        showlegend=True,
        hovertemplate=f"<b>{name}</b><br>Time: %{{x:.2f}}<br>Carbon price: %{{y:.2f}}<extra></extra>"
    ))
    data_x = cpTrajData['year']
    data_yu = cpTrajData['CP'] + cpTrajData['CP_u']
    data_yl = cpTrajData['CP'] - cpTrajData['CP_l']
    # lower bound forwards, upper bound backwards -> closed polygon for fill='toself'
    errorBand = go.Scatter(
        name='Uncertainty Range',
        legendgroup=1,
        x=pd.concat([data_x, data_x[::-1]], ignore_index=True),
        y=pd.concat([data_yl, data_yu[::-1]], ignore_index=True),
        mode='lines',
        marker=dict(color=colour),
        fillcolor=("rgba({}, {}, {}, 0.1)".format(*hex_to_rgb(colour))),
        fill='toself',
        line=dict(width=config['global']['lw_ultrathin']),
        showlegend=False,
        hoverinfo='skip'
    )
    traces.append(errorBand)
    return traces
def __styling(fig: go.Figure, config: dict):
    """Apply uniform figure styling in place: legend box below the plot,
    framed axes, white background, Helvetica font, and repositioned
    bolded subplot-title annotations."""
    # update legend styling
    fig.update_layout(
        legend=dict(
            orientation='h',
            xanchor='left',
            x=0.0,
            yanchor='top',
            y=-0.1,
            bgcolor='rgba(255,255,255,1.0)',
            bordercolor='black',
            borderwidth=2,
        ),
    )
    # update axis styling: identical frame/tick settings for all eight axes
    for axis in ['xaxis', 'xaxis2', 'xaxis3', 'xaxis4', 'yaxis', 'yaxis2', 'yaxis3', 'yaxis4']:
        update = {axis: dict(
            showline=True,
            linewidth=2,
            linecolor='black',
            showgrid=False,
            zeroline=False,
            mirror=True,
            ticks='outside',
        )}
        fig.update_layout(**update)
    # update figure background colour and font colour and type
    fig.update_layout(
        paper_bgcolor='rgba(255, 255, 255, 1.0)',
        plot_bgcolor='rgba(255, 255, 255, 0.0)',
        font_color='black',
        font_family='Helvetica',
    )
    # move title annotations: the leading annotations are the subplot titles
    # supplied to make_subplots (see __produceFigure's subplot_titles); pin
    # each one to its configured paper coordinate and render it bold
    for i, annotation in enumerate(fig['layout']['annotations'][:len(config['subplot_title_positions'])]):
        x_pos, y_pos = config['subplot_title_positions'][i]
        annotation['xanchor'] = 'left'
        annotation['yanchor'] = 'top'
        annotation['xref'] = 'paper'
        annotation['yref'] = 'paper'
        annotation['x'] = x_pos
        annotation['y'] = y_pos
        annotation['text'] = "<b>{0}</b>".format(annotation['text'])
|
import os
import re
import uuid
import typing as t
import logging
import pathlib
import functools
from typing import TYPE_CHECKING
from distutils.dir_util import copy_tree
from simple_di import inject
from simple_di import Provide
import bentoml
from bentoml import Tag
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..types import LazyType
from ..runner.utils import Params
from ..utils.tensorflow import get_tf_version
from ..utils.tensorflow import is_gpu_available
from ..utils.tensorflow import hook_loaded_model
from .common.model_runner import BaseModelRunner
from ..configuration.containers import BentoMLContainer
logger = logging.getLogger(__name__)
try:
import tensorflow as tf # type: ignore
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""\
`tensorflow` is required in order to use `bentoml.tensorflow`.
Instruction: `pip install tensorflow`
"""
)
try:
import tensorflow_hub as hub # type: ignore
from tensorflow_hub import resolve # type: ignore
from tensorflow_hub import native_module # type: ignore
except ImportError: # pragma: no cover
logger.warning(
"""\
If you want to use `bentoml.tensorflow.import_from_tfhub(),
make sure to `pip install --upgrade tensorflow_hub` before using.
"""
)
hub = None
try:
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata
if TYPE_CHECKING:
from tensorflow_hub import Module as HubModule # type: ignore
from tensorflow_hub import KerasLayer # type: ignore
from .. import external_typing as ext
from ..types import PathType
from ..models import ModelStore
from ..external_typing import tensorflow as tf_ext
TFArgType = t.Union[t.List[t.Union[int, float]], ext.NpNDArray, tf_ext.Tensor]
MODULE_NAME = "bentoml.tensorflow_v2"
def _clean_name(name: str) -> str:  # pragma: no cover
    """Derive a valid model name from a tfhub URL or path-like identifier.

    For http(s) URLs, everything after the scheme and host is kept; otherwise
    only the last path segment is used. Non-word characters are replaced by
    underscores, and a leading underscore is prepended when the name starts
    with a digit, so the result is a valid Python identifier.
    """
    if name.startswith(("http://", "https://")):
        name = name.split("/", maxsplit=3)[-1]
    else:
        name = name.split("/")[-1]
    # bug fix: the pattern used to contain `^(?=\d)-`, which can never match
    # (a digit lookahead immediately followed by a literal '-'); `^(?=\d)` is
    # the intended zero-width match that prefixes digit-leading names with '_'
    return re.sub(r"\W|^(?=\d)", "_", name)
@inject
def load(
    bento_tag: t.Union[str, Tag],
    tags: t.Optional[t.List[str]] = None,
    options: t.Optional["tf_ext.SaveOptions"] = None,
    load_as_hub_module: t.Optional[bool] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> t.Union["tf_ext.AutoTrackable", "tf_ext.Module", "HubModule", "KerasLayer"]:
    """
    Load a model from BentoML local modelstore with given name.
    Args:
        bento_tag (:code:`Union[str, Tag]`):
            Tag of a saved model in BentoML local modelstore.
        tags (:code:`str`, `optional`, defaults to `None`):
            A set of strings specifying the graph variant to use, if loading from a v1 module.
        options (:code:`tensorflow.saved_model.SaveOptions`, `optional`, default to :code:`None`):
            :code:`tensorflow.saved_model.LoadOptions` object that specifies options for loading. This
            argument can only be used from TensorFlow 2.3 onwards.
        load_as_hub_module (`bool`, `optional`, default to :code:`True`):
            Load the given weight that is saved from tfhub as either `hub.KerasLayer` or `hub.Module`.
            The latter only applies for TF1.
        model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.
    Returns:
        :obj:`SavedModel`: an instance of :obj:`SavedModel` format from BentoML modelstore.
    Examples:
    .. code-block:: python
        import bentoml
        # load a model back into memory
        model = bentoml.tensorflow.load("my_tensorflow_model")
    """  # noqa: LN001
    model = model_store.get(bento_tag)
    # refuse to load models saved by a different framework module
    if model.info.module not in (MODULE_NAME, __name__):
        raise BentoMLException(
            f"Model {bento_tag} was saved with module {model.info.module}, failed loading with {MODULE_NAME}."
        )
    # models imported through import_from_tfhub() carry this context flag and
    # may be returned either as a tfhub object or as a plain SavedModel
    if model.info.context["import_from_tfhub"]:
        assert load_as_hub_module is not None, (
            "You have to specified `load_as_hub_module=True | False`"
            " to load a `tensorflow_hub` module. If True is chosen,"
            " then BentoML will return either an instance of `hub.KerasLayer`"
            " or `hub.Module` depending on your TF version. For most usecase,"
            " we recommend to keep `load_as_hub_module=True`. If you wish to extend"
            " the functionalities of the given model, set `load_as_hub_module=False`"
            " will return a SavedModel object."
        )
        if hub is None:
            raise MissingDependencyException(
                """\
`tensorflow_hub` does not exists.
Make sure to `pip install --upgrade tensorflow_hub` before using.
"""
            )
        # directory inside the stored bento model holding the tfhub payload
        module_path = model.path_of(model.info.options["local_path"])
        if load_as_hub_module:
            # hub.Module is the TF1 API; hub.KerasLayer the TF2 one
            return (
                hub.Module(module_path)
                if get_tf_version().startswith("1")
                else hub.KerasLayer(module_path)
            )
        # In case users want to load as a SavedModel file object.
        # https://github.com/tensorflow/hub/blob/master/tensorflow_hub/module_v2.py#L93
        is_hub_module_v1: bool = tf.io.gfile.exists(  # type: ignore
            native_module.get_module_proto_path(module_path)
        )
        if tags is None and is_hub_module_v1:
            tags = []
        if options is not None:
            # options require the tf.saved_model.LoadOptions API (TF >= 2.3)
            if not LazyType(
                "tensorflow.python.saved_model.save_options.SaveOptions"
            ).isinstance(options):
                raise BentoMLException(
                    f"`options` has to be of type `tf.saved_model.SaveOptions`, got {type(options)} instead."
                )
            if not hasattr(getattr(tf, "saved_model", None), "LoadOptions"):
                raise NotImplementedError(
                    "options are not supported for TF < 2.3.x,"
                    f" Current version: {get_tf_version()}"
                )
            # load_v2 is the compat alias used here for loading a SavedModel — presumably
            # equivalent to tf.saved_model.load; TODO confirm for the supported TF range
            tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2(  # type: ignore
                module_path,
                tags=tags,
                options=options,  # type: ignore
            )
        else:
            tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2(  # type: ignore
                module_path,
                tags=tags,
            )
        # remember the hub flavour so downstream code can distinguish v1 modules
        tf_model._is_hub_module_v1 = (
            is_hub_module_v1  # pylint: disable=protected-access # noqa
        )
        return tf_model
    else:
        # plain saved model: load from the bento model root and wrap it
        tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2(model.path)  # type: ignore
        return hook_loaded_model(tf_model, MODULE_NAME)
@inject
def import_from_tfhub(
    identifier: t.Union[str, "HubModule", "KerasLayer"],
    name: t.Optional[str] = None,
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
    metadata: t.Optional[t.Dict[str, t.Any]] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
    """
    Import a model from `Tensorflow Hub <https://tfhub.dev/>`_ to BentoML modelstore.
    Args:
        identifier (:code:`Union[str, tensorflow_hub.Module, tensorflow_hub.KerasLayer]`): Identifier accepts
            two type of inputs:
            - if `type` of :code:`identifier` either of type :code:`tensorflow_hub.Module` (**legacy** `tensorflow_hub`) or :code:`tensorflow_hub.KerasLayer` (`tensorflow_hub`), then we will save the given model to a :code:`SavedModel` format.
            - if `type` of :code:`identifier` is a :obj:`str`, we assume that this is the URI retrieved from Tensorflow Hub. We then clean the given URI, and get a local copy of a given model to BentoML modelstore. name (:code:`str`, `optional`, defaults to `None`): An optional name for the model. If :code:`identifier` is a :obj:`str`, then name can be autogenerated from the given URI.
        name (:code:`str`, `optional`, default to `None`):
            Optional name for the saved model. If None, then name will be generated from :code:`identifier`.
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.
        model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.
    Returns:
        :obj:`~bentoml.Tag`: A :obj:`~bentoml.Tag` object that can be used to retrieve the model with :func:`bentoml.tensorflow.load`:
    Example for importing a model from Tensorflow Hub:
    .. code-block:: python
        import tensorflow_text as text # noqa # pylint: disable
        import bentoml
        tag = bentoml.tensorflow.import_from_tfhub("https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
        # load model back with `load`:
        model = bentoml.tensorflow.load(tag, load_as_hub_module=True)
    Example for importing a custom Tensorflow Hub model:
    .. code-block:: python
        import tensorflow as tf
        import tensorflow_hub as hub
        import bentoml
        def _plus_one_model_tf2():
            obj = tf.train.Checkpoint()
            @tf.function(input_signature=[tf.TensorSpec(None, dtype=tf.float32)])
            def plus_one(x):
                return x + 1
            obj.__call__ = plus_one
            return obj
        # then save the given model to BentoML modelstore:
        model = _plus_one_model_tf2()
        tag = bentoml.tensorflow.import_from_tfhub(model)
    """  # noqa
    if hub is None:
        raise MissingDependencyException(
            """\
`tensorflow_hub` does not exists.
Make sure to `pip install --upgrade tensorflow_hub` before using.
"""
        )
    context: t.Dict[str, t.Any] = {
        "framework_name": "tensorflow",
        "pip_dependencies": [
            f"tensorflow=={get_tf_version()}",
            # bug fix: the inner quotes used to be double quotes, which is a
            # SyntaxError inside an f-string before Python 3.12 (PEP 701)
            f"tensorflow_hub=={importlib_metadata.version('tensorflow_hub')}",
        ],
        "import_from_tfhub": True,
    }
    if name is None:
        if isinstance(identifier, str):
            name = _clean_name(identifier)
        else:
            # no URI to derive a name from; fall back to class name + short uuid
            name = f"{identifier.__class__.__name__}_{uuid.uuid4().hex[:5].upper()}"
    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        options=None,
        context=context,
        metadata=metadata,
        labels=labels,
        custom_objects=custom_objects,
    ) as _model:
        if isinstance(identifier, str):
            # temporarily redirect the tfhub cache into the bento model dir so
            # resolve() downloads the module straight into the model store
            current_cache_dir = os.environ.get("TFHUB_CACHE_DIR")
            os.environ["TFHUB_CACHE_DIR"] = _model.path
            fpath: str = resolve(identifier)
            folder = fpath.split("/")[-1]
            _model.info.options = {"model": identifier, "local_path": folder}
            if current_cache_dir is not None:
                os.environ["TFHUB_CACHE_DIR"] = current_cache_dir
            else:
                # bug fix: the variable was unset before the call; remove it
                # again so the override does not leak into later tfhub usage
                del os.environ["TFHUB_CACHE_DIR"]
        else:
            if hasattr(identifier, "export"):
                # hub.Module.export() (legacy TF1 path) needs a live session
                with tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph()) as sess:  # type: ignore
                    sess.run(tf.compat.v1.global_variables_initializer())  # type: ignore
                    identifier.export(_model.path, sess)  # type: ignore
            else:
                tf.saved_model.save(identifier, _model.path)
            _model.info.options = {
                "model": identifier.__class__.__name__,
                "local_path": ".",
            }
        return _model.tag
@inject
def save(
    name: str,
    model: t.Union["PathType", "tf_ext.KerasModel", "tf_ext.Module"],
    *,
    signatures: t.Optional["tf_ext.ConcreteFunction"] = None,
    options: t.Optional["tf_ext.SaveOptions"] = None,
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
    metadata: t.Optional[t.Dict[str, t.Any]] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
    """
    Save a model instance to BentoML modelstore.
    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (:code:`Union[keras.Model, tf.Module, path-like objects]`):
            Instance of model to be saved
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.
        model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.
        signatures (:code:`Union[Callable[..., Any], dict]`, `optional`, default to :code:`None`):
            Refers to `Signatures explanation <https://www.tensorflow.org/api_docs/python/tf/saved_model/save>`_
            from Tensorflow documentation for more information.
        options (`tf.saved_model.SaveOptions`, `optional`, default to :code:`None`):
            :obj:`tf.saved_model.SaveOptions` object that specifies options for saving.
    Raises:
        ValueError: If :obj:`obj` is not trackable.
    Returns:
        :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML.
    Examples:
    .. code-block:: python
        import tensorflow as tf
        import numpy as np
        import bentoml
        class NativeModel(tf.Module):
            def __init__(self):
                super().__init__()
                self.weights = np.asfarray([[1.0], [1.0], [1.0], [1.0], [1.0]])
                self.dense = lambda inputs: tf.matmul(inputs, self.weights)
            @tf.function(
                input_signature=[tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="inputs")]
            )
            def __call__(self, inputs):
                return self.dense(inputs)
        # then save the given model to BentoML modelstore:
        model = NativeModel()
        tag = bentoml.tensorflow.save("native_toy", model)
    .. note::
       :code:`bentoml.tensorflow.save` API also support saving `RaggedTensor <https://www.tensorflow.org/guide/ragged_tensor>`_ model and Keras model. If you choose to save a Keras model
       with :code:`bentoml.tensorflow.save`, then the model will be saved under a :obj:`SavedModel` format instead of :obj:`.h5`.
    """  # noqa
    # NOTE(review): the model_store parameter is accepted but never referenced
    # in this function body — confirm whether bentoml.models.create() uses it
    # implicitly via the DI container
    context: t.Dict[str, t.Any] = {
        "framework_name": "tensorflow",
        "pip_dependencies": [f"tensorflow=={get_tf_version()}"],
        "import_from_tfhub": False,
    }
    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        options=None,
        context=context,
        labels=labels,
        custom_objects=custom_objects,
        metadata=metadata,
    ) as _model:
        if isinstance(model, (str, bytes, os.PathLike, pathlib.Path)):  # type: ignore[reportUnknownMemberType]
            # path-like input: the model is an already-exported SavedModel dir;
            # copy it verbatim into the bento model directory
            # NOTE(review): distutils is deprecated (removed in Python 3.12);
            # shutil.copytree(..., dirs_exist_ok=True) is the usual replacement
            assert os.path.isdir(model)
            copy_tree(str(model), _model.path)
        else:
            if options:
                # NOTE(review): this warning says options is "ignored", yet
                # options is still forwarded to tf.saved_model.save below —
                # confirm which behavior is intended
                logger.warning(
                    f"Parameter 'options: {str(options)}' is ignored when "
                    f"using tensorflow {get_tf_version()}"
                )
            tf.saved_model.save(
                model, _model.path, signatures=signatures, options=options
            )
        return _model.tag
class _TensorflowRunner(BaseModelRunner):
    """Runner wrapping a stored TensorFlow model for BentoML serving.

    Loads the model lazily in ``_setup`` and executes the configured predict
    function on the configured device in ``_run_batch``.
    """

    def __init__(
        self,
        tag: t.Union[str, Tag],
        predict_fn_name: str,
        device_id: str,
        partial_kwargs: t.Optional[t.Dict[str, t.Any]],
        name: t.Optional[str] = None,
    ):
        super().__init__(tag, name=name)
        self._device_id = device_id
        self._configure(device_id)
        self._predict_fn_name = predict_fn_name
        # kwargs pre-bound to the predict function in _setup (empty dict if None)
        self._partial_kwargs: t.Dict[str, t.Any] = (
            partial_kwargs if partial_kwargs is not None else dict()
        )

    def _configure(self, device_id: str) -> None:
        """Restrict visible GPUs when a GPU device is requested and record thread settings."""
        if "GPU" in device_id:
            tf.config.set_visible_devices(device_id, "GPU")
        # NOTE(review): _config_proto is stored but not referenced anywhere in
        # this class — confirm whether the base runner consumes it
        self._config_proto = dict(
            allow_soft_placement=True,
            log_device_placement=False,
            intra_op_parallelism_threads=self._num_threads,
            inter_op_parallelism_threads=self._num_threads,
        )

    @property
    def _num_threads(self) -> int:
        # single thread when running on GPU; otherwise use the CPU quota
        if is_gpu_available() and self.resource_quota.on_gpu:
            return 1
        return int(round(self.resource_quota.cpu))

    @property
    def num_replica(self) -> int:
        # one replica per available GPU, otherwise a single CPU replica
        if is_gpu_available() and self.resource_quota.on_gpu:
            return len(self.resource_quota.gpus)
        return 1

    def _setup(self) -> None:
        """Load the model from the store and pre-bind the predict function."""
        self._model = load(self._tag, model_store=self.model_store)
        raw_predict_fn = getattr(self._model, self._predict_fn_name)  # type: ignore
        self._predict_fn = functools.partial(raw_predict_fn, **self._partial_kwargs)

    def _run_batch(self, *args: "TFArgType", **kwargs: "TFArgType") -> "ext.NpNDArray":
        """Run one inference batch on the configured device and return a numpy array."""
        params = Params["TFArgType"](*args, **kwargs)
        with tf.device(self._device_id):  # type: ignore

            # convert any non-tensor input (lists, numpy arrays) to a tf.Tensor
            def _mapping(item: "TFArgType") -> "tf_ext.TensorLike":
                if not LazyType["tf_ext.TensorLike"]("tf.Tensor").isinstance(item):
                    return t.cast("tf_ext.TensorLike", tf.convert_to_tensor(item))
                else:
                    return item

            params = params.map(_mapping)
            tf.compat.v1.global_variables_initializer()  # type: ignore
            res = self._predict_fn(*params.args, **params.kwargs)
            return t.cast("ext.NpNDArray", res.numpy())
def load_runner(
    tag: t.Union[str, Tag],
    *,
    predict_fn_name: str = "__call__",
    device_id: str = "CPU:0",
    name: t.Optional[str] = None,
    partial_kwargs: t.Optional[t.Dict[str, t.Any]] = None,
) -> "_TensorflowRunner":
    """
    Runner represents a unit of serving logic that can be scaled horizontally to
    maximize throughput. This function wraps a stored Tensorflow model in a
    Runner class optimized for the BentoML runtime.
    Args:
        tag (:code:`Union[str, Tag]`):
            Tag of a saved model in BentoML local modelstore.
        predict_fn_name (:code:`str`, default to :code:`__call__`):
            Name of the attribute on the loaded model that is invoked for inference.
        partial_kwargs (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Keyword arguments pre-bound to the predict function, shareable across models.
        device_id (:code:`str`, `optional`, default to the first CPU):
            Device to place the model on; see `Logical Devices <https://www.tensorflow.org/api_docs/python/tf/config/list_logical_devices>`_ from TF documentation.
    Returns:
        :obj:`~bentoml._internal.runner.Runner`: Runner instances for :mod:`bentoml.tensorflow` model
    Examples:
    .. code-block:: python
        import bentoml
        # load a runner from a given flag
        runner = bentoml.tensorflow.load_runner(tag)
        # load a runner on GPU:0
        runner = bentoml.tensorflow.load_runner(tag, resource_quota=dict(gpus=0), device_id="GPU:0")
    """
    runner_kwargs = dict(
        tag=tag,
        predict_fn_name=predict_fn_name,
        device_id=device_id,
        partial_kwargs=partial_kwargs,
        name=name,
    )
    return _TensorflowRunner(**runner_kwargs)
| import os
import re
import uuid
import typing as t
import logging
import pathlib
import functools
from typing import TYPE_CHECKING
from distutils.dir_util import copy_tree
from simple_di import inject
from simple_di import Provide
import bentoml
from bentoml import Tag
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..types import LazyType
from ..runner.utils import Params
from ..utils.tensorflow import get_tf_version
from ..utils.tensorflow import is_gpu_available
from ..utils.tensorflow import hook_loaded_model
from .common.model_runner import BaseModelRunner
from ..configuration.containers import BentoMLContainer
logger = logging.getLogger(__name__)
try:
import tensorflow as tf # type: ignore
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""\
`tensorflow` is required in order to use `bentoml.tensorflow`.
Instruction: `pip install tensorflow`
"""
)
try:
import tensorflow_hub as hub # type: ignore
from tensorflow_hub import resolve # type: ignore
from tensorflow_hub import native_module # type: ignore
except ImportError: # pragma: no cover
logger.warning(
"""\
If you want to use `bentoml.tensorflow.import_from_tfhub(),
make sure to `pip install --upgrade tensorflow_hub` before using.
"""
)
hub = None
try:
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata
if TYPE_CHECKING:
from tensorflow_hub import Module as HubModule # type: ignore
from tensorflow_hub import KerasLayer # type: ignore
from .. import external_typing as ext
from ..types import PathType
from ..models import ModelStore
from ..external_typing import tensorflow as tf_ext
TFArgType = t.Union[t.List[t.Union[int, float]], ext.NpNDArray, tf_ext.Tensor]
MODULE_NAME = "bentoml.tensorflow_v2"
def _clean_name(name: str) -> str: # pragma: no cover
if name.startswith(("http://", "https://")):
name = name.split("/", maxsplit=3)[-1]
else:
name = name.split("/")[-1]
return re.sub(r"\W|^(?=\d)-", "_", name)
@inject
def load(
    bento_tag: t.Union[str, Tag],
    tags: t.Optional[t.List[str]] = None,
    options: t.Optional["tf_ext.SaveOptions"] = None,
    load_as_hub_module: t.Optional[bool] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> t.Union["tf_ext.AutoTrackable", "tf_ext.Module", "HubModule", "KerasLayer"]:
    """
    Load a model from BentoML local modelstore with given name.

    Args:
        bento_tag (:code:`Union[str, Tag]`):
            Tag of a saved model in BentoML local modelstore.
        tags (:code:`str`, `optional`, defaults to `None`):
            A set of strings specifying the graph variant to use, if loading from a v1 module.
        options (:code:`tensorflow.saved_model.SaveOptions`, `optional`, default to :code:`None`):
            :code:`tensorflow.saved_model.LoadOptions` object that specifies options for loading. This
            argument can only be used from TensorFlow 2.3 onwards.
        load_as_hub_module (`bool`, `optional`, default to :code:`True`):
            Load the given weight that is saved from tfhub as either `hub.KerasLayer` or `hub.Module`.
            The latter only applies for TF1.
        model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.

    Returns:
        :obj:`SavedModel`: an instance of :obj:`SavedModel` format from BentoML modelstore.

    Examples:

    .. code-block:: python

        import bentoml

        # load a model back into memory
        model = bentoml.tensorflow.load("my_tensorflow_model")

    """  # noqa: LN001
    model = model_store.get(bento_tag)
    # Refuse to load weights saved by a different framework module.
    if model.info.module not in (MODULE_NAME, __name__):
        raise BentoMLException(
            f"Model {bento_tag} was saved with module {model.info.module}, failed loading with {MODULE_NAME}."
        )
    if model.info.context["import_from_tfhub"]:
        # For hub imports the caller must decide between a hub wrapper object
        # and a plain SavedModel.
        assert load_as_hub_module is not None, (
            "You have to specified `load_as_hub_module=True | False`"
            " to load a `tensorflow_hub` module. If True is chosen,"
            " then BentoML will return either an instance of `hub.KerasLayer`"
            " or `hub.Module` depending on your TF version. For most usecase,"
            " we recommend to keep `load_as_hub_module=True`. If you wish to extend"
            " the functionalities of the given model, set `load_as_hub_module=False`"
            " will return a SavedModel object."
        )

        if hub is None:
            raise MissingDependencyException(
                """\
`tensorflow_hub` does not exists.
Make sure to `pip install --upgrade tensorflow_hub` before using.
"""
            )

        module_path = model.path_of(model.info.options["local_path"])
        if load_as_hub_module:
            # TF1 gets the legacy hub.Module wrapper; TF2 a hub.KerasLayer.
            return (
                hub.Module(module_path)
                if get_tf_version().startswith("1")
                else hub.KerasLayer(module_path)
            )

        # In case users want to load as a SavedModel file object.
        # https://github.com/tensorflow/hub/blob/master/tensorflow_hub/module_v2.py#L93
        is_hub_module_v1: bool = tf.io.gfile.exists(  # type: ignore
            native_module.get_module_proto_path(module_path)
        )
        if tags is None and is_hub_module_v1:
            tags = []

        if options is not None:
            # `options` requires tf.saved_model.LoadOptions (TF >= 2.3).
            if not LazyType(
                "tensorflow.python.saved_model.save_options.SaveOptions"
            ).isinstance(options):
                raise BentoMLException(
                    f"`options` has to be of type `tf.saved_model.SaveOptions`, got {type(options)} instead."
                )
            if not hasattr(getattr(tf, "saved_model", None), "LoadOptions"):
                raise NotImplementedError(
                    "options are not supported for TF < 2.3.x,"
                    f" Current version: {get_tf_version()}"
                )
            tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2(  # type: ignore
                module_path,
                tags=tags,
                options=options,  # type: ignore
            )
        else:
            tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2(  # type: ignore
                module_path,
                tags=tags,
            )
        # Mark the object so downstream code can tell v1 hub modules apart.
        tf_model._is_hub_module_v1 = (
            is_hub_module_v1  # pylint: disable=protected-access # noqa
        )
        return tf_model
    else:
        tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2(model.path)  # type: ignore
        return hook_loaded_model(tf_model, MODULE_NAME)
@inject
def import_from_tfhub(
    identifier: t.Union[str, "HubModule", "KerasLayer"],
    name: t.Optional[str] = None,
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
    metadata: t.Optional[t.Dict[str, t.Any]] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
    """
    Import a model from `Tensorflow Hub <https://tfhub.dev/>`_ to BentoML modelstore.

    Args:
        identifier (:code:`Union[str, tensorflow_hub.Module, tensorflow_hub.KerasLayer]`): Identifier accepts
            two type of inputs:
            - if `type` of :code:`identifier` either of type :code:`tensorflow_hub.Module` (**legacy** `tensorflow_hub`) or :code:`tensorflow_hub.KerasLayer` (`tensorflow_hub`), then we will save the given model to a :code:`SavedModel` format.
            - if `type` of :code:`identifier` is a :obj:`str`, we assume that this is the URI retrieved from Tensorflow Hub. We then clean the given URI, and get a local copy of a given model to BentoML modelstore.
        name (:code:`str`, `optional`, default to `None`):
            Optional name for the saved model. If None, then name will be generated from :code:`identifier`.
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.
        model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.

    Returns:
        :obj:`~bentoml.Tag`: A :obj:`~bentoml.Tag` object that can be used to retrieve the model with :func:`bentoml.tensorflow.load`:

    Example for importing a model from Tensorflow Hub:

    .. code-block:: python

        import tensorflow_text as text # noqa # pylint: disable
        import bentoml

        tag = bentoml.tensorflow.import_from_tfhub("https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")

        # load model back with `load`:
        model = bentoml.tensorflow.load(tag, load_as_hub_module=True)

    Example for importing a custom Tensorflow Hub model:

    .. code-block:: python

        import tensorflow as tf
        import tensorflow_hub as hub
        import bentoml

        def _plus_one_model_tf2():
            obj = tf.train.Checkpoint()

            @tf.function(input_signature=[tf.TensorSpec(None, dtype=tf.float32)])
            def plus_one(x):
                return x + 1

            obj.__call__ = plus_one
            return obj

        # then save the given model to BentoML modelstore:
        model = _plus_one_model_tf2()
        tag = bentoml.tensorflow.import_from_tfhub(model)
    """  # noqa
    if hub is None:
        raise MissingDependencyException(
            """\
`tensorflow_hub` does not exists.
Make sure to `pip install --upgrade tensorflow_hub` before using.
"""
        )
    context: t.Dict[str, t.Any] = {
        "framework_name": "tensorflow",
        "pip_dependencies": [
            f"tensorflow=={get_tf_version()}",
            f"tensorflow_hub=={importlib_metadata.version('tensorflow_hub')}",
        ],
        "import_from_tfhub": True,
    }
    if name is None:
        if isinstance(identifier, str):
            name = _clean_name(identifier)
        else:
            name = f"{identifier.__class__.__name__}_{uuid.uuid4().hex[:5].upper()}"
    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        options=None,
        context=context,
        metadata=metadata,
        labels=labels,
        custom_objects=custom_objects,
    ) as _model:
        if isinstance(identifier, str):
            # Redirect TF-Hub's download cache into the new model's directory
            # so the resolved weights land inside the BentoML model store.
            current_cache_dir = os.environ.get("TFHUB_CACHE_DIR")
            os.environ["TFHUB_CACHE_DIR"] = _model.path
            try:
                fpath: str = resolve(identifier)
                folder = fpath.split("/")[-1]
                _model.info.options = {"model": identifier, "local_path": folder}
            finally:
                # Bug fix + robustness: restore the variable even when
                # resolve() raises, and *remove* it when it was unset before.
                # Previously an originally-unset TFHUB_CACHE_DIR stayed
                # pointing at this model's path, silently redirecting every
                # later TF-Hub download into a stale directory.
                if current_cache_dir is not None:
                    os.environ["TFHUB_CACHE_DIR"] = current_cache_dir
                else:
                    os.environ.pop("TFHUB_CACHE_DIR", None)
        else:
            if hasattr(identifier, "export"):
                # hub.Module.export() — legacy TF1 module path needs a session
                # with initialized variables.
                with tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph()) as sess:  # type: ignore
                    sess.run(tf.compat.v1.global_variables_initializer())  # type: ignore
                    identifier.export(_model.path, sess)  # type: ignore
            else:
                tf.saved_model.save(identifier, _model.path)
            _model.info.options = {
                "model": identifier.__class__.__name__,
                "local_path": ".",
            }
        return _model.tag
@inject
def save(
    name: str,
    model: t.Union["PathType", "tf_ext.KerasModel", "tf_ext.Module"],
    *,
    signatures: t.Optional["tf_ext.ConcreteFunction"] = None,
    options: t.Optional["tf_ext.SaveOptions"] = None,
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
    metadata: t.Optional[t.Dict[str, t.Any]] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
    """
    Save a model instance to BentoML modelstore.

    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (:code:`Union[keras.Model, tf.Module, path-like objects]`):
            Instance of model to be saved
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.
        model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.
        signatures (:code:`Union[Callable[..., Any], dict]`, `optional`, default to :code:`None`):
            Refers to `Signatures explanation <https://www.tensorflow.org/api_docs/python/tf/saved_model/save>`_
            from Tensorflow documentation for more information.
        options (`tf.saved_model.SaveOptions`, `optional`, default to :code:`None`):
            :obj:`tf.saved_model.SaveOptions` object that specifies options for saving.

    Raises:
        ValueError: If :obj:`obj` is not trackable.

    Returns:
        :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML.

    Examples:

    .. code-block:: python

        import tensorflow as tf
        import numpy as np
        import bentoml

        class NativeModel(tf.Module):
            def __init__(self):
                super().__init__()
                self.weights = np.asfarray([[1.0], [1.0], [1.0], [1.0], [1.0]])
                self.dense = lambda inputs: tf.matmul(inputs, self.weights)

            @tf.function(
                input_signature=[tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="inputs")]
            )
            def __call__(self, inputs):
                return self.dense(inputs)

        # then save the given model to BentoML modelstore:
        model = NativeModel()
        tag = bentoml.tensorflow.save("native_toy", model)

    .. note::

       :code:`bentoml.tensorflow.save` API also support saving `RaggedTensor <https://www.tensorflow.org/guide/ragged_tensor>`_ model and Keras model. If you choose to save a Keras model
       with :code:`bentoml.tensorflow.save`, then the model will be saved under a :obj:`SavedModel` format instead of :obj:`.h5`.

    """  # noqa
    context: t.Dict[str, t.Any] = {
        "framework_name": "tensorflow",
        "pip_dependencies": [f"tensorflow=={get_tf_version()}"],
        "import_from_tfhub": False,
    }
    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        options=None,
        context=context,
        labels=labels,
        custom_objects=custom_objects,
        metadata=metadata,
    ) as _model:
        # Path-like input: the model is already a SavedModel directory on
        # disk, so copy it verbatim into the model store entry.
        if isinstance(model, (str, bytes, os.PathLike, pathlib.Path)):  # type: ignore[reportUnknownMemberType]
            assert os.path.isdir(model)
            copy_tree(str(model), _model.path)
        else:
            # NOTE(review): this warning claims `options` is ignored, yet the
            # value is still forwarded to tf.saved_model.save below — confirm
            # which behavior is intended.
            if options:
                logger.warning(
                    f"Parameter 'options: {str(options)}' is ignored when "
                    f"using tensorflow {get_tf_version()}"
                )
            tf.saved_model.save(
                model, _model.path, signatures=signatures, options=options
            )

        return _model.tag
class _TensorflowRunner(BaseModelRunner):
    """Runner that executes a saved Tensorflow model on a fixed TF device."""

    def __init__(
        self,
        tag: t.Union[str, Tag],
        predict_fn_name: str,
        device_id: str,
        partial_kwargs: t.Optional[t.Dict[str, t.Any]],
        name: t.Optional[str] = None,
    ):
        super().__init__(tag, name=name)
        # Logical TF device (e.g. "CPU:0" or "GPU:0") all inference runs on.
        self._device_id = device_id
        self._configure(device_id)
        # Name of the model attribute used for inference (e.g. "__call__").
        self._predict_fn_name = predict_fn_name
        # Keyword arguments pre-bound to the predict function in _setup.
        self._partial_kwargs: t.Dict[str, t.Any] = (
            partial_kwargs if partial_kwargs is not None else dict()
        )

    def _configure(self, device_id: str) -> None:
        # NOTE(review): tf.config.set_visible_devices normally takes device
        # objects from tf.config.list_physical_devices; passing the raw string
        # id may rely on implicit TF handling — confirm.
        if "GPU" in device_id:
            tf.config.set_visible_devices(device_id, "GPU")
        # TF1-style session config mirroring the runner's thread allocation.
        self._config_proto = dict(
            allow_soft_placement=True,
            log_device_placement=False,
            intra_op_parallelism_threads=self._num_threads,
            inter_op_parallelism_threads=self._num_threads,
        )

    @property
    def _num_threads(self) -> int:
        # On GPU a single thread feeds the device; on CPU use the CPU quota.
        if is_gpu_available() and self.resource_quota.on_gpu:
            return 1
        return int(round(self.resource_quota.cpu))

    @property
    def num_replica(self) -> int:
        # One replica per assigned GPU; a single replica when running on CPU.
        if is_gpu_available() and self.resource_quota.on_gpu:
            return len(self.resource_quota.gpus)
        return 1

    def _setup(self) -> None:
        # Load once and pre-bind shared kwargs so _run_batch stays cheap.
        self._model = load(self._tag, model_store=self.model_store)
        raw_predict_fn = getattr(self._model, self._predict_fn_name)  # type: ignore
        self._predict_fn = functools.partial(raw_predict_fn, **self._partial_kwargs)

    def _run_batch(self, *args: "TFArgType", **kwargs: "TFArgType") -> "ext.NpNDArray":
        # Bundle args/kwargs so they can be transformed uniformly.
        params = Params["TFArgType"](*args, **kwargs)
        with tf.device(self._device_id):  # type: ignore

            def _mapping(item: "TFArgType") -> "tf_ext.TensorLike":
                # Convert non-tensor inputs; pass tensors through unchanged.
                if not LazyType["tf_ext.TensorLike"]("tf.Tensor").isinstance(item):
                    return t.cast("tf_ext.TensorLike", tf.convert_to_tensor(item))
                else:
                    return item

            params = params.map(_mapping)
            # NOTE(review): TF1 graph-mode op; effect under TF2 eager mode is
            # unclear — confirm it is still needed.
            tf.compat.v1.global_variables_initializer()  # type: ignore
            res = self._predict_fn(*params.args, **params.kwargs)
            # assumes the result is an EagerTensor exposing .numpy()
            return t.cast("ext.NpNDArray", res.numpy())
def load_runner(
    tag: t.Union[str, Tag],
    *,
    predict_fn_name: str = "__call__",
    device_id: str = "CPU:0",
    name: t.Optional[str] = None,
    partial_kwargs: t.Optional[t.Dict[str, t.Any]] = None,
) -> "_TensorflowRunner":
    """Create a horizontally scalable serving unit for a Tensorflow model.

    The returned runner wraps a model previously saved to the BentoML local
    model store and optimizes it for the BentoML runtime.

    Args:
        tag: Tag of a saved model in the BentoML local model store.
        predict_fn_name: Inference function to be used (default ``__call__``).
        device_id: Logical device to place the model on; see
            `Logical Devices <https://www.tensorflow.org/api_docs/python/tf/config/list_logical_devices>`_.
        name: Optional runner name.
        partial_kwargs: Partial kwargs shared across calls to the model.

    Returns:
        :obj:`~bentoml._internal.runner.Runner`: a runner instance for the
        :mod:`bentoml.tensorflow` model.

    Examples:

    .. code-block:: python

        import bentoml

        # load a runner from a given tag
        runner = bentoml.tensorflow.load_runner(tag)

        # load a runner on GPU:0
        runner = bentoml.tensorflow.load_runner(tag, device_id="GPU:0")
    """
    return _TensorflowRunner(
        tag=tag,
        name=name,
        device_id=device_id,
        predict_fn_name=predict_fn_name,
        partial_kwargs=partial_kwargs,
    )
|
"""A Couchbase CLI subcommand"""
import getpass
import inspect
import ipaddress
import json
import os
import platform
import random
import re
import string
import subprocess
import sys
import urllib.parse
import tempfile
import time
from typing import Optional, List, Any, Dict
from argparse import ArgumentError, ArgumentParser, HelpFormatter, Action, SUPPRESS
from operator import itemgetter
from cluster_manager import ClusterManager
from pbar import TopologyProgressBar
try:
from cb_version import VERSION # pylint: disable=import-error
except ImportError:
VERSION = "0.0.0-0000-community"
print(f'WARNING: Could not import cb_version, setting VERSION to {VERSION}')
# Default REST port for a Couchbase node.
COUCHBASE_DEFAULT_PORT = 8091

# Bucket background-task priority values and their CLI-facing names.
BUCKET_PRIORITY_HIGH_INT = 8
BUCKET_PRIORITY_HIGH_STR = "high"
BUCKET_PRIORITY_LOW_INT = 3
BUCKET_PRIORITY_LOW_STR = "low"

# Internal server names for the supported bucket types.
BUCKET_TYPE_COUCHBASE = "membase"
BUCKET_TYPE_MEMCACHED = "memcached"

# Install-tree paths, resolved relative to this script's own location.
CB_BIN_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "bin"))
CB_ETC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "etc", "couchbase"))
CB_LIB_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "lib"))

# On MacOS the config is store in the users home directory
if platform.system() == "Darwin":
    CB_CFG_PATH = os.path.expanduser("~/Library/Application Support/Couchbase/var/lib/couchbase")
else:
    CB_CFG_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "var", "lib", "couchbase"))

# Manual pages live under share/; Windows ships HTML pages instead of man(1).
CB_MAN_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "share"))
if os.name == "nt":
    CB_MAN_PATH = os.path.join(CB_MAN_PATH, "html")
else:
    CB_MAN_PATH = os.path.join(CB_MAN_PATH, "man", "man1")
def remove_prefix(val: str, prefix: str) -> str:
    """Strip *prefix* from the start of *val* when present.

    Note this is a built-in function (str.removeprefix) in Python 3.9; once we
    upgrade to it we should use it instead.
    """
    if not val.startswith(prefix):
        return val
    return val[len(prefix):]
def rest_initialiser(cluster_init_check=False, version_check=False, enterprise_check=None):
    """rest_initialiser is a decorator that does common subcommand tasks.

    The decorator will always creates a cluster manager and assign it to the subcommand variable rest

    :param cluster_init_check: if true it will check if the cluster is initialized before executing the subcommand
    :param version_check: if true it will check if the cluster and CLI version match if they do not it prints a warning
    :param enterprise_check: if true it will check if the cluster is enterprise and fail if not. If it is false it does
        the check but it does not fail if not enterprise. If none it does not perform the check. The result of the check
        is stored on the instance parameter enterprise
    """
    def inner(fn):
        # NOTE(review): functools.wraps is not applied, so the decorated
        # subcommand's __name__/__doc__ are replaced by `decorator`'s —
        # confirm whether that is intended.
        def decorator(self, opts):
            # Every decorated subcommand gets a ClusterManager built from the
            # common connection options parsed from the command line.
            self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify,
                                       opts.cacert, opts.debug)
            if cluster_init_check:
                check_cluster_initialized(self.rest)
            if version_check:
                check_versions(self.rest)
            if enterprise_check is not None:
                enterprise, errors = self.rest.is_enterprise()
                _exit_if_errors(errors)
                # enterprise_check=True is a hard requirement; False merely
                # records the edition on the instance.
                if enterprise_check and not enterprise:
                    _exit_if_errors(['Command only available in enterprise edition'])
                self.enterprise = enterprise
            return fn(self, opts)
        return decorator
    return inner
def check_cluster_initialized(rest):
    """Exit with an error message when the target cluster is not initialized."""
    initialized, errors = rest.is_cluster_initialized()
    if errors:
        _exit_if_errors(errors)
    if initialized:
        return
    _exit_if_errors(["Cluster is not initialized, use cluster-init to initialize the cluster"])
def check_versions(rest):
    """Warn when the CLI's major.minor version differs from the server's."""
    result, errors = rest.pools()
    if errors:
        return
    server_version = result['implementationVersion']
    if server_version is None or VERSION is None:
        return
    # Compare only the major.minor components of the two version strings.
    major_couch, _, tail = server_version.partition('.')
    minor_couch = tail[: tail.index('.')]
    major_cli, _, tail = VERSION.partition('.')
    minor_cli = tail[: tail.index('.')]
    if (major_cli, minor_cli) != (major_couch, minor_couch):
        _warning(f'couchbase-cli version {VERSION} does not match couchbase server version {server_version}')
def index_storage_mode_to_param(value, default="plasma"):
    """Converts the index storage mode to what Couchbase understands"""
    translations = {"default": default, "memopt": "memory_optimized"}
    return translations.get(value, value)
def process_services(services, enterprise):
    """Validate and normalise a service list into the server's format.

    Args:
        services: Comma- (or legacy semicolon-) separated service names.
        enterprise: True when the target cluster is Enterprise Edition.

    Returns:
        A ``(services, errors)`` pair: the normalised comma-separated service
        string and ``None`` on success, or ``None`` and a list of error
        messages on failure.
    """
    sep = ","
    if services.find(sep) < 0:
        # backward compatible when using ";" as separator
        sep = ";"
    svc_set = set([w.strip() for w in services.split(sep)])
    svc_candidate = ["data", "index", "query", "fts", "eventing", "analytics", "backup"]
    for svc in svc_set:
        if svc not in svc_candidate:
            return None, [f'`{svc}` is not a valid service']
        if not enterprise and svc in ["eventing", "analytics", "backup"]:
            return None, [f'{svc} service is only available on Enterprise Edition']
    if not enterprise:
        # Valid CE node service configuration
        ce_svc_30 = set(["data"])
        ce_svc_40 = set(["data", "index", "query"])
        ce_svc_45 = set(["data", "index", "query", "fts"])
        if svc_set not in [ce_svc_30, ce_svc_40, ce_svc_45]:
            # Bug fix: the original nested double quotes inside double-quoted
            # f-strings (a SyntaxError before Python 3.12) and joined the
            # first combination with "" instead of ",".
            return None, [f"Invalid service configuration. Community Edition only supports nodes with the following"
                          f" combinations of services: '{','.join(ce_svc_30)}', '{','.join(ce_svc_40)}' or "
                          f"'{','.join(ce_svc_45)}'"]
    services = ",".join(svc_set)
    # Translate CLI service names to the identifiers the server expects.
    for old, new in [[";", ","], ["data", "kv"], ["query", "n1ql"], ["analytics", "cbas"]]:
        services = services.replace(old, new)
    return services, None
def find_subcommands():
    """Find every concrete Subcommand/LocalSubcommand class in this module."""
    members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    subcommands = []
    for cls_name, cls in members:
        if not issubclass(cls, (Subcommand, LocalSubcommand)):
            continue
        if cls in (Subcommand, LocalSubcommand):
            continue
        # CamelCase class name -> dash-separated command name
        name = '-'.join(part.lower() for part in re.findall('[A-Z][a-z]*', cls_name))
        subcommands.append((name, cls))
    return subcommands
def _success(msg):
    # Print a uniformly prefixed success message.
    print(f'SUCCESS: {msg}')


def _deprecated(msg):
    # Print a uniformly prefixed deprecation notice.
    print(f'DEPRECATED: {msg}')


def _warning(msg):
    # Print a uniformly prefixed warning message.
    print(f'WARNING: {msg}')
def _exit_if_errors(errors):
if errors:
for error in errors:
# Some endpoint return errors prefixed with '_ -' this has to be stripped out. For more information see
# MB-42801
print(f'ERROR: {remove_prefix(error, '_ -').lstrip(' ')}')
sys.exit(1)
def _exit_on_file_write_failure(fname, to_write):
try:
wfile = open(fname, 'w')
wfile.write(to_write)
wfile.close()
except IOError as error:
_exit_if_errors([error])
def _exit_on_file_read_failure(fname, to_report=None):
try:
rfile = open(fname, 'r')
read_bytes = rfile.read()
rfile.close()
return read_bytes
except IOError as error:
if to_report is None:
_exit_if_errors([f'{error.strerror} `{fname}`'])
else:
_exit_if_errors([to_report])
def apply_default_port(nodes):
    """
    Adds the default port (8091) to every node that does not specify one.

    @type nodes: string
    @param nodes: A comma seprated list of nodes
    @rtype: array of strings
    @return: The nodes with the port postfixed on each one
    """
    has_port = re.compile(r'.*:\d+$')
    return [node if has_port.match(node) else f'{node}:8091'
            for node in nodes.split(',')]
class CLIHelpFormatter(HelpFormatter):
    """Format help with indented section bodies"""

    def __init__(self, prog, indent_increment=2, max_help_position=30, width=None):
        HelpFormatter.__init__(self, prog, indent_increment, max_help_position, width)

    def add_argument(self, action):
        # Mirrors argparse.HelpFormatter.add_argument but recomputes column
        # widths with the custom invocation format produced below.
        if action.help is not SUPPRESS:
            # find all invocations
            get_invocation = self._format_action_invocation
            invocations = [get_invocation(action)]
            for subaction in self._iter_indented_subactions(action):
                invocations.append(get_invocation(subaction))
            # update the maximum item length
            invocation_length = max([len(s) for s in invocations])
            action_length = invocation_length + self._current_indent + 2
            self._action_max_length = max(self._action_max_length,
                                          action_length)
            # add the item to the list
            self._add_item(self._format_action, [action])

    def _format_action_invocation(self, action):
        # Positionals: just show the metavar.
        if not action.option_strings:
            metavar, = self._metavar_formatter(action, action.dest)(1)
            return metavar
        else:
            parts = []
            # Flags (nargs == 0) render as "-s,--long" without a value;
            # value-taking options get the args string appended once.
            if action.nargs == 0:
                parts.extend(action.option_strings)
                return ','.join(parts)
            else:
                default = action.dest
                args_string = self._format_args(action, default)
                for option_string in action.option_strings:
                    parts.append(option_string)
                return ','.join(parts) + ' ' + args_string
class CBDeprecatedAction(Action):
    """Warns that an option is deprecated, then stores its value as usual."""

    def __call__(self, parser, namespace, values, option_string=None):
        _deprecated('Specifying ' + '/'.join(self.option_strings) + ' is deprecated')
        # nargs == 0 marks a flag: store the configured constant, not values.
        value = self.const if self.nargs == 0 else values
        setattr(namespace, self.dest, value)
class CBHostAction(Action):
    """Parses and validates a Couchbase cluster address from the command line.

    Accepts http/https/couchbase/couchbases schemes (or none, which implies
    http), validates the hostname or IP literal, and stores a normalised
    ``scheme://host:port`` string, applying the scheme's default port
    (8091 for http, 18091 for https) when none was given.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = urllib.parse.urlparse(values)
        # If the netloc is empty then it means that there was no scheme added
        # to the URI and we are parsing it as a path. In this case no scheme
        # means HTTP so we can add that scheme to the hostname provided.
        # (The two original checks — empty netloc, empty scheme — trigger the
        # same re-parse, so they are folded into one condition.)
        if parsed.netloc == "" or parsed.scheme == "":
            parsed = urllib.parse.urlparse("http://" + values)

        if parsed.path != "" or parsed.params != "" or parsed.query != "" or parsed.fragment != "":
            raise ArgumentError(self, f"{values} is not an accepted hostname")

        if not parsed.hostname:
            raise ArgumentError(self, f"{values} is not an accepted hostname")

        hostname_regex = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*'
                                    + r'([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$')
        if not hostname_regex.match(parsed.hostname):
            # Not a valid DNS name; accept it only when it is an IP literal.
            try:
                ipaddress.ip_address(parsed.hostname)
            except ValueError as val_error:
                raise ArgumentError(self, f"{values} is not an accepted hostname") from val_error

        scheme = parsed.scheme
        port = None
        if scheme in ["http", "couchbase"]:
            if not parsed.port:
                port = 8091
            if scheme == "couchbase":
                scheme = "http"
        elif scheme in ["https", "couchbases"]:
            if not parsed.port:
                port = 18091
            if scheme == "couchbases":
                scheme = "https"
        else:
            # f-string for consistency with the other messages (was %-format).
            raise ArgumentError(self, f"{scheme} is not an accepted scheme")

        if parsed.port:
            setattr(namespace, self.dest, (scheme + "://" + parsed.netloc))
        else:
            setattr(namespace, self.dest, (scheme + "://" + parsed.netloc + ":" + str(port)))
class CBEnvAction(Action):
    """An argparse action whose default may come from an environment variable.

    When *envvar* is set in the environment and no explicit default is given,
    its value becomes the default and the option stops being required.
    """

    def __init__(self, envvar, required=True, default=None, **kwargs):
        if not default and envvar:
            default = os.environ.get(envvar, default)
        if required and default:
            required = False
        super(CBEnvAction, self).__init__(default=default, required=required,
                                          **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # A value given on the command line always wins over the env default.
        setattr(namespace, self.dest, values)
class CBNonEchoedAction(CBEnvAction):
    """Allows an argument to be specified by use of a non-echoed value passed through
    stdin, through an environment variable, or as a value to the argument"""

    def __init__(self, envvar, prompt_text="Enter password:", confirm_text=None,
                 required=True, default=None, nargs='?', **kwargs):
        self.prompt_text = prompt_text
        # When set, the user is prompted a second time and both entries must
        # match.
        self.confirm_text = confirm_text
        super(CBNonEchoedAction, self).__init__(envvar, required=required, default=default,
                                                nargs=nargs, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # No value on the command line: prompt the user without echoing.
        if values is None:
            values = getpass.getpass(self.prompt_text)
            if self.confirm_text is not None:
                # Bug fix: the confirmation prompt previously reused
                # prompt_text instead of the dedicated confirm_text.
                confirm = getpass.getpass(self.confirm_text)
                if values != confirm:
                    raise ArgumentError(self, "Passwords entered do not match, please retry")
        super(CBNonEchoedAction, self).__call__(parser, namespace, values, option_string=None)
class CBHelpAction(Action):
    """Allows the custom handling of the help command line argument"""

    # pylint: disable=redefined-builtin
    def __init__(self, option_strings, klass, dest=SUPPRESS, default=SUPPRESS, help=None):
        super(CBHelpAction, self).__init__(option_strings=option_strings, dest=dest,
                                           default=default, nargs=0, help=help)  # pylint: disable=redefined-builtin
        # Command class whose man page is shown for the long help form.
        self.klass = klass

    def __call__(self, parser, namespace, values, option_string=None):
        # -h prints the short usage text; --help opens the full man page.
        if option_string == "-h":
            parser.print_help()
        else:
            CBHelpAction._show_man_page(self.klass.get_man_page_name())
        parser.exit()

    @staticmethod
    def _show_man_page(page):
        if os.name == "nt":
            # Windows ships HTML help pages; open them with the default
            # browser via the shell protocol handler.
            try:
                subprocess.call(["rundll32.exe", "url.dll,FileProtocolHandler", os.path.join(CB_MAN_PATH, page)])
            except OSError as e:
                _exit_if_errors([f"Unable to open man page using your browser, {e}"])
        else:
            try:
                subprocess.call(["man", os.path.join(CB_MAN_PATH, page)])
            except OSError:
                # Bug fix: the two concatenated literals previously produced
                # "...on your path orinstall a manual reader" (missing space).
                _exit_if_errors(["Unable to open man page using the 'man' command, ensure it is on your path or "
                                 "install a manual reader"])
class CliParser(ArgumentParser):
    """ArgumentParser variant that exits with a uniform 'ERROR:' message."""

    def __init__(self, *args, **kwargs):
        super(CliParser, self).__init__(*args, **kwargs)

    def error(self, message):
        # Exit code 2 mirrors argparse's own usage-error convention.
        self.exit(2, f'ERROR: {message}\n')
class Command(object):
    """Base class for every Couchbase CLI command."""

    def __init__(self):
        self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False)

    def parse(self, args):
        """Parse *args*, printing the short help when none are given."""
        if not args:
            self.short_help()
        return self.parser.parse_args(args)

    def short_help(self, code=0):
        """Print the short help message and exit with *code*."""
        self.parser.print_help()
        self.parser.exit(code)

    def execute(self, opts):
        """Execute the command; subclasses must override."""
        raise NotImplementedError

    @staticmethod
    def get_man_page_name():
        """Return the command's man page file name; subclasses must override."""
        raise NotImplementedError

    @staticmethod
    def get_description():
        """Return a one line command description; subclasses must override."""
        raise NotImplementedError
class CouchbaseCLI(Command):
    """The top-level couchbase-cli command; dispatches to subcommands."""

    def __init__(self):
        super(CouchbaseCLI, self).__init__()
        self.parser.prog = "couchbase-cli"
        subparser = self.parser.add_subparsers(title="Commands", metavar="")
        for (name, klass) in find_subcommands():
            # Hidden commands are registered without help text so they do not
            # show up in the command list.
            if klass.is_hidden():
                subcommand = subparser.add_parser(name)
            else:
                subcommand = subparser.add_parser(name, help=klass.get_description())
            subcommand.set_defaults(klass=klass)
        group = self.parser.add_argument_group("Options")
        group.add_argument("-h", "--help", action=CBHelpAction, klass=self,
                           help="Prints the short or long help message")
        group.add_argument("--version", help="Get couchbase-cli version")

    def parse(self, args):
        """Parse the subcommand name, then delegate to the subcommand."""
        if len(sys.argv) == 1:
            self.parser.print_help()
            self.parser.exit(1)

        if args[1] == "--version":
            print(VERSION)
            sys.exit(0)

        # The first argument must be a subcommand (or one of the flags
        # handled above), never some other option.
        if not args[1] in ["-h", "--help", "--version"] and args[1].startswith("-"):
            _exit_if_errors([f"Unknown subcommand: '{args[1]}'. The first argument has to be a subcommand like"
                             f" 'bucket-list' or 'rebalance', please see couchbase-cli -h for the full list of commands"
                             f" and options"])

        l1_args = self.parser.parse_args(args[1:2])
        l2_args = l1_args.klass().parse(args[2:])
        setattr(l2_args, 'klass', l1_args.klass)
        return l2_args

    def execute(self, opts):
        opts.klass().execute(opts)

    @staticmethod
    def get_man_page_name():
        """Return the man page file name for the current platform."""
        # Bug fix: the conditional expression previously bound as
        # ("couchbase-cli" + ".1") if ... else ".html", returning a bare
        # ".html" on Windows.  Parenthesise so the prefix is always kept.
        return "couchbase-cli" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "A Couchbase cluster administration utility"
class Subcommand(Command):
    """
    A Couchbase CLI Subcommand: This is for subcommand that interact with a remote Couchbase Server over the REST API.
    """

    def __init__(self, deprecate_username=False, deprecate_password=False, cluster_default=None):
        """Set up the cluster-connection options shared by all REST subcommands.

        deprecate_username/deprecate_password hide -u/-p from help and mark
        them deprecated (used by commands that take their own credentials).
        cluster_default, when given, makes -c/--cluster optional with that value.
        """
        super(Subcommand, self).__init__()
        # Filled by the decorators
        self.rest = None
        self.enterprise = None
        # Replace the base parser so abbreviated long options stay disabled.
        self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False)
        group = self.parser.add_argument_group("Cluster options")
        # --cluster is only required when no default host was supplied.
        group.add_argument("-c", "--cluster", dest="cluster", required=(cluster_default is None),
                           metavar="<cluster>", action=CBHostAction, default=cluster_default,
                           help="The hostname of the Couchbase cluster")
        if deprecate_username:
            group.add_argument("-u", "--username", dest="username",
                               action=CBDeprecatedAction, help=SUPPRESS)
        else:
            # Falls back to the CB_REST_USERNAME environment variable.
            group.add_argument("-u", "--username", dest="username", required=True,
                               action=CBEnvAction, envvar='CB_REST_USERNAME',
                               metavar="<username>", help="The username for the Couchbase cluster")
        if deprecate_password:
            group.add_argument("-p", "--password", dest="password",
                               action=CBDeprecatedAction, help=SUPPRESS)
        else:
            # CBNonEchoedAction prompts without echoing when no value is given.
            group.add_argument("-p", "--password", dest="password", required=True,
                               action=CBNonEchoedAction, envvar='CB_REST_PASSWORD',
                               metavar="<password>", help="The password for the Couchbase cluster")
        group.add_argument("-o", "--output", dest="output", default="standard", metavar="<output>",
                           choices=["json", "standard"], help="The output type (json or standard)")
        group.add_argument("-d", "--debug", dest="debug", action="store_true",
                           help="Run the command with extra logging")
        # -s/--ssl is kept only for backwards compatibility and warns on use.
        group.add_argument("-s", "--ssl", dest="ssl", const=True, default=False,
                           nargs=0, action=CBDeprecatedAction,
                           help="Use ssl when connecting to Couchbase (Deprecated)")
        group.add_argument("--no-ssl-verify", dest="ssl_verify", action="store_false", default=True,
                           help="Skips SSL verification of certificates against the CA")
        # NOTE(review): default=True looks odd for a certificate path option —
        # presumably a sentinel meaning "use system CAs"; confirm downstream use.
        group.add_argument("--cacert", dest="cacert", default=True,
                           help="Verifies the cluster identity with this certificate")
        group.add_argument("-h", "--help", action=CBHelpAction, klass=self,
                           help="Prints the short or long help message")

    def execute(self, opts):  # pylint: disable=useless-super-delegation
        super(Subcommand, self).execute(opts)

    @staticmethod
    def get_man_page_name():
        # Delegates to the (abstract) base; concrete subcommands override this.
        return Command.get_man_page_name()

    @staticmethod
    def get_description():
        return Command.get_description()

    @staticmethod
    def is_hidden():
        """Whether or not the subcommand should be hidden from the help message"""
        return False
class LocalSubcommand(Command):
    """
    A Couchbase CLI Localcommand: This is for subcommands that interact with the local Couchbase Server via the
    filesystem or a local socket.
    """

    def __init__(self):
        super(LocalSubcommand, self).__init__()
        # Replace the base parser so abbreviated long options stay disabled.
        self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False)
        # Typo fix: "has to be execute" -> "has to be executed".
        group = self.parser.add_argument_group(title="Local command options",
                                               description="This command has to be executed on the locally running"
                                               + " Couchbase Server.")
        group.add_argument("-h", "--help", action=CBHelpAction, klass=self,
                           help="Prints the short or long help message")
        # Hidden option; defaults to the standard Couchbase config directory.
        group.add_argument("--config-path", dest="config_path", metavar="<path>",
                           default=CB_CFG_PATH, help=SUPPRESS)

    def execute(self, opts):  # pylint: disable=useless-super-delegation
        super(LocalSubcommand, self).execute(opts)

    @staticmethod
    def get_man_page_name():
        # Delegates to the (abstract) base; concrete subcommands override this.
        return Command.get_man_page_name()

    @staticmethod
    def get_description():
        return Command.get_description()

    @staticmethod
    def is_hidden():
        """Whether or not the subcommand should be hidden from the help message"""
        return False
class ClusterInit(Subcommand):
    """The cluster initialization subcommand"""

    def __init__(self):
        # Credentials are the cluster admin ones being created, so -u/-p are
        # deprecated here and the cluster defaults to the local node.
        super(ClusterInit, self).__init__(True, True, "http://127.0.0.1:8091")
        self.parser.prog = "couchbase-cli cluster-init"
        group = self.parser.add_argument_group("Cluster initialization options")
        group.add_argument("--cluster-username", dest="username", required=True,
                           metavar="<username>", help="The cluster administrator username")
        group.add_argument("--cluster-password", dest="password", required=True,
                           metavar="<password>", help="The cluster administrator password")
        group.add_argument("--cluster-port", dest="port", type=(int),
                           metavar="<port>", help="The cluster administration console port")
        group.add_argument("--cluster-ramsize", dest="data_mem_quota", type=(int),
                           metavar="<quota>", help="The data service memory quota in mebibytes")
        group.add_argument("--cluster-index-ramsize", dest="index_mem_quota", type=(int),
                           metavar="<quota>", help="The index service memory quota in mebibytes")
        group.add_argument("--cluster-fts-ramsize", dest="fts_mem_quota", type=(int),
                           metavar="<quota>",
                           help="The full-text service memory quota in mebibytes")
        group.add_argument("--cluster-eventing-ramsize", dest="eventing_mem_quota", type=(int),
                           metavar="<quota>",
                           help="The Eventing service memory quota in mebibytes")
        group.add_argument("--cluster-analytics-ramsize", dest="cbas_mem_quota", type=(int),
                           metavar="<quota>",
                           help="The analytics service memory quota in mebibytes")
        group.add_argument("--cluster-name", dest="name", metavar="<name>", help="The cluster name")
        # Help-text fix: quote was misplaced ("default)\"" -> "default\")").
        group.add_argument("--index-storage-setting", dest="index_storage_mode",
                           choices=["default", "memopt"], metavar="<mode>",
                           help="The index storage backend (Defaults to \"default\")")
        group.add_argument("--services", dest="services", default="data", metavar="<service_list>",
                           help="The services to run on this server")
        group.add_argument("--update-notifications", dest="notifications", metavar="<1|0>", choices=["0", "1"],
                           default="1", help="Enables/disable software update notifications")

    @rest_initialiser(enterprise_check=False)
    def execute(self, opts):
        # We need to ensure that creating the REST username/password is the
        # last REST API that is called because once that API succeeds the
        # cluster is initialized and cluster-init cannot be run again.
        initialized, errors = self.rest.is_cluster_initialized()
        _exit_if_errors(errors)
        if initialized:
            _exit_if_errors(["Cluster is already initialized, use setting-cluster to change settings"])
        if not self.enterprise and opts.index_storage_mode == 'memopt':
            _exit_if_errors(["memopt option for --index-storage-setting can only be configured on enterprise edition"])
        services, errors = process_services(opts.services, self.enterprise)
        _exit_if_errors(errors)
        # The first node of a cluster must run the data (kv) service.
        if 'kv' not in services.split(','):
            _exit_if_errors(["Cannot set up first cluster node without the data service"])
        # Apply quotas / cluster name only when at least one was requested.
        if opts.data_mem_quota or opts.index_mem_quota or opts.fts_mem_quota or opts.cbas_mem_quota \
                or opts.eventing_mem_quota or opts.name is not None:
            _, errors = self.rest.set_pools_default(opts.data_mem_quota, opts.index_mem_quota, opts.fts_mem_quota,
                                                    opts.cbas_mem_quota, opts.eventing_mem_quota, opts.name)
        _exit_if_errors(errors)
        # Set the index storage mode
        if not opts.index_storage_mode and 'index' in services.split(','):
            opts.index_storage_mode = "default"
        # Community edition only supports forestdb as the "default" backend.
        default = "plasma"
        if not self.enterprise:
            default = "forestdb"
        if opts.index_storage_mode:
            param = index_storage_mode_to_param(opts.index_storage_mode, default)
            _, errors = self.rest.set_index_settings(param, None, None, None, None, None, None, None)
            _exit_if_errors(errors)
        # Setup services
        _, errors = self.rest.setup_services(services)
        _exit_if_errors(errors)
        # Enable notifications
        if opts.notifications == "1":
            _, errors = self.rest.enable_notifications(True)
        else:
            _, errors = self.rest.enable_notifications(False)
        _exit_if_errors(errors)
        # Setup Administrator credentials and Admin Console port
        _, errors = self.rest.set_admin_credentials(opts.username, opts.password,
                                                    opts.port)
        _exit_if_errors(errors)
        _success("Cluster initialized")

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-cluster-init" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Initialize a Couchbase cluster"
class BucketCompact(Subcommand):
    """The bucket compact subcommand"""

    def __init__(self):
        super(BucketCompact, self).__init__()
        self.parser.prog = "couchbase-cli bucket-compact"
        group = self.parser.add_argument_group("Bucket compaction options")
        group.add_argument("--bucket", dest="bucket_name", metavar="<name>",
                           help="The name of bucket to compact")
        group.add_argument("--data-only", dest="data_only", action="store_true",
                           help="Only compact the data files")
        group.add_argument("--view-only", dest="view_only", action="store_true",
                           help="Only compact the view files")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        bucket, errors = self.rest.get_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        # Only couchbase-type buckets have on-disk data/view files to compact.
        if bucket["bucketType"] != BUCKET_TYPE_COUCHBASE:
            _exit_if_errors(["Cannot compact memcached buckets"])
        _, errors = self.rest.compact_bucket(opts.bucket_name, opts.data_only, opts.view_only)
        _exit_if_errors(errors)
        _success("Bucket compaction started")

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-bucket-compact" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Compact database and view data"
class BucketCreate(Subcommand):
    """The bucket create subcommand"""

    def __init__(self):
        super(BucketCreate, self).__init__()
        self.parser.prog = "couchbase-cli bucket-create"
        group = self.parser.add_argument_group("Bucket create options")
        group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True,
                           help="The name of bucket to create")
        group.add_argument("--bucket-type", dest="type", metavar="<type>", required=True,
                           choices=["couchbase", "ephemeral", "memcached"],
                           help="The bucket type (couchbase, ephemeral, or memcached)")
        group.add_argument("--storage-backend", dest="storage", metavar="<storage>",
                           choices=["couchstore", "magma"],
                           help="Type of storage backend (only for couchbase buckets)")
        group.add_argument("--bucket-ramsize", dest="memory_quota", metavar="<quota>", type=(int),
                           required=True, help="The amount of memory to allocate the bucket")
        group.add_argument("--bucket-replica", dest="replica_count", metavar="<num>",
                           choices=["0", "1", "2", "3"],
                           help="The replica count for the bucket")
        group.add_argument("--bucket-priority", dest="priority", metavar="<priority>",
                           choices=[BUCKET_PRIORITY_LOW_STR, BUCKET_PRIORITY_HIGH_STR],
                           help="The bucket disk io priority (low or high)")
        group.add_argument("--durability-min-level", dest="durability_min_level", metavar="<level>",
                           choices=["none", "majority", "majorityAndPersistActive",
                                    "persistToMajority"],
                           help="The bucket durability minimum level")
        group.add_argument("--bucket-eviction-policy", dest="eviction_policy", metavar="<policy>",
                           choices=["valueOnly", "fullEviction", "noEviction", "nruEviction"],
                           help="The bucket eviction policy")
        group.add_argument("--conflict-resolution", dest="conflict_resolution", default=None,
                           choices=["sequence", "timestamp"], metavar="<type>",
                           help="The XDCR conflict resolution type (timestamp or sequence)")
        group.add_argument("--max-ttl", dest="max_ttl", default=None, type=(int), metavar="<seconds>",
                           help="Set the maximum TTL the bucket will accept. Couchbase server Enterprise Edition only.")
        group.add_argument("--compression-mode", dest="compression_mode",
                           choices=["off", "passive", "active"], metavar="<mode>",
                           help="Set the compression mode of the bucket")
        group.add_argument("--enable-flush", dest="enable_flush", metavar="<0|1>",
                           choices=["0", "1"], help="Enable bucket flush on this bucket (0 or 1)")
        group.add_argument("--enable-index-replica", dest="replica_indexes", metavar="<0|1>",
                           choices=["0", "1"], help="Enable replica indexes (0 or 1)")
        group.add_argument("--wait", dest="wait", action="store_true",
                           help="Wait for bucket creation to complete")
        group.add_argument("--database-fragmentation-threshold-percentage", dest="db_frag_perc",
                           metavar="<perc>", type=(int), help="Set Database Fragmentation level percent")
        group.add_argument("--database-fragmentation-threshold-size", dest="db_frag_size",
                           metavar="<mebibytes>", type=(int), help="Set Database Fragmentation level")
        group.add_argument("--view-fragmentation-threshold-percentage", dest="view_frag_perc",
                           metavar="<perc>", type=(int), help="Set View Fragmentation level percent")
        group.add_argument("--view-fragmentation-threshold-size", dest="view_frag_size",
                           metavar="<mebibytes>", type=(int), help="Set View Fragmentation level size")
        group.add_argument("--from-hour", dest="from_hour",
                           metavar="<quota>", type=(int), help="Set start time hour")
        group.add_argument("--from-minute", dest="from_min",
                           metavar="<quota>", type=(int), help="Set start time minutes")
        group.add_argument("--to-hour", dest="to_hour",
                           metavar="<quota>", type=(int), help="Set end time hour")
        group.add_argument("--to-minute", dest="to_min",
                           metavar="<quota>", type=(int), help="Set end time minutes")
        group.add_argument("--abort-outside", dest="abort_outside",
                           metavar="<0|1>", choices=["0", "1"], help="Allow Time period")
        group.add_argument("--parallel-db-view-compaction", dest="paralleldb_and_view_compact",
                           metavar="<0|1>", choices=["0", "1"], help="Set parallel DB and View Compaction")
        group.add_argument("--purge-interval", dest="purge_interval", type=(float),
                           metavar="<float>", help="Sets the frequency of the tombstone purge interval")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        # Enterprise-only features are rejected up front on community edition.
        if opts.max_ttl and not self.enterprise:
            _exit_if_errors(["Maximum TTL can only be configured on enterprise edition"])
        if opts.compression_mode and not self.enterprise:
            _exit_if_errors(["Compression mode can only be configured on enterprise edition"])
        if opts.type == "memcached":
            # Memcached buckets support almost none of the tuning options.
            _deprecated("Memcached buckets are deprecated, please use ephemeral buckets instead")
            if opts.replica_count is not None:
                _exit_if_errors(["--bucket-replica cannot be specified for a memcached bucket"])
            if opts.conflict_resolution is not None:
                _exit_if_errors(["--conflict-resolution cannot be specified for a memcached bucket"])
            if opts.replica_indexes is not None:
                _exit_if_errors(["--enable-index-replica cannot be specified for a memcached bucket"])
            if opts.priority is not None:
                _exit_if_errors(["--bucket-priority cannot be specified for a memcached bucket"])
            if opts.eviction_policy is not None:
                _exit_if_errors(["--bucket-eviction-policy cannot be specified for a memcached bucket"])
            if opts.max_ttl is not None:
                _exit_if_errors(["--max-ttl cannot be specified for a memcached bucket"])
            if opts.compression_mode is not None:
                _exit_if_errors(["--compression-mode cannot be specified for a memcached bucket"])
            if opts.durability_min_level is not None:
                _exit_if_errors(["--durability-min-level cannot be specified for a memcached bucket"])
        elif opts.type == "ephemeral" and opts.eviction_policy in ["valueOnly", "fullEviction"]:
            _exit_if_errors(["--bucket-eviction-policy must either be noEviction or nruEviction"])
        elif opts.type == "couchbase" and opts.eviction_policy in ["noEviction", "nruEviction"]:
            _exit_if_errors(["--bucket-eviction-policy must either be valueOnly or fullEviction"])
        # Compaction settings only apply to couchbase buckets; warn (not fail)
        # if any were supplied for other bucket types.
        if ((opts.type == "memcached" or opts.type == "ephemeral")
                and (opts.db_frag_perc is not None
                     or opts.db_frag_size is not None or opts.view_frag_perc is not None
                     or opts.view_frag_size is not None or opts.from_hour is not None or opts.from_min is not None
                     or opts.to_hour is not None or opts.to_min is not None or opts.abort_outside is not None
                     or opts.paralleldb_and_view_compact is not None)):
            _warning(f'ignoring compaction settings as bucket type {opts.type} does not accept it')
        storage_type = "couchstore"
        if opts.storage is not None:
            if opts.type != "couchbase":
                _exit_if_errors(["--storage-backend is only valid for couchbase buckets"])
            if opts.storage == "magma":
                storage_type = "magma"
        # Map the CLI priority strings to the REST API's integer values.
        priority = None
        if opts.priority is not None:
            if opts.priority == BUCKET_PRIORITY_HIGH_STR:
                priority = BUCKET_PRIORITY_HIGH_INT
            elif opts.priority == BUCKET_PRIORITY_LOW_STR:
                priority = BUCKET_PRIORITY_LOW_INT
        # Map the CLI conflict-resolution names to the REST API's names.
        conflict_resolution_type = None
        if opts.conflict_resolution is not None:
            if opts.conflict_resolution == "sequence":
                conflict_resolution_type = "seqno"
            elif opts.conflict_resolution == "timestamp":
                conflict_resolution_type = "lww"
        _, errors = self.rest.create_bucket(opts.bucket_name, opts.type, storage_type, opts.memory_quota,
                                            opts.durability_min_level, opts.eviction_policy, opts.replica_count,
                                            opts.replica_indexes, priority, conflict_resolution_type, opts.enable_flush,
                                            opts.max_ttl, opts.compression_mode, opts.wait, opts.db_frag_perc,
                                            opts.db_frag_size, opts.view_frag_perc, opts.view_frag_size,
                                            opts.from_hour, opts.from_min, opts.to_hour, opts.to_min,
                                            opts.abort_outside, opts.paralleldb_and_view_compact, opts.purge_interval)
        _exit_if_errors(errors)
        _success("Bucket created")

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-bucket-create" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Add a new bucket to the cluster"
class BucketDelete(Subcommand):
    """The bucket delete subcommand"""

    def __init__(self):
        super(BucketDelete, self).__init__()
        self.parser.prog = "couchbase-cli bucket-delete"
        group = self.parser.add_argument_group("Bucket delete options")
        group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True,
                           help="The name of bucket to delete")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        # Look the bucket up first so a missing bucket produces a clear error.
        _, errors = self.rest.get_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        _, errors = self.rest.delete_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        _success("Bucket deleted")

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-bucket-delete" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Delete an existing bucket"
class BucketEdit(Subcommand):
    """The bucket edit subcommand"""

    def __init__(self):
        super(BucketEdit, self).__init__()
        self.parser.prog = "couchbase-cli bucket-edit"
        group = self.parser.add_argument_group("Bucket edit options")
        group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True,
                           help="The name of bucket to create")
        group.add_argument("--bucket-ramsize", dest="memory_quota", metavar="<quota>",
                           type=(int), help="The amount of memory to allocate the bucket")
        group.add_argument("--bucket-replica", dest="replica_count", metavar="<num>",
                           choices=["0", "1", "2", "3"],
                           help="The replica count for the bucket")
        group.add_argument("--bucket-priority", dest="priority", metavar="<priority>",
                           choices=["low", "high"], help="The bucket disk io priority (low or high)")
        group.add_argument("--durability-min-level", dest="durability_min_level", metavar="<level>",
                           choices=["none", "majority", "majorityAndPersistActive", "persistToMajority"],
                           help="The bucket durability minimum level")
        # Deliberately free-form (no choices=): see the note in execute().
        group.add_argument("--bucket-eviction-policy", dest="eviction_policy", metavar="<policy>",
                           type=(str), help="The bucket eviction policy (valueOnly or fullEviction)")
        group.add_argument("--max-ttl", dest="max_ttl", default=None, type=(int), metavar="<seconds>",
                           help="Set the maximum TTL the bucket will accept")
        group.add_argument("--compression-mode", dest="compression_mode",
                           choices=["off", "passive", "active"], metavar="<mode>",
                           help="Set the compression mode of the bucket")
        group.add_argument("--enable-flush", dest="enable_flush", metavar="<0|1>",
                           choices=["0", "1"], help="Enable bucket flush on this bucket (0 or 1)")
        group.add_argument("--remove-bucket-port", dest="remove_port", metavar="<0|1>",
                           choices=["0", "1"], help="Removes the bucket-port setting")
        group.add_argument("--database-fragmentation-threshold-percentage", dest="db_frag_perc",
                           metavar="<perc>", type=(int), help="Set Database Fragmentation level percent")
        group.add_argument("--database-fragmentation-threshold-size", dest="db_frag_size",
                           metavar="<mebibytes>", type=(int), help="Set Database Fragmentation level")
        group.add_argument("--view-fragmentation-threshold-percentage", dest="view_frag_perc",
                           metavar="<perc>", type=(int), help="Set View Fragmentation level percent")
        group.add_argument("--view-fragmentation-threshold-size", dest="view_frag_size",
                           metavar="<mebibytes>", type=(int), help="Set View Fragmentation level size")
        group.add_argument("--from-hour", dest="from_hour",
                           metavar="<hour>", type=(int), help="Set start time hour")
        group.add_argument("--from-minute", dest="from_min",
                           metavar="<min>", type=(int), help="Set start time minutes")
        group.add_argument("--to-hour", dest="to_hour",
                           metavar="<hour>", type=(int), help="Set end time hour")
        group.add_argument("--to-minute", dest="to_min",
                           metavar="<min>", type=(int), help="Set end time minutes")
        group.add_argument("--abort-outside", dest="abort_outside",
                           metavar="<0|1>", choices=["0", "1"], help="Allow Time period")
        group.add_argument("--parallel-db-view-compaction", dest="paralleldb_and_view_compact",
                           metavar="<0|1>", choices=["0", "1"], help="Set parallel DB and View Compaction")
        group.add_argument("--purge-interval", dest="purge_interval", type=(float),
                           metavar="<num>", help="Set the bucket metadata purge interval")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        if opts.max_ttl and not self.enterprise:
            _exit_if_errors(["Maximum TTL can only be configured on enterprise edition"])
        if opts.compression_mode and not self.enterprise:
            _exit_if_errors(["Compression mode can only be configured on enterprise edition"])
        # Note that we accept 'noEviction' and 'nruEviction' as valid values even though they are undocumented; this is
        # so that users attempting to modify the eviction policy of an ephemeral bucket will receive a meaningful
        # message from 'ns_server'. See MB-39036 for more information.
        if (opts.eviction_policy is not None
                and opts.eviction_policy not in ["valueOnly", "fullEviction", "noEviction", "nruEviction"]):
            _exit_if_errors([f"argument --bucket-eviction-policy: invalid choice: '{opts.eviction_policy}'" +
                             " (choose from 'valueOnly', 'fullEviction')"])
        bucket, errors = self.rest.get_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        if "bucketType" in bucket and bucket["bucketType"] == "memcached":
            # Memcached buckets support almost none of the tuning options.
            _deprecated("Memcached buckets are deprecated, please use ephemeral buckets instead")
            if opts.memory_quota is not None:
                _exit_if_errors(["--bucket-ramsize cannot be specified for a memcached bucket"])
            if opts.replica_count is not None:
                _exit_if_errors(["--bucket-replica cannot be specified for a memcached bucket"])
            if opts.priority is not None:
                _exit_if_errors(["--bucket-priority cannot be specified for a memcached bucket"])
            if opts.eviction_policy is not None:
                _exit_if_errors(["--bucket-eviction-policy cannot be specified for a memcached bucket"])
            if opts.max_ttl is not None:
                _exit_if_errors(["--max-ttl cannot be specified for a memcached bucket"])
            if opts.compression_mode is not None:
                _exit_if_errors(["--compression-mode cannot be specified for a memcached bucket"])
            if opts.durability_min_level is not None:
                _exit_if_errors(["--durability-min-level cannot be specified for a memcached bucket"])
        # Quote fix: the f-string reused the outer single quote for the
        # subscript, which is a SyntaxError before Python 3.12.
        if (("bucketType" in bucket and (bucket["bucketType"] == "memcached" or bucket["bucketType"] == "ephemeral"))
                and (opts.db_frag_perc is not None or opts.db_frag_size is not None
                     or opts.view_frag_perc is not None or opts.view_frag_size is not None or opts.from_hour is not None
                     or opts.from_min is not None or opts.to_hour is not None or opts.to_min is not None
                     or opts.abort_outside is not None or opts.paralleldb_and_view_compact is not None)):
            _exit_if_errors([f'compaction settings can not be specified for a {bucket["bucketType"]} bucket'])
        # Map the CLI priority strings to the REST API's integer values.
        priority = None
        if opts.priority is not None:
            if opts.priority == BUCKET_PRIORITY_HIGH_STR:
                priority = BUCKET_PRIORITY_HIGH_INT
            elif opts.priority == BUCKET_PRIORITY_LOW_STR:
                priority = BUCKET_PRIORITY_LOW_INT
        if opts.remove_port:
            if opts.remove_port == '1':
                opts.remove_port = True
            else:
                opts.remove_port = False
        _, errors = self.rest.edit_bucket(opts.bucket_name, opts.memory_quota, opts.durability_min_level,
                                          opts.eviction_policy, opts.replica_count, priority, opts.enable_flush,
                                          opts.max_ttl, opts.compression_mode, opts.remove_port, opts.db_frag_perc,
                                          opts.db_frag_size, opts.view_frag_perc, opts.view_frag_size, opts.from_hour,
                                          opts.from_min, opts.to_hour, opts.to_min, opts.abort_outside,
                                          opts.paralleldb_and_view_compact, opts.purge_interval,
                                          'bucketType' in bucket and bucket['bucketType'] == 'membase')
        _exit_if_errors(errors)
        _success("Bucket edited")

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-bucket-edit" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify settings for an existing bucket"
class BucketFlush(Subcommand):
    """The bucket flush subcommand"""

    def __init__(self):
        super(BucketFlush, self).__init__()
        self.parser.prog = "couchbase-cli bucket-flush"
        group = self.parser.add_argument_group("Bucket flush options")
        group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True,
                           help="The name of bucket to delete")
        group.add_argument("--force", dest="force", action="store_true",
                           help="Execute the command without asking to confirm")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        # Look the bucket up first so a missing bucket produces a clear error.
        _, errors = self.rest.get_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        # Flushing destroys data, so require interactive confirmation unless
        # --force was supplied.
        if not opts.force:
            question = "Running this command will totally PURGE database data from disk. " + \
                       "Do you really want to do it? (Yes/No)"
            confirm = input(question)
            if confirm not in ('y', 'Y', 'yes', 'Yes'):
                return
        _, errors = self.rest.flush_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        _success("Bucket flushed")

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-bucket-flush" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Flush all data from disk for a given bucket"
class BucketList(Subcommand):
    """The bucket list subcommand"""

    def __init__(self):
        super(BucketList, self).__init__()
        self.parser.prog = "couchbase-cli bucket-list"

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        result, errors = self.rest.list_buckets(extended=True)
        _exit_if_errors(errors)
        if opts.output == 'json':
            print(json.dumps(result))
        else:
            # Quote fix: the f-strings reused the outer single quote for the
            # subscripts, which is a SyntaxError before Python 3.12.
            for bucket in result:
                print(f'{bucket["name"]}')
                print(f' bucketType: {bucket["bucketType"]}')
                print(f' numReplicas: {bucket["replicaNumber"]}')
                print(f' ramQuota: {bucket["quota"]["ram"]}')
                print(f' ramUsed: {bucket["basicStats"]["memUsed"]}')

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-bucket-list" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "List all buckets in a cluster"
class CollectLogsStart(Subcommand):
    """The collect-logs-start subcommand"""

    def __init__(self):
        super(CollectLogsStart, self).__init__()
        self.parser.prog = "couchbase-cli collect-logs-start"
        group = self.parser.add_argument_group("Collect logs start options")
        group.add_argument("--all-nodes", dest="all_nodes", action="store_true",
                           default=False, help="Collect logs for all nodes")
        group.add_argument("--nodes", dest="nodes", metavar="<node_list>",
                           help="A comma separated list of nodes to collect logs from")
        group.add_argument("--redaction-level", dest="redaction_level", metavar="<none|partial>",
                           choices=["none", "partial"], help="Level of log redaction to apply")
        group.add_argument("--salt", dest="salt", metavar="<string>",
                           help="The salt to use to redact the log")
        group.add_argument("--output-directory", dest="output_dir", metavar="<directory>",
                           help="Output directory to place the generated logs file")
        group.add_argument("--temporary-directory", dest="tmp_dir", metavar="<directory>",
                           help="Temporary directory to use when generating the logs")
        group.add_argument("--upload", dest="upload", action="store_true",
                           default=False, help="Logs should be uploaded for Couchbase support")
        group.add_argument("--upload-host", dest="upload_host", metavar="<host>",
                           help="The host to upload logs to")
        group.add_argument("--upload-proxy", dest="upload_proxy", metavar="<proxy>",
                           help="The proxy to used to upload the logs via")
        group.add_argument("--customer", dest="upload_customer", metavar="<name>",
                           help="The name of the customer uploading logs")
        group.add_argument("--ticket", dest="upload_ticket", metavar="<num>",
                           help="The ticket number the logs correspond to")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        # Exactly one of --all-nodes / --nodes must be given.
        if not opts.nodes and not opts.all_nodes:
            _exit_if_errors(["Must specify either --all-nodes or --nodes"])
        if opts.nodes and opts.all_nodes:
            _exit_if_errors(["Cannot specify both --all-nodes and --nodes"])
        if opts.salt and opts.redaction_level != "partial":
            _exit_if_errors(["--redaction-level has to be set to 'partial' when --salt is specified"])
        # "*" is the REST API's wildcard for "all nodes".
        servers = opts.nodes
        if opts.all_nodes:
            servers = "*"
        if opts.upload:
            if not opts.upload_host:
                _exit_if_errors(["--upload-host is required when --upload is specified"])
            if not opts.upload_customer:
                _exit_if_errors(["--upload-customer is required when --upload is specified"])
        else:
            # Message fixes: these warnings fire when --upload is absent
            # ("without specifying"), and the flag names shown now match the
            # actual options (--ticket, --upload-proxy).
            if opts.upload_host:
                _warning("--upload-host has no effect without specifying --upload")
            if opts.upload_customer:
                _warning("--upload-customer has no effect without specifying --upload")
            if opts.upload_ticket:
                _warning("--ticket has no effect without specifying --upload")
            if opts.upload_proxy:
                _warning("--upload-proxy has no effect without specifying --upload")
        _, errors = self.rest.collect_logs_start(servers, opts.redaction_level, opts.salt, opts.output_dir,
                                                 opts.tmp_dir, opts.upload, opts.upload_host, opts.upload_proxy,
                                                 opts.upload_customer, opts.upload_ticket)
        _exit_if_errors(errors)
        _success("Log collection started")

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-collect-logs-start" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Start cluster log collection"
class CollectLogsStatus(Subcommand):
    """The collect-logs-status subcommand"""

    def __init__(self):
        super(CollectLogsStatus, self).__init__()
        self.parser.prog = "couchbase-cli collect-logs-status"

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        tasks, errors = self.rest.get_tasks()
        _exit_if_errors(errors)
        # Print every log-collection task found in the cluster task list.
        found = False
        for task in tasks:
            if isinstance(task, dict) and 'type' in task and task['type'] == 'clusterLogsCollection':
                found = True
                self._print_task(task)
        if not found:
            print("No log collection tasks were found")

    def _print_task(self, task):
        """Pretty-print one clusterLogsCollection task, including per-node detail."""
        # Quote fix: the f-string reused the outer single quote for the
        # subscript, which is a SyntaxError before Python 3.12.
        print(f'Status: {task["status"]}')
        if 'perNode' in task:
            print("Details:")
            for node, node_status in task["perNode"].items():
                print('\tNode:', node)
                print('\tStatus:', node_status['status'])
                for field in ["path", "statusCode", "url", "uploadStatusCode", "uploadOutput"]:
                    if field in node_status:
                        print('\t', field, ":", node_status[field])
                print()

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-collect-logs-status" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "View the status of cluster log collection"
class CollectLogsStop(Subcommand):
    """The collect-logs-stop subcommand"""

    def __init__(self):
        super(CollectLogsStop, self).__init__()
        self.parser.prog = "couchbase-cli collect-logs-stop"

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        _, errors = self.rest.collect_logs_stop()
        _exit_if_errors(errors)
        _success("Log collection stopped")

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-collect-logs-stop" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Stop cluster log collection"
class Failover(Subcommand):
    """The failover subcommand"""

    def __init__(self):
        super(Failover, self).__init__()
        self.parser.prog = "couchbase-cli failover"
        group = self.parser.add_argument_group("Failover options")
        group.add_argument("--server-failover", dest="servers_to_failover", metavar="<server_list>",
                           required=True, help="A list of servers to fail over")
        group.add_argument("--hard", dest="hard", action="store_true",
                           help="Hard failover the server")
        group.add_argument("--force", dest="force", action="store_true",
                           help="Force a hard failover")
        group.add_argument("--no-progress-bar", dest="no_bar", action="store_true",
                           default=False, help="Disables the progress bar")
        group.add_argument("--no-wait", dest="wait", action="store_false",
                           default=True, help="Don't wait for rebalance completion")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        # --force only makes sense for a hard failover.
        if opts.force and not opts.hard:
            _exit_if_errors(["--hard is required with --force flag"])
        opts.servers_to_failover = apply_default_port(opts.servers_to_failover)
        _, errors = self.rest.failover(opts.servers_to_failover, opts.hard, opts.force)
        _exit_if_errors(errors)
        if not opts.hard:
            # Graceful failover runs as a rebalance-style task; optionally wait
            # on it with a progress bar.
            time.sleep(1)
            if opts.wait:
                bar = TopologyProgressBar(self.rest, 'Gracefully failing over', opts.no_bar)
                errors = bar.show()
                _exit_if_errors(errors)
                _success("Server failed over")
            else:
                # NOTE(review): message reads oddly ("failed over started") but
                # is kept verbatim in case scripts match on it.
                _success("Server failed over started")
        else:
            _success("Server failed over")

    @staticmethod
    def get_man_page_name():
        # Parenthesised: without the parentheses the conditional returned just
        # ".html" on Windows instead of the full man page name.
        return "couchbase-cli-failover" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Failover one or more servers"
class GroupManage(Subcommand):
    """The group manage subcommand"""

    def __init__(self):
        super(GroupManage, self).__init__()
        self.parser.prog = "couchbase-cli group-manage"
        group = self.parser.add_argument_group("Group manage options")
        group.add_argument("--create", dest="create", action="store_true",
                           default=None, help="Create a new server group")
        group.add_argument("--delete", dest="delete", action="store_true",
                           default=None, help="Delete a server group")
        group.add_argument("--list", dest="list", action="store_true",
                           default=None, help="List all server groups")
        group.add_argument("--rename", dest="rename", help="Rename a server group. It takes the new name of the group.")
        group.add_argument("--group-name", dest="name", metavar="<name>",
                           help="The name of the server group")
        group.add_argument("--move-servers", dest="move_servers", metavar="<server_list>",
                           help="A list of servers to move between groups")
        group.add_argument("--from-group", dest="from_group", metavar="<group>",
                           help="The group to move servers from")
        group.add_argument("--to-group", dest="to_group", metavar="<group>",
                           help="The group to move servers to")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Dispatch to exactly one of the mutually-exclusive group operations."""
        # The flags default to None (not False) so `is not None` detects explicit use
        cmds = [opts.create, opts.delete, opts.list, opts.rename, opts.move_servers]
        if sum(cmd is not None for cmd in cmds) == 0:
            _exit_if_errors(["Must specify one of the following: --create, "
                             + "--delete, --list, --move-servers, or --rename"])
        elif sum(cmd is not None for cmd in cmds) != 1:
            _exit_if_errors(["Only one of the following may be specified: --create"
                             + ", --delete, --list, --move-servers, or --rename"])
        if opts.create:
            self._create(opts)
        elif opts.delete:
            self._delete(opts)
        elif opts.list:
            self._list(opts)
        elif opts.rename:
            self._rename(opts)
        elif opts.move_servers is not None:
            self._move(opts)

    def _create(self, opts):
        """Create a new server group named opts.name."""
        if opts.name is None:
            _exit_if_errors(["--group-name is required with --create flag"])
        _, errors = self.rest.create_server_group(opts.name)
        _exit_if_errors(errors)
        _success("Server group created")

    def _delete(self, opts):
        """Delete the server group named opts.name."""
        if opts.name is None:
            _exit_if_errors(["--group-name is required with --delete flag"])
        _, errors = self.rest.delete_server_group(opts.name)
        _exit_if_errors(errors)
        _success("Server group deleted")

    def _list(self, opts):
        """List all groups, or only the one matching opts.name when given."""
        groups, errors = self.rest.get_server_groups()
        _exit_if_errors(errors)
        found = False
        for group in groups["groups"]:
            if opts.name is None or opts.name == group['name']:
                found = True
                print(group['name'])
                for node in group['nodes']:
                    print(f" server: {node['hostname']}")
        if not found and opts.name:
            _exit_if_errors([f'Invalid group name: {opts.name}'])

    def _move(self, opts):
        """Move opts.move_servers from opts.from_group to opts.to_group."""
        if opts.from_group is None:
            _exit_if_errors(["--from-group is required with --move-servers"])
        if opts.to_group is None:
            _exit_if_errors(["--to-group is required with --move-servers"])
        servers = apply_default_port(opts.move_servers)
        _, errors = self.rest.move_servers_between_groups(servers, opts.from_group, opts.to_group)
        _exit_if_errors(errors)
        _success("Servers moved between groups")

    def _rename(self, opts):
        """Rename group opts.name to opts.rename."""
        if opts.name is None:
            _exit_if_errors(["--group-name is required with --rename option"])
        _, errors = self.rest.rename_server_group(opts.name, opts.rename)
        _exit_if_errors(errors)
        _success("Server group renamed")

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-group-manage" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage server groups"
class HostList(Subcommand):
    """The host list subcommand"""

    def __init__(self):
        super(HostList, self).__init__()
        self.parser.prog = "couchbase-cli host-list"

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Print every configured hostname in the cluster (JSON or plain text)."""
        result, errors = self.rest.pools('default')
        _exit_if_errors(errors)
        if opts.output == 'json':
            nodes_out = {'nodes': [node['configuredHostname'] for node in result['nodes']]}
            print(json.dumps(nodes_out))
        else:
            for node in result['nodes']:
                print(node['configuredHostname'])

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-host-list" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "List all hosts in a cluster"
class ResetCipherSuites(LocalSubcommand):
    """The reset cipher suites subcommand """

    def __init__(self):
        super(ResetCipherSuites, self).__init__()
        self.parser.prog = "couchbase-cli reset-cipher-suites"
        group = self.parser.add_argument_group("Reset Cipher Suites")
        group.add_argument("--force", action='store_true', default=False, help="Force resetting of the cipher suites")
        group.add_argument("-P", "--port", metavar="<port>", default="8091",
                           help="The REST API port, defaults to 8091")

    def execute(self, opts):
        """Reset the TLS cipher suites to default, prompting for confirmation unless --force."""
        # Authenticates against the local node using the on-disk localtoken
        token = _exit_on_file_read_failure(os.path.join(opts.config_path, "localtoken")).rstrip()
        rest = ClusterManager("http://127.0.0.1:" + opts.port, "@localtoken", token)
        check_cluster_initialized(rest)
        check_versions(rest)
        if not opts.force:
            confirm = str(input("Are you sure that the cipher should be reset?: Y/[N]"))
            if confirm != "Y":
                _success("Cipher suites have not been reset to default")
                # BUG FIX: without this return the ciphers were reset even after the
                # user declined (NOTE(review): assumes _success does not exit — if it
                # does, the return is harmless).
                return
        _, errors = rest.reset_cipher_suites()
        _exit_if_errors(errors)
        _success("Cipher suites have been reset to the default")

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-reset-cipher-suites" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        # BUG FIX: typo "Rests" -> "Resets" in the user-facing description
        return "Resets cipher suites to the default"
class MasterPassword(LocalSubcommand):
    """The master password subcommand"""

    def __init__(self):
        super(MasterPassword, self).__init__()
        self.parser.prog = "couchbase-cli master-password"
        group = self.parser.add_argument_group("Master password options")
        group.add_argument("--send-password", dest="send_password", metavar="<password>",
                           required=False, action=CBNonEchoedAction, envvar=None,
                           prompt_text="Enter master password:",
                           help="Sends the master password to start the server")

    def execute(self, opts):
        """Send the master password to a waiting babysitter node so it can boot."""
        if opts.send_password is not None:
            # erl must be resolvable; put the Couchbase bin dir ahead of the rest of PATH
            path = [CB_BIN_PATH, os.environ['PATH']]
            if os.name == 'posix':
                os.environ['PATH'] = ':'.join(path)
            else:
                os.environ['PATH'] = ';'.join(path)
            # The babysitter cookie/node files exist only while the node is waiting
            cookiefile = os.path.join(opts.config_path, "couchbase-server.babysitter.cookie")
            if not os.path.isfile(cookiefile):
                _exit_if_errors(["The node is down"])
            cookie = _exit_on_file_read_failure(cookiefile, "Insufficient privileges to send master password - Please"
                                                            " execute this command as a operating system user who has"
                                                            " file system read permission on the Couchbase Server "
                                                            " configuration").rstrip()
            nodefile = os.path.join(opts.config_path, "couchbase-server.babysitter.node")
            node = _exit_on_file_read_failure(nodefile).rstrip()
            self.prompt_for_master_pwd(node, cookie, opts.send_password, opts.config_path)
        else:
            _exit_if_errors(["No parameters set"])

    def prompt_for_master_pwd(self, node, cookie, password, cb_cfg_path):
        """Deliver `password` to `node` via a short-lived erlang shell; re-prompt on
        an incorrect password (rc 101)."""
        ns_server_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_server", "ebin")
        babystr_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_babysitter", "ebin")
        inetrc_file = os.path.join(CB_ETC_PATH, "hosts.cfg")
        dist_cfg_file = os.path.join(cb_cfg_path, "config", "dist_cfg")
        if password == '':
            password = getpass.getpass("\nEnter master password:")
        name = 'executioner@cb.local'
        args = ['-pa', ns_server_ebin_path, babystr_ebin_path, '-noinput', '-name', name,
                '-proto_dist', 'cb', '-epmd_module', 'cb_epmd',
                '-kernel', 'inetrc', f'"{inetrc_file}"', 'dist_config_file', f'"{dist_cfg_file}"',
                '-setcookie', cookie,
                '-run', 'encryption_service', 'remote_set_password', node, password]
        rc, out, err = self.run_process("erl", args)
        # Return codes are defined by encryption_service:remote_set_password
        if rc == 0:
            print("SUCCESS: Password accepted. Node started booting.")
        elif rc == 101:
            print("Incorrect password.")
            self.prompt_for_master_pwd(node, cookie, '', cb_cfg_path)
        elif rc == 102:
            _exit_if_errors(["Password was already supplied"])
        elif rc == 103:
            _exit_if_errors(["The node is down"])
        elif rc == 104:
            _exit_if_errors(["Incorrect password. Node shuts down."])
        else:
            _exit_if_errors([f'Unknown error: {rc} {out}, {err}'])

    def run_process(self, name, args):
        """Run an external executable and return (returncode, stdout, stderr)."""
        try:
            if os.name == "nt":
                name = name + ".exe"
            args.insert(0, name)
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # BUG FIX: communicate() drains both pipes concurrently; the previous
            # sequential stdout.read() then stderr.read() could deadlock if the
            # child filled the stderr pipe while we blocked on stdout.
            output, error = p.communicate()
            return p.returncode, output, error
        except OSError:
            _exit_if_errors([f'Could not locate the {name} executable'])

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-master-password" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Unlocking the master password"
class NodeInit(Subcommand):
    """The node initialization subcommand"""

    def __init__(self):
        super(NodeInit, self).__init__()
        self.parser.prog = "couchbase-cli node-init"
        group = self.parser.add_argument_group("Node initialization options")
        group.add_argument("--node-init-data-path", dest="data_path", metavar="<path>",
                           help="The path to store database files")
        group.add_argument("--node-init-index-path", dest="index_path", metavar="<path>",
                           help="The path to store index files")
        group.add_argument("--node-init-analytics-path", dest="analytics_path", metavar="<path>", action="append",
                           help="The path to store analytics files (supply one parameter for each path desired)")
        group.add_argument("--node-init-eventing-path", dest="eventing_path", metavar="<path>",
                           help="The path to store eventing files")
        group.add_argument("--node-init-java-home", dest="java_home", metavar="<path>",
                           help="The path of the Java Runtime Environment (JRE) to use on this server")
        group.add_argument("--node-init-hostname", dest="hostname", metavar="<hostname>",
                           help="Sets the hostname for this server")
        group.add_argument("--ipv6", dest="ipv6", action="store_true", default=False,
                           help="Configure the node to communicate via ipv6")
        group.add_argument("--ipv4", dest="ipv4", action="store_true", default=False,
                           help="Configure the node to communicate via ipv4")

    @rest_initialiser()
    def execute(self, opts):
        """Apply node-level settings; the cluster does not need to be initialized."""
        # BUG FIX: ipv4/ipv6 are store_true flags defaulting to False, never None,
        # so the original `opts.ipv6 is None and opts.ipv4 is None` was always
        # false and the "no parameters" error could never fire.
        if (opts.data_path is None and opts.index_path is None and opts.analytics_path is None
                and opts.eventing_path is None and opts.java_home is None and opts.hostname is None
                and not opts.ipv6 and not opts.ipv4):
            _exit_if_errors(["No node initialization parameters specified"])
        if opts.ipv4 and opts.ipv6:
            _exit_if_errors(["Use either --ipv4 or --ipv6"])
        if opts.ipv4:
            afamily = 'ipv4'
        elif opts.ipv6:
            afamily = 'ipv6'
        else:
            afamily = None
        _, errors = self.rest.node_init(hostname=opts.hostname,
                                        afamily=afamily,
                                        data_path=opts.data_path,
                                        index_path=opts.index_path,
                                        cbas_path=opts.analytics_path,
                                        eventing_path=opts.eventing_path,
                                        java_home=opts.java_home)
        _exit_if_errors(errors)
        _success("Node initialized")

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-node-init" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Set node specific settings"
class Rebalance(Subcommand):
    """The rebalance subcommand"""

    def __init__(self):
        super(Rebalance, self).__init__()
        self.parser.prog = "couchbase-cli rebalance"
        group = self.parser.add_argument_group("Rebalance options")
        group.add_argument("--server-remove", dest="server_remove", metavar="<server_list>",
                           help="A list of servers to remove from the cluster")
        group.add_argument("--no-progress-bar", dest="no_bar", action="store_true",
                           default=False, help="Disables the progress bar")
        group.add_argument("--no-wait", dest="wait", action="store_false",
                           default=True, help="Don't wait for rebalance completion")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Start a rebalance, optionally ejecting servers and waiting for completion."""
        eject_nodes = []
        if opts.server_remove:
            eject_nodes = apply_default_port(opts.server_remove)
        _, errors = self.rest.rebalance(eject_nodes)
        _exit_if_errors(errors)
        # Give the rebalance task a moment to register before polling progress
        time.sleep(1)
        if opts.wait:
            bar = TopologyProgressBar(self.rest, 'Rebalancing', opts.no_bar)
            errors = bar.show()
            _exit_if_errors(errors)
            _success("Rebalance complete")
        else:
            _success("Rebalance started")

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-rebalance" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Start a cluster rebalancing"
class RebalanceStatus(Subcommand):
    """The rebalance status subcommand"""

    def __init__(self):
        super(RebalanceStatus, self).__init__()
        self.parser.prog = "couchbase-cli rebalance-status"

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Print the current rebalance status as pretty-printed JSON."""
        status, errors = self.rest.rebalance_status()
        _exit_if_errors(errors)
        print(json.dumps(status, indent=2))

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-rebalance-status" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Show rebalance status"
class RebalanceStop(Subcommand):
    """The rebalance stop subcommand"""

    def __init__(self):
        super(RebalanceStop, self).__init__()
        self.parser.prog = "couchbase-cli rebalance-stop"

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Stop an in-progress rebalance."""
        _, errors = self.rest.stop_rebalance()
        _exit_if_errors(errors)
        _success("Rebalance stopped")

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-rebalance-stop" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Stop a rebalance"
class Recovery(Subcommand):
    """The recovery command"""

    def __init__(self):
        super(Recovery, self).__init__()
        self.parser.prog = "couchbase-cli recovery"
        group = self.parser.add_argument_group("Recovery options")
        group.add_argument("--server-recovery", dest="servers", metavar="<server_list>",
                           required=True, help="The list of servers to recover")
        group.add_argument("--recovery-type", dest="recovery_type", metavar="type",
                           choices=["delta", "full"], default="delta",
                           help="The recovery type (delta or full)")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Mark each listed server for delta or full recovery."""
        servers = apply_default_port(opts.servers)
        for server in servers:
            _, errors = self.rest.recovery(server, opts.recovery_type)
            _exit_if_errors(errors)
        _success("Servers recovered")

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-recovery" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Recover one or more servers"
class ResetAdminPassword(LocalSubcommand):
    """The reset admin password command"""

    def __init__(self):
        super(ResetAdminPassword, self).__init__()
        self.parser.prog = "couchbase-cli reset-admin-password"
        group = self.parser.add_argument_group("Reset password options")
        group.add_argument("--new-password", dest="new_password", metavar="<password>",
                           required=False, action=CBNonEchoedAction, envvar=None,
                           prompt_text="Enter new administrator password:",
                           confirm_text="Confirm new administrator password:",
                           help="The new administrator password")
        group.add_argument("--regenerate", dest="regenerate", action="store_true",
                           help="Generates a random administrator password")
        group.add_argument("-P", "--port", metavar="<port>", default="8091",
                           help="The REST API port, defaults to 8091")

    def execute(self, opts):
        """Set or regenerate the administrator password via the local node's token auth."""
        token = _exit_on_file_read_failure(os.path.join(opts.config_path, "localtoken")).rstrip()
        rest = ClusterManager("http://127.0.0.1:" + opts.port, "@localtoken", token)
        check_cluster_initialized(rest)
        check_versions(rest)
        if opts.new_password is not None and opts.regenerate:
            _exit_if_errors(["Cannot specify both --new-password and --regenerate at the same time"])
        elif opts.new_password is not None:
            _, errors = rest.set_admin_password(opts.new_password)
            _exit_if_errors(errors)
            _success("Administrator password changed")
        elif opts.regenerate:
            result, errors = rest.regenerate_admin_password()
            _exit_if_errors(errors)
            # The generated password is printed so the operator can record it
            print(result["password"])
        else:
            _exit_if_errors(["No parameters specified"])

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-reset-admin-password" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Resets the administrator password"
class ServerAdd(Subcommand):
    """The server add command"""

    def __init__(self):
        super(ServerAdd, self).__init__()
        self.parser.prog = "couchbase-cli server-add"
        group = self.parser.add_argument_group("Server add options")
        group.add_argument("--server-add", dest="servers", metavar="<server_list>", required=True,
                           help="The list of servers to add")
        group.add_argument("--server-add-username", dest="server_username", metavar="<username>",
                           required=True, help="The username for the server to add")
        group.add_argument("--server-add-password", dest="server_password", metavar="<password>",
                           required=True, help="The password for the server to add")
        group.add_argument("--group-name", dest="group_name", metavar="<name>",
                           help="The server group to add this server into")
        group.add_argument("--services", dest="services", default="data", metavar="<services>",
                           help="The services this server will run")
        group.add_argument("--index-storage-setting", dest="index_storage_mode", metavar="<mode>",
                           choices=["default", "memopt"], help="The index storage mode")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Add one or more servers, configuring the index storage mode when needed."""
        if not self.enterprise and opts.index_storage_mode == 'memopt':
            _exit_if_errors(["memopt option for --index-storage-setting can only be configured on enterprise edition"])
        opts.services, errors = process_services(opts.services, self.enterprise)
        _exit_if_errors(errors)
        settings, errors = self.rest.index_settings()
        _exit_if_errors(errors)
        # First index node with no storage mode set yet: fall back to "default"
        if opts.index_storage_mode is None and settings['storageMode'] == "" and "index" in opts.services:
            opts.index_storage_mode = "default"
        # For supporting the default index backend changing from forestdb to plasma in Couchbase 5.0
        default = "plasma"
        if opts.index_storage_mode == "default" and settings['storageMode'] == "forestdb" or not self.enterprise:
            default = "forestdb"
        if opts.index_storage_mode:
            param = index_storage_mode_to_param(opts.index_storage_mode, default)
            _, errors = self.rest.set_index_settings(param, None, None, None, None, None, None, None)
            _exit_if_errors(errors)
        servers = opts.servers.split(',')
        for server in servers:
            _, errors = self.rest.add_server(server, opts.group_name, opts.server_username, opts.server_password,
                                             opts.services)
            _exit_if_errors(errors)
        _success("Server added")

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-server-add" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Add servers to the cluster"
class ServerEshell(Subcommand):
    """The server eshell subcommand"""

    def __init__(self):
        super(ServerEshell, self).__init__()
        self.parser.prog = "couchbase-cli server-eshell"
        group = self.parser.add_argument_group("Server eshell options")
        group.add_argument("--vm", dest="vm", default="ns_server", metavar="<name>",
                           help="The vm to connect to")
        group.add_argument("--erl-path", dest="erl_path", metavar="<path>", default=CB_BIN_PATH,
                           help="Override the path to the erl executable")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Open a remote erlang shell into the requested Couchbase VM."""
        # Cluster does not need to be initialized for this command
        result, errors = self.rest.node_info()
        _exit_if_errors(errors)
        node = result['otpNode']
        cookie = result['otpCookie']
        if opts.vm != 'ns_server':
            # babysitter/couchdb VMs use the babysitter cookie and derived node names
            cookie, errors = self.rest.get_babysitter_cookie()
            _exit_if_errors(errors)
            [short, _] = node.split('@')
            if opts.vm == 'babysitter':
                node = f'babysitter_of_{short}@cb.local'
            elif opts.vm == 'couchdb':
                node = f'couchdb_{short}@cb.local'
            else:
                _exit_if_errors([f'Unknown vm type `{opts.vm}`'])
        # Random shell node name to avoid clashing with other concurrent shells
        rand_chars = ''.join(random.choice(string.ascii_letters) for _ in range(20))
        name = f'ctl-{rand_chars}@127.0.0.1'
        cb_erl = os.path.join(opts.erl_path, 'erl')
        if os.path.isfile(cb_erl):
            path = cb_erl
        else:
            _warning("Cannot locate Couchbase erlang. Attempting to use non-Couchbase erlang")
            path = 'erl'
        inetrc_file = os.path.join(CB_ETC_PATH, 'hosts.cfg')
        if os.path.isfile(inetrc_file):
            inetrc_opt = ['-kernel', 'inetrc', f'"{inetrc_file}"']
        else:
            inetrc_opt = []
        ns_server_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_server", "ebin")
        # NOTE(review): NamedTemporaryFile cannot be re-opened by another process
        # while held open on Windows — presumably this command is POSIX-only; verify.
        with tempfile.NamedTemporaryFile() as temp:
            temp.write(f"[{{preferred_local_proto,{result['addressFamily']}_tcp_dist}}].".encode())
            temp.flush()
            temp_name = temp.name
            args = [path, '-name', name, '-setcookie', cookie, '-hidden', '-remsh', node, '-proto_dist', 'cb',
                    '-epmd_module', 'cb_epmd', '-pa', ns_server_ebin_path, '-kernel', 'dist_config_file',
                    f'"{temp_name}"'] + inetrc_opt
            if opts.debug:
                print(f"Running {' '.join(args)}")
            try:
                subprocess.call(args)
            except OSError:
                _exit_if_errors(["Unable to find the erl executable"])

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-server-eshell" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Opens a shell to the Couchbase cluster manager"

    @staticmethod
    def is_hidden():
        # Internal command not recommended for production use
        return True
class ServerInfo(Subcommand):
    """The server info subcommand"""

    def __init__(self):
        super(ServerInfo, self).__init__()
        self.parser.prog = "couchbase-cli server-info"

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Print this node's info as sorted, pretty-printed JSON."""
        # Cluster does not need to be initialized for this command
        result, errors = self.rest.node_info()
        _exit_if_errors(errors)
        print(json.dumps(result, sort_keys=True, indent=2))

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-server-info" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Show details of a node in the cluster"
class ServerList(Subcommand):
    """The server list subcommand"""

    def __init__(self):
        super(ServerList, self).__init__()
        self.parser.prog = "couchbase-cli server-list"

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Print otpNode, hostname, status and membership for every node."""
        result, errors = self.rest.pools('default')
        _exit_if_errors(errors)
        for node in result['nodes']:
            # A node without an otpNode entry is unreachable/misreported
            if node.get('otpNode') is None:
                raise Exception("could not access node")
            print(node['otpNode'], node['hostname'], node['status'], node['clusterMembership'])

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-server-list" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "List all nodes in a cluster"
class ServerReadd(Subcommand):
    """The server readd subcommand (Deprecated)"""

    def __init__(self):
        super(ServerReadd, self).__init__()
        self.parser.prog = "couchbase-cli server-readd"
        group = self.parser.add_argument_group("Server re-add options")
        group.add_argument("--server-add", dest="servers", metavar="<server_list>", required=True,
                           help="The list of servers to recover")
        # The parameters are unused, but kept for backwards compatibility
        group.add_argument("--server-username", dest="server_username", metavar="<username>",
                           help="The admin username for the server")
        group.add_argument("--server-password", dest="server_password", metavar="<password>",
                           help="The admin password for the server")
        group.add_argument("--group-name", dest="name", metavar="<name>",
                           help="The name of the server group")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Re-add each listed failed-over server (deprecated; use `recovery`)."""
        _deprecated("Please use the recovery command instead")
        servers = apply_default_port(opts.servers)
        for server in servers:
            _, errors = self.rest.readd_server(server)
            _exit_if_errors(errors)
        _success("Servers recovered")

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-server-readd" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Add failed server back to the cluster"

    @staticmethod
    def is_hidden():
        # Deprecated command in 4.6, hidden in 5.0, pending removal
        return True
class SettingAlert(Subcommand):
    """The setting alert subcommand"""

    def __init__(self):
        super(SettingAlert, self).__init__()
        self.parser.prog = "couchbase-cli setting-alert"
        group = self.parser.add_argument_group("Alert settings")
        group.add_argument("--enable-email-alert", dest="enabled", metavar="<1|0>", required=True,
                           choices=["0", "1"], help="Enable/disable email alerts")
        group.add_argument("--email-recipients", dest="email_recipients", metavar="<email_list>",
                           help="A comma separated list of email addresses")
        group.add_argument("--email-sender", dest="email_sender", metavar="<email_addr>",
                           help="The sender email address")
        group.add_argument("--email-user", dest="email_username", metavar="<username>",
                           default="", help="The email server username")
        group.add_argument("--email-password", dest="email_password", metavar="<password>",
                           default="", help="The email server password")
        group.add_argument("--email-host", dest="email_host", metavar="<host>",
                           help="The email server host")
        group.add_argument("--email-port", dest="email_port", metavar="<port>",
                           help="The email server port")
        group.add_argument("--enable-email-encrypt", dest="email_encrypt", metavar="<1|0>",
                           choices=["0", "1"], help="Enable SSL encryption for emails")
        group.add_argument("--alert-auto-failover-node", dest="alert_af_node",
                           action="store_true", help="Alert when a node is auto-failed over")
        group.add_argument("--alert-auto-failover-max-reached", dest="alert_af_max_reached",
                           action="store_true",
                           help="Alert when the max number of auto-failover nodes was reached")
        group.add_argument("--alert-auto-failover-node-down", dest="alert_af_node_down",
                           action="store_true",
                           help="Alert when a node wasn't auto-failed over because other nodes "
                           + "were down")
        group.add_argument("--alert-auto-failover-cluster-small", dest="alert_af_small",
                           action="store_true",
                           help="Alert when a node wasn't auto-failed over because cluster was"
                           + " too small")
        group.add_argument("--alert-auto-failover-disable", dest="alert_af_disable",
                           action="store_true",
                           help="Alert when a node wasn't auto-failed over because auto-failover"
                           + " is disabled")
        group.add_argument("--alert-ip-changed", dest="alert_ip_changed", action="store_true",
                           help="Alert when a nodes IP address changed")
        group.add_argument("--alert-disk-space", dest="alert_disk_space", action="store_true",
                           help="Alert when disk usage on a node reaches 90%%")
        group.add_argument("--alert-meta-overhead", dest="alert_meta_overhead", action="store_true",
                           help="Alert when metadata overhead is more than 50%%")
        group.add_argument("--alert-meta-oom", dest="alert_meta_oom", action="store_true",
                           help="Alert when all bucket memory is used for metadata")
        group.add_argument("--alert-write-failed", dest="alert_write_failed", action="store_true",
                           help="Alert when writing data to disk has failed")
        group.add_argument("--alert-audit-msg-dropped", dest="alert_audit_dropped",
                           action="store_true", help="Alert when writing event to audit log failed")
        group.add_argument("--alert-indexer-max-ram", dest="alert_indexer_max_ram",
                           action="store_true", help="Alert when indexer is using all of its allocated memory")
        group.add_argument("--alert-timestamp-drift-exceeded", dest="alert_cas_drift",
                           action="store_true", help="Alert when clocks on two servers are more than five seconds"
                           + "apart")
        group.add_argument("--alert-communication-issue", dest="alert_communication_issue",
                           action="store_true", help="Alert when nodes are experiencing communication issues")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate the email settings and push the selected alert types to the cluster."""
        if opts.enabled == "1":
            # All SMTP details are mandatory once alerts are switched on
            if opts.email_recipients is None:
                _exit_if_errors(["--email-recipient must be set when email alerts are enabled"])
            if opts.email_sender is None:
                _exit_if_errors(["--email-sender must be set when email alerts are enabled"])
            if opts.email_host is None:
                _exit_if_errors(["--email-host must be set when email alerts are enabled"])
            if opts.email_port is None:
                _exit_if_errors(["--email-port must be set when email alerts are enabled"])
        # Map each CLI flag to the server-side alert identifier
        alerts = []
        if opts.alert_af_node:
            alerts.append('auto_failover_node')
        if opts.alert_af_max_reached:
            alerts.append('auto_failover_maximum_reached')
        if opts.alert_af_node_down:
            alerts.append('auto_failover_other_nodes_down')
        if opts.alert_af_small:
            alerts.append('auto_failover_cluster_too_small')
        if opts.alert_af_disable:
            alerts.append('auto_failover_disabled')
        if opts.alert_ip_changed:
            alerts.append('ip')
        if opts.alert_disk_space:
            alerts.append('disk')
        if opts.alert_meta_overhead:
            alerts.append('overhead')
        if opts.alert_meta_oom:
            alerts.append('ep_oom_errors')
        if opts.alert_write_failed:
            alerts.append('ep_item_commit_failed')
        if opts.alert_audit_dropped:
            alerts.append('audit_dropped_events')
        if opts.alert_indexer_max_ram:
            alerts.append('indexer_ram_max_usage')
        if opts.alert_cas_drift:
            alerts.append('ep_clock_cas_drift_threshold_exceeded')
        if opts.alert_communication_issue:
            alerts.append('communication_issue')
        # REST API expects "true"/"false" strings, not booleans
        enabled = "true"
        if opts.enabled == "0":
            enabled = "false"
        email_encrypt = "false"
        if opts.email_encrypt == "1":
            email_encrypt = "true"
        _, errors = self.rest.set_alert_settings(enabled, opts.email_recipients, opts.email_sender, opts.email_username,
                                                 opts.email_password, opts.email_host, opts.email_port, email_encrypt,
                                                 ",".join(alerts))
        _exit_if_errors(errors)
        _success("Email alert settings modified")

    @staticmethod
    def get_man_page_name():
        # BUG FIX: suffix must be parenthesised, otherwise Windows got only ".html"
        return "couchbase-cli-setting-alert" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify email alert settings"
class SettingAudit(Subcommand):
    """The settings audit subcommand.

    Supports three mutually exclusive actions: listing the filterable audit
    event descriptors, printing the current audit settings, and updating the
    audit settings. Audit configuration is an Enterprise Edition feature.
    """

    def __init__(self):
        super(SettingAudit, self).__init__()
        self.parser.prog = "couchbase-cli setting-audit"
        self.parser.description = "Available only in Couchbase Server Enterprise Edition"
        group = self.parser.add_argument_group("Audit settings")
        group.add_argument("--list-filterable-events", dest="list_events", action="store_true",
                           help="Retrieve a list of filterable event IDs and the descriptions")
        group.add_argument("--get-settings", dest="get_settings", action="store_true",
                           help="Retrieve current audit settings")
        group.add_argument("--set", dest="set_settings", action="store_true",
                           help="Set current audit settings")
        group.add_argument("--audit-enabled", dest="enabled", metavar="<1|0>", choices=["0", "1"],
                           help="Enable/disable auditing")
        group.add_argument("--audit-log-path", dest="log_path", metavar="<path>",
                           help="The audit log path")
        group.add_argument("--audit-log-rotate-interval", dest="rotate_interval", type=(int),
                           metavar="<seconds>", help="The audit log rotate interval")
        group.add_argument("--audit-log-rotate-size", dest="rotate_size", type=(int),
                           metavar="<bytes>", help="The audit log rotate size")
        group.add_argument("--disabled-users", dest="disabled_users", default=None,
                           help="A comma-separated list of users to ignore events from")
        group.add_argument("--disable-events", dest="disable_events", default=None,
                           help="A comma-separated list of audit-event IDs to not audit")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Dispatch to exactly one of: list events, get settings, set settings."""
        flags = sum([opts.list_events, opts.get_settings, opts.set_settings])
        if flags != 1:
            _exit_if_errors(["One of the following is required: --list-filterable-events, --get-settings or --set"])
        if opts.list_events:
            descriptors, errors = self.rest.get_id_descriptors()
            _exit_if_errors(errors)
            if opts.output == 'json':
                print(json.dumps(descriptors, indent=4))
                return
            self.format_descriptors_in_table(descriptors)
        elif opts.get_settings:
            audit_settings, errors = self.rest.get_audit_settings()
            _exit_if_errors(errors)
            if opts.output == 'json':
                print(json.dumps(audit_settings, indent=4))
                return
            # the descriptors are needed to render the per-event table
            descriptors, errors = self.rest.get_id_descriptors()
            _exit_if_errors(errors)
            self.format_audit_settings(audit_settings, descriptors)
        elif opts.set_settings:
            if not (opts.enabled or opts.log_path or opts.rotate_interval or opts.rotate_size
                    or opts.disable_events is not None or opts.disabled_users is not None):
                _exit_if_errors(["At least one of [--audit-enabled, --audit-log-path, --audit-log-rotate-interval,"
                                 " --audit-log-rotate-size, --disabled-users, --disable-events] is required with"
                                 " --set"])
            # REST expects "true"/"false" rather than "1"/"0"
            if opts.enabled == "1":
                opts.enabled = "true"
            elif opts.enabled == "0":
                opts.enabled = "false"
            _, errors = self.rest.set_audit_settings(opts.enabled, opts.log_path, opts.rotate_interval,
                                                     opts.rotate_size, opts.disable_events, opts.disabled_users)
            _exit_if_errors(errors)
            _success("Audit settings modified")

    @staticmethod
    def format_audit_settings(audit_settings, json_descriptors):
        """Print the audit settings and, when auditing is enabled, a table of
        every known event with its enabled/disabled state."""
        # f-strings use double quotes so the single-quoted keys inside the
        # replacement fields are valid on Python versions before 3.12
        print(f"Audit enabled: {audit_settings['auditdEnabled']}")
        print(f"UUID: {audit_settings['uid']}")
        print(f"Log path: {audit_settings['logPath'] if 'logPath' in audit_settings else 'N/A'}")
        print(f"Rotate interval: {audit_settings['rotateInterval']}")
        print(f"Rotate size: {audit_settings['rotateSize']}")
        print(f"Disabled users: {audit_settings['disabledUsers']}")
        if not audit_settings["auditdEnabled"]:
            return
        # change id list to a set to make lookup O(1)
        disabled_ids = {event_id for event_id in audit_settings['disabled']}
        json_descriptors.sort(key=itemgetter('module', 'id'))
        known_ids = {events["id"] for events in json_descriptors}
        padding_name = 12
        for descriptor in json_descriptors:
            if len(descriptor['name']) > padding_name:
                padding_name = len(descriptor['name'])
        padding_name += 2
        header = f"{'ID':<6}| {'Module':<15}| {'Name':<{padding_name}}| Enabled"
        print(header)
        print('-' * len(header))
        for descriptor in json_descriptors:
            print(f"{descriptor['id']:<6}| {descriptor['module']:<15}| {descriptor['name']:<{padding_name}}| "
                  f"{'False' if descriptor['id'] in disabled_ids else 'True'}")
        # disabled IDs the server no longer describes are shown as unknown
        not_recognized = disabled_ids - known_ids
        for unrecognized in not_recognized:
            print(f"{unrecognized:<6}| {'unknown':<15}| {'unknown':<{padding_name}}| False")

    @staticmethod
    def format_descriptors_in_table(json_descriptors):
        """Print all filterable event descriptors sorted by module then ID."""
        sorted_descriptors = sorted(json_descriptors, key=itemgetter('module', 'id'))
        padding_name = 15
        for descriptor in sorted_descriptors:
            if len(descriptor['name']) > padding_name:
                padding_name = len(descriptor['name'])
        padding_name += 2
        header = f"{'ID':<6}| {'Module':<15}| {'Name':<{padding_name}}| Description"
        print(header)
        print('-' * len(header))
        for descriptor in sorted_descriptors:
            print(f"{descriptor['id']:<6}| {descriptor['module']:<15}| {descriptor['name']:<{padding_name}}| "
                  f"{descriptor['description']}")

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-setting-audit" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify audit settings"
class SettingAutofailover(Subcommand):
    """The settings auto-failover subcommand.

    Validates the allowed option combinations (several are Enterprise-only)
    and pushes the auto-failover configuration to the cluster.
    """

    def __init__(self):
        super(SettingAutofailover, self).__init__()
        self.parser.prog = "couchbase-cli setting-autofailover"
        group = self.parser.add_argument_group("Auto-failover settings")
        group.add_argument("--enable-auto-failover", dest="enabled", metavar="<1|0>",
                           choices=["0", "1"], help="Enable/disable auto-failover")
        group.add_argument("--auto-failover-timeout", dest="timeout", metavar="<seconds>",
                           type=(int), help="The auto-failover timeout")
        group.add_argument("--enable-failover-of-server-groups", dest="enable_failover_of_server_groups",
                           metavar="<1|0>", choices=["0", "1"], help="Enable/disable auto-failover of server Groups")
        group.add_argument("--max-failovers", dest="max_failovers", metavar="<1|2|3>", choices=["1", "2", "3"],
                           help="Maximum number of times an auto-failover event can happen")
        group.add_argument("--enable-failover-on-data-disk-issues", dest="enable_failover_on_data_disk_issues",
                           metavar="<1|0>", choices=["0", "1"],
                           help="Enable/disable auto-failover when the Data Service reports disk issues. "
                           + "Couchbase Server Enterprise Edition only.")
        group.add_argument("--failover-data-disk-period", dest="failover_on_data_disk_period",
                           metavar="<seconds>", type=(int),
                           help="The amount of time the Data Serivce disk failures has to be happening for to trigger"
                           " an auto-failover")
        group.add_argument("--can-abort-rebalance", metavar="<1|0>", choices=["1", "0"], dest="can_abort_rebalance",
                           help="Enables auto-failover to abort rebalance and perform the failover. (EE only)")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Validate option combinations, then apply auto-failover settings."""
        # REST expects "true"/"false" rather than "1"/"0"
        if opts.enabled == "1":
            opts.enabled = "true"
        elif opts.enabled == "0":
            opts.enabled = "false"
        if opts.enable_failover_on_data_disk_issues == "1":
            opts.enable_failover_on_data_disk_issues = "true"
        elif opts.enable_failover_on_data_disk_issues == "0":
            opts.enable_failover_on_data_disk_issues = "false"
        if opts.enable_failover_of_server_groups == "1":
            opts.enable_failover_of_server_groups = "true"
        elif opts.enable_failover_of_server_groups == "0":
            opts.enable_failover_of_server_groups = "false"
        if not self.enterprise:
            # these options are Enterprise Edition only
            if opts.enable_failover_of_server_groups:
                _exit_if_errors(["--enable-failover-of-server-groups can only be configured on enterprise edition"])
            if opts.enable_failover_on_data_disk_issues or opts.failover_on_data_disk_period:
                _exit_if_errors(["Auto failover on Data Service disk issues can only be configured on enterprise"
                                 + " edition"])
            if opts.max_failovers:
                _exit_if_errors(["--max-count can only be configured on enterprise edition"])
            if opts.can_abort_rebalance:
                _exit_if_errors(["--can-abort-rebalance can only be configured on enterprise edition"])
        if not any([opts.enabled, opts.timeout, opts.enable_failover_on_data_disk_issues,
                    opts.failover_on_data_disk_period, opts.enable_failover_of_server_groups, opts.max_failovers]):
            _exit_if_errors(["No settings specified to be changed"])
        # disk-issue enable flag and period must be supplied together
        if ((opts.enable_failover_on_data_disk_issues is None or opts.enable_failover_on_data_disk_issues == "false")
                and opts.failover_on_data_disk_period):
            _exit_if_errors(["--enable-failover-on-data-disk-issues must be set to 1 when auto-failover Data"
                             " Service disk period has been set"])
        if opts.enable_failover_on_data_disk_issues and opts.failover_on_data_disk_period is None:
            _exit_if_errors(["--failover-data-disk-period must be set when auto-failover on Data Service disk"
                             " is enabled"])
        if opts.enabled == "false" or opts.enabled is None:
            # most sub-settings are meaningless while auto-failover is off
            if opts.enable_failover_on_data_disk_issues or opts.failover_on_data_disk_period:
                _exit_if_errors(["--enable-auto-failover must be set to 1 when auto-failover on Data Service disk issues"
                                 " settings are being configured"])
            if opts.enable_failover_of_server_groups:
                _exit_if_errors(["--enable-auto-failover must be set to 1 when enabling auto-failover of Server Groups"])
            if opts.timeout:
                _warning("Timeout specified will not take affect because auto-failover is being disabled")
        if opts.can_abort_rebalance == '1':
            opts.can_abort_rebalance = 'true'
        elif opts.can_abort_rebalance == '0':
            opts.can_abort_rebalance = 'false'
        _, errors = self.rest.set_autofailover_settings(opts.enabled, opts.timeout,
                                                        opts.enable_failover_of_server_groups, opts.max_failovers,
                                                        opts.enable_failover_on_data_disk_issues,
                                                        opts.failover_on_data_disk_period, opts.can_abort_rebalance)
        _exit_if_errors(errors)
        _success("Auto-failover settings modified")

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-setting-autofailover" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify auto failover settings"
class SettingAutoreprovision(Subcommand):
    """The settings auto-reprovision subcommand."""

    def __init__(self):
        super(SettingAutoreprovision, self).__init__()
        self.parser.prog = "couchbase-cli setting-autoreprovision"
        group = self.parser.add_argument_group("Auto-reprovision settings")
        group.add_argument("--enabled", dest="enabled", metavar="<1|0>", required=True,
                           choices=["0", "1"], help="Enable/disable auto-reprovision")
        group.add_argument("--max-nodes", dest="max_nodes", metavar="<num>", type=(int),
                           help="The numbers of server that can be auto-reprovisioned before a rebalance")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate and apply auto-reprovision settings."""
        # REST expects "true"/"false" rather than "1"/"0"
        if opts.enabled == "1":
            opts.enabled = "true"
        elif opts.enabled == "0":
            opts.enabled = "false"
        if opts.enabled == "true" and opts.max_nodes is None:
            _exit_if_errors(["--max-nodes must be specified if auto-reprovision is enabled"])
        if not (opts.enabled or opts.max_nodes):
            _exit_if_errors(["No settings specified to be changed"])
        if (opts.enabled is None or opts.enabled == "false") and opts.max_nodes:
            _warning("--max-servers will not take affect because auto-reprovision is being disabled")
        _, errors = self.rest.set_autoreprovision_settings(opts.enabled, opts.max_nodes)
        _exit_if_errors(errors)
        _success("Auto-reprovision settings modified")

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-setting-autoreprovision" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify auto-reprovision settings"
class SettingCluster(Subcommand):
    """The settings cluster subcommand.

    Updates cluster-wide memory quotas and name, and/or the administrator
    credentials and console port.
    """

    def __init__(self):
        super(SettingCluster, self).__init__()
        self.parser.prog = "couchbase-cli setting-cluster"
        group = self.parser.add_argument_group("Cluster settings")
        group.add_argument("--cluster-username", dest="new_username", metavar="<username>",
                           help="The cluster administrator username")
        # help text fixed: it was a copy-paste of an unrelated compaction flag
        # ("Only compact the data files")
        group.add_argument("--cluster-password", dest="new_password", metavar="<password>",
                           help="The cluster administrator password")
        group.add_argument("--cluster-port", dest="port", type=(int), metavar="<port>",
                           help="The cluster administration console port")
        group.add_argument("--cluster-ramsize", dest="data_mem_quota", metavar="<quota>",
                           type=(int), help="The data service memory quota in mebibytes")
        group.add_argument("--cluster-index-ramsize", dest="index_mem_quota", metavar="<quota>",
                           type=(int), help="The index service memory quota in mebibytes")
        group.add_argument("--cluster-fts-ramsize", dest="fts_mem_quota", metavar="<quota>",
                           type=(int), help="The full-text service memory quota in mebibytes")
        group.add_argument("--cluster-eventing-ramsize", dest="eventing_mem_quota", metavar="<quota>",
                           type=(int), help="The Eventing service memory quota in mebibytes")
        group.add_argument("--cluster-analytics-ramsize", dest="cbas_mem_quota", metavar="<quota>",
                           type=(int), help="The analytics service memory quota in mebibytes")
        group.add_argument("--cluster-name", dest="name", metavar="<name>", help="The cluster name")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Apply quota/name changes first, then credential/port changes."""
        if (opts.data_mem_quota or opts.index_mem_quota or opts.fts_mem_quota or opts.cbas_mem_quota
                or opts.eventing_mem_quota or opts.name):
            _, errors = self.rest.set_pools_default(opts.data_mem_quota, opts.index_mem_quota, opts.fts_mem_quota,
                                                    opts.cbas_mem_quota, opts.eventing_mem_quota, opts.name)
            _exit_if_errors(errors)
        if opts.new_username or opts.new_password or opts.port:
            # default to the credentials used for this connection when a new
            # value was not supplied
            username = opts.username
            if opts.new_username:
                username = opts.new_username
            password = opts.password
            if opts.new_password:
                password = opts.new_password
            _, errors = self.rest.set_admin_credentials(username, password, opts.port)
            _exit_if_errors(errors)
        _success("Cluster settings modified")

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-setting-cluster" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify cluster settings"
class ClusterEdit(SettingCluster):
    """The cluster edit subcommand (Deprecated alias of setting-cluster)."""

    def __init__(self):
        super(ClusterEdit, self).__init__()
        self.parser.prog = "couchbase-cli cluster-edit"

    def execute(self, opts):
        """Warn about the deprecation, then delegate to SettingCluster."""
        _deprecated("Please use the setting-cluster command instead")
        super(ClusterEdit, self).execute(opts)

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-cluster-edit" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def is_hidden():
        # Deprecated command in 4.6, hidden in 5.0, pending removal
        return True
class SettingCompaction(Subcommand):
    """The setting compaction subcommand.

    Configures database/view auto-compaction, the compaction window, the
    metadata purge interval and the GSI compaction mode.
    """

    def __init__(self):
        super(SettingCompaction, self).__init__()
        self.parser.prog = "couchbase-cli setting-compaction"
        group = self.parser.add_argument_group("Compaction settings")
        group.add_argument("--compaction-db-percentage", dest="db_perc", metavar="<perc>",
                           type=(int),
                           help="Compacts the db once the fragmentation reaches this percentage")
        group.add_argument("--compaction-db-size", dest="db_size", metavar="<mebibytes>",
                           type=(int),
                           help="Compacts db once the fragmentation reaches this size (MiB)")
        group.add_argument("--compaction-view-percentage", dest="view_perc", metavar="<perc>",
                           type=(int),
                           help="Compacts the view once the fragmentation reaches this percentage")
        group.add_argument("--compaction-view-size", dest="view_size", metavar="<mebibytes>",
                           type=(int),
                           help="Compacts view once the fragmentation reaches this size (MiB)")
        group.add_argument("--compaction-period-from", dest="from_period", metavar="<HH:MM>",
                           help="Only run compaction after this time")
        group.add_argument("--compaction-period-to", dest="to_period", metavar="<HH:MM>",
                           help="Only run compaction before this time")
        group.add_argument("--enable-compaction-abort", dest="enable_abort", metavar="<1|0>",
                           choices=["0", "1"], help="Allow compactions to be aborted")
        group.add_argument("--enable-compaction-parallel", dest="enable_parallel", metavar="<1|0>",
                           choices=["0", "1"], help="Allow parallel compactions")
        group.add_argument("--metadata-purge-interval", dest="purge_interval", metavar="<float>",
                           type=(float), help="The metadata purge interval")
        group.add_argument("--gsi-compaction-mode", dest="gsi_mode", choices=["append", "circular"],
                           help="Sets the gsi compaction mode (append or circular)")
        group.add_argument("--compaction-gsi-percentage", dest="gsi_perc", type=(int), metavar="<perc>",
                           help="Starts compaction once gsi file fragmentation has reached this percentage"
                           + "(Append mode only)")
        group.add_argument("--compaction-gsi-interval", dest="gsi_interval", metavar="<days>",
                           help="A comma separated list of days compaction can run (Circular mode only)")
        group.add_argument("--compaction-gsi-period-from", dest="gsi_from_period", metavar="<HH:MM>",
                           help="Allow gsi compaction to run after this time (Circular mode only)")
        group.add_argument("--compaction-gsi-period-to", dest="gsi_to_period", metavar="<HH:MM>",
                           help="Allow gsi compaction to run before this time (Circular mode only)")
        group.add_argument("--enable-gsi-compaction-abort", dest="enable_gsi_abort", metavar="<1|0>",
                           choices=["0", "1"],
                           help="Abort gsi compaction if when run outside of the accepted interaval"
                           + "(Circular mode only)")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate compaction options and push them to the cluster."""
        if opts.db_perc is not None and (opts.db_perc < 2 or opts.db_perc > 100):
            _exit_if_errors(["--compaction-db-percentage must be between 2 and 100"])
        if opts.view_perc is not None and (opts.view_perc < 2 or opts.view_perc > 100):
            _exit_if_errors(["--compaction-view-percentage must be between 2 and 100"])
        if opts.db_size is not None:
            if int(opts.db_size) < 1:
                _exit_if_errors(["--compaction-db-size must be between greater than 1 or infinity"])
            # convert MiB to bytes for the REST API
            opts.db_size = int(opts.db_size) * 1024**2
        if opts.view_size is not None:
            if int(opts.view_size) < 1:
                _exit_if_errors(["--compaction-view-size must be between greater than 1 or infinity"])
            opts.view_size = int(opts.view_size) * 1024**2
        # the compaction window options form a trio: from, to and abort must
        # all be supplied together
        if opts.from_period and not (opts.to_period and opts.enable_abort):
            errors = []
            if opts.to_period is None:
                errors.append("--compaction-period-to is required when using --compaction-period-from")
            if opts.enable_abort is None:
                errors.append("--enable-compaction-abort is required when using --compaction-period-from")
            _exit_if_errors(errors)
        if opts.to_period and not (opts.from_period and opts.enable_abort):
            errors = []
            if opts.from_period is None:
                errors.append("--compaction-period-from is required when using --compaction-period-to")
            if opts.enable_abort is None:
                errors.append("--enable-compaction-abort is required when using --compaction-period-to")
            _exit_if_errors(errors)
        if opts.enable_abort and not (opts.from_period and opts.to_period):
            errors = []
            if opts.from_period is None:
                errors.append("--compaction-period-from is required when using --enable-compaction-abort")
            if opts.to_period is None:
                errors.append("--compaction-period-to is required when using --enable-compaction-abort")
            _exit_if_errors(errors)
        from_hour, from_min = self._handle_timevalue(opts.from_period,
                                                     "--compaction-period-from")
        to_hour, to_min = self._handle_timevalue(opts.to_period, "--compaction-period-to")
        if opts.enable_abort == "1":
            opts.enable_abort = "true"
        elif opts.enable_abort == "0":
            opts.enable_abort = "false"
        # NOTE(review): an unspecified --enable-compaction-parallel falls
        # through to "false" here (no elif/None branch); preserved as-is
        if opts.enable_parallel == "1":
            opts.enable_parallel = "true"
        else:
            opts.enable_parallel = "false"
        if opts.purge_interval is not None and (opts.purge_interval < 0.04 or opts.purge_interval > 60.0):
            _exit_if_errors(["--metadata-purge-interval must be between 0.04 and 60.0"])
        g_from_hour = None
        g_from_min = None
        g_to_hour = None
        g_to_min = None
        if opts.gsi_mode == "append":
            # the REST API calls append mode "full"
            opts.gsi_mode = "full"
            if opts.gsi_perc is None:
                _exit_if_errors(['--compaction-gsi-percentage must be specified when --gsi-compaction-mode is set '
                                 'to append'])
        elif opts.gsi_mode == "circular":
            if opts.gsi_from_period is not None and opts.gsi_to_period is None:
                _exit_if_errors(["--compaction-gsi-period-to is required with --compaction-gsi-period-from"])
            if opts.gsi_to_period is not None and opts.gsi_from_period is None:
                _exit_if_errors(["--compaction-gsi-period-from is required with --compaction-gsi-period-to"])
            g_from_hour, g_from_min = self._handle_timevalue(opts.gsi_from_period, "--compaction-gsi-period-from")
            g_to_hour, g_to_min = self._handle_timevalue(opts.gsi_to_period, "--compaction-gsi-period-to")
        if opts.enable_gsi_abort == "1":
            opts.enable_gsi_abort = "true"
        else:
            opts.enable_gsi_abort = "false"
        _, errors = self.rest.set_compaction_settings(opts.db_perc, opts.db_size, opts.view_perc, opts.view_size,
                                                      from_hour, from_min, to_hour, to_min, opts.enable_abort,
                                                      opts.enable_parallel, opts.purge_interval, opts.gsi_mode,
                                                      opts.gsi_perc, opts.gsi_interval, g_from_hour, g_from_min,
                                                      g_to_hour, g_to_min, opts.enable_gsi_abort)
        _exit_if_errors(errors)
        _success("Compaction settings modified")

    def _handle_timevalue(self, opt_value, opt_name):
        """Parse an "HH:MM" option into (hour, minute), exiting on bad input.

        Returns (None, None) when the option was not supplied.
        """
        hour = None
        minute = None
        if opt_value:
            if opt_value.find(':') == -1:
                _exit_if_errors([f'Invalid value for {opt_name}, must be in form XX:XX'])
            hour, minute = opt_value.split(':', 1)
            try:
                hour = int(hour)
            except ValueError:
                _exit_if_errors([f'Invalid hour value for {opt_name}, must be an integer'])
            if hour not in range(24):
                _exit_if_errors([f'Invalid hour value for {opt_name}, must be 0-23'])
            try:
                minute = int(minute)
            except ValueError:
                _exit_if_errors([f'Invalid minute value for {opt_name}, must be an integer'])
            if minute not in range(60):
                _exit_if_errors([f'Invalid minute value for {opt_name}, must be 0-59'])
        return hour, minute

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-setting-compaction" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify auto-compaction settings"
class SettingIndex(Subcommand):
    """The setting index subcommand."""

    def __init__(self):
        super(SettingIndex, self).__init__()
        self.parser.prog = "couchbase-cli setting-index"
        group = self.parser.add_argument_group("Index settings")
        group.add_argument("--index-max-rollback-points", dest="max_rollback", metavar="<num>",
                           type=(int), help="Max rollback points")
        group.add_argument("--index-stable-snapshot-interval", dest="stable_snap", type=(int),
                           metavar="<seconds>", help="Stable snapshot interval in seconds")
        group.add_argument("--index-memory-snapshot-interval", dest="mem_snap", metavar="<ms>",
                           type=(int), help="Stable snapshot interval in milliseconds")
        group.add_argument("--index-storage-setting", dest="storage_mode", metavar="<mode>",
                           choices=["default", "memopt"], help="The index storage backend")
        group.add_argument("--index-threads", dest="threads", metavar="<num>",
                           type=(int), help="The number of indexer threads")
        group.add_argument("--index-log-level", dest="log_level", metavar="<level>",
                           choices=["debug", "silent", "fatal", "error", "warn", "info", "verbose",
                                    "timing", "trace"],
                           help="The indexer log level")
        group.add_argument('--replicas', metavar='<num>', type=int, help='Number of index replicas')
        group.add_argument('--optimize-placement', metavar='<1|0>', type=str,
                           help='Optimize index placement on a rebalance.')

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Validate and apply index-service settings."""
        if (opts.max_rollback is None and opts.stable_snap is None
                and opts.mem_snap is None and opts.storage_mode is None
                and opts.threads is None and opts.log_level is None and opts.replicas is None
                and opts.optimize_placement is None):
            _exit_if_errors(["No settings specified to be changed"])
        settings, errors = self.rest.index_settings()
        _exit_if_errors(errors)
        # For supporting the default index backend changing from forestdb to plasma in Couchbase 5.0.
        # Parentheses added for clarity only; `and` already bound tighter than `or`.
        default = "plasma"
        if (opts.storage_mode == "default" and settings['storageMode'] == "forestdb") or not self.enterprise:
            default = "forestdb"
        opts.storage_mode = index_storage_mode_to_param(opts.storage_mode, default)
        _, errors = self.rest.set_index_settings(opts.storage_mode, opts.max_rollback, opts.stable_snap, opts.mem_snap,
                                                 opts.threads, opts.log_level, opts.replicas, opts.optimize_placement)
        _exit_if_errors(errors)
        _success("Indexer settings modified")

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-setting-index" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify index settings"
class SettingSaslauthd(Subcommand):
    """The setting sasl subcommand."""

    def __init__(self):
        super(SettingSaslauthd, self).__init__()
        self.parser.prog = "couchbase-cli setting-saslauthd"
        group = self.parser.add_argument_group("saslauthd settings")
        group.add_argument("--enabled", dest="enabled", metavar="<1|0>", required=True,
                           choices=["0", "1"], help="Enable/disable saslauthd")
        group.add_argument("--admins", dest="admins", metavar="<user_list>",
                           help="A comma separated list of full admins")
        group.add_argument("--roadmins", dest="roadmins", metavar="<user_list>",
                           help="A comma separated list of read only admins")
        group.add_argument("--default", dest="default", default="none",
                           choices=["admins", "roadmins", "none"], metavar="<default>",
                           help="Default roles for saslauthd users")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Apply saslauthd settings, honouring the --default role."""
        # the REST API takes newline-separated user lists
        admins = ""
        if opts.admins:
            admins = opts.admins.replace(",", "\n")
        ro_admins = ""
        if opts.roadmins:
            ro_admins = opts.roadmins.replace(",", "\n")
        errors = None
        if opts.enabled == '1':
            if opts.default == 'admins':
                # the explicit admins list is dropped (None below), so warn
                # about --admins; the original warned about the wrong option
                if admins:
                    _warning("--admins option ignored since default is admins")
                _, errors = self.rest.sasl_settings('true', ro_admins, None)
            elif opts.default == 'roadmins':
                # here the read-only list is dropped instead
                if ro_admins:
                    _warning("--roadmins option ignored since default is read only admins")
                _, errors = self.rest.sasl_settings('true', None, admins)
            else:
                _, errors = self.rest.sasl_settings('true', ro_admins, admins)
        else:
            if admins:
                _warning("--admins option ignored since saslauthd is being disabled")
            if ro_admins:
                _warning("--roadmins option ignored since saslauthd is being disabled")
            _, errors = self.rest.sasl_settings('false', "", "")
        _exit_if_errors(errors)
        _success("saslauthd settings modified")

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-setting-saslauthd" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify saslauthd settings"
class SettingLdap(Subcommand):
    """The setting LDAP subcommand.

    Either retrieves the current LDAP configuration (--get) or updates it,
    translating CLI flag values into the forms the REST API expects.
    """

    def __init__(self):
        super(SettingLdap, self).__init__()
        self.parser.prog = "couchbase-cli setting-ldap"
        group = self.parser.add_argument_group("LDAP settings")
        group.add_argument("--get", dest="get", default=False, action="store_true",
                           help='When the get flag is provided it will retrieve the current ldap settings')
        group.add_argument("--authentication-enabled", dest="authentication_enabled", metavar="<1|0>",
                           choices=["1", "0"], help="Enable LDAP authentication, otherwise it defaults to disable")
        group.add_argument("--authorization-enabled", dest="authorization_enabled", metavar="<1|0>",
                           choices=["1", "0"], help="Enable LDAP authorization, otherwise defaults to false")
        group.add_argument("--hosts", dest="hosts", metavar="<host_list>",
                           help="Coma separated list of LDAP servers")
        group.add_argument("--port", dest="port", metavar="<port>", help="LDAP port", type=int)
        group.add_argument("--encryption", dest="encryption", metavar="<tls|startTLS|none>",
                           choices=["tls", "startTLS", "none"], help="Encryption used")
        group.add_argument("--server-cert-validation", dest="server_cert_val", metavar="<1|0>", choices=["0", "1"],
                           help="Enable or disable certificate validation when connecting to LDAP server")
        group.add_argument("--ldap-cacert", dest="cacert_ldap", metavar="<path>",
                           help="CA certificate to be used for LDAP server certificate validation, required if"
                           + " certificate validation is not disabled")
        group.add_argument("--user-dn-query", metavar="<query>", dest="user_dn_query",
                           help="LDAP query to get user's DN. Must contains at least one instance of %%u")
        group.add_argument("--user-dn-template", metavar="<template>", dest="user_dn_template",
                           help="Template to construct user's DN. Must contain at least one instance of %%u")
        group.add_argument("--client-cert", metavar="<path>", dest="client_cert",
                           help="The client TLS certificate for authentication")
        group.add_argument("--client-key", metavar="<path>", dest="client_key",
                           help="The client TLS key for authentication")
        group.add_argument("--request-timeout", metavar="<ms>", dest="timeout",
                           help="Request time out in milliseconds")
        group.add_argument("--max-parallel", dest="max_parallel", metavar="<max>", type=int,
                           help="Maximum number of parallel connections that can be established")
        group.add_argument("--max-cache-size", dest="max_cache_size", metavar="<size>",
                           help="Maximum number of cached LDAP requests")
        group.add_argument("--cache-value-lifetime", dest="cache_value_lifetime", metavar="<ms>",
                           help="Cache value lifetime in milliseconds")
        group.add_argument("--bind-dn", dest="bind_dn", metavar="<DN>",
                           help="The DN of a user to bind as to performance lookups")
        group.add_argument("--bind-password", dest="bind_password", metavar="<password>",
                           help="The password of the bind user")
        group.add_argument("--group-query", dest="group_query", metavar="<query>",
                           help="LDAP query to get user's groups by username")
        group.add_argument("--enable-nested-groups", dest="nested_groups", metavar="<1|0>",
                           choices=["0", "1"])
        group.add_argument("--nested-group-max-depth", dest="nested_max_depth", metavar="<max>", type=int,
                           help="Maximum number of recursive group requests allowed. [1 - 100]")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=True)
    def execute(self, opts):
        """Get or set the LDAP configuration depending on --get."""
        if opts.get:
            data, rv = self.rest.get_ldap()
            _exit_if_errors(rv)
            print(json.dumps(data))
        else:
            self._set(opts)

    def _set(self, opts):
        """Translate CLI options and push the LDAP settings."""
        # REST expects "true"/"false" rather than "1"/"0"
        if opts.authentication_enabled == '1':
            opts.authentication_enabled = 'true'
        elif opts.authentication_enabled == '0':
            opts.authentication_enabled = 'false'
        if opts.authorization_enabled == '1':
            opts.authorization_enabled = 'true'
        elif opts.authorization_enabled == '0':
            opts.authorization_enabled = 'false'
        if opts.server_cert_val == '1':
            opts.server_cert_val = 'true'
        elif opts.server_cert_val == '0':
            opts.server_cert_val = 'false'
        if opts.server_cert_val == 'false' and opts.cacert_ldap is not None:
            _exit_if_errors(['--server-cert-validation 0 and --ldap-cert can not be used together'])
        if opts.cacert_ldap is not None:
            opts.cacert_ldap = _exit_on_file_read_failure(opts.cacert_ldap)
        if opts.encryption == "tls":
            opts.encryption = "TLS"
        elif opts.encryption == "startTLS":
            opts.encryption = "StartTLSExtension"
        elif opts.encryption == "none":
            opts.encryption = "None"
        if opts.nested_groups == '1':
            opts.nested_groups = 'true'
        elif opts.nested_groups == '0':
            opts.nested_groups = 'false'
        if opts.user_dn_query is not None and opts.user_dn_template is not None:
            _exit_if_errors(['--user-dn-query and --user-dn-template can not be used together'])
        # build the JSON userDNMapping body; the original f-strings here were
        # syntactically invalid (quotes inside the literal portion)
        mapping = None
        if opts.user_dn_query is not None:
            mapping = f'{{"query": "{opts.user_dn_query}"}}'
        if opts.user_dn_template is not None:
            mapping = f'{{"template": "{opts.user_dn_template}"}}'
        if (opts.client_cert and not opts.client_key) or (not opts.client_cert and opts.client_key):
            _exit_if_errors(['--client-cert and --client--key have to be used together'])
        if opts.client_cert is not None:
            opts.client_cert = _exit_on_file_read_failure(opts.client_cert)
        if opts.client_key is not None:
            opts.client_key = _exit_on_file_read_failure(opts.client_key)
        _, errors = self.rest.ldap_settings(opts.authentication_enabled, opts.authorization_enabled, opts.hosts,
                                            opts.port, opts.encryption, mapping, opts.timeout, opts.max_parallel,
                                            opts.max_cache_size, opts.cache_value_lifetime, opts.bind_dn,
                                            opts.bind_password, opts.client_cert, opts.client_key, opts.group_query,
                                            opts.nested_groups, opts.nested_max_depth, opts.server_cert_val,
                                            opts.cacert_ldap)
        _exit_if_errors(errors)
        _success("LDAP settings modified")

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-setting-ldap" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify LDAP settings"
class SettingNotification(Subcommand):
    """The settings notification subcommand."""

    def __init__(self):
        super(SettingNotification, self).__init__()
        self.parser.prog = "couchbase-cli setting-notification"
        group = self.parser.add_argument_group("Notification Settings")
        group.add_argument("--enable-notifications", dest="enabled", metavar="<1|0>", required=True,
                           choices=["0", "1"], help="Enables/disable software notifications")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Enable or disable software update notifications."""
        enabled = None
        if opts.enabled == "1":
            enabled = True
        elif opts.enabled == "0":
            enabled = False
        _, errors = self.rest.enable_notifications(enabled)
        _exit_if_errors(errors)
        _success("Software notification settings updated")

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-setting-notification" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify software notification settings"
class SettingPasswordPolicy(Subcommand):
    """The settings password policy subcommand."""

    def __init__(self):
        super(SettingPasswordPolicy, self).__init__()
        self.parser.prog = "couchbase-cli setting-password-policy"
        group = self.parser.add_argument_group("Password Policy Settings")
        group.add_argument("--get", dest="get", action="store_true", default=False,
                           help="Get the current password policy")
        group.add_argument("--set", dest="set", action="store_true", default=False,
                           help="Set a new password policy")
        group.add_argument("--min-length", dest="min_length", type=int, default=None, metavar="<num>",
                           help="Specifies the minimum password length for new passwords")
        group.add_argument("--uppercase", dest="upper_case", metavar="<0|1>", choices=["0", "1"],
                           help="Specifies new passwords must contain an upper case character")
        group.add_argument("--lowercase", dest="lower_case", metavar="<0|1>", choices=["0", "1"],
                           help="Specifies new passwords must contain a lower case character")
        group.add_argument("--digit", dest="digit", metavar="<0|1>", choices=["0", "1"],
                           help="Specifies new passwords must at least one digit")
        group.add_argument("--special-char", dest="special_char", metavar="<0|1>", choices=["0", "1"],
                           help="Specifies new passwords must at least one special character")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Validate the --get/--set combination and dispatch accordingly."""
        actions = sum([opts.get, opts.set])
        if actions == 0:
            _exit_if_errors(["Must specify either --get or --set"])
        elif actions > 1:
            _exit_if_errors(["The --get and --set flags may not be specified at the same time"])
        elif opts.get:
            if opts.min_length is not None or any([opts.upper_case, opts.lower_case, opts.digit, opts.special_char]):
                _exit_if_errors(["The --get flag must be used without any other arguments"])
            self._get()
        elif opts.set:
            if opts.min_length is None:
                _exit_if_errors(["--min-length is required when using --set flag"])
            if opts.min_length <= 0:
                _exit_if_errors(["--min-length has to be greater than 0"])
            self._set(opts)

    def _get(self):
        """Print the current password policy as JSON."""
        policy, errors = self.rest.get_password_policy()
        _exit_if_errors(errors)
        print(json.dumps(policy, sort_keys=True, indent=2))

    def _set(self, opts):
        """Push the new password policy to the cluster."""
        _, errors = self.rest.set_password_policy(opts.min_length, opts.upper_case, opts.lower_case, opts.digit,
                                                  opts.special_char)
        _exit_if_errors(errors)
        _success("Password policy updated")

    @staticmethod
    def get_man_page_name():
        # parenthesised: without it the conditional swallowed the
        # concatenation and Windows got just ".html"
        return "couchbase-cli-setting-password-policy" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify the password policy"
class SettingSecurity(Subcommand):
    """The settings security subcommand"""

    def __init__(self):
        super(SettingSecurity, self).__init__()
        self.parser.prog = "couchbase-cli setting-security"
        group = self.parser.add_argument_group("Cluster Security Settings")
        group.add_argument('--get', default=False, action='store_true', help='Get security settings.')
        group.add_argument('--set', default=False, action='store_true', help='Set security settings.')
        group.add_argument("--disable-http-ui", dest="disable_http_ui", metavar="<0|1>", choices=['0', '1'],
                           default=None, help="Disables access to the UI over HTTP (0 or 1)")
        group.add_argument("--disable-www-authenticate", dest="disable_www_authenticate",
                           metavar="<0|1>", choices=['0', '1'], default=None,
                           help="Disables use of WWW-Authenticate (0 or 1)")
        group.add_argument("--cluster-encryption-level", dest="cluster_encryption_level", metavar="<all|control>",
                           choices=['all', 'control'], default=None,
                           help="Set cluster encryption level, only used when cluster encryption enabled.")
        group.add_argument('--tls-min-version', dest='tls_min_version', metavar='<tlsv1|tlsv1.1|tlsv1.2>',
                           choices=['tlsv1', 'tlsv1.1', 'tlsv1.2'], default=None, help='Set the minimum TLS version')
        group.add_argument('--tls-honor-cipher-order', dest='tls_honor_cipher_order', metavar='<1|0>',
                           choices=['1', '0'], help='Specify or not the cipher order has to be followed.',
                           default=None)
        group.add_argument('--cipher-suites', metavar='<ciphers>', default=None,
                           help='Comma separated list of ciphers to use.If an empty string (e.g "") given it will'
                                ' reset ciphers to default.')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Get or set cluster-wide security settings (exactly one of --get/--set)."""
        if sum([opts.get, opts.set]) != 1:
            _exit_if_errors(['Provide either --set or --get.'])

        if opts.get:
            val, err = self.rest.get_security_settings()
            _exit_if_errors(err)
            print(json.dumps(val))
        elif opts.set:
            self._set(self.rest, opts.disable_http_ui, opts.cluster_encryption_level, opts.tls_min_version,
                      opts.tls_honor_cipher_order, opts.cipher_suites, opts.disable_www_authenticate)

    @staticmethod
    def _set(rest, disable_http_ui, encryption_level, tls_min_version, honor_order, cipher_suites,
             disable_www_authenticate):
        """Translate CLI flag values into the REST representation and apply them."""
        if not any(x is not None for x in [disable_http_ui, encryption_level, tls_min_version,
                                           honor_order, cipher_suites, disable_www_authenticate]):
            _exit_if_errors(['please provide at least one of --cluster-encryption-level, --disable-http-ui,'
                             ' --disable-www-authenticate, --tls-min-version, --tls-honor-cipher-order or'
                             ' --cipher-suites together with --set'])

        # The REST API expects 'true'/'false' strings rather than '1'/'0'.
        if disable_http_ui == '1':
            disable_http_ui = 'true'
        elif disable_http_ui == '0':
            disable_http_ui = 'false'

        if disable_www_authenticate == '1':
            disable_www_authenticate = 'true'
        elif disable_www_authenticate == '0':
            disable_www_authenticate = 'false'

        if honor_order == '1':
            honor_order = 'true'
        elif honor_order == '0':
            honor_order = 'false'

        # An empty string resets the cipher list; otherwise send a JSON array.
        if cipher_suites == '':
            cipher_suites = json.dumps([])
        elif cipher_suites is not None:
            cipher_suites = json.dumps(cipher_suites.split(','))

        _, errors = rest.set_security_settings(disable_http_ui, encryption_level, tls_min_version,
                                               honor_order, cipher_suites, disable_www_authenticate)
        _exit_if_errors(errors)
        _success("Security settings updated")

    @staticmethod
    def get_man_page_name():
        # Parenthesise the conditional: the bare ternary binds looser than "+"
        # and would return just ".html" on Windows.
        return "couchbase-cli-setting-security" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify security settings"
class SettingXdcr(Subcommand):
    """The setting xdcr subcommand"""

    def __init__(self):
        super(SettingXdcr, self).__init__()
        self.parser.prog = "couchbase-cli setting-xdcr"
        group = self.parser.add_argument_group("XDCR Settings")
        group.add_argument("--checkpoint-interval", dest="chk_int", type=int, metavar="<num>",
                           help="Intervals between checkpoints in seconds (60 to 14400)")
        group.add_argument("--worker-batch-size", dest="worker_batch_size", metavar="<num>",
                           type=int, help="Doc batch size (500 to 10000)")
        group.add_argument("--doc-batch-size", dest="doc_batch_size", type=int, metavar="<KB>",
                           help="Document batching size in KB (10 to 100000)")
        group.add_argument("--failure-restart-interval", dest="fail_interval", metavar="<seconds>",
                           type=int,
                           help="Interval for restarting failed xdcr in seconds (1 to 300)")
        group.add_argument("--optimistic-replication-threshold", dest="rep_thresh", type=int,
                           metavar="<bytes>",
                           help="Document body size threshold (bytes) to trigger optimistic "
                                + "replication")
        group.add_argument("--source-nozzle-per-node", dest="src_nozzles", metavar="<num>",
                           type=int,
                           help="The number of source nozzles per source node (1 to 10)")
        group.add_argument("--target-nozzle-per-node", dest="dst_nozzles", metavar="<num>",
                           type=int,
                           help="The number of outgoing nozzles per target node (1 to 10)")
        group.add_argument("--bandwidth-usage-limit", dest="usage_limit", type=int,
                           metavar="<num>", help="The bandwidth usage limit in MiB/Sec")
        group.add_argument("--enable-compression", dest="compression", metavar="<1|0>", choices=["1", "0"],
                           help="Enable/disable compression")
        group.add_argument("--log-level", dest="log_level", metavar="<level>",
                           choices=["Error", "Info", "Debug", "Trace"],
                           help="The XDCR log level")
        group.add_argument("--stats-interval", dest="stats_interval", metavar="<ms>",
                           help="The interval for statistics updates (in milliseconds)")
        group.add_argument('--max-processes', dest='max_proc', metavar="<num>", type=int,
                           help='Number of processes to be allocated to XDCR. The default is 4.')

    @rest_initialiser(version_check=True, cluster_init_check=True, enterprise_check=False)
    def execute(self, opts):
        """Apply global XDCR settings to the cluster."""
        # Compression is an Enterprise Edition only feature.
        if not self.enterprise and opts.compression:
            _exit_if_errors(["--enable-compression can only be configured on enterprise edition"])

        # Translate the 0/1 CLI flag into the REST API's "None"/"Auto" values.
        if opts.compression == "0":
            opts.compression = "None"
        elif opts.compression == "1":
            opts.compression = "Auto"

        _, errors = self.rest.xdcr_global_settings(opts.chk_int, opts.worker_batch_size, opts.doc_batch_size,
                                                   opts.fail_interval, opts.rep_thresh, opts.src_nozzles,
                                                   opts.dst_nozzles, opts.usage_limit, opts.compression,
                                                   opts.log_level, opts.stats_interval, opts.max_proc)
        _exit_if_errors(errors)
        _success("Global XDCR settings updated")

    @staticmethod
    def get_man_page_name():
        # Parenthesise the conditional: the bare ternary binds looser than "+"
        # and would return just ".html" on Windows.
        return "couchbase-cli-setting-xdcr" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify XDCR related settings"
class SettingMasterPassword(Subcommand):
    """The setting master password subcommand"""

    def __init__(self):
        super(SettingMasterPassword, self).__init__()
        self.parser.prog = "couchbase-cli setting-master-password"
        group = self.parser.add_argument_group("Master password options")
        # CBNonEchoedAction prompts interactively (without echo) when the
        # value is not supplied on the command line.
        group.add_argument("--new-password", dest="new_password", metavar="<password>",
                           required=False, action=CBNonEchoedAction, envvar=None,
                           prompt_text="Enter new master password:",
                           confirm_text="Confirm new master password:",
                           help="Sets a new master password")
        group.add_argument("--rotate-data-key", dest="rotate_data_key", action="store_true",
                           help="Rotates the master password data key")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Set a new master password or rotate the data key."""
        if opts.new_password is not None:
            _, errors = self.rest.set_master_pwd(opts.new_password)
            _exit_if_errors(errors)
            _success("New master password set")
        elif opts.rotate_data_key:
            _, errors = self.rest.rotate_master_pwd()
            _exit_if_errors(errors)
            _success("Data key rotated")
        else:
            _exit_if_errors(["No parameters set"])

    @staticmethod
    def get_man_page_name():
        # Parenthesise the conditional: the bare ternary binds looser than "+"
        # and would return just ".html" on Windows.
        return "couchbase-cli-setting-master-password" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Changing the settings of the master password"
class SslManage(Subcommand):
    """The ssl manage subcommand"""

    def __init__(self):
        super(SslManage, self).__init__()
        self.parser.prog = "couchbase-cli ssl-manage"
        group = self.parser.add_argument_group("SSL manage options")
        group.add_argument("--cluster-cert-info", dest="cluster_cert", action="store_true",
                           default=False, help="Gets the cluster certificate")
        group.add_argument("--node-cert-info", dest="node_cert", action="store_true",
                           default=False, help="Gets the node certificate")
        group.add_argument("--regenerate-cert", dest="regenerate", metavar="<path>",
                           help="Regenerate the cluster certificate and save it to a file")
        group.add_argument("--set-node-certificate", dest="set_cert", action="store_true",
                           default=False, help="Sets the node certificate")
        group.add_argument("--upload-cluster-ca", dest="upload_cert", metavar="<path>",
                           help="Upload a new cluster certificate")
        group.add_argument("--set-client-auth", dest="client_auth_path", metavar="<path>",
                           help="A path to a file containing the client auth configuration")
        group.add_argument("--client-auth", dest="show_client_auth", action="store_true",
                           help="Show ssl client certificate authentication value")
        group.add_argument("--extended", dest="extended", action="store_true",
                           default=False, help="Print extended certificate information")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Dispatch to the selected certificate management action."""
        if opts.regenerate is not None:
            # Verify the destination path is writable before asking the server
            # to regenerate, so a bad path does not waste a regeneration.
            try:
                open(opts.regenerate, 'a').close()
            except IOError:
                _exit_if_errors([f'Unable to create file at `{opts.regenerate}`'])
            certificate, errors = self.rest.regenerate_cluster_certificate()
            _exit_if_errors(errors)
            _exit_on_file_write_failure(opts.regenerate, certificate)
            _success(f'Certificate regenerated and copied to `{opts.regenerate}`')
        elif opts.cluster_cert:
            certificate, errors = self.rest.retrieve_cluster_certificate(opts.extended)
            _exit_if_errors(errors)
            # Extended info arrives as a JSON object; the plain certificate is
            # a bare string.
            if isinstance(certificate, dict):
                print(json.dumps(certificate, sort_keys=True, indent=2))
            else:
                print(certificate)
        elif opts.node_cert:
            host = urllib.parse.urlparse(opts.cluster).netloc
            certificate, errors = self.rest.retrieve_node_certificate(host)
            _exit_if_errors(errors)
            print(json.dumps(certificate, sort_keys=True, indent=2))
        elif opts.upload_cert:
            certificate = _exit_on_file_read_failure(opts.upload_cert)
            _, errors = self.rest.upload_cluster_certificate(certificate)
            _exit_if_errors(errors)
            _success(f'Uploaded cluster certificate to {opts.cluster}')
        elif opts.set_cert:
            _, errors = self.rest.set_node_certificate()
            _exit_if_errors(errors)
            _success("Node certificate set")
        elif opts.client_auth_path:
            data = _exit_on_file_read_failure(opts.client_auth_path)
            try:
                config = json.loads(data)
            except ValueError as e:
                _exit_if_errors([f'Client auth config does not contain valid json: {e}'])
            _, errors = self.rest.set_client_cert_auth(config)
            _exit_if_errors(errors)
            _success("SSL client auth updated")
        elif opts.show_client_auth:
            result, errors = self.rest.retrieve_client_cert_auth()
            _exit_if_errors(errors)
            print(json.dumps(result, sort_keys=True, indent=2))
        else:
            _exit_if_errors(["No options specified"])

    @staticmethod
    def get_man_page_name():
        # Parenthesise the conditional: the bare ternary binds looser than "+"
        # and would return just ".html" on Windows.
        return "couchbase-cli-ssl-manage" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage cluster certificates"
class UserManage(Subcommand):
    """The user manage subcommand"""

    def __init__(self):
        super(UserManage, self).__init__()
        self.parser.prog = "couchbase-cli user-manage"
        group = self.parser.add_argument_group("User manage options")
        group.add_argument("--delete", dest="delete", action="store_true", default=False,
                           help="Delete an existing RBAC user")
        group.add_argument("--get", dest="get", action="store_true", default=False,
                           help="Display RBAC user details")
        group.add_argument("--list", dest="list", action="store_true", default=False,
                           help="List all RBAC users and their roles")
        group.add_argument("--my-roles", dest="my_roles", action="store_true", default=False,
                           help="List my roles")
        group.add_argument("--set", dest="set", action="store_true", default=False,
                           help="Create or edit an RBAC user")
        group.add_argument("--set-group", dest="set_group", action="store_true", default=False,
                           help="Create or edit a user group")
        group.add_argument("--delete-group", dest="delete_group", action="store_true", default=False,
                           help="Delete a user group")
        group.add_argument("--list-groups", dest="list_group", action="store_true", default=False,
                           help="List all groups")
        group.add_argument("--get-group", dest="get_group", action="store_true", default=False,
                           help="Get group")
        group.add_argument("--rbac-username", dest="rbac_user", metavar="<username>",
                           help="The RBAC username")
        group.add_argument("--rbac-password", dest="rbac_pass", metavar="<password>",
                           help="The RBAC password")
        group.add_argument("--rbac-name", dest="rbac_name", metavar="<name>",
                           help="The full name of the user")
        group.add_argument("--roles", dest="roles", metavar="<roles_list>",
                           help="The roles for the specified user")
        group.add_argument("--auth-domain", dest="auth_domain", metavar="<domain>",
                           choices=["external", "local"],
                           help="The authentication type for the specified user")
        group.add_argument("--user-groups", dest="groups", metavar="<groups>",
                           help="List of groups for the user to be added to")
        group.add_argument("--group-name", dest="group", metavar="<group>", help="Group name")
        group.add_argument("--group-description", dest="description", metavar="<text>", help="Group description")
        group.add_argument("--ldap-ref", dest="ldap_ref", metavar="<ref>", help="LDAP group's distinguished name")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate that exactly one action flag is given, then dispatch."""
        num_selectors = sum([opts.delete, opts.list, opts.my_roles, opts.set, opts.get, opts.get_group,
                             opts.list_group, opts.delete_group, opts.set_group])
        if num_selectors == 0:
            _exit_if_errors(['Must specify --delete, --list, --my_roles, --set, --get, --get-group, --set-group, '
                             '--list-groups or --delete-group'])
        elif num_selectors != 1:
            _exit_if_errors(['Only one of the following can be specified:--delete, --list, --my_roles, --set, --get,'
                             ' --get-group, --set-group, --list-groups or --delete-group'])

        if opts.delete:
            self._delete(opts)
        elif opts.list:
            self._list(opts)
        elif opts.my_roles:
            self._my_roles(opts)
        elif opts.set:
            self._set(opts)
        elif opts.get:
            self._get(opts)
        elif opts.get_group:
            self._get_group(opts)
        elif opts.set_group:
            self._set_group(opts)
        elif opts.list_group:
            self._list_groups()
        elif opts.delete_group:
            self._delete_group(opts)

    def _delete_group(self, opts):
        """Delete the named user group."""
        if opts.group is None:
            _exit_if_errors(['--group-name is required with the --delete-group option'])
        _, errors = self.rest.delete_user_group(opts.group)
        _exit_if_errors(errors)
        _success(f"Group '{opts.group}' was deleted")

    def _get_group(self, opts):
        """Print the named user group as JSON."""
        if opts.group is None:
            _exit_if_errors(['--group-name is required with the --get-group option'])
        group, errors = self.rest.get_user_group(opts.group)
        _exit_if_errors(errors)
        print(json.dumps(group, indent=2))

    def _set_group(self, opts):
        """Create or update a user group."""
        if opts.group is None:
            _exit_if_errors(['--group-name is required with --set-group'])
        _, errors = self.rest.set_user_group(opts.group, opts.roles, opts.description, opts.ldap_ref)
        _exit_if_errors(errors)
        _success(f"Group '{opts.group}' set")

    def _list_groups(self):
        """Print all user groups as JSON."""
        groups, errors = self.rest.list_user_groups()
        _exit_if_errors(errors)
        print(json.dumps(groups, indent=2))

    def _delete(self, opts):
        """Delete an RBAC user, warning about ignored flags."""
        if opts.rbac_user is None:
            _exit_if_errors(["--rbac-username is required with the --delete option"])
        if opts.rbac_pass is not None:
            _warning("--rbac-password is not used with the --delete option")
        if opts.rbac_name is not None:
            _warning("--rbac-name is not used with the --delete option")
        if opts.roles is not None:
            _warning("--roles is not used with the --delete option")
        if opts.auth_domain is None:
            _exit_if_errors(["--auth-domain is required with the --delete option"])

        _, errors = self.rest.delete_rbac_user(opts.rbac_user, opts.auth_domain)
        _exit_if_errors(errors)
        _success(f"User '{opts.rbac_user}' was removed")

    def _list(self, opts):
        """Print all RBAC users as JSON, warning about ignored flags."""
        # _warning takes a plain string everywhere else in this class; the
        # previous list-wrapped calls here were inconsistent.
        if opts.rbac_user is not None:
            _warning("--rbac-username is not used with the --list option")
        if opts.rbac_pass is not None:
            _warning("--rbac-password is not used with the --list option")
        if opts.rbac_name is not None:
            _warning("--rbac-name is not used with the --list option")
        if opts.roles is not None:
            _warning("--roles is not used with the --list option")
        if opts.auth_domain is not None:
            _warning("--auth-domain is not used with the --list option")

        result, errors = self.rest.list_rbac_users()
        _exit_if_errors(errors)
        print(json.dumps(result, indent=2))

    def _get(self, opts):
        """Print one RBAC user (matched by id) as JSON."""
        if opts.rbac_user is None:
            _exit_if_errors(["--rbac-username is required with the --get option"])
        if opts.rbac_pass is not None:
            _warning("--rbac-password is not used with the --get option")
        if opts.rbac_name is not None:
            _warning("--rbac-name is not used with the --get option")
        if opts.roles is not None:
            _warning("--roles is not used with the --get option")
        if opts.auth_domain is not None:
            _warning("--auth-domain is not used with the --get option")

        result, errors = self.rest.list_rbac_users()
        _exit_if_errors(errors)
        # The REST API has no single-user endpoint here, so filter the full
        # listing by user id.
        user = [u for u in result if u['id'] == opts.rbac_user]
        if len(user) != 0:
            print(json.dumps(user, indent=2))
        else:
            _exit_if_errors([f'no user {opts.rbac_user}'])

    def _my_roles(self, opts):
        """Print the roles of the authenticated user, warning about ignored flags."""
        if opts.rbac_user is not None:
            _warning("--rbac-username is not used with the --my-roles option")
        if opts.rbac_pass is not None:
            _warning("--rbac-password is not used with the --my-roles option")
        if opts.rbac_name is not None:
            _warning("--rbac-name is not used with the --my-roles option")
        if opts.roles is not None:
            _warning("--roles is not used with the --my-roles option")
        if opts.auth_domain is not None:
            _warning("--auth-domain is not used with the --my-roles option")

        result, errors = self.rest.my_roles()
        _exit_if_errors(errors)
        print(json.dumps(result, indent=2))

    def _set(self, opts):
        """Create or update an RBAC user."""
        if opts.rbac_user is None:
            _exit_if_errors(["--rbac-username is required with the --set option"])
        # Externally-authenticated users have no locally stored password.
        if opts.rbac_pass is not None and opts.auth_domain == "external":
            _warning("--rbac-password cannot be used with the external auth domain")
            opts.rbac_pass = None
        if opts.auth_domain is None:
            _exit_if_errors(["--auth-domain is required with the --set option"])

        _, errors = self.rest.set_rbac_user(opts.rbac_user, opts.rbac_pass, opts.rbac_name, opts.roles,
                                            opts.auth_domain, opts.groups)
        _exit_if_errors(errors)

        if opts.roles is not None and "query_external_access" in opts.roles:
            _warning('Granting the query_external_access role permits execution of the N1QL '
                     'function CURL() and may allow access to other network endpoints in the local network and '
                     'the Internet.')

        _success(f"User {opts.rbac_user} set")

    @staticmethod
    def get_man_page_name():
        # Parenthesise the conditional: the bare ternary binds looser than "+"
        # and would return just ".html" on Windows.
        return "couchbase-cli-user-manage" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage RBAC users"
class XdcrReplicate(Subcommand):
    """The xdcr replicate subcommand"""

    def __init__(self):
        super(XdcrReplicate, self).__init__()
        self.parser.prog = "couchbase-cli xdcr-replicate"
        group = self.parser.add_argument_group("XDCR replicate options")
        group.add_argument("--get", action="store_true", help="Retrieve the settings of a XDCR replication.")
        group.add_argument("--create", dest="create", action="store_true",
                           default=False, help="Create an XDCR replication")
        group.add_argument("--delete", dest="delete", action="store_true",
                           default=False, help="Delete an XDCR replication")
        group.add_argument("--pause", dest="pause", action="store_true",
                           default=False, help="Pause an XDCR replication")
        group.add_argument("--list", dest="list", action="store_true",
                           default=False, help="List all XDCR replications")
        group.add_argument("--resume", dest="resume", action="store_true",
                           default=False, help="Resume an XDCR replication")
        group.add_argument("--settings", dest="settings", action="store_true",
                           default=False, help="Set advanced settings for an XDCR replication")
        group.add_argument("--xdcr-from-bucket", dest="from_bucket", metavar="<bucket>",
                           help="The name bucket to replicate data from")
        group.add_argument("--xdcr-to-bucket", dest="to_bucket", metavar="<bucket>",
                           help="The name bucket to replicate data to")
        group.add_argument("--xdcr-cluster-name", dest="cluster_name", metavar="<name>",
                           help="The name of the cluster reference to replicate to")
        # --xdcr-replication-mode is deprecated and hidden from --help.
        group.add_argument("--xdcr-replication-mode", dest="rep_mode", metavar="<mode>",
                           choices=["xmem", "capi"], action=CBDeprecatedAction, help=SUPPRESS)
        group.add_argument("--filter-expression", dest="filter", metavar="<regex>",
                           help="Regular expression to filter replication streams")
        group.add_argument("--filter-skip-restream", dest="filter_skip", action="store_true", default=False,
                           help="Restart the replication. It must be specified together with --filter-expression")
        group.add_argument("--xdcr-replicator", dest="replicator_id", metavar="<id>",
                           help="Replication ID")
        group.add_argument("--checkpoint-interval", dest="chk_int", type=int, metavar="<seconds>",
                           help="Intervals between checkpoints in seconds (60 to 14400)")
        group.add_argument("--worker-batch-size", dest="worker_batch_size", type=int,
                           metavar="<num>", help="Doc batch size (500 to 10000)")
        group.add_argument("--doc-batch-size", dest="doc_batch_size", type=int, metavar="<KB>",
                           help="Document batching size in KB (10 to 100000)")
        group.add_argument("--failure-restart-interval", dest="fail_interval", type=int,
                           metavar="<seconds>",
                           help="Interval for restarting failed xdcr in seconds (1 to 300)")
        group.add_argument("--optimistic-replication-threshold", dest="rep_thresh", type=int,
                           metavar="<bytes>",
                           help="Document body size threshold to trigger optimistic replication"
                                + " (bytes)")
        group.add_argument("--source-nozzle-per-node", dest="src_nozzles", type=int,
                           metavar="<num>",
                           help="The number of source nozzles per source node (1 to 10)")
        group.add_argument("--target-nozzle-per-node", dest="dst_nozzles", type=int,
                           metavar="<num>",
                           help="The number of outgoing nozzles per target node (1 to 10)")
        group.add_argument("--bandwidth-usage-limit", dest="usage_limit", type=int,
                           metavar="<num>", help="The bandwidth usage limit in MiB/Sec")
        group.add_argument("--enable-compression", dest="compression", metavar="<1|0>", choices=["1", "0"],
                           help="Enable/disable compression")
        group.add_argument("--log-level", dest="log_level", metavar="<level>",
                           choices=["Error", "Warn", "Info", "Debug", "Trace"],
                           help="The XDCR log level")
        group.add_argument("--stats-interval", dest="stats_interval", metavar="<ms>",
                           help="The interval for statistics updates (in milliseconds)")
        group.add_argument("--priority", dest="priority", choices=['High', 'Medium', 'Low'],
                           metavar="<High|Medium|Low>", help='XDCR priority, by default set to High')
        group.add_argument('--reset-expiry', choices=['1', '0'], metavar='<1|0>', dest='reset_expiry',
                           default=None, help='When set to true the expiry of mutations will be set to zero')
        group.add_argument('--filter-deletion', choices=['1', '0'], metavar='<1|0>', default=None, dest='filter_del',
                           help='When set to true delete mutations will be filter out and not sent to the target '
                                'cluster')
        group.add_argument('--filter-expiration', choices=['1', '0'], metavar='<1|0>', default=None,
                           dest='filter_exp',
                           help='When set to true expiry mutations will be filter out and not sent to the target '
                                'cluster')

        collection_group = self.parser.add_argument_group("Collection options")
        collection_group.add_argument('--collection-explicit-mappings', choices=['1', '0'], metavar='<1|0>',
                                      default=None, help='If explicit collection mappings is to be used. '
                                                         '(Enterprise Edition Only)')
        collection_group.add_argument('--collection-migration', choices=['1', '0'], metavar='<1|0>',
                                      default=None, help='If XDCR is to run in collection migration mode. '
                                                         '(Enterprise Edition only)')
        collection_group.add_argument('--collection-mapping-rules', type=str, default=None, metavar='<mappings>',
                                      help='The mapping rules specified as a JSON formatted string. '
                                           '(Enterprise Edition Only)')

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Validate enterprise-only flags and dispatch the selected action."""
        if not self.enterprise and opts.compression:
            _exit_if_errors(["--enable-compression can only be configured on enterprise edition"])
        if not self.enterprise and (opts.collection_migration or opts.collection_explicit_mappings is not None
                                    or opts.collection_mapping_rules is not None):
            _exit_if_errors(["[--collection-migration, --collection-explicit-mappings, --collection-mapping-rules] can"
                             " only be configured on enterprise edition"])

        # Translate the 0/1 CLI flag into the REST API's "None"/"Auto" values.
        if opts.compression == "0":
            opts.compression = "None"
        elif opts.compression == "1":
            opts.compression = "Auto"

        actions = sum([opts.create, opts.delete, opts.pause, opts.list, opts.resume, opts.settings, opts.get])
        if actions == 0:
            _exit_if_errors(['Must specify one of --create, --delete, --pause, --list, --resume, --settings, --get'])
        elif actions > 1:
            _exit_if_errors(['The --create, --delete, --pause, --list, --resume, --settings, --get flags may not be '
                             'specified at the same time'])
        elif opts.create:
            self._create(opts)
        elif opts.delete:
            self._delete(opts)
        elif opts.pause or opts.resume:
            self._pause_resume(opts)
        elif opts.list:
            self._list()
        elif opts.settings:
            self._settings(opts)
        elif opts.get:
            self._get(opts)

    def _get(self, opts):
        """Print the settings of a single replicator as JSON."""
        if opts.replicator_id is None:
            _exit_if_errors(["--xdcr-replicator is needed to get the replicator settings"])
        settings, errors = self.rest.get_xdcr_replicator_settings(opts.replicator_id)
        _exit_if_errors(errors)
        print(json.dumps(settings, indent=4, sort_keys=True))

    def _create(self, opts):
        """Create a new XDCR replication."""
        if opts.collection_migration == '1' and opts.collection_explicit_mappings == '1':
            _exit_if_errors(['cannot enable both collection migration and explicit mappings'])
        if opts.filter_skip and opts.filter is None:
            _exit_if_errors(["--filter-expression is needed with the --filter-skip-restream option"])

        _, errors = self.rest.create_xdcr_replication(opts.cluster_name, opts.to_bucket, opts.from_bucket, opts.chk_int,
                                                      opts.worker_batch_size, opts.doc_batch_size, opts.fail_interval,
                                                      opts.rep_thresh, opts.src_nozzles, opts.dst_nozzles,
                                                      opts.usage_limit, opts.compression, opts.log_level,
                                                      opts.stats_interval, opts.filter, opts.priority,
                                                      opts.reset_expiry, opts.filter_del, opts.filter_exp,
                                                      opts.collection_explicit_mappings, opts.collection_migration,
                                                      opts.collection_mapping_rules)
        _exit_if_errors(errors)
        _success("XDCR replication created")

    def _delete(self, opts):
        """Delete an existing replication."""
        if opts.replicator_id is None:
            _exit_if_errors(["--xdcr-replicator is needed to delete a replication"])
        _, errors = self.rest.delete_xdcr_replicator(opts.replicator_id)
        _exit_if_errors(errors)
        _success("XDCR replication deleted")

    def _pause_resume(self, opts):
        """Pause or resume a replication, rejecting no-op requests."""
        if opts.replicator_id is None:
            _exit_if_errors(["--xdcr-replicator is needed to pause or resume a replication"])

        tasks, errors = self.rest.get_tasks()
        _exit_if_errors(errors)
        # Reject a pause of an idle replication or a resume of a running one.
        for task in tasks:
            if task["type"] == "xdcr" and task["id"] == opts.replicator_id:
                if opts.pause and task["status"] == "notRunning":
                    _exit_if_errors(["The replication is not running yet. Pause is not needed"])
                if opts.resume and task["status"] == "running":
                    _exit_if_errors(["The replication is running already. Resume is not needed"])
                break

        if opts.pause:
            _, errors = self.rest.pause_xdcr_replication(opts.replicator_id)
            _exit_if_errors(errors)
            _success("XDCR replication paused")
        elif opts.resume:
            _, errors = self.rest.resume_xdcr_replication(opts.replicator_id)
            _exit_if_errors(errors)
            _success("XDCR replication resumed")

    def _list(self):
        """Print a summary of every XDCR replication task."""
        tasks, errors = self.rest.get_tasks()
        _exit_if_errors(errors)
        for task in tasks:
            if task["type"] == "xdcr":
                # Double-quoted f-strings keep the single-quoted subscripts
                # valid on Python versions before 3.12 (PEP 701).
                print(f"stream id: {task['id']}")
                print(f"   status: {task['status']}")
                print(f"   source: {task['source']}")
                print(f"   target: {task['target']}")
                if "filterExpression" in task and task["filterExpression"] != "":
                    print(f"   filter: {task['filterExpression']}")

    def _settings(self, opts):
        """Update the advanced settings of an existing replicator."""
        if opts.replicator_id is None:
            _exit_if_errors(["--xdcr-replicator is needed to change a replicators settings"])
        if opts.filter_skip and opts.filter is None:
            _exit_if_errors(["--filter-expression is needed with the --filter-skip-restream option"])
        if opts.collection_migration == '1' and opts.collection_explicit_mappings == '1':
            _exit_if_errors(['cannot enable both collection migration and explicit mappings'])

        _, errors = self.rest.xdcr_replicator_settings(opts.chk_int, opts.worker_batch_size, opts.doc_batch_size,
                                                       opts.fail_interval, opts.rep_thresh, opts.src_nozzles,
                                                       opts.dst_nozzles, opts.usage_limit, opts.compression,
                                                       opts.log_level, opts.stats_interval, opts.replicator_id,
                                                       opts.filter, opts.filter_skip, opts.priority, opts.reset_expiry,
                                                       opts.filter_del, opts.filter_exp,
                                                       opts.collection_explicit_mappings, opts.collection_migration,
                                                       opts.collection_mapping_rules)
        _exit_if_errors(errors)
        _success("XDCR replicator settings updated")

    @staticmethod
    def get_man_page_name():
        # Parenthesise the conditional: the bare ternary binds looser than "+"
        # and would return just ".html" on Windows.
        return "couchbase-cli-xdcr-replicate" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage XDCR cluster references"
class XdcrSetup(Subcommand):
"""The xdcr setup subcommand"""
    def __init__(self):
        # Build the argument parser for `couchbase-cli xdcr-setup`, which
        # manages remote cluster references used by XDCR.
        super(XdcrSetup, self).__init__()
        self.parser.prog = "couchbase-cli xdcr-setup"
        group = self.parser.add_argument_group("XDCR setup options")
        # Exactly one of the four action flags is expected; validated in
        # execute().
        group.add_argument("--create", dest="create", action="store_true",
                           default=False, help="Create an XDCR remote reference")
        group.add_argument("--delete", dest="delete", action="store_true",
                           default=False, help="Delete an XDCR remote reference")
        group.add_argument("--edit", dest="edit", action="store_true",
                           default=False, help="Set the local read-only user")
        group.add_argument("--list", dest="list", action="store_true",
                           default=False, help="List all XDCR remote references")
        group.add_argument("--xdcr-cluster-name", dest="name", metavar="<name>",
                           help="The name for the remote cluster reference")
        group.add_argument("--xdcr-hostname", dest="hostname", metavar="<hostname>",
                           help="The hostname of the remote cluster reference")
        # NOTE(review): the remote credentials are stored under
        # dest="r_username"/"r_password", but _set() validates
        # opts.username/opts.password instead — confirm which attributes are
        # intended before relying on that validation.
        group.add_argument("--xdcr-username", dest="r_username", metavar="<username>",
                           help="The username of the remote cluster reference")
        group.add_argument("--xdcr-password", dest="r_password", metavar="<password>",
                           help="The password of the remote cluster reference")
        group.add_argument("--xdcr-user-certificate", dest="r_certificate", metavar="<path>",
                           help="The user certificate for authentication")
        group.add_argument("--xdcr-user-key", dest="r_key", metavar="<path>",
                           help="The user key for authentication")
        # --xdcr-demand-encryption / --xdcr-encryption-type are deprecated in
        # favour of --xdcr-secure-connection and hidden from --help.
        group.add_argument("--xdcr-demand-encryption", dest="encrypt", choices=["0", "1"],
                           action=CBDeprecatedAction, help=SUPPRESS)
        group.add_argument("--xdcr-encryption-type", dest="encryption_type", choices=["full", "half"],
                           metavar="<type>", action=CBDeprecatedAction, help=SUPPRESS)
        group.add_argument("--xdcr-certificate", dest="certificate", metavar="<path>",
                           help="The certificate used for encryption")
        group.add_argument("--xdcr-secure-connection", dest="secure_connection", choices=["none", "full", "half"],
                           metavar="<type>", help="The XDCR secure connection type")
@rest_initialiser(cluster_init_check=True, version_check=True)
def execute(self, opts):
actions = sum([opts.create, opts.delete, opts.edit, opts.list])
if actions == 0:
_exit_if_errors(["Must specify one of --create, --delete, --edit, --list"])
elif actions > 1:
_exit_if_errors(["The --create, --delete, --edit, --list flags may not be specified at the same time"])
elif opts.create or opts.edit:
self._set(opts)
elif opts.delete:
self._delete(opts)
elif opts.list:
self._list()
def _set(self, opts):
cmd = "create"
if opts.edit:
cmd = "edit"
if opts.name is None:
_exit_if_errors([f'--xdcr-cluster-name is required to {cmd} a cluster connection'])
if opts.hostname is None:
_exit_if_errors([f'--xdcr-hostname is required to {cmd} a cluster connections'])
if opts.username is None:
_exit_if_errors([f'--xdcr-username is required to {cmd} a cluster connections'])
if opts.password is None:
_exit_if_errors([f'--xdcr-password is required to {cmd} a cluster connections'])
if (opts.encrypt is not None or opts.encryption_type is not None) and opts.secure_connection is not None:
_exit_if_errors(["Cannot use deprecated flags --xdcr-demand-encryption or --xdcr-encryption-type with"
" --xdcr-secure-connection"])
if opts.secure_connection == "none":
opts.encrypt = "0"
opts.encryption_type = None
elif opts.secure_connection == "half":
opts.encrypt = "1"
opts.encryption_type = "half"
elif opts.secure_connection == "full":
opts.encrypt = "1"
opts.encryption_type = "full"
elif opts.encrypt is None and opts.encryption_type is None:
opts.encrypt = "0"
opts.encryption_type = None
raw_cert = None
if opts.encrypt == "1":
if opts.encryption_type is None:
opts.encryption_type = "full"
if opts.encryption_type == "full":
if opts.certificate is None:
_exit_if_errors(["certificate required if encryption is demanded"])
raw_cert = _exit_on_file_read_failure(opts.certificate)
raw_user_key = None
if opts.r_key:
raw_user_key = _exit_on_file_read_failure(opts.r_key)
raw_user_cert = None
if opts.r_certificate:
raw_user_cert = _exit_on_file_read_failure(opts.r_certificate)
if opts.create:
_, errors = self.rest.create_xdcr_reference(opts.name, opts.hostname, opts.r_username, opts.r_password,
opts.encrypt, opts.encryption_type, raw_cert, raw_user_cert,
raw_user_key)
_exit_if_errors(errors)
_success("Cluster reference created")
else:
_, errors = self.rest.edit_xdcr_reference(opts.name, opts.hostname, opts.r_username, opts.r_password,
opts.encrypt, opts.encryption_type, raw_cert, raw_user_cert,
raw_user_key)
_exit_if_errors(errors)
_success("Cluster reference edited")
def _delete(self, opts):
if opts.name is None:
_exit_if_errors(["--xdcr-cluster-name is required to deleta a cluster connection"])
_, errors = self.rest.delete_xdcr_reference(opts.name)
_exit_if_errors(errors)
_success("Cluster reference deleted")
def _list(self):
clusters, errors = self.rest.list_xdcr_references()
_exit_if_errors(errors)
for cluster in clusters:
if not cluster["deleted"]:
print(f'cluster name: {cluster['name']}')
print(f' uuid: {cluster['uuid']}')
print(f' host name: {cluster['hostname']}')
print(f' user name: {cluster['username']}')
print(f' uri: {cluster['uri']}')
@staticmethod
def get_man_page_name():
return "couchbase-cli-xdcr-setup" + ".1" if os.name != "nt" else ".html"
@staticmethod
def get_description():
return "Manage XDCR replications"
class EventingFunctionSetup(Subcommand):
    """The Eventing Service Function setup subcommand.

    Supports importing/exporting, deleting, listing, deploying/undeploying
    and pausing/resuming Eventing Service functions.
    """

    def __init__(self):
        super(EventingFunctionSetup, self).__init__()
        self.parser.prog = "couchbase-cli eventing-function-setup"
        group = self.parser.add_argument_group("Eventing Service Function setup options")
        group.add_argument("--import", dest="_import", action="store_true",
                           default=False, help="Import functions")
        group.add_argument("--export", dest="export", action="store_true",
                           default=False, help="Export a function")
        group.add_argument("--export-all", dest="export_all", action="store_true",
                           default=False, help="Export all functions")
        group.add_argument("--delete", dest="delete", action="store_true",
                           default=False, help="Delete a function")
        group.add_argument("--list", dest="list", action="store_true",
                           default=False, help="List all functions")
        group.add_argument("--deploy", dest="deploy", action="store_true",
                           default=False, help="Deploy a function")
        group.add_argument("--undeploy", dest="undeploy", action="store_true",
                           default=False, help="Undeploy a function")
        group.add_argument("--boundary", dest="boundary", metavar="<from-everything|from-now>",
                           choices=["from-everything", "from-now"], default=False,
                           help="Set the function deployment boundary")
        group.add_argument("--name", dest="name", metavar="<name>",
                           default=False, help="The name of the function to take an action on")
        group.add_argument("--file", dest="filename", metavar="<file>",
                           default=False, help="The file to export and import function(s) to and from")
        group.add_argument("--pause", dest="pause", action="store_true", help="Pause a function")
        group.add_argument("--resume", dest="resume", action="store_true", help="Resume a function")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate that exactly one action flag was supplied and dispatch to it."""
        # pylint: disable=protected-access
        actions = sum([opts._import, opts.export, opts.export_all, opts.delete, opts.list, opts.deploy, opts.undeploy,
                       opts.pause, opts.resume])
        if actions == 0:
            _exit_if_errors(["Must specify one of --import, --export, --export-all, --delete, --list, --deploy,"
                             " --undeploy, --pause, --resume"])
        elif actions > 1:
            _exit_if_errors(['The --import, --export, --export-all, --delete, --list, --deploy, --undeploy, --pause, '
                             '--resume flags may not be specified at the same time'])
        elif opts._import:  # pylint: disable=protected-access
            self._import(opts)
        elif opts.export:
            self._export(opts)
        elif opts.export_all:
            self._export_all(opts)
        elif opts.delete:
            self._delete(opts)
        elif opts.list:
            self._list()
        elif opts.deploy:
            self._deploy_undeploy(opts, True)
        elif opts.undeploy:
            self._deploy_undeploy(opts, False)
        elif opts.pause:
            self._pause_resume(opts, True)
        elif opts.resume:
            self._pause_resume(opts, False)

    def _pause_resume(self, opts, pause):
        """Pause (pause=True) or resume (pause=False) the function named by --name."""
        if not opts.name:
            _exit_if_errors([f"Flag --name is required with the {'--pause' if pause else '--resume'} flag"])
        _, err = self.rest.pause_resume_function(opts.name, pause)
        _exit_if_errors(err)
        _success(f"Function was {'paused' if pause else 'resumed'}")

    def _import(self, opts):
        """Import function definitions from the JSON file given via --file."""
        if not opts.filename:
            _exit_if_errors(["--file is needed to import functions"])
        import_functions = _exit_on_file_read_failure(opts.filename)
        import_functions = json.loads(import_functions)
        _, errors = self.rest.import_functions(import_functions)
        _exit_if_errors(errors)
        _success("Events imported")

    def _export(self, opts):
        """Export the single function named by --name into the file given by --file."""
        if not opts.filename:
            _exit_if_errors(["--file is needed to export a function"])
        if not opts.name:
            _exit_if_errors(["--name is needed to export a function"])
        functions, errors = self.rest.export_functions()
        _exit_if_errors(errors)
        exported_function = None
        for function in functions:
            if function["appname"] == opts.name:
                # wrapped in a list so the output file has the same shape as --export-all
                exported_function = [function]
        if not exported_function:
            _exit_if_errors([f'Function {opts.name} does not exist'])
        _exit_on_file_write_failure(opts.filename, json.dumps(exported_function, separators=(',', ':')))
        _success("Function exported to: " + opts.filename)

    def _export_all(self, opts):
        """Export every function into the file given by --file."""
        if not opts.filename:
            _exit_if_errors(["--file is needed to export all functions"])
        exported_functions, errors = self.rest.export_functions()
        _exit_if_errors(errors)
        _exit_on_file_write_failure(opts.filename, json.dumps(exported_functions, separators=(',', ':')))
        _success(f'All functions exported to: {opts.filename}')

    def _delete(self, opts):
        """Delete the function named by --name."""
        if not opts.name:
            _exit_if_errors(["--name is needed to delete a function"])
        _, errors = self.rest.delete_function(opts.name)
        _exit_if_errors(errors)
        _success("Request to delete the function was accepted")

    def _deploy_undeploy(self, opts, deploy):
        """Deploy (deploy=True) or undeploy (deploy=False) the function named by --name."""
        if not opts.name:
            _exit_if_errors([f"--name is needed to {'deploy' if deploy else 'undeploy'} a function"])
        if deploy and not opts.boundary:
            _exit_if_errors(["--boundary is needed to deploy a function"])
        _, errors = self.rest.deploy_undeploy_function(opts.name, deploy, opts.boundary)
        _exit_if_errors(errors)
        _success(f"Request to {'deploy' if deploy else 'undeploy'} the function was accepted")

    def _list(self):
        """Print each function's name, deployment status and source/metadata buckets."""
        functions, errors = self.rest.list_functions()
        _exit_if_errors(errors)
        for function in functions:
            print(function['appname'])
            status = ''
            if function['settings']['deployment_status']:
                status = 'Deployed'
            else:
                status = 'Undeployed'
            print(f' Status: {status}')
            print(f" Source Bucket: {function['depcfg']['source_bucket']}")
            print(f" Metadata Bucket: {function['depcfg']['metadata_bucket']}")

    @staticmethod
    def get_man_page_name():
        # Bug fix: the extension must be parenthesised; previously the conditional
        # applied to the whole concatenation and Windows got a bare ".html".
        return "couchbase-cli-eventing-function-setup" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage Eventing Service Functions"
class AnalyticsLinkSetup(Subcommand):
    """The analytics link setup subcommand.

    Creates, edits, deletes and lists Analytics Service links of type
    'couchbase', 's3' or 'azureblob'.
    """

    def __init__(self):
        super(AnalyticsLinkSetup, self).__init__()
        self.parser.prog = "couchbase-cli analytics-link-setup"
        group = self.parser.add_argument_group("Analytics Service link setup options")
        group.add_argument("--create", dest="create", action="store_true",
                           default=False, help="Create a link")
        group.add_argument("--delete", dest="delete", action="store_true",
                           default=False, help="Delete a link")
        group.add_argument("--edit", dest="edit", action="store_true",
                           default=False, help="Modify a link")
        group.add_argument("--list", dest="list", action="store_true",
                           default=False, help="List all links")
        group.add_argument("--dataverse", dest="dataverse", metavar="<name>",
                           help="The dataverse of the link (Deprecated)")
        group.add_argument("--scope", dest="scope", metavar="<name>",
                           help="The analytics scope of the link in its canonical form")
        group.add_argument("--name", dest="name", metavar="<name>",
                           help="The name of the link")
        group.add_argument("--type", dest="type", metavar="<type>", choices=["couchbase", "s3", "azureblob"],
                           help="The type of the link ('couchbase', 's3' or 'azureblob')")
        group = self.parser.add_argument_group("Analytics Service Couchbase link setup options")
        group.add_argument("--hostname", dest="hostname", metavar="<hostname>",
                           help="The hostname of the link")
        group.add_argument("--link-username", dest="link_username", metavar="<username>",
                           help="The username of the link")
        group.add_argument("--link-password", dest="link_password", metavar="<password>",
                           help="The password of the link")
        group.add_argument("--user-certificate", dest="user_certificate", metavar="<path>",
                           help="The user certificate for authentication")
        group.add_argument("--user-key", dest="user_key", metavar="<path>",
                           help="The user key for authentication")
        group.add_argument("--certificate", dest="certificate", metavar="<path>",
                           help="The certificate used for encryption")
        group.add_argument("--encryption", dest="encryption", choices=["none", "full", "half"],
                           metavar="<type>",
                           help="The link encryption type ('none', 'full' or 'half')")
        group = self.parser.add_argument_group("Analytics Service S3 link setup options")
        group.add_argument("--access-key-id", dest="access_key_id", metavar="<id>",
                           help="The access key ID of the link")
        group.add_argument("--secret-access-key", dest="secret_access_key", metavar="<key>",
                           help="The secret access key of the link")
        group.add_argument("--session-token", dest="session_token", metavar="<token>",
                           help="Temporary credentials session token")
        group.add_argument("--region", dest="region", metavar="<region>",
                           help="The region of the link")
        group.add_argument("--service-endpoint", dest="service_endpoint", metavar="<url>",
                           help="The service endpoint of the link (optional)")
        group = self.parser.add_argument_group("Analytics Service Azure Blob link setup options")
        group.add_argument("--connection-string", dest="connection_string", metavar="<key>",
                           help="The connection string of the link")
        group.add_argument("--account-name", dest="account_name", metavar="<id>",
                           help="The account name of the link")
        group.add_argument("--account-key", dest="account_key", metavar="<key>",
                           help="The account key of the link")
        group.add_argument("--shared-access-signature", dest="shared_access_signature", metavar="<token>",
                           help="The shared access signature of the link")
        group.add_argument("--blob-endpoint", dest="blob_endpoint", metavar="<url>",
                           help="The blob endpoint of the link (optional)")
        group.add_argument("--endpoint-suffix", dest="endpoint_suffix", metavar="<url>",
                           help="The endpoint suffix of the link (optional)")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate the action flags, warn about deprecated flags and dispatch."""
        actions = sum([opts.create, opts.delete, opts.edit, opts.list])
        if actions == 0:
            _exit_if_errors(["Must specify one of --create, --delete, --edit, --list"])
        elif actions > 1:
            _exit_if_errors(["The --create, --delete, --edit, --list flags may not be specified at the same time"])
        if opts.dataverse:
            _deprecated("--dataverse is deprecated, please use --scope instead")
        if opts.dataverse and opts.scope:
            _exit_if_errors(['Only one of --dataverse and --scope is allowed'])
        if opts.create or opts.edit:
            self._set(opts)
        elif opts.delete:
            self._delete(opts)
        elif opts.list:
            self._list(opts)

    def _set(self, opts):
        """Create or edit an analytics link after validating the required flags."""
        cmd = "create"
        if opts.edit:
            cmd = "edit"
        if opts.dataverse is None and opts.scope is None:
            _exit_if_errors([f'--dataverse or --scope is required to {cmd} a link'])
        if opts.name is None:
            _exit_if_errors([f'--name is required to {cmd} a link'])
        if opts.create and opts.type is None:
            _exit_if_errors([f'--type is required to {cmd} a link'])
        if opts.type == 'azureblob':
            # Azure Blob links accept exactly one of the three authentication methods
            if opts.connection_string is None and opts.account_key is None and opts.shared_access_signature is None:
                _exit_if_errors(['No authentication parameters provided'])
            if opts.connection_string and (opts.account_key or opts.shared_access_signature):
                _exit_if_errors(['Only a single authentication method is allowed'])
            if opts.account_key and opts.shared_access_signature:
                _exit_if_errors(['Only a single authentication method is allowed'])
        if opts.dataverse:
            # Fold the deprecated flag into the canonical one
            opts.scope = opts.dataverse
        # Replace the certificate/key paths with the file contents
        if opts.certificate:
            opts.certificate = _exit_on_file_read_failure(opts.certificate)
        if opts.user_key:
            opts.user_key = _exit_on_file_read_failure(opts.user_key)
        if opts.user_certificate:
            opts.user_certificate = _exit_on_file_read_failure(opts.user_certificate)
        if opts.create:
            _, errors = self.rest.create_analytics_link(opts)
            _exit_if_errors(errors)
            _success("Link created")
        else:
            _, errors = self.rest.edit_analytics_link(opts)
            _exit_if_errors(errors)
            _success("Link edited")

    def _delete(self, opts):
        """Delete the analytics link named by --name in the given scope."""
        if opts.dataverse is None and opts.scope is None:
            _exit_if_errors(['--dataverse or --scope is required to delete a link'])
        if opts.name is None:
            _exit_if_errors(['--name is required to delete a link'])
        if opts.dataverse:
            opts.scope = opts.dataverse
        _, errors = self.rest.delete_analytics_link(opts.scope, opts.name)
        _exit_if_errors(errors)
        _success("Link deleted")

    def _list(self, opts):
        """Print the matching analytics links as pretty-printed JSON."""
        if opts.dataverse:
            opts.scope = opts.dataverse
        clusters, errors = self.rest.list_analytics_links(opts.scope, opts.name, opts.type)
        _exit_if_errors(errors)
        print(json.dumps(clusters, sort_keys=True, indent=2))

    @staticmethod
    def get_man_page_name():
        # Bug fix: the extension must be parenthesised; previously the conditional
        # applied to the whole concatenation and Windows got a bare ".html".
        return "couchbase-cli-analytics-link-setup" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage Analytics Links"
class UserChangePassword(Subcommand):
    """The change password subcommand.

    Changes the password of the user whose credentials were used to
    authenticate the request.
    """

    def __init__(self):
        super(UserChangePassword, self).__init__()
        self.parser.prog = "couchbase-cli user-change-password"
        group = self.parser.add_argument_group("User password change option")
        group.add_argument("--new-password", dest="new_pass", metavar="<password>", required=True,
                           help="The new password")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Send the password change request and report the outcome."""
        # Defensive only: argparse already enforces required=True for --new-password
        if opts.new_pass is None:
            _exit_if_errors(["--new-password is required"])
        # NOTE(review): "passsword" [sic] is the method name exposed by the REST
        # client; renaming it would have to happen there first.
        _, rv = self.rest.user_change_passsword(opts.new_pass)
        _exit_if_errors(rv)
        _success(f'Changed password for {opts.username}')

    @staticmethod
    def get_man_page_name():
        # Bug fix: the extension must be parenthesised; previously the conditional
        # applied to the whole concatenation and Windows got a bare ".html".
        return "couchbase-cli-user-change-password" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Change user password"
class CollectionManage(Subcommand):
    """The collections-manage subcommand.

    Creates, drops and lists scopes and collections within a bucket.
    Collection paths use the "scope.collection" form, where an empty
    component means "_default".
    """

    def __init__(self):
        super(CollectionManage, self).__init__()
        self.parser.prog = "couchbase-cli collection-manage"
        group = self.parser.add_argument_group("Collection manage option")
        group.add_argument("--bucket", dest="bucket", metavar="<bucket>", required=True, help="The bucket to use")
        group.add_argument("--create-scope", dest="create_scope", metavar="<scope>", default=None,
                           help="The name of the scope to make")
        group.add_argument("--drop-scope", dest="drop_scope", metavar="<scope>", default=None,
                           help="The name of the scope to remove")
        group.add_argument("--list-scopes", dest="list_scopes", action="store_true", default=None,
                           help="List all of the scopes in the bucket")
        group.add_argument("--create-collection", dest="create_collection", metavar="<collection>", default=None,
                           help="The path to the collection to make")
        group.add_argument("--drop-collection", dest="drop_collection", metavar="<collection>", default=None,
                           help="The path to the collection to remove")
        group.add_argument("--list-collections", dest="list_collections", metavar="<scope_list>", default=None,
                           const="", nargs='?', help="List all of the collections in the provided scopes. If no scopes "
                           "are provided it will print all collections")
        group.add_argument("--max-ttl", dest="max_ttl", metavar="<seconds>", type=int,
                           help="Set the maximum TTL the collection will accept")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate that exactly one action flag was supplied and dispatch to it."""
        cmds = [opts.create_scope, opts.drop_scope, opts.list_scopes, opts.create_collection, opts.drop_collection,
                opts.list_collections]
        cmd_total = sum(cmd is not None for cmd in cmds)
        args = "--create-scope, --drop-scope, --list-scopes, --create-collection, --drop-collection, or " \
               "--list-collections"
        if cmd_total == 0:
            _exit_if_errors([f'Must specify one of the following: {args}'])
        elif cmd_total != 1:
            _exit_if_errors([f'Only one of the following may be specified: {args}'])
        if opts.max_ttl is not None and opts.create_collection is None:
            _exit_if_errors(["--max-ttl can only be set with --create-collection"])
        if opts.create_scope:
            self._create_scope(opts)
        if opts.drop_scope:
            self._drop_scope(opts)
        if opts.list_scopes:
            self._list_scopes(opts)
        if opts.create_collection:
            self._create_collection(opts)
        if opts.drop_collection:
            self._drop_collection(opts)
        # --list-collections may legitimately be the empty string (list everything)
        if opts.list_collections is not None:
            self._list_collections(opts)

    def _create_scope(self, opts):
        """Create the scope named by --create-scope."""
        _, errors = self.rest.create_scope(opts.bucket, opts.create_scope)
        _exit_if_errors(errors)
        _success("Scope created")

    def _drop_scope(self, opts):
        """Drop the scope named by --drop-scope."""
        _, errors = self.rest.drop_scope(opts.bucket, opts.drop_scope)
        _exit_if_errors(errors)
        _success("Scope dropped")

    def _list_scopes(self, opts):
        """Print the name of every scope in the bucket."""
        manifest, errors = self.rest.get_manifest(opts.bucket)
        _exit_if_errors(errors)
        for scope in manifest['scopes']:
            print(scope['name'])

    def _create_collection(self, opts):
        """Create the collection at the path given by --create-collection."""
        scope, collection = self._get_scope_collection(opts.create_collection)
        _, errors = self.rest.create_collection(opts.bucket, scope, collection, opts.max_ttl)
        _exit_if_errors(errors)
        _success("Collection created")

    def _drop_collection(self, opts):
        """Drop the collection at the path given by --drop-collection."""
        scope, collection = self._get_scope_collection(opts.drop_collection)
        _, errors = self.rest.drop_collection(opts.bucket, scope, collection)
        _exit_if_errors(errors)
        _success("Collection dropped")

    def _list_collections(self, opts):
        """Print the collections in the requested scopes (all scopes when none given)."""
        manifest, errors = self.rest.get_manifest(opts.bucket)
        _exit_if_errors(errors)
        if opts.list_collections == "":
            scope_dict = {}
        else:
            # Track which requested scopes were actually found so we can warn below
            scope_dict = {scope: False for scope in opts.list_collections.split(',')}
        if opts.output == 'json':
            self._json_list_collections(manifest, scope_dict)
            return
        for scope in manifest['scopes']:
            if len(scope_dict) == 0 or scope['name'] in scope_dict:
                if len(scope_dict) > 0:
                    scope_dict[scope['name']] = True
                print(f"Scope {scope['name']}:")
                for collection in scope['collections']:
                    print(f" - {collection['name']}")
        if len(scope_dict) > 0:
            for scope, found in scope_dict.items():
                if not found:
                    _warning(f'Scope "{scope}" does not exist')

    @staticmethod
    def _json_list_collections(manifest: Dict[str, Any], scope_dict: Dict[str, bool]):
        """Print the scope -> collection-names mapping as JSON."""
        out = {}
        for scope in manifest['scopes']:
            if len(scope_dict) == 0 or scope['name'] in scope_dict:
                out[scope['name']] = [collection["name"] for collection in scope['collections']]
        print(json.dumps(out, indent=4))

    def _get_scope_collection(self, path):
        """Split a "scope.collection" path, exiting with an error if it is malformed."""
        scope, collection, err = self.expand_collection_shortcut(path)
        if err is not None:
            _exit_if_errors([err])
        return scope, collection

    @staticmethod
    def expand_collection_shortcut(path):
        """Expand a "scope.collection" path, mapping empty components to "_default".

        Returns (scope, collection, None) on success or (None, None, error) on failure.
        """
        parts = path.split('.')
        if len(parts) != 2:
            return None, None, f'invalid collection path {path}'
        parts = ['_default' if x == '' else x for x in parts]
        return parts[0], parts[1], None

    @staticmethod
    def get_man_page_name():
        # Bug fix: the extension must be parenthesised; previously the conditional
        # applied to the whole concatenation and Windows got a bare ".html".
        return "couchbase-cli-collection-manage" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage collections in a bucket"
class EnableDeveloperPreview(Subcommand):
    """The enable developer preview command.

    Enabling developer preview is irreversible and blocks upgrades, so the
    --enable path requires interactive confirmation.
    """

    def __init__(self):
        super(EnableDeveloperPreview, self).__init__()
        self.parser.prog = "couchbase-cli enable-developer-preview"
        group = self.parser.add_argument_group("Developer preview option")
        group.add_argument('--enable', dest='enable', required=False, action="store_true",
                           help='Enable developer preview mode in target cluster')
        group.add_argument('--list', dest='list', required=False, action="store_true",
                           help='Check if cluster is in developer preview mode')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Enable developer preview mode (with confirmation) or report its status."""
        if not (opts.enable or opts.list):
            _exit_if_errors(['--enable or --list must be provided'])
        if opts.enable and opts.list:
            _exit_if_errors(['cannot provide both --enable and --list'])
        if opts.enable:
            confirm = input('Developer preview cannot be disabled once it is enabled. '
                            'If you enter developer preview mode you will not be able to '
                            'upgrade. DO NOT USE IN PRODUCTION.\nAre you sure [y/n]: ')
            if confirm == 'y':
                _, errors = self.rest.set_dp_mode()
                _exit_if_errors(errors)
                _success("Cluster is in developer preview mode")
            elif confirm == 'n':
                _success("Developer preview mode has NOT been enabled")
            else:
                _exit_if_errors(["Unknown option provided"])
        if opts.list:
            pools, rv = self.rest.pools()
            _exit_if_errors(rv)
            if 'isDeveloperPreview' in pools and pools['isDeveloperPreview']:
                print('Cluster is in developer preview mode')
            else:
                print('Cluster is NOT in developer preview mode')

    @staticmethod
    def get_man_page_name():
        # Bug fix: the extension must be parenthesised; previously the conditional
        # applied to the whole concatenation and Windows got a bare ".html".
        return "couchbase-cli-enable-developer-preview" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Enable developer preview mode in target cluster"
class SettingAlternateAddress(Subcommand):
    """Setting alternate address command.

    Sets, removes or lists the external (alternate) address configuration
    of cluster nodes.
    """

    def __init__(self):
        super(SettingAlternateAddress, self).__init__()
        self.parser.prog = "couchbase-cli setting-alternate-address"
        group = self.parser.add_argument_group("Configure alternate addresses")
        group.add_argument('--set', dest='set', required=False, action="store_true",
                           help='Set external address configuration for the node')
        group.add_argument('--remove', dest='remove', required=False, action="store_true",
                           help='Remove external address configuration')
        group.add_argument('--list', dest='list', required=False, action='store_true',
                           help='Retrieve current alternate address configuration for all nodes')
        group.add_argument('--node', dest='node', metavar="<node>", help="Specify the node to update")
        group.add_argument('--hostname', dest='alternate_hostname', metavar="<host>", help='Alternate address')
        group.add_argument('--ports', dest='ports', metavar="<ports>",
                           help="A comma separated list specifying port mappings for the services")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Dispatch --set/--remove/--list, retargeting the REST client at --node first."""
        flags_used = sum([opts.set, opts.list, opts.remove])
        if flags_used != 1:
            _exit_if_errors(['Use exactly one of --set, --list or --remove'])
        if opts.set or opts.remove:
            if not opts.node:
                _exit_if_errors(['--node has to be set when using --set or --remove'])
            # Alternate address can only be set on the node itself. The opts.cluster
            # is updated with the opts.node instead to allow ease of use.
            # The node name can have a port number (./cluster_run)
            hostname, port = self._get_host_port(opts.node)
            # Bug fix: scheme was previously only assigned inside the "if url.scheme"
            # branch, raising UnboundLocalError when opts.cluster had no scheme.
            scheme = None
            url = urllib.parse.urlparse(opts.cluster)
            if url.scheme:
                scheme = url.scheme
                if url.port and not port:
                    port = url.port
            elif not port:
                # Fall back to any port embedded in the original cluster address
                _, old_port = self._get_host_port(opts.cluster)
                if old_port:
                    port = old_port
            if scheme:
                cluster = f'{scheme}://'
                cluster += hostname
            else:
                cluster = hostname
            if port:
                cluster += f':{port}'
            opts.cluster = cluster
            # override rest client so it uses the node to be altered
            self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify,
                                       opts.cacert, opts.debug)
        if opts.set:
            ports, error = self._parse_ports(opts.ports)
            _exit_if_errors(error)
            _, error = self.rest.set_alternate_address(opts.alternate_hostname, ports)
            _exit_if_errors(error)
        if opts.remove:
            _, error = self.rest.delete_alternate_address()
            _exit_if_errors(error)
            _success('Alternate address configuration deleted')
        if opts.list:
            add, error = self.rest.get_alternate_address()
            _exit_if_errors(error)
            if opts.output == 'standard':
                # Collect the union of alternate port names across all nodes so the
                # table has one column per port
                port_names = set()
                for node in add:
                    if 'alternateAddresses' in node and 'ports' in node['alternateAddresses']['external']:
                        for port in node['alternateAddresses']['external']['ports'].keys():
                            port_names.add(port)
                print('{:20}{:20}{}'.format('Hostname', 'Alternate Address', 'Ports (Primary/Alternate)'))
                print('{:40}'.format(' '), end='')
                port_names = sorted(port_names)
                for port in port_names:
                    column_size = len(port) + 1
                    if column_size < 11:
                        column_size = 11
                    print(f'{port:{column_size}}', end='')
                print()
                for node in add:
                    if 'alternateAddresses' in node:
                        # For cluster_run and single node clusters there is no hostname
                        try:
                            print(f"{node['hostname']:20}{node['alternateAddresses']['external']['hostname']:20}",
                                  end='')
                        except KeyError:
                            host = 'UNKNOWN'
                            print(f"{host:20}{node['alternateAddresses']['external']['hostname']:20}", end='')
                        for port in port_names:
                            column_size = len(port) + 1
                            if column_size < 11:
                                column_size = 11
                            ports = ' '
                            if port in node['alternateAddresses']['external']['ports']:
                                ports = f"{str(node['services'][port])}" \
                                        f"/{str(node['alternateAddresses']['external']['ports'][port])}"
                            print(f'{ports:{column_size}}', end='')
                        print()
                    else:
                        # For cluster_run and single node clusters there is no hostname
                        try:
                            print(f"{node['hostname']}")
                        except KeyError:
                            print('UNKNOWN')
            else:
                print(json.dumps(add))

    @staticmethod
    def _parse_ports(ports):
        """Parse "name=port,..." into a list of (name, port) tuples.

        Returns (tuples, None) on success or (None, [error]) on malformed input.
        """
        if ports is None:
            return None, None
        port_mappings = ports.split(',')
        port_tuple_list = []
        for port_value_pair in port_mappings:
            p_v = port_value_pair.split('=')
            if len(p_v) != 2:
                return None, [f'invalid port mapping: {port_value_pair}']
            try:
                int(p_v[1])
            except ValueError:
                return None, [f'invalid port mapping: {port_value_pair}']
            port_tuple_list.append((p_v[0], p_v[1]))
        return port_tuple_list, None

    @staticmethod
    def _get_host_port(host):
        """Split "host:port" into (host, port), handling bracketed IPv6 literals."""
        if ']' in host:
            host_port = host.split(']:')
            if len(host_port) == 2:
                return host_port[0] + ']', host_port[1]
            return host_port[0], None
        else:
            host_port = host.split(':')
            if len(host_port) == 2:
                return host_port[0], host_port[1]
            return host_port[0], None

    @staticmethod
    def get_man_page_name():
        # Bug fix: the extension must be parenthesised; previously the conditional
        # applied to the whole concatenation and Windows got a bare ".html".
        return "couchbase-cli-setting-alternate-address" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Configure alternate addresses"
class SettingQuery(Subcommand):
    """Command to configure query settings.

    --get prints the current query settings as JSON; --set posts the curl
    access list (if given) and any of the other setting flags.
    """

    def __init__(self):
        super(SettingQuery, self).__init__()
        self.parser.prog = "couchbase-cli setting-query"
        group = self.parser.add_argument_group("Query service settings")
        group.add_argument('--set', dest='set', action="store_true",
                           help='Set query settings')
        group.add_argument('--get', dest='get', action="store_true",
                           help='Retrieve current query settings')
        group.add_argument('--pipeline-batch', metavar='<num>', type=int, default=None,
                           help='Number of items execution operators can batch.')
        group.add_argument('--pipeline-cap', metavar='<num>', type=int, default=None,
                           help='Maximum number of items each execution operator can buffer.')
        group.add_argument('--scan-cap', metavar='<size>', type=int, default=None,
                           help='Maximum buffer size for index scans.')
        group.add_argument('--timeout', metavar='<ms>', type=int, default=None,
                           help='Server execution timeout.')
        group.add_argument('--prepared-limit', metavar='<max>', type=int, default=None,
                           help='Maximum number of prepared statements.')
        group.add_argument('--completed-limit', metavar='<max>', type=int, default=None,
                           help='Maximum number of completed requests.')
        group.add_argument('--completed-threshold', metavar='<ms>', type=int, default=None,
                           help='Cache completed query lasting longer than this many milliseconds.')
        # 'sever' is kept for backward compatibility with the historical typo;
        # 'severe' is the documented spelling. The metavar previously said
        # 'server', which matched neither.
        group.add_argument('--log-level', choices=['trace', 'debug', 'info', 'warn', 'error', 'sever', 'severe',
                                                   'none'],
                           default=None, metavar='<trace|debug|info|warn|error|severe|none>',
                           help='Log level: debug, trace, info, warn, error, severe, none.')
        group.add_argument('--max-parallelism', metavar='<max>', type=int, default=None,
                           help='Maximum parallelism per query.')
        group.add_argument('--n1ql-feature-control', metavar='<num>', type=int, default=None,
                           help='N1QL Feature Controls')
        group.add_argument('--temp-dir', metavar='<path>', type=str, default=None,
                           help='This specifies the directory for temporary query data.')
        group.add_argument('--temp-dir-size', metavar='<mebibytes>', type=int, default=None,
                           help='Specify the maximum size in mebibytes for the temporary query data directory.')
        group.add_argument('--cost-based-optimizer', metavar='<1|0>', type=str, default=None,
                           help='Use cost-based optimizer (Developer Preview).')
        group.add_argument('--memory-quota', metavar='<mebibytes>', type=int, default=None,
                           help='Sets the query memory quota in MiB.')
        group.add_argument('--transaction-timeout', metavar='<duration>', type=str, default=None,
                           help='A duration string for the transaction timeout i.e (100ns, 10ms, 1s, 1m).')
        access_list_group = self.parser.add_argument_group('Query curl access settings')
        access_list_group.add_argument('--curl-access', choices=['restricted', 'unrestricted'], default=None,
                                       help='Specify either unrestricted or restricted, to determine which URLs are'
                                            ' permitted to be accessed by the curl function.')
        access_list_group.add_argument('--allowed-urls', metavar='<urls>', type=str, default=None,
                                       help='Comma separated lists of URLs that are allowed to be accessed by the curl'
                                            ' function.')
        access_list_group.add_argument('--disallowed-urls', metavar='<urls>', type=str, default=None,
                                       help='Comma separated lists of URLs that are disallowed to be accessed by the'
                                            ' curl function.')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Get or set the query settings, depending on which flag was supplied."""
        if sum([opts.get, opts.set]) != 1:
            _exit_if_errors(['Please provide --set or --get, both can not be provided at the same time'])
        if opts.get:
            settings, err = self.rest.get_query_settings()
            _exit_if_errors(err)
            print(json.dumps(settings))
        if opts.set:
            access_list = self._post_query_access_list(opts)
            self._post_query_settings(opts, access_list)
            _success('Updated the query settings')

    def _post_query_access_list(self, opts) -> bool:
        """Post the curl access settings; return True if anything was posted."""
        if opts.curl_access != 'restricted' and (opts.allowed_urls is not None or opts.disallowed_urls is not None):
            _exit_if_errors(['Can only provide --allowed-urls or --disallowed-urls with --curl-access restricted'])
        if opts.curl_access:
            allowed = opts.allowed_urls.strip().split(',') if opts.allowed_urls is not None else None
            disallowed = opts.disallowed_urls.strip().split(',') if opts.disallowed_urls is not None else None
            _, err = self.rest.post_query_curl_access_settings(opts.curl_access == 'restricted', allowed, disallowed)
            _exit_if_errors(err)
            return True
        return False

    def _post_query_settings(self, opts, access_list):
        """Post the remaining query settings; require at least one flag unless the
        curl access list was already posted."""
        if all(v is None for v in [opts.pipeline_batch, opts.pipeline_cap, opts.scan_cap, opts.timeout,
                                   opts.prepared_limit, opts.completed_limit, opts.completed_threshold,
                                   opts.log_level, opts.max_parallelism, opts.n1ql_feature_control, opts.temp_dir,
                                   opts.temp_dir_size, opts.cost_based_optimizer, opts.memory_quota,
                                   opts.transaction_timeout]):
            if access_list:
                return
            _exit_if_errors(['Please provide at least one other option with --set'])
        _, err = self.rest.post_query_settings(opts.pipeline_batch, opts.pipeline_cap, opts.scan_cap, opts.timeout,
                                               opts.prepared_limit, opts.completed_limit, opts.completed_threshold,
                                               opts.log_level, opts.max_parallelism, opts.n1ql_feature_control,
                                               opts.temp_dir, opts.temp_dir_size, opts.cost_based_optimizer,
                                               opts.memory_quota, opts.transaction_timeout)
        _exit_if_errors(err)

    @staticmethod
    def get_man_page_name():
        # Bug fix: the extension must be parenthesised; previously the conditional
        # applied to the whole concatenation and Windows got a bare ".html".
        return "couchbase-cli-setting-query" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage query settings"
class IpFamily(Subcommand):
    """Command to switch between IP family for node to node communication."""

    def __init__(self):
        super(IpFamily, self).__init__()
        self.parser.prog = "couchbase-cli ip-family"
        group = self.parser.add_argument_group("IP family options")
        group.add_argument('--get', action="store_true", default=False, help='Retrieve current used IP family')
        group.add_argument('--set', action="store_true", default=False, help='Change current used IP family')
        group.add_argument('--ipv4', dest='ipv4', default=False, action="store_true",
                           help='Set IP family to IPv4')
        group.add_argument('--ipv6', dest='ipv6', default=False, action="store_true",
                           help='Set IP family to IPv6')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Validate the flag combination and dispatch to the get/set helper."""
        flags_used = sum([opts.set, opts.get])
        if flags_used == 0:
            _exit_if_errors(['Please provide one of --set, or --get'])
        elif flags_used > 1:
            _exit_if_errors(['Please provide only one of --set, or --get'])
        if opts.get:
            self._get(self.rest)
        if opts.set:
            if sum([opts.ipv6, opts.ipv4]) != 1:
                # Fixed wording: we are asking the user to provide a flag.
                _exit_if_errors(['Provide exactly one of --ipv4 or --ipv6 together with the --set option'])
            self._set(self.rest, opts.ipv6, opts.ssl)

    @staticmethod
    def _set(rest, ipv6, ssl):
        """Switch the IP family of every node in the cluster.

        The external listeners for the new family are enabled first, then the
        network configuration is switched node by node, and finally the now
        unused listeners for the old family are disabled.
        """
        ip_fam, ip_fam_disable = ('ipv6', 'ipv4') if ipv6 else ('ipv4', 'ipv6')
        node_data, err = rest.pools('nodes')
        if err and err[0] == '"unknown pool"':
            # Uninitialised cluster: only the single node has to be switched.
            _, err = rest.enable_external_listener(ipfamily=ip_fam)
            _exit_if_errors(err)
            _, err = rest.setup_net_config(ipfamily=ip_fam)
            _exit_if_errors(err)
            _, err = rest.disable_unused_external_listeners(ipfamily=ip_fam_disable)
            _exit_if_errors(err)
            _success('Switched IP family of the cluster')
            return
        _exit_if_errors(err)
        hosts = []
        for n in node_data['nodes']:
            host = f"http://{n['hostname']}"
            if ssl:
                # Build the TLS URL from the bare hostname. The previous code derived
                # the address from the already-prefixed URL, which produced the
                # malformed "https://http://<host>:<port>".
                addr = n['hostname'].rsplit(":", 1)[0]
                host = f"https://{addr}:{n['ports']['httpsMgmt']}"
            _, err = rest.enable_external_listener(host=host, ipfamily=ip_fam)
            _exit_if_errors(err)
            hosts.append(host)
        for h in hosts:
            _, err = rest.setup_net_config(host=h, ipfamily=ip_fam)
            _exit_if_errors(err)
            print(f'Switched IP family for node: {h}')
        for h in hosts:
            _, err = rest.disable_unused_external_listeners(host=h, ipfamily=ip_fam_disable)
            _exit_if_errors(err)
        _success('Switched IP family of the cluster')

    @staticmethod
    def _get(rest):
        """Print the IP family currently in use by the cluster."""
        nodes, err = rest.nodes_info()
        _exit_if_errors(err)
        fam = {}
        for n in nodes:
            fam[n['addressFamily']] = True
        family = list(fam.keys())
        if len(family) == 1:
            ipv_fam = 'UNKNOWN'
            # 'inet'/'inet_tls' are the Erlang names for IPv4 (plain/TLS); 'inet6*' for IPv6.
            if family[0] == 'inet' or family[0] == 'inet_tls':
                ipv_fam = 'ipv4'
            elif family[0] == 'inet6' or family[0] == 'inet6_tls':
                ipv_fam = 'ipv6'
            print(f'Cluster using {ipv_fam}')
        else:
            print('Cluster is in mixed mode')

    @staticmethod
    def get_man_page_name():
        """Return the platform-specific man page file name."""
        # Parenthesised: without it "+" binds tighter than the conditional and
        # Windows would get just ".html" without the command prefix.
        return "couchbase-cli-ip-family" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Change or get the address family"
class NodeToNodeEncryption(Subcommand):
    """Command to enable/disable cluster encryption."""

    def __init__(self):
        super(NodeToNodeEncryption, self).__init__()
        self.parser.prog = "couchbase-cli node-to-node-encryption"
        group = self.parser.add_argument_group("Node-to-node encryption options")
        group.add_argument('--enable', action="store_true", default=False, help='Enable node-to-node encryption')
        group.add_argument('--disable', action="store_true", default=False, help='Disable node-to-node encryption')
        group.add_argument('--get', action="store_true", default=False,
                           help='Retrieve current status of node-to-node encryption (on or off)')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Validate that exactly one action flag was given and dispatch it."""
        flags_used = sum([opts.enable, opts.disable, opts.get])
        if flags_used == 0:
            _exit_if_errors(['Please provide one of --enable, --disable or --get'])
        elif flags_used > 1:
            _exit_if_errors(['Please provide only one of --enable, --disable or --get'])
        if opts.get:
            self._get(self.rest)
        elif opts.enable:
            self._change_encryption(self.rest, 'on', opts.ssl)
        elif opts.disable:
            self._change_encryption(self.rest, 'off', opts.ssl)

    @staticmethod
    def _change_encryption(rest, encryption, ssl):
        """Turn node-to-node encryption 'on' or 'off' for every node in the cluster."""
        node_data, err = rest.pools('nodes')
        encryption_disable = 'off' if encryption == 'on' else 'on'
        if err and err[0] == '"unknown pool"':
            # Uninitialised cluster: only the single node has to be switched.
            _, err = rest.enable_external_listener(encryption=encryption)
            _exit_if_errors(err)
            _, err = rest.setup_net_config(encryption=encryption)
            _exit_if_errors(err)
            _, err = rest.disable_unused_external_listeners(encryption=encryption_disable)
            _exit_if_errors(err)
            _success(f'Switched node-to-node encryption {encryption}')
            return
        _exit_if_errors(err)
        hosts = []
        for n in node_data['nodes']:
            host = f"http://{n['hostname']}"
            if ssl:
                # Build the TLS URL from the bare hostname. The previous code derived
                # the address from the already-prefixed URL, which produced the
                # malformed "https://http://<host>:<port>".
                addr = n['hostname'].rsplit(":", 1)[0]
                host = f"https://{addr}:{n['ports']['httpsMgmt']}"
            _, err = rest.enable_external_listener(host=host, encryption=encryption)
            _exit_if_errors(err)
            hosts.append(host)
        for h in hosts:
            _, err = rest.setup_net_config(host=h, encryption=encryption)
            _exit_if_errors(err)
            print(f'Turned {encryption} encryption for node: {h}')
        for h in hosts:
            _, err = rest.disable_unused_external_listeners(host=h, encryption=encryption_disable)
            _exit_if_errors(err)
        _success(f'Switched node-to-node encryption {encryption}')

    @staticmethod
    def _get(rest):
        """Print whether node-to-node encryption is enabled, disabled or mixed."""
        # this will start the correct listeners in all the nodes
        nodes, err = rest.nodes_info()
        _exit_if_errors(err)
        encrypted_nodes = []
        unencrypted_nodes = []  # fixed local-name typo ("unencrpyted")
        for n in nodes:
            if n['nodeEncryption']:
                encrypted_nodes.append(n['hostname'])
            else:
                unencrypted_nodes.append(n['hostname'])
        if len(encrypted_nodes) == len(nodes):
            print('Node-to-node encryption is enabled')
        elif len(unencrypted_nodes) == len(nodes):
            print('Node-to-node encryption is disabled')
        else:
            print('Cluster is in mixed mode')
            print(f'Nodes with encryption enabled: {encrypted_nodes}')
            print(f'Nodes with encryption disabled: {unencrypted_nodes}')

    @staticmethod
    def get_man_page_name():
        """Return the platform-specific man page file name."""
        # Parenthesised: without it "+" binds tighter than the conditional and
        # Windows would get just ".html" without the command prefix.
        return "couchbase-cli-node-to-node-encryption" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Change or get the cluster encryption configuration"
class SettingRebalance(Subcommand):
    """The rebalance subcommand"""

    def __init__(self):
        super(SettingRebalance, self).__init__()
        self.parser.prog = "couchbase-cli setting-rebalance"
        group = self.parser.add_argument_group("Rebalance configuration")
        group.add_argument("--set", default=False, action='store_true',
                           help='Set the automatic rebalance retry settings.')
        group.add_argument("--get", default=False, action='store_true',
                           help='Get the automatic rebalance retry settings.')
        group.add_argument('--cancel', default=False, action='store_true',
                           help='Cancel pending rebalance retry.')
        group.add_argument('--moves-per-node', type=int, metavar='<num>',
                           help='Specify the number of [1-64] vBuckets to move concurrently')
        group.add_argument('--pending-info', default=False, action='store_true',
                           help='Get info for pending rebalance retry.')
        group.add_argument("--enable", metavar="<1|0>", choices=["1", "0"],
                           help="Enable or disable automatic rebalance retry")
        # help typo fixed: "wat" -> "wait"
        group.add_argument("--wait-for", metavar="<sec>", type=int,
                           help="Specify the time to wait before retrying the rebalance [5-3600] seconds.")
        # help typo fixed: "retires" -> "retries"
        group.add_argument("--max-attempts", metavar="<num>", type=int,
                           help="Maximum number of rebalance retries [1-3].")
        group.add_argument('--rebalance-id', metavar='<id>',
                           help='Specify the id of the failed rebalance to cancel the retry.')

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Get, set or cancel the rebalance (retry) settings, or show pending retry info."""
        if sum([opts.set, opts.get, opts.cancel, opts.pending_info]) != 1:
            _exit_if_errors(['Provide either --set, --get, --cancel or --pending-info'])
        if opts.get:
            settings, err = self.rest.get_settings_rebalance()
            _exit_if_errors(err)
            if self.enterprise:
                # Retry settings are an Enterprise-only feature.
                retry_settings, err = self.rest.get_settings_rebalance_retry()
                _exit_if_errors(err)
                settings.update(retry_settings)
            if opts.output == 'json':
                print(json.dumps(settings))
            else:
                if self.enterprise:
                    print(f"Automatic rebalance retry {'enabled' if settings['enabled'] else 'disabled'}")
                    print(f"Retry wait time: {settings['afterTimePeriod']}")
                    print(f"Maximum number of retries: {settings['maxAttempts']}")
                print(f"Maximum number of vBucket move per node: {settings['rebalanceMovesPerNode']}")
        elif opts.set:
            if (not self.enterprise and (opts.enable is not None or opts.wait_for is not None
                                         or opts.max_attempts is not None)):
                _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"])
            # NOTE(review): when --enable is omitted this still coerces opts.enable to
            # 'false', so a --set with only --wait-for/--max-attempts silently disables
            # retries — confirm whether the REST layer treats None as "leave unchanged".
            if opts.enable == '1':
                opts.enable = 'true'
            else:
                opts.enable = 'false'
            if opts.wait_for is not None and (opts.wait_for < 5 or opts.wait_for > 3600):
                _exit_if_errors(['--wait-for must be a value between 5 and 3600'])
            if opts.max_attempts is not None and (opts.max_attempts < 1 or opts.max_attempts > 3):
                _exit_if_errors(['--max-attempts must be a value between 1 and 3'])
            if self.enterprise:
                _, err = self.rest.set_settings_rebalance_retry(opts.enable, opts.wait_for, opts.max_attempts)
                _exit_if_errors(err)
            if opts.moves_per_node is not None:
                if not 1 <= opts.moves_per_node <= 64:
                    _exit_if_errors(['--moves-per-node must be a value between 1 and 64'])
                _, err = self.rest.set_settings_rebalance(opts.moves_per_node)
                _exit_if_errors(err)
            _success('Rebalance settings updated')
        elif opts.cancel:
            # Fixed routing: the old "elif opts.cancel and not self.enterprise" guard
            # sent Enterprise --cancel requests into the pending-info branch below.
            if not self.enterprise:
                _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"])
            if opts.rebalance_id is None:
                _exit_if_errors(['Provide the failed rebalance id using --rebalance-id <id>'])
            _, err = self.rest.cancel_rebalance_retry(opts.rebalance_id)
            _exit_if_errors(err)
            _success('Rebalance retry canceled')
        else:
            # --pending-info
            if not self.enterprise:
                _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"])
            rebalance_info, err = self.rest.get_rebalance_info()
            _exit_if_errors(err)
            print(json.dumps(rebalance_info))

    @staticmethod
    def get_man_page_name():
        """Return the platform-specific man page file name."""
        # Parenthesised: without it "+" binds tighter than the conditional and
        # Windows would get just ".html" without the command prefix.
        return "couchbase-cli-setting-rebalance" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Configure automatic rebalance settings"
class BackupService(Subcommand):
    """BackupService class is a subcommand that will contain other commands to configure the service as well as manage
    it. This approach attempts to make the interface more intuitive by keeping a hierarchical structure where the
    service can have all its options under one command instead of having multiple completely separate commands (e.g
    settings-backups, manage-backups and repository-setup-backup.)

    The idea is that the interface will look like:
    couchbase-cli backup-service [settings | plans | repositories | cloud-credentials] where each element in [] is a
    subcommand to manage those options for that part of the backup service. As such if the user is not sure of what they
    want to do they can always do couchbase-cli backup-service -h to get a top level details and then move down the
    hierarchy to a more concrete option.
    """

    def __init__(self):
        super(BackupService, self).__init__()
        self.parser.prog = "couchbase-cli backup-service"
        self.subparser = self.parser.add_subparsers(help='Sub command help', dest='sub_cmd', metavar='<subcommand>')
        # Nested commands register their own parsers on the shared subparser.
        self.settings_cmd = BackupServiceSettings(self.subparser)
        self.repository_cmd = BackupServiceRepository(self.subparser)
        self.plan_cmd = BackupServicePlan(self.subparser)

    def execute(self, opts):
        """Dispatch to the chosen backup-service nested command."""
        # 'not in' already covers sub_cmd being None. Typo fixed: "one off" -> "one of".
        if opts.sub_cmd not in ('settings', 'repository', 'plan'):
            _exit_if_errors(['<subcommand> must be one of [settings, repository, plan]'])
        if opts.sub_cmd == 'settings':
            self.settings_cmd.execute(opts)
        elif opts.sub_cmd == 'repository':
            self.repository_cmd.execute(opts)
        elif opts.sub_cmd == 'plan':
            self.plan_cmd.execute(opts)

    @staticmethod
    def get_man_page_name():
        """Return the platform-specific man page file name."""
        # Parenthesised: without it "+" binds tighter than the conditional and
        # Windows would get just ".html" without the command prefix.
        return 'couchbase-cli-backup-service' + ('.1' if os.name != 'nt' else '.html')

    @staticmethod
    def get_description():
        return "Manage the backup service"
class BackupServiceSettings:
    """Backup service settings is a nested command and manages the backup service settings."""

    def __init__(self, subparser):
        self.rest = None
        setting_parser = subparser.add_parser('settings', help='Manage backup service settings', add_help=False,
                                              allow_abbrev=False)
        group = setting_parser.add_argument_group('Backup service settings options')
        group.add_argument('--get', action='store_true', help='Get current backup service configuration')
        group.add_argument('--set', action='store_true', help='Change the service configuration')
        group.add_argument('--history-rotation-period', dest='rotation_period', type=int, metavar='<days>',
                           help='The number of days after which the task history should be rotated')
        group.add_argument('--history-rotation-size', dest='rotation_size', type=int, metavar='<mebibytes>',
                           help='The size in MiB at which to rotate the task history')
        group.add_argument("-h", "--help", action=CBHelpAction, klass=self,
                           help="Prints the short or long help message")

    @rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True)
    def execute(self, opts):
        """Route to the --get or --set handler; exactly one must be given."""
        if sum([opts.get, opts.set]) != 1:
            _exit_if_errors(['Must use one and only one of [--get, --set]'])
        if opts.get:
            self._get(opts)
        if opts.set:
            self._set(opts)

    def _get(self, opts):
        """Print the current backup service configuration (JSON or human readable)."""
        config, err = self.rest.get_backup_service_settings()
        _exit_if_errors(err)
        if opts.output == 'json':
            print(json.dumps(config, indent=4))
        else:
            print('-- Backup service configuration --')
            # The service may omit the rotation fields, hence the fallbacks.
            size = config.get('history_rotation_size', 'N/A')
            period = config.get('history_rotation_period', 'N/A')
            print(f'History rotation size: {size} MiB')
            print(f'History rotation period: {period} days')

    def _set(self, opts):
        """Patch the backup service configuration with the provided options."""
        if opts.rotation_period is None and opts.rotation_size is None:
            _exit_if_errors(['At least one of --history-rotation-period or --history-rotation-size is required'])
        _, err = self.rest.patch_backup_service_settings(opts.rotation_period, opts.rotation_size)
        _exit_if_errors(err)
        _success('Backup service settings patched')

    @staticmethod
    def get_man_page_name():
        """Return the platform-specific man page file name."""
        # Parenthesised: without it "+" binds tighter than the conditional and
        # Windows would get just ".html" without the command prefix.
        return 'couchbase-cli-backup-service-settings' + ('.1' if os.name != 'nt' else '.html')

    @staticmethod
    def get_description():
        return 'Manage backup service settings'
class BackupServiceRepository:
    """This command manages backup service repositories.

    Things this command can do:
    - List repositories
    - Get repository
    - Add repository
    - Archive repository
    - Import repository
    - Delete repository
    """

    def __init__(self, subparser):
        """Set up the 'repository' subcommand parser."""
        self.rest = None
        repository_parser = subparser.add_parser('repository', help='Manage backup repositories', add_help=False,
                                                 allow_abbrev=False)
        # action flags are mutually exclusive
        action_group = repository_parser.add_mutually_exclusive_group(required=True)
        action_group.add_argument('--list', action='store_true', help='Get all repositories')
        action_group.add_argument('--get', action='store_true', help='Get repository by id')
        action_group.add_argument('--archive', action='store_true', help='Archive a repository')
        action_group.add_argument('--add', action='store_true', help='Add a new active repository')
        action_group.add_argument('--remove', action='store_true', help='Remove an archived/imported repository')
        action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self,
                                  help="Prints the short or long help message")
        # other arguments
        group = repository_parser.add_argument_group('Backup service repository configuration')
        group.add_argument('--id', metavar='<id>', help='The repository id')
        group.add_argument('--new-id', metavar='<id>', help='The new repository id')
        group.add_argument('--state', metavar='<state>', choices=['active', 'archived', 'imported'],
                           help='The repository state.')
        group.add_argument('--plan', metavar='<plan_name>', help='The plan to use as base for the repository')
        group.add_argument('--backup-archive', metavar='<archive>', help='The location to store the backups in')
        group.add_argument('--bucket-name', metavar='<name>', help='The bucket to backup')
        group.add_argument('--remove-data', action='store_true', help='Used to delete the repository data')
        # the cloud arguments are given their own group so that the short help is a bit more readable
        cloud_group = repository_parser.add_argument_group('Backup repository cloud arguments')
        cloud_group.add_argument('--cloud-credentials-name', metavar='<name>',
                                 help='The stored clouds credential name to use for the new repository')
        cloud_group.add_argument('--cloud-staging-dir', metavar='<path>', help='The path to the staging directory')
        cloud_group.add_argument('--cloud-credentials-id', metavar='<id>',
                                 help='The ID to use to communicate with the object store')
        cloud_group.add_argument('--cloud-credentials-key', metavar='<key>',
                                 help='The key to use to communicate with the object store')
        cloud_group.add_argument('--cloud-credentials-region', metavar='<region>',
                                 help='The region for the object store')
        cloud_group.add_argument('--cloud-endpoint', metavar='<endpoint>',
                                 help='Overrides the default endpoint used to communicate with the cloud provider. '
                                      'Use for object store compatible third party solutions')
        cloud_group.add_argument('--s3-force-path-style', action='store_true',
                                 help='When using S3 or S3 compatible storage it will use the old path style.')

    @rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True)
    def execute(self, opts):
        """Run the backup-service repository subcommand."""
        if opts.list:
            self.list_repositories(opts.state, opts.output == 'json')
        elif opts.get:
            self.get_repository(opts.id, opts.state, opts.output == 'json')
        elif opts.archive:
            self.archive_repository(opts.id, opts.new_id)
        elif opts.remove:
            self.remove_repository(opts.id, opts.state, opts.remove_data)
        elif opts.add:
            self.add_active_repository(opts.id, opts.plan, opts.backup_archive, bucket_name=opts.bucket_name,
                                       credentials_name=opts.cloud_credentials_name,
                                       credentials_id=opts.cloud_credentials_id,
                                       credentials_key=opts.cloud_credentials_key,
                                       cloud_region=opts.cloud_credentials_region, staging_dir=opts.cloud_staging_dir,
                                       cloud_endpoint=opts.cloud_endpoint, s3_path_style=opts.s3_force_path_style)

    def remove_repository(self, repository_id: str, state: str, delete_repo: bool = False):
        """Removes the repository in state 'state' and with id 'repository_id'.

        Args:
            repository_id (str): The repository id
            state (str): It must be either archived or imported otherwise it will return an error
            delete_repo (bool): Whether or not the backup repository should be deleted
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        # the following is divided in two options to give better error messages depending if state is missing or if it
        # is invalid
        if not state:
            _exit_if_errors(['--state is required'])
        if state not in ['archived', 'imported']:
            _exit_if_errors(['can only delete archived or imported repositories to delete an active repository it needs to '
                             'be archived first'])
        # can only delete repo of archived repositories
        if delete_repo and state == 'imported':
            _exit_if_errors(['cannot delete the repository for an imported repository'])
        _, errors = self.rest.delete_backup_repository(repository_id, state, delete_repo)
        _exit_if_errors(errors)
        _success('Repository was deleted')

    def add_active_repository(self, repository_id: str, plan: str, archive: str, **kwargs):
        """Adds a new active repository identified by 'repository_id' and that uses 'plan' as base.

        Args:
            repository_id (str): The ID to give to the repository. This must be unique, if it is not an error will be
                returned.
            plan (str): The name of the plan to use as base for the repository. If it does not exist the service
                will return an error.
            archive (str): The location to store the data in. It must be accessible by all nodes. To use S3 instead of
                providing a path to a filesystem directory use the syntax.
                s3://<bucket-name>/<optional_prefix>/<archive>
            **kwargs: Optional parameters [bucket_name, credentials_name, credentials_id, credentials_key, cloud_region,
                staging_dir, cloud_endpoint, s3_path_style]
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        if not plan:
            _exit_if_errors(['--plan is required'])
        if not archive:
            _exit_if_errors(['--backup-archive is required'])
        _exit_if_errors(self.check_cloud_params(archive, **kwargs))
        add_request_body = {
            'plan': plan,
            'archive': archive,
        }
        # Only options the user actually supplied are forwarded to the service.
        if kwargs.get('bucket_name', False):
            add_request_body['bucket_name'] = kwargs.get('bucket_name')
        if kwargs.get('credentials_name', False):
            add_request_body['cloud_credential_name'] = kwargs.get('credentials_name')
        if kwargs.get('credentials_id', False):
            add_request_body['cloud_credentials_id'] = kwargs.get('credentials_id')
        if kwargs.get('credentials_key', False):
            add_request_body['cloud_credentials_key'] = kwargs.get('credentials_key')
        if kwargs.get('cloud_region', False):
            add_request_body['cloud_credentials_region'] = kwargs.get('cloud_region')
        if kwargs.get('cloud_endpoint', False):
            add_request_body['cloud_endpoint'] = kwargs.get('cloud_endpoint')
        if kwargs.get('s3_path_style', False):
            add_request_body['cloud_force_path_style'] = kwargs.get('s3_path_style')
        _, errors = self.rest.add_backup_active_repository(repository_id, add_request_body)
        _exit_if_errors(errors)
        _success('Added repository')

    @staticmethod
    def check_cloud_params(archive: str, **kwargs) -> Optional[List[str]]:
        """Checks that inside kwargs there is a valid set of parameters to add a cloud repository.

        Args:
            archive (str): The archive to use for the repository.

        Returns:
            A list of error strings, or None when the parameters are valid.
        """
        # If not an s3 archive skip this
        if not archive.startswith('s3://'):
            return None
        creds_name = kwargs.get('credentials_name')
        region = kwargs.get('cloud_region')
        creds_id = kwargs.get('credentials_id')
        creds_key = kwargs.get('credentials_key')
        staging_dir = kwargs.get('staging_dir')
        # Either a stored credential name, or an explicit id/key pair — never both.
        if (creds_name and (creds_id or creds_key)) or (not creds_name and not (creds_id or creds_key)):
            return ['must provide either --cloud-credentials-name or --cloud-credentials-key and '
                    '--cloud-credentials-id']
        if not staging_dir:
            return ['--cloud-staging-dir is required']
        if not creds_name and not region:
            return ['--cloud-credentials-region is required']
        return None

    def archive_repository(self, repository_id, new_id):
        """Archive a repository. The archived repository will have the id `new_id`.

        Args:
            repository_id (str): The active repository ID to be archived
            new_id (str): The id that will be given to the archived repository
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        if not new_id:
            _exit_if_errors(['--new-id is required'])
        _, errors = self.rest.archive_backup_repository(repository_id, new_id)
        _exit_if_errors(errors)
        _success('Archived repository')

    def list_repositories(self, state=None, json_out=False):
        """List the backup repositories.

        If a repository state is given only repositories in that state will be listed. This command supports listing
        both in json and human friendly format.

        Args:
            state (str, optional): One of ['active', 'imported', 'archived']. The repositories in this state will be
                retrieved.
            json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.
        """
        states = ['active', 'archived', 'imported'] if state is None else [state]
        results = {}
        for get_state in states:
            repositories, errors = self.rest.get_backup_service_repositories(state=get_state)
            _exit_if_errors(errors)
            results[get_state] = repositories
        if json_out:
            print(json.dumps(results, indent=2))
        else:
            self.human_friendly_print_repositories(results)

    def get_repository(self, repository_id, state, json_out=False):
        """Retrieves one repository from the backup service.

        If the repository does not exist an error will be returned.

        Args:
            repository_id (str): The repository id to be retrieved
            state (str): The state of the repository to retrieve
            json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        if not state:
            _exit_if_errors(['--state is required'])
        repository, errors = self.rest.get_backup_service_repository(repository_id, state)
        _exit_if_errors(errors)
        if json_out:
            print(json.dumps(repository, indent=2))
        else:
            self.human_firendly_print_repository(repository)

    @staticmethod
    def human_firendly_print_repository(repository):
        """Print the repository in a human friendly format.

        Args:
            repository (obj): The backup repository information
        """
        print(f'ID: {repository['id']}')
        print(f'State: {repository['state']}')
        print(f'Healthy: {(not ('health' in repository and not repository['health']['healthy']))!s}')
        print(f'Archive: {repository['archive']}')
        print(f'Repository: {repository['repo']}')
        if 'bucket' in repository:
            print(f'Bucket: {repository['bucket']['name']}')
        if 'plan_name' in repository and repository['plan_name'] != "":
            print(f'plan: {repository['plan_name']}')
        print(f'Creation time: {repository['creation_time']}')
        if 'scheduled' in repository and repository['scheduled']:
            print()
            BackupServiceRepository.human_firendly_print_repository_scheduled_tasks(repository['scheduled'])
        one_off = repository['running_one_off'] if 'running_one_off' in repository else None
        running_scheduled = repository['running_tasks'] if 'running_tasks' in repository else None
        if one_off or running_scheduled:
            print()
            BackupServiceRepository.human_friendly_print_running_tasks(one_off, running_scheduled)

    @staticmethod
    def human_friendly_print_running_tasks(one_off, scheduled):
        """Prints the running task summary in a human friendly way.

        Args:
            one_off (map<str, task object>): Running one off tasks
            scheduled (map<str, task object>): Running scheduled tasks
        """
        all_vals = []
        # name_pad is grown to fit the longest task name so columns line up.
        name_pad = 5
        if one_off:
            for name in one_off:
                if len(name) > name_pad:
                    name_pad = len(name)
            all_vals += one_off.values()
        if scheduled:
            for name in scheduled:
                if len(name) > name_pad:
                    name_pad = len(name)
            all_vals += scheduled.values()
        name_pad += 1
        header = f'{'Name':<{name_pad}}| Task type | Status | Start'
        print(header)
        print('-' * (len(header) + 5))
        for task in all_vals:
            print(f'{task['name']:<{name_pad}}| {task['type'].title():<10}| {task['status']:<8} | {task['start']}')

    @staticmethod
    def human_firendly_print_repository_scheduled_tasks(scheduled):
        """Print the scheduled task in a tabular format."""
        name_pad = 5
        for name in scheduled:
            if len(name) > name_pad:
                name_pad = len(name)
        name_pad += 1
        header = f'{'Name':<{name_pad}}| Task type | Next run'
        print('Scheduled tasks:')
        print(header)
        print('-' * (len(header) + 5))
        for task in scheduled.values():
            print(f'{task['name']:<{name_pad}}| {task['task_type'].title():<10}| {task['next_run']}')

    @staticmethod
    def human_friendly_print_repositories(repositories_map):
        """This will print the repositories in a tabular format.

        Args:
            repositories_map (map<state (str), repository (list of objects)>)
        """
        repository_count = 0
        id_pad = 5
        plan_pad = 7
        for repositories in repositories_map.values():
            for repository in repositories:
                repository_count += 1
                if id_pad < len(repository['id']):
                    id_pad = len(repository['id'])
                if 'plan_name' in repository and plan_pad < len(repository['plan_name']):
                    plan_pad = len(repository['plan_name'])
        if repository_count == 0:
            print('No repositories found')
            return
        # Get an extra space between the information and the column separator
        plan_pad += 1
        id_pad += 1
        # build header
        header = f'{'ID':<{id_pad}}| {'State':<9}| {'plan':<{plan_pad}}| Healthy | Repository'
        print(header)
        print('-' * len(header))
        # print repository summary
        for _, repositories in sorted(repositories_map.items()):
            for repository in repositories:
                healthy = not ('health' in repository and not repository['health']['healthy'])
                # archived and imported repositories may not have plans so we have to replace the empty string with N/A
                plan_name = 'N/A'
                if 'plan_name' in repository and len(repository['plan_name']) != 0:
                    plan_name = repository['plan_name']
                print(f"{repository["id"]:<{id_pad}}| {repository["state"]:<9}| {plan_name:<{plan_pad}}| "
                      f" {healthy!s:<7}| {repository["repo"]}")

    @staticmethod
    def get_man_page_name():
        # NOTE(review): "+" binds tighter than the conditional expression, so on
        # Windows this returns just '.html' without the command prefix —
        # parenthesise ('.1' if ... else '.html') to fix.
        return 'couchbase-cli-backup-service-repository' + '.1' if os.name != 'nt' else '.html'

    @staticmethod
    def get_description():
        return 'Manage backup service repositories'
class BackupServicePlan:
"""This command manages backup services plans.
Things this command can do is:
- List plans
- Add delete
- Delete plans
"""
def __init__(self, subparser):
"""setup the parser"""
self.rest = None
plan_parser = subparser.add_parser('plan', help='Manage backup plans', add_help=False,
allow_abbrev=False)
# action flags are mutually exclusive
action_group = plan_parser.add_mutually_exclusive_group(required=True)
action_group.add_argument('--list', action='store_true', help='List all available backup plans')
action_group.add_argument('--get', action='store_true', help='Get a plan by name')
action_group.add_argument('--remove', action='store_true', help='Remove a plan by name')
action_group.add_argument('--add', action='store_true', help='Add a new plan')
action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self,
help="Prints the short or long help message")
options = plan_parser.add_argument_group('Plan options')
options.add_argument('--name', metavar='<name>', help='Plan name')
options.add_argument('--description', metavar='<description>', help='Optional description')
options.add_argument('--services', metavar='<services>', help='A comma separated list of services to backup')
options.add_argument('--task', metavar='<tasks>', nargs='+', help='JSON task definition')
@rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True)
def execute(self, opts):
"""Run the backup plan managment command"""
if opts.list:
self.list_plans(opts.output == 'json')
elif opts.get:
self.get_plan(opts.name, opts.output == 'json')
elif opts.remove:
self.remove_plan(opts.name)
elif opts.add:
self.add_plan(opts.name, opts.services, opts.task, opts.description)
def add_plan(self, name: str, services: Optional[str], tasks: Optional[List[str]], description: Optional[str]):
"""Add a new backup plan
The validation of the inputs in the CLI is intentionally lacking as this is offloaded to the backup service.
Args:
name (str): The name to give the new plan. It must be unique.
services (optional list): A list of services to backup if empty all services are backed up.
tasks (optional list): A list of JSON strings representing the tasks to be run.
description (optional str): A optional description string.
"""
if not name:
_exit_if_errors(['--name is required'])
service_list = []
if services:
service_list = [service.strip() for service in services.split(',')]
tasks_objects = []
if tasks:
for task_str in tasks:
try:
task = json.loads(task_str)
tasks_objects.append(task)
except json.decoder.JSONDecodeError as json_error:
_exit_if_errors([f'invalid task {json_error!s}'])
plan = {}
if service_list:
plan['services'] = service_list
if tasks_objects:
plan['tasks'] = tasks_objects
if description:
plan['description'] = description
_, errors = self.rest.add_backup_plan(name, plan)
_exit_if_errors(errors)
_success('Added plan')
def remove_plan(self, name: str):
"""Removes a plan by name"""
if not name:
_exit_if_errors(['--name is required'])
_, errors = self.rest.delete_backup_plan(name)
_exit_if_errors(errors)
_success('Plan removed')
def get_plan(self, name: str, json_output: bool = False):
"""Gets a backup plan by name
Args:
name (str): The name of the plan to retrieve
json_output (bool): Whether to print in JSON or a more human friendly way
"""
if not name:
_exit_if_errors(['--name is required'])
plan, errors = self.rest.get_backup_plan(name)
_exit_if_errors(errors)
if json_output:
print(json.dumps(plan, indent=2))
else:
self.human_print_plan(plan)
def list_plans(self, json_output: bool = False):
"""Prints all the plans stored in the backup service
Args:
json_output (bool): Whether to print in JSON or a more human friendly way
"""
plans, errors = self.rest.list_backup_plans()
_exit_if_errors(errors)
if json_output:
print(json.dumps(plans, indent=2))
else:
self.human_print_plans(plans)
@staticmethod
def human_print_plan(plan: object):
"""Prints the plan in a human friendly way"""
print(f'Name: {plan['name']}')
print(f'Description: {plan['description'] if 'description' in plan else 'N/A'}')
print(f'Services: {BackupServicePlan.service_list_to_str(plan['services'])}')
print(f'Default: {(plan['default'] if 'deafult' in plan else False)!s}')
# If the are no tasks return
if not plan["tasks"]:
return
print()
print('Tasks:')
task_name_pad = 5
schedule_pad = 10
for task in plan['tasks']:
if len(task['name']) > task_name_pad:
task_name_pad = len(task['name'])
task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule'])
if len(task['schedule_str']) > schedule_pad:
schedule_pad = len(task['schedule_str'])
task_name_pad += 1
schedule_pad += 1
header = f'{'Name':<{task_name_pad}} | {'Schedule':<{schedule_pad}} | Options'
print(header)
print('-' * (len(header) + 5))
for task in plan['tasks']:
options = BackupServicePlan.format_options(task)
print(f'{task['name']:<{task_name_pad}} | {task['schedule_str']:<{schedule_pad}} | {options}')
@staticmethod
def format_options(task: object) -> str:
"""Format the full backup or merge options"""
options = 'N/A'
if task['task_type'] == 'BACKUP' and task['full_backup']:
options = 'Full backup'
elif task['task_type'] == 'MERGE':
if 'merge_options' in task:
options = (f'Merge from {task['merge_options']['offset_start']} to '
f'{task['merge_options']['offset_end']}')
else:
options = 'Merge everything'
return options
@staticmethod
def format_schedule(schedule: object) -> str:
"""Format the schedule object in a string of the format <task> every <frequency>? <period> (at <time>)?"""
task_start = f'{schedule['job_type'].lower()}'
frequency_part = 'every'
if schedule['frequency'] == 1:
period = schedule["period"].lower()
period = period if period[-1] != 's' else period[:-1]
frequency_part += f' {period}'
else:
frequency_part += f' {schedule['frequency']} {schedule['period'].lower()}'
time_part = f' at {schedule['time']}' if 'time' in schedule else ''
return f'{task_start} {frequency_part}{time_part}'
@staticmethod
def human_print_plans(plans: List[Any]):
"""Prints a table with an overview of each plan"""
# if plans is empty or none print no plans message
if not plans:
print('No plans')
return
name_pad = 5
service_pad = 8
for plan in plans:
if len(plan['name']) > name_pad:
name_pad = len(plan['name'])
services_str = BackupServicePlan.service_list_to_str(plan['services'])
if len(services_str) > service_pad:
service_pad = len(services_str)
name_pad += 1
service_pad += 1
header = f'{'Name':<{name_pad}} | # Tasks | {'Services':<{service_pad}} | Default'
print(header)
print('-' * (len(header) + 5))
for plan in plans:
task_len = len(plan['tasks']) if 'tasks' in plan and plan['tasks'] else 0
print(f'{plan['name']:<{name_pad}} | {task_len:<7} | '
f'{BackupServicePlan.service_list_to_str(plan['services']):<{service_pad}} | '
f'{(plan['default'] if 'default' in plan else False)!s}')
@staticmethod
def service_list_to_str(services: Optional[List[Any]]) -> str:
"""convert the list of services to a concise list of services"""
if not services:
return 'all'
# a way to convert codenames to visible name
convert = {'gsi': 'Indexing', 'cbas': 'Analytics', 'ft': 'Full Text Search'}
return ', '.join([convert[service] if service in convert else service.title() for service in services])
@staticmethod
def get_man_page_name():
return 'couchbase-cli-backup-service-plan' + '.1' if os.name != 'nt' else '.html'
@staticmethod
def get_description():
return 'Manage backup service plans'
| """A Couchbase CLI subcommand"""
import getpass
import inspect
import ipaddress
import json
import os
import platform
import random
import re
import string
import subprocess
import sys
import urllib.parse
import tempfile
import time
from typing import Optional, List, Any, Dict
from argparse import ArgumentError, ArgumentParser, HelpFormatter, Action, SUPPRESS
from operator import itemgetter
from cluster_manager import ClusterManager
from pbar import TopologyProgressBar
try:
from cb_version import VERSION # pylint: disable=import-error
except ImportError:
VERSION = "0.0.0-0000-community"
print(f'WARNING: Could not import cb_version, setting VERSION to {VERSION}')
# Port assumed when a cluster address does not specify one explicitly
COUCHBASE_DEFAULT_PORT = 8091
# Numeric and string forms of the bucket disk I/O priority used by the REST API
BUCKET_PRIORITY_HIGH_INT = 8
BUCKET_PRIORITY_HIGH_STR = "high"
BUCKET_PRIORITY_LOW_INT = 3
BUCKET_PRIORITY_LOW_STR = "low"
# Internal bucket type names ("membase" is the legacy wire name for couchbase buckets)
BUCKET_TYPE_COUCHBASE = "membase"
BUCKET_TYPE_MEMCACHED = "memcached"
# Install-relative locations of the server binaries, configuration and libraries
CB_BIN_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "bin"))
CB_ETC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "etc", "couchbase"))
CB_LIB_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "lib"))
# On MacOS the config is stored in the user's home directory
if platform.system() == "Darwin":
    CB_CFG_PATH = os.path.expanduser("~/Library/Application Support/Couchbase/var/lib/couchbase")
else:
    CB_CFG_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "var", "lib", "couchbase"))
CB_MAN_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "share"))
# Man pages are roff files on Unix; Windows ships them as HTML instead
if os.name == "nt":
    CB_MAN_PATH = os.path.join(CB_MAN_PATH, "html")
else:
    CB_MAN_PATH = os.path.join(CB_MAN_PATH, "man", "man1")
def remove_prefix(val: str, prefix: str) -> str:
    """Strip *prefix* from the start of *val* when present.

    Note this is a built-in function in Python 3.9 once we upgrade to it we should use it instead.
    """
    if not val.startswith(prefix):
        return val
    return val[len(prefix):]
def rest_initialiser(cluster_init_check=False, version_check=False, enterprise_check=None):
    """rest_initialiser is a decorator that does common subcommand tasks.
    The decorator will always create a cluster manager and assign it to the subcommand attribute `rest`.
    :param cluster_init_check: if true it will check if the cluster is initialized before executing the subcommand
    :param version_check: if true it will check if the cluster and CLI version match if they do not it prints a warning
    :param enterprise_check: if true it will check if the cluster is enterprise and fail if not. If it is false it does
    the check but it does not fail if not enterprise. If none it does not perform the check. The result of the check
    is stored on the instance parameter enterprise
    """
    def inner(fn):
        def decorator(self, opts):
            # Build the REST client from the common connection options before
            # the wrapped subcommand body runs
            self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify,
                                       opts.cacert, opts.debug)
            if cluster_init_check:
                check_cluster_initialized(self.rest)
            if version_check:
                check_versions(self.rest)
            if enterprise_check is not None:
                # enterprise_check=False still records the edition on
                # self.enterprise; it only fails the command when True
                enterprise, errors = self.rest.is_enterprise()
                _exit_if_errors(errors)
                if enterprise_check and not enterprise:
                    _exit_if_errors(['Command only available in enterprise edition'])
                self.enterprise = enterprise
            return fn(self, opts)
        return decorator
    return inner
def check_cluster_initialized(rest):
    """Exit with an error message unless the target cluster has been initialized."""
    initialized, errors = rest.is_cluster_initialized()
    # _exit_if_errors is a no-op for an empty error list
    _exit_if_errors(errors)
    if not initialized:
        _exit_if_errors(["Cluster is not initialized, use cluster-init to initialize the cluster"])
def check_versions(rest):
    """Warn when the CLI's major.minor version differs from the server's."""
    result, errors = rest.pools()
    if errors:
        # Best effort: if /pools is unreachable simply skip the check
        return
    server_version = result['implementationVersion']
    if server_version is None or VERSION is None:
        return
    # Slice out "major" and "minor" from strings like "7.0.1-XXXX-enterprise";
    # NOTE(review): a version string with fewer than two dots raises ValueError here
    major_couch = server_version[: server_version.index('.')]
    minor_couch = server_version[server_version.index('.') + 1: server_version.index('.', len(major_couch) + 1)]
    major_cli = VERSION[: VERSION.index('.')]
    minor_cli = VERSION[VERSION.index('.') + 1: VERSION.index('.', len(major_cli) + 1)]
    if major_cli != major_couch or minor_cli != minor_couch:
        _warning(f'couchbase-cli version {VERSION} does not match couchbase server version {server_version}')
def index_storage_mode_to_param(value, default="plasma"):
    """Map the CLI index storage mode to the value the REST API expects."""
    translations = {"default": default, "memopt": "memory_optimized"}
    return translations.get(value, value)
def process_services(services, enterprise):
    """Validate a user supplied service list and translate it for the server.

    Args:
        services: Comma (or legacy semicolon) separated service names.
        enterprise: Whether the target cluster is Enterprise Edition.

    Returns:
        A (services, errors) tuple: the translated comma separated string and
        None on success, or None and a list of error messages on failure.
    """
    # ";" was the historic separator; it is only honoured when "," is absent
    separator = "," if "," in services else ";"
    svc_set = {name.strip() for name in services.split(separator)}
    valid_services = ["data", "index", "query", "fts", "eventing", "analytics", "backup"]
    enterprise_only = ["eventing", "analytics", "backup"]
    for svc in svc_set:
        if svc not in valid_services:
            return None, [f'`{svc}` is not a valid service']
        if not enterprise and svc in enterprise_only:
            return None, [f'{svc} service is only available on Enterprise Edition']
    if not enterprise:
        # Community Edition only allows these exact node configurations
        ce_svc_30 = set(["data"])
        ce_svc_40 = set(["data", "index", "query"])
        ce_svc_45 = set(["data", "index", "query", "fts"])
        if svc_set not in [ce_svc_30, ce_svc_40, ce_svc_45]:
            return None, [f"Invalid service configuration. Community Edition only supports nodes with the following"
                          f" combinations of services: '{''.join(ce_svc_30)}', '{','.join(ce_svc_40)}' or "
                          f"'{','.join(ce_svc_45)}'"]
    # Translate CLI names to the names ns_server expects
    translated = ",".join(svc_set)
    for old, new in [[";", ","], ["data", "kv"], ["query", "n1ql"], ["analytics", "cbas"]]:
        translated = translated.replace(old, new)
    return translated, None
def find_subcommands():
    """Discover every concrete Subcommand/LocalSubcommand in this module.

    Returns:
        A list of (command-name, class) tuples where the command name is the
        CamelCase class name converted to dashed lower case
        (e.g. BucketCreate -> bucket-create).
    """
    members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    subcommands = []
    for cls_name, cls in members:
        # Skip anything that is not a concrete subcommand (the bases themselves
        # are excluded)
        if not issubclass(cls, (Subcommand, LocalSubcommand)) or cls in (Subcommand, LocalSubcommand):
            continue
        dashed = '-'.join(part.lower() for part in re.findall('[A-Z][a-z]*', cls_name))
        subcommands.append((dashed, cls))
    return subcommands
def _success(msg):
print(f'SUCCESS: {msg}')
def _deprecated(msg):
print(f'DEPRECATED: {msg}')
def _warning(msg):
print(f'WARNING: {msg}')
def _exit_if_errors(errors):
    """Print each error and exit(1); does nothing when *errors* is empty/None."""
    if not errors:
        return
    for error in errors:
        # Some endpoint return errors prefixed with '_ -' this has to be stripped out. For more information see
        # MB-42801
        print(f'ERROR: {remove_prefix(error, "_ -").lstrip(" ")}')
    sys.exit(1)
def _exit_on_file_write_failure(fname, to_write):
try:
wfile = open(fname, 'w')
wfile.write(to_write)
wfile.close()
except IOError as error:
_exit_if_errors([error])
def _exit_on_file_read_failure(fname, to_report=None):
try:
rfile = open(fname, 'r')
read_bytes = rfile.read()
rfile.close()
return read_bytes
except IOError as error:
if to_report is None:
_exit_if_errors([f'{error.strerror} `{fname}`'])
else:
_exit_if_errors([to_report])
def apply_default_port(nodes):
    """
    Adds the default port if the port is missing.

    @type  nodes: string
    @param nodes: A comma seprated list of nodes
    @rtype: array of strings
    @return: The nodes with the port postfixed on each one
    """
    has_port = re.compile(r'.*:\d+$')
    return [node if has_port.match(node) else f'{node}:8091' for node in nodes.split(',')]
class CLIHelpFormatter(HelpFormatter):
    """Format help with indented section bodies"""
    # NOTE(review): this class pokes argparse's private HelpFormatter internals
    # (_format_action_invocation, _action_max_length, _add_item, ...) and may
    # need revisiting on Python upgrades.
    def __init__(self, prog, indent_increment=2, max_help_position=30, width=None):
        HelpFormatter.__init__(self, prog, indent_increment, max_help_position, width)
    def add_argument(self, action):
        # Record the action and keep track of the widest invocation so columns
        # line up; hidden (SUPPRESS) actions are skipped entirely
        if action.help is not SUPPRESS:
            # find all invocations
            get_invocation = self._format_action_invocation
            invocations = [get_invocation(action)]
            for subaction in self._iter_indented_subactions(action):
                invocations.append(get_invocation(subaction))
            # update the maximum item length
            invocation_length = max([len(s) for s in invocations])
            action_length = invocation_length + self._current_indent + 2
            self._action_max_length = max(self._action_max_length,
                                          action_length)
            # add the item to the list
            self._add_item(self._format_action, [action])
    def _format_action_invocation(self, action):
        # Positionals render as their metavar; options render as a comma
        # separated option list, with the args string appended when they
        # consume a value (nargs != 0)
        if not action.option_strings:
            metavar, = self._metavar_formatter(action, action.dest)(1)
            return metavar
        else:
            parts = []
            if action.nargs == 0:
                parts.extend(action.option_strings)
                return ','.join(parts)
            else:
                default = action.dest
                args_string = self._format_args(action, default)
                for option_string in action.option_strings:
                    parts.append(option_string)
                return ','.join(parts) + ' ' + args_string
class CBDeprecatedAction(Action):
    """Argparse action that warns whenever a deprecated option is used."""
    def __call__(self, parser, namespace, values, option_string=None):
        _deprecated('Specifying ' + '/'.join(self.option_strings) + ' is deprecated')
        # nargs == 0 means the option is a flag: store the configured const
        # rather than the (empty) values
        stored = self.const if self.nargs == 0 else values
        setattr(namespace, self.dest, stored)
class CBHostAction(Action):
    """Allows the handling of hostnames on the command line"""
    def __call__(self, parser, namespace, values, option_string=None):
        parsed = urllib.parse.urlparse(values)
        # If the netloc is empty then it means that there was no scheme added
        # to the URI and we are parsing it as a path. In this case no scheme
        # means HTTP so we can add that scheme to the hostname provided.
        if parsed.netloc == "":
            parsed = urllib.parse.urlparse("http://" + values)
        # NOTE(review): after the reparse above the scheme is always non-empty,
        # so this second reparse appears unreachable — confirm before removing
        if parsed.scheme == "":
            parsed = urllib.parse.urlparse("http://" + values)
        # Only a bare scheme://host[:port] is accepted — no path/query/fragment
        if parsed.path != "" or parsed.params != "" or parsed.query != "" or parsed.fragment != "":
            raise ArgumentError(self, f"{values} is not an accepted hostname")
        if not parsed.hostname:
            raise ArgumentError(self, f"{values} is not an accepted hostname")
        # RFC-952/1123 style hostname: dot separated labels of alphanumerics
        # and inner hyphens
        hostname_regex = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*'
                                    + r'([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$')
        if not hostname_regex.match(parsed.hostname):
            # Not a hostname — it must then be a valid IP address
            try:
                ipaddress.ip_address(parsed.hostname)
            except ValueError as val_error:
                raise ArgumentError(self, f"{values} is not an accepted hostname") from val_error
        # Normalise couchbase:// / couchbases:// onto http/https and fill in
        # the default admin ports (8091 plain, 18091 TLS) when none was given
        scheme = parsed.scheme
        port = None
        if scheme in ["http", "couchbase"]:
            if not parsed.port:
                port = 8091
            if scheme == "couchbase":
                scheme = "http"
        elif scheme in ["https", "couchbases"]:
            if not parsed.port:
                port = 18091
            if scheme == "couchbases":
                scheme = "https"
        else:
            raise ArgumentError(self, "%s is not an accepted scheme" % scheme)
        # Store the normalised URL, appending the default port when the user
        # did not supply one
        if parsed.port:
            setattr(namespace, self.dest, (scheme + "://" + parsed.netloc))
        else:
            setattr(namespace, self.dest, (scheme + "://" + parsed.netloc + ":" + str(port)))
class CBEnvAction(Action):
    """Argparse action whose default may be supplied via an environment variable."""
    def __init__(self, envvar, required=True, default=None, **kwargs):
        # The environment variable only applies when no explicit default exists
        if not default and envvar and envvar in os.environ:
            default = os.environ[envvar]
        # Having a default makes the option effectively optional on the CLI
        if required and default:
            required = False
        super(CBEnvAction, self).__init__(default=default, required=required,
                                          **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)
class CBNonEchoedAction(CBEnvAction):
    """Password-style option: the value may come from the command line, an
    environment variable, or a non-echoed interactive prompt (with optional
    confirmation)."""
    def __init__(self, envvar, prompt_text="Enter password:", confirm_text=None,
                 required=True, default=None, nargs='?', **kwargs):
        self.prompt_text = prompt_text
        self.confirm_text = confirm_text
        super(CBNonEchoedAction, self).__init__(envvar, required=required, default=default,
                                                nargs=nargs, **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        if values is None:
            # Option given without a value: prompt without echoing
            values = getpass.getpass(self.prompt_text)
            if self.confirm_text is not None:
                if values != getpass.getpass(self.prompt_text):
                    raise ArgumentError(self, "Passwords entered do not match, please retry")
        super(CBNonEchoedAction, self).__call__(parser, namespace, values, option_string=None)
class CBHelpAction(Action):
    """Argparse action implementing -h (short help) and --help (man page)."""
    # pylint: disable=redefined-builtin
    def __init__(self, option_strings, klass, dest=SUPPRESS, default=SUPPRESS, help=None):
        super(CBHelpAction, self).__init__(option_strings=option_strings, dest=dest,
                                           default=default, nargs=0, help=help)
        self.klass = klass
    def __call__(self, parser, namespace, values, option_string=None):
        # -h prints the short usage text; --help opens the full man page
        if option_string == "-h":
            parser.print_help()
        else:
            CBHelpAction._show_man_page(self.klass.get_man_page_name())
        parser.exit()
    @staticmethod
    def _show_man_page(page):
        if os.name == "nt":
            # Windows has no man(1); open the HTML version with the default handler
            try:
                subprocess.call(["rundll32.exe", "url.dll,FileProtocolHandler", os.path.join(CB_MAN_PATH, page)])
            except OSError as e:
                _exit_if_errors(["Unable to open man page using your browser, %s" % e])
        else:
            try:
                subprocess.call(["man", os.path.join(CB_MAN_PATH, page)])
            except OSError:
                _exit_if_errors(["Unable to open man page using the 'man' command, ensure it is on your path or"
                                 + "install a manual reader"])
class CliParser(ArgumentParser):
    """ArgumentParser that reports errors in the CLI's 'ERROR: ...' format."""
    def __init__(self, *args, **kwargs):
        super(CliParser, self).__init__(*args, **kwargs)
    def error(self, message):
        # Skip argparse's usage dump: print one error line and exit with code 2
        self.exit(2, f'ERROR: {message}\n')
class Command(object):
    """A Couchbase CLI Command"""
    def __init__(self):
        self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False)
    def parse(self, args):
        """Parse *args*, printing the short help when no arguments were given."""
        if not args:
            self.short_help()
        return self.parser.parse_args(args)
    def short_help(self, code=0):
        """Print the short help message and exit with *code*."""
        self.parser.print_help()
        self.parser.exit(code)
    def execute(self, opts):
        """Execute the subcommand; overridden by each concrete subcommand."""
        raise NotImplementedError
    @staticmethod
    def get_man_page_name():
        """Return the man page name; overridden by each concrete subcommand."""
        raise NotImplementedError
    @staticmethod
    def get_description():
        """Return the command description; overridden by each concrete subcommand."""
        raise NotImplementedError
class CouchbaseCLI(Command):
    """The top level couchbase-cli command: registers and dispatches subcommands."""
    def __init__(self):
        super(CouchbaseCLI, self).__init__()
        self.parser.prog = "couchbase-cli"
        subparser = self.parser.add_subparsers(title="Commands", metavar="")
        # Register every discovered subcommand; hidden ones get no help text
        for (name, klass) in find_subcommands():
            if klass.is_hidden():
                subcommand = subparser.add_parser(name)
            else:
                subcommand = subparser.add_parser(name, help=klass.get_description())
            subcommand.set_defaults(klass=klass)
        group = self.parser.add_argument_group("Options")
        group.add_argument("-h", "--help", action=CBHelpAction, klass=self,
                           help="Prints the short or long help message")
        group.add_argument("--version", help="Get couchbase-cli version")
    def parse(self, args):
        """Parse the command line, returning the parsed options of the chosen subcommand.

        Args:
            args: Full argv-style list (program name at index 0).
        """
        # NOTE(review): checks sys.argv rather than the args parameter here —
        # harmless when callers pass sys.argv, but confirm before relying on it
        if len(sys.argv) == 1:
            self.parser.print_help()
            self.parser.exit(1)
        if args[1] == "--version":
            print(VERSION)
            sys.exit(0)
        if not args[1] in ["-h", "--help", "--version"] and args[1].startswith("-"):
            _exit_if_errors([f"Unknown subcommand: '{args[1]}'. The first argument has to be a subcommand like"
                             f" 'bucket-list' or 'rebalance', please see couchbase-cli -h for the full list of commands"
                             f" and options"])
        # Two level parse: first pick the subcommand class, then let it parse
        # the remaining arguments
        l1_args = self.parser.parse_args(args[1:2])
        l2_args = l1_args.klass().parse(args[2:])
        setattr(l2_args, 'klass', l1_args.klass)
        return l2_args
    def execute(self, opts):
        opts.klass().execute(opts)
    @staticmethod
    def get_man_page_name():
        """Returns the man page name"""
        # Parenthesised: previously the conditional bound to the whole
        # concatenation and Windows got just ".html"
        return "couchbase-cli" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "A Couchbase cluster administration utility"
class Subcommand(Command):
    """
    A Couchbase CLI Subcommand: This is for subcommand that interact with a remote Couchbase Server over the REST API.
    """
    def __init__(self, deprecate_username=False, deprecate_password=False, cluster_default=None):
        super(Subcommand, self).__init__()
        # Filled by the decorators
        self.rest = None
        self.enterprise = None
        self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False)
        group = self.parser.add_argument_group("Cluster options")
        # -c is only optional when the subcommand supplies its own default
        # (e.g. cluster-init targets localhost)
        group.add_argument("-c", "--cluster", dest="cluster", required=(cluster_default is None),
                           metavar="<cluster>", action=CBHostAction, default=cluster_default,
                           help="The hostname of the Couchbase cluster")
        # Username/password may be deprecated per subcommand; otherwise they
        # can come from the CB_REST_USERNAME / CB_REST_PASSWORD environment
        if deprecate_username:
            group.add_argument("-u", "--username", dest="username",
                               action=CBDeprecatedAction, help=SUPPRESS)
        else:
            group.add_argument("-u", "--username", dest="username", required=True,
                               action=CBEnvAction, envvar='CB_REST_USERNAME',
                               metavar="<username>", help="The username for the Couchbase cluster")
        if deprecate_password:
            group.add_argument("-p", "--password", dest="password",
                               action=CBDeprecatedAction, help=SUPPRESS)
        else:
            # CBNonEchoedAction also allows the password via a hidden prompt
            group.add_argument("-p", "--password", dest="password", required=True,
                               action=CBNonEchoedAction, envvar='CB_REST_PASSWORD',
                               metavar="<password>", help="The password for the Couchbase cluster")
        group.add_argument("-o", "--output", dest="output", default="standard", metavar="<output>",
                           choices=["json", "standard"], help="The output type (json or standard)")
        group.add_argument("-d", "--debug", dest="debug", action="store_true",
                           help="Run the command with extra logging")
        group.add_argument("-s", "--ssl", dest="ssl", const=True, default=False,
                           nargs=0, action=CBDeprecatedAction,
                           help="Use ssl when connecting to Couchbase (Deprecated)")
        group.add_argument("--no-ssl-verify", dest="ssl_verify", action="store_false", default=True,
                           help="Skips SSL verification of certificates against the CA")
        # NOTE(review): default=True looks odd for a certificate-path option —
        # presumably ClusterManager treats True as "use default verification";
        # confirm before changing
        group.add_argument("--cacert", dest="cacert", default=True,
                           help="Verifies the cluster identity with this certificate")
        group.add_argument("-h", "--help", action=CBHelpAction, klass=self,
                           help="Prints the short or long help message")
    def execute(self, opts):  # pylint: disable=useless-super-delegation
        super(Subcommand, self).execute(opts)
    @staticmethod
    def get_man_page_name():
        return Command.get_man_page_name()
    @staticmethod
    def get_description():
        return Command.get_description()
    @staticmethod
    def is_hidden():
        """Whether or not the subcommand should be hidden from the help message"""
        return False
class LocalSubcommand(Command):
    """
    A Couchbase CLI Localcommand: This is for subcommands that interact with the local Couchbase Server via the
    filesystem or a local socket.
    """
    def __init__(self):
        super(LocalSubcommand, self).__init__()
        self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False)
        group = self.parser.add_argument_group(title="Local command options",
                                               description="This command has to be execute on the locally running"
                                               + " Couchbase Server.")
        group.add_argument("-h", "--help", action=CBHelpAction, klass=self,
                           help="Prints the short or long help message")
        # Hidden option allowing tests/packaging to point at a non-standard
        # configuration directory
        group.add_argument("--config-path", dest="config_path", metavar="<path>",
                           default=CB_CFG_PATH, help=SUPPRESS)
    def execute(self, opts):  # pylint: disable=useless-super-delegation
        super(LocalSubcommand, self).execute(opts)
    @staticmethod
    def get_man_page_name():
        return Command.get_man_page_name()
    @staticmethod
    def get_description():
        return Command.get_description()
    @staticmethod
    def is_hidden():
        """Whether or not the subcommand should be hidden from the help message"""
        return False
class ClusterInit(Subcommand):
    """The cluster initialization subcommand"""
    def __init__(self):
        # Username/password options are deprecated in favour of the
        # --cluster-username/--cluster-password below; default to localhost
        super(ClusterInit, self).__init__(True, True, "http://127.0.0.1:8091")
        self.parser.prog = "couchbase-cli cluster-init"
        group = self.parser.add_argument_group("Cluster initialization options")
        group.add_argument("--cluster-username", dest="username", required=True,
                           metavar="<username>", help="The cluster administrator username")
        group.add_argument("--cluster-password", dest="password", required=True,
                           metavar="<password>", help="The cluster administrator password")
        group.add_argument("--cluster-port", dest="port", type=(int),
                           metavar="<port>", help="The cluster administration console port")
        group.add_argument("--cluster-ramsize", dest="data_mem_quota", type=(int),
                           metavar="<quota>", help="The data service memory quota in mebibytes")
        group.add_argument("--cluster-index-ramsize", dest="index_mem_quota", type=(int),
                           metavar="<quota>", help="The index service memory quota in mebibytes")
        group.add_argument("--cluster-fts-ramsize", dest="fts_mem_quota", type=(int),
                           metavar="<quota>",
                           help="The full-text service memory quota in mebibytes")
        group.add_argument("--cluster-eventing-ramsize", dest="eventing_mem_quota", type=(int),
                           metavar="<quota>",
                           help="The Eventing service memory quota in mebibytes")
        group.add_argument("--cluster-analytics-ramsize", dest="cbas_mem_quota", type=(int),
                           metavar="<quota>",
                           help="The analytics service memory quota in mebibytes")
        group.add_argument("--cluster-name", dest="name", metavar="<name>", help="The cluster name")
        # Help text previously read '(Defaults to "default)"' — parenthesis
        # was misplaced
        group.add_argument("--index-storage-setting", dest="index_storage_mode",
                           choices=["default", "memopt"], metavar="<mode>",
                           help="The index storage backend (Defaults to \"default\")")
        group.add_argument("--services", dest="services", default="data", metavar="<service_list>",
                           help="The services to run on this server")
        group.add_argument("--update-notifications", dest="notifications", metavar="<1|0>", choices=["0", "1"],
                           default="1", help="Enables/disable software update notifications")
    @rest_initialiser(enterprise_check=False)
    def execute(self, opts):
        """Initialize the cluster: quotas, services, storage mode, notifications
        and finally the administrator credentials."""
        # We need to ensure that creating the REST username/password is the
        # last REST API that is called because once that API succeeds the
        # cluster is initialized and cluster-init cannot be run again.
        initialized, errors = self.rest.is_cluster_initialized()
        _exit_if_errors(errors)
        if initialized:
            _exit_if_errors(["Cluster is already initialized, use setting-cluster to change settings"])
        if not self.enterprise and opts.index_storage_mode == 'memopt':
            _exit_if_errors(["memopt option for --index-storage-setting can only be configured on enterprise edition"])
        services, errors = process_services(opts.services, self.enterprise)
        _exit_if_errors(errors)
        if 'kv' not in services.split(','):
            _exit_if_errors(["Cannot set up first cluster node without the data service"])
        # Only touch pools/default when the user asked for a quota or a name
        if opts.data_mem_quota or opts.index_mem_quota or opts.fts_mem_quota or opts.cbas_mem_quota \
                or opts.eventing_mem_quota or opts.name is not None:
            _, errors = self.rest.set_pools_default(opts.data_mem_quota, opts.index_mem_quota, opts.fts_mem_quota,
                                                    opts.cbas_mem_quota, opts.eventing_mem_quota, opts.name)
            _exit_if_errors(errors)
        # Set the index storage mode
        if not opts.index_storage_mode and 'index' in services.split(','):
            opts.index_storage_mode = "default"
        # Community edition does not support the plasma backend
        default = "plasma"
        if not self.enterprise:
            default = "forestdb"
        if opts.index_storage_mode:
            param = index_storage_mode_to_param(opts.index_storage_mode, default)
            _, errors = self.rest.set_index_settings(param, None, None, None, None, None, None, None)
            _exit_if_errors(errors)
        # Setup services
        _, errors = self.rest.setup_services(services)
        _exit_if_errors(errors)
        # Enable notifications
        _, errors = self.rest.enable_notifications(opts.notifications == "1")
        _exit_if_errors(errors)
        # Setup Administrator credentials and Admin Console port
        _, errors = self.rest.set_admin_credentials(opts.username, opts.password,
                                                    opts.port)
        _exit_if_errors(errors)
        _success("Cluster initialized")
    @staticmethod
    def get_man_page_name():
        # Parenthesised: previously the conditional bound to the whole
        # concatenation and Windows got just ".html"
        return "couchbase-cli-cluster-init" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Initialize a Couchbase cluster"
class BucketCompact(Subcommand):
    """The bucket compact subcommand"""
    def __init__(self):
        super(BucketCompact, self).__init__()
        self.parser.prog = "couchbase-cli bucket-compact"
        group = self.parser.add_argument_group("Bucket compaction options")
        group.add_argument("--bucket", dest="bucket_name", metavar="<name>",
                           help="The name of bucket to compact")
        group.add_argument("--data-only", dest="data_only", action="store_true",
                           help="Only compact the data files")
        group.add_argument("--view-only", dest="view_only", action="store_true",
                           help="Only compact the view files")
    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Start compaction of the target bucket's data and/or view files."""
        bucket, errors = self.rest.get_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        # Only couchbase ("membase") buckets persist data and can be compacted
        if bucket["bucketType"] != BUCKET_TYPE_COUCHBASE:
            _exit_if_errors(["Cannot compact memcached buckets"])
        _, errors = self.rest.compact_bucket(opts.bucket_name, opts.data_only, opts.view_only)
        _exit_if_errors(errors)
        _success("Bucket compaction started")
    @staticmethod
    def get_man_page_name():
        # Parenthesised: previously the conditional bound to the whole
        # concatenation and Windows got just ".html"
        return "couchbase-cli-bucket-compact" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Compact database and view data"
class BucketCreate(Subcommand):
"""The bucket create subcommand"""
def __init__(self):
super(BucketCreate, self).__init__()
self.parser.prog = "couchbase-cli bucket-create"
group = self.parser.add_argument_group("Bucket create options")
group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True,
help="The name of bucket to create")
group.add_argument("--bucket-type", dest="type", metavar="<type>", required=True,
choices=["couchbase", "ephemeral", "memcached"],
help="The bucket type (couchbase, ephemeral, or memcached)")
group.add_argument("--storage-backend", dest="storage", metavar="<storage>",
choices=["couchstore", "magma"],
help="Type of storage backend (only for couchbase buckets)")
group.add_argument("--bucket-ramsize", dest="memory_quota", metavar="<quota>", type=(int),
required=True, help="The amount of memory to allocate the bucket")
group.add_argument("--bucket-replica", dest="replica_count", metavar="<num>",
choices=["0", "1", "2", "3"],
help="The replica count for the bucket")
group.add_argument("--bucket-priority", dest="priority", metavar="<priority>",
choices=[BUCKET_PRIORITY_LOW_STR, BUCKET_PRIORITY_HIGH_STR],
help="The bucket disk io priority (low or high)")
group.add_argument("--durability-min-level", dest="durability_min_level", metavar="<level>",
choices=["none", "majority", "majorityAndPersistActive",
"persistToMajority"],
help="The bucket durability minimum level")
group.add_argument("--bucket-eviction-policy", dest="eviction_policy", metavar="<policy>",
choices=["valueOnly", "fullEviction", "noEviction", "nruEviction"],
help="The bucket eviction policy")
group.add_argument("--conflict-resolution", dest="conflict_resolution", default=None,
choices=["sequence", "timestamp"], metavar="<type>",
help="The XDCR conflict resolution type (timestamp or sequence)")
group.add_argument("--max-ttl", dest="max_ttl", default=None, type=(int), metavar="<seconds>",
help="Set the maximum TTL the bucket will accept. Couchbase server Enterprise Edition only.")
group.add_argument("--compression-mode", dest="compression_mode",
choices=["off", "passive", "active"], metavar="<mode>",
help="Set the compression mode of the bucket")
group.add_argument("--enable-flush", dest="enable_flush", metavar="<0|1>",
choices=["0", "1"], help="Enable bucket flush on this bucket (0 or 1)")
group.add_argument("--enable-index-replica", dest="replica_indexes", metavar="<0|1>",
choices=["0", "1"], help="Enable replica indexes (0 or 1)")
group.add_argument("--wait", dest="wait", action="store_true",
help="Wait for bucket creation to complete")
group.add_argument("--database-fragmentation-threshold-percentage", dest="db_frag_perc",
metavar="<perc>", type=(int), help="Set Database Fragmentation level percent")
group.add_argument("--database-fragmentation-threshold-size", dest="db_frag_size",
metavar="<mebibytes>", type=(int), help="Set Database Fragmentation level")
group.add_argument("--view-fragmentation-threshold-percentage", dest="view_frag_perc",
metavar="<perc>", type=(int), help="Set View Fragmentation level percent")
group.add_argument("--view-fragmentation-threshold-size", dest="view_frag_size",
metavar="<mebibytes>", type=(int), help="Set View Fragmentation level size")
group.add_argument("--from-hour", dest="from_hour",
metavar="<quota>", type=(int), help="Set start time hour")
group.add_argument("--from-minute", dest="from_min",
metavar="<quota>", type=(int), help="Set start time minutes")
group.add_argument("--to-hour", dest="to_hour",
metavar="<quota>", type=(int), help="Set end time hour")
group.add_argument("--to-minute", dest="to_min",
metavar="<quota>", type=(int), help="Set end time minutes")
group.add_argument("--abort-outside", dest="abort_outside",
metavar="<0|1>", choices=["0", "1"], help="Allow Time period")
group.add_argument("--parallel-db-view-compaction", dest="paralleldb_and_view_compact",
metavar="<0|1>", choices=["0", "1"], help="Set parallel DB and View Compaction")
group.add_argument("--purge-interval", dest="purge_interval", type=(float),
metavar="<float>", help="Sets the frequency of the tombstone purge interval")
    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Validate the parsed bucket-create options and create the bucket via REST.

        Exits (via _exit_if_errors) with a descriptive message on any invalid
        option combination; on success prints "Bucket created".
        """
        # Max TTL and compression mode are enterprise-only features.
        if opts.max_ttl and not self.enterprise:
            _exit_if_errors(["Maximum TTL can only be configured on enterprise edition"])
        if opts.compression_mode and not self.enterprise:
            _exit_if_errors(["Compression mode can only be configured on enterprise edition"])
        if opts.type == "memcached":
            # Memcached buckets are deprecated and reject most tuning options.
            _deprecated("Memcached buckets are deprecated, please use ephemeral buckets instead")
            if opts.replica_count is not None:
                _exit_if_errors(["--bucket-replica cannot be specified for a memcached bucket"])
            if opts.conflict_resolution is not None:
                _exit_if_errors(["--conflict-resolution cannot be specified for a memcached bucket"])
            if opts.replica_indexes is not None:
                _exit_if_errors(["--enable-index-replica cannot be specified for a memcached bucket"])
            if opts.priority is not None:
                _exit_if_errors(["--bucket-priority cannot be specified for a memcached bucket"])
            if opts.eviction_policy is not None:
                _exit_if_errors(["--bucket-eviction-policy cannot be specified for a memcached bucket"])
            if opts.max_ttl is not None:
                _exit_if_errors(["--max-ttl cannot be specified for a memcached bucket"])
            if opts.compression_mode is not None:
                _exit_if_errors(["--compression-mode cannot be specified for a memcached bucket"])
            if opts.durability_min_level is not None:
                _exit_if_errors(["--durability-min-level cannot be specified for a memcached bucket"])
        # Eviction policies differ by bucket type: ephemeral buckets use
        # noEviction/nruEviction, couchbase buckets use valueOnly/fullEviction.
        elif opts.type == "ephemeral" and opts.eviction_policy in ["valueOnly", "fullEviction"]:
            _exit_if_errors(["--bucket-eviction-policy must either be noEviction or nruEviction"])
        elif opts.type == "couchbase" and opts.eviction_policy in ["noEviction", "nruEviction"]:
            _exit_if_errors(["--bucket-eviction-policy must either be valueOnly or fullEviction"])
        # Compaction settings only apply to couchbase buckets; warn rather than
        # error so scripted callers are not broken.
        if ((opts.type == "memcached" or opts.type == "ephemeral")
            and (opts.db_frag_perc is not None
                 or opts.db_frag_size is not None or opts.view_frag_perc is not None
                 or opts.view_frag_size is not None or opts.from_hour is not None or opts.from_min is not None
                 or opts.to_hour is not None or opts.to_min is not None or opts.abort_outside is not None
                 or opts.paralleldb_and_view_compact is not None)):
            _warning(f'ignoring compaction settings as bucket type {opts.type} does not accept it')
        # Storage backend defaults to couchstore; magma may only be requested
        # for couchbase-type buckets.
        storage_type = "couchstore"
        if opts.storage is not None:
            if opts.type != "couchbase":
                _exit_if_errors(["--storage-backend is only valid for couchbase buckets"])
            if opts.storage == "magma":
                storage_type = "magma"
        # Map CLI priority / conflict-resolution strings onto REST API values.
        priority = None
        if opts.priority is not None:
            if opts.priority == BUCKET_PRIORITY_HIGH_STR:
                priority = BUCKET_PRIORITY_HIGH_INT
            elif opts.priority == BUCKET_PRIORITY_LOW_STR:
                priority = BUCKET_PRIORITY_LOW_INT
        conflict_resolution_type = None
        if opts.conflict_resolution is not None:
            if opts.conflict_resolution == "sequence":
                conflict_resolution_type = "seqno"
            elif opts.conflict_resolution == "timestamp":
                conflict_resolution_type = "lww"
        _, errors = self.rest.create_bucket(opts.bucket_name, opts.type, storage_type, opts.memory_quota,
                                            opts.durability_min_level, opts.eviction_policy, opts.replica_count,
                                            opts.replica_indexes, priority, conflict_resolution_type, opts.enable_flush,
                                            opts.max_ttl, opts.compression_mode, opts.wait, opts.db_frag_perc,
                                            opts.db_frag_size, opts.view_frag_perc, opts.view_frag_size,
                                            opts.from_hour, opts.from_min, opts.to_hour, opts.to_min,
                                            opts.abort_outside, opts.paralleldb_and_view_compact, opts.purge_interval)
        _exit_if_errors(errors)
        _success("Bucket created")
@staticmethod
def get_man_page_name():
return "couchbase-cli-bucket-create" + ".1" if os.name != "nt" else ".html"
@staticmethod
def get_description():
return "Add a new bucket to the cluster"
class BucketDelete(Subcommand):
    """The bucket delete subcommand"""
    def __init__(self):
        super(BucketDelete, self).__init__()
        self.parser.prog = "couchbase-cli bucket-delete"
        group = self.parser.add_argument_group("Bucket delete options")
        group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True,
                           help="The name of bucket to delete")
    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Delete the named bucket, exiting with an error if it does not exist."""
        # Fetch first so the user gets a clear "not found" error instead of a
        # confusing delete failure.
        _, errors = self.rest.get_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        _, errors = self.rest.delete_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        _success("Bucket deleted")
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-bucket-delete" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Delete an existing bucket"
class BucketEdit(Subcommand):
    """The bucket edit subcommand"""
    def __init__(self):
        super(BucketEdit, self).__init__()
        self.parser.prog = "couchbase-cli bucket-edit"
        group = self.parser.add_argument_group("Bucket edit options")
        # Help text fixed: this subcommand edits (not creates) a bucket.
        group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True,
                           help="The name of bucket to edit")
        group.add_argument("--bucket-ramsize", dest="memory_quota", metavar="<quota>",
                           type=(int), help="The amount of memory to allocate the bucket")
        group.add_argument("--bucket-replica", dest="replica_count", metavar="<num>",
                           choices=["0", "1", "2", "3"],
                           help="The replica count for the bucket")
        group.add_argument("--bucket-priority", dest="priority", metavar="<priority>",
                           choices=["low", "high"], help="The bucket disk io priority (low or high)")
        group.add_argument("--durability-min-level", dest="durability_min_level", metavar="<level>",
                           choices=["none", "majority", "majorityAndPersistActive", "persistToMajority"],
                           help="The bucket durability minimum level")
        group.add_argument("--bucket-eviction-policy", dest="eviction_policy", metavar="<policy>",
                           type=(str), help="The bucket eviction policy (valueOnly or fullEviction)")
        group.add_argument("--max-ttl", dest="max_ttl", default=None, type=(int), metavar="<seconds>",
                           help="Set the maximum TTL the bucket will accept")
        group.add_argument("--compression-mode", dest="compression_mode",
                           choices=["off", "passive", "active"], metavar="<mode>",
                           help="Set the compression mode of the bucket")
        group.add_argument("--enable-flush", dest="enable_flush", metavar="<0|1>",
                           choices=["0", "1"], help="Enable bucket flush on this bucket (0 or 1)")
        group.add_argument("--remove-bucket-port", dest="remove_port", metavar="<0|1>",
                           choices=["0", "1"], help="Removes the bucket-port setting")
        group.add_argument("--database-fragmentation-threshold-percentage", dest="db_frag_perc",
                           metavar="<perc>", type=(int), help="Set Database Fragmentation level percent")
        group.add_argument("--database-fragmentation-threshold-size", dest="db_frag_size",
                           metavar="<mebibytes>", type=(int), help="Set Database Fragmentation level")
        group.add_argument("--view-fragmentation-threshold-percentage", dest="view_frag_perc",
                           metavar="<perc>", type=(int), help="Set View Fragmentation level percent")
        group.add_argument("--view-fragmentation-threshold-size", dest="view_frag_size",
                           metavar="<mebibytes>", type=(int), help="Set View Fragmentation level size")
        group.add_argument("--from-hour", dest="from_hour",
                           metavar="<hour>", type=(int), help="Set start time hour")
        group.add_argument("--from-minute", dest="from_min",
                           metavar="<min>", type=(int), help="Set start time minutes")
        group.add_argument("--to-hour", dest="to_hour",
                           metavar="<hour>", type=(int), help="Set end time hour")
        group.add_argument("--to-minute", dest="to_min",
                           metavar="<min>", type=(int), help="Set end time minutes")
        group.add_argument("--abort-outside", dest="abort_outside",
                           metavar="<0|1>", choices=["0", "1"], help="Allow Time period")
        group.add_argument("--parallel-db-view-compaction", dest="paralleldb_and_view_compact",
                           metavar="<0|1>", choices=["0", "1"], help="Set parallel DB and View Compaction")
        group.add_argument("--purge-interval", dest="purge_interval", type=(float),
                           metavar="<num>", help="Set the bucket metadata purge interval")
    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Validate the parsed bucket-edit options and apply them via REST."""
        if opts.max_ttl and not self.enterprise:
            _exit_if_errors(["Maximum TTL can only be configured on enterprise edition"])
        if opts.compression_mode and not self.enterprise:
            _exit_if_errors(["Compression mode can only be configured on enterprise edition"])
        # Note that we accept 'noEviction' and 'nruEviction' as valid values even though they are undocumented; this is
        # so that users attempting to modify the eviction policy of an ephemeral bucket will receive a meaningful
        # message from 'ns_server'. See MB-39036 for more information.
        if (opts.eviction_policy is not None
                and opts.eviction_policy not in ["valueOnly", "fullEviction", "noEviction", "nruEviction"]):
            _exit_if_errors([f"argument --bucket-eviction-policy: invalid choice: '{opts.eviction_policy}'"+
                             " (choose from 'valueOnly', 'fullEviction')"])
        bucket, errors = self.rest.get_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        if "bucketType" in bucket and bucket["bucketType"] == "memcached":
            # Memcached buckets are deprecated and reject most tuning options.
            _deprecated("Memcached buckets are deprecated, please use ephemeral buckets instead")
            if opts.memory_quota is not None:
                _exit_if_errors(["--bucket-ramsize cannot be specified for a memcached bucket"])
            if opts.replica_count is not None:
                _exit_if_errors(["--bucket-replica cannot be specified for a memcached bucket"])
            if opts.priority is not None:
                _exit_if_errors(["--bucket-priority cannot be specified for a memcached bucket"])
            if opts.eviction_policy is not None:
                _exit_if_errors(["--bucket-eviction-policy cannot be specified for a memcached bucket"])
            if opts.max_ttl is not None:
                _exit_if_errors(["--max-ttl cannot be specified for a memcached bucket"])
            if opts.compression_mode is not None:
                _exit_if_errors(["--compression-mode cannot be specified for a memcached bucket"])
            if opts.durability_min_level is not None:
                _exit_if_errors(["--durability-min-level cannot be specified for a memcached bucket"])
        # Compaction settings only apply to couchbase buckets.
        if (("bucketType" in bucket and (bucket["bucketType"] == "memcached" or bucket["bucketType"] == "ephemeral"))
                and (opts.db_frag_perc is not None or opts.db_frag_size is not None
                     or opts.view_frag_perc is not None or opts.view_frag_size is not None or opts.from_hour is not None
                     or opts.from_min is not None or opts.to_hour is not None or opts.to_min is not None
                     or opts.abort_outside is not None or opts.paralleldb_and_view_compact is not None)):
            _exit_if_errors([f'compaction settings can not be specified for a {bucket["bucketType"]} bucket'])
        # Map the CLI priority string onto the integer the REST API expects.
        priority = None
        if opts.priority is not None:
            if opts.priority == BUCKET_PRIORITY_HIGH_STR:
                priority = BUCKET_PRIORITY_HIGH_INT
            elif opts.priority == BUCKET_PRIORITY_LOW_STR:
                priority = BUCKET_PRIORITY_LOW_INT
        if opts.remove_port:
            if opts.remove_port == '1':
                opts.remove_port = True
            else:
                opts.remove_port = False
        _, errors = self.rest.edit_bucket(opts.bucket_name, opts.memory_quota, opts.durability_min_level,
                                          opts.eviction_policy, opts.replica_count, priority, opts.enable_flush,
                                          opts.max_ttl, opts.compression_mode, opts.remove_port, opts.db_frag_perc,
                                          opts.db_frag_size, opts.view_frag_perc, opts.view_frag_size, opts.from_hour,
                                          opts.from_min, opts.to_hour, opts.to_min, opts.abort_outside,
                                          opts.paralleldb_and_view_compact, opts.purge_interval,
                                          'bucketType' in bucket and bucket['bucketType'] == 'membase')
        _exit_if_errors(errors)
        _success("Bucket edited")
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-bucket-edit" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Modify settings for an existing bucket"
class BucketFlush(Subcommand):
    """The bucket flush subcommand"""
    # Docstring and help text fixed: previous text was copy-pasted from the
    # edit/delete subcommands.
    def __init__(self):
        super(BucketFlush, self).__init__()
        self.parser.prog = "couchbase-cli bucket-flush"
        group = self.parser.add_argument_group("Bucket flush options")
        group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True,
                           help="The name of bucket to flush")
        group.add_argument("--force", dest="force", action="store_true",
                           help="Execute the command without asking to confirm")
    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Flush all data from the named bucket, prompting unless --force is given."""
        # Fetch first so a nonexistent bucket produces a clear error before the
        # destructive prompt.
        _, errors = self.rest.get_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        if not opts.force:
            question = "Running this command will totally PURGE database data from disk. " + \
                       "Do you really want to do it? (Yes/No)"
            confirm = input(question)
            if confirm not in ('y', 'Y', 'yes', 'Yes'):
                return
        _, errors = self.rest.flush_bucket(opts.bucket_name)
        _exit_if_errors(errors)
        _success("Bucket flushed")
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-bucket-flush" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Flush all data from disk for a given bucket"
class BucketList(Subcommand):
    """The bucket list subcommand"""
    def __init__(self):
        super(BucketList, self).__init__()
        self.parser.prog = "couchbase-cli bucket-list"
    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Print all buckets, either as JSON or as a human-readable summary."""
        result, errors = self.rest.list_buckets(extended=True)
        _exit_if_errors(errors)
        if opts.output == 'json':
            print(json.dumps(result))
        else:
            for bucket in result:
                print(f'{bucket["name"]}')
                print(f' bucketType: {bucket["bucketType"]}')
                print(f' numReplicas: {bucket["replicaNumber"]}')
                print(f' ramQuota: {bucket["quota"]["ram"]}')
                print(f' ramUsed: {bucket["basicStats"]["memUsed"]}')
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-bucket-list" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "List all buckets in a cluster"
class CollectLogsStart(Subcommand):
    """The collect-logs-start subcommand"""
    def __init__(self):
        super(CollectLogsStart, self).__init__()
        self.parser.prog = "couchbase-cli collect-logs-start"
        group = self.parser.add_argument_group("Collect logs start options")
        group.add_argument("--all-nodes", dest="all_nodes", action="store_true",
                           default=False, help="Collect logs for all nodes")
        group.add_argument("--nodes", dest="nodes", metavar="<node_list>",
                           help="A comma separated list of nodes to collect logs from")
        group.add_argument("--redaction-level", dest="redaction_level", metavar="<none|partial>",
                           choices=["none", "partial"], help="Level of log redaction to apply")
        group.add_argument("--salt", dest="salt", metavar="<string>",
                           help="The salt to use to redact the log")
        group.add_argument("--output-directory", dest="output_dir", metavar="<directory>",
                           help="Output directory to place the generated logs file")
        group.add_argument("--temporary-directory", dest="tmp_dir", metavar="<directory>",
                           help="Temporary directory to use when generating the logs")
        group.add_argument("--upload", dest="upload", action="store_true",
                           default=False, help="Logs should be uploaded for Couchbase support")
        group.add_argument("--upload-host", dest="upload_host", metavar="<host>",
                           help="The host to upload logs to")
        group.add_argument("--upload-proxy", dest="upload_proxy", metavar="<proxy>",
                           help="The proxy to used to upload the logs via")
        group.add_argument("--customer", dest="upload_customer", metavar="<name>",
                           help="The name of the customer uploading logs")
        group.add_argument("--ticket", dest="upload_ticket", metavar="<num>",
                           help="The ticket number the logs correspond to")
    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate the option combination and kick off cluster log collection."""
        if not opts.nodes and not opts.all_nodes:
            _exit_if_errors(["Must specify either --all-nodes or --nodes"])
        if opts.nodes and opts.all_nodes:
            _exit_if_errors(["Cannot specify both --all-nodes and --nodes"])
        if opts.salt and opts.redaction_level != "partial":
            _exit_if_errors(["--redaction-level has to be set to 'partial' when --salt is specified"])
        # "*" tells ns_server to collect from every node.
        servers = opts.nodes
        if opts.all_nodes:
            servers = "*"
        if opts.upload:
            if not opts.upload_host:
                _exit_if_errors(["--upload-host is required when --upload is specified"])
            if not opts.upload_customer:
                # Message fixed to name the actual flag (--customer).
                _exit_if_errors(["--customer is required when --upload is specified"])
        else:
            # Messages fixed: these flags have no effect WITHOUT --upload, and
            # must be referred to by their real flag spellings.
            if opts.upload_host:
                _warning("--upload-host has no effect without specifying --upload")
            if opts.upload_customer:
                _warning("--customer has no effect without specifying --upload")
            if opts.upload_ticket:
                _warning("--ticket has no effect without specifying --upload")
            if opts.upload_proxy:
                _warning("--upload-proxy has no effect without specifying --upload")
        _, errors = self.rest.collect_logs_start(servers, opts.redaction_level, opts.salt, opts.output_dir,
                                                 opts.tmp_dir, opts.upload, opts.upload_host, opts.upload_proxy,
                                                 opts.upload_customer, opts.upload_ticket)
        _exit_if_errors(errors)
        _success("Log collection started")
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-collect-logs-start" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Start cluster log collection"
class CollectLogsStatus(Subcommand):
    """The collect-logs-status subcommand"""
    def __init__(self):
        super(CollectLogsStatus, self).__init__()
        self.parser.prog = "couchbase-cli collect-logs-status"
    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Print the status of any running/finished log collection tasks."""
        tasks, errors = self.rest.get_tasks()
        _exit_if_errors(errors)
        found = False
        for task in tasks:
            if isinstance(task, dict) and 'type' in task and task['type'] == 'clusterLogsCollection':
                found = True
                self._print_task(task)
        if not found:
            print("No log collection tasks were found")
    def _print_task(self, task):
        """Pretty-print a single clusterLogsCollection task, per node."""
        print(f'Status: {task["status"]}')
        if 'perNode' in task:
            print("Details:")
            for node, node_status in task["perNode"].items():
                print('\tNode:', node)
                print('\tStatus:', node_status['status'])
                for field in ["path", "statusCode", "url", "uploadStatusCode", "uploadOutput"]:
                    if field in node_status:
                        print('\t', field, ":", node_status[field])
            print()
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-collect-logs-status" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "View the status of cluster log collection"
class CollectLogsStop(Subcommand):
    """The collect-logs-stop subcommand"""
    def __init__(self):
        super(CollectLogsStop, self).__init__()
        self.parser.prog = "couchbase-cli collect-logs-stop"
    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Stop any in-progress cluster log collection."""
        _, errors = self.rest.collect_logs_stop()
        _exit_if_errors(errors)
        _success("Log collection stopped")
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-collect-logs-stop" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Stop cluster log collection"
class Failover(Subcommand):
    """The failover subcommand"""
    def __init__(self):
        super(Failover, self).__init__()
        self.parser.prog = "couchbase-cli failover"
        group = self.parser.add_argument_group("Failover options")
        group.add_argument("--server-failover", dest="servers_to_failover", metavar="<server_list>",
                           required=True, help="A list of servers to fail over")
        group.add_argument("--hard", dest="hard", action="store_true",
                           help="Hard failover the server")
        group.add_argument("--force", dest="force", action="store_true",
                           help="Force a hard failover")
        group.add_argument("--no-progress-bar", dest="no_bar", action="store_true",
                           default=False, help="Disables the progress bar")
        group.add_argument("--no-wait", dest="wait", action="store_false",
                           default=True, help="Don't wait for rebalance completion")
    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Fail over the given servers, optionally waiting on the graceful rebalance."""
        if opts.force and not opts.hard:
            _exit_if_errors(["--hard is required with --force flag"])
        opts.servers_to_failover = apply_default_port(opts.servers_to_failover)
        _, errors = self.rest.failover(opts.servers_to_failover, opts.hard, opts.force)
        _exit_if_errors(errors)
        if not opts.hard:
            # Graceful failover runs as a rebalance; give ns_server a moment to
            # register the task before polling its progress.
            time.sleep(1)
            if opts.wait:
                bar = TopologyProgressBar(self.rest, 'Gracefully failing over', opts.no_bar)
                errors = bar.show()
                _exit_if_errors(errors)
                _success("Server failed over")
            else:
                _success("Server failed over started")
        else:
            _success("Server failed over")
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-failover" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Failover one or more servers"
class GroupManage(Subcommand):
    """The group manage subcommand"""
    def __init__(self):
        super(GroupManage, self).__init__()
        self.parser.prog = "couchbase-cli group-manage"
        group = self.parser.add_argument_group("Group manage options")
        group.add_argument("--create", dest="create", action="store_true",
                           default=None, help="Create a new server group")
        group.add_argument("--delete", dest="delete", action="store_true",
                           default=None, help="Delete a server group")
        group.add_argument("--list", dest="list", action="store_true",
                           default=None, help="List all server groups")
        group.add_argument("--rename", dest="rename", help="Rename a server group. It takes the new name of the group.")
        group.add_argument("--group-name", dest="name", metavar="<name>",
                           help="The name of the server group")
        group.add_argument("--move-servers", dest="move_servers", metavar="<server_list>",
                           help="A list of servers to move between groups")
        group.add_argument("--from-group", dest="from_group", metavar="<group>",
                           help="The group to move servers from")
        group.add_argument("--to-group", dest="to_group", metavar="<group>",
                           help="The group to move servers to")
    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Dispatch to exactly one of the create/delete/list/rename/move actions."""
        # store_true flags default to None (not False) so this count works
        # uniformly for flag and value options.
        cmds = [opts.create, opts.delete, opts.list, opts.rename, opts.move_servers]
        if sum(cmd is not None for cmd in cmds) == 0:
            _exit_if_errors(["Must specify one of the following: --create, "
                             + "--delete, --list, --move-servers, or --rename"])
        elif sum(cmd is not None for cmd in cmds) != 1:
            _exit_if_errors(["Only one of the following may be specified: --create"
                             + ", --delete, --list, --move-servers, or --rename"])
        if opts.create:
            self._create(opts)
        elif opts.delete:
            self._delete(opts)
        elif opts.list:
            self._list(opts)
        elif opts.rename:
            self._rename(opts)
        elif opts.move_servers is not None:
            self._move(opts)
    def _create(self, opts):
        """Create a new server group named opts.name."""
        if opts.name is None:
            _exit_if_errors(["--group-name is required with --create flag"])
        _, errors = self.rest.create_server_group(opts.name)
        _exit_if_errors(errors)
        _success("Server group created")
    def _delete(self, opts):
        """Delete the server group named opts.name."""
        if opts.name is None:
            _exit_if_errors(["--group-name is required with --delete flag"])
        _, errors = self.rest.delete_server_group(opts.name)
        _exit_if_errors(errors)
        _success("Server group deleted")
    def _list(self, opts):
        """List all server groups, or just opts.name when it is given."""
        groups, errors = self.rest.get_server_groups()
        _exit_if_errors(errors)
        found = False
        for group in groups["groups"]:
            if opts.name is None or opts.name == group['name']:
                found = True
                print(group['name'])
                for node in group['nodes']:
                    print(f' server: {node["hostname"]}')
        if not found and opts.name:
            _exit_if_errors([f'Invalid group name: {opts.name}'])
    def _move(self, opts):
        """Move opts.move_servers from opts.from_group to opts.to_group."""
        if opts.from_group is None:
            _exit_if_errors(["--from-group is required with --move-servers"])
        if opts.to_group is None:
            _exit_if_errors(["--to-group is required with --move-servers"])
        servers = apply_default_port(opts.move_servers)
        _, errors = self.rest.move_servers_between_groups(servers, opts.from_group, opts.to_group)
        _exit_if_errors(errors)
        _success("Servers moved between groups")
    def _rename(self, opts):
        """Rename the group opts.name to opts.rename."""
        if opts.name is None:
            _exit_if_errors(["--group-name is required with --rename option"])
        _, errors = self.rest.rename_server_group(opts.name, opts.rename)
        _exit_if_errors(errors)
        _success("Server group renamed")
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-group-manage" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Manage server groups"
class HostList(Subcommand):
    """The host list subcommand"""
    def __init__(self):
        super(HostList, self).__init__()
        self.parser.prog = "couchbase-cli host-list"
    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Print the configured hostname of every node in the cluster."""
        result, errors = self.rest.pools('default')
        _exit_if_errors(errors)
        if opts.output == 'json':
            nodes_out = {'nodes': []}
            for node in result['nodes']:
                nodes_out['nodes'].append(node['configuredHostname'])
            print(json.dumps(nodes_out))
        else:
            for node in result['nodes']:
                print(node['configuredHostname'])
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-host-list" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "List all hosts in a cluster"
class ResetCipherSuites(LocalSubcommand):
    """The reset cipher suites subcommand """
    def __init__(self):
        super(ResetCipherSuites, self).__init__()
        self.parser.prog = "couchbase-cli reset-cipher-suites"
        group = self.parser.add_argument_group("Reset Cipher Suites")
        group.add_argument("--force", action='store_true', default=False, help="Force resetting of the cipher suites")
        group.add_argument("-P", "--port", metavar="<port>", default="8091",
                           help="The REST API port, defaults to 8091")
    def execute(self, opts):
        """Reset the cipher suites to the default, prompting unless --force is given.

        Authenticates against the local node with the on-disk @localtoken, so
        this must run on the server itself.
        """
        token = _exit_on_file_read_failure(os.path.join(opts.config_path, "localtoken")).rstrip()
        rest = ClusterManager("http://127.0.0.1:" + opts.port, "@localtoken", token)
        check_cluster_initialized(rest)
        check_versions(rest)
        if not opts.force:
            confirm = str(input("Are you sure that the cipher should be reset?: Y/[N]"))
            if confirm != "Y":
                _success("Cipher suites have not been reset to default")
                # Bug fix: without this return the suites were reset anyway,
                # directly contradicting the message just printed.
                return
        _, errors = rest.reset_cipher_suites()
        _exit_if_errors(errors)
        _success("Cipher suites have been reset to the default")
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-reset-cipher-suites" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        # Typo fix: "Rests" -> "Resets".
        return "Resets cipher suites to the default"
class MasterPassword(LocalSubcommand):
    """The master password subcommand"""
    def __init__(self):
        super(MasterPassword, self).__init__()
        self.parser.prog = "couchbase-cli master-password"
        group = self.parser.add_argument_group("Master password options")
        group.add_argument("--send-password", dest="send_password", metavar="<password>",
                           required=False, action=CBNonEchoedAction, envvar=None,
                           prompt_text="Enter master password:",
                           help="Sends the master password to start the server")
    def execute(self, opts):
        """Send the master password to the local babysitter so the node can boot."""
        if opts.send_password is not None:
            # The erl binary must be resolvable, so prepend the Couchbase bin
            # directory to PATH using the platform's separator.
            path = [CB_BIN_PATH, os.environ['PATH']]
            if os.name == 'posix':
                os.environ['PATH'] = ':'.join(path)
            else:
                os.environ['PATH'] = ';'.join(path)
            cookiefile = os.path.join(opts.config_path, "couchbase-server.babysitter.cookie")
            if not os.path.isfile(cookiefile):
                _exit_if_errors(["The node is down"])
            cookie = _exit_on_file_read_failure(cookiefile, "Insufficient privileges to send master password - Please"
                                                            " execute this command as a operating system user who has"
                                                            " file system read permission on the Couchbase Server "
                                                            " configuration").rstrip()
            nodefile = os.path.join(opts.config_path, "couchbase-server.babysitter.node")
            node = _exit_on_file_read_failure(nodefile).rstrip()
            self.prompt_for_master_pwd(node, cookie, opts.send_password, opts.config_path)
        else:
            _exit_if_errors(["No parameters set"])
    def prompt_for_master_pwd(self, node, cookie, password, cb_cfg_path):
        """Deliver `password` to `node` via a short-lived erlang node.

        Re-prompts (recursively) with a fresh getpass prompt when the
        encryption service reports an incorrect password (rc 101).
        """
        ns_server_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_server", "ebin")
        babystr_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_babysitter", "ebin")
        inetrc_file = os.path.join(CB_ETC_PATH, "hosts.cfg")
        dist_cfg_file = os.path.join(cb_cfg_path, "config", "dist_cfg")
        if password == '':
            password = getpass.getpass("\nEnter master password:")
        name = 'executioner@cb.local'
        args = ['-pa', ns_server_ebin_path, babystr_ebin_path, '-noinput', '-name', name,
                '-proto_dist', 'cb', '-epmd_module', 'cb_epmd',
                '-kernel', 'inetrc', f'"{inetrc_file}"', 'dist_config_file', f'"{dist_cfg_file}"',
                '-setcookie', cookie,
                '-run', 'encryption_service', 'remote_set_password', node, password]
        rc, out, err = self.run_process("erl", args)
        if rc == 0:
            print("SUCCESS: Password accepted. Node started booting.")
        elif rc == 101:
            print("Incorrect password.")
            self.prompt_for_master_pwd(node, cookie, '', cb_cfg_path)
        elif rc == 102:
            _exit_if_errors(["Password was already supplied"])
        elif rc == 103:
            _exit_if_errors(["The node is down"])
        elif rc == 104:
            _exit_if_errors(["Incorrect password. Node shuts down."])
        else:
            _exit_if_errors([f'Unknown error: {rc} {out}, {err}'])
    def run_process(self, name, args):
        """Run `name` with `args`, returning (returncode, stdout, stderr) as bytes."""
        try:
            if os.name == "nt":
                name = name + ".exe"
            args.insert(0, name)
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # communicate() reads both pipes concurrently; the previous
            # sequential stdout.read()/stderr.read() could deadlock if the
            # child filled the stderr pipe while we blocked on stdout.
            output, error = p.communicate()
            return p.returncode, output, error
        except OSError:
            _exit_if_errors([f'Could not locate the {name} executable'])
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-master-password" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Unlocking the master password"
class NodeInit(Subcommand):
    """The node initialization subcommand"""
    def __init__(self):
        super(NodeInit, self).__init__()
        self.parser.prog = "couchbase-cli node-init"
        group = self.parser.add_argument_group("Node initialization options")
        group.add_argument("--node-init-data-path", dest="data_path", metavar="<path>",
                           help="The path to store database files")
        group.add_argument("--node-init-index-path", dest="index_path", metavar="<path>",
                           help="The path to store index files")
        group.add_argument("--node-init-analytics-path", dest="analytics_path", metavar="<path>", action="append",
                           help="The path to store analytics files (supply one parameter for each path desired)")
        group.add_argument("--node-init-eventing-path", dest="eventing_path", metavar="<path>",
                           help="The path to store eventing files")
        group.add_argument("--node-init-java-home", dest="java_home", metavar="<path>",
                           help="The path of the Java Runtime Environment (JRE) to use on this server")
        group.add_argument("--node-init-hostname", dest="hostname", metavar="<hostname>",
                           help="Sets the hostname for this server")
        group.add_argument("--ipv6", dest="ipv6", action="store_true", default=False,
                           help="Configure the node to communicate via ipv6")
        group.add_argument("--ipv4", dest="ipv4", action="store_true", default=False,
                           help="Configure the node to communicate via ipv4")
    @rest_initialiser()
    def execute(self, opts):
        """Apply node-level settings (paths, hostname, address family) via REST."""
        # Cluster does not need to be initialized for this command.
        # Bug fix: --ipv4/--ipv6 default to False (store_true), never None, so
        # the previous `is None` comparisons made this guard unreachable and
        # an empty invocation silently did nothing.
        if (opts.data_path is None and opts.index_path is None and opts.analytics_path is None
                and opts.eventing_path is None and opts.java_home is None and opts.hostname is None
                and not opts.ipv6 and not opts.ipv4):
            _exit_if_errors(["No node initialization parameters specified"])
        if opts.ipv4 and opts.ipv6:
            _exit_if_errors(["Use either --ipv4 or --ipv6"])
        if opts.ipv4:
            afamily = 'ipv4'
        elif opts.ipv6:
            afamily = 'ipv6'
        else:
            afamily = None
        _, errors = self.rest.node_init(hostname=opts.hostname,
                                        afamily=afamily,
                                        data_path=opts.data_path,
                                        index_path=opts.index_path,
                                        cbas_path=opts.analytics_path,
                                        eventing_path=opts.eventing_path,
                                        java_home=opts.java_home)
        _exit_if_errors(errors)
        _success("Node initialized")
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-node-init" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Set node specific settings"
class Rebalance(Subcommand):
    """The rebalance subcommand"""
    def __init__(self):
        super(Rebalance, self).__init__()
        self.parser.prog = "couchbase-cli rebalance"
        group = self.parser.add_argument_group("Rebalance options")
        group.add_argument("--server-remove", dest="server_remove", metavar="<server_list>",
                           help="A list of servers to remove from the cluster")
        group.add_argument("--no-progress-bar", dest="no_bar", action="store_true",
                           default=False, help="Disables the progress bar")
        group.add_argument("--no-wait", dest="wait", action="store_false",
                           default=True, help="Don't wait for rebalance completion")
    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Start a rebalance, optionally ejecting nodes and waiting for completion."""
        eject_nodes = []
        if opts.server_remove:
            eject_nodes = apply_default_port(opts.server_remove)
        _, errors = self.rest.rebalance(eject_nodes)
        _exit_if_errors(errors)
        # Give ns_server a moment to register the task before polling progress.
        time.sleep(1)
        if opts.wait:
            bar = TopologyProgressBar(self.rest, 'Rebalancing', opts.no_bar)
            errors = bar.show()
            _exit_if_errors(errors)
            _success("Rebalance complete")
        else:
            _success("Rebalance started")
    @staticmethod
    def get_man_page_name():
        # Parenthesised so the ternary picks only the extension; without the
        # parentheses Windows would get just ".html".
        return "couchbase-cli-rebalance" + (".1" if os.name != "nt" else ".html")
    @staticmethod
    def get_description():
        return "Start a cluster rebalancing"
class RebalanceStatus(Subcommand):
    """The rebalance status subcommand: print current rebalance status as JSON."""

    def __init__(self):
        super(RebalanceStatus, self).__init__()
        self.parser.prog = "couchbase-cli rebalance-status"

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Fetch and pretty-print the cluster's rebalance status."""
        status, errors = self.rest.rebalance_status()
        _exit_if_errors(errors)
        print(json.dumps(status, indent=2))

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-rebalance-status" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Show rebalance status"
class RebalanceStop(Subcommand):
    """The rebalance stop subcommand: abort an in-progress rebalance."""

    def __init__(self):
        super(RebalanceStop, self).__init__()
        self.parser.prog = "couchbase-cli rebalance-stop"

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Ask the cluster manager to stop the current rebalance."""
        _, errors = self.rest.stop_rebalance()
        _exit_if_errors(errors)
        _success("Rebalance stopped")

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-rebalance-stop" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Stop a rebalance"
class Recovery(Subcommand):
    """The recovery command: mark failed-over servers for delta or full recovery."""

    def __init__(self):
        super(Recovery, self).__init__()
        self.parser.prog = "couchbase-cli recovery"
        group = self.parser.add_argument_group("Recovery options")
        group.add_argument("--server-recovery", dest="servers", metavar="<server_list>",
                           required=True, help="The list of servers to recover")
        group.add_argument("--recovery-type", dest="recovery_type", metavar="type",
                           choices=["delta", "full"], default="delta",
                           help="The recovery type (delta or full)")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Request recovery for each listed server, exiting on the first error."""
        servers = apply_default_port(opts.servers)
        for server in servers:
            _, errors = self.rest.recovery(server, opts.recovery_type)
            _exit_if_errors(errors)
        _success("Servers recovered")

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-recovery" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Recover one or more servers"
class ResetAdminPassword(LocalSubcommand):
    """The reset admin password command: set or regenerate the admin password.

    Runs locally on a cluster node, authenticating with the node's localtoken
    file rather than normal REST credentials.
    """

    def __init__(self):
        super(ResetAdminPassword, self).__init__()
        self.parser.prog = "couchbase-cli reset-admin-password"
        group = self.parser.add_argument_group("Reset password options")
        group.add_argument("--new-password", dest="new_password", metavar="<password>",
                           required=False, action=CBNonEchoedAction, envvar=None,
                           prompt_text="Enter new administrator password:",
                           confirm_text="Confirm new administrator password:",
                           help="The new administrator password")
        group.add_argument("--regenerate", dest="regenerate", action="store_true",
                           help="Generates a random administrator password")
        group.add_argument("-P", "--port", metavar="<port>", default="8091",
                           help="The REST API port, defaults to 8091")

    def execute(self, opts):
        """Change the administrator password via the local-only @localtoken user."""
        token = _exit_on_file_read_failure(os.path.join(opts.config_path, "localtoken")).rstrip()
        rest = ClusterManager("http://127.0.0.1:" + opts.port, "@localtoken", token)
        check_cluster_initialized(rest)
        check_versions(rest)
        if opts.new_password is not None and opts.regenerate:
            _exit_if_errors(["Cannot specify both --new-password and --regenerate at the same time"])
        elif opts.new_password is not None:
            _, errors = rest.set_admin_password(opts.new_password)
            _exit_if_errors(errors)
            _success("Administrator password changed")
        elif opts.regenerate:
            result, errors = rest.regenerate_admin_password()
            _exit_if_errors(errors)
            # Print the generated password so the operator can record it.
            print(result["password"])
        else:
            _exit_if_errors(["No parameters specified"])

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-reset-admin-password" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Resets the administrator password"
class ServerAdd(Subcommand):
    """The server add command: add one or more servers to the cluster."""

    def __init__(self):
        super(ServerAdd, self).__init__()
        self.parser.prog = "couchbase-cli server-add"
        group = self.parser.add_argument_group("Server add options")
        group.add_argument("--server-add", dest="servers", metavar="<server_list>", required=True,
                           help="The list of servers to add")
        group.add_argument("--server-add-username", dest="server_username", metavar="<username>",
                           required=True, help="The username for the server to add")
        group.add_argument("--server-add-password", dest="server_password", metavar="<password>",
                           required=True, help="The password for the server to add")
        group.add_argument("--group-name", dest="group_name", metavar="<name>",
                           help="The server group to add this server into")
        group.add_argument("--services", dest="services", default="data", metavar="<services>",
                           help="The services this server will run")
        group.add_argument("--index-storage-setting", dest="index_storage_mode", metavar="<mode>",
                           choices=["default", "memopt"], help="The index storage mode")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Validate services/index settings, then add each server to the cluster."""
        if not self.enterprise and opts.index_storage_mode == 'memopt':
            _exit_if_errors(["memopt option for --index-storage-setting can only be configured on enterprise edition"])
        opts.services, errors = process_services(opts.services, self.enterprise)
        _exit_if_errors(errors)
        settings, errors = self.rest.index_settings()
        _exit_if_errors(errors)
        # If the index service is requested and no storage mode was ever set,
        # fall back to the default mode.
        if opts.index_storage_mode is None and settings['storageMode'] == "" and "index" in opts.services:
            opts.index_storage_mode = "default"
        # For supporting the default index backend changing from forestdb to plasma in Couchbase 5.0
        default = "plasma"
        # NOTE(review): parses as (mode=="default" and storage=="forestdb") or not enterprise
        # due to and/or precedence — confirm this grouping is intended.
        if opts.index_storage_mode == "default" and settings['storageMode'] == "forestdb" or not self.enterprise:
            default = "forestdb"
        if opts.index_storage_mode:
            param = index_storage_mode_to_param(opts.index_storage_mode, default)
            _, errors = self.rest.set_index_settings(param, None, None, None, None, None, None, None)
            _exit_if_errors(errors)
        servers = opts.servers.split(',')
        for server in servers:
            _, errors = self.rest.add_server(server, opts.group_name, opts.server_username, opts.server_password,
                                             opts.services)
            _exit_if_errors(errors)
        _success("Server added")

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-server-add" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Add servers to the cluster"
class ServerEshell(Subcommand):
    """The server eshell subcommand: open an Erlang remote shell to a cluster VM."""

    def __init__(self):
        super(ServerEshell, self).__init__()
        self.parser.prog = "couchbase-cli server-eshell"
        group = self.parser.add_argument_group("Server eshell options")
        group.add_argument("--vm", dest="vm", default="ns_server", metavar="<name>",
                           help="The vm to connect to")
        group.add_argument("--erl-path", dest="erl_path", metavar="<path>", default=CB_BIN_PATH,
                           help="Override the path to the erl executable")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Launch `erl -remsh` against the requested VM (ns_server/babysitter/couchdb)."""
        # Cluster does not need to be initialized for this command
        result, errors = self.rest.node_info()
        _exit_if_errors(errors)
        node = result['otpNode']
        cookie = result['otpCookie']
        if opts.vm != 'ns_server':
            # Non-ns_server VMs share the babysitter's cookie and use derived node names.
            cookie, errors = self.rest.get_babysitter_cookie()
            _exit_if_errors(errors)
            [short, _] = node.split('@')
            if opts.vm == 'babysitter':
                node = f'babysitter_of_{short}@cb.local'
            elif opts.vm == 'couchdb':
                node = f'couchdb_{short}@cb.local'
            else:
                _exit_if_errors([f'Unknown vm type `{opts.vm}`'])
        # Random local node name so concurrent shells don't collide.
        rand_chars = ''.join(random.choice(string.ascii_letters) for _ in range(20))
        name = f'ctl-{rand_chars}@127.0.0.1'
        cb_erl = os.path.join(opts.erl_path, 'erl')
        if os.path.isfile(cb_erl):
            path = cb_erl
        else:
            _warning("Cannot locate Couchbase erlang. Attempting to use non-Couchbase erlang")
            path = 'erl'
        inetrc_file = os.path.join(CB_ETC_PATH, 'hosts.cfg')
        if os.path.isfile(inetrc_file):
            inetrc_opt = ['-kernel', 'inetrc', f'"{inetrc_file}"']
        else:
            inetrc_opt = []
        ns_server_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_server", "ebin")
        # The dist config file must exist while erl runs, so keep everything
        # inside the NamedTemporaryFile context.
        with tempfile.NamedTemporaryFile() as temp:
            temp.write(f'[{{preferred_local_proto,{result["addressFamily"]}_tcp_dist}}].'.encode())
            temp.flush()
            temp_name = temp.name
            args = [path, '-name', name, '-setcookie', cookie, '-hidden', '-remsh', node, '-proto_dist', 'cb',
                    '-epmd_module', 'cb_epmd', '-pa', ns_server_ebin_path, '-kernel', 'dist_config_file',
                    f'"{temp_name}"'] + inetrc_opt
            if opts.debug:
                print(f'Running {" ".join(args)}')
            try:
                subprocess.call(args)
            except OSError:
                _exit_if_errors(["Unable to find the erl executable"])

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-server-eshell" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Opens a shell to the Couchbase cluster manager"

    @staticmethod
    def is_hidden():
        # Internal command not recommended for production use
        return True
class ServerInfo(Subcommand):
    """The server info subcommand: print this node's details as JSON."""

    def __init__(self):
        super(ServerInfo, self).__init__()
        self.parser.prog = "couchbase-cli server-info"

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Fetch and pretty-print node information."""
        # Cluster does not need to be initialized for this command
        result, errors = self.rest.node_info()
        _exit_if_errors(errors)
        print(json.dumps(result, sort_keys=True, indent=2))

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-server-info" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Show details of a node in the cluster"
class ServerList(Subcommand):
    """The server list subcommand: print one line per cluster node."""

    def __init__(self):
        super(ServerList, self).__init__()
        self.parser.prog = "couchbase-cli server-list"

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Print otpNode, hostname, status and membership for every node."""
        result, errors = self.rest.pools('default')
        _exit_if_errors(errors)
        for node in result['nodes']:
            # A node entry without an otpNode means the node data is unusable.
            if node.get('otpNode') is None:
                raise Exception("could not access node")
            print(node['otpNode'], node['hostname'], node['status'], node['clusterMembership'])

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-server-list" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "List all nodes in a cluster"
class ServerReadd(Subcommand):
    """The server readd subcommand (Deprecated): re-add failed-over servers."""

    def __init__(self):
        super(ServerReadd, self).__init__()
        self.parser.prog = "couchbase-cli server-readd"
        group = self.parser.add_argument_group("Server re-add options")
        group.add_argument("--server-add", dest="servers", metavar="<server_list>", required=True,
                           help="The list of servers to recover")
        # The parameters are unused, but kept for backwards compatibility
        group.add_argument("--server-username", dest="server_username", metavar="<username>",
                           help="The admin username for the server")
        group.add_argument("--server-password", dest="server_password", metavar="<password>",
                           help="The admin password for the server")
        group.add_argument("--group-name", dest="name", metavar="<name>",
                           help="The name of the server group")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Re-add each listed server, warning that `recovery` should be used instead."""
        _deprecated("Please use the recovery command instead")
        servers = apply_default_port(opts.servers)
        for server in servers:
            _, errors = self.rest.readd_server(server)
            _exit_if_errors(errors)
        _success("Servers recovered")

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-server-readd" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Add failed server back to the cluster"

    @staticmethod
    def is_hidden():
        # Deprecated command in 4.6, hidden in 5.0, pending removal
        return True
class SettingAlert(Subcommand):
    """The setting alert subcommand: configure email alerting."""

    def __init__(self):
        super(SettingAlert, self).__init__()
        self.parser.prog = "couchbase-cli setting-alert"
        group = self.parser.add_argument_group("Alert settings")
        group.add_argument("--enable-email-alert", dest="enabled", metavar="<1|0>", required=True,
                           choices=["0", "1"], help="Enable/disable email alerts")
        group.add_argument("--email-recipients", dest="email_recipients", metavar="<email_list>",
                           help="A comma separated list of email addresses")
        group.add_argument("--email-sender", dest="email_sender", metavar="<email_addr>",
                           help="The sender email address")
        group.add_argument("--email-user", dest="email_username", metavar="<username>",
                           default="", help="The email server username")
        group.add_argument("--email-password", dest="email_password", metavar="<password>",
                           default="", help="The email server password")
        group.add_argument("--email-host", dest="email_host", metavar="<host>",
                           help="The email server host")
        group.add_argument("--email-port", dest="email_port", metavar="<port>",
                           help="The email server port")
        group.add_argument("--enable-email-encrypt", dest="email_encrypt", metavar="<1|0>",
                           choices=["0", "1"], help="Enable SSL encryption for emails")
        group.add_argument("--alert-auto-failover-node", dest="alert_af_node",
                           action="store_true", help="Alert when a node is auto-failed over")
        group.add_argument("--alert-auto-failover-max-reached", dest="alert_af_max_reached",
                           action="store_true",
                           help="Alert when the max number of auto-failover nodes was reached")
        group.add_argument("--alert-auto-failover-node-down", dest="alert_af_node_down",
                           action="store_true",
                           help="Alert when a node wasn't auto-failed over because other nodes "
                           + "were down")
        group.add_argument("--alert-auto-failover-cluster-small", dest="alert_af_small",
                           action="store_true",
                           help="Alert when a node wasn't auto-failed over because cluster was"
                           + " too small")
        group.add_argument("--alert-auto-failover-disable", dest="alert_af_disable",
                           action="store_true",
                           help="Alert when a node wasn't auto-failed over because auto-failover"
                           + " is disabled")
        group.add_argument("--alert-ip-changed", dest="alert_ip_changed", action="store_true",
                           help="Alert when a nodes IP address changed")
        group.add_argument("--alert-disk-space", dest="alert_disk_space", action="store_true",
                           help="Alert when disk usage on a node reaches 90%%")
        group.add_argument("--alert-meta-overhead", dest="alert_meta_overhead", action="store_true",
                           help="Alert when metadata overhead is more than 50%%")
        group.add_argument("--alert-meta-oom", dest="alert_meta_oom", action="store_true",
                           help="Alert when all bucket memory is used for metadata")
        group.add_argument("--alert-write-failed", dest="alert_write_failed", action="store_true",
                           help="Alert when writing data to disk has failed")
        group.add_argument("--alert-audit-msg-dropped", dest="alert_audit_dropped",
                           action="store_true", help="Alert when writing event to audit log failed")
        group.add_argument("--alert-indexer-max-ram", dest="alert_indexer_max_ram",
                           action="store_true", help="Alert when indexer is using all of its allocated memory")
        group.add_argument("--alert-timestamp-drift-exceeded", dest="alert_cas_drift",
                           action="store_true", help="Alert when clocks on two servers are more than five seconds"
                           + "apart")
        group.add_argument("--alert-communication-issue", dest="alert_communication_issue",
                           action="store_true", help="Alert when nodes are experiencing communication issues")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate required email options, assemble the alert list and apply it."""
        if opts.enabled == "1":
            # These four options are mandatory when alerts are being enabled.
            if opts.email_recipients is None:
                # Message references the actual flag name (--email-recipients).
                _exit_if_errors(["--email-recipients must be set when email alerts are enabled"])
            if opts.email_sender is None:
                _exit_if_errors(["--email-sender must be set when email alerts are enabled"])
            if opts.email_host is None:
                _exit_if_errors(["--email-host must be set when email alerts are enabled"])
            if opts.email_port is None:
                _exit_if_errors(["--email-port must be set when email alerts are enabled"])
        # Map each enabled CLI flag to its internal alert identifier.
        alerts = []
        if opts.alert_af_node:
            alerts.append('auto_failover_node')
        if opts.alert_af_max_reached:
            alerts.append('auto_failover_maximum_reached')
        if opts.alert_af_node_down:
            alerts.append('auto_failover_other_nodes_down')
        if opts.alert_af_small:
            alerts.append('auto_failover_cluster_too_small')
        if opts.alert_af_disable:
            alerts.append('auto_failover_disabled')
        if opts.alert_ip_changed:
            alerts.append('ip')
        if opts.alert_disk_space:
            alerts.append('disk')
        if opts.alert_meta_overhead:
            alerts.append('overhead')
        if opts.alert_meta_oom:
            alerts.append('ep_oom_errors')
        if opts.alert_write_failed:
            alerts.append('ep_item_commit_failed')
        if opts.alert_audit_dropped:
            alerts.append('audit_dropped_events')
        if opts.alert_indexer_max_ram:
            alerts.append('indexer_ram_max_usage')
        if opts.alert_cas_drift:
            alerts.append('ep_clock_cas_drift_threshold_exceeded')
        if opts.alert_communication_issue:
            alerts.append('communication_issue')
        enabled = "true"
        if opts.enabled == "0":
            enabled = "false"
        email_encrypt = "false"
        if opts.email_encrypt == "1":
            email_encrypt = "true"
        _, errors = self.rest.set_alert_settings(enabled, opts.email_recipients, opts.email_sender, opts.email_username,
                                                 opts.email_password, opts.email_host, opts.email_port, email_encrypt,
                                                 ",".join(alerts))
        _exit_if_errors(errors)
        _success("Email alert settings modified")

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-setting-alert" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify email alert settings"
class SettingAudit(Subcommand):
    """The settings audit subcommand: list, get or set audit configuration."""

    def __init__(self):
        super(SettingAudit, self).__init__()
        self.parser.prog = "couchbase-cli setting-audit"
        self.parser.description = "Available only in Couchbase Server Enterprise Edition"
        group = self.parser.add_argument_group("Audit settings")
        group.add_argument("--list-filterable-events", dest="list_events", action="store_true",
                           help="Retrieve a list of filterable event IDs and the descriptions")
        group.add_argument("--get-settings", dest="get_settings", action="store_true",
                           help="Retrieve current audit settings")
        group.add_argument("--set", dest="set_settings", action="store_true",
                           help="Set current audit settings")
        group.add_argument("--audit-enabled", dest="enabled", metavar="<1|0>", choices=["0", "1"],
                           help="Enable/disable auditing")
        group.add_argument("--audit-log-path", dest="log_path", metavar="<path>",
                           help="The audit log path")
        group.add_argument("--audit-log-rotate-interval", dest="rotate_interval", type=int,
                           metavar="<seconds>", help="The audit log rotate interval")
        group.add_argument("--audit-log-rotate-size", dest="rotate_size", type=int,
                           metavar="<bytes>", help="The audit log rotate size")
        group.add_argument("--disabled-users", dest="disabled_users", default=None,
                           help="A comma-separated list of users to ignore events from")
        group.add_argument("--disable-events", dest="disable_events", default=None,
                           help="A comma-separated list of audit-event IDs to not audit")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Dispatch to exactly one of: list events, get settings, set settings."""
        flags = sum([opts.list_events, opts.get_settings, opts.set_settings])
        if flags != 1:
            _exit_if_errors(["One of the following is required: --list-filterable-events, --get-settings or --set"])
        if opts.list_events:
            descriptors, errors = self.rest.get_id_descriptors()
            _exit_if_errors(errors)
            if opts.output == 'json':
                print(json.dumps(descriptors, indent=4))
                return
            self.format_descriptors_in_table(descriptors)
        elif opts.get_settings:
            audit_settings, errors = self.rest.get_audit_settings()
            _exit_if_errors(errors)
            if opts.output == 'json':
                print(json.dumps(audit_settings, indent=4))
                return
            descriptors, errors = self.rest.get_id_descriptors()
            _exit_if_errors(errors)
            self.format_audit_settings(audit_settings, descriptors)
        elif opts.set_settings:
            if not (opts.enabled or opts.log_path or opts.rotate_interval or opts.rotate_size
                    or opts.disable_events is not None or opts.disabled_users is not None):
                _exit_if_errors(["At least one of [--audit-enabled, --audit-log-path, --audit-log-rotate-interval,"
                                 " --audit-log-rotate-size, --disabled-users, --disable-events] is required with"
                                 " --set"])
            if opts.enabled == "1":
                opts.enabled = "true"
            elif opts.enabled == "0":
                opts.enabled = "false"
            _, errors = self.rest.set_audit_settings(opts.enabled, opts.log_path, opts.rotate_interval,
                                                     opts.rotate_size, opts.disable_events, opts.disabled_users)
            _exit_if_errors(errors)
            _success("Audit settings modified")

    @staticmethod
    def format_audit_settings(audit_settings, json_descriptors):
        """Print audit settings and, when auditing is enabled, a per-event table."""
        print(f'Audit enabled: {audit_settings["auditdEnabled"]}')
        print(f'UUID: {audit_settings["uid"]}')
        print(f'Log path: {audit_settings["logPath"] if "logPath" in audit_settings else "N/A"}')
        print(f'Rotate interval: {audit_settings["rotateInterval"]}')
        print(f'Rotate size: {audit_settings["rotateSize"]}')
        print(f'Disabled users: {audit_settings["disabledUsers"]}')
        if not audit_settings["auditdEnabled"]:
            return
        # Use sets so the per-event lookups below are O(1).
        disabled_ids = set(audit_settings['disabled'])
        json_descriptors.sort(key=itemgetter('module', 'id'))
        all_descriptor_ids = {events["id"] for events in json_descriptors}
        # Size the Name column to fit the longest event name.
        padding_name = 12
        for descriptor in json_descriptors:
            if len(descriptor['name']) > padding_name:
                padding_name = len(descriptor['name'])
        padding_name += 2
        header = f'{"ID":<6}| {"Module":<15}| {"Name":<{padding_name}}| Enabled'
        print(header)
        print('-' * len(header))
        for descriptor in json_descriptors:
            print(f'{descriptor["id"]:<6}| {descriptor["module"]:<15}| {descriptor["name"]:<{padding_name}}| '
                  f'{"False" if descriptor["id"] in disabled_ids else "True"}')
        # Disabled IDs with no matching descriptor are reported as unknown.
        not_recognized = disabled_ids - all_descriptor_ids
        for unrecognized in not_recognized:
            print(f'{unrecognized:<6}| {"unknown":<15}| {"unknown":<{padding_name}}| False')

    @staticmethod
    def format_descriptors_in_table(json_descriptors):
        """Print the filterable-event descriptors as a formatted table."""
        sorted_descriptors = sorted(json_descriptors, key=itemgetter('module', 'id'))
        # Size the Name column to fit the longest event name.
        padding_name = 15
        for descriptor in sorted_descriptors:
            if len(descriptor['name']) > padding_name:
                padding_name = len(descriptor['name'])
        padding_name += 2
        header = f'{"ID":<6}| {"Module":<15}| {"Name":<{padding_name}}| Description'
        print(header)
        print('-' * len(header))
        for descriptor in sorted_descriptors:
            print(f'{descriptor["id"]:<6}| {descriptor["module"]:<15}| {descriptor["name"]:<{padding_name}}| '
                  f'{descriptor["description"]}')

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-setting-audit" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify audit settings"
class SettingAutofailover(Subcommand):
    """The settings auto-failover subcommand: configure automatic failover."""

    def __init__(self):
        super(SettingAutofailover, self).__init__()
        self.parser.prog = "couchbase-cli setting-autofailover"
        group = self.parser.add_argument_group("Auto-failover settings")
        group.add_argument("--enable-auto-failover", dest="enabled", metavar="<1|0>",
                           choices=["0", "1"], help="Enable/disable auto-failover")
        group.add_argument("--auto-failover-timeout", dest="timeout", metavar="<seconds>",
                           type=int, help="The auto-failover timeout")
        group.add_argument("--enable-failover-of-server-groups", dest="enable_failover_of_server_groups",
                           metavar="<1|0>", choices=["0", "1"], help="Enable/disable auto-failover of server Groups")
        group.add_argument("--max-failovers", dest="max_failovers", metavar="<1|2|3>", choices=["1", "2", "3"],
                           help="Maximum number of times an auto-failover event can happen")
        group.add_argument("--enable-failover-on-data-disk-issues", dest="enable_failover_on_data_disk_issues",
                           metavar="<1|0>", choices=["0", "1"],
                           help="Enable/disable auto-failover when the Data Service reports disk issues. "
                           + "Couchbase Server Enterprise Edition only.")
        group.add_argument("--failover-data-disk-period", dest="failover_on_data_disk_period",
                           metavar="<seconds>", type=int,
                           help="The amount of time the Data Service disk failures has to be happening for to trigger"
                                " an auto-failover")
        group.add_argument("--can-abort-rebalance", metavar="<1|0>", choices=["1", "0"], dest="can_abort_rebalance",
                           help="Enables auto-failover to abort rebalance and perform the failover. (EE only)")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Normalize flags, cross-validate option combinations, then apply settings."""
        # Convert 1/0 CLI flags to the true/false strings the REST API expects.
        if opts.enabled == "1":
            opts.enabled = "true"
        elif opts.enabled == "0":
            opts.enabled = "false"
        if opts.enable_failover_on_data_disk_issues == "1":
            opts.enable_failover_on_data_disk_issues = "true"
        elif opts.enable_failover_on_data_disk_issues == "0":
            opts.enable_failover_on_data_disk_issues = "false"
        if opts.enable_failover_of_server_groups == "1":
            opts.enable_failover_of_server_groups = "true"
        elif opts.enable_failover_of_server_groups == "0":
            opts.enable_failover_of_server_groups = "false"
        if not self.enterprise:
            # Enterprise-only options must be rejected on community edition.
            if opts.enable_failover_of_server_groups:
                _exit_if_errors(["--enable-failover-of-server-groups can only be configured on enterprise edition"])
            if opts.enable_failover_on_data_disk_issues or opts.failover_on_data_disk_period:
                _exit_if_errors(["Auto failover on Data Service disk issues can only be configured on enterprise"
                                 + " edition"])
            if opts.max_failovers:
                # Message references the actual flag name (--max-failovers).
                _exit_if_errors(["--max-failovers can only be configured on enterprise edition"])
            if opts.can_abort_rebalance:
                _exit_if_errors(["--can-abort-rebalance can only be configured on enterprise edition"])
        if not any([opts.enabled, opts.timeout, opts.enable_failover_on_data_disk_issues,
                    opts.failover_on_data_disk_period, opts.enable_failover_of_server_groups, opts.max_failovers]):
            _exit_if_errors(["No settings specified to be changed"])
        if ((opts.enable_failover_on_data_disk_issues is None or opts.enable_failover_on_data_disk_issues == "false")
                and opts.failover_on_data_disk_period):
            _exit_if_errors(["--enable-failover-on-data-disk-issues must be set to 1 when auto-failover Data"
                             " Service disk period has been set"])
        if opts.enable_failover_on_data_disk_issues and opts.failover_on_data_disk_period is None:
            _exit_if_errors(["--failover-data-disk-period must be set when auto-failover on Data Service disk"
                             " is enabled"])
        if opts.enabled == "false" or opts.enabled is None:
            if opts.enable_failover_on_data_disk_issues or opts.failover_on_data_disk_period:
                _exit_if_errors(["--enable-auto-failover must be set to 1 when auto-failover on Data Service disk issues"
                                 " settings are being configured"])
            if opts.enable_failover_of_server_groups:
                _exit_if_errors(["--enable-auto-failover must be set to 1 when enabling auto-failover of Server Groups"])
            if opts.timeout:
                _warning("Timeout specified will not take effect because auto-failover is being disabled")
        if opts.can_abort_rebalance == '1':
            opts.can_abort_rebalance = 'true'
        elif opts.can_abort_rebalance == '0':
            opts.can_abort_rebalance = 'false'
        _, errors = self.rest.set_autofailover_settings(opts.enabled, opts.timeout,
                                                        opts.enable_failover_of_server_groups, opts.max_failovers,
                                                        opts.enable_failover_on_data_disk_issues,
                                                        opts.failover_on_data_disk_period, opts.can_abort_rebalance)
        _exit_if_errors(errors)
        _success("Auto-failover settings modified")

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-setting-autofailover" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify auto failover settings"
class SettingAutoreprovision(Subcommand):
    """The settings auto-reprovision subcommand: configure auto-reprovisioning."""

    def __init__(self):
        super(SettingAutoreprovision, self).__init__()
        self.parser.prog = "couchbase-cli setting-autoreprovision"
        group = self.parser.add_argument_group("Auto-reprovision settings")
        group.add_argument("--enabled", dest="enabled", metavar="<1|0>", required=True,
                           choices=["0", "1"], help="Enable/disable auto-reprovision")
        group.add_argument("--max-nodes", dest="max_nodes", metavar="<num>", type=int,
                           help="The numbers of server that can be auto-reprovisioned before a rebalance")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate the enabled/max-nodes combination and apply the settings."""
        if opts.enabled == "1":
            opts.enabled = "true"
        elif opts.enabled == "0":
            opts.enabled = "false"
        if opts.enabled == "true" and opts.max_nodes is None:
            _exit_if_errors(["--max-nodes must be specified if auto-reprovision is enabled"])
        if not (opts.enabled or opts.max_nodes):
            _exit_if_errors(["No settings specified to be changed"])
        if (opts.enabled is None or opts.enabled == "false") and opts.max_nodes:
            # Message references the actual flag name (--max-nodes).
            _warning("--max-nodes will not take effect because auto-reprovision is being disabled")
        _, errors = self.rest.set_autoreprovision_settings(opts.enabled, opts.max_nodes)
        _exit_if_errors(errors)
        _success("Auto-reprovision settings modified")

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-setting-autoreprovision" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify auto-reprovision settings"
class SettingCluster(Subcommand):
    """The settings cluster subcommand: edit quotas, name, credentials and port."""

    def __init__(self):
        super(SettingCluster, self).__init__()
        self.parser.prog = "couchbase-cli setting-cluster"
        group = self.parser.add_argument_group("Cluster settings")
        group.add_argument("--cluster-username", dest="new_username", metavar="<username>",
                           help="The cluster administrator username")
        # Fixed help text: previous text was a copy-paste error from the
        # compaction command ("Only compact the data files").
        group.add_argument("--cluster-password", dest="new_password", metavar="<password>",
                           help="The cluster administrator password")
        group.add_argument("--cluster-port", dest="port", type=int, metavar="<port>",
                           help="The cluster administration console port")
        group.add_argument("--cluster-ramsize", dest="data_mem_quota", metavar="<quota>",
                           type=int, help="The data service memory quota in mebibytes")
        group.add_argument("--cluster-index-ramsize", dest="index_mem_quota", metavar="<quota>",
                           type=int, help="The index service memory quota in mebibytes")
        group.add_argument("--cluster-fts-ramsize", dest="fts_mem_quota", metavar="<quota>",
                           type=int, help="The full-text service memory quota in mebibytes")
        group.add_argument("--cluster-eventing-ramsize", dest="eventing_mem_quota", metavar="<quota>",
                           type=int, help="The Eventing service memory quota in mebibytes")
        group.add_argument("--cluster-analytics-ramsize", dest="cbas_mem_quota", metavar="<quota>",
                           type=int, help="The analytics service memory quota in mebibytes")
        group.add_argument("--cluster-name", dest="name", metavar="<name>", help="The cluster name")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Apply quota/name changes first, then credential/port changes."""
        if (opts.data_mem_quota or opts.index_mem_quota or opts.fts_mem_quota or opts.cbas_mem_quota
                or opts.eventing_mem_quota or opts.name):
            _, errors = self.rest.set_pools_default(opts.data_mem_quota, opts.index_mem_quota, opts.fts_mem_quota,
                                                    opts.cbas_mem_quota, opts.eventing_mem_quota, opts.name)
            _exit_if_errors(errors)
        if opts.new_username or opts.new_password or opts.port:
            # Fall back to the current credentials for whichever field is not being changed.
            username = opts.username
            if opts.new_username:
                username = opts.new_username
            password = opts.password
            if opts.new_password:
                password = opts.new_password
            _, errors = self.rest.set_admin_credentials(username, password, opts.port)
            _exit_if_errors(errors)
        _success("Cluster settings modified")

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-setting-cluster" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify cluster settings"
class ClusterEdit(SettingCluster):
    """The cluster edit subcommand (Deprecated): alias for setting-cluster."""

    def __init__(self):
        super(ClusterEdit, self).__init__()
        self.parser.prog = "couchbase-cli cluster-edit"

    def execute(self, opts):
        """Warn about deprecation, then delegate to the setting-cluster logic."""
        _deprecated("Please use the setting-cluster command instead")
        super(ClusterEdit, self).execute(opts)

    @staticmethod
    def get_man_page_name():
        # Parenthesized so only the extension is conditional; the unparenthesized
        # form returned a bare ".html" on Windows due to operator precedence.
        return "couchbase-cli-cluster-edit" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def is_hidden():
        # Deprecated command in 4.6, hidden in 5.0, pending removal
        return True
class SettingCompaction(Subcommand):
    """The setting compaction subcommand: configures auto-compaction for data, views and GSI indexes."""

    def __init__(self):
        super(SettingCompaction, self).__init__()
        self.parser.prog = "couchbase-cli setting-compaction"
        group = self.parser.add_argument_group("Compaction settings")
        group.add_argument("--compaction-db-percentage", dest="db_perc", metavar="<perc>",
                           type=(int),
                           help="Compacts the db once the fragmentation reaches this percentage")
        group.add_argument("--compaction-db-size", dest="db_size", metavar="<mebibytes>",
                           type=(int),
                           help="Compacts db once the fragmentation reaches this size (MiB)")
        group.add_argument("--compaction-view-percentage", dest="view_perc", metavar="<perc>",
                           type=(int),
                           help="Compacts the view once the fragmentation reaches this percentage")
        group.add_argument("--compaction-view-size", dest="view_size", metavar="<mebibytes>",
                           type=(int),
                           help="Compacts view once the fragmentation reaches this size (MiB)")
        group.add_argument("--compaction-period-from", dest="from_period", metavar="<HH:MM>",
                           help="Only run compaction after this time")
        group.add_argument("--compaction-period-to", dest="to_period", metavar="<HH:MM>",
                           help="Only run compaction before this time")
        group.add_argument("--enable-compaction-abort", dest="enable_abort", metavar="<1|0>",
                           choices=["0", "1"], help="Allow compactions to be aborted")
        group.add_argument("--enable-compaction-parallel", dest="enable_parallel", metavar="<1|0>",
                           choices=["0", "1"], help="Allow parallel compactions")
        group.add_argument("--metadata-purge-interval", dest="purge_interval", metavar="<float>",
                           type=(float), help="The metadata purge interval")
        group.add_argument("--gsi-compaction-mode", dest="gsi_mode", choices=["append", "circular"],
                           help="Sets the gsi compaction mode (append or circular)")
        group.add_argument("--compaction-gsi-percentage", dest="gsi_perc", type=(int), metavar="<perc>",
                           help="Starts compaction once gsi file fragmentation has reached this percentage "
                           + "(Append mode only)")
        group.add_argument("--compaction-gsi-interval", dest="gsi_interval", metavar="<days>",
                           help="A comma separated list of days compaction can run (Circular mode only)")
        group.add_argument("--compaction-gsi-period-from", dest="gsi_from_period", metavar="<HH:MM>",
                           help="Allow gsi compaction to run after this time (Circular mode only)")
        group.add_argument("--compaction-gsi-period-to", dest="gsi_to_period", metavar="<HH:MM>",
                           help="Allow gsi compaction to run before this time (Circular mode only)")
        group.add_argument("--enable-gsi-compaction-abort", dest="enable_gsi_abort", metavar="<1|0>",
                           choices=["0", "1"],
                           help="Abort gsi compaction if run outside of the accepted interval "
                           + "(Circular mode only)")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate the supplied compaction options and push them to the cluster."""
        if opts.db_perc is not None and (opts.db_perc < 2 or opts.db_perc > 100):
            _exit_if_errors(["--compaction-db-percentage must be between 2 and 100"])
        if opts.view_perc is not None and (opts.view_perc < 2 or opts.view_perc > 100):
            _exit_if_errors(["--compaction-view-percentage must be between 2 and 100"])
        if opts.db_size is not None:
            if int(opts.db_size) < 1:
                _exit_if_errors(["--compaction-db-size must be at least 1"])
            # The CLI takes MiB, the REST API expects bytes
            opts.db_size = int(opts.db_size) * 1024**2
        if opts.view_size is not None:
            if int(opts.view_size) < 1:
                _exit_if_errors(["--compaction-view-size must be at least 1"])
            opts.view_size = int(opts.view_size) * 1024**2
        # The three time-window options must be supplied together
        if opts.from_period and not (opts.to_period and opts.enable_abort):
            errors = []
            if opts.to_period is None:
                errors.append("--compaction-period-to is required when using --compaction-period-from")
            if opts.enable_abort is None:
                errors.append("--enable-compaction-abort is required when using --compaction-period-from")
            _exit_if_errors(errors)
        if opts.to_period and not (opts.from_period and opts.enable_abort):
            errors = []
            if opts.from_period is None:
                errors.append("--compaction-period-from is required when using --compaction-period-to")
            if opts.enable_abort is None:
                errors.append("--enable-compaction-abort is required when using --compaction-period-to")
            _exit_if_errors(errors)
        if opts.enable_abort and not (opts.from_period and opts.to_period):
            errors = []
            if opts.from_period is None:
                errors.append("--compaction-period-from is required when using --enable-compaction-abort")
            if opts.to_period is None:
                errors.append("--compaction-period-to is required when using --enable-compaction-abort")
            _exit_if_errors(errors)
        from_hour, from_min = self._handle_timevalue(opts.from_period,
                                                     "--compaction-period-from")
        to_hour, to_min = self._handle_timevalue(opts.to_period, "--compaction-period-to")
        if opts.enable_abort == "1":
            opts.enable_abort = "true"
        elif opts.enable_abort == "0":
            opts.enable_abort = "false"
        # NOTE(review): unlike enable_abort above, an unset enable_parallel is coerced to
        # "false" here rather than left as None — confirm this asymmetry is intentional.
        if opts.enable_parallel == "1":
            opts.enable_parallel = "true"
        else:
            opts.enable_parallel = "false"
        if opts.purge_interval is not None and (opts.purge_interval < 0.04 or opts.purge_interval > 60.0):
            _exit_if_errors(["--metadata-purge-interval must be between 0.04 and 60.0"])
        g_from_hour = None
        g_from_min = None
        g_to_hour = None
        g_to_min = None
        if opts.gsi_mode == "append":
            # The REST API calls append mode "full"
            opts.gsi_mode = "full"
            if opts.gsi_perc is None:
                _exit_if_errors(['--compaction-gsi-percentage must be specified when --gsi-compaction-mode is set '
                                 'to append'])
        elif opts.gsi_mode == "circular":
            if opts.gsi_from_period is not None and opts.gsi_to_period is None:
                _exit_if_errors(["--compaction-gsi-period-to is required with --compaction-gsi-period-from"])
            if opts.gsi_to_period is not None and opts.gsi_from_period is None:
                _exit_if_errors(["--compaction-gsi-period-from is required with --compaction-gsi-period-to"])
            g_from_hour, g_from_min = self._handle_timevalue(opts.gsi_from_period, "--compaction-gsi-period-from")
            g_to_hour, g_to_min = self._handle_timevalue(opts.gsi_to_period, "--compaction-gsi-period-to")
            if opts.enable_gsi_abort == "1":
                opts.enable_gsi_abort = "true"
            else:
                opts.enable_gsi_abort = "false"
        _, errors = self.rest.set_compaction_settings(opts.db_perc, opts.db_size, opts.view_perc, opts.view_size,
                                                      from_hour, from_min, to_hour, to_min, opts.enable_abort,
                                                      opts.enable_parallel, opts.purge_interval, opts.gsi_mode,
                                                      opts.gsi_perc, opts.gsi_interval, g_from_hour, g_from_min,
                                                      g_to_hour, g_to_min, opts.enable_gsi_abort)
        _exit_if_errors(errors)
        _success("Compaction settings modified")

    def _handle_timevalue(self, opt_value, opt_name):
        """Parse an HH:MM option value into an (hour, minute) pair, exiting on malformed input.

        Returns (None, None) when the option was not supplied.
        """
        hour = None
        minute = None
        if opt_value:
            if opt_value.find(':') == -1:
                _exit_if_errors([f'Invalid value for {opt_name}, must be in form XX:XX'])
            hour, minute = opt_value.split(':', 1)
            try:
                hour = int(hour)
            except ValueError:
                _exit_if_errors([f'Invalid hour value for {opt_name}, must be an integer'])
            if hour not in range(24):
                _exit_if_errors([f'Invalid hour value for {opt_name}, must be 0-23'])
            try:
                minute = int(minute)
            except ValueError:
                _exit_if_errors([f'Invalid minute value for {opt_name}, must be an integer'])
            if minute not in range(60):
                _exit_if_errors([f'Invalid minute value for {opt_name}, must be 0-59'])
        return hour, minute

    @staticmethod
    def get_man_page_name():
        # Parenthesised so the conditional applies to the extension only; the un-parenthesised
        # form returned just ".html" on Windows because the ternary binds looser than '+'.
        return "couchbase-cli-setting-compaction" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify auto-compaction settings"
class SettingIndex(Subcommand):
    """The setting index subcommand: modifies global secondary index settings."""

    def __init__(self):
        super(SettingIndex, self).__init__()
        self.parser.prog = "couchbase-cli setting-index"
        group = self.parser.add_argument_group("Index settings")
        group.add_argument("--index-max-rollback-points", dest="max_rollback", metavar="<num>",
                           type=(int), help="Max rollback points")
        group.add_argument("--index-stable-snapshot-interval", dest="stable_snap", type=(int),
                           metavar="<seconds>", help="Stable snapshot interval in seconds")
        group.add_argument("--index-memory-snapshot-interval", dest="mem_snap", metavar="<ms>",
                           type=(int), help="Stable snapshot interval in milliseconds")
        group.add_argument("--index-storage-setting", dest="storage_mode", metavar="<mode>",
                           choices=["default", "memopt"], help="The index storage backend")
        group.add_argument("--index-threads", dest="threads", metavar="<num>",
                           type=(int), help="The number of indexer threads")
        group.add_argument("--index-log-level", dest="log_level", metavar="<level>",
                           choices=["debug", "silent", "fatal", "error", "warn", "info", "verbose",
                                    "timing", "trace"],
                           help="The indexer log level")
        group.add_argument('--replicas', metavar='<num>', type=int, help='Number of index replicas')
        group.add_argument('--optimize-placement', metavar='<1|0>', type=str,
                           help='Optimize index placement on a rebalance.')

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Update index settings; at least one option must be supplied."""
        if (opts.max_rollback is None and opts.stable_snap is None
                and opts.mem_snap is None and opts.storage_mode is None
                and opts.threads is None and opts.log_level is None and opts.replicas is None
                and opts.optimize_placement is None):
            _exit_if_errors(["No settings specified to be changed"])
        settings, errors = self.rest.index_settings()
        _exit_if_errors(errors)
        # The default index backend changed from forestdb to plasma in Couchbase 5.0;
        # community edition and clusters still on forestdb keep forestdb as their default.
        # (Parentheses added for clarity only — 'and' already binds tighter than 'or'.)
        default = "plasma"
        if (opts.storage_mode == "default" and settings['storageMode'] == "forestdb") or not self.enterprise:
            default = "forestdb"
        opts.storage_mode = index_storage_mode_to_param(opts.storage_mode, default)
        _, errors = self.rest.set_index_settings(opts.storage_mode, opts.max_rollback, opts.stable_snap, opts.mem_snap,
                                                 opts.threads, opts.log_level, opts.replicas, opts.optimize_placement)
        _exit_if_errors(errors)
        _success("Indexer settings modified")

    @staticmethod
    def get_man_page_name():
        # Parenthesised so the suffix ternary applies to the extension only (fixes the
        # Windows branch, which previously returned just ".html").
        return "couchbase-cli-setting-index" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify index settings"
class SettingSaslauthd(Subcommand):
    """The setting sasl subcommand: enables/disables saslauthd and assigns default admin roles."""

    def __init__(self):
        super(SettingSaslauthd, self).__init__()
        self.parser.prog = "couchbase-cli setting-saslauthd"
        group = self.parser.add_argument_group("saslauthd settings")
        group.add_argument("--enabled", dest="enabled", metavar="<1|0>", required=True,
                           choices=["0", "1"], help="Enable/disable saslauthd")
        group.add_argument("--admins", dest="admins", metavar="<user_list>",
                           help="A comma separated list of full admins")
        group.add_argument("--roadmins", dest="roadmins", metavar="<user_list>",
                           help="A comma separated list of read only admins")
        group.add_argument("--default", dest="default", default="none",
                           choices=["admins", "roadmins", "none"], metavar="<default>",
                           help="Default roles for saslauthd users")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Apply the saslauthd settings; user lists are newline separated for the REST API."""
        admins = ""
        if opts.admins:
            admins = opts.admins.replace(",", "\n")
        ro_admins = ""
        if opts.roadmins:
            ro_admins = opts.roadmins.replace(",", "\n")
        errors = None
        if opts.enabled == '1':
            # Based on the 'none' branch below, sasl_settings takes (enabled, ro_admins, admins).
            if opts.default == 'admins':
                # Full admin is the default role, so the explicit --admins list is dropped
                # (None is passed for it below). The previous code warned about the
                # read-only list here while still forwarding it — the warnings were swapped.
                if admins:
                    _warning("--admins option ignored since default is admins")
                _, errors = self.rest.sasl_settings('true', ro_admins, None)
            elif opts.default == 'roadmins':
                if ro_admins:
                    _warning("--roadmins option ignored since default is read only admins")
                _, errors = self.rest.sasl_settings('true', None, admins)
            else:
                _, errors = self.rest.sasl_settings('true', ro_admins, admins)
        else:
            if admins:
                _warning("--admins option ignored since saslauthd is being disabled")
            if ro_admins:
                _warning("--roadmins option ignored since saslauthd is being disabled")
            _, errors = self.rest.sasl_settings('false', "", "")
        _exit_if_errors(errors)
        _success("saslauthd settings modified")

    @staticmethod
    def get_man_page_name():
        # Parenthesised so the suffix ternary applies to the extension only.
        return "couchbase-cli-setting-saslauthd" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify saslauthd settings"
class SettingLdap(Subcommand):
    """The setting Ldap subcommand: configures LDAP authentication and authorization."""

    def __init__(self):
        super(SettingLdap, self).__init__()
        self.parser.prog = "couchbase-cli setting-ldap"
        group = self.parser.add_argument_group("LDAP settings")
        group.add_argument("--get", dest="get", default=False, action="store_true",
                           help='When the get flag is provided it will retrieve the current ldap settings')
        group.add_argument("--authentication-enabled", dest="authentication_enabled", metavar="<1|0>",
                           choices=["1", "0"], help="Enable LDAP authentication, otherwise it defaults to disable")
        group.add_argument("--authorization-enabled", dest="authorization_enabled", metavar="<1|0>",
                           choices=["1", "0"], help="Enable LDAP authorization, otherwise defaults to false")
        group.add_argument("--hosts", dest="hosts", metavar="<host_list>",
                           help="Comma separated list of LDAP servers")
        group.add_argument("--port", dest="port", metavar="<port>", help="LDAP port", type=int)
        group.add_argument("--encryption", dest="encryption", metavar="<tls|startTLS|none>",
                           choices=["tls", "startTLS", "none"], help="Encryption used")
        group.add_argument("--server-cert-validation", dest="server_cert_val", metavar="<1|0>", choices=["0", "1"],
                           help="Enable or disable certificate validation when connecting to LDAP server")
        group.add_argument("--ldap-cacert", dest="cacert_ldap", metavar="<path>",
                           help="CA certificate to be used for LDAP server certificate validation, required if"
                           + " certificate validation is not disabled")
        group.add_argument("--user-dn-query", metavar="<query>", dest="user_dn_query",
                           help="LDAP query to get user's DN. Must contain at least one instance of %%u")
        group.add_argument("--user-dn-template", metavar="<template>", dest="user_dn_template",
                           help="Template to construct user's DN. Must contain at least one instance of %%u")
        group.add_argument("--client-cert", metavar="<path>", dest="client_cert",
                           help="The client TLS certificate for authentication")
        group.add_argument("--client-key", metavar="<path>", dest="client_key",
                           help="The client TLS key for authentication")
        group.add_argument("--request-timeout", metavar="<ms>", dest="timeout",
                           help="Request time out in milliseconds")
        group.add_argument("--max-parallel", dest="max_parallel", metavar="<max>", type=int,
                           help="Maximum number of parallel connections that can be established")
        group.add_argument("--max-cache-size", dest="max_cache_size", metavar="<size>",
                           help="Maximum number of cached LDAP requests")
        group.add_argument("--cache-value-lifetime", dest="cache_value_lifetime", metavar="<ms>",
                           help="Cache value lifetime in milliseconds")
        group.add_argument("--bind-dn", dest="bind_dn", metavar="<DN>",
                           help="The DN of a user to bind as to perform lookups")
        group.add_argument("--bind-password", dest="bind_password", metavar="<password>",
                           help="The password of the bind user")
        group.add_argument("--group-query", dest="group_query", metavar="<query>",
                           help="LDAP query to get user's groups by username")
        group.add_argument("--enable-nested-groups", dest="nested_groups", metavar="<1|0>",
                           choices=["0", "1"])
        group.add_argument("--nested-group-max-depth", dest="nested_max_depth", metavar="<max>", type=int,
                           help="Maximum number of recursive group requests allowed. [1 - 100]")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=True)
    def execute(self, opts):
        """Either dump the current LDAP settings (--get) or apply new ones."""
        if opts.get:
            data, rv = self.rest.get_ldap()
            _exit_if_errors(rv)
            print(json.dumps(data))
        else:
            self._set(opts)

    def _set(self, opts):
        """Translate CLI flag values to REST representations, validate them and apply the LDAP settings."""
        # The REST API expects "true"/"false" strings; unset options stay None.
        if opts.authentication_enabled == '1':
            opts.authentication_enabled = 'true'
        elif opts.authentication_enabled == '0':
            opts.authentication_enabled = 'false'
        if opts.authorization_enabled == '1':
            opts.authorization_enabled = 'true'
        elif opts.authorization_enabled == '0':
            opts.authorization_enabled = 'false'
        if opts.server_cert_val == '1':
            opts.server_cert_val = 'true'
        elif opts.server_cert_val == '0':
            opts.server_cert_val = 'false'
        if opts.server_cert_val == 'false' and opts.cacert_ldap is not None:
            # Message fixed to use the actual flag name (--ldap-cacert, not --ldap-cert)
            _exit_if_errors(['--server-cert-validation 0 and --ldap-cacert can not be used together'])
        if opts.cacert_ldap is not None:
            opts.cacert_ldap = _exit_on_file_read_failure(opts.cacert_ldap)
        if opts.encryption == "tls":
            opts.encryption = "TLS"
        elif opts.encryption == "startTLS":
            opts.encryption = "StartTLSExtension"
        elif opts.encryption == "none":
            opts.encryption = "None"
        if opts.nested_groups == '1':
            opts.nested_groups = 'true'
        elif opts.nested_groups == '0':
            opts.nested_groups = 'false'
        if opts.user_dn_query is not None and opts.user_dn_template is not None:
            _exit_if_errors(['--user-dn-query and --user-dn-template can not be used together'])
        mapping = None
        if opts.user_dn_query is not None:
            mapping = f'{{"query": "{opts.user_dn_query}"}}'
        if opts.user_dn_template is not None:
            mapping = f'{{"template": "{opts.user_dn_template}"}}'
        if (opts.client_cert and not opts.client_key) or (not opts.client_cert and opts.client_key):
            _exit_if_errors(['--client-cert and --client-key have to be used together'])
        if opts.client_cert is not None:
            opts.client_cert = _exit_on_file_read_failure(opts.client_cert)
        if opts.client_key is not None:
            opts.client_key = _exit_on_file_read_failure(opts.client_key)
        _, errors = self.rest.ldap_settings(opts.authentication_enabled, opts.authorization_enabled, opts.hosts,
                                            opts.port, opts.encryption, mapping, opts.timeout, opts.max_parallel,
                                            opts.max_cache_size, opts.cache_value_lifetime, opts.bind_dn,
                                            opts.bind_password, opts.client_cert, opts.client_key, opts.group_query,
                                            opts.nested_groups, opts.nested_max_depth, opts.server_cert_val,
                                            opts.cacert_ldap)
        _exit_if_errors(errors)
        _success("LDAP settings modified")

    @staticmethod
    def get_man_page_name():
        # Parenthesised so the suffix ternary applies to the extension only.
        return "couchbase-cli-setting-ldap" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify LDAP settings"
class SettingNotification(Subcommand):
    """The settings notification subcommand: enables/disables software update notifications."""

    def __init__(self):
        super(SettingNotification, self).__init__()
        self.parser.prog = "couchbase-cli setting-notification"
        group = self.parser.add_argument_group("Notification Settings")
        group.add_argument("--enable-notifications", dest="enabled", metavar="<1|0>", required=True,
                           choices=["0", "1"], help="Enables/disable software notifications")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Map the 0/1 flag to a bool and push it to the cluster."""
        # argparse restricts --enable-notifications to "0"/"1", so enabled is never left None
        enabled = None
        if opts.enabled == "1":
            enabled = True
        elif opts.enabled == "0":
            enabled = False
        _, errors = self.rest.enable_notifications(enabled)
        _exit_if_errors(errors)
        _success("Software notification settings updated")

    @staticmethod
    def get_man_page_name():
        # Parenthesised so the suffix ternary applies to the extension only.
        return "couchbase-cli-setting-notification" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify software notification settings"
class SettingPasswordPolicy(Subcommand):
    """The settings password policy subcommand: gets or sets the cluster password policy."""

    def __init__(self):
        super(SettingPasswordPolicy, self).__init__()
        self.parser.prog = "couchbase-cli setting-password-policy"
        group = self.parser.add_argument_group("Password Policy Settings")
        group.add_argument("--get", dest="get", action="store_true", default=False,
                           help="Get the current password policy")
        group.add_argument("--set", dest="set", action="store_true", default=False,
                           help="Set a new password policy")
        group.add_argument("--min-length", dest="min_length", type=int, default=None, metavar="<num>",
                           help="Specifies the minimum password length for new passwords")
        group.add_argument("--uppercase", dest="upper_case", metavar="<0|1>", choices=["0", "1"],
                           help="Specifies new passwords must contain an upper case character")
        group.add_argument("--lowercase", dest="lower_case", metavar="<0|1>", choices=["0", "1"],
                           help="Specifies new passwords must contain a lower case character")
        group.add_argument("--digit", dest="digit", metavar="<0|1>", choices=["0", "1"],
                           help="Specifies new passwords must contain at least one digit")
        group.add_argument("--special-char", dest="special_char", metavar="<0|1>", choices=["0", "1"],
                           help="Specifies new passwords must contain at least one special character")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Dispatch to get or set; the two flags are mutually exclusive."""
        actions = sum([opts.get, opts.set])
        if actions == 0:
            _exit_if_errors(["Must specify either --get or --set"])
        elif actions > 1:
            _exit_if_errors(["The --get and --set flags may not be specified at the same time"])
        elif opts.get:
            if opts.min_length is not None or any([opts.upper_case, opts.lower_case, opts.digit, opts.special_char]):
                _exit_if_errors(["The --get flag must be used without any other arguments"])
            self._get()
        elif opts.set:
            if opts.min_length is None:
                _exit_if_errors(["--min-length is required when using --set flag"])
            if opts.min_length <= 0:
                _exit_if_errors(["--min-length has to be greater than 0"])
            self._set(opts)

    def _get(self):
        """Print the current password policy as pretty JSON."""
        policy, errors = self.rest.get_password_policy()
        _exit_if_errors(errors)
        print(json.dumps(policy, sort_keys=True, indent=2))

    def _set(self, opts):
        """Apply the new password policy from the parsed options."""
        _, errors = self.rest.set_password_policy(opts.min_length, opts.upper_case, opts.lower_case, opts.digit,
                                                  opts.special_char)
        _exit_if_errors(errors)
        _success("Password policy updated")

    @staticmethod
    def get_man_page_name():
        # Parenthesised so the suffix ternary applies to the extension only.
        return "couchbase-cli-setting-password-policy" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify the password policy"
class SettingSecurity(Subcommand):
    """The settings security subcommand: gets or sets cluster security settings."""

    def __init__(self):
        super(SettingSecurity, self).__init__()
        self.parser.prog = "couchbase-cli setting-security"
        group = self.parser.add_argument_group("Cluster Security Settings")
        group.add_argument('--get', default=False, action='store_true', help='Get security settings.')
        group.add_argument('--set', default=False, action='store_true', help='Set security settings.')
        group.add_argument("--disable-http-ui", dest="disable_http_ui", metavar="<0|1>", choices=['0', '1'],
                           default=None, help="Disables access to the UI over HTTP (0 or 1)")
        group.add_argument("--disable-www-authenticate", dest="disable_www_authenticate",
                           metavar="<0|1>", choices=['0', '1'], default=None,
                           help="Disables use of WWW-Authenticate (0 or 1)")
        group.add_argument("--cluster-encryption-level", dest="cluster_encryption_level", metavar="<all|control>",
                           choices=['all', 'control'], default=None,
                           help="Set cluster encryption level, only used when cluster encryption enabled.")
        group.add_argument('--tls-min-version', dest='tls_min_version', metavar='<tlsv1|tlsv1.1|tlsv1.2>',
                           choices=['tlsv1', 'tlsv1.1', 'tlsv1.2'], default=None, help='Set the minimum TLS version')
        group.add_argument('--tls-honor-cipher-order', dest='tls_honor_cipher_order', metavar='<1|0>',
                           choices=['1', '0'], help='Specify or not the cipher order has to be followed.', default=None)
        group.add_argument('--cipher-suites', metavar='<ciphers>', default=None,
                           help='Comma separated list of ciphers to use. If an empty string (e.g "") given it will'
                           ' reset ciphers to default.')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Dispatch to get or set; exactly one of --get/--set must be supplied."""
        if sum([opts.get, opts.set]) != 1:
            _exit_if_errors(['Provide either --set or --get.'])
        if opts.get:
            val, err = self.rest.get_security_settings()
            _exit_if_errors(err)
            print(json.dumps(val))
        elif opts.set:
            self._set(self.rest, opts.disable_http_ui, opts.cluster_encryption_level, opts.tls_min_version,
                      opts.tls_honor_cipher_order, opts.cipher_suites, opts.disable_www_authenticate)

    @staticmethod
    def _set(rest, disable_http_ui, encryption_level, tls_min_version, honor_order, cipher_suites,
             disable_www_authenticate):
        """Convert CLI flag values to REST representations and update the security settings."""
        if not any(x is not None for x in [disable_http_ui, encryption_level, tls_min_version,
                                           honor_order, cipher_suites, disable_www_authenticate]):
            _exit_if_errors(['please provide at least one of --cluster-encryption-level, --disable-http-ui,'
                             ' --tls-min-version, --tls-honor-cipher-order or --cipher-suites together with --set'])
        # The REST API expects "true"/"false" strings; unset options stay None.
        if disable_http_ui == '1':
            disable_http_ui = 'true'
        elif disable_http_ui == '0':
            disable_http_ui = 'false'
        if disable_www_authenticate == '1':
            disable_www_authenticate = 'true'
        elif disable_www_authenticate == '0':
            disable_www_authenticate = 'false'
        if honor_order == '1':
            honor_order = 'true'
        elif honor_order == '0':
            honor_order = 'false'
        # An explicit empty string resets the cipher list to the server default
        if cipher_suites == '':
            cipher_suites = json.dumps([])
        elif cipher_suites is not None:
            cipher_suites = json.dumps(cipher_suites.split(','))
        _, errors = rest.set_security_settings(disable_http_ui, encryption_level, tls_min_version,
                                               honor_order, cipher_suites, disable_www_authenticate)
        _exit_if_errors(errors)
        _success("Security settings updated")

    @staticmethod
    def get_man_page_name():
        # Parenthesised so the suffix ternary applies to the extension only.
        return "couchbase-cli-setting-security" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify security settings"
class SettingXdcr(Subcommand):
    """The setting xdcr subcommand: modifies global XDCR replication settings."""

    def __init__(self):
        super(SettingXdcr, self).__init__()
        self.parser.prog = "couchbase-cli setting-xdcr"
        group = self.parser.add_argument_group("XDCR Settings")
        group.add_argument("--checkpoint-interval", dest="chk_int", type=(int), metavar="<num>",
                           help="Intervals between checkpoints in seconds (60 to 14400)")
        group.add_argument("--worker-batch-size", dest="worker_batch_size", metavar="<num>",
                           type=(int), help="Doc batch size (500 to 10000)")
        group.add_argument("--doc-batch-size", dest="doc_batch_size", type=(int), metavar="<KB>",
                           help="Document batching size in KB (10 to 100000)")
        group.add_argument("--failure-restart-interval", dest="fail_interval", metavar="<seconds>",
                           type=(int),
                           help="Interval for restarting failed xdcr in seconds (1 to 300)")
        group.add_argument("--optimistic-replication-threshold", dest="rep_thresh", type=(int),
                           metavar="<bytes>",
                           help="Document body size threshold (bytes) to trigger optimistic "
                           + "replication")
        group.add_argument("--source-nozzle-per-node", dest="src_nozzles", metavar="<num>",
                           type=(int),
                           help="The number of source nozzles per source node (1 to 10)")
        group.add_argument("--target-nozzle-per-node", dest="dst_nozzles", metavar="<num>",
                           type=(int),
                           help="The number of outgoing nozzles per target node (1 to 10)")
        group.add_argument("--bandwidth-usage-limit", dest="usage_limit", type=(int),
                           metavar="<num>", help="The bandwidth usage limit in MiB/Sec")
        group.add_argument("--enable-compression", dest="compression", metavar="<1|0>", choices=["1", "0"],
                           help="Enable/disable compression")
        group.add_argument("--log-level", dest="log_level", metavar="<level>",
                           choices=["Error", "Info", "Debug", "Trace"],
                           help="The XDCR log level")
        group.add_argument("--stats-interval", dest="stats_interval", metavar="<ms>",
                           help="The interval for statistics updates (in milliseconds)")
        group.add_argument('--max-processes', dest='max_proc', metavar="<num>", type=int,
                           help='Number of processes to be allocated to XDCR. The default is 4.')

    @rest_initialiser(version_check=True, cluster_init_check=True, enterprise_check=False)
    def execute(self, opts):
        """Apply global XDCR settings; compression is enterprise-only."""
        if not self.enterprise and opts.compression:
            _exit_if_errors(["--enable-compression can only be configured on enterprise edition"])
        # The REST API takes a compression type name rather than 0/1
        if opts.compression == "0":
            opts.compression = "None"
        elif opts.compression == "1":
            opts.compression = "Auto"
        _, errors = self.rest.xdcr_global_settings(opts.chk_int, opts.worker_batch_size, opts.doc_batch_size,
                                                   opts.fail_interval, opts.rep_thresh, opts.src_nozzles,
                                                   opts.dst_nozzles, opts.usage_limit, opts.compression, opts.log_level,
                                                   opts.stats_interval, opts.max_proc)
        _exit_if_errors(errors)
        _success("Global XDCR settings updated")

    @staticmethod
    def get_man_page_name():
        # Parenthesised so the suffix ternary applies to the extension only.
        return "couchbase-cli-setting-xdcr" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Modify XDCR related settings"
class SettingMasterPassword(Subcommand):
    """The setting master password subcommand: sets the master password or rotates its data key."""

    def __init__(self):
        super(SettingMasterPassword, self).__init__()
        self.parser.prog = "couchbase-cli setting-master-password"
        group = self.parser.add_argument_group("Master password options")
        group.add_argument("--new-password", dest="new_password", metavar="<password>",
                           required=False, action=CBNonEchoedAction, envvar=None,
                           prompt_text="Enter new master password:",
                           confirm_text="Confirm new master password:",
                           help="Sets a new master password")
        group.add_argument("--rotate-data-key", dest="rotate_data_key", action="store_true",
                           help="Rotates the master password data key")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Perform exactly one of: set a new master password, or rotate the data key."""
        if opts.new_password is not None:
            _, errors = self.rest.set_master_pwd(opts.new_password)
            _exit_if_errors(errors)
            _success("New master password set")
        elif opts.rotate_data_key:
            _, errors = self.rest.rotate_master_pwd()
            _exit_if_errors(errors)
            _success("Data key rotated")
        else:
            _exit_if_errors(["No parameters set"])

    @staticmethod
    def get_man_page_name():
        # Parenthesised so the suffix ternary applies to the extension only.
        return "couchbase-cli-setting-master-password" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Changing the settings of the master password"
class SslManage(Subcommand):
    """The ssl manage subcommand: inspects, regenerates and uploads cluster/node certificates."""

    def __init__(self):
        super(SslManage, self).__init__()
        self.parser.prog = "couchbase-cli ssl-manage"
        group = self.parser.add_argument_group("SSL manage options")
        group.add_argument("--cluster-cert-info", dest="cluster_cert", action="store_true",
                           default=False, help="Gets the cluster certificate")
        group.add_argument("--node-cert-info", dest="node_cert", action="store_true",
                           default=False, help="Gets the node certificate")
        group.add_argument("--regenerate-cert", dest="regenerate", metavar="<path>",
                           help="Regenerate the cluster certificate and save it to a file")
        group.add_argument("--set-node-certificate", dest="set_cert", action="store_true",
                           default=False, help="Sets the node certificate")
        group.add_argument("--upload-cluster-ca", dest="upload_cert", metavar="<path>",
                           help="Upload a new cluster certificate")
        group.add_argument("--set-client-auth", dest="client_auth_path", metavar="<path>",
                           help="A path to a file containing the client auth configuration")
        group.add_argument("--client-auth", dest="show_client_auth", action="store_true",
                           help="Show ssl client certificate authentication value")
        group.add_argument("--extended", dest="extended", action="store_true",
                           default=False, help="Print extended certificate information")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Run the single certificate operation selected by the flags above."""
        if opts.regenerate is not None:
            # Verify the output path is writable before asking the server to regenerate
            try:
                open(opts.regenerate, 'a').close()
            except IOError:
                _exit_if_errors([f'Unable to create file at `{opts.regenerate}`'])
            certificate, errors = self.rest.regenerate_cluster_certificate()
            _exit_if_errors(errors)
            _exit_on_file_write_failure(opts.regenerate, certificate)
            _success(f'Certificate regenerated and copied to `{opts.regenerate}`')
        elif opts.cluster_cert:
            certificate, errors = self.rest.retrieve_cluster_certificate(opts.extended)
            _exit_if_errors(errors)
            # Extended info comes back as a dict, the plain certificate as a string
            if isinstance(certificate, dict):
                print(json.dumps(certificate, sort_keys=True, indent=2))
            else:
                print(certificate)
        elif opts.node_cert:
            host = urllib.parse.urlparse(opts.cluster).netloc
            certificate, errors = self.rest.retrieve_node_certificate(host)
            _exit_if_errors(errors)
            print(json.dumps(certificate, sort_keys=True, indent=2))
        elif opts.upload_cert:
            certificate = _exit_on_file_read_failure(opts.upload_cert)
            _, errors = self.rest.upload_cluster_certificate(certificate)
            _exit_if_errors(errors)
            _success(f'Uploaded cluster certificate to {opts.cluster}')
        elif opts.set_cert:
            _, errors = self.rest.set_node_certificate()
            _exit_if_errors(errors)
            _success("Node certificate set")
        elif opts.client_auth_path:
            data = _exit_on_file_read_failure(opts.client_auth_path)
            try:
                config = json.loads(data)
            except ValueError as e:
                _exit_if_errors([f'Client auth config does not contain valid json: {e}'])
            _, errors = self.rest.set_client_cert_auth(config)
            _exit_if_errors(errors)
            _success("SSL client auth updated")
        elif opts.show_client_auth:
            result, errors = self.rest.retrieve_client_cert_auth()
            _exit_if_errors(errors)
            print(json.dumps(result, sort_keys=True, indent=2))
        else:
            _exit_if_errors(["No options specified"])

    @staticmethod
    def get_man_page_name():
        # Parenthesised so the suffix ternary applies to the extension only.
        return "couchbase-cli-ssl-manage" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage cluster certificates"
class UserManage(Subcommand):
    """The user manage subcommand: manage RBAC users and user groups."""

    def __init__(self):
        super(UserManage, self).__init__()
        self.parser.prog = "couchbase-cli user-manage"
        group = self.parser.add_argument_group("User manage options")
        group.add_argument("--delete", dest="delete", action="store_true", default=False,
                           help="Delete an existing RBAC user")
        group.add_argument("--get", dest="get", action="store_true", default=False,
                           help="Display RBAC user details")
        group.add_argument("--list", dest="list", action="store_true", default=False,
                           help="List all RBAC users and their roles")
        group.add_argument("--my-roles", dest="my_roles", action="store_true", default=False,
                           help="List my roles")
        group.add_argument("--set", dest="set", action="store_true", default=False,
                           help="Create or edit an RBAC user")
        group.add_argument("--set-group", dest="set_group", action="store_true", default=False,
                           help="Create or edit a user group")
        group.add_argument("--delete-group", dest="delete_group", action="store_true", default=False,
                           help="Delete a user group")
        group.add_argument("--list-groups", dest="list_group", action="store_true", default=False,
                           help="List all groups")
        group.add_argument("--get-group", dest="get_group", action="store_true", default=False,
                           help="Get group")
        group.add_argument("--rbac-username", dest="rbac_user", metavar="<username>",
                           help="The RBAC username")
        group.add_argument("--rbac-password", dest="rbac_pass", metavar="<password>",
                           help="The RBAC password")
        group.add_argument("--rbac-name", dest="rbac_name", metavar="<name>",
                           help="The full name of the user")
        group.add_argument("--roles", dest="roles", metavar="<roles_list>",
                           help="The roles for the specified user")
        group.add_argument("--auth-domain", dest="auth_domain", metavar="<domain>",
                           choices=["external", "local"],
                           help="The authentication type for the specified user")
        group.add_argument("--user-groups", dest="groups", metavar="<groups>",
                           help="List of groups for the user to be added to")
        group.add_argument("--group-name", dest="group", metavar="<group>", help="Group name")
        group.add_argument("--group-description", dest="description", metavar="<text>", help="Group description")
        group.add_argument("--ldap-ref", dest="ldap_ref", metavar="<ref>", help="LDAP group's distinguished name")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate that exactly one action flag was given and dispatch to it."""
        num_selectors = sum([opts.delete, opts.list, opts.my_roles, opts.set, opts.get, opts.get_group,
                             opts.list_group, opts.delete_group, opts.set_group])
        if num_selectors == 0:
            # NOTE: flag names in the message use the real CLI spelling
            # (--my-roles), not the argparse dest (my_roles).
            _exit_if_errors(['Must specify --delete, --list, --my-roles, --set, --get, --get-group, --set-group, '
                             '--list-groups or --delete-group'])
        elif num_selectors != 1:
            _exit_if_errors(['Only one of the following can be specified:--delete, --list, --my-roles, --set, --get,'
                             ' --get-group, --set-group, --list-groups or --delete-group'])

        if opts.delete:
            self._delete(opts)
        elif opts.list:
            self._list(opts)
        elif opts.my_roles:
            self._my_roles(opts)
        elif opts.set:
            self._set(opts)
        elif opts.get:
            self._get(opts)
        elif opts.get_group:
            self._get_group(opts)
        elif opts.set_group:
            self._set_group(opts)
        elif opts.list_group:
            self._list_groups()
        elif opts.delete_group:
            self._delete_group(opts)

    def _delete_group(self, opts):
        """Delete the user group named by --group-name."""
        if opts.group is None:
            _exit_if_errors(['--group-name is required with the --delete-group option'])
        _, errors = self.rest.delete_user_group(opts.group)
        _exit_if_errors(errors)
        _success(f"Group '{opts.group}' was deleted")

    def _get_group(self, opts):
        """Print the JSON definition of the group named by --group-name."""
        if opts.group is None:
            _exit_if_errors(['--group-name is required with the --get-group option'])
        group, errors = self.rest.get_user_group(opts.group)
        _exit_if_errors(errors)
        print(json.dumps(group, indent=2))

    def _set_group(self, opts):
        """Create or update a user group."""
        if opts.group is None:
            _exit_if_errors(['--group-name is required with --set-group'])
        _, errors = self.rest.set_user_group(opts.group, opts.roles, opts.description, opts.ldap_ref)
        _exit_if_errors(errors)
        _success(f"Group '{opts.group}' set")

    def _list_groups(self):
        """Print all user groups as JSON."""
        groups, errors = self.rest.list_user_groups()
        _exit_if_errors(errors)
        print(json.dumps(groups, indent=2))

    def _delete(self, opts):
        """Delete an RBAC user; warns about flags that are ignored here."""
        if opts.rbac_user is None:
            _exit_if_errors(["--rbac-username is required with the --delete option"])
        if opts.rbac_pass is not None:
            _warning("--rbac-password is not used with the --delete option")
        if opts.rbac_name is not None:
            _warning("--rbac-name is not used with the --delete option")
        if opts.roles is not None:
            _warning("--roles is not used with the --delete option")
        if opts.auth_domain is None:
            _exit_if_errors(["--auth-domain is required with the --delete option"])

        _, errors = self.rest.delete_rbac_user(opts.rbac_user, opts.auth_domain)
        _exit_if_errors(errors)
        _success(f"User '{opts.rbac_user}' was removed")

    def _list(self, opts):
        """Print all RBAC users as JSON; warns about flags that are ignored here."""
        # _warning takes a plain string everywhere else in this class; the
        # first two calls previously passed a one-element list, which would
        # have printed a list repr instead of the message.
        if opts.rbac_user is not None:
            _warning("--rbac-username is not used with the --list option")
        if opts.rbac_pass is not None:
            _warning("--rbac-password is not used with the --list option")
        if opts.rbac_name is not None:
            _warning("--rbac-name is not used with the --list option")
        if opts.roles is not None:
            _warning("--roles is not used with the --list option")
        if opts.auth_domain is not None:
            _warning("--auth-domain is not used with the --list option")

        result, errors = self.rest.list_rbac_users()
        _exit_if_errors(errors)
        print(json.dumps(result, indent=2))

    def _get(self, opts):
        """Print the details of a single RBAC user, selected by --rbac-username."""
        if opts.rbac_user is None:
            _exit_if_errors(["--rbac-username is required with the --get option"])
        if opts.rbac_pass is not None:
            _warning("--rbac-password is not used with the --get option")
        if opts.rbac_name is not None:
            _warning("--rbac-name is not used with the --get option")
        if opts.roles is not None:
            _warning("--roles is not used with the --get option")
        if opts.auth_domain is not None:
            _warning("--auth-domain is not used with the --get option")

        # There is no per-user GET endpoint used here; filter the full list.
        result, errors = self.rest.list_rbac_users()
        _exit_if_errors(errors)
        user = [u for u in result if u['id'] == opts.rbac_user]
        if len(user) != 0:
            print(json.dumps(user, indent=2))
        else:
            _exit_if_errors([f'no user {opts.rbac_user}'])

    def _my_roles(self, opts):
        """Print the roles of the authenticated user."""
        if opts.rbac_user is not None:
            _warning("--rbac-username is not used with the --my-roles option")
        if opts.rbac_pass is not None:
            _warning("--rbac-password is not used with the --my-roles option")
        if opts.rbac_name is not None:
            _warning("--rbac-name is not used with the --my-roles option")
        if opts.roles is not None:
            _warning("--roles is not used with the --my-roles option")
        if opts.auth_domain is not None:
            _warning("--auth-domain is not used with the --my-roles option")

        result, errors = self.rest.my_roles()
        _exit_if_errors(errors)
        print(json.dumps(result, indent=2))

    def _set(self, opts):
        """Create or update an RBAC user."""
        if opts.rbac_user is None:
            _exit_if_errors(["--rbac-username is required with the --set option"])
        # External-domain users authenticate outside the cluster, so a local
        # password is meaningless; drop it with a warning instead of failing.
        if opts.rbac_pass is not None and opts.auth_domain == "external":
            _warning("--rbac-password cannot be used with the external auth domain")
            opts.rbac_pass = None
        if opts.auth_domain is None:
            _exit_if_errors(["--auth-domain is required with the --set option"])

        _, errors = self.rest.set_rbac_user(opts.rbac_user, opts.rbac_pass, opts.rbac_name, opts.roles,
                                            opts.auth_domain, opts.groups)
        _exit_if_errors(errors)

        if opts.roles is not None and "query_external_access" in opts.roles:
            _warning('Granting the query_external_access role permits execution of the N1QL '
                     'function CURL() and may allow access to other network endpoints in the local network and'
                     'the Internet.')

        _success(f"User {opts.rbac_user} set")

    @staticmethod
    def get_man_page_name():
        # Parenthesized so the suffix choice binds before the concatenation;
        # the unparenthesized form returned just ".html" on Windows.
        return "couchbase-cli-user-manage" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage RBAC users"
class XdcrReplicate(Subcommand):
    """The xdcr replicate subcommand: create, inspect and control XDCR replications."""

    def __init__(self):
        super(XdcrReplicate, self).__init__()
        self.parser.prog = "couchbase-cli xdcr-replicate"
        group = self.parser.add_argument_group("XDCR replicate options")
        group.add_argument("--get", action="store_true", help="Retrieve the settings of a XDCR replication.")
        group.add_argument("--create", dest="create", action="store_true",
                           default=False, help="Create an XDCR replication")
        group.add_argument("--delete", dest="delete", action="store_true",
                           default=False, help="Delete an XDCR replication")
        group.add_argument("--pause", dest="pause", action="store_true",
                           default=False, help="Pause an XDCR replication")
        group.add_argument("--list", dest="list", action="store_true",
                           default=False, help="List all XDCR replications")
        group.add_argument("--resume", dest="resume", action="store_true",
                           default=False, help="Resume an XDCR replication")
        group.add_argument("--settings", dest="settings", action="store_true",
                           default=False, help="Set advanced settings for an XDCR replication")
        group.add_argument("--xdcr-from-bucket", dest="from_bucket", metavar="<bucket>",
                           help="The name bucket to replicate data from")
        group.add_argument("--xdcr-to-bucket", dest="to_bucket", metavar="<bucket>",
                           help="The name bucket to replicate data to")
        group.add_argument("--xdcr-cluster-name", dest="cluster_name", metavar="<name>",
                           help="The name of the cluster reference to replicate to")
        group.add_argument("--xdcr-replication-mode", dest="rep_mode", metavar="<mode>",
                           choices=["xmem", "capi"], action=CBDeprecatedAction, help=SUPPRESS)
        group.add_argument("--filter-expression", dest="filter", metavar="<regex>",
                           help="Regular expression to filter replication streams")
        group.add_argument("--filter-skip-restream", dest="filter_skip", action="store_true", default=False,
                           help="Restart the replication. It must be specified together with --filter-expression")
        group.add_argument("--xdcr-replicator", dest="replicator_id", metavar="<id>",
                           help="Replication ID")
        group.add_argument("--checkpoint-interval", dest="chk_int", type=int, metavar="<seconds>",
                           help="Intervals between checkpoints in seconds (60 to 14400)")
        group.add_argument("--worker-batch-size", dest="worker_batch_size", type=int,
                           metavar="<num>", help="Doc batch size (500 to 10000)")
        group.add_argument("--doc-batch-size", dest="doc_batch_size", type=int, metavar="<KB>",
                           help="Document batching size in KB (10 to 100000)")
        group.add_argument("--failure-restart-interval", dest="fail_interval", type=int,
                           metavar="<seconds>",
                           help="Interval for restarting failed xdcr in seconds (1 to 300)")
        group.add_argument("--optimistic-replication-threshold", dest="rep_thresh", type=int,
                           metavar="<bytes>",
                           help="Document body size threshold to trigger optimistic replication"
                                + " (bytes)")
        group.add_argument("--source-nozzle-per-node", dest="src_nozzles", type=int,
                           metavar="<num>",
                           help="The number of source nozzles per source node (1 to 10)")
        group.add_argument("--target-nozzle-per-node", dest="dst_nozzles", type=int,
                           metavar="<num>",
                           help="The number of outgoing nozzles per target node (1 to 10)")
        group.add_argument("--bandwidth-usage-limit", dest="usage_limit", type=int,
                           metavar="<num>", help="The bandwidth usage limit in MiB/Sec")
        group.add_argument("--enable-compression", dest="compression", metavar="<1|0>", choices=["1", "0"],
                           help="Enable/disable compression")
        group.add_argument("--log-level", dest="log_level", metavar="<level>",
                           choices=["Error", "Warn", "Info", "Debug", "Trace"],
                           help="The XDCR log level")
        group.add_argument("--stats-interval", dest="stats_interval", metavar="<ms>",
                           help="The interval for statistics updates (in milliseconds)")
        group.add_argument("--priority", dest="priority", choices=['High', 'Medium', 'Low'],
                           metavar="<High|Medium|Low>", help='XDCR priority, by default set to High')
        group.add_argument('--reset-expiry', choices=['1', '0'], metavar='<1|0>', dest='reset_expiry',
                           default=None, help='When set to true the expiry of mutations will be set to zero')
        group.add_argument('--filter-deletion', choices=['1', '0'], metavar='<1|0>', default=None, dest='filter_del',
                           help='When set to true delete mutations will be filter out and not sent to the target '
                                'cluster')
        group.add_argument('--filter-expiration', choices=['1', '0'], metavar='<1|0>', default=None, dest='filter_exp',
                           help='When set to true expiry mutations will be filter out and not sent to the target '
                                'cluster')

        collection_group = self.parser.add_argument_group("Collection options")
        collection_group.add_argument('--collection-explicit-mappings', choices=['1', '0'], metavar='<1|0>',
                                      default=None, help='If explicit collection mappings is to be used. '
                                                         '(Enterprise Edition Only)')
        collection_group.add_argument('--collection-migration', choices=['1', '0'], metavar='<1|0>',
                                      default=None, help='If XDCR is to run in collection migration mode. '
                                                         '(Enterprise Edition only)')
        collection_group.add_argument('--collection-mapping-rules', type=str, default=None, metavar='<mappings>',
                                      help='The mapping rules specified as a JSON formatted string. '
                                           '(Enterprise Edition Only)')

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Validate enterprise-only flags and dispatch exactly one action."""
        if not self.enterprise and opts.compression:
            _exit_if_errors(["--enable-compression can only be configured on enterprise edition"])
        if not self.enterprise and (opts.collection_migration or opts.collection_explicit_mappings is not None
                                    or opts.collection_mapping_rules is not None):
            _exit_if_errors(["[--collection-migration, --collection-explicit-mappings, --collection-mapping-rules] can"
                             " only be configured on enterprise edition"])

        # Translate the CLI's 1/0 toggle into the REST API's named values.
        if opts.compression == "0":
            opts.compression = "None"
        elif opts.compression == "1":
            opts.compression = "Auto"

        actions = sum([opts.create, opts.delete, opts.pause, opts.list, opts.resume, opts.settings, opts.get])
        if actions == 0:
            _exit_if_errors(['Must specify one of --create, --delete, --pause, --list, --resume, --settings, --get'])
        elif actions > 1:
            _exit_if_errors(['The --create, --delete, --pause, --list, --resume, --settings, --get flags may not be '
                             'specified at the same time'])
        elif opts.create:
            self._create(opts)
        elif opts.delete:
            self._delete(opts)
        elif opts.pause or opts.resume:
            self._pause_resume(opts)
        elif opts.list:
            self._list()
        elif opts.settings:
            self._settings(opts)
        elif opts.get:
            self._get(opts)

    def _get(self, opts):
        """Print the settings of one replication as JSON."""
        if opts.replicator_id is None:
            _exit_if_errors(["--xdcr-replicator is needed to get the replicator settings"])

        settings, errors = self.rest.get_xdcr_replicator_settings(opts.replicator_id)
        _exit_if_errors(errors)
        print(json.dumps(settings, indent=4, sort_keys=True))

    def _create(self, opts):
        """Create a replication from the supplied options."""
        if opts.collection_migration == '1' and opts.collection_explicit_mappings == '1':
            _exit_if_errors(['cannot enable both collection migration and explicit mappings'])
        if opts.filter_skip and opts.filter is None:
            # Message previously misspelled the flag as "--filter-expersion".
            _exit_if_errors(["--filter-expression is needed with the --filter-skip-restream option"])

        _, errors = self.rest.create_xdcr_replication(opts.cluster_name, opts.to_bucket, opts.from_bucket, opts.chk_int,
                                                      opts.worker_batch_size, opts.doc_batch_size, opts.fail_interval,
                                                      opts.rep_thresh, opts.src_nozzles, opts.dst_nozzles,
                                                      opts.usage_limit, opts.compression, opts.log_level,
                                                      opts.stats_interval, opts.filter, opts.priority,
                                                      opts.reset_expiry, opts.filter_del, opts.filter_exp,
                                                      opts.collection_explicit_mappings, opts.collection_migration,
                                                      opts.collection_mapping_rules)
        _exit_if_errors(errors)
        _success("XDCR replication created")

    def _delete(self, opts):
        """Delete the replication named by --xdcr-replicator."""
        if opts.replicator_id is None:
            _exit_if_errors(["--xdcr-replicator is needed to delete a replication"])

        _, errors = self.rest.delete_xdcr_replicator(opts.replicator_id)
        _exit_if_errors(errors)
        _success("XDCR replication deleted")

    def _pause_resume(self, opts):
        """Pause or resume a replication, rejecting redundant requests."""
        if opts.replicator_id is None:
            _exit_if_errors(["--xdcr-replicator is needed to pause or resume a replication"])

        tasks, errors = self.rest.get_tasks()
        _exit_if_errors(errors)
        # Reject pausing something that is not running / resuming something
        # that already runs, based on the task list.
        for task in tasks:
            if task["type"] == "xdcr" and task["id"] == opts.replicator_id:
                if opts.pause and task["status"] == "notRunning":
                    _exit_if_errors(["The replication is not running yet. Pause is not needed"])
                if opts.resume and task["status"] == "running":
                    _exit_if_errors(["The replication is running already. Resume is not needed"])
                break

        if opts.pause:
            _, errors = self.rest.pause_xdcr_replication(opts.replicator_id)
            _exit_if_errors(errors)
            _success("XDCR replication paused")
        elif opts.resume:
            _, errors = self.rest.resume_xdcr_replication(opts.replicator_id)
            _exit_if_errors(errors)
            _success("XDCR replication resumed")

    def _list(self):
        """Print a summary of every XDCR task in the cluster."""
        tasks, errors = self.rest.get_tasks()
        _exit_if_errors(errors)
        for task in tasks:
            if task["type"] == "xdcr":
                print(f'stream id: {task["id"]}')
                print(f'   status: {task["status"]}')
                print(f'   source: {task["source"]}')
                print(f'   target: {task["target"]}')
                if "filterExpression" in task and task["filterExpression"] != "":
                    print(f'   filter: {task["filterExpression"]}')

    def _settings(self, opts):
        """Update advanced settings on an existing replication."""
        if opts.replicator_id is None:
            _exit_if_errors(["--xdcr-replicator is needed to change a replicators settings"])
        if opts.filter_skip and opts.filter is None:
            # Message previously misspelled the flag as "--filter-expersion".
            _exit_if_errors(["--filter-expression is needed with the --filter-skip-restream option"])
        if opts.collection_migration == '1' and opts.collection_explicit_mappings == '1':
            _exit_if_errors(['cannot enable both collection migration and explicit mappings'])

        _, errors = self.rest.xdcr_replicator_settings(opts.chk_int, opts.worker_batch_size, opts.doc_batch_size,
                                                       opts.fail_interval, opts.rep_thresh, opts.src_nozzles,
                                                       opts.dst_nozzles, opts.usage_limit, opts.compression,
                                                       opts.log_level, opts.stats_interval, opts.replicator_id,
                                                       opts.filter, opts.filter_skip, opts.priority, opts.reset_expiry,
                                                       opts.filter_del, opts.filter_exp,
                                                       opts.collection_explicit_mappings, opts.collection_migration,
                                                       opts.collection_mapping_rules)
        _exit_if_errors(errors)
        _success("XDCR replicator settings updated")

    @staticmethod
    def get_man_page_name():
        # Parenthesized so the suffix choice binds before the concatenation;
        # the unparenthesized form returned just ".html" on Windows.
        return "couchbase-cli-xdcr-replicate" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage XDCR cluster references"
class XdcrSetup(Subcommand):
    """The xdcr setup subcommand: manage remote cluster references for XDCR."""

    def __init__(self):
        super(XdcrSetup, self).__init__()
        self.parser.prog = "couchbase-cli xdcr-setup"
        group = self.parser.add_argument_group("XDCR setup options")
        group.add_argument("--create", dest="create", action="store_true",
                           default=False, help="Create an XDCR remote reference")
        group.add_argument("--delete", dest="delete", action="store_true",
                           default=False, help="Delete an XDCR remote reference")
        group.add_argument("--edit", dest="edit", action="store_true",
                           default=False, help="Set the local read-only user")
        group.add_argument("--list", dest="list", action="store_true",
                           default=False, help="List all XDCR remote references")
        group.add_argument("--xdcr-cluster-name", dest="name", metavar="<name>",
                           help="The name for the remote cluster reference")
        group.add_argument("--xdcr-hostname", dest="hostname", metavar="<hostname>",
                           help="The hostname of the remote cluster reference")
        group.add_argument("--xdcr-username", dest="r_username", metavar="<username>",
                           help="The username of the remote cluster reference")
        group.add_argument("--xdcr-password", dest="r_password", metavar="<password>",
                           help="The password of the remote cluster reference")
        group.add_argument("--xdcr-user-certificate", dest="r_certificate", metavar="<path>",
                           help="The user certificate for authentication")
        group.add_argument("--xdcr-user-key", dest="r_key", metavar="<path>",
                           help="The user key for authentication")
        group.add_argument("--xdcr-demand-encryption", dest="encrypt", choices=["0", "1"],
                           action=CBDeprecatedAction, help=SUPPRESS)
        group.add_argument("--xdcr-encryption-type", dest="encryption_type", choices=["full", "half"],
                           metavar="<type>", action=CBDeprecatedAction, help=SUPPRESS)
        group.add_argument("--xdcr-certificate", dest="certificate", metavar="<path>",
                           help="The certificate used for encryption")
        group.add_argument("--xdcr-secure-connection", dest="secure_connection", choices=["none", "full", "half"],
                           metavar="<type>", help="The XDCR secure connection type")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate that exactly one action flag was given and dispatch to it."""
        actions = sum([opts.create, opts.delete, opts.edit, opts.list])
        if actions == 0:
            _exit_if_errors(["Must specify one of --create, --delete, --edit, --list"])
        elif actions > 1:
            _exit_if_errors(["The --create, --delete, --edit, --list flags may not be specified at the same time"])
        elif opts.create or opts.edit:
            self._set(opts)
        elif opts.delete:
            self._delete(opts)
        elif opts.list:
            self._list()

    def _set(self, opts):
        """Create or edit a remote cluster reference."""
        cmd = "create"
        if opts.edit:
            cmd = "edit"

        if opts.name is None:
            _exit_if_errors([f'--xdcr-cluster-name is required to {cmd} a cluster connection'])
        if opts.hostname is None:
            _exit_if_errors([f'--xdcr-hostname is required to {cmd} a cluster connection'])
        # These checks must inspect the remote-reference credentials
        # (--xdcr-username / --xdcr-password, dests r_username / r_password);
        # they previously checked opts.username / opts.password, which are the
        # cluster login credentials from the base parser and are always set,
        # so the validation never fired.
        if opts.r_username is None:
            _exit_if_errors([f'--xdcr-username is required to {cmd} a cluster connection'])
        if opts.r_password is None:
            _exit_if_errors([f'--xdcr-password is required to {cmd} a cluster connection'])

        if (opts.encrypt is not None or opts.encryption_type is not None) and opts.secure_connection is not None:
            _exit_if_errors(["Cannot use deprecated flags --xdcr-demand-encryption or --xdcr-encryption-type with"
                             " --xdcr-secure-connection"])

        # Map the modern --xdcr-secure-connection flag onto the legacy
        # encrypt/encryption_type pair the REST layer still expects.
        if opts.secure_connection == "none":
            opts.encrypt = "0"
            opts.encryption_type = None
        elif opts.secure_connection == "half":
            opts.encrypt = "1"
            opts.encryption_type = "half"
        elif opts.secure_connection == "full":
            opts.encrypt = "1"
            opts.encryption_type = "full"
        elif opts.encrypt is None and opts.encryption_type is None:
            opts.encrypt = "0"
            opts.encryption_type = None

        raw_cert = None
        if opts.encrypt == "1":
            if opts.encryption_type is None:
                opts.encryption_type = "full"
            # Full encryption requires the remote cluster's CA certificate.
            if opts.encryption_type == "full":
                if opts.certificate is None:
                    _exit_if_errors(["certificate required if encryption is demanded"])
                raw_cert = _exit_on_file_read_failure(opts.certificate)

        raw_user_key = None
        if opts.r_key:
            raw_user_key = _exit_on_file_read_failure(opts.r_key)
        raw_user_cert = None
        if opts.r_certificate:
            raw_user_cert = _exit_on_file_read_failure(opts.r_certificate)

        if opts.create:
            _, errors = self.rest.create_xdcr_reference(opts.name, opts.hostname, opts.r_username, opts.r_password,
                                                        opts.encrypt, opts.encryption_type, raw_cert, raw_user_cert,
                                                        raw_user_key)
            _exit_if_errors(errors)
            _success("Cluster reference created")
        else:
            _, errors = self.rest.edit_xdcr_reference(opts.name, opts.hostname, opts.r_username, opts.r_password,
                                                      opts.encrypt, opts.encryption_type, raw_cert, raw_user_cert,
                                                      raw_user_key)
            _exit_if_errors(errors)
            _success("Cluster reference edited")

    def _delete(self, opts):
        """Delete the remote cluster reference named by --xdcr-cluster-name."""
        if opts.name is None:
            _exit_if_errors(["--xdcr-cluster-name is required to delete a cluster connection"])

        _, errors = self.rest.delete_xdcr_reference(opts.name)
        _exit_if_errors(errors)
        _success("Cluster reference deleted")

    def _list(self):
        """Print every non-deleted remote cluster reference."""
        clusters, errors = self.rest.list_xdcr_references()
        _exit_if_errors(errors)
        for cluster in clusters:
            if not cluster["deleted"]:
                print(f'cluster name: {cluster["name"]}')
                print(f'        uuid: {cluster["uuid"]}')
                print(f'   host name: {cluster["hostname"]}')
                print(f'   user name: {cluster["username"]}')
                print(f'         uri: {cluster["uri"]}')

    @staticmethod
    def get_man_page_name():
        # Parenthesized so the suffix choice binds before the concatenation;
        # the unparenthesized form returned just ".html" on Windows.
        return "couchbase-cli-xdcr-setup" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage XDCR replications"
class EventingFunctionSetup(Subcommand):
    """The Eventing Service Function setup subcommand."""

    def __init__(self):
        super(EventingFunctionSetup, self).__init__()
        self.parser.prog = "couchbase-cli eventing-function-setup"
        group = self.parser.add_argument_group("Eventing Service Function setup options")
        # "import" is a keyword, so the dest is prefixed with an underscore.
        group.add_argument("--import", dest="_import", action="store_true",
                           default=False, help="Import functions")
        group.add_argument("--export", dest="export", action="store_true",
                           default=False, help="Export a function")
        group.add_argument("--export-all", dest="export_all", action="store_true",
                           default=False, help="Export all functions")
        group.add_argument("--delete", dest="delete", action="store_true",
                           default=False, help="Delete a function")
        group.add_argument("--list", dest="list", action="store_true",
                           default=False, help="List all functions")
        group.add_argument("--deploy", dest="deploy", action="store_true",
                           default=False, help="Deploy a function")
        group.add_argument("--undeploy", dest="undeploy", action="store_true",
                           default=False, help="Undeploy a function")
        group.add_argument("--boundary", dest="boundary", metavar="<from-everything|from-now>",
                           choices=["from-everything", "from-now"], default=False,
                           help="Set the function deployment boundary")
        group.add_argument("--name", dest="name", metavar="<name>",
                           default=False, help="The name of the function to take an action on")
        group.add_argument("--file", dest="filename", metavar="<file>",
                           default=False, help="The file to export and import function(s) to and from")
        group.add_argument("--pause", dest="pause", action="store_true", help="Pause a function")
        group.add_argument("--resume", dest="resume", action="store_true", help="Resume a function")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate that exactly one action flag was given and dispatch to it."""
        # pylint: disable=protected-access
        actions = sum([opts._import, opts.export, opts.export_all, opts.delete, opts.list, opts.deploy, opts.undeploy,
                       opts.pause, opts.resume])
        if actions == 0:
            _exit_if_errors(["Must specify one of --import, --export, --export-all, --delete, --list, --deploy,"
                             " --undeploy, --pause, --resume"])
        elif actions > 1:
            _exit_if_errors(['The --import, --export, --export-all, --delete, --list, --deploy, --undeploy, --pause, '
                             '--resume flags may not be specified at the same time'])
        elif opts._import:  # pylint: disable=protected-access
            self._import(opts)
        elif opts.export:
            self._export(opts)
        elif opts.export_all:
            self._export_all(opts)
        elif opts.delete:
            self._delete(opts)
        elif opts.list:
            self._list()
        elif opts.deploy:
            self._deploy_undeploy(opts, True)
        elif opts.undeploy:
            self._deploy_undeploy(opts, False)
        elif opts.pause:
            self._pause_resume(opts, True)
        elif opts.resume:
            self._pause_resume(opts, False)

    def _pause_resume(self, opts, pause):
        """Pause (pause=True) or resume (pause=False) the named function."""
        if not opts.name:
            _exit_if_errors([f"Flag --name is required with the {'--pause' if pause else '--resume'} flag"])
        _, err = self.rest.pause_resume_function(opts.name, pause)
        _exit_if_errors(err)
        _success(f"Function was {'paused' if pause else 'resumed'}")

    def _import(self, opts):
        """Import function definitions from a local JSON file."""
        if not opts.filename:
            _exit_if_errors(["--file is needed to import functions"])
        import_functions = _exit_on_file_read_failure(opts.filename)
        import_functions = json.loads(import_functions)
        _, errors = self.rest.import_functions(import_functions)
        _exit_if_errors(errors)
        _success("Events imported")

    def _export(self, opts):
        """Export one function, selected by name, to a local JSON file."""
        if not opts.filename:
            _exit_if_errors(["--file is needed to export a function"])
        if not opts.name:
            _exit_if_errors(["--name is needed to export a function"])

        functions, errors = self.rest.export_functions()
        _exit_if_errors(errors)

        # The server only exposes a full export; pick out the requested
        # function by its appname.
        exported_function = None
        for function in functions:
            if function["appname"] == opts.name:
                exported_function = [function]
        if not exported_function:
            _exit_if_errors([f'Function {opts.name} does not exist'])

        _exit_on_file_write_failure(opts.filename, json.dumps(exported_function, separators=(',', ':')))
        _success("Function exported to: " + opts.filename)

    def _export_all(self, opts):
        """Export every function to a local JSON file."""
        if not opts.filename:
            _exit_if_errors(["--file is needed to export all functions"])
        exported_functions, errors = self.rest.export_functions()
        _exit_if_errors(errors)
        _exit_on_file_write_failure(opts.filename, json.dumps(exported_functions, separators=(',', ':')))
        _success(f'All functions exported to: {opts.filename}')

    def _delete(self, opts):
        """Delete the named function."""
        if not opts.name:
            _exit_if_errors(["--name is needed to delete a function"])
        _, errors = self.rest.delete_function(opts.name)
        _exit_if_errors(errors)
        _success("Request to delete the function was accepted")

    def _deploy_undeploy(self, opts, deploy):
        """Deploy (deploy=True) or undeploy (deploy=False) the named function."""
        if not opts.name:
            _exit_if_errors([f"--name is needed to {'deploy' if deploy else 'undeploy'} a function"])
        if deploy and not opts.boundary:
            _exit_if_errors(["--boundary is needed to deploy a function"])
        _, errors = self.rest.deploy_undeploy_function(opts.name, deploy, opts.boundary)
        _exit_if_errors(errors)
        _success(f"Request to {'deploy' if deploy else 'undeploy'} the function was accepted")

    def _list(self):
        """Print every function with its deployment status and buckets."""
        functions, errors = self.rest.list_functions()
        _exit_if_errors(errors)

        for function in functions:
            print(function['appname'])
            status = ''
            if function['settings']['deployment_status']:
                status = 'Deployed'
            else:
                status = 'Undeployed'
            print(f' Status: {status}')
            print(f' Source Bucket: {function["depcfg"]["source_bucket"]}')
            print(f' Metadata Bucket: {function["depcfg"]["metadata_bucket"]}')

    @staticmethod
    def get_man_page_name():
        # Parenthesized so the suffix choice binds before the concatenation;
        # the unparenthesized form returned just ".html" on Windows.
        return "couchbase-cli-eventing-function-setup" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage Eventing Service Functions"
class AnalyticsLinkSetup(Subcommand):
"""The analytics link setup subcommand"""
    def __init__(self):
        """Define the analytics-link-setup argument groups: common link options,
        plus type-specific options for couchbase, s3 and azureblob links."""
        super(AnalyticsLinkSetup, self).__init__()
        self.parser.prog = "couchbase-cli analytics-link-setup"
        group = self.parser.add_argument_group("Analytics Service link setup options")
        group.add_argument("--create", dest="create", action="store_true",
                           default=False, help="Create a link")
        group.add_argument("--delete", dest="delete", action="store_true",
                           default=False, help="Delete a link")
        group.add_argument("--edit", dest="edit", action="store_true",
                           default=False, help="Modify a link")
        group.add_argument("--list", dest="list", action="store_true",
                           default=False, help="List all links")
        # --dataverse is kept for backwards compatibility; execute() warns and
        # _set() copies it into opts.scope.
        group.add_argument("--dataverse", dest="dataverse", metavar="<name>",
                           help="The dataverse of the link (Deprecated)")
        group.add_argument("--scope", dest="scope", metavar="<name>",
                           help="The analytics scope of the link in its canonical form")
        group.add_argument("--name", dest="name", metavar="<name>",
                           help="The name of the link")
        group.add_argument("--type", dest="type", metavar="<type>", choices=["couchbase", "s3", "azureblob"],
                           help="The type of the link ('couchbase', 's3' or 'azureblob')")
        group = self.parser.add_argument_group("Analytics Service Couchbase link setup options")
        group.add_argument("--hostname", dest="hostname", metavar="<hostname>",
                           help="The hostname of the link")
        group.add_argument("--link-username", dest="link_username", metavar="<username>",
                           help="The username of the link")
        group.add_argument("--link-password", dest="link_password", metavar="<password>",
                           help="The password of the link")
        group.add_argument("--user-certificate", dest="user_certificate", metavar="<path>",
                           help="The user certificate for authentication")
        group.add_argument("--user-key", dest="user_key", metavar="<path>",
                           help="The user key for authentication")
        group.add_argument("--certificate", dest="certificate", metavar="<path>",
                           help="The certificate used for encryption")
        group.add_argument("--encryption", dest="encryption", choices=["none", "full", "half"],
                           metavar="<type>",
                           help="The link encryption type ('none', 'full' or 'half')")
        group = self.parser.add_argument_group("Analytics Service S3 link setup options")
        group.add_argument("--access-key-id", dest="access_key_id", metavar="<id>",
                           help="The access key ID of the link")
        group.add_argument("--secret-access-key", dest="secret_access_key", metavar="<key>",
                           help="The secret access key of the link")
        group.add_argument("--session-token", dest="session_token", metavar="<token>",
                           help="Temporary credentials session token")
        group.add_argument("--region", dest="region", metavar="<region>",
                           help="The region of the link")
        group.add_argument("--service-endpoint", dest="service_endpoint", metavar="<url>",
                           help="The service endpoint of the link (optional)")
        group = self.parser.add_argument_group("Analytics Service Azure Blob link setup options")
        group.add_argument("--connection-string", dest="connection_string", metavar="<key>",
                           help="The connection string of the link")
        group.add_argument("--account-name", dest="account_name", metavar="<id>",
                           help="The account name of the link")
        group.add_argument("--account-key", dest="account_key", metavar="<key>",
                           help="The account key of the link")
        group.add_argument("--shared-access-signature", dest="shared_access_signature", metavar="<token>",
                           help="The shared access signature of the link")
        group.add_argument("--blob-endpoint", dest="blob_endpoint", metavar="<url>",
                           help="The blob endpoint of the link (optional)")
        group.add_argument("--endpoint-suffix", dest="endpoint_suffix", metavar="<url>",
                           help="The endpoint suffix of the link (optional)")
@rest_initialiser(cluster_init_check=True, version_check=True)
def execute(self, opts):
actions = sum([opts.create, opts.delete, opts.edit, opts.list])
if actions == 0:
_exit_if_errors(["Must specify one of --create, --delete, --edit, --list"])
elif actions > 1:
_exit_if_errors(["The --create, --delete, --edit, --list flags may not be specified at the same time"])
if opts.dataverse:
_deprecated("--dataverse is deprecated, please use --scope instead")
if opts.dataverse and opts.scope:
_exit_if_errors(['Only one of --dataverse and --scope is allowed'])
if opts.create or opts.edit:
self._set(opts)
elif opts.delete:
self._delete(opts)
elif opts.list:
self._list(opts)
def _set(self, opts):
cmd = "create"
if opts.edit:
cmd = "edit"
if opts.dataverse is None and opts.scope is None:
_exit_if_errors([f'--dataverse or --scope is required to {cmd} a link'])
if opts.name is None:
_exit_if_errors([f'--name is required to {cmd} a link'])
if opts.create and opts.type is None:
_exit_if_errors([f'--type is required to {cmd} a link'])
if opts.type == 'azureblob':
if opts.connection_string is None and opts.account_key is None and opts.shared_access_signature is None:
_exit_if_errors(['No authentication parameters provided'])
if opts.connection_string and (opts.account_key or opts.shared_access_signature):
_exit_if_errors(['Only a single authentication method is allowed'])
if opts.account_key and opts.shared_access_signature:
_exit_if_errors(['Only a single authentication method is allowed'])
if opts.dataverse:
opts.scope = opts.dataverse
if opts.certificate:
opts.certificate = _exit_on_file_read_failure(opts.certificate)
if opts.user_key:
opts.user_key = _exit_on_file_read_failure(opts.user_key)
if opts.user_certificate:
opts.user_certificate = _exit_on_file_read_failure(opts.user_certificate)
if opts.create:
_, errors = self.rest.create_analytics_link(opts)
_exit_if_errors(errors)
_success("Link created")
else:
_, errors = self.rest.edit_analytics_link(opts)
_exit_if_errors(errors)
_success("Link edited")
def _delete(self, opts):
if opts.dataverse is None and opts.scope is None:
_exit_if_errors(['--dataverse or --scope is required to delete a link'])
if opts.name is None:
_exit_if_errors(['--name is required to delete a link'])
if opts.dataverse:
opts.scope = opts.dataverse
_, errors = self.rest.delete_analytics_link(opts.scope, opts.name)
_exit_if_errors(errors)
_success("Link deleted")
def _list(self, opts):
if opts.dataverse:
opts.scope = opts.dataverse
clusters, errors = self.rest.list_analytics_links(opts.scope, opts.name, opts.type)
_exit_if_errors(errors)
print(json.dumps(clusters, sort_keys=True, indent=2))
@staticmethod
def get_man_page_name():
return "couchbase-cli-analytics-link-setup" + ".1" if os.name != "nt" else ".html"
    @staticmethod
    def get_description():
        # One-line summary shown in the top-level couchbase-cli help listing.
        return "Manage Analytics Links"
class UserChangePassword(Subcommand):
    """The change password subcommand"""

    def __init__(self):
        super(UserChangePassword, self).__init__()
        self.parser.prog = "couchbase-cli user-change-password"
        group = self.parser.add_argument_group("User password change option")
        group.add_argument("--new-password", dest="new_pass", metavar="<password>", required=True,
                           help="The new password")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Change the password of the user the CLI authenticated as."""
        if opts.new_pass is None:
            # Defensive only: argparse already enforces required=True above.
            _exit_if_errors(["--new-password is required"])
        # NOTE(review): user_change_passsword is the (misspelled) name exposed by
        # the REST client; it must be renamed there before it can change here.
        _, rv = self.rest.user_change_passsword(opts.new_pass)
        _exit_if_errors(rv)
        _success(f'Changed password for {opts.username}')

    @staticmethod
    def get_man_page_name():
        # Parenthesised to fix the precedence bug that made the Windows branch
        # return just ".html" without the page name.
        return "couchbase-cli-user-change-password" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Change user password"
class CollectionManage(Subcommand):
    """The collections-manage subcommand"""

    def __init__(self):
        super(CollectionManage, self).__init__()
        self.parser.prog = "couchbase-cli collection-manage"
        group = self.parser.add_argument_group("Collection manage option")
        group.add_argument("--bucket", dest="bucket", metavar="<bucket>", required=True, help="The bucket to use")
        group.add_argument("--create-scope", dest="create_scope", metavar="<scope>", default=None,
                           help="The name of the scope to make")
        group.add_argument("--drop-scope", dest="drop_scope", metavar="<scope>", default=None,
                           help="The name of the scope to remove")
        group.add_argument("--list-scopes", dest="list_scopes", action="store_true", default=None,
                           help="List all of the scopes in the bucket")
        group.add_argument("--create-collection", dest="create_collection", metavar="<collection>", default=None,
                           help="The path to the collection to make")
        group.add_argument("--drop-collection", dest="drop_collection", metavar="<collection>", default=None,
                           help="The path to the collection to remove")
        group.add_argument("--list-collections", dest="list_collections", metavar="<scope_list>", default=None,
                           const="", nargs='?', help="List all of the collections in the provided scopes. If no scopes "
                           "are provided it will print all collections")
        group.add_argument("--max-ttl", dest="max_ttl", metavar="<seconds>", type=int,
                           help="Set the maximum TTL the collection will accept")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate that exactly one action flag was supplied and run it."""
        cmds = [opts.create_scope, opts.drop_scope, opts.list_scopes, opts.create_collection, opts.drop_collection,
                opts.list_collections]
        cmd_total = sum(cmd is not None for cmd in cmds)
        args = "--create-scope, --drop-scope, --list-scopes, --create-collection, --drop-collection, or " \
               "--list-collections"
        if cmd_total == 0:
            _exit_if_errors([f'Must specify one of the following: {args}'])
        elif cmd_total != 1:
            _exit_if_errors([f'Only one of the following may be specified: {args}'])
        if opts.max_ttl is not None and opts.create_collection is None:
            _exit_if_errors(["--max-ttl can only be set with --create-collection"])
        if opts.create_scope:
            self._create_scope(opts)
        if opts.drop_scope:
            self._drop_scope(opts)
        if opts.list_scopes:
            self._list_scopes(opts)
        if opts.create_collection:
            self._create_collection(opts)
        if opts.drop_collection:
            self._drop_collection(opts)
        if opts.list_collections is not None:
            # "" (bare --list-collections) means "all scopes", so compare to None.
            self._list_collections(opts)

    def _create_scope(self, opts):
        """Create the scope named by --create-scope in --bucket."""
        _, errors = self.rest.create_scope(opts.bucket, opts.create_scope)
        _exit_if_errors(errors)
        _success("Scope created")

    def _drop_scope(self, opts):
        """Drop the scope named by --drop-scope from --bucket."""
        _, errors = self.rest.drop_scope(opts.bucket, opts.drop_scope)
        _exit_if_errors(errors)
        _success("Scope dropped")

    def _list_scopes(self, opts):
        """Print the name of every scope in --bucket, one per line."""
        manifest, errors = self.rest.get_manifest(opts.bucket)
        _exit_if_errors(errors)
        for scope in manifest['scopes']:
            print(scope['name'])

    def _create_collection(self, opts):
        """Create the collection given as '<scope>.<collection>' by --create-collection."""
        scope, collection = self._get_scope_collection(opts.create_collection)
        _, errors = self.rest.create_collection(opts.bucket, scope, collection, opts.max_ttl)
        _exit_if_errors(errors)
        _success("Collection created")

    def _drop_collection(self, opts):
        """Drop the collection given as '<scope>.<collection>' by --drop-collection."""
        scope, collection = self._get_scope_collection(opts.drop_collection)
        _, errors = self.rest.drop_collection(opts.bucket, scope, collection)
        _exit_if_errors(errors)
        _success("Collection dropped")

    def _list_collections(self, opts):
        """Print the collections of the requested scopes (all scopes when none given)."""
        manifest, errors = self.rest.get_manifest(opts.bucket)
        _exit_if_errors(errors)
        if opts.list_collections == "":
            scope_dict = {}
        else:
            # Track each requested scope so missing ones can be reported afterwards.
            scope_dict = {scope: False for scope in opts.list_collections.split(',')}
        if opts.output == 'json':
            self._json_list_collections(manifest, scope_dict)
            return
        for scope in manifest['scopes']:
            if len(scope_dict) == 0 or scope['name'] in scope_dict:
                if len(scope_dict) > 0:
                    scope_dict[scope['name']] = True
                print(f'Scope {scope["name"]}:')
                for collection in scope['collections']:
                    print(f' - {collection["name"]}')
        if len(scope_dict) > 0:
            for scope, found in scope_dict.items():
                if not found:
                    _warning(f'Scope "{scope}" does not exist')

    @staticmethod
    def _json_list_collections(manifest: Dict[str, Any], scope_dict: Dict[str, bool]):
        """Print a JSON object mapping each (requested) scope name to its collection names."""
        out = {}
        for scope in manifest['scopes']:
            if len(scope_dict) == 0 or scope['name'] in scope_dict:
                out[scope['name']] = [collection["name"] for collection in scope['collections']]
        print(json.dumps(out, indent=4))

    def _get_scope_collection(self, path):
        """Split a '<scope>.<collection>' path, exiting with an error if malformed."""
        scope, collection, err = self.expand_collection_shortcut(path)
        if err is not None:
            _exit_if_errors([err])
        return scope, collection

    @staticmethod
    def expand_collection_shortcut(path):
        """Expand 'scope.collection', mapping an empty part to '_default'.

        Returns (scope, collection, error) where error is None on success.
        """
        parts = path.split('.')
        if len(parts) != 2:
            return None, None, f'invalid collection path {path}'
        parts = ['_default' if x == '' else x for x in parts]
        return parts[0], parts[1], None

    @staticmethod
    def get_man_page_name():
        # Parenthesised to fix the precedence bug that made the Windows branch
        # return just ".html" without the page name.
        return "couchbase-cli-collection-manage" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage collections in a bucket"
class EnableDeveloperPreview(Subcommand):
    """The enable developer preview command"""

    def __init__(self):
        super(EnableDeveloperPreview, self).__init__()
        self.parser.prog = "couchbase-cli enable-developer-preview"
        group = self.parser.add_argument_group("Developer preview option")
        group.add_argument('--enable', dest='enable', required=False, action="store_true",
                           help='Enable developer preview mode in target cluster')
        group.add_argument('--list', dest='list', required=False, action="store_true",
                           help='Check if cluster is in developer preview mode')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Enable developer preview mode (with confirmation) or report its status."""
        if not (opts.enable or opts.list):
            _exit_if_errors(['--enable or --list must be provided'])
        if opts.enable and opts.list:
            _exit_if_errors(['cannot provide both --enable and --list'])
        if opts.enable:
            # Enabling is irreversible, so require explicit interactive confirmation.
            confirm = input('Developer preview cannot be disabled once it is enabled. '
                            'If you enter developer preview mode you will not be able to '
                            'upgrade. DO NOT USE IN PRODUCTION.\nAre you sure [y/n]: ')
            if confirm == 'y':
                _, errors = self.rest.set_dp_mode()
                _exit_if_errors(errors)
                _success("Cluster is in developer preview mode")
            elif confirm == 'n':
                _success("Developer preview mode has NOT been enabled")
            else:
                _exit_if_errors(["Unknown option provided"])
        if opts.list:
            pools, rv = self.rest.pools()
            _exit_if_errors(rv)
            if 'isDeveloperPreview' in pools and pools['isDeveloperPreview']:
                print('Cluster is in developer preview mode')
            else:
                print('Cluster is NOT in developer preview mode')

    @staticmethod
    def get_man_page_name():
        # Parenthesised to fix the precedence bug that made the Windows branch
        # return just ".html" without the page name.
        return "couchbase-cli-enable-developer-preview" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Enable developer preview mode in target cluster"
class SettingAlternateAddress(Subcommand):
    """Setting alternate address command"""

    def __init__(self):
        super(SettingAlternateAddress, self).__init__()
        self.parser.prog = "couchbase-cli setting-alternate-address"
        group = self.parser.add_argument_group("Configure alternate addresses")
        group.add_argument('--set', dest='set', required=False, action="store_true",
                           help='Set external address configuration for the node')
        group.add_argument('--remove', dest='remove', required=False, action="store_true",
                           help='Remove external address configuration')
        group.add_argument('--list', dest='list', required=False, action='store_true',
                           help='Retrieve current alternate address configuration for all nodes')
        group.add_argument('--node', dest='node', metavar="<node>", help="Specify the node to update")
        group.add_argument('--hostname', dest='alternate_hostname', metavar="<host>", help='Alternate address')
        group.add_argument('--ports', dest='ports', metavar="<ports>",
                           help="A comma separated list specifying port mappings for the services")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Set, remove or list per-node alternate address configuration."""
        flags_used = sum([opts.set, opts.list, opts.remove])
        if flags_used != 1:
            _exit_if_errors(['Use exactly one of --set, --list or --remove'])
        if opts.set or opts.remove:
            if not opts.node:
                _exit_if_errors(['--node has to be set when using --set or --remove'])
            # Alternate address can only be set on the node itself. The opts.cluster
            # is updated with the opts.node instead to allow ease of use.
            # The node name can have a port number (./cluster_run)
            hostname, port = self._get_host_port(opts.node)
            # Fix: both variables were previously read without being initialised
            # when opts.cluster carried no URL scheme (UnboundLocalError).
            scheme = None
            cluster = ''
            url = urllib.parse.urlparse(opts.cluster)
            if url.scheme:
                scheme = url.scheme
                if url.port and not port:
                    port = url.port
            elif not port:
                _, old_port = self._get_host_port(opts.cluster)
                if old_port:
                    port = old_port
            if scheme:
                cluster = f'{scheme}://'
            cluster += hostname
            if port:
                cluster += f':{port}'
            opts.cluster = cluster
            # Override the REST client so it talks to the node being altered.
            self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify,
                                       opts.cacert, opts.debug)
        if opts.set:
            ports, error = self._parse_ports(opts.ports)
            _exit_if_errors(error)
            _, error = self.rest.set_alternate_address(opts.alternate_hostname, ports)
            _exit_if_errors(error)
        if opts.remove:
            _, error = self.rest.delete_alternate_address()
            _exit_if_errors(error)
            _success('Alternate address configuration deleted')
        if opts.list:
            add, error = self.rest.get_alternate_address()
            _exit_if_errors(error)
            if opts.output == 'standard':
                # Collect every alternate port name first so columns can be sized.
                port_names = set()
                for node in add:
                    if 'alternateAddresses' in node and 'ports' in node['alternateAddresses']['external']:
                        for port in node['alternateAddresses']['external']['ports'].keys():
                            port_names.add(port)
                print('{:20}{:20}{}'.format('Hostname', 'Alternate Address', 'Ports (Primary/Alternate)'))
                print('{:40}'.format(' '), end='')
                port_names = sorted(port_names)
                for port in port_names:
                    column_size = len(port) + 1
                    if column_size < 11:
                        column_size = 11
                    print(f'{port:{column_size}}', end='')
                print()
                for node in add:
                    if 'alternateAddresses' in node:
                        # For cluster_run and single node clusters there is no hostname
                        try:
                            print(f'{node["hostname"]:20}{node["alternateAddresses"]["external"]["hostname"]:20}',
                                  end='')
                        except KeyError:
                            host = 'UNKNOWN'
                            print(f'{host:20}{node["alternateAddresses"]["external"]["hostname"]:20}', end='')
                        for port in port_names:
                            column_size = len(port) + 1
                            if column_size < 11:
                                column_size = 11
                            ports = ' '
                            if port in node['alternateAddresses']['external']['ports']:
                                ports = f'{str(node["services"][port])}' \
                                        f'/{str(node["alternateAddresses"]["external"]["ports"][port])}'
                            print(f'{ports:{column_size}}', end='')
                        print()
                    else:
                        # For cluster_run and single node clusters there is no hostname
                        try:
                            print(f'{node["hostname"]}')
                        except KeyError:
                            print('UNKNOWN')
            else:
                print(json.dumps(add))

    @staticmethod
    def _parse_ports(ports):
        """Parse 'name=port,name=port' into a list of (name, port) tuples.

        Returns (list, None) on success or (None, [error message]) on bad input.
        """
        if ports is None:
            return None, None
        port_mappings = ports.split(',')
        port_tuple_list = []
        for port_value_pair in port_mappings:
            p_v = port_value_pair.split('=')
            if len(p_v) != 2:
                return None, [f'invalid port mapping: {port_value_pair}']
            try:
                int(p_v[1])
            except ValueError:
                return None, [f'invalid port mapping: {port_value_pair}']
            port_tuple_list.append((p_v[0], p_v[1]))
        return port_tuple_list, None

    @staticmethod
    def _get_host_port(host):
        """Split host[:port] into (host, port), handling bracketed IPv6 literals."""
        if ']' in host:
            host_port = host.split(']:')
            if len(host_port) == 2:
                return host_port[0] + ']', host_port[1]
            return host_port[0], None
        else:
            host_port = host.split(':')
            if len(host_port) == 2:
                return host_port[0], host_port[1]
            return host_port[0], None

    @staticmethod
    def get_man_page_name():
        # Parenthesised to fix the precedence bug that made the Windows branch
        # return just ".html" without the page name.
        return "couchbase-cli-setting-alternate-address" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Configure alternate addresses"
class SettingQuery(Subcommand):
    """Command to configure query settings"""

    def __init__(self):
        super(SettingQuery, self).__init__()
        self.parser.prog = "couchbase-cli setting-query"
        group = self.parser.add_argument_group("Query service settings")
        group.add_argument('--set', dest='set', action="store_true",
                           help='Set query settings')
        group.add_argument('--get', dest='get', action="store_true",
                           help='Retrieve current query settings')
        group.add_argument('--pipeline-batch', metavar='<num>', type=int, default=None,
                           help='Number of items execution operators can batch.')
        group.add_argument('--pipeline-cap', metavar='<num>', type=int, default=None,
                           help='Maximum number of items each execution operator can buffer.')
        group.add_argument('--scan-cap', metavar='<size>', type=int, default=None,
                           help='Maximum buffer size for index scans.')
        group.add_argument('--timeout', metavar='<ms>', type=int, default=None,
                           help='Server execution timeout.')
        group.add_argument('--prepared-limit', metavar='<max>', type=int, default=None,
                           help='Maximum number of prepared statements.')
        group.add_argument('--completed-limit', metavar='<max>', type=int, default=None,
                           help='Maximum number of completed requests.')
        group.add_argument('--completed-threshold', metavar='<ms>', type=int, default=None,
                           help='Cache completed query lasting longer than this many milliseconds.')
        # 'severe' is the documented level; the misspelled 'sever' choice is kept
        # so existing scripts that relied on the old typo keep working.
        group.add_argument('--log-level',
                           choices=['trace', 'debug', 'info', 'warn', 'error', 'sever', 'severe', 'none'],
                           default=None, metavar='<trace|debug|info|warn|error|severe|none>',
                           help='Log level: debug, trace, info, warn, error, severe, none.')
        group.add_argument('--max-parallelism', metavar='<max>', type=int, default=None,
                           help='Maximum parallelism per query.')
        group.add_argument('--n1ql-feature-control', metavar='<num>', type=int, default=None,
                           help='N1QL Feature Controls')
        group.add_argument('--temp-dir', metavar='<path>', type=str, default=None,
                           help='This specifies the directory for temporary query data.')
        group.add_argument('--temp-dir-size', metavar='<mebibytes>', type=int, default=None,
                           help='Specify the maximum size in mebibytes for the temporary query data directory.')
        group.add_argument('--cost-based-optimizer', metavar='<1|0>', type=str, default=None,
                           help='Use cost-based optimizer (Developer Preview).')
        group.add_argument('--memory-quota', metavar='<mebibytes>', type=int, default=None,
                           help='Sets the query memory quota in MiB.')
        group.add_argument('--transaction-timeout', metavar='<duration>', type=str, default=None,
                           help='A duration string for the transaction timeout i.e (100ns, 10ms, 1s, 1m).')
        access_list_group = self.parser.add_argument_group('Query curl access settings')
        access_list_group.add_argument('--curl-access', choices=['restricted', 'unrestricted'], default=None,
                                       help='Specify either unrestricted or restricted, to determine which URLs are'
                                            ' permitted to be accessed by the curl function.')
        access_list_group.add_argument('--allowed-urls', metavar='<urls>', type=str, default=None,
                                       help='Comma separated lists of URLs that are allowed to be accessed by the curl'
                                            ' function.')
        access_list_group.add_argument('--disallowed-urls', metavar='<urls>', type=str, default=None,
                                       help='Comma separated lists of URLs that are disallowed to be accessed by the'
                                            ' curl function.')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Get or set the query service settings (including curl access lists)."""
        if sum([opts.get, opts.set]) != 1:
            _exit_if_errors(['Please provide --set or --get, both can not be provided at the same time'])
        if opts.get:
            settings, err = self.rest.get_query_settings()
            _exit_if_errors(err)
            print(json.dumps(settings))
        if opts.set:
            access_list = self._post_query_access_list(opts)
            self._post_query_settings(opts, access_list)
            _success('Updated the query settings')

    def _post_query_access_list(self, opts) -> bool:
        """Apply the curl access list settings; return True if anything was sent."""
        if opts.curl_access != 'restricted' and (opts.allowed_urls is not None or opts.disallowed_urls is not None):
            _exit_if_errors(['Can only provide --allowed-urls or --disallowed-urls with --curl-access restricted'])
        if opts.curl_access:
            allowed = opts.allowed_urls.strip().split(',') if opts.allowed_urls is not None else None
            disallowed = opts.disallowed_urls.strip().split(',') if opts.disallowed_urls is not None else None
            _, err = self.rest.post_query_curl_access_settings(opts.curl_access == 'restricted', allowed, disallowed)
            _exit_if_errors(err)
            return True
        return False

    def _post_query_settings(self, opts, access_list):
        """Send the query settings; error out when --set was given with nothing to do."""
        if all(v is None for v in [opts.pipeline_batch, opts.pipeline_cap, opts.scan_cap, opts.timeout,
                                   opts.prepared_limit, opts.completed_limit, opts.completed_threshold,
                                   opts.log_level, opts.max_parallelism, opts.n1ql_feature_control, opts.temp_dir,
                                   opts.temp_dir_size, opts.cost_based_optimizer, opts.memory_quota,
                                   opts.transaction_timeout]):
            if access_list:
                # A curl access list update alone is a valid --set invocation.
                return
            _exit_if_errors(['Please provide at least one other option with --set'])
        _, err = self.rest.post_query_settings(opts.pipeline_batch, opts.pipeline_cap, opts.scan_cap, opts.timeout,
                                               opts.prepared_limit, opts.completed_limit, opts.completed_threshold,
                                               opts.log_level, opts.max_parallelism, opts.n1ql_feature_control,
                                               opts.temp_dir, opts.temp_dir_size, opts.cost_based_optimizer,
                                               opts.memory_quota, opts.transaction_timeout)
        _exit_if_errors(err)

    @staticmethod
    def get_man_page_name():
        # Parenthesised to fix the precedence bug that made the Windows branch
        # return just ".html" without the page name.
        return "couchbase-cli-setting-query" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Manage query settings"
class IpFamily(Subcommand):
    """Command to switch between IP family for node to node communication"""

    def __init__(self):
        super(IpFamily, self).__init__()
        self.parser.prog = "couchbase-cli ip-family"
        group = self.parser.add_argument_group("IP family options")
        group.add_argument('--get', action="store_true", default=False, help='Retrieve current used IP family')
        group.add_argument('--set', action="store_true", default=False, help='Change current used IP family')
        group.add_argument('--ipv4', dest='ipv4', default=False, action="store_true",
                           help='Set IP family to IPv4')
        group.add_argument('--ipv6', dest='ipv6', default=False, action="store_true",
                           help='Set IP family to IPv6')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Get or set the IP family used for node-to-node communication."""
        flags_used = sum([opts.set, opts.get])
        if flags_used == 0:
            _exit_if_errors(['Please provide one of --set, or --get'])
        elif flags_used > 1:
            _exit_if_errors(['Please provide only one of --set, or --get'])
        if opts.get:
            self._get(self.rest)
        if opts.set:
            if sum([opts.ipv6, opts.ipv4]) != 1:
                # Message wording fixed ("Provided" -> "Provide").
                _exit_if_errors(['Provide exactly one of --ipv4 or --ipv6 together with the --set option'])
            self._set(self.rest, opts.ipv6, opts.ssl)

    @staticmethod
    def _set(rest, ipv6, ssl):
        """Switch the cluster IP family node by node.

        New listeners are enabled on every node first, then the net config is
        applied, and only then are the old listeners disabled, so the nodes can
        keep communicating throughout the switch.
        """
        ip_fam, ip_fam_disable = ('ipv6', 'ipv4') if ipv6 else ('ipv4', 'ipv6')
        node_data, err = rest.pools('nodes')
        if err and err[0] == '"unknown pool"':
            # Uninitialised single node: configure it directly.
            _, err = rest.enable_external_listener(ipfamily=ip_fam)
            _exit_if_errors(err)
            _, err = rest.setup_net_config(ipfamily=ip_fam)
            _exit_if_errors(err)
            _, err = rest.disable_unused_external_listeners(ipfamily=ip_fam_disable)
            _exit_if_errors(err)
            _success('Switched IP family of the cluster')
            return
        _exit_if_errors(err)
        hosts = []
        for n in node_data['nodes']:
            host = f'http://{n["hostname"]}'
            if ssl:
                addr = host.rsplit(":", 1)[0]
                host = f'https://{addr}:{n["ports"]["httpsMgmt"]}'
            _, err = rest.enable_external_listener(host=host, ipfamily=ip_fam)
            _exit_if_errors(err)
            hosts.append(host)
        for h in hosts:
            _, err = rest.setup_net_config(host=h, ipfamily=ip_fam)
            _exit_if_errors(err)
            print(f'Switched IP family for node: {h}')
        for h in hosts:
            _, err = rest.disable_unused_external_listeners(host=h, ipfamily=ip_fam_disable)
            _exit_if_errors(err)
        _success('Switched IP family of the cluster')

    @staticmethod
    def _get(rest):
        """Print the address family the cluster currently uses (or 'mixed mode')."""
        nodes, err = rest.nodes_info()
        _exit_if_errors(err)
        fam = {}
        for n in nodes:
            fam[n['addressFamily']] = True
        family = list(fam.keys())
        if len(family) == 1:
            ipv_fam = 'UNKNOWN'
            # 'inet'/'inet_tls' are the Erlang names for IPv4, 'inet6*' for IPv6.
            if family[0] == 'inet' or family[0] == 'inet_tls':
                ipv_fam = 'ipv4'
            elif family[0] == 'inet6' or family[0] == 'inet6_tls':
                ipv_fam = 'ipv6'
            print(f'Cluster using {ipv_fam}')
        else:
            print('Cluster is in mixed mode')

    @staticmethod
    def get_man_page_name():
        # Parenthesised to fix the precedence bug that made the Windows branch
        # return just ".html" without the page name.
        return "couchbase-cli-ip-family" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Change or get the address family"
class NodeToNodeEncryption(Subcommand):
    """Command to enable/disable cluster encryption"""

    def __init__(self):
        super(NodeToNodeEncryption, self).__init__()
        self.parser.prog = "couchbase-cli node-to-node-encryption"
        group = self.parser.add_argument_group("Node-to-node encryption options")
        group.add_argument('--enable', action="store_true", default=False, help='Enable node-to-node encryption')
        group.add_argument('--disable', action="store_true", default=False, help='Disable node-to-node encryption')
        group.add_argument('--get', action="store_true", default=False,
                           help='Retrieve current status of node-to-node encryption (on or off)')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Enable, disable or report node-to-node encryption."""
        flags_used = sum([opts.enable, opts.disable, opts.get])
        if flags_used == 0:
            _exit_if_errors(['Please provide one of --enable, --disable or --get'])
        elif flags_used > 1:
            _exit_if_errors(['Please provide only one of --enable, --disable or --get'])
        if opts.get:
            self._get(self.rest)
        elif opts.enable:
            self._change_encryption(self.rest, 'on', opts.ssl)
        elif opts.disable:
            self._change_encryption(self.rest, 'off', opts.ssl)

    @staticmethod
    def _change_encryption(rest, encryption, ssl):
        """Turn node-to-node encryption 'on' or 'off' node by node.

        Listeners for the new mode are enabled on every node before the net
        config changes, and the old listeners are disabled last, so the nodes
        never lose contact with each other mid-switch.
        """
        node_data, err = rest.pools('nodes')
        encryption_disable = 'off' if encryption == 'on' else 'on'
        if err and err[0] == '"unknown pool"':
            # Uninitialised single node: configure it directly.
            _, err = rest.enable_external_listener(encryption=encryption)
            _exit_if_errors(err)
            _, err = rest.setup_net_config(encryption=encryption)
            _exit_if_errors(err)
            _, err = rest.disable_unused_external_listeners(encryption=encryption_disable)
            _exit_if_errors(err)
            _success(f'Switched node-to-node encryption {encryption}')
            return
        _exit_if_errors(err)
        hosts = []
        for n in node_data['nodes']:
            host = f'http://{n["hostname"]}'
            if ssl:
                addr = host.rsplit(":", 1)[0]
                host = f'https://{addr}:{n["ports"]["httpsMgmt"]}'
            _, err = rest.enable_external_listener(host=host, encryption=encryption)
            _exit_if_errors(err)
            hosts.append(host)
        for h in hosts:
            _, err = rest.setup_net_config(host=h, encryption=encryption)
            _exit_if_errors(err)
            print(f'Turned {encryption} encryption for node: {h}')
        for h in hosts:
            _, err = rest.disable_unused_external_listeners(host=h, encryption=encryption_disable)
            _exit_if_errors(err)
        _success(f'Switched node-to-node encryption {encryption}')

    @staticmethod
    def _get(rest):
        """Print whether node-to-node encryption is enabled, disabled or mixed."""
        # this will start the correct listeners in all the nodes
        nodes, err = rest.nodes_info()
        _exit_if_errors(err)
        encrypted_nodes = []
        unencrypted_nodes = []
        for n in nodes:
            if n['nodeEncryption']:
                encrypted_nodes.append(n['hostname'])
            else:
                unencrypted_nodes.append(n['hostname'])
        if len(encrypted_nodes) == len(nodes):
            print('Node-to-node encryption is enabled')
        elif len(unencrypted_nodes) == len(nodes):
            print('Node-to-node encryption is disabled')
        else:
            print('Cluster is in mixed mode')
            print(f'Nodes with encryption enabled: {encrypted_nodes}')
            print(f'Nodes with encryption disabled: {unencrypted_nodes}')

    @staticmethod
    def get_man_page_name():
        # Parenthesised to fix the precedence bug that made the Windows branch
        # return just ".html" without the page name.
        return "couchbase-cli-node-to-node-encryption" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Change or get the cluster encryption configuration"
class SettingRebalance(Subcommand):
    """The rebalance subcommand"""

    def __init__(self):
        super(SettingRebalance, self).__init__()
        self.parser.prog = "couchbase-cli setting-rebalance"
        group = self.parser.add_argument_group("Rebalance configuration")
        group.add_argument("--set", default=False, action='store_true',
                           help='Set the automatic rebalance retry settings.')
        group.add_argument("--get", default=False, action='store_true',
                           help='Get the automatic rebalance retry settings.')
        group.add_argument('--cancel', default=False, action='store_true',
                           help='Cancel pending rebalance retry.')
        group.add_argument('--moves-per-node', type=int, metavar='<num>',
                           help='Specify the number of [1-64] vBuckets to move concurrently')
        group.add_argument('--pending-info', default=False, action='store_true',
                           help='Get info for pending rebalance retry.')
        group.add_argument("--enable", metavar="<1|0>", choices=["1", "0"],
                           help="Enable or disable automatic rebalance retry")
        group.add_argument("--wait-for", metavar="<sec>", type=int,
                           help="Specify the time to wait before retrying the rebalance [5-3600] seconds.")
        group.add_argument("--max-attempts", metavar="<num>", type=int,
                           help="Maximum number of rebalance retries [1-3].")
        group.add_argument('--rebalance-id', metavar='<id>',
                           help='Specify the id of the failed rebalance to cancel the retry.')

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Get, set, cancel-retry or show pending-retry rebalance settings."""
        if sum([opts.set, opts.get, opts.cancel, opts.pending_info]) != 1:
            _exit_if_errors(['Provide either --set, --get, --cancel or --pending-info'])
        if opts.get:
            settings, err = self.rest.get_settings_rebalance()
            _exit_if_errors(err)
            if self.enterprise:
                # Retry settings only exist on Enterprise Edition.
                retry_settings, err = self.rest.get_settings_rebalance_retry()
                _exit_if_errors(err)
                settings.update(retry_settings)
            if opts.output == 'json':
                print(json.dumps(settings))
            else:
                if self.enterprise:
                    print(f'Automatic rebalance retry {"enabled" if settings["enabled"] else "disabled"}')
                    print(f'Retry wait time: {settings["afterTimePeriod"]}')
                    print(f'Maximum number of retries: {settings["maxAttempts"]}')
                    print(f'Maximum number of vBucket move per node: {settings["rebalanceMovesPerNode"]}')
        elif opts.set:
            if (not self.enterprise and (opts.enable is not None or opts.wait_for is not None
                                         or opts.max_attempts is not None)):
                _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"])
            # NOTE(review): when --enable is omitted this still sends 'false',
            # i.e. --set always rewrites the enabled flag — confirm intended.
            if opts.enable == '1':
                opts.enable = 'true'
            else:
                opts.enable = 'false'
            if opts.wait_for is not None and (opts.wait_for < 5 or opts.wait_for > 3600):
                _exit_if_errors(['--wait-for must be a value between 5 and 3600'])
            if opts.max_attempts is not None and (opts.max_attempts < 1 or opts.max_attempts > 3):
                _exit_if_errors(['--max-attempts must be a value between 1 and 3'])
            if self.enterprise:
                _, err = self.rest.set_settings_rebalance_retry(opts.enable, opts.wait_for, opts.max_attempts)
                _exit_if_errors(err)
            if opts.moves_per_node is not None:
                if not 1 <= opts.moves_per_node <= 64:
                    _exit_if_errors(['--moves-per-node must be a value between 1 and 64'])
                _, err = self.rest.set_settings_rebalance(opts.moves_per_node)
                _exit_if_errors(err)
            _success('Rebalance settings updated')
        elif opts.cancel:
            # Fix: the enterprise check used to be part of the elif condition
            # ("elif opts.cancel and not self.enterprise"), so --cancel on
            # Enterprise Edition fell through to the --pending-info branch
            # instead of cancelling the retry.
            if not self.enterprise:
                _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"])
            if opts.rebalance_id is None:
                _exit_if_errors(['Provide the failed rebalance id using --rebalance-id <id>'])
            _, err = self.rest.cancel_rebalance_retry(opts.rebalance_id)
            _exit_if_errors(err)
            _success('Rebalance retry canceled')
        else:
            # --pending-info
            if not self.enterprise:
                _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"])
            rebalance_info, err = self.rest.get_rebalance_info()
            _exit_if_errors(err)
            print(json.dumps(rebalance_info))

    @staticmethod
    def get_man_page_name():
        # Parenthesised to fix the precedence bug that made the Windows branch
        # return just ".html" without the page name.
        return "couchbase-cli-setting-rebalance" + (".1" if os.name != "nt" else ".html")

    @staticmethod
    def get_description():
        return "Configure automatic rebalance settings"
class BackupService(Subcommand):
    """BackupService class is a subcommand that will contain other commands to configure the service as well as manage
    it. This approach attempts to make the interface more intuitive by keeping a hierarchical structure where the
    service can have all its options under one command instead of having multiple completely separate commands (e.g
    settings-backups, manage-backups and repository-setup-backup.)
    The idea is that the interface will look like:
    couchbase-cli backup-service [settings | plans | repositories | cloud-credentials] where each element in [] is a
    subcommand to manage those options for that part of the backup service. As such if the user is not sure of what they
    want to do they can always do couchbase-cli backup-service -h to get a top level details and then move down the
    hierarchy to a more concrete option.
    """

    def __init__(self):
        super(BackupService, self).__init__()
        self.parser.prog = "couchbase-cli backup-service"
        self.subparser = self.parser.add_subparsers(help='Sub command help', dest='sub_cmd', metavar='<subcommand>')
        # Each nested command registers its own sub-parser on self.subparser.
        self.settings_cmd = BackupServiceSettings(self.subparser)
        self.repository_cmd = BackupServiceRepository(self.subparser)
        self.plan_cmd = BackupServicePlan(self.subparser)

    def execute(self, opts):
        """Dispatch to the settings/repository/plan nested command."""
        if opts.sub_cmd is None or opts.sub_cmd not in ['settings', 'repository', 'plan']:
            # Message typo fixed ("one off" -> "one of").
            _exit_if_errors(['<subcommand> must be one of [settings, repository, plan]'])
        if opts.sub_cmd == 'settings':
            self.settings_cmd.execute(opts)
        elif opts.sub_cmd == 'repository':
            self.repository_cmd.execute(opts)
        elif opts.sub_cmd == 'plan':
            self.plan_cmd.execute(opts)

    @staticmethod
    def get_man_page_name():
        # Parenthesised to fix the precedence bug that made the Windows branch
        # return just ".html" without the page name.
        return 'couchbase-cli-backup-service' + ('.1' if os.name != 'nt' else '.html')

    @staticmethod
    def get_description():
        return "Manage the backup service"
class BackupServiceSettings:
    """Nested command that manages the backup service settings."""

    def __init__(self, subparser):
        """Set up the argument parser for the `settings` nested command."""
        self.rest = None  # populated by the @rest_initialiser decorator on execute
        setting_parser = subparser.add_parser('settings', help='Manage backup service settings', add_help=False,
                                              allow_abbrev=False)
        group = setting_parser.add_argument_group('Backup service settings options')
        group.add_argument('--get', action='store_true', help='Get current backup service configuration')
        group.add_argument('--set', action='store_true', help='Change the service configuration')
        group.add_argument('--history-rotation-period', dest='rotation_period', type=int, metavar='<days>',
                           help='The number of days after which the task history should be rotated')
        group.add_argument('--history-rotation-size', dest='rotation_size', type=int, metavar='<mebibytes>',
                           help='The size in MiB at which to rotate the task history')
        group.add_argument("-h", "--help", action=CBHelpAction, klass=self,
                           help="Prints the short or long help message")

    @rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True)
    def execute(self, opts):
        """Run the settings subcommand: exactly one of --get/--set must be given."""
        if sum([opts.get, opts.set]) != 1:
            _exit_if_errors(['Must use one and only one of [--get, --set]'])
        if opts.get:
            self._get(opts)
        if opts.set:
            self._set(opts)

    def _get(self, opts):
        """Print the current service configuration (JSON or human readable)."""
        config, err = self.rest.get_backup_service_settings()
        _exit_if_errors(err)
        if opts.output == 'json':
            print(json.dumps(config, indent=4))
        else:
            print('-- Backup service configuration --')
            size = config['history_rotation_size'] if 'history_rotation_size' in config else 'N/A'
            period = config['history_rotation_period'] if 'history_rotation_period' in config else 'N/A'
            print(f'History rotation size: {size} MiB')
            print(f'History rotation period: {period} days')

    def _set(self, opts):
        """Patch the service configuration with the supplied rotation options."""
        if opts.rotation_period is None and opts.rotation_size is None:
            _exit_if_errors(['At least one of --history-rotation-period or --history-rotation-size is required'])
        _, err = self.rest.patch_backup_service_settings(opts.rotation_period, opts.rotation_size)
        _exit_if_errors(err)
        _success('Backup service settings patched')

    @staticmethod
    def get_man_page_name():
        # Parentheses fix an operator-precedence bug that returned a bare
        # ".html" on Windows.
        return 'couchbase-cli-backup-service-settings' + ('.1' if os.name != 'nt' else '.html')

    @staticmethod
    def get_description():
        return 'Manage backup service settings'
class BackupServiceRepository:
    """Nested command that manages backup service repositories.

    Supported operations:
    - List repositories
    - Get a repository
    - Add a repository
    - Archive a repository
    - Remove a repository
    """

    def __init__(self, subparser):
        """Set up the argument parser for the `repository` nested command."""
        self.rest = None  # populated by the @rest_initialiser decorator on execute
        repository_parser = subparser.add_parser('repository', help='Manage backup repositories', add_help=False,
                                                 allow_abbrev=False)
        # action flags are mutually exclusive
        action_group = repository_parser.add_mutually_exclusive_group(required=True)
        action_group.add_argument('--list', action='store_true', help='Get all repositories')
        action_group.add_argument('--get', action='store_true', help='Get repository by id')
        action_group.add_argument('--archive', action='store_true', help='Archive a repository')
        action_group.add_argument('--add', action='store_true', help='Add a new active repository')
        action_group.add_argument('--remove', action='store_true', help='Remove an archived/imported repository')
        action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self,
                                  help="Prints the short or long help message")
        # other arguments
        group = repository_parser.add_argument_group('Backup service repository configuration')
        group.add_argument('--id', metavar='<id>', help='The repository id')
        group.add_argument('--new-id', metavar='<id>', help='The new repository id')
        group.add_argument('--state', metavar='<state>', choices=['active', 'archived', 'imported'],
                           help='The repository state.')
        group.add_argument('--plan', metavar='<plan_name>', help='The plan to use as base for the repository')
        group.add_argument('--backup-archive', metavar='<archive>', help='The location to store the backups in')
        group.add_argument('--bucket-name', metavar='<name>', help='The bucket to backup')
        group.add_argument('--remove-data', action='store_true', help='Used to delete the repository data')
        # the cloud arguments are given their own group so that the short help is a bit more readable
        cloud_group = repository_parser.add_argument_group('Backup repository cloud arguments')
        cloud_group.add_argument('--cloud-credentials-name', metavar='<name>',
                                 help='The stored clouds credential name to use for the new repository')
        cloud_group.add_argument('--cloud-staging-dir', metavar='<path>', help='The path to the staging directory')
        cloud_group.add_argument('--cloud-credentials-id', metavar='<id>',
                                 help='The ID to use to communicate with the object store')
        cloud_group.add_argument('--cloud-credentials-key', metavar='<key>',
                                 help='The key to use to communicate with the object store')
        cloud_group.add_argument('--cloud-credentials-region', metavar='<region>',
                                 help='The region for the object store')
        cloud_group.add_argument('--cloud-endpoint', metavar='<endpoint>',
                                 help='Overrides the default endpoint used to communicate with the cloud provider. '
                                      'Use for object store compatible third party solutions')
        cloud_group.add_argument('--s3-force-path-style', action='store_true',
                                 help='When using S3 or S3 compatible storage it will use the old path style.')

    @rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True)
    def execute(self, opts):
        """Run the backup-service repository subcommand."""
        # exactly one action flag is guaranteed by the mutually exclusive group
        if opts.list:
            self.list_repositories(opts.state, opts.output == 'json')
        elif opts.get:
            self.get_repository(opts.id, opts.state, opts.output == 'json')
        elif opts.archive:
            self.archive_repository(opts.id, opts.new_id)
        elif opts.remove:
            self.remove_repository(opts.id, opts.state, opts.remove_data)
        elif opts.add:
            self.add_active_repository(opts.id, opts.plan, opts.backup_archive, bucket_name=opts.bucket_name,
                                       credentials_name=opts.cloud_credentials_name,
                                       credentials_id=opts.cloud_credentials_id,
                                       credentials_key=opts.cloud_credentials_key,
                                       cloud_region=opts.cloud_credentials_region, staging_dir=opts.cloud_staging_dir,
                                       cloud_endpoint=opts.cloud_endpoint, s3_path_style=opts.s3_force_path_style)

    def remove_repository(self, repository_id: str, state: str, delete_repo: bool = False):
        """Removes the repository in state 'state' and with id 'repository_id'

        Args:
            repository_id (str): The repository id
            state (str): It must be either archived or imported otherwise it will return an error
            delete_repo (bool): Whether or not the backup repository should be deleted
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        # the following is divided in two options to give better error messages depending if state is missing or if
        # it is invalid
        if not state:
            _exit_if_errors(['--state is required'])
        if state not in ['archived', 'imported']:
            _exit_if_errors(['can only delete archived or imported repositories to delete an active repository it needs to '
                             'be archived first'])
        # can only delete repo of archived repositories
        if delete_repo and state == 'imported':
            _exit_if_errors(['cannot delete the repository for an imported repository'])
        _, errors = self.rest.delete_backup_repository(repository_id, state, delete_repo)
        _exit_if_errors(errors)
        _success('Repository was deleted')

    def add_active_repository(self, repository_id: str, plan: str, archive: str, **kwargs):
        """Adds a new active repository identified by 'repository_id' and that uses 'plan' as base.

        Args:
            repository_id (str): The ID to give to the repository. This must be unique, if it is not an error will
                be returned.
            plan (str): The name of the plan to use as base for the repository. If it does not exist the service
                will return an error.
            archive (str): The location to store the data in. It must be accessible by all nodes. To use S3 instead
                of providing a path to a filesystem directory use the syntax.
                s3://<bucket-name>/<optional_prefix>/<archive>
            **kwargs: Optional parameters [bucket_name, credentials_name, credentials_id, credentials_key,
                cloud_region, staging_dir, cloud_endpoint, s3_path_style]
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        if not plan:
            _exit_if_errors(['--plan is required'])
        if not archive:
            _exit_if_errors(['--backup-archive is required'])
        _exit_if_errors(self.check_cloud_params(archive, **kwargs))
        # only forward the options the user actually supplied
        add_request_body = {
            'plan': plan,
            'archive': archive,
        }
        if kwargs.get('bucket_name', False):
            add_request_body['bucket_name'] = kwargs.get('bucket_name')
        if kwargs.get('credentials_name', False):
            add_request_body['cloud_credential_name'] = kwargs.get('credentials_name')
        if kwargs.get('credentials_id', False):
            add_request_body['cloud_credentials_id'] = kwargs.get('credentials_id')
        if kwargs.get('credentials_key', False):
            add_request_body['cloud_credentials_key'] = kwargs.get('credentials_key')
        if kwargs.get('cloud_region', False):
            add_request_body['cloud_credentials_region'] = kwargs.get('cloud_region')
        if kwargs.get('cloud_endpoint', False):
            add_request_body['cloud_endpoint'] = kwargs.get('cloud_endpoint')
        if kwargs.get('s3_path_style', False):
            add_request_body['cloud_force_path_style'] = kwargs.get('s3_path_style')
        _, errors = self.rest.add_backup_active_repository(repository_id, add_request_body)
        _exit_if_errors(errors)
        _success('Added repository')

    @staticmethod
    def check_cloud_params(archive: str, **kwargs) -> Optional[List[str]]:
        """Checks that inside kwargs there is a valid set of parameters to add a cloud repository

        Args:
            archive (str): The archive to use for the repository.

        Returns:
            A list of error strings, or None when the parameters are valid.
        """
        # If not an s3 archive skip this
        if not archive.startswith('s3://'):
            return None
        creds_name = kwargs.get('credentials_name')
        region = kwargs.get('cloud_region')
        creds_id = kwargs.get('credentials_id')
        creds_key = kwargs.get('credentials_key')
        # NOTE(review): an id supplied without a key (or vice versa) passes
        # this check and is left for the service to reject — confirm intended
        if (creds_name and (creds_id or creds_key)) or (not creds_name and not (creds_id or creds_key)):
            return ['must provide either --cloud-credentials-name or --cloud-credentials-key and '
                    '--cloud-credentials-id']
        if not staging_dir_provided(staging_dir):
            return ['--cloud-staging-dir is required']
        if not creds_name and not region:
            return ['--cloud-credentials-region is required']
        return None

    def archive_repository(self, repository_id, new_id):
        """Archive a repository. The archived repository will have the id `new_id`

        Args:
            repository_id (str): The active repository ID to be archived
            new_id (str): The id that will be given to the archived repository
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        if not new_id:
            _exit_if_errors(['--new-id is required'])
        _, errors = self.rest.archive_backup_repository(repository_id, new_id)
        _exit_if_errors(errors)
        _success('Archived repository')

    def list_repositories(self, state=None, json_out=False):
        """List the backup repositories.

        If a repository state is given only repositories in that state will be listed. This command supports listing
        both in json and human friendly format.

        Args:
            state (str, optional): One of ['active', 'imported', 'archived']. The repositories in this state will be
                retrieved.
            json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.
        """
        states = ['active', 'archived', 'imported'] if state is None else [state]
        results = {}
        for get_state in states:
            repositories, errors = self.rest.get_backup_service_repositories(state=get_state)
            _exit_if_errors(errors)
            results[get_state] = repositories
        if json_out:
            print(json.dumps(results, indent=2))
        else:
            self.human_friendly_print_repositories(results)

    def get_repository(self, repository_id, state, json_out=False):
        """Retrieves one repository from the backup service

        If the repository does not exist an error will be returned

        Args:
            repository_id (str): The repository id to be retrieved
            state (str): The state of the repository to retrieve
            json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        if not state:
            _exit_if_errors(['--state is required'])
        repository, errors = self.rest.get_backup_service_repository(repository_id, state)
        _exit_if_errors(errors)
        if json_out:
            print(json.dumps(repository, indent=2))
        else:
            self.human_firendly_print_repository(repository)

    # NOTE(review): "firendly" is a long-standing misspelling; the names are
    # kept to avoid breaking any external callers of these static helpers.
    @staticmethod
    def human_firendly_print_repository(repository):
        """Print the repository in a human friendly format

        Args:
            repository (obj): The backup repository information
        """
        print(f'ID: {repository["id"]}')
        print(f'State: {repository["state"]}')
        print(f'Healthy: {(not ("health" in repository and not repository["health"]["healthy"]))!s}')
        print(f'Archive: {repository["archive"]}')
        print(f'Repository: {repository["repo"]}')
        if 'bucket' in repository:
            print(f'Bucket: {repository["bucket"]["name"]}')
        if 'plan_name' in repository and repository['plan_name'] != "":
            print(f'plan: {repository["plan_name"]}')
        print(f'Creation time: {repository["creation_time"]}')
        if 'scheduled' in repository and repository['scheduled']:
            print()
            BackupServiceRepository.human_firendly_print_repository_scheduled_tasks(repository['scheduled'])
        one_off = repository['running_one_off'] if 'running_one_off' in repository else None
        running_scheduled = repository['running_tasks'] if 'running_tasks' in repository else None
        if one_off or running_scheduled:
            print()
            BackupServiceRepository.human_friendly_print_running_tasks(one_off, running_scheduled)

    @staticmethod
    def human_friendly_print_running_tasks(one_off, scheduled):
        """Prints the running task summary in a human friendly way

        Args:
            one_off (map<str, task object>): Running one off tasks
            scheduled (map<str, task object>): Running scheduled tasks
        """
        all_vals = []
        name_pad = 5  # minimum width of the "Name" column
        if one_off:
            for name in one_off:
                if len(name) > name_pad:
                    name_pad = len(name)
            all_vals += one_off.values()
        if scheduled:
            for name in scheduled:
                if len(name) > name_pad:
                    name_pad = len(name)
            all_vals += scheduled.values()
        name_pad += 1
        header = f'{"Name":<{name_pad}}| Task type | Status | Start'
        print(header)
        print('-' * (len(header) + 5))
        for task in all_vals:
            print(f'{task["name"]:<{name_pad}}| {task["type"].title():<10}| {task["status"]:<8} | {task["start"]}')

    @staticmethod
    def human_firendly_print_repository_scheduled_tasks(scheduled):
        """Print the scheduled task in a tabular format"""
        name_pad = 5  # minimum width of the "Name" column
        for name in scheduled:
            if len(name) > name_pad:
                name_pad = len(name)
        name_pad += 1
        header = f'{"Name":<{name_pad}}| Task type | Next run'
        print('Scheduled tasks:')
        print(header)
        print('-' * (len(header) + 5))
        for task in scheduled.values():
            print(f'{task["name"]:<{name_pad}}| {task["task_type"].title():<10}| {task["next_run"]}')

    @staticmethod
    def human_friendly_print_repositories(repositories_map):
        """This will print the repositories in a tabular format

        Args:
            repositories_map (map<state (str), repository (list of objects)>)
        """
        repository_count = 0
        id_pad = 5
        plan_pad = 7
        for repositories in repositories_map.values():
            for repository in repositories:
                repository_count += 1
                if id_pad < len(repository['id']):
                    id_pad = len(repository['id'])
                if 'plan_name' in repository and plan_pad < len(repository['plan_name']):
                    plan_pad = len(repository['plan_name'])
        if repository_count == 0:
            print('No repositories found')
            return
        # Get an extra space between the information and the column separator
        plan_pad += 1
        id_pad += 1
        # build header
        header = f'{"ID":<{id_pad}}| {"State":<9}| {"plan":<{plan_pad}}| Healthy | Repository'
        print(header)
        print('-' * len(header))
        # print repository summary
        for _, repositories in sorted(repositories_map.items()):
            for repository in repositories:
                healthy = not ('health' in repository and not repository['health']['healthy'])
                # archived and imported repositories may not have plans so we have to replace the empty string with N/A
                plan_name = 'N/A'
                if 'plan_name' in repository and len(repository['plan_name']) != 0:
                    plan_name = repository['plan_name']
                print(f"{repository['id']:<{id_pad}}| {repository['state']:<9}| {plan_name:<{plan_pad}}| "
                      f" {healthy!s:<7}| {repository['repo']}")

    @staticmethod
    def get_man_page_name():
        # Parentheses fix an operator-precedence bug that returned a bare
        # ".html" on Windows.
        return 'couchbase-cli-backup-service-repository' + ('.1' if os.name != 'nt' else '.html')

    @staticmethod
    def get_description():
        return 'Manage backup service repositories'
class BackupServicePlan:
    """Nested command that manages backup service plans.

    Supported operations:
    - List plans
    - Get a plan
    - Add a plan
    - Remove a plan
    """

    def __init__(self, subparser):
        """Set up the argument parser for the `plan` nested command."""
        self.rest = None  # populated by the @rest_initialiser decorator on execute
        plan_parser = subparser.add_parser('plan', help='Manage backup plans', add_help=False,
                                           allow_abbrev=False)
        # action flags are mutually exclusive
        action_group = plan_parser.add_mutually_exclusive_group(required=True)
        action_group.add_argument('--list', action='store_true', help='List all available backup plans')
        action_group.add_argument('--get', action='store_true', help='Get a plan by name')
        action_group.add_argument('--remove', action='store_true', help='Remove a plan by name')
        action_group.add_argument('--add', action='store_true', help='Add a new plan')
        action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self,
                                  help="Prints the short or long help message")
        options = plan_parser.add_argument_group('Plan options')
        options.add_argument('--name', metavar='<name>', help='Plan name')
        options.add_argument('--description', metavar='<description>', help='Optional description')
        options.add_argument('--services', metavar='<services>', help='A comma separated list of services to backup')
        options.add_argument('--task', metavar='<tasks>', nargs='+', help='JSON task definition')

    @rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True)
    def execute(self, opts):
        """Run the backup plan management command"""
        if opts.list:
            self.list_plans(opts.output == 'json')
        elif opts.get:
            self.get_plan(opts.name, opts.output == 'json')
        elif opts.remove:
            self.remove_plan(opts.name)
        elif opts.add:
            self.add_plan(opts.name, opts.services, opts.task, opts.description)

    def add_plan(self, name: str, services: Optional[str], tasks: Optional[List[str]], description: Optional[str]):
        """Add a new backup plan

        The validation of the inputs in the CLI is intentionally lacking as this is offloaded to the backup service.

        Args:
            name (str): The name to give the new plan. It must be unique.
            services (optional str): A comma separated list of services to backup; if empty all services are backed
                up.
            tasks (optional list): A list of JSON strings representing the tasks to be run.
            description (optional str): An optional description string.
        """
        if not name:
            _exit_if_errors(['--name is required'])
        service_list = []
        if services:
            service_list = [service.strip() for service in services.split(',')]
        tasks_objects = []
        if tasks:
            for task_str in tasks:
                try:
                    task = json.loads(task_str)
                    tasks_objects.append(task)
                except json.decoder.JSONDecodeError as json_error:
                    _exit_if_errors([f'invalid task {json_error!s}'])
        # only send the fields the user actually supplied
        plan = {}
        if service_list:
            plan['services'] = service_list
        if tasks_objects:
            plan['tasks'] = tasks_objects
        if description:
            plan['description'] = description
        _, errors = self.rest.add_backup_plan(name, plan)
        _exit_if_errors(errors)
        _success('Added plan')

    def remove_plan(self, name: str):
        """Removes a plan by name"""
        if not name:
            _exit_if_errors(['--name is required'])
        _, errors = self.rest.delete_backup_plan(name)
        _exit_if_errors(errors)
        _success('Plan removed')

    def get_plan(self, name: str, json_output: bool = False):
        """Gets a backup plan by name

        Args:
            name (str): The name of the plan to retrieve
            json_output (bool): Whether to print in JSON or a more human friendly way
        """
        if not name:
            _exit_if_errors(['--name is required'])
        plan, errors = self.rest.get_backup_plan(name)
        _exit_if_errors(errors)
        if json_output:
            print(json.dumps(plan, indent=2))
        else:
            self.human_print_plan(plan)

    def list_plans(self, json_output: bool = False):
        """Prints all the plans stored in the backup service

        Args:
            json_output (bool): Whether to print in JSON or a more human friendly way
        """
        plans, errors = self.rest.list_backup_plans()
        _exit_if_errors(errors)
        if json_output:
            print(json.dumps(plans, indent=2))
        else:
            self.human_print_plans(plans)

    @staticmethod
    def human_print_plan(plan: object):
        """Prints the plan in a human friendly way"""
        print(f'Name: {plan["name"]}')
        print(f'Description: {plan["description"] if "description" in plan else "N/A"}')
        print(f'Services: {BackupServicePlan.service_list_to_str(plan["services"])}')
        # fixed bug: the key was misspelled "deafult", so "Default" always
        # printed False (compare the correct spelling in human_print_plans)
        print(f'Default: {(plan["default"] if "default" in plan else False)!s}')
        # If there are no tasks return; .get also guards against a missing key
        if not plan.get("tasks"):
            return
        print()
        print('Tasks:')
        task_name_pad = 5
        schedule_pad = 10
        for task in plan['tasks']:
            if len(task['name']) > task_name_pad:
                task_name_pad = len(task['name'])
            task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule'])
            if len(task['schedule_str']) > schedule_pad:
                schedule_pad = len(task['schedule_str'])
        task_name_pad += 1
        schedule_pad += 1
        header = f'{"Name":<{task_name_pad}} | {"Schedule":<{schedule_pad}} | Options'
        print(header)
        print('-' * (len(header) + 5))
        for task in plan['tasks']:
            options = BackupServicePlan.format_options(task)
            print(f'{task["name"]:<{task_name_pad}} | {task["schedule_str"]:<{schedule_pad}} | {options}')

    @staticmethod
    def format_options(task: object) -> str:
        """Format the full backup or merge options"""
        options = 'N/A'
        # .get guards against tasks without the full_backup flag
        if task['task_type'] == 'BACKUP' and task.get('full_backup'):
            options = 'Full backup'
        elif task['task_type'] == 'MERGE':
            if 'merge_options' in task:
                options = (f'Merge from {task["merge_options"]["offset_start"]} to '
                           f'{task["merge_options"]["offset_end"]}')
            else:
                options = 'Merge everything'
        return options

    @staticmethod
    def format_schedule(schedule: object) -> str:
        """Format the schedule object in a string of the format <task> every <frequency>? <period> (at <time>)?"""
        task_start = f'{schedule["job_type"].lower()}'
        frequency_part = 'every'
        if schedule['frequency'] == 1:
            # singularise the period, e.g. "days" -> "day"
            period = schedule["period"].lower()
            period = period if period[-1] != 's' else period[:-1]
            frequency_part += f' {period}'
        else:
            frequency_part += f' {schedule["frequency"]} {schedule["period"].lower()}'
        time_part = f' at {schedule["time"]}' if 'time' in schedule else ''
        return f'{task_start} {frequency_part}{time_part}'

    @staticmethod
    def human_print_plans(plans: List[Any]):
        """Prints a table with an overview of each plan"""
        # if plans is empty or none print no plans message
        if not plans:
            print('No plans')
            return
        name_pad = 5
        service_pad = 8
        for plan in plans:
            if len(plan['name']) > name_pad:
                name_pad = len(plan['name'])
            services_str = BackupServicePlan.service_list_to_str(plan['services'])
            if len(services_str) > service_pad:
                service_pad = len(services_str)
        name_pad += 1
        service_pad += 1
        header = f'{"Name":<{name_pad}} | # Tasks | {"Services":<{service_pad}} | Default'
        print(header)
        print('-' * (len(header) + 5))
        for plan in plans:
            task_len = len(plan['tasks']) if 'tasks' in plan and plan['tasks'] else 0
            print(f'{plan["name"]:<{name_pad}} | {task_len:<7} | '
                  f'{BackupServicePlan.service_list_to_str(plan["services"]):<{service_pad}} | '
                  f'{(plan["default"] if "default" in plan else False)!s}')

    @staticmethod
    def service_list_to_str(services: Optional[List[Any]]) -> str:
        """convert the list of services to a concise list of services"""
        if not services:
            return 'all'
        # a way to convert codenames to visible name
        convert = {'gsi': 'Indexing', 'cbas': 'Analytics', 'ft': 'Full Text Search'}
        return ', '.join([convert[service] if service in convert else service.title() for service in services])

    @staticmethod
    def get_man_page_name():
        # Parentheses fix an operator-precedence bug that returned a bare
        # ".html" on Windows.
        return 'couchbase-cli-backup-service-plan' + ('.1' if os.name != 'nt' else '.html')

    @staticmethod
    def get_description():
        return 'Manage backup service plans'
# --- (stray "|" separator removed: extraction artifact between two concatenated sources) ---
'''
Preprocessor for Foliant documentation authoring tool.
Calls Elasticsearch API to generate an index based on Markdown content.
'''
import json
import re
from os import getenv
from pathlib import Path
from typing import Optional
from urllib import request
from urllib.error import HTTPError

from bs4 import BeautifulSoup
from markdown import markdown

from foliant.preprocessors.base import BasePreprocessor
class Preprocessor(BasePreprocessor):
# Default option values; overridable from the project config.
defaults = {
    'es_url': 'http://127.0.0.1:9200/',
    'index_name': '',
    'index_copy_name': '',
    'index_properties': {},
    'actions': [
        'delete',
        'create'
    ],
    'use_chapters': True,
    'format': 'plaintext',
    'escape_html': True,
    # Raw strings keep the regex backslashes literal and silence the
    # "invalid escape sequence" DeprecationWarning; the values are unchanged.
    'url_transform': [
        {r'\/?index\.md$': '/'},
        {r'\.md$': '/'},
        {r'^([^\/]+)': r'/\g<1>'}
    ],
    'require_env': False,
    'targets': []
}
def __init__(self, *args, **kwargs):
    """Initialise the base preprocessor and attach a named child logger."""
    super().__init__(*args, **kwargs)
    child_logger = self.logger.getChild('elasticsearch')
    self.logger = child_logger
    self.logger.debug(f'Preprocessor inited: {self.__dict__}')
def _get_url(self, markdown_file_path: Path) -> str:
    """Map a Markdown file path inside the working dir to its site URL.

    Each rule in the ``url_transform`` option is a dict of
    ``{regex_pattern: replacement}`` applied in order with ``re.sub``.
    """
    # annotation corrected from ``str``: relative_to() shows this is a Path
    url = str(markdown_file_path.relative_to(self.working_dir))
    url_transformation_rules = self.options['url_transform']
    # a single rule may be given as a bare dict; normalise to a list
    if not isinstance(url_transformation_rules, list):
        url_transformation_rules = [url_transformation_rules]
    for url_transformation_rule in url_transformation_rules:
        for pattern, replacement in url_transformation_rule.items():
            url = re.sub(pattern, replacement, url)
    return url
def _get_title(self, markdown_content: str) -> Optional[str]:
    """Return the text of the first Markdown heading, or None if there is none.

    A trailing ``{#anchor}`` attribute, if present, is stripped from the title.
    """
    # annotation corrected: ``str or None`` evaluates to plain ``str``
    headings_found = re.search(
        r'^\#{1,6}\s+(.+?)(?:\s+\{\#\S+\})?\s*$',
        markdown_content,
        flags=re.MULTILINE
    )
    if headings_found:
        return headings_found.group(1)
    return None
def _get_chapters_paths(self) -> list:
    """Collect the paths of every Markdown file referenced in the chapters tree.

    The ``chapters`` config value is a nested structure of dicts, lists and
    strings; every string ending in ``.md`` is resolved against the working
    directory and recorded.
    """
    def _recursive_process_chapters(chapters_subset):
        # mirrors the structure back while recording *.md paths as a side effect
        if isinstance(chapters_subset, dict):
            processed_chapters_subset = {}
            for key, value in chapters_subset.items():
                processed_chapters_subset[key] = _recursive_process_chapters(value)
        elif isinstance(chapters_subset, list):
            processed_chapters_subset = []
            for item in chapters_subset:
                processed_chapters_subset.append(_recursive_process_chapters(item))
        elif isinstance(chapters_subset, str):
            if chapters_subset.endswith('.md'):
                chapters_paths.append(self.working_dir / chapters_subset)
            processed_chapters_subset = chapters_subset
        else:
            # fixed: non-dict/list/str leaves (e.g. None, numbers) previously
            # raised UnboundLocalError; pass them through unchanged instead
            processed_chapters_subset = chapters_subset
        return processed_chapters_subset
    chapters_paths = []
    _recursive_process_chapters(self.config['chapters'])
    self.logger.debug(f'Chapters files paths: {chapters_paths}')
    return chapters_paths
def _http_request(
    self,
    request_url: str,
    request_method: str = 'GET',
    request_headers: dict or None = None,
    request_data: bytes or None = None
) -> dict:
    """Perform an HTTP request and normalise the response into a dict.

    Both success and HTTP-error responses are returned the same way, as a
    dict with ``status``, ``headers`` and ``data`` keys.
    """
    prepared_request = request.Request(request_url, method=request_method)
    if request_headers:
        prepared_request.headers = request_headers
    if request_data:
        prepared_request.data = request_data
    try:
        with request.urlopen(prepared_request) as ok_response:
            status = ok_response.getcode()
            headers = ok_response.info()
            body = ok_response.read()
    except HTTPError as error_response:
        # Elasticsearch reports failures via HTTP error codes; capture the
        # error payload instead of propagating the exception
        status = error_response.getcode()
        headers = error_response.info()
        body = error_response.read()
    return {
        'status': status,
        'headers': headers,
        'data': body
    }
def _escape_html(self, content: str) -> str:
    """Escape the four HTML special characters with their entities.

    The visible source had identity replacements (``'&' -> '&'`` etc.),
    i.e. a no-op caused by entity decoding of the original code; the real
    entities are restored here. ``&`` must be replaced first so entities
    inserted by the later replacements are not double-escaped.
    """
    return (
        content
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
    )
def _create_index(self, index_name: str) -> None:
    """Create the index and populate it with the project's Markdown content.

    When ``index_properties`` is set, the index is created explicitly with
    those properties (an already-existing index is tolerated); otherwise
    Elasticsearch auto-creates it on first write. Content is then sent to
    the bulk API in NDJSON format.

    Raises:
        RuntimeError: if index creation or the bulk update fails.
    """
    if self.options['index_properties']:
        # inner quotes switched to double quotes: reusing the outer quote
        # inside an f-string field requires Python 3.12+ (PEP 701)
        create_request_url = f"{self.options['es_url'].rstrip('/')}/{index_name}/"
        self.logger.debug(
            'Calling Elasticsearch API to create an index with specified properties, ' +
            f'URL: {create_request_url}'
        )
        create_response = self._http_request(
            create_request_url,
            'PUT',
            {
                'Content-Type': 'application/json; charset=utf-8'
            },
            json.dumps(self.options['index_properties'], ensure_ascii=False).encode('utf-8')
        )
        create_response_data = json.loads(create_response['data'].decode('utf-8'))
        self.logger.debug(f"Response received, status: {create_response['status']}")
        self.logger.debug(f"Response headers: {create_response['headers']}")
        self.logger.debug(f'Response data: {create_response_data}')
        if create_response['status'] == 200 and create_response_data.get('acknowledged', None) is True:
            self.logger.debug('Index created')
        elif create_response['status'] == 400 and create_response_data.get(
            'error', {}
        ).get(
            'type', ''
        ) == 'resource_already_exists_exception':
            # an existing index is fine; content is appended to it below
            self.logger.debug('Index already exists')
        else:
            error_message = 'Failed to create an index'
            self.logger.error(f'{error_message}')
            raise RuntimeError(f'{error_message}')
    else:
        self.logger.debug('An index without specific properties will be created')
    if self.options['use_chapters']:
        self.logger.debug('Only files mentioned in chapters will be indexed')
        markdown_files_paths = self._get_chapters_paths()
    else:
        self.logger.debug('All files of the project will be indexed')
        markdown_files_paths = self.working_dir.rglob('*.md')
    data_for_indexing = ''
    for markdown_file_path in markdown_files_paths:
        self.logger.debug(f'Processing the file: {markdown_file_path}')
        with open(markdown_file_path, encoding='utf8') as markdown_file:
            markdown_content = markdown_file.read()
        if markdown_content:
            url = self._get_url(markdown_file_path)
            title = self._get_title(markdown_content)
            if self.options['format'] == 'html' or self.options['format'] == 'plaintext':
                self.logger.debug(f"Converting source Markdown content to: {self.options['format']}")
                content = markdown(markdown_content)
                if self.options['format'] == 'plaintext':
                    soup = BeautifulSoup(content, 'lxml')
                    # drop non-textual nodes before extracting plain text
                    for non_text_node in soup(['style', 'script']):
                        non_text_node.extract()
                    content = soup.get_text()
                if self.options['escape_html']:
                    self.logger.debug('Escaping HTML syntax')
                    if title:
                        title = self._escape_html(title)
                    content = self._escape_html(content)
            else:
                self.logger.debug('Leaving source Markdown content unchanged')
                content = markdown_content
            self.logger.debug(f'Adding the page, URL: {url}, title: {title}')
            # NDJSON bulk format: an action line followed by the document line
            data_for_indexing += '{"index": {}}\n' + json.dumps(
                {
                    'url': url,
                    'title': title,
                    'content': content
                },
                ensure_ascii=False
            ) + '\n'
        else:
            self.logger.debug('It seems that the file has no content')
    self.logger.debug(f'Data for indexing: {data_for_indexing}')
    update_request_url = f"{self.options['es_url'].rstrip('/')}/{index_name}/_bulk?refresh"
    self.logger.debug(f'Calling Elasticsearch API to add the content to the index, URL: {update_request_url}')
    update_response = self._http_request(
        update_request_url,
        'POST',
        {
            'Content-Type': 'application/json; charset=utf-8'
        },
        data_for_indexing.encode('utf-8')
    )
    update_response_data = json.loads(update_response['data'].decode('utf-8'))
    self.logger.debug(f"Response received, status: {update_response['status']}")
    self.logger.debug(f"Response headers: {update_response['headers']}")
    self.logger.debug(f'Response data: {update_response_data}')
    if update_response['status'] != 200 or update_response_data.get('errors', True):
        error_message = 'Failed to add content to the index'
        self.logger.error(f'{error_message}')
        raise RuntimeError(f'{error_message}')
    return None
def _delete_index(self, index_name: str) -> None:
    """Delete the index; a missing index is treated as success.

    Raises:
        RuntimeError: if Elasticsearch rejects the deletion for any other
            reason than the index not existing.
    """
    # inner quotes switched to double quotes: reusing the outer quote inside
    # an f-string field requires Python 3.12+ (PEP 701)
    delete_request_url = f"{self.options['es_url'].rstrip('/')}/{index_name}/"
    self.logger.debug(f'Calling Elasticsearch API to delete the index, URL: {delete_request_url}')
    delete_response = self._http_request(
        delete_request_url,
        'DELETE'
    )
    delete_response_data = json.loads(delete_response['data'].decode('utf-8'))
    self.logger.debug(f"Response received, status: {delete_response['status']}")
    self.logger.debug(f"Response headers: {delete_response['headers']}")
    self.logger.debug(f'Response data: {delete_response_data}')
    if delete_response['status'] == 200 and delete_response_data.get('acknowledged', None) is True:
        self.logger.debug('Index deleted')
    elif delete_response['status'] == 404 and delete_response_data.get(
        'error', {}
    ).get(
        'type', ''
    ) == 'index_not_found_exception':
        self.logger.debug('Index does not exist')
    else:
        error_message = 'Failed to delete the index'
        self.logger.error(f'{error_message}')
        raise RuntimeError(f'{error_message}')
    return None
def _update_index_setting(self, index_name: str, settings_to_update: dict) -> None:
update_request_url = f'{self.options['es_url'].rstrip('/')}/{index_name}/_settings/'
self.logger.debug(f'Calling Elasticsearch API to update the index settings, URL: {update_request_url}')
update_response = self._http_request(
update_request_url,
'PUT',
{
'Content-Type': 'application/json; charset=utf-8'
},
json.dumps(
settings_to_update,
ensure_ascii=False
).encode('utf-8')
)
update_response_data = json.loads(update_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {update_response['status']}')
self.logger.debug(f'Response headers: {update_response['headers']}')
self.logger.debug(f'Response data: {update_response_data}')
if update_response['status'] == 200 and update_response_data.get('acknowledged', None) is True:
self.logger.debug('Index settings updated')
else:
error_message = 'Failed to update the index settings'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _clone_index(self, index_name: str, index_copy_name: str) -> None:
clone_request_url = f'{self.options['es_url'].rstrip('/')}/{index_name}/_clone/{index_copy_name}/'
self.logger.debug(f'Calling Elasticsearch API to clone the index, URL: {clone_request_url}')
clone_response = self._http_request(
clone_request_url,
'POST'
)
clone_response_data = json.loads(clone_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {clone_response['status']}')
self.logger.debug(f'Response headers: {clone_response['headers']}')
self.logger.debug(f'Response data: {clone_response_data}')
if clone_response['status'] == 200 and clone_response_data.get('acknowledged', None) is True:
self.logger.debug('Index cloned')
else:
error_message = 'Failed to clone the index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _copy_index(self, index_name: str, index_copy_name: str) -> None:
if not index_copy_name:
index_copy_name = index_name + '_copy'
self.logger.debug(f'Copying the index {index_name} to {index_copy_name}')
self.logger.debug(f'First, marking the index {index_name} as read-only')
self._update_index_setting(
index_name,
{
'settings': {
'index.blocks.write': True
}
}
)
self.logger.debug(f'Second, deleting the index {index_copy_name}, if exists')
self._delete_index(index_copy_name)
self.logger.debug(f'Third, cloning the index {index_name} as {index_copy_name}')
self._clone_index(index_name, index_copy_name)
self.logger.debug(f'Fourth, unmarking the index {index_name} as read-only')
self._update_index_setting(
index_name,
{
'settings': {
'index.blocks.write': False
}
}
)
self.logger.debug(f'Fifth, also unmarking the index {index_copy_name} as read-only')
self._update_index_setting(
index_copy_name,
{
'settings': {
'index.blocks.write': False
}
}
)
return None
def apply(self):
self.logger.info('Applying preprocessor')
envvar = 'FOLIANT_ELASTICSEARCH'
if not self.options['require_env'] or getenv(envvar) is not None:
self.logger.debug(
f'Allowed targets: {self.options['targets']}, ' +
f'current target: {self.context['target']}'
)
if not self.options['targets'] or self.context['target'] in self.options['targets']:
actions = self.options['actions']
if not isinstance(self.options['actions'], list):
actions = [actions]
for action in actions:
self.logger.debug(f'Applying action: {action}')
if action == 'create':
self._create_index(self.options['index_name'])
elif action == 'delete':
self._delete_index(self.options['index_name'])
elif action == 'copy':
self._copy_index(self.options['index_name'], self.options['index_copy_name'])
else:
self.logger.debug('Unknown action, skipping')
else:
self.logger.debug(f'Environment variable {envvar} is not set, skipping')
self.logger.info('Preprocessor applied')
| '''
Preprocessor for Foliant documentation authoring tool.
Calls Elasticsearch API to generate an index based on Markdown content.
'''
import re
import json
from os import getenv
from pathlib import Path
from urllib import request
from urllib.error import HTTPError
from markdown import markdown
from bs4 import BeautifulSoup
from foliant.preprocessors.base import BasePreprocessor
class Preprocessor(BasePreprocessor):
defaults = {
'es_url': 'http://127.0.0.1:9200/',
'index_name': '',
'index_copy_name': '',
'index_properties': {},
'actions': [
'delete',
'create'
],
'use_chapters': True,
'format': 'plaintext',
'escape_html': True,
'url_transform': [
{'\/?index\.md$': '/'},
{'\.md$': '/'},
{'^([^\/]+)': '/\g<1>'}
],
'require_env': False,
'targets': []
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = self.logger.getChild('elasticsearch')
self.logger.debug(f'Preprocessor inited: {self.__dict__}')
def _get_url(self, markdown_file_path: str) -> str:
url = str(markdown_file_path.relative_to(self.working_dir))
url_transformation_rules = self.options['url_transform']
if not isinstance(url_transformation_rules, list):
url_transformation_rules = [url_transformation_rules]
for url_transformation_rule in url_transformation_rules:
for pattern, replacement in url_transformation_rule.items():
url = re.sub(pattern, replacement, url)
return url
def _get_title(self, markdown_content: str) -> str or None:
headings_found = re.search(
r'^\#{1,6}\s+(.+?)(?:\s+\{\#\S+\})?\s*$',
markdown_content,
flags=re.MULTILINE
)
if headings_found:
return headings_found.group(1)
return None
def _get_chapters_paths(self) -> list:
def _recursive_process_chapters(chapters_subset):
if isinstance(chapters_subset, dict):
processed_chapters_subset = {}
for key, value in chapters_subset.items():
processed_chapters_subset[key] = _recursive_process_chapters(value)
elif isinstance(chapters_subset, list):
processed_chapters_subset = []
for item in chapters_subset:
processed_chapters_subset.append(_recursive_process_chapters(item))
elif isinstance(chapters_subset, str):
if chapters_subset.endswith('.md'):
chapters_paths.append(self.working_dir / chapters_subset)
processed_chapters_subset = chapters_subset
else:
processed_chapters_subset = chapters_subset
return processed_chapters_subset
chapters_paths = []
_recursive_process_chapters(self.config['chapters'])
self.logger.debug(f'Chapters files paths: {chapters_paths}')
return chapters_paths
def _http_request(
self,
request_url: str,
request_method: str = 'GET',
request_headers: dict or None = None,
request_data: bytes or None = None
) -> dict:
http_request = request.Request(request_url, method=request_method)
if request_headers:
http_request.headers = request_headers
if request_data:
http_request.data = request_data
try:
with request.urlopen(http_request) as http_response:
response_status = http_response.getcode()
response_headers = http_response.info()
response_data = http_response.read()
except HTTPError as http_response_not_ok:
response_status = http_response_not_ok.getcode()
response_headers = http_response_not_ok.info()
response_data = http_response_not_ok.read()
return {
'status': response_status,
'headers': response_headers,
'data': response_data
}
def _escape_html(self, content: str) -> str:
return content.replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"')
def _create_index(self, index_name: str) -> None:
if self.options['index_properties']:
create_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/'
self.logger.debug(
'Calling Elasticsearch API to create an index with specified properties, ' +
f'URL: {create_request_url}'
)
create_response = self._http_request(
create_request_url,
'PUT',
{
'Content-Type': 'application/json; charset=utf-8'
},
json.dumps(self.options['index_properties'], ensure_ascii=False).encode('utf-8')
)
create_response_data = json.loads(create_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {create_response["status"]}')
self.logger.debug(f'Response headers: {create_response["headers"]}')
self.logger.debug(f'Response data: {create_response_data}')
if create_response['status'] == 200 and create_response_data.get('acknowledged', None) is True:
self.logger.debug('Index created')
elif create_response['status'] == 400 and create_response_data.get(
'error', {}
).get(
'type', ''
) == 'resource_already_exists_exception':
self.logger.debug('Index already exists')
else:
error_message = 'Failed to create an index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
else:
self.logger.debug('An index without specific properties will be created')
if self.options['use_chapters']:
self.logger.debug('Only files mentioned in chapters will be indexed')
markdown_files_paths = self._get_chapters_paths()
else:
self.logger.debug('All files of the project will be indexed')
markdown_files_paths = self.working_dir.rglob('*.md')
data_for_indexing = ''
for markdown_file_path in markdown_files_paths:
self.logger.debug(f'Processing the file: {markdown_file_path}')
with open(markdown_file_path, encoding='utf8') as markdown_file:
markdown_content = markdown_file.read()
if markdown_content:
url = self._get_url(markdown_file_path)
title = self._get_title(markdown_content)
if self.options['format'] == 'html' or self.options['format'] == 'plaintext':
self.logger.debug(f'Converting source Markdown content to: {self.options["format"]}')
content = markdown(markdown_content)
if self.options['format'] == 'plaintext':
soup = BeautifulSoup(content, 'lxml')
for non_text_node in soup(['style', 'script']):
non_text_node.extract()
content = soup.get_text()
if self.options['escape_html']:
self.logger.debug('Escaping HTML syntax')
if title:
title = self._escape_html(title)
content = self._escape_html(content)
else:
self.logger.debug('Leaving source Markdown content unchanged')
content = markdown_content
self.logger.debug(f'Adding the page, URL: {url}, title: {title}')
data_for_indexing += '{"index": {}}\n' + json.dumps(
{
'url': url,
'title': title,
'content': content
},
ensure_ascii=False
) + '\n'
else:
self.logger.debug('It seems that the file has no content')
self.logger.debug(f'Data for indexing: {data_for_indexing}')
update_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/_bulk?refresh'
self.logger.debug(f'Calling Elasticsearch API to add the content to the index, URL: {update_request_url}')
update_response = self._http_request(
update_request_url,
'POST',
{
'Content-Type': 'application/json; charset=utf-8'
},
data_for_indexing.encode('utf-8')
)
update_response_data = json.loads(update_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {update_response["status"]}')
self.logger.debug(f'Response headers: {update_response["headers"]}')
self.logger.debug(f'Response data: {update_response_data}')
if update_response['status'] != 200 or update_response_data.get('errors', True):
error_message = 'Failed to add content to the index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _delete_index(self, index_name: str) -> None:
delete_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/'
self.logger.debug(f'Calling Elasticsearch API to delete the index, URL: {delete_request_url}')
delete_response = self._http_request(
delete_request_url,
'DELETE'
)
delete_response_data = json.loads(delete_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {delete_response["status"]}')
self.logger.debug(f'Response headers: {delete_response["headers"]}')
self.logger.debug(f'Response data: {delete_response_data}')
if delete_response['status'] == 200 and delete_response_data.get('acknowledged', None) is True:
self.logger.debug('Index deleted')
elif delete_response['status'] == 404 and delete_response_data.get(
'error', {}
).get(
'type', ''
) == 'index_not_found_exception':
self.logger.debug('Index does not exist')
else:
error_message = 'Failed to delete the index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _update_index_setting(self, index_name: str, settings_to_update: dict) -> None:
update_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/_settings/'
self.logger.debug(f'Calling Elasticsearch API to update the index settings, URL: {update_request_url}')
update_response = self._http_request(
update_request_url,
'PUT',
{
'Content-Type': 'application/json; charset=utf-8'
},
json.dumps(
settings_to_update,
ensure_ascii=False
).encode('utf-8')
)
update_response_data = json.loads(update_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {update_response["status"]}')
self.logger.debug(f'Response headers: {update_response["headers"]}')
self.logger.debug(f'Response data: {update_response_data}')
if update_response['status'] == 200 and update_response_data.get('acknowledged', None) is True:
self.logger.debug('Index settings updated')
else:
error_message = 'Failed to update the index settings'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _clone_index(self, index_name: str, index_copy_name: str) -> None:
clone_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/_clone/{index_copy_name}/'
self.logger.debug(f'Calling Elasticsearch API to clone the index, URL: {clone_request_url}')
clone_response = self._http_request(
clone_request_url,
'POST'
)
clone_response_data = json.loads(clone_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {clone_response["status"]}')
self.logger.debug(f'Response headers: {clone_response["headers"]}')
self.logger.debug(f'Response data: {clone_response_data}')
if clone_response['status'] == 200 and clone_response_data.get('acknowledged', None) is True:
self.logger.debug('Index cloned')
else:
error_message = 'Failed to clone the index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _copy_index(self, index_name: str, index_copy_name: str) -> None:
if not index_copy_name:
index_copy_name = index_name + '_copy'
self.logger.debug(f'Copying the index {index_name} to {index_copy_name}')
self.logger.debug(f'First, marking the index {index_name} as read-only')
self._update_index_setting(
index_name,
{
'settings': {
'index.blocks.write': True
}
}
)
self.logger.debug(f'Second, deleting the index {index_copy_name}, if exists')
self._delete_index(index_copy_name)
self.logger.debug(f'Third, cloning the index {index_name} as {index_copy_name}')
self._clone_index(index_name, index_copy_name)
self.logger.debug(f'Fourth, unmarking the index {index_name} as read-only')
self._update_index_setting(
index_name,
{
'settings': {
'index.blocks.write': False
}
}
)
self.logger.debug(f'Fifth, also unmarking the index {index_copy_name} as read-only')
self._update_index_setting(
index_copy_name,
{
'settings': {
'index.blocks.write': False
}
}
)
return None
def apply(self):
self.logger.info('Applying preprocessor')
envvar = 'FOLIANT_ELASTICSEARCH'
if not self.options['require_env'] or getenv(envvar) is not None:
self.logger.debug(
f'Allowed targets: {self.options["targets"]}, ' +
f'current target: {self.context["target"]}'
)
if not self.options['targets'] or self.context['target'] in self.options['targets']:
actions = self.options['actions']
if not isinstance(self.options['actions'], list):
actions = [actions]
for action in actions:
self.logger.debug(f'Applying action: {action}')
if action == 'create':
self._create_index(self.options['index_name'])
elif action == 'delete':
self._delete_index(self.options['index_name'])
elif action == 'copy':
self._copy_index(self.options['index_name'], self.options['index_copy_name'])
else:
self.logger.debug('Unknown action, skipping')
else:
self.logger.debug(f'Environment variable {envvar} is not set, skipping')
self.logger.info('Preprocessor applied')
|
from sklearn.metrics import roc_curve, auc
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import json
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
from pylab import rcParams
# rcParams['figure.figsize'] = 20, 20
rcParams['figure.figsize'] = 15, 15
def results(x_true, x_pred, y_true, y_pred, classes, params, path=None, name=None):
if path is None and name is None:
path = f'models/{params['model_type']}/{params['exp_name']}/'
name = f'{params['model_type']}-{params['exp_name']}'
# Create folder
Path(path).mkdir(parents=True, exist_ok=True)
# Log
log_file = open(f'{path}log.json', "w")
json.dump(params, log_file, indent=4)
# Train results
x_pred_ = x_pred.argmax(dim=1)
#classification report
report = classification_report(x_true, x_pred_, target_names=classes,output_dict=True)
df_classification_report = pd.DataFrame(report).transpose()
accuracy_report = df_classification_report.tail(3)
accuracy_report.to_csv(path+'train_accuracy_report.csv')
df_classification_report.drop(df_classification_report.tail(3).index, inplace=True)
df_classification_report = df_classification_report.sort_values(by=['f1-score'], ascending=False)
df_classification_report.to_csv(path+'train_classification_report.csv')
# AUC curve
x_true_ohe = np.zeros((len(x_pred), len(classes)))
for idx, lbl in enumerate(x_true):
x_true_ohe[idx][lbl] = 1
x_pred = x_pred.detach().numpy()
plot_multiclass_roc(x_true_ohe,x_pred, classes=classes, path=path, name='train-'+name)
# Confusion matrix
cm = confusion_matrix(x_true, x_pred_)
plot_confusion_matrix(cm, classes, path=path, name='train-'+name)
# Test results
y_pred_ = y_pred.argmax(dim=1)
#classification report
report = classification_report(y_true, y_pred_, target_names=classes,output_dict=True)
df_classification_report = pd.DataFrame(report).transpose()
accuracy_report = df_classification_report.tail(3)
accuracy_report.to_csv(path+'test-accuracy_report.csv')
df_classification_report.drop(df_classification_report.tail(3).index, inplace=True)
df_classification_report = df_classification_report.sort_values(by=['f1-score'], ascending=False)
df_classification_report.to_csv(path+'test-classification_report.csv')
# AUC curve
y_true_ohe = np.zeros((len(y_pred), len(classes)))
for idx, lbl in enumerate(y_true):
y_true_ohe[idx][lbl] = 1
y_pred = y_pred.detach().numpy()
plot_multiclass_roc(y_true_ohe,y_pred, classes=classes, path=path, name='test-'+name)
# Confusion matrix
cm = confusion_matrix(y_true, y_pred_)
plot_confusion_matrix(cm, classes, path=path, name='test-'+name)
# plot_confusion_matrix(cm, list(range(len(classes))), path=path, name='test-'+name)
def get_color(idx):
if idx < 10:
return '#f500dc'
elif idx < 20:
return '#00f500'
elif idx < 30:
return '#00e0f5'
elif idx < 40:
return '#000cf5'
elif idx < 50:
return '#f5e900'
elif idx < 60:
return '#f58f00'
else:
return '#f50000'
def plot_multiclass_roc(y_true, y_pred, classes, path, name):
n_classes = len(classes)
lw=1
items = []
labels = ['item_id', 'fpr', 'tpr', 'roc_auc']
for i in range(n_classes):
fpr, tpr, _ = roc_curve(y_true[:, i], y_pred[:, i])
roc_auc = auc(fpr, tpr)
items.append((i, fpr, tpr, roc_auc))
df = pd.DataFrame.from_records(items, columns=labels)
df = df.sort_values(by=['roc_auc'], ascending=False)
for idx, (_, row) in enumerate(df.iterrows()):
color = get_color(idx)
plt.plot(row['fpr'], row['tpr'], lw=lw, color=color,
label=f'{classes[row['item_id']]} (area = {row['roc_auc']:.2f})')
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(f'Receiver operating characteristic for {name}')
plt.legend(loc='lower right',
fancybox=True, shadow=True, ncol=3, prop={'size': 12})
plt.savefig(f'{path}{name}-roc.png', bbox_inches='tight')
plt.clf()
plt.close()
def plot_confusion_matrix(cm, classes, path, name, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar(shrink=0.75)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title(f'Confusion Matrix for {name}')
plt.savefig(f'{path}{name}-cm.png', bbox_inches='tight')
plt.clf()
plt.close() |
from sklearn.metrics import roc_curve, auc
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import json
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
from pylab import rcParams
# rcParams['figure.figsize'] = 20, 20
rcParams['figure.figsize'] = 15, 15
def results(x_true, x_pred, y_true, y_pred, classes, params, path=None, name=None):
if path is None and name is None:
path = f'models/{params["model_type"]}/{params["exp_name"]}/'
name = f'{params["model_type"]}-{params["exp_name"]}'
# Create folder
Path(path).mkdir(parents=True, exist_ok=True)
# Log
log_file = open(f'{path}log.json', "w")
json.dump(params, log_file, indent=4)
# Train results
x_pred_ = x_pred.argmax(dim=1)
#classification report
report = classification_report(x_true, x_pred_, target_names=classes,output_dict=True)
df_classification_report = pd.DataFrame(report).transpose()
accuracy_report = df_classification_report.tail(3)
accuracy_report.to_csv(path+'train_accuracy_report.csv')
df_classification_report.drop(df_classification_report.tail(3).index, inplace=True)
df_classification_report = df_classification_report.sort_values(by=['f1-score'], ascending=False)
df_classification_report.to_csv(path+'train_classification_report.csv')
# AUC curve
x_true_ohe = np.zeros((len(x_pred), len(classes)))
for idx, lbl in enumerate(x_true):
x_true_ohe[idx][lbl] = 1
x_pred = x_pred.detach().numpy()
plot_multiclass_roc(x_true_ohe,x_pred, classes=classes, path=path, name='train-'+name)
# Confusion matrix
cm = confusion_matrix(x_true, x_pred_)
plot_confusion_matrix(cm, classes, path=path, name='train-'+name)
# Test results
y_pred_ = y_pred.argmax(dim=1)
#classification report
report = classification_report(y_true, y_pred_, target_names=classes,output_dict=True)
df_classification_report = pd.DataFrame(report).transpose()
accuracy_report = df_classification_report.tail(3)
accuracy_report.to_csv(path+'test-accuracy_report.csv')
df_classification_report.drop(df_classification_report.tail(3).index, inplace=True)
df_classification_report = df_classification_report.sort_values(by=['f1-score'], ascending=False)
df_classification_report.to_csv(path+'test-classification_report.csv')
# AUC curve
y_true_ohe = np.zeros((len(y_pred), len(classes)))
for idx, lbl in enumerate(y_true):
y_true_ohe[idx][lbl] = 1
y_pred = y_pred.detach().numpy()
plot_multiclass_roc(y_true_ohe,y_pred, classes=classes, path=path, name='test-'+name)
# Confusion matrix
cm = confusion_matrix(y_true, y_pred_)
plot_confusion_matrix(cm, classes, path=path, name='test-'+name)
# plot_confusion_matrix(cm, list(range(len(classes))), path=path, name='test-'+name)
def get_color(idx):
if idx < 10:
return '#f500dc'
elif idx < 20:
return '#00f500'
elif idx < 30:
return '#00e0f5'
elif idx < 40:
return '#000cf5'
elif idx < 50:
return '#f5e900'
elif idx < 60:
return '#f58f00'
else:
return '#f50000'
def plot_multiclass_roc(y_true, y_pred, classes, path, name):
n_classes = len(classes)
lw=1
items = []
labels = ['item_id', 'fpr', 'tpr', 'roc_auc']
for i in range(n_classes):
fpr, tpr, _ = roc_curve(y_true[:, i], y_pred[:, i])
roc_auc = auc(fpr, tpr)
items.append((i, fpr, tpr, roc_auc))
df = pd.DataFrame.from_records(items, columns=labels)
df = df.sort_values(by=['roc_auc'], ascending=False)
for idx, (_, row) in enumerate(df.iterrows()):
color = get_color(idx)
plt.plot(row['fpr'], row['tpr'], lw=lw, color=color,
label=f'{classes[row["item_id"]]} (area = {row["roc_auc"]:.2f})')
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(f'Receiver operating characteristic for {name}')
plt.legend(loc='lower right',
fancybox=True, shadow=True, ncol=3, prop={'size': 12})
plt.savefig(f'{path}{name}-roc.png', bbox_inches='tight')
plt.clf()
plt.close()
def plot_confusion_matrix(cm, classes, path, name, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar(shrink=0.75)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title(f'Confusion Matrix for {name}')
plt.savefig(f'{path}{name}-cm.png', bbox_inches='tight')
plt.clf()
plt.close() |
from typing import List
from flake8_functions_names.custom_types import FuncdefInfo
from flake8_functions_names.utils.imports import is_module_installed
from flake8_functions_names.words import VERBS, PURE_VERBS, BLACKLISTED_WORDS_IN_FUNCTIONS_NAMES
def validate_returns_bool_if_names_said_so(funcdef: FuncdefInfo) -> List[str]:
if funcdef.is_name_looks_like_question and funcdef.return_type != 'bool':
return [
f'FNE001 Name of the function says, that is should '
f'return bool, but it returns {funcdef.return_type}',
]
return []
def validate_has_property_and_no_verbs(funcdef: FuncdefInfo) -> List[str]: # noqa: FNE007
if funcdef.has_property_decorator and any(w in VERBS for w in funcdef.name_words):
verbs = [w for w in funcdef.name_words if w in VERBS]
return [
f'FNE002 The method has a @property decorator, '
f"but has a verb in it's name ({", ".join(verbs)})",
]
return []
def validate_save_to(funcdef: FuncdefInfo) -> List[str]:
if 'save' in funcdef.name_words and 'to' not in funcdef.name_words:
return [
'FNE003 Name of the function uses "save", but not uses "to"',
]
return []
def validate_load_from(funcdef: FuncdefInfo) -> List[str]:
if 'load' in funcdef.name_words and 'from' not in funcdef.name_words:
return [
'FNE004 ame of the function uses "load", but not uses "from"',
]
return []
def validate_returns_bool_and_name_shows_it(funcdef: FuncdefInfo) -> List[str]: # noqa: FNE007
if (
funcdef.return_type == 'bool'
and not funcdef.is_name_looks_like_question
and not funcdef.is_buildin_dundner_method_that_returns_bool
):
return [
"FNE005 Return type of the function is bool, but the name doesn't show it",
]
return []
def validate_names_says_its_pure_and_its_pure( # noqa: CFQ003, FNE007
funcdef: FuncdefInfo,
) -> List[str]:
if (
is_module_installed('deal')
and not funcdef.has_deal_pure_decorator
and any(w in PURE_VERBS for w in funcdef.name_words)
):
return [
'FNE006 Name of function says, that it works with data, '
'so it should be pure, but it has no @deal.pure()',
]
return []
def validate_no_blacklisted_words_in_name(funcdef: FuncdefInfo) -> List[str]:
blacklisted_words = [w for w in funcdef.name_words if w in BLACKLISTED_WORDS_IN_FUNCTIONS_NAMES]
if blacklisted_words:
return [
f'FNE007 "{blacklisted_words[0]}" is not recommended in functions names',
]
return []
def validate_name_not_endswith_first_argument_name(funcdef: FuncdefInfo) -> List[str]:
if funcdef.arguments_names and funcdef.name.endswith(f'_{funcdef.arguments_names[0]}'):
return [
"FNE008 Name of functions ends with it's first argument name",
]
return []
| from typing import List
from flake8_functions_names.custom_types import FuncdefInfo
from flake8_functions_names.utils.imports import is_module_installed
from flake8_functions_names.words import VERBS, PURE_VERBS, BLACKLISTED_WORDS_IN_FUNCTIONS_NAMES
def validate_returns_bool_if_names_said_so(funcdef: FuncdefInfo) -> List[str]:
if funcdef.is_name_looks_like_question and funcdef.return_type != 'bool':
return [
f'FNE001 Name of the function says, that is should '
f'return bool, but it returns {funcdef.return_type}',
]
return []
def validate_has_property_and_no_verbs(funcdef: FuncdefInfo) -> List[str]: # noqa: FNE007
if funcdef.has_property_decorator and any(w in VERBS for w in funcdef.name_words):
verbs = [w for w in funcdef.name_words if w in VERBS]
return [
f'FNE002 The method has a @property decorator, '
f"but has a verb in it's name ({', '.join(verbs)})",
]
return []
def validate_save_to(funcdef: FuncdefInfo) -> List[str]:
if 'save' in funcdef.name_words and 'to' not in funcdef.name_words:
return [
'FNE003 Name of the function uses "save", but not uses "to"',
]
return []
def validate_load_from(funcdef: FuncdefInfo) -> List[str]:
if 'load' in funcdef.name_words and 'from' not in funcdef.name_words:
return [
'FNE004 ame of the function uses "load", but not uses "from"',
]
return []
def validate_returns_bool_and_name_shows_it(funcdef: FuncdefInfo) -> List[str]: # noqa: FNE007
if (
funcdef.return_type == 'bool'
and not funcdef.is_name_looks_like_question
and not funcdef.is_buildin_dundner_method_that_returns_bool
):
return [
"FNE005 Return type of the function is bool, but the name doesn't show it",
]
return []
def validate_names_says_its_pure_and_its_pure( # noqa: CFQ003, FNE007
funcdef: FuncdefInfo,
) -> List[str]:
if (
is_module_installed('deal')
and not funcdef.has_deal_pure_decorator
and any(w in PURE_VERBS for w in funcdef.name_words)
):
return [
'FNE006 Name of function says, that it works with data, '
'so it should be pure, but it has no @deal.pure()',
]
return []
def validate_no_blacklisted_words_in_name(funcdef: FuncdefInfo) -> List[str]:
blacklisted_words = [w for w in funcdef.name_words if w in BLACKLISTED_WORDS_IN_FUNCTIONS_NAMES]
if blacklisted_words:
return [
f'FNE007 "{blacklisted_words[0]}" is not recommended in functions names',
]
return []
def validate_name_not_endswith_first_argument_name(funcdef: FuncdefInfo) -> List[str]:
if funcdef.arguments_names and funcdef.name.endswith(f'_{funcdef.arguments_names[0]}'):
return [
"FNE008 Name of functions ends with it's first argument name",
]
return []
|
import asyncio
import discord
from discord import Member, Role, TextChannel, DMChannel
from discord.ext import commands
from typing import Union
from profanity_check import predict
class ProfanityFilter:
    """
    A simple filter that checks for profanity in a message and
    then deletes it. Many profanity detection libraries use a hard-coded
    list of bad words to detect and filter profanity, however this
    plugin utilises a library that uses a linear support vector machine
    (SVM) model trained on 200k human-labeled samples of clean and profane
    text strings. ([`profanity-check`](https://github.com/vzhou842/profanity-check)).
    Artificial intelligence in a discord bot? Heck yeah!
    """
    def __init__(self, bot):
        self.bot = bot
        self.coll = bot.plugin_db.get_partition(self)
        # Defaults used until the persisted config loads asynchronously.
        self.enabled = True
        self.whitelist = set()  # ids of exempt users/roles/channels
        # NOTE: requires a running event loop at construction time.
        asyncio.create_task(self._set_config())
    async def _set_config(self):
        """Load the persisted plugin config, tolerating a missing document."""
        # find_one returns None when no config was ever stored; fall back to
        # an empty dict so the first run does not raise AttributeError.
        config = await self.coll.find_one({'_id': 'config'}) or {}
        self.enabled = config.get('enabled', True)
        self.whitelist = set(config.get('whitelist', []))
    @commands.group(invoke_without_command=True)
    @commands.is_owner()
    async def profanity(self, ctx, mode: bool):
        """Disable or enable the profanity filter.
        Usage: `profanity enable` / `profanity disable`
        """
        self.enabled = mode
        await self.coll.update_one(
            {'_id': 'config'},
            {'$set': {'enabled': self.enabled}},
            upsert=True
        )
        await ctx.send(('Enabled' if mode else 'Disabled') + ' the profanity filter.')
    @commands.is_owner()
    @profanity.command()
    async def whitelist(ctx, target: Union[Member, Role, TextChannel]):
        """Whitelist a user, role or channel from the profanity filter.
        Usage: `profanity whitelist @dude`
        """
        self = ctx.bot.get_cog('ProfanityFilter')  # wtf where did self dissapear
        if target.id in self.whitelist:
            self.whitelist.remove(target.id)
            removed = True
        else:
            self.whitelist.add(target.id)
            removed = False
        await self.coll.update_one(
            {'_id': 'config'},
            {'$set': {'whitelist': list(self.whitelist)}},
            upsert=True
        )
        # Fix: the inner alternatives must use single quotes — reusing double
        # quotes inside a double-quoted f-string is a SyntaxError before 3.12.
        await ctx.send(
            f"{'Un-w' if removed else 'W'}hitelisted "
            f"{target.mention} from the profanity filter."
        )
    async def on_message(self, message):
        if not self.enabled:
            return
        channel = message.channel
        author = message.author
        if isinstance(author, discord.User):  # private channel
            return
        # The whitelist may exempt the author, any of their roles, or the channel.
        ids = {author.id, channel.id} | {r.id for r in author.roles}
        if self.whitelist.intersection(ids):  # anything intersects
            return
        profane = bool(predict([message.content])[0])
        if not profane:
            return
        await message.delete()
        temp = await channel.send(
            f'{author.mention} your message has '
            'been deleted for containing profanity.'
        )
        await asyncio.sleep(5)
        await temp.delete()
def setup(bot):
bot.add_cog(ProfanityFilter(bot)) | import asyncio
import discord
from discord import Member, Role, TextChannel, DMChannel
from discord.ext import commands
from typing import Union
from profanity_check import predict
class ProfanityFilter:
    """Delete profane messages using an SVM-based classifier.

    Unlike wordlist filters, this plugin relies on
    [`profanity-check`](https://github.com/vzhou842/profanity-check), a
    linear SVM model trained on 200k human-labeled clean/profane strings.
    Artificial intelligence in a discord bot? Heck yeah!
    """
    def __init__(self, bot):
        self.bot = bot
        self.coll = bot.plugin_db.get_partition(self)
        self.enabled = True
        self.whitelist = set()
        asyncio.create_task(self._set_config())
    async def _set_config(self):
        """Restore `enabled` and `whitelist` from the plugin database."""
        stored = await self.coll.find_one({'_id': 'config'})
        self.enabled = stored.get('enabled', True)
        self.whitelist = set(stored.get('whitelist', []))
    @commands.group(invoke_without_command=True)
    @commands.is_owner()
    async def profanity(self, ctx, mode: bool):
        """Disable or enable the profanity filter.
        Usage: `profanity enable` / `profanity disable`
        """
        self.enabled = mode
        await self.coll.update_one(
            {'_id': 'config'}, {'$set': {'enabled': self.enabled}}, upsert=True
        )
        word = 'Enabled' if mode else 'Disabled'
        await ctx.send(word + ' the profanity filter.')
    @commands.is_owner()
    @profanity.command()
    async def whitelist(ctx, target: Union[Member, Role, TextChannel]):
        """Whitelist a user, role or channel from the profanity filter.
        Usage: `profanity whitelist @dude`
        """
        self = ctx.bot.get_cog('ProfanityFilter')  # command callbacks get no `self` here
        removed = target.id in self.whitelist
        if removed:
            self.whitelist.discard(target.id)
        else:
            self.whitelist.add(target.id)
        await self.coll.update_one(
            {'_id': 'config'}, {'$set': {'whitelist': list(self.whitelist)}}, upsert=True
        )
        prefix = 'Un-w' if removed else 'W'
        await ctx.send(f"{prefix}hitelisted {target.mention} from the profanity filter.")
    async def on_message(self, message):
        if not self.enabled:
            return
        author = message.author
        if isinstance(author, discord.User):  # private channel: nothing to moderate
            return
        exempt_ids = {author.id, message.channel.id}
        exempt_ids.update(role.id for role in author.roles)
        if not self.whitelist.isdisjoint(exempt_ids):
            return
        if not predict([message.content])[0]:
            return
        await message.delete()
        notice = await message.channel.send(
            f'{author.mention} your message has '
            'been deleted for containing profanity.'
        )
        await asyncio.sleep(5)
        await notice.delete()
def setup(bot):
bot.add_cog(ProfanityFilter(bot)) |
import paho.mqtt.client as mqtt
import time
import argparse
from tinydb import TinyDB, Query
from tinyrecord import transaction
import logging
import sys
import json
import threading
import ssl
from random import randint
CA_ROOT_CERT_FILE = "ag-certificate/AmazonRootCA1.pem"
THING_CERT_FILE = "ag-certificate/..."
THING_PRIVATE_KEY = "ag-certificate/..."
# init args parser
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
"MQTT_broker", metavar="MQTT_broker", type=str, help="Address of the MQTT broker"
)
args = parser.parse_args()
# init logger
logFormatter = logging.Formatter(
"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s"
)
logger = logging.getLogger()
fileHandler = logging.FileHandler("{0}/{1}.log".format("log", f"agenceur"))
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
# init opaque DB
db_opaque = TinyDB("opaque.json")
# init clear measures DB
db_measures = TinyDB("measures.json")
db_measures.truncate()
lock = threading.Lock() # on received message
def on_message(client, userdata, message):
    """MQTT message callback; dispatches on topic.

    Topics:
      * addToPool   - store an opaque payload in the local pool DB
      * requestPool - publish up to 10 pooled entries back to the requester
      * measures    - store a clear-text measure in the measures DB
    """
    with lock:
        payload = message.payload.decode("utf-8")
        logger.debug("rcvd: " + message.topic + "/" + payload)
        if message.topic == "addToPool":
            # store in DB
            logger.info("storing payload")
            db_opaque.insert({"entry": payload})
        elif message.topic == "requestPool":
            asking = payload
            logger.info(f"received pool request from {asking}")
            completePool = db_opaque.all()
            # truncate because we will save it again?
            db_opaque.truncate()
            # don't send if pool is empty
            if completePool:
                # Take the first 10 entries with slicing instead of the previous
                # repeated pop(0) (which was O(n) per pop); re-insert the rest.
                to_send = completePool[:10]
                for left in completePool[10:]:
                    db_opaque.insert(left)
                table_json = json.dumps(to_send)
                logger.info(f"publishing table to getPool{asking}, len={len(table_json)}, n={len(to_send)}")
                client.publish(f"getPool{asking}", table_json, qos=1)
        elif message.topic == "measures":
            j = json.loads(payload)
            logger.info(f"m: {message.payload}")
            db_measures.insert({"entry": payload})
            # Fix: the key must use single quotes — "{j["MUID"]}" is a
            # SyntaxError before Python 3.12.
            logger.info(f"received measure {j['MUID']}")
# connecting to MQTT broker
logger.info(f"Connecting to broker at {args.MQTT_broker}")
client = mqtt.Client("Agenceur")
client.tls_set(CA_ROOT_CERT_FILE, certfile=THING_CERT_FILE, keyfile=THING_PRIVATE_KEY)#, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
client.connect(args.MQTT_broker, 8883)
# client.enable_logger()
# start receive thread
client.loop_start()
# subscribe to
# * addToPool: endpoint for opaque payload
# * requestPool: endpoint for opaque pool request from devices
# * measures: endpoint for clear-data measures
client.subscribe("addToPool")
client.subscribe("requestPool")
client.subscribe("measures")
# register receive routine
# NOTE(review): the handler is assigned after subscribe/loop_start, so
# messages arriving in that window are dropped — consider registering
# before connect().
client.on_message = on_message
# only on event execution: keep the main thread alive while the paho
# network loop runs in the background; stop the loop on interrupt/exit
# (previously client.loop_stop() was unreachable after `while True`).
try:
    while True:
        time.sleep(1)
finally:
    client.loop_stop()
| import paho.mqtt.client as mqtt
import time
import argparse
from tinydb import TinyDB, Query
from tinyrecord import transaction
import logging
import sys
import json
import threading
import ssl
from random import randint
CA_ROOT_CERT_FILE = "ag-certificate/AmazonRootCA1.pem"
THING_CERT_FILE = "ag-certificate/..."
THING_PRIVATE_KEY = "ag-certificate/..."
# init args parser
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
"MQTT_broker", metavar="MQTT_broker", type=str, help="Address of the MQTT broker"
)
args = parser.parse_args()
# init logger
logFormatter = logging.Formatter(
"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s"
)
logger = logging.getLogger()
fileHandler = logging.FileHandler("{0}/{1}.log".format("log", f"agenceur"))
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
# init opaque DB
db_opaque = TinyDB("opaque.json")
# init clear measures DB
db_measures = TinyDB("measures.json")
db_measures.truncate()
lock = threading.Lock() # on received message
def on_message(client, userdata, message):
    """Dispatch an incoming MQTT message based on its topic."""
    with lock:
        text = str(message.payload.decode("utf-8"))
        logger.debug("rcvd: " + message.topic + "/" + text)
        if message.topic == "addToPool":
            # persist the opaque payload
            logger.info(f"storing payload")
            db_opaque.insert({"entry": text})
        elif message.topic == "requestPool":
            requester = text
            logger.info(f"received pool request from {requester}")
            pool = db_opaque.all()
            # clear the table; unsent entries are re-inserted below
            db_opaque.truncate()
            if len(pool) > 0:
                batch, leftover = pool[:10], pool[10:]
                for entry in leftover:
                    db_opaque.insert(entry)
                serialized = json.dumps(batch)
                logger.info(f"publishing table to getPool{requester}, len={len(serialized)}, n={len(batch)}")
                client.publish(f"getPool{requester}", serialized, qos=1)
        elif message.topic == "measures":
            parsed = json.loads(text)
            logger.info(f"m: {message.payload}")
            db_measures.insert({"entry": text})
            logger.info(f"received measure {parsed['MUID']}")
# connecting to MQTT broker
logger.info(f"Connecting to broker at {args.MQTT_broker}")
client = mqtt.Client("Agenceur")
client.tls_set(CA_ROOT_CERT_FILE, certfile=THING_CERT_FILE, keyfile=THING_PRIVATE_KEY)#, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
client.connect(args.MQTT_broker, 8883)
# client.enable_logger()
# start receive thread
client.loop_start()
# subscribe to
# * addToPool: endpoint for opaque payload
# * requestPool: endpoint for opaque pool request from devices
# * measures: endpoint for clear-data measures
client.subscribe("addToPool")
client.subscribe("requestPool")
client.subscribe("measures")
# register receive routine
client.on_message = on_message
# only on event execution
while True:
time.sleep(1)
client.loop_stop()
|
import base64
def display_skills(skills):
    """Render one left-aligned icon <img> tag per skill, newline-separated."""
    icon_base = 'https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/'
    tags = [
        f"<img width ='22px' align='left' src ='{icon_base + name + '.svg'}'>"
        for name in skills
    ]
    return '\n'.join(tags)
def display_socials(linkedin, twitter, medium, portfolio, github):
    """Render anchor+icon markup for each non-empty social handle/URL.

    linkedin/twitter/github are bare handles (expanded to profile URLs);
    medium/portfolio are taken as full URLs.
    """
    icon_root = 'https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/'
    entries = []
    if linkedin != '':
        entries.append(('https://www.linkedin.com/in/' + linkedin, 'linked-in-alt.svg'))
    if twitter != '':
        entries.append(('https://www.twitter.com/' + twitter, 'twitter.svg'))
    if medium != '':
        entries.append((medium, 'medium.svg'))
    if portfolio != '':
        entries.append((portfolio, 'portfolio.png'))
    if github != '':
        entries.append(('https://www.github.com/' + github, 'github.svg'))
    return ''.join(
        f'''<a href = '{url}'> <img width = '22px' align= 'left' src="{icon_root + icon}"/></a> \n'''
        for url, icon in entries
    )
def default_html(name = 'Rahul', linkedin_url = '',twitter_url = '',medium_url='',portfolio_url='',waka_userName = 'rahulbanerjee26',github_username = 'rahulbanerjee26',p1='......',p2='.......',p3='.........',p4='.........',skills=[]):
    """Build the README markdown/HTML for a GitHub profile page.

    Combines a greeting, social links (via display_socials), skill icons
    (via display_skills), the four "about me" prompts p1-p4, and embedded
    wakatime / github-readme-stats cards.

    NOTE(review): `skills=[]` is a mutable default argument; it is never
    mutated here, but a None sentinel would be safer.
    NOTE(review): the wakatime embed uses `username=@{waka_userName}` — the
    leading '@' looks unintended; confirm against the stats-card API.
    """
    return f'''
# Hello World <img src = "https://raw.githubusercontent.com/MartinHeinz/MartinHeinz/master/wave.gif" width = 50px>

<div size='20px'> Hi! My name is {name}. Thank You for taking the time to view my GitHub Profile :smile:
<h2> Connect with me <img src='https://raw.githubusercontent.com/ShahriarShafin/ShahriarShafin/main/Assets/handshake.gif' width="64px"> </h2>
{display_socials(linkedin_url,twitter_url,medium_url,portfolio_url,github_username)}
</div>
<h2> Skills </h2>
{display_skills(skills)}
<h2> About Me</h2>
- 🔭 I’m currently working on {p1}
- 🌱 I’m currently learning {p2}
- 👯 I’m looking to collaborate on {p3}
- 💬 Talk to me about {p4}
## Stuff I worked on last week⌚
<a href="https://github.com/anuraghazra/github-readme-stats">
<img align="center" src="https://github-readme-stats.vercel.app/api/wakatime?username=@{waka_userName}&compact=True"/>
</a>
## My GitHub Stats 📊
<a href="https://github.com/anuraghazra/github-readme-stats">
<img align="left" src="https://github-readme-stats.vercel.app/api?username={github_username}&count_private=true&show_icons=true&theme=radical" />
</a>
<a href="https://github.com/anuraghazra/convoychat">
<img align="center" src="https://github-readme-stats.vercel.app/api/top-langs/?username={github_username}&layout=compact" />
</a>
<!-- BLOG-POST-LIST:START -->
<!-- BLOG-POST-LIST:END -->
'''
def get_yml(feed_url):
    """Return an HTML link that downloads a blog-post GitHub Actions workflow.

    The workflow embeds *feed_url* as the feed_list for
    gautamkrishnar/blog-post-workflow and is delivered as a base64 data URI.
    """
    workflow = f'''
name: Latest blog post workflow
on:
  schedule: # Run workflow automatically
    - cron: '0 * * * *' # Runs every hour, on the hour
  workflow_dispatch: # Run workflow manually (without waiting for the cron to be called), through the Github Actions Workflow page directly
jobs:
  update-readme-with-blog:
    name: Update this repo's README with latest blog posts
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: gautamkrishnar/blog-post-workflow@master
        with:
          feed_list: "{feed_url}"
'''
    encoded = base64.b64encode(workflow.encode()).decode()
    return f'<a href="data:file/csv;base64,{encoded}" download="blog-post-workflow.yml">Download yml file</a>'
def download_readme(code):
    """Return an HTML download link embedding *code* as a base64 README.md."""
    encoded = base64.b64encode(code.encode()).decode()
    return f'<h4><a href="data:file/csv;base64,{encoded}" download="README.md">Dowload README</a></h4>'
| import base64
def display_skills(skills):
    """Return newline-joined <img> tags for the given skill icon names."""
    prefix = 'https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/'
    return '\n'.join(
        f"<img width ='22px' align='left' src ='{prefix}{s}.svg'>" for s in skills
    )
def display_socials(linkedin,twitter,medium,portfolio,github):
    """Return concatenated anchor+icon markup for each non-empty social link.

    linkedin/twitter/github are bare handles (expanded to profile URLs
    below); medium and portfolio are used verbatim as full URLs.
    """
    result = ''
    if linkedin != '':
        # expand handle to a full profile URL
        linkedin = 'https://www.linkedin.com/in/'+linkedin
        result += f'''<a href = '{linkedin}'> <img width = '22px' align= 'left' src="https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/linked-in-alt.svg"/></a> \n'''
    if twitter != '':
        twitter = 'https://www.twitter.com/'+twitter
        result += f'''<a href = '{twitter}'> <img width = '22px' align= 'left' src="https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/twitter.svg"/></a> \n'''
    if medium != '':
        result += f'''<a href = '{medium}'> <img width = '22px' align= 'left' src="https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/medium.svg"/></a> \n'''
    if portfolio != '':
        result += f'''<a href = '{portfolio}'> <img width = '22px' align= 'left' src="https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/portfolio.png"/></a> \n'''
    if github != '':
        github = 'https://www.github.com/'+github
        result += f'''<a href = '{github}'> <img width = '22px' align= 'left' src="https://raw.githubusercontent.com/rahulbanerjee26/githubAboutMeGenerator/main/icons/github.svg"/></a> \n'''
    return result
def default_html(name = 'Rahul', linkedin_url = '',twitter_url = '',medium_url='',portfolio_url='',waka_userName = 'rahulbanerjee26',github_username = 'rahulbanerjee26',p1='......',p2='.......',p3='.........',p4='.........',skills=[]):
    """Build the README markdown/HTML for a GitHub profile page.

    Combines a greeting, social links (via display_socials), skill icons
    (via display_skills), the four "about me" prompts p1-p4, and embedded
    wakatime / github-readme-stats cards.

    NOTE(review): `skills=[]` is a mutable default argument; it is never
    mutated here, but a None sentinel would be safer.
    NOTE(review): the wakatime embed uses `username=@{waka_userName}` — the
    leading '@' looks unintended; confirm against the stats-card API.
    """
    return f'''
# Hello World <img src = "https://raw.githubusercontent.com/MartinHeinz/MartinHeinz/master/wave.gif" width = 50px>

<div size='20px'> Hi! My name is {name}. Thank You for taking the time to view my GitHub Profile :smile:
<h2> Connect with me <img src='https://raw.githubusercontent.com/ShahriarShafin/ShahriarShafin/main/Assets/handshake.gif' width="64px"> </h2>
{display_socials(linkedin_url,twitter_url,medium_url,portfolio_url,github_username)}
</div>
<h2> Skills </h2>
{display_skills(skills)}
<h2> About Me</h2>
- 🔭 I’m currently working on {p1}
- 🌱 I’m currently learning {p2}
- 👯 I’m looking to collaborate on {p3}
- 💬 Talk to me about {p4}
## Stuff I worked on last week⌚
<a href="https://github.com/anuraghazra/github-readme-stats">
<img align="center" src="https://github-readme-stats.vercel.app/api/wakatime?username=@{waka_userName}&compact=True"/>
</a>
## My GitHub Stats 📊
<a href="https://github.com/anuraghazra/github-readme-stats">
<img align="left" src="https://github-readme-stats.vercel.app/api?username={github_username}&count_private=true&show_icons=true&theme=radical" />
</a>
<a href="https://github.com/anuraghazra/convoychat">
<img align="center" src="https://github-readme-stats.vercel.app/api/top-langs/?username={github_username}&layout=compact" />
</a>
<!-- BLOG-POST-LIST:START -->
<!-- BLOG-POST-LIST:END -->
'''
def get_yml(feed_url):
    """Return an HTML link that downloads a GitHub Actions workflow file.

    The workflow embeds *feed_url* as the feed_list input for
    gautamkrishnar/blog-post-workflow and is delivered as a base64 data URI.
    NOTE(review): the data URI declares `file/csv` although the payload is
    YAML; the download still works, but the MIME type looks wrong.
    """
    yml_file = f'''
name: Latest blog post workflow
on:
  schedule: # Run workflow automatically
    - cron: '0 * * * *' # Runs every hour, on the hour
  workflow_dispatch: # Run workflow manually (without waiting for the cron to be called), through the Github Actions Workflow page directly
jobs:
  update-readme-with-blog:
    name: Update this repo's README with latest blog posts
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: gautamkrishnar/blog-post-workflow@master
        with:
          feed_list: "{feed_url}"
'''
    b64 = base64.b64encode(yml_file.encode()).decode()
    href = f'<a href="data:file/csv;base64,{b64}" download="blog-post-workflow.yml">Download yml file</a>'
    return href
def download_readme(code):
    """Wrap *code* in an <h4> anchor that downloads it as README.md."""
    payload = base64.b64encode(code.encode()).decode()
    link = f'<a href="data:file/csv;base64,{payload}" download="README.md">Dowload README</a>'
    return f'<h4>{link}</h4>'
|
import datetime
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
from typing import Any, Dict, List, Optional
import dateutil.parser
import pytest
import requests
from determined import experimental
from determined.common import api, yaml
from determined.common.api import authentication, certs
from tests import config as conf
from tests.cluster import utils as cluster_utils
def maybe_create_native_experiment(context_dir: str, command: List[str]) -> Optional[int]:
target_env = os.environ.copy()
target_env["DET_MASTER"] = conf.make_master_url()
with subprocess.Popen(
command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=context_dir, env=target_env
) as p:
assert p.stdout is not None
for line in p.stdout:
m = re.search(r"Created experiment (\d+)\n", line.decode())
if m is not None:
return int(m.group(1))
return None
def create_native_experiment(context_dir: str, command: List[str]) -> int:
experiment_id = maybe_create_native_experiment(context_dir, command)
if experiment_id is None:
pytest.fail(f"Failed to create experiment in {context_dir}: {command}")
return experiment_id
def maybe_create_experiment(
config_file: str, model_def_file: str, create_args: Optional[List[str]] = None
) -> subprocess.CompletedProcess:
command = [
"det",
"-m",
conf.make_master_url(),
"experiment",
"create",
config_file,
model_def_file,
]
if create_args is not None:
command += create_args
env = os.environ.copy()
env["DET_DEBUG"] = "true"
return subprocess.run(
command, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
def create_experiment(
config_file: str, model_def_file: str, create_args: Optional[List[str]] = None
) -> int:
completed_process = maybe_create_experiment(config_file, model_def_file, create_args)
assert completed_process.returncode == 0, "\nstdout:\n{} \nstderr:\n{}".format(
completed_process.stdout, completed_process.stderr
)
m = re.search(r"Created experiment (\d+)\n", str(completed_process.stdout))
assert m is not None
return int(m.group(1))
def pause_experiment(experiment_id: int) -> None:
command = ["det", "-m", conf.make_master_url(), "experiment", "pause", str(experiment_id)]
subprocess.check_call(command)
def activate_experiment(experiment_id: int) -> None:
command = ["det", "-m", conf.make_master_url(), "experiment", "activate", str(experiment_id)]
subprocess.check_call(command)
def change_experiment_state(experiment_id: int, new_state: str) -> None:
# TODO(DET-5678): refactor tests to not use cli singleton auth.
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.patch(
conf.make_master_url(),
"experiments/{}".format(experiment_id),
headers={"Content-Type": "application/merge-patch+json"},
json={"state": new_state},
)
assert r.status_code == requests.codes.no_content, r.text
def cancel_experiment(experiment_id: int) -> None:
change_experiment_state(experiment_id, "STOPPING_CANCELED")
# We may never observe the STOPPING_CANCELED state.
wait_for_experiment_state(experiment_id, "CANCELED")
def cancel_experiment_v1(experiment_id: int) -> None:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.post(conf.make_master_url(), "/api/v1/experiments/{}/cancel".format(experiment_id))
r.raise_for_status()
wait_for_experiment_state(experiment_id, "CANCELED")
def wait_for_experiment_state(
experiment_id: int,
target_state: str,
max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
log_every: int = 60,
) -> None:
for seconds_waited in range(max_wait_secs):
try:
state = experiment_state(experiment_id)
# Ignore network errors while polling for experiment state to avoid a
# single network flake to cause a test suite failure. If the master is
# unreachable multiple times, this test will fail after max_wait_secs.
except api.errors.MasterNotFoundException:
logging.warning(
"Network failure ignored when polling for state of "
"experiment {}".format(experiment_id)
)
time.sleep(1)
continue
if state == target_state:
return
if is_terminal_state(state):
if state != target_state:
report_failed_experiment(experiment_id)
pytest.fail(
f"Experiment {experiment_id} terminated in {state} state, expected {target_state}"
)
if seconds_waited > 0 and seconds_waited % log_every == 0:
print(
f"Waited {seconds_waited} seconds for experiment {experiment_id} "
f"(currently {state}) to reach {target_state}"
)
time.sleep(1)
else:
if target_state == "COMPLETED":
cancel_experiment(experiment_id)
report_failed_experiment(experiment_id)
pytest.fail(
"Experiment did not reach target state {} after {} seconds".format(
target_state, max_wait_secs
)
)
def experiment_has_active_workload(experiment_id: int) -> bool:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "tasks").json()
for task in r.values():
if "Experiment {}".format(experiment_id) in task["name"] and len(task["containers"]) > 0:
return True
return False
def wait_for_experiment_active_workload(
    experiment_id: int, max_ticks: int = conf.MAX_TASK_SCHEDULED_SECS
) -> None:
    """Poll once per second until the experiment has an active workload.

    Fails the test if nothing is scheduled within ``max_ticks`` seconds.
    """
    # Bug fix: the loop previously iterated conf.MAX_TASK_SCHEDULED_SECS
    # times, silently ignoring the ``max_ticks`` argument.
    for _ in range(max_ticks):
        if experiment_has_active_workload(experiment_id):
            return
        time.sleep(1)
    pytest.fail(
        f"The only trial cannot be scheduled within {max_ticks} seconds.",
    )
def wait_for_experiment_workload_progress(
    experiment_id: int, max_ticks: int = conf.MAX_TRIAL_BUILD_SECS
) -> None:
    """Poll until the experiment's first trial completes its first workload.

    Fails the test if the trial has not progressed past its first step
    within ``max_ticks`` seconds.
    """
    # Bug fix: the loop previously iterated conf.MAX_TRIAL_BUILD_SECS times,
    # silently ignoring the ``max_ticks`` argument.
    for _ in range(max_ticks):
        trials = experiment_trials(experiment_id)
        if len(trials) > 0:
            only_trial = trials[0]
            if len(only_trial["steps"]) > 1:
                return
        time.sleep(1)
    pytest.fail(
        f"Trial cannot finish first workload within {max_ticks} seconds.",
    )
def experiment_has_completed_workload(experiment_id: int) -> bool:
    """Return True if any step of any trial of the experiment is COMPLETED."""
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    trials = experiment_trials(experiment_id)
    # ``any(trials)`` tested element truthiness rather than list emptiness;
    # an explicit emptiness check states the intent (and is equivalent for
    # non-empty trial dicts).
    if not trials:
        return False
    return any(any(s["state"] == "COMPLETED" for s in t["steps"]) for t in trials)
def experiment_json(experiment_id: int) -> Dict[str, Any]:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "experiments/{}".format(experiment_id))
assert r.status_code == requests.codes.ok, r.text
json = r.json() # type: Dict[str, Any]
return json
def experiment_state(experiment_id: int) -> str:
state = experiment_json(experiment_id)["state"] # type: str
return state
def experiment_trials(experiment_id: int) -> List[Dict[str, Any]]:
trials = experiment_json(experiment_id)["trials"] # type: List[Dict[str, Any]]
return trials
def num_experiments() -> int:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "experiments")
assert r.status_code == requests.codes.ok, r.text
return len(r.json())
def cancel_single(experiment_id: int, should_have_trial: bool = False) -> None:
cancel_experiment(experiment_id)
trials = experiment_trials(experiment_id)
if should_have_trial or len(trials) > 0:
assert len(trials) == 1
trial = trials[0]
assert trial["state"] == "CANCELED"
def cancel_single_v1(experiment_id: int, should_have_trial: bool = False) -> None:
cancel_experiment_v1(experiment_id)
trials = experiment_trials(experiment_id)
if should_have_trial or len(trials) > 0:
assert len(trials) == 1
trial = trials[0]
assert trial["state"] == "CANCELED"
def is_terminal_state(state: str) -> bool:
    """Return True for experiment states that will never change again."""
    terminal = {"CANCELED", "COMPLETED", "ERROR"}
    return state in terminal
def trial_metrics(trial_id: int) -> Dict[str, Any]:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "trials/{}/metrics".format(trial_id))
assert r.status_code == requests.codes.ok, r.text
json = r.json() # type: Dict[str, Any]
return json
def get_flat_metrics(trial_id: int, metric: str) -> List:
full_trial_metrics = trial_metrics(trial_id)
metrics = [m for step in full_trial_metrics["steps"] for m in step["metrics"]["batch_metrics"]]
return [v[metric] for v in metrics]
def num_trials(experiment_id: int) -> int:
    """Total number of trials in the experiment."""
    return len(experiment_trials(experiment_id))
def num_active_trials(experiment_id: int) -> int:
    """Number of the experiment's trials currently in the ACTIVE state."""
    return sum(t["state"] == "ACTIVE" for t in experiment_trials(experiment_id))
def num_completed_trials(experiment_id: int) -> int:
    """Number of the experiment's trials in the COMPLETED state."""
    return sum(t["state"] == "COMPLETED" for t in experiment_trials(experiment_id))
def num_error_trials(experiment_id: int) -> int:
    """Number of the experiment's trials in the ERROR state."""
    return sum(t["state"] == "ERROR" for t in experiment_trials(experiment_id))
def trial_logs(trial_id: int) -> List[str]:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
return [tl["message"] for tl in api.trial_logs(conf.make_master_url(), trial_id)]
def check_if_string_present_in_trial_logs(trial_id: int, target_string: str) -> bool:
    """Return True if ``target_string`` occurs in any log line of the trial."""
    return any(target_string in log_line for log_line in trial_logs(trial_id))
def assert_equivalent_trials(A: int, B: int, validation_metrics: List[str]) -> None:
full_trial_metrics1 = trial_metrics(A)
full_trial_metrics2 = trial_metrics(B)
assert len(full_trial_metrics1["steps"]) == len(full_trial_metrics2["steps"])
for step1, step2 in zip(full_trial_metrics1["steps"], full_trial_metrics2["steps"]):
metric1 = step1["metrics"]["batch_metrics"]
metric2 = step2["metrics"]["batch_metrics"]
for batch1, batch2 in zip(metric1, metric2):
assert len(batch1) == len(batch2) == 2
assert batch1["loss"] == pytest.approx(batch2["loss"])
if step1["validation"] is not None or step2["validation"] is not None:
assert step1["validation"] is not None
assert step2["validation"] is not None
for metric in validation_metrics:
val1 = step1.get("validation").get("metrics").get("validation_metrics").get(metric)
val2 = step2.get("validation").get("metrics").get("validation_metrics").get(metric)
assert val1 == pytest.approx(val2)
def assert_performed_initial_validation(exp_id: int) -> None:
trials = experiment_trials(exp_id)
assert len(trials) > 0
steps = trials[0]["steps"]
assert len(steps) > 0
zeroth_step = steps[0]
assert zeroth_step["validation"] is not None
assert zeroth_step["validation"]["total_batches"] == 0
assert zeroth_step["validation"]["state"] == "COMPLETED"
def assert_performed_final_checkpoint(exp_id: int) -> None:
trials = experiment_trials(exp_id)
assert len(trials) > 0
steps = trials[0]["steps"]
assert len(steps) > 0
last_step = steps[-1]
assert last_step["checkpoint"] is not None
assert last_step["checkpoint"]["state"] == "COMPLETED"
def run_describe_cli_tests(experiment_id: int) -> None:
"""
Runs `det experiment describe` CLI command on a finished
experiment. Will raise an exception if `det experiment describe`
encounters a traceback failure.
"""
# "det experiment describe" without metrics.
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(
[
"det",
"-m",
conf.make_master_url(),
"experiment",
"describe",
str(experiment_id),
"--outdir",
tmpdir,
]
)
assert os.path.exists(os.path.join(tmpdir, "experiments.csv"))
assert os.path.exists(os.path.join(tmpdir, "workloads.csv"))
assert os.path.exists(os.path.join(tmpdir, "trials.csv"))
# "det experiment describe" with metrics.
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(
[
"det",
"-m",
conf.make_master_url(),
"experiment",
"describe",
str(experiment_id),
"--metrics",
"--outdir",
tmpdir,
]
)
assert os.path.exists(os.path.join(tmpdir, "experiments.csv"))
assert os.path.exists(os.path.join(tmpdir, "workloads.csv"))
assert os.path.exists(os.path.join(tmpdir, "trials.csv"))
def run_list_cli_tests(experiment_id: int) -> None:
"""
Runs list-related CLI commands on a finished experiment. Will raise an
exception if the CLI command encounters a traceback failure.
"""
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "list-trials", str(experiment_id)]
)
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "list-checkpoints", str(experiment_id)]
)
subprocess.check_call(
[
"det",
"-m",
conf.make_master_url(),
"experiment",
"list-checkpoints",
"--best",
str(1),
str(experiment_id),
]
)
def report_failed_experiment(experiment_id: int) -> None:
trials = experiment_trials(experiment_id)
active = sum(1 for t in trials if t["state"] == "ACTIVE")
paused = sum(1 for t in trials if t["state"] == "PAUSED")
stopping_completed = sum(1 for t in trials if t["state"] == "STOPPING_COMPLETED")
stopping_canceled = sum(1 for t in trials if t["state"] == "STOPPING_CANCELED")
stopping_error = sum(1 for t in trials if t["state"] == "STOPPING_ERROR")
completed = sum(1 for t in trials if t["state"] == "COMPLETED")
canceled = sum(1 for t in trials if t["state"] == "CANCELED")
errored = sum(1 for t in trials if t["state"] == "ERROR")
stopping_killed = sum(1 for t in trials if t["state"] == "STOPPING_KILLED")
print(
f"Experiment {experiment_id}: {len(trials)} trials, {completed} completed, "
f"{active} active, {paused} paused, {stopping_completed} stopping-completed, "
f"{stopping_canceled} stopping-canceled, {stopping_error} stopping-error, "
f"{stopping_killed} stopping-killed, {canceled} canceled, {errored} errored",
file=sys.stderr,
)
for trial in trials:
print_trial_logs(trial["id"])
def report_failed_trial(trial_id: int, state: str) -> None:
print(f"Trial {trial_id} was not COMPLETED but {state}", file=sys.stderr)
print_trial_logs(trial_id)
def print_trial_logs(trial_id: int) -> None:
print("******** Start of logs for trial {} ********".format(trial_id), file=sys.stderr)
print("".join(trial_logs(trial_id)), file=sys.stderr)
print("******** End of logs for trial {} ********".format(trial_id), file=sys.stderr)
def run_basic_test(
    config_file: str,
    model_def_file: str,
    expected_trials: Optional[int],
    create_args: Optional[List[str]] = None,
    max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
) -> int:
    """Create an experiment, wait for COMPLETED, verify metadata, return its ID.

    `expected_trials` of None skips the trial-count check (used when the
    searcher's trial count is non-deterministic).
    """
    assert os.path.isdir(model_def_file)
    experiment_id = create_experiment(config_file, model_def_file, create_args)
    wait_for_experiment_state(experiment_id, "COMPLETED", max_wait_secs=max_wait_secs)
    assert num_active_trials(experiment_id) == 0
    verify_completed_experiment_metadata(experiment_id, expected_trials)
    return experiment_id
def verify_completed_experiment_metadata(
    experiment_id: int, num_expected_trials: Optional[int]
) -> None:
    """Sanity-check the metadata of an experiment expected to have COMPLETED.

    Verifies trial/step states, batch ordering, final checkpoints, that all
    cluster slots free up, and that basic CLI commands run without error.
    """
    # If `expected_trials` is None, the expected number of trials is
    # non-deterministic.
    if num_expected_trials is not None:
        assert num_trials(experiment_id) == num_expected_trials
        assert num_completed_trials(experiment_id) == num_expected_trials
    # Check that every trial and step is COMPLETED.
    trials = experiment_trials(experiment_id)
    assert len(trials) > 0
    for trial in trials:
        if trial["state"] != "COMPLETED":
            report_failed_trial(trial["id"], trial["state"])
            # BUG FIX: the subscripts inside this f-string previously reused
            # double quotes inside a double-quoted string, which is a
            # SyntaxError on Python < 3.12.
            pytest.fail(f"Trial {trial['id']} was not COMPLETED but {trial['state']}")
        assert len(trial["steps"]) > 0
        # Check that batches appear in increasing order.
        batch_ids = [s["total_batches"] for s in trial["steps"]]
        assert all(x <= y for x, y in zip(batch_ids, batch_ids[1:]))
        for step in trial["steps"]:
            assert step["state"] == "COMPLETED"
            if step["validation"]:
                validation = step["validation"]
                assert validation["state"] == "COMPLETED"
            if step["checkpoint"]:
                checkpoint = step["checkpoint"]
                assert checkpoint["state"] in {"COMPLETED", "DELETED"}
    # The last step of every trial should have a checkpoint.
    for trial in trials:
        last_step = trial["steps"][-1]
        assert last_step["checkpoint"]
    # When the experiment completes, all slots should now be free. This
    # requires terminating the experiment's last container, which might
    # take some time.
    max_secs_to_free_slots = 30
    for _ in range(max_secs_to_free_slots):
        if cluster_utils.num_free_slots() == cluster_utils.num_slots():
            break
        time.sleep(1)
    else:
        raise AssertionError("Slots failed to free after experiment {}".format(experiment_id))
    # Run a series of CLI tests on the finished experiment, to sanity check
    # that basic CLI commands don't raise errors.
    run_describe_cli_tests(experiment_id)
    run_list_cli_tests(experiment_id)
# Use Determined to run an experiment that we expect to fail.
def run_failure_test(
    config_file: str, model_def_file: str, error_str: Optional[str] = None
) -> None:
    """Create an experiment expected to ERROR and verify its failure logs."""
    experiment_id = create_experiment(config_file, model_def_file)
    wait_for_experiment_state(experiment_id, "ERROR")
    # The searcher is configured with a `max_trials` of 8. Since the
    # first step of each trial results in an error, there should be no
    # completed trials.
    #
    # Most of the trials should result in ERROR, but depending on that
    # seems fragile: if we support task preemption in the future, we
    # might start a trial but cancel it before we hit the error in the
    # model definition.
    assert num_active_trials(experiment_id) == 0
    assert num_completed_trials(experiment_id) == 0
    assert num_error_trials(experiment_id) >= 1
    # For each failed trial, check for the expected error in the logs.
    trials = experiment_trials(experiment_id)
    for t in trials:
        if t["state"] != "ERROR":
            continue
        trial_id = t["id"]
        logs = trial_logs(trial_id)
        if error_str is not None:
            assert any(error_str in line for line in logs)
def get_validation_metric_from_last_step(
    experiment_id: int, trial_id: int, validation_metric_name: str
) -> float:
    """Return the named validation metric from the trial's final step."""
    steps = experiment_trials(experiment_id)[trial_id]["steps"]
    final_validation = steps[-1]["validation"]
    return final_validation["metrics"]["validation_metrics"][validation_metric_name]  # type: ignore
class ExperimentDurations:
    """Wall-clock durations for an experiment and one trial's workload phases."""

    def __init__(
        self,
        experiment_duration: datetime.timedelta,
        training_duration: datetime.timedelta,
        validation_duration: datetime.timedelta,
        checkpoint_duration: datetime.timedelta,
    ):
        self.experiment_duration = experiment_duration
        self.training_duration = training_duration
        self.validation_duration = validation_duration
        self.checkpoint_duration = checkpoint_duration

    def __str__(self) -> str:
        # Render one "<label> duration: <value>" line per tracked phase.
        labeled = [
            ("experiment", self.experiment_duration),
            ("training", self.training_duration),
            ("validation", self.validation_duration),
            ("checkpoint", self.checkpoint_duration),
        ]
        return "\n".join(f"{label} duration: {value}" for label, value in labeled)
def get_experiment_durations(experiment_id: int, trial_idx: int) -> ExperimentDurations:
    """Compute experiment/training/validation/checkpoint durations for one trial."""

    def span(obj: Dict[str, Any]) -> datetime.timedelta:
        # Duration between an object's reported start_time and end_time.
        return dateutil.parser.parse(obj["end_time"]) - dateutil.parser.parse(obj["start_time"])

    experiment_metadata = experiment_json(experiment_id)
    experiment_duration = span(experiment_metadata)
    training_duration = datetime.timedelta(seconds=0)
    validation_duration = datetime.timedelta(seconds=0)
    checkpoint_duration = datetime.timedelta(seconds=0)
    for step in experiment_metadata["trials"][trial_idx]["steps"]:
        training_duration += span(step)
        if step.get("validation"):
            validation_duration += span(step["validation"])
        if step.get("checkpoint"):
            checkpoint_duration += span(step["checkpoint"])
    return ExperimentDurations(
        experiment_duration, training_duration, validation_duration, checkpoint_duration
    )
def run_basic_test_with_temp_config(
    config: Dict[Any, Any],
    model_def_path: str,
    expected_trials: Optional[int],
    create_args: Optional[List[str]] = None,
    max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
) -> int:
    """Serialize `config` to a temporary YAML file and run run_basic_test on it."""
    with tempfile.NamedTemporaryFile() as tf:
        # Write through a separate handle so the YAML is flushed before the
        # CLI reads the file by name.
        with open(tf.name, "w") as f:
            yaml.dump(config, f)
        experiment_id = run_basic_test(
            tf.name,
            model_def_path,
            expected_trials,
            create_args,
            max_wait_secs=max_wait_secs,
        )
    return experiment_id


def run_failure_test_with_temp_config(
    config: Dict[Any, Any],
    model_def_path: str,
    error_str: Optional[str] = None,
) -> None:
    """Serialize `config` to a temporary YAML file and run run_failure_test on it."""
    with tempfile.NamedTemporaryFile() as tf:
        with open(tf.name, "w") as f:
            yaml.dump(config, f)
        run_failure_test(tf.name, model_def_path, error_str=error_str)
def shared_fs_checkpoint_config() -> Dict[str, str]:
    """Checkpoint-storage config backed by a shared filesystem under /tmp."""
    return dict(
        type="shared_fs",
        host_path="/tmp",
        storage_path="determined-integration-checkpoints",
    )


def s3_checkpoint_config(secrets: Dict[str, str], prefix: Optional[str] = None) -> Dict[str, str]:
    """Checkpoint-storage config for S3, credentials taken from `secrets`."""
    config = {
        "type": "s3",
        "access_key": secrets["INTEGRATIONS_S3_ACCESS_KEY"],
        "secret_key": secrets["INTEGRATIONS_S3_SECRET_KEY"],
        "bucket": secrets["INTEGRATIONS_S3_BUCKET"],
    }
    if prefix is not None:
        config["prefix"] = prefix
    return config


def s3_checkpoint_config_no_creds() -> Dict[str, str]:
    """Checkpoint-storage config for a public S3 bucket (no credentials)."""
    return dict(type="s3", bucket="determined-ai-examples")


def root_user_home_bind_mount() -> Dict[str, str]:
    """Bind mount mapping /tmp on the host to /root in the container."""
    return dict(host_path="/tmp", container_path="/root")


def _export_and_load_model(experiment_id: int, master_url: str) -> None:
    # Child-process entry point: fetch and deserialize the top checkpoint.
    best = experimental.Determined(master_url).get_experiment(experiment_id).top_checkpoint()
    best.load()
def export_and_load_model(experiment_id: int) -> None:
    """Export the experiment's top checkpoint and load it, in a fresh process."""
    # We run this in a subprocess to avoid module name collisions
    # when performing checkpoint export of different models.
    ctx = multiprocessing.get_context("spawn")
    p = ctx.Process(
        target=_export_and_load_model,
        args=(
            experiment_id,
            conf.make_master_url(),
        ),
    )
    p.start()
    p.join()
    # A non-zero exit code means the child raised while loading the checkpoint.
    assert p.exitcode == 0, p.exitcode
| import datetime
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
from typing import Any, Dict, List, Optional
import dateutil.parser
import pytest
import requests
from determined import experimental
from determined.common import api, yaml
from determined.common.api import authentication, certs
from tests import config as conf
from tests.cluster import utils as cluster_utils
def maybe_create_native_experiment(context_dir: str, command: List[str]) -> Optional[int]:
    """Run a native-API command and return the created experiment's ID.

    Returns None if the command's output never announces a created experiment.
    """
    target_env = os.environ.copy()
    target_env["DET_MASTER"] = conf.make_master_url()
    with subprocess.Popen(
        command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=context_dir, env=target_env
    ) as p:
        assert p.stdout is not None
        for line in p.stdout:
            # The CLI prints "Created experiment <id>" on success.
            m = re.search(r"Created experiment (\d+)\n", line.decode())
            if m is not None:
                return int(m.group(1))
    return None


def create_native_experiment(context_dir: str, command: List[str]) -> int:
    """Like maybe_create_native_experiment, but fail the test on None."""
    experiment_id = maybe_create_native_experiment(context_dir, command)
    if experiment_id is None:
        pytest.fail(f"Failed to create experiment in {context_dir}: {command}")
    return experiment_id
def maybe_create_experiment(
    config_file: str, model_def_file: str, create_args: Optional[List[str]] = None
) -> subprocess.CompletedProcess:
    """Invoke `det experiment create` and return the completed process.

    Does not check the exit status; callers inspect returncode/stdout/stderr.
    """
    command = [
        "det",
        "-m",
        conf.make_master_url(),
        "experiment",
        "create",
        config_file,
        model_def_file,
    ]
    if create_args is not None:
        command += create_args
    env = os.environ.copy()
    # DET_DEBUG makes the CLI print tracebacks, which helps diagnose failures.
    env["DET_DEBUG"] = "true"
    return subprocess.run(
        command, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
    )


def create_experiment(
    config_file: str, model_def_file: str, create_args: Optional[List[str]] = None
) -> int:
    """Create an experiment via the CLI and return its ID; assert on failure."""
    completed_process = maybe_create_experiment(config_file, model_def_file, create_args)
    assert completed_process.returncode == 0, "\nstdout:\n{} \nstderr:\n{}".format(
        completed_process.stdout, completed_process.stderr
    )
    # The CLI announces "Created experiment <id>" on success.
    m = re.search(r"Created experiment (\d+)\n", str(completed_process.stdout))
    assert m is not None
    return int(m.group(1))
def pause_experiment(experiment_id: int) -> None:
    """Pause the experiment through the CLI."""
    subprocess.check_call(
        ["det", "-m", conf.make_master_url(), "experiment", "pause", str(experiment_id)]
    )


def activate_experiment(experiment_id: int) -> None:
    """Activate (resume) the experiment through the CLI."""
    subprocess.check_call(
        ["det", "-m", conf.make_master_url(), "experiment", "activate", str(experiment_id)]
    )
def change_experiment_state(experiment_id: int, new_state: str) -> None:
    """PATCH the experiment to a new state directly through the master API."""
    # TODO(DET-5678): refactor tests to not use cli singleton auth.
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    r = api.patch(
        conf.make_master_url(),
        "experiments/{}".format(experiment_id),
        headers={"Content-Type": "application/merge-patch+json"},
        json={"state": new_state},
    )
    assert r.status_code == requests.codes.no_content, r.text


def cancel_experiment(experiment_id: int) -> None:
    """Request cancellation and block until the experiment reaches CANCELED."""
    change_experiment_state(experiment_id, "STOPPING_CANCELED")
    # We may never observe the STOPPING_CANCELED state.
    wait_for_experiment_state(experiment_id, "CANCELED")


def cancel_experiment_v1(experiment_id: int) -> None:
    """Cancel through the v1 REST endpoint and wait for the CANCELED state."""
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    r = api.post(conf.make_master_url(), "/api/v1/experiments/{}/cancel".format(experiment_id))
    r.raise_for_status()
    wait_for_experiment_state(experiment_id, "CANCELED")
def wait_for_experiment_state(
    experiment_id: int,
    target_state: str,
    max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
    log_every: int = 60,
) -> None:
    """Poll once per second until the experiment reaches `target_state`.

    Fails the test if the experiment lands in a different terminal state, or
    if `max_wait_secs` elapses first. On timeout waiting for COMPLETED, the
    experiment is canceled so it does not keep consuming the cluster.
    """
    for seconds_waited in range(max_wait_secs):
        try:
            state = experiment_state(experiment_id)
        # Ignore network errors while polling for experiment state to avoid a
        # single network flake to cause a test suite failure. If the master is
        # unreachable multiple times, this test will fail after max_wait_secs.
        except api.errors.MasterNotFoundException:
            logging.warning(
                "Network failure ignored when polling for state of "
                "experiment {}".format(experiment_id)
            )
            time.sleep(1)
            continue
        if state == target_state:
            return
        if is_terminal_state(state):
            if state != target_state:
                report_failed_experiment(experiment_id)
            pytest.fail(
                f"Experiment {experiment_id} terminated in {state} state, expected {target_state}"
            )
        # Periodic progress message so long waits are visible in test output.
        if seconds_waited > 0 and seconds_waited % log_every == 0:
            print(
                f"Waited {seconds_waited} seconds for experiment {experiment_id} "
                f"(currently {state}) to reach {target_state}"
            )
        time.sleep(1)
    else:
        if target_state == "COMPLETED":
            cancel_experiment(experiment_id)
        report_failed_experiment(experiment_id)
        pytest.fail(
            "Experiment did not reach target state {} after {} seconds".format(
                target_state, max_wait_secs
            )
        )
def experiment_has_active_workload(experiment_id: int) -> bool:
    """Return True if any task of this experiment has a running container."""
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    r = api.get(conf.make_master_url(), "tasks").json()
    for task in r.values():
        if "Experiment {}".format(experiment_id) in task["name"] and len(task["containers"]) > 0:
            return True
    return False


def wait_for_experiment_active_workload(
    experiment_id: int, max_ticks: int = conf.MAX_TASK_SCHEDULED_SECS
) -> None:
    """Poll once per second until the experiment has an active workload.

    Fails the test if nothing is scheduled within `max_ticks` seconds.
    """
    # BUG FIX: the loop previously iterated conf.MAX_TASK_SCHEDULED_SECS times
    # regardless of the max_ticks argument, so callers could not adjust the
    # wait and the failure message could report the wrong bound.
    for _ in range(max_ticks):
        if experiment_has_active_workload(experiment_id):
            return
        time.sleep(1)
    pytest.fail(
        f"The only trial cannot be scheduled within {max_ticks} seconds.",
    )
def wait_for_experiment_workload_progress(
    experiment_id: int, max_ticks: int = conf.MAX_TRIAL_BUILD_SECS
) -> None:
    """Poll until the experiment's first trial finishes its first workload.

    Fails the test if no workload completes within `max_ticks` seconds.
    """
    # BUG FIX: honor the max_ticks parameter; the loop previously always used
    # conf.MAX_TRIAL_BUILD_SECS, silently ignoring the caller's value.
    for _ in range(max_ticks):
        trials = experiment_trials(experiment_id)
        if len(trials) > 0:
            only_trial = trials[0]
            # More than one recorded step means the first workload finished.
            if len(only_trial["steps"]) > 1:
                return
        time.sleep(1)
    pytest.fail(
        f"Trial cannot finish first workload within {max_ticks} seconds.",
    )
def experiment_has_completed_workload(experiment_id: int) -> bool:
    """Return True if any step of any trial has reached the COMPLETED state."""
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    trials = experiment_trials(experiment_id)
    # BUG FIX: use a plain emptiness check; `any(trials)` tested the
    # truthiness of the trial dicts themselves, not whether the list is empty.
    if not trials:
        return False
    return any(any(s["state"] == "COMPLETED" for s in t["steps"]) for t in trials)


def experiment_json(experiment_id: int) -> Dict[str, Any]:
    """Fetch the experiment's full JSON metadata from the master."""
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    r = api.get(conf.make_master_url(), "experiments/{}".format(experiment_id))
    assert r.status_code == requests.codes.ok, r.text
    json = r.json()  # type: Dict[str, Any]
    return json


def experiment_state(experiment_id: int) -> str:
    """Return the experiment's current state string (e.g. "ACTIVE")."""
    state = experiment_json(experiment_id)["state"]  # type: str
    return state


def experiment_trials(experiment_id: int) -> List[Dict[str, Any]]:
    """Return the experiment's list of trial metadata dicts."""
    trials = experiment_json(experiment_id)["trials"]  # type: List[Dict[str, Any]]
    return trials
def num_experiments() -> int:
    """Return the total number of experiments registered with the master."""
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    r = api.get(conf.make_master_url(), "experiments")
    assert r.status_code == requests.codes.ok, r.text
    return len(r.json())


def _assert_single_canceled_trial(experiment_id: int, should_have_trial: bool) -> None:
    # Shared postcondition for the cancel_single* helpers: at most one trial,
    # and if one exists (or is required) it must have been CANCELED.
    trials = experiment_trials(experiment_id)
    if should_have_trial or len(trials) > 0:
        assert len(trials) == 1
        trial = trials[0]
        assert trial["state"] == "CANCELED"


def cancel_single(experiment_id: int, should_have_trial: bool = False) -> None:
    """Cancel via state patch and verify the single trial was CANCELED."""
    cancel_experiment(experiment_id)
    _assert_single_canceled_trial(experiment_id, should_have_trial)


def cancel_single_v1(experiment_id: int, should_have_trial: bool = False) -> None:
    """Cancel via the v1 REST endpoint and verify the single trial was CANCELED."""
    cancel_experiment_v1(experiment_id)
    _assert_single_canceled_trial(experiment_id, should_have_trial)


def is_terminal_state(state: str) -> bool:
    """Return True for states from which an experiment cannot transition."""
    return state in ("CANCELED", "COMPLETED", "ERROR")
def trial_metrics(trial_id: int) -> Dict[str, Any]:
    """Fetch the full metrics JSON for a trial from the master."""
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    r = api.get(conf.make_master_url(), "trials/{}/metrics".format(trial_id))
    assert r.status_code == requests.codes.ok, r.text
    json = r.json()  # type: Dict[str, Any]
    return json


def get_flat_metrics(trial_id: int, metric: str) -> List:
    """Return the named batch metric flattened across all steps of the trial."""
    full_trial_metrics = trial_metrics(trial_id)
    metrics = [m for step in full_trial_metrics["steps"] for m in step["metrics"]["batch_metrics"]]
    return [v[metric] for v in metrics]
def num_trials(experiment_id: int) -> int:
    """Total number of trials in the experiment."""
    return len(experiment_trials(experiment_id))


# Idiom cleanup below: count matches directly instead of `sum(1 if ... else 0)`.
def num_active_trials(experiment_id: int) -> int:
    """Number of trials currently in the ACTIVE state."""
    return sum(1 for t in experiment_trials(experiment_id) if t["state"] == "ACTIVE")


def num_completed_trials(experiment_id: int) -> int:
    """Number of trials in the COMPLETED state."""
    return sum(1 for t in experiment_trials(experiment_id) if t["state"] == "COMPLETED")


def num_error_trials(experiment_id: int) -> int:
    """Number of trials in the ERROR state."""
    return sum(1 for t in experiment_trials(experiment_id) if t["state"] == "ERROR")
def trial_logs(trial_id: int) -> List[str]:
    """Fetch all log messages for a trial from the master."""
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    return [tl["message"] for tl in api.trial_logs(conf.make_master_url(), trial_id)]


def check_if_string_present_in_trial_logs(trial_id: int, target_string: str) -> bool:
    """Return True if `target_string` occurs in any log line of the trial."""
    # Idiom cleanup: any() replaces the manual search loop; behavior is the same.
    return any(target_string in line for line in trial_logs(trial_id))
def assert_equivalent_trials(A: int, B: int, validation_metrics: List[str]) -> None:
    """Assert two trials produced matching batch losses and validation metrics.

    Floats are compared with pytest.approx to tolerate numeric noise.
    """
    full_trial_metrics1 = trial_metrics(A)
    full_trial_metrics2 = trial_metrics(B)
    assert len(full_trial_metrics1["steps"]) == len(full_trial_metrics2["steps"])
    for step1, step2 in zip(full_trial_metrics1["steps"], full_trial_metrics2["steps"]):
        metric1 = step1["metrics"]["batch_metrics"]
        metric2 = step2["metrics"]["batch_metrics"]
        for batch1, batch2 in zip(metric1, metric2):
            # NOTE(review): assumes each batch-metrics dict holds exactly two
            # entries ("loss" plus one other) — confirm against the trainer.
            assert len(batch1) == len(batch2) == 2
            assert batch1["loss"] == pytest.approx(batch2["loss"])
        # Either both steps validated, or neither did.
        if step1["validation"] is not None or step2["validation"] is not None:
            assert step1["validation"] is not None
            assert step2["validation"] is not None
            for metric in validation_metrics:
                val1 = step1.get("validation").get("metrics").get("validation_metrics").get(metric)
                val2 = step2.get("validation").get("metrics").get("validation_metrics").get(metric)
                assert val1 == pytest.approx(val2)
def assert_performed_initial_validation(exp_id: int) -> None:
    """Assert the first trial ran a COMPLETED validation at batch 0."""
    trials = experiment_trials(exp_id)
    assert len(trials) > 0
    steps = trials[0]["steps"]
    assert len(steps) > 0
    first = steps[0]
    assert first["validation"] is not None
    assert first["validation"]["total_batches"] == 0
    assert first["validation"]["state"] == "COMPLETED"


def assert_performed_final_checkpoint(exp_id: int) -> None:
    """Assert the first trial's last step wrote a COMPLETED checkpoint."""
    trials = experiment_trials(exp_id)
    assert len(trials) > 0
    steps = trials[0]["steps"]
    assert len(steps) > 0
    final = steps[-1]
    assert final["checkpoint"] is not None
    assert final["checkpoint"]["state"] == "COMPLETED"
def run_describe_cli_tests(experiment_id: int) -> None:
    """
    Runs `det experiment describe` CLI command on a finished
    experiment. Will raise an exception if `det experiment describe`
    encounters a traceback failure.
    """

    def describe(extra: List[str]) -> None:
        # One describe invocation into a fresh temp dir; verify that all
        # three CSV exports were produced. (Extracted to remove the
        # duplicated with/without-metrics blocks.)
        with tempfile.TemporaryDirectory() as tmpdir:
            subprocess.check_call(
                [
                    "det",
                    "-m",
                    conf.make_master_url(),
                    "experiment",
                    "describe",
                    str(experiment_id),
                ]
                + extra
                + ["--outdir", tmpdir]
            )
            for csv_name in ("experiments.csv", "workloads.csv", "trials.csv"):
                assert os.path.exists(os.path.join(tmpdir, csv_name))

    # "det experiment describe" without metrics.
    describe([])
    # "det experiment describe" with metrics.
    describe(["--metrics"])
def run_list_cli_tests(experiment_id: int) -> None:
    """
    Runs list-related CLI commands on a finished experiment. Will raise an
    exception if the CLI command encounters a traceback failure.
    """

    def run(*args: str) -> None:
        # Each invocation resolves the master URL freshly, exactly as the
        # original back-to-back check_call blocks did.
        subprocess.check_call(["det", "-m", conf.make_master_url(), "experiment", *args])

    run("list-trials", str(experiment_id))
    run("list-checkpoints", str(experiment_id))
    run("list-checkpoints", "--best", str(1), str(experiment_id))
def report_failed_experiment(experiment_id: int) -> None:
    """Print a per-state trial summary and every trial's logs to stderr."""
    trials = experiment_trials(experiment_id)
    # Tally all trial states in a single pass instead of nine list scans;
    # Counter returns 0 for states that never occurred.
    from collections import Counter

    counts = Counter(t["state"] for t in trials)
    print(
        f"Experiment {experiment_id}: {len(trials)} trials, {counts['COMPLETED']} completed, "
        f"{counts['ACTIVE']} active, {counts['PAUSED']} paused, "
        f"{counts['STOPPING_COMPLETED']} stopping-completed, "
        f"{counts['STOPPING_CANCELED']} stopping-canceled, "
        f"{counts['STOPPING_ERROR']} stopping-error, "
        f"{counts['STOPPING_KILLED']} stopping-killed, {counts['CANCELED']} canceled, "
        f"{counts['ERROR']} errored",
        file=sys.stderr,
    )
    for trial in trials:
        print_trial_logs(trial["id"])


def report_failed_trial(trial_id: int, state: str) -> None:
    """Print the unexpected terminal state and the trial's full logs."""
    print(f"Trial {trial_id} was not COMPLETED but {state}", file=sys.stderr)
    print_trial_logs(trial_id)


def print_trial_logs(trial_id: int) -> None:
    """Dump a trial's entire log stream to stderr, delimited by markers."""
    print("******** Start of logs for trial {} ********".format(trial_id), file=sys.stderr)
    print("".join(trial_logs(trial_id)), file=sys.stderr)
    print("******** End of logs for trial {} ********".format(trial_id), file=sys.stderr)
def run_basic_test(
    config_file: str,
    model_def_file: str,
    expected_trials: Optional[int],
    create_args: Optional[List[str]] = None,
    max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
) -> int:
    """Create an experiment, wait for COMPLETED, verify metadata, return its ID.

    `expected_trials` of None skips the trial-count check (used when the
    searcher's trial count is non-deterministic).
    """
    assert os.path.isdir(model_def_file)
    experiment_id = create_experiment(config_file, model_def_file, create_args)
    wait_for_experiment_state(experiment_id, "COMPLETED", max_wait_secs=max_wait_secs)
    assert num_active_trials(experiment_id) == 0
    verify_completed_experiment_metadata(experiment_id, expected_trials)
    return experiment_id
def verify_completed_experiment_metadata(
    experiment_id: int, num_expected_trials: Optional[int]
) -> None:
    """Sanity-check the metadata of an experiment expected to have COMPLETED.

    Verifies trial/step states, batch ordering, final checkpoints, that all
    cluster slots free up, and that basic CLI commands run without error.
    """
    # If `expected_trials` is None, the expected number of trials is
    # non-deterministic.
    if num_expected_trials is not None:
        assert num_trials(experiment_id) == num_expected_trials
        assert num_completed_trials(experiment_id) == num_expected_trials
    # Check that every trial and step is COMPLETED.
    trials = experiment_trials(experiment_id)
    assert len(trials) > 0
    for trial in trials:
        if trial["state"] != "COMPLETED":
            report_failed_trial(trial["id"], trial["state"])
            pytest.fail(f"Trial {trial['id']} was not COMPLETED but {trial['state']}")
        assert len(trial["steps"]) > 0
        # Check that batches appear in increasing order.
        batch_ids = [s["total_batches"] for s in trial["steps"]]
        assert all(x <= y for x, y in zip(batch_ids, batch_ids[1:]))
        for step in trial["steps"]:
            assert step["state"] == "COMPLETED"
            if step["validation"]:
                validation = step["validation"]
                assert validation["state"] == "COMPLETED"
            if step["checkpoint"]:
                checkpoint = step["checkpoint"]
                assert checkpoint["state"] in {"COMPLETED", "DELETED"}
    # The last step of every trial should have a checkpoint.
    for trial in trials:
        last_step = trial["steps"][-1]
        assert last_step["checkpoint"]
    # When the experiment completes, all slots should now be free. This
    # requires terminating the experiment's last container, which might
    # take some time.
    max_secs_to_free_slots = 30
    for _ in range(max_secs_to_free_slots):
        if cluster_utils.num_free_slots() == cluster_utils.num_slots():
            break
        time.sleep(1)
    else:
        raise AssertionError("Slots failed to free after experiment {}".format(experiment_id))
    # Run a series of CLI tests on the finished experiment, to sanity check
    # that basic CLI commands don't raise errors.
    run_describe_cli_tests(experiment_id)
    run_list_cli_tests(experiment_id)
# Use Determined to run an experiment that we expect to fail.
def run_failure_test(
    config_file: str, model_def_file: str, error_str: Optional[str] = None
) -> None:
    """Create an experiment expected to ERROR and verify its failure logs."""
    experiment_id = create_experiment(config_file, model_def_file)
    wait_for_experiment_state(experiment_id, "ERROR")
    # The searcher is configured with a `max_trials` of 8. Since the
    # first step of each trial results in an error, there should be no
    # completed trials.
    #
    # Most of the trials should result in ERROR, but depending on that
    # seems fragile: if we support task preemption in the future, we
    # might start a trial but cancel it before we hit the error in the
    # model definition.
    assert num_active_trials(experiment_id) == 0
    assert num_completed_trials(experiment_id) == 0
    assert num_error_trials(experiment_id) >= 1
    # For each failed trial, check for the expected error in the logs.
    trials = experiment_trials(experiment_id)
    for t in trials:
        if t["state"] != "ERROR":
            continue
        trial_id = t["id"]
        logs = trial_logs(trial_id)
        if error_str is not None:
            assert any(error_str in line for line in logs)
def get_validation_metric_from_last_step(
    experiment_id: int, trial_id: int, validation_metric_name: str
) -> float:
    """Return the named validation metric from the trial's final step."""
    steps = experiment_trials(experiment_id)[trial_id]["steps"]
    final_validation = steps[-1]["validation"]
    return final_validation["metrics"]["validation_metrics"][validation_metric_name]  # type: ignore
class ExperimentDurations:
    """Wall-clock durations for an experiment and one trial's workload phases."""

    def __init__(
        self,
        experiment_duration: datetime.timedelta,
        training_duration: datetime.timedelta,
        validation_duration: datetime.timedelta,
        checkpoint_duration: datetime.timedelta,
    ):
        self.experiment_duration = experiment_duration
        self.training_duration = training_duration
        self.validation_duration = validation_duration
        self.checkpoint_duration = checkpoint_duration

    def __str__(self) -> str:
        # Render one "<label> duration: <value>" line per tracked phase.
        labeled = [
            ("experiment", self.experiment_duration),
            ("training", self.training_duration),
            ("validation", self.validation_duration),
            ("checkpoint", self.checkpoint_duration),
        ]
        return "\n".join(f"{label} duration: {value}" for label, value in labeled)
def get_experiment_durations(experiment_id: int, trial_idx: int) -> ExperimentDurations:
    """Compute experiment/training/validation/checkpoint durations for one trial."""

    def span(obj: Dict[str, Any]) -> datetime.timedelta:
        # Duration between an object's reported start_time and end_time.
        return dateutil.parser.parse(obj["end_time"]) - dateutil.parser.parse(obj["start_time"])

    experiment_metadata = experiment_json(experiment_id)
    experiment_duration = span(experiment_metadata)
    training_duration = datetime.timedelta(seconds=0)
    validation_duration = datetime.timedelta(seconds=0)
    checkpoint_duration = datetime.timedelta(seconds=0)
    for step in experiment_metadata["trials"][trial_idx]["steps"]:
        training_duration += span(step)
        if step.get("validation"):
            validation_duration += span(step["validation"])
        if step.get("checkpoint"):
            checkpoint_duration += span(step["checkpoint"])
    return ExperimentDurations(
        experiment_duration, training_duration, validation_duration, checkpoint_duration
    )
def run_basic_test_with_temp_config(
    config: Dict[Any, Any],
    model_def_path: str,
    expected_trials: Optional[int],
    create_args: Optional[List[str]] = None,
    max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
) -> int:
    """Serialize `config` to a temporary YAML file and run run_basic_test on it."""
    with tempfile.NamedTemporaryFile() as tf:
        # Write through a separate handle so the YAML is flushed before the
        # CLI reads the file by name.
        with open(tf.name, "w") as f:
            yaml.dump(config, f)
        experiment_id = run_basic_test(
            tf.name,
            model_def_path,
            expected_trials,
            create_args,
            max_wait_secs=max_wait_secs,
        )
    return experiment_id


def run_failure_test_with_temp_config(
    config: Dict[Any, Any],
    model_def_path: str,
    error_str: Optional[str] = None,
) -> None:
    """Serialize `config` to a temporary YAML file and run run_failure_test on it."""
    with tempfile.NamedTemporaryFile() as tf:
        with open(tf.name, "w") as f:
            yaml.dump(config, f)
        run_failure_test(tf.name, model_def_path, error_str=error_str)
def shared_fs_checkpoint_config() -> Dict[str, str]:
    """Checkpoint-storage config backed by a shared filesystem under /tmp."""
    return dict(
        type="shared_fs",
        host_path="/tmp",
        storage_path="determined-integration-checkpoints",
    )


def s3_checkpoint_config(secrets: Dict[str, str], prefix: Optional[str] = None) -> Dict[str, str]:
    """Checkpoint-storage config for S3, credentials taken from `secrets`."""
    config = {
        "type": "s3",
        "access_key": secrets["INTEGRATIONS_S3_ACCESS_KEY"],
        "secret_key": secrets["INTEGRATIONS_S3_SECRET_KEY"],
        "bucket": secrets["INTEGRATIONS_S3_BUCKET"],
    }
    if prefix is not None:
        config["prefix"] = prefix
    return config


def s3_checkpoint_config_no_creds() -> Dict[str, str]:
    """Checkpoint-storage config for a public S3 bucket (no credentials)."""
    return dict(type="s3", bucket="determined-ai-examples")


def root_user_home_bind_mount() -> Dict[str, str]:
    """Bind mount mapping /tmp on the host to /root in the container."""
    return dict(host_path="/tmp", container_path="/root")


def _export_and_load_model(experiment_id: int, master_url: str) -> None:
    # Child-process entry point: fetch and deserialize the top checkpoint.
    best = experimental.Determined(master_url).get_experiment(experiment_id).top_checkpoint()
    best.load()
def export_and_load_model(experiment_id: int) -> None:
    """Export the experiment's top checkpoint and load it, in a fresh process."""
    # We run this in a subprocess to avoid module name collisions
    # when performing checkpoint export of different models.
    ctx = multiprocessing.get_context("spawn")
    p = ctx.Process(
        target=_export_and_load_model,
        args=(
            experiment_id,
            conf.make_master_url(),
        ),
    )
    p.start()
    p.join()
    # A non-zero exit code means the child raised while loading the checkpoint.
    assert p.exitcode == 0, p.exitcode
|
import discord
from discord.ext import commands
import asyncio
import wolframalpha
from aiohttp import ClientSession
from html2text import html2text
from random import choice, randint
from re import sub
# Set up the Wolfram Alpha API client; the key is the first line of WA_KEY.
# BUG FIX: the file handle was previously opened without ever being closed
# (a resource leak); use a with-block so it is closed deterministically.
with open('WA_KEY') as _key_file:
    client = wolframalpha.Client(_key_file.readline().rstrip())
class Api(commands.Cog):
"""Get random cute pics"""
    def __init__(self, bot):
        # Reference to the running bot, used for shared config (embed_color).
        self.bot = bot
        # Palette of embed colours; one is chosen at random per picture embed.
        self.colours = [0x1abc9c, 0x11806a, 0x2ecc71, 0x1f8b4c, 0x3498db, 0x206694, 0x9b59b6, 0x71368a, 0xe91e63, 0xad1457, 0xf1c40f, 0xc27c0e, 0xa84300, 0xe74c3c, 0x992d22, 0x95a5a6, 0x607d8b, 0x979c9f, 0x546e7a]
    @commands.command(name='ask',
                      description="replies to a query with the short text answer of the wolfram alpha API",
                      brief="wolfram alpha API")
    async def ask(self, ctx, *, query):
        """Answer a free-form query via the Wolfram Alpha API."""
        res = client.query(query)
        if res['@success'] == 'false':
            strRes = "Couldn't find an answer"
        else:
            # NOTE(review): next() raises StopIteration if the API reports
            # success but returns no result pods — confirm this can't happen.
            strRes = next(res.results).text
        embed = discord.Embed(
            title=query,
            description=strRes,
            color=self.bot.embed_color)
        await ctx.send(embed=embed)
@commands.command(name='dog',
description="send random dog picture",
brief="send dog pic",
aliases=['auau'])
async def dog(self, ctx):
while True:
result, error = await get_json('https://random.dog/woof.json')
if error:
await ctx.send(error)
return
if result['url'].endswith('.mp4'):
pass
else:
embed = discord.Embed(color=choice(self.colours))
embed.set_image(url=result['url'])
await ctx.send(embed=embed)
return
@commands.command(name='cat',
description="send random cat picture",
brief="send cat pic",
aliases=['antiauau', 'miau'])
async def cat(self, ctx):
result, error = await get_json('http://aws.random.cat/meow')
if error:
await ctx.send(error)
return
embed = discord.Embed(color=choice(self.colours))
embed.set_image(url=result['file'])
await ctx.send(embed=embed)
@commands.command(name='xkcd',
brief="send xkcd comic")
async def xkcd(self, ctx, args = None):
"""
send xkcd comic
*xkcd -> sends newest comic
*xkcd random -> sends random comic
*xkcd [number] -> sends a specific comic
"""
url = None
if not args:
url = 'http://xkcd.com/info.0.json'
elif args.isdigit():
url = f'http://xkcd.com/{int(args)}/info.0.json'
elif args.lower() == 'random':
result, error = await get_json('http://xkcd.com/info.0.json')
if error:
await ctx.send(error)
return
number = randint(0, result['num'])
url = f'http://xkcd.com/{number}/info.0.json'
result, error = await get_json(url)
if error:
await ctx.send(error)
return
embed = discord.Embed(color=choice(self.colours))
embed.set_image(url=result['img'])
await ctx.send(embed=embed)
@commands.command(name='lmgtfy',
description="give link for let me google that for you",
brief="let me google that for you")
async def lmgtfy(self, ctx, *query):
await ctx.send(f"http://lmgtfy.com/?q={"+".join(query)}")
@commands.command(name='lmddgtfy',
description="give link for let me duck duck go that for you",
brief="let me duck duck go that for you")
async def lmddgtfy(self, ctx, *query):
await ctx.send(f"http://lmddgtfy.net/?q={"%20".join(query)}")
@commands.command(name='urban',
description="Get a urban defenition of a query",
brief="search urban")
async def urban(self, ctx, * query : str):
url = f"http://api.urbandictionary.com/v0/define?term={"+".join(query)}"
result, error = await get_json(url)
if error:
await ctx.send(error)
return
if result["list"]:
top_def = result['list'][0]
embed = discord.Embed(
title=f"Definition of {top_def["word"]}",
url=top_def['permalink'],
description=top_def['definition'],
color=self.bot.embed_color)
embed.set_thumbnail(
url = "http://campbelllawobserver.com/wp-content/uploads/2014/03/Urban-Dictionary-e1372286057646.png")
embed.add_field(
name="Example",
value=top_def['example'],
inline=False)
embed.add_field(
name=":thumbsup:",
value=top_def['thumbs_up'],
inline=True)
embed.add_field(
name=":thumbsdown:",
value=top_def['thumbs_down'],
inline=True)
embed.set_footer(text=f"Submited by {top_def["author"]}")
await ctx.send(embed =embed)
else:
await ctx.send("Your query gave no results.")
@commands.command(name='hoogle',
brief="search hoogle")
async def hoogle(self, ctx, * query : str):
"""Searches Hoggle and returns first two options
Click title to see full search"""
url = f"https://hoogle.haskell.org?mode=json&hoogle={"+".join(query)}&start=1&count=1"
result, error = await get_json(url)
if error:
await ctx.send(error)
return
embed = discord.Embed(
title=f"Definition of {" ".join(query)}",
url=f"https://hoogle.haskell.org/?hoogle={"+".join(query)}",
color=self.bot.embed_color)
embed.set_thumbnail(
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Lambda-letter-lowercase-symbol-Garamond.svg/1200px-Lambda-letter-lowercase-symbol-Garamond.svg.png")
if not result:
embed.add_field(
name = "No results found",
value="*undefined*",
inline=False)
else:
for l in result:
val = "*Module:* " + l["module"]["name"] + "\n"
val+= sub(r'\n{2,}', '\n\n', sub(r"\n +", "\n" , html2text(l["docs"])))
embed.add_field(
name= html2text(l["item"]),
value= val,
inline=False)
embed.set_footer(text="first option in Hoogle (Click title for more)")
await ctx.send(embed=embed)
async def get_json(url):
    """Fetch *url* and decode its body as JSON.

    Returns a ``(result, error)`` pair: ``(parsed_json, None)`` on success or
    ``(None, message)`` on any failure, so command handlers can report the
    error to the channel instead of crashing.
    """
    try:
        async with ClientSession() as session:
            async with session.get(url) as response:
                result = await response.json()
                return result, None
    # Catch Exception instead of a bare `except:` so KeyboardInterrupt and
    # SystemExit still propagate; the best-effort behavior is unchanged.
    except Exception:
        return None, "Something unexpected went wrong."
def setup(bot):
    # discord.py extension entry point: register the Api cog on the bot.
    bot.add_cog(Api(bot))
| import discord
from discord.ext import commands
import asyncio
import wolframalpha
from aiohttp import ClientSession
from html2text import html2text
from random import choice, randint
from re import sub
#setup wolframalpha API
client = wolframalpha.Client(open('WA_KEY').readline().rstrip())
class Api(commands.Cog):
"""Get random cute pics"""
def __init__(self, bot):
self.bot = bot
self.colours = [0x1abc9c, 0x11806a, 0x2ecc71, 0x1f8b4c, 0x3498db, 0x206694, 0x9b59b6, 0x71368a, 0xe91e63, 0xad1457, 0xf1c40f, 0xc27c0e, 0xa84300, 0xe74c3c, 0x992d22, 0x95a5a6, 0x607d8b, 0x979c9f, 0x546e7a]
@commands.command(name='ask',
description="replies to a query with the short text answer of the wolfram alpha API",
brief="wolfram alpha API")
async def ask(self, ctx, *, query):
res = client.query(query)
if res['@success'] == 'false':
strRes = "Couldn't find an answer"
else:
strRes = next(res.results).text
embed = discord.Embed(
title=query,
description=strRes,
color=self.bot.embed_color)
await ctx.send(embed=embed)
@commands.command(name='dog',
description="send random dog picture",
brief="send dog pic",
aliases=['auau'])
async def dog(self, ctx):
while True:
result, error = await get_json('https://random.dog/woof.json')
if error:
await ctx.send(error)
return
if result['url'].endswith('.mp4'):
pass
else:
embed = discord.Embed(color=choice(self.colours))
embed.set_image(url=result['url'])
await ctx.send(embed=embed)
return
@commands.command(name='cat',
description="send random cat picture",
brief="send cat pic",
aliases=['antiauau', 'miau'])
async def cat(self, ctx):
result, error = await get_json('http://aws.random.cat/meow')
if error:
await ctx.send(error)
return
embed = discord.Embed(color=choice(self.colours))
embed.set_image(url=result['file'])
await ctx.send(embed=embed)
@commands.command(name='xkcd',
brief="send xkcd comic")
async def xkcd(self, ctx, args = None):
"""
send xkcd comic
*xkcd -> sends newest comic
*xkcd random -> sends random comic
*xkcd [number] -> sends a specific comic
"""
url = None
if not args:
url = 'http://xkcd.com/info.0.json'
elif args.isdigit():
url = f'http://xkcd.com/{int(args)}/info.0.json'
elif args.lower() == 'random':
result, error = await get_json('http://xkcd.com/info.0.json')
if error:
await ctx.send(error)
return
number = randint(0, result['num'])
url = f'http://xkcd.com/{number}/info.0.json'
result, error = await get_json(url)
if error:
await ctx.send(error)
return
embed = discord.Embed(color=choice(self.colours))
embed.set_image(url=result['img'])
await ctx.send(embed=embed)
@commands.command(name='lmgtfy',
description="give link for let me google that for you",
brief="let me google that for you")
async def lmgtfy(self, ctx, *query):
await ctx.send(f"http://lmgtfy.com/?q={'+'.join(query)}")
@commands.command(name='lmddgtfy',
description="give link for let me duck duck go that for you",
brief="let me duck duck go that for you")
async def lmddgtfy(self, ctx, *query):
await ctx.send(f"http://lmddgtfy.net/?q={'%20'.join(query)}")
@commands.command(name='urban',
description="Get a urban defenition of a query",
brief="search urban")
async def urban(self, ctx, * query : str):
url = f"http://api.urbandictionary.com/v0/define?term={'+'.join(query)}"
result, error = await get_json(url)
if error:
await ctx.send(error)
return
if result["list"]:
top_def = result['list'][0]
embed = discord.Embed(
title=f"Definition of {top_def['word']}",
url=top_def['permalink'],
description=top_def['definition'],
color=self.bot.embed_color)
embed.set_thumbnail(
url = "http://campbelllawobserver.com/wp-content/uploads/2014/03/Urban-Dictionary-e1372286057646.png")
embed.add_field(
name="Example",
value=top_def['example'],
inline=False)
embed.add_field(
name=":thumbsup:",
value=top_def['thumbs_up'],
inline=True)
embed.add_field(
name=":thumbsdown:",
value=top_def['thumbs_down'],
inline=True)
embed.set_footer(text=f"Submited by {top_def['author']}")
await ctx.send(embed =embed)
else:
await ctx.send("Your query gave no results.")
@commands.command(name='hoogle',
brief="search hoogle")
async def hoogle(self, ctx, * query : str):
"""Searches Hoggle and returns first two options
Click title to see full search"""
url = f"https://hoogle.haskell.org?mode=json&hoogle={'+'.join(query)}&start=1&count=1"
result, error = await get_json(url)
if error:
await ctx.send(error)
return
embed = discord.Embed(
title=f"Definition of {' '.join(query)}",
url=f"https://hoogle.haskell.org/?hoogle={'+'.join(query)}",
color=self.bot.embed_color)
embed.set_thumbnail(
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Lambda-letter-lowercase-symbol-Garamond.svg/1200px-Lambda-letter-lowercase-symbol-Garamond.svg.png")
if not result:
embed.add_field(
name = "No results found",
value="*undefined*",
inline=False)
else:
for l in result:
val = "*Module:* " + l["module"]["name"] + "\n"
val+= sub(r'\n{2,}', '\n\n', sub(r"\n +", "\n" , html2text(l["docs"])))
embed.add_field(
name= html2text(l["item"]),
value= val,
inline=False)
embed.set_footer(text="first option in Hoogle (Click title for more)")
await ctx.send(embed=embed)
async def get_json(url):
    """Fetch *url* and decode its body as JSON.

    Returns a ``(result, error)`` pair: ``(parsed_json, None)`` on success or
    ``(None, message)`` on any failure, so command handlers can report the
    error to the channel instead of crashing.
    """
    try:
        async with ClientSession() as session:
            async with session.get(url) as response:
                result = await response.json()
                return result, None
    # Catch Exception instead of a bare `except:` so KeyboardInterrupt and
    # SystemExit still propagate; the best-effort behavior is unchanged.
    except Exception:
        return None, "Something unexpected went wrong."
def setup(bot):
bot.add_cog(Api(bot))
|
# specifically use concurrent.futures for threadsafety
# asyncio Futures cannot be used across threads
import asyncio
import json
import time
from functools import partial
from kubernetes_asyncio import watch
from traitlets import Any
from traitlets import Bool
from traitlets import Dict
from traitlets import Int
from traitlets import Unicode
from traitlets.config import LoggingConfigurable
from urllib3.exceptions import ReadTimeoutError
from .clients import shared_client
# This is kubernetes client implementation specific, but we need to know
# whether it was a network or watch timeout.
class ResourceReflector(LoggingConfigurable):
"""Base class for keeping a local up-to-date copy of a set of
kubernetes resources.
Must be subclassed once per kind of resource that needs watching.
Creating a reflector should be done with the create() classmethod,
since that, in addition to creating the instance starts the watch task.
Shutting down a reflector should be done by awaiting its stop() method.
KubeSpawner does not do this, because its reflectors are singleton
instances shared among multiple spawners. The watch task therefore runs
until JupyterHub exits.
"""
labels = Dict(
{},
config=True,
help="""
Labels to reflect onto local cache
""",
)
fields = Dict(
{},
config=True,
help="""
Fields to restrict the reflected objects
""",
)
resources = Dict(
{},
help="""
Dictionary of resource names to the appropriate resource objects.
This can be accessed across threads safely.
""",
)
kind = Unicode(
'resource',
help="""
Human readable name for kind of object we're watching for.
Used for diagnostic messages.
""",
)
omit_namespace = Bool(
False,
config=True,
help="""
Set this to true if the reflector is to operate across
multiple namespaces.
""",
)
namespace = Unicode(
None,
allow_none=True,
help="""
Namespace to watch for resources in; leave at 'None' for
multi-namespace reflectors.
""",
)
list_method_name = Unicode(
"",
help="""
Name of function (on apigroup respresented by
`api_group_name`) that is to be called to list resources.
This will be passed a a label selector.
If self.omit_namespace is False you want something of the form
list_namespaced_<resource> - for example,
`list_namespaced_pod` will give you a PodReflector. It will
take its namespace from self.namespace (which therefore should
not be None).
If self.omit_namespace is True, you want
list_<resource>_for_all_namespaces.
This must be set by a subclass.
It is not necessary to set it for pod or event reflectors, because
__init__ will figure it out. If you create your own reflector
subclass you probably want to add the logic to choose the method
name to that class's __init__().
""",
)
api_group_name = Unicode(
'CoreV1Api',
help="""
Name of class that represents the apigroup on which
`list_method_name` is to be found.
Defaults to CoreV1Api, which has everything in the 'core' API group. If you want to watch Ingresses,
for example, you would have to use ExtensionsV1beta1Api
""",
)
request_timeout = Int(
60,
config=True,
help="""
Network timeout for kubernetes watch.
Trigger watch reconnect when a given request is taking too long,
which can indicate network issues.
""",
)
timeout_seconds = Int(
10,
config=True,
help="""
Timeout for kubernetes watch.
Trigger watch reconnect when no watch event has been received.
This will cause a full reload of the currently existing resources
from the API server.
""",
)
restart_seconds = Int(
30,
config=True,
help="""
Maximum time before restarting a watch.
The watch will be restarted at least this often,
even if events are still arriving.
Avoids trusting kubernetes watch to yield all events,
which seems to not be a safe assumption.
""",
)
on_failure = Any(help="""Function to be called when the reflector gives up.""")
_stopping = Bool(False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Client configuration for kubernetes, as done via the load_config
# function, has already taken place in KubeSpawner or KubeIngressProxy
# initialization steps.
self.api = shared_client(self.api_group_name)
# FIXME: Protect against malicious labels?
self.label_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.labels.items()]
)
self.field_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.fields.items()]
)
self.first_load_future = asyncio.Future()
# Make sure that we know kind, whether we should omit the
# namespace, and what our list_method_name is. For the things
# we already know about, we can derive list_method_name from
# those two things. New reflector types should also update
# their __init__() methods to derive list_method_name, but you
# could just set it directly in the subclass.
if not self.list_method_name:
plural_to_singular = {
"endpoints": "endpoints",
"events": "event",
"ingresses": "ingress",
"pods": "pod",
"services": "service",
}
if self.kind in plural_to_singular:
if self.omit_namespace:
self.list_method_name = (
f"list_{plural_to_singular[self.kind]}_for_all_namespaces"
)
else:
self.list_method_name = (
f"list_namespaced_{plural_to_singular[self.kind]}"
)
# Make sure we have the required values.
if not self.kind:
raise RuntimeError("Reflector kind must be set!")
if not self.list_method_name:
raise RuntimeError("Reflector list_method_name must be set!")
self.watch_task = None
async def _list_and_update(self):
"""
Update current list of resources by doing a full fetch.
Overwrites all current resource info.
"""
initial_resources = None
kwargs = dict(
label_selector=self.label_selector,
field_selector=self.field_selector,
_request_timeout=self.request_timeout,
_preload_content=False,
)
if not self.omit_namespace:
kwargs["namespace"] = self.namespace
list_method = getattr(self.api, self.list_method_name)
initial_resources_raw = await list_method(**kwargs)
# This is an atomic operation on the dictionary!
initial_resources = json.loads(await initial_resources_raw.read())
self.resources = {
f'{p['metadata']['namespace']}/{p['metadata']['name']}': p
for p in initial_resources["items"]
}
if not self.first_load_future.done():
# signal that we've loaded our initial data at least once
self.first_load_future.set_result(None)
# return the resource version so we can hook up a watch
return initial_resources["metadata"]["resourceVersion"]
async def _watch_and_update(self):
"""
Keeps the current list of resources up-to-date
We first fetch the list of current resources, and store that. Then we
register to be notified of changes to those resources, and keep our
local store up-to-date based on these notifications.
We also perform exponential backoff, giving up after we hit 32s
wait time. This should protect against network connections dropping
and intermittent unavailability of the api-server. Every time we
recover from an exception we also do a full fetch, to pick up
changes that might've been missed in the time we were not doing
a watch.
Since the resources are read-only in the Spawner (where they are
used), then this is safe. The Spawner's view of the world might be
out-of-date, but it's not going to corrupt any data.
"""
selectors = []
if self.label_selector:
selectors.append("label selector=%r" % self.label_selector)
if self.field_selector:
selectors.append("field selector=%r" % self.field_selector)
log_selector = ', '.join(selectors)
cur_delay = 0.1
if self.omit_namespace:
ns_str = "all namespaces"
else:
ns_str = "namespace {}".format(self.namespace)
self.log.info(
"watching for %s with %s in %s",
self.kind,
log_selector,
ns_str,
)
while True:
self.log.debug("Connecting %s watcher", self.kind)
start = time.monotonic()
w = watch.Watch()
try:
resource_version = await self._list_and_update()
watch_args = {
"label_selector": self.label_selector,
"field_selector": self.field_selector,
"resource_version": resource_version,
}
if not self.omit_namespace:
watch_args["namespace"] = self.namespace
if self.request_timeout:
# set network receive timeout
watch_args['_request_timeout'] = self.request_timeout
if self.timeout_seconds:
# set watch timeout
watch_args['timeout_seconds'] = self.timeout_seconds
# Calling the method with _preload_content=False is a performance
# optimization making the Kubernetes client do less work. See
# https://github.com/jupyterhub/kubespawner/pull/424.
method = partial(
getattr(self.api, self.list_method_name), _preload_content=False
)
async with w.stream(method, **watch_args) as stream:
async for watch_event in stream:
# in case of timeout_seconds, the w.stream just exits (no exception thrown)
# -> we stop the watcher and start a new one
# Remember that these events are k8s api related WatchEvents
# objects, not k8s Event or Pod representations, they will
# reside in the WatchEvent's object field depending on what
# kind of resource is watched.
#
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#watchevent-v1-meta
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
cur_delay = 0.1
resource = watch_event['raw_object']
ref_key = "{}/{}".format(
resource["metadata"]["namespace"],
resource["metadata"]["name"],
)
if watch_event['type'] == 'DELETED':
# This is an atomic delete operation on the dictionary!
self.resources.pop(ref_key, None)
else:
# This is an atomic operation on the dictionary!
self.resources[ref_key] = resource
if self._stopping:
self.log.info("%s watcher stopped: inner", self.kind)
break
watch_duration = time.monotonic() - start
if watch_duration >= self.restart_seconds:
self.log.debug(
"Restarting %s watcher after %i seconds",
self.kind,
watch_duration,
)
break
except ReadTimeoutError:
# network read time out, just continue and restart the watch
# this could be due to a network problem or just low activity
self.log.warning("Read timeout watching %s, reconnecting", self.kind)
continue
except asyncio.CancelledError:
self.log.debug("Cancelled watching %s", self.kind)
raise
except Exception:
cur_delay = cur_delay * 2
if cur_delay > 30:
self.log.exception("Watching resources never recovered, giving up")
if self.on_failure:
self.on_failure()
return
self.log.exception(
"Error when watching resources, retrying in %ss", cur_delay
)
await asyncio.sleep(cur_delay)
continue
else:
# no events on watch, reconnect
self.log.debug("%s watcher timeout", self.kind)
finally:
w.stop()
if self._stopping:
self.log.info("%s watcher stopped: outer", self.kind)
break
self.log.warning("%s watcher finished", self.kind)
async def start(self):
"""
Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards!
"""
if self.watch_task and not self.watch_task.done():
raise RuntimeError('Task watching for resources is already running')
await self._list_and_update()
self.watch_task = asyncio.create_task(self._watch_and_update())
    async def stop(self):
        """
        Cleanly shut down the watch task.
        """
        # Flag is polled inside _watch_and_update's loops, so a running
        # watcher exits at its next opportunity even before cancellation.
        self._stopping = True
        if self.watch_task and not self.watch_task.done():
            # cancel the task, wait for it to complete
            self.watch_task.cancel()
            try:
                timeout = 5
                await asyncio.wait_for(self.watch_task, timeout)
            except asyncio.TimeoutError:
                # Raising the TimeoutError will cancel the task.
                self.log.warning(
                    f"Watch task did not finish in {timeout}s and was cancelled"
                )
        self.watch_task = None
class NamespacedResourceReflector(ResourceReflector):
"""
Watches for resources in a particular namespace. The list_methods
want both a method name and a namespace.
"""
omit_namespace = False
class MultiNamespaceResourceReflector(ResourceReflector):
"""
Watches for resources across all namespaces. The list_methods
want only a method name. Note that this requires the service account
to be significantly more powerful, since it must be bound to ClusterRoles
rather than just Roles, and therefore this is inherently more
dangerous.
"""
omit_namespace = True
| # specifically use concurrent.futures for threadsafety
# asyncio Futures cannot be used across threads
import asyncio
import json
import time
from functools import partial
from kubernetes_asyncio import watch
from traitlets import Any
from traitlets import Bool
from traitlets import Dict
from traitlets import Int
from traitlets import Unicode
from traitlets.config import LoggingConfigurable
from urllib3.exceptions import ReadTimeoutError
from .clients import shared_client
# This is kubernetes client implementation specific, but we need to know
# whether it was a network or watch timeout.
class ResourceReflector(LoggingConfigurable):
"""Base class for keeping a local up-to-date copy of a set of
kubernetes resources.
Must be subclassed once per kind of resource that needs watching.
Creating a reflector should be done with the create() classmethod,
since that, in addition to creating the instance starts the watch task.
Shutting down a reflector should be done by awaiting its stop() method.
KubeSpawner does not do this, because its reflectors are singleton
instances shared among multiple spawners. The watch task therefore runs
until JupyterHub exits.
"""
labels = Dict(
{},
config=True,
help="""
Labels to reflect onto local cache
""",
)
fields = Dict(
{},
config=True,
help="""
Fields to restrict the reflected objects
""",
)
resources = Dict(
{},
help="""
Dictionary of resource names to the appropriate resource objects.
This can be accessed across threads safely.
""",
)
kind = Unicode(
'resource',
help="""
Human readable name for kind of object we're watching for.
Used for diagnostic messages.
""",
)
omit_namespace = Bool(
False,
config=True,
help="""
Set this to true if the reflector is to operate across
multiple namespaces.
""",
)
namespace = Unicode(
None,
allow_none=True,
help="""
Namespace to watch for resources in; leave at 'None' for
multi-namespace reflectors.
""",
)
list_method_name = Unicode(
"",
help="""
Name of function (on apigroup respresented by
`api_group_name`) that is to be called to list resources.
This will be passed a a label selector.
If self.omit_namespace is False you want something of the form
list_namespaced_<resource> - for example,
`list_namespaced_pod` will give you a PodReflector. It will
take its namespace from self.namespace (which therefore should
not be None).
If self.omit_namespace is True, you want
list_<resource>_for_all_namespaces.
This must be set by a subclass.
It is not necessary to set it for pod or event reflectors, because
__init__ will figure it out. If you create your own reflector
subclass you probably want to add the logic to choose the method
name to that class's __init__().
""",
)
api_group_name = Unicode(
'CoreV1Api',
help="""
Name of class that represents the apigroup on which
`list_method_name` is to be found.
Defaults to CoreV1Api, which has everything in the 'core' API group. If you want to watch Ingresses,
for example, you would have to use ExtensionsV1beta1Api
""",
)
request_timeout = Int(
60,
config=True,
help="""
Network timeout for kubernetes watch.
Trigger watch reconnect when a given request is taking too long,
which can indicate network issues.
""",
)
timeout_seconds = Int(
10,
config=True,
help="""
Timeout for kubernetes watch.
Trigger watch reconnect when no watch event has been received.
This will cause a full reload of the currently existing resources
from the API server.
""",
)
restart_seconds = Int(
30,
config=True,
help="""
Maximum time before restarting a watch.
The watch will be restarted at least this often,
even if events are still arriving.
Avoids trusting kubernetes watch to yield all events,
which seems to not be a safe assumption.
""",
)
on_failure = Any(help="""Function to be called when the reflector gives up.""")
_stopping = Bool(False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Client configuration for kubernetes, as done via the load_config
# function, has already taken place in KubeSpawner or KubeIngressProxy
# initialization steps.
self.api = shared_client(self.api_group_name)
# FIXME: Protect against malicious labels?
self.label_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.labels.items()]
)
self.field_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.fields.items()]
)
self.first_load_future = asyncio.Future()
# Make sure that we know kind, whether we should omit the
# namespace, and what our list_method_name is. For the things
# we already know about, we can derive list_method_name from
# those two things. New reflector types should also update
# their __init__() methods to derive list_method_name, but you
# could just set it directly in the subclass.
if not self.list_method_name:
plural_to_singular = {
"endpoints": "endpoints",
"events": "event",
"ingresses": "ingress",
"pods": "pod",
"services": "service",
}
if self.kind in plural_to_singular:
if self.omit_namespace:
self.list_method_name = (
f"list_{plural_to_singular[self.kind]}_for_all_namespaces"
)
else:
self.list_method_name = (
f"list_namespaced_{plural_to_singular[self.kind]}"
)
# Make sure we have the required values.
if not self.kind:
raise RuntimeError("Reflector kind must be set!")
if not self.list_method_name:
raise RuntimeError("Reflector list_method_name must be set!")
self.watch_task = None
async def _list_and_update(self):
"""
Update current list of resources by doing a full fetch.
Overwrites all current resource info.
"""
initial_resources = None
kwargs = dict(
label_selector=self.label_selector,
field_selector=self.field_selector,
_request_timeout=self.request_timeout,
_preload_content=False,
)
if not self.omit_namespace:
kwargs["namespace"] = self.namespace
list_method = getattr(self.api, self.list_method_name)
initial_resources_raw = await list_method(**kwargs)
# This is an atomic operation on the dictionary!
initial_resources = json.loads(await initial_resources_raw.read())
self.resources = {
f'{p["metadata"]["namespace"]}/{p["metadata"]["name"]}': p
for p in initial_resources["items"]
}
if not self.first_load_future.done():
# signal that we've loaded our initial data at least once
self.first_load_future.set_result(None)
# return the resource version so we can hook up a watch
return initial_resources["metadata"]["resourceVersion"]
async def _watch_and_update(self):
"""
Keeps the current list of resources up-to-date
We first fetch the list of current resources, and store that. Then we
register to be notified of changes to those resources, and keep our
local store up-to-date based on these notifications.
We also perform exponential backoff, giving up after we hit 32s
wait time. This should protect against network connections dropping
and intermittent unavailability of the api-server. Every time we
recover from an exception we also do a full fetch, to pick up
changes that might've been missed in the time we were not doing
a watch.
Since the resources are read-only in the Spawner (where they are
used), then this is safe. The Spawner's view of the world might be
out-of-date, but it's not going to corrupt any data.
"""
selectors = []
if self.label_selector:
selectors.append("label selector=%r" % self.label_selector)
if self.field_selector:
selectors.append("field selector=%r" % self.field_selector)
log_selector = ', '.join(selectors)
cur_delay = 0.1
if self.omit_namespace:
ns_str = "all namespaces"
else:
ns_str = "namespace {}".format(self.namespace)
self.log.info(
"watching for %s with %s in %s",
self.kind,
log_selector,
ns_str,
)
while True:
self.log.debug("Connecting %s watcher", self.kind)
start = time.monotonic()
w = watch.Watch()
try:
resource_version = await self._list_and_update()
watch_args = {
"label_selector": self.label_selector,
"field_selector": self.field_selector,
"resource_version": resource_version,
}
if not self.omit_namespace:
watch_args["namespace"] = self.namespace
if self.request_timeout:
# set network receive timeout
watch_args['_request_timeout'] = self.request_timeout
if self.timeout_seconds:
# set watch timeout
watch_args['timeout_seconds'] = self.timeout_seconds
# Calling the method with _preload_content=False is a performance
# optimization making the Kubernetes client do less work. See
# https://github.com/jupyterhub/kubespawner/pull/424.
method = partial(
getattr(self.api, self.list_method_name), _preload_content=False
)
async with w.stream(method, **watch_args) as stream:
async for watch_event in stream:
# in case of timeout_seconds, the w.stream just exits (no exception thrown)
# -> we stop the watcher and start a new one
# Remember that these events are k8s api related WatchEvents
# objects, not k8s Event or Pod representations, they will
# reside in the WatchEvent's object field depending on what
# kind of resource is watched.
#
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#watchevent-v1-meta
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
cur_delay = 0.1
resource = watch_event['raw_object']
ref_key = "{}/{}".format(
resource["metadata"]["namespace"],
resource["metadata"]["name"],
)
if watch_event['type'] == 'DELETED':
# This is an atomic delete operation on the dictionary!
self.resources.pop(ref_key, None)
else:
# This is an atomic operation on the dictionary!
self.resources[ref_key] = resource
if self._stopping:
self.log.info("%s watcher stopped: inner", self.kind)
break
watch_duration = time.monotonic() - start
if watch_duration >= self.restart_seconds:
self.log.debug(
"Restarting %s watcher after %i seconds",
self.kind,
watch_duration,
)
break
except ReadTimeoutError:
# network read time out, just continue and restart the watch
# this could be due to a network problem or just low activity
self.log.warning("Read timeout watching %s, reconnecting", self.kind)
continue
except asyncio.CancelledError:
self.log.debug("Cancelled watching %s", self.kind)
raise
except Exception:
cur_delay = cur_delay * 2
if cur_delay > 30:
self.log.exception("Watching resources never recovered, giving up")
if self.on_failure:
self.on_failure()
return
self.log.exception(
"Error when watching resources, retrying in %ss", cur_delay
)
await asyncio.sleep(cur_delay)
continue
else:
# no events on watch, reconnect
self.log.debug("%s watcher timeout", self.kind)
finally:
w.stop()
if self._stopping:
self.log.info("%s watcher stopped: outer", self.kind)
break
self.log.warning("%s watcher finished", self.kind)
async def start(self):
"""
Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards!
"""
if self.watch_task and not self.watch_task.done():
raise RuntimeError('Task watching for resources is already running')
await self._list_and_update()
self.watch_task = asyncio.create_task(self._watch_and_update())
async def stop(self):
"""
Cleanly shut down the watch task.
"""
self._stopping = True
if self.watch_task and not self.watch_task.done():
# cancel the task, wait for it to complete
self.watch_task.cancel()
try:
timeout = 5
await asyncio.wait_for(self.watch_task, timeout)
except asyncio.TimeoutError:
# Raising the TimeoutError will cancel the task.
self.log.warning(
f"Watch task did not finish in {timeout}s and was cancelled"
)
self.watch_task = None
class NamespacedResourceReflector(ResourceReflector):
"""
Watches for resources in a particular namespace. The list_methods
want both a method name and a namespace.
"""
omit_namespace = False
class MultiNamespaceResourceReflector(ResourceReflector):
"""
Watches for resources across all namespaces. The list_methods
want only a method name. Note that this requires the service account
to be significantly more powerful, since it must be bound to ClusterRoles
rather than just Roles, and therefore this is inherently more
dangerous.
"""
omit_namespace = True
|
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import make_password
from django.db import transaction
from rest_framework import exceptions, serializers
from care.facility.models import Facility, FacilityUser, READ_ONLY_USER_TYPES
from care.users.api.serializers.lsg import DistrictSerializer, LocalBodySerializer, StateSerializer, BlockSerializer
from care.users.models import GENDER_CHOICES
from care.utils.serializer.phonenumber_ispossible_field import PhoneNumberIsPossibleField
from config.serializers import ChoiceField
User = get_user_model()
class SignUpSerializer(serializers.ModelSerializer):
    """Serializer for user self sign-up; hashes the password on create."""
    user_type = ChoiceField(choices=User.TYPE_CHOICES)
    gender = ChoiceField(choices=GENDER_CHOICES)
    password = serializers.CharField(write_only=True)  # never echoed in responses
    phone_number = PhoneNumberIsPossibleField()
    alt_phone_number = PhoneNumberIsPossibleField(required=False, allow_blank=True)

    class Meta:
        model = User
        fields = (
            "id",
            "username",
            "first_name",
            "last_name",
            "email",
            "password",
            "user_type",
            "ward",
            "local_body",
            "district",
            "state",
            "phone_number",
            "alt_phone_number",
            "gender",
            "age",
        )

    def create(self, validated_data):
        """Create the user, replacing the raw password with its hash."""
        validated_data["password"] = make_password(validated_data.get("password"))
        return super().create(validated_data)
class UserCreateSerializer(SignUpSerializer):
    """Serializer for admins creating other users.

    Extends SignUpSerializer with optional facility assignment and enforces
    that the creating user (``context["created_by"]``) can neither escalate
    privileges nor create users outside their own jurisdiction.
    """
    # Admins may create users without an explicit password.
    password = serializers.CharField(required=False)
    # External ids of facilities to link the new user to (write-only input).
    facilities = serializers.ListSerializer(
        child=serializers.UUIDField(), required=False, allow_empty=True, write_only=True
    )

    class Meta:
        model = User
        include = ("facilities",)
        exclude = (
            "is_superuser",
            "is_staff",
            "is_active",
            "last_login",
            "date_joined",
            "verified",
            "deleted",
            "groups",
            "user_permissions",
        )

    def validate_facilities(self, facility_ids):
        """Ensure every supplied facility UUID exists; report any missing ones."""
        if facility_ids:
            existing = Facility.objects.filter(external_id__in=facility_ids)
            if len(facility_ids) != existing.count():
                available_facility_ids = existing.values_list("external_id", flat=True)
                not_found_ids = list(set(facility_ids) - set(available_facility_ids))
                # Inner quotes must differ from the f-string delimiter:
                # same-quote nesting is a SyntaxError before Python 3.12.
                raise serializers.ValidationError(
                    f"Some facilities are not available - {', '.join(str(_id) for _id in not_found_ids)}"
                )
        return facility_ids

    def validate_ward(self, value):
        """Only superusers or LocalBodyAdmin+ may pick a different ward."""
        creator = self.context["created_by"]
        if (
            value is not None
            and value != creator.ward
            and not creator.is_superuser
            and not creator.user_type >= User.TYPE_VALUE_MAP["LocalBodyAdmin"]
        ):
            raise serializers.ValidationError("Cannot create for a different Ward")
        return value

    def validate_local_body(self, value):
        """Only superusers or DistrictAdmin+ may pick a different local body."""
        creator = self.context["created_by"]
        if (
            value is not None
            and value != creator.local_body
            and not creator.is_superuser
            and not creator.user_type >= User.TYPE_VALUE_MAP["DistrictAdmin"]
        ):
            raise serializers.ValidationError("Cannot create for a different local body")
        return value

    def validate_district(self, value):
        """Only superusers or StateAdmin+ may pick a different district."""
        creator = self.context["created_by"]
        if (
            value is not None
            and value != creator.district
            and not creator.is_superuser
            and not creator.user_type >= User.TYPE_VALUE_MAP["StateAdmin"]
        ):
            # Fixed copy-paste error: this message previously said "state".
            raise serializers.ValidationError("Cannot create for a different district")
        return value

    def validate_state(self, value):
        """Only superusers may pick a different state."""
        creator = self.context["created_by"]
        if (
            value is not None
            and value != creator.state
            and not creator.is_superuser
        ):
            raise serializers.ValidationError("Cannot create for a different state")
        return value

    def validate(self, attrs):
        """Cross-field checks: privilege escalation and jurisdiction presence."""
        validated = super().validate(attrs)
        creator = self.context["created_by"]
        if creator.user_type in READ_ONLY_USER_TYPES:
            if validated["user_type"] not in READ_ONLY_USER_TYPES:
                raise exceptions.ValidationError(
                    {"user_type": ["Read only users can create other read only users only"]}
                )
        if validated["user_type"] > creator.user_type and not creator.is_superuser:
            raise exceptions.ValidationError(
                {"user_type": ["User cannot create another user with higher permissions"]}
            )
        if (
            not validated.get("ward")
            and not validated.get("local_body")
            and not validated.get("district")
            and not validated.get("state")
        ):
            raise exceptions.ValidationError({"__all__": ["One of ward, local body, district or state is required"]})
        if validated.get("user_type") == User.TYPE_VALUE_MAP["BlockAdmin"]:
            local_body_object = validated.get("local_body")
            # Guard the missing-local_body case too (previously raised
            # AttributeError instead of a validation error).
            if local_body_object is None or local_body_object.block is None:
                raise exceptions.ValidationError({"__all__": ["The local_body doesn't have a Block associated with it"]})
        return validated

    def facility_query(self, user):
        """Return the facilities *user* may link, based on their admin level."""
        queryset = Facility.objects.all()
        if user.is_superuser:
            pass
        elif user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]:
            queryset = queryset.filter(state=user.state)
        elif user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
            queryset = queryset.filter(district=user.district)
        elif user.user_type >= User.TYPE_VALUE_MAP["BlockAdmin"]:
            queryset = queryset.filter(block=user.block)
        elif user.user_type >= User.TYPE_VALUE_MAP["LocalBodyAdmin"]:
            queryset = queryset.filter(local_body=user.local_body)
        else:
            queryset = queryset.filter(users__id__exact=user.id)
        return queryset

    def create(self, validated_data):
        """Create the user and link any requested (and permitted) facilities."""
        with transaction.atomic():
            facilities = validated_data.pop("facilities", [])
            user = User.objects.create_user(**{**validated_data, "verified": True})
            # password is optional here; the previous unconditional lookup
            # raised KeyError when it was omitted. create_user() has already
            # hashed and stored the password when one was supplied.
            raw_password = validated_data.get("password")
            if raw_password:
                user.set_password(raw_password)
            facility_query = self.facility_query(self.context["created_by"])
            if facilities:
                facility_objs = facility_query.filter(external_id__in=facilities)
                facility_user_objs = [
                    FacilityUser(facility=facility, user=user, created_by=self.context["created_by"])
                    for facility in facility_objs
                ]
                FacilityUser.objects.bulk_create(facility_user_objs)
            return user
class UserSerializer(SignUpSerializer):
    """Read/update serializer for a user, with expanded location objects."""
    user_type = ChoiceField(choices=User.TYPE_CHOICES, read_only=True)
    is_superuser = serializers.BooleanField(read_only=True)
    local_body_object = LocalBodySerializer(source="local_body", read_only=True)
    # NOTE(review): DRF dotted sources normally use ".", e.g. "local_body.block";
    # confirm the "__" form resolves here.
    block_object = BlockSerializer(source="local_body__block", read_only=True)
    district_object = DistrictSerializer(source="district", read_only=True)
    state_object = StateSerializer(source="state", read_only=True)
    alt_phone_number = PhoneNumberIsPossibleField(required=False, allow_blank=True)

    class Meta:
        model = User
        fields = (
            "id",
            "username",
            "first_name",
            "last_name",
            "email",
            "user_type",
            "local_body",
            "block",
            "district",
            "state",
            "phone_number",
            "alt_phone_number",
            "gender",
            "age",
            "is_superuser",
            "verified",
            "local_body_object",
            "block_object",
            "district_object",
            "state_object",
            "pf_endpoint",
            "pf_p256dh",
            "pf_auth",
        )
        # Location, verification and push-notification fields may not be
        # changed through this serializer.
        read_only_fields = (
            "is_superuser",
            "verified",
            "user_type",
            "ward",
            "local_body",
            "block",
            "district",
            "state",
            "pf_endpoint",
            "pf_p256dh",
            "pf_auth",
        )
        extra_kwargs = {"url": {"lookup_field": "username"}}
class UserBaseMinimumSerializer(serializers.ModelSerializer):
    """Minimal user representation for embedding in other payloads."""
    user_type = ChoiceField(choices=User.TYPE_CHOICES, read_only=True)

    class Meta:
        model = User
        fields = (
            "id",
            "first_name",
            "username",
            "email",
            "last_name",
            "user_type",
            "last_login",
        )
class UserListSerializer(serializers.ModelSerializer):
    """List-view serializer with expanded, read-only location objects."""
    local_body_object = LocalBodySerializer(source="local_body", read_only=True)
    # NOTE(review): DRF dotted sources normally use ".", e.g. "local_body.block";
    # confirm the "__" form resolves here.
    block_object = BlockSerializer(source="local_body__block", read_only=True)
    district_object = DistrictSerializer(source="district", read_only=True)
    state_object = StateSerializer(source="state", read_only=True)
    user_type = ChoiceField(choices=User.TYPE_CHOICES, read_only=True)

    class Meta:
        model = User
        fields = (
            "id",
            "first_name",
            "last_name",
            "username",
            "local_body_object",
            "block_object",
            "district_object",
            "state_object",
            "user_type",
            "last_login",
        )
| from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import make_password
from django.db import transaction
from rest_framework import exceptions, serializers
from care.facility.models import Facility, FacilityUser, READ_ONLY_USER_TYPES
from care.users.api.serializers.lsg import DistrictSerializer, LocalBodySerializer, StateSerializer, BlockSerializer
from care.users.models import GENDER_CHOICES
from care.utils.serializer.phonenumber_ispossible_field import PhoneNumberIsPossibleField
from config.serializers import ChoiceField
User = get_user_model()
class SignUpSerializer(serializers.ModelSerializer):
    """Serializer for user self sign-up; hashes the password on create."""
    user_type = ChoiceField(choices=User.TYPE_CHOICES)
    gender = ChoiceField(choices=GENDER_CHOICES)
    password = serializers.CharField(write_only=True)  # never echoed in responses
    phone_number = PhoneNumberIsPossibleField()
    alt_phone_number = PhoneNumberIsPossibleField(required=False, allow_blank=True)

    class Meta:
        model = User
        fields = (
            "id",
            "username",
            "first_name",
            "last_name",
            "email",
            "password",
            "user_type",
            "ward",
            "local_body",
            "district",
            "state",
            "phone_number",
            "alt_phone_number",
            "gender",
            "age",
        )

    def create(self, validated_data):
        """Create the user, replacing the raw password with its hash."""
        validated_data["password"] = make_password(validated_data.get("password"))
        return super().create(validated_data)
class UserCreateSerializer(SignUpSerializer):
    """Serializer for admins creating other users.

    Extends SignUpSerializer with optional facility assignment and enforces
    that the creating user (``context["created_by"]``) can neither escalate
    privileges nor create users outside their own jurisdiction.
    """
    # Admins may create users without an explicit password.
    password = serializers.CharField(required=False)
    # External ids of facilities to link the new user to (write-only input).
    facilities = serializers.ListSerializer(
        child=serializers.UUIDField(), required=False, allow_empty=True, write_only=True
    )

    class Meta:
        model = User
        include = ("facilities",)
        exclude = (
            "is_superuser",
            "is_staff",
            "is_active",
            "last_login",
            "date_joined",
            "verified",
            "deleted",
            "groups",
            "user_permissions",
        )

    def validate_facilities(self, facility_ids):
        """Ensure every supplied facility UUID exists; report any missing ones."""
        if facility_ids:
            if len(facility_ids) != Facility.objects.filter(external_id__in=facility_ids).count():
                available_facility_ids = Facility.objects.filter(external_id__in=facility_ids).values_list(
                    "external_id", flat=True
                )
                not_found_ids = list(set(facility_ids) - set(available_facility_ids))
                raise serializers.ValidationError(
                    f"Some facilities are not available - {', '.join([str(_id) for _id in not_found_ids])}"
                )
        return facility_ids

    def validate_ward(self, value):
        """Only superusers or LocalBodyAdmin+ may pick a different ward."""
        if (
            value is not None
            and value != self.context["created_by"].ward
            and not self.context["created_by"].is_superuser
            and not self.context["created_by"].user_type >= User.TYPE_VALUE_MAP["LocalBodyAdmin"]
        ):
            raise serializers.ValidationError("Cannot create for a different Ward")
        return value

    def validate_local_body(self, value):
        """Only superusers or DistrictAdmin+ may pick a different local body."""
        if (
            value is not None
            and value != self.context["created_by"].local_body
            and not self.context["created_by"].is_superuser
            and not self.context["created_by"].user_type >= User.TYPE_VALUE_MAP["DistrictAdmin"]
        ):
            raise serializers.ValidationError("Cannot create for a different local body")
        return value

    def validate_district(self, value):
        """Only superusers or StateAdmin+ may pick a different district."""
        if (
            value is not None
            and value != self.context["created_by"].district
            and not self.context["created_by"].is_superuser
            and not self.context["created_by"].user_type >= User.TYPE_VALUE_MAP["StateAdmin"]
        ):
            # NOTE(review): message looks copy-pasted from validate_state —
            # probably should say "district"; confirm before changing.
            raise serializers.ValidationError("Cannot create for a different state")
        return value

    def validate_state(self, value):
        """Only superusers may pick a different state."""
        if (
            value is not None
            and value != self.context["created_by"].state
            and not self.context["created_by"].is_superuser
        ):
            raise serializers.ValidationError("Cannot create for a different state")
        return value

    def validate(self, attrs):
        """Cross-field checks: privilege escalation and jurisdiction presence."""
        validated = super(UserCreateSerializer, self).validate(attrs)
        if self.context["created_by"].user_type in READ_ONLY_USER_TYPES:
            if validated["user_type"] not in READ_ONLY_USER_TYPES:
                raise exceptions.ValidationError(
                    {"user_type": ["Read only users can create other read only users only"]}
                )
        if (
            validated["user_type"] > self.context["created_by"].user_type
            and not self.context["created_by"].is_superuser
        ):
            raise exceptions.ValidationError(
                {"user_type": ["User cannot create another user with higher permissions"]}
            )
        if (
            not validated.get("ward")
            and not validated.get("local_body")
            and not validated.get("district")
            and not validated.get("state")
        ):
            raise exceptions.ValidationError({"__all__": ["One of ward, local body, district or state is required"]})
        if validated.get("user_type") == User.TYPE_VALUE_MAP["BlockAdmin"]:
            local_body_object = validated.get("local_body")
            # NOTE(review): local_body may be absent here, which would raise
            # AttributeError rather than a validation error — confirm upstream.
            if local_body_object.block is None:
                raise exceptions.ValidationError({"__all__": ["The local_body doesn't have a Block associated with it"]})
        return validated

    def facility_query(self, user):
        """Return the facilities *user* may link, based on their admin level."""
        queryset = Facility.objects.all()
        if user.is_superuser:
            pass
        elif user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]:
            queryset = queryset.filter(state=user.state)
        elif user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
            queryset = queryset.filter(district=user.district)
        elif user.user_type >= User.TYPE_VALUE_MAP["BlockAdmin"]:
            queryset = queryset.filter(block=user.block)
        elif user.user_type >= User.TYPE_VALUE_MAP["LocalBodyAdmin"]:
            queryset = queryset.filter(local_body=user.local_body)
        else:
            queryset = queryset.filter(users__id__exact=user.id)
        return queryset

    def create(self, validated_data):
        """Create the user and link any requested (and permitted) facilities."""
        with transaction.atomic():
            facilities = validated_data.pop("facilities", [])
            user = User.objects.create_user(**{**validated_data, "verified": True})
            # NOTE(review): password is optional (required=False); this lookup
            # raises KeyError when omitted, and set_password without save()
            # does not persist — confirm intent.
            user.set_password(validated_data["password"])
            facility_query = self.facility_query(self.context["created_by"])
            if facilities:
                facility_objs = facility_query.filter(external_id__in=facilities)
                facility_user_objs = [
                    FacilityUser(facility=facility, user=user, created_by=self.context["created_by"])
                    for facility in facility_objs
                ]
                FacilityUser.objects.bulk_create(facility_user_objs)
            return user
class UserSerializer(SignUpSerializer):
    """Read/update serializer for a user, with expanded location objects."""
    user_type = ChoiceField(choices=User.TYPE_CHOICES, read_only=True)
    is_superuser = serializers.BooleanField(read_only=True)
    local_body_object = LocalBodySerializer(source="local_body", read_only=True)
    # NOTE(review): DRF dotted sources normally use ".", e.g. "local_body.block";
    # confirm the "__" form resolves here.
    block_object = BlockSerializer(source="local_body__block", read_only=True)
    district_object = DistrictSerializer(source="district", read_only=True)
    state_object = StateSerializer(source="state", read_only=True)
    alt_phone_number = PhoneNumberIsPossibleField(required=False, allow_blank=True)

    class Meta:
        model = User
        fields = (
            "id",
            "username",
            "first_name",
            "last_name",
            "email",
            "user_type",
            "local_body",
            "block",
            "district",
            "state",
            "phone_number",
            "alt_phone_number",
            "gender",
            "age",
            "is_superuser",
            "verified",
            "local_body_object",
            "block_object",
            "district_object",
            "state_object",
            "pf_endpoint",
            "pf_p256dh",
            "pf_auth",
        )
        # Location, verification and push-notification fields may not be
        # changed through this serializer.
        read_only_fields = (
            "is_superuser",
            "verified",
            "user_type",
            "ward",
            "local_body",
            "block",
            "district",
            "state",
            "pf_endpoint",
            "pf_p256dh",
            "pf_auth",
        )
        extra_kwargs = {"url": {"lookup_field": "username"}}
class UserBaseMinimumSerializer(serializers.ModelSerializer):
    """Minimal user representation for embedding in other payloads."""
    user_type = ChoiceField(choices=User.TYPE_CHOICES, read_only=True)

    class Meta:
        model = User
        fields = (
            "id",
            "first_name",
            "username",
            "email",
            "last_name",
            "user_type",
            "last_login",
        )
class UserListSerializer(serializers.ModelSerializer):
    """List-view serializer with expanded, read-only location objects."""
    local_body_object = LocalBodySerializer(source="local_body", read_only=True)
    # NOTE(review): DRF dotted sources normally use ".", e.g. "local_body.block";
    # confirm the "__" form resolves here.
    block_object = BlockSerializer(source="local_body__block", read_only=True)
    district_object = DistrictSerializer(source="district", read_only=True)
    state_object = StateSerializer(source="state", read_only=True)
    user_type = ChoiceField(choices=User.TYPE_CHOICES, read_only=True)

    class Meta:
        model = User
        fields = (
            "id",
            "first_name",
            "last_name",
            "username",
            "local_body_object",
            "block_object",
            "district_object",
            "state_object",
            "user_type",
            "last_login",
        )
|
import re
import discord
from discord.ext import commands
from discord.ext.commands import clean_content
from Util import Configuration, GearbotLogging, Permissioncheckers, Translator, Utils
INVITE_MATCHER = re.compile(r"(?:https?:\/\/)?(?:www\.)?(?:discord\.(?:gg|io|me|li)|discordapp\.com\/invite)\/([\w|\d|-]+)", flags=re.IGNORECASE)
async def censor(ctx, code, server_name):
    """Delete the offending message and log the censored invite.

    Silently ignores discord.NotFound — another actor may have deleted the
    message first.
    """
    try:
        await ctx.message.delete()
        clean_message = await clean_content().convert(ctx, ctx.message.content)
        clean_name = Utils.clean_user(ctx.message.author)
        # Inner quotes must differ from the f-string delimiter: same-quote
        # nesting is a SyntaxError before Python 3.12.
        await GearbotLogging.log_to(ctx.guild.id, "CENSOR",
                                    f":no_entry_sign: {Translator.translate('censored_invite', ctx.guild.id, user=clean_name, code=code, message=clean_message, server_name=server_name)}")
    except discord.NotFound:
        pass  # we failed? guess we lost the race
class Censor:
    """Cog that deletes messages containing non-whitelisted discord invites."""

    def __init__(self, bot):
        self.bot: commands.Bot = bot

    async def on_message(self, message: discord.Message):
        """Scan guild messages for invite links and censor disallowed ones."""
        # DMs have no guild; nothing to enforce there.
        if not hasattr(message.channel, "guild") or message.channel.guild is None:
            return
        ctx: commands.Context = await self.bot.get_context(message)
        guild = message.guild
        is_mod = Permissioncheckers.get_user_lvl(ctx) >= 2
        # Exempt the bot itself, mods, and explicitly ignored users.
        if message.author == guild.me or is_mod or message.author.id in Configuration.get_var(guild.id, "IGNORED_USERS"):
            return
        guilds = Configuration.get_var(message.guild.id, "INVITE_WHITELIST")
        # BUG FIX: was `len(guilds) is not 0` — identity comparison against an
        # int literal is unreliable and a SyntaxWarning on CPython 3.8+.
        if len(guilds) != 0:
            codes = INVITE_MATCHER.findall(message.content)
            for code in codes:
                try:
                    invite: discord.Invite = await self.bot.get_invite(code)
                except discord.NotFound:
                    pass
                except KeyError:
                    # get_invite raises KeyError for group-DM invites; those
                    # can never be whitelisted, so censor them.
                    await censor(ctx, code, "DM group")
                else:
                    if invite.guild is None:
                        # BUG FIX: previously fell through to invite.guild.name
                        # and raised AttributeError; still censor, with a
                        # placeholder name.
                        await censor(ctx, code, "Unknown")
                    elif invite.guild.id not in guilds and invite.guild.id != guild.id:
                        await censor(ctx, code, invite.guild.name)
def setup(bot):
bot.add_cog(Censor(bot)) | import re
import discord
from discord.ext import commands
from discord.ext.commands import clean_content
from Util import Configuration, GearbotLogging, Permissioncheckers, Translator, Utils
INVITE_MATCHER = re.compile(r"(?:https?:\/\/)?(?:www\.)?(?:discord\.(?:gg|io|me|li)|discordapp\.com\/invite)\/([\w|\d|-]+)", flags=re.IGNORECASE)
async def censor(ctx, code, server_name):
    """Delete the offending message and log the censored invite.

    Silently ignores discord.NotFound — another actor may have deleted the
    message first.
    """
    try:
        await ctx.message.delete()
        clean_message = await clean_content().convert(ctx, ctx.message.content)
        clean_name = Utils.clean_user(ctx.message.author)
        await GearbotLogging.log_to(ctx.guild.id, "CENSOR",
                                    f":no_entry_sign: {Translator.translate('censored_invite', ctx.guild.id, user=clean_name, code=code, message=clean_message, server_name=server_name)}")
    except discord.NotFound:
        pass  # we failed? guess we lost the race
class Censor:
    """Cog that deletes messages containing non-whitelisted discord invites."""

    def __init__(self, bot):
        self.bot: commands.Bot = bot

    async def on_message(self, message: discord.Message):
        """Scan guild messages for invite links and censor disallowed ones."""
        # DMs have no guild; nothing to enforce there.
        if not hasattr(message.channel, "guild") or message.channel.guild is None:
            return
        ctx: commands.Context = await self.bot.get_context(message)
        guild = message.guild
        is_mod = Permissioncheckers.get_user_lvl(ctx) >= 2
        if message.author == guild.me or is_mod or message.author.id in Configuration.get_var(guild.id, "IGNORED_USERS"):
            return
        guilds = Configuration.get_var(message.guild.id, "INVITE_WHITELIST")
        # BUG FIX: was `len(guilds) is not 0` — identity comparison against an
        # int literal is unreliable and a SyntaxWarning on CPython 3.8+.
        if len(guilds) != 0:
            codes = INVITE_MATCHER.findall(message.content)
            for code in codes:
                try:
                    invite: discord.Invite = await self.bot.get_invite(code)
                except discord.NotFound:
                    pass
                except KeyError:
                    # get_invite raises KeyError for group-DM invites.
                    await censor(ctx, code, "DM group")
                else:
                    # NOTE(review): invite.guild may be None here, which would
                    # raise AttributeError on .name below — confirm.
                    if invite.guild is None or (invite.guild.id not in guilds and invite.guild.id != guild.id):
                        await censor(ctx, code, invite.guild.name)
def setup(bot):
    """Entry point used by discord.py to load this cog."""
    bot.add_cog(Censor(bot))
from pathlib import Path
import tvm
from tvm import autotvm
from tvm import relay
from tvm.autotvm.tuner import GATuner
from tvm.autotvm.tuner import GridSearchTuner
from tvm.autotvm.tuner import RandomTuner
from tvm.autotvm.tuner import XGBTuner
from rl_tuner.ga_dqn_tuner import GADQNTuner
from rl_tuner.ga_dqn_tuner_debug import GADQNTuner as GADQNTunerDebug
from .get_model import get_model
def tune_model(mod, params, tune_settings, target, model_name):
    """
    Tune a model for a specified number of trials along with other tune settings.
    Tune settings are specified using a json configuration, as per the TVM tools readme.
    """
    early_stopping = tune_settings['early_stopping']
    number = tune_settings["number"]
    save_path = tune_settings["save_path"]
    save_name = tune_settings["save_name"]
    repeat = tune_settings["repeat"]
    # Debug flag only affects the ga-dqn tuner (see branch below).
    debug = tune_settings.get("debug_gadqn") or False
    trials = tune_settings["trials"]
    tuner = tune_settings["tuner"]
    target = tvm.target.Target(target)
    # One tunable task is extracted per tunable operator in the program.
    tasks = autotvm.task.extract_from_program(
        mod["main"],
        target=target,
        target_host="llvm",
        params=params)
    runner = autotvm.LocalRunner(
        number=number,
        repeat=repeat)
    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(build_func="default"), runner=runner)
    for i, tsk in enumerate(tasks):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        # Create a tuner matching the requested strategy.
        if tuner in ("xgb", "xgb-rank"):
            tuner_obj = XGBTuner(tsk, loss_type="rank")
        elif tuner == "xgb_knob":
            tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob")
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=50)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        elif tuner == "ga-dqn" and debug:
            tuner_obj = GADQNTunerDebug(tsk)
        elif tuner == "ga-dqn":
            tuner_obj = GADQNTuner(tsk)
        else:
            raise ValueError("invalid tuner: %s " % tuner)
        # Tuning records are appended to a JSON file under save_path/save_name.
        abs_path = Path(save_path + save_name).resolve()
        abs_path.mkdir(exist_ok=True, parents=True)
        abs_path_str = str(abs_path)
        # Cap the trial count at the size of the task's config space.
        tuner_obj.tune(
            n_trial=min(trials, len(tsk.config_space)),
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(trials, prefix=prefix),
                autotvm.callback.log_to_file(abs_path_str + f"/tuning_record_model={model_name}.json"),
            ],
        )
        # Save debug info for rl tuner only
        if tuner == "ga-dqn" and debug:
            tuner_obj.save_model(save_path, save_name + f"_model={model_name}_layer={i}")
        del tuner_obj
def tune_models(data):
    """
    Auto tune all models referenced in the json configuration.

    For each model entry, loads a traced PyTorch model, converts it to a
    Relay module and hands it to tune_model().
    """
    target_string = data['target']
    tune_settings = data['autotuner_settings']
    for model in data['models']:
        trace, input_shapes = get_model(model['name'], model['type'])
        mod, params = relay.frontend.from_pytorch(trace, input_shapes)
        # Inner quotes must differ from the f-string delimiter: same-quote
        # nesting is a SyntaxError before Python 3.12.
        print(f"Tuning model {model['name']}, using strategy {tune_settings['tuner']}")
        tune_model(mod, params, tune_settings, target_string, model['name'])
| from pathlib import Path
import tvm
from tvm import autotvm
from tvm import relay
from tvm.autotvm.tuner import GATuner
from tvm.autotvm.tuner import GridSearchTuner
from tvm.autotvm.tuner import RandomTuner
from tvm.autotvm.tuner import XGBTuner
from rl_tuner.ga_dqn_tuner import GADQNTuner
from rl_tuner.ga_dqn_tuner_debug import GADQNTuner as GADQNTunerDebug
from .get_model import get_model
def tune_model(mod, params, tune_settings, target, model_name):
    """
    Tune a model for a specified number of trials along with other tune settings.
    Tune settings are specified using a json configuration, as per the TVM tools readme.
    """
    early_stopping = tune_settings['early_stopping']
    number = tune_settings["number"]
    save_path = tune_settings["save_path"]
    save_name = tune_settings["save_name"]
    repeat = tune_settings["repeat"]
    # Debug flag only affects the ga-dqn tuner (see branch below).
    debug = tune_settings.get("debug_gadqn") or False
    trials = tune_settings["trials"]
    tuner = tune_settings["tuner"]
    target = tvm.target.Target(target)
    # One tunable task is extracted per tunable operator in the program.
    tasks = autotvm.task.extract_from_program(
        mod["main"],
        target=target,
        target_host="llvm",
        params=params)
    runner = autotvm.LocalRunner(
        number=number,
        repeat=repeat)
    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(build_func="default"), runner=runner)
    for i, tsk in enumerate(tasks):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        # Create a tuner matching the requested strategy.
        if tuner in ("xgb", "xgb-rank"):
            tuner_obj = XGBTuner(tsk, loss_type="rank")
        elif tuner == "xgb_knob":
            tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob")
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=50)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        elif tuner == "ga-dqn" and debug:
            tuner_obj = GADQNTunerDebug(tsk)
        elif tuner == "ga-dqn":
            tuner_obj = GADQNTuner(tsk)
        else:
            raise ValueError("invalid tuner: %s " % tuner)
        # Tuning records are appended to a JSON file under save_path/save_name.
        abs_path = Path(save_path + save_name).resolve()
        abs_path.mkdir(exist_ok=True, parents=True)
        abs_path_str = str(abs_path)
        # Cap the trial count at the size of the task's config space.
        tuner_obj.tune(
            n_trial=min(trials, len(tsk.config_space)),
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(trials, prefix=prefix),
                autotvm.callback.log_to_file(abs_path_str + f"/tuning_record_model={model_name}.json"),
            ],
        )
        # Save debug info for rl tuner only
        if tuner == "ga-dqn" and debug:
            tuner_obj.save_model(save_path, save_name + f"_model={model_name}_layer={i}")
        del tuner_obj
def tune_models(data):
    """
    Auto tune all models referenced in the json configuration.

    For each model entry, loads a traced PyTorch model, converts it to a
    Relay module and hands it to tune_model().
    """
    target_string = data['target']
    tune_settings = data['autotuner_settings']
    for model in data['models']:
        trace, input_shapes = get_model(model['name'], model['type'])
        mod, params = relay.frontend.from_pytorch(trace, input_shapes)
        print(f"Tuning model {model['name']}, using strategy {tune_settings['tuner']}")
        tune_model(mod, params, tune_settings, target_string, model['name'])
|
#!/usr/bin/env python3
#
# Extract a CSV of findings for a particular bucket
#
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import csv
from time import sleep
from datetime import datetime
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
CSV_HEADER = ['AccountId', 'BucketName', 'Region', 'FileExtension', 'Severity', 'FindingType',
'FindingCount', 'Details', 'ObjectKey', 'S3Path', 'URLPath', 'FindingConsoleURL', 'Finding Creation Date', 'Object-level Public ACL']
def _build_finding_criteria(args):
    """Translate CLI filters into a Macie2 findingCriteria dict."""
    findingCriteria = {'criterion': {'category': {'eq': ['CLASSIFICATION']}}}
    if args.bucket:
        findingCriteria['criterion']['resourcesAffected.s3Bucket.name'] = {'eq': [args.bucket]}
    if args.severity == "High":
        findingCriteria['criterion']['severity.description'] = {'eq': ["High"]}
    elif args.severity == "Medium":
        findingCriteria['criterion']['severity.description'] = {'eq': ["High", "Medium"]}
    # "Low" (or unset) means no severity filter at all.
    if args.since:
        end_time = datetime.now()
        start_time = datetime.strptime(args.since, "%Y-%m-%d")
        # Macie expects epoch milliseconds.
        findingCriteria['criterion']['createdAt'] = {
            'gte': int(start_time.timestamp()) * 1000,
            'lte': int(end_time.timestamp()) * 1000,
        }
    return findingCriteria


def _write_finding_rows(writer, findings, region, results):
    """Write one CSV row per finding detail dict and bump severity counters."""
    for f in findings:
        bucket_name = f['resourcesAffected']['s3Bucket']['name']
        key = f['resourcesAffected']['s3Object']['key']
        summary, count = get_summary(f)
        obj_publicAccess = f['resourcesAffected']['s3Object'].get('publicAccess', "Unknown")
        # Note: "&macros" was previously mangled to "\xafos" by an HTML-entity
        # round-trip ("&macr" -> macron); restored here. Inner quotes differ
        # from the f-string delimiter (same-quote nesting needs Python 3.12+).
        finding_url = (
            f"https://{region}.console.aws.amazon.com/macie/home?region={region}"
            f"#findings?search=resourcesAffected.s3Bucket.name%3D{bucket_name}"
            f"&macros=current&itemId={f['id']}"
        )
        writer.writerow([
            f['accountId'], bucket_name, region,
            f['resourcesAffected']['s3Object']['extension'],
            f['severity']['description'], f['type'],
            count, summary, key,
            f"s3://{bucket_name}/{key}",
            f"https://{bucket_name}.s3.amazonaws.com/{key}",
            finding_url,
            f['createdAt'], obj_publicAccess,
        ])
        results[f['severity']['description']] += 1


def main(args, logger):
    """Export Macie classification findings to CSV.

    Iterates the selected regions, lists findings matching the CLI filters,
    fetches their details (Macie requires list-then-get) and writes one CSV
    row per finding. Prints a severity summary at the end.
    """
    # Macie is regional even though buckets aren't, so iterate all regions
    # unless one was given explicitly.
    if args.region:
        regions = [args.region]
    else:
        regions = get_regions()

    # Severity counters for the summary line.
    results = {"Low": 0, "Medium": 0, "High": 0}

    with open(args.filename, 'w') as csvoutfile:
        writer = csv.writer(csvoutfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
        writer.writerow(CSV_HEADER)

        for r in regions:
            macie_client = boto3.client('macie2', region_name=r)
            findingCriteria = _build_finding_criteria(args)
            logger.debug(f"findingCriteria: {json.dumps(findingCriteria, indent=2)}")

            # Macie makes us list finding ids first, then fetch details with
            # get_findings(). Page through the list API; the duplicated
            # first-page/next-page loops are now unified.
            next_token = None
            while True:
                kwargs = dict(findingCriteria=findingCriteria, maxResults=40)
                if next_token:
                    sleep(0.5)  # be gentle on the API while paginating
                    kwargs['nextToken'] = next_token
                list_response = macie_client.list_findings(**kwargs)
                findings = list_response['findingIds']
                logger.debug(f"Found {len(findings)} findings in {r}")
                if not findings:
                    break
                get_response = macie_client.get_findings(findingIds=findings)
                _write_finding_rows(writer, get_response['findings'], r, results)
                next_token = list_response.get('nextToken')
                if not next_token:
                    break

    # The with-block above already closed the file; the old explicit
    # close() was redundant.
    print(f"Exported High: {results['High']} Medium: {results['Medium']} Low: {results['Low']} ")
def get_summary(finding):
    """Summarize a Macie finding's sensitive-data detections.

    Returns a tuple of (newline-joined "category: count" lines, total count).
    """
    summary = []
    count = 0
    for data_type in finding['classificationDetails']['result']['sensitiveData']:
        # Keys use single quotes inside the double-quoted f-string:
        # same-quote nesting is a SyntaxError before Python 3.12.
        summary.append(f"{data_type['category']}: {data_type['totalCount']}")
        count += data_type['totalCount']
    return ("\n".join(summary), count)
def get_regions():
    """Return the regions this account is active in, with us-east-1 first."""
    ec2 = boto3.client('ec2')
    region_names = [region['RegionName'] for region in ec2.describe_regions()['Regions']]
    # Pin us-east-1 to the front; keep the rest in API order.
    return ['us-east-1'] + [name for name in region_names if name != "us-east-1"]
def do_args():
    """Parse and return the command-line arguments for the exporter."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", help="print debugging info", action='store_true')
    parser.add_argument("--error", help="print error info only", action='store_true')
    parser.add_argument("--region", help="Only Process this region")
    parser.add_argument("--bucket", help="Only price out this bucket")
    parser.add_argument("--filename", help="Save to filename", required=True)
    parser.add_argument("--since", help="Only output findings after this date - specified as YYYY-MM-DD")
    # "and higher": Medium implies High+Medium; Low disables the filter.
    parser.add_argument("--severity", help="Filter on this severity and higher",
                        choices=['High', 'Medium', 'Low'], default='Medium')
    args = parser.parse_args()
    return(args)
if __name__ == '__main__':
    args = do_args()

    # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
    # create console handler and set level to debug
    ch = logging.StreamHandler()
    if args.error:
        logger.setLevel(logging.ERROR)
    elif args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    # create formatter
    # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
    # add formatter to ch
    ch.setFormatter(formatter)
    # add ch to logger
    logger.addHandler(ch)

    # # Sanity check region
    # if args.region:
    #     os.environ['AWS_DEFAULT_REGION'] = args.region
    # if 'AWS_DEFAULT_REGION' not in os.environ:
    #     logger.error("AWS_DEFAULT_REGION Not set. Aborting...")
    #     exit(1)

    try:
        main(args, logger)
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C.
        exit(1)
| #!/usr/bin/env python3
#
# Extract a CSV of findings for a particular bucket
#
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import csv
from time import sleep
from datetime import datetime
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
CSV_HEADER = ['AccountId', 'BucketName', 'Region', 'FileExtension', 'Severity', 'FindingType',
'FindingCount', 'Details', 'ObjectKey', 'S3Path', 'URLPath', 'FindingConsoleURL', 'Finding Creation Date', 'Object-level Public ACL']
def main(args, logger):
    """Export Amazon Macie classification findings to a CSV file.

    Macie is regional even though buckets aren't, so we iterate across all
    active regions (unless --region narrows it to one) and write every
    matching finding as one row of ``args.filename``.
    """
    if args.region:
        regions = [args.region]
    else:
        regions = get_regions()

    # Tally of exported findings per severity, printed at the end.
    results = {
        "Low": 0,
        "Medium": 0,
        "High": 0
    }

    with open(args.filename, 'w') as csvoutfile:
        writer = csv.writer(csvoutfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
        writer.writerow(CSV_HEADER)

        for r in regions:
            macie_client = boto3.client('macie2', region_name=r)

            # Build a findings criteria dictionary to pass to Macie2
            findingCriteria = {'criterion': {'category': {'eq': ['CLASSIFICATION']}}}
            if args.bucket:
                findingCriteria['criterion']['resourcesAffected.s3Bucket.name'] = {'eq': [args.bucket]}

            # "This severity and higher": High -> [High], Medium -> [High, Medium],
            # Low -> no severity filter at all (everything matches).
            if args.severity == "High":
                findingCriteria['criterion']['severity.description'] = {'eq': ["High"]}
            elif args.severity == "Medium":
                findingCriteria['criterion']['severity.description'] = {'eq': ["High", "Medium"]}

            if args.since:
                end_time = datetime.now()
                start_time = datetime.strptime(args.since, "%Y-%m-%d")
                # Macie expects epoch milliseconds.
                findingCriteria['criterion']['createdAt'] = {
                    'gte': int(start_time.timestamp()) * 1000,
                    'lte': int(end_time.timestamp()) * 1000
                }

            logger.debug(f"findingCriteria: {json.dumps(findingCriteria, indent=2)}")

            # Macie makes us list finding ids first, then pass the id list to
            # get_findings() to obtain any useful detail.
            list_response = macie_client.list_findings(
                findingCriteria=findingCriteria,
                maxResults=40
            )
            findings = list_response['findingIds']
            logger.debug(f"Found {len(findings)} findings in {r}")
            if len(findings) == 0:
                # No findings in this region, move along
                continue
            _export_findings(macie_client, writer, findings, r, results)

            # Paginate through the remaining findings in this region.
            while 'nextToken' in list_response:
                sleep(0.5)  # stay under the Macie API rate limit
                list_response = macie_client.list_findings(
                    findingCriteria=findingCriteria,
                    maxResults=40,
                    nextToken=list_response['nextToken']
                )
                findings = list_response['findingIds']
                logger.debug(f"Found {len(findings)} more findings in {r}")
                _export_findings(macie_client, writer, findings, r, results)

        print(f"Exported High: {results['High']} Medium: {results['Medium']} Low: {results['Low']} ")


def _export_findings(macie_client, writer, finding_ids, region, results):
    """Fetch details for ``finding_ids`` and write one CSV row per finding.

    Increments ``results[severity]`` for every row written. Extracted so the
    first page and the pagination loop share one implementation.
    """
    get_response = macie_client.get_findings(findingIds=finding_ids)
    for f in get_response['findings']:
        bucket_name = f['resourcesAffected']['s3Bucket']['name']
        key = f['resourcesAffected']['s3Object']['key']
        summary, count = get_summary(f)
        obj_publicAccess = "Unknown"
        if 'publicAccess' in f['resourcesAffected']['s3Object']:
            obj_publicAccess = f['resourcesAffected']['s3Object']['publicAccess']
        writer.writerow([f['accountId'], bucket_name, region,
                         f['resourcesAffected']['s3Object']['extension'],
                         f['severity']['description'], f['type'],
                         count, summary, key,
                         f"s3://{bucket_name}/{key}",
                         f"https://{bucket_name}.s3.amazonaws.com/{key}",
                         # BUG FIX: "&macros=current" had been mangled into
                         # "¯os=current" ("&macr" -> U+00AF via HTML-entity
                         # corruption), producing a broken console URL.
                         f"https://{region}.console.aws.amazon.com/macie/home?region={region}#findings?search=resourcesAffected.s3Bucket.name%3D{bucket_name}&macros=current&itemId={f['id']}",
                         f['createdAt'], obj_publicAccess
                         ])
        results[f['severity']['description']] += 1
def get_summary(finding):
    """Summarize the sensitive-data categories of a Macie finding.

    Returns a tuple of (newline-separated "category: totalCount" text,
    total number of sensitive items found).
    """
    entries = finding['classificationDetails']['result']['sensitiveData']
    lines = [f"{entry['category']}: {entry['totalCount']}" for entry in entries]
    total = sum(entry['totalCount'] for entry in entries)
    return ("\n".join(lines), total)
def get_regions():
    """Return an array of the regions this account is active in. Ordered with us-east-1 in the front."""
    ec2 = boto3.client('ec2')
    response = ec2.describe_regions()
    names = [region['RegionName'] for region in response['Regions']]
    # us-east-1 always leads; avoid listing it twice.
    return ['us-east-1'] + [name for name in names if name != 'us-east-1']
def do_args():
    """Build the command-line parser and return the parsed arguments."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", help="print debugging info", action='store_true')
    parser.add_argument("--error", help="print error info only", action='store_true')
    parser.add_argument("--region", help="Only Process this region")
    parser.add_argument("--bucket", help="Only price out this bucket")
    parser.add_argument("--filename", help="Save to filename", required=True)
    parser.add_argument("--since", help="Only output findings after this date - specified as YYYY-MM-DD")
    parser.add_argument("--severity", help="Filter on this severity and higher",
                        choices=['High', 'Medium', 'Low'], default='Medium')
    return parser.parse_args()
# Script entry point: parse CLI args, wire up console logging, then run main().
if __name__ == '__main__':
    args = do_args()
    # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
    # create console handler and set level to debug
    ch = logging.StreamHandler()
    # --error wins over --debug; default verbosity is INFO.
    if args.error:
        logger.setLevel(logging.ERROR)
    elif args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    # create formatter
    # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
    # add formatter to ch
    ch.setFormatter(formatter)
    # add ch to logger
    logger.addHandler(ch)
    # # Sanity check region
    # if args.region:
    #     os.environ['AWS_DEFAULT_REGION'] = args.region
    # if 'AWS_DEFAULT_REGION' not in os.environ:
    #     logger.error("AWS_DEFAULT_REGION Not set. Aborting...")
    #     exit(1)
    try:
        main(args, logger)
    except KeyboardInterrupt:
        # Exit quietly (non-zero) on Ctrl-C instead of dumping a traceback.
        exit(1)
|
import dataclasses
import re
import textwrap
from typing import Optional, Iterable, List, Match, Pattern, Tuple, Type, TypeVar, Union
def add_line_prefix(s: str, prefix: str, /, empty_lines=False) -> str:
    """Prepend `prefix` to each line of `s`.

    By default textwrap.indent skips whitespace-only lines; pass
    `empty_lines=True` to prefix those too.
    """
    predicate = (lambda _line: True) if empty_lines else None
    return textwrap.indent(s, prefix, predicate=predicate)
def add_indent(s: str, levels=1) -> str:
    """Indent every non-blank line of `s` by `levels` levels of four spaces."""
    return add_line_prefix(s, " " * (4 * levels))
def remove_indent(s: str) -> str:
    """Strip the common leading whitespace from all lines of `s`."""
    return textwrap.dedent(s)
def split_trim(s: str, delim: Optional[str]) -> List[str]:
    """Split `s` on `delim` and strip surrounding whitespace from each part."""
    return [part.strip() for part in s.split(delim)]
def join_nonempty_lines(lines: Iterable[Optional[str]]) -> str:
    """Join `lines` with newlines, dropping None, empty and blank entries."""
    stripped = (line.strip() for line in lines if line)
    return "\n".join(line for line in stripped if line)
def read_until_closing(s: str, open: str, close: str) -> Tuple[str, str, str]:
    """Scan `s` for the `close` delimiter balancing one already-open `open`
    delimiter, honouring nesting.

    Returns (text before the closer, the closer itself, text after it).
    Raises ValueError if the balancing closer is missing.
    """
    open_re = re.compile(open)
    either_re = re.compile(f"(?:{open_re.pattern})|(?:{close})")
    depth = 1
    pos = 0
    while True:
        hit = either_re.search(s, pos)
        if hit is None:
            raise ValueError(f"missing closing bracket (expected {depth} closing)")
        pos = hit.end()
        depth += 1 if open_re.match(hit[0]) else -1
        if depth == 0:
            return s[:hit.start()], s[hit.start():pos], s[pos:]
def read_until_closing_bracket(
    s: str, *, skip_non_content_after=True
) -> Tuple[str, str]:
    """Read up to the `}` that matches an already-open `{`.

    Returns (content before the brace, remainder after it); the remainder
    optionally has leading comments/blank lines stripped.
    """
    before, _brace, after = read_until_closing(s, r"\{", r"\}")
    return before, skip_non_content(after) if skip_non_content_after else after
def build_wasm_bindgen_attr(
    *args: Union[str, None], **kwargs: Union[str, Iterable[str], None]
) -> str:
    """Render a Rust `#[wasm_bindgen(...)]` attribute string.

    Positional entries are used verbatim; keyword entries become
    `key = value` pairs (one pair per value when the value is an iterable).
    Falsy entries are dropped.
    """
    parts = [a for a in args if a]
    for key, value in kwargs.items():
        if not value:
            continue
        if isinstance(value, str):
            parts.append(f"{key} = {value}")
        else:
            parts.extend(f"{key} = {v}" for v in value)
    # Join outside the f-string: reusing the same quote type inside an
    # f-string is only valid syntax from Python 3.12 (PEP 701) onwards.
    joined = ", ".join(parts)
    return f"#[wasm_bindgen({joined})]"
# One leading `// ...` comment line, with optional indentation.
_PATTERN_COMMENT = re.compile(r"^ *\/\/.*\n")


def consume_comments(s: str) -> str:
    """Strip any run of `//` comment lines from the start of `s`."""
    while True:
        hit = _PATTERN_COMMENT.match(s)
        if hit is None:
            return s
        s = s[hit.end():]
# One leading blank (whitespace-only) line; also matches trailing spaces at EOF.
_PATTERN_EMPTY_LINES = re.compile(r"^ *(?:\n|$)")


def consume_empty_lines(s: str) -> str:
    """Strip blank lines from the start of `s`."""
    # The pattern can match the empty string, so stop as soon as `s` is empty
    # to avoid looping forever.
    while s:
        hit = _PATTERN_EMPTY_LINES.match(s)
        if hit is None:
            break
        s = s[hit.end():]
    return s
def skip_non_content(s: str) -> str:
    """Strip leading blank lines and `//` comments until a fixed point."""
    while True:
        stripped = consume_comments(consume_empty_lines(s))
        if stripped == s:
            return s
        s = stripped
@dataclasses.dataclass()
class MatchError(Exception):
    """Raised when input `s` doesn't satisfy a parser's expectations.

    Attributes:
        s: the input that failed to parse.
        pattern: the regex that didn't match, if any.
        info: human-readable description of what was expected, if any.
    """

    s: str
    pattern: Optional[Pattern] = None
    info: Optional[str] = None

    def __str__(self) -> str:
        s = self.preview_s()
        if info := self.info:
            return f"failed to parse: {info}:\n{s}"
        elif pattern := self.pattern:
            return f"didn't match pattern: `{pattern.pattern}`:\n{s}"
        else:
            # BUG FIX: previously `return "{s}"` (missing f-prefix) returned
            # the literal text "{s}" instead of the input preview.
            return s

    def preview_s(self) -> str:
        """Return `s` framed by horizontal rules, truncated to 8 lines."""
        lines = self.s.splitlines()
        if len(lines) > 8:
            lines = lines[:8]
            lines.append("... TRUNCATED")
        s = "\n".join(lines)
        hor_line = 80 * "="
        return f"{hor_line}\n{s}\n{hor_line}"
def consume_match(
    pattern: Pattern, s: str, *, skip_non_content_after=True, info: str = None,
) -> Tuple[Match, str]:
    """Match `pattern` at the start of `s` and return (match, remainder).

    Raises MatchError (carrying `info`) when the pattern doesn't match.
    The remainder optionally has leading comments/blank lines stripped.
    """
    match = pattern.match(s)
    if match is None:
        raise MatchError(s=s, pattern=pattern, info=info)
    rest = s[match.end():]
    return match, skip_non_content(rest) if skip_non_content_after else rest
T = TypeVar("T")


def consume_first(s: str, *consumers: Type[T], args=None) -> Tuple[T, str]:
    """Try each consumer's `consume(s, *args)` in order; return the first success.

    Failed attempts are chained via __context__ so the final MatchError
    carries the full failure history.
    """
    assert consumers, "need at least one consumer"
    extra = () if args is None else args
    last_error = None
    for consumer in consumers:
        try:
            return consumer.consume(s, *extra)
        except MatchError as e:
            e.__context__ = last_error
            last_error = e
    raise MatchError(
        s=s, info=" | ".join(f"`{consumer.__qualname__}`" for consumer in consumers)
    ) from last_error
class ModSet:
    """A consumable set of modifier keywords.

    Callers `pop` the modifiers they understand and finally call
    `assert_empty` to make sure nothing was silently ignored.
    """

    def __init__(self, mods: Iterable[str]) -> None:
        self._mods = set(mods)

    @classmethod
    def create(cls, s: str):
        """Build a ModSet from a whitespace-separated string."""
        return cls(s.split())

    def pop(self, mod: str) -> bool:
        """Remove `mod` from the set; return whether it was present."""
        if mod in self._mods:
            self._mods.remove(mod)
            return True
        return False

    def assert_empty(self):
        """Raise ValueError if any modifiers were never popped."""
        if self._mods:
            raise ValueError(f"unhandled modifiers: {self._mods}")
| import dataclasses
import re
import textwrap
from typing import Optional, Iterable, List, Match, Pattern, Tuple, Type, TypeVar, Union
def add_line_prefix(s: str, prefix: str, /, empty_lines=False) -> str:
if empty_lines:
predicate = lambda line: True
else:
predicate = None
return textwrap.indent(s, prefix, predicate=predicate)
def add_indent(s: str, levels=1) -> str:
level_prefix = 4 * " "
return add_line_prefix(s, levels * level_prefix)
def remove_indent(s: str) -> str:
return textwrap.dedent(s)
def split_trim(s: str, delim: Optional[str]) -> List[str]:
return [s.strip() for s in s.split(delim)]
def join_nonempty_lines(lines: Iterable[Optional[str]]) -> str:
return "\n".join(filter(None, (line.strip() for line in lines if line)))
def read_until_closing(s: str, open: str, close: str) -> Tuple[str, str, str]:
open_pattern = re.compile(open)
pattern = re.compile(f"(?:{open_pattern.pattern})|(?:{close})")
start_pos = end_pos = 0
depth = 1
while depth:
match = pattern.search(s, end_pos)
if not match:
raise ValueError(f"missing closing bracket (expected {depth} closing)")
start_pos, end_pos = match.start(), match.end()
if open_pattern.match(match[0]):
depth += 1
else:
depth -= 1
return s[:start_pos], s[start_pos:end_pos], s[end_pos:]
def read_until_closing_bracket(
s: str, *, skip_non_content_after=True
) -> Tuple[str, str]:
(a, _, b) = read_until_closing(s, r"\{", r"\}")
if skip_non_content_after:
b = skip_non_content(b)
return a, b
def build_wasm_bindgen_attr(
*args: Union[str, None], **kwargs: Union[str, Iterable[str], None]
) -> str:
args = list(args)
for key, value in kwargs.items():
if not value:
continue
if isinstance(value, str):
args.append(f"{key} = {value}")
else:
args.extend(f"{key} = {v}" for v in value)
return f"#[wasm_bindgen({', '.join(filter(None, args))})]"
_PATTERN_COMMENT = re.compile(r"^ *\/\/.*\n")
def consume_comments(s: str) -> str:
while match := _PATTERN_COMMENT.match(s):
s = s[match.end() :]
return s
_PATTERN_EMPTY_LINES = re.compile(r"^ *(?:\n|$)")
def consume_empty_lines(s: str) -> str:
while match := _PATTERN_EMPTY_LINES.match(s):
s = s[match.end() :]
if not s:
break
return s
def skip_non_content(s: str) -> str:
while True:
new = consume_comments(consume_empty_lines(s))
if new == s:
break
s = new
return s
@dataclasses.dataclass()
class MatchError(Exception):
s: str
pattern: Optional[Pattern] = None
info: Optional[str] = None
def __str__(self) -> str:
s = self.preview_s()
if info := self.info:
return f"failed to parse: {info}:\n{s}"
elif pattern := self.pattern:
return f"didn't match pattern: `{pattern.pattern}`:\n{s}"
else:
return "{s}"
def preview_s(self) -> str:
lines = self.s.splitlines()
if len(lines) > 8:
lines = lines[:8]
lines.append("... TRUNCATED")
s = "\n".join(lines)
hor_line = 80 * "="
return f"{hor_line}\n{s}\n{hor_line}"
def consume_match(
pattern: Pattern, s: str, *, skip_non_content_after=True, info: str = None,
) -> Tuple[Match, str]:
if match := pattern.match(s):
remainder = s[match.end() :]
if skip_non_content_after:
remainder = skip_non_content(remainder)
return match, remainder
raise MatchError(s=s, pattern=pattern, info=info)
T = TypeVar("T")
def consume_first(s: str, *consumers: Type[T], args=None) -> Tuple[T, str]:
assert consumers, "need at least one consumer"
if args is None:
args = ()
error = None
for consumer in consumers:
try:
return consumer.consume(s, *args)
except MatchError as e:
e.__context__ = error
error = e
raise MatchError(
s=s, info=" | ".join(f"`{consumer.__qualname__}`" for consumer in consumers)
) from error
class ModSet:
def __init__(self, mods: Iterable[str]) -> None:
self._mods = set(mods)
@classmethod
def create(cls, s: str):
return cls(s.split())
def pop(self, mod: str) -> bool:
try:
self._mods.remove(mod)
except KeyError:
return False
else:
return True
def assert_empty(self):
if self._mods:
raise ValueError(f"unhandled modifiers: {self._mods}")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import json
import os
from pymisp import ExpandedPyMISP
from settings import url, key, ssl, outputdir, filters, valid_attribute_distribution_levels
try:
from settings import with_distribution
except ImportError:
with_distribution = False
try:
from settings import include_deleted
except ImportError:
include_deleted = False
try:
from settings import exclude_attribute_types
except ImportError:
exclude_attribute_types = []
valid_attribute_distributions = []
def init():
    """Populate the attribute-distribution whitelist and return a MISP client.

    Reads url/key/ssl and valid_attribute_distribution_levels from settings.py.
    """
    # If we have an old settings.py file then this variable won't exist;
    # in that case fall back to allowing every distribution level (0-5).
    global valid_attribute_distributions
    try:
        valid_attribute_distributions = [int(v) for v in valid_attribute_distribution_levels]
    except Exception:
        valid_attribute_distributions = [0, 1, 2, 3, 4, 5]
    return ExpandedPyMISP(url, key, ssl)
def saveEvent(event):
    """Write a single event feed dict to <outputdir>/<event uuid>.json.

    Exits the process with an error message on any failure.
    """
    try:
        # Use double quotes inside the f-string: reusing the outer quote
        # type is only valid syntax from Python 3.12 onwards.
        with open(os.path.join(outputdir, f'{event["Event"]["uuid"]}.json'), 'w') as f:
            json.dump(event, f, indent=2)
    except Exception as e:
        print(e)
        sys.exit('Could not create the event dump.')
def saveHashes(hashes):
    """Write one "hash,event-uuid" line per entry to <outputdir>/hashes.csv.

    Exits the process with an error message on any failure.
    """
    try:
        with open(os.path.join(outputdir, 'hashes.csv'), 'w') as hashFile:
            for hash_value, event_uuid in hashes:
                hashFile.write('{},{}\n'.format(hash_value, event_uuid))
    except Exception as e:
        print(e)
        sys.exit('Could not create the quick hash lookup file.')
def saveManifest(manifest):
    """Write the feed manifest to <outputdir>/manifest.json.

    Exits the process with an error message on any failure.
    """
    try:
        # Use a context manager (matching saveEvent/saveHashes) so the file
        # handle is not leaked when serialization or the write fails.
        with open(os.path.join(outputdir, 'manifest.json'), 'w') as manifestFile:
            manifestFile.write(json.dumps(manifest))
    except Exception as e:
        print(e)
        sys.exit('Could not create the manifest file.')
# Script entry point: pull every matching event from MISP and write the
# feed files (per-event JSON, manifest.json, hashes.csv) to outputdir.
if __name__ == '__main__':
    misp = init()
    try:
        events = misp.search_index(minimal=True, **filters, pythonify=False)
    except Exception as e:
        print(e)
        sys.exit("Invalid response received from MISP.")
    if len(events) == 0:
        sys.exit("No events returned.")
    manifest = {}
    hashes = []
    counter = 1
    total = len(events)
    for event in events:
        try:
            e = misp.get_event(event['uuid'], deleted=include_deleted, pythonify=True)
            if exclude_attribute_types:
                # BUG FIX: the previous forward `enumerate` + `pop(i)` loop
                # skipped the attribute that slid into the vacated slot after
                # each removal. Iterating the indices in reverse removes every
                # excluded attribute.
                for i in range(len(e.attributes) - 1, -1, -1):
                    if e.attributes[i].type in exclude_attribute_types:
                        e.attributes.pop(i)
            e_feed = e.to_feed(valid_distributions=valid_attribute_distributions, with_meta=True, with_distribution=with_distribution)
        except Exception as err:
            print(err, event['uuid'])
            continue
        if not e_feed:
            print(f'Invalid distribution {e.distribution}, skipping')
            continue
        # _hashes/_manifest are bookkeeping entries that must not land in the
        # exported event file, hence pop().
        hashes += [[h, e.uuid] for h in e_feed['Event'].pop('_hashes')]
        manifest.update(e_feed['Event'].pop('_manifest'))
        saveEvent(e_feed)
        print("Event " + str(counter) + "/" + str(total) + " exported.")
        counter += 1
    saveManifest(manifest)
    print('Manifest saved.')
    saveHashes(hashes)
    print('Hashes saved. Feed creation completed.')
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import json
import os
from pymisp import ExpandedPyMISP
from settings import url, key, ssl, outputdir, filters, valid_attribute_distribution_levels
try:
from settings import with_distribution
except ImportError:
with_distribution = False
try:
from settings import include_deleted
except ImportError:
include_deleted = False
try:
from settings import exclude_attribute_types
except ImportError:
exclude_attribute_types = []
valid_attribute_distributions = []
def init():
# If we have an old settings.py file then this variable won't exist
global valid_attribute_distributions
try:
valid_attribute_distributions = [int(v) for v in valid_attribute_distribution_levels]
except Exception:
valid_attribute_distributions = [0, 1, 2, 3, 4, 5]
return ExpandedPyMISP(url, key, ssl)
def saveEvent(event):
try:
with open(os.path.join(outputdir, f'{event["Event"]["uuid"]}.json'), 'w') as f:
json.dump(event, f, indent=2)
except Exception as e:
print(e)
sys.exit('Could not create the event dump.')
def saveHashes(hashes):
try:
with open(os.path.join(outputdir, 'hashes.csv'), 'w') as hashFile:
for element in hashes:
hashFile.write('{},{}\n'.format(element[0], element[1]))
except Exception as e:
print(e)
sys.exit('Could not create the quick hash lookup file.')
def saveManifest(manifest):
try:
manifestFile = open(os.path.join(outputdir, 'manifest.json'), 'w')
manifestFile.write(json.dumps(manifest))
manifestFile.close()
except Exception as e:
print(e)
sys.exit('Could not create the manifest file.')
if __name__ == '__main__':
misp = init()
try:
events = misp.search_index(minimal=True, **filters, pythonify=False)
except Exception as e:
print(e)
sys.exit("Invalid response received from MISP.")
if len(events) == 0:
sys.exit("No events returned.")
manifest = {}
hashes = []
counter = 1
total = len(events)
for event in events:
try:
e = misp.get_event(event['uuid'], deleted=include_deleted, pythonify=True)
if exclude_attribute_types:
for i, attribute in enumerate(e.attributes):
if attribute.type in exclude_attribute_types:
e.attributes.pop(i)
e_feed = e.to_feed(valid_distributions=valid_attribute_distributions, with_meta=True, with_distribution=with_distribution)
except Exception as err:
print(err, event['uuid'])
continue
if not e_feed:
print(f'Invalid distribution {e.distribution}, skipping')
continue
hashes += [[h, e.uuid] for h in e_feed['Event'].pop('_hashes')]
manifest.update(e_feed['Event'].pop('_manifest'))
saveEvent(e_feed)
print("Event " + str(counter) + "/" + str(total) + " exported.")
counter += 1
saveManifest(manifest)
print('Manifest saved.')
saveHashes(hashes)
print('Hashes saved. Feed creation completed.')
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/13a_learner.ipynb (unless otherwise specified).
__all__ = ['CancelFitException', 'CancelEpochException', 'CancelTrainException', 'CancelValidException',
'CancelBatchException', 'replacing_yield', 'mk_metric', 'save_model', 'load_model', 'Learner',
'VerboseCallback', 'Metric', 'AvgMetric', 'AvgLoss', 'AvgSmoothLoss', 'Recorder', 'FetchPreds',
'load_learner']
# Cell
from .data.all import *
from .optimizer import *
from .callback.core import *
# Cell
defaults.lr = 1e-3
# Cell
def replacing_yield(o, attr, val):
    "Generator for a context manager: set `o.attr` to `val` around the yield, then restore it"
    previous = getattr(o, attr)
    try:
        yield setattr(o, attr, val)
    finally:
        setattr(o, attr, previous)
# Cell
def mk_metric(m):
    "Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
    if isinstance(m, Metric):
        return m
    return AvgMetric(m)
# Cell
def save_model(file, model, opt, with_opt=True):
    "Save `model` to `file` along with `opt` (if available, and if `with_opt`)"
    state = get_model(model).state_dict()
    # Only bundle the optimizer state when there is one and the caller wants it.
    if opt is not None and with_opt:
        state = {'model': state, 'opt': opt.state_dict()}
    torch.save(state, file)
# Cell
def load_model(file, model, opt, with_opt=None, device=None, strict=True):
    "Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
    # Accept an int CUDA device index; default to CPU.
    if isinstance(device, int): device = torch.device('cuda', device)
    elif device is None: device = 'cpu'
    state = torch.load(file, map_location=device)
    # A checkpoint saved with its optimizer is a {'model': ..., 'opt': ...} dict.
    hasopt = set(state)=={'model', 'opt'}
    model_state = state['model'] if hasopt else state
    get_model(model).load_state_dict(model_state, strict=strict)
    if hasopt and ifnone(with_opt,True):
        try: opt.load_state_dict(state['opt'])
        except Exception:
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            if with_opt: warn("Could not load the optimizer state.")
    elif with_opt: warn("Saved file doesn't contain an optimizer state.")
# Cell
def _try_concat(o):
    "Concatenate the tensors in `o`, falling back to row-wise `L` lists when shapes are ragged"
    try: return torch.cat(o)
    except Exception:
        # torch.cat fails on mismatched shapes; flatten each element into its
        # rows instead. Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        return sum([L(o_[i,:] for i in range_of(o_)) for o_ in o], L())
# Cell
from contextlib import ExitStack
# Cell
_before_epoch = [event.begin_fit, event.begin_epoch]
_after_epoch = [event.after_epoch, event.after_fit]
# Cell
class Learner():
    "Group together a `model`, some `dls` and a `loss_func` to handle training"
    def __init__(self, dls, model, loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=trainable_params, cbs=None,
                 metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True,
                 moms=(0.95,0.85,0.95)):
        store_attr(self, "dls,model,opt_func,lr,splitter,model_dir,wd,wd_bn_bias,train_bn,metrics,moms")
        self.training,self.create_mbar,self.logger,self.opt,self.cbs = False,True,print,None,L()
        # Fall back to the loss function attached to the training dataset.
        if loss_func is None:
            loss_func = getattr(dls.train_ds, 'loss_func', None)
            assert loss_func is not None, "Could not infer loss function from the data, please pass a loss function."
        self.loss_func = loss_func
        self.path = path if path is not None else getattr(dls, 'path', Path('.'))
        # Default callbacks plus user callbacks; bare classes are instantiated.
        self.add_cbs([(cb() if isinstance(cb, type) else cb) for cb in L(defaults.callbacks)+L(cbs)])
        self.model.to(self.dls.device)
        if hasattr(self.model, 'reset'): self.model.reset()
        self.epoch,self.n_epoch,self.loss = 0,1,tensor(0.)

    # `metrics` is normalized on assignment so plain functions become AvgMetric.
    @property
    def metrics(self): return self._metrics
    @metrics.setter
    def metrics(self,v): self._metrics = L(v).map(mk_metric)

    def add_cbs(self, cbs): L(cbs).map(self.add_cb)
    def remove_cbs(self, cbs): L(cbs).map(self.remove_cb)
    def add_cb(self, cb):
        # Each callback is also exposed as an attribute named after it
        # (e.g. self.recorder); refuse to overwrite an unrelated one.
        old = getattr(self, cb.name, None)
        assert not old or isinstance(old, type(cb)), f"self.{cb.name} already registered"
        cb.learn = self
        setattr(self, cb.name, cb)
        self.cbs.append(cb)
        return self

    def remove_cb(self, cb):
        cb.learn = None
        if hasattr(self, cb.name): delattr(self, cb.name)
        if cb in self.cbs: self.cbs.remove(cb)

    @contextmanager
    def added_cbs(self, cbs):
        # Temporarily register `cbs` for the duration of the with-block.
        self.add_cbs(cbs)
        yield
        self.remove_cbs(cbs)

    def ordered_cbs(self, cb_func): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, cb_func)]

    # Calling the learner with an event name dispatches it to every callback
    # in run order; a list of event names is dispatched one by one.
    def __call__(self, event_name): L(event_name).map(self._call_one)
    def _call_one(self, event_name):
        assert hasattr(event, event_name)
        [cb(event_name) for cb in sort_by_run(self.cbs)]

    def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)

    def create_opt(self):
        self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
        # Optionally exempt batchnorm/bias params from weight decay and keep
        # batchnorm in train mode even when its param group is frozen.
        if not self.wd_bn_bias:
            for p in self._bn_bias_state(True ): p['do_wd'] = False
        if self.train_bn:
            for p in self._bn_bias_state(False): p['force_train'] = True

    def _split(self, b):
        # `n_inp` says how many leading batch elements are model inputs;
        # the rest are targets.
        i = getattr(self.dls, 'n_inp', 1 if len(b)==1 else len(b)-1)
        self.xb,self.yb = b[:i],b[i:]

    def all_batches(self):
        self.n_iter = len(self.dl)
        for o in enumerate(self.dl): self.one_batch(*o)

    def one_batch(self, i, b):
        # Forward/loss for one batch, plus backward/step when training.
        # Callback events bracket every stage.
        self.iter = i
        try:
            self._split(b);                                  self('begin_batch')
            self.pred = self.model(*self.xb);                self('after_pred')
            if len(self.yb) == 0: return
            self.loss = self.loss_func(self.pred, *self.yb); self('after_loss')
            if not self.training: return
            self.loss.backward();                            self('after_backward')
            self.opt.step();                                 self('after_step')
            self.opt.zero_grad()
        except CancelBatchException:                         self('after_cancel_batch')
        finally:                                             self('after_batch')

    def _do_begin_fit(self, n_epoch):
        self.n_epoch,self.loss = n_epoch,tensor(0.); self('begin_fit')

    def _do_epoch_train(self):
        try:
            self.dl = self.dls.train; self('begin_train')
            self.all_batches()
        except CancelTrainException: self('after_cancel_train')
        finally: self('after_train')

    def _do_epoch_validate(self, ds_idx=1, dl=None):
        if dl is None: dl = self.dls[ds_idx]
        # Temporarily force deterministic, complete iteration of the
        # validation dataloader, restoring its attributes afterwards.
        names = ['shuffle', 'drop_last']
        try:
            dl,old,has = change_attrs(dl, names, [False,False])
            self.dl = dl;                                    self('begin_validate')
            with torch.no_grad(): self.all_batches()
        except CancelValidException:                         self('after_cancel_validate')
        finally:
            dl,*_ = change_attrs(dl, names, old, has);       self('after_validate')

    def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):
        # Top-level training loop: (re)create the optimizer if needed, set
        # hyper-parameters, then run train+validate for each epoch.
        with self.added_cbs(cbs):
            if reset_opt or not self.opt: self.create_opt()
            if wd is None: wd = self.wd
            if wd is not None: self.opt.set_hypers(wd=wd)
            self.opt.set_hypers(lr=self.lr if lr is None else lr)
            try:
                self._do_begin_fit(n_epoch)
                for epoch in range(n_epoch):
                    try:
                        self.epoch=epoch;          self('begin_epoch')
                        self._do_epoch_train()
                        self._do_epoch_validate()
                    except CancelEpochException:   self('after_cancel_epoch')
                    finally:                       self('after_epoch')
            except CancelFitException:             self('after_cancel_fit')
            finally:                               self('after_fit')

    def validate(self, ds_idx=1, dl=None, cbs=None):
        if dl is None: dl = self.dls[ds_idx]
        with self.added_cbs(cbs), self.no_logging(), self.no_mbar():
            self(_before_epoch)
            self._do_epoch_validate(ds_idx, dl)
            self(_after_epoch)
        return getattr(self, 'final_record', None)

    @delegates(GatherPredsCallback.__init__)
    def get_preds(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, with_loss=False, act=None,
                  inner=False, **kwargs):
        if dl is None: dl = self.dls[ds_idx].new(shuffled=False, drop_last=False)
        cb = GatherPredsCallback(with_input=with_input, with_loss=with_loss, **kwargs)
        #with self.no_logging(), self.added_cbs(cb), self.loss_not_reduced(), self.no_mbar():
        # loss_not_reduced is only needed (and only safe) when per-item losses
        # are requested, hence the dynamic list of context managers.
        ctx_mgrs = [self.no_logging(), self.added_cbs(cb), self.no_mbar()]
        if with_loss: ctx_mgrs.append(self.loss_not_reduced())
        with ExitStack() as stack:
            for mgr in ctx_mgrs: stack.enter_context(mgr)
            self(event.begin_epoch if inner else _before_epoch)
            self._do_epoch_validate(dl=dl)
            self(event.after_epoch if inner else _after_epoch)
            if act is None: act = getattr(self.loss_func, 'activation', noop)
            res = cb.all_tensors()
            # Predictions sit at index 1 when inputs were gathered too.
            pred_i = 1 if with_input else 0
            if res[pred_i] is not None:
                res[pred_i] = act(res[pred_i])
                if with_decoded: res.insert(pred_i+2, getattr(self.loss_func, 'decodes', noop)(res[pred_i]))
            return tuple(res)

    def predict(self, item, rm_type_tfms=None, with_input=False):
        # Run a single item through a test dataloader and fully decode the result.
        dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms)
        inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
        dec = self.dls.decode_batch((*tuplify(inp),*tuplify(dec_preds)))[0]
        i = getattr(self.dls, 'n_inp', -1)
        dec_inp,dec_targ = map(detuplify, [dec[:i],dec[i:]])
        res = dec_targ,dec_preds[0],preds[0]
        if with_input: res = (dec_inp,) + res
        return res

    def show_results(self, ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs):
        if dl is None: dl = self.dls[ds_idx].new(shuffle=shuffle)
        b = dl.one_batch()
        _,_,preds = self.get_preds(dl=[b], with_decoded=True)
        self.dls.show_results(b, preds, max_n=max_n, **kwargs)

    def show_training_loop(self):
        # Pretty-print the event skeleton in `_loop` with the callbacks
        # registered for each event.
        indent = 0
        for s in _loop:
            if s.startswith('Start'): print(f'{' '*indent}{s}'); indent += 2
            elif s.startswith('End'): indent -= 2; print(f'{' '*indent}{s}')
            else: print(f'{' '*indent} - {s:15}:', self.ordered_cbs(s))

    @contextmanager
    def no_logging(self): return replacing_yield(self, 'logger', noop)
    @contextmanager
    def no_mbar(self): return replacing_yield(self, 'create_mbar', False)

    @contextmanager
    def loss_not_reduced(self):
        # Switch the loss to per-item values for the duration of the block.
        if hasattr(self.loss_func, 'reduction'): return replacing_yield(self.loss_func, 'reduction', 'none')
        else: return replacing_yield(self, 'loss_func', partial(self.loss_func, reduction='none'))

    def save(self, file, with_opt=True):
        if rank_distrib(): return # don't save if slave proc
        file = join_path_file(file, self.path/self.model_dir, ext='.pth')
        save_model(file, self.model, getattr(self,'opt',None), with_opt)

    def load(self, file, with_opt=None, device=None, strict=True):
        if device is None: device = self.dls.device
        if self.opt is None: self.create_opt()
        distrib_barrier()
        file = join_path_file(file, self.path/self.model_dir, ext='.pth')
        load_model(file, self.model, self.opt, with_opt=with_opt, device=device, strict=strict)
        return self
Learner.x,Learner.y = add_props(lambda i,x: detuplify((x.xb,x.yb)[i]))
# Cell
add_docs(Learner, "Group together a `model`, some `dls` and a `loss_func` to handle training",
add_cbs="Add `cbs` to the list of `Callback` and register `self` as their learner",
add_cb="Add `cb` to the list of `Callback` and register `self` as their learner",
remove_cbs="Remove `cbs` from the list of `Callback` and deregister `self` as their learner",
remove_cb="Add `cb` from the list of `Callback` and deregister `self` as their learner",
added_cbs="Context manage that temporarily adds `cbs`",
ordered_cbs="Return a list of `Callback` for one step `cb_func` in the training loop",
create_opt="Create an optimizer with `lr`",
one_batch="Train or evaluate `self.model` on batch `(xb,yb)`",
all_batches="Train or evaluate `self.model` on all batches of `self.dl`",
fit="Fit `self.model` for `n_epoch` using `cbs`. Optionally `reset_opt`.",
validate="Validate on `dl` with potential new `cbs`.",
get_preds="Get the predictions and targets on the `ds_idx`-th dbunchset or `dl`, optionally `with_input` and `with_loss`",
predict="Return the prediction on `item`, fully decoded, loss function decoded and probabilities",
show_results="Show some predictions on `ds_idx`-th dbunchset or `dl`",
show_training_loop="Show each step in the training loop",
no_logging="Context manager to temporarily remove `logger`",
no_mbar="Context manager to temporarily prevent the master progress bar from being created",
loss_not_reduced="A context manager to evaluate `loss_func` with reduction set to none.",
save="Save model and optimizer state (if `with_opt`) to `self.path/self.model_dir/file`",
load="Load model and optimizer state (if `with_opt`) from `self.path/self.model_dir/file` using `device`"
)
# Cell
class VerboseCallback(Callback):
    "Callback that prints the name of each event called"
    def __call__(self, event_name):
        # Print before delegating so the event is shown even if a handler raises.
        print(event_name)
        super().__call__(event_name)
# Cell
@docs
class Metric():
    "Blueprint for defining a metric"
    # Subclasses override reset/accumulate/value; `name` is derived from the
    # class name unless overridden.
    def reset(self): pass
    def accumulate(self, learn): pass
    @property
    def value(self): raise NotImplementedError
    @property
    def name(self): return class2attr(self, 'Metric')

    # Per-method docs consumed by the @docs decorator.
    _docs = dict(
        reset="Reset inner state to prepare for new computation",
        name="Name of the `Metric`, camel-cased and with Metric removed",
        accumulate="Use `learn` to update the state with new results",
        value="The value of the metric")
# Cell
def _maybe_reduce(val):
    "Average `val` across distributed processes; a no-op when single-process."
    if num_distrib()>1:
        # Clone so the all_reduce doesn't mutate the caller's tensor in place.
        val = val.clone()
        torch.distributed.all_reduce(val, op=torch.distributed.ReduceOp.SUM)
        val /= num_distrib()
    return val
# Cell
class AvgMetric(Metric):
    "Average the values of `func` taking into account potential different batch sizes"
    def __init__(self, func): self.func = func
    def reset(self): self.total,self.count = 0.,0
    def accumulate(self, learn):
        bs = find_bs(learn.yb)
        # Weight each batch's metric by its size so uneven batches average correctly.
        self.total += to_detach(self.func(learn.pred, *learn.yb))*bs
        self.count += bs
    @property
    def value(self): return self.total/self.count if self.count != 0 else None
    @property
    # For functools.partial-wrapped metrics, report the wrapped function's name.
    def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__
# Cell
class AvgLoss(Metric):
    "Average the losses taking into account potential different batch sizes"
    def reset(self): self.total,self.count = 0.,0
    def accumulate(self, learn):
        bs = find_bs(learn.yb)
        # Weight each batch's mean loss by its size so uneven batches average correctly.
        self.total += to_detach(learn.loss.mean())*bs
        self.count += bs
    @property
    def value(self): return self.total/self.count if self.count != 0 else None
    @property
    def name(self): return "loss"
# Cell
class AvgSmoothLoss(Metric):
"Smooth average of the losses (exponentially weighted with `beta`)"
def __init__(self, beta=0.98): self.beta = beta
def reset(self): self.count,self.val = 0,tensor(0.)
def accumulate(self, learn):
self.count += 1
self.val = torch.lerp(to_detach(learn.loss.mean(), gather=False), self.val, self.beta)
@property
def value(self): return self.val/(1-self.beta**self.count)
# Cell
from fastprogress.fastprogress import format_time
def _maybe_item(t):
t = t.value
return t.item() if isinstance(t, Tensor) and t.numel()==1 else t
# Cell
class Recorder(Callback):
"Callback that registers statistics (lr, loss and metrics) during training"
run_after = TrainEvalCallback
def __init__(self, add_time=True, train_metrics=False, valid_metrics=True, beta=0.98):
store_attr(self, 'add_time,train_metrics,valid_metrics')
self.loss,self.smooth_loss = AvgLoss(),AvgSmoothLoss(beta=beta)
def begin_fit(self):
"Prepare state for training"
self.lrs,self.iters,self.losses,self.values = [],[],[],[]
names = self.metrics.attrgot('name')
if self.train_metrics and self.valid_metrics:
names = L('loss') + names
names = names.map('train_{}') + names.map('valid_{}')
elif self.valid_metrics: names = L('train_loss', 'valid_loss') + names
else: names = L('train_loss') + names
if self.add_time: names.append('time')
self.metric_names = 'epoch'+names
self.smooth_loss.reset()
def after_batch(self):
"Update all metrics and records lr and smooth loss in training"
if len(self.yb) == 0: return
mets = self._train_mets if self.training else self._valid_mets
for met in mets: met.accumulate(self.learn)
if not self.training: return
self.lrs.append(self.opt.hypers[-1]['lr'])
self.losses.append(self.smooth_loss.value)
self.learn.smooth_loss = self.smooth_loss.value
def begin_epoch(self):
"Set timer if `self.add_time=True`"
self.cancel_train,self.cancel_valid = False,False
if self.add_time: self.start_epoch = time.time()
self.log = L(getattr(self, 'epoch', 0))
def begin_train (self): self._train_mets[1:].map(Self.reset())
def begin_validate(self): self._valid_mets.map(Self.reset())
def after_train (self): self.log += self._train_mets.map(_maybe_item)
def after_validate(self): self.log += self._valid_mets.map(_maybe_item)
def after_cancel_train(self): self.cancel_train = True
def after_cancel_validate(self): self.cancel_valid = True
def after_epoch(self):
"Store and log the loss/metric values"
self.learn.final_record = self.log[1:].copy()
self.values.append(self.learn.final_record)
if self.add_time: self.log.append(format_time(time.time() - self.start_epoch))
self.logger(self.log)
self.iters.append(self.smooth_loss.count)
@property
def _train_mets(self):
if getattr(self, 'cancel_train', False): return L()
return L(self.smooth_loss) + (self.metrics if self.train_metrics else L())
@property
def _valid_mets(self):
if getattr(self, 'cancel_valid', False): return L()
return (L(self.loss) + self.metrics if self.valid_metrics else L())
def plot_loss(self, skip_start=5, with_valid=True):
plt.plot(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train')
if with_valid:
idx = (np.array(self.iters)<skip_start).sum()
plt.plot(self.iters[idx:], L(self.values[idx:]).itemgot(1), label='valid')
plt.legend()
# Cell
add_docs(Recorder,
begin_train = "Reset loss and metrics state",
after_train = "Log loss and metric values on the training set (if `self.training_metrics=True`)",
begin_validate = "Reset loss and metrics state",
after_validate = "Log loss and metric values on the validation set",
after_cancel_train = "Ignore training metrics for this epoch",
after_cancel_validate = "Ignore validation metrics for this epoch",
plot_loss = "Plot the losses from `skip_start` and onward")
defaults.callbacks = [TrainEvalCallback, Recorder]
# Cell
class FetchPreds(Callback):
"A callback to fetch predictions during the training loop"
def __init__(self, ds_idx=1, dl=None, with_input=False, with_decoded=False):
store_attr(self, 'ds_idx,dl,with_input,with_decoded')
def after_validate(self):
learn,rec = self.learn,self.learn.recorder
learn.remove_cbs([self,rec])
self.preds = learn.get_preds(ds_idx=self.ds_idx, dl=self.dl,
with_input=self.with_input, with_decoded=self.with_decoded, inner=True)
learn.add_cbs([self, rec])
# Cell
@patch
def freeze_to(self:Learner, n):
if self.opt is None: self.create_opt()
self.opt.freeze_to(n)
self.opt.clear_state()
@patch
def freeze(self:Learner): self.freeze_to(-1)
@patch
def unfreeze(self:Learner): self.freeze_to(0)
add_docs(Learner,
freeze_to="Freeze parameter groups up to `n`",
freeze="Freeze up to last parameter group",
unfreeze="Unfreeze the entire model")
# Cell
@patch
def export(self:Learner, fname='export.pkl'):
"Export the content of `self` without the items and the optimizer state for inference"
if rank_distrib(): return # don't export if slave proc
old_dbunch = self.dls
self.dls = self.dls.new_empty()
state = self.opt.state_dict()
self.opt = None
with warnings.catch_warnings():
#To avoid the warning that come from PyTorch about model not being checked
warnings.simplefilter("ignore")
torch.save(self, self.path/fname)
self.create_opt()
self.opt.load_state_dict(state)
self.dls = old_dbunch
# Cell
def load_learner(fname, cpu=True):
"Load a `Learner` object in `fname`, optionally putting it on the `cpu`"
res = torch.load(fname, map_location='cpu' if cpu else None)
if hasattr(res, 'to_fp32'): res = res.to_fp32()
if cpu: res.dls.cpu()
return res
# Cell
@patch
def tta(self:Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
"Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation"
if dl is None: dl = self.dls[ds_idx]
if item_tfms is not None or batch_tfms is not None: dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
with dl.dataset.set_split_idx(0), self.no_mbar():
if hasattr(self,'progress'): self.progress.mbar = master_bar(list(range(n)))
aug_preds = []
for i in self.progress.mbar if hasattr(self,'progress') else range(n):
self.epoch = i #To keep track of progress on mbar since the progress callback will use self.epoch
aug_preds.append(self.get_preds(ds_idx, inner=True)[0][None])
aug_preds = torch.cat(aug_preds)
aug_preds = aug_preds.max(0)[0] if use_max else aug_preds.mean(0)
self.epoch = n
with dl.dataset.set_split_idx(1): preds,targs = self.get_preds(ds_idx, inner=True)
if use_max: return torch.stack([preds, aug_preds], 0).max(0)[0],targs
preds = (aug_preds,preds) if beta is None else torch.lerp(aug_preds, preds, beta)
return preds,targs | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/13a_learner.ipynb (unless otherwise specified).
__all__ = ['CancelFitException', 'CancelEpochException', 'CancelTrainException', 'CancelValidException',
'CancelBatchException', 'replacing_yield', 'mk_metric', 'save_model', 'load_model', 'Learner',
'VerboseCallback', 'Metric', 'AvgMetric', 'AvgLoss', 'AvgSmoothLoss', 'Recorder', 'FetchPreds',
'load_learner']
# Cell
from .data.all import *
from .optimizer import *
from .callback.core import *
# Cell
defaults.lr = 1e-3
# Cell
def replacing_yield(o, attr, val):
"Context manager to temporarily replace an attribute"
old = getattr(o,attr)
try: yield setattr(o,attr,val)
finally: setattr(o,attr,old)
# Cell
def mk_metric(m):
"Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
return m if isinstance(m, Metric) else AvgMetric(m)
# Cell
def save_model(file, model, opt, with_opt=True):
"Save `model` to `file` along with `opt` (if available, and if `with_opt`)"
if opt is None: with_opt=False
state = get_model(model).state_dict()
if with_opt: state = {'model': state, 'opt':opt.state_dict()}
torch.save(state, file)
# Cell
def load_model(file, model, opt, with_opt=None, device=None, strict=True):
"Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
if isinstance(device, int): device = torch.device('cuda', device)
elif device is None: device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = set(state)=={'model', 'opt'}
model_state = state['model'] if hasopt else state
get_model(model).load_state_dict(model_state, strict=strict)
if hasopt and ifnone(with_opt,True):
try: opt.load_state_dict(state['opt'])
except:
if with_opt: warn("Could not load the optimizer state.")
elif with_opt: warn("Saved filed doesn't contain an optimizer state.")
# Cell
def _try_concat(o):
try: return torch.cat(o)
except: return sum([L(o_[i,:] for i in range_of(o_)) for o_ in o], L())
# Cell
from contextlib import ExitStack
# Cell
_before_epoch = [event.begin_fit, event.begin_epoch]
_after_epoch = [event.after_epoch, event.after_fit]
# Cell
class Learner():
def __init__(self, dls, model, loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=trainable_params, cbs=None,
metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True,
moms=(0.95,0.85,0.95)):
store_attr(self, "dls,model,opt_func,lr,splitter,model_dir,wd,wd_bn_bias,train_bn,metrics,moms")
self.training,self.create_mbar,self.logger,self.opt,self.cbs = False,True,print,None,L()
if loss_func is None:
loss_func = getattr(dls.train_ds, 'loss_func', None)
assert loss_func is not None, "Could not infer loss function from the data, please pass a loss function."
self.loss_func = loss_func
self.path = path if path is not None else getattr(dls, 'path', Path('.'))
self.add_cbs([(cb() if isinstance(cb, type) else cb) for cb in L(defaults.callbacks)+L(cbs)])
self.model.to(self.dls.device)
if hasattr(self.model, 'reset'): self.model.reset()
self.epoch,self.n_epoch,self.loss = 0,1,tensor(0.)
@property
def metrics(self): return self._metrics
@metrics.setter
def metrics(self,v): self._metrics = L(v).map(mk_metric)
def add_cbs(self, cbs): L(cbs).map(self.add_cb)
def remove_cbs(self, cbs): L(cbs).map(self.remove_cb)
def add_cb(self, cb):
old = getattr(self, cb.name, None)
assert not old or isinstance(old, type(cb)), f"self.{cb.name} already registered"
cb.learn = self
setattr(self, cb.name, cb)
self.cbs.append(cb)
return self
def remove_cb(self, cb):
cb.learn = None
if hasattr(self, cb.name): delattr(self, cb.name)
if cb in self.cbs: self.cbs.remove(cb)
@contextmanager
def added_cbs(self, cbs):
self.add_cbs(cbs)
yield
self.remove_cbs(cbs)
def ordered_cbs(self, cb_func): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, cb_func)]
def __call__(self, event_name): L(event_name).map(self._call_one)
def _call_one(self, event_name):
assert hasattr(event, event_name)
[cb(event_name) for cb in sort_by_run(self.cbs)]
def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
def create_opt(self):
self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
if not self.wd_bn_bias:
for p in self._bn_bias_state(True ): p['do_wd'] = False
if self.train_bn:
for p in self._bn_bias_state(False): p['force_train'] = True
def _split(self, b):
i = getattr(self.dls, 'n_inp', 1 if len(b)==1 else len(b)-1)
self.xb,self.yb = b[:i],b[i:]
def all_batches(self):
self.n_iter = len(self.dl)
for o in enumerate(self.dl): self.one_batch(*o)
def one_batch(self, i, b):
self.iter = i
try:
self._split(b); self('begin_batch')
self.pred = self.model(*self.xb); self('after_pred')
if len(self.yb) == 0: return
self.loss = self.loss_func(self.pred, *self.yb); self('after_loss')
if not self.training: return
self.loss.backward(); self('after_backward')
self.opt.step(); self('after_step')
self.opt.zero_grad()
except CancelBatchException: self('after_cancel_batch')
finally: self('after_batch')
def _do_begin_fit(self, n_epoch):
self.n_epoch,self.loss = n_epoch,tensor(0.); self('begin_fit')
def _do_epoch_train(self):
try:
self.dl = self.dls.train; self('begin_train')
self.all_batches()
except CancelTrainException: self('after_cancel_train')
finally: self('after_train')
def _do_epoch_validate(self, ds_idx=1, dl=None):
if dl is None: dl = self.dls[ds_idx]
names = ['shuffle', 'drop_last']
try:
dl,old,has = change_attrs(dl, names, [False,False])
self.dl = dl; self('begin_validate')
with torch.no_grad(): self.all_batches()
except CancelValidException: self('after_cancel_validate')
finally:
dl,*_ = change_attrs(dl, names, old, has); self('after_validate')
def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):
with self.added_cbs(cbs):
if reset_opt or not self.opt: self.create_opt()
if wd is None: wd = self.wd
if wd is not None: self.opt.set_hypers(wd=wd)
self.opt.set_hypers(lr=self.lr if lr is None else lr)
try:
self._do_begin_fit(n_epoch)
for epoch in range(n_epoch):
try:
self.epoch=epoch; self('begin_epoch')
self._do_epoch_train()
self._do_epoch_validate()
except CancelEpochException: self('after_cancel_epoch')
finally: self('after_epoch')
except CancelFitException: self('after_cancel_fit')
finally: self('after_fit')
def validate(self, ds_idx=1, dl=None, cbs=None):
if dl is None: dl = self.dls[ds_idx]
with self.added_cbs(cbs), self.no_logging(), self.no_mbar():
self(_before_epoch)
self._do_epoch_validate(ds_idx, dl)
self(_after_epoch)
return getattr(self, 'final_record', None)
@delegates(GatherPredsCallback.__init__)
def get_preds(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, with_loss=False, act=None,
inner=False, **kwargs):
if dl is None: dl = self.dls[ds_idx].new(shuffled=False, drop_last=False)
cb = GatherPredsCallback(with_input=with_input, with_loss=with_loss, **kwargs)
#with self.no_logging(), self.added_cbs(cb), self.loss_not_reduced(), self.no_mbar():
ctx_mgrs = [self.no_logging(), self.added_cbs(cb), self.no_mbar()]
if with_loss: ctx_mgrs.append(self.loss_not_reduced())
with ExitStack() as stack:
for mgr in ctx_mgrs: stack.enter_context(mgr)
self(event.begin_epoch if inner else _before_epoch)
self._do_epoch_validate(dl=dl)
self(event.after_epoch if inner else _after_epoch)
if act is None: act = getattr(self.loss_func, 'activation', noop)
res = cb.all_tensors()
pred_i = 1 if with_input else 0
if res[pred_i] is not None:
res[pred_i] = act(res[pred_i])
if with_decoded: res.insert(pred_i+2, getattr(self.loss_func, 'decodes', noop)(res[pred_i]))
return tuple(res)
def predict(self, item, rm_type_tfms=None, with_input=False):
dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms)
inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
dec = self.dls.decode_batch((*tuplify(inp),*tuplify(dec_preds)))[0]
i = getattr(self.dls, 'n_inp', -1)
dec_inp,dec_targ = map(detuplify, [dec[:i],dec[i:]])
res = dec_targ,dec_preds[0],preds[0]
if with_input: res = (dec_inp,) + res
return res
def show_results(self, ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs):
if dl is None: dl = self.dls[ds_idx].new(shuffle=shuffle)
b = dl.one_batch()
_,_,preds = self.get_preds(dl=[b], with_decoded=True)
self.dls.show_results(b, preds, max_n=max_n, **kwargs)
def show_training_loop(self):
indent = 0
for s in _loop:
if s.startswith('Start'): print(f'{" "*indent}{s}'); indent += 2
elif s.startswith('End'): indent -= 2; print(f'{" "*indent}{s}')
else: print(f'{" "*indent} - {s:15}:', self.ordered_cbs(s))
@contextmanager
def no_logging(self): return replacing_yield(self, 'logger', noop)
@contextmanager
def no_mbar(self): return replacing_yield(self, 'create_mbar', False)
@contextmanager
def loss_not_reduced(self):
if hasattr(self.loss_func, 'reduction'): return replacing_yield(self.loss_func, 'reduction', 'none')
else: return replacing_yield(self, 'loss_func', partial(self.loss_func, reduction='none'))
def save(self, file, with_opt=True):
if rank_distrib(): return # don't save if slave proc
file = join_path_file(file, self.path/self.model_dir, ext='.pth')
save_model(file, self.model, getattr(self,'opt',None), with_opt)
def load(self, file, with_opt=None, device=None, strict=True):
if device is None: device = self.dls.device
if self.opt is None: self.create_opt()
distrib_barrier()
file = join_path_file(file, self.path/self.model_dir, ext='.pth')
load_model(file, self.model, self.opt, with_opt=with_opt, device=device, strict=strict)
return self
Learner.x,Learner.y = add_props(lambda i,x: detuplify((x.xb,x.yb)[i]))
# Cell
add_docs(Learner, "Group together a `model`, some `dls` and a `loss_func` to handle training",
add_cbs="Add `cbs` to the list of `Callback` and register `self` as their learner",
add_cb="Add `cb` to the list of `Callback` and register `self` as their learner",
remove_cbs="Remove `cbs` from the list of `Callback` and deregister `self` as their learner",
remove_cb="Add `cb` from the list of `Callback` and deregister `self` as their learner",
added_cbs="Context manage that temporarily adds `cbs`",
ordered_cbs="Return a list of `Callback` for one step `cb_func` in the training loop",
create_opt="Create an optimizer with `lr`",
one_batch="Train or evaluate `self.model` on batch `(xb,yb)`",
all_batches="Train or evaluate `self.model` on all batches of `self.dl`",
fit="Fit `self.model` for `n_epoch` using `cbs`. Optionally `reset_opt`.",
validate="Validate on `dl` with potential new `cbs`.",
get_preds="Get the predictions and targets on the `ds_idx`-th dbunchset or `dl`, optionally `with_input` and `with_loss`",
predict="Return the prediction on `item`, fully decoded, loss function decoded and probabilities",
show_results="Show some predictions on `ds_idx`-th dbunchset or `dl`",
show_training_loop="Show each step in the training loop",
no_logging="Context manager to temporarily remove `logger`",
no_mbar="Context manager to temporarily prevent the master progress bar from being created",
loss_not_reduced="A context manager to evaluate `loss_func` with reduction set to none.",
save="Save model and optimizer state (if `with_opt`) to `self.path/self.model_dir/file`",
load="Load model and optimizer state (if `with_opt`) from `self.path/self.model_dir/file` using `device`"
)
# Cell
class VerboseCallback(Callback):
"Callback that prints the name of each event called"
def __call__(self, event_name):
print(event_name)
super().__call__(event_name)
# Cell
@docs
class Metric():
"Blueprint for defining a metric"
def reset(self): pass
def accumulate(self, learn): pass
@property
def value(self): raise NotImplementedError
@property
def name(self): return class2attr(self, 'Metric')
_docs = dict(
reset="Reset inner state to prepare for new computation",
name="Name of the `Metric`, camel-cased and with Metric removed",
accumulate="Use `learn` to update the state with new results",
value="The value of the metric")
# Cell
def _maybe_reduce(val):
if num_distrib()>1:
val = val.clone()
torch.distributed.all_reduce(val, op=torch.distributed.ReduceOp.SUM)
val /= num_distrib()
return val
# Cell
class AvgMetric(Metric):
"Average the values of `func` taking into account potential different batch sizes"
def __init__(self, func): self.func = func
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += to_detach(self.func(learn.pred, *learn.yb))*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__
# Cell
class AvgLoss(Metric):
"Average the losses taking into account potential different batch sizes"
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += to_detach(learn.loss.mean())*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return "loss"
# Cell
class AvgSmoothLoss(Metric):
"Smooth average of the losses (exponentially weighted with `beta`)"
def __init__(self, beta=0.98): self.beta = beta
def reset(self): self.count,self.val = 0,tensor(0.)
def accumulate(self, learn):
self.count += 1
self.val = torch.lerp(to_detach(learn.loss.mean(), gather=False), self.val, self.beta)
@property
def value(self): return self.val/(1-self.beta**self.count)
# Cell
from fastprogress.fastprogress import format_time
def _maybe_item(t):
t = t.value
return t.item() if isinstance(t, Tensor) and t.numel()==1 else t
# Cell
class Recorder(Callback):
"Callback that registers statistics (lr, loss and metrics) during training"
run_after = TrainEvalCallback
def __init__(self, add_time=True, train_metrics=False, valid_metrics=True, beta=0.98):
store_attr(self, 'add_time,train_metrics,valid_metrics')
self.loss,self.smooth_loss = AvgLoss(),AvgSmoothLoss(beta=beta)
def begin_fit(self):
"Prepare state for training"
self.lrs,self.iters,self.losses,self.values = [],[],[],[]
names = self.metrics.attrgot('name')
if self.train_metrics and self.valid_metrics:
names = L('loss') + names
names = names.map('train_{}') + names.map('valid_{}')
elif self.valid_metrics: names = L('train_loss', 'valid_loss') + names
else: names = L('train_loss') + names
if self.add_time: names.append('time')
self.metric_names = 'epoch'+names
self.smooth_loss.reset()
def after_batch(self):
"Update all metrics and records lr and smooth loss in training"
if len(self.yb) == 0: return
mets = self._train_mets if self.training else self._valid_mets
for met in mets: met.accumulate(self.learn)
if not self.training: return
self.lrs.append(self.opt.hypers[-1]['lr'])
self.losses.append(self.smooth_loss.value)
self.learn.smooth_loss = self.smooth_loss.value
def begin_epoch(self):
"Set timer if `self.add_time=True`"
self.cancel_train,self.cancel_valid = False,False
if self.add_time: self.start_epoch = time.time()
self.log = L(getattr(self, 'epoch', 0))
def begin_train (self): self._train_mets[1:].map(Self.reset())
def begin_validate(self): self._valid_mets.map(Self.reset())
def after_train (self): self.log += self._train_mets.map(_maybe_item)
def after_validate(self): self.log += self._valid_mets.map(_maybe_item)
def after_cancel_train(self): self.cancel_train = True
def after_cancel_validate(self): self.cancel_valid = True
def after_epoch(self):
"Store and log the loss/metric values"
self.learn.final_record = self.log[1:].copy()
self.values.append(self.learn.final_record)
if self.add_time: self.log.append(format_time(time.time() - self.start_epoch))
self.logger(self.log)
self.iters.append(self.smooth_loss.count)
@property
def _train_mets(self):
if getattr(self, 'cancel_train', False): return L()
return L(self.smooth_loss) + (self.metrics if self.train_metrics else L())
@property
def _valid_mets(self):
if getattr(self, 'cancel_valid', False): return L()
return (L(self.loss) + self.metrics if self.valid_metrics else L())
def plot_loss(self, skip_start=5, with_valid=True):
plt.plot(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train')
if with_valid:
idx = (np.array(self.iters)<skip_start).sum()
plt.plot(self.iters[idx:], L(self.values[idx:]).itemgot(1), label='valid')
plt.legend()
# Cell
add_docs(Recorder,
begin_train = "Reset loss and metrics state",
after_train = "Log loss and metric values on the training set (if `self.training_metrics=True`)",
begin_validate = "Reset loss and metrics state",
after_validate = "Log loss and metric values on the validation set",
after_cancel_train = "Ignore training metrics for this epoch",
after_cancel_validate = "Ignore validation metrics for this epoch",
plot_loss = "Plot the losses from `skip_start` and onward")
defaults.callbacks = [TrainEvalCallback, Recorder]
# Cell
class FetchPreds(Callback):
"A callback to fetch predictions during the training loop"
def __init__(self, ds_idx=1, dl=None, with_input=False, with_decoded=False):
store_attr(self, 'ds_idx,dl,with_input,with_decoded')
def after_validate(self):
learn,rec = self.learn,self.learn.recorder
learn.remove_cbs([self,rec])
self.preds = learn.get_preds(ds_idx=self.ds_idx, dl=self.dl,
with_input=self.with_input, with_decoded=self.with_decoded, inner=True)
learn.add_cbs([self, rec])
# Cell
@patch
def freeze_to(self:Learner, n):
if self.opt is None: self.create_opt()
self.opt.freeze_to(n)
self.opt.clear_state()
@patch
def freeze(self:Learner): self.freeze_to(-1)
@patch
def unfreeze(self:Learner): self.freeze_to(0)
add_docs(Learner,
freeze_to="Freeze parameter groups up to `n`",
freeze="Freeze up to last parameter group",
unfreeze="Unfreeze the entire model")
# Cell
@patch
def export(self:Learner, fname='export.pkl'):
"Export the content of `self` without the items and the optimizer state for inference"
if rank_distrib(): return # don't export if slave proc
old_dbunch = self.dls
self.dls = self.dls.new_empty()
state = self.opt.state_dict()
self.opt = None
with warnings.catch_warnings():
#To avoid the warning that come from PyTorch about model not being checked
warnings.simplefilter("ignore")
torch.save(self, self.path/fname)
self.create_opt()
self.opt.load_state_dict(state)
self.dls = old_dbunch
# Cell
def load_learner(fname, cpu=True):
"Load a `Learner` object in `fname`, optionally putting it on the `cpu`"
res = torch.load(fname, map_location='cpu' if cpu else None)
if hasattr(res, 'to_fp32'): res = res.to_fp32()
if cpu: res.dls.cpu()
return res
# Cell
@patch
def tta(self:Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
"Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation"
if dl is None: dl = self.dls[ds_idx]
if item_tfms is not None or batch_tfms is not None: dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
with dl.dataset.set_split_idx(0), self.no_mbar():
if hasattr(self,'progress'): self.progress.mbar = master_bar(list(range(n)))
aug_preds = []
for i in self.progress.mbar if hasattr(self,'progress') else range(n):
self.epoch = i #To keep track of progress on mbar since the progress callback will use self.epoch
aug_preds.append(self.get_preds(ds_idx, inner=True)[0][None])
aug_preds = torch.cat(aug_preds)
aug_preds = aug_preds.max(0)[0] if use_max else aug_preds.mean(0)
self.epoch = n
with dl.dataset.set_split_idx(1): preds,targs = self.get_preds(ds_idx, inner=True)
if use_max: return torch.stack([preds, aug_preds], 0).max(0)[0],targs
preds = (aug_preds,preds) if beta is None else torch.lerp(aug_preds, preds, beta)
return preds,targs |
##
## © Copyright 2021- IBM Inc. All rights reserved
# SPDX-License-Identifier: MIT
##
#
# This code has two use-cases:
# 1. Where you want to run a batch of queries, with each saving results in CSV - specify the TestId this is added to make filename))
# 2. When you want to run a series of queries and check the data retrieved is the same as the last time your ran it
# i.e. as a regression tester - with the queries specified in the spreadsheet and fully cached data used as a mock
# server you don't even need to have the server attached
#
# This is a simple regression tester which compares current results with saved results from a previous run on the same server.
# The server doesn't always have to be attached to re-run tests because the test mode uses 'forever' http caching to save everything.
# Although if the OSLC query causes a request for a previously-unfetched response from the server then things will of course fail.
# But that doesn't seem too unreasonable, as everything gets 'forever' cached including login response :-)
#
import argparse
import re
import sys
import time
import openpyxl as XL
import elmclient.examples.oslcquery as querymain
# this maps important column headings to commandline option+value - these columns must all be present in the worksheet - if the value in a cell is None then no prefix is put in the commandline
# if a heading isn't in this list it is ignored
# the sequence of these determines the sequence they appear in the oslc query commandline
# (otherwise they would have been in alphabetical order)
xlstoargs={
'Appstring': '-A'
,'Project': '-p'
,'Component': '-C'
,'Configuration': '-F'
,'GC Config': '-G'
,'GC Project': '-E'
,'ResourceType': '-r'
,'Query': '-q'
,'Select': '-s'
,'Searchterms': '-f'
,"Orderby": '-o'
,'Null': '-n'
,'Value': '-v'
,'Value1': '-v'
,'OutputFile': '-O'
,'TypeSystemReport': '--typesystemreport'
,'Creds0': '-0'
,'Creds1': '-1'
,'Creds2': '-2'
,'Creds3': '-3'
,'Creds4': '-4'
,'NResults': '--nresults'
,'User': '-U'
,'Password': '-P'
,'JazzURL': '-J'
,'Logging': '-L'
}
# turn a list of options into a Windows cmd-style quoted string (means this works on Windows only!)
# option strings are NOT already wrapped in quotes!
# first it doubles " in the string, then if space or " in the string it is wrapped in " "
def argstocmd(args):
newargs = []
for arg in args:
if '"' in arg:
arg = arg.replace( '"','""')
if ' ' in arg or '"' in arg:
arg = f'"{arg}"'
newargs.append(arg)
return " ".join(newargs)
def do_tests(inputargs=None):
    """Run the OSLC-query tests listed in an xlsx spreadsheet.

    Each worksheet row is turned into one oslcquery commandline using the
    column-to-option mapping in xlstoargs, then executed (or just printed,
    for --dryrun). Prints a pass/fail summary; returns None.

    FIX: the per-test banner f-string previously nested double quotes inside
    a double-quoted f-string, which is a SyntaxError before Python 3.12
    (PEP 701); the inner quotes are now single quotes.
    """
    inputargs = inputargs or sys.argv[1:]
    # setup argparse
    parser = argparse.ArgumentParser(description="Perform OSLC query on a Jazz application, with results output to CSV (and other) formats - use -h to get some basic help")
    parser.add_argument('spreadsheet', help='Name of the xlsx spreadsheet with tests')
    parser.add_argument('-d', '--dryrun', action="store_true", help="Dry run - show commandline but don't run the OSLC Query")
    parser.add_argument('-f', '--stoponfail', action="store_true", help="Stop at first failure")
    parser.add_argument('-g', '--group', default=None, help="Comma-separated list of regex pattern to match groups to be run, in the worksheet Group column")
    parser.add_argument('-j', '--just', default=None, help="Comma-separated list of tests to run, matching the TestId column in the worksheet")
    parser.add_argument('-L', '--loglevel', default=None, help="Set logging level - default is None - choose from INFO/DEBUG/ERROR")
    parser.add_argument('-r', '--reps', default=1, type=int, help="Number of times to repeat the selected tests (must be >=1")
    parser.add_argument('-s', '--save', action="store_true", help="UNFINISHED Retrieve and save query results forever (used to save reference for -t testing")
    parser.add_argument('-t', '--test', action="store_true", help="UNFINISHED Retrieve data and do comparison to test that results match saved results from -s")
    parser.add_argument('-w', '--sheetname', default=None, help='Name of the worksheet with tests (if not specified the workbook must only have one worksheet, which is used)')
    parser.add_argument('-W', '--cachecontrol', action='count', default=0, help="Used once -W erases cache for the first test then continues with caching enabled. Used twice -WW wipes cache and disables caching.")
    args = parser.parse_args(inputargs)
    if args.reps<1:
        raise Exception( f"Reps must be >=1" )
    justtests = [j.strip() for j in args.just.split(",")] if args.just else []
    # data_only=True so formula cells yield their cached values, not formulas
    wb = XL.load_workbook(filename=args.spreadsheet,data_only=True)
    wss=wb.sheetnames
    if args.sheetname:
        tests = wb[args.sheetname]
    else:
        if len( wss ) > 1:
            raise Exception( "Worksheet not specified but spreadsheet file includes more than one sheet!" )
        print( f"Using worksheet {wss[0]}" )
        tests = wb[wss[0]]
    # first scan the headings on row 1 to get the column numbers for the columns we want to use
    # turn the worksheet content into a list of dictionaries using the column headings as keys
    colheadings = []
    for col in range(1,50):
        thiscolheading = tests.cell(column=col, row=1).value
        # first empty heading terminates the table
        if not thiscolheading:
            break
        colheadings.append(thiscolheading)
    # now retrieve data to list of dictionaries, one per row
    rows = []
    for rownum in range(2, 2000):
        row = {}
        for i,col in enumerate(colheadings):
            row[col]=tests.cell(column=i+1, row=rownum).value
        rows.append(row)
    wb.close()
    # now go down the rows executing the specified test
    npassed = 0
    nfailed = 0
    firstquery = True
    for rep in range(args.reps):
        for n,row in enumerate(rows):
            testnumber = row['TestId']
            if not testnumber:
                continue
            # a '#' in the Disable column skips the row
            if row['Disable'] and row['Disable'].startswith('#'):
                continue
            if args.group:
                if not row['Group']:
                    continue
                rowgroups = [j.strip() for j in row['Group'].split(",")]
                regexes = [j.strip() for j in args.group.split(",")]
                if not any([re.match(regex,group) for regex in regexes for group in rowgroups]):
                    continue
            if justtests and str(testnumber) not in justtests:
                continue
            print( f"=====================================================================\n{testnumber=} {row.get('Description','')}" )
            exceptionexpected = True if row['ExceptionExpected'] else False
            csvname = "test_"+str(testnumber)+".csv"
            queryargs=[]
            for k,v in xlstoargs.items():
                if k not in colheadings:
                    raise Exception( f"Heading {k} not present in spreadsheet!" )
                cellvalue=row[k]
                if cellvalue is not None:
                    if v:
                        # if there's an option
                        cellvalue=str(row[k]).strip()
                        # check for options where the value starts with - - these have to be specified using -o=value
                        if cellvalue.startswith("-"):
                            # use -o=value
                            queryargs.append( f"{v}={cellvalue}" )
                        else:
                            # use -o value
                            queryargs.append(v)
                            queryargs.append(cellvalue)
                    else:
                        queryargs.append(str(cellvalue).strip())
            if args.save:
                queryargs.extend(['-0','-O',csvname])
            if args.test:
                queryargs.extend(['-0','-2',csvname])
            if args.loglevel and "-L" not in queryargs:
                queryargs.extend(['-L',args.loglevel])
            # handle cache control passing on to oslcquery
            if firstquery:
                # if this is first query run and we have to wipe cache:
                if args.cachecontrol==1:
                    queryargs.extend( [ "-W" ] )
                elif args.cachecontrol==2:
                    queryargs.extend( [ "-WW" ] )
                firstquery = False
            elif args.cachecontrol==2:
                queryargs.extend( [ "-WW" ] )
            # run it
            try:
                if args.dryrun:
                    print( f"Dry-run: query commandline is: oslcquery {argstocmd(queryargs)}" )
                    result = 0
                else:
                    print( f"Query commandline is: oslcquery {argstocmd(queryargs)}" )
                    result = querymain.do_oslc_query(queryargs)
                exceptionhappened = False
            except Exception as e:
                print( e )
                result = 1
                exceptionhappened = True
                # if not exceptionexpected:
                # raise
            # a test passes when the outcome (nonzero result/exception) matches ExceptionExpected
            if (result != 0 and not exceptionexpected) or (result == 0 and exceptionexpected):
                nfailed += 1
                print( f" TEST {testnumber} FAILED!!!!!!!!!!!!!!!!!!!!!\n" )
                if args.stoponfail:
                    print( f"Stopping after first failure, {rep} repetitions" )
                    return
            else:
                print( f"Test {testnumber} passed!" )
                npassed += 1
    if not args.dryrun:
        print( f"\nPassed {npassed} Failed {nfailed}" )
    else:
        print( f"Dry run completed" )
def main():
    """Run the spreadsheet-driven tests and report total wall-clock runtime."""
    started = time.perf_counter()
    do_tests(sys.argv[1:])
    elapsed = time.perf_counter() - started
    minutes, seconds = divmod(int(elapsed), 60)
    print(f"Runtime was {minutes}m {seconds:02d}s")

if __name__ == '__main__':
    main()
| ##
## © Copyright 2021- IBM Inc. All rights reserved
# SPDX-License-Identifier: MIT
##
#
# This code has two use-cases:
# 1. Where you want to run a batch of queries, with each saving results in CSV - specify the TestId this is added to make filename))
# 2. When you want to run a series of queries and check the data retrieved is the same as the last time your ran it
# i.e. as a regression tester - with the queries specified in the spreadsheet and fully cached data used as a mock
# server you don't even need to have the server attached
#
# This is a simple regression tester which compares current results with saved results from a previous run on the same server.
# The server doesn't always have to be attached to re-run tests because the test mode uses 'forever' http caching to save everything.
# Although if the OSLC query causes a request for a previously-unfetched response from the server then things will of course fail.
# But that doesn't seem too unreasonable, as everything gets 'forever' cached including login response :-)
#
import argparse
import re
import sys
import time
import openpyxl as XL
import elmclient.examples.oslcquery as querymain
# this maps important column headings to commandline option+value - these columns must all be present in the worksheet - if the value in a cell is None then no prefix is put in the commandline
# if a heading isn't in this list it is ignored
# the sequence of these determines the sequence they appear in the oslc query commandline
# (otherwise they would have been in alphabetical order)
# Worksheet-column-heading -> oslcquery option flag; insertion order fixes the
# order options appear on the generated commandline.
# NOTE: 'Value' and 'Value1' both map to -v so a row can supply two -v values.
xlstoargs={
    'Appstring': '-A'
    ,'Project': '-p'
    ,'Component': '-C'
    ,'Configuration': '-F'
    ,'GC Config': '-G'
    ,'GC Project': '-E'
    ,'ResourceType': '-r'
    ,'Query': '-q'
    ,'Select': '-s'
    ,'Searchterms': '-f'
    ,"Orderby": '-o'
    ,'Null': '-n'
    ,'Value': '-v'
    ,'Value1': '-v'
    ,'OutputFile': '-O'
    ,'TypeSystemReport': '--typesystemreport'
    ,'Creds0': '-0'
    ,'Creds1': '-1'
    ,'Creds2': '-2'
    ,'Creds3': '-3'
    ,'Creds4': '-4'
    ,'NResults': '--nresults'
    ,'User': '-U'
    ,'Password': '-P'
    ,'JazzURL': '-J'
    ,'Logging': '-L'
    }
# turn a list of options into a Windows cmd-style quoted string (means this works on Windows only!)
# option strings are NOT already wrapped in quotes!
# first it doubles " in the string, then if space or " in the string it is wrapped in " "
def argstocmd(args):
    """Return *args* joined into one Windows-cmd-style quoted commandline string."""
    def _quote(a):
        # double any embedded quotes first, then wrap if quoting is required
        if '"' in a:
            a = a.replace('"', '""')
        return f'"{a}"' if (' ' in a or '"' in a) else a
    return " ".join(_quote(a) for a in args)
def do_tests(inputargs=None):
    """Run the OSLC-query tests listed in an xlsx spreadsheet.

    Each worksheet row becomes one oslcquery commandline assembled from the
    columns listed in xlstoargs, then executed (or just printed for --dryrun).
    Prints a pass/fail summary; returns None.
    """
    inputargs = inputargs or sys.argv[1:]
    # setup argparse
    parser = argparse.ArgumentParser(description="Perform OSLC query on a Jazz application, with results output to CSV (and other) formats - use -h to get some basic help")
    parser.add_argument('spreadsheet', help='Name of the xlsx spreadsheet with tests')
    parser.add_argument('-d', '--dryrun', action="store_true", help="Dry run - show commandline but don't run the OSLC Query")
    parser.add_argument('-f', '--stoponfail', action="store_true", help="Stop at first failure")
    parser.add_argument('-g', '--group', default=None, help="Comma-separated list of regex pattern to match groups to be run, in the worksheet Group column")
    parser.add_argument('-j', '--just', default=None, help="Comma-separated list of tests to run, matching the TestId column in the worksheet")
    parser.add_argument('-L', '--loglevel', default=None, help="Set logging level - default is None - choose from INFO/DEBUG/ERROR")
    parser.add_argument('-r', '--reps', default=1, type=int, help="Number of times to repeat the selected tests (must be >=1")
    parser.add_argument('-s', '--save', action="store_true", help="UNFINISHED Retrieve and save query results forever (used to save reference for -t testing")
    parser.add_argument('-t', '--test', action="store_true", help="UNFINISHED Retrieve data and do comparison to test that results match saved results from -s")
    parser.add_argument('-w', '--sheetname', default=None, help='Name of the worksheet with tests (if not specified the workbook must only have one worksheet, which is used)')
    parser.add_argument('-W', '--cachecontrol', action='count', default=0, help="Used once -W erases cache for the first test then continues with caching enabled. Used twice -WW wipes cache and disables caching.")
    args = parser.parse_args(inputargs)
    if args.reps<1:
        raise Exception( f"Reps must be >=1" )
    justtests = [j.strip() for j in args.just.split(",")] if args.just else []
    # data_only=True: formula cells yield cached values rather than formulas
    wb = XL.load_workbook(filename=args.spreadsheet,data_only=True)
    wss=wb.sheetnames
    if args.sheetname:
        tests = wb[args.sheetname]
    else:
        if len( wss ) > 1:
            raise Exception( "Worksheet not specified but spreadsheet file includes more than one sheet!" )
        print( f"Using worksheet {wss[0]}" )
        tests = wb[wss[0]]
    # first scan the headings on row 1 to get the column numbers for the columns we want to use
    # turn the worksheet content into a list of dictionaries using the column headings as keys
    colheadings = []
    for col in range(1,50):
        thiscolheading = tests.cell(column=col, row=1).value
        # first empty heading terminates the table
        if not thiscolheading:
            break
        colheadings.append(thiscolheading)
    # now retrieve data to list of dictionaries, one per row
    rows = []
    for rownum in range(2, 2000):
        row = {}
        for i,col in enumerate(colheadings):
            row[col]=tests.cell(column=i+1, row=rownum).value
        rows.append(row)
    wb.close()
    # now go down the rows executing the specified test
    npassed = 0
    nfailed = 0
    firstquery = True
    for rep in range(args.reps):
        for n,row in enumerate(rows):
            testnumber = row['TestId']
            if not testnumber:
                continue
            # a '#' in the Disable column skips the row
            if row['Disable'] and row['Disable'].startswith('#'):
                continue
            if args.group:
                if not row['Group']:
                    continue
                rowgroups = [j.strip() for j in row['Group'].split(",")]
                regexes = [j.strip() for j in args.group.split(",")]
                if not any([re.match(regex,group) for regex in regexes for group in rowgroups]):
                    continue
            if justtests and str(testnumber) not in justtests:
                continue
            print( f"=====================================================================\n{testnumber=} {row.get('Description','')}" )
            exceptionexpected = True if row['ExceptionExpected'] else False
            csvname = "test_"+str(testnumber)+".csv"
            queryargs=[]
            # build the commandline in xlstoargs order from the row's cells
            for k,v in xlstoargs.items():
                if k not in colheadings:
                    raise Exception( f"Heading {k} not present in spreadsheet!" )
                cellvalue=row[k]
                if cellvalue is not None:
                    if v:
                        # if there's an option
                        cellvalue=str(row[k]).strip()
                        # check for options where the value starts with - - these have to be specified using -o=value
                        if cellvalue.startswith("-"):
                            # use -o=value
                            queryargs.append( f"{v}={cellvalue}" )
                        else:
                            # use -o value
                            queryargs.append(v)
                            queryargs.append(cellvalue)
                    else:
                        queryargs.append(str(cellvalue).strip())
            if args.save:
                queryargs.extend(['-0','-O',csvname])
            if args.test:
                queryargs.extend(['-0','-2',csvname])
            if args.loglevel and "-L" not in queryargs:
                queryargs.extend(['-L',args.loglevel])
            # handle cache control passing on to oslcquery
            if firstquery:
                # if this is first query run and we have to wipe cache:
                if args.cachecontrol==1:
                    queryargs.extend( [ "-W" ] )
                elif args.cachecontrol==2:
                    queryargs.extend( [ "-WW" ] )
                firstquery = False
            elif args.cachecontrol==2:
                queryargs.extend( [ "-WW" ] )
            # run it
            try:
                if args.dryrun:
                    print( f"Dry-run: query commandline is: oslcquery {argstocmd(queryargs)}" )
                    result = 0
                else:
                    print( f"Query commandline is: oslcquery {argstocmd(queryargs)}" )
                    result = querymain.do_oslc_query(queryargs)
                exceptionhappened = False
            except Exception as e:
                print( e )
                result = 1
                exceptionhappened = True
                # if not exceptionexpected:
                # raise
            # pass/fail: outcome must match the row's ExceptionExpected flag
            if (result != 0 and not exceptionexpected) or (result == 0 and exceptionexpected):
                nfailed += 1
                print( f" TEST {testnumber} FAILED!!!!!!!!!!!!!!!!!!!!!\n" )
                if args.stoponfail:
                    print( f"Stopping after first failure, {rep} repetitions" )
                    return
            else:
                print( f"Test {testnumber} passed!" )
                npassed += 1
    if not args.dryrun:
        print( f"\nPassed {npassed} Failed {nfailed}" )
    else:
        print( f"Dry run completed" )
def main():
    """Entry point: run the tests named on sys.argv and print the elapsed time."""
    t0 = time.perf_counter()
    do_tests(sys.argv[1:])
    secs = int(time.perf_counter() - t0)
    print(f"Runtime was {secs // 60}m {secs % 60:02d}s")

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# GenerateJSONOutput.py
#
# Command-line interface for turning output root files into JSON files for further processing.
#
# By: Larry Lee - Dec 2017
import argparse
import sys
import os
import ROOT
ROOT.gSystem.Load(f"{os.getenv("HISTFITTER")}/lib/libSusyFitter.so")
ROOT.gROOT.SetBatch()
parser = argparse.ArgumentParser()
parser.add_argument(
"--inputFiles",
"-i",
type=str,
nargs="+",
help="input ROOT files -- if you give me just the nominal, I'll try to find the theory variations and the upper limit automatically",
required=True,
)
parser.add_argument(
"--format",
"-f",
type=str,
help="format of object names",
default="hypo_SU_%f_%f_0_10",
)
parser.add_argument(
"--interpretation",
"-p",
type=str,
help="interpretation of object name",
default="m0:m12",
)
parser.add_argument(
"--addCoordinates",
"-a",
type=str,
help="add additional coordinates to json using a python dictionary {existing json value name and math : new value name}",
default='{"m0":"x","m12":"y"}',
)
parser.add_argument("--cut", "-c", type=str, help="cut string", default="1")
parser.add_argument(
"--noAddTabs",
"-n",
help="don't convert JSON to human readable file",
action="store_true",
default=False,
)
args = parser.parse_args()
# Print out the settings
for arg in vars(args):
user_input = getattr(args, arg)
print(f">>> ... Setting: {arg: >20} {str(user_input): >40}")
print("")
def main():
    """Convert each input ROOT file - and, for "Nominal" inputs, its theory
    variation ("Up"/"Down") and upper-limit siblings - into JSON harvest lists.

    NOTE: processFile() calls sys.exit(1) when a file is missing; the bare
    ``except:`` clauses below deliberately catch that SystemExit (and the
    RuntimeError from the bare ``raise`` used when a name substitution was a
    no-op) so a missing sibling file only produces a warning.

    FIX: the final warning previously rebuilt the filename with the
    "_fixSigXSecNominal_hypotest" pattern (copy-paste error) and so reported
    the wrong filename for the "_Nominal" -> "_upperlimit" attempt.
    """
    for filename in args.inputFiles:
        processFile(filename)
        if args.addCoordinates != "":
            addCoordinates(filename, args.addCoordinates)
        if "Nominal" in filename:
            print(">>> Attempting to find theory variation files")
            try:
                newfilename = filename.replace("Nominal", "Up")
                if newfilename == filename:
                    raise
                processFile(newfilename)
                if args.addCoordinates != "":
                    addCoordinates(newfilename, args.addCoordinates)
            except:
                print(
                    ">>> WARNING: Can't find file: %s"
                    % filename.replace("Nominal", "Up")
                )
            try:
                newfilename = filename.replace("Nominal", "Down")
                if newfilename == filename:
                    raise
                processFile(newfilename)
                if args.addCoordinates != "":
                    addCoordinates(newfilename, args.addCoordinates)
            except:
                print(
                    ">>> WARNING: Can't find file: %s"
                    % filename.replace("Nominal", "Down")
                )
            try:
                newfilename = filename.replace(
                    "_fixSigXSecNominal_hypotest", "_upperlimit"
                )
                if newfilename == filename:
                    raise
                processFile(newfilename)
                if args.addCoordinates != "":
                    addCoordinates(newfilename, args.addCoordinates)
            except:
                print(
                    ">>> WARNING: Can't find file: %s"
                    % filename.replace("_fixSigXSecNominal_hypotest", "_upperlimit")
                )
            try:
                newfilename = filename.replace("_Nominal", "_upperlimit")
                if newfilename == filename:
                    raise
                processFile(newfilename)
                if args.addCoordinates != "":
                    addCoordinates(newfilename, args.addCoordinates)
            except:
                print(
                    ">>> WARNING: Can't find file: %s"
                    % filename.replace("_Nominal", "_upperlimit")
                )
    if not args.noAddTabs:
        cleanUpJSON()
    print(">>>")
    print(">>> Done!")
    print(">>>")
    return
def processFile(file):
    """Run HistFitter's hypo-test harvesting on *file*; exit(1) if it is missing."""
    print("")
    if not os.path.isfile(file):
        print(">>> ERROR: File does not exist: %s" % file)
        sys.exit(1)
    # delegate to the C++ helper loaded from libSusyFitter
    ROOT.CollectAndWriteHypoTestResults(
        file, args.format, args.interpretation, args.cut
    )
    print("")
    return
def cleanUpJSON():
    """Rewrite every *.json file in the working directory with 4-space indentation.

    FIX: the read handle from ``json.load(open(file))`` was never closed while
    the same path was immediately reopened for writing; both opens now use
    ``with`` so the handles are released deterministically.
    """
    import json
    import glob
    for file in glob.glob("./*json"):
        print(">>> Making file human readable: %s" % file)
        with open(file) as fh:
            data = json.load(fh)
        with open(file, "w") as f:
            f.write(json.dumps(data, indent=4))
    return
def addCoordinates(fileName, coordString):
    """Add derived coordinate fields to the harvest-list JSON made for *fileName*.

    coordString is a JSON dict mapping an expression over existing result keys
    (e.g. "m0") to the new key name (e.g. "x"). The harvest-list filename is
    derived from *fileName* by replacing ".root" with "__1_harvest_list.json".

    FIX: file handles are now closed via ``with`` (the read handle was leaked).
    """
    import json
    import re
    coordDict = json.loads(coordString)
    jsonFileName = fileName.split("/")[-1]  # grab just the filename
    jsonFileName = jsonFileName.replace(".root", "__1_harvest_list.json")
    with open(jsonFileName) as f:
        data = json.load(f)
    for hypo_test in data:  # an entry is one hypo test result
        for key in coordDict:  # each item of the result
            # parse input arguments, thanks to Larry for regex suggestions
            # SECURITY: eval() executes the rewritten key expression - only
            # pass trusted --addCoordinates input.
            total = eval(re.sub(r"\b([a-zA-Z]+[0-9]*)\b", r'hypo_test["\g<1>"]', key))
            # assign new key to value
            hypo_test[coordDict[key]] = total
    with open(jsonFileName, "w") as f:
        f.write(json.dumps(data))
if __name__ == "__main__":
main()
| #!/usr/bin/env python
# GenerateJSONOutput.py
#
# Command-line interface for turning output root files into JSON files for further processing.
#
# By: Larry Lee - Dec 2017
import argparse
import sys
import os
import ROOT
# Load HistFitter's shared library (requires $HISTFITTER) so ROOT can see
# CollectAndWriteHypoTestResults, then run ROOT headless.
ROOT.gSystem.Load(f"{os.getenv('HISTFITTER')}/lib/libSusyFitter.so")
ROOT.gROOT.SetBatch()
# Commandline: input files plus object-name format/interpretation controls.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--inputFiles",
    "-i",
    type=str,
    nargs="+",
    help="input ROOT files -- if you give me just the nominal, I'll try to find the theory variations and the upper limit automatically",
    required=True,
)
parser.add_argument(
    "--format",
    "-f",
    type=str,
    help="format of object names",
    default="hypo_SU_%f_%f_0_10",
)
parser.add_argument(
    "--interpretation",
    "-p",
    type=str,
    help="interpretation of object name",
    default="m0:m12",
)
parser.add_argument(
    "--addCoordinates",
    "-a",
    type=str,
    help="add additional coordinates to json using a python dictionary {existing json value name and math : new value name}",
    default='{"m0":"x","m12":"y"}',
)
parser.add_argument("--cut", "-c", type=str, help="cut string", default="1")
parser.add_argument(
    "--noAddTabs",
    "-n",
    help="don't convert JSON to human readable file",
    action="store_true",
    default=False,
)
args = parser.parse_args()
# Print out the settings
for arg in vars(args):
    user_input = getattr(args, arg)
    print(f">>> ... Setting: {arg: >20} {str(user_input): >40}")
print("")
def main():
    """Convert each input ROOT file - and, for "Nominal" inputs, its theory
    variation ("Up"/"Down") and upper-limit siblings - into JSON harvest lists.

    NOTE: processFile() calls sys.exit(1) when a file is missing; the bare
    ``except:`` clauses below intentionally catch that SystemExit (and the
    RuntimeError raised by a bare ``raise`` when a name substitution was a
    no-op) so a missing sibling file only produces a warning.
    """
    for filename in args.inputFiles:
        processFile(filename)
        if args.addCoordinates != "":
            addCoordinates(filename, args.addCoordinates)
        if "Nominal" in filename:
            print(">>> Attempting to find theory variation files")
            try:
                newfilename = filename.replace("Nominal", "Up")
                if newfilename == filename:
                    raise
                processFile(newfilename)
                if args.addCoordinates != "":
                    addCoordinates(newfilename, args.addCoordinates)
            except:
                print(
                    ">>> WARNING: Can't find file: %s"
                    % filename.replace("Nominal", "Up")
                )
            try:
                newfilename = filename.replace("Nominal", "Down")
                if newfilename == filename:
                    raise
                processFile(newfilename)
                if args.addCoordinates != "":
                    addCoordinates(newfilename, args.addCoordinates)
            except:
                print(
                    ">>> WARNING: Can't find file: %s"
                    % filename.replace("Nominal", "Down")
                )
            try:
                newfilename = filename.replace(
                    "_fixSigXSecNominal_hypotest", "_upperlimit"
                )
                if newfilename == filename:
                    raise
                processFile(newfilename)
                if args.addCoordinates != "":
                    addCoordinates(newfilename, args.addCoordinates)
            except:
                print(
                    ">>> WARNING: Can't find file: %s"
                    % filename.replace("_fixSigXSecNominal_hypotest", "_upperlimit")
                )
            try:
                newfilename = filename.replace("_Nominal", "_upperlimit")
                if newfilename == filename:
                    raise
                processFile(newfilename)
                if args.addCoordinates != "":
                    addCoordinates(newfilename, args.addCoordinates)
            except:
                # NOTE(review): this warning reuses the "_fixSigXSecNominal_hypotest"
                # pattern from the previous block, so it reports the wrong filename
                # for the "_Nominal" -> "_upperlimit" attempt above.
                print(
                    ">>> WARNING: Can't find file: %s"
                    % filename.replace("_fixSigXSecNominal_hypotest", "_upperlimit")
                )
    if not args.noAddTabs:
        cleanUpJSON()
    print(">>>")
    print(">>> Done!")
    print(">>>")
    return
def processFile(file):
    """Run HistFitter's hypo-test harvesting on *file*; exits via sys.exit(1) when the file is missing."""
    print("")
    if os.path.isfile(file):
        # delegate to the C++ helper loaded from libSusyFitter
        ROOT.CollectAndWriteHypoTestResults(
            file, args.format, args.interpretation, args.cut
        )
    else:
        print(">>> ERROR: File does not exist: %s" % file)
        sys.exit(1)
    print("")
    return
def cleanUpJSON():
    """Rewrite every *.json file in the working directory with 4-space indentation."""
    import json
    import glob
    for file in glob.glob("./*json"):
        print(">>> Making file human readable: %s" % file)
        # NOTE(review): this read handle is never explicitly closed
        data = json.load(open(file))
        with open(file, "w") as f:
            f.write(json.dumps(data, indent=4))
    return
def addCoordinates(fileName, coordString):
    """Add derived coordinate fields to the harvest-list JSON made for *fileName*.

    coordString is a JSON dict mapping an expression over existing result keys
    (e.g. "m0") to a new key name (e.g. "x"). The harvest-list filename is
    derived by replacing ".root" with "__1_harvest_list.json".
    """
    import json
    import re
    coordDict = json.loads(coordString)
    jsonFileName = fileName.split("/")[-1]  # grab just the filename
    jsonFileName = jsonFileName.replace(".root", "__1_harvest_list.json")
    # NOTE(review): read handle is never explicitly closed
    data = json.load(open(jsonFileName))
    for i, hypo_test in enumerate(data):  # an entry is one hypo test result
        for key in coordDict:  # each item of the result
            # parse input arguments, thanks to Larry for regex suggestions
            # SECURITY: eval() executes the rewritten key expression - only
            # pass trusted --addCoordinates input.
            total = eval(re.sub(r"\b([a-zA-Z]+[0-9]*)\b", r'hypo_test["\g<1>"]', key))
            # assign new key to value
            hypo_test[coordDict[key]] = total
    with open(jsonFileName, "w") as f:
        f.write(json.dumps(data))
if __name__ == "__main__":
main()
|
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from model import Model
from test import validation
# Use CUDA when available; model and batches are moved to this device in train().
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
    """Train the text-recognition model described by the *opt* namespace.

    Builds the balanced training dataset and validation loader, constructs the
    model/loss/optimizer from opt, then loops forever: one batch per
    iteration, validating every opt.valInterval iterations and checkpointing
    the best accuracy / best normalized edit distance, until opt.num_iter
    iterations have run (then sys.exit()).

    FIX: three log-formatting f-strings previously nested single quotes inside
    single-quoted f-strings, a SyntaxError before Python 3.12 (PEP 701); the
    inner literals now use double quotes (identical output).
    """
    """ dataset preparation """
    if not opt.data_filtering_off:
        print('Filtering the images containing characters which are not in opt.character')
        print('Filtering the images whose label is longer than opt.batch_max_length')
        # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
    opt.select_data = opt.select_data.split('-')
    opt.batch_ratio = opt.batch_ratio.split('-')
    train_dataset = Batch_Balanced_Dataset(opt)
    log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
    AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
    valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=opt.batch_size,
        shuffle=True,  # 'True' to check training progress with validation function.
        num_workers=int(opt.workers),
        collate_fn=AlignCollate_valid, pin_memory=True)
    log.write(valid_dataset_log)
    print('-' * 80)
    log.write('-' * 80 + '\n')
    log.close()
    """ model configuration """
    if 'CTC' in opt.Prediction:
        converter = CTCLabelConverter(opt.character)
    else:
        converter = AttnLabelConverter(opt.character)
    opt.num_class = len(converter.character)
    if opt.rgb:
        opt.input_channel = 3
    model = Model(opt)
    print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
          opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
          opt.SequenceModeling, opt.Prediction)
    # weight initialization
    for name, param in model.named_parameters():
        if 'localization_fc2' in name:
            print(f'Skip {name} as it is already initialized')
            continue
        try:
            if 'bias' in name:
                init.constant_(param, 0.0)
            elif 'weight' in name:
                init.kaiming_normal_(param)
        except Exception as e:  # for batchnorm.
            if 'weight' in name:
                param.data.fill_(1)
            continue
    # data parallel for multi-GPU
    model = torch.nn.DataParallel(model).to(device)
    model.train()
    if opt.saved_model != '':
        print(f'loading pretrained model from {opt.saved_model}')
        if opt.FT:
            model.load_state_dict(torch.load(opt.saved_model), strict=False)
        else:
            model.load_state_dict(torch.load(opt.saved_model))
    print("Model:")
    print(model)
    """ setup loss """
    if 'CTC' in opt.Prediction:
        criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
    else:
        criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device)  # ignore [GO] token = ignore index 0
    # loss averager
    loss_avg = Averager()
    # filter that only require gradient decent
    filtered_parameters = []
    params_num = []
    for p in filter(lambda p: p.requires_grad, model.parameters()):
        filtered_parameters.append(p)
        params_num.append(np.prod(p.size()))
    print('Trainable params num : ', sum(params_num))
    # [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, model.named_parameters())]
    # setup optimizer
    if opt.adam:
        optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
    else:
        optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)
    print("Optimizer:")
    print(optimizer)
    """ final options """
    # print(opt)
    with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
        opt_log = '------------ Options -------------\n'
        args = vars(opt)
        for k, v in args.items():
            opt_log += f'{str(k)}: {str(v)}\n'
        opt_log += '---------------------------------------\n'
        print(opt_log)
        opt_file.write(opt_log)
    """ start training """
    start_iter = 0
    if opt.saved_model != '':
        # checkpoint filenames end in _<iteration>.pth; resume from there
        try:
            start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
            print(f'continue to train, start_iter: {start_iter}')
        except:
            pass
    start_time = time.time()
    best_accuracy = -1
    best_norm_ED = -1
    iteration = start_iter
    while(True):
        # train part
        image_tensors, labels = train_dataset.get_batch()
        image = image_tensors.to(device)
        text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
        batch_size = image.size(0)
        if 'CTC' in opt.Prediction:
            preds = model(image, text)
            preds_size = torch.IntTensor([preds.size(1)] * batch_size)
            preds = preds.log_softmax(2).permute(1, 0, 2)
            cost = criterion(preds, text, preds_size, length)
        else:
            preds = model(image, text[:, :-1])  # align with Attention.forward
            target = text[:, 1:]  # without [GO] Symbol
            cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))
        model.zero_grad()
        cost.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)  # gradient clipping with 5 (Default)
        optimizer.step()
        loss_avg.add(cost)
        # validation part
        if (iteration + 1) % opt.valInterval == 0 or iteration == 0: # To see training progress, we also conduct validation when 'iteration == 0'
            elapsed_time = time.time() - start_time
            # for log
            with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
                model.eval()
                with torch.no_grad():
                    valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(
                        model, criterion, valid_loader, converter, opt)
                model.train()
                # training loss and validation loss
                loss_log = f'[{iteration+1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'
                loss_avg.reset()
                current_model_log = f'{"Current_accuracy":17s}: {current_accuracy:0.3f}, {"Current_norm_ED":17s}: {current_norm_ED:0.2f}'
                # keep best accuracy model (on valid dataset)
                if current_accuracy > best_accuracy:
                    best_accuracy = current_accuracy
                    torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')
                if current_norm_ED > best_norm_ED:
                    best_norm_ED = current_norm_ED
                    torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')
                best_model_log = f'{"Best_accuracy":17s}: {best_accuracy:0.3f}, {"Best_norm_ED":17s}: {best_norm_ED:0.2f}'
                loss_model_log = f'{loss_log}\n{current_model_log}\n{best_model_log}'
                print(loss_model_log)
                log.write(loss_model_log + '\n')
                # show some predicted results
                dashed_line = '-' * 80
                head = f'{"Ground Truth":25s} | {"Prediction":25s} | Confidence Score & T/F'
                predicted_result_log = f'{dashed_line}\n{head}\n{dashed_line}\n'
                for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):
                    if 'Attn' in opt.Prediction:
                        gt = gt[:gt.find('[s]')]
                        pred = pred[:pred.find('[s]')]
                    predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\t{str(pred == gt)}\n'
                predicted_result_log += f'{dashed_line}'
                print(predicted_result_log)
                log.write(predicted_result_log + '\n')
        # save model per 1e+5 iter.
        if (iteration + 1) % 1e+5 == 0:
            torch.save(
                model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
        if (iteration + 1) == opt.num_iter:
            print('end the training')
            sys.exit()
        iteration += 1
# Script entry point: parse the CLI, derive an experiment name, seed all RNGs
# for reproducibility, scale workers/batch size for multi-GPU, then train.
if __name__ == '__main__':
    os.environ['CUDA_VISIBLE_DEVICES'] = "2,3"
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp_name', help='Where to store logs and models')
    parser.add_argument('--train_data', default="/path/to/your/lmdb/train", help='path to training dataset')
    parser.add_argument('--valid_data', default="/path/to/your/lmdb/val", help='path to validation dataset')
    parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
    parser.add_argument('--workers', default=4, type=int, help='number of data loading workers')
    parser.add_argument('--batch_size', default=64, type=int, help='input batch size')
    parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
    parser.add_argument('--valInterval', type=int, default=500, help='Interval between each validation')
    parser.add_argument('--saved_model', default='', help="path to model to continue training")
    parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
    parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')
    parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
    parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
    parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
    parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
    parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
    """ Data processing """
    parser.add_argument('--select_data', type=str, default='/',
                        help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
    parser.add_argument('--batch_ratio', type=str, default='1',
                        help='assign ratio for each selected data in the batch')
    parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
                        help='total data usage ratio, this ratio is multiplied to total number of data.')
    parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
    parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
    parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
    parser.add_argument('--rgb', action='store_true', help='use rgb input')
    parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
    parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
    parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
    parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
    """ Model Architecture """
    parser.add_argument('--Transformation', type=str, default="TPS", help='Transformation stage. None|TPS')
    parser.add_argument('--FeatureExtraction', type=str, default="ResNet", help='FeatureExtraction stage. VGG|RCNN|ResNet')
    parser.add_argument('--SequenceModeling', type=str, default="BiLSTM", help='SequenceModeling stage. None|BiLSTM')
    parser.add_argument('--Prediction', type=str, default="Attn", help='Prediction stage. CTC|Attn')
    parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
    parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')
    parser.add_argument('--output_channel', type=int, default=512, help='the number of output channel of Feature extractor')
    parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
    opt = parser.parse_args()
    # default experiment name: architecture stages plus the seed
    if not opt.exp_name:
        opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'
        opt.exp_name += f'-Seed{opt.manualSeed}'
        # print(opt.exp_name)
    os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
    """ vocab / character number configuration """
    if opt.sensitive:
        # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        opt.character = string.printable[:-6]  # same with ASTER setting (use 94 char).
    """ Seed and GPU setting """
    # print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    np.random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    torch.cuda.manual_seed(opt.manualSeed)
    cudnn.benchmark = True
    cudnn.deterministic = True
    opt.num_gpu = torch.cuda.device_count()
    # print('device count', opt.num_gpu)
    if opt.num_gpu > 1:
        print('------ Use multi-GPU setting ------')
        print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
        # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
        opt.workers = opt.workers * opt.num_gpu
        opt.batch_size = opt.batch_size * opt.num_gpu
        """ previous version
        print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)
        opt.batch_size = opt.batch_size * opt.num_gpu
        print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')
        If you dont care about it, just commnet out these line.)
        opt.num_iter = int(opt.num_iter / opt.num_gpu)
        """
    train(opt)
| import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from model import Model
from test import validation
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
    """Train a scene-text recognition model and checkpoint the best weights.

    Builds the batch-balanced training set and a validation loader, constructs
    the four-stage model (Transformation / FeatureExtraction / SequenceModeling /
    Prediction), then loops forever: forward/backward/step, and every
    ``opt.valInterval`` iterations validates, logs, and saves
    ``best_accuracy.pth`` / ``best_norm_ED.pth`` under
    ``./saved_models/{opt.exp_name}/``. Exits the process via ``sys.exit()``
    once ``opt.num_iter`` iterations are reached.

    Args:
        opt: argparse.Namespace produced by the CLI parser in ``__main__``.
            NOTE: mutated in place (``select_data``/``batch_ratio`` are split
            into lists; ``num_class`` and possibly ``input_channel`` are set).
    """
    # --- dataset preparation ---
    if not opt.data_filtering_off:
        print('Filtering the images containing characters which are not in opt.character')
        print('Filtering the images whose label is longer than opt.batch_max_length')
        # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
    opt.select_data = opt.select_data.split('-')
    opt.batch_ratio = opt.batch_ratio.split('-')
    train_dataset = Batch_Balanced_Dataset(opt)
    log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
    AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
    valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=opt.batch_size,
        shuffle=True,  # 'True' to check training progress with validation function.
        num_workers=int(opt.workers),
        collate_fn=AlignCollate_valid, pin_memory=True)
    log.write(valid_dataset_log)
    print('-' * 80)
    log.write('-' * 80 + '\n')
    log.close()
    # --- model configuration ---
    if 'CTC' in opt.Prediction:
        converter = CTCLabelConverter(opt.character)
    else:
        converter = AttnLabelConverter(opt.character)
    opt.num_class = len(converter.character)
    if opt.rgb:
        opt.input_channel = 3
    model = Model(opt)
    print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
          opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
          opt.SequenceModeling, opt.Prediction)
    # weight initialization
    for name, param in model.named_parameters():
        if 'localization_fc2' in name:
            print(f'Skip {name} as it is already initialized')
            continue
        try:
            if 'bias' in name:
                init.constant_(param, 0.0)
            elif 'weight' in name:
                init.kaiming_normal_(param)
        except Exception:  # kaiming init fails on 1-D weights (batchnorm); fill with 1 instead.
            if 'weight' in name:
                param.data.fill_(1)
            continue
    # data parallel for multi-GPU
    model = torch.nn.DataParallel(model).to(device)
    model.train()
    if opt.saved_model != '':
        print(f'loading pretrained model from {opt.saved_model}')
        if opt.FT:
            # Fine-tuning: tolerate missing/extra keys in the checkpoint.
            model.load_state_dict(torch.load(opt.saved_model), strict=False)
        else:
            model.load_state_dict(torch.load(opt.saved_model))
    print("Model:")
    print(model)
    # --- loss setup ---
    if 'CTC' in opt.Prediction:
        criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
    else:
        criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device)  # ignore [GO] token = ignore index 0
    # loss averager
    loss_avg = Averager()
    # only optimize parameters that require gradients
    filtered_parameters = []
    params_num = []
    for p in filter(lambda p: p.requires_grad, model.parameters()):
        filtered_parameters.append(p)
        params_num.append(np.prod(p.size()))
    print('Trainable params num : ', sum(params_num))
    # setup optimizer
    if opt.adam:
        optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
    else:
        optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)
    print("Optimizer:")
    print(optimizer)
    # --- record the final options for this run ---
    with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
        opt_log = '------------ Options -------------\n'
        args = vars(opt)
        for k, v in args.items():
            opt_log += f'{str(k)}: {str(v)}\n'
        opt_log += '---------------------------------------\n'
        print(opt_log)
        opt_file.write(opt_log)
    # --- start training ---
    start_iter = 0
    if opt.saved_model != '':
        try:
            # Checkpoint names look like .../iter_300000.pth; resume from that count.
            start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
            print(f'continue to train, start_iter: {start_iter}')
        except ValueError:
            # Was a bare `except:` — only int() can fail here; a bare except
            # would also swallow KeyboardInterrupt/SystemExit.
            pass
    start_time = time.time()
    best_accuracy = -1
    best_norm_ED = -1
    iteration = start_iter
    while True:
        # train part
        image_tensors, labels = train_dataset.get_batch()
        image = image_tensors.to(device)
        text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
        batch_size = image.size(0)
        if 'CTC' in opt.Prediction:
            preds = model(image, text)
            preds_size = torch.IntTensor([preds.size(1)] * batch_size)
            preds = preds.log_softmax(2).permute(1, 0, 2)
            cost = criterion(preds, text, preds_size, length)
        else:
            preds = model(image, text[:, :-1])  # align with Attention.forward
            target = text[:, 1:]  # without [GO] Symbol
            cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))
        model.zero_grad()
        cost.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)  # gradient clipping with 5 (Default)
        optimizer.step()
        loss_avg.add(cost)
        # validation part
        if (iteration + 1) % opt.valInterval == 0 or iteration == 0:  # also validate at iteration 0 to see early progress
            elapsed_time = time.time() - start_time
            # for log
            with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
                model.eval()
                with torch.no_grad():
                    valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(
                        model, criterion, valid_loader, converter, opt)
                model.train()
                # training loss and validation loss
                loss_log = f'[{iteration+1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'
                loss_avg.reset()
                current_model_log = f'{"Current_accuracy":17s}: {current_accuracy:0.3f}, {"Current_norm_ED":17s}: {current_norm_ED:0.2f}'
                # keep best accuracy model (on valid dataset)
                if current_accuracy > best_accuracy:
                    best_accuracy = current_accuracy
                    torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')
                if current_norm_ED > best_norm_ED:
                    best_norm_ED = current_norm_ED
                    torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')
                best_model_log = f'{"Best_accuracy":17s}: {best_accuracy:0.3f}, {"Best_norm_ED":17s}: {best_norm_ED:0.2f}'
                loss_model_log = f'{loss_log}\n{current_model_log}\n{best_model_log}'
                print(loss_model_log)
                log.write(loss_model_log + '\n')
                # show some predicted results
                dashed_line = '-' * 80
                head = f'{"Ground Truth":25s} | {"Prediction":25s} | Confidence Score & T/F'
                predicted_result_log = f'{dashed_line}\n{head}\n{dashed_line}\n'
                for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):
                    if 'Attn' in opt.Prediction:
                        # Trim everything after the end-of-sequence token.
                        gt = gt[:gt.find('[s]')]
                        pred = pred[:pred.find('[s]')]
                    predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\t{str(pred == gt)}\n'
                predicted_result_log += f'{dashed_line}'
                print(predicted_result_log)
                log.write(predicted_result_log + '\n')
        # save model per 1e+5 iter.
        if (iteration + 1) % 100000 == 0:  # int literal instead of 1e+5 — avoids int % float
            torch.save(
                model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
        if (iteration + 1) == opt.num_iter:
            print('end the training')
            sys.exit()
        iteration += 1
if __name__ == '__main__':
    # Pin training to GPUs 2 and 3; must be set before any CUDA initialisation.
    os.environ['CUDA_VISIBLE_DEVICES'] = "2,3"
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp_name', help='Where to store logs and models')
    parser.add_argument('--train_data', default="/path/to/your/lmdb/train", help='path to training dataset')
    parser.add_argument('--valid_data', default="/path/to/your/lmdb/val", help='path to validation dataset')
    parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
    parser.add_argument('--workers', default=4, type=int, help='number of data loading workers')
    parser.add_argument('--batch_size', default=64, type=int, help='input batch size')
    parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
    parser.add_argument('--valInterval', type=int, default=500, help='Interval between each validation')
    parser.add_argument('--saved_model', default='', help="path to model to continue training")
    parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
    parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')
    parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
    parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
    parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
    parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
    parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
    """ Data processing """
    parser.add_argument('--select_data', type=str, default='/',
                        help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
    parser.add_argument('--batch_ratio', type=str, default='1',
                        help='assign ratio for each selected data in the batch')
    parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
                        help='total data usage ratio, this ratio is multiplied to total number of data.')
    parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
    parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
    parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
    parser.add_argument('--rgb', action='store_true', help='use rgb input')
    parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
    parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
    parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
    parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
    """ Model Architecture """
    parser.add_argument('--Transformation', type=str, default="TPS", help='Transformation stage. None|TPS')
    parser.add_argument('--FeatureExtraction', type=str, default="ResNet", help='FeatureExtraction stage. VGG|RCNN|ResNet')
    parser.add_argument('--SequenceModeling', type=str, default="BiLSTM", help='SequenceModeling stage. None|BiLSTM')
    parser.add_argument('--Prediction', type=str, default="Attn", help='Prediction stage. CTC|Attn')
    parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
    parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')
    parser.add_argument('--output_channel', type=int, default=512, help='the number of output channel of Feature extractor')
    parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
    opt = parser.parse_args()
    # Default experiment name encodes the four architecture stages plus the seed.
    if not opt.exp_name:
        opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'
        opt.exp_name += f'-Seed{opt.manualSeed}'
        # print(opt.exp_name)
    os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
    """ vocab / character number configuration """
    if opt.sensitive:
        # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
    """ Seed and GPU setting """
    # print("Random Seed: ", opt.manualSeed)
    # Seed every RNG (python / numpy / torch CPU & CUDA) for reproducibility.
    random.seed(opt.manualSeed)
    np.random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    torch.cuda.manual_seed(opt.manualSeed)
    cudnn.benchmark = True
    cudnn.deterministic = True
    opt.num_gpu = torch.cuda.device_count()
    # print('device count', opt.num_gpu)
    if opt.num_gpu > 1:
        print('------ Use multi-GPU setting ------')
        print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
        # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
        # Scale loader workers and batch size with the GPU count so per-GPU
        # batch statistics match a 1-GPU run.
        opt.workers = opt.workers * opt.num_gpu
        opt.batch_size = opt.batch_size * opt.num_gpu
        """ previous version
        print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)
        opt.batch_size = opt.batch_size * opt.num_gpu
        print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')
        If you dont care about it, just commnet out these line.)
        opt.num_iter = int(opt.num_iter / opt.num_gpu)
        """
    train(opt)
|
__package__ = "blackhat.bin.installable"
from ...helpers import Result
from ...lib.input import ArgParser
from ...lib.output import output
from ...lib.ifaddrs import getifaddrs
__COMMAND__ = "ifconfig"
__DESCRIPTION__ = ""
__DESCRIPTION_LONG__ = ""
__VERSION__ = "1.2"
def parse_args(args=None, doc=False):
    """
    Handle parsing of arguments and flags. Generates docs using help from `ArgParser`
    Args:
        args (list): argv passed to the binary (defaults to an empty list)
        doc (bool): If the function should generate and return manpage
    Returns:
        Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage
    """
    # `args=None` instead of the mutable default `args=[]` (shared-list pitfall).
    if args is None:
        args = []
    parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}")
    parser.add_argument("--version", action="store_true", help="output version information and exit")
    args = parser.parse_args(args)
    # Deduplicate parser actions while preserving first-seen order
    # (was a side-effect list comprehension).
    arg_helps_with_dups = parser._actions
    arg_helps = []
    for action in arg_helps_with_dups:
        if action not in arg_helps:
            arg_helps.append(action)
    NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}"
    SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... "
    DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION__}\n\n"
    for item in arg_helps:
        # Its a positional argument
        if len(item.option_strings) == 0:
            # If the argument is optional:
            if item.nargs == "?":
                SYNOPSIS += f"[{item.dest.upper()}] "
            else:
                SYNOPSIS += f"{item.dest.upper()} "
        else:
            # Hoisted join also avoids nesting double quotes inside a
            # double-quoted f-string, which requires Python >= 3.12 (PEP 701).
            flags = " ".join(item.option_strings)
            # Boolean flag
            if item.nargs == 0:
                if len(item.option_strings) == 1:
                    DESCRIPTION += f"\t**{flags}*/\t{item.help}\n\n"
                else:
                    DESCRIPTION += f"\t**{flags}*/\n\t\t{item.help}\n\n"
            elif item.nargs == "+":
                DESCRIPTION += f"\t**{flags}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n"
            else:
                DESCRIPTION += f"\t**{flags}*/={item.dest.upper()}\n\t\t{item.help}\n\n"
    if doc:
        return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n"
    else:
        return args, parser
def main(args: list, pipe: bool) -> Result:
    """Entry point for the in-game `ifconfig` command.

    Args:
        args (list): argv passed to the binary (excluding the command name)
        pipe (bool): whether the output is being piped into another command

    Returns:
        Result: the interface address, version banner, or an error message
    """
    args, parser = parse_args(args)
    # A parse error is fatal unless --version was requested (handled next).
    if parser.error_message:
        if not args.version:
            return output(f"{__COMMAND__}: {parser.error_message}", pipe, success=False)
    if args.version:
        return output(f"ifconfig (blackhat netutils) {__VERSION__}", pipe)
    # If we specified -h/--help, args will be empty, so exit gracefully
    if not args:
        return output("", pipe)
    else:
        # Query the simulated interface addresses and print the primary address.
        result = getifaddrs()
        return output(result.data.ifa_addr, pipe)
| __package__ = "blackhat.bin.installable"
from ...helpers import Result
from ...lib.input import ArgParser
from ...lib.output import output
from ...lib.ifaddrs import getifaddrs
__COMMAND__ = "ifconfig"
__DESCRIPTION__ = ""
__DESCRIPTION_LONG__ = ""
__VERSION__ = "1.2"
def parse_args(args=[], doc=False):
    """
    Parse argv for this binary and optionally build its manpage text.

    Args:
        args (list): argv passed to the binary
        doc (bool): when True, return the generated manpage instead of args

    Returns:
        (args, parser) when `doc` is False, otherwise the manpage `string`
    """
    parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}")
    parser.add_argument("--version", action="store_true", help="output version information and exit")
    args = parser.parse_args(args)

    # Drop duplicate actions while keeping their first-seen order.
    unique_actions = []
    for action in parser._actions:
        if action not in unique_actions:
            unique_actions.append(action)

    NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}"
    SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... "
    DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION__}\n\n"

    for action in unique_actions:
        if not action.option_strings:
            # Positional argument: optional ones (nargs == "?") get brackets.
            upper = action.dest.upper()
            SYNOPSIS += f"[{upper}] " if action.nargs == "?" else f"{upper} "
            continue
        joined = ' '.join(action.option_strings)
        if action.nargs == 0:
            # Boolean flag: short form on one line, long form on two.
            if len(action.option_strings) == 1:
                DESCRIPTION += f"\t**{joined}*/\t{action.help}\n\n"
            else:
                DESCRIPTION += f"\t**{joined}*/\n\t\t{action.help}\n\n"
        elif action.nargs == "+":
            DESCRIPTION += f"\t**{joined}*/=[{action.dest.upper()}]...\n\t\t{action.help}\n\n"
        else:
            DESCRIPTION += f"\t**{joined}*/={action.dest.upper()}\n\t\t{action.help}\n\n"

    if doc:
        return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n"
    return args, parser
def main(args: list, pipe: bool) -> Result:
    """Run the `ifconfig` command: print the primary interface address.

    Args:
        args (list): argv passed to the binary
        pipe (bool): whether output is being piped into another command

    Returns:
        Result: command output wrapped by `output`
    """
    args, parser = parse_args(args)

    # A parse error is fatal unless --version was also given (which wins below).
    if parser.error_message and not args.version:
        return output(f"{__COMMAND__}: {parser.error_message}", pipe, success=False)

    if args.version:
        return output(f"ifconfig (blackhat netutils) {__VERSION__}", pipe)

    # -h/--help leaves args empty; exit gracefully with no output.
    if not args:
        return output("", pipe)

    # Query the simulated interface addresses and report the primary one.
    return output(getifaddrs().data.ifa_addr, pipe)
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import re
from azure.ai.ml._ml_exceptions import MlException, ErrorCategory, ErrorTarget
class JobParsingError(MlException):
    """Exception that the job data returned by MFE cannot be parsed.

    Args:
        error_category: ErrorCategory classifying the failure.
        no_personal_data_message: PII-free message safe for telemetry.
        message: Full error message shown to the user.
    """
    def __init__(self, error_category, no_personal_data_message, message, *args, **kwargs):
        # NOTE(review): *args is unpacked positionally even though it appears
        # after the keyword arguments; any extra positionals bind to
        # MlException's remaining positional parameters — confirm intended.
        super(JobParsingError, self).__init__(
            message=message,
            target=ErrorTarget.JOB,
            error_category=error_category,
            no_personal_data_message=no_personal_data_message,
            *args,
            **kwargs,
        )
class PipelineChildJobError(MlException):
    """Exception that the pipeline child job is not supported."""
    # Templates composed into the final user-facing error text.
    ERROR_MESSAGE_TEMPLATE = "az ml job {command} is not supported on pipeline child job, {prompt_message}."
    PROMPT_STUDIO_UI_MESSAGE = "please go to studio UI to do related actions{url}"
    PROMPT_PARENT_MESSAGE = "please use this command on pipeline parent job"
    # Extracts subscription/resource_group/workspace/run_id from an ARM job id.
    JOB_ID_RE_PATTERN = re.compile(
        r"\/subscriptions\/(?P<subscription>[\w,-]+)\/resourceGroups\/(?P<resource_group>[\w,-]+)\/providers\/Microsoft\.MachineLearningServices\/workspaces\/(?P<workspace>[\w,-]+)\/jobs\/(?P<run_id>[\w,-]+)" # fmt: skip
    )
    def __init__(self, job_id: str, command: str = "parse", prompt_studio_ui: bool = False):
        """Build the error, optionally pointing the user at the studio run page.

        Args:
            job_id: ARM resource id of the child job.
            command: az ml job sub-command that was attempted.
            prompt_studio_ui: when True, include a direct studio URL if job_id parses.
        """
        if prompt_studio_ui:
            url = ""
            m = self.JOB_ID_RE_PATTERN.match(job_id)
            if m:
                # NOTE: double quotes nested inside a double-quoted f-string
                # require Python >= 3.12 (PEP 701).
                url = f": https://ml.azure.com/runs/{m.group("run_id")}?wsid=/subscriptions/{m.group("subscription")}/resourcegroups/{m.group("resource_group")}/workspaces/{m.group("workspace")}" # fmt: skip
            prompt_message = self.PROMPT_STUDIO_UI_MESSAGE.format(url=url)
        else:
            prompt_message = self.PROMPT_PARENT_MESSAGE
        super(PipelineChildJobError, self).__init__(
            message=self.ERROR_MESSAGE_TEMPLATE.format(command=command, prompt_message=prompt_message),
            no_personal_data_message="Pipeline child job is not supported currently.",
            target=ErrorTarget.JOB,
            error_category=ErrorCategory.USER_ERROR,
        )
        self.job_id = job_id
| # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import re
from azure.ai.ml._ml_exceptions import MlException, ErrorCategory, ErrorTarget
class JobParsingError(MlException):
    """Raised when the job payload returned by MFE cannot be parsed.

    Thin wrapper over MlException that pins the error target to JOB while
    forwarding the category, both messages, and any extra arguments on to
    the base class.
    """

    def __init__(self, error_category, no_personal_data_message, message, *args, **kwargs):
        super().__init__(
            message=message,
            no_personal_data_message=no_personal_data_message,
            target=ErrorTarget.JOB,
            error_category=error_category,
            *args,
            **kwargs,
        )
class PipelineChildJobError(MlException):
    """Exception that the pipeline child job is not supported."""
    # Templates composed into the final user-facing error text.
    ERROR_MESSAGE_TEMPLATE = "az ml job {command} is not supported on pipeline child job, {prompt_message}."
    PROMPT_STUDIO_UI_MESSAGE = "please go to studio UI to do related actions{url}"
    PROMPT_PARENT_MESSAGE = "please use this command on pipeline parent job"
    # Extracts subscription/resource_group/workspace/run_id from an ARM job id.
    JOB_ID_RE_PATTERN = re.compile(
        r"\/subscriptions\/(?P<subscription>[\w,-]+)\/resourceGroups\/(?P<resource_group>[\w,-]+)\/providers\/Microsoft\.MachineLearningServices\/workspaces\/(?P<workspace>[\w,-]+)\/jobs\/(?P<run_id>[\w,-]+)" # fmt: skip
    )
    def __init__(self, job_id: str, command: str = "parse", prompt_studio_ui: bool = False):
        """Build the error, optionally pointing the user at the studio run page.

        Args:
            job_id: ARM resource id of the child job.
            command: az ml job sub-command that was attempted.
            prompt_studio_ui: when True, include a direct studio URL if job_id parses.
        """
        if prompt_studio_ui:
            url = ""
            # If the id matches the ARM pattern, build a deep link to the run.
            m = self.JOB_ID_RE_PATTERN.match(job_id)
            if m:
                url = f": https://ml.azure.com/runs/{m.group('run_id')}?wsid=/subscriptions/{m.group('subscription')}/resourcegroups/{m.group('resource_group')}/workspaces/{m.group('workspace')}" # fmt: skip
            prompt_message = self.PROMPT_STUDIO_UI_MESSAGE.format(url=url)
        else:
            prompt_message = self.PROMPT_PARENT_MESSAGE
        super(PipelineChildJobError, self).__init__(
            message=self.ERROR_MESSAGE_TEMPLATE.format(command=command, prompt_message=prompt_message),
            no_personal_data_message="Pipeline child job is not supported currently.",
            target=ErrorTarget.JOB,
            error_category=ErrorCategory.USER_ERROR,
        )
        self.job_id = job_id
|
{"filter":false,"title":"kabutan_scraping.py","tooltip":"/kabutan_scraping.py","undoManager":{"mark":100,"position":100,"stack":[[{"start":{"row":3,"column":0},"end":{"row":4,"column":0},"action":"insert","lines":["",""],"id":2},{"start":{"row":4,"column":0},"end":{"row":5,"column":0},"action":"insert","lines":["",""]}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":1},"action":"insert","lines":["K"],"id":3},{"start":{"row":4,"column":1},"end":{"row":4,"column":2},"action":"insert","lines":["a"]},{"start":{"row":4,"column":2},"end":{"row":4,"column":3},"action":"insert","lines":["b"]},{"start":{"row":4,"column":3},"end":{"row":4,"column":4},"action":"insert","lines":["u"]},{"start":{"row":4,"column":4},"end":{"row":4,"column":5},"action":"insert","lines":["t"]},{"start":{"row":4,"column":5},"end":{"row":4,"column":6},"action":"insert","lines":["a"]}],[{"start":{"row":4,"column":6},"end":{"row":4,"column":7},"action":"insert","lines":["n"],"id":4},{"start":{"row":4,"column":7},"end":{"row":4,"column":8},"action":"insert","lines":["S"]},{"start":{"row":4,"column":8},"end":{"row":4,"column":9},"action":"insert","lines":["c"]},{"start":{"row":4,"column":9},"end":{"row":4,"column":10},"action":"insert","lines":["r"]},{"start":{"row":4,"column":10},"end":{"row":4,"column":11},"action":"insert","lines":["a"]}],[{"start":{"row":4,"column":11},"end":{"row":4,"column":12},"action":"insert","lines":["p"],"id":5},{"start":{"row":4,"column":12},"end":{"row":4,"column":13},"action":"insert","lines":["i"]},{"start":{"row":4,"column":13},"end":{"row":4,"column":14},"action":"insert","lines":["n"]},{"start":{"row":4,"column":14},"end":{"row":4,"column":15},"action":"insert","lines":["g"]}],[{"start":{"row":4,"column":15},"end":{"row":4,"column":17},"action":"insert","lines":["()"],"id":6}],[{"start":{"row":4,"column":17},"end":{"row":4,"column":18},"action":"insert","lines":[":"],"id":7}],[{"start":{"row":5,"column":0},"end":{"row":5,"column":4},"action":"insert","lines":[" 
"],"id":8},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]},{"start":{"row":7,"column":0},"end":{"row":7,"column":4},"action":"insert","lines":[" "]},{"start":{"row":8,"column":0},"end":{"row":8,"column":4},"action":"insert","lines":[" "]},{"start":{"row":9,"column":0},"end":{"row":9,"column":4},"action":"insert","lines":[" "]},{"start":{"row":10,"column":0},"end":{"row":10,"column":4},"action":"insert","lines":[" "]},{"start":{"row":11,"column":0},"end":{"row":11,"column":4},"action":"insert","lines":[" "]},{"start":{"row":12,"column":0},"end":{"row":12,"column":4},"action":"insert","lines":[" "]},{"start":{"row":13,"column":0},"end":{"row":13,"column":4},"action":"insert","lines":[" "]},{"start":{"row":14,"column":0},"end":{"row":14,"column":4},"action":"insert","lines":[" "]},{"start":{"row":15,"column":0},"end":{"row":15,"column":4},"action":"insert","lines":[" "]},{"start":{"row":16,"column":0},"end":{"row":16,"column":4},"action":"insert","lines":[" "]},{"start":{"row":17,"column":0},"end":{"row":17,"column":4},"action":"insert","lines":[" "]},{"start":{"row":18,"column":0},"end":{"row":18,"column":4},"action":"insert","lines":[" "]},{"start":{"row":19,"column":0},"end":{"row":19,"column":4},"action":"insert","lines":[" "]},{"start":{"row":20,"column":0},"end":{"row":20,"column":4},"action":"insert","lines":[" "]},{"start":{"row":21,"column":0},"end":{"row":21,"column":4},"action":"insert","lines":[" "]},{"start":{"row":22,"column":0},"end":{"row":22,"column":4},"action":"insert","lines":[" "]},{"start":{"row":23,"column":0},"end":{"row":23,"column":4},"action":"insert","lines":[" "]},{"start":{"row":24,"column":0},"end":{"row":24,"column":4},"action":"insert","lines":[" "]},{"start":{"row":25,"column":0},"end":{"row":25,"column":4},"action":"insert","lines":[" "]},{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"insert","lines":[" 
"]}],[{"start":{"row":4,"column":18},"end":{"row":5,"column":0},"action":"insert","lines":["",""],"id":9},{"start":{"row":5,"column":0},"end":{"row":5,"column":4},"action":"insert","lines":[" "]},{"start":{"row":5,"column":4},"end":{"row":6,"column":0},"action":"insert","lines":["",""]},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":5,"column":4},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":10},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":1},"action":"insert","lines":["C"],"id":11},{"start":{"row":4,"column":1},"end":{"row":4,"column":2},"action":"insert","lines":["l"]},{"start":{"row":4,"column":2},"end":{"row":4,"column":3},"action":"insert","lines":["a"]},{"start":{"row":4,"column":3},"end":{"row":4,"column":4},"action":"insert","lines":["s"]},{"start":{"row":4,"column":4},"end":{"row":4,"column":5},"action":"insert","lines":["s"]}],[{"start":{"row":4,"column":5},"end":{"row":4,"column":6},"action":"insert","lines":[" 
"],"id":12}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":5},"action":"remove","lines":["Class"],"id":13},{"start":{"row":4,"column":0},"end":{"row":4,"column":1},"action":"insert","lines":["c"]},{"start":{"row":4,"column":1},"end":{"row":4,"column":2},"action":"insert","lines":["l"]},{"start":{"row":4,"column":2},"end":{"row":4,"column":3},"action":"insert","lines":["a"]},{"start":{"row":4,"column":3},"end":{"row":4,"column":4},"action":"insert","lines":["s"]},{"start":{"row":4,"column":4},"end":{"row":4,"column":5},"action":"insert","lines":["s"]}],[{"start":{"row":6,"column":4},"end":{"row":6,"column":5},"action":"insert","lines":["d"],"id":14},{"start":{"row":6,"column":5},"end":{"row":6,"column":6},"action":"insert","lines":["e"]},{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["f"]}],[{"start":{"row":6,"column":7},"end":{"row":6,"column":8},"action":"insert","lines":[" "],"id":15},{"start":{"row":6,"column":8},"end":{"row":6,"column":9},"action":"insert","lines":["_"]},{"start":{"row":6,"column":9},"end":{"row":6,"column":10},"action":"insert","lines":["_"]},{"start":{"row":6,"column":10},"end":{"row":6,"column":11},"action":"insert","lines":["i"]},{"start":{"row":6,"column":11},"end":{"row":6,"column":12},"action":"insert","lines":["n"]},{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":["t"]}],[{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"remove","lines":["t"],"id":16}],[{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":["i"],"id":17},{"start":{"row":6,"column":13},"end":{"row":6,"column":14},"action":"insert","lines":["t"]},{"start":{"row":6,"column":14},"end":{"row":6,"column":15},"action":"insert","lines":["_"]},{"start":{"row":6,"column":15},"end":{"row":6,"column":16},"action":"insert","lines":["_"]}],[{"start":{"row":6,"column":16},"end":{"row":6,"column":18},"action":"insert","lines":["()"],"id":18}],[{"start":{"r
ow":6,"column":18},"end":{"row":6,"column":19},"action":"insert","lines":[":"],"id":19}],[{"start":{"row":8,"column":0},"end":{"row":8,"column":4},"action":"insert","lines":[" "],"id":20},{"start":{"row":9,"column":0},"end":{"row":9,"column":4},"action":"insert","lines":[" "]},{"start":{"row":10,"column":0},"end":{"row":10,"column":4},"action":"insert","lines":[" "]},{"start":{"row":11,"column":0},"end":{"row":11,"column":4},"action":"insert","lines":[" "]},{"start":{"row":12,"column":0},"end":{"row":12,"column":4},"action":"insert","lines":[" "]},{"start":{"row":13,"column":0},"end":{"row":13,"column":4},"action":"insert","lines":[" "]},{"start":{"row":14,"column":0},"end":{"row":14,"column":4},"action":"insert","lines":[" "]},{"start":{"row":15,"column":0},"end":{"row":15,"column":4},"action":"insert","lines":[" "]},{"start":{"row":16,"column":0},"end":{"row":16,"column":4},"action":"insert","lines":[" "]},{"start":{"row":17,"column":0},"end":{"row":17,"column":4},"action":"insert","lines":[" "]},{"start":{"row":18,"column":0},"end":{"row":18,"column":4},"action":"insert","lines":[" "]},{"start":{"row":19,"column":0},"end":{"row":19,"column":4},"action":"insert","lines":[" "]},{"start":{"row":20,"column":0},"end":{"row":20,"column":4},"action":"insert","lines":[" "]},{"start":{"row":21,"column":0},"end":{"row":21,"column":4},"action":"insert","lines":[" "]},{"start":{"row":22,"column":0},"end":{"row":22,"column":4},"action":"insert","lines":[" "]},{"start":{"row":23,"column":0},"end":{"row":23,"column":4},"action":"insert","lines":[" "]},{"start":{"row":24,"column":0},"end":{"row":24,"column":4},"action":"insert","lines":[" "]},{"start":{"row":25,"column":0},"end":{"row":25,"column":4},"action":"insert","lines":[" "]},{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"insert","lines":[" "]},{"start":{"row":27,"column":0},"end":{"row":27,"column":4},"action":"insert","lines":[" 
"]},{"start":{"row":28,"column":0},"end":{"row":28,"column":4},"action":"insert","lines":[" "]},{"start":{"row":29,"column":0},"end":{"row":29,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":6,"column":17},"end":{"row":6,"column":18},"action":"insert","lines":["s"],"id":21},{"start":{"row":6,"column":18},"end":{"row":6,"column":19},"action":"insert","lines":["e"]},{"start":{"row":6,"column":19},"end":{"row":6,"column":20},"action":"insert","lines":["l"]},{"start":{"row":6,"column":20},"end":{"row":6,"column":21},"action":"insert","lines":["f"]}],[{"start":{"row":5,"column":4},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":22},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":5,"column":4},"end":{"row":5,"column":6},"action":"insert","lines":[""""],"id":23}],[{"start":{"row":5,"column":6},"end":{"row":5,"column":7},"action":"insert","lines":["""],"id":24}],[{"start":{"row":5,"column":7},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":25},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]},{"start":{"row":6,"column":4},"end":{"row":6,"column":5},"action":"insert","lines":["""]},{"start":{"row":6,"column":5},"end":{"row":6,"column":6},"action":"insert","lines":["""]},{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["""]}],[{"start":{"row":5,"column":7},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":26},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" 
"]},{"start":{"row":6,"column":4},"end":{"row":6,"column":5},"action":"insert","lines":["@"]},{"start":{"row":6,"column":5},"end":{"row":6,"column":6},"action":"insert","lines":["b"]},{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["r"]},{"start":{"row":6,"column":7},"end":{"row":6,"column":8},"action":"insert","lines":["i"]},{"start":{"row":6,"column":8},"end":{"row":6,"column":9},"action":"insert","lines":["e"]}],[{"start":{"row":6,"column":9},"end":{"row":6,"column":10},"action":"insert","lines":["f"],"id":27}],[{"start":{"row":6,"column":10},"end":{"row":6,"column":11},"action":"insert","lines":[" "],"id":28},{"start":{"row":6,"column":11},"end":{"row":6,"column":12},"action":"insert","lines":[":"]}],[{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":[" "],"id":29},{"start":{"row":6,"column":13},"end":{"row":6,"column":15},"action":"insert","lines":["株価"]}],[{"start":{"row":6,"column":15},"end":{"row":6,"column":16},"action":"insert","lines":["を"],"id":30},{"start":{"row":6,"column":16},"end":{"row":6,"column":18},"action":"insert","lines":["取得"]}],[{"start":{"row":6,"column":18},"end":{"row":6,"column":20},"action":"insert","lines":["する"],"id":31},{"start":{"row":6,"column":20},"end":{"row":6,"column":23},"action":"insert","lines":["クラス"]}],[{"start":{"row":32,"column":28},"end":{"row":33,"column":0},"action":"insert","lines":["",""],"id":32},{"start":{"row":33,"column":0},"end":{"row":33,"column":8},"action":"insert","lines":[" "]},{"start":{"row":33,"column":8},"end":{"row":34,"column":0},"action":"insert","lines":["",""]},{"start":{"row":34,"column":0},"end":{"row":34,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":8},"action":"remove","lines":[" "],"id":33},{"start":{"row":34,"column":0},"end":{"row":34,"column":4},"action":"remove","lines":[" 
"]}],[{"start":{"row":34,"column":0},"end":{"row":34,"column":1},"action":"insert","lines":["k"],"id":34},{"start":{"row":34,"column":1},"end":{"row":34,"column":2},"action":"insert","lines":["a"]},{"start":{"row":34,"column":2},"end":{"row":34,"column":3},"action":"insert","lines":["b"]},{"start":{"row":34,"column":3},"end":{"row":34,"column":4},"action":"insert","lines":["u"]}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":5},"action":"insert","lines":[" "],"id":35},{"start":{"row":34,"column":5},"end":{"row":34,"column":6},"action":"insert","lines":["="]}],[{"start":{"row":34,"column":6},"end":{"row":34,"column":7},"action":"insert","lines":[" "],"id":36},{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"insert","lines":["k"]},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["a"]}],[{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"remove","lines":["a"],"id":37},{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"remove","lines":["k"]}],[{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"insert","lines":["K"],"id":38},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["a"]}],[{"start":{"row":34,"column":7},"end":{"row":34,"column":9},"action":"remove","lines":["Ka"],"id":39},{"start":{"row":34,"column":7},"end":{"row":34,"column":22},"action":"insert","lines":["KabutanScraping"]}],[{"start":{"row":34,"column":22},"end":{"row":34,"column":24},"action":"insert","lines":["()"],"id":40}],[{"start":{"row":34,"column":24},"end":{"row":35,"column":0},"action":"insert","lines":["",""],"id":41}],[{"start":{"row":32,"column":28},"end":{"row":33,"column":0},"action":"insert","lines":["",""],"id":42},{"start":{"row":33,"column":0},"end":{"row":33,"column":8},"action":"insert","lines":[" 
"]},{"start":{"row":33,"column":8},"end":{"row":34,"column":0},"action":"insert","lines":["",""]},{"start":{"row":34,"column":0},"end":{"row":34,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":8},"action":"remove","lines":[" "],"id":43}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":5},"action":"insert","lines":["d"],"id":44},{"start":{"row":34,"column":5},"end":{"row":34,"column":6},"action":"insert","lines":["e"]},{"start":{"row":34,"column":6},"end":{"row":34,"column":7},"action":"insert","lines":["f"]}],[{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"insert","lines":[" "],"id":45},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["d"]},{"start":{"row":34,"column":9},"end":{"row":34,"column":10},"action":"insert","lines":["i"]},{"start":{"row":34,"column":10},"end":{"row":34,"column":11},"action":"insert","lines":["s"]},{"start":{"row":34,"column":11},"end":{"row":34,"column":12},"action":"insert","lines":["p"]}],[{"start":{"row":34,"column":12},"end":{"row":34,"column":13},"action":"insert","lines":["l"],"id":46},{"start":{"row":34,"column":13},"end":{"row":34,"column":14},"action":"insert","lines":["a"]},{"start":{"row":34,"column":14},"end":{"row":34,"column":15},"action":"insert","lines":["y"]}],[{"start":{"row":34,"column":15},"end":{"row":34,"column":17},"action":"insert","lines":["()"],"id":47}],[{"start":{"row":34,"column":16},"end":{"row":34,"column":17},"action":"insert","lines":["s"],"id":48},{"start":{"row":34,"column":17},"end":{"row":34,"column":18},"action":"insert","lines":["e"]},{"start":{"row":34,"column":18},"end":{"row":34,"column":19},"action":"insert","lines":["l"]},{"start":{"row":34,"column":19},"end":{"row":34,"column":20},"action":"insert","lines":["f"]}],[{"start":{"row":34,"column":21},"end":{"row":34,"column":22},"action":"insert","lines":[":"],"id":49}],[{"start":{"row":34,"column":22},"end":{"row":35,"column":0
},"action":"insert","lines":["",""],"id":50},{"start":{"row":35,"column":0},"end":{"row":35,"column":8},"action":"insert","lines":[" "]},{"start":{"row":35,"column":8},"end":{"row":35,"column":9},"action":"insert","lines":["d"]},{"start":{"row":35,"column":9},"end":{"row":35,"column":10},"action":"insert","lines":["i"]},{"start":{"row":35,"column":10},"end":{"row":35,"column":11},"action":"insert","lines":["s"]}],[{"start":{"row":35,"column":11},"end":{"row":35,"column":12},"action":"insert","lines":["p"],"id":51},{"start":{"row":35,"column":12},"end":{"row":35,"column":13},"action":"insert","lines":["l"]},{"start":{"row":35,"column":13},"end":{"row":35,"column":14},"action":"insert","lines":["a"]},{"start":{"row":35,"column":14},"end":{"row":35,"column":15},"action":"insert","lines":["y"]}],[{"start":{"row":35,"column":15},"end":{"row":35,"column":17},"action":"insert","lines":["()"],"id":52}],[{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"insert","lines":["d"],"id":53},{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["e"]}],[{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"remove","lines":["e"],"id":54}],[{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["f"],"id":55},{"start":{"row":35,"column":18},"end":{"row":35,"column":19},"action":"insert","lines":["A"]},{"start":{"row":35,"column":19},"end":{"row":35,"column":20},"action":"insert","lines":["l"]},{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"insert","lines":["l"]}],[{"start":{"row":35,"column":21},"end":{"row":35,"column":22},"action":"insert","lines":["B"],"id":56},{"start":{"row":35,"column":22},"end":{"row":35,"column":23},"action":"insert","lines":["r"]},{"start":{"row":35,"column":23},"end":{"row":35,"column":24},"action":"insert","lines":["a"]},{"start":{"row":35,"column":24},"end":{"row":35,"column":25},"action":"insert","lines":["n"]},{"start":{"row":35,
"column":25},"end":{"row":35,"column":26},"action":"insert","lines":["d"]},{"start":{"row":35,"column":26},"end":{"row":35,"column":27},"action":"insert","lines":["s"]}],[{"start":{"row":38,"column":0},"end":{"row":38,"column":1},"action":"insert","lines":["k"],"id":57},{"start":{"row":38,"column":1},"end":{"row":38,"column":2},"action":"insert","lines":["a"]},{"start":{"row":38,"column":2},"end":{"row":38,"column":3},"action":"insert","lines":["b"]},{"start":{"row":38,"column":3},"end":{"row":38,"column":4},"action":"insert","lines":["u"]},{"start":{"row":38,"column":4},"end":{"row":38,"column":5},"action":"insert","lines":["."]},{"start":{"row":38,"column":5},"end":{"row":38,"column":6},"action":"insert","lines":["d"]},{"start":{"row":38,"column":6},"end":{"row":38,"column":7},"action":"insert","lines":["i"]},{"start":{"row":38,"column":7},"end":{"row":38,"column":8},"action":"insert","lines":["s"]}],[{"start":{"row":38,"column":8},"end":{"row":38,"column":9},"action":"insert","lines":["p"],"id":58},{"start":{"row":38,"column":9},"end":{"row":38,"column":10},"action":"insert","lines":["l"]},{"start":{"row":38,"column":10},"end":{"row":38,"column":11},"action":"insert","lines":["a"]},{"start":{"row":38,"column":11},"end":{"row":38,"column":12},"action":"insert","lines":["y"]}],[{"start":{"row":32,"column":8},"end":{"row":32,"column":9},"action":"insert","lines":["#"],"id":59}],[{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"insert","lines":["s"],"id":60},{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["e"]},{"start":{"row":35,"column":18},"end":{"row":35,"column":19},"action":"insert","lines":["l"]},{"start":{"row":35,"column":19},"end":{"row":35,"column":20},"action":"insert","lines":["f"]},{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"insert","lines":["."]}],[{"start":{"row":35,"column":21},"end":{"row":36,"column":0},"action":"insert","lines":["",""],"id":61},{"start":{"row"
:36,"column":0},"end":{"row":36,"column":8},"action":"insert","lines":[" "]},{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"insert","lines":["."]}],[{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"remove","lines":["."],"id":62},{"start":{"row":36,"column":4},"end":{"row":36,"column":8},"action":"remove","lines":[" "]},{"start":{"row":36,"column":0},"end":{"row":36,"column":4},"action":"remove","lines":[" "]},{"start":{"row":35,"column":21},"end":{"row":36,"column":0},"action":"remove","lines":["",""]},{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"remove","lines":["."]}],[{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"insert","lines":["."],"id":63}],[{"start":{"row":22,"column":16},"end":{"row":22,"column":17},"action":"insert","lines":["#"],"id":64}],[{"start":{"row":7,"column":7},"end":{"row":8,"column":0},"action":"insert","lines":["",""],"id":65},{"start":{"row":8,"column":0},"end":{"row":8,"column":4},"action":"insert","lines":[" 
"]}],[{"start":{"row":8,"column":4},"end":{"row":8,"column":5},"action":"insert","lines":["d"],"id":66},{"start":{"row":8,"column":5},"end":{"row":8,"column":6},"action":"insert","lines":["f"]}],[{"start":{"row":8,"column":6},"end":{"row":8,"column":7},"action":"insert","lines":["A"],"id":67},{"start":{"row":8,"column":7},"end":{"row":8,"column":8},"action":"insert","lines":["l"]},{"start":{"row":8,"column":8},"end":{"row":8,"column":9},"action":"insert","lines":["l"]},{"start":{"row":8,"column":9},"end":{"row":8,"column":10},"action":"insert","lines":["B"]},{"start":{"row":8,"column":10},"end":{"row":8,"column":11},"action":"insert","lines":["r"]},{"start":{"row":8,"column":11},"end":{"row":8,"column":12},"action":"insert","lines":["a"]},{"start":{"row":8,"column":12},"end":{"row":8,"column":13},"action":"insert","lines":["n"]},{"start":{"row":8,"column":13},"end":{"row":8,"column":14},"action":"insert","lines":["d"]}],[{"start":{"row":8,"column":14},"end":{"row":8,"column":15},"action":"insert","lines":["s"],"id":68}],[{"start":{"row":8,"column":15},"end":{"row":8,"column":16},"action":"insert","lines":[" "],"id":69},{"start":{"row":8,"column":16},"end":{"row":8,"column":17},"action":"insert","lines":["="]}],[{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"insert","lines":[" 
"],"id":70},{"start":{"row":8,"column":18},"end":{"row":8,"column":19},"action":"insert","lines":["p"]},{"start":{"row":8,"column":19},"end":{"row":8,"column":20},"action":"insert","lines":["d"]},{"start":{"row":8,"column":20},"end":{"row":8,"column":21},"action":"insert","lines":["."]},{"start":{"row":8,"column":21},"end":{"row":8,"column":22},"action":"insert","lines":["D"]},{"start":{"row":8,"column":22},"end":{"row":8,"column":23},"action":"insert","lines":["a"]},{"start":{"row":8,"column":23},"end":{"row":8,"column":24},"action":"insert","lines":["t"]}],[{"start":{"row":8,"column":24},"end":{"row":8,"column":25},"action":"insert","lines":["a"],"id":71},{"start":{"row":8,"column":25},"end":{"row":8,"column":26},"action":"insert","lines":["F"]},{"start":{"row":8,"column":26},"end":{"row":8,"column":27},"action":"insert","lines":["r"]},{"start":{"row":8,"column":27},"end":{"row":8,"column":28},"action":"insert","lines":["a"]},{"start":{"row":8,"column":28},"end":{"row":8,"column":29},"action":"insert","lines":["m"]},{"start":{"row":8,"column":29},"end":{"row":8,"column":30},"action":"insert","lines":["e"]}],[{"start":{"row":8,"column":30},"end":{"row":8,"column":32},"action":"insert","lines":["()"],"id":72}],[{"start":{"row":16,"column":8},"end":{"row":16,"column":9},"action":"insert","lines":["#"],"id":73}],[{"start":{"row":25,"column":12},"end":{"row":25,"column":13},"action":"insert","lines":["s"],"id":74},{"start":{"row":25,"column":13},"end":{"row":25,"column":14},"action":"insert","lines":["e"]},{"start":{"row":25,"column":14},"end":{"row":25,"column":15},"action":"insert","lines":["l"]},{"start":{"row":25,"column":15},"end":{"row":25,"column":16},"action":"insert","lines":["f"]},{"start":{"row":25,"column":16},"end":{"row":25,"column":17},"action":"insert","lines":["."]}],[{"start":{"row":32,"column":8},"end":{"row":32,"column":9},"action":"insert","lines":["s"],"id":75},{"start":{"row":32,"column":9},"end":{"row":32,"column":10},"action":"insert","lines":[
"e"]},{"start":{"row":32,"column":10},"end":{"row":32,"column":11},"action":"insert","lines":["l"]},{"start":{"row":32,"column":11},"end":{"row":32,"column":12},"action":"insert","lines":["f"]},{"start":{"row":32,"column":12},"end":{"row":32,"column":13},"action":"insert","lines":["."]}],[{"start":{"row":25,"column":31},"end":{"row":25,"column":32},"action":"insert","lines":["s"],"id":76},{"start":{"row":25,"column":32},"end":{"row":25,"column":33},"action":"insert","lines":["e"]},{"start":{"row":25,"column":33},"end":{"row":25,"column":34},"action":"insert","lines":["l"]},{"start":{"row":25,"column":34},"end":{"row":25,"column":35},"action":"insert","lines":["f"]},{"start":{"row":25,"column":35},"end":{"row":25,"column":36},"action":"insert","lines":["."]}],[{"start":{"row":29,"column":17},"end":{"row":29,"column":18},"action":"insert","lines":["s"],"id":77},{"start":{"row":29,"column":18},"end":{"row":29,"column":19},"action":"insert","lines":["e"]},{"start":{"row":29,"column":19},"end":{"row":29,"column":20},"action":"insert","lines":["l"]},{"start":{"row":29,"column":20},"end":{"row":29,"column":21},"action":"insert","lines":["f"]},{"start":{"row":29,"column":21},"end":{"row":29,"column":22},"action":"insert","lines":["."]}],[{"start":{"row":31,"column":60},"end":{"row":31,"column":61},"action":"insert","lines":["s"],"id":78},{"start":{"row":31,"column":61},"end":{"row":31,"column":62},"action":"insert","lines":["e"]},{"start":{"row":31,"column":62},"end":{"row":31,"column":63},"action":"insert","lines":["l"]},{"start":{"row":31,"column":63},"end":{"row":31,"column":64},"action":"insert","lines":["f"]},{"start":{"row":31,"column":64},"end":{"row":31,"column":65},"action":"insert","lines":["."]}],[{"start":{"row":16,"column":8},"end":{"row":16,"column":9},"action":"remove","lines":["#"],"id":79}],[{"start":{"row":16,"column":8},"end":{"row":16,"column":9},"action":"insert","lines":["s"],"id":80},{"start":{"row":16,"column":9},"end":{"row":16,"column":10},"action"
:"insert","lines":["e"]},{"start":{"row":16,"column":10},"end":{"row":16,"column":11},"action":"insert","lines":["l"]},{"start":{"row":16,"column":11},"end":{"row":16,"column":12},"action":"insert","lines":["f"]},{"start":{"row":16,"column":12},"end":{"row":16,"column":13},"action":"insert","lines":["."]}],[{"start":{"row":8,"column":31},"end":{"row":8,"column":32},"action":"remove","lines":[")"],"id":81},{"start":{"row":8,"column":30},"end":{"row":8,"column":31},"action":"remove","lines":["("]},{"start":{"row":8,"column":29},"end":{"row":8,"column":30},"action":"remove","lines":["e"]},{"start":{"row":8,"column":28},"end":{"row":8,"column":29},"action":"remove","lines":["m"]},{"start":{"row":8,"column":27},"end":{"row":8,"column":28},"action":"remove","lines":["a"]},{"start":{"row":8,"column":26},"end":{"row":8,"column":27},"action":"remove","lines":["r"]},{"start":{"row":8,"column":25},"end":{"row":8,"column":26},"action":"remove","lines":["F"]},{"start":{"row":8,"column":24},"end":{"row":8,"column":25},"action":"remove","lines":["a"]},{"start":{"row":8,"column":23},"end":{"row":8,"column":24},"action":"remove","lines":["t"]},{"start":{"row":8,"column":22},"end":{"row":8,"column":23},"action":"remove","lines":["a"]},{"start":{"row":8,"column":21},"end":{"row":8,"column":22},"action":"remove","lines":["D"]},{"start":{"row":8,"column":20},"end":{"row":8,"column":21},"action":"remove","lines":["."]},{"start":{"row":8,"column":19},"end":{"row":8,"column":20},"action":"remove","lines":["d"]},{"start":{"row":8,"column":18},"end":{"row":8,"column":19},"action":"remove","lines":["p"]},{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"remove","lines":[" "]},{"start":{"row":8,"column":16},"end":{"row":8,"column":17},"action":"remove","lines":["="]},{"start":{"row":8,"column":15},"end":{"row":8,"column":16},"action":"remove","lines":[" "]}],[{"start":{"row":8,"column":15},"end":{"row":8,"column":16},"action":"insert","lines":[" 
"],"id":82},{"start":{"row":8,"column":16},"end":{"row":8,"column":17},"action":"insert","lines":["="]}],[{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"insert","lines":[" "],"id":83}],[{"start":{"row":8,"column":18},"end":{"row":8,"column":20},"action":"insert","lines":["\"\""],"id":84}],[{"start":{"row":33,"column":8},"end":{"row":33,"column":9},"action":"remove","lines":["#"],"id":85}],[{"start":{"row":33,"column":16},"end":{"row":33,"column":17},"action":"insert","lines":["s"],"id":86},{"start":{"row":33,"column":17},"end":{"row":33,"column":18},"action":"insert","lines":["e"]},{"start":{"row":33,"column":18},"end":{"row":33,"column":19},"action":"insert","lines":["l"]},{"start":{"row":33,"column":19},"end":{"row":33,"column":20},"action":"insert","lines":["f"]},{"start":{"row":33,"column":20},"end":{"row":33,"column":21},"action":"insert","lines":["."]}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"insert","lines":["#"],"id":87}],[{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"insert","lines":["s"],"id":88},{"start":{"row":36,"column":9},"end":{"row":36,"column":10},"action":"insert","lines":["e"]},{"start":{"row":36,"column":10},"end":{"row":36,"column":11},"action":"insert","lines":["l"]},{"start":{"row":36,"column":11},"end":{"row":36,"column":12},"action":"insert","lines":["f"]},{"start":{"row":36,"column":12},"end":{"row":36,"column":13},"action":"insert","lines":["."]}],[{"start":{"row":36,"column":12},"end":{"row":36,"column":13},"action":"remove","lines":["."],"id":89},{"start":{"row":36,"column":11},"end":{"row":36,"column":12},"action":"remove","lines":["f"]},{"start":{"row":36,"column":10},"end":{"row":36,"column":11},"action":"remove","lines":["l"]},{"start":{"row":36,"column":9},"end":{"row":36,"column":10},"action":"remove","lines":["e"]},{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"remove","lines":["s"]}],[{"start":{"row":35,"column":15},"end":{"row":35,"
column":16},"action":"insert","lines":["_"],"id":90},{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"insert","lines":["a"]},{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["l"]},{"start":{"row":35,"column":18},"end":{"row":35,"column":19},"action":"insert","lines":["l"]}],[{"start":{"row":33,"column":8},"end":{"row":33,"column":9},"action":"insert","lines":["#"],"id":91}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"remove","lines":["#"],"id":92}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"insert","lines":["#"],"id":93}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"remove","lines":["#"],"id":94}],[{"start":{"row":39,"column":12},"end":{"row":39,"column":13},"action":"insert","lines":["_"],"id":95},{"start":{"row":39,"column":13},"end":{"row":39,"column":14},"action":"insert","lines":["a"]},{"start":{"row":39,"column":14},"end":{"row":39,"column":15},"action":"insert","lines":["l"]},{"start":{"row":39,"column":15},"end":{"row":39,"column":16},"action":"insert","lines":["l"]}],[{"start":{"row":39,"column":16},"end":{"row":39,"column":18},"action":"insert","lines":["()"],"id":96}],[{"start":{"row":38,"column":0},"end":{"row":39,"column":0},"action":"insert","lines":["",""],"id":97}],[{"start":{"row":38,"column":0},"end":{"row":38,"column":1},"action":"insert","lines":["#"],"id":98}],[{"start":{"row":38,"column":1},"end":{"row":38,"column":2},"action":"insert","lines":[" "],"id":99},{"start":{"row":38,"column":2},"end":{"row":38,"column":3},"action":"insert","lines":["F"]},{"start":{"row":38,"column":3},"end":{"row":38,"column":4},"action":"insert","lines":["o"]},{"start":{"row":38,"column":4},"end":{"row":38,"column":5},"action":"insert","lines":["r"]}],[{"start":{"row":38,"column":5},"end":{"row":38,"column":6},"action":"insert","lines":[" 
"],"id":100},{"start":{"row":38,"column":6},"end":{"row":38,"column":7},"action":"insert","lines":["D"]},{"start":{"row":38,"column":7},"end":{"row":38,"column":8},"action":"insert","lines":["e"]},{"start":{"row":38,"column":8},"end":{"row":38,"column":9},"action":"insert","lines":["b"]},{"start":{"row":38,"column":9},"end":{"row":38,"column":10},"action":"insert","lines":["u"]},{"start":{"row":38,"column":10},"end":{"row":38,"column":11},"action":"insert","lines":["g"]}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"insert","lines":["#"],"id":101}],[{"start":{"row":40,"column":0},"end":{"row":40,"column":1},"action":"insert","lines":["#"],"id":102}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":40,"column":1},"end":{"row":40,"column":1},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1542509718542,"hash":"72aa41b34248e38e6225a8cdc232de27a235d1a5"} | {"filter":false,"title":"kabutan_scraping.py","tooltip":"/kabutan_scraping.py","undoManager":{"mark":100,"position":100,"stack":[[{"start":{"row":3,"column":0},"end":{"row":4,"column":0},"action":"insert","lines":["",""],"id":2},{"start":{"row":4,"column":0},"end":{"row":5,"column":0},"action":"insert","lines":["",""]}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":1},"action":"insert","lines":["K"],"id":3},{"start":{"row":4,"column":1},"end":{"row":4,"column":2},"action":"insert","lines":["a"]},{"start":{"row":4,"column":2},"end":{"row":4,"column":3},"action":"insert","lines":["b"]},{"start":{"row":4,"column":3},"end":{"row":4,"column":4},"action":"insert","lines":["u"]},{"start":{"row":4,"column":4},"end":{"row":4,"column":5},"action":"insert","lines":["t"]},{"start":{"row":4,"column":5},"end":{"row":4,"column":6},"action":"insert","lines":["a"]}],[{"start":{"row":4,"column":6},"end":{"row":4,"column":7},"action":"insert","lines":["n"],"id":4},{"start":{"row":4,"column":7},"end":{"
row":4,"column":8},"action":"insert","lines":["S"]},{"start":{"row":4,"column":8},"end":{"row":4,"column":9},"action":"insert","lines":["c"]},{"start":{"row":4,"column":9},"end":{"row":4,"column":10},"action":"insert","lines":["r"]},{"start":{"row":4,"column":10},"end":{"row":4,"column":11},"action":"insert","lines":["a"]}],[{"start":{"row":4,"column":11},"end":{"row":4,"column":12},"action":"insert","lines":["p"],"id":5},{"start":{"row":4,"column":12},"end":{"row":4,"column":13},"action":"insert","lines":["i"]},{"start":{"row":4,"column":13},"end":{"row":4,"column":14},"action":"insert","lines":["n"]},{"start":{"row":4,"column":14},"end":{"row":4,"column":15},"action":"insert","lines":["g"]}],[{"start":{"row":4,"column":15},"end":{"row":4,"column":17},"action":"insert","lines":["()"],"id":6}],[{"start":{"row":4,"column":17},"end":{"row":4,"column":18},"action":"insert","lines":[":"],"id":7}],[{"start":{"row":5,"column":0},"end":{"row":5,"column":4},"action":"insert","lines":[" "],"id":8},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]},{"start":{"row":7,"column":0},"end":{"row":7,"column":4},"action":"insert","lines":[" "]},{"start":{"row":8,"column":0},"end":{"row":8,"column":4},"action":"insert","lines":[" "]},{"start":{"row":9,"column":0},"end":{"row":9,"column":4},"action":"insert","lines":[" "]},{"start":{"row":10,"column":0},"end":{"row":10,"column":4},"action":"insert","lines":[" "]},{"start":{"row":11,"column":0},"end":{"row":11,"column":4},"action":"insert","lines":[" "]},{"start":{"row":12,"column":0},"end":{"row":12,"column":4},"action":"insert","lines":[" "]},{"start":{"row":13,"column":0},"end":{"row":13,"column":4},"action":"insert","lines":[" "]},{"start":{"row":14,"column":0},"end":{"row":14,"column":4},"action":"insert","lines":[" "]},{"start":{"row":15,"column":0},"end":{"row":15,"column":4},"action":"insert","lines":[" 
"]},{"start":{"row":16,"column":0},"end":{"row":16,"column":4},"action":"insert","lines":[" "]},{"start":{"row":17,"column":0},"end":{"row":17,"column":4},"action":"insert","lines":[" "]},{"start":{"row":18,"column":0},"end":{"row":18,"column":4},"action":"insert","lines":[" "]},{"start":{"row":19,"column":0},"end":{"row":19,"column":4},"action":"insert","lines":[" "]},{"start":{"row":20,"column":0},"end":{"row":20,"column":4},"action":"insert","lines":[" "]},{"start":{"row":21,"column":0},"end":{"row":21,"column":4},"action":"insert","lines":[" "]},{"start":{"row":22,"column":0},"end":{"row":22,"column":4},"action":"insert","lines":[" "]},{"start":{"row":23,"column":0},"end":{"row":23,"column":4},"action":"insert","lines":[" "]},{"start":{"row":24,"column":0},"end":{"row":24,"column":4},"action":"insert","lines":[" "]},{"start":{"row":25,"column":0},"end":{"row":25,"column":4},"action":"insert","lines":[" "]},{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":4,"column":18},"end":{"row":5,"column":0},"action":"insert","lines":["",""],"id":9},{"start":{"row":5,"column":0},"end":{"row":5,"column":4},"action":"insert","lines":[" "]},{"start":{"row":5,"column":4},"end":{"row":6,"column":0},"action":"insert","lines":["",""]},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":5,"column":4},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":10},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" 
"]}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":1},"action":"insert","lines":["C"],"id":11},{"start":{"row":4,"column":1},"end":{"row":4,"column":2},"action":"insert","lines":["l"]},{"start":{"row":4,"column":2},"end":{"row":4,"column":3},"action":"insert","lines":["a"]},{"start":{"row":4,"column":3},"end":{"row":4,"column":4},"action":"insert","lines":["s"]},{"start":{"row":4,"column":4},"end":{"row":4,"column":5},"action":"insert","lines":["s"]}],[{"start":{"row":4,"column":5},"end":{"row":4,"column":6},"action":"insert","lines":[" "],"id":12}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":5},"action":"remove","lines":["Class"],"id":13},{"start":{"row":4,"column":0},"end":{"row":4,"column":1},"action":"insert","lines":["c"]},{"start":{"row":4,"column":1},"end":{"row":4,"column":2},"action":"insert","lines":["l"]},{"start":{"row":4,"column":2},"end":{"row":4,"column":3},"action":"insert","lines":["a"]},{"start":{"row":4,"column":3},"end":{"row":4,"column":4},"action":"insert","lines":["s"]},{"start":{"row":4,"column":4},"end":{"row":4,"column":5},"action":"insert","lines":["s"]}],[{"start":{"row":6,"column":4},"end":{"row":6,"column":5},"action":"insert","lines":["d"],"id":14},{"start":{"row":6,"column":5},"end":{"row":6,"column":6},"action":"insert","lines":["e"]},{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["f"]}],[{"start":{"row":6,"column":7},"end":{"row":6,"column":8},"action":"insert","lines":[" 
"],"id":15},{"start":{"row":6,"column":8},"end":{"row":6,"column":9},"action":"insert","lines":["_"]},{"start":{"row":6,"column":9},"end":{"row":6,"column":10},"action":"insert","lines":["_"]},{"start":{"row":6,"column":10},"end":{"row":6,"column":11},"action":"insert","lines":["i"]},{"start":{"row":6,"column":11},"end":{"row":6,"column":12},"action":"insert","lines":["n"]},{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":["t"]}],[{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"remove","lines":["t"],"id":16}],[{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":["i"],"id":17},{"start":{"row":6,"column":13},"end":{"row":6,"column":14},"action":"insert","lines":["t"]},{"start":{"row":6,"column":14},"end":{"row":6,"column":15},"action":"insert","lines":["_"]},{"start":{"row":6,"column":15},"end":{"row":6,"column":16},"action":"insert","lines":["_"]}],[{"start":{"row":6,"column":16},"end":{"row":6,"column":18},"action":"insert","lines":["()"],"id":18}],[{"start":{"row":6,"column":18},"end":{"row":6,"column":19},"action":"insert","lines":[":"],"id":19}],[{"start":{"row":8,"column":0},"end":{"row":8,"column":4},"action":"insert","lines":[" "],"id":20},{"start":{"row":9,"column":0},"end":{"row":9,"column":4},"action":"insert","lines":[" "]},{"start":{"row":10,"column":0},"end":{"row":10,"column":4},"action":"insert","lines":[" "]},{"start":{"row":11,"column":0},"end":{"row":11,"column":4},"action":"insert","lines":[" "]},{"start":{"row":12,"column":0},"end":{"row":12,"column":4},"action":"insert","lines":[" "]},{"start":{"row":13,"column":0},"end":{"row":13,"column":4},"action":"insert","lines":[" "]},{"start":{"row":14,"column":0},"end":{"row":14,"column":4},"action":"insert","lines":[" "]},{"start":{"row":15,"column":0},"end":{"row":15,"column":4},"action":"insert","lines":[" "]},{"start":{"row":16,"column":0},"end":{"row":16,"column":4},"action":"insert","lines":[" 
"]},{"start":{"row":17,"column":0},"end":{"row":17,"column":4},"action":"insert","lines":[" "]},{"start":{"row":18,"column":0},"end":{"row":18,"column":4},"action":"insert","lines":[" "]},{"start":{"row":19,"column":0},"end":{"row":19,"column":4},"action":"insert","lines":[" "]},{"start":{"row":20,"column":0},"end":{"row":20,"column":4},"action":"insert","lines":[" "]},{"start":{"row":21,"column":0},"end":{"row":21,"column":4},"action":"insert","lines":[" "]},{"start":{"row":22,"column":0},"end":{"row":22,"column":4},"action":"insert","lines":[" "]},{"start":{"row":23,"column":0},"end":{"row":23,"column":4},"action":"insert","lines":[" "]},{"start":{"row":24,"column":0},"end":{"row":24,"column":4},"action":"insert","lines":[" "]},{"start":{"row":25,"column":0},"end":{"row":25,"column":4},"action":"insert","lines":[" "]},{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"insert","lines":[" "]},{"start":{"row":27,"column":0},"end":{"row":27,"column":4},"action":"insert","lines":[" "]},{"start":{"row":28,"column":0},"end":{"row":28,"column":4},"action":"insert","lines":[" "]},{"start":{"row":29,"column":0},"end":{"row":29,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":6,"column":17},"end":{"row":6,"column":18},"action":"insert","lines":["s"],"id":21},{"start":{"row":6,"column":18},"end":{"row":6,"column":19},"action":"insert","lines":["e"]},{"start":{"row":6,"column":19},"end":{"row":6,"column":20},"action":"insert","lines":["l"]},{"start":{"row":6,"column":20},"end":{"row":6,"column":21},"action":"insert","lines":["f"]}],[{"start":{"row":5,"column":4},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":22},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" 
"]}],[{"start":{"row":5,"column":4},"end":{"row":5,"column":6},"action":"insert","lines":["''"],"id":23}],[{"start":{"row":5,"column":6},"end":{"row":5,"column":7},"action":"insert","lines":["'"],"id":24}],[{"start":{"row":5,"column":7},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":25},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]},{"start":{"row":6,"column":4},"end":{"row":6,"column":5},"action":"insert","lines":["'"]},{"start":{"row":6,"column":5},"end":{"row":6,"column":6},"action":"insert","lines":["'"]},{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["'"]}],[{"start":{"row":5,"column":7},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":26},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]},{"start":{"row":6,"column":4},"end":{"row":6,"column":5},"action":"insert","lines":["@"]},{"start":{"row":6,"column":5},"end":{"row":6,"column":6},"action":"insert","lines":["b"]},{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["r"]},{"start":{"row":6,"column":7},"end":{"row":6,"column":8},"action":"insert","lines":["i"]},{"start":{"row":6,"column":8},"end":{"row":6,"column":9},"action":"insert","lines":["e"]}],[{"start":{"row":6,"column":9},"end":{"row":6,"column":10},"action":"insert","lines":["f"],"id":27}],[{"start":{"row":6,"column":10},"end":{"row":6,"column":11},"action":"insert","lines":[" "],"id":28},{"start":{"row":6,"column":11},"end":{"row":6,"column":12},"action":"insert","lines":[":"]}],[{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":[" 
"],"id":29},{"start":{"row":6,"column":13},"end":{"row":6,"column":15},"action":"insert","lines":["株価"]}],[{"start":{"row":6,"column":15},"end":{"row":6,"column":16},"action":"insert","lines":["を"],"id":30},{"start":{"row":6,"column":16},"end":{"row":6,"column":18},"action":"insert","lines":["取得"]}],[{"start":{"row":6,"column":18},"end":{"row":6,"column":20},"action":"insert","lines":["する"],"id":31},{"start":{"row":6,"column":20},"end":{"row":6,"column":23},"action":"insert","lines":["クラス"]}],[{"start":{"row":32,"column":28},"end":{"row":33,"column":0},"action":"insert","lines":["",""],"id":32},{"start":{"row":33,"column":0},"end":{"row":33,"column":8},"action":"insert","lines":[" "]},{"start":{"row":33,"column":8},"end":{"row":34,"column":0},"action":"insert","lines":["",""]},{"start":{"row":34,"column":0},"end":{"row":34,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":8},"action":"remove","lines":[" "],"id":33},{"start":{"row":34,"column":0},"end":{"row":34,"column":4},"action":"remove","lines":[" "]}],[{"start":{"row":34,"column":0},"end":{"row":34,"column":1},"action":"insert","lines":["k"],"id":34},{"start":{"row":34,"column":1},"end":{"row":34,"column":2},"action":"insert","lines":["a"]},{"start":{"row":34,"column":2},"end":{"row":34,"column":3},"action":"insert","lines":["b"]},{"start":{"row":34,"column":3},"end":{"row":34,"column":4},"action":"insert","lines":["u"]}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":5},"action":"insert","lines":[" "],"id":35},{"start":{"row":34,"column":5},"end":{"row":34,"column":6},"action":"insert","lines":["="]}],[{"start":{"row":34,"column":6},"end":{"row":34,"column":7},"action":"insert","lines":[" 
"],"id":36},{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"insert","lines":["k"]},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["a"]}],[{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"remove","lines":["a"],"id":37},{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"remove","lines":["k"]}],[{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"insert","lines":["K"],"id":38},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["a"]}],[{"start":{"row":34,"column":7},"end":{"row":34,"column":9},"action":"remove","lines":["Ka"],"id":39},{"start":{"row":34,"column":7},"end":{"row":34,"column":22},"action":"insert","lines":["KabutanScraping"]}],[{"start":{"row":34,"column":22},"end":{"row":34,"column":24},"action":"insert","lines":["()"],"id":40}],[{"start":{"row":34,"column":24},"end":{"row":35,"column":0},"action":"insert","lines":["",""],"id":41}],[{"start":{"row":32,"column":28},"end":{"row":33,"column":0},"action":"insert","lines":["",""],"id":42},{"start":{"row":33,"column":0},"end":{"row":33,"column":8},"action":"insert","lines":[" "]},{"start":{"row":33,"column":8},"end":{"row":34,"column":0},"action":"insert","lines":["",""]},{"start":{"row":34,"column":0},"end":{"row":34,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":8},"action":"remove","lines":[" "],"id":43}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":5},"action":"insert","lines":["d"],"id":44},{"start":{"row":34,"column":5},"end":{"row":34,"column":6},"action":"insert","lines":["e"]},{"start":{"row":34,"column":6},"end":{"row":34,"column":7},"action":"insert","lines":["f"]}],[{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"insert","lines":[" 
"],"id":45},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["d"]},{"start":{"row":34,"column":9},"end":{"row":34,"column":10},"action":"insert","lines":["i"]},{"start":{"row":34,"column":10},"end":{"row":34,"column":11},"action":"insert","lines":["s"]},{"start":{"row":34,"column":11},"end":{"row":34,"column":12},"action":"insert","lines":["p"]}],[{"start":{"row":34,"column":12},"end":{"row":34,"column":13},"action":"insert","lines":["l"],"id":46},{"start":{"row":34,"column":13},"end":{"row":34,"column":14},"action":"insert","lines":["a"]},{"start":{"row":34,"column":14},"end":{"row":34,"column":15},"action":"insert","lines":["y"]}],[{"start":{"row":34,"column":15},"end":{"row":34,"column":17},"action":"insert","lines":["()"],"id":47}],[{"start":{"row":34,"column":16},"end":{"row":34,"column":17},"action":"insert","lines":["s"],"id":48},{"start":{"row":34,"column":17},"end":{"row":34,"column":18},"action":"insert","lines":["e"]},{"start":{"row":34,"column":18},"end":{"row":34,"column":19},"action":"insert","lines":["l"]},{"start":{"row":34,"column":19},"end":{"row":34,"column":20},"action":"insert","lines":["f"]}],[{"start":{"row":34,"column":21},"end":{"row":34,"column":22},"action":"insert","lines":[":"],"id":49}],[{"start":{"row":34,"column":22},"end":{"row":35,"column":0},"action":"insert","lines":["",""],"id":50},{"start":{"row":35,"column":0},"end":{"row":35,"column":8},"action":"insert","lines":[" 
"]},{"start":{"row":35,"column":8},"end":{"row":35,"column":9},"action":"insert","lines":["d"]},{"start":{"row":35,"column":9},"end":{"row":35,"column":10},"action":"insert","lines":["i"]},{"start":{"row":35,"column":10},"end":{"row":35,"column":11},"action":"insert","lines":["s"]}],[{"start":{"row":35,"column":11},"end":{"row":35,"column":12},"action":"insert","lines":["p"],"id":51},{"start":{"row":35,"column":12},"end":{"row":35,"column":13},"action":"insert","lines":["l"]},{"start":{"row":35,"column":13},"end":{"row":35,"column":14},"action":"insert","lines":["a"]},{"start":{"row":35,"column":14},"end":{"row":35,"column":15},"action":"insert","lines":["y"]}],[{"start":{"row":35,"column":15},"end":{"row":35,"column":17},"action":"insert","lines":["()"],"id":52}],[{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"insert","lines":["d"],"id":53},{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["e"]}],[{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"remove","lines":["e"],"id":54}],[{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["f"],"id":55},{"start":{"row":35,"column":18},"end":{"row":35,"column":19},"action":"insert","lines":["A"]},{"start":{"row":35,"column":19},"end":{"row":35,"column":20},"action":"insert","lines":["l"]},{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"insert","lines":["l"]}],[{"start":{"row":35,"column":21},"end":{"row":35,"column":22},"action":"insert","lines":["B"],"id":56},{"start":{"row":35,"column":22},"end":{"row":35,"column":23},"action":"insert","lines":["r"]},{"start":{"row":35,"column":23},"end":{"row":35,"column":24},"action":"insert","lines":["a"]},{"start":{"row":35,"column":24},"end":{"row":35,"column":25},"action":"insert","lines":["n"]},{"start":{"row":35,"column":25},"end":{"row":35,"column":26},"action":"insert","lines":["d"]},{"start":{"row":35,"column":26},"end":{"row":35,"column":2
7},"action":"insert","lines":["s"]}],[{"start":{"row":38,"column":0},"end":{"row":38,"column":1},"action":"insert","lines":["k"],"id":57},{"start":{"row":38,"column":1},"end":{"row":38,"column":2},"action":"insert","lines":["a"]},{"start":{"row":38,"column":2},"end":{"row":38,"column":3},"action":"insert","lines":["b"]},{"start":{"row":38,"column":3},"end":{"row":38,"column":4},"action":"insert","lines":["u"]},{"start":{"row":38,"column":4},"end":{"row":38,"column":5},"action":"insert","lines":["."]},{"start":{"row":38,"column":5},"end":{"row":38,"column":6},"action":"insert","lines":["d"]},{"start":{"row":38,"column":6},"end":{"row":38,"column":7},"action":"insert","lines":["i"]},{"start":{"row":38,"column":7},"end":{"row":38,"column":8},"action":"insert","lines":["s"]}],[{"start":{"row":38,"column":8},"end":{"row":38,"column":9},"action":"insert","lines":["p"],"id":58},{"start":{"row":38,"column":9},"end":{"row":38,"column":10},"action":"insert","lines":["l"]},{"start":{"row":38,"column":10},"end":{"row":38,"column":11},"action":"insert","lines":["a"]},{"start":{"row":38,"column":11},"end":{"row":38,"column":12},"action":"insert","lines":["y"]}],[{"start":{"row":32,"column":8},"end":{"row":32,"column":9},"action":"insert","lines":["#"],"id":59}],[{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"insert","lines":["s"],"id":60},{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["e"]},{"start":{"row":35,"column":18},"end":{"row":35,"column":19},"action":"insert","lines":["l"]},{"start":{"row":35,"column":19},"end":{"row":35,"column":20},"action":"insert","lines":["f"]},{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"insert","lines":["."]}],[{"start":{"row":35,"column":21},"end":{"row":36,"column":0},"action":"insert","lines":["",""],"id":61},{"start":{"row":36,"column":0},"end":{"row":36,"column":8},"action":"insert","lines":[" 
"]},{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"insert","lines":["."]}],[{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"remove","lines":["."],"id":62},{"start":{"row":36,"column":4},"end":{"row":36,"column":8},"action":"remove","lines":[" "]},{"start":{"row":36,"column":0},"end":{"row":36,"column":4},"action":"remove","lines":[" "]},{"start":{"row":35,"column":21},"end":{"row":36,"column":0},"action":"remove","lines":["",""]},{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"remove","lines":["."]}],[{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"insert","lines":["."],"id":63}],[{"start":{"row":22,"column":16},"end":{"row":22,"column":17},"action":"insert","lines":["#"],"id":64}],[{"start":{"row":7,"column":7},"end":{"row":8,"column":0},"action":"insert","lines":["",""],"id":65},{"start":{"row":8,"column":0},"end":{"row":8,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":8,"column":4},"end":{"row":8,"column":5},"action":"insert","lines":["d"],"id":66},{"start":{"row":8,"column":5},"end":{"row":8,"column":6},"action":"insert","lines":["f"]}],[{"start":{"row":8,"column":6},"end":{"row":8,"column":7},"action":"insert","lines":["A"],"id":67},{"start":{"row":8,"column":7},"end":{"row":8,"column":8},"action":"insert","lines":["l"]},{"start":{"row":8,"column":8},"end":{"row":8,"column":9},"action":"insert","lines":["l"]},{"start":{"row":8,"column":9},"end":{"row":8,"column":10},"action":"insert","lines":["B"]},{"start":{"row":8,"column":10},"end":{"row":8,"column":11},"action":"insert","lines":["r"]},{"start":{"row":8,"column":11},"end":{"row":8,"column":12},"action":"insert","lines":["a"]},{"start":{"row":8,"column":12},"end":{"row":8,"column":13},"action":"insert","lines":["n"]},{"start":{"row":8,"column":13},"end":{"row":8,"column":14},"action":"insert","lines":["d"]}],[{"start":{"row":8,"column":14},"end":{"row":8,"column":15},"action":"insert","lines":["s"],"id":68
}],[{"start":{"row":8,"column":15},"end":{"row":8,"column":16},"action":"insert","lines":[" "],"id":69},{"start":{"row":8,"column":16},"end":{"row":8,"column":17},"action":"insert","lines":["="]}],[{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"insert","lines":[" "],"id":70},{"start":{"row":8,"column":18},"end":{"row":8,"column":19},"action":"insert","lines":["p"]},{"start":{"row":8,"column":19},"end":{"row":8,"column":20},"action":"insert","lines":["d"]},{"start":{"row":8,"column":20},"end":{"row":8,"column":21},"action":"insert","lines":["."]},{"start":{"row":8,"column":21},"end":{"row":8,"column":22},"action":"insert","lines":["D"]},{"start":{"row":8,"column":22},"end":{"row":8,"column":23},"action":"insert","lines":["a"]},{"start":{"row":8,"column":23},"end":{"row":8,"column":24},"action":"insert","lines":["t"]}],[{"start":{"row":8,"column":24},"end":{"row":8,"column":25},"action":"insert","lines":["a"],"id":71},{"start":{"row":8,"column":25},"end":{"row":8,"column":26},"action":"insert","lines":["F"]},{"start":{"row":8,"column":26},"end":{"row":8,"column":27},"action":"insert","lines":["r"]},{"start":{"row":8,"column":27},"end":{"row":8,"column":28},"action":"insert","lines":["a"]},{"start":{"row":8,"column":28},"end":{"row":8,"column":29},"action":"insert","lines":["m"]},{"start":{"row":8,"column":29},"end":{"row":8,"column":30},"action":"insert","lines":["e"]}],[{"start":{"row":8,"column":30},"end":{"row":8,"column":32},"action":"insert","lines":["()"],"id":72}],[{"start":{"row":16,"column":8},"end":{"row":16,"column":9},"action":"insert","lines":["#"],"id":73}],[{"start":{"row":25,"column":12},"end":{"row":25,"column":13},"action":"insert","lines":["s"],"id":74},{"start":{"row":25,"column":13},"end":{"row":25,"column":14},"action":"insert","lines":["e"]},{"start":{"row":25,"column":14},"end":{"row":25,"column":15},"action":"insert","lines":["l"]},{"start":{"row":25,"column":15},"end":{"row":25,"column":16},"action":"insert","lines":["f"
]},{"start":{"row":25,"column":16},"end":{"row":25,"column":17},"action":"insert","lines":["."]}],[{"start":{"row":32,"column":8},"end":{"row":32,"column":9},"action":"insert","lines":["s"],"id":75},{"start":{"row":32,"column":9},"end":{"row":32,"column":10},"action":"insert","lines":["e"]},{"start":{"row":32,"column":10},"end":{"row":32,"column":11},"action":"insert","lines":["l"]},{"start":{"row":32,"column":11},"end":{"row":32,"column":12},"action":"insert","lines":["f"]},{"start":{"row":32,"column":12},"end":{"row":32,"column":13},"action":"insert","lines":["."]}],[{"start":{"row":25,"column":31},"end":{"row":25,"column":32},"action":"insert","lines":["s"],"id":76},{"start":{"row":25,"column":32},"end":{"row":25,"column":33},"action":"insert","lines":["e"]},{"start":{"row":25,"column":33},"end":{"row":25,"column":34},"action":"insert","lines":["l"]},{"start":{"row":25,"column":34},"end":{"row":25,"column":35},"action":"insert","lines":["f"]},{"start":{"row":25,"column":35},"end":{"row":25,"column":36},"action":"insert","lines":["."]}],[{"start":{"row":29,"column":17},"end":{"row":29,"column":18},"action":"insert","lines":["s"],"id":77},{"start":{"row":29,"column":18},"end":{"row":29,"column":19},"action":"insert","lines":["e"]},{"start":{"row":29,"column":19},"end":{"row":29,"column":20},"action":"insert","lines":["l"]},{"start":{"row":29,"column":20},"end":{"row":29,"column":21},"action":"insert","lines":["f"]},{"start":{"row":29,"column":21},"end":{"row":29,"column":22},"action":"insert","lines":["."]}],[{"start":{"row":31,"column":60},"end":{"row":31,"column":61},"action":"insert","lines":["s"],"id":78},{"start":{"row":31,"column":61},"end":{"row":31,"column":62},"action":"insert","lines":["e"]},{"start":{"row":31,"column":62},"end":{"row":31,"column":63},"action":"insert","lines":["l"]},{"start":{"row":31,"column":63},"end":{"row":31,"column":64},"action":"insert","lines":["f"]},{"start":{"row":31,"column":64},"end":{"row":31,"column":65},"action":"insert","
lines":["."]}],[{"start":{"row":16,"column":8},"end":{"row":16,"column":9},"action":"remove","lines":["#"],"id":79}],[{"start":{"row":16,"column":8},"end":{"row":16,"column":9},"action":"insert","lines":["s"],"id":80},{"start":{"row":16,"column":9},"end":{"row":16,"column":10},"action":"insert","lines":["e"]},{"start":{"row":16,"column":10},"end":{"row":16,"column":11},"action":"insert","lines":["l"]},{"start":{"row":16,"column":11},"end":{"row":16,"column":12},"action":"insert","lines":["f"]},{"start":{"row":16,"column":12},"end":{"row":16,"column":13},"action":"insert","lines":["."]}],[{"start":{"row":8,"column":31},"end":{"row":8,"column":32},"action":"remove","lines":[")"],"id":81},{"start":{"row":8,"column":30},"end":{"row":8,"column":31},"action":"remove","lines":["("]},{"start":{"row":8,"column":29},"end":{"row":8,"column":30},"action":"remove","lines":["e"]},{"start":{"row":8,"column":28},"end":{"row":8,"column":29},"action":"remove","lines":["m"]},{"start":{"row":8,"column":27},"end":{"row":8,"column":28},"action":"remove","lines":["a"]},{"start":{"row":8,"column":26},"end":{"row":8,"column":27},"action":"remove","lines":["r"]},{"start":{"row":8,"column":25},"end":{"row":8,"column":26},"action":"remove","lines":["F"]},{"start":{"row":8,"column":24},"end":{"row":8,"column":25},"action":"remove","lines":["a"]},{"start":{"row":8,"column":23},"end":{"row":8,"column":24},"action":"remove","lines":["t"]},{"start":{"row":8,"column":22},"end":{"row":8,"column":23},"action":"remove","lines":["a"]},{"start":{"row":8,"column":21},"end":{"row":8,"column":22},"action":"remove","lines":["D"]},{"start":{"row":8,"column":20},"end":{"row":8,"column":21},"action":"remove","lines":["."]},{"start":{"row":8,"column":19},"end":{"row":8,"column":20},"action":"remove","lines":["d"]},{"start":{"row":8,"column":18},"end":{"row":8,"column":19},"action":"remove","lines":["p"]},{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"remove","lines":[" 
"]},{"start":{"row":8,"column":16},"end":{"row":8,"column":17},"action":"remove","lines":["="]},{"start":{"row":8,"column":15},"end":{"row":8,"column":16},"action":"remove","lines":[" "]}],[{"start":{"row":8,"column":15},"end":{"row":8,"column":16},"action":"insert","lines":[" "],"id":82},{"start":{"row":8,"column":16},"end":{"row":8,"column":17},"action":"insert","lines":["="]}],[{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"insert","lines":[" "],"id":83}],[{"start":{"row":8,"column":18},"end":{"row":8,"column":20},"action":"insert","lines":["\"\""],"id":84}],[{"start":{"row":33,"column":8},"end":{"row":33,"column":9},"action":"remove","lines":["#"],"id":85}],[{"start":{"row":33,"column":16},"end":{"row":33,"column":17},"action":"insert","lines":["s"],"id":86},{"start":{"row":33,"column":17},"end":{"row":33,"column":18},"action":"insert","lines":["e"]},{"start":{"row":33,"column":18},"end":{"row":33,"column":19},"action":"insert","lines":["l"]},{"start":{"row":33,"column":19},"end":{"row":33,"column":20},"action":"insert","lines":["f"]},{"start":{"row":33,"column":20},"end":{"row":33,"column":21},"action":"insert","lines":["."]}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"insert","lines":["#"],"id":87}],[{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"insert","lines":["s"],"id":88},{"start":{"row":36,"column":9},"end":{"row":36,"column":10},"action":"insert","lines":["e"]},{"start":{"row":36,"column":10},"end":{"row":36,"column":11},"action":"insert","lines":["l"]},{"start":{"row":36,"column":11},"end":{"row":36,"column":12},"action":"insert","lines":["f"]},{"start":{"row":36,"column":12},"end":{"row":36,"column":13},"action":"insert","lines":["."]}],[{"start":{"row":36,"column":12},"end":{"row":36,"column":13},"action":"remove","lines":["."],"id":89},{"start":{"row":36,"column":11},"end":{"row":36,"column":12},"action":"remove","lines":["f"]},{"start":{"row":36,"column":10},"end":{"row":36,"col
umn":11},"action":"remove","lines":["l"]},{"start":{"row":36,"column":9},"end":{"row":36,"column":10},"action":"remove","lines":["e"]},{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"remove","lines":["s"]}],[{"start":{"row":35,"column":15},"end":{"row":35,"column":16},"action":"insert","lines":["_"],"id":90},{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"insert","lines":["a"]},{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["l"]},{"start":{"row":35,"column":18},"end":{"row":35,"column":19},"action":"insert","lines":["l"]}],[{"start":{"row":33,"column":8},"end":{"row":33,"column":9},"action":"insert","lines":["#"],"id":91}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"remove","lines":["#"],"id":92}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"insert","lines":["#"],"id":93}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"remove","lines":["#"],"id":94}],[{"start":{"row":39,"column":12},"end":{"row":39,"column":13},"action":"insert","lines":["_"],"id":95},{"start":{"row":39,"column":13},"end":{"row":39,"column":14},"action":"insert","lines":["a"]},{"start":{"row":39,"column":14},"end":{"row":39,"column":15},"action":"insert","lines":["l"]},{"start":{"row":39,"column":15},"end":{"row":39,"column":16},"action":"insert","lines":["l"]}],[{"start":{"row":39,"column":16},"end":{"row":39,"column":18},"action":"insert","lines":["()"],"id":96}],[{"start":{"row":38,"column":0},"end":{"row":39,"column":0},"action":"insert","lines":["",""],"id":97}],[{"start":{"row":38,"column":0},"end":{"row":38,"column":1},"action":"insert","lines":["#"],"id":98}],[{"start":{"row":38,"column":1},"end":{"row":38,"column":2},"action":"insert","lines":[" 
"],"id":99},{"start":{"row":38,"column":2},"end":{"row":38,"column":3},"action":"insert","lines":["F"]},{"start":{"row":38,"column":3},"end":{"row":38,"column":4},"action":"insert","lines":["o"]},{"start":{"row":38,"column":4},"end":{"row":38,"column":5},"action":"insert","lines":["r"]}],[{"start":{"row":38,"column":5},"end":{"row":38,"column":6},"action":"insert","lines":[" "],"id":100},{"start":{"row":38,"column":6},"end":{"row":38,"column":7},"action":"insert","lines":["D"]},{"start":{"row":38,"column":7},"end":{"row":38,"column":8},"action":"insert","lines":["e"]},{"start":{"row":38,"column":8},"end":{"row":38,"column":9},"action":"insert","lines":["b"]},{"start":{"row":38,"column":9},"end":{"row":38,"column":10},"action":"insert","lines":["u"]},{"start":{"row":38,"column":10},"end":{"row":38,"column":11},"action":"insert","lines":["g"]}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"insert","lines":["#"],"id":101}],[{"start":{"row":40,"column":0},"end":{"row":40,"column":1},"action":"insert","lines":["#"],"id":102}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":40,"column":1},"end":{"row":40,"column":1},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1542509718542,"hash":"72aa41b34248e38e6225a8cdc232de27a235d1a5"} |
from discord.ext import commands, tasks
from collections import Counter, defaultdict
from .utils import checks, db, time, formats
from .utils.paginator import CannotPaginate
import pkg_resources
import logging
import discord
import textwrap
import datetime
import traceback
import itertools
import typing
import asyncpg
import asyncio
import pygit2
import psutil
import json
import os
import re
import io
import gc
# Module-level logger for this cog.
log = logging.getLogger(__name__)
# Discord channel ID for log output; appears unused in this chunk — presumably
# consumed elsewhere or kept for reference. TODO confirm.
LOGGING_CHANNEL = 309632009427222529
class GatewayHandler(logging.Handler):
    """Logging handler that forwards gateway-related records to the Stats cog.

    Handles records at INFO level and above; ``filter`` narrows them further to
    gateway traffic: the ``discord.gateway`` logger, shard notifications, and
    websocket-close messages.
    """

    def __init__(self, cog):
        # Keep a reference to the cog so records can be queued on it in emit().
        self.cog = cog
        super().__init__(logging.INFO)

    def filter(self, record):
        """Accept only records that concern the Discord gateway."""
        if record.name == 'discord.gateway':
            return True
        return 'Shard ID' in record.msg or 'Websocket closed ' in record.msg

    def emit(self, record):
        # Hand the record off to the cog, which relays it to the status webhook.
        self.cog.add_record(record)
class Commands(db.Table):
    """Schema for the ``commands`` usage table (project-local ``db`` helper)."""
    # Surrogate primary key.
    id = db.PrimaryKeyColumn()
    # Guild where the command ran; NULL for direct messages (see register_command).
    guild_id = db.Column(db.Integer(big=True), index=True)
    channel_id = db.Column(db.Integer(big=True))
    # User who invoked the command.
    author_id = db.Column(db.Integer(big=True), index=True)
    # Timestamp of the invocation.
    used = db.Column(db.Datetime, index=True)
    # Prefix the command was invoked with.
    prefix = db.Column(db.String)
    # Fully qualified command name.
    command = db.Column(db.String, index=True)
    # Whether the invocation errored (NULL when the outcome is unknown).
    failed = db.Column(db.Boolean, index=True)
# Matches Discord invite links such as ``https://discord.gg/abc123``.
_INVITE_REGEX = re.compile(r'(?:https?:\/\/)?discord(?:\.gg|\.com|app\.com\/invite)?\/[A-Za-z0-9]+')


def censor_invite(obj, *, _regex=_INVITE_REGEX):
    """Return ``str(obj)`` with any Discord invite links redacted."""
    text = str(obj)
    return _regex.sub('[censored-invite]', text)
def hex_value(arg):
    """Parse *arg* as a base-16 integer (accepts both ``'1f'`` and ``'0x1f'``)."""
    return int(arg, 16)
def object_at(addr):
    """Return the live, GC-tracked object whose id() equals *addr*, or None."""
    return next((obj for obj in gc.get_objects() if id(obj) == addr), None)
class Stats(commands.Cog):
"""Bot usage statistics."""
    def __init__(self, bot):
        """Set up batching state and start the background loops."""
        self.bot = bot
        # Handle to this process, for memory/CPU reporting in ``about``/``bothealth``.
        self.process = psutil.Process()
        # Guards ``_data_batch`` between command registration and the insert loop.
        # NOTE(review): the explicit ``loop=`` kwarg is deprecated/removed on
        # newer Python — fine for the discord.py era this file targets.
        self._batch_lock = asyncio.Lock(loop=bot.loop)
        # Pending command-usage rows, flushed to PostgreSQL every 10 seconds.
        self._data_batch = []
        # Keep the loop alive across transient connection failures.
        self.bulk_insert_loop.add_exception_type(asyncpg.PostgresConnectionError)
        self.bulk_insert_loop.start()
        # Gateway log records waiting to be relayed to the status webhook.
        self._gateway_queue = asyncio.Queue(loop=bot.loop)
        self.gateway_worker.start()
        # This is a datetime list
        self._resumes = []
        # shard_id: List[datetime]
        self._identifies = defaultdict(list)
def _clear_gateway_data(self):
one_week_ago = datetime.datetime.utcnow() - datetime.timedelta(days=7)
to_remove = [index for index, dt in enumerate(self._resumes) if dt < one_week_ago]
for index in reversed(to_remove):
del self._resumes[index]
for shard_id, dates in self._identifies.items():
to_remove = [index for index, dt in enumerate(dates) if dt < one_week_ago]
for index in reversed(to_remove):
del dates[index]
    async def bulk_insert(self):
        """Flush ``_data_batch`` to the ``commands`` table in a single statement.

        Callers are expected to hold ``_batch_lock`` (see ``bulk_insert_loop``
        and ``register_command``).
        """
        query = """INSERT INTO commands (guild_id, channel_id, author_id, used, prefix, command, failed)
                   SELECT x.guild, x.channel, x.author, x.used, x.prefix, x.command, x.failed
                   FROM jsonb_to_recordset($1::jsonb) AS
                   x(guild BIGINT, channel BIGINT, author BIGINT, used TIMESTAMP, prefix TEXT, command TEXT, failed BOOLEAN)
                """
        if self._data_batch:
            # NOTE(review): passing a Python list for ``$1::jsonb`` assumes a
            # jsonb codec is registered on the pool — confirm pool setup.
            await self.bot.pool.execute(query, self._data_batch)
            total = len(self._data_batch)
            if total > 1:
                log.info('Registered %s commands to the database.', total)
            self._data_batch.clear()
def cog_unload(self):
self.bulk_insert_loop.stop()
self._gateway_worker.cancel()
    @tasks.loop(seconds=10.0)
    async def bulk_insert_loop(self):
        # Periodically flush batched command records under the batch lock.
        async with self._batch_lock:
            await self.bulk_insert()
    @tasks.loop(seconds=0.0)
    async def gateway_worker(self):
        # Drain the gateway log queue one record at a time and relay each to
        # the status webhook; seconds=0.0 makes this loop run continuously.
        record = await self._gateway_queue.get()
        await self.notify_gateway_status(record)
async def register_command(self, ctx):
if ctx.command is None:
return
command = ctx.command.qualified_name
self.bot.command_stats[command] += 1
message = ctx.message
destination = None
if ctx.guild is None:
destination = 'Private Message'
guild_id = None
else:
destination = f'#{message.channel} ({message.guild})'
guild_id = ctx.guild.id
log.info(f'{message.created_at}: {message.author} in {destination}: {message.content}')
async with self._batch_lock:
self._data_batch.append({
'guild': guild_id,
'channel': ctx.channel.id,
'author': ctx.author.id,
'used': message.created_at.isoformat(),
'prefix': ctx.prefix,
'command': command,
'failed': ctx.command_failed,
})
    @commands.Cog.listener()
    async def on_command_completion(self, ctx):
        # Successful invocations are recorded here; failures go through
        # ``on_command_error``, which also calls register_command.
        await self.register_command(ctx)
    @commands.Cog.listener()
    async def on_socket_response(self, msg):
        # Tally every gateway payload by its event-type field 't'.
        self.bot.socket_stats[msg.get('t')] += 1
@property
def webhook(self):
wh_id, wh_token = self.bot.config.stat_webhook
hook = discord.Webhook.partial(id=wh_id, token=wh_token, adapter=discord.AsyncWebhookAdapter(self.bot.session))
return hook
async def log_error(self, *, ctx=None, extra=None):
e = discord.Embed(title='Error', colour=0xdd5f53)
e.description = f'```py\n{traceback.format_exc()}\n```'
e.add_field(name='Extra', value=extra, inline=False)
e.timestamp = datetime.datetime.utcnow()
if ctx is not None:
fmt = '{0} (ID: {0.id})'
author = fmt.format(ctx.author)
channel = fmt.format(ctx.channel)
guild = 'None' if ctx.guild is None else fmt.format(ctx.guild)
e.add_field(name='Author', value=author)
e.add_field(name='Channel', value=channel)
e.add_field(name='Guild', value=guild)
await self.webhook.send(embed=e)
@commands.command(hidden=True)
@commands.is_owner()
async def commandstats(self, ctx, limit=20):
"""Shows command stats.
Use a negative number for bottom instead of top.
This is only for the current session.
"""
counter = self.bot.command_stats
width = len(max(counter, key=len))
total = sum(counter.values())
if limit > 0:
common = counter.most_common(limit)
else:
common = counter.most_common()[limit:]
output = '\n'.join(f'{k:<{width}}: {c}' for k, c in common)
await ctx.send(f'```\n{output}\n```')
@commands.command(hidden=True)
async def socketstats(self, ctx):
delta = datetime.datetime.utcnow() - self.bot.uptime
minutes = delta.total_seconds() / 60
total = sum(self.bot.socket_stats.values())
cpm = total / minutes
await ctx.send(f'{total} socket events observed ({cpm:.2f}/minute):\n{self.bot.socket_stats}')
    def get_bot_uptime(self, *, brief=False):
        # Human-readable duration since ``bot.uptime`` (project time helper;
        # no "ago" suffix).
        return time.human_timedelta(self.bot.uptime, accuracy=None, brief=brief, suffix=False)
    @commands.command()
    async def uptime(self, ctx):
        """Tells you how long the bot has been up for."""
        # Bold markdown around the formatted duration.
        await ctx.send(f'Uptime: **{self.get_bot_uptime()}**')
def format_commit(self, commit):
short, _, _ = commit.message.partition('\n')
short_sha2 = commit.hex[0:6]
commit_tz = datetime.timezone(datetime.timedelta(minutes=commit.commit_time_offset))
commit_time = datetime.datetime.fromtimestamp(commit.commit_time).replace(tzinfo=commit_tz)
# [`hash`](url) message (offset)
offset = time.human_timedelta(commit_time.astimezone(datetime.timezone.utc).replace(tzinfo=None), accuracy=1)
return f'[`{short_sha2}`](https://github.com/Rapptz/RoboDanny/commit/{commit.hex}) {short} ({offset})'
def get_last_commits(self, count=3):
repo = pygit2.Repository('.git')
commits = list(itertools.islice(repo.walk(repo.head.target, pygit2.GIT_SORT_TOPOLOGICAL), count))
return '\n'.join(self.format_commit(c) for c in commits)
    @commands.command()
    async def about(self, ctx):
        """Tells you information about the bot itself."""
        revision = self.get_last_commits()
        embed = discord.Embed(description='Latest Changes:\n' + revision)
        embed.title = 'Official Bot Server Invite'
        embed.url = 'https://discord.gg/DWEaqMy'
        embed.colour = discord.Colour.blurple()
        owner = self.bot.get_user(self.bot.owner_id)
        embed.set_author(name=str(owner), icon_url=owner.avatar_url)
        # statistics
        total_members = 0
        total_online = 0
        offline = discord.Status.offline
        # Single pass over every cached member: count totals and non-offline.
        for member in self.bot.get_all_members():
            total_members += 1
            if member.status is not offline:
                total_online += 1
        # Unique users (members may be shared across guilds).
        total_unique = len(self.bot.users)
        text = 0
        voice = 0
        guilds = 0
        for guild in self.bot.guilds:
            guilds += 1
            for channel in guild.channels:
                if isinstance(channel, discord.TextChannel):
                    text += 1
                elif isinstance(channel, discord.VoiceChannel):
                    voice += 1
        embed.add_field(name='Members', value=f'{total_members} total\n{total_unique} unique\n{total_online} unique online')
        embed.add_field(name='Channels', value=f'{text + voice} total\n{text} text\n{voice} voice')
        # USS in MiB; CPU percentage normalised across cores.
        memory_usage = self.process.memory_full_info().uss / 1024**2
        cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
        embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB\n{cpu_usage:.2f}% CPU')
        version = pkg_resources.get_distribution('discord.py').version
        embed.add_field(name='Guilds', value=guilds)
        embed.add_field(name='Commands Run', value=sum(self.bot.command_stats.values()))
        embed.add_field(name='Uptime', value=self.get_bot_uptime(brief=True))
        embed.set_footer(text=f'Made with discord.py v{version}', icon_url='http://i.imgur.com/5BFecvA.png')
        embed.timestamp = datetime.datetime.utcnow()
        await ctx.send(embed=embed)
def censor_object(self, obj):
if not isinstance(obj, str) and obj.id in self.bot.blacklist:
return '[censored]'
return censor_invite(obj)
    async def show_guild_stats(self, ctx):
        """Build and send the guild-wide command usage report embed."""
        # Medal emoji for ranks 1-5 (4th and 5th share the generic medal).
        lookup = (
            '\N{FIRST PLACE MEDAL}',
            '\N{SECOND PLACE MEDAL}',
            '\N{THIRD PLACE MEDAL}',
            '\N{SPORTS MEDAL}',
            '\N{SPORTS MEDAL}'
        )
        embed = discord.Embed(title='Server Command Stats', colour=discord.Colour.blurple())
        # total command uses
        query = "SELECT COUNT(*), MIN(used) FROM commands WHERE guild_id=$1;"
        count = await ctx.db.fetchrow(query, ctx.guild.id)
        embed.description = f'{count[0]} commands used.'
        # MIN(used) is NULL when no rows exist; fall back to "now".
        embed.set_footer(text='Tracking command usage since').timestamp = count[1] or datetime.datetime.utcnow()
        # Top 5 commands of all time.
        query = """SELECT command,
                          COUNT(*) as "uses"
                   FROM commands
                   WHERE guild_id=$1
                   GROUP BY command
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query, ctx.guild.id)
        value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
                          for (index, (command, uses)) in enumerate(records)) or 'No Commands'
        embed.add_field(name='Top Commands', value=value, inline=True)
        # Top 5 commands in the last 24 hours.
        query = """SELECT command,
                          COUNT(*) as "uses"
                   FROM commands
                   WHERE guild_id=$1
                   AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
                   GROUP BY command
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query, ctx.guild.id)
        value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
                          for (index, (command, uses)) in enumerate(records)) or 'No Commands.'
        embed.add_field(name='Top Commands Today', value=value, inline=True)
        # Zero-width field to force a row break in the embed layout.
        embed.add_field(name='\u200b', value='\u200b', inline=True)
        # Top 5 users of all time.
        query = """SELECT author_id,
                          COUNT(*) AS "uses"
                   FROM commands
                   WHERE guild_id=$1
                   GROUP BY author_id
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query, ctx.guild.id)
        value = '\n'.join(f'{lookup[index]}: <@!{author_id}> ({uses} bot uses)'
                          for (index, (author_id, uses)) in enumerate(records)) or 'No bot users.'
        embed.add_field(name='Top Command Users', value=value, inline=True)
        # Top 5 users in the last 24 hours.
        query = """SELECT author_id,
                          COUNT(*) AS "uses"
                   FROM commands
                   WHERE guild_id=$1
                   AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
                   GROUP BY author_id
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query, ctx.guild.id)
        value = '\n'.join(f'{lookup[index]}: <@!{author_id}> ({uses} bot uses)'
                          for (index, (author_id, uses)) in enumerate(records)) or 'No command users.'
        embed.add_field(name='Top Command Users Today', value=value, inline=True)
        await ctx.send(embed=embed)
    async def show_member_stats(self, ctx, member):
        """Build and send the per-member command usage report embed."""
        # Medal emoji for ranks 1-5.
        lookup = (
            '\N{FIRST PLACE MEDAL}',
            '\N{SECOND PLACE MEDAL}',
            '\N{THIRD PLACE MEDAL}',
            '\N{SPORTS MEDAL}',
            '\N{SPORTS MEDAL}'
        )
        embed = discord.Embed(title='Command Stats', colour=member.colour)
        embed.set_author(name=str(member), icon_url=member.avatar_url)
        # total command uses
        query = "SELECT COUNT(*), MIN(used) FROM commands WHERE guild_id=$1 AND author_id=$2;"
        count = await ctx.db.fetchrow(query, ctx.guild.id, member.id)
        embed.description = f'{count[0]} commands used.'
        # MIN(used) is NULL when the member has no rows; fall back to "now".
        embed.set_footer(text='First command used').timestamp = count[1] or datetime.datetime.utcnow()
        # Member's top 5 commands of all time.
        query = """SELECT command,
                          COUNT(*) as "uses"
                   FROM commands
                   WHERE guild_id=$1 AND author_id=$2
                   GROUP BY command
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query, ctx.guild.id, member.id)
        value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
                          for (index, (command, uses)) in enumerate(records)) or 'No Commands'
        embed.add_field(name='Most Used Commands', value=value, inline=False)
        # Member's top 5 commands in the last 24 hours.
        query = """SELECT command,
                          COUNT(*) as "uses"
                   FROM commands
                   WHERE guild_id=$1
                   AND author_id=$2
                   AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
                   GROUP BY command
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query, ctx.guild.id, member.id)
        value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
                          for (index, (command, uses)) in enumerate(records)) or 'No Commands'
        embed.add_field(name='Most Used Commands Today', value=value, inline=False)
        await ctx.send(embed=embed)
@commands.group(invoke_without_command=True)
@commands.guild_only()
@commands.cooldown(1, 30.0, type=commands.BucketType.member)
async def stats(self, ctx, *, member: discord.Member = None):
"""Tells you command usage stats for the server or a member."""
async with ctx.typing():
if member is None:
await self.show_guild_stats(ctx)
else:
await self.show_member_stats(ctx, member)
    @stats.command(name='global')
    @commands.is_owner()
    async def stats_global(self, ctx):
        """Global all time command statistics."""
        query = "SELECT COUNT(*) FROM commands;"
        total = await ctx.db.fetchrow(query)
        e = discord.Embed(title='Command Stats', colour=discord.Colour.blurple())
        e.description = f'{total[0]} commands used.'
        # Medal emoji for ranks 1-5.
        lookup = (
            '\N{FIRST PLACE MEDAL}',
            '\N{SECOND PLACE MEDAL}',
            '\N{THIRD PLACE MEDAL}',
            '\N{SPORTS MEDAL}',
            '\N{SPORTS MEDAL}'
        )
        # Top 5 commands across all guilds.
        query = """SELECT command, COUNT(*) AS "uses"
                   FROM commands
                   GROUP BY command
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query)
        value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))
        e.add_field(name='Top Commands', value=value, inline=False)
        # Top 5 guilds; guild_id NULL means the command ran in a DM.
        query = """SELECT guild_id, COUNT(*) AS "uses"
                   FROM commands
                   GROUP BY guild_id
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query)
        value = []
        for (index, (guild_id, uses)) in enumerate(records):
            if guild_id is None:
                guild = 'Private Message'
            else:
                # Censor blacklisted guilds / invite links in guild names.
                guild = self.censor_object(self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')
            emoji = lookup[index]
            value.append(f'{emoji}: {guild} ({uses} uses)')
        e.add_field(name='Top Guilds', value='\n'.join(value), inline=False)
        # Top 5 users.
        query = """SELECT author_id, COUNT(*) AS "uses"
                   FROM commands
                   GROUP BY author_id
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query)
        value = []
        for (index, (author_id, uses)) in enumerate(records):
            user = self.censor_object(self.bot.get_user(author_id) or f'<Unknown {author_id}>')
            emoji = lookup[index]
            value.append(f'{emoji}: {user} ({uses} uses)')
        e.add_field(name='Top Users', value='\n'.join(value), inline=False)
        await ctx.send(embed=e)
    @stats.command(name='today')
    @commands.is_owner()
    async def stats_today(self, ctx):
        """Global command statistics for the day."""
        query = "SELECT failed, COUNT(*) FROM commands WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day') GROUP BY failed;"
        total = await ctx.db.fetch(query)
        failed = 0
        success = 0
        question = 0
        # ``failed`` is a nullable BOOLEAN: True/False/NULL → failed/success/unknown.
        for state, count in total:
            if state is False:
                success += count
            elif state is True:
                failed += count
            else:
                question += count
        e = discord.Embed(title='Last 24 Hour Command Stats', colour=discord.Colour.blurple())
        e.description = f'{failed + success + question} commands used today. ' \
                        f'({success} succeeded, {failed} failed, {question} unknown)'
        # Medal emoji for ranks 1-5.
        lookup = (
            '\N{FIRST PLACE MEDAL}',
            '\N{SECOND PLACE MEDAL}',
            '\N{THIRD PLACE MEDAL}',
            '\N{SPORTS MEDAL}',
            '\N{SPORTS MEDAL}'
        )
        # Top 5 commands in the last day.
        query = """SELECT command, COUNT(*) AS "uses"
                   FROM commands
                   WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
                   GROUP BY command
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query)
        value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))
        e.add_field(name='Top Commands', value=value, inline=False)
        # Top 5 guilds in the last day; NULL guild_id means DMs.
        query = """SELECT guild_id, COUNT(*) AS "uses"
                   FROM commands
                   WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
                   GROUP BY guild_id
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query)
        value = []
        for (index, (guild_id, uses)) in enumerate(records):
            if guild_id is None:
                guild = 'Private Message'
            else:
                guild = self.censor_object(self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')
            emoji = lookup[index]
            value.append(f'{emoji}: {guild} ({uses} uses)')
        e.add_field(name='Top Guilds', value='\n'.join(value), inline=False)
        # Top 5 users in the last day.
        query = """SELECT author_id, COUNT(*) AS "uses"
                   FROM commands
                   WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
                   GROUP BY author_id
                   ORDER BY "uses" DESC
                   LIMIT 5;
                """
        records = await ctx.db.fetch(query)
        value = []
        for (index, (author_id, uses)) in enumerate(records):
            user = self.censor_object(self.bot.get_user(author_id) or f'<Unknown {author_id}>')
            emoji = lookup[index]
            value.append(f'{emoji}: {user} ({uses} uses)')
        e.add_field(name='Top Users', value='\n'.join(value), inline=False)
        await ctx.send(embed=e)
    async def send_guild_stats(self, e, guild):
        """Fill embed *e* with info about *guild* and post it to the stats webhook."""
        e.add_field(name='Name', value=guild.name)
        e.add_field(name='ID', value=guild.id)
        # NOTE(review): ``or 'N/A'`` also masks shard 0 because 0 is falsy —
        # confirm that is intended.
        e.add_field(name='Shard ID', value=guild.shard_id or 'N/A')
        e.add_field(name='Owner', value=f'{guild.owner} (ID: {guild.owner.id})')
        bots = sum(m.bot for m in guild.members)
        total = guild.member_count
        online = sum(m.status is discord.Status.online for m in guild.members)
        e.add_field(name='Members', value=str(total))
        # Percentages assume total > 0; presumably a guild always has its owner.
        e.add_field(name='Bots', value=f'{bots} ({bots/total:.2%})')
        e.add_field(name='Online', value=f'{online} ({online/total:.2%})')
        if guild.icon:
            e.set_thumbnail(url=guild.icon_url)
        if guild.me:
            # Timestamp the report with when the bot joined this guild.
            e.timestamp = guild.me.joined_at
        await self.webhook.send(embed=e)
    @stats_today.before_invoke
    @stats_global.before_invoke
    async def before_stats_invoke(self, ctx):
        # Show a typing indicator while the heavy stats queries run.
        await ctx.trigger_typing()
    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        # Announce newly-joined guilds on the stats webhook.
        e = discord.Embed(colour=0x53dda4, title='New Guild') # green colour
        await self.send_guild_stats(e, guild)
    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        # Announce guilds the bot left (or was removed from) on the stats webhook.
        e = discord.Embed(colour=0xdd5f53, title='Left Guild') # red colour
        await self.send_guild_stats(e, guild)
    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Record failed invocations and report unexpected errors to the webhook."""
        await self.register_command(ctx)
        # Only report errors raised from inside a command body or converter.
        if not isinstance(error, (commands.CommandInvokeError, commands.ConversionError)):
            return
        error = error.original
        # Expected/benign failures; don't spam the error webhook.
        if isinstance(error, (discord.Forbidden, discord.NotFound, CannotPaginate)):
            return
        e = discord.Embed(title='Command Error', colour=0xcc3366)
        e.add_field(name='Name', value=ctx.command.qualified_name)
        e.add_field(name='Author', value=f'{ctx.author} (ID: {ctx.author.id})')
        fmt = f'Channel: {ctx.channel} (ID: {ctx.channel.id})'
        if ctx.guild:
            fmt = f'{fmt}\nGuild: {ctx.guild} (ID: {ctx.guild.id})'
        e.add_field(name='Location', value=fmt, inline=False)
        # Keep the message content within embed field limits.
        e.add_field(name='Content', value=textwrap.shorten(ctx.message.content, width=512))
        # chain=False: only the final exception, not its __cause__/__context__.
        exc = ''.join(traceback.format_exception(type(error), error, error.__traceback__, chain=False))
        e.description = f'```py\n{exc}\n```'
        e.timestamp = datetime.datetime.utcnow()
        await self.webhook.send(embed=e)
@commands.Cog.listener()
async def on_socket_raw_send(self, data):
# kind of weird way to check if we're sending
# IDENTIFY or RESUME
if '"op":2' not in data and '"op":6' not in data:
return
back_to_json = json.loads(data)
if back_to_json['op'] == 2:
payload = back_to_json['d']
inner_shard = payload.get('shard', [0])
self._identifies[inner_shard[0]].append(datetime.datetime.utcnow())
else:
self._resumes.append(datetime.datetime.utcnow())
# don't want to permanently grow memory
self._clear_gateway_data()
    def add_record(self, record):
        """Queue a gateway log record for the webhook relay.

        Called synchronously from ``GatewayHandler.emit``; the async
        ``gateway_worker`` loop drains the queue.
        """
        # if self.bot.config.debug:
        # return
        self._gateway_queue.put_nowait(record)
async def notify_gateway_status(self, record):
attributes = {
'INFO': '\N{INFORMATION SOURCE}',
'WARNING': '\N{WARNING SIGN}'
}
emoji = attributes.get(record.levelname, '\N{CROSS MARK}')
dt = datetime.datetime.utcfromtimestamp(record.created)
msg = f'{emoji} `[{dt:%Y-%m-%d %H:%M:%S}] {record.message}`'
await self.webhook.send(msg, username='Gateway', avatar_url='https://i.imgur.com/4PnCKB3.png')
@commands.command(hidden=True)
@commands.is_owner()
async def bothealth(self, ctx):
"""Various bot health monitoring tools."""
# This uses a lot of private methods because there is no
# clean way of doing this otherwise.
HEALTHY = discord.Colour(value=0x43B581)
UNHEALTHY = discord.Colour(value=0xF04947)
WARNING = discord.Colour(value=0xF09E47)
total_warnings = 0
embed = discord.Embed(title='Bot Health Report', colour=HEALTHY)
# Check the connection pool health.
pool = self.bot.pool
total_waiting = len(pool._queue._getters)
current_generation = pool._generation
description = [
f'Total `Pool.acquire` Waiters: {total_waiting}',
f'Current Pool Generation: {current_generation}',
f'Connections In Use: {len(pool._holders) - pool._queue.qsize()}'
]
questionable_connections = 0
connection_value = []
for index, holder in enumerate(pool._holders, start=1):
generation = holder._generation
in_use = holder._in_use is not None
is_closed = holder._con is None or holder._con.is_closed()
display = f'gen={holder._generation} in_use={in_use} closed={is_closed}'
questionable_connections += any((in_use, generation != current_generation))
connection_value.append(f'<Holder i={index} {display}>')
joined_value = '\n'.join(connection_value)
embed.add_field(name='Connections', value=f'```py\n{joined_value}\n```', inline=False)
spam_control = self.bot.spam_control
being_spammed = [
str(key) for key, value in spam_control._cache.items()
if value._tokens == 0
]
description.append(f'Current Spammers: {', '.join(being_spammed) if being_spammed else 'None'}')
description.append(f'Questionable Connections: {questionable_connections}')
total_warnings += questionable_connections
if being_spammed:
embed.colour = WARNING
total_warnings += 1
try:
task_retriever = asyncio.Task.all_tasks
except AttributeError:
# future proofing for 3.9 I guess
task_retriever = asyncio.all_tasks
else:
all_tasks = task_retriever(loop=self.bot.loop)
event_tasks = [
t for t in all_tasks
if 'Client._run_event' in repr(t) and not t.done()
]
cogs_directory = os.path.dirname(__file__)
tasks_directory = os.path.join('discord', 'ext', 'tasks', '__init__.py')
inner_tasks = [
t for t in all_tasks
if cogs_directory in repr(t) or tasks_directory in repr(t)
]
bad_inner_tasks = ", ".join(hex(id(t)) for t in inner_tasks if t.done() and t._exception is not None)
total_warnings += bool(bad_inner_tasks)
embed.add_field(name='Inner Tasks', value=f'Total: {len(inner_tasks)}\nFailed: {bad_inner_tasks or 'None'}')
embed.add_field(name='Events Waiting', value=f'Total: {len(event_tasks)}', inline=False)
command_waiters = len(self._data_batch)
is_locked = self._batch_lock.locked()
description.append(f'Commands Waiting: {command_waiters}, Batch Locked: {is_locked}')
# RESUME/IDENTIFY data
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
total_resumes = sum(1 for dt in self._resumes if dt > yesterday)
identifies = {
shard_id: sum(1 for dt in dates if dt > yesterday)
for shard_id, dates in self._identifies.items()
}
absolute_total_identifies = sum(identifies.values())
resume_info_builder = [
f'Total RESUMEs: {total_resumes}',
f'Total IDENTIFYs: {absolute_total_identifies}'
]
for shard_id, total in identifies.items():
resume_info_builder.append(f'Shard ID {shard_id} IDENTIFYs: {total}')
if absolute_total_identifies >= (len(self.bot.shards) * 5):
total_warnings += 1
embed.colour = WARNING
embed.add_field(name='Gateway (last 24 hours)', value='\n'.join(resume_info_builder), inline=False)
memory_usage = self.process.memory_full_info().uss / 1024**2
cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB\n{cpu_usage:.2f}% CPU', inline=False)
global_rate_limit = not self.bot.http._global_over.is_set()
description.append(f'Global Rate Limit: {global_rate_limit}')
if command_waiters >= 8:
total_warnings += 1
embed.colour = WARNING
if global_rate_limit or total_warnings >= 9:
embed.colour = UNHEALTHY
embed.set_footer(text=f'{total_warnings} warning(s)')
embed.description = '\n'.join(description)
await ctx.send(embed=embed)
@commands.command(hidden=True, aliases=['cancel_task'])
@commands.is_owner()
async def debug_task(self, ctx, memory_id: hex_value):
"""Debug a task by a memory location."""
task = object_at(memory_id)
if task is None or not isinstance(task, asyncio.Task):
return await ctx.send(f'Could not find Task object at {hex(memory_id)}.')
if ctx.invoked_with == 'cancel_task':
task.cancel()
return await ctx.send(f'Cancelled task object {task!r}.')
paginator = commands.Paginator(prefix='```py')
fp = io.StringIO()
frames = len(task.get_stack())
paginator.add_line(f'# Total Frames: {frames}')
task.print_stack(file=fp)
for line in fp.getvalue().splitlines():
paginator.add_line(line)
for page in paginator.pages:
await ctx.send(page)
async def tabulate_query(self, ctx, query, *args):
records = await ctx.db.fetch(query, *args)
if len(records) == 0:
return await ctx.send('No results found.')
headers = list(records[0].keys())
table = formats.TabularData()
table.set_columns(headers)
table.add_rows(list(r.values()) for r in records)
render = table.render()
fmt = f'```\n{render}\n```'
if len(fmt) > 2000:
fp = io.BytesIO(fmt.encode('utf-8'))
await ctx.send('Too many results...', file=discord.File(fp, 'results.txt'))
else:
await ctx.send(fmt)
    @commands.group(hidden=True, invoke_without_command=True)
    @commands.is_owner()
    async def command_history(self, ctx):
        """Command history."""
        # Failed invocations are suffixed with ' [!]' in the output table.
        query = """SELECT
                        CASE failed
                            WHEN TRUE THEN command || ' [!]'
                            ELSE command
                        END AS "command",
                        to_char(used, 'Mon DD HH12:MI:SS AM') AS "invoked",
                        author_id,
                        guild_id
                   FROM commands
                   ORDER BY used DESC
                   LIMIT 15;
                """
        await self.tabulate_query(ctx, query)
    @command_history.command(name='for')
    @commands.is_owner()
    async def command_history_for(self, ctx, days: typing.Optional[int] = 7, *, command: str):
        """Command history for a command."""
        # Per-guild success/failure totals for one command over the window.
        query = """SELECT *, t.success + t.failed AS "total"
                   FROM (
                       SELECT guild_id,
                              SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
                              SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
                       FROM commands
                       WHERE command=$1
                       AND used > (CURRENT_TIMESTAMP - $2::interval)
                       GROUP BY guild_id
                   ) AS t
                   ORDER BY "total" DESC
                   LIMIT 30;
                """
        # A timedelta binds cleanly to the $2::interval parameter.
        await self.tabulate_query(ctx, query, command, datetime.timedelta(days=days))
    @command_history.command(name='guild', aliases=['server'])
    @commands.is_owner()
    async def command_history_guild(self, ctx, guild_id: int):
        """Command history for a guild."""
        # Failed invocations are suffixed with ' [!]' in the output table.
        query = """SELECT
                        CASE failed
                            WHEN TRUE THEN command || ' [!]'
                            ELSE command
                        END AS "command",
                        channel_id,
                        author_id,
                        used
                   FROM commands
                   WHERE guild_id=$1
                   ORDER BY used DESC
                   LIMIT 15;
                """
        await self.tabulate_query(ctx, query, guild_id)
    @command_history.command(name='user', aliases=['member'])
    @commands.is_owner()
    async def command_history_user(self, ctx, user_id: int):
        """Command history for a user."""
        # Failed invocations are suffixed with ' [!]' in the output table.
        query = """SELECT
                        CASE failed
                            WHEN TRUE THEN command || ' [!]'
                            ELSE command
                        END AS "command",
                        guild_id,
                        used
                   FROM commands
                   WHERE author_id=$1
                   ORDER BY used DESC
                   LIMIT 20;
                """
        await self.tabulate_query(ctx, query, user_id)
    @command_history.command(name='log')
    @commands.is_owner()
    async def command_history_log(self, ctx, days=7):
        """Command history log for the last N days."""
        query = """SELECT command, COUNT(*)
                   FROM commands
                   WHERE used > (CURRENT_TIMESTAMP - $1::interval)
                   GROUP BY command
                   ORDER BY 2 DESC
                """
        # Start every currently-registered command at zero so unused ones show up.
        all_commands = {
            c.qualified_name: 0
            for c in self.bot.walk_commands()
        }
        records = await ctx.db.fetch(query, datetime.timedelta(days=days))
        for name, uses in records:
            # Commands that no longer exist are ignored.
            if name in all_commands:
                all_commands[name] = uses
        as_data = sorted(all_commands.items(), key=lambda t: t[1], reverse=True)
        table = formats.TabularData()
        table.set_columns(['Command', 'Uses'])
        table.add_rows(tup for tup in as_data)
        render = table.render()
        embed = discord.Embed(title='Summary', colour=discord.Colour.green())
        embed.set_footer(text='Since').timestamp = datetime.datetime.utcnow() - datetime.timedelta(days=days)
        # NOTE(review): top/bottom ten come from the raw DB records (sorted by
        # uses DESC), so zero-use commands never appear in 'Bottom 10' —
        # confirm that is intended.
        top_ten = '\n'.join(f'{command}: {uses}' for command, uses in records[:10])
        bottom_ten = '\n'.join(f'{command}: {uses}' for command, uses in records[-10:])
        embed.add_field(name='Top 10', value=top_ten)
        embed.add_field(name='Bottom 10', value=bottom_ten)
        unused = ', '.join(name for name, uses in as_data if uses == 0)
        if len(unused) > 1024:
            unused = 'Way too many...'
        embed.add_field(name='Unused', value=unused, inline=False)
        await ctx.send(embed=embed, file=discord.File(io.BytesIO(render.encode()), filename='full_results.txt'))
    @command_history.command(name='cog')
    @commands.is_owner()
    async def command_history_cog(self, ctx, days: typing.Optional[int] = 7, *, cog: str = None):
        """Command history for a cog or grouped by a cog."""
        interval = datetime.timedelta(days=days)
        if cog is not None:
            # NOTE(review): ``cog`` is reassigned before this message, so an
            # unknown name prints 'Unknown cog: None' — confirm intended.
            cog = self.bot.get_cog(cog)
            if cog is None:
                return await ctx.send(f'Unknown cog: {cog}')
            # Success/failure totals restricted to this cog's commands.
            query = """SELECT *, t.success + t.failed AS "total"
                       FROM (
                           SELECT command,
                                  SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
                                  SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
                           FROM commands
                           WHERE command = any($1::text[])
                           AND used > (CURRENT_TIMESTAMP - $2::interval)
                           GROUP BY command
                       ) AS t
                       ORDER BY "total" DESC
                       LIMIT 30;
                    """
            return await self.tabulate_query(ctx, query, [c.qualified_name for c in cog.walk_commands()], interval)
        # A more manual query with a manual grouper.
        query = """SELECT *, t.success + t.failed AS "total"
                   FROM (
                       SELECT command,
                              SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
                              SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
                       FROM commands
                       WHERE used > (CURRENT_TIMESTAMP - $1::interval)
                       GROUP BY command
                   ) AS t;
                """
        class Count:
            # Accumulator for per-cog success/failure/total tallies.
            __slots__ = ('success', 'failed', 'total')
            def __init__(self):
                self.success = 0
                self.failed = 0
                self.total = 0
            def add(self, record):
                self.success += record['success']
                self.failed += record['failed']
                self.total += record['total']
        data = defaultdict(Count)
        records = await ctx.db.fetch(query, interval)
        for record in records:
            # Group each per-command row under its owning cog (or 'No Cog').
            command = self.bot.get_command(record['command'])
            if command is None or command.cog is None:
                data['No Cog'].add(record)
            else:
                data[command.cog.qualified_name].add(record)
        table = formats.TabularData()
        table.set_columns(['Cog', 'Success', 'Failed', 'Total'])
        data = sorted([
            (cog, e.success, e.failed, e.total)
            for cog, e in data.items()
        ], key=lambda t: t[-1], reverse=True)
        table.add_rows(data)
        render = table.render()
        await ctx.safe_send(f'```\n{render}\n```')
# Preserve the original handler so ``teardown`` can restore it.
old_on_error = commands.AutoShardedBot.on_error


async def on_error(self, event, *args, **kwargs):
    """Replacement ``AutoShardedBot.on_error`` that reports event errors to the stats webhook.

    Patched onto the bot class in ``setup``; ``self`` is the bot instance.
    """
    e = discord.Embed(title='Event Error', colour=0xa32952)
    e.add_field(name='Event', value=event)
    e.description = f'```py\n{traceback.format_exc()}\n```'
    e.timestamp = datetime.datetime.utcnow()
    args_str = ['```py']
    for index, arg in enumerate(args):
        args_str.append(f'[{index}]: {arg!r}')
    args_str.append('```')
    e.add_field(name='Args', value='\n'.join(args_str), inline=False)
    hook = self.get_cog('Stats').webhook
    try:
        await hook.send(embed=e)
    except Exception:
        # Best-effort delivery: swallow ordinary send failures, but no longer a
        # bare ``except`` — that also swallowed BaseExceptions such as
        # asyncio.CancelledError and KeyboardInterrupt during shutdown.
        pass
def setup(bot):
    """Cog entry point: attach shared counters, the cog, and global hooks."""
    # Counters live on the bot instance so they survive cog reloads.
    if not hasattr(bot, 'command_stats'):
        bot.command_stats = Counter()
    if not hasattr(bot, 'socket_stats'):
        bot.socket_stats = Counter()
    cog = Stats(bot)
    bot.add_cog(cog)
    # Forward gateway log records to the cog (removed again in ``teardown``).
    bot._stats_cog_gateway_handler = handler = GatewayHandler(cog)
    logging.getLogger().addHandler(handler)
    # Patch the global event-error handler; restored in ``teardown``.
    commands.AutoShardedBot.on_error = on_error
def teardown(bot):
    """Undo the global hooks installed by ``setup``."""
    commands.AutoShardedBot.on_error = old_on_error
    logging.getLogger().removeHandler(bot._stats_cog_gateway_handler)
    del bot._stats_cog_gateway_handler
| from discord.ext import commands, tasks
from collections import Counter, defaultdict
from .utils import checks, db, time, formats
from .utils.paginator import CannotPaginate
import pkg_resources
import logging
import discord
import textwrap
import datetime
import traceback
import itertools
import typing
import asyncpg
import asyncio
import pygit2
import psutil
import json
import os
import re
import io
import gc
log = logging.getLogger(__name__)
LOGGING_CHANNEL = 309632009427222529
class GatewayHandler(logging.Handler):
    """Logging handler that forwards gateway-related log records to the Stats cog."""
    def __init__(self, cog):
        self.cog = cog
        super().__init__(logging.INFO)
    def filter(self, record):
        # Only gateway traffic: the discord.gateway logger, shard notices, ws closes.
        return record.name == 'discord.gateway' or 'Shard ID' in record.msg or 'Websocket closed ' in record.msg
    def emit(self, record):
        # Relay the record to the cog's gateway queue.
        self.cog.add_record(record)
class Commands(db.Table):
    """Schema for the ``commands`` table: one row per command invocation."""
    id = db.PrimaryKeyColumn()
    # Guild the command ran in; NULL for direct messages (see register_command).
    guild_id = db.Column(db.Integer(big=True), index=True)
    channel_id = db.Column(db.Integer(big=True))
    author_id = db.Column(db.Integer(big=True), index=True)
    # Timestamp of the invoking message.
    used = db.Column(db.Datetime, index=True)
    prefix = db.Column(db.String)
    command = db.Column(db.String, index=True)
    # True when the invocation raised / failed (ctx.command_failed).
    failed = db.Column(db.Boolean, index=True)
_INVITE_REGEX = re.compile(r'(?:https?:\/\/)?discord(?:\.gg|\.com|app\.com\/invite)?\/[A-Za-z0-9]+')

def censor_invite(obj, *, _regex=_INVITE_REGEX):
    """Return ``str(obj)`` with any Discord invite links masked out."""
    text = str(obj)
    return _regex.sub('[censored-invite]', text)
def hex_value(arg):
    """Argument converter: parse *arg* as a base-16 integer."""
    return int(arg, 16)
def object_at(addr):
    """Return the live, GC-tracked object whose ``id()`` is *addr*, or None."""
    return next((obj for obj in gc.get_objects() if id(obj) == addr), None)
class Stats(commands.Cog):
    """Bot usage statistics."""

    def __init__(self, bot):
        """Set up command/gateway bookkeeping and start the background loops."""
        self.bot = bot
        self.process = psutil.Process()
        # Serialises _data_batch access between the listeners and the
        # bulk-insert loop.  Fix: the deprecated ``loop=`` keyword was
        # dropped — asyncio removed it in Python 3.10 (TypeError there);
        # the primitives bind to the running loop on first use instead.
        self._batch_lock = asyncio.Lock()
        self._data_batch = []
        self.bulk_insert_loop.add_exception_type(asyncpg.PostgresConnectionError)
        self.bulk_insert_loop.start()
        self._gateway_queue = asyncio.Queue()
        self.gateway_worker.start()
        # This is a datetime list
        self._resumes = []
        # shard_id: List[datetime]
        self._identifies = defaultdict(list)
def _clear_gateway_data(self):
    """Drop RESUME/IDENTIFY bookkeeping older than one week, in place."""
    one_week_ago = datetime.datetime.utcnow() - datetime.timedelta(days=7)

    def _prune(dates):
        # Delete stale entries back-to-front so indices stay valid and
        # the list object itself is preserved.
        for index in range(len(dates) - 1, -1, -1):
            if dates[index] < one_week_ago:
                del dates[index]

    _prune(self._resumes)
    for dates in self._identifies.values():
        _prune(dates)
async def bulk_insert(self):
    """Flush the buffered command records into the ``commands`` table.

    Caller must hold ``self._batch_lock`` (``bulk_insert_loop`` does).
    """
    query = """INSERT INTO commands (guild_id, channel_id, author_id, used, prefix, command, failed)
               SELECT x.guild, x.channel, x.author, x.used, x.prefix, x.command, x.failed
               FROM jsonb_to_recordset($1::jsonb) AS
               x(guild BIGINT, channel BIGINT, author BIGINT, used TIMESTAMP, prefix TEXT, command TEXT, failed BOOLEAN)
            """
    if self._data_batch:
        # NOTE(review): the list of dicts is passed straight as $1::jsonb —
        # presumably an asyncpg codec serialises it to JSON; confirm.
        await self.bot.pool.execute(query, self._data_batch)
        total = len(self._data_batch)
        if total > 1:
            log.info('Registered %s commands to the database.', total)
        self._data_batch.clear()
def cog_unload(self):
    """Stop the background loops when the cog is removed.

    Bug fix: the original called ``self._gateway_worker.cancel()`` but no
    such attribute exists — ``__init__`` starts ``self.gateway_worker`` —
    so unloading raised AttributeError and leaked the worker task.
    """
    self.bulk_insert_loop.stop()
    self.gateway_worker.cancel()
@tasks.loop(seconds=10.0)
async def bulk_insert_loop(self):
    # Periodically flush the command buffer; the lock serialises access
    # with register_command().
    async with self._batch_lock:
        await self.bulk_insert()

@tasks.loop(seconds=0.0)
async def gateway_worker(self):
    # Runs continuously: drain one queued gateway log record per
    # iteration and post it to the webhook.
    record = await self._gateway_queue.get()
    await self.notify_gateway_status(record)
async def register_command(self, ctx):
    """Record a command invocation in memory and queue it for the DB batch."""
    if ctx.command is None:
        return
    command = ctx.command.qualified_name
    # In-memory session counter, shared on the bot object.
    self.bot.command_stats[command] += 1
    message = ctx.message
    destination = None
    if ctx.guild is None:
        destination = 'Private Message'
        guild_id = None
    else:
        destination = f'#{message.channel} ({message.guild})'
        guild_id = ctx.guild.id
    log.info(f'{message.created_at}: {message.author} in {destination}: {message.content}')
    async with self._batch_lock:
        self._data_batch.append({
            'guild': guild_id,
            'channel': ctx.channel.id,
            'author': ctx.author.id,
            # ISO string so the batch stays JSON-serialisable for bulk_insert.
            'used': message.created_at.isoformat(),
            'prefix': ctx.prefix,
            'command': command,
            'failed': ctx.command_failed,
        })
@commands.Cog.listener()
async def on_command_completion(self, ctx):
    # Successful invocations; failures are recorded in on_command_error.
    await self.register_command(ctx)

@commands.Cog.listener()
async def on_socket_response(self, msg):
    # Tally raw gateway events by their type name (the 't' field).
    self.bot.socket_stats[msg.get('t')] += 1

@property
def webhook(self):
    """The logging webhook built from the bot's ``stat_webhook`` config."""
    wh_id, wh_token = self.bot.config.stat_webhook
    hook = discord.Webhook.partial(id=wh_id, token=wh_token, adapter=discord.AsyncWebhookAdapter(self.bot.session))
    return hook
async def log_error(self, *, ctx=None, extra=None):
    """Send the current exception's traceback to the logging webhook.

    Must be called from inside an ``except`` block so ``format_exc`` has
    an active exception; *ctx* adds author/channel/guild context.
    """
    e = discord.Embed(title='Error', colour=0xdd5f53)
    e.description = f'```py\n{traceback.format_exc()}\n```'
    e.add_field(name='Extra', value=extra, inline=False)
    e.timestamp = datetime.datetime.utcnow()
    if ctx is not None:
        # One format string reused for author/channel/guild lines.
        fmt = '{0} (ID: {0.id})'
        author = fmt.format(ctx.author)
        channel = fmt.format(ctx.channel)
        guild = 'None' if ctx.guild is None else fmt.format(ctx.guild)
        e.add_field(name='Author', value=author)
        e.add_field(name='Channel', value=channel)
        e.add_field(name='Guild', value=guild)
    await self.webhook.send(embed=e)
@commands.command(hidden=True)
@commands.is_owner()
async def commandstats(self, ctx, limit=20):
    """Shows command stats.

    Use a negative number for bottom instead of top.
    This is only for the current session.
    """
    counter = self.bot.command_stats
    # Fix: max() over an empty counter raised ValueError before any
    # command had been recorded this session.
    if not counter:
        return await ctx.send('No commands have been used yet.')
    # Column width of the widest command name, for aligned output.
    width = len(max(counter, key=len))
    if limit > 0:
        common = counter.most_common(limit)
    else:
        common = counter.most_common()[limit:]
    output = '\n'.join(f'{k:<{width}}: {c}' for k, c in common)
    await ctx.send(f'```\n{output}\n```')
@commands.command(hidden=True)
async def socketstats(self, ctx):
    # Average gateway events per minute since the bot came up.
    delta = datetime.datetime.utcnow() - self.bot.uptime
    minutes = delta.total_seconds() / 60
    total = sum(self.bot.socket_stats.values())
    cpm = total / minutes
    await ctx.send(f'{total} socket events observed ({cpm:.2f}/minute):\n{self.bot.socket_stats}')

def get_bot_uptime(self, *, brief=False):
    # Human-readable time since self.bot.uptime (set at startup).
    return time.human_timedelta(self.bot.uptime, accuracy=None, brief=brief, suffix=False)

@commands.command()
async def uptime(self, ctx):
    """Tells you how long the bot has been up for."""
    await ctx.send(f'Uptime: **{self.get_bot_uptime()}**')
def format_commit(self, commit):
    """Format a pygit2 commit as a Markdown '[`hash`](url) message (ago)' line."""
    # First line of the commit message only.
    short, _, _ = commit.message.partition('\n')
    short_sha2 = commit.hex[0:6]
    # commit_time is a Unix timestamp; commit_time_offset is the author's
    # UTC offset in minutes.
    commit_tz = datetime.timezone(datetime.timedelta(minutes=commit.commit_time_offset))
    commit_time = datetime.datetime.fromtimestamp(commit.commit_time).replace(tzinfo=commit_tz)
    # [`hash`](url) message (offset)
    offset = time.human_timedelta(commit_time.astimezone(datetime.timezone.utc).replace(tzinfo=None), accuracy=1)
    return f'[`{short_sha2}`](https://github.com/Rapptz/RoboDanny/commit/{commit.hex}) {short} ({offset})'

def get_last_commits(self, count=3):
    """Return the latest *count* commits of the local repo, pre-formatted."""
    repo = pygit2.Repository('.git')
    commits = list(itertools.islice(repo.walk(repo.head.target, pygit2.GIT_SORT_TOPOLOGICAL), count))
    return '\n'.join(self.format_commit(c) for c in commits)
@commands.command()
async def about(self, ctx):
"""Tells you information about the bot itself."""
revision = self.get_last_commits()
embed = discord.Embed(description='Latest Changes:\n' + revision)
embed.title = 'Official Bot Server Invite'
embed.url = 'https://discord.gg/DWEaqMy'
embed.colour = discord.Colour.blurple()
owner = self.bot.get_user(self.bot.owner_id)
embed.set_author(name=str(owner), icon_url=owner.avatar_url)
# statistics
total_members = 0
total_online = 0
offline = discord.Status.offline
for member in self.bot.get_all_members():
total_members += 1
if member.status is not offline:
total_online += 1
total_unique = len(self.bot.users)
text = 0
voice = 0
guilds = 0
for guild in self.bot.guilds:
guilds += 1
for channel in guild.channels:
if isinstance(channel, discord.TextChannel):
text += 1
elif isinstance(channel, discord.VoiceChannel):
voice += 1
embed.add_field(name='Members', value=f'{total_members} total\n{total_unique} unique\n{total_online} unique online')
embed.add_field(name='Channels', value=f'{text + voice} total\n{text} text\n{voice} voice')
memory_usage = self.process.memory_full_info().uss / 1024**2
cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB\n{cpu_usage:.2f}% CPU')
version = pkg_resources.get_distribution('discord.py').version
embed.add_field(name='Guilds', value=guilds)
embed.add_field(name='Commands Run', value=sum(self.bot.command_stats.values()))
embed.add_field(name='Uptime', value=self.get_bot_uptime(brief=True))
embed.set_footer(text=f'Made with discord.py v{version}', icon_url='http://i.imgur.com/5BFecvA.png')
embed.timestamp = datetime.datetime.utcnow()
await ctx.send(embed=embed)
def censor_object(self, obj):
    """Censor blacklisted users/guilds entirely, and invite links otherwise."""
    # Non-str objects are discord models with an .id we can blacklist-check.
    if not isinstance(obj, str) and obj.id in self.bot.blacklist:
        return '[censored]'
    return censor_invite(obj)
async def show_guild_stats(self, ctx):
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
embed = discord.Embed(title='Server Command Stats', colour=discord.Colour.blurple())
# total command uses
query = "SELECT COUNT(*), MIN(used) FROM commands WHERE guild_id=$1;"
count = await ctx.db.fetchrow(query, ctx.guild.id)
embed.description = f'{count[0]} commands used.'
embed.set_footer(text='Tracking command usage since').timestamp = count[1] or datetime.datetime.utcnow()
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Top Commands', value=value, inline=True)
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands.'
embed.add_field(name='Top Commands Today', value=value, inline=True)
embed.add_field(name='\u200b', value='\u200b', inline=True)
query = """SELECT author_id,
COUNT(*) AS "uses"
FROM commands
WHERE guild_id=$1
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: <@!{author_id}> ({uses} bot uses)'
for (index, (author_id, uses)) in enumerate(records)) or 'No bot users.'
embed.add_field(name='Top Command Users', value=value, inline=True)
query = """SELECT author_id,
COUNT(*) AS "uses"
FROM commands
WHERE guild_id=$1
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: <@!{author_id}> ({uses} bot uses)'
for (index, (author_id, uses)) in enumerate(records)) or 'No command users.'
embed.add_field(name='Top Command Users Today', value=value, inline=True)
await ctx.send(embed=embed)
async def show_member_stats(self, ctx, member):
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
embed = discord.Embed(title='Command Stats', colour=member.colour)
embed.set_author(name=str(member), icon_url=member.avatar_url)
# total command uses
query = "SELECT COUNT(*), MIN(used) FROM commands WHERE guild_id=$1 AND author_id=$2;"
count = await ctx.db.fetchrow(query, ctx.guild.id, member.id)
embed.description = f'{count[0]} commands used.'
embed.set_footer(text='First command used').timestamp = count[1] or datetime.datetime.utcnow()
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1 AND author_id=$2
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id, member.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Most Used Commands', value=value, inline=False)
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
AND author_id=$2
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id, member.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Most Used Commands Today', value=value, inline=False)
await ctx.send(embed=embed)
@commands.group(invoke_without_command=True)
@commands.guild_only()
@commands.cooldown(1, 30.0, type=commands.BucketType.member)
async def stats(self, ctx, *, member: discord.Member = None):
    """Tells you command usage stats for the server or a member."""
    # Server-wide stats when no member is given, per-member otherwise.
    async with ctx.typing():
        if member is None:
            await self.show_guild_stats(ctx)
        else:
            await self.show_member_stats(ctx, member)
@stats.command(name='global')
@commands.is_owner()
async def stats_global(self, ctx):
"""Global all time command statistics."""
query = "SELECT COUNT(*) FROM commands;"
total = await ctx.db.fetchrow(query)
e = discord.Embed(title='Command Stats', colour=discord.Colour.blurple())
e.description = f'{total[0]} commands used.'
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
query = """SELECT command, COUNT(*) AS "uses"
FROM commands
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))
e.add_field(name='Top Commands', value=value, inline=False)
query = """SELECT guild_id, COUNT(*) AS "uses"
FROM commands
GROUP BY guild_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (guild_id, uses)) in enumerate(records):
if guild_id is None:
guild = 'Private Message'
else:
guild = self.censor_object(self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {guild} ({uses} uses)')
e.add_field(name='Top Guilds', value='\n'.join(value), inline=False)
query = """SELECT author_id, COUNT(*) AS "uses"
FROM commands
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (author_id, uses)) in enumerate(records):
user = self.censor_object(self.bot.get_user(author_id) or f'<Unknown {author_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {user} ({uses} uses)')
e.add_field(name='Top Users', value='\n'.join(value), inline=False)
await ctx.send(embed=e)
@stats.command(name='today')
@commands.is_owner()
async def stats_today(self, ctx):
"""Global command statistics for the day."""
query = "SELECT failed, COUNT(*) FROM commands WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day') GROUP BY failed;"
total = await ctx.db.fetch(query)
failed = 0
success = 0
question = 0
for state, count in total:
if state is False:
success += count
elif state is True:
failed += count
else:
question += count
e = discord.Embed(title='Last 24 Hour Command Stats', colour=discord.Colour.blurple())
e.description = f'{failed + success + question} commands used today. ' \
f'({success} succeeded, {failed} failed, {question} unknown)'
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
query = """SELECT command, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))
e.add_field(name='Top Commands', value=value, inline=False)
query = """SELECT guild_id, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY guild_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (guild_id, uses)) in enumerate(records):
if guild_id is None:
guild = 'Private Message'
else:
guild = self.censor_object(self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {guild} ({uses} uses)')
e.add_field(name='Top Guilds', value='\n'.join(value), inline=False)
query = """SELECT author_id, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (author_id, uses)) in enumerate(records):
user = self.censor_object(self.bot.get_user(author_id) or f'<Unknown {author_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {user} ({uses} uses)')
e.add_field(name='Top Users', value='\n'.join(value), inline=False)
await ctx.send(embed=e)
async def send_guild_stats(self, e, guild):
    """Fill embed *e* with *guild*'s vitals and post it to the webhook."""
    e.add_field(name='Name', value=guild.name)
    e.add_field(name='ID', value=guild.id)
    # Fix: shard 0 is falsy, so `guild.shard_id or 'N/A'` mislabelled it
    # as N/A; only a missing shard id should display that.
    e.add_field(name='Shard ID', value='N/A' if guild.shard_id is None else guild.shard_id)
    e.add_field(name='Owner', value=f'{guild.owner} (ID: {guild.owner.id})')
    bots = sum(m.bot for m in guild.members)
    total = guild.member_count
    online = sum(m.status is discord.Status.online for m in guild.members)
    e.add_field(name='Members', value=str(total))
    # Guard a zero/unknown member_count to avoid ZeroDivisionError.
    if total:
        e.add_field(name='Bots', value=f'{bots} ({bots/total:.2%})')
        e.add_field(name='Online', value=f'{online} ({online/total:.2%})')
    else:
        e.add_field(name='Bots', value=str(bots))
        e.add_field(name='Online', value=str(online))
    if guild.icon:
        e.set_thumbnail(url=guild.icon_url)
    if guild.me:
        e.timestamp = guild.me.joined_at
    await self.webhook.send(embed=e)
@stats_today.before_invoke
@stats_global.before_invoke
async def before_stats_invoke(self, ctx):
    # Show a typing indicator while the stats queries run.
    await ctx.trigger_typing()

@commands.Cog.listener()
async def on_guild_join(self, guild):
    e = discord.Embed(colour=0x53dda4, title='New Guild')  # green colour
    await self.send_guild_stats(e, guild)

@commands.Cog.listener()
async def on_guild_remove(self, guild):
    e = discord.Embed(colour=0xdd5f53, title='Left Guild')  # red colour
    await self.send_guild_stats(e, guild)
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
    """Record the failed invocation and report unexpected errors via webhook."""
    await self.register_command(ctx)
    # Only surface genuine bugs: errors raised from inside a command body.
    if not isinstance(error, (commands.CommandInvokeError, commands.ConversionError)):
        return
    error = error.original
    # Expected/benign failures are not worth paging about.
    if isinstance(error, (discord.Forbidden, discord.NotFound, CannotPaginate)):
        return
    e = discord.Embed(title='Command Error', colour=0xcc3366)
    e.add_field(name='Name', value=ctx.command.qualified_name)
    e.add_field(name='Author', value=f'{ctx.author} (ID: {ctx.author.id})')
    fmt = f'Channel: {ctx.channel} (ID: {ctx.channel.id})'
    if ctx.guild:
        fmt = f'{fmt}\nGuild: {ctx.guild} (ID: {ctx.guild.id})'
    e.add_field(name='Location', value=fmt, inline=False)
    e.add_field(name='Content', value=textwrap.shorten(ctx.message.content, width=512))
    exc = ''.join(traceback.format_exception(type(error), error, error.__traceback__, chain=False))
    e.description = f'```py\n{exc}\n```'
    e.timestamp = datetime.datetime.utcnow()
    await self.webhook.send(embed=e)
@commands.Cog.listener()
async def on_socket_raw_send(self, data):
    # kind of weird way to check if we're sending
    # IDENTIFY or RESUME
    if '"op":2' not in data and '"op":6' not in data:
        return
    back_to_json = json.loads(data)
    if back_to_json['op'] == 2:
        # IDENTIFY: record per-shard; the shard field defaults to [0]
        # when absent (unsharded connection).
        payload = back_to_json['d']
        inner_shard = payload.get('shard', [0])
        self._identifies[inner_shard[0]].append(datetime.datetime.utcnow())
    else:
        # RESUME (op 6).
        self._resumes.append(datetime.datetime.utcnow())
    # don't want to permanently grow memory
    self._clear_gateway_data()

def add_record(self, record):
    """Queue a gateway log record for the webhook worker (non-blocking)."""
    # if self.bot.config.debug:
    #     return
    self._gateway_queue.put_nowait(record)
async def notify_gateway_status(self, record):
    """Post a single gateway log record to the webhook as 'Gateway'."""
    # Level → emoji; anything that isn't INFO/WARNING gets a cross mark.
    attributes = {
        'INFO': '\N{INFORMATION SOURCE}',
        'WARNING': '\N{WARNING SIGN}'
    }
    emoji = attributes.get(record.levelname, '\N{CROSS MARK}')
    dt = datetime.datetime.utcfromtimestamp(record.created)
    msg = f'{emoji} `[{dt:%Y-%m-%d %H:%M:%S}] {record.message}`'
    await self.webhook.send(msg, username='Gateway', avatar_url='https://i.imgur.com/4PnCKB3.png')
@commands.command(hidden=True)
@commands.is_owner()
async def bothealth(self, ctx):
    """Various bot health monitoring tools."""
    # This uses a lot of private methods because there is no
    # clean way of doing this otherwise.
    HEALTHY = discord.Colour(value=0x43B581)
    UNHEALTHY = discord.Colour(value=0xF04947)
    WARNING = discord.Colour(value=0xF09E47)
    total_warnings = 0
    embed = discord.Embed(title='Bot Health Report', colour=HEALTHY)
    # Check the connection pool health.
    pool = self.bot.pool
    total_waiting = len(pool._queue._getters)
    current_generation = pool._generation
    description = [
        f'Total `Pool.acquire` Waiters: {total_waiting}',
        f'Current Pool Generation: {current_generation}',
        f'Connections In Use: {len(pool._holders) - pool._queue.qsize()}'
    ]
    questionable_connections = 0
    connection_value = []
    for index, holder in enumerate(pool._holders, start=1):
        generation = holder._generation
        in_use = holder._in_use is not None
        is_closed = holder._con is None or holder._con.is_closed()
        display = f'gen={holder._generation} in_use={in_use} closed={is_closed}'
        # A connection is questionable if busy or from an old generation.
        questionable_connections += any((in_use, generation != current_generation))
        connection_value.append(f'<Holder i={index} {display}>')
    joined_value = '\n'.join(connection_value)
    embed.add_field(name='Connections', value=f'```py\n{joined_value}\n```', inline=False)
    spam_control = self.bot.spam_control
    being_spammed = [
        str(key) for key, value in spam_control._cache.items()
        if value._tokens == 0
    ]
    description.append(f'Current Spammers: {", ".join(being_spammed) if being_spammed else "None"}')
    description.append(f'Questionable Connections: {questionable_connections}')
    total_warnings += questionable_connections
    if being_spammed:
        embed.colour = WARNING
        total_warnings += 1
    try:
        task_retriever = asyncio.Task.all_tasks
    except AttributeError:
        # future proofing for 3.9 I guess
        task_retriever = asyncio.all_tasks
    # Bug fix: this call used to live in the try's ``else:`` branch, so on
    # Python versions without asyncio.Task.all_tasks (3.9+) the name
    # ``all_tasks`` was never bound and the command crashed with NameError
    # below.  Retrieve unconditionally with whichever function we found.
    all_tasks = task_retriever(loop=self.bot.loop)
    event_tasks = [
        t for t in all_tasks
        if 'Client._run_event' in repr(t) and not t.done()
    ]
    cogs_directory = os.path.dirname(__file__)
    tasks_directory = os.path.join('discord', 'ext', 'tasks', '__init__.py')
    inner_tasks = [
        t for t in all_tasks
        if cogs_directory in repr(t) or tasks_directory in repr(t)
    ]
    bad_inner_tasks = ", ".join(hex(id(t)) for t in inner_tasks if t.done() and t._exception is not None)
    total_warnings += bool(bad_inner_tasks)
    embed.add_field(name='Inner Tasks', value=f'Total: {len(inner_tasks)}\nFailed: {bad_inner_tasks or "None"}')
    embed.add_field(name='Events Waiting', value=f'Total: {len(event_tasks)}', inline=False)
    command_waiters = len(self._data_batch)
    is_locked = self._batch_lock.locked()
    description.append(f'Commands Waiting: {command_waiters}, Batch Locked: {is_locked}')
    # RESUME/IDENTIFY data
    yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
    total_resumes = sum(1 for dt in self._resumes if dt > yesterday)
    identifies = {
        shard_id: sum(1 for dt in dates if dt > yesterday)
        for shard_id, dates in self._identifies.items()
    }
    absolute_total_identifies = sum(identifies.values())
    resume_info_builder = [
        f'Total RESUMEs: {total_resumes}',
        f'Total IDENTIFYs: {absolute_total_identifies}'
    ]
    for shard_id, total in identifies.items():
        resume_info_builder.append(f'Shard ID {shard_id} IDENTIFYs: {total}')
    # More than ~5 IDENTIFYs per shard per day suggests reconnect churn.
    if absolute_total_identifies >= (len(self.bot.shards) * 5):
        total_warnings += 1
        embed.colour = WARNING
    embed.add_field(name='Gateway (last 24 hours)', value='\n'.join(resume_info_builder), inline=False)
    memory_usage = self.process.memory_full_info().uss / 1024**2
    cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
    embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB\n{cpu_usage:.2f}% CPU', inline=False)
    global_rate_limit = not self.bot.http._global_over.is_set()
    description.append(f'Global Rate Limit: {global_rate_limit}')
    if command_waiters >= 8:
        total_warnings += 1
        embed.colour = WARNING
    if global_rate_limit or total_warnings >= 9:
        embed.colour = UNHEALTHY
    embed.set_footer(text=f'{total_warnings} warning(s)')
    embed.description = '\n'.join(description)
    await ctx.send(embed=embed)
@commands.command(hidden=True, aliases=['cancel_task'])
@commands.is_owner()
async def debug_task(self, ctx, memory_id: hex_value):
    """Debug a task by a memory location."""
    task = object_at(memory_id)
    if task is None or not isinstance(task, asyncio.Task):
        return await ctx.send(f'Could not find Task object at {hex(memory_id)}.')
    # When invoked via the `cancel_task` alias, cancel instead of inspecting.
    if ctx.invoked_with == 'cancel_task':
        task.cancel()
        return await ctx.send(f'Cancelled task object {task!r}.')
    paginator = commands.Paginator(prefix='```py')
    fp = io.StringIO()
    frames = len(task.get_stack())
    paginator.add_line(f'# Total Frames: {frames}')
    task.print_stack(file=fp)
    for line in fp.getvalue().splitlines():
        paginator.add_line(line)
    for page in paginator.pages:
        await ctx.send(page)
async def tabulate_query(self, ctx, query, *args):
    """Run *query* with *args* and send the result set as an ASCII table.

    Falls back to a file upload when the rendered table would exceed
    Discord's 2000-character message limit.
    """
    records = await ctx.db.fetch(query, *args)
    if len(records) == 0:
        return await ctx.send('No results found.')
    headers = list(records[0].keys())
    table = formats.TabularData()
    table.set_columns(headers)
    table.add_rows(list(r.values()) for r in records)
    render = table.render()
    fmt = f'```\n{render}\n```'
    if len(fmt) > 2000:
        fp = io.BytesIO(fmt.encode('utf-8'))
        await ctx.send('Too many results...', file=discord.File(fp, 'results.txt'))
    else:
        await ctx.send(fmt)
@commands.group(hidden=True, invoke_without_command=True)
@commands.is_owner()
async def command_history(self, ctx):
    """Command history."""
    # '[!]' marks invocations that failed.
    query = """SELECT
                    CASE failed
                        WHEN TRUE THEN command || ' [!]'
                        ELSE command
                    END AS "command",
                    to_char(used, 'Mon DD HH12:MI:SS AM') AS "invoked",
                    author_id,
                    guild_id
               FROM commands
               ORDER BY used DESC
               LIMIT 15;
            """
    await self.tabulate_query(ctx, query)
@command_history.command(name='for')
@commands.is_owner()
async def command_history_for(self, ctx, days: typing.Optional[int] = 7, *, command: str):
    """Command history for a command."""
    # Per-guild success/failure counts for *command* over the last *days* days.
    query = """SELECT *, t.success + t.failed AS "total"
               FROM (
                   SELECT guild_id,
                          SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
                          SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
                   FROM commands
                   WHERE command=$1
                   AND used > (CURRENT_TIMESTAMP - $2::interval)
                   GROUP BY guild_id
               ) AS t
               ORDER BY "total" DESC
               LIMIT 30;
            """
    await self.tabulate_query(ctx, query, command, datetime.timedelta(days=days))
@command_history.command(name='guild', aliases=['server'])
@commands.is_owner()
async def command_history_guild(self, ctx, guild_id: int):
    """Command history for a guild."""
    # Latest 15 invocations in the guild; '[!]' marks failures.
    query = """SELECT
                    CASE failed
                        WHEN TRUE THEN command || ' [!]'
                        ELSE command
                    END AS "command",
                    channel_id,
                    author_id,
                    used
               FROM commands
               WHERE guild_id=$1
               ORDER BY used DESC
               LIMIT 15;
            """
    await self.tabulate_query(ctx, query, guild_id)
@command_history.command(name='user', aliases=['member'])
@commands.is_owner()
async def command_history_user(self, ctx, user_id: int):
    """Command history for a user."""
    # Latest 20 invocations by the user; '[!]' marks failures.
    query = """SELECT
                    CASE failed
                        WHEN TRUE THEN command || ' [!]'
                        ELSE command
                    END AS "command",
                    guild_id,
                    used
               FROM commands
               WHERE author_id=$1
               ORDER BY used DESC
               LIMIT 20;
            """
    await self.tabulate_query(ctx, query, user_id)
@command_history.command(name='log')
@commands.is_owner()
async def command_history_log(self, ctx, days=7):
"""Command history log for the last N days."""
query = """SELECT command, COUNT(*)
FROM commands
WHERE used > (CURRENT_TIMESTAMP - $1::interval)
GROUP BY command
ORDER BY 2 DESC
"""
all_commands = {
c.qualified_name: 0
for c in self.bot.walk_commands()
}
records = await ctx.db.fetch(query, datetime.timedelta(days=days))
for name, uses in records:
if name in all_commands:
all_commands[name] = uses
as_data = sorted(all_commands.items(), key=lambda t: t[1], reverse=True)
table = formats.TabularData()
table.set_columns(['Command', 'Uses'])
table.add_rows(tup for tup in as_data)
render = table.render()
embed = discord.Embed(title='Summary', colour=discord.Colour.green())
embed.set_footer(text='Since').timestamp = datetime.datetime.utcnow() - datetime.timedelta(days=days)
top_ten = '\n'.join(f'{command}: {uses}' for command, uses in records[:10])
bottom_ten = '\n'.join(f'{command}: {uses}' for command, uses in records[-10:])
embed.add_field(name='Top 10', value=top_ten)
embed.add_field(name='Bottom 10', value=bottom_ten)
unused = ', '.join(name for name, uses in as_data if uses == 0)
if len(unused) > 1024:
unused = 'Way too many...'
embed.add_field(name='Unused', value=unused, inline=False)
await ctx.send(embed=embed, file=discord.File(io.BytesIO(render.encode()), filename='full_results.txt'))
@command_history.command(name='cog')
@commands.is_owner()
async def command_history_cog(self, ctx, days: typing.Optional[int] = 7, *, cog: str = None):
"""Command history for a cog or grouped by a cog."""
interval = datetime.timedelta(days=days)
if cog is not None:
cog = self.bot.get_cog(cog)
if cog is None:
return await ctx.send(f'Unknown cog: {cog}')
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT command,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE command = any($1::text[])
AND used > (CURRENT_TIMESTAMP - $2::interval)
GROUP BY command
) AS t
ORDER BY "total" DESC
LIMIT 30;
"""
return await self.tabulate_query(ctx, query, [c.qualified_name for c in cog.walk_commands()], interval)
# A more manual query with a manual grouper.
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT command,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - $1::interval)
GROUP BY command
) AS t;
"""
class Count:
__slots__ = ('success', 'failed', 'total')
def __init__(self):
self.success = 0
self.failed = 0
self.total = 0
def add(self, record):
self.success += record['success']
self.failed += record['failed']
self.total += record['total']
data = defaultdict(Count)
records = await ctx.db.fetch(query, interval)
for record in records:
command = self.bot.get_command(record['command'])
if command is None or command.cog is None:
data['No Cog'].add(record)
else:
data[command.cog.qualified_name].add(record)
table = formats.TabularData()
table.set_columns(['Cog', 'Success', 'Failed', 'Total'])
data = sorted([
(cog, e.success, e.failed, e.total)
for cog, e in data.items()
], key=lambda t: t[-1], reverse=True)
table.add_rows(data)
render = table.render()
await ctx.safe_send(f'```\n{render}\n```')
# Preserve the original handler so teardown() can restore it on unload.
old_on_error = commands.AutoShardedBot.on_error

async def on_error(self, event, *args, **kwargs):
    """Monkey-patched replacement for ``AutoShardedBot.on_error``.

    Reports the traceback of an unhandled event exception to the Stats
    cog's logging webhook instead of only printing it to stderr.
    """
    e = discord.Embed(title='Event Error', colour=0xa32952)
    e.add_field(name='Event', value=event)
    # format_exc() is valid here: on_error runs inside the active except block.
    e.description = f'```py\n{traceback.format_exc()}\n```'
    e.timestamp = datetime.datetime.utcnow()
    args_str = ['```py']
    for index, arg in enumerate(args):
        args_str.append(f'[{index}]: {arg!r}')
    args_str.append('```')
    e.add_field(name='Args', value='\n'.join(args_str), inline=False)
    hook = self.get_cog('Stats').webhook
    try:
        await hook.send(embed=e)
    except:
        # Deliberate best-effort: error reporting must never raise from
        # inside the error handler itself.
        pass
def setup(bot):
    """Extension entry point: attach the Stats cog plus its log plumbing."""
    # The counters live on the bot object so they survive extension reloads.
    if not hasattr(bot, 'command_stats'):
        bot.command_stats = Counter()
    if not hasattr(bot, 'socket_stats'):
        bot.socket_stats = Counter()
    cog = Stats(bot)
    bot.add_cog(cog)
    # Forward gateway-related log records to the cog's webhook notifier.
    bot._stats_cog_gateway_handler = handler = GatewayHandler(cog)
    logging.getLogger().addHandler(handler)
    # Patch the class-level event error handler to report via webhook.
    commands.AutoShardedBot.on_error = on_error
def teardown(bot):
    """Extension exit point: undo everything ``setup`` installed."""
    commands.AutoShardedBot.on_error = old_on_error
    logging.getLogger().removeHandler(bot._stats_cog_gateway_handler)
    del bot._stats_cog_gateway_handler
|
import json
import os
from time import sleep
import requests
import pyrominfo.pyrominfo.snes as snes
from shutil import copy
from pyrominfo.pyrominfo import nintendo64
def n64_info(filename):
    """Parse an N64 ROM at *filename* and return its header property dict."""
    parser = nintendo64.Nintendo64Parser()
    return parser.parse(filename)
def snes_info(filename):
    """Parse a SNES ROM at *filename* and return its header property dict."""
    parser = snes.SNESParser()
    return parser.parse(filename)
def get_console(argument):
    """Map a ROM file extension to its console short-name.

    Returns '' for extensions that are recognised but unsupported, and
    None for unknown extensions.
    """
    extension_to_console = {
        'sfc': 'SNES',
        'smc': 'SNES',
        'md': '',
        'bin': '',
        'gb': 'GB',
        'gbc': 'GBC',
        'nes': 'NES',
        'z64': 'N64',
    }
    return extension_to_console.get(argument)
def giant_bomb_request(title, api_key, timeout=30):
    """Search the Giant Bomb API for *title* and return the decoded JSON.

    Fixes: use HTTPS (the API key was previously sent in clear text over
    HTTP), decode via ``response.json()`` instead of manual ``json.loads``,
    and bound the request with *timeout* (new optional parameter, default
    30s) so a stalled connection cannot hang the rip loop forever.
    """
    headers = {'User-Agent': 'gripper'}
    params = {
        'resources': 'game',
        'query': title,
        'api_key': api_key,
        'format': 'json'
    }
    response = requests.get(
        url='https://www.giantbomb.com/api/search/',
        headers=headers,
        params=params,
        timeout=timeout,
    )
    return response.json()
def rip_game():
    """Poll the mounted Retrode for a cartridge and copy its ROM to disk.

    Loops forever: every 5 seconds it lists /RETRODE, identifies the console
    from the first file's extension, reads the ROM header for the title,
    confirms the title against the Giant Bomb API, then copies the ROM into
    a './<title> - <region>' folder.
    """
    while True:
        path = '/RETRODE'
        api_key = os.environ['api-key']
        files = os.listdir(path)
        if 'RETRODE.CFG' in files:
            files.remove('RETRODE.CFG')
        if not files:
            # No cartridge inserted; poll again instead of crashing on files[0].
            sleep(5)
            continue
        breakout = False
        console = get_console(files[0].split('.')[-1])
        filename = f'{path}/{files[0]}'
        rom_info = None
        if console == 'N64':
            rom_info = n64_info(filename)
        if console == 'SNES':
            rom_info = snes_info(filename)
        if rom_info is None:
            # No header parser for this console yet; previously rom_info was
            # left unbound here and the next line raised NameError.
            sleep(5)
            continue
        title = rom_info["title"]
        search_results = giant_bomb_request(title, api_key)
        for results in search_results['results']:
            if breakout is True:
                break
            aliases = str(results.get('aliases')).lower().splitlines()
            # Compare case-insensitively on both sides; the API returns names
            # in their original capitalisation.
            if title.lower() in aliases or title.lower() == str(results['name']).lower():
                for platform in results['platforms']:
                    if platform['abbreviation'] == 'SNES':
                        # Build the folder from the same path we copy into; the
                        # old code tested './<title>' but created
                        # './<title> - <region>', so the check never matched.
                        # (Double quotes inside the f-string: reusing the outer
                        # quote is a SyntaxError before Python 3.12.)
                        dest_dir = f'./{title} - {rom_info["region"]}'
                        os.makedirs(dest_dir, exist_ok=True)
                        for file in files:
                            extension = file.split('.')[-1]
                            destination_file = f'{dest_dir}/{title}.{extension}'
                            if not os.path.exists(destination_file):
                                copy(filename, destination_file)
                        breakout = True
                        break
        sleep(5)
# Don't run the ripper while testing the container: idle so the container
# stays alive; re-enable rip_game() for real use.
if __name__ == '__main__':
    sleep(900)
    #rip_game()
| import json
import os
from time import sleep
import requests
import pyrominfo.pyrominfo.snes as snes
from shutil import copy
from pyrominfo.pyrominfo import nintendo64
def n64_info(filename):
n64_parser = nintendo64.Nintendo64Parser()
props = n64_parser.parse(filename)
return props
def snes_info(filename):
snes_parser = snes.SNESParser()
props = snes_parser.parse(filename)
return props
def get_console(argument):
switcher = {
'sfc': 'SNES',
'smc': 'SNES',
'md': '',
'bin': '',
'gb': 'GB',
'gbc': 'GBC',
'nes': 'NES',
'z64': 'N64',
}
return switcher.get(argument)
def giant_bomb_request(title, api_key):
headers = {'User-Agent': 'gripper'}
params = {
'resources': 'game',
'query': title,
'api_key': api_key,
'format': 'json'
}
response = requests.get(url='http://www.giantbomb.com/api/search/', headers=headers, params=params)
return json.loads(response.text)
def rip_game():
    """Poll the mounted Retrode for a cartridge and copy its ROM to disk.

    Loops forever: every 5 seconds it lists /RETRODE, identifies the console
    from the first file's extension, reads the ROM header for the title,
    confirms the title against the Giant Bomb API, then copies the ROM into
    a './<title> - <region>' folder.
    """
    while True:
        path = '/RETRODE'
        api_key = os.environ['api-key']
        files = os.listdir(path)
        if 'RETRODE.CFG' in files:
            files.remove('RETRODE.CFG')
        if not files:
            # No cartridge inserted; poll again instead of crashing on files[0].
            sleep(5)
            continue
        breakout = False
        console = get_console(files[0].split('.')[-1])
        filename = f'{path}/{files[0]}'
        rom_info = None
        if console == 'N64':
            rom_info = n64_info(filename)
        if console == 'SNES':
            rom_info = snes_info(filename)
        if rom_info is None:
            # No header parser for this console yet; previously rom_info was
            # left unbound here and the next line raised NameError.
            sleep(5)
            continue
        title = rom_info["title"]
        search_results = giant_bomb_request(title, api_key)
        for results in search_results['results']:
            if breakout is True:
                break
            aliases = str(results.get('aliases')).lower().splitlines()
            # Compare case-insensitively on both sides; the API returns names
            # in their original capitalisation.
            if title.lower() in aliases or title.lower() == str(results['name']).lower():
                for platform in results['platforms']:
                    if platform['abbreviation'] == 'SNES':
                        # Build the folder from the same path we copy into; the
                        # old code tested './<title>' but created
                        # './<title> - <region>', so the check never matched.
                        dest_dir = f'./{title} - {rom_info["region"]}'
                        os.makedirs(dest_dir, exist_ok=True)
                        for file in files:
                            extension = file.split('.')[-1]
                            destination_file = f'{dest_dir}/{title}.{extension}'
                            if not os.path.exists(destination_file):
                                copy(filename, destination_file)
                        breakout = True
                        break
        sleep(5)
#dont run code while testing container
if __name__ == '__main__':
sleep(900)
#rip_game()
|
import requests
from bs4 import BeautifulSoup
import jinja2
import re
class Chara:
    """Mutable bag of parsed character data used to fill the jinja2 template.

    The class-level defaults double as the attribute list; parsing_chara()
    overwrites them per character.
    """
    name = ''
    job = ''  # "Adventurer" or "Assist" (empty until parsed)
    hp = 0
    mp = 0
    # NOTE(review): `str` and `end` shadow builtins, but renaming them would
    # change the template interface ({{chara.str}} etc.), so they stay.
    str = 0
    end = 0
    dex = 0
    agi = 0
    mag = 0
    killer = ""        # e.g. "killer=Killer.bull, " (built by gen_killer_str)
    counter_hp = ""    # "counter_hp=True," when the passive grants it
    skills = ""        # rendered "skills=[...]" fragment
    passive_skills = ""  # rendered "passive_skills=[...]" fragment
class HtmlParser:
    """Cursor over the wiki page: walks 'div.basic' siblings after the TOC."""

    def __init__(self, text):
        self.soup = BeautifulSoup(text, 'html.parser')
        # Start the cursor at the table of contents; every data table lives
        # in a 'div.basic' sibling that follows it.
        self.soup_ptr = self.soup.find("div", class_='toc')

    def get_next_div(self):
        """Advance to (and return) the next 'div.basic' sibling holding a table."""
        while True:
            self.soup_ptr = self.soup_ptr.find_next_sibling("div", class_='basic')
            if self.soup_ptr.find("table") is not None:
                return self.soup_ptr
def parse_effs(effs_str):
    """Translate a Japanese effect phrase into a list of effect enum-name strings.

    Pure substring matching; several checks can fire at once, so the result
    may contain multiple names. Returns an empty list when nothing matches.
    """
    effs = []
    if "カウンター率" in effs_str:
        effs.append("Ability.counter_rate")
    if "ペネトレーション率" in effs_str:
        effs.append("Ability.pene_rate")
    if "必殺技ゲージ" in effs_str:
        effs.append("Ability.energy_bar")
    if "クリティカル率" in effs_str:
        effs.append("Ability.crit_rate")
    if "ガード率" in effs_str:
        effs.append("Ability.guard_rate")
    if "カウンター発生" in effs_str:
        effs.append("SuccessUp.counter")
    if "ペネトレーション発生" in effs_str:
        effs.append("SuccessUp.pene")
    if "クリティカル発生" in effs_str:
        effs.append("SuccessUp.crit")
    if "ガード発生" in effs_str:
        effs.append("SuccessUp.guard")
    # The strength/magic family is an if/elif ladder because the phrases
    # overlap (e.g. "魔力" contains "力"); the order here is significant.
    if "力と魔力" in effs_str:
        effs.append("Ability.str")
        effs.append("Ability.mag")
    elif "力と" in effs_str:
        effs.append("Ability.str")
    elif "力、魔力" in effs_str:
        effs.append("Ability.str")
        effs.append("Ability.mag")
    elif "魔力" in effs_str:
        effs.append("Ability.mag")
    elif "の力" in effs_str:
        effs.append("Ability.str")
    elif "、力" in effs_str:
        effs.append("Ability.str")
    elif effs_str.startswith("力"):
        effs.append("Ability.str")
    if "敏捷" in effs_str:
        effs.append("Ability.agi")
    if "器用" in effs_str:
        effs.append("Ability.dex")
    if "耐久" in effs_str:
        effs.append("Ability.end")
    # Elemental / damage-type resistances.
    if "火属性耐性" in effs_str:
        effs.append("Endurance.fire")
    if "地属性耐性" in effs_str:
        effs.append("Endurance.earth")
    if "風属性耐性" in effs_str:
        effs.append("Endurance.wind")
    if "水属性耐性" in effs_str:
        effs.append("Endurance.ice")
    if "雷属性耐性" in effs_str:
        effs.append("Endurance.thunder")
    if "光属性耐性" in effs_str:
        effs.append("Endurance.light")
    if "闇属性耐性" in effs_str:
        effs.append("Endurance.dark")
    if "物理耐性" in effs_str:
        effs.append("Endurance.phy")
    if "魔法耐性" in effs_str:
        effs.append("Endurance.mag")
    if "全体攻撃ダメージ" in effs_str:
        effs.append("Endurance.foes")
    if "単体攻撃ダメージ" in effs_str:
        effs.append("Endurance.foe")
    # Elemental attack boosts.
    if "火属性攻撃" in effs_str:
        effs.append("Damage.fire")
    if "地属性攻撃" in effs_str:
        effs.append("Damage.earth")
    if "風属性攻撃" in effs_str:
        effs.append("Damage.wind")
    if "水属性攻撃" in effs_str:
        effs.append("Damage.ice")
    if "雷属性攻撃" in effs_str:
        effs.append("Damage.thunder")
    if "光属性攻撃" in effs_str:
        effs.append("Damage.light")
    if "闇属性攻撃" in effs_str:
        effs.append("Damage.dark")
    if "全体攻撃被ダメージ" in effs_str:
        effs.append("Endurance.foes")
    if "単体攻撃被ダメージ" in effs_str:
        effs.append("Endurance.foe")
    # NOTE(review): the doubled look-alike alternatives below presumably
    # cover half-width vs full-width spellings — confirm against the data.
    if "HP" in effs_str or "HP" in effs_str:
        effs.append("Recover.hp_turn")
    if "MP" in effs_str or "MP" in effs_str:
        effs.append("Recover.mp_turn")
    return effs
def gen_eff_str(effs, scope, val_for_eff=None, turn=None):
    """Render each effect name in *effs* as an "Effect(...)" call and join them.

    The value defaults to 0 when none is given; the turn count is only
    emitted when both a (truthy) value and turn are present.
    """
    def render(eff):
        if turn and val_for_eff:
            return f"Effect({scope}, {eff}, {val_for_eff}, {turn})"
        if val_for_eff:
            return f"Effect({scope}, {eff}, {val_for_eff})"
        return f"Effect({scope}, {eff}, 0)"

    return ", ".join(render(e) for e in effs)
def parse_turns(text):
    """Extract the duration digit from a "...Nターンの間..." phrase, else None."""
    match = re.match(r".+(\d)ターンの間.+", text, re.UNICODE)
    return match.group(1) if match else None
def parse_scope(scope_str):
    """Map a Japanese target phrase to a Scope enum-name string.

    Keywords are checked in order of specificity (all-enemies before
    single-enemy, team before self); raises ValueError when no known
    target appears in the string.
    """
    ordered_targets = (
        ("敵全体", "Scope.foes"),
        ("敵単体", "Scope.foe"),
        ("味方全体", "Scope.my_team"),
        ("自分", "Scope.my_self"),
    )
    for keyword, scope in ordered_targets:
        if keyword in scope_str:
            return scope
    raise ValueError
def parse_atk(text):
    """Parse an attack clause into the positional part of a Skill declaration.

    Extracts target scope, power tier, element and physical/magic type, plus
    the optional temp-boost and boost-by-buff modifiers.
    Raises ValueError (or AttributeError when the element regex fails) when
    a mandatory part cannot be recognised.
    """
    scope = parse_scope(text)
    # Power tiers: longer phrases first (超強威力 contains 強威力 etc.).
    if "超強威力" in text:
        power = "Power.ultra"
    elif "超威力" in text:
        power = "Power.super"
    elif "強威力" in text:
        power = "Power.high"
    elif "中威力" in text:
        power = "Power.mid"
    elif "弱威力" in text:
        power = "Power.low"
    else:
        raise ValueError
    m = re.match(r".+(\w)属性(\w\w)攻撃.+", text, re.UNICODE)
    attr = m.group(1)
    phy_mag = m.group(2)
    if attr == "火":
        attr_dmg = "Damage.fire"
    elif attr == "地":
        attr_dmg = "Damage.earth"
    elif attr == "風":
        attr_dmg = "Damage.wind"
    elif attr == "水":
        attr_dmg = "Damage.ice"
    elif attr == "雷":
        attr_dmg = "Damage.thunder"
    elif attr == "光":
        attr_dmg = "Damage.light"
    elif attr == "闇":
        attr_dmg = "Damage.dark"
    else:
        raise ValueError
    if phy_mag == "物理":
        atk = "Attack.phy"
    elif phy_mag == "魔法":
        atk = "Attack.mag"
    else:
        raise ValueError
    temp_boost = ""
    if "技発動時のみ力を上昇" in text or "技発動時のみ魔力を上昇" in text:
        temp_boost = "temp_boost=True, "
    # Optional "per active buff, +N% skill power" modifier.
    boost_by_buff = ""
    m = re.match(r".*自分の(\w+)上昇効果1つにつき、この技の威力が(\d+)[%%]上昇.*", text, re.UNICODE)
    if m is not None:
        up_val = int(m.group(2))
        up_val /= 100
        up_indexes = m.group(1)
        effs = parse_effs(up_indexes)
        enum_str = gen_eff_str(effs, "Scope.my_self", up_val)
        boost_by_buff = f'boost_by_buff=[{enum_str}],'
    atk_str = f"{scope}, {power}, {attr_dmg}, {atk}, {temp_boost} {boost_by_buff}"
    return atk_str
def parse_debuff(text, turn):
    """Parse an enemy-debuff clause into Effect(...) strings, or None.

    Tries "reduce X by N%" first, then falls back to "increase damage taken
    by N%", which is also modelled as a debuff.
    """
    m = re.match(r".*(敵単体|敵全体)(.+?)を?(\d+)[%%]減少.*", text, re.UNICODE)
    if m is None:
        m = re.match(r".+(敵単体|敵全体)(.+被ダメージ).*を(\d+)[%%]増加.*", text, re.UNICODE)
    if m is None:
        return None
    scope = parse_scope(m.group(1))
    effs_str = m.group(2)
    effs = parse_effs(effs_str)
    down_val = int(m.group(3))
    down_val /= 100
    enum_str = gen_eff_str(effs, scope, down_val, turn)
    return enum_str
def parse_recover_hp(text, turn):
    """Parse HP-recovery clauses into an Effect(...) string.

    Handles per-turn regen, the generic team heal skill (hard-coded 0.8)
    and a flat percentage team heal. Returns None implicitly when nothing
    matches.
    """
    m = re.match(r".*(\d+)ターンの間、(味方全体|自分)に(\d+)[%%]の(HP|HP)治癒付与.*", text, re.UNICODE)
    if m:
        turn = m.group(1)
        scope = parse_scope(m.group(2))
        up_val = int(m.group(3)) / 100
        return f"Effect({scope}, Recover.hp_turn, {up_val}, {turn})"
    m = re.match(r".*味方全体に\w+の(HP|HP)回復技.*", text, re.UNICODE)
    if m:
        # Amount is only qualitative in the text; 0.8 is the estimate used.
        return f"Effect(Scope.my_team, Recover.hp_imm, 0.8)"
    m = re.match(r".*味方全体の(HP|HP)を(\d+)[%%]回復.*", text, re.UNICODE)
    if m:
        up_val = int(m.group(2)) / 100
        return f"Effect(Scope.my_team, Recover.hp_imm, {up_val})"
def parse_recover_mp(text, turn):
    """Parse MP-recovery clauses into an Effect(...) string.

    Covers per-turn regen plus immediate team/self percentage restores;
    returns None implicitly when nothing matches.
    """
    m = re.match(r".*(\d+)ターンの間、(味方全体|自分)に(\d+)[%%]の(MP|MP)回復.*", text, re.UNICODE)
    if m:
        turn = m.group(1)
        scope = parse_scope(m.group(2))
        up_val = int(m.group(3)) / 100
        return f"Effect({scope}, Recover.mp_turn, {up_val}, {turn})"
    m = re.match(r".*味方全体の(MP|MP)を(\d+)[%%]回復.*", text, re.UNICODE)
    if m:
        up_val = int(m.group(2)) / 100
        return f"Effect(Scope.my_team, Recover.mp_imm, {up_val})"
    m = re.match(r".*自分の(MP|MP)を(\d+)[%%]回復.*", text, re.UNICODE)
    if m:
        up_val = int(m.group(2)) / 100
        return f"Effect(Scope.my_self, Recover.mp_imm, {up_val})"
def parse_buff(text, turn):
    """Parse an ally buff/mitigation clause into Effect(...) strings, or None."""
    m = re.match(r".*(味方全体|自分)(.+)を(\d+)[%%](上昇|軽減).*", text, re.UNICODE)
    if m is None:
        return None
    scope = parse_scope(m.group(1))
    effs_str = m.group(2)
    effs = parse_effs(effs_str)
    up_val = int(m.group(3))
    up_val /= 100
    enum_str = gen_eff_str(effs, scope, up_val, turn)
    return enum_str
def parse_passive_buff(text):
    """Parse a passive description into self-targeted Effect(...) strings.

    Two independent patterns can both contribute: a flat "+N%" boost
    (plain Ability boosts are filtered out here) and a per-turn
    "recover N%" regen. Returns a joined string, possibly empty.
    """
    ret_effs = []
    scope = "Scope.my_self"
    m = re.match(r"(.+)が(\d+)[%%]上昇.*", text, re.UNICODE)
    if m:
        effs_str = m.group(1)
        effs = parse_effs(effs_str)
        # Plain ability boosts are not modelled as passive buff effects.
        effs = [e for e in effs if "Ability" not in e]
        up_val = int(m.group(2))
        up_val /= 100
        enum_str = gen_eff_str(effs, scope, up_val)
        ret_effs.append(enum_str)
    m = re.match(r".*毎ターン(.+)が(\d+)[%%]回復.*", text, re.UNICODE)
    if m:
        effs_str = m.group(1)
        effs = parse_effs(effs_str)
        up_val = int(m.group(2))
        up_val /= 100
        enum_str = gen_eff_str(effs, scope, up_val)
        ret_effs.append(enum_str)
    return ", ".join(ret_effs)
def parse_adj_buff(text):
    """Parse buff/debuff bookkeeping clauses (dispel, extend, shorten).

    The four patterns are checked independently, so one clause can emit
    several adjustments. Returns a joined string, possibly empty.
    """
    ret_effs = []
    m = re.match(r".*(敵単体|敵全体)の(.+)上昇効果を解除.*", text, re.UNICODE)
    if m:
        scope = parse_scope(m.group(1))
        effs_str = m.group(2)
        effs = parse_effs(effs_str)
        # One clear-buff effect per stat named in the clause.
        for e in effs:
            enum_str = f"Effect({scope}, AdjBuff.clear_buff, 0, 0, {e})"
            ret_effs.append(enum_str)
    m = re.match(r".*(自分|味方全体)のステイタス上昇効果.*(\d+)ターン延長", text, re.UNICODE)
    if m:
        scope = parse_scope(m.group(1))
        turn_val = m.group(2)
        enum_str = f"Effect({scope}, AdjBuff.extend_buff, {turn_val}, 0)"
        ret_effs.append(enum_str)
    m = re.match(r".*(敵単体|敵全体)のステイタス減少効果.*(\d+)ターン延長", text, re.UNICODE)
    if m:
        scope = parse_scope(m.group(1))
        turn_val = m.group(2)
        enum_str = f"Effect({scope}, AdjBuff.extend_debuff, {turn_val}, 0)"
        ret_effs.append(enum_str)
    m = re.match(r".*(敵単体|敵全体)のステイタス上昇効果.*(\d+)ターン減少", text, re.UNICODE)
    if m:
        scope = parse_scope(m.group(1))
        turn_val = m.group(2)
        enum_str = f"Effect({scope}, AdjBuff.shorten_buff, {turn_val}, 0)"
        ret_effs.append(enum_str)
    return ", ".join(ret_effs)
def gen_skill_str(text, is_special=False):
    """Build a "Skill(...)" declaration string from a skill description.

    Parses the attack, buffs, debuffs, buff adjustments, recovery effects
    and MP cost out of the Japanese text. Set *is_special* for the special
    skill, which carries no MP cost.
    """
    text = text.replace("\n", "")
    text = text.replace("・", "")
    print(text)
    atk_str = ""
    mp_str = ""
    special_str = ""
    is_fast_str = ""
    if "すばやく" in text:
        is_fast_str = "is_fast=True, "
    if "攻撃。" in text:
        # has attack
        atk_str = parse_atk(text)
    turn = parse_turns(text)
    # Split into clauses; a clause that names no target inherits the most
    # recently seen one (味方全体 / 自分).
    texts = []
    texts_tmp = text.split("し、")
    scope_guessing = None
    for txt in texts_tmp:
        if "味方全体" in txt:
            scope_guessing = "味方全体"
        elif "自分" in txt:
            scope_guessing = "自分"
        if "自分" not in txt and "味方全体" not in txt:
            if scope_guessing:
                txt = scope_guessing + txt
        texts.extend(txt.split("さらに"))
    buffs_eff = []
    debuffs_eff = []
    adj_buffs_eff = []
    for t in texts:
        b = parse_buff(t, turn)
        d = parse_debuff(t, turn)
        a = parse_adj_buff(t)
        rhp = parse_recover_hp(t, turn)
        rmp = parse_recover_mp(t, turn)
        if b:
            buffs_eff.append(b)
        if rhp:
            buffs_eff.append(rhp)
        if rmp:
            buffs_eff.append(rmp)
        if d:
            debuffs_eff.append(d)
        if a:
            adj_buffs_eff.append(a)
    # Single quotes inside the f-strings: reusing the outer quote character
    # is a SyntaxError before Python 3.12 (PEP 701).
    buffs_str = f"buffs=[{', '.join(buffs_eff)}],"
    debuffs_str = f"debuffs=[{', '.join(debuffs_eff)}],"
    adj_buffs_str = f"adj_buffs=[{', '.join(adj_buffs_eff)}],"
    if is_special:
        special_str = "is_special=True,"
    else:
        m = re.match(r".+(MP:(\d+)).*", text, re.UNICODE)
        if m:
            # group(1) is the whole "MP:<n>" text; the numeric cost itself is
            # group(2) — using group(1) produced "mp=MP:12," instead of "mp=12,".
            mp = m.group(2)
            mp_str = f"mp={mp},"
    skill_dec_str = f"Skill({is_fast_str} {atk_str} {special_str} {mp_str} {buffs_str} {debuffs_str} {adj_buffs_str})"
    return skill_dec_str
def gen_passive_skill_str(text):
    """Echo *text* and return its passive-buff effect string (may be empty)."""
    print(text)
    return parse_passive_buff(text)
def gen_counter_hp_str(text):
    """Return the counter_hp keyword when the passive heals HP on counter."""
    pattern = r".*】.*カウンター発生時.*通常攻撃.*(HP|HP)回復"
    return "counter_hp=True," if re.match(pattern, text, re.UNICODE) else ""
def gen_killer_str(text):
    """Return the killer keyword argument for a "<species>の敵を攻撃" passive.

    Returns "" when the text is not a killer passive; raises ValueError for
    an unrecognised species so new ones are noticed instead of ignored.
    """
    # Species name -> Killer enum member; replaces a 12-branch elif ladder.
    killer_names = {
        "猛牛系": "bull",
        "巨人系": "giant",
        "魔獣系": "beast",
        "精霊系": "fairy",
        "植物系": "plant",
        "昆虫系": "bug",
        "堅鉱系": "rock",
        "蠕獣系": "worm",
        "竜系": "dragon",
        "水棲系": "aquatic",
        "妖鬼系": "orge",  # sic: matches the existing Killer enum member name
        "幽魔系": "undead",
    }
    m = re.match(r".*】(\w+)の敵を攻撃.*(\d+)[%%]上昇.*", text, re.UNICODE)
    if m:
        killer = m.group(1)
        if killer not in killer_names:
            raise ValueError
        return f"killer=Killer.{killer_names[killer]}, "
    return ""
def parsing_chara(html_text):
    """Parse a character wiki page and print template-rendered declarations.

    Walks the page tables in document order: basics, limit-break status,
    then (for adventurers) special skill, skills and passive skills.
    Adventurers print as one declaration; assists print one variant per
    skill level.
    """
    parser = HtmlParser(html_text)
    chara = Chara()
    basics_table = parser.get_next_div()
    for tr in basics_table.table.find_all('tr'):
        col = tr.th.text
        val = tr.td.text
        if col == "名称":
            chara.name = val
        if col == "カテゴリ":
            if val == "冒険者":
                chara.job = "Adventurer"
            elif val == "アシスト":
                chara.job = "Assist"
    # Skip forward to the stat table that contains the limit-break maxima.
    limit_break_status_table = parser.get_next_div()
    while "最大値" not in limit_break_status_table.text:
        limit_break_status_table = parser.get_next_div()
    #limit_break_status_table = parser.get_next_div()
    for tr in limit_break_status_table.table.find_all('tr'):
        if tr.td is None:
            continue
        col = tr.td
        val = col.find_next_sibling()
        print(col.text, val.text)
        if col.text == "HP":
            chara.hp = int(val.text)
        if col.text == "MP":
            chara.mp = int(val.text)
        # Stats are shown as "value(bonus)"; keep only the base value.
        if col.text == "物攻":
            chara.str = int(val.text.split("(")[0])
        if col.text == "魔攻":
            chara.mag = int(val.text.split("(")[0])
        if col.text == "防御":
            chara.end = int(val.text.split("(")[0])
        if col.text == "器用":
            chara.dex = int(val.text.split("(")[0])
        if col.text == "敏捷":
            chara.agi = int(val.text.split("(")[0])
    all_skills = []
    all_passive_skills = []
    if chara.job == "Adventurer":
        # Adventurer pages have an extra status table, then the special skill.
        status_table_no_used = parser.get_next_div()
        special_skill = parser.get_next_div()
        special_skill_dec_str = gen_skill_str(special_skill.text, True)
    skills = parser.get_next_div()
    for s in skills.find_all("td"):
        skill_str = gen_skill_str(s.text)
        all_skills.append(skill_str)
    if chara.job == "Adventurer":
        all_skills.append(special_skill_dec_str)
    concated_skills = ',\n '.join(all_skills)
    chara.skills = f"skills=[{concated_skills}],"
    if chara.job == "Adventurer":
        passive_skills = parser.get_next_div()
        for s in passive_skills.find_all("td"):
            passive_skill_str = gen_passive_skill_str(s.text)
            if passive_skill_str:
                all_passive_skills.append(passive_skill_str)
            # Killer and counter-HP passives: keep only the first one found.
            if chara.killer == "":
                chara.killer = gen_killer_str(s.text)
            if chara.counter_hp == "":
                chara.counter_hp = gen_counter_hp_str(s.text)
        concated_passive_skills = ',\n '.join(all_passive_skills)
        chara.passive_skills = f"passive_skills=[Skill(buffs=[{concated_passive_skills}])],"
    template = jinja2.Template("""
{{chara.job}}("{{chara.name}}", {{chara.hp}}, {{chara.mp}},
{{chara.str}}, {{chara.end}}, {{chara.dex}}, {{chara.agi}}, {{chara.mag}},
{{chara.skills}}
{{chara.passive_skills}}
{{chara.killer}}
{{chara.counter_hp}}
),
""")
    if chara.job == "Adventurer":
        out = template.render(chara=chara)
        print(out)
    else:
        # Assists: render one declaration per skill level (index 0 is skipped).
        for i, s in enumerate(all_skills):
            print("======================================================")
            if i == 0:
                continue
            elif i == 1:
                print("LV 60~76:")
            elif i == 2:
                print("LV 80:")
            else:
                raise
            chara.skills = f"skill={s}"
            out = template.render(chara=chara)
            print(out)
def parsing_chara_from_web(http_url):
    """Fetch a character page over HTTP and run the parser on its HTML."""
    parsing_chara(requests.get(http_url).text)
if __name__ == '__main__':
    # Parse a locally saved page; use parsing_chara_from_web for live pages.
    with open('tmp.html', 'r', encoding="utf-8") as f:
        html_text_to_test = f.read()
    parsing_chara(html_text_to_test)
| import requests
from bs4 import BeautifulSoup
import jinja2
import re
class Chara:
name = ''
job = ''
hp = 0
mp = 0
str = 0
end = 0
dex = 0
agi = 0
mag = 0
killer = ""
counter_hp = ""
skills = ""
passive_skills = ""
class HtmlParser:
def __init__(self, text):
self.soup = BeautifulSoup(text, 'html.parser')
self.soup_ptr = self.soup.find("div", class_='toc')
def get_next_div(self):
found = False
while not found:
self.soup_ptr = self.soup_ptr.find_next_sibling("div", class_='basic')
if self.soup_ptr.find("table") is not None:
found = True
return self.soup_ptr
def parse_effs(effs_str):
effs = []
if "カウンター率" in effs_str:
effs.append("Ability.counter_rate")
if "ペネトレーション率" in effs_str:
effs.append("Ability.pene_rate")
if "必殺技ゲージ" in effs_str:
effs.append("Ability.energy_bar")
if "クリティカル率" in effs_str:
effs.append("Ability.crit_rate")
if "ガード率" in effs_str:
effs.append("Ability.guard_rate")
if "カウンター発生" in effs_str:
effs.append("SuccessUp.counter")
if "ペネトレーション発生" in effs_str:
effs.append("SuccessUp.pene")
if "クリティカル発生" in effs_str:
effs.append("SuccessUp.crit")
if "ガード発生" in effs_str:
effs.append("SuccessUp.guard")
if "力と魔力" in effs_str:
effs.append("Ability.str")
effs.append("Ability.mag")
elif "力と" in effs_str:
effs.append("Ability.str")
elif "力、魔力" in effs_str:
effs.append("Ability.str")
effs.append("Ability.mag")
elif "魔力" in effs_str:
effs.append("Ability.mag")
elif "の力" in effs_str:
effs.append("Ability.str")
elif "、力" in effs_str:
effs.append("Ability.str")
elif effs_str.startswith("力"):
effs.append("Ability.str")
if "敏捷" in effs_str:
effs.append("Ability.agi")
if "器用" in effs_str:
effs.append("Ability.dex")
if "耐久" in effs_str:
effs.append("Ability.end")
if "火属性耐性" in effs_str:
effs.append("Endurance.fire")
if "地属性耐性" in effs_str:
effs.append("Endurance.earth")
if "風属性耐性" in effs_str:
effs.append("Endurance.wind")
if "水属性耐性" in effs_str:
effs.append("Endurance.ice")
if "雷属性耐性" in effs_str:
effs.append("Endurance.thunder")
if "光属性耐性" in effs_str:
effs.append("Endurance.light")
if "闇属性耐性" in effs_str:
effs.append("Endurance.dark")
if "物理耐性" in effs_str:
effs.append("Endurance.phy")
if "魔法耐性" in effs_str:
effs.append("Endurance.mag")
if "全体攻撃ダメージ" in effs_str:
effs.append("Endurance.foes")
if "単体攻撃ダメージ" in effs_str:
effs.append("Endurance.foe")
if "火属性攻撃" in effs_str:
effs.append("Damage.fire")
if "地属性攻撃" in effs_str:
effs.append("Damage.earth")
if "風属性攻撃" in effs_str:
effs.append("Damage.wind")
if "水属性攻撃" in effs_str:
effs.append("Damage.ice")
if "雷属性攻撃" in effs_str:
effs.append("Damage.thunder")
if "光属性攻撃" in effs_str:
effs.append("Damage.light")
if "闇属性攻撃" in effs_str:
effs.append("Damage.dark")
if "全体攻撃被ダメージ" in effs_str:
effs.append("Endurance.foes")
if "単体攻撃被ダメージ" in effs_str:
effs.append("Endurance.foe")
if "HP" in effs_str or "HP" in effs_str:
effs.append("Recover.hp_turn")
if "MP" in effs_str or "MP" in effs_str:
effs.append("Recover.mp_turn")
return effs
def gen_eff_str(effs, scope, val_for_eff=None, turn=None):
eff_enums = []
for e in effs:
if turn and val_for_eff:
eff_enums.append(f"Effect({scope}, {e}, {val_for_eff}, {turn})")
elif val_for_eff:
eff_enums.append(f"Effect({scope}, {e}, {val_for_eff})")
else:
eff_enums.append(f"Effect({scope}, {e}, 0)")
ret = ", ".join(eff_enums)
return ret
def parse_turns(text):
m = re.match(r".+(\d)ターンの間.+", text, re.UNICODE)
if m is None:
return None
turn = m.group(1)
return turn
def parse_scope(scope_str):
if "敵全体" in scope_str:
scope = "Scope.foes"
elif "敵単体" in scope_str:
scope = "Scope.foe"
elif "味方全体" in scope_str:
scope = "Scope.my_team"
elif "自分" in scope_str:
scope = "Scope.my_self"
else:
raise ValueError
return scope
def parse_atk(text):
scope = parse_scope(text)
if "超強威力" in text:
power = "Power.ultra"
elif "超威力" in text:
power = "Power.super"
elif "強威力" in text:
power = "Power.high"
elif "中威力" in text:
power = "Power.mid"
elif "弱威力" in text:
power = "Power.low"
else:
raise ValueError
m = re.match(r".+(\w)属性(\w\w)攻撃.+", text, re.UNICODE)
attr = m.group(1)
phy_mag = m.group(2)
if attr == "火":
attr_dmg = "Damage.fire"
elif attr == "地":
attr_dmg = "Damage.earth"
elif attr == "風":
attr_dmg = "Damage.wind"
elif attr == "水":
attr_dmg = "Damage.ice"
elif attr == "雷":
attr_dmg = "Damage.thunder"
elif attr == "光":
attr_dmg = "Damage.light"
elif attr == "闇":
attr_dmg = "Damage.dark"
else:
raise ValueError
if phy_mag == "物理":
atk = "Attack.phy"
elif phy_mag == "魔法":
atk = "Attack.mag"
else:
raise ValueError
temp_boost = ""
if "技発動時のみ力を上昇" in text or "技発動時のみ魔力を上昇" in text:
temp_boost = "temp_boost=True, "
boost_by_buff = ""
m = re.match(r".*自分の(\w+)上昇効果1つにつき、この技の威力が(\d+)[%%]上昇.*", text, re.UNICODE)
if m is not None:
up_val = int(m.group(2))
up_val /= 100
up_indexes = m.group(1)
effs = parse_effs(up_indexes)
enum_str = gen_eff_str(effs, "Scope.my_self", up_val)
boost_by_buff = f'boost_by_buff=[{enum_str}],'
atk_str = f"{scope}, {power}, {attr_dmg}, {atk}, {temp_boost} {boost_by_buff}"
return atk_str
def parse_debuff(text, turn):
m = re.match(r".*(敵単体|敵全体)(.+?)を?(\d+)[%%]減少.*", text, re.UNICODE)
if m is None:
m = re.match(r".+(敵単体|敵全体)(.+被ダメージ).*を(\d+)[%%]増加.*", text, re.UNICODE)
if m is None:
return None
scope = parse_scope(m.group(1))
effs_str = m.group(2)
effs = parse_effs(effs_str)
down_val = int(m.group(3))
down_val /= 100
enum_str = gen_eff_str(effs, scope, down_val, turn)
return enum_str
def parse_recover_hp(text, turn):
m = re.match(r".*(\d+)ターンの間、(味方全体|自分)に(\d+)[%%]の(HP|HP)治癒付与.*", text, re.UNICODE)
if m:
turn = m.group(1)
scope = parse_scope(m.group(2))
up_val = int(m.group(3)) / 100
return f"Effect({scope}, Recover.hp_turn, {up_val}, {turn})"
m = re.match(r".*味方全体に\w+の(HP|HP)回復技.*", text, re.UNICODE)
if m:
return f"Effect(Scope.my_team, Recover.hp_imm, 0.8)"
m = re.match(r".*味方全体の(HP|HP)を(\d+)[%%]回復.*", text, re.UNICODE)
if m:
up_val = int(m.group(2)) / 100
return f"Effect(Scope.my_team, Recover.hp_imm, {up_val})"
def parse_recover_mp(text, turn):
m = re.match(r".*(\d+)ターンの間、(味方全体|自分)に(\d+)[%%]の(MP|MP)回復.*", text, re.UNICODE)
if m:
turn = m.group(1)
scope = parse_scope(m.group(2))
up_val = int(m.group(3)) / 100
return f"Effect({scope}, Recover.mp_turn, {up_val}, {turn})"
m = re.match(r".*味方全体の(MP|MP)を(\d+)[%%]回復.*", text, re.UNICODE)
if m:
up_val = int(m.group(2)) / 100
return f"Effect(Scope.my_team, Recover.mp_imm, {up_val})"
m = re.match(r".*自分の(MP|MP)を(\d+)[%%]回復.*", text, re.UNICODE)
if m:
up_val = int(m.group(2)) / 100
return f"Effect(Scope.my_self, Recover.mp_imm, {up_val})"
def parse_buff(text, turn):
m = re.match(r".*(味方全体|自分)(.+)を(\d+)[%%](上昇|軽減).*", text, re.UNICODE)
if m is None:
return None
scope = parse_scope(m.group(1))
effs_str = m.group(2)
effs = parse_effs(effs_str)
up_val = int(m.group(3))
up_val /= 100
enum_str = gen_eff_str(effs, scope, up_val, turn)
return enum_str
def parse_passive_buff(text):
ret_effs = []
scope = "Scope.my_self"
m = re.match(r"(.+)が(\d+)[%%]上昇.*", text, re.UNICODE)
if m:
effs_str = m.group(1)
effs = parse_effs(effs_str)
effs = [e for e in effs if "Ability" not in e]
up_val = int(m.group(2))
up_val /= 100
enum_str = gen_eff_str(effs, scope, up_val)
ret_effs.append(enum_str)
m = re.match(r".*毎ターン(.+)が(\d+)[%%]回復.*", text, re.UNICODE)
if m:
effs_str = m.group(1)
effs = parse_effs(effs_str)
up_val = int(m.group(2))
up_val /= 100
enum_str = gen_eff_str(effs, scope, up_val)
ret_effs.append(enum_str)
return ", ".join(ret_effs)
def parse_adj_buff(text):
ret_effs = []
m = re.match(r".*(敵単体|敵全体)の(.+)上昇効果を解除.*", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
effs_str = m.group(2)
effs = parse_effs(effs_str)
for e in effs:
enum_str = f"Effect({scope}, AdjBuff.clear_buff, 0, 0, {e})"
ret_effs.append(enum_str)
m = re.match(r".*(自分|味方全体)のステイタス上昇効果.*(\d+)ターン延長", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
turn_val = m.group(2)
enum_str = f"Effect({scope}, AdjBuff.extend_buff, {turn_val}, 0)"
ret_effs.append(enum_str)
m = re.match(r".*(敵単体|敵全体)のステイタス減少効果.*(\d+)ターン延長", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
turn_val = m.group(2)
enum_str = f"Effect({scope}, AdjBuff.extend_debuff, {turn_val}, 0)"
ret_effs.append(enum_str)
m = re.match(r".*(敵単体|敵全体)のステイタス上昇効果.*(\d+)ターン減少", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
turn_val = m.group(2)
enum_str = f"Effect({scope}, AdjBuff.shorten_buff, {turn_val}, 0)"
ret_effs.append(enum_str)
return ", ".join(ret_effs)
def gen_skill_str(text, is_special=False):
    """Build a "Skill(...)" declaration string from a skill description.

    Parses the attack, buffs, debuffs, buff adjustments, recovery effects
    and MP cost out of the Japanese text. Set *is_special* for the special
    skill, which carries no MP cost.
    """
    text = text.replace("\n", "")
    text = text.replace("・", "")
    print(text)
    atk_str = ""
    mp_str = ""
    special_str = ""
    is_fast_str = ""
    if "すばやく" in text:
        is_fast_str = "is_fast=True, "
    if "攻撃。" in text:
        # has attack
        atk_str = parse_atk(text)
    turn = parse_turns(text)
    # Split into clauses; a clause that names no target inherits the most
    # recently seen one (味方全体 / 自分).
    texts = []
    texts_tmp = text.split("し、")
    scope_guessing = None
    for txt in texts_tmp:
        if "味方全体" in txt:
            scope_guessing = "味方全体"
        elif "自分" in txt:
            scope_guessing = "自分"
        if "自分" not in txt and "味方全体" not in txt:
            if scope_guessing:
                txt = scope_guessing + txt
        texts.extend(txt.split("さらに"))
    buffs_eff = []
    debuffs_eff = []
    adj_buffs_eff = []
    for t in texts:
        b = parse_buff(t, turn)
        d = parse_debuff(t, turn)
        a = parse_adj_buff(t)
        rhp = parse_recover_hp(t, turn)
        rmp = parse_recover_mp(t, turn)
        if b:
            buffs_eff.append(b)
        if rhp:
            buffs_eff.append(rhp)
        if rmp:
            buffs_eff.append(rmp)
        if d:
            debuffs_eff.append(d)
        if a:
            adj_buffs_eff.append(a)
    buffs_str = f"buffs=[{', '.join(buffs_eff)}],"
    debuffs_str = f"debuffs=[{', '.join(debuffs_eff)}],"
    adj_buffs_str = f"adj_buffs=[{', '.join(adj_buffs_eff)}],"
    if is_special:
        special_str = "is_special=True,"
    else:
        m = re.match(r".+(MP:(\d+)).*", text, re.UNICODE)
        if m:
            # group(1) is the whole "MP:<n>" text; the numeric cost itself is
            # group(2) — using group(1) produced "mp=MP:12," instead of "mp=12,".
            mp = m.group(2)
            mp_str = f"mp={mp},"
    skill_dec_str = f"Skill({is_fast_str} {atk_str} {special_str} {mp_str} {buffs_str} {debuffs_str} {adj_buffs_str})"
    return skill_dec_str
def gen_passive_skill_str(text):
b = parse_passive_buff(text)
print(text)
return b
def gen_counter_hp_str(text):
m = re.match(r".*】.*カウンター発生時.*通常攻撃.*(HP|HP)回復", text, re.UNICODE)
if m:
return "counter_hp=True,"
return ""
def gen_killer_str(text):
m = re.match(r".*】(\w+)の敵を攻撃.*(\d+)[%%]上昇.*", text, re.UNICODE)
if m:
killer = m.group(1)
if killer == "猛牛系":
return "killer=Killer.bull, "
elif killer == "巨人系":
return "killer=Killer.giant, "
elif killer == "魔獣系":
return "killer=Killer.beast, "
elif killer == "精霊系":
return "killer=Killer.fairy, "
elif killer == "植物系":
return "killer=Killer.plant, "
elif killer == "昆虫系":
return "killer=Killer.bug, "
elif killer == "堅鉱系":
return "killer=Killer.rock, "
elif killer == "蠕獣系":
return "killer=Killer.worm, "
elif killer == "竜系":
return "killer=Killer.dragon, "
elif killer == "水棲系":
return "killer=Killer.aquatic, "
elif killer == "妖鬼系":
return "killer=Killer.orge, "
elif killer == "幽魔系":
return "killer=Killer.undead, "
else:
raise ValueError
return ""
def parsing_chara(html_text):
parser = HtmlParser(html_text)
chara = Chara()
basics_table = parser.get_next_div()
for tr in basics_table.table.find_all('tr'):
col = tr.th.text
val = tr.td.text
if col == "名称":
chara.name = val
if col == "カテゴリ":
if val == "冒険者":
chara.job = "Adventurer"
elif val == "アシスト":
chara.job = "Assist"
limit_break_status_table = parser.get_next_div()
while "最大値" not in limit_break_status_table.text:
limit_break_status_table = parser.get_next_div()
#limit_break_status_table = parser.get_next_div()
for tr in limit_break_status_table.table.find_all('tr'):
if tr.td is None:
continue
col = tr.td
val = col.find_next_sibling()
print(col.text, val.text)
if col.text == "HP":
chara.hp = int(val.text)
if col.text == "MP":
chara.mp = int(val.text)
if col.text == "物攻":
chara.str = int(val.text.split("(")[0])
if col.text == "魔攻":
chara.mag = int(val.text.split("(")[0])
if col.text == "防御":
chara.end = int(val.text.split("(")[0])
if col.text == "器用":
chara.dex = int(val.text.split("(")[0])
if col.text == "敏捷":
chara.agi = int(val.text.split("(")[0])
all_skills = []
all_passive_skills = []
if chara.job == "Adventurer":
status_table_no_used = parser.get_next_div()
special_skill = parser.get_next_div()
special_skill_dec_str = gen_skill_str(special_skill.text, True)
skills = parser.get_next_div()
for s in skills.find_all("td"):
skill_str = gen_skill_str(s.text)
all_skills.append(skill_str)
if chara.job == "Adventurer":
all_skills.append(special_skill_dec_str)
concated_skills = ',\n '.join(all_skills)
chara.skills = f"skills=[{concated_skills}],"
if chara.job == "Adventurer":
passive_skills = parser.get_next_div()
for s in passive_skills.find_all("td"):
passive_skill_str = gen_passive_skill_str(s.text)
if passive_skill_str:
all_passive_skills.append(passive_skill_str)
if chara.killer == "":
chara.killer = gen_killer_str(s.text)
if chara.counter_hp == "":
chara.counter_hp = gen_counter_hp_str(s.text)
concated_passive_skills = ',\n '.join(all_passive_skills)
chara.passive_skills = f"passive_skills=[Skill(buffs=[{concated_passive_skills}])],"
template = jinja2.Template("""
{{chara.job}}("{{chara.name}}", {{chara.hp}}, {{chara.mp}},
{{chara.str}}, {{chara.end}}, {{chara.dex}}, {{chara.agi}}, {{chara.mag}},
{{chara.skills}}
{{chara.passive_skills}}
{{chara.killer}}
{{chara.counter_hp}}
),
""")
if chara.job == "Adventurer":
out = template.render(chara=chara)
print(out)
else:
for i, s in enumerate(all_skills):
print("======================================================")
if i == 0:
continue
elif i == 1:
print("LV 60~76:")
elif i == 2:
print("LV 80:")
else:
raise
chara.skills = f"skill={s}"
out = template.render(chara=chara)
print(out)
def parsing_chara_from_web(http_url):
r = requests.get(http_url)
html_text = r.text
parsing_chara(html_text)
if __name__ == '__main__':
with open('tmp.html', 'r', encoding="utf-8") as f:
html_text_to_test = f.read()
parsing_chara(html_text_to_test)
|
"""
hubspot engagements api
"""
from hubspot3.base import BaseClient
from hubspot3.utils import get_log
from typing import Dict, List
ENGAGEMENTS_API_VERSION = "1"
class EngagementsClient(BaseClient):
    """
    The hubspot3 Engagements client uses the _make_request method to call the API
    for data. It returns a python object translated from the json returned
    """

    def __init__(self, *args, **kwargs) -> None:
        super(EngagementsClient, self).__init__(*args, **kwargs)
        self.log = get_log("hubspot3.engagements")

    def _get_path(self, subpath: str) -> str:
        """Get the full API subpath, honouring a 'version' override in options."""
        # Single quotes inside the f-string: reusing the outer quote character
        # is a SyntaxError before Python 3.12 (PEP 701).
        return f"engagements/v{self.options.get('version') or ENGAGEMENTS_API_VERSION}/{subpath}"

    def get(self, engagement_id, **options):
        """Get a HubSpot engagement."""
        return self._call(f"engagements/{engagement_id}", method="GET", **options)

    def _get_paged(self, subpath: str, query_limit: int, params=None, **options) -> List[Dict]:
        """Collect every page of *subpath* by following hasMore/offset.

        Shared implementation for the three paged endpoints below.
        """
        output = []  # type: List[Dict]
        offset = 0
        params = dict(params or {})
        while True:
            params.update({"limit": query_limit, "offset": offset})
            batch = self._call(subpath, method="GET", params=params, **options)
            output.extend(batch["results"])
            if not batch["hasMore"]:
                return output
            offset = batch["offset"]

    def get_associated(self, object_type, object_id, **options) -> List[Dict]:
        """
        get all engagements associated with the given object
        :param object_type: type of object to get associations on [CONTACT, COMPANY, DEAL]
        :param object_id: ID of the object to get associations on
        """
        # 100 is the documented maximum page size for this endpoint.
        return self._get_paged(
            f"engagements/associated/{object_type}/{object_id}/paged", 100, **options
        )

    def create(self, data=None, **options):
        """Create a new engagement from *data*."""
        data = data or {}
        return self._call("engagements", data=data, method="POST", **options)

    def update(self, key, data=None, **options):
        """Replace engagement *key* with *data* (PUT)."""
        data = data or {}
        return self._call(f"engagements/{key}", data=data, method="PUT", **options)

    def patch(self, key, data=None, **options):
        """Partially update engagement *key* with *data* (PATCH)."""
        data = data or {}
        return self._call(f"engagements/{key}", data=data, method="PATCH", **options)

    def get_all(self, **options) -> List[Dict]:
        """get all engagements"""
        # 250 is the documented maximum page size for this endpoint.
        return self._get_paged("engagements/paged", 250, **options)

    def get_recently_modified(self, since, **options) -> List[Dict]:
        """get recently modified engagements since *since*"""
        return self._get_paged(
            "engagements/recent/modified", 100, params={"since": since}, **options
        )
| """
hubspot engagements api
"""
from hubspot3.base import BaseClient
from hubspot3.utils import get_log
from typing import Dict, List
ENGAGEMENTS_API_VERSION = "1"
class EngagementsClient(BaseClient):
    """
    Client for the HubSpot engagements API.

    Calls the API through BaseClient's _make_request machinery and returns
    python objects translated from the returned JSON.
    """

    def __init__(self, *args, **kwargs) -> None:
        super(EngagementsClient, self).__init__(*args, **kwargs)
        self.log = get_log("hubspot3.engagements")

    def _get_path(self, subpath: str) -> str:
        """Build the full, versioned API path for ``subpath``."""
        version = self.options.get('version') or ENGAGEMENTS_API_VERSION
        return f"engagements/v{version}/{subpath}"

    def get(self, engagement_id, **options):
        """Fetch one engagement by its id."""
        return self._call(f"engagements/{engagement_id}", method="GET", **options)

    def get_associated(self, object_type, object_id, **options) -> List[Dict]:
        """
        Fetch every engagement associated with the given object.

        :param object_type: type of object to get associations on [CONTACT, COMPANY, DEAL]
        :param object_id: ID of the object to get associations on
        """
        page_size = 100  # documented maximum for this endpoint
        cursor = 0
        collected = []  # type: List[Dict]
        while True:
            page = self._call(
                f"engagements/associated/{object_type}/{object_id}/paged",
                method="GET",
                params={"limit": page_size, "offset": cursor},
                **options,
            )
            collected.extend(page["results"])
            cursor = page["offset"]
            if not page["hasMore"]:
                break
        return collected

    def create(self, data=None, **options):
        """Create a new engagement (POST)."""
        return self._call("engagements", data=data or {}, method="POST", **options)

    def update(self, key, data=None, **options):
        """Replace engagement ``key`` (PUT)."""
        return self._call(f"engagements/{key}", data=data or {}, method="PUT", **options)

    def patch(self, key, data=None, **options):
        """Partially update engagement ``key`` (PATCH)."""
        return self._call(f"engagements/{key}", data=data or {}, method="PATCH", **options)

    def get_all(self, **options) -> List[Dict]:
        """Fetch every engagement, paging until exhausted."""
        page_size = 250  # documented maximum for this endpoint
        cursor = 0
        collected = []  # type: List[Dict]
        while True:
            page = self._call(
                "engagements/paged",
                method="GET",
                params={"limit": page_size, "offset": cursor},
                **options,
            )
            collected.extend(page["results"])
            cursor = page["offset"]
            if not page["hasMore"]:
                break
        return collected

    def get_recently_modified(self, since, **options) -> List[Dict]:
        """Fetch engagements modified after ``since``, paging until exhausted."""
        page_size = 100  # documented maximum for this endpoint
        cursor = 0
        collected = []  # type: List[Dict]
        while True:
            page = self._call(
                "engagements/recent/modified",
                method="GET",
                params={"limit": page_size, "offset": cursor, "since": since},
                **options,
            )
            collected.extend(page["results"])
            cursor = page["offset"]
            if not page["hasMore"]:
                break
        return collected
|
from ib_tws_server.codegen.generator_utils import GeneratorUtils
from ib_tws_server.api_definition import *
from ib_tws_server.codegen.generator_utils import *
import inspect
def forward_method_parameters_dict_style(params: List[inspect.Parameter]) -> str:
    """Render ``name = name`` keyword-forwarding text for each parameter."""
    pairs = (f"{p.name} = {p.name}" for p in params)
    return ",".join(pairs)
def request_state_member_name(d: ApiDefinition):
    """Name of the client attribute holding per-request state.

    ``d`` is accepted only for signature parity with the other member-name
    helpers; the attribute name is fixed.
    """
    return "_req_state"
def subscription_member_name(d: ApiDefinition):
    """Name of the client attribute holding active subscriptions.

    ``d`` is accepted only for signature parity with the other member-name
    helpers; the attribute name is fixed.
    """
    return "_subscriptions"
def response_instance(d: ApiDefinition, m: Callable):
    """Expression text that builds the response object for callback ``m``."""
    callback_type, is_wrapper = GeneratorUtils.callback_type(d, m)
    if not is_wrapper:
        # Single bare member: forward it directly, no wrapper construction.
        return GeneratorUtils.data_class_members(d, [m], False)[0].name
    kwargs_text = forward_method_parameters_dict_style(
        GeneratorUtils.data_class_members(d, [m], False)
    )
    return f"{callback_type}({kwargs_text})"
def streaming_instance(d: ApiDefinition, m: Callable):
    """Expression text that builds the object pushed to a subscription queue."""
    callback_type, is_wrapper = GeneratorUtils.callback_type(d, m)
    if not is_wrapper:
        # NOTE(review): this branch passes streaming=False, mirroring
        # response_instance — presumably intentional; confirm.
        return GeneratorUtils.data_class_members(d, [m], False)[0].name
    kwargs_text = forward_method_parameters_dict_style(
        GeneratorUtils.data_class_members(d, [m], True)
    )
    return f"{callback_type}({kwargs_text})"
def request_id(d: ApiDefinition, m: Callable):
    """Expression text for the key identifying a pending request."""
    if d.uses_req_id:
        return GeneratorUtils.req_id_param_name(m)
    # Requests without a numeric id are keyed by the request method's name.
    return f"'{d.request_method.__name__}'"
def current_request_state(d: ApiDefinition, m: Callable):
    """Expression text indexing the client's request-state map for ``m``."""
    return "self." + request_state_member_name(d) + "[" + request_id(d, m) + "]"
def bind_method(d: ApiDefinition, m: Callable, param_values: List[str]) -> str:
    """Expression text that binds the EClient method to its argument values.

    Replaces the first slot (the generated method's ``self``) with the bound
    ``self._client.<method>`` attribute and emits a ``functools.partial(...)``
    call over all values.
    """
    param_values[0] = f"self._client.{m.__name__}"
    # Join outside the f-string: reusing the same quote character inside a
    # replacement field is a SyntaxError before Python 3.12.
    joined = ",".join(param_values)
    return f"functools.partial({joined})"
class AsyncioClientGenerator:
    """Emits the generated asyncio client module: an ``AsyncioClient`` facade
    over ibapi's ``EClient`` with one generated coroutine per entry in
    ``REQUEST_DEFINITIONS``."""
    @staticmethod
    def generate(filename):
        """Write the generated AsyncioClient module to ``filename``.

        The nested helpers below return *source text* that is spliced into
        the generated file; nothing here executes client logic directly.
        """
        # Statement storing the response callback on the pending request state
        # (only emitted for requests that actually produce a response).
        def init_callback(d: ApiDefinition, m: Callable, cb: str):
            if d.callback_methods is not None or d.done_method is not None:
                return f"{current_request_state(d,m)}.{cb} = {cb}"
            return ""
        # Statement allocating a fresh request id, when the request uses one.
        def init_request_id(d: ApiDefinition, u: Callable):
            if d.uses_req_id:
                return f"{GeneratorUtils.req_id_param_name(d.request_method)} = self.next_request_id()"
            else:
                return ""
        # Statement registering a SubscriptionGenerator keyed by request id.
        def init_subscription(d: ApiDefinition):
            if d.cancel_method is None:
                raise RuntimeError(f"Request does not support cancellation {d.request_method.__name__}")
            current_subscription = f"self.{subscription_member_name(d)}[{request_id(d, d.request_method)}]"
            return f"{current_subscription}= SubscriptionGenerator(self.__{d.cancel_method.__name__}, {GeneratorUtils.req_id_param_name(d.request_method)})"
        # Full source of one generated coroutine: a subscription starter, an
        # awaitable request/response, or a fire-and-forget call.
        def async_request_method(d: ApiDefinition, is_subscription: bool):
            method_name = GeneratorUtils.request_method_name(d, is_subscription)
            original_sig = GeneratorUtils.signature(d.request_method)
            signature = GeneratorUtils.request_signature(d, is_subscription)
            # The subscription flag parameter is replaced by a literal value
            # chosen per variant; all other parameters forward by name.
            param_values = [ p.name if p.name != d.subscription_flag_name else f"{d.subscription_flag_value if is_subscription else not d.subscription_flag_value}" for p in original_sig.parameters.values() ]
            if is_subscription:
                return f"""
    async def {method_name}{signature}:
        {GeneratorUtils.doc_string(d.request_method)}
        {init_request_id(d, d.request_method)}
        ret: SubscriptionGenerator = None
        with self._lock:
            ret = {init_subscription(d)}
        self._writer.queue.put({bind_method(d, d.request_method, param_values)})
        return ret"""
            if d.callback_methods is not None or d.done_method is not None:
                return f"""
    async def {method_name}{signature}:
        {GeneratorUtils.doc_string(d.request_method)}
        loop = asyncio.get_running_loop()
        future = loop.create_future()
        def cb(res: {GeneratorUtils.request_return_type(d, is_subscription)}):
            loop.call_soon_threadsafe(future.set_result, res)
        {init_request_id(d, d.request_method)}
        with self._lock:
            {init_callback(d, d.request_method, 'cb')}
        self._writer.queue.put({bind_method(d, d.request_method, param_values)})
        res = (await future)
        if isinstance(res, IbError):
            raise res
        return res"""
            else:
                return f"""
    async def {method_name}{signature}:
        {GeneratorUtils.doc_string(d.request_method)}
        {init_request_id(d, d.request_method)}
        self._writer.queue.put({bind_method(d, d.request_method, param_values)})
        return None"""
        # Source of the private cancel wrapper handed to SubscriptionGenerator.
        def cancel_method(d: ApiDefinition):
            return f"""
    def __{GeneratorUtils.method_declaration(d.cancel_method)}:
        {GeneratorUtils.doc_string(d.cancel_method)}
        self.cancel_request({request_id(d,d.cancel_method)})
        self._writer.queue.put({bind_method(d, d.cancel_method, list(GeneratorUtils.signature(d.cancel_method).parameters))})"""
        # Static preamble of the generated module, then one method per definition.
        with open(filename, "w") as f:
            f.write(f"""
import asyncio
import functools
from collections import defaultdict
from ibapi.client import EClient
from ib_tws_server.asyncio.ib_writer import IBWriter
from ib_tws_server.asyncio.request_state import *
from ib_tws_server.asyncio.subscription_generator import SubscriptionGenerator
from ib_tws_server.error import *
from ib_tws_server.gen.client_responses import *
from ib_tws_server.gen.asyncio_wrapper import *
from ib_tws_server.ib_imports import *
from threading import Lock, Thread
import time
from typing import Callable, Dict, List, Tuple
class AsyncioClient():
    _lock: Lock
    _req_state: Dict[str, RequestState]
    _subscriptions: Dict[int, SubscriptionGenerator]
    _wrapper: AsyncioWrapper
    _client: EClient
    def __init__(self):
        self._lock = Lock()
        self._current_request_id = 0
        self._req_state = defaultdict(RequestState)
        self._subscriptions = defaultdict(SubscriptionGenerator)
        self._wrapper = AsyncioWrapper(self._lock, self._req_state, self._subscriptions)
        self._client = EClient(self._wrapper)
        self._writer = IBWriter(self._client)
        self._wrapper._writer = self._writer
    def run(self):
        self._writer.start()
        self._client.run()
    def next_request_id(self):
        with self._lock:
            self._current_request_id += 1
            return self._current_request_id
    def disconnect(self, clean=False):
        self._wrapper._expecting_disconnect = clean
        return self._client.disconnect()
    def cancel_request(self, id: RequestId):
        response_cb = None
        with self._lock:
            if id in self._req_state:
                response_cb = self._req_state[id].cb
                del self._req_state[id]
            if id in self._subscriptions:
                del self._subscriptions[id]
        if response_cb is not None:
            response_cb(None)
    def start(self, host: str, port: int, client_id: int, connection_retry_interval: int):
        while True:
            try:
                self._client.connect(host, port, client_id)
                break
            except ConnectionError as e:
                if connection_retry_interval > 0:
                    time.sleep(connection_retry_interval)
                else:
                    raise e
        thread = Thread(target = self.run)
        thread.start()
        setattr(thread, "_thread", thread)
    def active_request_count(self):
        with self._lock:
            return len(self._req_state)
    def active_subscription_count(self):
        with self._lock:
            return len(self._subscriptions)
"""
            )
            # Emit request/subscription coroutines and cancel wrappers for
            # every API definition that has a request method.
            for d in REQUEST_DEFINITIONS:
                if d.request_method is not None:
                    if d.subscription_flag_name is not None:
                        f.write(async_request_method(d, False))
                        f.write(async_request_method(d, True))
                    else:
                        f.write(async_request_method(d, d.is_subscription))
                    if d.cancel_method is not None and (d.is_subscription or d.subscription_flag_name is not None):
                        f.write(cancel_method(d))
class AsyncioWrapperGenerator:
    """Emits the generated wrapper module: an ``EWrapper`` subclass that
    routes ibapi callbacks into the request-state map and subscription
    queues shared with ``AsyncioClient``."""
    @staticmethod
    def generate(filename):
        """Write the generated AsyncioWrapper module to ``filename``.

        The nested helpers below return *source text* that is spliced into
        the generated file.
        """
        # Statements recording a callback payload on the pending request:
        # append for list-shaped responses, assign otherwise.
        def update_response(d: ApiDefinition, m:Callable):
            if GeneratorUtils.response_is_list(d):
                return f"""
        if {request_id(d, m)} in self._req_state:
            req_state = {current_request_state(d, m)}
            if req_state.response is None:
                req_state.response = []
            req_state.response.append({response_instance(d, m)})"""
            else:
                return f"""
        if {request_id(d, m)} in self._req_state:
            req_state = {current_request_state(d, m)}
            if req_state is not None:
                req_state.response = {response_instance(d, m)}"""
        # Statement that completes the awaiting future, when callbacks exist.
        def call_response_cb(d: ApiDefinition, m: Callable):
            if d.callback_methods is not None:
                return f"self.call_response_cb({request_id(d,m)})"
            else:
                return ""
        # Completion logic: gate on the 'done' flag, fire immediately for
        # scalar responses, or emit nothing for list responses (those are
        # completed by the separate done method).
        def call_response_cb_if_done(d: ApiDefinition, m: Callable):
            if d.has_done_flag:
                return f"""
        if (done):
            {call_response_cb(d, m)}"""
            elif not GeneratorUtils.response_is_list(d):
                return f"""
        {call_response_cb(d,m)}"""
            else:
                return ""
        # Full source of one generated EWrapper callback override.
        def callback_method(d: ApiDefinition, m: Callable):
            if d.subscription_flag_name is not None:
                return f"""
    def {GeneratorUtils.method_declaration(m)}:
        {GeneratorUtils.doc_string(m)}
        is_subscription: bool = False
        with self._lock:
            is_subscription = {request_id(d, m)} in self._subscriptions
            {update_response(d, m)}
        if is_subscription:
            self.call_streaming_cb({request_id(d,m)}, {streaming_instance(d,m)})
            return
        {call_response_cb_if_done(d, m)}"""
            elif not d.is_subscription:
                return f"""
    def {GeneratorUtils.method_declaration(m)}:
        {GeneratorUtils.doc_string(m)}
        with self._lock:
            {update_response(d, m)}
        {call_response_cb_if_done(d, m)}"""
            else:
                return f"""
    def {GeneratorUtils.method_declaration(m)}:
        {GeneratorUtils.doc_string(m)}
        self.call_streaming_cb({request_id(d,m)}, {streaming_instance(d,m)})"""
        # Source of the override for a request's dedicated done method.
        def done_method(d: ApiDefinition):
            return f"""
    def {GeneratorUtils.method_declaration(d.done_method)}:
        {GeneratorUtils.doc_string(d.done_method)}
        {call_response_cb(d,d.done_method)}"""
        # Static preamble of the generated module, then one override per
        # callback/done method in the API definitions.
        with open(filename, "w") as f:
            f.write(f"""
from ibapi.wrapper import EWrapper
from ib_tws_server.asyncio.ib_writer import IBWriter
from ib_tws_server.asyncio.request_state import *
from ib_tws_server.asyncio.subscription_generator import SubscriptionGenerator
from ib_tws_server.error import *
from ib_tws_server.gen.client_responses import *
from ib_tws_server.ib_imports import *
from threading import Lock
from typing import Dict, List
class AsyncioWrapper(EWrapper):
    _lock: Lock
    _req_state: Dict[str, RequestState]
    _subscriptions: Dict[int, SubscriptionGenerator]
    _expecting_disconnect: bool
    _writer: IBWriter
    def __init__(self, lock: Lock, req_state: Dict[str, RequestState], subscriptions: Dict[int, SubscriptionGenerator]):
        self._lock = lock
        self._req_state = req_state
        self._subscriptions = subscriptions
        EWrapper.__init__(self)
        self._expecting_disconnect = False
    def connectionClosed(self):
        if self._expecting_disconnect:
            # Wake up writer
            self._writer.queue.put(lambda *a, **k: None)
        else:
            raise ConnectionError("Unexpected disconnect")
    def call_response_cb(self, id: RequestId, res=None):
        cb = None
        with self._lock:
            if not id in self._req_state:
                return
            s = self._req_state[id]
            cb = s.cb
            if res is None:
                res = s.response
            del self._req_state[id]
        if cb is not None:
            cb(res)
    def error(self, reqId: int, errorCode: int, errorString: str):
        cb = None
        if reqId is not None:
            with self._lock:
                if reqId in self._req_state:
                    s = self._req_state[reqId]
                    cb = s.cb
                    del self._req_state[reqId]
        if cb is not None:
            cb(IbError(errorString, errorCode))
        else:
            super().error(reqId, errorCode, errorString)
    def call_streaming_cb(self, id: RequestId, res: any):
        cb = None
        loop = None
        with self._lock:
            if id in self._subscriptions:
                s = self._subscriptions[id]
                cb = s.add_to_queue
                loop = s._loop
        if loop is not None:
            loop.call_soon_threadsafe(cb, res)
""")
            for d in REQUEST_DEFINITIONS:
                if d.request_method is not None:
                    if d.callback_methods is not None:
                        for m in d.callback_methods:
                            f.write(callback_method(d, m))
                    if d.done_method is not None:
                        f.write(done_method(d))
| from ib_tws_server.codegen.generator_utils import GeneratorUtils
from ib_tws_server.api_definition import *
from ib_tws_server.codegen.generator_utils import *
import inspect
def forward_method_parameters_dict_style(params: List[inspect.Parameter]) -> str:
    """Render ``name = name`` keyword-forwarding text for each parameter."""
    pairs = (f"{p.name} = {p.name}" for p in params)
    return ",".join(pairs)
def request_state_member_name(d: ApiDefinition):
    """Name of the client attribute holding per-request state.

    ``d`` is accepted only for signature parity with the other member-name
    helpers; the attribute name is fixed.
    """
    return "_req_state"
def subscription_member_name(d: ApiDefinition):
    """Name of the client attribute holding active subscriptions.

    ``d`` is accepted only for signature parity with the other member-name
    helpers; the attribute name is fixed.
    """
    return "_subscriptions"
def response_instance(d: ApiDefinition, m: Callable):
    """Expression text that builds the response object for callback ``m``."""
    callback_type, is_wrapper = GeneratorUtils.callback_type(d, m)
    if not is_wrapper:
        # Single bare member: forward it directly, no wrapper construction.
        return GeneratorUtils.data_class_members(d, [m], False)[0].name
    kwargs_text = forward_method_parameters_dict_style(
        GeneratorUtils.data_class_members(d, [m], False)
    )
    return f"{callback_type}({kwargs_text})"
def streaming_instance(d: ApiDefinition, m: Callable):
    """Expression text that builds the object pushed to a subscription queue."""
    callback_type, is_wrapper = GeneratorUtils.callback_type(d, m)
    if not is_wrapper:
        # NOTE(review): this branch passes streaming=False, mirroring
        # response_instance — presumably intentional; confirm.
        return GeneratorUtils.data_class_members(d, [m], False)[0].name
    kwargs_text = forward_method_parameters_dict_style(
        GeneratorUtils.data_class_members(d, [m], True)
    )
    return f"{callback_type}({kwargs_text})"
def request_id(d: ApiDefinition, m: Callable):
    """Expression text for the key identifying a pending request."""
    if d.uses_req_id:
        return GeneratorUtils.req_id_param_name(m)
    # Requests without a numeric id are keyed by the request method's name.
    return f"'{d.request_method.__name__}'"
def current_request_state(d: ApiDefinition, m: Callable):
    """Expression text indexing the client's request-state map for ``m``."""
    return "self." + request_state_member_name(d) + "[" + request_id(d, m) + "]"
def bind_method(d: ApiDefinition, m: Callable, param_values: List[str]) -> str:
    """Expression text binding ``self._client.<m>`` via functools.partial.

    The first slot stands in for the target callable itself; the remaining
    values are forwarded as partial arguments.
    """
    param_values[0] = "self._client." + m.__name__
    return "functools.partial(" + ",".join(param_values) + ")"
class AsyncioClientGenerator:
    """Emits the generated asyncio client module: an ``AsyncioClient`` facade
    over ibapi's ``EClient`` with one generated coroutine per entry in
    ``REQUEST_DEFINITIONS``."""
    @staticmethod
    def generate(filename):
        """Write the generated AsyncioClient module to ``filename``.

        The nested helpers below return *source text* that is spliced into
        the generated file; nothing here executes client logic directly.
        """
        # Statement storing the response callback on the pending request state
        # (only emitted for requests that actually produce a response).
        def init_callback(d: ApiDefinition, m: Callable, cb: str):
            if d.callback_methods is not None or d.done_method is not None:
                return f"{current_request_state(d,m)}.{cb} = {cb}"
            return ""
        # Statement allocating a fresh request id, when the request uses one.
        def init_request_id(d: ApiDefinition, u: Callable):
            if d.uses_req_id:
                return f"{GeneratorUtils.req_id_param_name(d.request_method)} = self.next_request_id()"
            else:
                return ""
        # Statement registering a SubscriptionGenerator keyed by request id.
        def init_subscription(d: ApiDefinition):
            if d.cancel_method is None:
                raise RuntimeError(f"Request does not support cancellation {d.request_method.__name__}")
            current_subscription = f"self.{subscription_member_name(d)}[{request_id(d, d.request_method)}]"
            return f"{current_subscription}= SubscriptionGenerator(self.__{d.cancel_method.__name__}, {GeneratorUtils.req_id_param_name(d.request_method)})"
        # Full source of one generated coroutine: a subscription starter, an
        # awaitable request/response, or a fire-and-forget call.
        def async_request_method(d: ApiDefinition, is_subscription: bool):
            method_name = GeneratorUtils.request_method_name(d, is_subscription)
            original_sig = GeneratorUtils.signature(d.request_method)
            signature = GeneratorUtils.request_signature(d, is_subscription)
            # The subscription flag parameter is replaced by a literal value
            # chosen per variant; all other parameters forward by name.
            param_values = [ p.name if p.name != d.subscription_flag_name else f"{d.subscription_flag_value if is_subscription else not d.subscription_flag_value}" for p in original_sig.parameters.values() ]
            if is_subscription:
                return f"""
    async def {method_name}{signature}:
        {GeneratorUtils.doc_string(d.request_method)}
        {init_request_id(d, d.request_method)}
        ret: SubscriptionGenerator = None
        with self._lock:
            ret = {init_subscription(d)}
        self._writer.queue.put({bind_method(d, d.request_method, param_values)})
        return ret"""
            if d.callback_methods is not None or d.done_method is not None:
                return f"""
    async def {method_name}{signature}:
        {GeneratorUtils.doc_string(d.request_method)}
        loop = asyncio.get_running_loop()
        future = loop.create_future()
        def cb(res: {GeneratorUtils.request_return_type(d, is_subscription)}):
            loop.call_soon_threadsafe(future.set_result, res)
        {init_request_id(d, d.request_method)}
        with self._lock:
            {init_callback(d, d.request_method, 'cb')}
        self._writer.queue.put({bind_method(d, d.request_method, param_values)})
        res = (await future)
        if isinstance(res, IbError):
            raise res
        return res"""
            else:
                return f"""
    async def {method_name}{signature}:
        {GeneratorUtils.doc_string(d.request_method)}
        {init_request_id(d, d.request_method)}
        self._writer.queue.put({bind_method(d, d.request_method, param_values)})
        return None"""
        # Source of the private cancel wrapper handed to SubscriptionGenerator.
        def cancel_method(d: ApiDefinition):
            return f"""
    def __{GeneratorUtils.method_declaration(d.cancel_method)}:
        {GeneratorUtils.doc_string(d.cancel_method)}
        self.cancel_request({request_id(d,d.cancel_method)})
        self._writer.queue.put({bind_method(d, d.cancel_method, list(GeneratorUtils.signature(d.cancel_method).parameters))})"""
        # Static preamble of the generated module, then one method per definition.
        with open(filename, "w") as f:
            f.write(f"""
import asyncio
import functools
from collections import defaultdict
from ibapi.client import EClient
from ib_tws_server.asyncio.ib_writer import IBWriter
from ib_tws_server.asyncio.request_state import *
from ib_tws_server.asyncio.subscription_generator import SubscriptionGenerator
from ib_tws_server.error import *
from ib_tws_server.gen.client_responses import *
from ib_tws_server.gen.asyncio_wrapper import *
from ib_tws_server.ib_imports import *
from threading import Lock, Thread
import time
from typing import Callable, Dict, List, Tuple
class AsyncioClient():
    _lock: Lock
    _req_state: Dict[str, RequestState]
    _subscriptions: Dict[int, SubscriptionGenerator]
    _wrapper: AsyncioWrapper
    _client: EClient
    def __init__(self):
        self._lock = Lock()
        self._current_request_id = 0
        self._req_state = defaultdict(RequestState)
        self._subscriptions = defaultdict(SubscriptionGenerator)
        self._wrapper = AsyncioWrapper(self._lock, self._req_state, self._subscriptions)
        self._client = EClient(self._wrapper)
        self._writer = IBWriter(self._client)
        self._wrapper._writer = self._writer
    def run(self):
        self._writer.start()
        self._client.run()
    def next_request_id(self):
        with self._lock:
            self._current_request_id += 1
            return self._current_request_id
    def disconnect(self, clean=False):
        self._wrapper._expecting_disconnect = clean
        return self._client.disconnect()
    def cancel_request(self, id: RequestId):
        response_cb = None
        with self._lock:
            if id in self._req_state:
                response_cb = self._req_state[id].cb
                del self._req_state[id]
            if id in self._subscriptions:
                del self._subscriptions[id]
        if response_cb is not None:
            response_cb(None)
    def start(self, host: str, port: int, client_id: int, connection_retry_interval: int):
        while True:
            try:
                self._client.connect(host, port, client_id)
                break
            except ConnectionError as e:
                if connection_retry_interval > 0:
                    time.sleep(connection_retry_interval)
                else:
                    raise e
        thread = Thread(target = self.run)
        thread.start()
        setattr(thread, "_thread", thread)
    def active_request_count(self):
        with self._lock:
            return len(self._req_state)
    def active_subscription_count(self):
        with self._lock:
            return len(self._subscriptions)
"""
            )
            # Emit request/subscription coroutines and cancel wrappers for
            # every API definition that has a request method.
            for d in REQUEST_DEFINITIONS:
                if d.request_method is not None:
                    if d.subscription_flag_name is not None:
                        f.write(async_request_method(d, False))
                        f.write(async_request_method(d, True))
                    else:
                        f.write(async_request_method(d, d.is_subscription))
                    if d.cancel_method is not None and (d.is_subscription or d.subscription_flag_name is not None):
                        f.write(cancel_method(d))
class AsyncioWrapperGenerator:
    """Emits the generated wrapper module: an ``EWrapper`` subclass that
    routes ibapi callbacks into the request-state map and subscription
    queues shared with ``AsyncioClient``."""
    @staticmethod
    def generate(filename):
        """Write the generated AsyncioWrapper module to ``filename``.

        The nested helpers below return *source text* that is spliced into
        the generated file.
        """
        # Statements recording a callback payload on the pending request:
        # append for list-shaped responses, assign otherwise.
        def update_response(d: ApiDefinition, m:Callable):
            if GeneratorUtils.response_is_list(d):
                return f"""
        if {request_id(d, m)} in self._req_state:
            req_state = {current_request_state(d, m)}
            if req_state.response is None:
                req_state.response = []
            req_state.response.append({response_instance(d, m)})"""
            else:
                return f"""
        if {request_id(d, m)} in self._req_state:
            req_state = {current_request_state(d, m)}
            if req_state is not None:
                req_state.response = {response_instance(d, m)}"""
        # Statement that completes the awaiting future, when callbacks exist.
        def call_response_cb(d: ApiDefinition, m: Callable):
            if d.callback_methods is not None:
                return f"self.call_response_cb({request_id(d,m)})"
            else:
                return ""
        # Completion logic: gate on the 'done' flag, fire immediately for
        # scalar responses, or emit nothing for list responses (those are
        # completed by the separate done method).
        def call_response_cb_if_done(d: ApiDefinition, m: Callable):
            if d.has_done_flag:
                return f"""
        if (done):
            {call_response_cb(d, m)}"""
            elif not GeneratorUtils.response_is_list(d):
                return f"""
        {call_response_cb(d,m)}"""
            else:
                return ""
        # Full source of one generated EWrapper callback override.
        def callback_method(d: ApiDefinition, m: Callable):
            if d.subscription_flag_name is not None:
                return f"""
    def {GeneratorUtils.method_declaration(m)}:
        {GeneratorUtils.doc_string(m)}
        is_subscription: bool = False
        with self._lock:
            is_subscription = {request_id(d, m)} in self._subscriptions
            {update_response(d, m)}
        if is_subscription:
            self.call_streaming_cb({request_id(d,m)}, {streaming_instance(d,m)})
            return
        {call_response_cb_if_done(d, m)}"""
            elif not d.is_subscription:
                return f"""
    def {GeneratorUtils.method_declaration(m)}:
        {GeneratorUtils.doc_string(m)}
        with self._lock:
            {update_response(d, m)}
        {call_response_cb_if_done(d, m)}"""
            else:
                return f"""
    def {GeneratorUtils.method_declaration(m)}:
        {GeneratorUtils.doc_string(m)}
        self.call_streaming_cb({request_id(d,m)}, {streaming_instance(d,m)})"""
        # Source of the override for a request's dedicated done method.
        def done_method(d: ApiDefinition):
            return f"""
    def {GeneratorUtils.method_declaration(d.done_method)}:
        {GeneratorUtils.doc_string(d.done_method)}
        {call_response_cb(d,d.done_method)}"""
        # Static preamble of the generated module, then one override per
        # callback/done method in the API definitions.
        with open(filename, "w") as f:
            f.write(f"""
from ibapi.wrapper import EWrapper
from ib_tws_server.asyncio.ib_writer import IBWriter
from ib_tws_server.asyncio.request_state import *
from ib_tws_server.asyncio.subscription_generator import SubscriptionGenerator
from ib_tws_server.error import *
from ib_tws_server.gen.client_responses import *
from ib_tws_server.ib_imports import *
from threading import Lock
from typing import Dict, List
class AsyncioWrapper(EWrapper):
    _lock: Lock
    _req_state: Dict[str, RequestState]
    _subscriptions: Dict[int, SubscriptionGenerator]
    _expecting_disconnect: bool
    _writer: IBWriter
    def __init__(self, lock: Lock, req_state: Dict[str, RequestState], subscriptions: Dict[int, SubscriptionGenerator]):
        self._lock = lock
        self._req_state = req_state
        self._subscriptions = subscriptions
        EWrapper.__init__(self)
        self._expecting_disconnect = False
    def connectionClosed(self):
        if self._expecting_disconnect:
            # Wake up writer
            self._writer.queue.put(lambda *a, **k: None)
        else:
            raise ConnectionError("Unexpected disconnect")
    def call_response_cb(self, id: RequestId, res=None):
        cb = None
        with self._lock:
            if not id in self._req_state:
                return
            s = self._req_state[id]
            cb = s.cb
            if res is None:
                res = s.response
            del self._req_state[id]
        if cb is not None:
            cb(res)
    def error(self, reqId: int, errorCode: int, errorString: str):
        cb = None
        if reqId is not None:
            with self._lock:
                if reqId in self._req_state:
                    s = self._req_state[reqId]
                    cb = s.cb
                    del self._req_state[reqId]
        if cb is not None:
            cb(IbError(errorString, errorCode))
        else:
            super().error(reqId, errorCode, errorString)
    def call_streaming_cb(self, id: RequestId, res: any):
        cb = None
        loop = None
        with self._lock:
            if id in self._subscriptions:
                s = self._subscriptions[id]
                cb = s.add_to_queue
                loop = s._loop
        if loop is not None:
            loop.call_soon_threadsafe(cb, res)
""")
            for d in REQUEST_DEFINITIONS:
                if d.request_method is not None:
                    if d.callback_methods is not None:
                        for m in d.callback_methods:
                            f.write(callback_method(d, m))
                    if d.done_method is not None:
                        f.write(done_method(d))
|
import logging
from abc import abstractmethod
from .input import Input
from .input_config import assert_keycode_list
class Switch(Input):
    """Switch input class

    Implement custom on() and off() logic

    Read more about defaults from input_config.py
    """

    def validate_defaults(self, defaults):
        """Validate the switch's default configuration

        :param defaults: defaults dict; must contain a valid "keys" entry
        :type defaults: dict
        """
        super().validate_defaults(defaults)
        assert defaults.keys() <= {
            "humanReadableName",
            "onScreenPosition",
            "keys",
        }, "there were extra items in defaults"
        assert "keys" in defaults
        assert_keycode_list(defaults["keys"])

    async def _on_input(self, command, seat):
        """Switch input functionality

        Calls on and off depending on command state

        :param command: Command from game engine
        :type command: dict
        :param seat: Robot seat
        :type seat: int
        """
        if "state" not in command:
            logging.warning("Switch: invalid command received")
            return

        try:
            val = str(command["state"])
        except (ValueError, TypeError):
            # logging.warn is a deprecated alias of logging.warning; lazy
            # %-formatting also sidesteps the same-quote-inside-f-string
            # construct that is a SyntaxError before Python 3.12.
            logging.warning(
                "Switch: could not convert %s into String", command["state"]
            )
            return

        if val not in ("up", "down"):
            logging.warning("Switch: command not <up|down>")
            return

        if val == "up":
            await self.off(seat)
        else:
            await self.on(seat)

    @abstractmethod
    async def on(self, seat):
        """Switch turned on functionality

        :param seat: Robot seat
        :type seat: int
        """
        pass

    @abstractmethod
    async def off(self, seat):
        """Switch turned off functionality

        :param seat: Robot seat
        :type seat: int
        """
        pass

    async def reset(self, seat):
        """Switch reset functionality

        Defaults to calling off()

        :param seat: Robot seat
        :type seat: int
        """
        await self.off(seat)

    def get_name(self):
        """Returns the name of the input

        :return: name of the input
        :rtype: str
        """
        # NOTE(review): returns "button" rather than "switch" — presumably
        # for engine compatibility; confirm before changing.
        return "button"

    def _get_default_keybinds(self):
        """Normalize get_default_keybinds() output into a defaults dict."""
        binds = self.get_default_keybinds()
        if not isinstance(binds, list):
            binds = [binds]

        def enum_to_str(item):
            # Accept both raw key strings and key-enum members.
            if type(item) is not str:
                return item.value
            return item

        binds = list(map(enum_to_str, binds))
        return {"keys": binds}

    def get_default_keybinds(self):
        """Returns a single keybind or a list of keybinds.

        Switches are bound to the space key by default.

        To override the defaults, override this method in your switch
        subclass and return different keybinds.
        """
        return []
| import logging
from abc import abstractmethod
from .input import Input
from .input_config import assert_keycode_list
class Switch(Input):
    """Switch input class

    Implement custom on() and off() logic

    Read more about defaults from input_config.py
    """

    def validate_defaults(self, defaults):
        """Validate the switch's default configuration

        :param defaults: defaults dict; must contain a valid "keys" entry
        :type defaults: dict
        """
        super().validate_defaults(defaults)
        assert defaults.keys() <= {
            "humanReadableName",
            "onScreenPosition",
            "keys",
        }, "there were extra items in defaults"
        assert "keys" in defaults
        assert_keycode_list(defaults["keys"])

    async def _on_input(self, command, seat):
        """Switch input functionality

        Calls on and off depending on command state

        :param command: Command from game engine
        :type command: dict
        :param seat: Robot seat
        :type seat: int
        """
        if "state" not in command:
            logging.warning("Switch: invalid command received")
            return

        try:
            val = str(command["state"])
        except (ValueError, TypeError):
            # logging.warn is a deprecated alias of logging.warning; lazy
            # %-formatting avoids building the message unless it is emitted.
            logging.warning(
                "Switch: could not convert %s into String", command["state"]
            )
            return

        if val not in ("up", "down"):
            logging.warning("Switch: command not <up|down>")
            return

        if val == "up":
            await self.off(seat)
        else:
            await self.on(seat)

    @abstractmethod
    async def on(self, seat):
        """Switch turned on functionality

        :param seat: Robot seat
        :type seat: int
        """
        pass

    @abstractmethod
    async def off(self, seat):
        """Switch turned off functionality

        :param seat: Robot seat
        :type seat: int
        """
        pass

    async def reset(self, seat):
        """Switch reset functionality

        Defaults to calling off()

        :param seat: Robot seat
        :type seat: int
        """
        await self.off(seat)

    def get_name(self):
        """Returns the name of the input

        :return: name of the input
        :rtype: str
        """
        # NOTE(review): returns "button" rather than "switch" — presumably
        # for engine compatibility; confirm before changing.
        return "button"

    def _get_default_keybinds(self):
        """Normalize get_default_keybinds() output into a defaults dict."""
        binds = self.get_default_keybinds()
        if not isinstance(binds, list):
            binds = [binds]

        def enum_to_str(item):
            # Accept both raw key strings and key-enum members.
            if type(item) is not str:
                return item.value
            return item

        binds = list(map(enum_to_str, binds))
        return {"keys": binds}

    def get_default_keybinds(self):
        """Returns a single keybind or a list of keybinds.

        Switches are bound to the space key by default.

        To override the defaults, override this method in your switch
        subclass and return different keybinds.
        """
        return []
|
# This script is used to parse BOOST special function test data into something
# we can easily import in numpy.
import re
import os
# Where to put the data (directory will be created)
DATA_DIR = 'scipy/special/tests/data/boost'
# Where to pull out boost data
BOOST_SRC = "boostmath/test"
CXX_COMMENT = re.compile(r'^\s+//')
DATA_REGEX = re.compile(r'^\s*/*\{*\s*SC_')
ITEM_REGEX = re.compile(r'[+-]?\d*\.?\d+(?:[eE][+-]?\d+)?')
HEADER_REGEX = re.compile(
r'const boost::array\<boost::array\<.*, (\d+)\>, (\d+)\> ([a-zA-Z_\d]+)')
IGNORE_PATTERNS = [
# Makes use of ldexp and casts
"hypergeometric_1F1_big_double_limited.ipp",
"hypergeometric_1F1_big_unsolved.ipp",
# Makes use of numeric_limits and ternary operator
"beta_small_data.ipp",
# Doesn't contain any data
"almost_equal.ipp",
# Derivatives functions don't exist
"bessel_y01_prime_data.ipp",
"bessel_yn_prime_data.ipp",
"sph_bessel_prime_data.ipp",
"sph_neumann_prime_data.ipp",
# Data files not needed by scipy special tests.
"ibeta_derivative_",
r"ellint_r[cdfjg]_[^d]",
r"ellint_d2?_",
"jacobi_",
"heuman_lambda_",
"hypergeometric_",
"nct_",
r".*gammap1m1_",
"trig_",
"powm1_data.ipp",
]
def _raw_data(line):
    """Return the numeric literal found in each comma-separated cell of ``line``.

    Cells with no numeric match are dropped.
    """
    matches = (ITEM_REGEX.search(cell) for cell in line.split(','))
    return [m.group(0) for m in matches if m]
def parse_ipp_file(filename):
    """Parse one boost ``.ipp`` test-data file.

    Scans ``filename`` for ``boost::array`` table declarations and collects
    each table's rows of numeric literals.

    :param filename: path of the .ipp file to parse
    :return: dict mapping table name -> list of rows (lists of literal strings)
    :raises ValueError: when a table holds a different row count than declared
    """
    print(filename)
    # ``with`` guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as fh:
        lines = fh.readlines()
    data = {}
    i = 0
    while i < len(lines):
        line = lines[i]
        m = HEADER_REGEX.search(line)
        if not m:
            i += 1
            continue
        d = int(m.group(1))  # declared row width
        n = int(m.group(2))  # declared row count
        print(f"d = {d}, n = {n}")
        cdata = []
        i += 1
        line = lines[i]
        # Skip comments
        while CXX_COMMENT.match(line):
            i += 1
            line = lines[i]
        while DATA_REGEX.match(line):
            cdata.append(_raw_data(line))
            i += 1
            line = lines[i]
            # Skip comments
            while CXX_COMMENT.match(line):
                i += 1
                line = lines[i]
        if len(cdata) != n:
            raise ValueError("parsed data: %d, expected %d" % (len(cdata), n))
        data[m.group(3)] = cdata
    return data
def dump_dataset(filename, data):
    """Write ``data`` (a sequence of token lists) to ``filename``, one
    space-separated row per line.

    :param filename: output text file path
    :param data: iterable of rows, each a list of strings
    """
    # ``with`` replaces the manual try/finally close.  The join is hoisted out
    # of the f-string because reusing the same quote character inside a
    # replacement field is a SyntaxError before Python 3.12.
    with open(filename, 'w') as fid:
        for line in data:
            fid.write(" ".join(line) + "\n")
def dump_datasets(filename):
    """Parse one .ipp file and write each table it contains to
    DATA_DIR/<name>_<ext>/<table>.txt (the output directory must not
    already exist)."""
    stem, ext = os.path.splitext(os.path.basename(filename))
    outdir = os.path.join(DATA_DIR, f'{stem}_{ext[1:]}')
    os.makedirs(outdir)
    for name, table in parse_ipp_file(filename).items():
        print(name, len(table))
        dump_dataset(os.path.join(outdir, name) + '.txt', table)
if __name__ == '__main__':
    # Walk BOOST_SRC and convert every .ipp table we don't explicitly
    # ignore into plain-text datasets under DATA_DIR.
    for filename in sorted(os.listdir(BOOST_SRC)):
        # Note: Misses data in hpp files (e.x. powm1_sqrtp1m1_test.hpp)
        if filename.endswith(".ipp"):
            if any(re.match(pattern, filename) for pattern in IGNORE_PATTERNS):
                continue
            path = os.path.join(BOOST_SRC, filename)
            print(f"================= {path} ===============")
            dump_datasets(path)
| # This script is used to parse BOOST special function test data into something
# we can easily import in numpy.
import re
import os
# Where to put the data (directory will be created)
DATA_DIR = 'scipy/special/tests/data/boost'
# Where to pull out boost data
BOOST_SRC = "boostmath/test"
CXX_COMMENT = re.compile(r'^\s+//')
DATA_REGEX = re.compile(r'^\s*/*\{*\s*SC_')
ITEM_REGEX = re.compile(r'[+-]?\d*\.?\d+(?:[eE][+-]?\d+)?')
HEADER_REGEX = re.compile(
r'const boost::array\<boost::array\<.*, (\d+)\>, (\d+)\> ([a-zA-Z_\d]+)')
IGNORE_PATTERNS = [
# Makes use of ldexp and casts
"hypergeometric_1F1_big_double_limited.ipp",
"hypergeometric_1F1_big_unsolved.ipp",
# Makes use of numeric_limits and ternary operator
"beta_small_data.ipp",
# Doesn't contain any data
"almost_equal.ipp",
# Derivatives functions don't exist
"bessel_y01_prime_data.ipp",
"bessel_yn_prime_data.ipp",
"sph_bessel_prime_data.ipp",
"sph_neumann_prime_data.ipp",
# Data files not needed by scipy special tests.
"ibeta_derivative_",
r"ellint_r[cdfjg]_[^d]",
r"ellint_d2?_",
"jacobi_",
"heuman_lambda_",
"hypergeometric_",
"nct_",
r".*gammap1m1_",
"trig_",
"powm1_data.ipp",
]
def _raw_data(line):
items = line.split(',')
l = []
for item in items:
m = ITEM_REGEX.search(item)
if m:
q = m.group(0)
l.append(q)
return l
def parse_ipp_file(filename):
print(filename)
a = open(filename, 'r')
lines = a.readlines()
data = {}
i = 0
while (i < len(lines)):
line = lines[i]
m = HEADER_REGEX.search(line)
if m:
d = int(m.group(1))
n = int(m.group(2))
print(f"d = {d}, n = {n}")
cdata = []
i += 1
line = lines[i]
# Skip comments
while CXX_COMMENT.match(line):
i += 1
line = lines[i]
while DATA_REGEX.match(line):
cdata.append(_raw_data(line))
i += 1
line = lines[i]
# Skip comments
while CXX_COMMENT.match(line):
i += 1
line = lines[i]
if not len(cdata) == n:
raise ValueError("parsed data: %d, expected %d" % (len(cdata), n))
data[m.group(3)] = cdata
else:
i += 1
return data
def dump_dataset(filename, data):
fid = open(filename, 'w')
try:
for line in data:
fid.write(f"{' '.join(line)}\n")
finally:
fid.close()
def dump_datasets(filename):
base, ext = os.path.splitext(os.path.basename(filename))
base += f'_{ext[1:]}'
datadir = os.path.join(DATA_DIR, base)
os.makedirs(datadir)
datasets = parse_ipp_file(filename)
for k, d in datasets.items():
print(k, len(d))
dfilename = os.path.join(datadir, k) + '.txt'
dump_dataset(dfilename, d)
if __name__ == '__main__':
for filename in sorted(os.listdir(BOOST_SRC)):
# Note: Misses data in hpp files (e.x. powm1_sqrtp1m1_test.hpp)
if filename.endswith(".ipp"):
if any(re.match(pattern, filename) for pattern in IGNORE_PATTERNS):
continue
path = os.path.join(BOOST_SRC, filename)
print(f"================= {path} ===============")
dump_datasets(path)
|
import sys
import cv2
import os
from ast import literal_eval
from pathlib import Path
import shutil
import logging
import random
import pickle
import yaml
import subprocess
from PIL import Image
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import animation, rc
plt.rcParams['figure.figsize'] = 30, 30
np.set_printoptions(precision=3, suppress=True)
rc('animation', html='jshtml')
import torch
from augmentations import get_albu_transforms
IMAGE_DIR = '~/Kaggle/data/tensorflow-great-barrier-reef/train_images'
def load_image(video_id, video_frame, image_dir):
    """Read the BGR frame image for (video_id, video_frame) under *image_dir*."""
    img_path = f'{image_dir}/video_{video_id}/{video_frame}.jpg'
    assert os.path.exists(img_path), f'{img_path} does not exist.'
    return cv2.imread(img_path)
def decode_annotations(annotaitons_str):
    """Decode an annotations string (a Python-literal list of dicts) safely."""
    parsed = literal_eval(annotaitons_str)
    return parsed
def load_image_with_annotations(video_id, video_frame, image_dir, annotaitons_str):
    """Load a frame and draw its ground-truth boxes in place (BGR blue)."""
    img = load_image(video_id, video_frame, image_dir)
    annotations = decode_annotations(annotaitons_str)
    if len(annotations) > 0:
        for ann in annotations:
            # each annotation dict carries top-left x/y plus width/height
            cv2.rectangle(img, (ann['x'], ann['y']),
                          (ann['x'] + ann['width'], ann['y'] + ann['height']),
                          (255, 0, 0), thickness=2,)
    return img
def draw_predictions(img, pred_bboxes):
    """Return a copy of *img* with predicted boxes (yellow) and confidence
    text (red) drawn on it.

    pred_bboxes rows are [conf, x, y, w, h]; coordinates are rounded to int.
    """
    img = img.copy()
    if len(pred_bboxes) > 0:
        for bbox in pred_bboxes:
            conf = bbox[0]
            x, y, w, h = bbox[1:].round().astype(int)
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 255), thickness=2,)
            # clamp the label above the box so it never goes off-image
            cv2.putText(img, f"{conf:.2}", (x, max(0, y-5)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255),
                        thickness=1,
                        )
    return img
def plot_img(df, idx, image_dir, pred_bboxes=None):
    """Show row *idx* of *df* with its GT boxes, plus predictions if given.

    pred_bboxes: optional (N, 5) array of [conf, x, y, w, h], drawn in
    descending-confidence order.
    """
    row = df.iloc[idx]
    video_id = row.video_id
    video_frame = row.video_frame
    annotations_str = row.annotations
    img = load_image_with_annotations(video_id, video_frame, image_dir, annotations_str)
    # `if pred_bboxes` on a multi-row ndarray raises "truth value of an
    # array is ambiguous"; test for None explicitly instead.
    if pred_bboxes is not None and len(pred_bboxes) > 0:
        pred_bboxes = pred_bboxes[pred_bboxes[:, 0].argsort()[::-1]]  # sort by conf
        img = draw_predictions(img, pred_bboxes)
    plt.imshow(img[:, :, ::-1])  # BGR -> RGB for matplotlib
def calc_iou(bboxes1, bboxes2, bbox_mode='xywh'):
    """Pairwise IoU matrix between two box sets.

    Boxes are rows of 4 values; 'xywh' rows are first converted to corner
    (x1, y1, x2, y2) form. Returns an (N1, N2) matrix; the 1e-9 terms keep
    the division well-defined for degenerate boxes.
    """
    assert len(bboxes1.shape) == 2 and bboxes1.shape[1] == 4
    assert len(bboxes2.shape) == 2 and bboxes2.shape[1] == 4
    a = bboxes1.copy()
    b = bboxes2.copy()
    if bbox_mode == 'xywh':
        # width/height -> absolute bottom-right corner
        a[:, 2:] += a[:, :2]
        b[:, 2:] += b[:, :2]
    ax1, ay1, ax2, ay2 = np.split(a, 4, axis=1)
    bx1, by1, bx2, by2 = np.split(b, 4, axis=1)
    # intersection rectangle, broadcasting (N1, 1) against (1, N2)
    left = np.maximum(ax1, bx1.T)
    top = np.maximum(ay1, by1.T)
    right = np.minimum(ax2, bx2.T)
    bottom = np.minimum(ay2, by2.T)
    inter = np.maximum((right - left + 1e-9), 0) * np.maximum((bottom - top + 1e-9), 0)
    area_a = (ax2 - ax1 + 1e-9) * (ay2 - ay1 + 1e-9)
    area_b = (bx2 - bx1 + 1e-9) * (by2 - by1 + 1e-9)
    return inter / (area_a + area_b.T - inter)
def f_beta(tp, fp, fn, beta=2):
    """F-beta score from raw TP/FP/FN counts; 0 when there are no true positives."""
    if tp == 0:
        return 0
    b2 = beta ** 2
    numer = (1 + b2) * tp
    return numer / (numer + b2 * fn + fp)
def calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th, verbose=False):
    """Greedy matching of predictions to ground truth at one IoU threshold.

    pred_bboxes rows are [conf, x, y, w, h] (expected pre-sorted by conf);
    each prediction consumes its best-IoU GT box if IoU >= iou_th.
    Returns (tp, fp, fn).
    """
    gt_bboxes = gt_bboxes.copy()
    pred_bboxes = pred_bboxes.copy()
    tp = 0
    fp = 0
    for k, pred_bbox in enumerate(pred_bboxes):  # fixed in ver.7
        if len(gt_bboxes) == 0:
            # no GT left: every remaining prediction is a false positive
            fp += len(pred_bboxes) - k  # fix in ver.7
            break
        ious = calc_iou(gt_bboxes, pred_bbox[None, 1:])
        max_iou = ious.max()
        if max_iou >= iou_th:
            tp += 1
            # matched GT box can only be used once
            gt_bboxes = np.delete(gt_bboxes, ious.argmax(), axis=0)
        else:
            fp += 1
    fn = len(gt_bboxes)
    return tp, fp, fn
def calc_is_correct(gt_bboxes, pred_bboxes, iou_th=0.5):
    """
    gt_bboxes: (N, 4) np.array in xywh format
    pred_bboxes: (N, 5) np.array in conf+xywh format

    Returns (tp, fp, fn) for one image at one IoU threshold.
    """
    # trivial cases: one or both sides empty
    if len(gt_bboxes) == 0 and len(pred_bboxes) == 0:
        return 0, 0, 0
    if len(gt_bboxes) == 0:
        return 0, len(pred_bboxes), 0
    if len(pred_bboxes) == 0:
        return 0, 0, len(gt_bboxes)
    # match in descending confidence order
    order = pred_bboxes[:, 0].argsort()[::-1]
    return calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes[order], iou_th)
def calc_f2_score(gt_bboxes_list, pred_bboxes_list, verbose=False):
    """
    gt_bboxes_list: list of (N, 4) np.array in xywh format
    pred_bboxes_list: list of (N, 5) np.array in conf+xywh format

    Returns a dict with F2/precision/recall per IoU threshold
    (0.3..0.8 in 0.05 steps, 11 thresholds: keys 'f2_<th>', 'P_<th>',
    'R_<th>'), their averages ('f2', 'P', 'R'), and the per-image
    tp/fp/fn counters ('tps', 'fps', 'fns').
    """
    #f2s = []
    f2_dict = {'f2': 0, "P": 0, "R": 0}
    # per-image counters, one slot per IoU threshold
    all_tps = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
    all_fps = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
    all_fns = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
    for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)):
        tps, fps, fns = 0, 0, 0
        for i, (gt_bboxes, pred_bboxes) in enumerate(zip(gt_bboxes_list, pred_bboxes_list)):
            tp, fp, fn = calc_is_correct(gt_bboxes, pred_bboxes, iou_th)
            tps += tp
            fps += fp
            fns += fn
            all_tps[i][k] = tp
            all_fps[i][k] = fp
            all_fns[i][k] = fn
            if verbose:
                num_gt = len(gt_bboxes)
                num_pred = len(pred_bboxes)
                print(f'num_gt:{num_gt:<3} num_pred:{num_pred:<3} tp:{tp:<3} fp:{fp:<3} fn:{fn:<3}')
        f2 = f_beta(tps, fps, fns, beta=2)
        # beta=0 reduces F-beta to precision; a huge beta approximates recall
        precision = f_beta(tps, fps, fns, beta=0)
        recall = f_beta(tps, fps, fns, beta=100)
        f2_dict["f2_" + str(round(iou_th, 3))] = f2
        f2_dict["P_" + str(round(iou_th, 3))] = precision
        f2_dict["R_" + str(round(iou_th, 3))] = recall
        f2_dict['f2'] += f2 / 11
        f2_dict['P'] += precision / 11
        f2_dict['R'] += recall / 11
    f2_dict["tps"] = all_tps
    f2_dict["fps"] = all_fps
    f2_dict["fns"] = all_fns
    return f2_dict
def print_f2_dict(d):
    """Pretty-print the result of calc_f2_score.

    calc_f2_score stores precision/recall under the keys 'P'/'R' and
    'P_<iou>'/'R_<iou>' — the previous 'precision'/'recall' key names
    raised KeyError on every call.
    """
    print("Overall f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d['f2'], d['P'], d['R']))
    for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)):
        print(f"IOU {iou_th:.2f}:", end=" ")
        print("f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(
            d["f2_" + str(round(iou_th, 3))],
            d["P_" + str(round(iou_th, 3))],
            d["R_" + str(round(iou_th, 3))]))
def get_path(row, params, infer=False):
    """Populate the old/new image and label path columns on a dataframe row.

    At inference time the original (nested) image layout is used directly;
    otherwise paths point at the flat training copies.
    """
    stem = f'video_{row.video_id}_{row.video_frame}'
    row['old_image_path'] = params['root_dir'] / f'train_images/video_{row.video_id}/{row.video_frame}.jpg'
    row['image_path'] = row['old_image_path'] if infer else params['image_dir'] / (stem + '.jpg')
    row['label_path'] = params['label_dir'] / (stem + '.txt')
    return row
def make_copy(path, params):
    """Copy a frame image into the flat training dir as <video_id>_<filename>.

    Uses pathlib instead of splitting on '/' (resolves the old TODO about
    the split being platform-dependent) and restores the filename in the
    destination name — the previous body dropped it, writing a literal
    "(unknown)" placeholder instead.
    """
    path = Path(path)
    filename = path.name          # e.g. "7.jpg"
    video_id = path.parent.name   # e.g. "video_0"
    new_path = params["image_dir"] / f'{video_id}_{filename}'
    shutil.copy(path, new_path)
    return
# https://www.kaggle.com/awsaf49/great-barrier-reef-yolov5-train
def voc2yolo(image_height, image_width, bboxes):
    """
    voc  => [x1, y1, x2, y2]
    yolo => [xmid, ymid, w, h] (normalized)
    """
    out = bboxes.copy().astype(float)  # int input would otherwise truncate to 0
    # normalize to [0, 1] in each axis
    out[..., [0, 2]] = out[..., [0, 2]] / image_width
    out[..., [1, 3]] = out[..., [1, 3]] / image_height
    box_w = out[..., 2] - out[..., 0]
    box_h = out[..., 3] - out[..., 1]
    # corner -> center, then store extents
    out[..., 0] += box_w / 2
    out[..., 1] += box_h / 2
    out[..., 2] = box_w
    out[..., 3] = box_h
    return out
def yolo2voc(image_height, image_width, bboxes):
    """
    yolo => [xmid, ymid, w, h] (normalized)
    voc  => [x1, y1, x2, y2]
    """
    out = bboxes.copy().astype(float)  # int input would otherwise truncate to 0
    # denormalize to absolute pixel units
    out[..., [0, 2]] = out[..., [0, 2]] * image_width
    out[..., [1, 3]] = out[..., [1, 3]] * image_height
    # center -> top-left, then add extents for bottom-right
    out[..., [0, 1]] = out[..., [0, 1]] - out[..., [2, 3]] / 2
    out[..., [2, 3]] = out[..., [0, 1]] + out[..., [2, 3]]
    return out
def coco2yolo(image_height, image_width, bboxes):
    """
    coco => [xmin, ymin, w, h]
    yolo => [xmid, ymid, w, h] (normalized)
    """
    out = bboxes.copy().astype(float)  # int input would otherwise truncate to 0
    # normalize to [0, 1] in each axis
    out[..., [0, 2]] = out[..., [0, 2]] / image_width
    out[..., [1, 3]] = out[..., [1, 3]] / image_height
    # (xmin, ymin) -> (xmid, ymid)
    out[..., [0, 1]] = out[..., [0, 1]] + out[..., [2, 3]] / 2
    return out
def yolo2coco(image_height, image_width, bboxes):
    """
    yolo => [xmid, ymid, w, h] (normalized)
    coco => [xmin, ymin, w, h]
    """
    out = bboxes.copy().astype(float)  # int input would otherwise truncate to 0
    # denormalize to absolute pixel units
    out[..., [0, 2]] = out[..., [0, 2]] * image_width
    out[..., [1, 3]] = out[..., [1, 3]] * image_height
    # (xmid, ymid) -> (xmin, ymin)
    out[..., [0, 1]] = out[..., [0, 1]] - out[..., [2, 3]] / 2
    return out
def voc2coco(bboxes, image_height=720, image_width=1280):
    """voc [x1, y1, x2, y2] -> coco [xmin, ymin, w, h] via the yolo intermediate."""
    yolo_boxes = voc2yolo(image_height, image_width, bboxes)
    return yolo2coco(image_height, image_width, yolo_boxes)
# NOTE(review): this redefines load_image from above with a different
# signature; the earlier (video_id, video_frame, image_dir) variant is
# shadowed from this point on.
def load_image(image_path):
    """Read an image file and return it as an RGB array."""
    return cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    # x is (x1, y1, x2, y2); draws in place, with an optional filled label tag.
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        # label background box sits just above the top-left corner
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def draw_bboxes(img, bboxes, classes, colors = None, show_classes = None, bbox_format = 'yolo', class_name = False, line_thickness = 1):
    """Draw *bboxes* on a copy of *img* and return it.

    bbox_format selects how each box is interpreted:
      'yolo'       - normalized (xmid, ymid, w, h)
      'coco'       - absolute (xmin, ymin, w, h)
      'voc_pascal' - absolute (x1, y1, x2, y2)
    Only boxes whose class is in *show_classes* are drawn.
    """
    image = img.copy()
    show_classes = classes if show_classes is None else show_classes
    colors = (0, 255, 0) if colors is None else colors
    if bbox_format == 'yolo':
        for idx in range(len(bboxes)):
            bbox = bboxes[idx]
            cls = classes[idx]
            color = colors[idx]
            if cls in show_classes:
                # denormalize; half-extents so corners are center +/- w/2, h/2
                x1 = round(float(bbox[0]) * image.shape[1])
                y1 = round(float(bbox[1]) * image.shape[0])
                w = round(float(bbox[2]) * image.shape[1] / 2)  # w/2
                h = round(float(bbox[3]) * image.shape[0] / 2)
                voc_bbox = (x1 - w, y1 - h, x1 + w, y1 + h)
                # NOTE(review): get_label is not defined in this module —
                # this branch raises NameError unless it's provided elsewhere.
                plot_one_box(voc_bbox,
                             image,
                             color = color,
                             label = cls if class_name else str(get_label(cls)),
                             line_thickness = line_thickness)
    elif bbox_format == 'coco':
        for idx in range(len(bboxes)):
            bbox = bboxes[idx]
            cls = classes[idx]
            color = colors[idx]
            if cls in show_classes:
                x1 = int(round(bbox[0]))
                y1 = int(round(bbox[1]))
                w = int(round(bbox[2]))
                h = int(round(bbox[3]))
                voc_bbox = (x1, y1, x1 + w, y1 + h)
                plot_one_box(voc_bbox,
                             image,
                             color = color,
                             label = cls,
                             line_thickness = line_thickness)
    elif bbox_format == 'voc_pascal':
        for idx in range(len(bboxes)):
            bbox = bboxes[idx]
            cls = classes[idx]
            # NOTE(review): class_ids is not defined in this module — this
            # branch raises NameError unless it's provided elsewhere.
            cls_id = class_ids[idx]
            color = colors[cls_id] if type(colors) is list else colors
            if cls in show_classes:
                x1 = int(round(bbox[0]))
                y1 = int(round(bbox[1]))
                x2 = int(round(bbox[2]))
                y2 = int(round(bbox[3]))
                voc_bbox = (x1, y1, x2, y2)
                plot_one_box(voc_bbox,
                             image,
                             color = color,
                             label = cls if class_name else str(cls_id),
                             line_thickness = line_thickness)
    else:
        raise ValueError('wrong bbox format')
    return image
def get_bbox(annots):
    """Collect each annotation dict's values (in stored key order) as a list of lists."""
    return [[*annot.values()] for annot in annots]
def get_imgsize(row):
    """Attach width/height columns to a dataframe row from its image file."""
    # NOTE(review): relies on the third-party `imagesize` package, which does
    # not appear in this module's imports — confirm it is imported elsewhere,
    # otherwise this raises NameError.
    row['width'], row['height'] = imagesize.get(row['image_path'])
    return row
# https://www.kaggle.com/diegoalejogm/great-barrier-reefs-eda-with-animations
def create_animation(ims):
    """Build a matplotlib animation (~12 fps) cycling through *ims*."""
    fig = plt.figure(figsize=(16, 12))
    plt.axis('off')
    im = plt.imshow(ims[0])
    def animate_func(i):
        # swap the displayed array in place rather than re-plotting
        im.set_array(ims[i])
        return [im]
    return animation.FuncAnimation(fig, animate_func, frames = len(ims), interval = 1000//12)
# https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py
def nms(dets, thresh):
    """Greedy non-maximum suppression.

    dets rows are [x1, y1, x2, y2, score]; boxes overlapping a kept box
    by IoU > thresh are discarded. Returns the kept row indices.
    """
    xs1, ys1 = dets[:, 0], dets[:, 1]
    xs2, ys2 = dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    areas = (xs2 - xs1 + 1) * (ys2 - ys1 + 1)
    order = scores.argsort()[::-1]  # best score first
    keep = []
    while order.size > 0:
        top = order[0]
        keep.append(top)
        rest = order[1:]
        # intersection of the kept box with every remaining candidate
        ix1 = np.maximum(xs1[top], xs1[rest])
        iy1 = np.maximum(ys1[top], ys1[rest])
        ix2 = np.minimum(xs2[top], xs2[rest])
        iy2 = np.minimum(ys2[top], ys2[rest])
        iw = np.maximum(0.0, ix2 - ix1 + 1)
        ih = np.maximum(0.0, iy2 - iy1 + 1)
        overlap = iw * ih
        iou = overlap / (areas[top] + areas[rest] - overlap)
        order = rest[np.where(iou <= thresh)[0]]
    return keep
# https://github.com/DocF/Soft-NMS/blob/master/soft_nms.py
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2):
    """
    py_cpu_softnms
    :param dets: box coordinate matrix in [y1, x1, y2, x2] format
    :param sc: score of each box (decayed in place)
    :param Nt: IoU overlap threshold
    :param sigma: variance of the gaussian weighting function
    :param thresh: final score threshold
    :param method: weighting method (1: linear, 2: gaussian, other: hard NMS)
    :return: indices of the boxes to keep
    """
    # indexes concatenate boxes with the last column
    N = dets.shape[0]
    indexes = np.array([np.arange(N)])
    dets = np.concatenate((dets, indexes.T), axis=1)
    # the order of boxes coordinate is [y1,x1,y2,x2]
    y1 = dets[:, 0]
    x1 = dets[:, 1]
    y2 = dets[:, 2]
    x2 = dets[:, 3]
    scores = sc
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    for i in range(N):
        # intermediate parameters for later parameters exchange
        tBD = dets[i, :].copy()
        tscore = scores[i].copy()
        tarea = areas[i].copy()
        pos = i + 1
        # find the highest-scoring box among the remaining ones
        if i != N-1:
            maxscore = np.max(scores[pos:], axis=0)
            maxpos = np.argmax(scores[pos:], axis=0)
        else:
            maxscore = scores[-1]
            maxpos = 0
        # selection-sort step: swap the current slot with the best remaining box
        if tscore < maxscore:
            dets[i, :] = dets[maxpos + i + 1, :]
            dets[maxpos + i + 1, :] = tBD
            tBD = dets[i, :]
            scores[i] = scores[maxpos + i + 1]
            scores[maxpos + i + 1] = tscore
            tscore = scores[i]
            areas[i] = areas[maxpos + i + 1]
            areas[maxpos + i + 1] = tarea
            tarea = areas[i]
        # IoU calculate
        xx1 = np.maximum(dets[i, 1], dets[pos:, 1])
        yy1 = np.maximum(dets[i, 0], dets[pos:, 0])
        xx2 = np.minimum(dets[i, 3], dets[pos:, 3])
        yy2 = np.minimum(dets[i, 2], dets[pos:, 2])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[pos:] - inter)
        # Three methods: 1.linear 2.gaussian 3.original NMS
        if method == 1:  # linear
            weight = np.ones(ovr.shape)
            weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt]
        elif method == 2:  # gaussian
            weight = np.exp(-(ovr * ovr) / sigma)
        else:  # original NMS
            weight = np.ones(ovr.shape)
            weight[ovr > Nt] = 0
        # decay the scores of boxes overlapping the current one
        scores[pos:] = weight * scores[pos:]
    # select the boxes and keep the corresponding indexes
    inds = dets[:, 4][scores > thresh]
    keep = inds.astype(int)
    return keep
def seed_torch(seed=42):
    """Seed Python, hash, NumPy and torch (CPU + CUDA) RNGs and make cuDNN deterministic."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def create_logger(filename, filemode='a'):
    # better logging file - output the in terminal as well
    # Configures the root logger to write to *filename* AND to stdout.
    # NOTE(review): logging.basicConfig only takes effect if the root logger
    # has no handlers yet — call this before any other logging setup.
    file_handler = logging.FileHandler(filename=filename, mode=filemode)
    stdout_handler = logging.StreamHandler(sys.stdout)
    handlers = [file_handler, stdout_handler]
    formatter = "%(asctime)s %(levelname)s: %(message)s"
    datefmt = "%m/%d/%Y %I:%M:%S %p"
    logging.basicConfig(format=formatter, datefmt=datefmt,
                        level=logging.DEBUG, handlers=handlers)
    return
def save_pickle(obj, folder_path):
    """Pickle *obj* to *folder_path* with the highest protocol.

    Uses a context manager so the file handle is closed deterministically
    (the previous one-liner left it to the garbage collector).
    """
    with open(folder_path, 'wb') as fh:
        pickle.dump(obj, fh, pickle.HIGHEST_PROTOCOL)
def load_pickle(folder_path):
    """Unpickle and return the object stored at *folder_path*.

    Uses a context manager so the file handle is closed deterministically.
    Only use on trusted files — pickle can execute arbitrary code.
    """
    with open(folder_path, 'rb') as fh:
        return pickle.load(fh)
def save_yaml(obj, folder_path):
    """Serialize the dict *obj* to YAML at *folder_path*.

    Path values are converted to absolute-path strings so the YAML stays
    portable; everything else is dumped as-is. (Replaces the old copy +
    per-key reassignment, whose else-branch was a no-op.)
    """
    serializable = {
        key: str(value.resolve()) if isinstance(value, Path) else value
        for key, value in obj.items()
    }
    with open(folder_path, 'w') as file:
        yaml.dump(serializable, file)
def load_yaml(folder_path):
    """Load a YAML document from *folder_path* (FullLoader; trusted input only)."""
    with open(folder_path) as fh:
        return yaml.load(fh, Loader=yaml.FullLoader)
def load_model(params):
    """Load a YOLOv5-style detection model and apply inference settings.

    Tries torch.hub against the local repo first; falls back to a plain
    torch.load of the checkpoint if that fails.
    """
    try:
        model = torch.hub.load(params['repo'],
                               'custom',
                               path=params['ckpt_path'],
                               source='local',
                               force_reload=True)  # local repo
    except Exception:
        # narrowed from bare `except:` so Ctrl-C / SystemExit still propagate
        print("torch.hub.load failed, try torch.load")
        model = torch.load(params['ckpt_path'])
    model.conf = params['conf']  # NMS confidence threshold
    model.iou = params['iou']  # NMS IoU threshold
    model.classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for persons, cats and dogs
    model.multi_label = False  # NMS multiple labels per box
    model.max_det = 50  # maximum number of detections per image
    return model
def predict(model, img, size=768, augment=False, use_sahi=False):
    """Run detection on one image and return (bboxes, confs).

    bboxes come back in COCO (xmin, ymin, w, h) int format with matching
    confidence scores; an empty array/list pair when nothing is detected.
    use_sahi switches to sliced (tiled) inference via the sahi package.
    """
    if use_sahi:
        from sahi.predict import get_sliced_prediction
        results = get_sliced_prediction(
            img,
            model,
            slice_height = 512,
            slice_width = 512,
            overlap_height_ratio = 0.2,
            overlap_width_ratio = 0.2
        )
        preds = results.object_prediction_list
        bboxes = np.array([pred.bbox.to_voc_bbox() for pred in preds])
    else:
        results = model(img, size=size, augment=augment)  # custom inference size
        preds = results.pandas().xyxy[0]
        bboxes = preds[['xmin','ymin','xmax','ymax']].values
    if len(bboxes):
        height, width = img.shape[:2]
        # both branches produce voc (corner) boxes; convert to coco xywh
        bboxes = voc2coco(bboxes,height,width).astype(int)
        if use_sahi:
            confs = np.array([pred.score.value for pred in preds])
        else:
            confs = preds.confidence.values
        return bboxes, confs
    else:
        return np.array([]),[]
def format_prediction(bboxes, confs):
    """Serialize detections as space-separated 'conf xmin ymin w h' groups."""
    parts = []
    for idx, bbox in enumerate(bboxes):
        xmin, ymin, w, h = bbox
        parts.append(f'{confs[idx]} {xmin} {ymin} {w} {h}')
    return ' '.join(parts)
def show_img(img, bboxes, confs, colors, bbox_format='yolo'):
    """Render *img* with boxes labelled by rounded confidence; returns a PIL image."""
    labels = [str(round(conf,2)) for conf in confs]
    img = draw_bboxes(img = img,
                      bboxes = bboxes,
                      classes = labels,
                      class_name = True,
                      colors = colors,
                      bbox_format = bbox_format,
                      line_thickness = 2)
    return Image.fromarray(img)
def write_hyp(params):
    """Dump hyper-parameters as simple 'key: value' lines to params['hyp_file']."""
    lines = [f"{key}: {val}\n" for key, val in params["hyp_param"].items()]
    with open(params["hyp_file"], mode="w") as f:
        f.writelines(lines)
def class2dict(f):
    """Snapshot the non-dunder attributes of a class/namespace object as a dict."""
    return {name: getattr(f, name) for name in dir(f) if not name.startswith('__')}
def upload(params):
    """Create a Kaggle dataset from the experiment output directory.

    Moves any wandb run dir out of the upload root, writes the
    dataset-metadata.json Kaggle expects, then shells out to the kaggle CLI.
    """
    data_version = "-".join(params["exp_name"].split("_"))
    if os.path.exists(params["output_dir"] / "wandb"):
        # keep wandb artifacts out of the uploaded dataset
        shutil.move(str(params["output_dir"] / "wandb"),
                    str(params["output_dir"].parent / f"{params["exp_name"]}_wandb/")
                    )
    with open(params["output_dir"] / "dataset-metadata.json", "w") as f:
        f.write("{\n")
        f.write(f""" "title": "{data_version}",\n""")
        f.write(f""" "id": "vincentwang25/{data_version}",\n""")
        f.write(""" "licenses": [\n""")
        f.write(""" {\n""")
        f.write(""" "name": "CC0-1.0"\n""")
        f.write(""" }\n""")
        f.write(""" ]\n""")
        f.write("""}""")
    subprocess.call(["kaggle", "datasets", "create", "-p", str(params["output_dir"]), "-r", "zip"])
def coco(df):
    """Convert the annotations dataframe into a COCO-format dict.

    One image entry per row (fixed 1280x720 frames, single 'cots'
    category); one annotation entry per box with a running id.
    """
    images, annotations = [], []
    ann_id = 0
    for i, row in df.iterrows():
        images.append({
            "id": i,
            "file_name": f"video_{row['video_id']}_{row['video_frame']}.jpg",
            "height": 720,
            "width": 1280,
        })
        for bbox in row['annotations']:
            annotations.append({
                "id": ann_id,
                "image_id": i,
                "category_id": 0,
                "bbox": list(bbox.values()),
                "area": bbox['width'] * bbox['height'],
                "segmentation": [],
                "iscrowd": 0
            })
            ann_id += 1
    return {
        'categories': [{'id': 0, 'name': 'cots'}],
        'images': images,
        'annotations': annotations,
    }
def mmcfg_from_param(params):
    """Build an mmdetection Config for this run from the experiment params.

    Starts from params['hyp_param']['base_file'] and overrides the model
    head/losses/NMS per model_type, the optimizer and LR schedule, the data
    pipelines (via add_data_pipeline) and evaluation/logging settings.
    """
    from mmcv import Config
    # model
    cfg = Config.fromfile(params['hyp_param']['base_file'])
    cfg.work_dir = str(params['output_dir'])
    cfg.seed = 2022
    cfg.gpu_ids = range(2)
    cfg.load_from = params['hyp_param']['load_from']
    if params['hyp_param']['model_type'] == 'faster_rcnn':
        cfg.model.roi_head.bbox_head.num_classes = 1
        cfg.model.roi_head.bbox_head.loss_bbox.type = params['hyp_param']['loss_fnc']
        cfg.model.rpn_head.loss_bbox.type = params['hyp_param']['loss_fnc']
        if params['hyp_param']['loss_fnc'] == "GIoULoss":
            # IoU-style losses operate on decoded boxes rather than deltas
            cfg.model.roi_head.bbox_head.reg_decoded_bbox = True
            cfg.model.rpn_head.reg_decoded_bbox = True
        cfg.model.train_cfg.rpn_proposal.nms.type = params['hyp_param']['nms']
        cfg.model.test_cfg.rpn.nms.type = params['hyp_param']['nms']
        cfg.model.test_cfg.rcnn.nms.type = params['hyp_param']['nms']
        cfg.model.train_cfg.rcnn.sampler.type = params['hyp_param']['sampler']
    elif params['hyp_param']['model_type'] == 'swin':
        pass  # already changed
    elif params['hyp_param']['model_type'] == 'vfnet':
        cfg.model.bbox_head.num_classes = 1
    # optimizer: replace wholesale for AdamW, otherwise tweak lr/decay in place
    if params['hyp_param'].get("optimizer", cfg.optimizer.type) == "AdamW":
        cfg.optimizer = dict(
            type="AdamW",
            lr=params['hyp_param'].get("lr", cfg.optimizer.lr),
            weight_decay=params['hyp_param'].get(
                "weight_decay", cfg.optimizer.weight_decay
            ),
        )
    else:
        cfg.optimizer.lr = params['hyp_param'].get("lr", cfg.optimizer.lr)
        cfg.optimizer.weight_decay = params['hyp_param'].get(
            "weight_decay", cfg.optimizer.weight_decay)
    cfg.lr_config = dict(
        policy='CosineAnnealing',
        by_epoch=False,
        warmup='linear',
        warmup_iters= 1000,
        warmup_ratio= 1/10,
        min_lr=1e-07)
    # data
    cfg = add_data_pipeline(cfg, params)
    cfg.runner.max_epochs = params['epochs']
    cfg.evaluation.start = 1
    cfg.evaluation.interval = 1
    cfg.evaluation.save_best ='auto'
    cfg.evaluation.metric ='bbox'
    cfg.checkpoint_config.interval = -1
    cfg.log_config.interval = 500
    cfg.log_config.with_step = True
    cfg.log_config.by_epoch = True
    cfg.log_config.hooks =[dict(type='TextLoggerHook'),
                           dict(type='TensorboardLoggerHook')]
    cfg.workflow = [('train',1)]
    logging.info(str(cfg))
    return cfg
def add_data_pipeline(cfg, params):
    """Attach COCO dataset settings and train/val/test pipelines to *cfg*.

    Albumentations transforms come from get_albu_transforms; when mixup or
    mosaic is enabled the training set is wrapped in MultiImageMixDataset,
    which does the image/annotation loading itself.
    """
    cfg.dataset_type = 'COCODataset'
    cfg.classes = ('cots',)
    cfg.data_root = str(params['data_path'].resolve())
    params['aug_param']['img_scale'] = (params['img_size'], params['img_size'])
    cfg.img_scale = params['aug_param']['img_scale']
    cfg.dataset_type = 'CocoDataset'
    # keep images without GT boxes in every split
    cfg.filter_empty_gt = False
    cfg.data.filter_empty_gt = False
    cfg.data.train.type = cfg.dataset_type
    cfg.data.train.classes = cfg.classes
    cfg.data.train.ann_file = str(params["cfg_dir"] / 'annotations_train.json')
    cfg.data.train.img_prefix = cfg.data_root + '/images/'
    cfg.data.train.filter_empty_gt = False
    cfg.data.test.type = cfg.dataset_type
    cfg.data.test.classes = cfg.classes
    cfg.data.test.ann_file = str(params["cfg_dir"] / 'annotations_valid.json')
    cfg.data.test.img_prefix = cfg.data_root + '/images/'
    cfg.data.test.filter_empty_gt = False
    cfg.data.val.type = cfg.dataset_type
    cfg.data.val.classes = cfg.classes
    cfg.data.val.ann_file = str(params["cfg_dir"] / 'annotations_valid.json')
    cfg.data.val.img_prefix = cfg.data_root + '/images/'
    cfg.data.val.filter_empty_gt = False
    cfg.data.samples_per_gpu = params['batch'] // len(cfg.gpu_ids)
    cfg.data.workers_per_gpu = params['workers'] // len(cfg.gpu_ids)
    # train pipeline
    albu_train_transforms = get_albu_transforms(params['aug_param'], is_train=True)
    if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']:
        # MultiImageMixDataset (set up below) loads images itself
        train_pipeline = []
    else:
        train_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True)]
    if params['aug_param']['use_mosaic']:
        train_pipeline.append(dict(type='Mosaic', img_scale=cfg.img_scale, pad_val=114.0))
    else:
        train_pipeline.append(dict(type='Resize', img_scale=cfg.img_scale, keep_ratio=False))
    train_pipeline = train_pipeline +[
        dict(type='Pad', size_divisor=32),
        dict(
            type='Albu',
            transforms=albu_train_transforms,
            bbox_params=dict(
                type='BboxParams',
                format='pascal_voc',
                label_fields=['gt_labels'],
                min_visibility=0.0,
                filter_lost_elements=True),
            keymap={
                'img': 'image',
                'gt_bboxes': 'bboxes'
            },
            update_pad_shape=False,
            skip_img_without_anno=False
        )]
    if params['aug_param']['use_mixup']:
        train_pipeline.append(dict(type='MixUp', img_scale=cfg.img_scale, ratio_range=(0.8, 1.6), pad_val=114.0))
    train_pipeline = train_pipeline +\
        [
            dict(type='Normalize', **cfg.img_norm_cfg),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect',
                 keys=['img', 'gt_bboxes', 'gt_labels'],
                 meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape',
                            'scale_factor', 'img_norm_cfg')),
        ]
    val_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(
            type='MultiScaleFlipAug',
            img_scale=cfg.img_scale,
            flip=False,
            transforms=[
                dict(type='Resize', keep_ratio=True),
                dict(type='RandomFlip'),
                dict(type='Normalize', **cfg.img_norm_cfg),
                dict(type='Pad', size_divisor=32),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img'])
            ])
    ]
    test_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(
            type='MultiScaleFlipAug',
            img_scale=[cfg.img_scale],
            flip=[False],
            transforms=[
                dict(type='Resize', keep_ratio=False),
                dict(type='Pad', size_divisor=32),
                dict(type='RandomFlip', direction='horizontal'),
                dict(type='Normalize', **cfg.img_norm_cfg),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img']),
            ])
    ]
    cfg.train_pipeline = train_pipeline
    cfg.val_pipeline = val_pipeline
    cfg.test_pipeline = test_pipeline
    if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']:
        # wrap the train split so mixup/mosaic can pull extra images per sample
        cfg.train_dataset = dict(
            type='MultiImageMixDataset',
            dataset=dict(
                type=cfg.dataset_type,
                classes=cfg.classes,
                ann_file=str(params["cfg_dir"] / 'annotations_train.json'),
                img_prefix=cfg.data_root + '/images/',
                pipeline=[
                    dict(type='LoadImageFromFile'),
                    dict(type='LoadAnnotations', with_bbox=True)
                ],
                filter_empty_gt=False,
            ),
            pipeline=cfg.train_pipeline
        )
        cfg.data.train = cfg.train_dataset
    else:
        cfg.data.train.pipeline = cfg.train_pipeline
    cfg.data.val.pipeline = cfg.val_pipeline
    cfg.data.test.pipeline = cfg.test_pipeline
    return cfg
def find_ckp(output_dir):
return glob(output_dir / "best*.pth")[0] | import sys
import cv2
import os
from ast import literal_eval
from pathlib import Path
import shutil
import logging
import random
import pickle
import yaml
import subprocess
from PIL import Image
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import animation, rc
plt.rcParams['figure.figsize'] = 30, 30
np.set_printoptions(precision=3, suppress=True)
rc('animation', html='jshtml')
import torch
from augmentations import get_albu_transforms
IMAGE_DIR = '~/Kaggle/data/tensorflow-great-barrier-reef/train_images'
def load_image(video_id, video_frame, image_dir):
img_path = f'{image_dir}/video_{video_id}/{video_frame}.jpg'
assert os.path.exists(img_path), f'{img_path} does not exist.'
img = cv2.imread(img_path)
return img
def decode_annotations(annotaitons_str):
"""decode annotations in string to list of dict"""
return literal_eval(annotaitons_str)
def load_image_with_annotations(video_id, video_frame, image_dir, annotaitons_str):
img = load_image(video_id, video_frame, image_dir)
annotations = decode_annotations(annotaitons_str)
if len(annotations) > 0:
for ann in annotations:
cv2.rectangle(img, (ann['x'], ann['y']),
(ann['x'] + ann['width'], ann['y'] + ann['height']),
(255, 0, 0), thickness=2,)
return img
def draw_predictions(img, pred_bboxes):
img = img.copy()
if len(pred_bboxes) > 0:
for bbox in pred_bboxes:
conf = bbox[0]
x, y, w, h = bbox[1:].round().astype(int)
cv2.rectangle(img, (x, y),(x+w, y+h),(0, 255, 255), thickness=2,)
cv2.putText(img, f"{conf:.2}",(x, max(0, y-5)),
cv2.FONT_HERSHEY_SIMPLEX,0.5,(0, 0, 255),
thickness=1,
)
return img
def plot_img(df, idx, image_dir, pred_bboxes=None):
row = df.iloc[idx]
video_id = row.video_id
video_frame = row.video_frame
annotations_str = row.annotations
img = load_image_with_annotations(video_id, video_frame, image_dir, annotations_str)
if pred_bboxes and len(pred_bboxes) > 0:
pred_bboxes = pred_bboxes[pred_bboxes[:,0].argsort()[::-1]] # sort by conf
img = draw_predictions(img, pred_bboxes)
plt.imshow(img[:, :, ::-1])
def calc_iou(bboxes1, bboxes2, bbox_mode='xywh'):
assert len(bboxes1.shape) == 2 and bboxes1.shape[1] == 4
assert len(bboxes2.shape) == 2 and bboxes2.shape[1] == 4
bboxes1 = bboxes1.copy()
bboxes2 = bboxes2.copy()
if bbox_mode == 'xywh':
bboxes1[:, 2:] += bboxes1[:, :2]
bboxes2[:, 2:] += bboxes2[:, :2]
x11, y11, x12, y12 = np.split(bboxes1, 4, axis=1)
x21, y21, x22, y22 = np.split(bboxes2, 4, axis=1)
xA = np.maximum(x11, np.transpose(x21))
yA = np.maximum(y11, np.transpose(y21))
xB = np.minimum(x12, np.transpose(x22))
yB = np.minimum(y12, np.transpose(y22))
interArea = np.maximum((xB - xA + 1e-9), 0) * np.maximum((yB - yA + 1e-9), 0)
boxAArea = (x12 - x11 + 1e-9) * (y12 - y11 + 1e-9)
boxBArea = (x22 - x21 + 1e-9) * (y22 - y21 + 1e-9)
iou = interArea / (boxAArea + np.transpose(boxBArea) - interArea)
return iou
def f_beta(tp, fp, fn, beta=2):
if tp == 0:
return 0
return (1+beta**2)*tp / ((1+beta**2)*tp + beta**2*fn+fp)
def calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th, verbose=False):
gt_bboxes = gt_bboxes.copy()
pred_bboxes = pred_bboxes.copy()
tp = 0
fp = 0
for k, pred_bbox in enumerate(pred_bboxes): # fixed in ver.7
if len(gt_bboxes) == 0:
fp += len(pred_bboxes) - k # fix in ver.7
break
ious = calc_iou(gt_bboxes, pred_bbox[None, 1:])
max_iou = ious.max()
if max_iou >= iou_th:
tp += 1
gt_bboxes = np.delete(gt_bboxes, ious.argmax(), axis=0)
else:
fp += 1
fn = len(gt_bboxes)
return tp, fp, fn
def calc_is_correct(gt_bboxes, pred_bboxes, iou_th=0.5):
    """
    gt_bboxes: (N, 4) np.array in xywh format
    pred_bboxes: (N, 5) np.array in conf+xywh format
    """
    n_gt = len(gt_bboxes)
    n_pred = len(pred_bboxes)
    # trivial cases where one (or both) of the sets is empty
    if n_gt == 0 and n_pred == 0:
        return 0, 0, 0
    if n_gt == 0:
        return 0, n_pred, 0
    if n_pred == 0:
        return 0, 0, n_gt
    # evaluate highest-confidence predictions first
    ordered = pred_bboxes[pred_bboxes[:, 0].argsort()[::-1]]
    tp, fp, fn = calc_is_correct_at_iou_th(gt_bboxes, ordered, iou_th)
    return tp, fp, fn
def calc_f2_score(gt_bboxes_list, pred_bboxes_list, verbose=False):
    """
    gt_bboxes_list: list of (N, 4) np.array in xywh format
    pred_bboxes_list: list of (N, 5) np.array in conf+xywh format

    Returns a dict with the F2/precision/recall averaged over the 11 IoU
    thresholds 0.3, 0.35, ..., 0.8, plus per-threshold entries keyed
    'f2_<th>' / 'P_<th>' / 'R_<th>' and per-image tp/fp/fn count tables.
    """
    #f2s = []
    f2_dict = {'f2':0, "P":0, "R": 0}
    # per-image x per-threshold raw counts, kept for later inspection
    all_tps = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
    all_fps = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
    all_fns = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
    for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)):
        tps, fps, fns = 0, 0, 0
        for i, (gt_bboxes, pred_bboxes) in enumerate(zip(gt_bboxes_list, pred_bboxes_list)):
            tp, fp, fn = calc_is_correct(gt_bboxes, pred_bboxes, iou_th)
            tps += tp
            fps += fp
            fns += fn
            all_tps[i][k] = tp
            all_fps[i][k] = fp
            all_fns[i][k] = fn
            if verbose:
                num_gt = len(gt_bboxes)
                num_pred = len(pred_bboxes)
                print(f'num_gt:{num_gt:<3} num_pred:{num_pred:<3} tp:{tp:<3} fp:{fp:<3} fn:{fn:<3}')
        f2 = f_beta(tps, fps, fns, beta=2)
        # beta=0 reduces F-beta to precision; a very large beta approximates recall
        precision = f_beta(tps, fps, fns, beta=0)
        recall = f_beta(tps, fps, fns, beta=100)
        f2_dict["f2_" + str(round(iou_th,3))] = f2
        f2_dict["P_" + str(round(iou_th,3))] = precision
        f2_dict["R_" + str(round(iou_th,3))] = recall
        # accumulate the mean over the 11 thresholds
        f2_dict['f2'] += f2 / 11
        f2_dict['P'] += precision / 11
        f2_dict['R'] += recall / 11
    f2_dict["tps"] = all_tps
    f2_dict["fps"] = all_fps
    f2_dict["fns"] = all_fns
    return f2_dict
def print_f2_dict(d):
    """Pretty-print the metrics dict produced by calc_f2_score.

    BUG FIX: calc_f2_score stores the precision/recall entries under 'P'/'R'
    (and 'P_<th>'/'R_<th>'), but this function looked up 'precision'/'recall'
    keys that are never written, so it always raised KeyError.
    """
    print("Overall f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d['f2'], d['P'], d['R']))
    for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)):
        key = str(round(iou_th, 3))
        print(f"IOU {iou_th:.2f}:", end=" ")
        print("f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d["f2_" + key],
                                                                   d["P_" + key],
                                                                   d["R_" + key]))
def get_path(row, params, infer=False):
    """Attach source and derived image/label paths to *row* and return it."""
    stem = f'video_{row.video_id}_{row.video_frame}'
    row['old_image_path'] = params['root_dir'] / f'train_images/video_{row.video_id}/{row.video_frame}.jpg'
    if infer:
        # at inference time frames are read straight from the original layout
        row['image_path'] = row["old_image_path"]
    else:
        row['image_path'] = params['image_dir'] / f'{stem}.jpg'
    row['label_path'] = params['label_dir'] / f'{stem}.txt'
    return row
def make_copy(path, params):
    """Copy the frame image at *path* (.../video_X/N.jpg) into
    params['image_dir'] as video_X_N.jpg.

    BUG FIX: the original split the path on '/' (breaking on Windows) and
    then never used the extracted filename - every frame was copied to the
    same garbled destination name, clobbering earlier copies. Use pathlib
    components and include the filename in the destination.
    """
    path = Path(path)
    filename = path.name          # e.g. "123.jpg"
    video_id = path.parent.name   # e.g. "video_0"
    new_path = params["image_dir"] / f'{video_id}_{filename}'
    shutil.copy(path, new_path)
    return
# https://www.kaggle.com/awsaf49/great-barrier-reef-yolov5-train
def voc2yolo(image_height, image_width, bboxes):
    """
    voc => [x1, y1, x2, y2]
    yolo => [xmid, ymid, w, h] (normalized)
    """
    out = bboxes.copy().astype(float)  # integer input would otherwise truncate to 0
    out[..., [0, 2]] /= image_width
    out[..., [1, 3]] /= image_height
    widths = out[..., 2] - out[..., 0]
    heights = out[..., 3] - out[..., 1]
    out[..., 0] += widths / 2
    out[..., 1] += heights / 2
    out[..., 2] = widths
    out[..., 3] = heights
    return out
def yolo2voc(image_height, image_width, bboxes):
    """
    yolo => [xmid, ymid, w, h] (normalized)
    voc => [x1, y1, x2, y2]
    """
    out = bboxes.copy().astype(float)  # integer input would otherwise truncate to 0
    out[..., [0, 2]] *= image_width
    out[..., [1, 3]] *= image_height
    out[..., [0, 1]] -= out[..., [2, 3]] / 2   # center -> top-left corner
    out[..., [2, 3]] += out[..., [0, 1]]       # size -> bottom-right corner
    return out
def coco2yolo(image_height, image_width, bboxes):
    """
    coco => [xmin, ymin, w, h]
    yolo => [xmid, ymid, w, h] (normalized)
    """
    out = bboxes.copy().astype(float)  # integer input would otherwise truncate to 0
    # normalize x/w by the image width and y/h by the image height
    out[..., [0, 2]] /= image_width
    out[..., [1, 3]] /= image_height
    # shift the top-left corner to the box center
    out[..., [0, 1]] += out[..., [2, 3]] / 2
    return out
def yolo2coco(image_height, image_width, bboxes):
    """
    yolo => [xmid, ymid, w, h] (normalized)
    coco => [xmin, ymin, w, h]
    """
    out = bboxes.copy().astype(float)  # integer input would otherwise truncate to 0
    # denormalize back to pixel coordinates
    out[..., [0, 2]] *= image_width
    out[..., [1, 3]] *= image_height
    # shift the box center to the top-left corner
    out[..., [0, 1]] -= out[..., [2, 3]] / 2
    return out
def voc2coco(bboxes, image_height=720, image_width=1280):
    """Convert voc [x1, y1, x2, y2] boxes to coco [xmin, ymin, w, h],
    going through the normalized yolo representation."""
    normalized = voc2yolo(image_height, image_width, bboxes)
    return yolo2coco(image_height, image_width, normalized)
def load_image(image_path):
    """Read an image from disk and convert its channels from BGR to RGB."""
    bgr = cv2.imread(image_path)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw one voc-format box *x* (x1, y1, x2, y2), optionally labelled, on *img* in place."""
    # line/font thickness scales with the image size unless given explicitly
    thickness = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    draw_color = color or [random.randint(0, 255) for _ in range(3)]
    pt1 = (int(x[0]), int(x[1]))
    pt2 = (int(x[2]), int(x[3]))
    cv2.rectangle(img, pt1, pt2, draw_color, thickness=thickness, lineType=cv2.LINE_AA)
    if label:
        font_thickness = max(thickness - 1, 1)
        text_size = cv2.getTextSize(label, 0, fontScale=thickness / 3, thickness=font_thickness)[0]
        pt2 = pt1[0] + text_size[0], pt1[1] - text_size[1] - 3
        cv2.rectangle(img, pt1, pt2, draw_color, -1, cv2.LINE_AA)  # filled label background
        cv2.putText(img, label, (pt1[0], pt1[1] - 2), 0, thickness / 3, [225, 255, 255],
                    thickness=font_thickness, lineType=cv2.LINE_AA)
def draw_bboxes(img, bboxes, classes, colors = None, show_classes = None, bbox_format = 'yolo', class_name = False, line_thickness = 1):
    """Return a copy of *img* with *bboxes* drawn on it.

    bbox_format selects how coordinates are interpreted: 'yolo'
    (normalized center/size), 'coco' (xmin, ymin, w, h) or 'voc_pascal'
    (x1, y1, x2, y2). Only boxes whose class is in *show_classes* are drawn.
    """
    image = img.copy()
    show_classes = classes if show_classes is None else show_classes
    colors = (0, 255 ,0) if colors is None else colors
    if bbox_format == 'yolo':
        for idx in range(len(bboxes)):
            bbox = bboxes[idx]
            cls = classes[idx]
            color = colors[idx]
            if cls in show_classes:
                # denormalize center / half-size into pixel corner coordinates
                x1 = round(float(bbox[0])*image.shape[1])
                y1 = round(float(bbox[1])*image.shape[0])
                w = round(float(bbox[2])*image.shape[1]/2) #w/2
                h = round(float(bbox[3])*image.shape[0]/2)
                voc_bbox = (x1-w, y1-h, x1+w, y1+h)
                plot_one_box(voc_bbox,
                             image,
                             color = color,
                             # NOTE(review): get_label is not defined in this file -
                             # confirm it exists wherever this branch is used
                             label = cls if class_name else str(get_label(cls)),
                             line_thickness = line_thickness)
    elif bbox_format == 'coco':
        for idx in range(len(bboxes)):
            bbox = bboxes[idx]
            cls = classes[idx]
            color = colors[idx]
            if cls in show_classes:
                x1 = int(round(bbox[0]))
                y1 = int(round(bbox[1]))
                w = int(round(bbox[2]))
                h = int(round(bbox[3]))
                voc_bbox = (x1, y1, x1+w, y1+h)
                plot_one_box(voc_bbox,
                             image,
                             color = color,
                             label = cls,
                             line_thickness = line_thickness)
    elif bbox_format == 'voc_pascal':
        for idx in range(len(bboxes)):
            bbox = bboxes[idx]
            cls = classes[idx]
            # NOTE(review): class_ids is not defined anywhere in this file -
            # this branch raises NameError as written; verify before relying on it
            cls_id = class_ids[idx]
            color = colors[cls_id] if type(colors) is list else colors
            if cls in show_classes:
                x1 = int(round(bbox[0]))
                y1 = int(round(bbox[1]))
                x2 = int(round(bbox[2]))
                y2 = int(round(bbox[3]))
                voc_bbox = (x1, y1, x2, y2)
                plot_one_box(voc_bbox,
                             image,
                             color = color,
                             label = cls if class_name else str(cls_id),
                             line_thickness = line_thickness)
    else:
        raise ValueError('wrong bbox format')
    return image
def get_bbox(annots):
    """Extract the bbox values from a list of annotation dicts."""
    return [list(annotation.values()) for annotation in annots]
def get_imgsize(row):
    """Attach image width/height (read from the file header) to *row*."""
    width, height = imagesize.get(row['image_path'])
    row['width'] = width
    row['height'] = height
    return row
# https://www.kaggle.com/diegoalejogm/great-barrier-reefs-eda-with-animations
def create_animation(ims):
    """Animate a sequence of images at ~12 fps and return the FuncAnimation."""
    fig = plt.figure(figsize=(16, 12))
    plt.axis('off')
    shown = plt.imshow(ims[0])

    def _update(frame_idx):
        shown.set_array(ims[frame_idx])
        return [shown]

    return animation.FuncAnimation(fig, _update, frames=len(ims), interval=1000 // 12)
# https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py
def nms(dets, thresh):
    """Greedy non-maximum suppression on (N, 5) [x1, y1, x2, y2, score] boxes.

    Returns the indices of the kept boxes, highest score first.
    """
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        # intersection of the best box with every remaining box
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        overlap = inter / (areas[best] + areas[rest] - inter)
        # drop everything overlapping the kept box more than `thresh`
        order = rest[np.where(overlap <= thresh)[0]]
    return keep
# https://github.com/DocF/Soft-NMS/blob/master/soft_nms.py
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2):
    """
    py_cpu_softnms
    :param dets:   box coordinate matrix, format [y1, x1, y2, x2]
    :param sc:     score of each box
    :param Nt:     IoU overlap threshold
    :param sigma:  variance used by the gaussian weighting
    :param thresh: final score threshold
    :param method: weighting method (1: linear, 2: gaussian, otherwise hard NMS)
    :return:       indices of the boxes to keep

    NOTE: `dets` rows, `sc` and the derived areas are reordered/reweighted
    in place as the algorithm runs.
    """
    # indexes concatenate boxes with the last column
    N = dets.shape[0]
    indexes = np.array([np.arange(N)])
    dets = np.concatenate((dets, indexes.T), axis=1)
    # the order of boxes coordinate is [y1,x1,y2,x2]
    y1 = dets[:, 0]
    x1 = dets[:, 1]
    y2 = dets[:, 2]
    x2 = dets[:, 3]
    scores = sc
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    for i in range(N):
        # intermediate parameters for later parameters exchange
        tBD = dets[i, :].copy()
        tscore = scores[i].copy()
        tarea = areas[i].copy()
        pos = i + 1
        # find the highest-scoring box among the not-yet-processed ones
        if i != N-1:
            maxscore = np.max(scores[pos:], axis=0)
            maxpos = np.argmax(scores[pos:], axis=0)
        else:
            maxscore = scores[-1]
            maxpos = 0
        # swap it into position i (selection-sort style) so box i is the
        # current highest-scoring candidate
        if tscore < maxscore:
            dets[i, :] = dets[maxpos + i + 1, :]
            dets[maxpos + i + 1, :] = tBD
        tBD = dets[i, :]
        scores[i] = scores[maxpos + i + 1]
        scores[maxpos + i + 1] = tscore
        tscore = scores[i]
        areas[i] = areas[maxpos + i + 1]
        areas[maxpos + i + 1] = tarea
        tarea = areas[i]
        # IoU calculate
        xx1 = np.maximum(dets[i, 1], dets[pos:, 1])
        yy1 = np.maximum(dets[i, 0], dets[pos:, 0])
        xx2 = np.minimum(dets[i, 3], dets[pos:, 3])
        yy2 = np.minimum(dets[i, 2], dets[pos:, 2])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[pos:] - inter)
        # Three methods: 1.linear 2.gaussian 3.original NMS
        if method == 1:  # linear
            weight = np.ones(ovr.shape)
            weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt]
        elif method == 2:  # gaussian
            weight = np.exp(-(ovr * ovr) / sigma)
        else:  # original NMS
            weight = np.ones(ovr.shape)
            weight[ovr > Nt] = 0
        # decay (or zero) the scores of the remaining overlapping boxes
        scores[pos:] = weight * scores[pos:]
    # select the boxes and keep the corresponding indexes
    inds = dets[:, 4][scores > thresh]
    keep = inds.astype(int)
    return keep
def seed_torch(seed=42):
    """Seed every RNG in play (hash, python, numpy, torch CPU + CUDA) for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # force deterministic cuDNN kernels (may be slower)
    torch.backends.cudnn.deterministic = True
def create_logger(filename, filemode='a'):
    """Configure root logging to write to *filename* and mirror everything to stdout."""
    handlers = [
        logging.FileHandler(filename=filename, mode=filemode),
        logging.StreamHandler(sys.stdout),
    ]
    logging.basicConfig(
        format="%(asctime)s %(levelname)s: %(message)s",
        datefmt="%m/%d/%Y %I:%M:%S %p",
        level=logging.DEBUG,
        handlers=handlers,
    )
def save_pickle(obj, folder_path):
    """Pickle *obj* to *folder_path* with the highest protocol.

    BUG FIX: the original passed a bare open() to pickle.dump, leaking the
    file handle (and on some platforms delaying the flush); use a context
    manager so the file is always closed.
    """
    with open(folder_path, 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_pickle(folder_path):
    """Unpickle and return the object stored at *folder_path*.

    BUG FIX: the original passed a bare open() to pickle.load, leaking the
    file handle; use a context manager so the file is always closed.
    """
    with open(folder_path, 'rb') as f:
        return pickle.load(f)
def save_yaml(obj, folder_path):
    """Dump dict *obj* as YAML at *folder_path*, stringifying pathlib.Path values."""
    serializable = {
        key: str(value.resolve()) if isinstance(value, Path) else value
        for key, value in obj.items()
    }
    with open(folder_path, 'w') as fh:
        yaml.dump(serializable, fh)
def load_yaml(folder_path):
    """Parse the YAML file at *folder_path* and return the resulting object."""
    with open(folder_path) as fh:
        return yaml.load(fh, Loader=yaml.FullLoader)
def load_model(params):
    """Load a YOLOv5 model from a local repo checkpoint, falling back to torch.load,
    then set its inference-time NMS attributes from *params*.

    BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to `except Exception`.
    """
    try:
        model = torch.hub.load(params['repo'],
                               'custom',
                               path=params['ckpt_path'],
                               source='local',
                               force_reload=True)  # local repo
    except Exception:
        print("torch.hub.load failed, try torch.load")
        model = torch.load(params['ckpt_path'])
    model.conf = params['conf']  # NMS confidence threshold
    model.iou = params['iou']  # NMS IoU threshold
    model.classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for persons, cats and dogs
    model.multi_label = False  # NMS multiple labels per box
    model.max_det = 50  # maximum number of detections per image
    return model
def predict(model, img, size=768, augment=False, use_sahi=False):
    """Run detection on one image and return (bboxes, confs).

    With use_sahi=True the image is processed in overlapping 512x512 slices
    via sahi's sliced inference; otherwise the model is called directly
    (YOLOv5 hub-style API). Boxes are returned in coco xywh format;
    (np.array([]), []) is returned when nothing is detected.
    """
    if use_sahi:
        from sahi.predict import get_sliced_prediction
        results = get_sliced_prediction(
            img,
            model,
            slice_height = 512,
            slice_width = 512,
            overlap_height_ratio = 0.2,
            overlap_width_ratio = 0.2
        )
        preds = results.object_prediction_list
        bboxes = np.array([pred.bbox.to_voc_bbox() for pred in preds])
    else:
        results = model(img, size=size, augment=augment) # custom inference size
        preds = results.pandas().xyxy[0]
        bboxes = preds[['xmin','ymin','xmax','ymax']].values
    if len(bboxes):
        height, width = img.shape[:2]
        # both paths yield voc (x1, y1, x2, y2); convert to coco xywh
        bboxes = voc2coco(bboxes,height,width).astype(int)
        if use_sahi:
            confs = np.array([pred.score.value for pred in preds])
        else:
            confs = preds.confidence.values
        return bboxes, confs
    else:
        return np.array([]),[]
def format_prediction(bboxes, confs):
    """Serialize predictions as space-separated 'conf xmin ymin w h' groups."""
    pieces = []
    for (xmin, ymin, w, h), conf in zip(bboxes, confs):
        pieces.append(f'{conf} {xmin} {ymin} {w} {h}')
    return ' '.join(pieces)
def show_img(img, bboxes, confs, colors, bbox_format='yolo'):
    """Draw *bboxes* labelled with rounded confidences on *img*; return a PIL image."""
    conf_labels = [str(round(c, 2)) for c in confs]
    drawn = draw_bboxes(img=img,
                        bboxes=bboxes,
                        classes=conf_labels,
                        class_name=True,
                        colors=colors,
                        bbox_format=bbox_format,
                        line_thickness=2)
    return Image.fromarray(drawn)
def write_hyp(params):
    """Write the hyper-parameters as plain `key: value` lines to params['hyp_file']."""
    lines = [f"{key}: {val}\n" for key, val in params["hyp_param"].items()]
    with open(params["hyp_file"], mode="w") as f:
        f.writelines(lines)
def class2dict(f):
    """Collect a config class's non-dunder attributes into a dict."""
    return {name: getattr(f, name) for name in dir(f) if not name.startswith('__')}
def upload(params):
    """Package the experiment output directory as a Kaggle dataset and upload it.

    Moves wandb logs out of the output dir, writes the required
    dataset-metadata.json, then shells out to `kaggle datasets create`.

    BUG FIX / robustness: the metadata JSON was assembled from hand-written
    string fragments, which breaks silently if the experiment name ever
    contains a character needing JSON escaping; build it with json.dump.
    """
    import json
    data_version = "-".join(params["exp_name"].split("_"))
    # wandb logs should not ship with the dataset - park them next to the output dir
    if os.path.exists(params["output_dir"] / "wandb"):
        shutil.move(str(params["output_dir"] / "wandb"),
                    str(params["output_dir"].parent / f"{params['exp_name']}_wandb/")
                    )
    metadata = {
        "title": data_version,
        "id": f"vincentwang25/{data_version}",
        "licenses": [{"name": "CC0-1.0"}],
    }
    with open(params["output_dir"] / "dataset-metadata.json", "w") as f:
        json.dump(metadata, f, indent=1)
    subprocess.call(["kaggle", "datasets", "create", "-p", str(params["output_dir"]), "-r", "zip"])
def coco(df):
    """Build a COCO-format dict (categories/images/annotations) from *df*.

    Each row must provide video_id, video_frame and a list of xywh
    annotation dicts; all frames are assumed to be 1280x720.
    """
    categories = [{'id': 0, 'name': 'cots'}]
    images = []
    annotations = []
    next_annotation_id = 0
    for row_id, row in df.iterrows():
        images.append({
            "id": row_id,
            "file_name": f"video_{row['video_id']}_{row['video_frame']}.jpg",
            "height": 720,
            "width": 1280,
        })
        for bbox in row['annotations']:
            annotations.append({
                "id": next_annotation_id,
                "image_id": row_id,
                "category_id": 0,
                "bbox": list(bbox.values()),
                "area": bbox['width'] * bbox['height'],
                "segmentation": [],
                "iscrowd": 0,
            })
            next_annotation_id += 1
    return {'categories': categories, 'images': images, 'annotations': annotations}
def mmcfg_from_param(params):
    """Build an mmdetection Config from the experiment *params* dict.

    Starts from the base config file named in params['hyp_param']['base_file'],
    patches model heads/NMS/loss per model_type, sets the optimizer and a
    cosine LR schedule, then wires in the data pipelines.
    """
    from mmcv import Config
    # model
    cfg = Config.fromfile(params['hyp_param']['base_file'])
    cfg.work_dir = str(params['output_dir'])
    cfg.seed = 2022
    cfg.gpu_ids = range(2)
    cfg.load_from = params['hyp_param']['load_from']
    if params['hyp_param']['model_type'] == 'faster_rcnn':
        # single foreground class ("cots")
        cfg.model.roi_head.bbox_head.num_classes = 1
        cfg.model.roi_head.bbox_head.loss_bbox.type = params['hyp_param']['loss_fnc']
        cfg.model.rpn_head.loss_bbox.type = params['hyp_param']['loss_fnc']
        if params['hyp_param']['loss_fnc'] == "GIoULoss":
            # GIoU loss operates on decoded boxes
            cfg.model.roi_head.bbox_head.reg_decoded_bbox = True
            cfg.model.rpn_head.reg_decoded_bbox = True
        cfg.model.train_cfg.rpn_proposal.nms.type = params['hyp_param']['nms']
        cfg.model.test_cfg.rpn.nms.type = params['hyp_param']['nms']
        cfg.model.test_cfg.rcnn.nms.type = params['hyp_param']['nms']
        cfg.model.train_cfg.rcnn.sampler.type = params['hyp_param']['sampler']
    elif params['hyp_param']['model_type'] == 'swin':
        pass # already changed
    elif params['hyp_param']['model_type'] == 'vfnet':
        cfg.model.bbox_head.num_classes = 1
    # optimizer: explicit AdamW block, otherwise patch lr/weight_decay in place
    if params['hyp_param'].get("optimizer", cfg.optimizer.type) == "AdamW":
        cfg.optimizer = dict(
            type="AdamW",
            lr=params['hyp_param'].get("lr", cfg.optimizer.lr),
            weight_decay=params['hyp_param'].get(
                "weight_decay", cfg.optimizer.weight_decay
            ),
        )
    else:
        cfg.optimizer.lr = params['hyp_param'].get("lr", cfg.optimizer.lr)
        cfg.optimizer.weight_decay = params['hyp_param'].get(
            "weight_decay", cfg.optimizer.weight_decay)
    cfg.lr_config = dict(
        policy='CosineAnnealing',
        by_epoch=False,
        warmup='linear',
        warmup_iters= 1000,
        warmup_ratio= 1/10,
        min_lr=1e-07)
    # data
    cfg = add_data_pipeline(cfg, params)
    cfg.runner.max_epochs = params['epochs']
    cfg.evaluation.start = 1
    cfg.evaluation.interval = 1
    cfg.evaluation.save_best='auto'
    cfg.evaluation.metric ='bbox'
    cfg.checkpoint_config.interval = -1  # do not save periodic checkpoints
    cfg.log_config.interval = 500
    cfg.log_config.with_step = True
    cfg.log_config.by_epoch = True
    cfg.log_config.hooks =[dict(type='TextLoggerHook'),
                           dict(type='TensorboardLoggerHook')]
    cfg.workflow = [('train',1)]
    logging.info(str(cfg))
    return cfg
def add_data_pipeline(cfg, params):
    """Attach dataset definitions and train/val/test pipelines to *cfg*.

    Points train/val/test at the COCO annotation files under params['cfg_dir'],
    sizes batches per GPU, and assembles an Albumentations-based train
    pipeline with optional Mosaic/MixUp (which require wrapping the train
    set in a MultiImageMixDataset).
    """
    cfg.dataset_type = 'COCODataset'
    cfg.classes = ('cots',)
    cfg.data_root = str(params['data_path'].resolve())
    params['aug_param']['img_scale'] = (params['img_size'], params['img_size'])
    cfg.img_scale = params['aug_param']['img_scale']
    cfg.dataset_type = 'CocoDataset'
    # keep frames without annotations - empty frames matter for this dataset
    cfg.filter_empty_gt = False
    cfg.data.filter_empty_gt = False
    cfg.data.train.type = cfg.dataset_type
    cfg.data.train.classes = cfg.classes
    cfg.data.train.ann_file = str(params["cfg_dir"] / 'annotations_train.json')
    cfg.data.train.img_prefix = cfg.data_root + '/images/'
    cfg.data.train.filter_empty_gt = False
    cfg.data.test.type = cfg.dataset_type
    cfg.data.test.classes = cfg.classes
    cfg.data.test.ann_file = str(params["cfg_dir"] / 'annotations_valid.json')
    cfg.data.test.img_prefix = cfg.data_root + '/images/'
    cfg.data.test.filter_empty_gt = False
    cfg.data.val.type = cfg.dataset_type
    cfg.data.val.classes = cfg.classes
    cfg.data.val.ann_file = str(params["cfg_dir"] / 'annotations_valid.json')
    cfg.data.val.img_prefix = cfg.data_root + '/images/'
    cfg.data.val.filter_empty_gt = False
    cfg.data.samples_per_gpu = params['batch'] // len(cfg.gpu_ids)
    cfg.data.workers_per_gpu = params['workers'] // len(cfg.gpu_ids)
    # train pipeline
    # NOTE(review): get_albu_transforms is defined elsewhere in the project -
    # assumed to return an Albumentations transform list
    albu_train_transforms = get_albu_transforms(params['aug_param'], is_train=True)
    if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']:
        # MultiImageMixDataset does its own loading, so the pipeline starts empty
        train_pipeline = []
    else:
        train_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True)]
    if params['aug_param']['use_mosaic']:
        train_pipeline.append(dict(type='Mosaic', img_scale=cfg.img_scale, pad_val=114.0))
    else:
        train_pipeline.append(dict(type='Resize', img_scale=cfg.img_scale, keep_ratio=False))
    train_pipeline = train_pipeline +[
        dict(type='Pad', size_divisor=32),
        dict(
            type='Albu',
            transforms=albu_train_transforms,
            bbox_params=dict(
                type='BboxParams',
                format='pascal_voc',
                label_fields=['gt_labels'],
                min_visibility=0.0,
                filter_lost_elements=True),
            keymap={
                'img': 'image',
                'gt_bboxes': 'bboxes'
            },
            update_pad_shape=False,
            skip_img_without_anno=False
        )]
    if params['aug_param']['use_mixup']:
        train_pipeline.append(dict(type='MixUp', img_scale=cfg.img_scale, ratio_range=(0.8, 1.6), pad_val=114.0))
    # final formatting steps shared by every train configuration
    train_pipeline = train_pipeline +\
        [
            dict(type='Normalize', **cfg.img_norm_cfg),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect',
                 keys=['img', 'gt_bboxes', 'gt_labels'],
                 meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape',
                            'scale_factor', 'img_norm_cfg')),
        ]
    val_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(
            type='MultiScaleFlipAug',
            img_scale=cfg.img_scale,
            flip=False,
            transforms=[
                dict(type='Resize', keep_ratio=True),
                dict(type='RandomFlip'),
                dict(type='Normalize', **cfg.img_norm_cfg),
                dict(type='Pad', size_divisor=32),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img'])
            ])
    ]
    test_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(
            type='MultiScaleFlipAug',
            img_scale=[cfg.img_scale],
            flip=[False],
            transforms=[
                dict(type='Resize', keep_ratio=False),
                dict(type='Pad', size_divisor=32),
                dict(type='RandomFlip', direction='horizontal'),
                dict(type='Normalize', **cfg.img_norm_cfg),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img']),
            ])
    ]
    cfg.train_pipeline = train_pipeline
    cfg.val_pipeline = val_pipeline
    cfg.test_pipeline = test_pipeline
    if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']:
        # Mosaic/MixUp need access to several images at once, which the
        # MultiImageMixDataset wrapper provides
        cfg.train_dataset = dict(
            type='MultiImageMixDataset',
            dataset=dict(
                type=cfg.dataset_type,
                classes=cfg.classes,
                ann_file=str(params["cfg_dir"] / 'annotations_train.json'),
                img_prefix=cfg.data_root + '/images/',
                pipeline=[
                    dict(type='LoadImageFromFile'),
                    dict(type='LoadAnnotations', with_bbox=True)
                ],
                filter_empty_gt=False,
            ),
            pipeline=cfg.train_pipeline
        )
        cfg.data.train = cfg.train_dataset
    else:
        cfg.data.train.pipeline = cfg.train_pipeline
    cfg.data.val.pipeline = cfg.val_pipeline
    cfg.data.test.pipeline = cfg.test_pipeline
    return cfg
def find_ckp(output_dir):
    """Return the path of the 'best*.pth' checkpoint saved under *output_dir*."""
    matches = glob(str(output_dir / "best*.pth"))
    return matches[0]
# IDLSave - a python module to read IDL 'save' files
# Copyright (c) 2010 Thomas P. Robitaille
# Many thanks to Craig Markwardt for publishing the Unofficial Format
# Specification for IDL .sav files, without which this Python module would not
# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt).
# This code was developed by with permission from ITT Visual Information
# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems,
# Inc. for their Interactive Data Language software.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__all__ = ['readsav']
import struct
import numpy as np
from numpy.compat import asstr
import tempfile
import zlib
import warnings
# Define the different data types that can be found in an IDL save file.
# Maps the IDL typecode to a big-endian numpy dtype string; '|O' marks
# types that must be stored as Python objects (strings, structures,
# pointers, object references).
DTYPE_DICT = {1: '>u1',     # byte
              2: '>i2',     # int
              3: '>i4',     # long
              4: '>f4',     # float
              5: '>f8',     # double
              6: '>c8',     # complex (float)
              7: '|O',      # string
              8: '|O',      # structure
              9: '>c16',    # complex (double)
              10: '|O',     # pointer
              11: '|O',     # object reference
              12: '>u2',    # unsigned int
              13: '>u4',    # unsigned long
              14: '>i8',    # 64-bit int
              15: '>u8'}    # unsigned 64-bit int
# Define the different record types that can be found in an IDL save file
RECTYPE_DICT = {0: "START_MARKER",
                1: "COMMON_VARIABLE",
                2: "VARIABLE",
                3: "SYSTEM_VARIABLE",
                6: "END_MARKER",
                10: "TIMESTAMP",
                12: "COMPILED",
                13: "IDENTIFICATION",
                14: "VERSION",
                15: "HEAP_HEADER",
                16: "HEAP_DATA",
                17: "PROMOTE64",
                19: "NOTICE",
                20: "DESCRIPTION"}
# Define a dictionary to contain structure definitions
# (filled in lazily as predefined structures are encountered while reading)
STRUCT_DICT = {}
def _align_32(f):
'''Align to the next 32-bit position in a file'''
pos = f.tell()
if pos % 4 != 0:
f.seek(pos + 4 - pos % 4)
return
def _skip_bytes(f, n):
'''Skip `n` bytes'''
f.read(n)
return
def _read_bytes(f, n):
'''Read the next `n` bytes'''
return f.read(n)
def _read_byte(f):
'''Read a single byte'''
return np.uint8(struct.unpack('>B', f.read(4)[:1])[0])
def _read_long(f):
'''Read a signed 32-bit integer'''
return np.int32(struct.unpack('>l', f.read(4))[0])
def _read_int16(f):
'''Read a signed 16-bit integer'''
return np.int16(struct.unpack('>h', f.read(4)[2:4])[0])
def _read_int32(f):
'''Read a signed 32-bit integer'''
return np.int32(struct.unpack('>i', f.read(4))[0])
def _read_int64(f):
'''Read a signed 64-bit integer'''
return np.int64(struct.unpack('>q', f.read(8))[0])
def _read_uint16(f):
'''Read an unsigned 16-bit integer'''
return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0])
def _read_uint32(f):
'''Read an unsigned 32-bit integer'''
return np.uint32(struct.unpack('>I', f.read(4))[0])
def _read_uint64(f):
'''Read an unsigned 64-bit integer'''
return np.uint64(struct.unpack('>Q', f.read(8))[0])
def _read_float32(f):
'''Read a 32-bit float'''
return np.float32(struct.unpack('>f', f.read(4))[0])
def _read_float64(f):
'''Read a 64-bit float'''
return np.float64(struct.unpack('>d', f.read(8))[0])
class Pointer(object):
    """Placeholder for an IDL pointer: records the heap index it refers to."""

    def __init__(self, index):
        self.index = index
class ObjectPointer(Pointer):
    """Placeholder for an IDL object pointer (same payload as Pointer)."""
    pass
def _read_string(f):
    """Read a length-prefixed string and skip its 32-bit alignment padding."""
    length = _read_long(f)
    if length <= 0:
        return ''
    raw = _read_bytes(f, length)
    _align_32(f)
    return asstr(raw)
def _read_string_data(f):
    """Read a data string whose length is stored twice before the payload."""
    length = _read_long(f)
    if length <= 0:
        return ''
    length = _read_long(f)  # the length is repeated immediately before the bytes
    string_data = _read_bytes(f, length)
    _align_32(f)
    return string_data
def _read_data(f, dtype):
    '''Read a variable with a specified data type'''
    # Dispatch on the IDL typecode (see DTYPE_DICT for the code -> dtype map).
    if dtype == 1:
        # byte values are preceded by a 32-bit count that should be 1 here
        if _read_int32(f) != 1:
            raise Exception("Error occurred while reading byte variable")
        return _read_byte(f)
    elif dtype == 2:
        return _read_int16(f)
    elif dtype == 3:
        return _read_int32(f)
    elif dtype == 4:
        return _read_float32(f)
    elif dtype == 5:
        return _read_float64(f)
    elif dtype == 6:
        # complex64: two consecutive float32 values (real, imag)
        real = _read_float32(f)
        imag = _read_float32(f)
        return np.complex64(real + imag * 1j)
    elif dtype == 7:
        return _read_string_data(f)
    elif dtype == 8:
        # structures are handled by _read_structure, never through this path
        raise Exception("Should not be here - please report this")
    elif dtype == 9:
        # complex128: two consecutive float64 values (real, imag)
        real = _read_float64(f)
        imag = _read_float64(f)
        return np.complex128(real + imag * 1j)
    elif dtype == 10:
        # only the heap index is stored; resolution happens later
        return Pointer(_read_int32(f))
    elif dtype == 11:
        return ObjectPointer(_read_int32(f))
    elif dtype == 12:
        return _read_uint16(f)
    elif dtype == 13:
        return _read_uint32(f)
    elif dtype == 14:
        return _read_int64(f)
    elif dtype == 15:
        return _read_uint64(f)
    else:
        raise Exception("Unknown IDL type: %i - please report this" % dtype)
def _read_structure(f, array_desc, struct_desc):
    '''
    Read a structure, with the array and structure descriptors given as
    `array_desc` and `structure_desc` respectively.

    Returns a numpy recarray with one field per structure tag; nested
    arrays and structures are stored as objects.
    '''
    nrows = array_desc['nelements']
    columns = struct_desc['tagtable']
    # Build the record dtype: object fields for nested arrays/structures,
    # otherwise the big-endian scalar dtype from DTYPE_DICT. Each field is
    # registered under both a lower-case name and the original tag name.
    dtype = []
    for col in columns:
        if col['structure'] or col['array']:
            dtype.append(((col['name'].lower(), col['name']), np.object_))
        else:
            if col['typecode'] in DTYPE_DICT:
                dtype.append(((col['name'].lower(), col['name']),
                              DTYPE_DICT[col['typecode']]))
            else:
                raise Exception("Variable type %i not implemented" %
                                col['typecode'])
    structure = np.recarray((nrows, ), dtype=dtype)
    # Values are stored row by row, column by column in the file
    for i in range(nrows):
        for col in columns:
            dtype = col['typecode']
            if col['structure']:
                structure[col['name']][i] = _read_structure(f,
                                                            struct_desc['arrtable'][col['name']],
                                                            struct_desc['structtable'][col['name']])
            elif col['array']:
                structure[col['name']][i] = _read_array(f, dtype,
                                                        struct_desc['arrtable'][col['name']])
            else:
                structure[col['name']][i] = _read_data(f, dtype)
    # Reshape structure if needed
    if array_desc['ndims'] > 1:
        dims = array_desc['dims'][:int(array_desc['ndims'])]
        # dims are reversed - presumably to map IDL's fastest-first dimension
        # order onto numpy's row-major layout (TODO confirm against spec)
        dims.reverse()
        structure = structure.reshape(dims)
    return structure
def _read_array(f, typecode, array_desc):
    '''
    Read an array of type `typecode`, with the array descriptor given as
    `array_desc`.
    '''
    if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]:
        # these types are tightly packed and can be read in one go
        if typecode == 1:
            # byte arrays carry their own byte count before the data
            nbytes = _read_int32(f)
            if nbytes != array_desc['nbytes']:
                warnings.warn("Not able to verify number of bytes from header")
        # Read bytes as numpy array
        array = np.frombuffer(f.read(array_desc['nbytes']),
                              dtype=DTYPE_DICT[typecode])
    elif typecode in [2, 12]:
        # These are 2 byte types, need to skip every two as they are not packed
        array = np.frombuffer(f.read(array_desc['nbytes']*2),
                              dtype=DTYPE_DICT[typecode])[1::2]
    else:
        # Read bytes into list (element types that need per-item decoding:
        # strings, pointers, structures, ...)
        array = []
        for i in range(array_desc['nelements']):
            dtype = typecode
            data = _read_data(f, dtype)
            array.append(data)
        array = np.array(array, dtype=np.object_)
    # Reshape array if needed
    if array_desc['ndims'] > 1:
        dims = array_desc['dims'][:int(array_desc['ndims'])]
        # reversed - presumably to map IDL's fastest-first dimension order
        # onto numpy's row-major layout (TODO confirm against spec)
        dims.reverse()
        array = array.reshape(dims)
    # Go to next alignment position
    _align_32(f)
    return array
def _read_record(f):
    '''Read one full save-file record; leaves *f* positioned at the next record.

    Returns a dict keyed by the record type; variable records additionally
    carry 'varname'/'heap_index' and 'data'.

    BUG FIX: two branch labels could never match what RECTYPE_DICT produces -
    "IDENTIFICATON" (misspelled) and "COMMONBLOCK" (the dict maps type 1 to
    "COMMON_VARIABLE") - so both record types fell through to the final
    raise. The comparisons now use the dict's actual names.
    '''
    record = {'rectype': _read_long(f)}
    # offset of the next record, stored as two 32-bit words (low, high)
    nextrec = _read_uint32(f)
    nextrec += _read_uint32(f) * 2**32
    _skip_bytes(f, 4)
    if not record['rectype'] in RECTYPE_DICT:
        raise Exception("Unknown RECTYPE: %i" % record['rectype'])
    record['rectype'] = RECTYPE_DICT[record['rectype']]
    if record['rectype'] in ["VARIABLE", "HEAP_DATA"]:
        if record['rectype'] == "VARIABLE":
            record['varname'] = _read_string(f)
        else:
            record['heap_index'] = _read_long(f)
            _skip_bytes(f, 4)
        rectypedesc = _read_typedesc(f)
        if rectypedesc['typecode'] == 0:
            if nextrec == f.tell():
                record['data'] = None  # Indicates NULL value
            else:
                raise ValueError("Unexpected type code: 0")
        else:
            varstart = _read_long(f)
            if varstart != 7:
                raise Exception("VARSTART is not 7")
            if rectypedesc['structure']:
                record['data'] = _read_structure(f, rectypedesc['array_desc'],
                                                 rectypedesc['struct_desc'])
            elif rectypedesc['array']:
                record['data'] = _read_array(f, rectypedesc['typecode'],
                                             rectypedesc['array_desc'])
            else:
                dtype = rectypedesc['typecode']
                record['data'] = _read_data(f, dtype)
    elif record['rectype'] == "TIMESTAMP":
        _skip_bytes(f, 4*256)
        record['date'] = _read_string(f)
        record['user'] = _read_string(f)
        record['host'] = _read_string(f)
    elif record['rectype'] == "VERSION":
        record['format'] = _read_long(f)
        record['arch'] = _read_string(f)
        record['os'] = _read_string(f)
        record['release'] = _read_string(f)
    elif record['rectype'] == "IDENTIFICATION":  # was "IDENTIFICATON" (dead branch)
        record['author'] = _read_string(f)
        record['title'] = _read_string(f)
        record['idcode'] = _read_string(f)
    elif record['rectype'] == "NOTICE":
        record['notice'] = _read_string(f)
    elif record['rectype'] == "DESCRIPTION":
        record['description'] = _read_string_data(f)
    elif record['rectype'] == "HEAP_HEADER":
        record['nvalues'] = _read_long(f)
        record['indices'] = [_read_long(f) for _ in range(record['nvalues'])]
    elif record['rectype'] == "COMMON_VARIABLE":  # was "COMMONBLOCK" (dead branch)
        record['nvars'] = _read_long(f)
        record['name'] = _read_string(f)
        record['varnames'] = [_read_string(f) for _ in range(record['nvars'])]
    elif record['rectype'] == "END_MARKER":
        record['end'] = True
    elif record['rectype'] == "UNKNOWN":
        warnings.warn("Skipping UNKNOWN record")
    elif record['rectype'] == "SYSTEM_VARIABLE":
        warnings.warn("Skipping SYSTEM_VARIABLE record")
    else:
        raise Exception("record['rectype']=%s not implemented" %
                        record['rectype'])
    # always jump to the next record using the offset from the header
    f.seek(nextrec)
    return record
def _read_typedesc(f):
    """Read a type descriptor: the typecode, the variable flags and, for
    array/structure variables, their nested descriptors."""
    typedesc = {'typecode': _read_long(f), 'varflags': _read_long(f)}
    flags = typedesc['varflags']
    if flags & 2 == 2:
        raise Exception("System variables not implemented")
    # flag bit 2 marks arrays, bit 5 marks structures
    typedesc['array'] = flags & 4 == 4
    typedesc['structure'] = flags & 32 == 32
    if typedesc['structure']:
        typedesc['array_desc'] = _read_arraydesc(f)
        typedesc['struct_desc'] = _read_structdesc(f)
    elif typedesc['array']:
        typedesc['array_desc'] = _read_arraydesc(f)
    return typedesc
def _read_arraydesc(f):
    '''Function to read in an array descriptor'''
    # ARRSTART distinguishes the 32-bit (8) and 64-bit (18) descriptor layouts
    arraydesc = {'arrstart': _read_long(f)}
    if arraydesc['arrstart'] == 8:
        _skip_bytes(f, 4)  # unknown/unused longword
        arraydesc['nbytes'] = _read_long(f)
        arraydesc['nelements'] = _read_long(f)
        arraydesc['ndims'] = _read_long(f)
        _skip_bytes(f, 8)  # two more unused longwords
        arraydesc['nmax'] = _read_long(f)
        # One dimension size per slot; only the first ndims are meaningful
        arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])]
    elif arraydesc['arrstart'] == 18:
        warnings.warn("Using experimental 64-bit array read")
        _skip_bytes(f, 8)
        arraydesc['nbytes'] = _read_uint64(f)
        arraydesc['nelements'] = _read_uint64(f)
        arraydesc['ndims'] = _read_long(f)
        _skip_bytes(f, 8)
        # The 64-bit layout always carries 8 dimension slots
        arraydesc['nmax'] = 8
        arraydesc['dims'] = []
        for d in range(arraydesc['nmax']):
            # Each dimension is stored as a (zero, size) pair of longwords
            v = _read_long(f)
            if v != 0:
                raise Exception("Expected a zero in ARRAY_DESC")
            arraydesc['dims'].append(_read_long(f))
    else:
        raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart'])
    return arraydesc
def _read_structdesc(f):
    '''Function to read in a structure descriptor'''
    structdesc = {}
    structstart = _read_long(f)
    if structstart != 9:
        raise Exception("STRUCTSTART should be 9")
    structdesc['name'] = _read_string(f)
    predef = _read_long(f)
    structdesc['ntags'] = _read_long(f)
    structdesc['nbytes'] = _read_long(f)
    # PREDEF is a bitmask: bit 0 = definition seen earlier in the file,
    # bit 1 = class inheritance, bit 2 = superclass definition
    structdesc['predef'] = predef & 1
    structdesc['inherits'] = predef & 2
    structdesc['is_super'] = predef & 4
    if not structdesc['predef']:
        structdesc['tagtable'] = [_read_tagdesc(f)
                                  for _ in range(structdesc['ntags'])]
        # Tag names follow all the tag descriptors
        for tag in structdesc['tagtable']:
            tag['name'] = _read_string(f)
        # Array/structure descriptors follow, only for the tags that need them
        structdesc['arrtable'] = {tag['name']: _read_arraydesc(f)
                                  for tag in structdesc['tagtable']
                                  if tag['array']}
        structdesc['structtable'] = {tag['name']: _read_structdesc(f)
                                     for tag in structdesc['tagtable']
                                     if tag['structure']}
        if structdesc['inherits'] or structdesc['is_super']:
            structdesc['classname'] = _read_string(f)
            structdesc['nsupclasses'] = _read_long(f)
            structdesc['supclassnames'] = [
                _read_string(f) for _ in range(structdesc['nsupclasses'])]
            structdesc['supclasstable'] = [
                _read_structdesc(f) for _ in range(structdesc['nsupclasses'])]
        # Cache the full definition so later PREDEF records can reuse it
        STRUCT_DICT[structdesc['name']] = structdesc
    else:
        # PREDEF set: the stream carries only the name; reuse the cached
        # definition recorded by an earlier occurrence
        if not structdesc['name'] in STRUCT_DICT:
            raise Exception("PREDEF=1 but can't find definition")
        structdesc = STRUCT_DICT[structdesc['name']]
    return structdesc
def _read_tagdesc(f):
    '''Read a single tag descriptor of a structure definition.'''
    offset = _read_long(f)
    if offset == -1:
        # Saturated 32-bit field: the real offset follows as 64 bits
        offset = _read_uint64(f)
    typecode = _read_long(f)
    tagflags = _read_long(f)
    # Assume '10'x is scalar
    return {'offset': offset,
            'typecode': typecode,
            'array': tagflags & 4 == 4,
            'structure': tagflags & 32 == 32,
            'scalar': typecode in DTYPE_DICT}
def _replace_heap(variable, heap):
    '''
    Recursively replace Pointer placeholders in `variable` with the values
    they reference in `heap`.

    Returns a (replace, new) tuple: `replace` tells the caller that
    `variable` itself was a pointer and must be substituted by `new`;
    containers are fixed up in place and return False.
    '''
    if isinstance(variable, Pointer):
        while isinstance(variable, Pointer):
            if variable.index == 0:
                # Index 0 is IDL's null pointer
                variable = None
            else:
                if variable.index in heap:
                    variable = heap[variable.index]
                else:
                    warnings.warn("Variable referenced by pointer not found "
                                  "in heap: variable will be set to None")
                    variable = None
        replace, new = _replace_heap(variable, heap)
        if replace:
            variable = new
        return True, variable
    elif isinstance(variable, np.rec.recarray):
        # np.rec.recarray is the public alias of np.core.records.recarray,
        # which is gone in NumPy 2.0 (np.core is private/removed).
        # Loop over records
        for ir, record in enumerate(variable):
            replace, new = _replace_heap(record, heap)
            if replace:
                variable[ir] = new
        return False, variable
    elif isinstance(variable, np.rec.record):
        # Loop over values
        for iv, value in enumerate(variable):
            replace, new = _replace_heap(value, heap)
            if replace:
                variable[iv] = new
        return False, variable
    elif isinstance(variable, np.ndarray):
        # Only object arrays can contain pointers
        if variable.dtype.type is np.object_:
            for iv in range(variable.size):
                replace, new = _replace_heap(variable.item(iv), heap)
                if replace:
                    # ndarray.itemset() was removed in NumPy 2.0; assigning
                    # through .flat is the portable flat-index equivalent
                    variable.flat[iv] = new
        return False, variable
    else:
        return False, variable
class AttrDict(dict):
    '''
    A case-insensitive dictionary with access via item, attribute, and call
    notations:
    >>> d = AttrDict()
    >>> d['Variable'] = 123
    >>> d['Variable']
    123
    >>> d.Variable
    123
    >>> d.variable
    123
    >>> d('VARIABLE')
    123
    '''
    def __init__(self, init=None):
        # No mutable default argument (was ``init={}``).
        dict.__init__(self)
        if init:
            # Route initial items through __setitem__ so their keys are
            # lowercased; previously a mixed-case init mapping was stored
            # verbatim, defeating the case-insensitive lookup promised above.
            for key, value in dict(init).items():
                self[key] = value

    def __getitem__(self, name):
        # Keys are normalized to lowercase on both read and write
        return super().__getitem__(name.lower())

    def __setitem__(self, key, value):
        return super().__setitem__(key.lower(), value)

    __getattr__ = __getitem__
    __setattr__ = __setitem__
    __call__ = __getitem__
def readsav(file_name, idict=None, python_dict=False,
            uncompressed_file_name=None, verbose=False):
    """
    Read an IDL .sav file.
    Parameters
    ----------
    file_name : str
        Name of the IDL save file.
    idict : dict, optional
        Dictionary in which to insert .sav file variables.
    python_dict : bool, optional
        By default, the object return is not a Python dictionary, but a
        case-insensitive dictionary with item, attribute, and call access
        to variables. To get a standard Python dictionary, set this option
        to True.
    uncompressed_file_name : str, optional
        This option only has an effect for .sav files written with the
        /compress option. If a file name is specified, compressed .sav
        files are uncompressed to this file. Otherwise, readsav will use
        the `tempfile` module to determine a temporary filename
        automatically, and will remove the temporary file upon successfully
        reading it in.
    verbose : bool, optional
        Whether to print out information about the save file, including
        the records read, and available variables.
    Returns
    -------
    idl_dict : AttrDict or dict
        If `python_dict` is set to False (default), this function returns a
        case-insensitive dictionary with item, attribute, and call access
        to variables. If `python_dict` is set to True, this function
        returns a Python dictionary with all variable names in lowercase.
        If `idict` was specified, then variables are written to the
        dictionary specified, and the updated dictionary is returned.
    Examples
    --------
    >>> from os.path import dirname, join as pjoin
    >>> import scipy.io as sio
    >>> from scipy.io import readsav
    Get the filename for an example .sav file from the tests/data directory.
    >>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
    >>> sav_fname = pjoin(data_dir, 'array_float32_1d.sav')
    Load the .sav file contents.
    >>> sav_data = readsav(sav_fname)
    Get keys of the .sav file contents.
    >>> print(sav_data.keys())
    dict_keys(['array1d'])
    Access a content with a key.
    >>> print(sav_data['array1d'])
    [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0.]
    """
    # Initialize record and variable holders
    records = []
    if python_dict or idict:
        variables = {}
    else:
        variables = AttrDict()
    # Open the IDL file
    f = open(file_name, 'rb')
    # Read the signature, which should be 'SR'
    signature = _read_bytes(f, 2)
    if signature != b'SR':
        raise Exception(f"Invalid SIGNATURE: {signature}")
    # Next, the record format, which is '\x00\x04' for normal .sav
    # files, and '\x00\x06' for compressed .sav files.
    recfmt = _read_bytes(f, 2)
    if recfmt == b'\x00\x04':
        pass
    elif recfmt == b'\x00\x06':
        if verbose:
            print("IDL Save file is compressed")
        if uncompressed_file_name:
            fout = open(uncompressed_file_name, 'w+b')
        else:
            fout = tempfile.NamedTemporaryFile(suffix='.sav')
        if verbose:
            print(f" -> expanding to {fout.name}")
        # Write header
        fout.write(b'SR\x00\x04')
        # Cycle through records
        while True:
            # Read record type
            rectype = _read_long(f)
            fout.write(struct.pack('>l', int(rectype)))
            # Read position of next record and return as int
            nextrec = _read_uint32(f)
            nextrec += _read_uint32(f) * 2**32
            # Read the unknown 4 bytes
            unknown = f.read(4)
            # Check if the end of the file has been reached
            if RECTYPE_DICT[rectype] == 'END_MARKER':
                fout.write(struct.pack('>I', int(nextrec) % 2**32))
                fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
                fout.write(unknown)
                break
            # Find current position
            pos = f.tell()
            # Decompress record
            rec_string = zlib.decompress(f.read(nextrec-pos))
            # Find new position of next record
            nextrec = fout.tell() + len(rec_string) + 12
            # Write out record
            fout.write(struct.pack('>I', int(nextrec % 2**32)))
            fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
            fout.write(unknown)
            fout.write(rec_string)
        # Close the original compressed file
        f.close()
        # Set f to be the decompressed file, and skip the first four bytes
        f = fout
        f.seek(4)
    else:
        raise Exception(f"Invalid RECFMT: {recfmt}")
    # Loop through records, and add them to the list
    while True:
        r = _read_record(f)
        records.append(r)
        if 'end' in r:
            if r['end']:
                break
    # Close the file
    f.close()
    # Find heap data variables
    heap = {}
    for r in records:
        if r['rectype'] == "HEAP_DATA":
            heap[r['heap_index']] = r['data']
    # Find all variables
    for r in records:
        if r['rectype'] == "VARIABLE":
            replace, new = _replace_heap(r['data'], heap)
            if replace:
                r['data'] = new
            variables[r['varname'].lower()] = r['data']
    if verbose:
        # BUG FIX: the f-string replacement fields below previously reused
        # double quotes inside double-quoted f-strings, which is a syntax
        # error on Python < 3.12 (PEP 701); keys are now single-quoted.
        # Print out timestamp info about the file
        for record in records:
            if record['rectype'] == "TIMESTAMP":
                print("-"*50)
                print(f"Date: {record['date']}")
                print(f"User: {record['user']}")
                print(f"Host: {record['host']}")
                break
        # Print out version info about the file
        for record in records:
            if record['rectype'] == "VERSION":
                print("-"*50)
                print(f"Format: {record['format']}")
                print(f"Architecture: {record['arch']}")
                print(f"Operating System: {record['os']}")
                print(f"IDL Version: {record['release']}")
                break
        # Print out identification info about the file
        for record in records:
            # NOTE(review): "IDENTIFICATON" matches the spelling used in
            # _read_record, but verify against RECTYPE_DICT — this looks
            # like a long-standing typo for "IDENTIFICATION".
            if record['rectype'] == "IDENTIFICATON":
                print("-"*50)
                print(f"Author: {record['author']}")
                print(f"Title: {record['title']}")
                print(f"ID Code: {record['idcode']}")
                break
        # Print out descriptions saved with the file
        for record in records:
            if record['rectype'] == "DESCRIPTION":
                print("-"*50)
                print(f"Description: {record['description']}")
                break
        print("-"*50)
        print("Successfully read %i records of which:" %
              (len(records)))
        # Create convenience list of record types
        rectypes = [r['rectype'] for r in records]
        for rt in set(rectypes):
            if rt != 'END_MARKER':
                print(" - %i are of type %s" % (rectypes.count(rt), rt))
        print("-"*50)
        if 'VARIABLE' in rectypes:
            print("Available variables:")
            for var in variables:
                print(f" - {var} [{type(variables[var])}]")
            print("-"*50)
    if idict:
        for var in variables:
            idict[var] = variables[var]
        return idict
    else:
        return variables
| # IDLSave - a python module to read IDL 'save' files
# Copyright (c) 2010 Thomas P. Robitaille
# Many thanks to Craig Markwardt for publishing the Unofficial Format
# Specification for IDL .sav files, without which this Python module would not
# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt).
# This code was developed by with permission from ITT Visual Information
# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems,
# Inc. for their Interactive Data Language software.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__all__ = ['readsav']
import struct
import numpy as np
from numpy.compat import asstr
import tempfile
import zlib
import warnings
# Define the different data types that can be found in an IDL save file
# (maps the IDL type code to a big-endian NumPy dtype string; '|O' marks
# types that are held as Python objects — see _read_data for which codes
# are strings, pointers and object references)
DTYPE_DICT = {1: '>u1',
              2: '>i2',
              3: '>i4',
              4: '>f4',
              5: '>f8',
              6: '>c8',
              7: '|O',
              8: '|O',
              9: '>c16',
              10: '|O',
              11: '|O',
              12: '>u2',
              13: '>u4',
              14: '>i8',
              15: '>u8'}
# Define the different record types that can be found in an IDL save file
RECTYPE_DICT = {0: "START_MARKER",
                1: "COMMON_VARIABLE",
                2: "VARIABLE",
                3: "SYSTEM_VARIABLE",
                6: "END_MARKER",
                10: "TIMESTAMP",
                12: "COMPILED",
                13: "IDENTIFICATION",
                14: "VERSION",
                15: "HEAP_HEADER",
                16: "HEAP_DATA",
                17: "PROMOTE64",
                19: "NOTICE",
                20: "DESCRIPTION"}
# Define a dictionary to contain structure definitions
# (populated by _read_structdesc and consulted again for PREDEF records)
STRUCT_DICT = {}
def _align_32(f):
'''Align to the next 32-bit position in a file'''
pos = f.tell()
if pos % 4 != 0:
f.seek(pos + 4 - pos % 4)
return
def _skip_bytes(f, n):
'''Skip `n` bytes'''
f.read(n)
return
def _read_bytes(f, n):
    '''Read the next `n` bytes'''
    return f.read(n)
def _read_byte(f):
    '''Read a single byte'''
    # Scalars occupy a full big-endian 4-byte slot in the file; only the
    # first byte of the slot is significant
    return np.uint8(struct.unpack('>B', f.read(4)[:1])[0])
def _read_long(f):
    '''Read a signed 32-bit integer'''
    return np.int32(struct.unpack('>l', f.read(4))[0])
def _read_int16(f):
    '''Read a signed 16-bit integer'''
    # 16-bit values sit in the last two bytes of a 4-byte slot
    return np.int16(struct.unpack('>h', f.read(4)[2:4])[0])
def _read_int32(f):
    '''Read a signed 32-bit integer'''
    return np.int32(struct.unpack('>i', f.read(4))[0])
def _read_int64(f):
    '''Read a signed 64-bit integer'''
    return np.int64(struct.unpack('>q', f.read(8))[0])
def _read_uint16(f):
    '''Read an unsigned 16-bit integer'''
    # Same 4-byte-slot layout as _read_int16
    return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0])
def _read_uint32(f):
    '''Read an unsigned 32-bit integer'''
    return np.uint32(struct.unpack('>I', f.read(4))[0])
def _read_uint64(f):
    '''Read an unsigned 64-bit integer'''
    return np.uint64(struct.unpack('>Q', f.read(8))[0])
def _read_float32(f):
    '''Read a 32-bit float'''
    return np.float32(struct.unpack('>f', f.read(4))[0])
def _read_float64(f):
    '''Read a 64-bit float'''
    return np.float64(struct.unpack('>d', f.read(8))[0])
class Pointer(object):
    '''Class used to define pointers'''
    def __init__(self, index):
        # Heap index of the referenced value; 0 denotes a null pointer
        self.index = index
class ObjectPointer(Pointer):
    '''Class used to define object pointers'''
    pass
def _read_string(f):
    '''Read a length-prefixed string, aligning to 32 bits afterwards.'''
    length = _read_long(f)
    if length <= 0:
        # Zero-length strings carry no payload and no padding
        return ''
    chars = _read_bytes(f, length)
    _align_32(f)
    return asstr(chars)
def _read_string_data(f):
    '''Read a data string; the length is stored twice in the stream.'''
    if _read_long(f) <= 0:
        return ''
    # The second copy of the length immediately precedes the payload
    length = _read_long(f)
    string_data = _read_bytes(f, length)
    _align_32(f)
    return string_data
def _read_data(f, dtype):
'''Read a variable with a specified data type'''
if dtype == 1:
if _read_int32(f) != 1:
raise Exception("Error occurred while reading byte variable")
return _read_byte(f)
elif dtype == 2:
return _read_int16(f)
elif dtype == 3:
return _read_int32(f)
elif dtype == 4:
return _read_float32(f)
elif dtype == 5:
return _read_float64(f)
elif dtype == 6:
real = _read_float32(f)
imag = _read_float32(f)
return np.complex64(real + imag * 1j)
elif dtype == 7:
return _read_string_data(f)
elif dtype == 8:
raise Exception("Should not be here - please report this")
elif dtype == 9:
real = _read_float64(f)
imag = _read_float64(f)
return np.complex128(real + imag * 1j)
elif dtype == 10:
return Pointer(_read_int32(f))
elif dtype == 11:
return ObjectPointer(_read_int32(f))
elif dtype == 12:
return _read_uint16(f)
elif dtype == 13:
return _read_uint32(f)
elif dtype == 14:
return _read_int64(f)
elif dtype == 15:
return _read_uint64(f)
else:
raise Exception("Unknown IDL type: %i - please report this" % dtype)
def _read_structure(f, array_desc, struct_desc):
    '''
    Read a structure, with the array and structure descriptors given as
    `array_desc` and `structure_desc` respectively.
    '''
    nrows = array_desc['nelements']
    columns = struct_desc['tagtable']
    # Build the numpy dtype: each field is named by a (lowercase, original)
    # pair so that either spelling can be used for lookups
    dtype = []
    for col in columns:
        if col['structure'] or col['array']:
            # Nested structures and arrays are stored as Python objects
            dtype.append(((col['name'].lower(), col['name']), np.object_))
        else:
            if col['typecode'] in DTYPE_DICT:
                dtype.append(((col['name'].lower(), col['name']),
                              DTYPE_DICT[col['typecode']]))
            else:
                raise Exception("Variable type %i not implemented" %
                                col['typecode'])
    structure = np.recarray((nrows, ), dtype=dtype)
    # Fill the recarray row by row, field by field, in stream order
    # (the read order must match how IDL serialized the records)
    for i in range(nrows):
        for col in columns:
            dtype = col['typecode']
            if col['structure']:
                structure[col['name']][i] = _read_structure(f,
                                                            struct_desc['arrtable'][col['name']],
                                                            struct_desc['structtable'][col['name']])
            elif col['array']:
                structure[col['name']][i] = _read_array(f, dtype,
                                                        struct_desc['arrtable'][col['name']])
            else:
                structure[col['name']][i] = _read_data(f, dtype)
    # Reshape structure if needed
    # (dims are reversed to map IDL's dimension ordering onto numpy's C order)
    if array_desc['ndims'] > 1:
        dims = array_desc['dims'][:int(array_desc['ndims'])]
        dims.reverse()
        structure = structure.reshape(dims)
    return structure
def _read_array(f, typecode, array_desc):
    '''
    Read an array of type `typecode`, with the array descriptor given as
    `array_desc`.
    '''
    if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]:
        if typecode == 1:
            # Byte arrays carry an explicit byte count before the payload
            nbytes = _read_int32(f)
            if nbytes != array_desc['nbytes']:
                warnings.warn("Not able to verify number of bytes from header")
        # Read bytes as numpy array
        array = np.frombuffer(f.read(array_desc['nbytes']),
                              dtype=DTYPE_DICT[typecode])
    elif typecode in [2, 12]:
        # These are 2 byte types, need to skip every two as they are not packed
        # (each 16-bit value occupies a 4-byte slot; view the data as 16-bit
        # and keep only the significant half of every slot)
        array = np.frombuffer(f.read(array_desc['nbytes']*2),
                              dtype=DTYPE_DICT[typecode])[1::2]
    else:
        # Read bytes into list
        # (strings, complex values and pointers are read element by element)
        array = []
        for i in range(array_desc['nelements']):
            dtype = typecode
            data = _read_data(f, dtype)
            array.append(data)
        array = np.array(array, dtype=np.object_)
    # Reshape array if needed
    # (dims reversed to map IDL's dimension ordering onto numpy's C order)
    if array_desc['ndims'] > 1:
        dims = array_desc['dims'][:int(array_desc['ndims'])]
        dims.reverse()
        array = array.reshape(dims)
    # Go to next alignment position
    _align_32(f)
    return array
def _read_record(f):
    '''Read one complete record and return it as a dict.

    The returned dict always holds 'rectype' (the record type name); the
    remaining keys depend on the type. The stream is left positioned at the
    start of the following record.
    '''
    record = {'rectype': _read_long(f)}
    # Offset of the next record, stored as two unsigned 32-bit halves
    nextrec = _read_uint32(f)
    nextrec += _read_uint32(f) * 2**32
    _skip_bytes(f, 4)  # unknown/unused 4 bytes
    if not record['rectype'] in RECTYPE_DICT:
        raise Exception("Unknown RECTYPE: %i" % record['rectype'])
    record['rectype'] = RECTYPE_DICT[record['rectype']]
    if record['rectype'] in ["VARIABLE", "HEAP_DATA"]:
        if record['rectype'] == "VARIABLE":
            record['varname'] = _read_string(f)
        else:
            record['heap_index'] = _read_long(f)
            _skip_bytes(f, 4)
        rectypedesc = _read_typedesc(f)
        if rectypedesc['typecode'] == 0:
            if nextrec == f.tell():
                record['data'] = None  # Indicates NULL value
            else:
                raise ValueError("Unexpected type code: 0")
        else:
            varstart = _read_long(f)
            if varstart != 7:
                raise Exception("VARSTART is not 7")
            if rectypedesc['structure']:
                record['data'] = _read_structure(f, rectypedesc['array_desc'],
                                                 rectypedesc['struct_desc'])
            elif rectypedesc['array']:
                record['data'] = _read_array(f, rectypedesc['typecode'],
                                             rectypedesc['array_desc'])
            else:
                dtype = rectypedesc['typecode']
                record['data'] = _read_data(f, dtype)
    elif record['rectype'] == "TIMESTAMP":
        _skip_bytes(f, 4*256)
        record['date'] = _read_string(f)
        record['user'] = _read_string(f)
        record['host'] = _read_string(f)
    elif record['rectype'] == "VERSION":
        record['format'] = _read_long(f)
        record['arch'] = _read_string(f)
        record['os'] = _read_string(f)
        record['release'] = _read_string(f)
    elif record['rectype'] == "IDENTIFICATION":
        # BUG FIX: was misspelled "IDENTIFICATON", which can never match a
        # name produced from RECTYPE_DICT, so identification records fell
        # through to the "not implemented" error below.
        record['author'] = _read_string(f)
        record['title'] = _read_string(f)
        record['idcode'] = _read_string(f)
    elif record['rectype'] == "NOTICE":
        record['notice'] = _read_string(f)
    elif record['rectype'] == "DESCRIPTION":
        record['description'] = _read_string_data(f)
    elif record['rectype'] == "HEAP_HEADER":
        record['nvalues'] = _read_long(f)
        record['indices'] = [_read_long(f) for _ in range(record['nvalues'])]
    elif record['rectype'] == "COMMON_VARIABLE":
        # BUG FIX: was "COMMONBLOCK", which is not a name in RECTYPE_DICT
        # (type 1 maps to "COMMON_VARIABLE"), so common-block records fell
        # through to the error below.
        record['nvars'] = _read_long(f)
        record['name'] = _read_string(f)
        record['varnames'] = [_read_string(f) for _ in range(record['nvars'])]
    elif record['rectype'] == "END_MARKER":
        record['end'] = True
    elif record['rectype'] == "UNKNOWN":
        # Unreachable in practice: unknown numeric rectypes raise above
        warnings.warn("Skipping UNKNOWN record")
    elif record['rectype'] == "SYSTEM_VARIABLE":
        warnings.warn("Skipping SYSTEM_VARIABLE record")
    else:
        raise Exception("record['rectype']=%s not implemented" %
                        record['rectype'])
    # Jump to the next record regardless of how much of this one was consumed
    f.seek(nextrec)
    return record
def _read_typedesc(f):
    '''Read a variable type descriptor from the stream.'''
    typecode = _read_long(f)
    varflags = _read_long(f)
    if varflags & 2 == 2:
        raise Exception("System variables not implemented")
    typedesc = {'typecode': typecode, 'varflags': varflags,
                'array': varflags & 4 == 4,
                'structure': varflags & 32 == 32}
    # A structure is stored as an array of records, so it carries an array
    # descriptor followed by a structure descriptor; a plain array carries
    # only the array descriptor.
    if typedesc['structure']:
        typedesc['array_desc'] = _read_arraydesc(f)
        typedesc['struct_desc'] = _read_structdesc(f)
    elif typedesc['array']:
        typedesc['array_desc'] = _read_arraydesc(f)
    return typedesc
def _read_arraydesc(f):
    '''Function to read in an array descriptor'''
    # ARRSTART distinguishes the 32-bit (8) and 64-bit (18) descriptor layouts
    arraydesc = {'arrstart': _read_long(f)}
    if arraydesc['arrstart'] == 8:
        _skip_bytes(f, 4)  # unknown/unused longword
        arraydesc['nbytes'] = _read_long(f)
        arraydesc['nelements'] = _read_long(f)
        arraydesc['ndims'] = _read_long(f)
        _skip_bytes(f, 8)  # two more unused longwords
        arraydesc['nmax'] = _read_long(f)
        # One dimension size per slot; only the first ndims are meaningful
        arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])]
    elif arraydesc['arrstart'] == 18:
        warnings.warn("Using experimental 64-bit array read")
        _skip_bytes(f, 8)
        arraydesc['nbytes'] = _read_uint64(f)
        arraydesc['nelements'] = _read_uint64(f)
        arraydesc['ndims'] = _read_long(f)
        _skip_bytes(f, 8)
        # The 64-bit layout always carries 8 dimension slots
        arraydesc['nmax'] = 8
        arraydesc['dims'] = []
        for d in range(arraydesc['nmax']):
            # Each dimension is stored as a (zero, size) pair of longwords
            v = _read_long(f)
            if v != 0:
                raise Exception("Expected a zero in ARRAY_DESC")
            arraydesc['dims'].append(_read_long(f))
    else:
        raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart'])
    return arraydesc
def _read_structdesc(f):
    '''Function to read in a structure descriptor'''
    structdesc = {}
    structstart = _read_long(f)
    if structstart != 9:
        raise Exception("STRUCTSTART should be 9")
    structdesc['name'] = _read_string(f)
    predef = _read_long(f)
    structdesc['ntags'] = _read_long(f)
    structdesc['nbytes'] = _read_long(f)
    # PREDEF is a bitmask: bit 0 = definition seen earlier in the file,
    # bit 1 = class inheritance, bit 2 = superclass definition
    structdesc['predef'] = predef & 1
    structdesc['inherits'] = predef & 2
    structdesc['is_super'] = predef & 4
    if not structdesc['predef']:
        structdesc['tagtable'] = [_read_tagdesc(f)
                                  for _ in range(structdesc['ntags'])]
        # Tag names follow all the tag descriptors
        for tag in structdesc['tagtable']:
            tag['name'] = _read_string(f)
        # Array/structure descriptors follow, only for the tags that need them
        structdesc['arrtable'] = {tag['name']: _read_arraydesc(f)
                                  for tag in structdesc['tagtable']
                                  if tag['array']}
        structdesc['structtable'] = {tag['name']: _read_structdesc(f)
                                     for tag in structdesc['tagtable']
                                     if tag['structure']}
        if structdesc['inherits'] or structdesc['is_super']:
            structdesc['classname'] = _read_string(f)
            structdesc['nsupclasses'] = _read_long(f)
            structdesc['supclassnames'] = [
                _read_string(f) for _ in range(structdesc['nsupclasses'])]
            structdesc['supclasstable'] = [
                _read_structdesc(f) for _ in range(structdesc['nsupclasses'])]
        # Cache the full definition so later PREDEF records can reuse it
        STRUCT_DICT[structdesc['name']] = structdesc
    else:
        # PREDEF set: the stream carries only the name; reuse the cached
        # definition recorded by an earlier occurrence
        if not structdesc['name'] in STRUCT_DICT:
            raise Exception("PREDEF=1 but can't find definition")
        structdesc = STRUCT_DICT[structdesc['name']]
    return structdesc
def _read_tagdesc(f):
    '''Read a single tag descriptor of a structure definition.'''
    offset = _read_long(f)
    if offset == -1:
        # Saturated 32-bit field: the real offset follows as 64 bits
        offset = _read_uint64(f)
    typecode = _read_long(f)
    tagflags = _read_long(f)
    # Assume '10'x is scalar
    return {'offset': offset,
            'typecode': typecode,
            'array': tagflags & 4 == 4,
            'structure': tagflags & 32 == 32,
            'scalar': typecode in DTYPE_DICT}
def _replace_heap(variable, heap):
    '''
    Recursively replace Pointer placeholders in `variable` with the values
    they reference in `heap`.

    Returns a (replace, new) tuple: `replace` tells the caller that
    `variable` itself was a pointer and must be substituted by `new`;
    containers are fixed up in place and return False.
    '''
    if isinstance(variable, Pointer):
        while isinstance(variable, Pointer):
            if variable.index == 0:
                # Index 0 is IDL's null pointer
                variable = None
            else:
                if variable.index in heap:
                    variable = heap[variable.index]
                else:
                    warnings.warn("Variable referenced by pointer not found "
                                  "in heap: variable will be set to None")
                    variable = None
        replace, new = _replace_heap(variable, heap)
        if replace:
            variable = new
        return True, variable
    elif isinstance(variable, np.rec.recarray):
        # np.rec.recarray is the public alias of np.core.records.recarray,
        # which is gone in NumPy 2.0 (np.core is private/removed).
        # Loop over records
        for ir, record in enumerate(variable):
            replace, new = _replace_heap(record, heap)
            if replace:
                variable[ir] = new
        return False, variable
    elif isinstance(variable, np.rec.record):
        # Loop over values
        for iv, value in enumerate(variable):
            replace, new = _replace_heap(value, heap)
            if replace:
                variable[iv] = new
        return False, variable
    elif isinstance(variable, np.ndarray):
        # Only object arrays can contain pointers
        if variable.dtype.type is np.object_:
            for iv in range(variable.size):
                replace, new = _replace_heap(variable.item(iv), heap)
                if replace:
                    # ndarray.itemset() was removed in NumPy 2.0; assigning
                    # through .flat is the portable flat-index equivalent
                    variable.flat[iv] = new
        return False, variable
    else:
        return False, variable
class AttrDict(dict):
    '''
    A case-insensitive dictionary with access via item, attribute, and call
    notations:
    >>> d = AttrDict()
    >>> d['Variable'] = 123
    >>> d['Variable']
    123
    >>> d.Variable
    123
    >>> d.variable
    123
    >>> d('VARIABLE')
    123
    '''
    def __init__(self, init=None):
        # No mutable default argument (was ``init={}``).
        dict.__init__(self)
        if init:
            # Route initial items through __setitem__ so their keys are
            # lowercased; previously a mixed-case init mapping was stored
            # verbatim, defeating the case-insensitive lookup promised above.
            for key, value in dict(init).items():
                self[key] = value

    def __getitem__(self, name):
        # Keys are normalized to lowercase on both read and write
        return super().__getitem__(name.lower())

    def __setitem__(self, key, value):
        return super().__setitem__(key.lower(), value)

    __getattr__ = __getitem__
    __setattr__ = __setitem__
    __call__ = __getitem__
def readsav(file_name, idict=None, python_dict=False,
            uncompressed_file_name=None, verbose=False):
    """
    Read an IDL .sav file.
    Parameters
    ----------
    file_name : str
        Name of the IDL save file.
    idict : dict, optional
        Dictionary in which to insert .sav file variables.
    python_dict : bool, optional
        By default, the object return is not a Python dictionary, but a
        case-insensitive dictionary with item, attribute, and call access
        to variables. To get a standard Python dictionary, set this option
        to True.
    uncompressed_file_name : str, optional
        This option only has an effect for .sav files written with the
        /compress option. If a file name is specified, compressed .sav
        files are uncompressed to this file. Otherwise, readsav will use
        the `tempfile` module to determine a temporary filename
        automatically, and will remove the temporary file upon successfully
        reading it in.
    verbose : bool, optional
        Whether to print out information about the save file, including
        the records read, and available variables.
    Returns
    -------
    idl_dict : AttrDict or dict
        If `python_dict` is set to False (default), this function returns a
        case-insensitive dictionary with item, attribute, and call access
        to variables. If `python_dict` is set to True, this function
        returns a Python dictionary with all variable names in lowercase.
        If `idict` was specified, then variables are written to the
        dictionary specified, and the updated dictionary is returned.
    Examples
    --------
    >>> from os.path import dirname, join as pjoin
    >>> import scipy.io as sio
    >>> from scipy.io import readsav
    Get the filename for an example .sav file from the tests/data directory.
    >>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
    >>> sav_fname = pjoin(data_dir, 'array_float32_1d.sav')
    Load the .sav file contents.
    >>> sav_data = readsav(sav_fname)
    Get keys of the .sav file contents.
    >>> print(sav_data.keys())
    dict_keys(['array1d'])
    Access a content with a key.
    >>> print(sav_data['array1d'])
    [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0.]
    """
    # Initialize record and variable holders
    records = []
    if python_dict or idict:
        variables = {}
    else:
        variables = AttrDict()
    # Open the IDL file
    f = open(file_name, 'rb')
    # Read the signature, which should be 'SR'
    signature = _read_bytes(f, 2)
    if signature != b'SR':
        raise Exception(f"Invalid SIGNATURE: {signature}")
    # Next, the record format, which is '\x00\x04' for normal .sav
    # files, and '\x00\x06' for compressed .sav files.
    recfmt = _read_bytes(f, 2)
    if recfmt == b'\x00\x04':
        pass
    elif recfmt == b'\x00\x06':
        if verbose:
            print("IDL Save file is compressed")
        if uncompressed_file_name:
            fout = open(uncompressed_file_name, 'w+b')
        else:
            fout = tempfile.NamedTemporaryFile(suffix='.sav')
        if verbose:
            print(f" -> expanding to {fout.name}")
        # Write header
        fout.write(b'SR\x00\x04')
        # Cycle through records
        while True:
            # Read record type
            rectype = _read_long(f)
            fout.write(struct.pack('>l', int(rectype)))
            # Read position of next record and return as int
            nextrec = _read_uint32(f)
            nextrec += _read_uint32(f) * 2**32
            # Read the unknown 4 bytes
            unknown = f.read(4)
            # Check if the end of the file has been reached
            if RECTYPE_DICT[rectype] == 'END_MARKER':
                fout.write(struct.pack('>I', int(nextrec) % 2**32))
                fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
                fout.write(unknown)
                break
            # Find current position
            pos = f.tell()
            # Decompress record
            rec_string = zlib.decompress(f.read(nextrec-pos))
            # Find new position of next record
            nextrec = fout.tell() + len(rec_string) + 12
            # Write out record
            fout.write(struct.pack('>I', int(nextrec % 2**32)))
            fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
            fout.write(unknown)
            fout.write(rec_string)
        # Close the original compressed file
        f.close()
        # Set f to be the decompressed file, and skip the first four bytes
        f = fout
        f.seek(4)
    else:
        raise Exception(f"Invalid RECFMT: {recfmt}")
    # Loop through records, and add them to the list
    while True:
        r = _read_record(f)
        records.append(r)
        if 'end' in r:
            if r['end']:
                break
    # Close the file
    f.close()
    # Find heap data variables
    heap = {}
    for r in records:
        if r['rectype'] == "HEAP_DATA":
            heap[r['heap_index']] = r['data']
    # Find all variables
    for r in records:
        if r['rectype'] == "VARIABLE":
            replace, new = _replace_heap(r['data'], heap)
            if replace:
                r['data'] = new
            variables[r['varname'].lower()] = r['data']
    if verbose:
        # Print out timestamp info about the file
        for record in records:
            if record['rectype'] == "TIMESTAMP":
                print("-"*50)
                print(f"Date: {record['date']}")
                print(f"User: {record['user']}")
                print(f"Host: {record['host']}")
                break
        # Print out version info about the file
        for record in records:
            if record['rectype'] == "VERSION":
                print("-"*50)
                print(f"Format: {record['format']}")
                print(f"Architecture: {record['arch']}")
                print(f"Operating System: {record['os']}")
                print(f"IDL Version: {record['release']}")
                break
        # Print out identification info about the file
        # BUG FIX: the rectype was misspelled "IDENTIFICATON", which never
        # matches the names produced from RECTYPE_DICT ("IDENTIFICATION"),
        # so this information was silently never printed.
        for record in records:
            if record['rectype'] == "IDENTIFICATION":
                print("-"*50)
                print(f"Author: {record['author']}")
                print(f"Title: {record['title']}")
                print(f"ID Code: {record['idcode']}")
                break
        # Print out descriptions saved with the file
        for record in records:
            if record['rectype'] == "DESCRIPTION":
                print("-"*50)
                print(f"Description: {record['description']}")
                break
        print("-"*50)
        print("Successfully read %i records of which:" %
              (len(records)))
        # Create convenience list of record types
        rectypes = [r['rectype'] for r in records]
        for rt in set(rectypes):
            if rt != 'END_MARKER':
                print(" - %i are of type %s" % (rectypes.count(rt), rt))
        print("-"*50)
        if 'VARIABLE' in rectypes:
            print("Available variables:")
            for var in variables:
                print(f" - {var} [{type(variables[var])}]")
            print("-"*50)
    if idict:
        for var in variables:
            idict[var] = variables[var]
        return idict
    else:
        return variables
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing various sites direct links generators"""
from subprocess import PIPE, Popen
import re
import urllib.parse
import json
from random import choice
import requests
from bs4 import BeautifulSoup
from humanize import naturalsize
from userbot import CMD_HELP
from userbot.events import register
def subprocess_run(cmd):
    """Run *cmd* through bash and return its (stdout, stderr) pair.

    A non-zero exit status is reported on stdout but never raised;
    callers always receive the captured output.
    """
    process = Popen(cmd, stdout=PIPE, stderr=PIPE,
                    shell=True, universal_newlines=True,
                    executable="bash")
    captured = process.communicate()
    status = process.returncode
    if status != 0:
        print('An error was detected while running the subprocess:\n'
              f'exit code: {status}\n'
              f'stdout: {captured[0]}\n'
              f'stderr: {captured[1]}')
    return captured
@register(outgoing=True, pattern=r"^.direct(?: |$)([\s\S]*)")
async def direct_link_generator(request):
    """Generate direct download links for every URL in the message.

    The URL(s) come from the command argument or from the replied-to
    message. Each supported host is dispatched to its dedicated
    generator; unsupported hosts are reported as such.
    """
    await request.edit("`Processing...`")
    textx = await request.get_reply_message()
    message = request.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await request.edit("`Usage: .direct <url>`")
        return
    links = re.findall(r'\bhttps?://.*\.\S+', message)
    if not links:
        # BUG FIX: previously execution fell through to the (empty) loop
        # and edited the message a second time with the same text.
        await request.edit("`No links found!`")
        return
    reply = ''
    for link in links:
        if 'drive.google.com' in link:
            reply += gdrive(link)
        elif 'zippyshare.com' in link:
            reply += zippy_share(link)
        elif 'mega.' in link:
            reply += mega_dl(link)
        elif 'yadi.sk' in link:
            reply += yandex_disk(link)
        elif 'cloud.mail.ru' in link:
            reply += cm_ru(link)
        elif 'mediafire.com' in link:
            reply += mediafire(link)
        elif 'sourceforge.net' in link:
            reply += sourceforge(link)
        elif 'osdn.net' in link:
            reply += osdn(link)
        elif 'github.com' in link:
            reply += github(link)
        elif 'androidfilehost.com' in link:
            reply += androidfilehost(link)
        else:
            # BUG FIX: the original concatenated the hostname directly with
            # 'is not supported' ("example.comis not supported"); add the
            # missing separator and a trailing newline.
            reply += re.findall(r"\bhttps?://(.*?[^/]+)",
                                link)[0] + ' is not supported\n'
    await request.edit(reply)
def gdrive(url: str) -> str:
    """Resolve a Google Drive share link to a direct-download markdown link.

    Returns a markdown-formatted reply string; error states are returned
    as backtick-quoted messages rather than raised.
    """
    drive = 'https://drive.google.com'
    try:
        link = re.findall(r'\bhttps?://drive\.google\.com\S+', url)[0]
    except IndexError:
        reply = "`No Google drive links found`\n"
        return reply
    file_id = ''
    reply = ''
    # Extract the file id from the three known share-URL layouts.
    if link.find("view") != -1:
        file_id = link.split('/')[-2]
    elif link.find("open?id=") != -1:
        file_id = link.split("open?id=")[1].strip()
    elif link.find("uc?id=") != -1:
        file_id = link.split("uc?id=")[1].strip()
    url = f'{drive}/uc?export=download&id={file_id}'
    # Do not follow the redirect: the "location" header IS the direct link.
    download = requests.get(url, stream=True, allow_redirects=False)
    cookies = download.cookies
    try:
        # In case of small file size, Google downloads directly
        dl_url = download.headers["location"]
        if 'accounts.google.com' in dl_url:  # non-public file
            reply += '`Link is not public!`\n'
            return reply
        name = 'Direct Download Link'
    except KeyError:
        # In case of download warning page: scrape the confirmation link
        # and file name, then re-request with the same session cookies.
        page = BeautifulSoup(download.content, 'lxml')
        export = drive + page.find('a', {'id': 'uc-download-link'}).get('href')
        name = page.find('span', {'class': 'uc-name-size'}).text
        response = requests.get(export,
                                stream=True,
                                allow_redirects=False,
                                cookies=cookies)
        dl_url = response.headers['location']
        if 'accounts.google.com' in dl_url:
            reply += 'Link is not public!'
            return reply
    reply += f'[{name}]({dl_url})\n'
    return reply
def zippy_share(url: str) -> str:
    """ ZippyShare direct links generator
    Based on https://github.com/LameLemon/ziggy"""
    reply = ''
    dl_url = ''
    try:
        link = re.findall(r'\bhttps?://.*zippyshare\.com\S+', url)[0]
    except IndexError:
        reply = "`No ZippyShare links found`\n"
        return reply
    session = requests.Session()
    base_url = re.search('http.+.com', link).group()
    response = session.get(link)
    page_soup = BeautifulSoup(response.content, "lxml")
    scripts = page_soup.find_all("script", {"type": "text/javascript"})
    # The download button's href is assembled by inline JS as
    # "<prefix>" + (<arithmetic>) + "<suffix>"; pull out the pieces.
    for script in scripts:
        if "getElementById('dlbutton')" in script.text:
            url_raw = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                                script.text).group('url')
            math = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                             script.text).group('math')
            # SECURITY NOTE(review): eval() runs an expression scraped from
            # the remote page's JavaScript; a hostile page could execute
            # arbitrary code here. Consider a safe arithmetic evaluator.
            dl_url = url_raw.replace(math, '"' + str(eval(math)) + '"')
            break
    # Second eval collapses the rebuilt JS string expression into a path.
    dl_url = base_url + eval(dl_url)
    name = urllib.parse.unquote(dl_url.split('/')[-1])
    reply += f'[{name}]({dl_url})\n'
    return reply
def yandex_disk(url: str) -> str:
    """ Yandex.Disk direct links generator
    Based on https://github.com/wldhx/yadisk-direct"""
    matches = re.findall(r'\bhttps?://.*yadi\.sk\S+', url)
    if not matches:
        return "`No Yandex.Disk links found`\n"
    link = matches[0]
    # Public API endpoint resolving a share link to a direct download URL.
    api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
    try:
        dl_url = requests.get(api.format(link)).json()['href']
    except KeyError:
        # 'href' missing: the file is gone or its download quota is spent.
        return '`Error: File not found / Download limit reached`\n'
    # The file name is embedded in the returned href's query string.
    name = dl_url.split('filename=')[1].split('&disposition')[0]
    return f'[{name}]({dl_url})\n'
def mega_dl(url: str) -> str:
    """ MEGA.nz direct links generator
    Using https://github.com/tonikelope/megadown"""
    found = re.findall(r'\bhttps?://.*mega.*\.nz\S+', url)
    if not found:
        return "`No MEGA.nz links found`\n"
    # Delegate link extraction to the bundled megadown helper script,
    # which emits a JSON document on stdout.
    output = subprocess_run(f'bin/megadown -q -m {found[0]}')
    try:
        data = json.loads(output[0])
        print(data)
    except json.JSONDecodeError:
        return "`Error: Can't extract the link`\n"
    size = naturalsize(int(data['file_size']))
    return f"[{data['file_name']} ({size})]({data['url']})\n"
def cm_ru(url: str) -> str:
    """ cloud.mail.ru direct links generator
    Using https://github.com/JrMasterModelBuilder/cmrudl.py"""
    found = re.findall(r'\bhttps?://.*cloud\.mail\.ru\S+', url)
    if not found:
        return "`No cloud.mail.ru links found`\n"
    # cmrudl prints progress lines first; the final line is the JSON result.
    output = subprocess_run(f'bin/cmrudl -s {found[0]}')
    last_line = output[0].splitlines()[-1]
    try:
        data = json.loads(last_line)
    except json.decoder.JSONDecodeError:
        return "`Error: Can't extract the link`\n"
    size = naturalsize(int(data['file_size']))
    return f"[{data['file_name']} ({size})]({data['download']})\n"
def mediafire(url: str) -> str:
    """ MediaFire direct links generator """
    found = re.findall(r'\bhttps?://.*mediafire\.com\S+', url)
    if not found:
        return "`No MediaFire links found`\n"
    link = found[0]
    # Scrape the download page: the button's href is the direct link.
    page = BeautifulSoup(requests.get(link).content, 'lxml')
    info = page.find('a', {'aria-label': 'Download file'})
    dl_url = info.get('href')
    size = re.findall(r'\(.*\)', info.text)[0]
    name = page.find('div', {'class': 'filename'}).text
    return f'[{name} {size}]({dl_url})\n'
def sourceforge(url: str) -> str:
    """Build a markdown list of SourceForge mirror links for a file.

    Returns a reply string with one markdown link per mirror, or a
    backtick-quoted error when no SourceForge link is present.
    """
    try:
        link = re.findall(r'\bhttps?://.*sourceforge\.net\S+', url)[0]
    except IndexError:
        return "`No SourceForge links found`\n"
    file_path = re.findall(r'files(.*)/download', link)[0]
    # BUG FIX: the f-strings below reused the enclosing quote character
    # inside their replacement fields, a SyntaxError on every Python
    # release before 3.12 (PEP 701). Quote styles are now mixed, matching
    # the duplicate of this module elsewhere in the codebase.
    reply = f"Mirrors for __{file_path.split('/')[-1]}__\n"
    project = re.findall(r'projects?/(.*?)/files', link)[0]
    mirrors = f'https://sourceforge.net/settings/mirror_choices?' \
              f'projectname={project}&filename={file_path}'
    page = BeautifulSoup(requests.get(mirrors).content, 'html.parser')
    info = page.find('ul', {'id': 'mirrorList'}).findAll('li')
    # Skip the header entry; each remaining <li> names one mirror.
    for mirror in info[1:]:
        name = re.findall(r'\((.*)\)', mirror.text.strip())[0]
        dl_url = f'https://{mirror["id"]}.dl.sourceforge.net/project/{project}/{file_path}'
        reply += f'[{name}]({dl_url}) '
    return reply
def osdn(url: str) -> str:
    """Build a markdown list of OSDN mirror links for a file.

    Returns a reply string with one markdown link per mirror, or a
    backtick-quoted error when no OSDN link is present.
    """
    osdn_link = 'https://osdn.net'
    try:
        link = re.findall(r'\bhttps?://.*osdn\.net\S+', url)[0]
    except IndexError:
        return "`No OSDN links found`\n"
    page = BeautifulSoup(
        requests.get(link, allow_redirects=True).content, 'lxml')
    info = page.find('a', {'class': 'mirror_link'})
    link = urllib.parse.unquote(osdn_link + info['href'])
    # BUG FIX: the original f-string reused double quotes inside a
    # double-quoted literal -- a SyntaxError on Python < 3.12 (PEP 701).
    reply = f"Mirrors for __{link.split('/')[-1]}__\n"
    mirrors = page.find('form', {'id': 'mirror-select-form'}).findAll('tr')
    # Skip the header row; each remaining row describes one mirror.
    for data in mirrors[1:]:
        mirror = data.find('input')['value']
        name = re.findall(r'\((.*)\)', data.findAll('td')[-1].text.strip())[0]
        # Substitute the mirror id into the m= query parameter.
        dl_url = re.sub(r'm=(.*)&f', f'm={mirror}&f', link)
        reply += f'[{name}]({dl_url}) '
    return reply
def github(url: str) -> str:
    """Resolve a GitHub release-asset link to its direct download URL.

    Follows the single redirect GitHub issues for release assets and
    returns a markdown link, or a backtick-quoted error message.
    """
    try:
        link = re.findall(r'\bhttps?://.*github\.com.*releases\S+', url)[0]
    except IndexError:
        return "`No GitHub Releases links found`\n"
    reply = ''
    # BUG FIX: request the extracted link rather than the raw message text
    # (consistent with the other generators), and do not follow the
    # redirect -- the "location" header is the direct asset URL.
    download = requests.get(link, stream=True, allow_redirects=False)
    try:
        dl_url = download.headers["location"]
    except KeyError:
        # BUG FIX: previously a markdown link with an empty URL was still
        # emitted after the error message; bail out instead.
        return "`Error: Can't extract the link`\n"
    name = link.split('/')[-1]
    reply += f'[{name}]({dl_url}) '
    return reply
def androidfilehost(url: str) -> str:
    """AndroidFileHost direct links generator.

    Asks AFH's mirrors XHR endpoint for the mirror list of the file id
    embedded in the link and renders one markdown link per mirror.
    """
    try:
        link = re.findall(r'\bhttps?://.*androidfilehost.*fid.*\S+', url)[0]
    except IndexError:
        reply = "`No AFH links found`\n"
        return reply
    fid = re.findall(r'\?fid=(.*)', link)[0]
    session = requests.Session()
    user_agent = useragent()
    headers = {'user-agent': user_agent}
    # First GET establishes the session cookies the XHR endpoint requires.
    res = session.get(link, headers=headers, allow_redirects=True)
    # Browser-like headers so the endpoint accepts the request as an XHR.
    headers = {
        'origin': 'https://androidfilehost.com',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'user-agent': user_agent,
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'x-mod-sbb-ctype': 'xhr',
        'accept': '*/*',
        'referer': f'https://androidfilehost.com/?fid={fid}',
        'authority': 'androidfilehost.com',
        'x-requested-with': 'XMLHttpRequest',
    }
    data = {
        'submit': 'submit',
        'action': 'getdownloadmirrors',
        'fid': f'{fid}'
    }
    mirrors = None
    reply = ''
    error = "`Error: Can't find Mirrors for the link`\n"
    try:
        req = session.post(
            'https://androidfilehost.com/libs/otf/mirrors.otf.php',
            headers=headers,
            data=data,
            cookies=res.cookies)
        mirrors = req.json()['MIRRORS']
    except (json.decoder.JSONDecodeError, TypeError):
        # NOTE(review): when this branch fires, `error` is appended here
        # AND in the `if not mirrors` check below, doubling the message.
        reply += error
    if not mirrors:
        reply += error
        return reply
    for item in mirrors:
        name = item['name']
        dl_url = item['url']
        reply += f'[{name}]({dl_url}) '
    return reply
def useragent():
    """Return a random Android browser user-agent string.

    Scrapes whatismybrowser.com's user-agent explorer page and picks
    one of the listed entries at random.
    """
    page = BeautifulSoup(
        requests.get(
            'https://developers.whatismybrowser.com/'
            'useragents/explore/operating_system_name/android/').content,
        'lxml')
    agents = page.findAll('td', {'class': 'useragent'})
    return choice(agents).text
# Register the help text for the .direct command with the userbot.
CMD_HELP.update({
    "direct":
    ".direct <url>\n"
    "Usage: Reply to a link or paste a URL to\n"
    "generate a direct download link\n\n"
    "List of supported URLs:\n"
    "`Google Drive - MEGA.nz - Cloud Mail - Yandex.Disk - AFH - "
    "ZippyShare - MediaFire - SourceForge - OSDN - GitHub`"
})
| # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing various sites direct links generators"""
from subprocess import PIPE, Popen
import re
import urllib.parse
import json
from random import choice
import requests
from bs4 import BeautifulSoup
from humanize import naturalsize
from userbot import CMD_HELP
from userbot.events import register
def subprocess_run(cmd):
    """Run *cmd* through bash, returning the (stdout, stderr) tuple.

    A non-zero exit status is only reported on stdout; the captured
    output is always returned so callers decide how to handle failures.
    """
    subproc = Popen(cmd, stdout=PIPE, stderr=PIPE,
                    shell=True, universal_newlines=True,
                    executable="bash")
    talk = subproc.communicate()
    exitCode = subproc.returncode
    if exitCode != 0:
        print('An error was detected while running the subprocess:\n'
              f'exit code: {exitCode}\n'
              f'stdout: {talk[0]}\n'
              f'stderr: {talk[1]}')
    return talk
@register(outgoing=True, pattern=r"^.direct(?: |$)([\s\S]*)")
async def direct_link_generator(request):
    """Generate direct download links for every URL in the message.

    The URL(s) come from the command argument or from the replied-to
    message. Each supported host is dispatched to its dedicated
    generator; unsupported hosts are reported as such.
    """
    await request.edit("`Processing...`")
    textx = await request.get_reply_message()
    message = request.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await request.edit("`Usage: .direct <url>`")
        return
    links = re.findall(r'\bhttps?://.*\.\S+', message)
    if not links:
        # BUG FIX: previously execution fell through to the (empty) loop
        # and edited the message a second time with the same text.
        await request.edit("`No links found!`")
        return
    reply = ''
    for link in links:
        if 'drive.google.com' in link:
            reply += gdrive(link)
        elif 'zippyshare.com' in link:
            reply += zippy_share(link)
        elif 'mega.' in link:
            reply += mega_dl(link)
        elif 'yadi.sk' in link:
            reply += yandex_disk(link)
        elif 'cloud.mail.ru' in link:
            reply += cm_ru(link)
        elif 'mediafire.com' in link:
            reply += mediafire(link)
        elif 'sourceforge.net' in link:
            reply += sourceforge(link)
        elif 'osdn.net' in link:
            reply += osdn(link)
        elif 'github.com' in link:
            reply += github(link)
        elif 'androidfilehost.com' in link:
            reply += androidfilehost(link)
        else:
            # BUG FIX: the original concatenated the hostname directly with
            # 'is not supported' ("example.comis not supported"); add the
            # missing separator and a trailing newline.
            reply += re.findall(r"\bhttps?://(.*?[^/]+)",
                                link)[0] + ' is not supported\n'
    await request.edit(reply)
def gdrive(url: str) -> str:
    """Resolve a Google Drive share link to a direct-download markdown link.

    Returns a markdown-formatted reply string; error states are returned
    as backtick-quoted messages rather than raised.
    """
    drive = 'https://drive.google.com'
    try:
        link = re.findall(r'\bhttps?://drive\.google\.com\S+', url)[0]
    except IndexError:
        reply = "`No Google drive links found`\n"
        return reply
    file_id = ''
    reply = ''
    # Extract the file id from the three known share-URL layouts.
    if link.find("view") != -1:
        file_id = link.split('/')[-2]
    elif link.find("open?id=") != -1:
        file_id = link.split("open?id=")[1].strip()
    elif link.find("uc?id=") != -1:
        file_id = link.split("uc?id=")[1].strip()
    url = f'{drive}/uc?export=download&id={file_id}'
    # Do not follow the redirect: the "location" header IS the direct link.
    download = requests.get(url, stream=True, allow_redirects=False)
    cookies = download.cookies
    try:
        # In case of small file size, Google downloads directly
        dl_url = download.headers["location"]
        if 'accounts.google.com' in dl_url:  # non-public file
            reply += '`Link is not public!`\n'
            return reply
        name = 'Direct Download Link'
    except KeyError:
        # In case of download warning page: scrape the confirmation link
        # and file name, then re-request with the same session cookies.
        page = BeautifulSoup(download.content, 'lxml')
        export = drive + page.find('a', {'id': 'uc-download-link'}).get('href')
        name = page.find('span', {'class': 'uc-name-size'}).text
        response = requests.get(export,
                                stream=True,
                                allow_redirects=False,
                                cookies=cookies)
        dl_url = response.headers['location']
        if 'accounts.google.com' in dl_url:
            reply += 'Link is not public!'
            return reply
    reply += f'[{name}]({dl_url})\n'
    return reply
def zippy_share(url: str) -> str:
    """ ZippyShare direct links generator
    Based on https://github.com/LameLemon/ziggy"""
    reply = ''
    dl_url = ''
    try:
        link = re.findall(r'\bhttps?://.*zippyshare\.com\S+', url)[0]
    except IndexError:
        reply = "`No ZippyShare links found`\n"
        return reply
    session = requests.Session()
    base_url = re.search('http.+.com', link).group()
    response = session.get(link)
    page_soup = BeautifulSoup(response.content, "lxml")
    scripts = page_soup.find_all("script", {"type": "text/javascript"})
    # The download button's href is assembled by inline JS as
    # "<prefix>" + (<arithmetic>) + "<suffix>"; pull out the pieces.
    for script in scripts:
        if "getElementById('dlbutton')" in script.text:
            url_raw = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                                script.text).group('url')
            math = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                             script.text).group('math')
            # SECURITY NOTE(review): eval() runs an expression scraped from
            # the remote page's JavaScript; a hostile page could execute
            # arbitrary code here. Consider a safe arithmetic evaluator.
            dl_url = url_raw.replace(math, '"' + str(eval(math)) + '"')
            break
    # Second eval collapses the rebuilt JS string expression into a path.
    dl_url = base_url + eval(dl_url)
    name = urllib.parse.unquote(dl_url.split('/')[-1])
    reply += f'[{name}]({dl_url})\n'
    return reply
def yandex_disk(url: str) -> str:
    """ Yandex.Disk direct links generator
    Based on https://github.com/wldhx/yadisk-direct"""
    reply = ''
    try:
        link = re.findall(r'\bhttps?://.*yadi\.sk\S+', url)[0]
    except IndexError:
        reply = "`No Yandex.Disk links found`\n"
        return reply
    # Public API endpoint resolving a share link to a direct download URL.
    api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
    try:
        dl_url = requests.get(api.format(link)).json()['href']
        # The file name is embedded in the returned href's query string.
        name = dl_url.split('filename=')[1].split('&disposition')[0]
        reply += f'[{name}]({dl_url})\n'
    except KeyError:
        # 'href' missing: the file is gone or its download quota is spent.
        reply += '`Error: File not found / Download limit reached`\n'
        return reply
    return reply
def mega_dl(url: str) -> str:
    """ MEGA.nz direct links generator
    Using https://github.com/tonikelope/megadown"""
    reply = ''
    try:
        link = re.findall(r'\bhttps?://.*mega.*\.nz\S+', url)[0]
    except IndexError:
        reply = "`No MEGA.nz links found`\n"
        return reply
    # Delegate link extraction to the bundled megadown helper script,
    # which emits a JSON document on stdout.
    cmd = f'bin/megadown -q -m {link}'
    result = subprocess_run(cmd)
    try:
        data = json.loads(result[0])
        print(data)
    except json.JSONDecodeError:
        reply += "`Error: Can't extract the link`\n"
        return reply
    dl_url = data['url']
    name = data['file_name']
    size = naturalsize(int(data['file_size']))
    reply += f'[{name} ({size})]({dl_url})\n'
    return reply
def cm_ru(url: str) -> str:
    """ cloud.mail.ru direct links generator
    Using https://github.com/JrMasterModelBuilder/cmrudl.py"""
    reply = ''
    try:
        link = re.findall(r'\bhttps?://.*cloud\.mail\.ru\S+', url)[0]
    except IndexError:
        reply = "`No cloud.mail.ru links found`\n"
        return reply
    cmd = f'bin/cmrudl -s {link}'
    result = subprocess_run(cmd)
    # cmrudl prints progress lines first; the final line is the JSON result.
    result = result[0].splitlines()[-1]
    try:
        data = json.loads(result)
    except json.decoder.JSONDecodeError:
        reply += "`Error: Can't extract the link`\n"
        return reply
    dl_url = data['download']
    name = data['file_name']
    size = naturalsize(int(data['file_size']))
    reply += f'[{name} ({size})]({dl_url})\n'
    return reply
def mediafire(url: str) -> str:
    """ MediaFire direct links generator """
    try:
        link = re.findall(r'\bhttps?://.*mediafire\.com\S+', url)[0]
    except IndexError:
        reply = "`No MediaFire links found`\n"
        return reply
    reply = ''
    # Scrape the download page: the button's href is the direct link.
    page = BeautifulSoup(requests.get(link).content, 'lxml')
    info = page.find('a', {'aria-label': 'Download file'})
    dl_url = info.get('href')
    size = re.findall(r'\(.*\)', info.text)[0]
    name = page.find('div', {'class': 'filename'}).text
    reply += f'[{name} {size}]({dl_url})\n'
    return reply
def sourceforge(url: str) -> str:
    """ SourceForge direct links generator """
    try:
        link = re.findall(r'\bhttps?://.*sourceforge\.net\S+', url)[0]
    except IndexError:
        reply = "`No SourceForge links found`\n"
        return reply
    file_path = re.findall(r'files(.*)/download', link)[0]
    reply = f"Mirrors for __{file_path.split('/')[-1]}__\n"
    project = re.findall(r'projects?/(.*?)/files', link)[0]
    # The mirror-choices settings page lists all mirrors for the file.
    mirrors = f'https://sourceforge.net/settings/mirror_choices?' \
              f'projectname={project}&filename={file_path}'
    page = BeautifulSoup(requests.get(mirrors).content, 'html.parser')
    info = page.find('ul', {'id': 'mirrorList'}).findAll('li')
    # Skip the header entry; each remaining <li> names one mirror.
    for mirror in info[1:]:
        name = re.findall(r'\((.*)\)', mirror.text.strip())[0]
        dl_url = f'https://{mirror["id"]}.dl.sourceforge.net/project/{project}/{file_path}'
        reply += f'[{name}]({dl_url}) '
    return reply
def osdn(url: str) -> str:
    """ OSDN direct links generator """
    osdn_link = 'https://osdn.net'
    try:
        link = re.findall(r'\bhttps?://.*osdn\.net\S+', url)[0]
    except IndexError:
        reply = "`No OSDN links found`\n"
        return reply
    page = BeautifulSoup(
        requests.get(link, allow_redirects=True).content, 'lxml')
    info = page.find('a', {'class': 'mirror_link'})
    link = urllib.parse.unquote(osdn_link + info['href'])
    reply = f"Mirrors for __{link.split('/')[-1]}__\n"
    mirrors = page.find('form', {'id': 'mirror-select-form'}).findAll('tr')
    # Skip the header row; each remaining row describes one mirror.
    for data in mirrors[1:]:
        mirror = data.find('input')['value']
        name = re.findall(r'\((.*)\)', data.findAll('td')[-1].text.strip())[0]
        # Substitute the mirror id into the m= query parameter.
        dl_url = re.sub(r'm=(.*)&f', f'm={mirror}&f', link)
        reply += f'[{name}]({dl_url}) '
    return reply
def github(url: str) -> str:
    """Resolve a GitHub release-asset link to its direct download URL.

    Follows the single redirect GitHub issues for release assets and
    returns a markdown link, or a backtick-quoted error message.
    """
    try:
        link = re.findall(r'\bhttps?://.*github\.com.*releases\S+', url)[0]
    except IndexError:
        return "`No GitHub Releases links found`\n"
    reply = ''
    # BUG FIX: request the extracted link rather than the raw message text
    # (consistent with the other generators), and do not follow the
    # redirect -- the "location" header is the direct asset URL.
    download = requests.get(link, stream=True, allow_redirects=False)
    try:
        dl_url = download.headers["location"]
    except KeyError:
        # BUG FIX: previously a markdown link with an empty URL was still
        # emitted after the error message; bail out instead.
        return "`Error: Can't extract the link`\n"
    name = link.split('/')[-1]
    reply += f'[{name}]({dl_url}) '
    return reply
def androidfilehost(url: str) -> str:
    """AndroidFileHost direct links generator.

    Asks AFH's mirrors XHR endpoint for the mirror list of the file id
    embedded in the link and renders one markdown link per mirror.
    """
    try:
        link = re.findall(r'\bhttps?://.*androidfilehost.*fid.*\S+', url)[0]
    except IndexError:
        return "`No AFH links found`\n"
    fid = re.findall(r'\?fid=(.*)', link)[0]
    session = requests.Session()
    user_agent = useragent()
    headers = {'user-agent': user_agent}
    # First GET establishes the session cookies the XHR endpoint requires.
    res = session.get(link, headers=headers, allow_redirects=True)
    # Browser-like headers so the endpoint accepts the request as an XHR.
    headers = {
        'origin': 'https://androidfilehost.com',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'user-agent': user_agent,
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'x-mod-sbb-ctype': 'xhr',
        'accept': '*/*',
        'referer': f'https://androidfilehost.com/?fid={fid}',
        'authority': 'androidfilehost.com',
        'x-requested-with': 'XMLHttpRequest',
    }
    data = {
        'submit': 'submit',
        'action': 'getdownloadmirrors',
        'fid': f'{fid}'
    }
    mirrors = None
    reply = ''
    error = "`Error: Can't find Mirrors for the link`\n"
    try:
        req = session.post(
            'https://androidfilehost.com/libs/otf/mirrors.otf.php',
            headers=headers,
            data=data,
            cookies=res.cookies)
        mirrors = req.json()['MIRRORS']
    except (json.decoder.JSONDecodeError, TypeError):
        # BUG FIX: the error message used to be appended both here and in
        # the `if not mirrors` check below, doubling it in the output; let
        # the single check below report it once.
        mirrors = None
    if not mirrors:
        reply += error
        return reply
    for item in mirrors:
        name = item['name']
        dl_url = item['url']
        reply += f'[{name}]({dl_url}) '
    return reply
def useragent():
    """Return a random Android browser user-agent string.

    Scrapes whatismybrowser.com's user-agent explorer page and picks
    one of the listed entries at random.
    """
    useragents = BeautifulSoup(
        requests.get(
            'https://developers.whatismybrowser.com/'
            'useragents/explore/operating_system_name/android/').content,
        'lxml').findAll('td', {'class': 'useragent'})
    user_agent = choice(useragents)
    return user_agent.text
# Register the help text for the .direct command with the userbot.
CMD_HELP.update({
    "direct":
    ".direct <url>\n"
    "Usage: Reply to a link or paste a URL to\n"
    "generate a direct download link\n\n"
    "List of supported URLs:\n"
    "`Google Drive - MEGA.nz - Cloud Mail - Yandex.Disk - AFH - "
    "ZippyShare - MediaFire - SourceForge - OSDN - GitHub`"
})
|
"""Test the UniFi Protect switch platform."""
# pylint: disable=protected-access
from __future__ import annotations
from unittest.mock import AsyncMock, Mock
import pytest
from pyunifiprotect.data import (
Camera,
Light,
RecordingMode,
SmartDetectObjectType,
VideoMode,
)
from homeassistant.components.unifiprotect.const import DEFAULT_ATTRIBUTION
from homeassistant.components.unifiprotect.switch import (
CAMERA_SWITCHES,
LIGHT_SWITCHES,
ProtectSwitchEntityDescription,
)
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_ENTITY_ID, STATE_OFF, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .conftest import (
MockEntityFixture,
assert_entity_counts,
enable_entity,
ids_from_device_description,
)
# Camera switch descriptions minus the detections that require special
# feature support (face/package) and the default-disabled SSH switch.
CAMERA_SWITCHES_BASIC = [
    d
    for d in CAMERA_SWITCHES
    if d.name != "Detections: Face"
    and d.name != "Detections: Package"
    and d.name != "SSH Enabled"
]
# Basic switches that use the generic set-method code path; High FPS and
# Privacy Mode have dedicated tests below.
CAMERA_SWITCHES_NO_EXTRA = [
    d for d in CAMERA_SWITCHES_BASIC if d.name not in ("High FPS", "Privacy Mode")
]
@pytest.fixture(name="light")
async def light_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_light: Light
):
"""Fixture for a single light for testing the switch platform."""
# disable pydantic validation so mocking can happen
Light.__config__.validate_assignment = False
light_obj = mock_light.copy(deep=True)
light_obj._api = mock_entry.api
light_obj.name = "Test Light"
light_obj.is_ssh_enabled = False
light_obj.light_device_settings.is_indicator_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.lights = {
light_obj.id: light_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 2, 1)
yield light_obj
Light.__config__.validate_assignment = True
@pytest.fixture(name="camera")
async def camera_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
camera_obj.feature_flags.has_led_status = True
camera_obj.feature_flags.has_hdr = True
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT, VideoMode.HIGH_FPS]
camera_obj.feature_flags.has_privacy_mask = True
camera_obj.feature_flags.has_speaker = True
camera_obj.feature_flags.has_smart_detect = True
camera_obj.feature_flags.smart_detect_types = [
SmartDetectObjectType.PERSON,
SmartDetectObjectType.VEHICLE,
]
camera_obj.is_ssh_enabled = False
camera_obj.led_settings.is_enabled = False
camera_obj.hdr_mode = False
camera_obj.video_mode = VideoMode.DEFAULT
camera_obj.remove_privacy_zone()
camera_obj.speaker_settings.are_system_sounds_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
camera_obj.smart_detect_settings.object_types = []
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 12, 11)
yield camera_obj
Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_none")
async def camera_none_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
camera_obj.feature_flags.has_led_status = False
camera_obj.feature_flags.has_hdr = False
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
camera_obj.feature_flags.has_privacy_mask = False
camera_obj.feature_flags.has_speaker = False
camera_obj.feature_flags.has_smart_detect = False
camera_obj.is_ssh_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 5, 4)
yield camera_obj
Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_privacy")
async def camera_privacy_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.NEVER
camera_obj.feature_flags.has_led_status = False
camera_obj.feature_flags.has_hdr = False
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
camera_obj.feature_flags.has_privacy_mask = True
camera_obj.feature_flags.has_speaker = False
camera_obj.feature_flags.has_smart_detect = False
camera_obj.add_privacy_zone()
camera_obj.is_ssh_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 6, 5)
yield camera_obj
Camera.__config__.validate_assignment = True
async def test_switch_setup_light(
    hass: HomeAssistant,
    mock_entry: MockEntityFixture,
    light: Light,
):
    """Test switch entity setup for light devices."""
    entity_registry = er.async_get(hass)
    # The second light switch is enabled by default.
    description = LIGHT_SWITCHES[1]
    unique_id, entity_id = ids_from_device_description(
        Platform.SWITCH, light, description
    )
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.unique_id == unique_id
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
    # The first light switch is disabled by default; enable and re-check.
    description = LIGHT_SWITCHES[0]
    # BUG FIX: the entity-id f-string nested double quotes inside a
    # double-quoted literal (a SyntaxError before Python 3.12); build the
    # name separately, matching the camera tests' pattern.
    description_entity_name = description.name.lower().replace(" ", "_")
    unique_id = f"{light.id}_{description.key}"
    entity_id = f"switch.test_light_{description_entity_name}"
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.disabled is True
    assert entity.unique_id == unique_id
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_all(
    hass: HomeAssistant,
    mock_entry: MockEntityFixture,
    camera: Camera,
):
    """Test switch entity setup for camera devices (all enabled feature flags)."""
    entity_registry = er.async_get(hass)
    for description in CAMERA_SWITCHES_BASIC:
        unique_id, entity_id = ids_from_device_description(
            Platform.SWITCH, camera, description
        )
        entity = entity_registry.async_get(entity_id)
        assert entity
        assert entity.unique_id == unique_id
        state = hass.states.get(entity_id)
        assert state
        assert state.state == STATE_OFF
        assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
    # The SSH switch (index 0) is disabled by default; enable and re-check.
    description = CAMERA_SWITCHES[0]
    description_entity_name = (
        description.name.lower().replace(":", "").replace(" ", "_")
    )
    unique_id = f"{camera.id}_{description.key}"
    entity_id = f"switch.test_camera_{description_entity_name}"
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.disabled is True
    assert entity.unique_id == unique_id
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_none(
    hass: HomeAssistant,
    mock_entry: MockEntityFixture,
    camera_none: Camera,
):
    """Test switch entity setup for camera devices (no enabled feature flags)."""
    entity_registry = er.async_get(hass)
    for description in CAMERA_SWITCHES_BASIC:
        # Switches gated on a feature flag are not created for this camera.
        if description.ufp_required_field is not None:
            continue
        unique_id, entity_id = ids_from_device_description(
            Platform.SWITCH, camera_none, description
        )
        entity = entity_registry.async_get(entity_id)
        assert entity
        assert entity.unique_id == unique_id
        state = hass.states.get(entity_id)
        assert state
        assert state.state == STATE_OFF
        assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
    # The SSH switch (index 0) is disabled by default; enable and re-check.
    description = CAMERA_SWITCHES[0]
    description_entity_name = (
        description.name.lower().replace(":", "").replace(" ", "_")
    )
    unique_id = f"{camera_none.id}_{description.key}"
    entity_id = f"switch.test_camera_{description_entity_name}"
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.disabled is True
    assert entity.unique_id == unique_id
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_light_status(hass: HomeAssistant, light: Light):
    """Tests status light switch for lights."""
    description = LIGHT_SWITCHES[1]
    # Replace the pydantic field so the coroutine can be mocked.
    light.__fields__["set_status_light"] = Mock()
    light.set_status_light = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, light, description)
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    light.set_status_light.assert_called_once_with(True)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    light.set_status_light.assert_called_with(False)
async def test_switch_camera_ssh(
    hass: HomeAssistant, camera: Camera, mock_entry: MockEntityFixture
):
    """Tests SSH switch for cameras."""
    description = CAMERA_SWITCHES[0]
    # Replace the pydantic field so the coroutine can be mocked.
    camera.__fields__["set_ssh"] = Mock()
    camera.set_ssh = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
    # The SSH switch is disabled by default and must be enabled first.
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_ssh.assert_called_once_with(True)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_ssh.assert_called_with(False)
@pytest.mark.parametrize("description", CAMERA_SWITCHES_NO_EXTRA)
async def test_switch_camera_simple(
    hass: HomeAssistant, camera: Camera, description: ProtectSwitchEntityDescription
):
    """Tests all simple switches for cameras."""
    assert description.ufp_set_method is not None
    # Install an AsyncMock for whichever device method this description uses.
    camera.__fields__[description.ufp_set_method] = Mock()
    setattr(camera, description.ufp_set_method, AsyncMock())
    set_method = getattr(camera, description.ufp_set_method)
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # Simple switches pass only the boolean state to the device method.
    set_method.assert_called_once_with(True)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    set_method.assert_called_with(False)
async def test_switch_camera_highfps(hass: HomeAssistant, camera: Camera):
    """Tests High FPS switch for cameras."""
    description = CAMERA_SWITCHES[3]
    camera.__fields__["set_video_mode"] = Mock()
    camera.set_video_mode = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # Switch on selects HIGH_FPS; switch off restores the DEFAULT video mode.
    camera.set_video_mode.assert_called_once_with(VideoMode.HIGH_FPS)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_video_mode.assert_called_with(VideoMode.DEFAULT)
async def test_switch_camera_privacy(hass: HomeAssistant, camera: Camera):
    """Tests Privacy Mode switch for cameras."""
    description = CAMERA_SWITCHES[4]
    camera.__fields__["set_privacy"] = Mock()
    camera.set_privacy = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # Enabling privacy passes mic volume 0 and RecordingMode.NEVER.
    camera.set_privacy.assert_called_once_with(True, 0, RecordingMode.NEVER)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # Disabling privacy restores the camera's current mic volume and mode.
    camera.set_privacy.assert_called_with(
        False, camera.mic_volume, camera.recording_settings.mode
    )
async def test_switch_camera_privacy_already_on(
    hass: HomeAssistant, camera_privacy: Camera
):
    """Tests Privacy Mode switch for cameras with privacy mode defaulted on."""
    description = CAMERA_SWITCHES[4]
    camera_privacy.__fields__["set_privacy"] = Mock()
    camera_privacy.set_privacy = AsyncMock()
    _, entity_id = ids_from_device_description(
        Platform.SWITCH, camera_privacy, description
    )
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # Leaving privacy mode resets mic volume to 100 and recording to ALWAYS.
    camera_privacy.set_privacy.assert_called_once_with(False, 100, RecordingMode.ALWAYS)
| """Test the UniFi Protect switch platform."""
# pylint: disable=protected-access
from __future__ import annotations
from unittest.mock import AsyncMock, Mock
import pytest
from pyunifiprotect.data import (
Camera,
Light,
RecordingMode,
SmartDetectObjectType,
VideoMode,
)
from homeassistant.components.unifiprotect.const import DEFAULT_ATTRIBUTION
from homeassistant.components.unifiprotect.switch import (
CAMERA_SWITCHES,
LIGHT_SWITCHES,
ProtectSwitchEntityDescription,
)
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_ENTITY_ID, STATE_OFF, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .conftest import (
MockEntityFixture,
assert_entity_counts,
enable_entity,
ids_from_device_description,
)
# All camera switch descriptions except the "Detections: Face",
# "Detections: Package" and "SSH Enabled" entries.
CAMERA_SWITCHES_BASIC = [
    d
    for d in CAMERA_SWITCHES
    if d.name != "Detections: Face"
    and d.name != "Detections: Package"
    and d.name != "SSH Enabled"
]
# Basic switches minus the two that have dedicated tests with extra
# call arguments (High FPS and Privacy Mode).
CAMERA_SWITCHES_NO_EXTRA = [
    d for d in CAMERA_SWITCHES_BASIC if d.name not in ("High FPS", "Privacy Mode")
]
@pytest.fixture(name="light")
async def light_fixture(
    hass: HomeAssistant, mock_entry: MockEntityFixture, mock_light: Light
):
    """Fixture for a single light for testing the switch platform."""
    # disable pydantic validation so mocking can happen
    Light.__config__.validate_assignment = False
    light_obj = mock_light.copy(deep=True)
    light_obj._api = mock_entry.api
    light_obj.name = "Test Light"
    # Start with both switch-backed settings off so entities report STATE_OFF.
    light_obj.is_ssh_enabled = False
    light_obj.light_device_settings.is_indicator_enabled = False
    mock_entry.api.bootstrap.reset_objects()
    mock_entry.api.bootstrap.lights = {
        light_obj.id: light_obj,
    }
    await hass.config_entries.async_setup(mock_entry.entry.entry_id)
    await hass.async_block_till_done()
    # 2 switch entities are created, 1 enabled by default.
    assert_entity_counts(hass, Platform.SWITCH, 2, 1)
    yield light_obj
    # restore validation for subsequent tests
    Light.__config__.validate_assignment = True
@pytest.fixture(name="camera")
async def camera_fixture(
    hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
    """Fixture for a single camera for testing the switch platform."""
    # disable pydantic validation so mocking can happen
    Camera.__config__.validate_assignment = False
    camera_obj = mock_camera.copy(deep=True)
    camera_obj._api = mock_entry.api
    camera_obj.channels[0]._api = mock_entry.api
    camera_obj.channels[1]._api = mock_entry.api
    camera_obj.channels[2]._api = mock_entry.api
    camera_obj.name = "Test Camera"
    camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
    # Enable every feature flag so all switch entities are created.
    camera_obj.feature_flags.has_led_status = True
    camera_obj.feature_flags.has_hdr = True
    camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT, VideoMode.HIGH_FPS]
    camera_obj.feature_flags.has_privacy_mask = True
    camera_obj.feature_flags.has_speaker = True
    camera_obj.feature_flags.has_smart_detect = True
    camera_obj.feature_flags.smart_detect_types = [
        SmartDetectObjectType.PERSON,
        SmartDetectObjectType.VEHICLE,
    ]
    # Start with every switch-backed setting off so entities report STATE_OFF.
    camera_obj.is_ssh_enabled = False
    camera_obj.led_settings.is_enabled = False
    camera_obj.hdr_mode = False
    camera_obj.video_mode = VideoMode.DEFAULT
    camera_obj.remove_privacy_zone()
    camera_obj.speaker_settings.are_system_sounds_enabled = False
    camera_obj.osd_settings.is_name_enabled = False
    camera_obj.osd_settings.is_date_enabled = False
    camera_obj.osd_settings.is_logo_enabled = False
    camera_obj.osd_settings.is_debug_enabled = False
    camera_obj.smart_detect_settings.object_types = []
    mock_entry.api.bootstrap.reset_objects()
    mock_entry.api.bootstrap.cameras = {
        camera_obj.id: camera_obj,
    }
    await hass.config_entries.async_setup(mock_entry.entry.entry_id)
    await hass.async_block_till_done()
    # 12 switch entities are created, 11 enabled by default.
    assert_entity_counts(hass, Platform.SWITCH, 12, 11)
    yield camera_obj
    # restore validation for subsequent tests
    Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_none")
async def camera_none_fixture(
    hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
    """Fixture for a single camera for testing the switch platform."""
    # disable pydantic validation so mocking can happen
    Camera.__config__.validate_assignment = False
    camera_obj = mock_camera.copy(deep=True)
    camera_obj._api = mock_entry.api
    camera_obj.channels[0]._api = mock_entry.api
    camera_obj.channels[1]._api = mock_entry.api
    camera_obj.channels[2]._api = mock_entry.api
    camera_obj.name = "Test Camera"
    camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
    # All optional feature flags off: only the always-available switches exist.
    camera_obj.feature_flags.has_led_status = False
    camera_obj.feature_flags.has_hdr = False
    camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
    camera_obj.feature_flags.has_privacy_mask = False
    camera_obj.feature_flags.has_speaker = False
    camera_obj.feature_flags.has_smart_detect = False
    camera_obj.is_ssh_enabled = False
    camera_obj.osd_settings.is_name_enabled = False
    camera_obj.osd_settings.is_date_enabled = False
    camera_obj.osd_settings.is_logo_enabled = False
    camera_obj.osd_settings.is_debug_enabled = False
    mock_entry.api.bootstrap.reset_objects()
    mock_entry.api.bootstrap.cameras = {
        camera_obj.id: camera_obj,
    }
    await hass.config_entries.async_setup(mock_entry.entry.entry_id)
    await hass.async_block_till_done()
    # 5 switch entities are created, 4 enabled by default.
    assert_entity_counts(hass, Platform.SWITCH, 5, 4)
    yield camera_obj
    # restore validation for subsequent tests
    Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_privacy")
async def camera_privacy_fixture(
    hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
    """Fixture for a single camera for testing the switch platform."""
    # disable pydantic validation so mocking can happen
    Camera.__config__.validate_assignment = False
    camera_obj = mock_camera.copy(deep=True)
    camera_obj._api = mock_entry.api
    camera_obj.channels[0]._api = mock_entry.api
    camera_obj.channels[1]._api = mock_entry.api
    camera_obj.channels[2]._api = mock_entry.api
    camera_obj.name = "Test Camera"
    # Privacy mode "on" state: recording NEVER plus an active privacy zone.
    camera_obj.recording_settings.mode = RecordingMode.NEVER
    camera_obj.feature_flags.has_led_status = False
    camera_obj.feature_flags.has_hdr = False
    camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
    camera_obj.feature_flags.has_privacy_mask = True
    camera_obj.feature_flags.has_speaker = False
    camera_obj.feature_flags.has_smart_detect = False
    camera_obj.add_privacy_zone()
    camera_obj.is_ssh_enabled = False
    camera_obj.osd_settings.is_name_enabled = False
    camera_obj.osd_settings.is_date_enabled = False
    camera_obj.osd_settings.is_logo_enabled = False
    camera_obj.osd_settings.is_debug_enabled = False
    mock_entry.api.bootstrap.reset_objects()
    mock_entry.api.bootstrap.cameras = {
        camera_obj.id: camera_obj,
    }
    await hass.config_entries.async_setup(mock_entry.entry.entry_id)
    await hass.async_block_till_done()
    # 6 switch entities are created, 5 enabled by default.
    assert_entity_counts(hass, Platform.SWITCH, 6, 5)
    yield camera_obj
    # restore validation for subsequent tests
    Camera.__config__.validate_assignment = True
async def test_switch_setup_light(
    hass: HomeAssistant,
    mock_entry: MockEntityFixture,
    light: Light,
):
    """Test switch entity setup for light devices."""
    entity_registry = er.async_get(hass)
    # Enabled-by-default switch: registered with a state right away.
    description = LIGHT_SWITCHES[1]
    unique_id, entity_id = ids_from_device_description(
        Platform.SWITCH, light, description
    )
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.unique_id == unique_id
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
    # Disabled-by-default switch: registered but gets a state only after
    # being enabled via the registry.
    description = LIGHT_SWITCHES[0]
    unique_id = f"{light.id}_{description.key}"
    entity_id = f"switch.test_light_{description.name.lower().replace(' ', '_')}"
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.disabled is True
    assert entity.unique_id == unique_id
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_all(
    hass: HomeAssistant,
    mock_entry: MockEntityFixture,
    camera: Camera,
):
    """Test switch entity setup for camera devices (all enabled feature flags)."""
    entity_registry = er.async_get(hass)
    for description in CAMERA_SWITCHES_BASIC:
        unique_id, entity_id = ids_from_device_description(
            Platform.SWITCH, camera, description
        )
        entity = entity_registry.async_get(entity_id)
        assert entity
        assert entity.unique_id == unique_id
        state = hass.states.get(entity_id)
        assert state
        assert state.state == STATE_OFF
        assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
    # The SSH switch (CAMERA_SWITCHES[0]) is registered but disabled by
    # default; it only gets a state after being enabled.
    description = CAMERA_SWITCHES[0]
    description_entity_name = (
        description.name.lower().replace(":", "").replace(" ", "_")
    )
    unique_id = f"{camera.id}_{description.key}"
    entity_id = f"switch.test_camera_{description_entity_name}"
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.disabled is True
    assert entity.unique_id == unique_id
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_none(
    hass: HomeAssistant,
    mock_entry: MockEntityFixture,
    camera_none: Camera,
):
    """Test switch entity setup for camera devices (no enabled feature flags)."""
    entity_registry = er.async_get(hass)
    for description in CAMERA_SWITCHES_BASIC:
        if description.ufp_required_field is not None:
            # Descriptions that declare a required field are skipped here;
            # this camera has all optional feature flags disabled.
            continue
        unique_id, entity_id = ids_from_device_description(
            Platform.SWITCH, camera_none, description
        )
        entity = entity_registry.async_get(entity_id)
        assert entity
        assert entity.unique_id == unique_id
        state = hass.states.get(entity_id)
        assert state
        assert state.state == STATE_OFF
        assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
    # The SSH switch is registered but disabled by default.
    description = CAMERA_SWITCHES[0]
    description_entity_name = (
        description.name.lower().replace(":", "").replace(" ", "_")
    )
    unique_id = f"{camera_none.id}_{description.key}"
    entity_id = f"switch.test_camera_{description_entity_name}"
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.disabled is True
    assert entity.unique_id == unique_id
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_light_status(hass: HomeAssistant, light: Light):
    """Tests status light switch for lights."""
    description = LIGHT_SWITCHES[1]
    # Replace the pydantic field entry and the bound method with mocks so the
    # service call does not hit a real device API.
    light.__fields__["set_status_light"] = Mock()
    light.set_status_light = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, light, description)
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # turn_on must forward True to the device call.
    light.set_status_light.assert_called_once_with(True)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # turn_off must forward False.
    light.set_status_light.assert_called_with(False)
async def test_switch_camera_ssh(
    hass: HomeAssistant, camera: Camera, mock_entry: MockEntityFixture
):
    """Tests SSH switch for cameras."""
    description = CAMERA_SWITCHES[0]
    # Mock out the API method behind the switch.
    camera.__fields__["set_ssh"] = Mock()
    camera.set_ssh = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
    # The SSH switch is registered disabled by default, so enable it first.
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_ssh.assert_called_once_with(True)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_ssh.assert_called_with(False)
@pytest.mark.parametrize("description", CAMERA_SWITCHES_NO_EXTRA)
async def test_switch_camera_simple(
    hass: HomeAssistant, camera: Camera, description: ProtectSwitchEntityDescription
):
    """Tests all simple switches for cameras."""
    assert description.ufp_set_method is not None
    # Install an AsyncMock for whichever device method this description uses.
    camera.__fields__[description.ufp_set_method] = Mock()
    setattr(camera, description.ufp_set_method, AsyncMock())
    set_method = getattr(camera, description.ufp_set_method)
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # Simple switches pass only the boolean state to the device method.
    set_method.assert_called_once_with(True)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    set_method.assert_called_with(False)
async def test_switch_camera_highfps(hass: HomeAssistant, camera: Camera):
    """Tests High FPS switch for cameras."""
    description = CAMERA_SWITCHES[3]
    camera.__fields__["set_video_mode"] = Mock()
    camera.set_video_mode = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # Switch on selects HIGH_FPS; switch off restores the DEFAULT video mode.
    camera.set_video_mode.assert_called_once_with(VideoMode.HIGH_FPS)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_video_mode.assert_called_with(VideoMode.DEFAULT)
async def test_switch_camera_privacy(hass: HomeAssistant, camera: Camera):
    """Tests Privacy Mode switch for cameras."""
    description = CAMERA_SWITCHES[4]
    camera.__fields__["set_privacy"] = Mock()
    camera.set_privacy = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # Enabling privacy passes mic volume 0 and RecordingMode.NEVER.
    camera.set_privacy.assert_called_once_with(True, 0, RecordingMode.NEVER)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # Disabling privacy restores the camera's current mic volume and mode.
    camera.set_privacy.assert_called_with(
        False, camera.mic_volume, camera.recording_settings.mode
    )
async def test_switch_camera_privacy_already_on(
    hass: HomeAssistant, camera_privacy: Camera
):
    """Tests Privacy Mode switch for cameras with privacy mode defaulted on."""
    description = CAMERA_SWITCHES[4]
    camera_privacy.__fields__["set_privacy"] = Mock()
    camera_privacy.set_privacy = AsyncMock()
    _, entity_id = ids_from_device_description(
        Platform.SWITCH, camera_privacy, description
    )
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    # Leaving privacy mode resets mic volume to 100 and recording to ALWAYS.
    camera_privacy.set_privacy.assert_called_once_with(False, 100, RecordingMode.ALWAYS)
|
import logging
import threading
import time
import traceback
from dataclasses import dataclass
from functools import reduce
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple, Union
from concurrent.futures.thread import ThreadPoolExecutor
from blspy import G1Element, PrivateKey
from chiapos import DiskProver
from mogua.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from mogua.types.blockchain_format.proof_of_space import ProofOfSpace
from mogua.types.blockchain_format.sized_bytes import bytes32
from mogua.util.config import load_config, save_config
from mogua.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
@dataclass
class PlotInfo:
    """In-memory record for a single loaded plot file."""

    prover: DiskProver
    pool_public_key: Optional[G1Element]  # set for public-key style plots
    pool_contract_puzzle_hash: Optional[bytes32]  # set for pool-contract plots
    plot_public_key: G1Element
    file_size: int  # size in bytes when the plot was loaded
    time_modified: float  # st_mtime at load time; used to detect file changes
def _get_filenames(directory: Path) -> List[Path]:
try:
if not directory.exists():
log.warning(f"Directory: {directory} does not exist.")
return []
except OSError as e:
log.warning(f"Error checking if directory {directory} exists: {e}")
return []
all_files: List[Path] = []
try:
for child in directory.iterdir():
if not child.is_dir():
# If it is a file ending in .plot, add it - work around MacOS ._ files
if child.suffix == ".plot" and not child.name.startswith("._"):
all_files.append(child)
else:
log.debug(f"Not checking subdirectory {child}, subdirectories not added by default")
except Exception as e:
log.warning(f"Error reading directory {directory} {e}")
return all_files
def get_plot_filenames(config: Dict) -> Dict[Path, List[Path]]:
    """Return a map from each resolved plot directory to its plot files."""
    resolved_dirs = [Path(name).resolve() for name in config["plot_directories"]]
    return {directory: _get_filenames(directory) for directory in resolved_dirs}
def parse_plot_info(memo: bytes) -> Tuple[Union[G1Element, bytes32], G1Element, PrivateKey]:
    """Decode a plot memo into (pool pk or puzzle hash, farmer pk, local master sk).

    The memo layout is distinguished purely by length: 128 bytes for a
    pool-public-key plot, 112 bytes for a pool-contract plot.
    """
    pk_memo_len = 48 + 48 + 32
    ph_memo_len = 32 + 48 + 32
    if len(memo) == pk_memo_len:
        # Pool public key memo.
        return (
            G1Element.from_bytes(memo[:48]),
            G1Element.from_bytes(memo[48:96]),
            PrivateKey.from_bytes(memo[96:]),
        )
    if len(memo) == ph_memo_len:
        # Pool contract puzzle hash memo.
        return (
            bytes32(memo[:32]),
            G1Element.from_bytes(memo[32:80]),
            PrivateKey.from_bytes(memo[80:]),
        )
    raise ValueError(f"Invalid number of bytes {len(memo)}")
def stream_plot_info_pk(
    pool_public_key: G1Element,
    farmer_public_key: G1Element,
    local_master_sk: PrivateKey,
):
    """Serialize a pool-public-key style plot memo (48 + 48 + 32 bytes)."""
    # Counterpart of stream_plot_info_ph; this variant embeds the pool pk.
    parts = (bytes(pool_public_key), bytes(farmer_public_key), bytes(local_master_sk))
    data = b"".join(parts)
    assert len(data) == (48 + 48 + 32)
    return data
def stream_plot_info_ph(
    pool_contract_puzzle_hash: bytes32,
    farmer_public_key: G1Element,
    local_master_sk: PrivateKey,
):
    """Serialize a pool-contract style plot memo (32 + 48 + 32 bytes)."""
    # Counterpart of stream_plot_info_pk; the puzzle hash is raw bytes already.
    parts = (pool_contract_puzzle_hash, bytes(farmer_public_key), bytes(local_master_sk))
    data = b"".join(parts)
    assert len(data) == (32 + 48 + 32)
    return data
def add_plot_directory(str_path: str, root_path: Path) -> Dict:
    """Add the resolved *str_path* to the harvester's plot directories, save, and return the config."""
    config = load_config(root_path, "config.yaml")
    resolved = str(Path(str_path).resolve())
    directories = config["harvester"]["plot_directories"]
    # Avoid duplicate entries for the same resolved path.
    if resolved not in directories:
        directories.append(resolved)
    save_config(root_path, "config.yaml", config)
    return config
def get_plot_directories(root_path: Path) -> List[str]:
    """Return the harvester's configured plot directories as resolved path strings."""
    config = load_config(root_path, "config.yaml")
    resolved = (str(Path(entry).resolve()) for entry in config["harvester"]["plot_directories"])
    return list(resolved)
def remove_plot_directory(str_path: str, root_path: Path) -> None:
    """Remove *str_path* from the harvester's plot directories and save the config."""
    config = load_config(root_path, "config.yaml")
    str_paths: List[str] = config["harvester"]["plot_directories"]
    # First remove an exact string match, if present.
    if str_path in str_paths:
        str_paths.remove(str_path)
    # Then remove a resolved-path match among the remaining entries.
    resolved_paths = [Path(entry).resolve() for entry in str_paths]
    target = Path(str_path).resolve()
    if target in resolved_paths:
        resolved_paths.remove(target)
    config["harvester"]["plot_directories"] = [str(entry) for entry in resolved_paths]
    save_config(root_path, "config.yaml", config)
def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Dict[Path, int],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    match_str: Optional[str],
    show_memo: bool,
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Dict[Path, int], Set[Path]]:
    """Scan all configured plot directories and (re)load valid plots.

    Args:
        provers: Plots already loaded; reused when a file's mtime is unchanged.
        failed_to_open_filenames: Files that previously failed, mapped to the
            epoch time of the failure; each is retried at most every 20 minutes.
        farmer_public_keys: If given, only plots whose farmer key is listed are kept.
        pool_public_keys: If given, only plots whose pool key is listed are kept.
        match_str: If given, only filenames containing this substring are considered.
        show_memo: If True, log each plot's reconstructed memo in hex.
        root_path: Root used to locate config.yaml.
        open_no_key_filenames: If True, still open plots whose keys are not allowed.

    Returns:
        (changed, new_provers, failed_to_open_filenames, no_key_filenames).
    """
    start_time = time.time()
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    # BUG FIX: the subscript inside the f-string previously reused single
    # quotes (f'...{config_file['plot_directories']}...'), which is a
    # SyntaxError on every Python version before 3.12 (PEP 701).
    log.info(f'Searching directories {config_file["plot_directories"]}')
    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    # Shared across worker threads; guarded by plot_ids_lock.
    plot_ids: Set[bytes32] = set()
    plot_ids_lock = threading.Lock()
    if match_str is not None:
        log.info(f'Only loading plots that contain "{match_str}" in the file or directory name')

    def process_file(filename: Path) -> Tuple[int, Dict]:
        # Returns (size of the plot in bytes, {filename: PlotInfo}) or (0, {}).
        new_provers: Dict[Path, PlotInfo] = {}
        nonlocal changed
        filename_str = str(filename)
        if match_str is not None and match_str not in filename_str:
            return 0, new_provers
        if filename.exists():
            if filename in failed_to_open_filenames and (time.time() - failed_to_open_filenames[filename]) < 1200:
                # Try once every 20 minutes to open the file
                return 0, new_provers
            if filename in provers:
                try:
                    stat_info = filename.stat()
                except Exception as e:
                    log.error(f"Failed to open file (unknown). {e}")
                    return 0, new_provers
                if stat_info.st_mtime == provers[filename].time_modified:
                    # File is unchanged: reuse the existing PlotInfo.
                    with plot_ids_lock:
                        if provers[filename].prover.get_id() in plot_ids:
                            log.warning(f"Have multiple copies of the plot (unknown), not adding it.")
                            return 0, new_provers
                        plot_ids.add(provers[filename].prover.get_id())
                    new_provers[filename] = provers[filename]
                    return stat_info.st_size, new_provers
            try:
                prover = DiskProver(str(filename))
                expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                stat_info = filename.stat()
                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.
                if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot (unknown). Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    return 0, new_provers
                (
                    pool_public_key_or_puzzle_hash,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())
                # Only use plots that correct keys associated with them
                if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                    log.warning(f"Plot (unknown) has a farmer public key that is not in the farmer's pk list.")
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        return 0, new_provers
                if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                    pool_public_key = pool_public_key_or_puzzle_hash
                    pool_contract_puzzle_hash = None
                else:
                    assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                    pool_public_key = None
                    pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
                if (
                    pool_public_keys is not None
                    and pool_public_key is not None
                    and pool_public_key not in pool_public_keys
                ):
                    log.warning(f"Plot (unknown) has a pool public key that is not in the farmer's pool pk list.")
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        return 0, new_provers
                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
                )
                with plot_ids_lock:
                    if prover.get_id() in plot_ids:
                        log.warning(f"Have multiple copies of the plot (unknown), not adding it.")
                        return 0, new_provers
                    plot_ids.add(prover.get_id())
                new_provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    pool_contract_puzzle_hash,
                    plot_public_key,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file (unknown). {e} {tb}")
                failed_to_open_filenames[filename] = int(time.time())
                return 0, new_provers
            log.info(f"Found plot (unknown) of size {new_provers[filename].prover.get_size()}")
            if show_memo:
                plot_memo: bytes32
                if pool_contract_puzzle_hash is None:
                    plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
                else:
                    plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
                plot_memo_str: str = plot_memo.hex()
                log.info(f"Memo: {plot_memo_str}")
            return stat_info.st_size, new_provers
        return 0, new_provers

    def reduce_function(x: Tuple[int, Dict], y: Tuple[int, Dict]) -> Tuple[int, Dict]:
        # Combine (size, provers) results from all worker threads.
        (total_size1, new_provers1) = x
        (total_size2, new_provers2) = y
        return total_size1 + total_size2, {**new_provers1, **new_provers2}

    with ThreadPoolExecutor() as executor:
        initial_value: Tuple[int, Dict[Path, PlotInfo]] = (0, {})
        total_size, new_provers = reduce(reduce_function, executor.map(process_file, all_filenames), initial_value)
    log.info(
        f"Loaded a total of {len(new_provers)} plots of size {total_size / (1024 ** 4)} TiB, in"
        f" {time.time()-start_time} seconds"
    )
    return changed, new_provers, failed_to_open_filenames, no_key_filenames
def find_duplicate_plot_IDs(all_filenames=None) -> None:
    """Log a warning for every plot ID that appears in more than one filename."""
    names = [str(entry) for entry in (all_filenames or [])]
    seen = set()
    duplicates = set()
    for name in names:
        # Only the trailing "-<64 char plot ID>.plot" (69 chars) is inspected;
        # full plot-ID parsing and K-size checks are skipped for speed.
        candidate = name.split("-")[-1]
        if len(candidate) != 69:
            log.warning(f"(unknown) does not end with -[64 char plot ID].plot")
            continue
        if candidate in seen:
            duplicates.add(candidate)
        else:
            seen.add(candidate)
    for dup in duplicates:
        message = dup + " found in multiple files:\n"
        for name in (entry for entry in names if dup in entry):
            message += "\t" + name + "\n"
        log.warning(f"{message}")
| import logging
import threading
import time
import traceback
from dataclasses import dataclass
from functools import reduce
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple, Union
from concurrent.futures.thread import ThreadPoolExecutor
from blspy import G1Element, PrivateKey
from chiapos import DiskProver
from mogua.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from mogua.types.blockchain_format.proof_of_space import ProofOfSpace
from mogua.types.blockchain_format.sized_bytes import bytes32
from mogua.util.config import load_config, save_config
from mogua.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
@dataclass
class PlotInfo:
    """In-memory record for a single loaded plot file."""

    prover: DiskProver
    pool_public_key: Optional[G1Element]  # set for public-key style plots
    pool_contract_puzzle_hash: Optional[bytes32]  # set for pool-contract plots
    plot_public_key: G1Element
    file_size: int  # size in bytes when the plot was loaded
    time_modified: float  # st_mtime at load time; used to detect file changes
def _get_filenames(directory: Path) -> List[Path]:
    """Return the .plot files directly inside *directory* (non-recursive)."""
    try:
        if not directory.exists():
            log.warning(f"Directory: {directory} does not exist.")
            return []
    except OSError as e:
        log.warning(f"Error checking if directory {directory} exists: {e}")
        return []
    all_files: List[Path] = []
    try:
        for child in directory.iterdir():
            if not child.is_dir():
                # If it is a file ending in .plot, add it - work around MacOS ._ files
                if child.suffix == ".plot" and not child.name.startswith("._"):
                    all_files.append(child)
            else:
                log.debug(f"Not checking subdirectory {child}, subdirectories not added by default")
    except Exception as e:
        # Keep whatever was collected before the error; just log and return.
        log.warning(f"Error reading directory {directory} {e}")
    return all_files
def get_plot_filenames(config: Dict) -> Dict[Path, List[Path]]:
    """Return a map from each resolved plot directory to its plot files."""
    # Returns a map from directory to a list of all plots in the directory
    directory_names: List[str] = config["plot_directories"]
    all_files: Dict[Path, List[Path]] = {}
    for directory_name in directory_names:
        directory = Path(directory_name).resolve()
        all_files[directory] = _get_filenames(directory)
    return all_files
def parse_plot_info(memo: bytes) -> Tuple[Union[G1Element, bytes32], G1Element, PrivateKey]:
    """Decode a plot memo into (pool pk or puzzle hash, farmer pk, local master sk).

    The two memo layouts are distinguished purely by total length.
    """
    # Parses the plot info bytes into keys
    if len(memo) == (48 + 48 + 32):
        # This is a public key memo
        return (
            G1Element.from_bytes(memo[:48]),
            G1Element.from_bytes(memo[48:96]),
            PrivateKey.from_bytes(memo[96:]),
        )
    elif len(memo) == (32 + 48 + 32):
        # This is a pool_contract_puzzle_hash memo
        return (
            bytes32(memo[:32]),
            G1Element.from_bytes(memo[32:80]),
            PrivateKey.from_bytes(memo[80:]),
        )
    else:
        raise ValueError(f"Invalid number of bytes {len(memo)}")
def stream_plot_info_pk(
    pool_public_key: G1Element,
    farmer_public_key: G1Element,
    local_master_sk: PrivateKey,
):
    """Serialize a pool-public-key style plot memo (48 + 48 + 32 bytes)."""
    # There are two ways to stream plot info: with a pool public key, or with a pool contract puzzle hash.
    # This one streams the public key, into bytes
    data = bytes(pool_public_key) + bytes(farmer_public_key) + bytes(local_master_sk)
    assert len(data) == (48 + 48 + 32)
    return data
def stream_plot_info_ph(
    pool_contract_puzzle_hash: bytes32,
    farmer_public_key: G1Element,
    local_master_sk: PrivateKey,
):
    """Serialize a pool-contract style plot memo (32 + 48 + 32 bytes)."""
    # There are two ways to stream plot info: with a pool public key, or with a pool contract puzzle hash.
    # This one streams the pool contract puzzle hash, into bytes
    data = pool_contract_puzzle_hash + bytes(farmer_public_key) + bytes(local_master_sk)
    assert len(data) == (32 + 48 + 32)
    return data
def add_plot_directory(str_path: str, root_path: Path) -> Dict:
    """Add the resolved *str_path* to the harvester's plot directories, save, and return the config."""
    config = load_config(root_path, "config.yaml")
    # Only append if the resolved path is not already configured.
    if str(Path(str_path).resolve()) not in config["harvester"]["plot_directories"]:
        config["harvester"]["plot_directories"].append(str(Path(str_path).resolve()))
    save_config(root_path, "config.yaml", config)
    return config
def get_plot_directories(root_path: Path) -> List[str]:
    """Return the harvester's configured plot directories as resolved path strings."""
    config = load_config(root_path, "config.yaml")
    return [str(Path(str_path).resolve()) for str_path in config["harvester"]["plot_directories"]]
def remove_plot_directory(str_path: str, root_path: Path) -> None:
    """Remove *str_path* from the harvester's plot directories and save the config."""
    config = load_config(root_path, "config.yaml")
    str_paths: List[str] = config["harvester"]["plot_directories"]
    # If path str matches exactly, remove
    if str_path in str_paths:
        str_paths.remove(str_path)
    # If the resolved path matches a remaining entry's resolved path, remove it too
    new_paths = [Path(sp).resolve() for sp in str_paths]
    if Path(str_path).resolve() in new_paths:
        new_paths.remove(Path(str_path).resolve())
    config["harvester"]["plot_directories"] = [str(np) for np in new_paths]
    save_config(root_path, "config.yaml", config)
def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Dict[Path, int],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    match_str: Optional[str],
    show_memo: bool,
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Dict[Path, int], Set[Path]]:
    """Scan the configured plot directories and (re)load plot files in parallel.

    Returns (changed, provers, failed_to_open_filenames, no_key_filenames).
    """
    start_time = time.time()
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')
    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    # Shared across worker threads; guarded by plot_ids_lock to detect duplicates.
    plot_ids: Set[bytes32] = set()
    plot_ids_lock = threading.Lock()
    if match_str is not None:
        log.info(f'Only loading plots that contain "{match_str}" in the file or directory name')

    def process_file(filename: Path) -> Tuple[int, Dict]:
        # Worker: returns (plot size in bytes, {filename: PlotInfo}), or (0, {}) on skip/failure.
        new_provers: Dict[Path, PlotInfo] = {}
        nonlocal changed
        filename_str = str(filename)
        if match_str is not None and match_str not in filename_str:
            return 0, new_provers
        if filename.exists():
            if filename in failed_to_open_filenames and (time.time() - failed_to_open_filenames[filename]) < 1200:
                # Try once every 20 minutes to open the file
                return 0, new_provers
            if filename in provers:
                try:
                    stat_info = filename.stat()
                except Exception as e:
                    log.error(f"Failed to open file (unknown). {e}")
                    return 0, new_provers
                # Unmodified since last load: reuse the cached PlotInfo.
                if stat_info.st_mtime == provers[filename].time_modified:
                    with plot_ids_lock:
                        if provers[filename].prover.get_id() in plot_ids:
                            log.warning(f"Have multiple copies of the plot (unknown), not adding it.")
                            return 0, new_provers
                        plot_ids.add(provers[filename].prover.get_id())
                    new_provers[filename] = provers[filename]
                    return stat_info.st_size, new_provers
            try:
                prover = DiskProver(str(filename))
                expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                stat_info = filename.stat()
                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.
                if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot (unknown). Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    return 0, new_provers
                (
                    pool_public_key_or_puzzle_hash,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())
                # Only use plots that correct keys associated with them
                if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                    log.warning(f"Plot (unknown) has a farmer public key that is not in the farmer's pk list.")
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        return 0, new_provers
                # The memo carries either a pool public key or a pool contract puzzle hash.
                if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                    pool_public_key = pool_public_key_or_puzzle_hash
                    pool_contract_puzzle_hash = None
                else:
                    assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                    pool_public_key = None
                    pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
                if (
                    pool_public_keys is not None
                    and pool_public_key is not None
                    and pool_public_key not in pool_public_keys
                ):
                    log.warning(f"Plot (unknown) has a pool public key that is not in the farmer's pool pk list.")
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        return 0, new_provers
                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
                )
                with plot_ids_lock:
                    if prover.get_id() in plot_ids:
                        log.warning(f"Have multiple copies of the plot (unknown), not adding it.")
                        return 0, new_provers
                    plot_ids.add(prover.get_id())
                new_provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    pool_contract_puzzle_hash,
                    plot_public_key,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file (unknown). {e} {tb}")
                failed_to_open_filenames[filename] = int(time.time())
                return 0, new_provers
            log.info(f"Found plot (unknown) of size {new_provers[filename].prover.get_size()}")
            if show_memo:
                plot_memo: bytes32
                if pool_contract_puzzle_hash is None:
                    plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
                else:
                    plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
                plot_memo_str: str = plot_memo.hex()
                log.info(f"Memo: {plot_memo_str}")
            return stat_info.st_size, new_provers
        return 0, new_provers

    def reduce_function(x: Tuple[int, Dict], y: Tuple[int, Dict]) -> Tuple[int, Dict]:
        # Fold worker results into one (total size, merged provers) pair.
        (total_size1, new_provers1) = x
        (total_size2, new_provers2) = y
        return total_size1 + total_size2, {**new_provers1, **new_provers2}

    with ThreadPoolExecutor() as executor:
        initial_value: Tuple[int, Dict[Path, PlotInfo]] = (0, {})
        total_size, new_provers = reduce(reduce_function, executor.map(process_file, all_filenames), initial_value)
    log.info(
        f"Loaded a total of {len(new_provers)} plots of size {total_size / (1024 ** 4)} TiB, in"
        f" {time.time()-start_time} seconds"
    )
    return changed, new_provers, failed_to_open_filenames, no_key_filenames
def find_duplicate_plot_IDs(all_filenames=None) -> None:
    """Log a warning for every plot ID that appears in more than one filename."""
    if all_filenames is None:
        all_filenames = []
    plot_ids_set = set()
    duplicate_plot_ids = set()
    all_filenames_str: List[str] = []
    for filename in all_filenames:
        filename_str: str = str(filename)
        all_filenames_str.append(filename_str)
        # Skipped parsing and verifying plot ID for faster performance
        # Skipped checking K size for faster performance
        # Only checks end of filenames: 64 char plot ID + .plot = 69 characters
        plot_id: str = filename_str.split("-")[-1]
        if len(plot_id) != 69:
            log.warning(f"(unknown) does not end with -[64 char plot ID].plot")
            continue
        if plot_id in plot_ids_set:
            duplicate_plot_ids.add(plot_id)
        else:
            plot_ids_set.add(plot_id)
    for plot_id in duplicate_plot_ids:
        matches = [fs for fs in all_filenames_str if plot_id in fs]
        log_message: str = plot_id + " found in multiple files:\n"
        for filename_str in matches:
            log_message += "\t" + filename_str + "\n"
        log.warning(f"{log_message}")
|
"""Extract data from transcription format."""
import sys
import datetime
import io
import pathlib
import tabulate
import pandas as pd
from . import config
substitution_keys = ("*", "+", "^", "&", "$", "%")
def process_source(game, value):
    """Record the source citation, given as 'Title, Month D, YYYY', on the game."""
    title, raw_date = (part.strip() for part in value.split(",", 1))
    parsed = datetime.datetime.strptime(raw_date, "%B %d, %Y")
    game["source"] = {"title": title, "date": parsed.strftime("%Y-%m-%d")}
    return game
def process_status(game, status):
    """Set game["status"]["code"] (and optional reason) from 'code[, reason]'.

    Exits the program on an unrecognized status code.
    """
    if "," in status:
        # maxsplit=1 so a reason that itself contains a comma no longer
        # raises ValueError on unpacking (previously split(",") unbounded).
        status, reason = (x.strip() for x in status.split(",", 1))
    else:
        status, reason = status.strip(), ""
    if status in ["postponed", "completed early", "abandoned"]:
        game["status"]["code"] = status
    else:
        print(f"Unknown status '{status}'")
        sys.exit(1)
    if reason:
        game["status"]["reason"] = reason
    return game
def process_linescore(game: dict, value: str):
    """Parse a linescore entry 'Club: i1 i2 ... - total' into the matching team.

    Sets team["score"] (total, kept as a string) and team["innings"]
    (lower-cased inning-by-inning marks).  Prints a diagnostic and returns
    None on any ill-formed input; returns the game dict on success.
    """
    try:
        club, score = (x.strip() for x in value.split(":"))
    except ValueError:
        print(f"ERROR: Ill-formed linescore string '{value}'")
        return
    for team in game["teams"]:
        if club == team["name"]:
            break
    else:
        print(f"Unknown team '{club}' in linescore")
        return
    try:
        # The same try also catches a missing/extra '-' (unpack ValueError),
        # which previously propagated uncaught.
        byinning, total = map(lambda x: x.strip(), score.split("-"))
        int(total)  # validate the total is numeric; stored as a string below
    except ValueError:
        # Bug fix: a non-numeric total was reported but still assigned to
        # the team; now the entry is rejected.
        print(f"ERROR: Ill-formed linescore string '{value}'")
        return
    team["score"] = total
    team["innings"] = [x.lower() for x in byinning.split(" ")]
    return game
def parse_name(text: str) -> dict:
    """Parse out a personal name in form 'last, first'. Return as a dict."""
    if "," not in text:
        return {'last': text.strip()}
    parts = [p.strip() for p in text.split(",")]
    # zip truncates any extra comma-separated fields, as before
    return dict(zip(['last', 'first'], parts))
def parse_umpires(game: dict, value: str):
    """Append one parsed name per semicolon-separated umpire entry."""
    parsed = (parse_name(entry) for entry in value.split(";"))
    game["umpires"].extend(parsed)
def parse_date(game: dict, value: str):
    """Store the ISO date string and derive the season (year) from it."""
    game['data'].update(date=value, season=value[:4])
def parse_number(game: dict, value: str):
    """Store the game number (kept as a string)."""
    game['data']['number'] = value
def parse_league(game: dict, value: str):
    """Normalize the league name and propagate it to the game and both teams."""
    # Bare names like "National" become "National League"
    if "League" not in value and "Association" not in value:
        value = f"{value} League"
    game['data']['league'] = value
    for team in game['teams']:
        team['league'] = value
def parse_status(game: dict, value: str):
    """Record status code, plus a reason when the 'code, reason' form is used."""
    if "," in value:
        # maxsplit=1 so a reason containing commas no longer raises
        # ValueError on unpacking (previously split(",") unbounded).
        game['data']['status_code'], game['data']['status_reason'] = (
            (x.strip() for x in value.split(",", 1))
        )
    else:
        game['data']['status_code'] = value
def parse_team(game: dict, align: int, value: str):
    """Set a team's name; align is 0 for the away team, 1 for the home team."""
    game['teams'][align]['name'] = value
def parse_duration(game: dict, value: str):
    """Store the game duration (the 'T' header) verbatim."""
    game['data']['duration'] = value
def parse_player_table(team: dict, data):
    """Consume player lines from the (key, value) stream until a TOTALS row.

    Each key is '<name> @ <pos>'; a leading '(...)' prefix and any
    substitution marker are stripped from the name.  Parsed players are
    appended to team['players'].
    """
    k = ""  # bug fix: previously unbound if the very first line was ill-formed
    while True:
        try:
            k, v = next(data)
        except ValueError:
            # Line had the wrong number of ':'-separated fields.  Report it
            # relative to the previous key and skip it — previously the loop
            # fell through and re-processed the stale key, duplicating the
            # prior player entry.
            print(f"WARNING: Ill-formed player line after '{k}'")
            continue
        if k == "TOTALS":
            break
        name, pos = (x.strip() for x in k.split("@"))
        name = name.split(")")[-1]
        if name.startswith(substitution_keys):
            name = name[1:]
        team['players'].append(dict(
            **parse_name(name),
            **{'pos': pos}
        ))
def extract_pairs(text: str):
    """Yield a (key, value) tuple for each non-blank 'key: value' line."""
    for raw in text.split("\n"):
        line = raw.strip()
        if not line:
            continue
        # Split on the first ':' only; lines without one yield a 1-tuple
        yield tuple(field.strip() for field in line.split(":", 1))
def parse_game(text: str) -> dict:
    """Parse one game transcription into a nested game dict."""
    game = {
        "data": {'date': None, 'number': None, 'status_code': "final"},
        "teams": [
            {'alignment': "away", 'name': None, 'league': None,
             'players': []},
            {'alignment': "home", 'name': None, 'league': None,
             'players': []}
        ],
        "umpires": []
    }
    # Dispatch table mapping header keys to their field parsers.
    dispatch = {
        'date': parse_date,
        'number': parse_number,
        'league': parse_league,
        'status': parse_status,
        'T': parse_duration,
        'U': parse_umpires,
        'away': lambda g, val: parse_team(g, 0, val),
        'home': lambda g, val: parse_team(g, 1, val),
        'line': process_linescore
    }
    data = extract_pairs(text)
    try:
        while True:
            k, v = next(data)
            # A key matching a team name introduces that team's player table.
            for team in game['teams']:
                if k == team['name']:
                    parse_player_table(team, data)
            try:
                fn = dispatch[k]
            except KeyError:
                continue
            fn(game, v)
    except StopIteration:
        pass
    # Bug fix: double quotes nested inside a double-quoted f-string are a
    # SyntaxError before Python 3.12 (PEP 701); use single-quoted keys.
    print(f"{game['data']['date']}#{game['data']['number']} "
          f"{game['teams'][0]['name']} at {game['teams'][1]['name']}")
    return game
def separate_games(fn: pathlib.Path):
    """Iterate over the games in 'fn' and separate the text of each."""
    buffer = io.StringIO()
    with fn.open() as handle:
        for line in handle:
            if not line.startswith("---"):
                buffer.write(line)
                continue
            # A '---' separator line ends the current game
            yield buffer.getvalue()
            buffer = io.StringIO()
    # Trailing game (no final separator); skip if it is only whitespace
    leftover = buffer.getvalue().strip()
    if leftover:
        yield leftover
def process_files(inpath: pathlib.Path):
    """Parse every transcription file under 'inpath' into a list of game dicts.

    Exits with an error when no usable .txt files are found.
    """
    skip = ["readme.txt", "notes.txt"]
    fnlist = [fn for fn in sorted(inpath.glob("*.txt"))
              if fn.name.lower() not in skip]
    if not fnlist:
        print(f"No files found at '{inpath}'")
        sys.exit(1)
    games = []
    for fn in fnlist:
        games.extend(parse_game(text) for text in separate_games(fn))
    return games
def index_games(games: list, source: str) -> pd.DataFrame:
    """Build a one-row-per-game DataFrame of game metadata for 'source'."""
    rows = []
    for game_id, game in enumerate(games):
        d = game['data']
        rows.append({
            'game.id': game_id,
            'source': source,
            'key': None,           # filled in later by identify_games
            'season': d['season'],
            'date': d['date'],
            'number': d['number'],
            'league': d['league'],
            'status_code': d['status_code'],
            'status_reason': d.get('status_reason', None),
        })
    return pd.DataFrame(rows)
def index_teams(games: list) -> pd.DataFrame:
    """Build a one-row-per-team DataFrame across all games."""
    rows = []
    for game_id, game in enumerate(games):
        for team in game['teams']:
            rows.append({
                'game.id': game_id,
                'team.name': team['name'],
                'team.league': team['league'],
                'team.align': team['alignment'][0],  # 'a' / 'h'
                'team.score': team.get('score', None),
            })
    return pd.DataFrame(rows)
def index_players(games: list) -> pd.DataFrame:
    """Build a one-row-per-player DataFrame across all games.

    person.seq is '<a|b><nn>': 'a' for the away roster, 'b' for home,
    numbered in listed order.
    """
    rows = []
    for game_id, game in enumerate(games):
        for prefix, team in zip(("a", "b"), game['teams']):
            for j, player in enumerate(team['players']):
                rows.append({
                    'game.id': game_id,
                    'team.name': team['name'],
                    'team.league': team['league'],
                    'person.seq': f"{prefix}{j:02d}",
                    'person.name.last': player['last'],
                    'person.name.first': player.get('first', None),
                    'pos': player['pos'],
                })
    return pd.DataFrame(rows)
def identify_teams(df: pd.DataFrame, year: int) -> pd.DataFrame:
    """Attach 'team.key' identifiers from the season's support file.

    Left-joins data/support/<year>/teams.csv on (league, name); prints the
    unmatched teams and exits with an error when any team lacks a key.
    """
    index = pd.read_csv(f"data/support/{year}/teams.csv")
    df = df.merge(index, how='left', on=['team.league', 'team.name'])
    # Rows whose join produced no key are unmatched teams
    unmatched = (
        df.query("`team.key`.isnull()")
        [['team.league', 'team.name']]
        .drop_duplicates()
        .sort_values(['team.league', 'team.name'])
    )
    if not unmatched.empty:
        print("The following teams were not matched to a key:")
        print(tabulate.tabulate(unmatched, headers='keys', showindex=False))
        sys.exit(1)
    return df
def identify_games(df: pd.DataFrame, teams: pd.DataFrame) -> pd.DataFrame:
    """Merge away/home team columns onto the game index and build the game key.

    The key is '<yyyymmdd>-<keyA>-<keyB>-<number>' with the two team keys in
    lexicographic order, so it is independent of home/away alignment.
    """
    return (
        df
        # away team rows -> 'team1.*' columns
        .merge(teams.query("`team.align` == 'a'")
               [['game.id',
                 'team.name', 'team.align', 'team.score', 'team.key']]
               .rename(columns=lambda x: x.replace('team.', 'team1.')),
               how='left', on='game.id')
        # home team rows -> 'team2.*' columns
        .merge(teams.query("`team.align` == 'h'")
               [['game.id',
                 'team.name', 'team.align', 'team.score', 'team.key']]
               .rename(columns=lambda x: x.replace('team.', 'team2.')),
               how='left', on='game.id')
        .assign(**{
            'key': lambda x: (
                x['date'].str.replace("-", "") + "-" +
                x[['team1.key', 'team2.key']].fillna("").min(axis='columns') +
                "-" +
                x[['team1.key', 'team2.key']].fillna("").max(axis='columns') +
                "-" +
                x['number']
            )
        })
    )
def update_game_index(path: pathlib.Path,
                      source: str, league: str,
                      df: pd.DataFrame) -> pd.DataFrame:
    """Update the game index for league with data from source.

    Replaces this source's previous rows in <path>/<league>/games.csv with
    'df', rewrites the file, and returns the merged index.
    """
    # League folder name: spaces and hyphens stripped
    league = league.replace(" ", "").replace("-", "")
    (path/league).mkdir(exist_ok=True, parents=True)
    dfcols = ['key', 'league', 'date', 'number', 'source',
              'team1.name', 'team1.align', 'team1.score',
              'team2.name', 'team2.align', 'team2.score',
              'status_code', 'status_reason']
    try:
        index = pd.read_csv(path/league/"games.csv", usecols=dfcols).fillna("")
    except FileNotFoundError:
        # First run for this league: start from an empty index
        index = pd.DataFrame(columns=dfcols)
    # NOTE(review): 'source' is interpolated into the query expression; a
    # source name containing a quote would break it — confirm inputs are safe.
    index = (
        pd.concat([index.query(f"source != '{source}'"), df],
                  ignore_index=True, sort=False)
        .reindex(columns=dfcols)
        .fillna("")
        .sort_values(['league', 'key'])
    )
    index.to_csv(path/league/"games.csv", index=False, float_format='%d')
    return index
def update_player_index(path: pathlib.Path,
                        source: str, league: str,
                        df: pd.DataFrame) -> pd.DataFrame:
    """Update the player index for league with data from source.

    Replaces this source's previous rows in <path>/<league>/players.csv,
    carrying forward any hand-entered 'gloss' names from the existing file,
    and returns the merged index.
    """
    league = league.replace(" ", "").replace("-", "")
    (path/league).mkdir(exist_ok=True, parents=True)
    dfcols = ['team.league', 'team.key',
              'team.name', 'source',
              'key',
              'gloss.name.last', 'gloss.name.first',
              'person.name.last', 'person.name.first',
              'person.seq', 'pos']
    try:
        index = pd.read_csv(path/league/"players.csv", usecols=dfcols).fillna("")
    except FileNotFoundError:
        index = pd.DataFrame(columns=dfcols)
    # Re-attach existing gloss names to the incoming rows by identity columns
    df = (
        df.fillna("")
        .merge(index[['source', 'team.name', 'key',
                      'person.name.last', 'person.name.first',
                      'person.seq',
                      'gloss.name.last', 'gloss.name.first']],
               how='left',
               on=['source', 'team.name', 'key',
                   'person.name.last', 'person.name.first',
                   'person.seq'])
    )
    index = (
        pd.concat([index.query(f"source != '{source}'"), df],
                  ignore_index=True, sort=False)
        .reindex(columns=dfcols)
        .fillna("")
        # Sort by the gloss name when present, else the transcribed name
        .assign(
            sortlast=lambda x: (
                x['gloss.name.last'].replace("", pd.NA)
                .fillna(x['person.name.last']).fillna("")
            ),
            sortfirst=lambda x: (
                x['gloss.name.first'].replace("", pd.NA)
                .fillna(x['person.name.first']).fillna(""))
        )
        .sort_values(['team.name', 'sortlast', 'sortfirst', 'key', 'source'])
        .drop(columns=['sortlast', 'sortfirst'])
    )
    index.to_csv(path/league/"players.csv", index=False, float_format='%d')
    return index
def main(source: str):
    """Process one transcription source, named '<year>-<paper>', end to end.

    Parses the source's files, resolves team keys, and rewrites the per-league
    game and player index files under data/index/<year>/.
    """
    try:
        year, paper = source.split("-")
    except ValueError:
        print(f"Invalid source name '{source}'")
        sys.exit(1)
    inpath = config.data_path/"transcript"/year/source
    outpath = pathlib.Path(f"data/index/{year}")
    outpath.mkdir(exist_ok=True, parents=True)
    data = process_files(inpath)
    games_teams = index_teams(data).pipe(identify_teams, year)
    # games_teams.to_csv("games_teams.csv", index=False, float_format='%d')
    games = index_games(data, source).pipe(identify_games, games_teams)
    for (league, group) in games.groupby('league'):
        print(f"Writing {len(group):5d} games for {league}")
        update_game_index(outpath, source, league, group)
    # Attach each player's game key, then write per-league player indexes
    players = (
        index_players(data).pipe(identify_teams, year)
        .merge(games[['game.id', 'source', 'key']], how='left', on='game.id')
        .reindex(columns=['team.league', 'team.key', 'team.name',
                          'source', 'key',
                          'person.name.last', 'person.name.first',
                          'person.seq', 'pos'])
        .sort_values(['team.league', 'team.name',
                      'person.name.last', 'key', 'source'])
    )
    for (league, group) in players.groupby('team.league'):
        print(f"Writing {len(group):5d} players for {league}")
        update_player_index(outpath, source, league, group)
| """Extract data from transcription format."""
import sys
import datetime
import io
import pathlib
import tabulate
import pandas as pd
from . import config
substitution_keys = ("*", "+", "^", "&", "$", "%")
def process_source(game, value):
title, d = (x.strip() for x in value.split(",", 1))
d = datetime.datetime.strptime(d, "%B %d, %Y")
d = d.strftime("%Y-%m-%d")
game["source"] = {"title": title, "date": d}
return game
def process_status(game, status):
if "," in status:
status, reason = (x.strip() for x in status.split(","))
else:
status, reason = status.strip(), ""
if status in ["postponed", "completed early", "abandoned"]:
game["status"]["code"] = status
else:
print(f"Unknown status '{status}'")
sys.exit(1)
if reason:
game["status"]["reason"] = reason
return game
def process_linescore(game: dict, value: str):
    """Parse a linescore entry 'Club: i1 i2 ... - total' into the matching team.

    Sets team["score"] (total, kept as a string) and team["innings"]
    (lower-cased inning-by-inning marks).  Prints a diagnostic and returns
    None on any ill-formed input; returns the game dict on success.
    """
    try:
        club, score = (x.strip() for x in value.split(":"))
    except ValueError:
        print(f"ERROR: Ill-formed linescore string '{value}'")
        return
    for team in game["teams"]:
        if club == team["name"]:
            break
    else:
        print(f"Unknown team '{club}' in linescore")
        return
    try:
        # The same try also catches a missing/extra '-' (unpack ValueError),
        # which previously propagated uncaught.
        byinning, total = map(lambda x: x.strip(), score.split("-"))
        int(total)  # validate the total is numeric; stored as a string below
    except ValueError:
        # Bug fix: a non-numeric total was reported but still assigned to
        # the team; now the entry is rejected.
        print(f"ERROR: Ill-formed linescore string '{value}'")
        return
    team["score"] = total
    team["innings"] = [x.lower() for x in byinning.split(" ")]
    return game
def parse_name(text: str) -> dict:
"""Parse out a personal name in form 'last, first'. Return as a dict."""
if "," in text:
return dict(zip(['last', 'first'],
(x.strip() for x in text.split(","))))
else:
return {'last': text.strip()}
def parse_umpires(game: dict, value: str):
for name in value.split(";"):
game["umpires"].append(parse_name(name))
def parse_date(game: dict, value: str):
game['data']['date'] = value
game['data']['season'] = value[:4]
def parse_number(game: dict, value: str):
game['data']['number'] = value
def parse_league(game: dict, value: str):
if "League" not in value and "Association" not in value:
value = value + " League"
game['data']['league'] = value
for team in game['teams']:
team['league'] = value
def parse_status(game: dict, value: str):
if "," in value:
game['data']['status_code'], game['data']['status_reason'] = (
(x.strip() for x in value.split(","))
)
else:
game['data']['status_code'] = value
def parse_team(game: dict, align: int, value: str):
game['teams'][align]['name'] = value
def parse_duration(game: dict, value: str):
game['data']['duration'] = value
def parse_player_table(team: dict, data):
    """Consume player lines from the (key, value) stream until a TOTALS row.

    Each key is '<name> @ <pos>'; a leading '(...)' prefix and any
    substitution marker are stripped from the name.  Parsed players are
    appended to team['players'].
    """
    k = ""  # bug fix: previously unbound if the very first line was ill-formed
    while True:
        try:
            k, v = next(data)
        except ValueError:
            # Line had the wrong number of ':'-separated fields.  Report it
            # relative to the previous key and skip it — previously the loop
            # fell through and re-processed the stale key, duplicating the
            # prior player entry.
            print(f"WARNING: Ill-formed player line after '{k}'")
            continue
        if k == "TOTALS":
            break
        name, pos = (x.strip() for x in k.split("@"))
        name = name.split(")")[-1]
        if name.startswith(substitution_keys):
            name = name[1:]
        team['players'].append(dict(
            **parse_name(name),
            **{'pos': pos}
        ))
def extract_pairs(text: str):
for line in (x.strip() for x in text.split("\n")):
if not line:
continue
yield tuple(x.strip() for x in line.split(":", 1))
def parse_game(text: str) -> dict:
game = {
"data": {'date': None, 'number': None, 'status_code': "final"},
"teams": [
{'alignment': "away", 'name': None, 'league': None,
'players': []},
{'alignment': "home", 'name': None, 'league': None,
'players': []}
],
"umpires": []
}
dispatch = {
'date': parse_date,
'number': parse_number,
'league': parse_league,
'status': parse_status,
'T': parse_duration,
'U': parse_umpires,
'away': lambda g, val: parse_team(g, 0, val),
'home': lambda g, val: parse_team(g, 1, val),
'line': process_linescore
}
data = extract_pairs(text)
try:
while True:
k, v = next(data)
for team in game['teams']:
if k == team['name']:
parse_player_table(team, data)
try:
fn = dispatch[k]
except KeyError:
continue
fn(game, v)
except StopIteration:
pass
print(f"{game['data']['date']}#{game['data']['number']} "
f"{game['teams'][0]['name']} at {game['teams'][1]['name']}")
return game
def separate_games(fn: pathlib.Path):
"""Iterate over the games in 'fn' and separate the text of each."""
game = io.StringIO()
with fn.open() as f:
for line in f:
if line.startswith("---"):
yield game.getvalue()
game = io.StringIO()
else:
game.write(line)
value = game.getvalue().strip()
if value:
yield value
def process_files(inpath: pathlib.Path):
fnlist = [fn for fn in sorted(inpath.glob("*.txt"))
if fn.name.lower() not in ["readme.txt", "notes.txt"]]
if not fnlist:
print(f"No files found at '{inpath}'")
sys.exit(1)
return [
parse_game(game)
for fn in fnlist
for game in separate_games(fn)
]
def index_games(games: list, source: str) -> pd.DataFrame:
df = pd.DataFrame(
[{'game.id': i,
'source': source,
'key': None,
'season': game['data']['season'],
'date': game['data']['date'],
'number': game['data']['number'],
'league': game['data']['league'],
'status_code': game['data']['status_code'],
'status_reason': game['data'].get('status_reason', None)}
for (i, game) in enumerate(games)]
)
return df
def index_teams(games: list) -> pd.DataFrame:
df = pd.DataFrame([
{'game.id': i,
'team.name': team['name'],
'team.league': team['league'],
'team.align': team['alignment'][0],
'team.score': team.get('score', None)}
for (i, game) in enumerate(games)
for team in game['teams']
])
return df
def index_players(games: list) -> pd.DataFrame:
return pd.DataFrame([
{'game.id': i,
'team.name': team['name'],
'team.league': team['league'],
'person.seq': f"{prefix}{j:02d}",
'person.name.last': player['last'],
'person.name.first': player.get('first', None),
'pos': player['pos']}
for (i, game) in enumerate(games)
for (prefix, team) in zip(("a", "b"), game['teams'])
for (j, player) in enumerate(team['players'])
])
def identify_teams(df: pd.DataFrame, year: int) -> pd.DataFrame:
index = pd.read_csv(f"data/support/{year}/teams.csv")
df = df.merge(index, how='left', on=['team.league', 'team.name'])
unmatched = (
df.query("`team.key`.isnull()")
[['team.league', 'team.name']]
.drop_duplicates()
.sort_values(['team.league', 'team.name'])
)
if not unmatched.empty:
print("The following teams were not matched to a key:")
print(tabulate.tabulate(unmatched, headers='keys', showindex=False))
sys.exit(1)
return df
def identify_games(df: pd.DataFrame, teams: pd.DataFrame) -> pd.DataFrame:
return (
df
.merge(teams.query("`team.align` == 'a'")
[['game.id',
'team.name', 'team.align', 'team.score', 'team.key']]
.rename(columns=lambda x: x.replace('team.', 'team1.')),
how='left', on='game.id')
.merge(teams.query("`team.align` == 'h'")
[['game.id',
'team.name', 'team.align', 'team.score', 'team.key']]
.rename(columns=lambda x: x.replace('team.', 'team2.')),
how='left', on='game.id')
.assign(**{
'key': lambda x: (
x['date'].str.replace("-", "") + "-" +
x[['team1.key', 'team2.key']].fillna("").min(axis='columns') +
"-" +
x[['team1.key', 'team2.key']].fillna("").max(axis='columns') +
"-" +
x['number']
)
})
)
def update_game_index(path: pathlib.Path,
source: str, league: str,
df: pd.DataFrame) -> pd.DataFrame:
"""Update the game index for league with data from source."""
league = league.replace(" ", "").replace("-", "")
(path/league).mkdir(exist_ok=True, parents=True)
dfcols = ['key', 'league', 'date', 'number', 'source',
'team1.name', 'team1.align', 'team1.score',
'team2.name', 'team2.align', 'team2.score',
'status_code', 'status_reason']
try:
index = pd.read_csv(path/league/"games.csv", usecols=dfcols).fillna("")
except FileNotFoundError:
index = pd.DataFrame(columns=dfcols)
index = (
pd.concat([index.query(f"source != '{source}'"), df],
ignore_index=True, sort=False)
.reindex(columns=dfcols)
.fillna("")
.sort_values(['league', 'key'])
)
index.to_csv(path/league/"games.csv", index=False, float_format='%d')
return index
def update_player_index(path: pathlib.Path,
source: str, league: str,
df: pd.DataFrame) -> pd.DataFrame:
"""Update the player index for league with data from source."""
league = league.replace(" ", "").replace("-", "")
(path/league).mkdir(exist_ok=True, parents=True)
dfcols = ['team.league', 'team.key',
'team.name', 'source',
'key',
'gloss.name.last', 'gloss.name.first',
'person.name.last', 'person.name.first',
'person.seq', 'pos']
try:
index = pd.read_csv(path/league/"players.csv", usecols=dfcols).fillna("")
except FileNotFoundError:
index = pd.DataFrame(columns=dfcols)
df = (
df.fillna("")
.merge(index[['source', 'team.name', 'key',
'person.name.last', 'person.name.first',
'person.seq',
'gloss.name.last', 'gloss.name.first']],
how='left',
on=['source', 'team.name', 'key',
'person.name.last', 'person.name.first',
'person.seq'])
)
index = (
pd.concat([index.query(f"source != '{source}'"), df],
ignore_index=True, sort=False)
.reindex(columns=dfcols)
.fillna("")
.assign(
sortlast=lambda x: (
x['gloss.name.last'].replace("", pd.NA)
.fillna(x['person.name.last']).fillna("")
),
sortfirst=lambda x: (
x['gloss.name.first'].replace("", pd.NA)
.fillna(x['person.name.first']).fillna(""))
)
.sort_values(['team.name', 'sortlast', 'sortfirst', 'key', 'source'])
.drop(columns=['sortlast', 'sortfirst'])
)
index.to_csv(path/league/"players.csv", index=False, float_format='%d')
return index
def main(source: str):
try:
year, paper = source.split("-")
except ValueError:
print(f"Invalid source name '{source}'")
sys.exit(1)
inpath = config.data_path/"transcript"/year/source
outpath = pathlib.Path(f"data/index/{year}")
outpath.mkdir(exist_ok=True, parents=True)
data = process_files(inpath)
games_teams = index_teams(data).pipe(identify_teams, year)
# games_teams.to_csv("games_teams.csv", index=False, float_format='%d')
games = index_games(data, source).pipe(identify_games, games_teams)
for (league, group) in games.groupby('league'):
print(f"Writing {len(group):5d} games for {league}")
update_game_index(outpath, source, league, group)
players = (
index_players(data).pipe(identify_teams, year)
.merge(games[['game.id', 'source', 'key']], how='left', on='game.id')
.reindex(columns=['team.league', 'team.key', 'team.name',
'source', 'key',
'person.name.last', 'person.name.first',
'person.seq', 'pos'])
.sort_values(['team.league', 'team.name',
'person.name.last', 'key', 'source'])
)
for (league, group) in players.groupby('team.league'):
print(f"Writing {len(group):5d} players for {league}")
update_player_index(outpath, source, league, group)
|
import time
from datetime import datetime, timedelta
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
from flask.views import MethodView
from config.setting import BOT_TOKEN
from models import User
hitcon_zeroday_base_url = "https://zeroday.hitcon.org"
hitcon_zeroday_all_url = "https://zeroday.hitcon.org/vulnerability/all"
hitcon_zeroday_disclosed_url = "https://zeroday.hitcon.org/vulnerability/disclosed/"
def get_last_page_num(hitcon_zeroday_url):
    """Return the last page number of a ZeroDay listing, or 0 when unavailable."""
    response = requests.get(hitcon_zeroday_url)
    soup = BeautifulSoup(response.text, 'html.parser')
    try:
        last_page = soup.find("span", {"class": "last-page"})
        return int(last_page.text)
    except Exception:
        # Missing element or non-numeric text: treat as "no pages"
        return 0
def get_report_info(report_url):
    """Fetch a ZeroDay report page and extract its status, risk and type.

    Returns {} unless the report's "Last Update" date is exactly yesterday
    in UTC+8, so a daily run only picks up newly-updated reports.
    """
    r = requests.get(report_url)
    if r.status_code != 200:
        return {}
    soup = BeautifulSoup(r.text, 'html.parser')
    last_update_str = soup.find("div", {"class": "status-descr"}).text
    last_update_date = datetime.strptime(last_update_str, r"Last Update : %Y/%m/%d").date()
    # UTC+8 "now" (the site's local timezone).
    # NOTE(review): datetime.utcnow() is deprecated in 3.12; consider
    # datetime.now(timezone.utc) — would need 'timezone' imported.
    now_utc_plus_8 = datetime.utcnow() + timedelta(hours=8)
    # Get only yesterday's data
    now_date = datetime.strptime((now_utc_plus_8 - timedelta(days=1)).strftime("%Y/%m/%d"), "%Y/%m/%d").date()
    if now_date != last_update_date:
        return {}
    data = {
        "status": soup.find("div", {"class": "status-label"}).text
    }
    # "風險" = risk level, "類型" = vulnerability type
    for li in soup.find("div", {"class": "info"}).findAll("li"):
        if "風險" in li.text:
            data["risk"] = li.text.split(":")[-1]
        elif "類型" in li.text:
            data["type"] = li.text.split(":")[-1]
    return data
def search_page(hitcon_zeroday_base_url, hitcon_zeroday_category_url):
    """Walk a ZeroDay category's listing pages and collect Markdown lines
    for reports updated yesterday (two lines appended per report)."""
    last_page_num = get_last_page_num(hitcon_zeroday_category_url)
    msg_list = []
    msg_list_len = len(msg_list)
    for page_num in range(1, last_page_num+1):
        page_url = urljoin(hitcon_zeroday_category_url, f"page/{page_num}")
        r = requests.get(page_url)
        if r.status_code != 200:
            break
        soup = BeautifulSoup(r.text, 'html.parser')
        # parse all report blocks on this listing page
        for li in soup.findAll("li", {"class": "strip"}):
            a = li.find("h4").find("a")
            report_url = urljoin(hitcon_zeroday_base_url, a["href"])
            title = a.text
            _data = get_report_info(report_url)
            if _data:
                # Bug fix: double quotes nested inside a double-quoted
                # f-string are a SyntaxError before Python 3.12 (PEP 701).
                # NOTE(review): assumes 'risk'/'type' were present on the
                # report page; KeyError otherwise — confirm.
                msg_list.append(f"[[{_data['status']} - {_data['risk']}]] {_data['type']}")
                msg_list.append(f"[{title}]({report_url})")
        # Listing is paged; stop as soon as a page contributes nothing new
        if len(msg_list) == msg_list_len:
            break
        msg_list_len = len(msg_list)
    return msg_list
def send_message(chat_id, msg):
    """Send 'msg' to a Telegram chat via the Bot API sendMessage endpoint.

    NOTE(review): 'msg' is interpolated into the query string without URL
    encoding; characters such as '&' or '#' in the message would truncate or
    corrupt the request — confirm callers pre-encode (newlines arrive as '%0A').
    """
    api_url = f"https://api.telegram.org/bot{BOT_TOKEN}/sendMessage?chat_id={chat_id}&parse_mode=Markdown&disable_web_page_preview=1&text={msg}"
    requests.get(api_url)
class App(MethodView):
    # Flask endpoint: scrape yesterday's HITCON ZeroDay reports and push the
    # digest to every registered Telegram user.
    def get(self):
        msg_list = search_page(hitcon_zeroday_base_url, hitcon_zeroday_all_url)
        # '%0A' is a URL-encoded newline: one report line per message row
        report_msg = "%0A".join(msg_list)
        for user in User.get_all():
            print(user, report_msg)
            send_message(user["chat_id"], report_msg)
        return "OK"
| import time
from datetime import datetime, timedelta
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
from flask.views import MethodView
from config.setting import BOT_TOKEN
from models import User
hitcon_zeroday_base_url = "https://zeroday.hitcon.org"
hitcon_zeroday_all_url = "https://zeroday.hitcon.org/vulnerability/all"
hitcon_zeroday_disclosed_url = "https://zeroday.hitcon.org/vulnerability/disclosed/"
def get_last_page_num(hitcon_zeroday_url):
r = requests.get(hitcon_zeroday_url)
soup = BeautifulSoup(r.text, 'html.parser')
try:
return int(soup.find("span", {"class": "last-page"}).text)
except Exception:
return 0
def get_report_info(report_url):
r = requests.get(report_url)
if r.status_code != 200:
return {}
soup = BeautifulSoup(r.text, 'html.parser')
last_update_str = soup.find("div", {"class": "status-descr"}).text
last_update_date = datetime.strptime(last_update_str, r"Last Update : %Y/%m/%d").date()
# Get utf+8 datetime
now_utc_plus_8 = datetime.utcnow() + timedelta(hours=8)
# Get only yesterday's data
now_date = datetime.strptime((now_utc_plus_8 - timedelta(days=1)).strftime("%Y/%m/%d"), "%Y/%m/%d").date()
if now_date != last_update_date:
return {}
data = {
"status": soup.find("div", {"class": "status-label"}).text
}
for li in soup.find("div", {"class": "info"}).findAll("li"):
if "風險" in li.text:
data["risk"] = li.text.split(":")[-1]
elif "類型" in li.text:
data["type"] = li.text.split(":")[-1]
return data
def search_page(hitcon_zeroday_base_url, hitcon_zeroday_category_url):
last_page_num = get_last_page_num(hitcon_zeroday_category_url)
msg_list = []
msg_list_len = len(msg_list)
for page_num in range(1, last_page_num+1):
page_url = urljoin(hitcon_zeroday_category_url, f"page/{page_num}")
r = requests.get(page_url)
if r.status_code != 200:
break
soup = BeautifulSoup(r.text, 'html.parser')
# parse all blocks
for li in soup.findAll("li", {"class": "strip"}):
a = li.find("h4").find("a")
report_url = urljoin(hitcon_zeroday_base_url, a["href"])
title = a.text
_data = get_report_info(report_url)
if _data:
msg_list.append(f"[[{_data['status']} - {_data['risk']}]] {_data['type']}")
msg_list.append(f"[{title}]({report_url})")
# break if not append new data
if len(msg_list) == msg_list_len:
break
msg_list_len = len(msg_list)
return msg_list
def send_message(chat_id, msg):
api_url = f"https://api.telegram.org/bot{BOT_TOKEN}/sendMessage?chat_id={chat_id}&parse_mode=Markdown&disable_web_page_preview=1&text={msg}"
requests.get(api_url)
class App(MethodView):
    """HTTP endpoint: scrape all reports and broadcast them to every user."""

    def get(self):
        msg_list = search_page(hitcon_zeroday_base_url, hitcon_zeroday_all_url)
        # "%0A" is the URL-encoded newline Telegram expects in the query string.
        report_msg = "%0A".join(msg_list)
        for user in User.get_all():
            print(user, report_msg)
            send_message(user["chat_id"], report_msg)
        return "OK"
|
import abc
from collections import Counter
from contextlib import suppress
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Union
import discord
from discord.ext import commands, tasks
from discord.ext.events.utils import fetch_recent_audit_log_entry
from discord.ext.menus.views import ViewMenuPages
from discord.ui import button
from helpers import checks, constants, time
from helpers.pagination import AsyncEmbedFieldsPageSource
from helpers.utils import FakeUser, FetchUserConverter
class ModerationUserFriendlyTime(time.UserFriendlyTime):
    # UserFriendlyTime preset for moderation commands: the free-text part of
    # the argument is converted with clean_content and defaults to
    # "No reason provided" when the invoker gives only a duration.
    def __init__(self):
        super().__init__(commands.clean_content, default="No reason provided")
def message_channel(ctx, message):
    """Resolve *message* (a message, a text channel, or None) to message/channel ids."""
    if isinstance(message, discord.TextChannel):
        # For a channel, point at its most recent message.
        return dict(message_id=message.last_message_id, channel_id=message.id)
    resolved = message or ctx.message
    return dict(message_id=resolved.id, channel_id=resolved.channel.id)
@dataclass
class Action(abc.ABC):
    """A single moderation action (kick, ban, mute, ...) taken against a member.

    Concrete subclasses define the class attributes ``type`` (stable string
    stored in MongoDB), ``past_tense``, ``emoji`` and ``color``, and override
    :meth:`execute` to perform the Discord-side effect before persisting.
    """

    target: discord.Member       # member the action is taken against
    user: discord.Member         # moderator who performed the action
    reason: str
    guild_id: int
    channel_id: int = None       # channel of the invoking message, if any
    message_id: int = None       # invoking message, used for the logs link
    created_at: datetime = None  # defaults to now (UTC) in __post_init__
    expires_at: datetime = None  # set only for temporary punishments
    note: str = None
    automod_bucket: str = None
    resolved: bool = None        # False while a temporary punishment is active
    _id: int = None              # MongoDB _id, assigned when saved

    def __post_init__(self):
        if self.created_at is None:
            self.created_at = datetime.now(timezone.utc)
        if self.expires_at is not None:
            # Temporary punishments start unresolved; the expiry loop resolves them.
            self.resolved = False

    @classmethod
    def build_from_mongo(cls, bot, x):
        """Reconstruct the appropriate Action subclass from a MongoDB document."""
        guild = bot.get_guild(x["guild_id"])
        user = guild.get_member(x["user_id"]) or FakeUser(x["user_id"])
        target = guild.get_member(x["target_id"]) or FakeUser(x["target_id"])
        kwargs = {
            "_id": x["_id"],
            "target": target,
            "user": user,
            "reason": x["reason"],
            "guild_id": x["guild_id"],
            "channel_id": x.get("channel_id"),
            "message_id": x.get("message_id"),
            "created_at": x["created_at"],
        }
        if "expires_at" in x:
            kwargs["expires_at"] = x["expires_at"]
            kwargs["resolved"] = x["resolved"]
        if "automod_bucket" in x:
            kwargs["automod_bucket"] = x["automod_bucket"]
        if "note" in x:
            kwargs["note"] = x["note"]
        return cls_dict[x["type"]](**kwargs)

    @property
    def duration(self):
        """Length of the punishment, or None if permanent."""
        if self.expires_at is None:
            return None
        return self.expires_at - self.created_at

    @property
    def logs_url(self):
        """Admin-panel link to the logs around the invoking message, if known."""
        if self.message_id is None or self.channel_id is None:
            return None
        return f"https://admin.poketwo.net/logs/{self.guild_id}/{self.channel_id}?before={self.message_id+1}"

    def to_dict(self):
        """Serialize to a MongoDB document (the _id is added by the caller)."""
        base = {
            "target_id": self.target.id,
            "user_id": self.user.id,
            "type": self.type,
            "reason": self.reason,
            "guild_id": self.guild_id,
            "channel_id": self.channel_id,
            "message_id": self.message_id,
            "created_at": self.created_at,
        }
        if self.expires_at is not None:
            base["resolved"] = self.resolved
            base["expires_at"] = self.expires_at
        if self.automod_bucket is not None:
            base["automod_bucket"] = self.automod_bucket
        return base

    def to_user_embed(self):
        """Embed DM'd to the punished member."""
        embed = discord.Embed(
            title=f"{self.emoji} {self.past_tense.title()}",
            description=f"You have been {self.past_tense}.",
            color=self.color,
        )
        reason = self.reason or "No reason provided"
        embed.add_field(name="Reason", value=reason, inline=False)
        if self.duration is not None:
            embed.add_field(name="Duration", value=time.human_timedelta(self.duration))
            embed.set_footer(text="Expires")
            embed.timestamp = self.expires_at
        return embed

    def to_log_embed(self):
        """Embed posted to the guild's moderation log channel."""
        reason = self.reason or "No reason provided"
        if self.logs_url is not None:
            reason += f" ([Logs]({self.logs_url}))"
        embed = discord.Embed(color=self.color)
        embed.set_author(name=f"{self.user} (ID: {self.user.id})", icon_url=self.user.display_avatar.url)
        embed.set_thumbnail(url=self.target.display_avatar.url)
        embed.add_field(
            name=f"{self.emoji} {self.past_tense.title()} {self.target} (ID: {self.target.id})",
            value=reason,
        )
        if self.duration is not None:
            embed.set_footer(text=f"Duration • {time.human_timedelta(self.duration)}\nExpires")
            embed.timestamp = self.expires_at
        return embed

    def to_info_embed(self):
        """Embed shown by the `history info` command."""
        reason = self.reason or "No reason provided"
        embed = discord.Embed(color=self.color, title=f"{self.emoji} {self.past_tense.title()} {self.target}")
        embed.set_author(name=f"{self.user}", icon_url=self.user.display_avatar.url)
        embed.set_thumbnail(url=self.target.display_avatar.url)
        embed.add_field(name="Reason", value=reason, inline=False)
        if self.note is not None:
            embed.add_field(name="Note", value=self.note)
        if self.logs_url is not None:
            embed.add_field(name="Logs", value=f"[Link]({self.logs_url})", inline=False)
        if self.duration is not None:
            duration = f"{time.human_timedelta(self.duration)}"
            # Fixed: quote 'R' with single quotes (nested double quotes are a
            # SyntaxError before Python 3.12) and close the parenthesis around
            # the relative timestamp.
            expires_at = f"{discord.utils.format_dt(self.expires_at)} ({discord.utils.format_dt(self.expires_at, 'R')})"
            embed.add_field(name="Duration", value=duration, inline=False)
            embed.add_field(name="Expires At", value=expires_at, inline=False)
        embed.timestamp = self.created_at
        return embed

    async def notify(self):
        """DM the punished member; failures to deliver are ignored."""
        with suppress(discord.Forbidden, discord.HTTPException):
            await self.target.send(embed=self.to_user_embed())

    @abc.abstractmethod
    async def execute(self, ctx):
        # Subclasses perform their Discord-side effect, then call this to persist.
        await ctx.bot.get_cog("Moderation").save_action(self)
class Kick(Action):
    """Removes a member from the guild."""

    type = "kick"
    past_tense = "kicked"
    emoji = "\N{WOMANS BOOTS}"
    color = discord.Color.orange()

    async def execute(self, ctx):
        audit_reason = self.reason or f"Action done by {self.user} (ID: {self.user.id})"
        await ctx.guild.kick(self.target, reason=audit_reason)
        await super().execute(ctx)
class Ban(Action):
    """Bans a member from the guild; user DM includes appeal instructions."""

    type = "ban"
    past_tense = "banned"
    emoji = "\N{HAMMER}"
    color = discord.Color.red()

    def to_user_embed(self):
        embed = super().to_user_embed()
        embed.description += " Please do not DM staff members to get unpunished. If you would like to appeal, [click here](https://forms.poketwo.net/)."
        return embed

    async def execute(self, ctx):
        audit_reason = self.reason or f"Action done by {self.user} (ID: {self.user.id})"
        await ctx.guild.ban(self.target, reason=audit_reason)
        await super().execute(ctx)
class Unban(Action):
    """Lifts a ban from the guild."""

    type = "unban"
    past_tense = "unbanned"
    emoji = "\N{OPEN LOCK}"
    color = discord.Color.green()

    async def execute(self, ctx):
        audit_reason = self.reason or f"Action done by {self.user} (ID: {self.user.id})"
        await ctx.guild.unban(self.target, reason=audit_reason)
        await super().execute(ctx)
class Warn(Action):
    """A formal warning; recorded but has no Discord-side effect."""

    type = "warn"
    past_tense = "warned"
    emoji = "\N{WARNING SIGN}"
    color = discord.Color.orange()

    async def execute(self, ctx):
        # Nothing to do on Discord itself; just persist the action.
        await super().execute(ctx)
class Timeout(Action):
    """Places a member in Discord's native timeout until ``expires_at``."""

    type = "timeout"
    past_tense = "placed in timeout"
    emoji = "\N{SPEAKER WITH CANCELLATION STROKE}"
    color = discord.Color.blue()

    async def execute(self, ctx):
        audit_reason = self.reason or f"Action done by {self.user} (ID: {self.user.id})"
        await self.target.edit(timed_out_until=self.expires_at, reason=audit_reason)
        await super().execute(ctx)
class _Untimeout(Action):
    """Shared presentation attributes for the two untimeout variants below."""

    type = "untimeout"
    past_tense = "removed from timeout"
    emoji = "\N{SPEAKER}"
    color = discord.Color.green()
class Untimeout(_Untimeout):
    """Clears a member's native timeout on Discord."""

    async def execute(self, ctx):
        audit_reason = self.reason or f"Action done by {self.user} (ID: {self.user.id})"
        await self.target.edit(timed_out_until=None, reason=audit_reason)
        await super().execute(ctx)
class SymbolicUntimeout(_Untimeout):
    """Records an untimeout without editing the member (timeout already gone)."""

    async def execute(self, ctx):
        await super().execute(ctx)
class Mute(Action):
    """Assigns the "Muted" role and persists the flag in MongoDB."""

    type = "mute"
    past_tense = "muted"
    emoji = "\N{SPEAKER WITH CANCELLATION STROKE}"
    color = discord.Color.blue()

    async def execute(self, ctx):
        audit_reason = self.reason or f"Action done by {self.user} (ID: {self.user.id})"
        muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
        await self.target.add_roles(muted_role, reason=audit_reason)
        # Persist so the mute survives a leave/rejoin (see on_member_join).
        await ctx.bot.mongo.db.member.update_one({"_id": self.target.id}, {"$set": {"muted": True}}, upsert=True)
        await super().execute(ctx)
class Unmute(Action):
    """Removes the "Muted" role and clears the persisted flag."""

    type = "unmute"
    past_tense = "unmuted"
    emoji = "\N{SPEAKER}"
    color = discord.Color.green()

    async def execute(self, ctx):
        audit_reason = self.reason or f"Action done by {self.user} (ID: {self.user.id})"
        muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
        await self.target.remove_roles(muted_role, reason=audit_reason)
        await ctx.bot.mongo.db.member.update_one({"_id": self.target.id}, {"$set": {"muted": False}}, upsert=True)
        await super().execute(ctx)
class TradingMute(Action):
    """Mutes a member in trading channels by swapping roles, and persists it."""

    type = "trading_mute"
    past_tense = "muted in trading"
    emoji = "\N{SPEAKER WITH CANCELLATION STROKE}"
    color = discord.Color.blue()

    async def execute(self, ctx):
        audit_reason = self.reason or f"Action done by {self.user} (ID: {self.user.id})"
        muted_role = discord.utils.get(ctx.guild.roles, name="Trading Muted")
        trading_role = discord.utils.get(ctx.guild.roles, name="Trading")
        await self.target.add_roles(muted_role, reason=audit_reason)
        await self.target.remove_roles(trading_role, reason=audit_reason)
        # Persist so the trading mute survives a leave/rejoin.
        await ctx.bot.mongo.db.member.update_one(
            {"_id": self.target.id}, {"$set": {"trading_muted": True}}, upsert=True
        )
        await super().execute(ctx)
class TradingUnmute(Action):
    """Removes the trading mute role and clears the persisted flag."""

    type = "trading_unmute"
    past_tense = "unmuted in trading"
    emoji = "\N{SPEAKER}"
    color = discord.Color.green()

    async def execute(self, ctx):
        audit_reason = self.reason or f"Action done by {self.user} (ID: {self.user.id})"
        muted_role = discord.utils.get(ctx.guild.roles, name="Trading Muted")
        await self.target.remove_roles(muted_role, reason=audit_reason)
        await ctx.bot.mongo.db.member.update_one(
            {"_id": self.target.id}, {"$set": {"trading_muted": False}}, upsert=True
        )
        await super().execute(ctx)
@dataclass
class FakeContext:
    # Minimal stand-in for commands.Context, used when executing actions
    # outside of a command invocation (rejoin handling, the expiry loop).
    bot: commands.Bot
    guild: discord.Guild
cls_dict = {x.type: x for x in (Kick, Ban, Unban, Warn, Timeout, Untimeout, Mute, Unmute, TradingMute, TradingUnmute)}
class BanConverter(commands.Converter):
    """Converts a user ID or full username into a guild ban entry."""

    async def convert(self, ctx, arg):
        # Fast path: numeric ID lookup.
        try:
            return await ctx.guild.fetch_ban(discord.Object(id=int(arg)))
        except discord.NotFound:
            raise commands.BadArgument("This member is not banned.")
        except ValueError:
            pass
        # Fall back to matching the full username against the ban list.
        bans = await ctx.guild.bans()
        entry = discord.utils.find(lambda b: str(b.user) == arg, bans)
        if entry is None:
            raise commands.BadArgument("This member is not banned.")
        return entry
class MemberOrIdConverter(commands.Converter):
    """Resolves a guild member, falling back to a FakeUser for raw numeric IDs.

    Lets moderation commands target users who have already left the guild.
    """

    async def convert(self, ctx, arg):
        with suppress(commands.MemberNotFound):
            return await commands.MemberConverter().convert(ctx, arg)
        try:
            return FakeUser(int(arg))
        except ValueError:
            raise commands.MemberNotFound(arg)
class Moderation(commands.Cog):
    """For moderation."""

    def __init__(self, bot):
        self.bot = bot
        self.cls_dict = cls_dict
        # Background loop that reverses expired temporary punishments.
        self.check_actions.start()

    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Reapplies persisted mutes when a punished member rejoins the guild."""
        # NOTE(review): Mute/TradingMute save member docs keyed only by _id;
        # confirm the documents actually carry a guild_id field for this query.
        data = await self.bot.mongo.db.member.find_one({"_id": member.id, "guild_id": member.guild.id})
        if data is None:
            return
        ctx = FakeContext(self.bot, member.guild)
        kwargs = dict(
            target=member,
            user=self.bot.user,
            reason="User rejoined guild",
            guild_id=member.guild.id,
        )
        if data.get("muted", False):
            await Mute(**kwargs).execute(ctx)
        if data.get("trading_muted", False):
            await TradingMute(**kwargs).execute(ctx)

    async def save_action(self, action: Action):
        """Persists *action*, resolving any previous unresolved action of the
        same type on the same target, and posts it to the guild's log channel."""
        await self.bot.mongo.db.action.update_many(
            {
                "target_id": action.target.id,
                "guild_id": action.guild_id,
                "type": action.type,
                "resolved": False,
            },
            {"$set": {"resolved": True}},
        )
        action._id = await self.bot.mongo.reserve_id("action")
        await self.bot.mongo.db.action.insert_one({"_id": action._id, **action.to_dict()})
        data = await self.bot.mongo.db.guild.find_one({"_id": action.guild_id})
        channel = self.bot.get_channel(data["logs_channel_id"])
        if channel is not None:
            await channel.send(embed=action.to_log_embed())

    @commands.Cog.listener()
    async def on_member_update(self, before, after):
        """Logs timeouts placed or removed directly through Discord."""
        if after.timed_out_until == before.timed_out_until:
            return
        # Ignore updates that merely clear an already-expired timeout.
        # Fixed: timed_out_until is timezone-aware, so compare against an
        # aware "now" — naive datetime.now() raised TypeError here.
        if after.timed_out_until is not None and after.timed_out_until < datetime.now(timezone.utc):
            return
        entry = await fetch_recent_audit_log_entry(
            self.bot,
            after.guild,
            target=after,
            action=discord.AuditLogAction.member_update,
            retry=3,
        )
        if entry.user == self.bot.user:
            return
        if after.timed_out_until is None:
            action_cls = SymbolicUntimeout
        else:
            action_cls = Timeout
        action = action_cls(
            target=after,
            user=entry.user,
            reason=entry.reason,
            guild_id=after.guild.id,
            created_at=entry.created_at,
            expires_at=after.timed_out_until,
        )
        await action.notify()
        await self.save_action(action)

    @commands.Cog.listener()
    async def on_member_ban(self, guild, target):
        """Logs ban events not made through the bot."""
        entry = await fetch_recent_audit_log_entry(
            self.bot, guild, target=target, action=discord.AuditLogAction.ban, retry=3
        )
        if entry.user == self.bot.user:
            return
        action = Ban(
            target=target,
            user=entry.user,
            reason=entry.reason,
            guild_id=guild.id,
            created_at=entry.created_at,
        )
        await self.save_action(action)

    @commands.Cog.listener()
    async def on_member_unban(self, guild, target):
        """Logs unban events not made through the bot."""
        entry = await fetch_recent_audit_log_entry(
            self.bot, guild, target=target, action=discord.AuditLogAction.unban, retry=3
        )
        if entry.user == self.bot.user:
            return
        action = Unban(
            target=target,
            user=entry.user,
            reason=entry.reason,
            guild_id=guild.id,
            created_at=entry.created_at,
        )
        await self.save_action(action)

    @commands.Cog.listener()
    async def on_member_kick(self, target, entry):
        """Logs kick events not made through the bot."""
        if entry.user == self.bot.user:
            return
        action = Kick(
            target=target,
            user=entry.user,
            reason=entry.reason,
            guild_id=target.guild.id,
            created_at=entry.created_at,
        )
        await self.save_action(action)

    async def run_purge(self, ctx, limit, check):
        """Runs a purge, asking the invoker for confirmation when it is large."""

        class ConfirmPurgeView(discord.ui.View):
            @button(label=f"Purge up to {limit} messages", style=discord.ButtonStyle.danger)
            async def confirm(_self, interaction: discord.Interaction, button: discord.ui.Button):
                if interaction.user != ctx.author:
                    return
                _self.stop()
                await interaction.message.delete()
                await self._purge(ctx, limit, check)

            @button(label="Cancel")
            async def cancel(_self, interaction: discord.Interaction, button: discord.ui.Button):
                if interaction.user != ctx.author:
                    return
                _self.stop()
                await interaction.message.edit("The operation has been canceled.", view=None)

        if limit > 10000:
            await ctx.send("Too many messages to purge.")
        elif limit > 100:
            view = ConfirmPurgeView()
            await ctx.send(f"Are you sure you want to purge up to {limit} messages?", view=view)
        else:
            await self._purge(ctx, limit, check)

    async def _purge(self, ctx, limit, check):
        """Deletes matching messages and posts a short per-author summary."""
        await ctx.message.delete()
        deleted = await ctx.channel.purge(limit=limit, check=check, before=ctx.message)
        spammers = Counter(m.author.display_name for m in deleted)
        count = len(deleted)
        # Fixed: the original f-string nested single quotes inside single
        # quotes, a SyntaxError before Python 3.12.
        messages = [f"{count} message{' was' if count == 1 else 's were'} removed."]
        if len(deleted) > 0:
            messages.append("")
            spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)
            messages.extend(f"– **{author}**: {count}" for author, count in spammers)
        await ctx.send("\n".join(messages), delete_after=5)

    @commands.command()
    @commands.guild_only()
    @checks.is_moderator()
    async def cleanup(self, ctx, search=100):
        """Cleans up the bot's messages from the channel.
        You must have the Moderator Role to use this.
        """
        await self.run_purge(ctx, search, lambda m: m.author == ctx.me or m.content.startswith(ctx.prefix))

    @commands.group(invoke_without_command=True, aliases=("remove", "clean", "clear"))
    @commands.guild_only()
    @checks.is_moderator()
    async def purge(self, ctx, search: Union[discord.Member, int]):
        """Mass deletes messages that meet a certain criteria.
        If no subcommand is called, purges either all messages from a user or
        all messages, depending on the argument provided.
        You must have the Moderator role to use this.
        """
        if isinstance(search, discord.Member):
            await ctx.invoke(self.user, user=search)
        else:
            await ctx.invoke(self.all, search=search)

    @purge.command()
    @checks.is_moderator()
    async def all(self, ctx, search: int = 100):
        """Purges all messages."""
        await self.run_purge(ctx, search, lambda m: True)

    @purge.command()
    @checks.is_moderator()
    async def user(self, ctx, user: discord.Member, search: int = 100):
        """Purges messages from a user."""
        await self.run_purge(ctx, search, lambda m: m.author == user)

    @purge.command()
    @checks.is_moderator()
    async def contains(self, ctx, *text):
        """Purges messages that contain a substring."""
        search = 100
        # A trailing number is treated as the search depth, not part of the text.
        if text[-1].isdigit() and len(text) > 1:
            text, search = text[:-1], int(text[-1])
        await self.run_purge(ctx, search, lambda m: " ".join(text).casefold() in m.content.casefold())

    @commands.command()
    @commands.guild_only()
    @checks.is_moderator()
    async def warn(self, ctx, target: discord.Member, *, reason):
        """Warns a member in the server.
        You must have the Moderator role to use this.
        """
        if any(role.id in constants.MODERATOR_ROLES for role in getattr(target, "roles", [])):
            return await ctx.send("You can't punish that person!")
        action = Warn(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
            created_at=ctx.message.created_at,
        )
        await action.execute(ctx)
        await action.notify()
        await ctx.send(f"Warned **{target}** (Case #{action._id}).")

    @commands.command()
    @commands.guild_only()
    @checks.is_moderator()
    async def kick(self, ctx, target: discord.Member, *, reason):
        """Kicks a member from the server.
        You must have the Moderator role to use this.
        """
        if any(role.id in constants.MODERATOR_ROLES for role in getattr(target, "roles", [])):
            return await ctx.send("You can't punish that person!")
        action = Kick(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
            created_at=ctx.message.created_at,
        )
        # Notify before executing — the DM cannot be delivered after the kick.
        await action.notify()
        await action.execute(ctx)
        await ctx.send(f"Kicked **{target}** (Case #{action._id}).")

    @commands.command(usage="<target> [expires_at] [reason]")
    @commands.guild_only()
    @checks.is_moderator()
    async def ban(self, ctx, target: MemberOrIdConverter, *, reason: Union[ModerationUserFriendlyTime, str]):
        """Bans a member from the server.
        You must have the Moderator role to use this.
        """
        if any(role.id in constants.MODERATOR_ROLES for role in getattr(target, "roles", [])):
            return await ctx.send("You can't punish that person!")
        if isinstance(reason, time.UserFriendlyTime):
            expires_at = reason.dt
            reason = reason.arg
        else:
            expires_at = None
        action = Ban(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
            created_at=ctx.message.created_at,
            expires_at=expires_at,
        )
        # Notify before executing — the DM cannot be delivered after the ban.
        await action.notify()
        await action.execute(ctx)
        if action.duration is None:
            await ctx.send(f"Banned **{target}** (Case #{action._id}).")
        else:
            await ctx.send(f"Banned **{target}** for **{time.human_timedelta(action.duration)}** (Case #{action._id}).")

    @commands.command()
    @commands.guild_only()
    @checks.is_moderator()
    async def unban(self, ctx, target: BanConverter, *, reason=None):
        """Unbans a member from the server.
        You must have the Moderator role to use this.
        """
        action = Unban(
            target=target.user,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
        )
        await action.execute(ctx)
        await ctx.send(f"Unbanned **{target.user}** (Case #{action._id}).")

    @commands.command(aliases=("mute",), usage="<target> <expires_at> [reason]")
    @commands.guild_only()
    @checks.is_moderator()
    async def timeout(self, ctx, target: discord.Member, *, reason: Union[ModerationUserFriendlyTime, str]):
        """Places a member in timeout within the server.
        If duration is longer than 28 days, falls back to a mute.
        You must have the Moderator role to use this.
        """
        if any(role.id in constants.MODERATOR_ROLES for role in getattr(target, "roles", [])):
            return await ctx.send("You can't punish that person!")
        if isinstance(reason, time.UserFriendlyTime):
            expires_at = reason.dt
            reason = reason.arg
            # Discord's native timeout is capped at 28 days.
            if expires_at > ctx.message.created_at + timedelta(days=28):
                action_cls = Mute
            else:
                action_cls = Timeout
        else:
            expires_at = None
            action_cls = Mute
        action = action_cls(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
            created_at=ctx.message.created_at,
            expires_at=expires_at,
        )
        await action.execute(ctx)
        await action.notify()
        if action_cls is Timeout:
            await ctx.send(
                f"Placed **{target}** in timeout for **{time.human_timedelta(action.duration)}** (Case #{action._id})."
            )
        elif action.duration is None:
            await ctx.send(f"Muted **{target}** (Case #{action._id}).")
        else:
            await ctx.send(f"Muted **{target}** for **{time.human_timedelta(action.duration)}** (Case #{action._id}).")

    @commands.command(aliases=("unmute",))
    @commands.guild_only()
    @checks.is_moderator()
    async def untimeout(self, ctx, target: discord.Member, *, reason=None):
        """Removes a member from timeout within the server.
        If the member is muted, unmutes instead.
        You must have the Moderator role to use this.
        """
        if any(x.name == "Muted" for x in target.roles):
            action_cls = Unmute
        else:
            action_cls = Untimeout
        action = action_cls(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
        )
        await action.execute(ctx)
        await action.notify()
        if action_cls is Unmute:
            await ctx.send(f"Unmuted **{target}** (Case #{action._id}).")
        else:
            await ctx.send(f"Removed **{target}** from timeout (Case #{action._id}).")

    @commands.command(aliases=("tmute",), usage="<target> [expires_at] [reason]")
    @checks.community_server_only()
    @checks.is_moderator()
    async def tradingmute(self, ctx, target: discord.Member, *, reason: Union[ModerationUserFriendlyTime, str]):
        """Mutes a member in trading channels.
        You must have the Moderator role to use this.
        """
        if any(role.id in constants.MODERATOR_ROLES for role in getattr(target, "roles", [])):
            return await ctx.send("You can't punish that person!")
        if isinstance(reason, time.UserFriendlyTime):
            expires_at = reason.dt
            reason = reason.arg
        else:
            expires_at = None
        action = TradingMute(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
            created_at=ctx.message.created_at,
            expires_at=expires_at,
        )
        await action.execute(ctx)
        await action.notify()
        if action.duration is None:
            await ctx.send(f"Muted **{target}** in trading channels (Case #{action._id}).")
        else:
            await ctx.send(
                f"Muted **{target}** in trading channels for **{time.human_timedelta(action.duration)}** (Case #{action._id})."
            )

    @commands.command(aliases=("untradingmute", "tunmute", "untmute"))
    @checks.community_server_only()
    @checks.is_moderator()
    async def tradingunmute(self, ctx, target: discord.Member, *, reason=None):
        """Unmutes a member in trading channels.
        You must have the Moderator role to use this.
        """
        action = TradingUnmute(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
        )
        await action.execute(ctx)
        await action.notify()
        await ctx.send(f"Unmuted **{target}** in trading channels (Case #{action._id}).")

    async def reverse_raw_action(self, raw_action):
        """Reverses an expired temporary punishment and marks it resolved."""
        action = Action.build_from_mongo(self.bot, raw_action)
        guild = self.bot.get_guild(action.guild_id)
        target = action.target
        if action.type == "ban":
            action_type = Unban
            try:
                ban = await guild.fetch_ban(discord.Object(id=raw_action["target_id"]))
            except (ValueError, discord.NotFound):
                # Already unbanned manually; nothing to reverse.
                return
            target = ban.user
        elif action.type == "mute":
            action_type = Unmute
        elif action.type == "timeout":
            # Discord lifts the timeout itself; only record the reversal.
            action_type = SymbolicUntimeout
        elif action.type == "trading_mute":
            action_type = TradingUnmute
        else:
            return
        new_action = action_type(
            target=target,
            user=self.bot.user,
            reason="Punishment duration expired",
            guild_id=action.guild_id,
            created_at=datetime.now(timezone.utc),
        )
        await new_action.execute(FakeContext(self.bot, guild))
        await new_action.notify()
        await self.bot.mongo.db.action.update_one({"_id": raw_action["_id"]}, {"$set": {"resolved": True}})

    @tasks.loop(seconds=30)
    async def check_actions(self):
        """Periodically reverses punishments whose expiry time has passed."""
        await self.bot.wait_until_ready()
        query = {"resolved": False, "expires_at": {"$lt": datetime.now(timezone.utc)}}
        async for action in self.bot.mongo.db.action.find(query):
            self.bot.loop.create_task(self.reverse_raw_action(action))

    @commands.group(aliases=("his",), invoke_without_command=True)
    @commands.guild_only()
    @checks.is_moderator()
    async def history(self, ctx, *, target: Union[discord.Member, FetchUserConverter]):
        """Views a member's punishment history.
        You must have the Moderator role to use this.
        """
        query = {"target_id": target.id, "guild_id": ctx.guild.id}
        count = await self.bot.mongo.db.action.count_documents(query)

        async def get_actions():
            async for x in self.bot.mongo.db.action.find(query).sort("created_at", -1):
                yield Action.build_from_mongo(self.bot, x)

        def format_item(i, x):
            name = f"{x._id}. {x.emoji} {x.past_tense.title()} by {x.user}"
            reason = x.reason or "No reason provided"
            lines = [
                f"– **Reason:** {reason}",
                # Fixed: quote 'R' with single quotes — nested double quotes
                # are a SyntaxError before Python 3.12.
                f"– at {discord.utils.format_dt(x.created_at)} ({discord.utils.format_dt(x.created_at, 'R')})",
            ]
            if x.duration is not None:
                lines.insert(1, f"– **Duration:** {time.human_timedelta(x.duration)}")
            if x.note is not None:
                lines.insert(1, f"– **Note:** {x.note}")
            return {"name": name, "value": "\n".join(lines)[:1024], "inline": False}

        pages = ViewMenuPages(
            source=AsyncEmbedFieldsPageSource(
                get_actions(),
                title=f"Punishment History • {target}",
                format_item=format_item,
                count=count,
            )
        )
        try:
            await pages.start(ctx)
        except IndexError:
            await ctx.send("No punishment history found.")

    @history.command(aliases=("del",))
    @commands.guild_only()
    @checks.is_moderator()
    async def delete(self, ctx, ids: commands.Greedy[int]):
        """Deletes one or more entries from punishment history.
        You must have the Moderator role to use this.
        """
        result = await self.bot.mongo.db.action.delete_many({"_id": {"$in": ids}, "guild_id": ctx.guild.id})
        word = "entry" if result.deleted_count == 1 else "entries"
        await ctx.send(f"Successfully deleted {result.deleted_count} {word}.")

    @history.command()
    @commands.guild_only()
    @checks.is_moderator()
    async def note(self, ctx, id: int, *, note):
        """Adds a note to an entry from punishment history.
        You must have the Moderator role to use this.
        """
        # find_one_and_update doubles as the existence check.
        result = await self.bot.mongo.db.action.find_one_and_update(
            {"_id": id, "guild_id": ctx.guild.id}, {"$set": {"note": note}}
        )
        if result is None:
            return await ctx.send("Could not find an entry with that ID.")
        if note.lower() == "reset":
            await self.bot.mongo.db.action.update_one({"_id": id, "guild_id": ctx.guild.id}, {"$unset": {"note": 1}})
            return await ctx.send(f"Successfully removed note of entry **{id}**.")
        await ctx.send(f"Successfully added a note to entry **{id}**.")

    @history.command(aliases=("show",))
    @commands.guild_only()
    @checks.is_moderator()
    async def info(self, ctx, id: int):
        """Shows an entry from punishment history.
        You must have the Moderator role to use this.
        """
        action = await self.bot.mongo.db.action.find_one({"_id": id, "guild_id": ctx.guild.id})
        if action is None:
            return await ctx.send("Could not find an entry with that ID.")
        action = Action.build_from_mongo(self.bot, action)
        await ctx.send(embed=action.to_info_embed())

    @commands.command(cooldown_after_parsing=True)
    @commands.cooldown(1, 20, commands.BucketType.user)
    @checks.community_server_only()
    async def report(self, ctx, user: discord.Member, *, reason):
        """Reports a user to server moderators."""
        data = await self.bot.mongo.db.guild.find_one({"_id": ctx.guild.id})
        channel = ctx.guild.get_channel_or_thread(data["report_channel_id"])
        await channel.send(f"{ctx.author.mention} reported {user.mention} in {ctx.channel.mention} for:\n> {reason}")
        await ctx.send(f"Reported **{user}**.")

    async def cog_unload(self):
        self.check_actions.cancel()
async def setup(bot):
    # Standard discord.py extension entry point: registers the Moderation cog.
    await bot.add_cog(Moderation(bot))
| import abc
from collections import Counter
from contextlib import suppress
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Union
import discord
from discord.ext import commands, tasks
from discord.ext.events.utils import fetch_recent_audit_log_entry
from discord.ext.menus.views import ViewMenuPages
from discord.ui import button
from helpers import checks, constants, time
from helpers.pagination import AsyncEmbedFieldsPageSource
from helpers.utils import FakeUser, FetchUserConverter
class ModerationUserFriendlyTime(time.UserFriendlyTime):
    # UserFriendlyTime preset for moderation commands: the free-text part of
    # the argument is converted with clean_content and defaults to
    # "No reason provided" when the invoker gives only a duration.
    def __init__(self):
        super().__init__(commands.clean_content, default="No reason provided")
def message_channel(ctx, message):
    """Resolve *message* (a message, a text channel, or None) to message/channel ids."""
    if isinstance(message, discord.TextChannel):
        # For a channel, point at its most recent message.
        return dict(message_id=message.last_message_id, channel_id=message.id)
    resolved = message or ctx.message
    return dict(message_id=resolved.id, channel_id=resolved.channel.id)
@dataclass
class Action(abc.ABC):
    """A single moderation action (kick, ban, mute, ...) taken against a member.

    Concrete subclasses define the class attributes ``type`` (stable string
    stored in MongoDB), ``past_tense``, ``emoji`` and ``color``, and override
    :meth:`execute` to perform the Discord-side effect before persisting.
    """

    target: discord.Member       # member the action is taken against
    user: discord.Member         # moderator who performed the action
    reason: str
    guild_id: int
    channel_id: int = None       # channel of the invoking message, if any
    message_id: int = None       # invoking message, used for the logs link
    created_at: datetime = None  # defaults to now (UTC) in __post_init__
    expires_at: datetime = None  # set only for temporary punishments
    note: str = None
    automod_bucket: str = None
    resolved: bool = None        # False while a temporary punishment is active
    _id: int = None              # MongoDB _id, assigned when saved

    def __post_init__(self):
        if self.created_at is None:
            self.created_at = datetime.now(timezone.utc)
        if self.expires_at is not None:
            # Temporary punishments start unresolved; the expiry loop resolves them.
            self.resolved = False

    @classmethod
    def build_from_mongo(cls, bot, x):
        """Reconstruct the appropriate Action subclass from a MongoDB document."""
        guild = bot.get_guild(x["guild_id"])
        user = guild.get_member(x["user_id"]) or FakeUser(x["user_id"])
        target = guild.get_member(x["target_id"]) or FakeUser(x["target_id"])
        kwargs = {
            "_id": x["_id"],
            "target": target,
            "user": user,
            "reason": x["reason"],
            "guild_id": x["guild_id"],
            "channel_id": x.get("channel_id"),
            "message_id": x.get("message_id"),
            "created_at": x["created_at"],
        }
        if "expires_at" in x:
            kwargs["expires_at"] = x["expires_at"]
            kwargs["resolved"] = x["resolved"]
        if "automod_bucket" in x:
            kwargs["automod_bucket"] = x["automod_bucket"]
        if "note" in x:
            kwargs["note"] = x["note"]
        return cls_dict[x["type"]](**kwargs)

    @property
    def duration(self):
        """Length of the punishment, or None if permanent."""
        if self.expires_at is None:
            return None
        return self.expires_at - self.created_at

    @property
    def logs_url(self):
        """Admin-panel link to the logs around the invoking message, if known."""
        if self.message_id is None or self.channel_id is None:
            return None
        return f"https://admin.poketwo.net/logs/{self.guild_id}/{self.channel_id}?before={self.message_id+1}"

    def to_dict(self):
        """Serialize to a MongoDB document (the _id is added by the caller)."""
        base = {
            "target_id": self.target.id,
            "user_id": self.user.id,
            "type": self.type,
            "reason": self.reason,
            "guild_id": self.guild_id,
            "channel_id": self.channel_id,
            "message_id": self.message_id,
            "created_at": self.created_at,
        }
        if self.expires_at is not None:
            base["resolved"] = self.resolved
            base["expires_at"] = self.expires_at
        if self.automod_bucket is not None:
            base["automod_bucket"] = self.automod_bucket
        return base

    def to_user_embed(self):
        """Embed DM'd to the punished member."""
        embed = discord.Embed(
            title=f"{self.emoji} {self.past_tense.title()}",
            description=f"You have been {self.past_tense}.",
            color=self.color,
        )
        reason = self.reason or "No reason provided"
        embed.add_field(name="Reason", value=reason, inline=False)
        if self.duration is not None:
            embed.add_field(name="Duration", value=time.human_timedelta(self.duration))
            embed.set_footer(text="Expires")
            embed.timestamp = self.expires_at
        return embed

    def to_log_embed(self):
        """Embed posted to the guild's moderation log channel."""
        reason = self.reason or "No reason provided"
        if self.logs_url is not None:
            reason += f" ([Logs]({self.logs_url}))"
        embed = discord.Embed(color=self.color)
        embed.set_author(name=f"{self.user} (ID: {self.user.id})", icon_url=self.user.display_avatar.url)
        embed.set_thumbnail(url=self.target.display_avatar.url)
        embed.add_field(
            name=f"{self.emoji} {self.past_tense.title()} {self.target} (ID: {self.target.id})",
            value=reason,
        )
        if self.duration is not None:
            embed.set_footer(text=f"Duration • {time.human_timedelta(self.duration)}\nExpires")
            embed.timestamp = self.expires_at
        return embed

    def to_info_embed(self):
        """Embed shown by the `history info` command."""
        reason = self.reason or "No reason provided"
        embed = discord.Embed(color=self.color, title=f"{self.emoji} {self.past_tense.title()} {self.target}")
        embed.set_author(name=f"{self.user}", icon_url=self.user.display_avatar.url)
        embed.set_thumbnail(url=self.target.display_avatar.url)
        embed.add_field(name="Reason", value=reason, inline=False)
        if self.note is not None:
            embed.add_field(name="Note", value=self.note)
        if self.logs_url is not None:
            embed.add_field(name="Logs", value=f"[Link]({self.logs_url})", inline=False)
        if self.duration is not None:
            duration = f"{time.human_timedelta(self.duration)}"
            # Fixed: close the parenthesis around the relative timestamp —
            # the rendered string previously ended "(in 3 days" with no ")".
            expires_at = f"{discord.utils.format_dt(self.expires_at)} ({discord.utils.format_dt(self.expires_at, 'R')})"
            embed.add_field(name="Duration", value=duration, inline=False)
            embed.add_field(name="Expires At", value=expires_at, inline=False)
        embed.timestamp = self.created_at
        return embed

    async def notify(self):
        """DM the punished member; failures to deliver are ignored."""
        with suppress(discord.Forbidden, discord.HTTPException):
            await self.target.send(embed=self.to_user_embed())

    @abc.abstractmethod
    async def execute(self, ctx):
        # Subclasses perform their Discord-side effect, then call this to persist.
        await ctx.bot.get_cog("Moderation").save_action(self)
class Kick(Action):
    """Action that removes a member from the guild."""
    type = "kick"
    past_tense = "kicked"
    emoji = "\N{WOMANS BOOTS}"
    color = discord.Color.orange()
    async def execute(self, ctx):
        # Fall back to an audit-log attribution string when no reason was given.
        if self.reason:
            reason = self.reason
        else:
            reason = f"Action done by {self.user} (ID: {self.user.id})"
        await ctx.guild.kick(self.target, reason=reason)
        await super().execute(ctx)
class Ban(Action):
    """Action that bans a member; its DM embed adds an appeal link."""
    type = "ban"
    past_tense = "banned"
    emoji = "\N{HAMMER}"
    color = discord.Color.red()
    def to_user_embed(self):
        embed = super().to_user_embed()
        embed.description += " Please do not DM staff members to get unpunished. If you would like to appeal, [click here](https://forms.poketwo.net/)."
        return embed
    async def execute(self, ctx):
        if self.reason:
            reason = self.reason
        else:
            reason = f"Action done by {self.user} (ID: {self.user.id})"
        await ctx.guild.ban(self.target, reason=reason)
        await super().execute(ctx)
class Unban(Action):
    """Action that lifts a ban from a user."""
    type = "unban"
    past_tense = "unbanned"
    emoji = "\N{OPEN LOCK}"
    color = discord.Color.green()
    async def execute(self, ctx):
        if self.reason:
            reason = self.reason
        else:
            reason = f"Action done by {self.user} (ID: {self.user.id})"
        await ctx.guild.unban(self.target, reason=reason)
        await super().execute(ctx)
class Warn(Action):
    """Action that records a warning case with no Discord-side effect."""
    type = "warn"
    past_tense = "warned"
    emoji = "\N{WARNING SIGN}"
    color = discord.Color.orange()
    async def execute(self, ctx):
        # Nothing to do on Discord itself -- just persist and log the case.
        await super().execute(ctx)
class Timeout(Action):
    """Action that applies Discord's native timeout until `expires_at`."""
    type = "timeout"
    past_tense = "placed in timeout"
    emoji = "\N{SPEAKER WITH CANCELLATION STROKE}"
    color = discord.Color.blue()
    async def execute(self, ctx):
        if self.reason:
            reason = self.reason
        else:
            reason = f"Action done by {self.user} (ID: {self.user.id})"
        await self.target.edit(timed_out_until=self.expires_at, reason=reason)
        await super().execute(ctx)
class _Untimeout(Action):
    # Shared metadata for the two untimeout variants below; not used directly.
    type = "untimeout"
    past_tense = "removed from timeout"
    emoji = "\N{SPEAKER}"
    color = discord.Color.green()
class Untimeout(_Untimeout):
    """Untimeout variant that actually clears the member's timeout."""
    async def execute(self, ctx):
        if self.reason:
            reason = self.reason
        else:
            reason = f"Action done by {self.user} (ID: {self.user.id})"
        await self.target.edit(timed_out_until=None, reason=reason)
        await super().execute(ctx)
class SymbolicUntimeout(_Untimeout):
    # Records an untimeout case without editing the member -- used when the
    # timeout was already lifted (e.g. by staff via Discord, or it lapsed).
    async def execute(self, ctx):
        await super().execute(ctx)
class Mute(Action):
    """Action that applies the "Muted" role and flags the member in Mongo."""
    type = "mute"
    past_tense = "muted"
    emoji = "\N{SPEAKER WITH CANCELLATION STROKE}"
    color = discord.Color.blue()
    async def execute(self, ctx):
        if self.reason:
            reason = self.reason
        else:
            reason = f"Action done by {self.user} (ID: {self.user.id})"
        muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
        await self.target.add_roles(muted_role, reason=reason)
        # Persist the flag so the mute survives a leave/rejoin.
        await ctx.bot.mongo.db.member.update_one({"_id": self.target.id}, {"$set": {"muted": True}}, upsert=True)
        await super().execute(ctx)
class Unmute(Action):
    """Action that removes the "Muted" role and clears the Mongo flag."""
    type = "unmute"
    past_tense = "unmuted"
    emoji = "\N{SPEAKER}"
    color = discord.Color.green()
    async def execute(self, ctx):
        if self.reason:
            reason = self.reason
        else:
            reason = f"Action done by {self.user} (ID: {self.user.id})"
        muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
        await self.target.remove_roles(muted_role, reason=reason)
        await ctx.bot.mongo.db.member.update_one({"_id": self.target.id}, {"$set": {"muted": False}}, upsert=True)
        await super().execute(ctx)
class TradingMute(Action):
    """Action that swaps the "Trading" role for "Trading Muted" and flags Mongo."""
    type = "trading_mute"
    past_tense = "muted in trading"
    emoji = "\N{SPEAKER WITH CANCELLATION STROKE}"
    color = discord.Color.blue()
    async def execute(self, ctx):
        if self.reason:
            reason = self.reason
        else:
            reason = f"Action done by {self.user} (ID: {self.user.id})"
        muted_role = discord.utils.get(ctx.guild.roles, name="Trading Muted")
        trading_role = discord.utils.get(ctx.guild.roles, name="Trading")
        await self.target.add_roles(muted_role, reason=reason)
        await self.target.remove_roles(trading_role, reason=reason)
        # Persist so the trading mute survives a leave/rejoin.
        await ctx.bot.mongo.db.member.update_one(
            {"_id": self.target.id}, {"$set": {"trading_muted": True}}, upsert=True
        )
        await super().execute(ctx)
class TradingUnmute(Action):
    """Action that removes the "Trading Muted" role and clears the Mongo flag."""
    type = "trading_unmute"
    past_tense = "unmuted in trading"
    emoji = "\N{SPEAKER}"
    color = discord.Color.green()
    async def execute(self, ctx):
        if self.reason:
            reason = self.reason
        else:
            reason = f"Action done by {self.user} (ID: {self.user.id})"
        muted_role = discord.utils.get(ctx.guild.roles, name="Trading Muted")
        await self.target.remove_roles(muted_role, reason=reason)
        await ctx.bot.mongo.db.member.update_one(
            {"_id": self.target.id}, {"$set": {"trading_muted": False}}, upsert=True
        )
        await super().execute(ctx)
@dataclass
class FakeContext:
    # Minimal stand-in for commands.Context, used when executing actions
    # outside a command invocation (rejoin re-mutes, expiry reversals).
    # Only the attributes the Action.execute implementations read are provided.
    bot: commands.Bot
    guild: discord.Guild
# Maps each action's `type` string to its class, for rebuilding actions from Mongo.
cls_dict = {x.type: x for x in (Kick, Ban, Unban, Warn, Timeout, Untimeout, Mute, Unmute, TradingMute, TradingUnmute)}
class BanConverter(commands.Converter):
    """Converts a user ID or user tag to a BanEntry, erroring if not banned."""
    async def convert(self, ctx, arg):
        # First interpret the argument as a raw user ID.
        try:
            return await ctx.guild.fetch_ban(discord.Object(id=int(arg)))
        except discord.NotFound:
            raise commands.BadArgument("This member is not banned.")
        except ValueError:
            pass
        # Fall back to matching the full user tag. BUG FIX: in discord.py 2.x
        # Guild.bans() is an async iterator, not an awaitable list; pass it to
        # discord.utils.find, which supports async iterables.
        ban = await discord.utils.find(lambda u: str(u.user) == arg, ctx.guild.bans())
        if ban is None:
            raise commands.BadArgument("This member is not banned.")
        return ban
class MemberOrIdConverter(commands.Converter):
    """Resolves a member normally, falling back to a FakeUser for raw IDs."""
    async def convert(self, ctx, arg):
        try:
            return await commands.MemberConverter().convert(ctx, arg)
        except commands.MemberNotFound:
            # Not a resolvable member -- try treating the argument as an ID.
            pass
        try:
            return FakeUser(int(arg))
        except ValueError:
            raise commands.MemberNotFound(arg)
class Moderation(commands.Cog):
    """For moderation.

    Executes punishment Actions, persists them as numbered cases in Mongo,
    mirrors moderation done outside the bot via audit-log listeners, and
    reverses temporary punishments when they expire.
    """
    def __init__(self, bot):
        self.bot = bot
        self.cls_dict = cls_dict
        # Background loop that reverses expired punishments.
        self.check_actions.start()
    @commands.Cog.listener()
    async def on_member_join(self, member):
        # Re-apply persisted mutes when a punished member rejoins.
        # NOTE(review): Mute/TradingMute write member docs keyed on `_id` only;
        # this query also filters on `guild_id`, which those writes never set --
        # verify the member collection actually carries `guild_id`.
        data = await self.bot.mongo.db.member.find_one({"_id": member.id, "guild_id": member.guild.id})
        if data is None:
            return
        ctx = FakeContext(self.bot, member.guild)
        kwargs = dict(
            target=member,
            user=self.bot.user,
            reason="User rejoined guild",
            guild_id=member.guild.id,
        )
        if data.get("muted", False):
            await Mute(**kwargs).execute(ctx)
        if data.get("trading_muted", False):
            await TradingMute(**kwargs).execute(ctx)
    async def save_action(self, action: Action):
        """Persist an action as a new case and post it to the log channel.

        Any still-active case of the same type for the same target is marked
        resolved first, so at most one is pending expiry.
        """
        await self.bot.mongo.db.action.update_many(
            {
                "target_id": action.target.id,
                "guild_id": action.guild_id,
                "type": action.type,
                "resolved": False,
            },
            {"$set": {"resolved": True}},
        )
        action._id = await self.bot.mongo.reserve_id("action")
        await self.bot.mongo.db.action.insert_one({"_id": action._id, **action.to_dict()})
        data = await self.bot.mongo.db.guild.find_one({"_id": action.guild_id})
        channel = self.bot.get_channel(data["logs_channel_id"])
        if channel is not None:
            await channel.send(embed=action.to_log_embed())
    @commands.Cog.listener()
    async def on_member_update(self, before, after):
        # Logs timeout changes not made through the bot.
        if after.timed_out_until == before.timed_out_until:
            return
        # BUG FIX: timed_out_until is timezone-aware (UTC); comparing it with a
        # naive datetime.now() raises TypeError.
        if after.timed_out_until is not None and after.timed_out_until < datetime.now(timezone.utc):
            return
        entry = await fetch_recent_audit_log_entry(
            self.bot,
            after.guild,
            target=after,
            action=discord.AuditLogAction.member_update,
            retry=3,
        )
        if entry.user == self.bot.user:
            return
        if after.timed_out_until is None:
            action_cls = SymbolicUntimeout
        else:
            action_cls = Timeout
        action = action_cls(
            target=after,
            user=entry.user,
            reason=entry.reason,
            guild_id=after.guild.id,
            created_at=entry.created_at,
            expires_at=after.timed_out_until,
        )
        await action.notify()
        await self.save_action(action)
    @commands.Cog.listener()
    async def on_member_ban(self, guild, target):
        """Logs ban events not made through the bot."""
        entry = await fetch_recent_audit_log_entry(
            self.bot, guild, target=target, action=discord.AuditLogAction.ban, retry=3
        )
        if entry.user == self.bot.user:
            return
        action = Ban(
            target=target,
            user=entry.user,
            reason=entry.reason,
            guild_id=guild.id,
            created_at=entry.created_at,
        )
        await self.save_action(action)
    @commands.Cog.listener()
    async def on_member_unban(self, guild, target):
        # Logs unban events not made through the bot.
        entry = await fetch_recent_audit_log_entry(
            self.bot, guild, target=target, action=discord.AuditLogAction.unban, retry=3
        )
        if entry.user == self.bot.user:
            return
        action = Unban(
            target=target,
            user=entry.user,
            reason=entry.reason,
            guild_id=guild.id,
            created_at=entry.created_at,
        )
        await self.save_action(action)
    @commands.Cog.listener()
    async def on_member_kick(self, target, entry):
        # Custom event (dispatched elsewhere) carrying the audit-log entry.
        if entry.user == self.bot.user:
            return
        action = Kick(
            target=target,
            user=entry.user,
            reason=entry.reason,
            guild_id=target.guild.id,
            created_at=entry.created_at,
        )
        await self.save_action(action)
    async def run_purge(self, ctx, limit, check):
        """Purge with a confirmation prompt for large (>100) requests."""
        # `_self` is the view instance; `self` (the cog) is captured by closure.
        class ConfirmPurgeView(discord.ui.View):
            @button(label=f"Purge up to {limit} messages", style=discord.ButtonStyle.danger)
            async def confirm(_self, interaction: discord.Interaction, button: discord.ui.Button):
                if interaction.user != ctx.author:
                    return
                _self.stop()
                await interaction.message.delete()
                await self._purge(ctx, limit, check)
            @button(label="Cancel")
            async def cancel(_self, interaction: discord.Interaction, button: discord.ui.Button):
                if interaction.user != ctx.author:
                    return
                _self.stop()
                await interaction.message.edit("The operation has been canceled.", view=None)
        if limit > 10000:
            await ctx.send("Too many messages to purge.")
        elif limit > 100:
            view = ConfirmPurgeView()
            await ctx.send(f"Are you sure you want to purge up to {limit} messages?", view=view)
        else:
            await self._purge(ctx, limit, check)
    async def _purge(self, ctx, limit, check):
        # Delete the invoking message, purge, then post a short summary.
        await ctx.message.delete()
        deleted = await ctx.channel.purge(limit=limit, check=check, before=ctx.message)
        spammers = Counter(m.author.display_name for m in deleted)
        count = len(deleted)
        messages = [f'{count} message{" was" if count == 1 else "s were"} removed.']
        if len(deleted) > 0:
            messages.append("")
            spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)
            messages.extend(f"– **{author}**: {count}" for author, count in spammers)
        await ctx.send("\n".join(messages), delete_after=5)
    @commands.command()
    @commands.guild_only()
    @checks.is_moderator()
    async def cleanup(self, ctx, search=100):
        """Cleans up the bot's messages from the channel.
        You must have the Moderator Role to use this.
        """
        await self.run_purge(ctx, search, lambda m: m.author == ctx.me or m.content.startswith(ctx.prefix))
    @commands.group(invoke_without_command=True, aliases=("remove", "clean", "clear"))
    @commands.guild_only()
    @checks.is_moderator()
    async def purge(self, ctx, search: Union[discord.Member, int]):
        """Mass deletes messages that meet a certain criteria.
        If no subcommand is called, purges either all messages from a user or
        all messages, depending on the argument provided.
        You must have the Moderator role to use this.
        """
        if isinstance(search, discord.Member):
            await ctx.invoke(self.user, user=search)
        else:
            await ctx.invoke(self.all, search=search)
    @purge.command()
    @checks.is_moderator()
    async def all(self, ctx, search: int = 100):
        """Purges all messages."""
        await self.run_purge(ctx, search, lambda m: True)
    @purge.command()
    @checks.is_moderator()
    async def user(self, ctx, user: discord.Member, search: int = 100):
        """Purges messages from a user."""
        await self.run_purge(ctx, search, lambda m: m.author == user)
    @purge.command()
    @checks.is_moderator()
    async def contains(self, ctx, *text):
        """Purges messages that contain a substring."""
        search = 100
        # A trailing number (with at least one preceding word) is the count.
        if text[-1].isdigit() and len(text) > 1:
            text, search = text[:-1], int(text[-1])
        await self.run_purge(ctx, search, lambda m: " ".join(text).casefold() in m.content.casefold())
    @commands.command()
    @commands.guild_only()
    @checks.is_moderator()
    async def warn(self, ctx, target: discord.Member, *, reason):
        """Warns a member in the server.
        You must have the Moderator role to use this.
        """
        if any(role.id in constants.MODERATOR_ROLES for role in getattr(target, "roles", [])):
            return await ctx.send("You can't punish that person!")
        action = Warn(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
            created_at=ctx.message.created_at,
        )
        await action.execute(ctx)
        await action.notify()
        await ctx.send(f"Warned **{target}** (Case #{action._id}).")
    @commands.command()
    @commands.guild_only()
    @checks.is_moderator()
    async def kick(self, ctx, target: discord.Member, *, reason):
        """Kicks a member from the server.
        You must have the Moderator role to use this.
        """
        if any(role.id in constants.MODERATOR_ROLES for role in getattr(target, "roles", [])):
            return await ctx.send("You can't punish that person!")
        action = Kick(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
            created_at=ctx.message.created_at,
        )
        # Notify before executing: the DM can't be delivered post-kick.
        await action.notify()
        await action.execute(ctx)
        await ctx.send(f"Kicked **{target}** (Case #{action._id}).")
    @commands.command(usage="<target> [expires_at] [reason]")
    @commands.guild_only()
    @checks.is_moderator()
    async def ban(self, ctx, target: MemberOrIdConverter, *, reason: Union[ModerationUserFriendlyTime, str]):
        """Bans a member from the server.
        You must have the Moderator role to use this.
        """
        if any(role.id in constants.MODERATOR_ROLES for role in getattr(target, "roles", [])):
            return await ctx.send("You can't punish that person!")
        if isinstance(reason, time.UserFriendlyTime):
            expires_at = reason.dt
            reason = reason.arg
        else:
            expires_at = None
        action = Ban(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
            created_at=ctx.message.created_at,
            expires_at=expires_at,
        )
        # Notify before executing: the DM can't be delivered post-ban.
        await action.notify()
        await action.execute(ctx)
        if action.duration is None:
            await ctx.send(f"Banned **{target}** (Case #{action._id}).")
        else:
            await ctx.send(f"Banned **{target}** for **{time.human_timedelta(action.duration)}** (Case #{action._id}).")
    @commands.command()
    @commands.guild_only()
    @checks.is_moderator()
    async def unban(self, ctx, target: BanConverter, *, reason=None):
        """Unbans a member from the server.
        You must have the Moderator role to use this.
        """
        action = Unban(
            target=target.user,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
        )
        await action.execute(ctx)
        await ctx.send(f"Unbanned **{target.user}** (Case #{action._id}).")
    @commands.command(aliases=("mute",), usage="<target> <expires_at> [reason]")
    @commands.guild_only()
    @checks.is_moderator()
    async def timeout(self, ctx, target: discord.Member, *, reason: Union[ModerationUserFriendlyTime, str]):
        """Places a member in timeout within the server.
        If duration is longer than 28 days, falls back to a mute.
        You must have the Moderator role to use this.
        """
        if any(role.id in constants.MODERATOR_ROLES for role in getattr(target, "roles", [])):
            return await ctx.send("You can't punish that person!")
        if isinstance(reason, time.UserFriendlyTime):
            expires_at = reason.dt
            reason = reason.arg
            # Discord's native timeout caps at 28 days; use the role mute beyond.
            if expires_at > ctx.message.created_at + timedelta(days=28):
                action_cls = Mute
            else:
                action_cls = Timeout
        else:
            expires_at = None
            action_cls = Mute
        action = action_cls(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
            created_at=ctx.message.created_at,
            expires_at=expires_at,
        )
        await action.execute(ctx)
        await action.notify()
        if action_cls is Timeout:
            await ctx.send(
                f"Placed **{target}** in timeout for **{time.human_timedelta(action.duration)}** (Case #{action._id})."
            )
        elif action.duration is None:
            await ctx.send(f"Muted **{target}** (Case #{action._id}).")
        else:
            await ctx.send(f"Muted **{target}** for **{time.human_timedelta(action.duration)}** (Case #{action._id}).")
    @commands.command(aliases=("unmute",))
    @commands.guild_only()
    @checks.is_moderator()
    async def untimeout(self, ctx, target: discord.Member, *, reason=None):
        """Removes a member from timeout within the server.
        If the member is muted, unmutes instead.
        You must have the Moderator role to use this.
        """
        if any(x.name == "Muted" for x in target.roles):
            action_cls = Unmute
        else:
            action_cls = Untimeout
        action = action_cls(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
        )
        await action.execute(ctx)
        await action.notify()
        if action_cls is Unmute:
            await ctx.send(f"Unmuted **{target}** (Case #{action._id}).")
        else:
            await ctx.send(f"Removed **{target}** from timeout (Case #{action._id}).")
    @commands.command(aliases=("tmute",), usage="<target> [expires_at] [reason]")
    @checks.community_server_only()
    @checks.is_moderator()
    async def tradingmute(self, ctx, target: discord.Member, *, reason: Union[ModerationUserFriendlyTime, str]):
        """Mutes a member in trading channels.
        You must have the Moderator role to use this.
        """
        if any(role.id in constants.MODERATOR_ROLES for role in getattr(target, "roles", [])):
            return await ctx.send("You can't punish that person!")
        if isinstance(reason, time.UserFriendlyTime):
            expires_at = reason.dt
            reason = reason.arg
        else:
            expires_at = None
        action = TradingMute(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
            created_at=ctx.message.created_at,
            expires_at=expires_at,
        )
        await action.execute(ctx)
        await action.notify()
        if action.duration is None:
            await ctx.send(f"Muted **{target}** in trading channels (Case #{action._id}).")
        else:
            await ctx.send(
                f"Muted **{target}** in trading channels for **{time.human_timedelta(action.duration)}** (Case #{action._id})."
            )
    @commands.command(aliases=("untradingmute", "tunmute", "untmute"))
    @checks.community_server_only()
    @checks.is_moderator()
    async def tradingunmute(self, ctx, target: discord.Member, *, reason=None):
        """Unmutes a member in trading channels.
        You must have the Moderator role to use this.
        """
        action = TradingUnmute(
            target=target,
            user=ctx.author,
            reason=reason,
            guild_id=ctx.guild.id,
        )
        await action.execute(ctx)
        await action.notify()
        await ctx.send(f"Unmuted **{target}** in trading channels (Case #{action._id}).")
    async def reverse_raw_action(self, raw_action):
        """Reverse an expired punishment (raw Mongo doc) and mark it resolved."""
        action = Action.build_from_mongo(self.bot, raw_action)
        guild = self.bot.get_guild(action.guild_id)
        target = action.target
        if action.type == "ban":
            action_type = Unban
            try:
                ban = await guild.fetch_ban(discord.Object(id=raw_action["target_id"]))
            except (ValueError, discord.NotFound):
                # Already unbanned elsewhere; nothing to reverse.
                return
            target = ban.user
        elif action.type == "mute":
            action_type = Unmute
        elif action.type == "timeout":
            # Discord lifts the timeout itself; just record the case.
            action_type = SymbolicUntimeout
        elif action.type == "trading_mute":
            action_type = TradingUnmute
        else:
            return
        new_action = action_type(
            target=target,
            user=self.bot.user,
            reason="Punishment duration expired",
            guild_id=action.guild_id,
            created_at=datetime.now(timezone.utc),
        )
        await new_action.execute(FakeContext(self.bot, guild))
        await new_action.notify()
        await self.bot.mongo.db.action.update_one({"_id": raw_action["_id"]}, {"$set": {"resolved": True}})
    @tasks.loop(seconds=30)
    async def check_actions(self):
        # Periodically reverse punishments whose expiry has passed.
        await self.bot.wait_until_ready()
        query = {"resolved": False, "expires_at": {"$lt": datetime.now(timezone.utc)}}
        async for action in self.bot.mongo.db.action.find(query):
            self.bot.loop.create_task(self.reverse_raw_action(action))
    @commands.group(aliases=("his",), invoke_without_command=True)
    @commands.guild_only()
    @checks.is_moderator()
    async def history(self, ctx, *, target: Union[discord.Member, FetchUserConverter]):
        """Views a member's punishment history.
        You must have the Moderator role to use this.
        """
        query = {"target_id": target.id, "guild_id": ctx.guild.id}
        count = await self.bot.mongo.db.action.count_documents(query)
        async def get_actions():
            async for x in self.bot.mongo.db.action.find(query).sort("created_at", -1):
                yield Action.build_from_mongo(self.bot, x)
        def format_item(i, x):
            name = f"{x._id}. {x.emoji} {x.past_tense.title()} by {x.user}"
            reason = x.reason or "No reason provided"
            lines = [
                f"– **Reason:** {reason}",
                f"– at {discord.utils.format_dt(x.created_at)} ({discord.utils.format_dt(x.created_at, 'R')})",
            ]
            if x.duration is not None:
                lines.insert(1, f"– **Duration:** {time.human_timedelta(x.duration)}")
            if x.note is not None:
                lines.insert(1, f"– **Note:** {x.note}")
            # Discord caps embed field values at 1024 characters.
            return {"name": name, "value": "\n".join(lines)[:1024], "inline": False}
        pages = ViewMenuPages(
            source=AsyncEmbedFieldsPageSource(
                get_actions(),
                title=f"Punishment History • {target}",
                format_item=format_item,
                count=count,
            )
        )
        try:
            await pages.start(ctx)
        except IndexError:
            await ctx.send("No punishment history found.")
    @history.command(aliases=("del",))
    @commands.guild_only()
    @checks.is_moderator()
    async def delete(self, ctx, ids: commands.Greedy[int]):
        """Deletes one or more entries from punishment history.
        You must have the Moderator role to use this.
        """
        result = await self.bot.mongo.db.action.delete_many({"_id": {"$in": ids}, "guild_id": ctx.guild.id})
        word = "entry" if result.deleted_count == 1 else "entries"
        await ctx.send(f"Successfully deleted {result.deleted_count} {word}.")
    @history.command()
    @commands.guild_only()
    @checks.is_moderator()
    async def note(self, ctx, id: int, *, note):
        """Adds a note to an entry from punishment history.
        You must have the Moderator role to use this.
        """
        # Set the note first; find_one_and_update doubles as an existence check.
        result = await self.bot.mongo.db.action.find_one_and_update(
            {"_id": id, "guild_id": ctx.guild.id}, {"$set": {"note": note}}
        )
        if result is None:
            return await ctx.send("Could not find an entry with that ID.")
        # The literal "reset" clears the note instead of storing it.
        if note.lower() == "reset":
            await self.bot.mongo.db.action.update_one({"_id": id, "guild_id": ctx.guild.id}, {"$unset": {"note": 1}})
            return await ctx.send(f"Successfully removed note of entry **{id}**.")
        await ctx.send(f"Successfully added a note to entry **{id}**.")
    @history.command(aliases=("show",))
    @commands.guild_only()
    @checks.is_moderator()
    async def info(self, ctx, id: int):
        """Shows an entry from punishment history.
        You must have the Moderator role to use this.
        """
        action = await self.bot.mongo.db.action.find_one({"_id": id, "guild_id": ctx.guild.id})
        if action is None:
            return await ctx.send("Could not find an entry with that ID.")
        action = Action.build_from_mongo(self.bot, action)
        await ctx.send(embed=action.to_info_embed())
    @commands.command(cooldown_after_parsing=True)
    @commands.cooldown(1, 20, commands.BucketType.user)
    @checks.community_server_only()
    async def report(self, ctx, user: discord.Member, *, reason):
        """Reports a user to server moderators."""
        data = await self.bot.mongo.db.guild.find_one({"_id": ctx.guild.id})
        channel = ctx.guild.get_channel_or_thread(data["report_channel_id"])
        await channel.send(f"{ctx.author.mention} reported {user.mention} in {ctx.channel.mention} for:\n> {reason}")
        await ctx.send(f"Reported **{user}**.")
    async def cog_unload(self):
        # Stop the expiry loop when the extension is unloaded/reloaded.
        self.check_actions.cancel()
async def setup(bot):
    # discord.py 2.x async extension entry point.
    await bot.add_cog(Moderation(bot))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
币安推荐码: 返佣10%
https://www.binancezh.pro/cn/register?ref=AIR1GC70
币安合约推荐码: 返佣10%
https://www.binancezh.com/cn/futures/ref/51bitquant
if you don't have a binance account, you can use the invitation link to register one:
https://www.binancezh.com/cn/futures/ref/51bitquant
or use the invitation code: 51bitquant
网格交易: 适合币圈的高波动率的品种,适合现货, 如果交易合约,需要注意防止极端行情爆仓。
服务器购买地址: https://www.ucloud.cn/site/global.html?invitation_code=C1x2EA81CD79B8C#dongjing
"""
from gateway import BinanceSpotHttp, OrderStatus, OrderType, OrderSide
from utils import config
from utils import utility, round_to
from enum import Enum
import logging
from datetime import datetime
class BinanceTrader(object):
    """Spot grid trader: keeps paired buy/sell limit orders around the market price."""
    def __init__(self):
        """Create the HTTP client from module-level config; start with no tracked orders.

        Credentials, proxy settings, and all trading parameters come from
        ``utils.config``.
        """
        self.http_client = BinanceSpotHttp(api_key=config.api_key, secret=config.api_secret, proxy_host=config.proxy_host, proxy_port=config.proxy_port)
        self.buy_orders = []  # currently tracked open buy orders.
        self.sell_orders = []  # currently tracked open sell orders.
    def get_bid_ask_price(self):
        """Return (bid, ask) for the configured symbol; (0, 0) if the ticker call fails."""
        ticker = self.http_client.get_ticker(config.symbol)
        bid_price = 0
        ask_price = 0
        if ticker:
            bid_price = float(ticker.get('bidPrice', 0))
            ask_price = float(ticker.get('askPrice', 0))
        return bid_price, ask_price
    def grid_trader(self):
        """Run one iteration of the grid logic.

        Checks every tracked order's status: for each fill, places the paired
        orders one grid step away; prunes canceled orders; then seeds the grid
        when empty and trims it when over ``config.max_orders``.
        """
        bid_price, ask_price = self.get_bid_ask_price()
        print(f"bid_price: {bid_price}, ask_price: {ask_price}")
        quantity = round_to(float(config.quantity), float(config.min_qty))
        self.buy_orders.sort(key=lambda x: float(x['price']), reverse=True)  # highest price first.
        self.sell_orders.sort(key=lambda x: float(x['price']), reverse=True)  # highest price first.
        print(f"buy orders: {self.buy_orders}")
        print("------------------------------")
        print(f"sell orders: {self.sell_orders}")
        buy_delete_orders = []  # buy orders to drop from tracking.
        sell_delete_orders = []  # sell orders to drop from tracking.
        # Buy side: check fills. BUG FIX: iterate over a snapshot -- the loop
        # body appends newly placed orders to self.buy_orders, and mutating a
        # list while iterating it extends the iteration.
        for buy_order in list(self.buy_orders):
            check_order = self.http_client.get_order(buy_order.get('symbol', config.symbol), client_order_id=buy_order.get('clientOrderId'))
            if check_order:
                if check_order.get('status') == OrderStatus.CANCELED.value:
                    buy_delete_orders.append(buy_order)
                    # BUG FIX (here and below): reusing double quotes inside a
                    # double-quoted f-string is a SyntaxError before Python 3.12.
                    print(f"buy order status was canceled: {check_order.get('status')}")
                elif check_order.get('status') == OrderStatus.FILLED.value:
                    # Buy filled: place the paired sell above, then a fresh buy below.
                    logging.info(f"买单成交时间: {datetime.now()}, 价格: {check_order.get('price')}, 数量: {check_order.get('origQty')}")
                    sell_price = round_to(float(check_order.get("price")) * (1 + float(config.gap_percent)), float(config.min_price))
                    if 0 < sell_price < ask_price:
                        # Never quote a sell below the current ask.
                        sell_price = round_to(ask_price, float(config.min_price))
                    new_sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL, order_type=OrderType.LIMIT, quantity=quantity, price=sell_price)
                    if new_sell_order:
                        buy_delete_orders.append(buy_order)
                        self.sell_orders.append(new_sell_order)
                    # BUG FIX: min_price was passed without float() here, unlike
                    # every other round_to call site.
                    buy_price = round_to(float(check_order.get("price")) * (1 - float(config.gap_percent)),
                                         float(config.min_price))
                    if buy_price > bid_price > 0:
                        # Never quote a buy above the current bid.
                        buy_price = round_to(bid_price, float(config.min_price))
                    new_buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY, order_type=OrderType.LIMIT, quantity=quantity, price=buy_price)
                    if new_buy_order:
                        self.buy_orders.append(new_buy_order)
                elif check_order.get('status') == OrderStatus.NEW.value:
                    print("buy order status is: New")
                else:
                    print(f"buy order status is not above options: {check_order.get('status')}")
        # Drop canceled/replaced buy orders from tracking.
        for delete_order in buy_delete_orders:
            self.buy_orders.remove(delete_order)
        # Sell side: mirror of the buy-side logic (snapshot for the same reason).
        for sell_order in list(self.sell_orders):
            check_order = self.http_client.get_order(sell_order.get('symbol', config.symbol),
                                                     client_order_id=sell_order.get('clientOrderId'))
            if check_order:
                if check_order.get('status') == OrderStatus.CANCELED.value:
                    sell_delete_orders.append(sell_order)
                    print(f"sell order status was canceled: {check_order.get('status')}")
                elif check_order.get('status') == OrderStatus.FILLED.value:
                    logging.info(
                        f"卖单成交时间: {datetime.now()}, 价格: {check_order.get('price')}, 数量: {check_order.get('origQty')}")
                    # Sell filled: place the paired buy below first.
                    buy_price = round_to(float(check_order.get("price")) * (1 - float(config.gap_percent)), float(config.min_price))
                    if buy_price > bid_price > 0:
                        buy_price = round_to(bid_price, float(config.min_price))
                    new_buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY,
                                                                 order_type=OrderType.LIMIT, quantity=quantity, price=buy_price)
                    if new_buy_order:
                        sell_delete_orders.append(sell_order)
                        self.buy_orders.append(new_buy_order)
                    sell_price = round_to(float(check_order.get("price")) * (1 + float(config.gap_percent)), float(config.min_price))
                    if 0 < sell_price < ask_price:
                        sell_price = round_to(ask_price, float(config.min_price))
                    new_sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL,
                                                                  order_type=OrderType.LIMIT, quantity=quantity,
                                                                  price=sell_price)
                    if new_sell_order:
                        self.sell_orders.append(new_sell_order)
                elif check_order.get('status') == OrderStatus.NEW.value:
                    print("sell order status is: New")
                else:
                    print(f"sell order status is not in above options: {check_order.get('status')}")
        # Drop canceled/replaced sell orders from tracking.
        for delete_order in sell_delete_orders:
            self.sell_orders.remove(delete_order)
        # No tracked buy orders: seed one a grid step below the bid.
        if len(self.buy_orders) <= 0:
            if bid_price > 0:
                price = round_to(bid_price * (1 - float(config.gap_percent)), float(config.min_price))
                buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY, order_type=OrderType.LIMIT, quantity=quantity, price=price)
                if buy_order:
                    self.buy_orders.append(buy_order)
        elif len(self.buy_orders) > int(config.max_orders):  # cap on open orders per side.
            # Too many buy orders: cancel the lowest-priced one.
            self.buy_orders.sort(key=lambda x: float(x['price']), reverse=False)
            delete_order = self.buy_orders[0]
            order = self.http_client.cancel_order(delete_order.get('symbol'), client_order_id=delete_order.get('clientOrderId'))
            if order:
                self.buy_orders.remove(delete_order)
        # No tracked sell orders: seed one a grid step above the ask.
        if len(self.sell_orders) <= 0:
            if ask_price > 0:
                price = round_to(ask_price * (1 + float(config.gap_percent)), float(config.min_price))
                order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL, order_type=OrderType.LIMIT, quantity=quantity, price=price)
                if order:
                    self.sell_orders.append(order)
        elif len(self.sell_orders) > int(config.max_orders):
            # Too many sell orders: cancel the highest-priced one.
            # BUG FIX: sort numerically -- comparing price strings misorders
            # values like "9.5" vs "10".
            self.sell_orders.sort(key=lambda x: float(x['price']), reverse=True)
            delete_order = self.sell_orders[0]
            order = self.http_client.cancel_order(delete_order.get('symbol'),
                                                  client_order_id=delete_order.get('clientOrderId'))
            if order:
                self.sell_orders.remove(delete_order)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
币安推荐码: 返佣10%
https://www.binancezh.pro/cn/register?ref=AIR1GC70
币安合约推荐码: 返佣10%
https://www.binancezh.com/cn/futures/ref/51bitquant
if you don't have a binance account, you can use the invitation link to register one:
https://www.binancezh.com/cn/futures/ref/51bitquant
or use the invitation code: 51bitquant
网格交易: 适合币圈的高波动率的品种,适合现货, 如果交易合约,需要注意防止极端行情爆仓。
服务器购买地址: https://www.ucloud.cn/site/global.html?invitation_code=C1x2EA81CD79B8C#dongjing
"""
from gateway import BinanceSpotHttp, OrderStatus, OrderType, OrderSide
from utils import config
from utils import utility, round_to
from enum import Enum
import logging
from datetime import datetime
class BinanceTrader(object):
    def __init__(self):
        """Create the HTTP client from module-level config; start with no tracked orders.

        Credentials and proxy settings are read from ``utils.config`` -- the
        previous parameter list in this docstring was stale; the constructor
        takes no arguments.
        """
        self.http_client = BinanceSpotHttp(api_key=config.api_key, secret=config.api_secret, proxy_host=config.proxy_host, proxy_port=config.proxy_port)
        self.buy_orders = [] # currently tracked open buy orders.
        self.sell_orders = [] # currently tracked open sell orders.
def get_bid_ask_price(self):
ticker = self.http_client.get_ticker(config.symbol)
bid_price = 0
ask_price = 0
if ticker:
bid_price = float(ticker.get('bidPrice', 0))
ask_price = float(ticker.get('askPrice', 0))
return bid_price, ask_price
def grid_trader(self):
"""
执行核心逻辑,网格交易的逻辑.
:return:
"""
bid_price, ask_price = self.get_bid_ask_price()
print(f"bid_price: {bid_price}, ask_price: {ask_price}")
quantity = round_to(float(config.quantity), float(config.min_qty))
self.buy_orders.sort(key=lambda x: float(x['price']), reverse=True) # 最高价到最低价.
self.sell_orders.sort(key=lambda x: float(x['price']), reverse=True) # 最高价到最低价.
print(f"buy orders: {self.buy_orders}")
print("------------------------------")
print(f"sell orders: {self.sell_orders}")
buy_delete_orders = [] # 需要删除买单
sell_delete_orders = [] # 需要删除的卖单
# 买单逻辑,检查成交的情况.
for buy_order in self.buy_orders:
check_order = self.http_client.get_order(buy_order.get('symbol', config.symbol),client_order_id=buy_order.get('clientOrderId'))
if check_order:
if check_order.get('status') == OrderStatus.CANCELED.value:
buy_delete_orders.append(buy_order)
print(f"buy order status was canceled: {check_order.get('status')}")
elif check_order.get('status') == OrderStatus.FILLED.value:
# 买单成交,挂卖单.
logging.info(f"买单成交时间: {datetime.now()}, 价格: {check_order.get('price')}, 数量: {check_order.get('origQty')}")
sell_price = round_to(float(check_order.get("price")) * (1 + float(config.gap_percent)), float(config.min_price))
if 0 < sell_price < ask_price:
# 防止价格
sell_price = round_to(ask_price, float(config.min_price))
new_sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL, order_type=OrderType.LIMIT, quantity=quantity, price=sell_price)
if new_sell_order:
buy_delete_orders.append(buy_order)
self.sell_orders.append(new_sell_order)
buy_price = round_to(float(check_order.get("price")) * (1 - float(config.gap_percent)),
config.min_price)
if buy_price > bid_price > 0:
buy_price = round_to(bid_price, float(config.min_price))
new_buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY, order_type=OrderType.LIMIT, quantity=quantity, price=buy_price)
if new_buy_order:
self.buy_orders.append(new_buy_order)
elif check_order.get('status') == OrderStatus.NEW.value:
print("buy order status is: New")
else:
print(f"buy order status is not above options: {check_order.get('status')}")
# 过期或者拒绝的订单删除掉.
for delete_order in buy_delete_orders:
self.buy_orders.remove(delete_order)
# 卖单逻辑, 检查卖单成交情况.
for sell_order in self.sell_orders:
check_order = self.http_client.get_order(sell_order.get('symbol', config.symbol),
client_order_id=sell_order.get('clientOrderId'))
if check_order:
if check_order.get('status') == OrderStatus.CANCELED.value:
sell_delete_orders.append(sell_order)
print(f"sell order status was canceled: {check_order.get('status')}")
elif check_order.get('status') == OrderStatus.FILLED.value:
logging.info(
f"卖单成交时间: {datetime.now()}, 价格: {check_order.get('price')}, 数量: {check_order.get('origQty')}")
# 卖单成交,先下买单.
buy_price = round_to(float(check_order.get("price")) * (1 - float(config.gap_percent)), float(config.min_price))
if buy_price > bid_price > 0:
buy_price = round_to(bid_price, float(config.min_price))
new_buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY,
order_type=OrderType.LIMIT, quantity=quantity, price=buy_price)
if new_buy_order:
sell_delete_orders.append(sell_order)
self.buy_orders.append(new_buy_order)
sell_price = round_to(float(check_order.get("price")) * (1 + float(config.gap_percent)), float(config.min_price))
if 0 < sell_price < ask_price:
# 防止价格
sell_price = round_to(ask_price, float(config.min_price))
new_sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL,
order_type=OrderType.LIMIT, quantity=quantity,
price=sell_price)
if new_sell_order:
self.sell_orders.append(new_sell_order)
elif check_order.get('status') == OrderStatus.NEW.value:
print("sell order status is: New")
else:
print(f"sell order status is not in above options: {check_order.get('status')}")
# 过期或者拒绝的订单删除掉.
for delete_order in sell_delete_orders:
self.sell_orders.remove(delete_order)
# 没有买单的时候.
if len(self.buy_orders) <= 0:
if bid_price > 0:
price = round_to(bid_price * (1 - float(config.gap_percent)), float(config.min_price))
buy_order = self.http_client.place_order(symbol=config.symbol,order_side=OrderSide.BUY, order_type=OrderType.LIMIT, quantity=quantity,price=price)
if buy_order:
self.buy_orders.append(buy_order)
elif len(self.buy_orders) > int(config.max_orders): # 最多允许的挂单数量.
# 订单数量比较多的时候.
self.buy_orders.sort(key=lambda x: float(x['price']), reverse=False) # 最低价到最高价
delete_order = self.buy_orders[0]
order = self.http_client.cancel_order(delete_order.get('symbol'), client_order_id=delete_order.get('clientOrderId'))
if order:
self.buy_orders.remove(delete_order)
# 没有卖单的时候.
if len(self.sell_orders) <= 0:
if ask_price > 0:
price = round_to(ask_price * (1 + float(config.gap_percent)), float(config.min_price))
order = self.http_client.place_order(symbol=config.symbol,order_side=OrderSide.SELL, order_type=OrderType.LIMIT, quantity=quantity,price=price)
if order:
self.sell_orders.append(order)
elif len(self.sell_orders) > int(config.max_orders): # 最多允许的挂单数量.
# 订单数量比较多的时候.
self.sell_orders.sort(key=lambda x: x['price'], reverse=True) # 最高价到最低价
delete_order = self.sell_orders[0]
order = self.http_client.cancel_order(delete_order.get('symbol'),
client_order_id=delete_order.get('clientOrderId'))
if order:
self.sell_orders.remove(delete_order)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fetches, cleans, outputs TMCFs and CSVs for all World Bank development
indicator codes provided in WorldBankIndicators.csv for all years and for
all countries provided in WorldBankCountries.csv. """
from absl import app
import pandas as pd
import itertools
import requests
import zipfile
import io
import re
# Remaps the columns provided by the World Bank API to the internal
# column names used throughout this script.
WORLDBANK_COL_REMAP = {
    'Country Name': 'CountryName',
    'Country Code': 'CountryCode',
    'Indicator Name': 'IndicatorName',
    'Indicator Code': 'IndicatorCode'
}
# Template MCF for one StatVarObservation; the C:WorldBank->X references
# are resolved against columns of the exported CSVs.
TEMPLATE_TMCF = """
Node: E:WorldBank->E0
typeOf: dcs:StatVarObservation
variableMeasured: C:WorldBank->StatisticalVariable
observationDate: C:WorldBank->Year
observationPeriod: "P1Y"
observationAbout: C:WorldBank->ISO3166Alpha3
value: C:WorldBank->Value
"""
# Template for a generated StatisticalVariable node; the {PLACEHOLDER}
# tokens are substituted per indicator row, and optional property lines
# are removed entirely when the row has no value for them.
TEMPLATE_STAT_VAR = """
Node: dcid:WorldBank/{INDICATOR}
name: "{NAME}"
description: "{DESCRIPTION}"
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
measurementDenominator: dcs:{measurementDenominator}
{CONSTRAINTS}
"""
def read_worldbank(iso3166alpha3):
    """ Fetches and tidies all ~1500 World Bank indicators
    for a given ISO 3166 alpha 3 code.

    Downloads the entire indicator ZIP for the country (wide format, years
    as columns) and reshapes it into a narrow dataframe with one row per
    (indicator, year).

    Args:
        iso3166alpha3: ISO 3166 alpha 3 for a country, as a string.
    Returns:
        A tidied pandas dataframe with all indicator codes for a particular
        country in the format of (country, indicator, year, value).
    Notes:
        Takes approximately 10 seconds to download and
        tidy one country in a Jupyter notebook.
    """
    country_zip = ("http://api.worldbank.org/v2/en/country/" + iso3166alpha3 +
                   "?downloadformat=csv")
    r = requests.get(country_zip)
    filebytes = io.BytesIO(r.content)
    myzipfile = zipfile.ZipFile(filebytes)
    # We need to select the data file which starts with "API",
    # but does not have an otherwise regular filename structure.
    file_to_open = None
    for file in myzipfile.namelist():
        if file.startswith("API"):
            file_to_open = file
            break
    assert file_to_open is not None, \
        "Failed to find data for " + iso3166alpha3
    # Captures any text contained in double quotations.
    line_match = re.compile(r"\"([^\"]*)\"")
    header = None
    rows = []
    for line in myzipfile.open(file_to_open).readlines():
        # Cells are contained in quotations and comma separated.
        cols = line_match.findall(line.decode("utf-8"))
        # CSVs include header informational lines which should be ignored.
        if len(cols) > 2:
            if header is None:
                header = cols  # first data row provides the column names
            else:
                rows.append(cols)
    # Build the dataframe in one pass: DataFrame.append was deprecated and
    # removed in pandas 2.x, and appending row-by-row was O(n^2).
    df = pd.DataFrame(rows, columns=header)
    df = df.rename(columns=WORLDBANK_COL_REMAP)
    # Turn each year into its own row.
    df = df.set_index(
        ['CountryName', 'CountryCode', 'IndicatorName', 'IndicatorCode'])
    df = df.stack()
    df.index = df.index.rename('year', level=4)
    df.name = "Value"
    df = df.reset_index()
    # Convert to numeric and drop empty values.
    df['Value'] = pd.to_numeric(df['Value'])
    df = df.dropna()
    return df
def build_stat_vars_from_indicator_list(row):
    """ Generates World Bank StatVar for a row in the indicators dataframe. """

    def constraint_lines(r):
        """ Helper: render every pN/vN pair as a 'pN: dcs:vN' line. """
        pieces = []
        idx = 1
        while f"p{idx}" in r and not pd.isna(r[f"p{idx}"]):
            pieces.append(f"{r[f'p{idx}']}: dcs:{r[f'v{idx}']}\n")
            idx += 1
        return "".join(pieces)

    # Mandatory template substitutions, applied in order.
    substitutions = {
        "{INDICATOR}": row['IndicatorCode'].replace(".", "_"),
        "{NAME}": row['IndicatorName'],
        "{DESCRIPTION}": row['SourceNote'],
        "{measuredProperty}": row['measuredProp'],
        "{CONSTRAINTS}": constraint_lines(row),
    }
    new_stat_var = TEMPLATE_STAT_VAR
    for placeholder, value in substitutions.items():
        new_stat_var = new_stat_var.replace(placeholder, value)
    # Optional fields: substitute when present, otherwise drop the whole line.
    for optional_col in ('populationType', 'statType', 'measurementDenominator'):
        if pd.isna(row[optional_col]):
            new_stat_var = new_stat_var.replace(
                f"{optional_col}: dcs:{{{optional_col}}}\n", "")
        else:
            new_stat_var = new_stat_var.replace(f"{{{optional_col}}}",
                                                row[optional_col])
    return new_stat_var
def group_stat_vars_by_observation_properties(indicator_codes):
    """ Groups stat vars by their observation schemas.

    Groups Stat Vars by which optional StatVar Observation properties
    (e.g. measurementMethod, unit) they carry.  The template MCF schema
    cannot express optional CSV values, so each combination of properties
    gets its own template MCF and CSV.

    Args:
        indicator_codes: List of World Bank indicator codes with
            their Data Commons mappings, as a pandas dataframe.
    Returns:
        Array of tuples for each statistical variable grouping.
            1) template MCF, as a string.
            2) columns to include in exported csv, as a list of strings.
            3) indicator codes in this grouping, as a list of strings.
    """
    # The optional statistical observation properties we support.
    obs_props = ['measurementMethod', 'scalingFactor', 'sourceScalingFactor', 'unit']
    # Boolean dataframe: which cells of the config are populated.
    null_status = indicator_codes.notna()
    groupings = []
    # One grouping per inclusion/exclusion pattern of the four properties.
    for pattern in itertools.product([False, True], repeat=len(obs_props)):
        matches = null_status.copy()
        tmcf = TEMPLATE_TMCF
        csv_columns = ['IndicatorCode']
        for is_included, prop in zip(pattern, obs_props):
            # Keep only rows whose presence flag equals this pattern bit.
            matches = matches.query(f"{prop} == {is_included}")
            if is_included:
                tmcf += f"{prop}: C:WorldBank->{prop}\n"
                csv_columns.append(prop)
        group_codes = list(indicator_codes.loc[matches.index]['IndicatorCode'])
        groupings.append((tmcf, csv_columns, group_codes))
    return groupings
def download_indicator_data(worldbank_countries, indicator_codes):
    """ Downloads World Bank country data for all countries and
    indicators provided.

    Retains only the unique indicator codes provided.

    Args:
        worldbank_countries: Dataframe with ISO 3166 alpha 3 code for each
            country.
        indicator_codes: Dataframe with INDICATOR_CODES to include.
    Returns:
        worldbank_dataframe: A tidied pandas dataframe where each row has
        the format (indicator code, ISO 3166 alpha 3, year, value)
        for all countries and all indicators provided.
    """
    # Set membership is O(1) per row versus O(n) with a list.
    indicators_to_keep = set(indicator_codes['IndicatorCode'].unique())
    country_frames = []
    for country_code in worldbank_countries['ISO3166Alpha3']:
        print(f"Downloading {country_code}")
        country_df = read_worldbank(country_code)
        # Remove indicators that were not requested.
        country_df = country_df[country_df['IndicatorCode'].isin(
            indicators_to_keep)]
        # Tag every row with its country code.
        country_df['ISO3166Alpha3'] = country_code
        country_frames.append(country_df)
    # Concatenate once: DataFrame.append was removed in pandas 2.x and
    # repeated appends were O(n^2).
    worldbank_dataframe = pd.concat(country_frames, ignore_index=True)
    # Map indicator codes to unique Statistical Variable names.
    # Fix: the original f-string reused double quotes inside the replacement
    # field ({code.replace(".", "_")}) — a SyntaxError on Python < 3.12.
    worldbank_dataframe['StatisticalVariable'] = (
        worldbank_dataframe['IndicatorCode'].apply(
            lambda code: "WorldBank/" + code.replace('.', '_')))
    return worldbank_dataframe.rename({'year': 'Year'}, axis=1)
def output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
                                    indicator_codes):
    """ Outputs TMCFs and CSVs for each grouping of stat vars.

    Args:
        worldbank_dataframe: Dataframe containing all indicators for all
            countries.
        tmcfs_for_stat_vars: Array of tuples of template MCF,
            columns on stat var observations,
            indicator codes for that template.
        indicator_codes: Dataframe with INDICATOR_CODES to include.
    """
    # Restrict to the columns that appear in the final CSVs.
    base_csv = worldbank_dataframe[[
        'StatisticalVariable', 'IndicatorCode', 'ISO3166Alpha3', 'Year', 'Value'
    ]]
    # Emit one tmcf/csv pair per non-empty World Bank grouping.
    for index, (tmcf, obs_cols, group_codes) in enumerate(tmcfs_for_stat_vars):
        if len(group_codes) == 0:
            continue  # nothing to emit for this grouping
        with open(f"output/WorldBank_{index}.tmcf", 'w', newline='') as f_out:
            f_out.write(tmcf)
        # Keep only the indicator codes belonging to this grouping.
        subset = base_csv[base_csv['IndicatorCode'].isin(group_codes)]
        # Attach the stat-var-observation columns when the grouping has any.
        if len(obs_cols) > 1:
            subset = pd.merge(subset, indicator_codes[obs_cols],
                              on="IndicatorCode")
        # Format to decimals.
        subset = subset.round(10)
        subset.drop("IndicatorCode", axis=1).to_csv(
            f"output/WorldBank_{index}.csv",
            float_format='%.10f',
            index=False)
def source_scaling_remap(row, scaling_factor_lookup, existing_stat_var_lookup):
    """ Scales values by sourceScalingFactor and inputs existing stat vars.

    Some World Bank measures come per thousand or per hundred thousand, so
    their values are divided down to the common denomination.  Some
    statistical variables (e.g. Count_Person_InLaborForce) already exist in
    Data Commons, so the generated name is swapped for the existing dcid.

    Args:
        row: A single dataframe row with IndicatorCode, Value, and
            StatisticalVariable entries; mutated in place and returned.
        scaling_factor_lookup: Maps World Bank indicator code to its
            numeric scaling factor.
        existing_stat_var_lookup: Maps indicator code to the existing stat
            var that replaces the generated one.
    """
    code = row['IndicatorCode']
    factor = scaling_factor_lookup.get(code)
    if factor is not None:
        # Normalize to the common denomination (e.g. per-thousand -> base).
        row['Value'] = row['Value'] / int(factor)
    replacement = existing_stat_var_lookup.get(code)
    if replacement is not None:
        row['StatisticalVariable'] = "dcid:" + replacement
    return row
def main(_):
    """Run the full World Bank pipeline: read the indicator/country config,
    download all data, normalize it, and write the StatVar MCF plus the
    grouped TMCF/CSV outputs under output/.
    """
    # Load statistical variable configuration file.
    indicator_codes = pd.read_csv("WorldBankIndicators.csv")
    # Add source description to note.
    def add_source_to_description(row):
        # Append the optional 'Source' column to the note when present.
        if not pd.isna(row['Source']):
            return row['SourceNote'] + " " + str(row['Source'])
        else:
            return row['SourceNote']
    indicator_codes['SourceNote'] = indicator_codes.apply(
        add_source_to_description, axis=1)
    # Generate stat vars.
    with open("output/WorldBank_StatisticalVariables.mcf", "w+") as f_out:
        # Generate StatVars for fields that don't exist. Some fields such as
        # Count_Person_Unemployed are already statistical variables so we do
        # not need to recreate them.
        for _, row in indicator_codes[
                indicator_codes['ExistingStatVar'].isna()].iterrows():
            f_out.write(build_stat_vars_from_indicator_list(row))
    # Create template MCFs for each grouping of stat vars.
    tmcfs_for_stat_vars = (
        group_stat_vars_by_observation_properties(indicator_codes))
    # Download data for all countries.
    worldbank_countries = pd.read_csv("WorldBankCountries.csv")
    worldbank_dataframe = download_indicator_data(worldbank_countries,
                                                  indicator_codes)
    # Remap columns to match the expected Data Commons format.
    worldbank_dataframe['Value'] = pd.to_numeric(worldbank_dataframe['Value'])
    worldbank_dataframe['ISO3166Alpha3'] = (
        worldbank_dataframe['ISO3166Alpha3'].apply(
            lambda code: "dcs:country/" + code))
    worldbank_dataframe['StatisticalVariable'] = \
        worldbank_dataframe['StatisticalVariable'].apply(
            lambda code: "dcs:" + code)
    # Scale values by scaling factor and replace existing StatVars.
    scaling_factor_lookup = (indicator_codes.set_index("IndicatorCode")
                             ['sourceScalingFactor'].dropna().to_dict())
    existing_stat_var_lookup = (indicator_codes.set_index("IndicatorCode")
                                ['ExistingStatVar'].dropna().to_dict())
    worldbank_dataframe = worldbank_dataframe.apply(
        lambda row: source_scaling_remap(row, scaling_factor_lookup,
                                         existing_stat_var_lookup),
        axis=1)
    # Convert integer columns (flagged via ConvertToInt in the config).
    int_cols = (list(indicator_codes[indicator_codes['ConvertToInt'] == True]
                     ['IndicatorCode'].unique()))
    worldbank_subset = worldbank_dataframe[
        worldbank_dataframe['IndicatorCode'].isin(int_cols)].index
    worldbank_dataframe.loc[worldbank_subset, "Value"] = (pd.to_numeric(
        worldbank_dataframe.loc[worldbank_subset, "Value"], downcast="integer"))
    # Output final CSVs and variables.
    output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
                                    indicator_codes)
if __name__ == '__main__':
    app.run(main)
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fetches, cleans, outputs TMCFs and CSVs for all World Bank development
indicator codes provided in WorldBankIndicators.csv for all years and for
all countries provided in WorldBankCountries.csv. """
from absl import app
import pandas as pd
import itertools
import requests
import zipfile
import io
import re
# Remaps the columns provided by the World Bank API to the internal
# column names used throughout this script.
WORLDBANK_COL_REMAP = {
    'Country Name': 'CountryName',
    'Country Code': 'CountryCode',
    'Indicator Name': 'IndicatorName',
    'Indicator Code': 'IndicatorCode'
}
# Template MCF for one StatVarObservation; the C:WorldBank->X references
# are resolved against columns of the exported CSVs.
TEMPLATE_TMCF = """
Node: E:WorldBank->E0
typeOf: dcs:StatVarObservation
variableMeasured: C:WorldBank->StatisticalVariable
observationDate: C:WorldBank->Year
observationPeriod: "P1Y"
observationAbout: C:WorldBank->ISO3166Alpha3
value: C:WorldBank->Value
"""
# Template for a generated StatisticalVariable node; the {PLACEHOLDER}
# tokens are substituted per indicator row, and optional property lines
# are removed entirely when the row has no value for them.
TEMPLATE_STAT_VAR = """
Node: dcid:WorldBank/{INDICATOR}
name: "{NAME}"
description: "{DESCRIPTION}"
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
measurementDenominator: dcs:{measurementDenominator}
{CONSTRAINTS}
"""
def read_worldbank(iso3166alpha3):
    """ Fetches and tidies all ~1500 World Bank indicators
    for a given ISO 3166 alpha 3 code.

    Downloads the entire indicator ZIP for the country (wide format, years
    as columns) and reshapes it into a narrow dataframe with one row per
    (indicator, year).

    Args:
        iso3166alpha3: ISO 3166 alpha 3 for a country, as a string.
    Returns:
        A tidied pandas dataframe with all indicator codes for a particular
        country in the format of (country, indicator, year, value).
    Notes:
        Takes approximately 10 seconds to download and
        tidy one country in a Jupyter notebook.
    """
    country_zip = ("http://api.worldbank.org/v2/en/country/" + iso3166alpha3 +
                   "?downloadformat=csv")
    r = requests.get(country_zip)
    filebytes = io.BytesIO(r.content)
    myzipfile = zipfile.ZipFile(filebytes)
    # We need to select the data file which starts with "API",
    # but does not have an otherwise regular filename structure.
    file_to_open = None
    for file in myzipfile.namelist():
        if file.startswith("API"):
            file_to_open = file
            break
    assert file_to_open is not None, \
        "Failed to find data for " + iso3166alpha3
    # Captures any text contained in double quotations.
    line_match = re.compile(r"\"([^\"]*)\"")
    header = None
    rows = []
    for line in myzipfile.open(file_to_open).readlines():
        # Cells are contained in quotations and comma separated.
        cols = line_match.findall(line.decode("utf-8"))
        # CSVs include header informational lines which should be ignored.
        if len(cols) > 2:
            if header is None:
                header = cols  # first data row provides the column names
            else:
                rows.append(cols)
    # Build the dataframe in one pass: DataFrame.append was deprecated and
    # removed in pandas 2.x, and appending row-by-row was O(n^2).
    df = pd.DataFrame(rows, columns=header)
    df = df.rename(columns=WORLDBANK_COL_REMAP)
    # Turn each year into its own row.
    df = df.set_index(
        ['CountryName', 'CountryCode', 'IndicatorName', 'IndicatorCode'])
    df = df.stack()
    df.index = df.index.rename('year', level=4)
    df.name = "Value"
    df = df.reset_index()
    # Convert to numeric and drop empty values.
    df['Value'] = pd.to_numeric(df['Value'])
    df = df.dropna()
    return df
def build_stat_vars_from_indicator_list(row):
    """ Generates World Bank StatVar for a row in the indicators dataframe. """

    def constraint_lines(r):
        """ Helper: render every pN/vN pair as a 'pN: dcs:vN' line. """
        pieces = []
        idx = 1
        while f"p{idx}" in r and not pd.isna(r[f"p{idx}"]):
            pieces.append(f"{r[f'p{idx}']}: dcs:{r[f'v{idx}']}\n")
            idx += 1
        return "".join(pieces)

    # Mandatory template substitutions, applied in order.
    substitutions = {
        "{INDICATOR}": row['IndicatorCode'].replace(".", "_"),
        "{NAME}": row['IndicatorName'],
        "{DESCRIPTION}": row['SourceNote'],
        "{measuredProperty}": row['measuredProp'],
        "{CONSTRAINTS}": constraint_lines(row),
    }
    new_stat_var = TEMPLATE_STAT_VAR
    for placeholder, value in substitutions.items():
        new_stat_var = new_stat_var.replace(placeholder, value)
    # Optional fields: substitute when present, otherwise drop the whole line.
    for optional_col in ('populationType', 'statType', 'measurementDenominator'):
        if pd.isna(row[optional_col]):
            new_stat_var = new_stat_var.replace(
                f"{optional_col}: dcs:{{{optional_col}}}\n", "")
        else:
            new_stat_var = new_stat_var.replace(f"{{{optional_col}}}",
                                                row[optional_col])
    return new_stat_var
def group_stat_vars_by_observation_properties(indicator_codes):
    """ Groups stat vars by their observation schemas.

    Groups Stat Vars by which optional StatVar Observation properties
    (e.g. measurementMethod, unit) they carry.  The template MCF schema
    cannot express optional CSV values, so each combination of properties
    gets its own template MCF and CSV.

    Args:
        indicator_codes: List of World Bank indicator codes with
            their Data Commons mappings, as a pandas dataframe.
    Returns:
        Array of tuples for each statistical variable grouping.
            1) template MCF, as a string.
            2) columns to include in exported csv, as a list of strings.
            3) indicator codes in this grouping, as a list of strings.
    """
    # The optional statistical observation properties we support.
    obs_props = ['measurementMethod', 'scalingFactor', 'sourceScalingFactor', 'unit']
    # Boolean dataframe: which cells of the config are populated.
    null_status = indicator_codes.notna()
    groupings = []
    # One grouping per inclusion/exclusion pattern of the four properties.
    for pattern in itertools.product([False, True], repeat=len(obs_props)):
        matches = null_status.copy()
        tmcf = TEMPLATE_TMCF
        csv_columns = ['IndicatorCode']
        for is_included, prop in zip(pattern, obs_props):
            # Keep only rows whose presence flag equals this pattern bit.
            matches = matches.query(f"{prop} == {is_included}")
            if is_included:
                tmcf += f"{prop}: C:WorldBank->{prop}\n"
                csv_columns.append(prop)
        group_codes = list(indicator_codes.loc[matches.index]['IndicatorCode'])
        groupings.append((tmcf, csv_columns, group_codes))
    return groupings
def download_indicator_data(worldbank_countries, indicator_codes):
    """ Downloads World Bank country data for all countries and
    indicators provided.

    Retains only the unique indicator codes provided.

    Args:
        worldbank_countries: Dataframe with ISO 3166 alpha 3 code for each
            country.
        indicator_codes: Dataframe with INDICATOR_CODES to include.
    Returns:
        worldbank_dataframe: A tidied pandas dataframe where each row has
        the format (indicator code, ISO 3166 alpha 3, year, value)
        for all countries and all indicators provided.
    """
    # Set membership is O(1) per row versus O(n) with a list.
    indicators_to_keep = set(indicator_codes['IndicatorCode'].unique())
    country_frames = []
    for country_code in worldbank_countries['ISO3166Alpha3']:
        print(f"Downloading {country_code}")
        country_df = read_worldbank(country_code)
        # Remove indicators that were not requested.
        country_df = country_df[country_df['IndicatorCode'].isin(
            indicators_to_keep)]
        # Tag every row with its country code.
        country_df['ISO3166Alpha3'] = country_code
        country_frames.append(country_df)
    # Concatenate once: DataFrame.append was removed in pandas 2.x and
    # repeated appends were O(n^2).
    worldbank_dataframe = pd.concat(country_frames, ignore_index=True)
    # Map indicator codes to unique Statistical Variable names.
    worldbank_dataframe['StatisticalVariable'] = (
        worldbank_dataframe['IndicatorCode'].apply(
            lambda code: "WorldBank/" + code.replace('.', '_')))
    return worldbank_dataframe.rename({'year': 'Year'}, axis=1)
def output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
                                    indicator_codes):
    """ Outputs TMCFs and CSVs for each grouping of stat vars.

    Args:
        worldbank_dataframe: Dataframe containing all indicators for all
            countries.
        tmcfs_for_stat_vars: Array of tuples of template MCF,
            columns on stat var observations,
            indicator codes for that template.
        indicator_codes: Dataframe with INDICATOR_CODES to include.
    """
    # Restrict to the columns that appear in the final CSVs.
    base_csv = worldbank_dataframe[[
        'StatisticalVariable', 'IndicatorCode', 'ISO3166Alpha3', 'Year', 'Value'
    ]]
    # Emit one tmcf/csv pair per non-empty World Bank grouping.
    for index, (tmcf, obs_cols, group_codes) in enumerate(tmcfs_for_stat_vars):
        if len(group_codes) == 0:
            continue  # nothing to emit for this grouping
        with open(f"output/WorldBank_{index}.tmcf", 'w', newline='') as f_out:
            f_out.write(tmcf)
        # Keep only the indicator codes belonging to this grouping.
        subset = base_csv[base_csv['IndicatorCode'].isin(group_codes)]
        # Attach the stat-var-observation columns when the grouping has any.
        if len(obs_cols) > 1:
            subset = pd.merge(subset, indicator_codes[obs_cols],
                              on="IndicatorCode")
        # Format to decimals.
        subset = subset.round(10)
        subset.drop("IndicatorCode", axis=1).to_csv(
            f"output/WorldBank_{index}.csv",
            float_format='%.10f',
            index=False)
def source_scaling_remap(row, scaling_factor_lookup, existing_stat_var_lookup):
    """ Scales values by sourceScalingFactor and inputs existing stat vars.

    Some World Bank measures come per thousand or per hundred thousand, so
    their values are divided down to the common denomination.  Some
    statistical variables (e.g. Count_Person_InLaborForce) already exist in
    Data Commons, so the generated name is swapped for the existing dcid.

    Args:
        row: A single dataframe row with IndicatorCode, Value, and
            StatisticalVariable entries; mutated in place and returned.
        scaling_factor_lookup: Maps World Bank indicator code to its
            numeric scaling factor.
        existing_stat_var_lookup: Maps indicator code to the existing stat
            var that replaces the generated one.
    """
    code = row['IndicatorCode']
    factor = scaling_factor_lookup.get(code)
    if factor is not None:
        # Normalize to the common denomination (e.g. per-thousand -> base).
        row['Value'] = row['Value'] / int(factor)
    replacement = existing_stat_var_lookup.get(code)
    if replacement is not None:
        row['StatisticalVariable'] = "dcid:" + replacement
    return row
def main(_):
    """Run the full World Bank pipeline: read the indicator/country config,
    download all data, normalize it, and write the StatVar MCF plus the
    grouped TMCF/CSV outputs under output/.
    """
    # Load statistical variable configuration file.
    indicator_codes = pd.read_csv("WorldBankIndicators.csv")
    # Add source description to note.
    def add_source_to_description(row):
        # Append the optional 'Source' column to the note when present.
        if not pd.isna(row['Source']):
            return row['SourceNote'] + " " + str(row['Source'])
        else:
            return row['SourceNote']
    indicator_codes['SourceNote'] = indicator_codes.apply(
        add_source_to_description, axis=1)
    # Generate stat vars.
    with open("output/WorldBank_StatisticalVariables.mcf", "w+") as f_out:
        # Generate StatVars for fields that don't exist. Some fields such as
        # Count_Person_Unemployed are already statistical variables so we do
        # not need to recreate them.
        for _, row in indicator_codes[
                indicator_codes['ExistingStatVar'].isna()].iterrows():
            f_out.write(build_stat_vars_from_indicator_list(row))
    # Create template MCFs for each grouping of stat vars.
    tmcfs_for_stat_vars = (
        group_stat_vars_by_observation_properties(indicator_codes))
    # Download data for all countries.
    worldbank_countries = pd.read_csv("WorldBankCountries.csv")
    worldbank_dataframe = download_indicator_data(worldbank_countries,
                                                  indicator_codes)
    # Remap columns to match the expected Data Commons format.
    worldbank_dataframe['Value'] = pd.to_numeric(worldbank_dataframe['Value'])
    worldbank_dataframe['ISO3166Alpha3'] = (
        worldbank_dataframe['ISO3166Alpha3'].apply(
            lambda code: "dcs:country/" + code))
    worldbank_dataframe['StatisticalVariable'] = \
        worldbank_dataframe['StatisticalVariable'].apply(
            lambda code: "dcs:" + code)
    # Scale values by scaling factor and replace existing StatVars.
    scaling_factor_lookup = (indicator_codes.set_index("IndicatorCode")
                             ['sourceScalingFactor'].dropna().to_dict())
    existing_stat_var_lookup = (indicator_codes.set_index("IndicatorCode")
                                ['ExistingStatVar'].dropna().to_dict())
    worldbank_dataframe = worldbank_dataframe.apply(
        lambda row: source_scaling_remap(row, scaling_factor_lookup,
                                         existing_stat_var_lookup),
        axis=1)
    # Convert integer columns (flagged via ConvertToInt in the config).
    int_cols = (list(indicator_codes[indicator_codes['ConvertToInt'] == True]
                     ['IndicatorCode'].unique()))
    worldbank_subset = worldbank_dataframe[
        worldbank_dataframe['IndicatorCode'].isin(int_cols)].index
    worldbank_dataframe.loc[worldbank_subset, "Value"] = (pd.to_numeric(
        worldbank_dataframe.loc[worldbank_subset, "Value"], downcast="integer"))
    # Output final CSVs and variables.
    output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
                                    indicator_codes)
if __name__ == '__main__':
    app.run(main)
|
import os
import os.path as osp
import pickle
import random
from collections import deque
from datetime import datetime
import gym
import numpy as np
import scipy.stats as stats
import torch
import torch.optim as optim
from mpi4py import MPI
import dr
from dr.ppo.models import Policy, ValueNet
from dr.ppo.train import one_train_iter
from dr.ppo.utils import set_torch_num_threads, RunningMeanStd, traj_seg_gen
COMM = MPI.COMM_WORLD  # module-wide handle to the MPI world communicator
import tensorboardX
def set_global_seeds(i):
    """Seed every RNG source we rely on (torch, numpy, stdlib random,
    and all CUDA devices when available) with ``i`` for reproducibility."""
    for seed_fn in (torch.manual_seed, np.random.seed, random.seed):
        seed_fn(i)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(i)
class CEMOptimizer(object):
    """Cross-entropy-method optimizer over a box-bounded solution space.

    Keeps a diagonal-Gaussian sampling distribution; each iteration draws
    ``popsize`` truncated-normal candidates, scores them with the supplied
    cost function, and blends the statistics of the ``num_elites``
    lowest-cost samples into the mean/variance (smoothing factor ``alpha``).
    Per-dimension mean/variance and the best cost are logged to a
    tensorboardX SummaryWriter.
    """

    def __init__(self, sol_dim, max_iters, popsize, num_elites, cost_function,
                 upper_bound=None, lower_bound=None, epsilon=0.001, alpha=0.25, viz_dir=None):
        """Creates an instance of this class.
        Arguments:
            sol_dim (int): The dimensionality of the problem space
            max_iters (int): The maximum number of iterations to perform during optimization
            popsize (int): The number of candidate solutions to be sampled at every iteration
            num_elites (int): The number of top solutions that will be used to obtain the distribution
                at the next iteration.
            cost_function (callable): Maps (samples, iteration) to a vector of
                per-sample costs; lower is better.
            upper_bound (np.array): An array of upper bounds
            lower_bound (np.array): An array of lower bounds
            epsilon (float): A minimum variance. If the maximum variance drops below epsilon, optimization is
                stopped.
            alpha (float): Controls how much of the previous mean and variance is used for the next iteration.
                next_mean = alpha * old_mean + (1 - alpha) * elite_mean, and similarly for variance.
            viz_dir (str): Optional directory for the tensorboard event files.
        """
        super().__init__()
        self.sol_dim, self.max_iters, self.popsize, self.num_elites = sol_dim, max_iters, popsize, num_elites
        self.ub, self.lb = upper_bound, lower_bound
        self.epsilon, self.alpha = epsilon, alpha
        self.cost_function = cost_function
        if viz_dir is not None:
            self.writer = tensorboardX.SummaryWriter(viz_dir)
        else:
            self.writer = tensorboardX.SummaryWriter()
        # NOTE(review): validation happens after the writer is created, so an
        # invalid configuration still leaves an (empty) event file behind.
        if num_elites > popsize:
            raise ValueError("Number of elites must be at most the population size.")

    def reset(self):
        # No per-run state to clear; kept for interface compatibility.
        pass

    def obtain_solution(self, init_mean, init_var):
        """Optimizes the cost function using the provided initial candidate distribution
        Arguments:
            init_mean (np.ndarray): The mean of the initial candidate distribution.
            init_var (np.ndarray): The variance of the initial candidate distribution.
        Returns:
            dict: per-iteration histories under 'mean_hist', 'costs_hist',
            'var_hist'.
        """
        mean, var, t = init_mean, init_var, 0
        # Standard truncated normal on [-2, 2]; rescaled each iteration below.
        # Draws consume numpy's global RNG state.
        X = stats.truncnorm(-2, 2, loc=np.zeros_like(mean), scale=np.ones_like(var))
        costs_hist = []
        mean_hist = []
        var_hist = []
        while (t < self.max_iters) and np.max(var) > self.epsilon:
            # Shrink the sampling variance so +/-2 sigma stays inside the
            # [lb, ub] box around the current mean.
            lb_dist, ub_dist = mean - self.lb, self.ub - mean
            constrained_var = np.minimum(np.minimum(np.square(lb_dist / 2), np.square(ub_dist / 2)), var)
            samples = X.rvs(size=[self.popsize, self.sol_dim]) * np.sqrt(constrained_var) + mean
            samples = samples.astype(np.float32)
            costs = self.cost_function(samples, t)
            # Elites: the num_elites lowest-cost samples.
            elites = samples[np.argsort(costs)][:self.num_elites]
            new_mean = np.mean(elites, axis=0)
            new_var = np.var(elites, axis=0)
            # Exponential smoothing toward the elite statistics.
            mean = self.alpha * mean + (1 - self.alpha) * new_mean
            var = self.alpha * var + (1 - self.alpha) * new_var
            for i, m in enumerate(mean):
                self.writer.add_scalar(f'mean/{i}', m, t)
            for i, m in enumerate(var):
                self.writer.add_scalar(f'var/{i}', m, t)
            self.writer.add_scalar('costs', np.min(costs), t)
            t += 1
            costs_hist.append(costs)
            mean_hist.append(mean)
            var_hist.append(var)
        self.writer.close()
        return dict(
            mean_hist=mean_hist, costs_hist=costs_hist, var_hist=var_hist
        )
class PPO_Pytorch(object):
    """CEM-over-PPO training distributed across MPI ranks.

    Rank 0 runs a CEMOptimizer over flat vectors of simulation-parameter
    means and stdevs; every other rank loops in run(), receiving one
    candidate at a time, training a PPO policy under it via train(), and
    sending the evaluation-performance list back to rank 0.
    """

    def __init__(self, experiment_name, env_params, train_params, **kwargs):
        """Store the configuration and snapshot it into a fresh run directory.

        Arguments:
            experiment_name (str): Human-readable experiment tag (stored only).
            env_params (dict): Environment configuration; must provide at
                least 'env_name', 'backend' and 'collision_detector'.
            train_params (dict): Training hyperparameters; must provide at
                least 'seed', 'pop_size', 'num_elites' and the PPO settings
                read in train().
        """
        self.experiment_name = experiment_name
        self.env_params = env_params
        self.train_params = train_params
        # BUGFIX: the original nested single quotes inside a single-quoted
        # f-string, which is a SyntaxError on every Python before 3.12.
        # Double quotes inside match the convention used elsewhere in this
        # codebase.
        self.log_dir = osp.join(
            'runs',
            f'seed_{str(train_params["seed"])}_{datetime.now().strftime("%b%d_%H-%M-%S")}')
        os.makedirs(self.log_dir, exist_ok=True)
        # Persist the exact configuration used, for reproducibility.
        with open(osp.join(self.log_dir, 'env_params.pkl'), 'wb+') as f:
            pickle.dump(env_params, f)
        with open(osp.join(self.log_dir, 'train_params.pkl'), 'wb+') as f:
            pickle.dump(train_params, f)
        super().__init__()

    def train(self, env_id, backend,
              train_params, env_params,
              means, stdevs):
        """Train one PPO policy under the candidate parameter distribution.

        Arguments:
            env_id (str): Environment name ('Walker' or 'Hopper').
            backend (str): Physics backend name (currently unused here;
                kept for interface compatibility with the MPI message tuple).
            train_params (dict): PPO hyperparameters.
            env_params (dict): Environment configuration.
            means, stdevs: Flat per-quantity parameter vectors of equal length.
        Returns:
            list: evaluation performances recorded during training.
        """
        # Unpack PPO hyperparameters.
        hid_size = train_params['hid_size']
        pol_init_std = train_params['pol_init_std']
        adam_epsilon = train_params['adam_epsilon']
        optim_stepsize = train_params['optim_stepsize']
        # Convert the flat mean/stdev vectors to per-quantity dicts.
        assert len(means) == len(stdevs), (len(means), len(stdevs))
        mean_dict, stdev_dict = PPO_Pytorch._vec_to_dict(env_id, means, stdevs)
        # Point the (pre-built) environment distribution at this candidate.
        self.env_dist.default_parameters = mean_dict
        self.env_dist.stdev_dict = stdev_dict
        env = self.env_dist.root_env
        set_torch_num_threads()
        # Policy / value networks and their optimizers.
        pol = Policy(env.observation_space, env.action_space, hid_size, pol_init_std)
        pol_optim = optim.Adam(pol.parameters(), lr=optim_stepsize, eps=adam_epsilon)
        val = ValueNet(env.observation_space, hid_size)
        val_optim = optim.Adam(val.parameters(), lr=optim_stepsize, eps=adam_epsilon)
        optims = {'pol_optim': pol_optim, 'val_optim': val_optim}
        num_train_iter = int(train_params['num_timesteps'] / train_params['ts_per_batch'])
        # Running statistics over recent episode returns and observations.
        eps_rets_buff = deque(maxlen=100)
        eps_rets_mean_buff = []
        state_running_m_std = RunningMeanStd(shape=env.observation_space.shape)
        # seg_gen lazily yields batches of training trajectories.
        seg_gen = traj_seg_gen(self.env_dist, pol, val, state_running_m_std, env_params, train_params)
        eval_perfs = []
        for iter_i in range(num_train_iter):
            one_train_iter(pol, val, optims,
                           iter_i, eps_rets_buff, eps_rets_mean_buff, seg_gen,
                           state_running_m_std, train_params, self.eval_envs, eval_perfs)
        return eval_perfs

    def run(self):
        """Entry point: rank 0 drives CEM; every other rank serves train jobs."""
        set_global_seeds(self.train_params['seed'])
        # Unpack params.
        env_name = self.env_params['env_name']
        backend = self.env_params['backend']
        stdev = self.train_params['env_dist_stdev']
        mean_scale = self.train_params['mean_scale']
        seed = self.train_params['seed']
        num_eval_env = self.train_params['num_eval_env']
        collision_detector = self.env_params['collision_detector']
        # Initial CEM mean is the concatenation
        # [per-quantity means | per-quantity stdevs].
        env_dist = dr.dist.Normal(env_name, backend, mean_scale=mean_scale)
        init_mean_param = PPO_Pytorch._dict_to_vec(env_name, env_dist.default_parameters)
        init_stdev_param = np.array([stdev] * len(init_mean_param), dtype=np.float32)
        cem_init_mean = np.concatenate((init_mean_param, init_stdev_param))
        cem_init_stdev = np.array([1.0] * len(cem_init_mean), dtype=np.float32)
        # Envs reused for every training / evaluation round.
        self.env_dist = dr.dist.Normal(env_name, backend)
        self.env_dist.backend.set_collision_detector(env_dist.root_env, collision_detector)
        self.env_dist.seed(seed)
        if env_name == 'Walker':
            self.eval_envs = [gym.make('Walker2d-v2') for _ in range(num_eval_env)]
        elif env_name == 'Hopper':
            self.eval_envs = [gym.make('Hopper-v2') for _ in range(num_eval_env)]
        else:
            exit('Unrecognized environment')
        if COMM.Get_rank() == 0:
            self.optimizer = CEMOptimizer(
                sol_dim=30,
                max_iters=300,
                popsize=self.train_params['pop_size'],
                num_elites=self.train_params['num_elites'],
                cost_function=self._cost_function,
                lower_bound=0.0,
                # TODO: setting the upper bound this way, means that
                # if the initial dimension value is 0, then the upper bound is 0
                upper_bound=cem_init_mean * 5.0,
                alpha=0.75,
                viz_dir=self.log_dir
            )
            # This is buggy
            # https://github.com/lanpa/tensorboardX/issues/345
            self.optimizer.writer.add_text('env_params', str(self.env_params), 0)
            self.optimizer.writer.add_text('train_params', str(self.train_params), 0)
            res = self.optimizer.obtain_solution(cem_init_mean, cem_init_stdev)
            path = osp.join(self.log_dir, 'res.pkl')
            with open(path, 'wb') as f:
                pickle.dump(res, f)
            # Tear down the whole MPI job: the worker ranks below loop
            # forever and never exit on their own.
            COMM.Abort(0)
        else:
            # Worker loop: receive candidate params, train, report back.
            while True:
                args = COMM.recv(source=0)
                r = self.train(*args)
                COMM.send(r, dest=0)

    def _cost_function(self, samples, cem_timestep):
        """CEM cost: negated best eval performance of each candidate.

        `samples` holds one candidate row per MPI rank; each row splits in
        half into (means, stdevs).
        """
        print(f'cem_timestep: {cem_timestep}')
        env_name = self.env_params['env_name']
        backend = self.env_params['backend']
        pop_size = self.train_params['pop_size']  # NOTE(review): unused below
        argss = [(env_name, backend,
                  self.train_params, self.env_params,
                  samples[rank][:len(samples[rank]) // 2],
                  samples[rank][len(samples[rank]) // 2:]) for rank in range(len(samples))]
        # Fan the candidates out to the worker ranks...
        for rank in range(1, COMM.size):
            COMM.send(argss[rank], dest=rank)
        # ...while rank 0 evaluates its own candidate locally.
        r = self.train(*argss[0])
        reses = [(0, r)]  # 0 is the rank of this process
        # Collect results from the other processes, then order by rank.
        for rank in range(1, COMM.size):
            r = COMM.recv(source=rank)
            reses.append((rank, r))
        reses = sorted(reses, key=lambda k: k[0])
        print(reses)
        # Track the best-performing candidate.  NOTE(review): max_idx /
        # max_perf are computed but never used or logged afterwards; kept
        # to preserve existing behavior (max() raises on an empty list).
        max_idx = 0
        max_perf = max(reses[0][1])  # reses[0][1] is rank 0's eval-perf list
        for i, item in enumerate(reses):
            perf = max(item[1])
            if perf > max_perf:
                max_perf = perf
                max_idx = i
        # Cost = negative best evaluation performance per candidate.
        costs = [- max(i[1]) for i in reses]
        print(costs)
        print(min(costs))
        print()
        return costs

    @classmethod
    def _dict_to_vec(cls, env_id, d):
        """Flatten a {mass, damping, gravity} dict into one float vector."""
        return np.concatenate((
            d['mass'],
            d['damping'],
            [d['gravity']]
        )).flatten().copy()

    @classmethod
    def _vec_to_dict(cls, env_id, means, stdevs):
        """Split flat mean/stdev vectors into {mass, damping, gravity} dicts.

        Split points depend on the environment: Walker has 7 mass entries,
        Hopper has 4; the last entry is always gravity.
        """
        if env_id == 'Walker':
            return dict(
                mass=means[:7],
                damping=means[7:-1],
                gravity=means[-1]
            ), dict(
                mass=stdevs[:7],
                damping=stdevs[7:-1],
                gravity=stdevs[-1]
            )
        elif env_id == 'Hopper':
            return dict(
                mass=means[:4],
                damping=means[4:-1],
                gravity=means[-1]
            ), dict(
                mass=stdevs[:4],
                damping=stdevs[4:-1],
                gravity=stdevs[-1]
            )
        else:
            exit('Unrecognized environment')
import os
import os.path as osp
import pickle
import random
from collections import deque
from datetime import datetime
import gym
import numpy as np
import scipy.stats as stats
import torch
import torch.optim as optim
from mpi4py import MPI
import dr
from dr.ppo.models import Policy, ValueNet
from dr.ppo.train import one_train_iter
from dr.ppo.utils import set_torch_num_threads, RunningMeanStd, traj_seg_gen
COMM = MPI.COMM_WORLD  # module-wide handle to the MPI world communicator
import tensorboardX
def set_global_seeds(i):
    """Make torch, numpy and stdlib random deterministic for seed ``i``;
    also seeds every CUDA device when CUDA is available."""
    torch.manual_seed(i)
    np.random.seed(i)
    random.seed(i)
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed_all(i)
class CEMOptimizer(object):
    """Cross-entropy-method (CEM) optimizer for box-bounded problems.

    Each iteration samples a population from a truncated normal centred on
    the current mean, scores it with the user-supplied cost function, and
    blends the statistics of the lowest-cost elites into the mean/variance
    (smoothing factor ``alpha``).  Per-dimension mean/variance and the best
    cost are logged to a tensorboardX SummaryWriter.
    """

    def __init__(self, sol_dim, max_iters, popsize, num_elites, cost_function,
                 upper_bound=None, lower_bound=None, epsilon=0.001, alpha=0.25, viz_dir=None):
        """Configure the optimizer.

        Arguments:
            sol_dim (int): dimensionality of the problem space.
            max_iters (int): maximum number of CEM iterations.
            popsize (int): candidates sampled per iteration.
            num_elites (int): top solutions used to refit the distribution.
            cost_function (callable): (samples, iteration) -> per-sample costs.
            upper_bound (np.array): per-dimension upper bounds.
            lower_bound (np.array): per-dimension lower bounds.
            epsilon (float): stop once the largest variance drops below this.
            alpha (float): next = alpha * old + (1 - alpha) * elite statistic.
            viz_dir (str): optional tensorboard log directory.
        """
        super().__init__()
        self.sol_dim = sol_dim
        self.max_iters = max_iters
        self.popsize = popsize
        self.num_elites = num_elites
        self.ub = upper_bound
        self.lb = lower_bound
        self.epsilon = epsilon
        self.alpha = alpha
        self.cost_function = cost_function
        self.writer = (tensorboardX.SummaryWriter(viz_dir)
                       if viz_dir is not None else tensorboardX.SummaryWriter())
        if num_elites > popsize:
            raise ValueError("Number of elites must be at most the population size.")

    def reset(self):
        """No per-run state; present for interface compatibility."""
        pass

    def obtain_solution(self, init_mean, init_var):
        """Run CEM starting from the given candidate distribution.

        Arguments:
            init_mean (np.ndarray): mean of the initial candidate distribution.
            init_var (np.ndarray): variance of the initial candidate distribution.
        Returns:
            dict: per-iteration histories under 'mean_hist', 'costs_hist',
            'var_hist'.
        """
        mu, sigma2 = init_mean, init_var
        # Standard truncated normal on [-2, 2]; rescaled per iteration below.
        sampler = stats.truncnorm(-2, 2, loc=np.zeros_like(mu), scale=np.ones_like(sigma2))
        history = {'mean_hist': [], 'costs_hist': [], 'var_hist': []}
        step = 0
        while step < self.max_iters and np.max(sigma2) > self.epsilon:
            # Cap the sampling variance so +/-2 sigma stays inside [lb, ub].
            dist_lo = mu - self.lb
            dist_hi = self.ub - mu
            capped = np.minimum(np.square(dist_lo / 2), np.square(dist_hi / 2))
            capped = np.minimum(capped, sigma2)
            pop = sampler.rvs(size=[self.popsize, self.sol_dim]) * np.sqrt(capped) + mu
            pop = pop.astype(np.float32)
            scores = self.cost_function(pop, step)
            # Elites: the num_elites lowest-cost candidates.
            top = pop[np.argsort(scores)][:self.num_elites]
            # Smoothed update toward the elite statistics.
            mu = self.alpha * mu + (1 - self.alpha) * np.mean(top, axis=0)
            sigma2 = self.alpha * sigma2 + (1 - self.alpha) * np.var(top, axis=0)
            for dim, value in enumerate(mu):
                self.writer.add_scalar(f'mean/{dim}', value, step)
            for dim, value in enumerate(sigma2):
                self.writer.add_scalar(f'var/{dim}', value, step)
            self.writer.add_scalar('costs', np.min(scores), step)
            step += 1
            history['costs_hist'].append(scores)
            history['mean_hist'].append(mu)
            history['var_hist'].append(sigma2)
        self.writer.close()
        return history
class PPO_Pytorch(object):
    """CEM-over-PPO training distributed across MPI ranks.

    Rank 0 runs a CEMOptimizer over flat vectors of simulation-parameter
    means and stdevs; every other rank loops in run(), receiving one
    candidate at a time, training a PPO policy under it via train(), and
    sending the evaluation-performance list back to rank 0.
    """

    def __init__(self, experiment_name, env_params, train_params, **kwargs):
        """Store the configuration and snapshot it into a fresh run directory.

        Arguments:
            experiment_name (str): Human-readable experiment tag (stored only).
            env_params (dict): Environment configuration; read keys include
                'env_name', 'backend' and 'collision_detector'.
            train_params (dict): Training hyperparameters; read keys include
                'seed', 'pop_size', 'num_elites' and the PPO settings used
                in train().
        """
        self.experiment_name = experiment_name
        self.env_params = env_params
        self.train_params = train_params
        self.log_dir = osp.join('runs',
                                f'seed_{str(train_params["seed"])}_{datetime.now().strftime("%b%d_%H-%M-%S")}')
        os.makedirs(self.log_dir, exist_ok=True)
        # Persist the exact configuration used, for reproducibility.
        with open(osp.join(self.log_dir, 'env_params.pkl'), 'wb+') as f:
            pickle.dump(env_params, f)
        with open(osp.join(self.log_dir, 'train_params.pkl'), 'wb+') as f:
            pickle.dump(train_params, f)
        super().__init__()

    def train(self, env_id, backend,
              train_params, env_params,
              means, stdevs):
        """Train one PPO policy under the candidate parameter distribution.

        Arguments:
            env_id (str): Environment name ('Walker' or 'Hopper').
            backend: Physics backend identifier (unused in this method body;
                kept for compatibility with the MPI message tuple).
            train_params (dict): PPO hyperparameters.
            env_params (dict): Environment configuration.
            means, stdevs: Flat per-quantity parameter vectors, equal length.
        Returns:
            list: evaluation performances recorded during training.
        """
        # Unpack params
        hid_size = train_params['hid_size']
        pol_init_std = train_params['pol_init_std']
        adam_epsilon = train_params['adam_epsilon']
        optim_stepsize = train_params['optim_stepsize']
        # Convert means and stdevs to dict format
        assert len(means) == len(stdevs), (len(means), len(stdevs))
        mean_dict, stdev_dict = PPO_Pytorch._vec_to_dict(env_id, means, stdevs)
        # Set parameter of env (self.env_dist was built earlier in run()).
        self.env_dist.default_parameters = mean_dict
        self.env_dist.stdev_dict = stdev_dict
        env = self.env_dist.root_env
        set_torch_num_threads()
        # Construct policy and value network
        pol = Policy(env.observation_space, env.action_space, hid_size, pol_init_std)
        pol_optim = optim.Adam(pol.parameters(), lr=optim_stepsize, eps=adam_epsilon)
        val = ValueNet(env.observation_space, hid_size)
        val_optim = optim.Adam(val.parameters(), lr=optim_stepsize, eps=adam_epsilon)
        optims = {'pol_optim': pol_optim, 'val_optim': val_optim}
        num_train_iter = int(train_params['num_timesteps'] / train_params['ts_per_batch'])
        # Buffer for running statistics
        eps_rets_buff = deque(maxlen=100)
        eps_rets_mean_buff = []
        state_running_m_std = RunningMeanStd(shape=env.observation_space.shape)
        # seg_gen is a generator that yields the training data points
        seg_gen = traj_seg_gen(self.env_dist, pol, val, state_running_m_std, env_params, train_params)
        eval_perfs = []
        for iter_i in range(num_train_iter):
            one_train_iter(pol, val, optims,
                           iter_i, eps_rets_buff, eps_rets_mean_buff, seg_gen,
                           state_running_m_std, train_params, self.eval_envs, eval_perfs)
        return eval_perfs

    def run(self):
        """Entry point: rank 0 drives CEM; other ranks serve training jobs."""
        set_global_seeds(self.train_params['seed'])
        # Unpack params
        env_name = self.env_params['env_name']
        backend = self.env_params['backend']
        stdev = self.train_params['env_dist_stdev']
        mean_scale = self.train_params['mean_scale']
        seed = self.train_params['seed']
        num_eval_env = self.train_params['num_eval_env']
        collision_detector = self.env_params['collision_detector']
        # Obtain the initial value for the simulation parameters; the CEM
        # mean is the concatenation [per-quantity means | per-quantity stdevs].
        env_dist = dr.dist.Normal(env_name, backend, mean_scale=mean_scale)
        init_mean_param = PPO_Pytorch._dict_to_vec(env_name, env_dist.default_parameters)
        init_stdev_param = np.array([stdev] * len(init_mean_param), dtype=np.float32)
        cem_init_mean = np.concatenate((init_mean_param, init_stdev_param))
        cem_init_stdev = np.array([1.0] * len(cem_init_mean), dtype=np.float32)
        # Make envs that will be reused for training and eval
        self.env_dist = dr.dist.Normal(env_name, backend)
        self.env_dist.backend.set_collision_detector(env_dist.root_env, collision_detector)
        self.env_dist.seed(seed)
        if env_name == 'Walker':
            self.eval_envs = [gym.make('Walker2d-v2') for _ in range(num_eval_env)]
        elif env_name == 'Hopper':
            self.eval_envs = [gym.make('Hopper-v2') for _ in range(num_eval_env)]
        else:
            exit('Unrecognized environment')
        if COMM.Get_rank() == 0:
            self.optimizer = CEMOptimizer(
                sol_dim=30,
                max_iters=300,
                popsize=self.train_params['pop_size'],
                num_elites=self.train_params['num_elites'],
                cost_function=self._cost_function,
                lower_bound=0.0,
                # TODO: setting the upper bound this way, means that
                # if the initial dimension value is 0, then the upper bound is 0
                upper_bound=cem_init_mean * 5.0,
                alpha=0.75,
                viz_dir=self.log_dir
            )
            # This is buggy
            # https://github.com/lanpa/tensorboardX/issues/345
            self.optimizer.writer.add_text('env_params', str(self.env_params), 0)
            self.optimizer.writer.add_text('train_params', str(self.train_params), 0)
            res = self.optimizer.obtain_solution(cem_init_mean, cem_init_stdev)
            path = osp.join(self.log_dir, 'res.pkl')
            with open(path, 'wb') as f:
                pickle.dump(res, f)
            # Tear down the whole MPI job: the worker ranks in the else
            # branch loop forever and never exit on their own.
            COMM.Abort(0)
        else:
            # Worker loop: receive candidate params, train, report back.
            while True:
                args = COMM.recv(source=0)
                r = self.train(*args)
                COMM.send(r, dest=0)

    def _cost_function(self, samples, cem_timestep):
        """CEM cost: negated best eval performance of each candidate.

        `samples` holds one candidate row per MPI rank; each row is split
        in half into (means, stdevs).
        """
        print(f'cem_timestep: {cem_timestep}')
        env_name = self.env_params['env_name']
        backend = self.env_params['backend']
        pop_size = self.train_params['pop_size']  # NOTE(review): unused below
        argss = [(env_name, backend,
                  self.train_params, self.env_params,
                  samples[rank][:len(samples[rank]) // 2],
                  samples[rank][len(samples[rank]) // 2:]) for rank in range(len(samples))]
        # Send args to other MPI processes
        for rank in range(1, COMM.size):
            COMM.send(argss[rank], dest=rank)
        # Obtain results for all args; rank 0 evaluates its own candidate.
        r = self.train(*argss[0])
        reses = [(0, r)]  # 0 is the rank of this process
        # Receive results from the other processes:
        for rank in range(1, COMM.size):
            r = COMM.recv(source=rank)
            reses.append((rank, r))
        reses = sorted(reses, key=lambda k: k[0])
        print(reses)
        # Get the index of the highest performing model in population
        # and write result to tensorboard
        # NOTE(review): max_idx / max_perf are computed but never used or
        # written anywhere below.
        max_idx = 0
        max_perf = max(reses[0][1])  # 0 is the result of process rank 0. 1 brings us the eval perf list
        for i, item in enumerate(reses):
            perf = max(item[1])
            if perf > max_perf:
                max_perf = perf
                max_idx = i
        # Obtain the "costs" that the CEM cost function should return
        costs = [- max(i[1]) for i in reses]
        print(costs)
        print(min(costs))
        print()
        return costs

    @classmethod
    def _dict_to_vec(cls, env_id, d):
        """Flatten a {mass, damping, gravity} dict into one float vector."""
        return np.concatenate((
            d['mass'],
            d['damping'],
            [d['gravity']]
        )).flatten().copy()

    @classmethod
    def _vec_to_dict(cls, env_id, means, stdevs):
        """Split flat mean/stdev vectors into {mass, damping, gravity} dicts.

        Split points depend on the environment: Walker has 7 mass entries,
        Hopper has 4; the last entry is always gravity.
        """
        if env_id == 'Walker':
            return dict(
                mass=means[:7],
                damping=means[7:-1],
                gravity=means[-1]
            ), dict(
                mass=stdevs[:7],
                damping=stdevs[7:-1],
                gravity=stdevs[-1]
            )
        elif env_id == 'Hopper':
            return dict(
                mass=means[:4],
                damping=means[4:-1],
                gravity=means[-1]
            ), dict(
                mass=stdevs[:4],
                damping=stdevs[4:-1],
                gravity=stdevs[-1]
            )
        else:
            exit('Unrecognized environment')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.