""" 火币合约接口 """ import re import urllib import base64 import json import zlib import hashlib import hmac import sys from copy import copy from datetime import datetime, timedelta from threading import Lock from typing import Sequence from vnpy.event import Event from vnpy.api.rest import RestClient, Request from vnpy.api.websocket import WebsocketClient from vnpy.trader.constant import ( Direction, Offset, Exchange, Product, Status, OrderType, Interval ) from vnpy.trader.gateway import BaseGateway from vnpy.trader.object import ( TickData, OrderData, TradeData, BarData, AccountData, PositionData, ContractData, OrderRequest, CancelRequest, SubscribeRequest, HistoryRequest ) from vnpy.trader.event import EVENT_TIMER REST_HOST = "https://api.hbdm.com" WEBSOCKET_DATA_HOST = "wss://www.hbdm.com/ws" # Market Data WEBSOCKET_TRADE_HOST = "wss://api.hbdm.com/notification" # Account and Order STATUS_HBDM2VT = { 3: Status.NOTTRADED, 4: Status.PARTTRADED, 5: Status.CANCELLED, 6: Status.ALLTRADED, 7: Status.CANCELLED, } ORDERTYPE_VT2HBDM = { OrderType.MARKET: "opponent", OrderType.LIMIT: "limit", OrderType.FOK: "fok", OrderType.FAK: "ioc" } ORDERTYPE_HBDM2VT = {v: k for k, v in ORDERTYPE_VT2HBDM.items()} ORDERTYPE_HBDM2VT[1] = OrderType.LIMIT ORDERTYPE_HBDM2VT[3] = OrderType.MARKET ORDERTYPE_HBDM2VT[4] = OrderType.MARKET ORDERTYPE_HBDM2VT[5] = OrderType.STOP ORDERTYPE_HBDM2VT[6] = OrderType.LIMIT ORDERTYPE_HBDM2VT["lightning"] = OrderType.MARKET ORDERTYPE_HBDM2VT["optimal_5"] = OrderType.MARKET ORDERTYPE_HBDM2VT["optimal_10"] = OrderType.MARKET ORDERTYPE_HBDM2VT["optimal_20"] = OrderType.MARKET DIRECTION_VT2HBDM = { Direction.LONG: "buy", Direction.SHORT: "sell", } DIRECTION_HBDM2VT = {v: k for k, v in DIRECTION_VT2HBDM.items()} OFFSET_VT2HBDM = { Offset.OPEN: "open", Offset.CLOSE: "close", } OFFSET_HBDM2VT = {v: k for k, v in OFFSET_VT2HBDM.items()} INTERVAL_VT2HBDM = { Interval.MINUTE: "1min", Interval.HOUR: "60min", Interval.DAILY: "1day" } CONTRACT_TYPE_MAP = { "this_week": "CW", "next_week": "NW", "quarter": "CQ" } TIMEDELTA_MAP = { Interval.MINUTE: timedelta(minutes=1), Interval.HOUR: timedelta(hours=1), Interval.DAILY: timedelta(days=1), } symbol_type_map = {} class HbdmGateway(BaseGateway): """ VN Trader Gateway for Hbdm connection. 
""" default_setting = { "API Key": "", "Secret Key": "", "会话数": 3, "代理地址": "", "代理端口": "", } exchanges = [Exchange.HUOBI] def __init__(self, event_engine): """Constructor""" super().__init__(event_engine, "HBDM") self.rest_api = HbdmRestApi(self) self.trade_ws_api = HbdmTradeWebsocketApi(self) self.market_ws_api = HbdmDataWebsocketApi(self) def connect(self, setting: dict): """""" key = setting["API Key"] secret = setting["Secret Key"] session_number = setting["会话数"] proxy_host = setting["代理地址"] proxy_port = setting["代理端口"] if proxy_port.isdigit(): proxy_port = int(proxy_port) else: proxy_port = 0 self.rest_api.connect(key, secret, session_number, proxy_host, proxy_port) self.trade_ws_api.connect(key, secret, proxy_host, proxy_port) self.market_ws_api.connect(key, secret, proxy_host, proxy_port) self.init_query() def subscribe(self, req: SubscribeRequest): """""" self.market_ws_api.subscribe(req) def send_order(self, req: OrderRequest): """""" return self.rest_api.send_order(req) def cancel_order(self, req: CancelRequest): """""" self.rest_api.cancel_order(req) def send_orders(self, reqs: Sequence[OrderRequest]): """""" return self.rest_api.send_orders(reqs) def query_account(self): """""" self.rest_api.query_account() def query_position(self): """""" self.rest_api.query_position() def query_history(self, req: HistoryRequest): """""" return self.rest_api.query_history(req) def close(self): """""" self.rest_api.stop() self.trade_ws_api.stop() self.market_ws_api.stop() def process_timer_event(self, event: Event): """""" self.count += 1 if self.count < 3: return self.query_account() self.query_position() def init_query(self): """""" self.count = 0 self.event_engine.register(EVENT_TIMER, self.process_timer_event) class HbdmRestApi(RestClient): """ HBDM REST API """ def __init__(self, gateway: BaseGateway): """""" super().__init__() self.gateway = gateway self.gateway_name = gateway.gateway_name self.host = "" self.key = "" self.secret = "" self.account_id = "" self.order_count = 10000 self.order_count_lock = Lock() self.connect_time = 0 self.positions = {} self.currencies = set() def sign(self, request): """ Generate HBDM signature. """ request.headers = { "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36" } params_with_signature = create_signature( self.key, request.method, self.host, request.path, self.secret, request.params ) request.params = params_with_signature if request.method == "POST": request.headers["Content-Type"] = "application/json" if request.data: request.data = json.dumps(request.data) return request def connect( self, key: str, secret: str, session_number: int, proxy_host: str, proxy_port: int ): """ Initialize connection to REST server. 
""" self.key = key self.secret = secret self.host, _ = _split_url(REST_HOST) self.connect_time = int(datetime.now().strftime("%y%m%d%H%M%S")) self.init(REST_HOST, proxy_host, proxy_port) self.start(session_number) self.gateway.write_log("REST API启动成功") self.query_contract() def query_account(self): """""" self.add_request( method="POST", path="/api/v1/contract_account_info", callback=self.on_query_account ) def query_position(self): """""" self.add_request( method="POST", path="/api/v1/contract_position_info", callback=self.on_query_position ) def query_order(self): """""" for currency in self.currencies: # Open Orders data = {"symbol": currency} self.add_request( method="POST", path="/api/v1/contract_openorders", callback=self.on_query_order, data=data, extra=currency ) def query_contract(self): """""" self.add_request( method="GET", path="/api/v1/contract_contract_info", callback=self.on_query_contract ) def query_history(self, req: HistoryRequest): """""" history = [] count = 2000 start = req.start time_delta = TIMEDELTA_MAP[req.interval] # Convert symbol contract_type = symbol_type_map.get(req.symbol, "") buf = [i for i in req.symbol if not i.isdigit()] symbol = "".join(buf) ws_contract_type = CONTRACT_TYPE_MAP[contract_type] ws_symbol = f"{symbol}_{ws_contract_type}" while True: # Calculate end time end = start + time_delta * count # Create query params params = { "symbol": ws_symbol, "period": INTERVAL_VT2HBDM[req.interval], "from": int(start.timestamp()), "to": int(end.timestamp()) } # Get response from server resp = self.request( "GET", "/market/history/kline", params=params ) # Break if request failed with other status code if resp.status_code // 100 != 2: msg = f"获取历史数据失败,状态码:{resp.status_code},信息:{resp.text}" self.gateway.write_log(msg) break else: data = resp.json() if not data: msg = f"获取历史数据为空" self.gateway.write_log(msg) break buf = [] for d in data["data"]: dt = datetime.fromtimestamp(d["id"]) bar = BarData( symbol=req.symbol, exchange=req.exchange, datetime=dt, interval=req.interval, volume=d["vol"], open_price=d["open"], high_price=d["high"], low_price=d["low"], close_price=d["close"], gateway_name=self.gateway_name ) buf.append(bar) history.extend(buf) begin = buf[0].datetime end = buf[-1].datetime msg = f"获取历史数据成功,{req.symbol} - {req.interval.value},{begin} - {end}" self.gateway.write_log(msg) # Update start time start = bar.datetime # Break if data end reached if len(buf) < count: break return history def new_local_orderid(self): """""" with self.order_count_lock: self.order_count += 1 local_orderid = f"{self.connect_time}{self.order_count}" return local_orderid def send_order(self, req: OrderRequest): """""" local_orderid = self.new_local_orderid() order = req.create_order_data( local_orderid, self.gateway_name ) order.time = datetime.now().strftime("%H:%M:%S") data = { "contract_code": req.symbol, "client_order_id": int(local_orderid), "price": req.price, "volume": int(req.volume), "direction": DIRECTION_VT2HBDM.get(req.direction, ""), "offset": OFFSET_VT2HBDM.get(req.offset, ""), "order_price_type": ORDERTYPE_VT2HBDM.get(req.type, ""), "lever_rate": 20 } self.add_request( method="POST", path="/api/v1/contract_order", callback=self.on_send_order, data=data, extra=order, on_error=self.on_send_order_error, on_failed=self.on_send_order_failed ) self.gateway.on_order(order) return order.vt_orderid def send_orders(self, reqs: Sequence[OrderRequest]): """""" orders_data = [] orders = [] vt_orderids = [] for req in reqs: local_orderid = self.new_local_orderid() order = 
req.create_order_data( local_orderid, self.gateway_name ) order.time = datetime.now().strftime("%H:%M:%S") self.gateway.on_order(order) d = { "contract_code": req.symbol, "client_order_id": int(local_orderid), "price": req.price, "volume": int(req.volume), "direction": DIRECTION_VT2HBDM.get(req.direction, ""), "offset": OFFSET_VT2HBDM.get(req.offset, ""), "order_price_type": ORDERTYPE_VT2HBDM.get(req.type, ""), "lever_rate": 20 } orders_data.append(d) orders.append(order) vt_orderids.append(order.vt_orderid) data = { "orders_data": orders_data } self.add_request( method="POST", path="/api/v1/contract_batchorder", callback=self.on_send_orders, data=data, extra=orders, on_error=self.on_send_orders_error, on_failed=self.on_send_orders_failed ) return vt_orderids def cancel_order(self, req: CancelRequest): """""" buf = [i for i in req.symbol if not i.isdigit()] data = { "symbol": "".join(buf), } orderid = int(req.orderid) if orderid > 1000000: data["client_order_id"] = orderid else: data["order_id"] = orderid self.add_request( method="POST", path="/api/v1/contract_cancel", callback=self.on_cancel_order, on_failed=self.on_cancel_order_failed, data=data, extra=req ) def on_query_account(self, data, request): """""" if self.check_error(data, "查询账户"): return for d in data["data"]: account = AccountData( accountid=d["symbol"], balance=d["margin_balance"], frozen=d["margin_frozen"], gateway_name=self.gateway_name, ) self.gateway.on_account(account) def on_query_position(self, data, request): """""" if self.check_error(data, "查询持仓"): return # Clear all buf data for position in self.positions.values(): position.volume = 0 position.frozen = 0 position.price = 0 position.pnl = 0 for d in data["data"]: key = f"{d["contract_code"]}_{d["direction"]}" position = self.positions.get(key, None) if not position: position = PositionData( symbol=d["contract_code"], exchange=Exchange.HUOBI, direction=DIRECTION_HBDM2VT[d["direction"]], gateway_name=self.gateway_name ) self.positions[key] = position position.volume = d["volume"] position.frozen = d["frozen"] position.price = d["cost_hold"] position.pnl = d["profit"] for position in self.positions.values(): self.gateway.on_position(position) def on_query_order(self, data, request): """""" if self.check_error(data, "查询活动委托"): return for d in data["data"]["orders"]: timestamp = d["created_at"] dt = datetime.fromtimestamp(timestamp / 1000) time = dt.strftime("%H:%M:%S") if d["client_order_id"]: orderid = d["client_order_id"] else: orderid = d["order_id"] order = OrderData( orderid=orderid, symbol=d["contract_code"], exchange=Exchange.HUOBI, price=d["price"], volume=d["volume"], type=ORDERTYPE_HBDM2VT[d["order_price_type"]], direction=DIRECTION_HBDM2VT[d["direction"]], offset=OFFSET_HBDM2VT[d["offset"]], traded=d["trade_volume"], status=STATUS_HBDM2VT[d["status"]], time=time, gateway_name=self.gateway_name, ) self.gateway.on_order(order) self.gateway.write_log(f"{request.extra}活动委托信息查询成功") def on_query_contract(self, data, request): # type: (dict, Request)->None """""" if self.check_error(data, "查询合约"): return for d in data["data"]: self.currencies.add(d["symbol"]) contract = ContractData( symbol=d["contract_code"], exchange=Exchange.HUOBI, name=d["contract_code"], pricetick=d["price_tick"], size=int(d["contract_size"]), min_volume=1, product=Product.FUTURES, history_data=True, gateway_name=self.gateway_name, ) self.gateway.on_contract(contract) symbol_type_map[contract.symbol] = d["contract_type"] self.gateway.write_log("合约信息查询成功") self.query_order() def 
on_send_order(self, data, request): """""" order = request.extra if self.check_error(data, "委托"): order.status = Status.REJECTED self.gateway.on_order(order) def on_send_order_failed(self, status_code: str, request: Request): """ Callback when sending order failed on server. """ order = request.extra order.status = Status.REJECTED self.gateway.on_order(order) msg = f"委托失败,状态码:{status_code},信息:{request.response.text}" self.gateway.write_log(msg) def on_send_order_error( self, exception_type: type, exception_value: Exception, tb, request: Request ): """ Callback when sending order caused exception. """ order = request.extra order.status = Status.REJECTED self.gateway.on_order(order) # Record exception if not ConnectionError if not issubclass(exception_type, ConnectionError): self.on_error(exception_type, exception_value, tb, request) def on_cancel_order(self, data, request): """""" self.check_error(data, "撤单") def on_cancel_order_failed(self, status_code: str, request: Request): """ Callback when canceling order failed on server. """ msg = f"撤单失败,状态码:{status_code},信息:{request.response.text}" self.gateway.write_log(msg) def on_send_orders(self, data, request): """""" orders = request.extra errors = data.get("errors", None) if errors: for d in errors: ix = d["index"] code = d["err_code"] msg = d["err_msg"] order = orders[ix] order.status = Status.REJECTED self.gateway.on_order(order) msg = f"批量委托失败,状态码:{code},信息:{msg}" self.gateway.write_log(msg) def on_send_orders_failed(self, status_code: str, request: Request): """ Callback when sending order failed on server. """ orders = request.extra for order in orders: order.status = Status.REJECTED self.gateway.on_order(order) msg = f"批量委托失败,状态码:{status_code},信息:{request.response.text}" self.gateway.write_log(msg) def on_send_orders_error( self, exception_type: type, exception_value: Exception, tb, request: Request ): """ Callback when sending order caused exception. """ orders = request.extra for order in orders: order.status = Status.REJECTED self.gateway.on_order(order) # Record exception if not ConnectionError if not issubclass(exception_type, ConnectionError): self.on_error(exception_type, exception_value, tb, request) def on_error( self, exception_type: type, exception_value: Exception, tb, request: Request ): """ Callback to handler request exception. 
""" msg = f"触发异常,状态码:{exception_type},信息:{exception_value}" self.gateway.write_log(msg) sys.stderr.write( self.exception_detail(exception_type, exception_value, tb, request) ) def check_error(self, data: dict, func: str = ""): """""" if data["status"] != "error": return False error_code = data["err_code"] error_msg = data["err_msg"] self.gateway.write_log(f"{func}请求出错,代码:{error_code},信息:{error_msg}") return True class HbdmWebsocketApiBase(WebsocketClient): """""" def __init__(self, gateway): """""" super(HbdmWebsocketApiBase, self).__init__() self.gateway = gateway self.gateway_name = gateway.gateway_name self.key = "" self.secret = "" self.sign_host = "" self.path = "" self.req_id = 0 def connect( self, key: str, secret: str, url: str, proxy_host: str, proxy_port: int ): """""" self.key = key self.secret = secret host, path = _split_url(url) self.sign_host = host self.path = path self.init(url, proxy_host, proxy_port) self.start() def login(self): """""" self.req_id += 1 params = { "op": "auth", "type": "api", "cid": str(self.req_id), } params.update(create_signature(self.key, "GET", self.sign_host, self.path, self.secret)) return self.send_packet(params) def on_login(self, packet): """""" pass @staticmethod def unpack_data(data): """""" return json.loads(zlib.decompress(data, 31)) def on_packet(self, packet): """""" if "ping" in packet: req = {"pong": packet["ping"]} self.send_packet(req) elif "op" in packet and packet["op"] == "ping": req = { "op": "pong", "ts": packet["ts"] } self.send_packet(req) elif "err-msg" in packet: return self.on_error_msg(packet) elif "op" in packet and packet["op"] == "auth": return self.on_login() else: self.on_data(packet) def on_data(self, packet): """""" print("data : {}".format(packet)) def on_error_msg(self, packet): """""" msg = packet["err-msg"] if msg == "invalid pong": return self.gateway.write_log(packet["err-msg"]) class HbdmTradeWebsocketApi(HbdmWebsocketApiBase): """""" def __init__(self, gateway): """""" super().__init__(gateway) def connect(self, key, secret, proxy_host, proxy_port): """""" super().connect(key, secret, WEBSOCKET_TRADE_HOST, proxy_host, proxy_port) def subscribe(self): """""" self.req_id += 1 req = { "op": "sub", "cid": str(self.req_id), "topic": f"orders.*" } self.send_packet(req) def on_connected(self): """""" self.gateway.write_log("交易Websocket API连接成功") self.login() def on_login(self): """""" self.gateway.write_log("交易Websocket API登录成功") self.subscribe() def on_data(self, packet): # type: (dict)->None """""" op = packet.get("op", None) if op != "notify": return topic = packet["topic"] if "orders" in topic: self.on_order(packet) def on_order(self, data: dict): """""" dt = datetime.fromtimestamp(data["created_at"] / 1000) time = dt.strftime("%H:%M:%S") if data["client_order_id"]: orderid = data["client_order_id"] else: orderid = data["order_id"] order = OrderData( symbol=data["contract_code"], exchange=Exchange.HUOBI, orderid=orderid, type=ORDERTYPE_HBDM2VT[data["order_price_type"]], direction=DIRECTION_HBDM2VT[data["direction"]], offset=OFFSET_HBDM2VT[data["offset"]], price=data["price"], volume=data["volume"], traded=data["trade_volume"], status=STATUS_HBDM2VT[data["status"]], time=time, gateway_name=self.gateway_name ) self.gateway.on_order(order) # Push trade event trades = data["trade"] if not trades: return for d in trades: dt = datetime.fromtimestamp(d["created_at"] / 1000) time = dt.strftime("%H:%M:%S") trade = TradeData( symbol=order.symbol, exchange=Exchange.HUOBI, orderid=order.orderid, tradeid=str(d["trade_id"]), 
direction=order.direction, offset=order.offset, price=d["trade_price"], volume=d["trade_volume"], time=time, gateway_name=self.gateway_name, ) self.gateway.on_trade(trade) class HbdmDataWebsocketApi(HbdmWebsocketApiBase): """""" def __init__(self, gateway): """""" super().__init__(gateway) self.ticks = {} def connect(self, key: str, secret: str, proxy_host: str, proxy_port: int): """""" super().connect(key, secret, WEBSOCKET_DATA_HOST, proxy_host, proxy_port) def on_connected(self): """""" self.gateway.write_log("行情Websocket API连接成功") for ws_symbol in self.ticks.keys(): self.subscribe_data(ws_symbol) def subscribe(self, req: SubscribeRequest): """""" contract_type = symbol_type_map.get(req.symbol, "") if not contract_type: return buf = [i for i in req.symbol if not i.isdigit()] symbol = "".join(buf) ws_contract_type = CONTRACT_TYPE_MAP[contract_type] ws_symbol = f"{symbol}_{ws_contract_type}" # Create tick data buffer tick = TickData( symbol=req.symbol, name=req.symbol, exchange=Exchange.HUOBI, datetime=datetime.now(), gateway_name=self.gateway_name, ) self.ticks[ws_symbol] = tick self.subscribe_data(ws_symbol) def subscribe_data(self, ws_symbol: str): """""" # Subscribe to market depth update self.req_id += 1 req = { "sub": f"market.{ws_symbol}.depth.step0", "id": str(self.req_id) } self.send_packet(req) # Subscribe to market detail update self.req_id += 1 req = { "sub": f"market.{ws_symbol}.detail", "id": str(self.req_id) } self.send_packet(req) def on_data(self, packet): # type: (dict)->None """""" channel = packet.get("ch", None) if channel: if "depth.step" in channel: self.on_market_depth(packet) elif "detail" in channel: self.on_market_detail(packet) elif "err_code" in packet: code = packet["err_code"] msg = packet["err_msg"] self.gateway.write_log(f"错误代码:{code}, 错误信息:{msg}") def on_market_depth(self, data): """行情深度推送 """ ws_symbol = data["ch"].split(".")[1] tick = self.ticks[ws_symbol] tick.datetime = datetime.fromtimestamp(data["ts"] / 1000) tick_data = data["tick"] if "bids" not in tick_data or "asks" not in tick_data: return bids = tick_data["bids"] for n in range(5): price, volume = bids[n] tick.__setattr__("bid_price_" + str(n + 1), float(price)) tick.__setattr__("bid_volume_" + str(n + 1), float(volume)) asks = tick_data["asks"] for n in range(5): price, volume = asks[n] tick.__setattr__("ask_price_" + str(n + 1), float(price)) tick.__setattr__("ask_volume_" + str(n + 1), float(volume)) if tick.last_price: self.gateway.on_tick(copy(tick)) def on_market_detail(self, data): """市场细节推送""" ws_symbol = data["ch"].split(".")[1] tick = self.ticks[ws_symbol] tick.datetime = datetime.fromtimestamp(data["ts"] / 1000) tick_data = data["tick"] tick.open_price = tick_data["open"] tick.high_price = tick_data["high"] tick.low_price = tick_data["low"] tick.last_price = tick_data["close"] tick.volume = tick_data["vol"] if tick.bid_price_1: self.gateway.on_tick(copy(tick)) def _split_url(url): """ 将url拆分为host和path :return: host, path """ result = re.match("\w+://([^/]*)(.*)", url) # noqa if result: return result.group(1), result.group(2) def create_signature(api_key, method, host, path, secret_key, get_params=None): """ 创建签名 :param get_params: dict 使用GET方法时附带的额外参数(urlparams) :return: """ sorted_params = [ ("AccessKeyId", api_key), ("SignatureMethod", "HmacSHA256"), ("SignatureVersion", "2"), ("Timestamp", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")) ] if get_params: sorted_params.extend(list(get_params.items())) sorted_params = list(sorted(sorted_params)) encode_params = 
urllib.parse.urlencode(sorted_params) payload = [method, host, path, encode_params] payload = "\n".join(payload) payload = payload.encode(encoding="UTF8") secret_key = secret_key.encode(encoding="UTF8") digest = hmac.new(secret_key, payload, digestmod=hashlib.sha256).digest() signature = base64.b64encode(digest) params = dict(sorted_params) params["Signature"] = signature.decode("UTF8") return params
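For reference, a quick standalone check of `create_signature`; the key, host, path, and query values below are made-up placeholders, not real Huobi credentials.

# Minimal sketch exercising create_signature on its own.
# All key/secret/param values are dummy placeholders (assumptions).
if __name__ == "__main__":
    signed = create_signature(
        api_key="dummy-access-key",
        method="GET",
        host="api.hbdm.com",
        path="/api/v1/contract_account_info",
        secret_key="dummy-secret-key",
        get_params={"symbol": "BTC"},
    )
    # The result carries the original params plus a base64 HMAC-SHA256 signature.
    print(signed["Signature"])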
""" 火币合约接口 """ import re import urllib import base64 import json import zlib import hashlib import hmac import sys from copy import copy from datetime import datetime, timedelta from threading import Lock from typing import Sequence from vnpy.event import Event from vnpy.api.rest import RestClient, Request from vnpy.api.websocket import WebsocketClient from vnpy.trader.constant import ( Direction, Offset, Exchange, Product, Status, OrderType, Interval ) from vnpy.trader.gateway import BaseGateway from vnpy.trader.object import ( TickData, OrderData, TradeData, BarData, AccountData, PositionData, ContractData, OrderRequest, CancelRequest, SubscribeRequest, HistoryRequest ) from vnpy.trader.event import EVENT_TIMER REST_HOST = "https://api.hbdm.com" WEBSOCKET_DATA_HOST = "wss://www.hbdm.com/ws" # Market Data WEBSOCKET_TRADE_HOST = "wss://api.hbdm.com/notification" # Account and Order STATUS_HBDM2VT = { 3: Status.NOTTRADED, 4: Status.PARTTRADED, 5: Status.CANCELLED, 6: Status.ALLTRADED, 7: Status.CANCELLED, } ORDERTYPE_VT2HBDM = { OrderType.MARKET: "opponent", OrderType.LIMIT: "limit", OrderType.FOK: "fok", OrderType.FAK: "ioc" } ORDERTYPE_HBDM2VT = {v: k for k, v in ORDERTYPE_VT2HBDM.items()} ORDERTYPE_HBDM2VT[1] = OrderType.LIMIT ORDERTYPE_HBDM2VT[3] = OrderType.MARKET ORDERTYPE_HBDM2VT[4] = OrderType.MARKET ORDERTYPE_HBDM2VT[5] = OrderType.STOP ORDERTYPE_HBDM2VT[6] = OrderType.LIMIT ORDERTYPE_HBDM2VT["lightning"] = OrderType.MARKET ORDERTYPE_HBDM2VT["optimal_5"] = OrderType.MARKET ORDERTYPE_HBDM2VT["optimal_10"] = OrderType.MARKET ORDERTYPE_HBDM2VT["optimal_20"] = OrderType.MARKET DIRECTION_VT2HBDM = { Direction.LONG: "buy", Direction.SHORT: "sell", } DIRECTION_HBDM2VT = {v: k for k, v in DIRECTION_VT2HBDM.items()} OFFSET_VT2HBDM = { Offset.OPEN: "open", Offset.CLOSE: "close", } OFFSET_HBDM2VT = {v: k for k, v in OFFSET_VT2HBDM.items()} INTERVAL_VT2HBDM = { Interval.MINUTE: "1min", Interval.HOUR: "60min", Interval.DAILY: "1day" } CONTRACT_TYPE_MAP = { "this_week": "CW", "next_week": "NW", "quarter": "CQ" } TIMEDELTA_MAP = { Interval.MINUTE: timedelta(minutes=1), Interval.HOUR: timedelta(hours=1), Interval.DAILY: timedelta(days=1), } symbol_type_map = {} class HbdmGateway(BaseGateway): """ VN Trader Gateway for Hbdm connection. 
""" default_setting = { "API Key": "", "Secret Key": "", "会话数": 3, "代理地址": "", "代理端口": "", } exchanges = [Exchange.HUOBI] def __init__(self, event_engine): """Constructor""" super().__init__(event_engine, "HBDM") self.rest_api = HbdmRestApi(self) self.trade_ws_api = HbdmTradeWebsocketApi(self) self.market_ws_api = HbdmDataWebsocketApi(self) def connect(self, setting: dict): """""" key = setting["API Key"] secret = setting["Secret Key"] session_number = setting["会话数"] proxy_host = setting["代理地址"] proxy_port = setting["代理端口"] if proxy_port.isdigit(): proxy_port = int(proxy_port) else: proxy_port = 0 self.rest_api.connect(key, secret, session_number, proxy_host, proxy_port) self.trade_ws_api.connect(key, secret, proxy_host, proxy_port) self.market_ws_api.connect(key, secret, proxy_host, proxy_port) self.init_query() def subscribe(self, req: SubscribeRequest): """""" self.market_ws_api.subscribe(req) def send_order(self, req: OrderRequest): """""" return self.rest_api.send_order(req) def cancel_order(self, req: CancelRequest): """""" self.rest_api.cancel_order(req) def send_orders(self, reqs: Sequence[OrderRequest]): """""" return self.rest_api.send_orders(reqs) def query_account(self): """""" self.rest_api.query_account() def query_position(self): """""" self.rest_api.query_position() def query_history(self, req: HistoryRequest): """""" return self.rest_api.query_history(req) def close(self): """""" self.rest_api.stop() self.trade_ws_api.stop() self.market_ws_api.stop() def process_timer_event(self, event: Event): """""" self.count += 1 if self.count < 3: return self.query_account() self.query_position() def init_query(self): """""" self.count = 0 self.event_engine.register(EVENT_TIMER, self.process_timer_event) class HbdmRestApi(RestClient): """ HBDM REST API """ def __init__(self, gateway: BaseGateway): """""" super().__init__() self.gateway = gateway self.gateway_name = gateway.gateway_name self.host = "" self.key = "" self.secret = "" self.account_id = "" self.order_count = 10000 self.order_count_lock = Lock() self.connect_time = 0 self.positions = {} self.currencies = set() def sign(self, request): """ Generate HBDM signature. """ request.headers = { "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36" } params_with_signature = create_signature( self.key, request.method, self.host, request.path, self.secret, request.params ) request.params = params_with_signature if request.method == "POST": request.headers["Content-Type"] = "application/json" if request.data: request.data = json.dumps(request.data) return request def connect( self, key: str, secret: str, session_number: int, proxy_host: str, proxy_port: int ): """ Initialize connection to REST server. 
""" self.key = key self.secret = secret self.host, _ = _split_url(REST_HOST) self.connect_time = int(datetime.now().strftime("%y%m%d%H%M%S")) self.init(REST_HOST, proxy_host, proxy_port) self.start(session_number) self.gateway.write_log("REST API启动成功") self.query_contract() def query_account(self): """""" self.add_request( method="POST", path="/api/v1/contract_account_info", callback=self.on_query_account ) def query_position(self): """""" self.add_request( method="POST", path="/api/v1/contract_position_info", callback=self.on_query_position ) def query_order(self): """""" for currency in self.currencies: # Open Orders data = {"symbol": currency} self.add_request( method="POST", path="/api/v1/contract_openorders", callback=self.on_query_order, data=data, extra=currency ) def query_contract(self): """""" self.add_request( method="GET", path="/api/v1/contract_contract_info", callback=self.on_query_contract ) def query_history(self, req: HistoryRequest): """""" history = [] count = 2000 start = req.start time_delta = TIMEDELTA_MAP[req.interval] # Convert symbol contract_type = symbol_type_map.get(req.symbol, "") buf = [i for i in req.symbol if not i.isdigit()] symbol = "".join(buf) ws_contract_type = CONTRACT_TYPE_MAP[contract_type] ws_symbol = f"{symbol}_{ws_contract_type}" while True: # Calculate end time end = start + time_delta * count # Create query params params = { "symbol": ws_symbol, "period": INTERVAL_VT2HBDM[req.interval], "from": int(start.timestamp()), "to": int(end.timestamp()) } # Get response from server resp = self.request( "GET", "/market/history/kline", params=params ) # Break if request failed with other status code if resp.status_code // 100 != 2: msg = f"获取历史数据失败,状态码:{resp.status_code},信息:{resp.text}" self.gateway.write_log(msg) break else: data = resp.json() if not data: msg = f"获取历史数据为空" self.gateway.write_log(msg) break buf = [] for d in data["data"]: dt = datetime.fromtimestamp(d["id"]) bar = BarData( symbol=req.symbol, exchange=req.exchange, datetime=dt, interval=req.interval, volume=d["vol"], open_price=d["open"], high_price=d["high"], low_price=d["low"], close_price=d["close"], gateway_name=self.gateway_name ) buf.append(bar) history.extend(buf) begin = buf[0].datetime end = buf[-1].datetime msg = f"获取历史数据成功,{req.symbol} - {req.interval.value},{begin} - {end}" self.gateway.write_log(msg) # Update start time start = bar.datetime # Break if data end reached if len(buf) < count: break return history def new_local_orderid(self): """""" with self.order_count_lock: self.order_count += 1 local_orderid = f"{self.connect_time}{self.order_count}" return local_orderid def send_order(self, req: OrderRequest): """""" local_orderid = self.new_local_orderid() order = req.create_order_data( local_orderid, self.gateway_name ) order.time = datetime.now().strftime("%H:%M:%S") data = { "contract_code": req.symbol, "client_order_id": int(local_orderid), "price": req.price, "volume": int(req.volume), "direction": DIRECTION_VT2HBDM.get(req.direction, ""), "offset": OFFSET_VT2HBDM.get(req.offset, ""), "order_price_type": ORDERTYPE_VT2HBDM.get(req.type, ""), "lever_rate": 20 } self.add_request( method="POST", path="/api/v1/contract_order", callback=self.on_send_order, data=data, extra=order, on_error=self.on_send_order_error, on_failed=self.on_send_order_failed ) self.gateway.on_order(order) return order.vt_orderid def send_orders(self, reqs: Sequence[OrderRequest]): """""" orders_data = [] orders = [] vt_orderids = [] for req in reqs: local_orderid = self.new_local_orderid() order = 
req.create_order_data( local_orderid, self.gateway_name ) order.time = datetime.now().strftime("%H:%M:%S") self.gateway.on_order(order) d = { "contract_code": req.symbol, "client_order_id": int(local_orderid), "price": req.price, "volume": int(req.volume), "direction": DIRECTION_VT2HBDM.get(req.direction, ""), "offset": OFFSET_VT2HBDM.get(req.offset, ""), "order_price_type": ORDERTYPE_VT2HBDM.get(req.type, ""), "lever_rate": 20 } orders_data.append(d) orders.append(order) vt_orderids.append(order.vt_orderid) data = { "orders_data": orders_data } self.add_request( method="POST", path="/api/v1/contract_batchorder", callback=self.on_send_orders, data=data, extra=orders, on_error=self.on_send_orders_error, on_failed=self.on_send_orders_failed ) return vt_orderids def cancel_order(self, req: CancelRequest): """""" buf = [i for i in req.symbol if not i.isdigit()] data = { "symbol": "".join(buf), } orderid = int(req.orderid) if orderid > 1000000: data["client_order_id"] = orderid else: data["order_id"] = orderid self.add_request( method="POST", path="/api/v1/contract_cancel", callback=self.on_cancel_order, on_failed=self.on_cancel_order_failed, data=data, extra=req ) def on_query_account(self, data, request): """""" if self.check_error(data, "查询账户"): return for d in data["data"]: account = AccountData( accountid=d["symbol"], balance=d["margin_balance"], frozen=d["margin_frozen"], gateway_name=self.gateway_name, ) self.gateway.on_account(account) def on_query_position(self, data, request): """""" if self.check_error(data, "查询持仓"): return # Clear all buf data for position in self.positions.values(): position.volume = 0 position.frozen = 0 position.price = 0 position.pnl = 0 for d in data["data"]: key = f"{d['contract_code']}_{d['direction']}" position = self.positions.get(key, None) if not position: position = PositionData( symbol=d["contract_code"], exchange=Exchange.HUOBI, direction=DIRECTION_HBDM2VT[d["direction"]], gateway_name=self.gateway_name ) self.positions[key] = position position.volume = d["volume"] position.frozen = d["frozen"] position.price = d["cost_hold"] position.pnl = d["profit"] for position in self.positions.values(): self.gateway.on_position(position) def on_query_order(self, data, request): """""" if self.check_error(data, "查询活动委托"): return for d in data["data"]["orders"]: timestamp = d["created_at"] dt = datetime.fromtimestamp(timestamp / 1000) time = dt.strftime("%H:%M:%S") if d["client_order_id"]: orderid = d["client_order_id"] else: orderid = d["order_id"] order = OrderData( orderid=orderid, symbol=d["contract_code"], exchange=Exchange.HUOBI, price=d["price"], volume=d["volume"], type=ORDERTYPE_HBDM2VT[d["order_price_type"]], direction=DIRECTION_HBDM2VT[d["direction"]], offset=OFFSET_HBDM2VT[d["offset"]], traded=d["trade_volume"], status=STATUS_HBDM2VT[d["status"]], time=time, gateway_name=self.gateway_name, ) self.gateway.on_order(order) self.gateway.write_log(f"{request.extra}活动委托信息查询成功") def on_query_contract(self, data, request): # type: (dict, Request)->None """""" if self.check_error(data, "查询合约"): return for d in data["data"]: self.currencies.add(d["symbol"]) contract = ContractData( symbol=d["contract_code"], exchange=Exchange.HUOBI, name=d["contract_code"], pricetick=d["price_tick"], size=int(d["contract_size"]), min_volume=1, product=Product.FUTURES, history_data=True, gateway_name=self.gateway_name, ) self.gateway.on_contract(contract) symbol_type_map[contract.symbol] = d["contract_type"] self.gateway.write_log("合约信息查询成功") self.query_order() def 
on_send_order(self, data, request): """""" order = request.extra if self.check_error(data, "委托"): order.status = Status.REJECTED self.gateway.on_order(order) def on_send_order_failed(self, status_code: str, request: Request): """ Callback when sending order failed on server. """ order = request.extra order.status = Status.REJECTED self.gateway.on_order(order) msg = f"委托失败,状态码:{status_code},信息:{request.response.text}" self.gateway.write_log(msg) def on_send_order_error( self, exception_type: type, exception_value: Exception, tb, request: Request ): """ Callback when sending order caused exception. """ order = request.extra order.status = Status.REJECTED self.gateway.on_order(order) # Record exception if not ConnectionError if not issubclass(exception_type, ConnectionError): self.on_error(exception_type, exception_value, tb, request) def on_cancel_order(self, data, request): """""" self.check_error(data, "撤单") def on_cancel_order_failed(self, status_code: str, request: Request): """ Callback when canceling order failed on server. """ msg = f"撤单失败,状态码:{status_code},信息:{request.response.text}" self.gateway.write_log(msg) def on_send_orders(self, data, request): """""" orders = request.extra errors = data.get("errors", None) if errors: for d in errors: ix = d["index"] code = d["err_code"] msg = d["err_msg"] order = orders[ix] order.status = Status.REJECTED self.gateway.on_order(order) msg = f"批量委托失败,状态码:{code},信息:{msg}" self.gateway.write_log(msg) def on_send_orders_failed(self, status_code: str, request: Request): """ Callback when sending order failed on server. """ orders = request.extra for order in orders: order.status = Status.REJECTED self.gateway.on_order(order) msg = f"批量委托失败,状态码:{status_code},信息:{request.response.text}" self.gateway.write_log(msg) def on_send_orders_error( self, exception_type: type, exception_value: Exception, tb, request: Request ): """ Callback when sending order caused exception. """ orders = request.extra for order in orders: order.status = Status.REJECTED self.gateway.on_order(order) # Record exception if not ConnectionError if not issubclass(exception_type, ConnectionError): self.on_error(exception_type, exception_value, tb, request) def on_error( self, exception_type: type, exception_value: Exception, tb, request: Request ): """ Callback to handler request exception. 
""" msg = f"触发异常,状态码:{exception_type},信息:{exception_value}" self.gateway.write_log(msg) sys.stderr.write( self.exception_detail(exception_type, exception_value, tb, request) ) def check_error(self, data: dict, func: str = ""): """""" if data["status"] != "error": return False error_code = data["err_code"] error_msg = data["err_msg"] self.gateway.write_log(f"{func}请求出错,代码:{error_code},信息:{error_msg}") return True class HbdmWebsocketApiBase(WebsocketClient): """""" def __init__(self, gateway): """""" super(HbdmWebsocketApiBase, self).__init__() self.gateway = gateway self.gateway_name = gateway.gateway_name self.key = "" self.secret = "" self.sign_host = "" self.path = "" self.req_id = 0 def connect( self, key: str, secret: str, url: str, proxy_host: str, proxy_port: int ): """""" self.key = key self.secret = secret host, path = _split_url(url) self.sign_host = host self.path = path self.init(url, proxy_host, proxy_port) self.start() def login(self): """""" self.req_id += 1 params = { "op": "auth", "type": "api", "cid": str(self.req_id), } params.update(create_signature(self.key, "GET", self.sign_host, self.path, self.secret)) return self.send_packet(params) def on_login(self, packet): """""" pass @staticmethod def unpack_data(data): """""" return json.loads(zlib.decompress(data, 31)) def on_packet(self, packet): """""" if "ping" in packet: req = {"pong": packet["ping"]} self.send_packet(req) elif "op" in packet and packet["op"] == "ping": req = { "op": "pong", "ts": packet["ts"] } self.send_packet(req) elif "err-msg" in packet: return self.on_error_msg(packet) elif "op" in packet and packet["op"] == "auth": return self.on_login() else: self.on_data(packet) def on_data(self, packet): """""" print("data : {}".format(packet)) def on_error_msg(self, packet): """""" msg = packet["err-msg"] if msg == "invalid pong": return self.gateway.write_log(packet["err-msg"]) class HbdmTradeWebsocketApi(HbdmWebsocketApiBase): """""" def __init__(self, gateway): """""" super().__init__(gateway) def connect(self, key, secret, proxy_host, proxy_port): """""" super().connect(key, secret, WEBSOCKET_TRADE_HOST, proxy_host, proxy_port) def subscribe(self): """""" self.req_id += 1 req = { "op": "sub", "cid": str(self.req_id), "topic": f"orders.*" } self.send_packet(req) def on_connected(self): """""" self.gateway.write_log("交易Websocket API连接成功") self.login() def on_login(self): """""" self.gateway.write_log("交易Websocket API登录成功") self.subscribe() def on_data(self, packet): # type: (dict)->None """""" op = packet.get("op", None) if op != "notify": return topic = packet["topic"] if "orders" in topic: self.on_order(packet) def on_order(self, data: dict): """""" dt = datetime.fromtimestamp(data["created_at"] / 1000) time = dt.strftime("%H:%M:%S") if data["client_order_id"]: orderid = data["client_order_id"] else: orderid = data["order_id"] order = OrderData( symbol=data["contract_code"], exchange=Exchange.HUOBI, orderid=orderid, type=ORDERTYPE_HBDM2VT[data["order_price_type"]], direction=DIRECTION_HBDM2VT[data["direction"]], offset=OFFSET_HBDM2VT[data["offset"]], price=data["price"], volume=data["volume"], traded=data["trade_volume"], status=STATUS_HBDM2VT[data["status"]], time=time, gateway_name=self.gateway_name ) self.gateway.on_order(order) # Push trade event trades = data["trade"] if not trades: return for d in trades: dt = datetime.fromtimestamp(d["created_at"] / 1000) time = dt.strftime("%H:%M:%S") trade = TradeData( symbol=order.symbol, exchange=Exchange.HUOBI, orderid=order.orderid, tradeid=str(d["trade_id"]), 
direction=order.direction, offset=order.offset, price=d["trade_price"], volume=d["trade_volume"], time=time, gateway_name=self.gateway_name, ) self.gateway.on_trade(trade) class HbdmDataWebsocketApi(HbdmWebsocketApiBase): """""" def __init__(self, gateway): """""" super().__init__(gateway) self.ticks = {} def connect(self, key: str, secret: str, proxy_host: str, proxy_port: int): """""" super().connect(key, secret, WEBSOCKET_DATA_HOST, proxy_host, proxy_port) def on_connected(self): """""" self.gateway.write_log("行情Websocket API连接成功") for ws_symbol in self.ticks.keys(): self.subscribe_data(ws_symbol) def subscribe(self, req: SubscribeRequest): """""" contract_type = symbol_type_map.get(req.symbol, "") if not contract_type: return buf = [i for i in req.symbol if not i.isdigit()] symbol = "".join(buf) ws_contract_type = CONTRACT_TYPE_MAP[contract_type] ws_symbol = f"{symbol}_{ws_contract_type}" # Create tick data buffer tick = TickData( symbol=req.symbol, name=req.symbol, exchange=Exchange.HUOBI, datetime=datetime.now(), gateway_name=self.gateway_name, ) self.ticks[ws_symbol] = tick self.subscribe_data(ws_symbol) def subscribe_data(self, ws_symbol: str): """""" # Subscribe to market depth update self.req_id += 1 req = { "sub": f"market.{ws_symbol}.depth.step0", "id": str(self.req_id) } self.send_packet(req) # Subscribe to market detail update self.req_id += 1 req = { "sub": f"market.{ws_symbol}.detail", "id": str(self.req_id) } self.send_packet(req) def on_data(self, packet): # type: (dict)->None """""" channel = packet.get("ch", None) if channel: if "depth.step" in channel: self.on_market_depth(packet) elif "detail" in channel: self.on_market_detail(packet) elif "err_code" in packet: code = packet["err_code"] msg = packet["err_msg"] self.gateway.write_log(f"错误代码:{code}, 错误信息:{msg}") def on_market_depth(self, data): """行情深度推送 """ ws_symbol = data["ch"].split(".")[1] tick = self.ticks[ws_symbol] tick.datetime = datetime.fromtimestamp(data["ts"] / 1000) tick_data = data["tick"] if "bids" not in tick_data or "asks" not in tick_data: return bids = tick_data["bids"] for n in range(5): price, volume = bids[n] tick.__setattr__("bid_price_" + str(n + 1), float(price)) tick.__setattr__("bid_volume_" + str(n + 1), float(volume)) asks = tick_data["asks"] for n in range(5): price, volume = asks[n] tick.__setattr__("ask_price_" + str(n + 1), float(price)) tick.__setattr__("ask_volume_" + str(n + 1), float(volume)) if tick.last_price: self.gateway.on_tick(copy(tick)) def on_market_detail(self, data): """市场细节推送""" ws_symbol = data["ch"].split(".")[1] tick = self.ticks[ws_symbol] tick.datetime = datetime.fromtimestamp(data["ts"] / 1000) tick_data = data["tick"] tick.open_price = tick_data["open"] tick.high_price = tick_data["high"] tick.low_price = tick_data["low"] tick.last_price = tick_data["close"] tick.volume = tick_data["vol"] if tick.bid_price_1: self.gateway.on_tick(copy(tick)) def _split_url(url): """ 将url拆分为host和path :return: host, path """ result = re.match("\w+://([^/]*)(.*)", url) # noqa if result: return result.group(1), result.group(2) def create_signature(api_key, method, host, path, secret_key, get_params=None): """ 创建签名 :param get_params: dict 使用GET方法时附带的额外参数(urlparams) :return: """ sorted_params = [ ("AccessKeyId", api_key), ("SignatureMethod", "HmacSHA256"), ("SignatureVersion", "2"), ("Timestamp", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")) ] if get_params: sorted_params.extend(list(get_params.items())) sorted_params = list(sorted(sorted_params)) encode_params = 
urllib.parse.urlencode(sorted_params) payload = [method, host, path, encode_params] payload = "\n".join(payload) payload = payload.encode(encoding="UTF8") secret_key = secret_key.encode(encoding="UTF8") digest = hmac.new(secret_key, payload, digestmod=hashlib.sha256).digest() signature = base64.b64encode(digest) params = dict(sorted_params) params["Signature"] = signature.decode("UTF8") return params
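And a minimal wiring sketch, assuming vnpy's standard `EventEngine`; every setting value below is a placeholder, not a working credential.

# Hypothetical wiring of HbdmGateway into vnpy (sketch, placeholder settings).
from vnpy.event import EventEngine

event_engine = EventEngine()
gateway = HbdmGateway(event_engine)
gateway.connect({
    "API Key": "your-api-key",
    "Secret Key": "your-secret-key",
    "Session Number": 3,
    "Proxy Host": "",
    "Proxy Port": "",
})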
""" The piece of code that puts everything together. """ import random import sys import pyxel # When bumping to a higher Python requirement, please # modify this varible, basically to avoid users from # running with an old Python. EXPECTED_PYTHON = (3, 7) if sys.version_info < EXPECTED_PYTHON: sys.exit( f"Error: expected Python version {EXPECTED_PYTHON} or newer, got {sys.version_info}" ) pyxel.init(180, 140, title="Diddi and the Bugs") class Bullet: "An independent bullet." def __init__(self, x, y, bullet_trick=False): self.x = x self.y = y self.h = 2 self.w = 6 self.speed = 4 self.alive = True # When "bullet_trick" is set True, it is part of the "bullet trick" self.bullet_trick = bullet_trick def update(self): self.x += self.speed if self.x > pyxel.width: self.alive = False def draw(self): if self.alive: # Will display in dark green color (3) when # the bullet trick is on. If not, the bullet # will be a lighter green (11). color = 3 if self.bullet_trick else 11 pyxel.rect(self.x, self.y, self.w, self.h, color) class Enemy: "Some bugs!" def __init__(self): self.possible_enemies = [ (0, 8), (8, 8), (16, 0), (24, 0), (32, 0), (32, 8), (40, 0), ] self.aspect = random.choice(self.possible_enemies) self.x = pyxel.width self.y = random.randint(10, pyxel.height - 28) self.max_speed = 2 self.speed = random.randint(1, self.max_speed) self.alive = True self.show = False def try_to_activate(self, possibilities): if possibilities > 100: exp = 15 elif possibilities > 50: exp = 10 else: exp = 5 if random.randint(1, possibilities * exp) == 1: self.show = True def update(self, bullets): if not self.show: return None self.x -= self.speed if self.x == 0: self.aspect = random.choice(self.possible_enemies) self.x = pyxel.width self.y = random.randint(10, pyxel.height - 28) self.speed = random.randint(1, self.max_speed) self.show = False for bullet in bullets: if ( self.x in range(bullet.x - 5, bullet.x + bullet.w + 5) and self.y in range(bullet.y - 5, bullet.y + bullet.h + 5) and bullet.alive ): self.alive = False bullet.alive = False def draw(self): if not self.show: return None if self.alive: pyxel.blt(self.x, self.y, 0, self.aspect[0], self.aspect[1], 8, 8, 0) class Trash(Enemy): "Not all the space trash is harmful. Here, it can make you win 50, 100 or even 200 extra points!" def __init__(self): # override these stuff self.possible_enemies = [ (16, 8), (24, 8), (40, 8), ] self.aspect = random.choice(self.possible_enemies) self.max_speed = 3 self.x = pyxel.width self.y = random.randint(10, pyxel.height - 28) self.speed = random.randint(1, self.max_speed) self.alive = True self.show = False class App: "The main piece of the game. It also operates the starfighter." def __init__(self): # This variable is not on `reset`, because # we are keeping a message record until # the app quits. self.messages = [] self.message_goodies = [ "Woo hoo!", "Let's save the Earth!", "Find the 'easter eggs'!", "Willpower to Earth, can you hear me?", "Hang tight...", "I'm hungry, aren't you?", "It's cold here, don't you think?", "Wow! 
This spacecraft really moves!", ] self.reset() self.add_message("Let's go!") pyxel.run(self.update, self.draw) def reset(self): pyxel.load("resource.pyxres") self.alive = True # the player is still alive self.already_won = False self.pause = False self.player_x = 10 self.player_y = 50 self.player_lives = 3 self.bullet_list = [] self.continous_bullets_delay = 30 self.continous_bullets_spacing = 2 self.continous_bullets_message = False self.bullet_last_num_frame = 0 self.bullet_last_held_long = False self.enemies = [Enemy() for sth in range(200)] self.trash = [Trash() for sth in range(50)] self.score = 0 pyxel.stop() pyxel.playm(0, loop=True) def update(self): if pyxel.btnp(pyxel.KEY_Q): pyxel.quit() if pyxel.btnp(pyxel.KEY_R): self.reset() if pyxel.btnp(pyxel.KEY_P): if self.pause: self.pause = False pyxel.playm(0, loop=True) else: self.pause = True if pyxel.btnp(pyxel.KEY_M): self.add_message(random.choice(self.message_goodies)) if pyxel.btnr(pyxel.KEY_P) and self.alive: # We have just raised a "pause event", so we should say it self.add_message("Game paused" if self.pause else "Game resumed", True) if pyxel.btnr(pyxel.KEY_R): self.add_message("Re-started the game", True) if self.pause and self.alive: return None if not self.alive or self.already_won: return None if pyxel.btnp(pyxel.KEY_SPACE): # Reverting back the firing logic that was here, inside of `pyxel.btnp()` self.bullet_list.append(Bullet(self.player_x + 9, self.player_y + 3)) pyxel.playm(3) if pyxel.btn(pyxel.KEY_SPACE): # The continous bullet easter-egg goes here, under `pyxel.btn()` self.bullet_last_num_frame += 1 if self.bullet_last_held_long: if pyxel.frame_count % self.continous_bullets_spacing == 0: if not self.continous_bullets_message: self.add_message("Ah! Continous bullets!") self.continous_bullets_message = True self.bullet_list.append( Bullet(self.player_x + 9, self.player_y + 3, True) ) pyxel.playm(3) if pyxel.btnr(pyxel.KEY_SPACE): # Reset continous bullets back if space key is released self.bullet_last_num_frame = 0 self.bullet_last_held_long = False self.continous_bullets_message = False for bullet in self.bullet_list: if bullet.alive: bullet.update() else: self.bullet_list.remove(bullet) for enem in self.enemies: enem.try_to_activate(len(self.enemies)) for trash in self.trash: trash.try_to_activate(101) self.add_enemies() self.add_trash() self.move_spacecraft() if len(self.enemies) < 1 and self.alive and not self.already_won: # we can play a victory sound! pyxel.stop() pyxel.playm(2) self.already_won = True self.add_message("Yay! We won!") if self.bullet_last_num_frame >= self.continous_bullets_delay: self.bullet_last_num_frame = 0 self.bullet_last_held_long = True def move_spacecraft(self): if pyxel.btn(pyxel.KEY_UP) or pyxel.btn(pyxel.KEY_W): # Move up self.player_y = max(self.player_y - 2, 10) # Resetting bullets back when moved self.bullet_last_num_frame = 0 self.bullet_last_held_long = False elif pyxel.btn(pyxel.KEY_DOWN) or pyxel.btn(pyxel.KEY_S): # Move down self.player_y = min(self.player_y + 2, pyxel.height - 28) # Resetting bullets back when moved self.bullet_last_num_frame = 0 self.bullet_last_held_long = False def add_enemies(self): try: for enem in range(len(self.enemies)): if self.enemies[enem].alive: self.enemies[enem].update(self.bullet_list) if self.player_x in range( self.enemies[enem].x - 5, self.enemies[enem].x + 5 ) and self.player_y in range( self.enemies[enem].y - 5, self.enemies[enem].y + 5 ): self.alive = False pyxel.stop() pyxel.playm(1) self.add_message("Oh no! 
We loose!") else: self.score += 10 self.enemies.pop(enem) pyxel.playm(4) except Exception: # out of range, just pass pass def add_trash(self): try: for item in range(len(self.trash)): if self.trash[item].alive: self.trash[item].update(self.bullet_list) if self.player_x in range( self.trash[item].x - 5, self.trash[item].x + 5 ) and self.player_y in range( self.trash[item].y - 5, self.trash[item].y + 5 ): self.alive = False pyxel.stop() pyxel.playm(1) self.add_message("Oh no! We loose!") else: self.score += random.choice([50, 100, 200]) self.trash.pop(item) pyxel.playm(4) except Exception: # just like the enemies, this will just pass pass def add_message(self, msg, system=False): self.messages.append(f"{"Diddi" if not system else "System"}: {msg}") if len(self.messages) >= 3: self.messages.pop(0) def draw_message_bar(self): # This will draw the messages bar. pyxel.rect(0, pyxel.height - 20, pyxel.width, 20, 5) pyxel.rect(0, pyxel.height - 20, pyxel.width, 2, 13) # Draw the messages if len(self.messages) > 0: pyxel.text(1, pyxel.height - 17, self.messages[0], 1) pyxel.text(2, pyxel.height - 17, self.messages[0], 7) if len(self.messages) > 1: pyxel.text(1, pyxel.height - 8, self.messages[1], 1) pyxel.text(2, pyxel.height - 8, self.messages[1], 7) def draw(self): pyxel.cls(0) score = f"Score: {self.score}" enem_count = f"Enemies: {len(self.enemies)}" pyxel.text(5, 4, score, 1) pyxel.text(4, 4, score, 7) pyxel.text(71, 4, enem_count, 1) pyxel.text(70, 4, enem_count, 7) self.draw_message_bar() if self.pause and self.alive: # paused, don't worry pyxel.stop() pyxel.text( 19, 50, "The game is paused. Press P to play,\nR to restart and Q to quit", 1, ) pyxel.text( 18, 50, "The game is paused. Press P to play,\nR to restart and Q to quit", 7, ) elif len(self.enemies) < 1 and self.alive: # you won!!! pyxel.text( 21, 50, "You won! :) Press R to restart\n or press Q to quit the game", 1, ) pyxel.text( 20, 50, "You won! :) Press R to restart\n or press Q to quit the game", 7, ) elif self.alive: # the show is keep going! pyxel.blt(self.player_x, self.player_y, 0, 8, 0, 8, 8, 0) for bullet in self.bullet_list: bullet.draw() for enem in self.enemies: enem.draw() for trash in self.trash: trash.draw() else: # you loose! try again pyxel.text( 21, 50, "Oh no! :( Press R to restart\n or press Q to quit the game", 1 ) pyxel.text( 20, 50, "Oh no! :( Press R to restart\n or press Q to quit the game", 7 ) App()
""" The piece of code that puts everything together. """ import random import sys import pyxel # When bumping to a higher Python requirement, please # modify this varible, basically to avoid users from # running with an old Python. EXPECTED_PYTHON = (3, 7) if sys.version_info < EXPECTED_PYTHON: sys.exit( f"Error: expected Python version {EXPECTED_PYTHON} or newer, got {sys.version_info}" ) pyxel.init(180, 140, title="Diddi and the Bugs") class Bullet: "An independent bullet." def __init__(self, x, y, bullet_trick=False): self.x = x self.y = y self.h = 2 self.w = 6 self.speed = 4 self.alive = True # When "bullet_trick" is set True, it is part of the "bullet trick" self.bullet_trick = bullet_trick def update(self): self.x += self.speed if self.x > pyxel.width: self.alive = False def draw(self): if self.alive: # Will display in dark green color (3) when # the bullet trick is on. If not, the bullet # will be a lighter green (11). color = 3 if self.bullet_trick else 11 pyxel.rect(self.x, self.y, self.w, self.h, color) class Enemy: "Some bugs!" def __init__(self): self.possible_enemies = [ (0, 8), (8, 8), (16, 0), (24, 0), (32, 0), (32, 8), (40, 0), ] self.aspect = random.choice(self.possible_enemies) self.x = pyxel.width self.y = random.randint(10, pyxel.height - 28) self.max_speed = 2 self.speed = random.randint(1, self.max_speed) self.alive = True self.show = False def try_to_activate(self, possibilities): if possibilities > 100: exp = 15 elif possibilities > 50: exp = 10 else: exp = 5 if random.randint(1, possibilities * exp) == 1: self.show = True def update(self, bullets): if not self.show: return None self.x -= self.speed if self.x == 0: self.aspect = random.choice(self.possible_enemies) self.x = pyxel.width self.y = random.randint(10, pyxel.height - 28) self.speed = random.randint(1, self.max_speed) self.show = False for bullet in bullets: if ( self.x in range(bullet.x - 5, bullet.x + bullet.w + 5) and self.y in range(bullet.y - 5, bullet.y + bullet.h + 5) and bullet.alive ): self.alive = False bullet.alive = False def draw(self): if not self.show: return None if self.alive: pyxel.blt(self.x, self.y, 0, self.aspect[0], self.aspect[1], 8, 8, 0) class Trash(Enemy): "Not all the space trash is harmful. Here, it can make you win 50, 100 or even 200 extra points!" def __init__(self): # override these stuff self.possible_enemies = [ (16, 8), (24, 8), (40, 8), ] self.aspect = random.choice(self.possible_enemies) self.max_speed = 3 self.x = pyxel.width self.y = random.randint(10, pyxel.height - 28) self.speed = random.randint(1, self.max_speed) self.alive = True self.show = False class App: "The main piece of the game. It also operates the starfighter." def __init__(self): # This variable is not on `reset`, because # we are keeping a message record until # the app quits. self.messages = [] self.message_goodies = [ "Woo hoo!", "Let's save the Earth!", "Find the 'easter eggs'!", "Willpower to Earth, can you hear me?", "Hang tight...", "I'm hungry, aren't you?", "It's cold here, don't you think?", "Wow! 
This spacecraft really moves!", ] self.reset() self.add_message("Let's go!") pyxel.run(self.update, self.draw) def reset(self): pyxel.load("resource.pyxres") self.alive = True # the player is still alive self.already_won = False self.pause = False self.player_x = 10 self.player_y = 50 self.player_lives = 3 self.bullet_list = [] self.continous_bullets_delay = 30 self.continous_bullets_spacing = 2 self.continous_bullets_message = False self.bullet_last_num_frame = 0 self.bullet_last_held_long = False self.enemies = [Enemy() for sth in range(200)] self.trash = [Trash() for sth in range(50)] self.score = 0 pyxel.stop() pyxel.playm(0, loop=True) def update(self): if pyxel.btnp(pyxel.KEY_Q): pyxel.quit() if pyxel.btnp(pyxel.KEY_R): self.reset() if pyxel.btnp(pyxel.KEY_P): if self.pause: self.pause = False pyxel.playm(0, loop=True) else: self.pause = True if pyxel.btnp(pyxel.KEY_M): self.add_message(random.choice(self.message_goodies)) if pyxel.btnr(pyxel.KEY_P) and self.alive: # We have just raised a "pause event", so we should say it self.add_message("Game paused" if self.pause else "Game resumed", True) if pyxel.btnr(pyxel.KEY_R): self.add_message("Re-started the game", True) if self.pause and self.alive: return None if not self.alive or self.already_won: return None if pyxel.btnp(pyxel.KEY_SPACE): # Reverting back the firing logic that was here, inside of `pyxel.btnp()` self.bullet_list.append(Bullet(self.player_x + 9, self.player_y + 3)) pyxel.playm(3) if pyxel.btn(pyxel.KEY_SPACE): # The continous bullet easter-egg goes here, under `pyxel.btn()` self.bullet_last_num_frame += 1 if self.bullet_last_held_long: if pyxel.frame_count % self.continous_bullets_spacing == 0: if not self.continous_bullets_message: self.add_message("Ah! Continous bullets!") self.continous_bullets_message = True self.bullet_list.append( Bullet(self.player_x + 9, self.player_y + 3, True) ) pyxel.playm(3) if pyxel.btnr(pyxel.KEY_SPACE): # Reset continous bullets back if space key is released self.bullet_last_num_frame = 0 self.bullet_last_held_long = False self.continous_bullets_message = False for bullet in self.bullet_list: if bullet.alive: bullet.update() else: self.bullet_list.remove(bullet) for enem in self.enemies: enem.try_to_activate(len(self.enemies)) for trash in self.trash: trash.try_to_activate(101) self.add_enemies() self.add_trash() self.move_spacecraft() if len(self.enemies) < 1 and self.alive and not self.already_won: # we can play a victory sound! pyxel.stop() pyxel.playm(2) self.already_won = True self.add_message("Yay! We won!") if self.bullet_last_num_frame >= self.continous_bullets_delay: self.bullet_last_num_frame = 0 self.bullet_last_held_long = True def move_spacecraft(self): if pyxel.btn(pyxel.KEY_UP) or pyxel.btn(pyxel.KEY_W): # Move up self.player_y = max(self.player_y - 2, 10) # Resetting bullets back when moved self.bullet_last_num_frame = 0 self.bullet_last_held_long = False elif pyxel.btn(pyxel.KEY_DOWN) or pyxel.btn(pyxel.KEY_S): # Move down self.player_y = min(self.player_y + 2, pyxel.height - 28) # Resetting bullets back when moved self.bullet_last_num_frame = 0 self.bullet_last_held_long = False def add_enemies(self): try: for enem in range(len(self.enemies)): if self.enemies[enem].alive: self.enemies[enem].update(self.bullet_list) if self.player_x in range( self.enemies[enem].x - 5, self.enemies[enem].x + 5 ) and self.player_y in range( self.enemies[enem].y - 5, self.enemies[enem].y + 5 ): self.alive = False pyxel.stop() pyxel.playm(1) self.add_message("Oh no! 
We lose!")
                else:
                    self.score += 10
                    self.enemies.pop(enem)
                    pyxel.playm(4)
        except Exception:
            # out of range (we popped an item while looping), just pass
            pass

    def add_trash(self):
        try:
            for item in range(len(self.trash)):
                if self.trash[item].alive:
                    self.trash[item].update(self.bullet_list)
                    if self.player_x in range(
                        self.trash[item].x - 5, self.trash[item].x + 5
                    ) and self.player_y in range(
                        self.trash[item].y - 5, self.trash[item].y + 5
                    ):
                        self.alive = False
                        pyxel.stop()
                        pyxel.playm(1)
                        self.add_message("Oh no! We lose!")
                else:
                    self.score += random.choice([50, 100, 200])
                    self.trash.pop(item)
                    pyxel.playm(4)
        except Exception:
            # just like the enemies, this will just pass
            pass

    def add_message(self, msg, system=False):
        self.messages.append(f"{'Diddi' if not system else 'System'}: {msg}")
        if len(self.messages) >= 3:
            self.messages.pop(0)

    def draw_message_bar(self):
        # Draw the messages bar.
        pyxel.rect(0, pyxel.height - 20, pyxel.width, 20, 5)
        pyxel.rect(0, pyxel.height - 20, pyxel.width, 2, 13)
        # Draw the messages
        if len(self.messages) > 0:
            pyxel.text(1, pyxel.height - 17, self.messages[0], 1)
            pyxel.text(2, pyxel.height - 17, self.messages[0], 7)
        if len(self.messages) > 1:
            pyxel.text(1, pyxel.height - 8, self.messages[1], 1)
            pyxel.text(2, pyxel.height - 8, self.messages[1], 7)

    def draw(self):
        pyxel.cls(0)
        score = f"Score: {self.score}"
        enem_count = f"Enemies: {len(self.enemies)}"
        pyxel.text(5, 4, score, 1)
        pyxel.text(4, 4, score, 7)
        pyxel.text(71, 4, enem_count, 1)
        pyxel.text(70, 4, enem_count, 7)
        self.draw_message_bar()
        if self.pause and self.alive:
            # paused, don't worry
            pyxel.stop()
            pyxel.text(
                19,
                50,
                "The game is paused. Press P to play,\nR to restart and Q to quit",
                1,
            )
            pyxel.text(
                18,
                50,
                "The game is paused. Press P to play,\nR to restart and Q to quit",
                7,
            )
        elif len(self.enemies) < 1 and self.alive:
            # you won!!!
            pyxel.text(
                21,
                50,
                "You won! :) Press R to restart\n or press Q to quit the game",
                1,
            )
            pyxel.text(
                20,
                50,
                "You won! :) Press R to restart\n or press Q to quit the game",
                7,
            )
        elif self.alive:
            # the show keeps going!
            pyxel.blt(self.player_x, self.player_y, 0, 8, 0, 8, 8, 0)
            for bullet in self.bullet_list:
                bullet.draw()
            for enem in self.enemies:
                enem.draw()
            for trash in self.trash:
                trash.draw()
        else:
            # you lose! try again
            pyxel.text(
                21, 50, "Oh no! :( Press R to restart\n or press Q to quit the game", 1
            )
            pyxel.text(
                20, 50, "Oh no! :( Press R to restart\n or press Q to quit the game", 7
            )


App()
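
# --- Illustrative sketch (not part of the game above) ---
# The hit detection above relies on integer `in range(...)` membership,
# which behaves like an axis-aligned bounding-box test with a 5-pixel
# margin. `hits` is a hypothetical standalone version of that check,
# shown here only to make the idea explicit.

def hits(px, py, ox, oy, ow=8, oh=8, margin=5):
    """Return True when point (px, py) lies within `margin` pixels of
    the (ow x oh) box anchored at (ox, oy). All values are integers."""
    return px in range(ox - margin, ox + ow + margin) and py in range(
        oy - margin, oy + oh + margin
    )


assert hits(10, 10, 12, 8)       # close enough: collision
assert not hits(10, 10, 40, 40)  # far away: no collision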
from datetime import datetime

pixel_array_left = [18.25, 18.5, 24, 30.5, 29, 24.75, 26.25, 21, 26.25, 10.75, 16, 29, 34.75, 33, 41.75, 25.25, 13.75, 10, 17.5, 29.5, 18, 24.5, 21.5, 21.75, 10.75, 11.25, 25.75, 47.75, 39.5, 44.25, 46.25, 28.25, 11.75, 11, 16.25, 37.5, 41.25, 42.25, 42.5, 21, 11, 12, 20.25, 14.5, 15.75, 16.75, 15.5, 13.25, 11.75, 11, 16.5, 16.5, 13.75, 13.5, 13.75, 13.75, 11.75, 11, 11, 20.5, 15, 13.75, 13.75, 12.5]

pixel_array_right = [17.25, 20.5, 20.25, 17.75, 20.25, 22.75, 21.75, 24.25, 25.5, 30.5, 25.25, 26.5, 22.5, 24.75, 31.25, 22.5, 28.5, 21.5, 42.5, 47, 36.25, 22, 31.75, 25.25, 24, 30.25, 43, 37.5, 43.25, 15.5, 11.5, 10, 12.5, 30.5, 38.25, 34.5, 40.25, 35, 12.75, 11.75, 14, 42.25, 48, 42.75, 49.25, 51.25, 15.75, 11.25, 12.25, 33.25, 39.25, 34, 36.75, 31.75, 13.75, 11, 11.75, 13.75, 19.75, 21, 20.5, 12.5, 11.715, 11]


class MockData:
    """Mock gateway snapshot data ("MockGateway") used for testing."""
    id = 1
    rssi = -96
    datetime = '2018-07-24T12:00:51+01:00'
    status = 63
    internaltemp = 23
    thermal_sensors = [
        {'sensorId': 'TH1', 'internalTemp': 23.25, 'pixelArray': pixel_array_left},
        {'sensorId': 'TH2', 'internalTemp': 23.25, 'pixelArray': pixel_array_right}
    ]
    snapshots = []

    def __init__(self, params):
        self.deviceId = f'{self.id:0>1}'
        self.datetime = datetime.now()
        self.lat = 53.073803
        self.lon = 23.164168
        self.status = self.set_status(params['charge'], params['status_w'],
                                      params['tv'])
        self.snapshots = {
            "deviceId": f'{self.id:0>15}',
            "datetime": datetime.now(),
            "lat": 53.073803,
            "lon": 23.164168,
            "status": self.status,
            "snapshots": [{
                'datetime': datetime.now(),
                'thermalSensors': self.thermal_sensors,
                'wristBands': [
                    self.wristband(params['wristband']),
                    self.wristband(params['wristband'])
                ],
            }]
        }

    def set_status(self, charge: int = 0, wristbands=None, tv: int = 0) -> int:
        """Pack the gateway status into a single integer.

        `charge`, each wristband entry and `tv` are concatenated and parsed
        as binary digits, so every value must be 0 or 1.

        :returns: int
        """
        if wristbands is None:
            wristbands = [1, 1, 1, 1, 1]
        status = ''.join(str(x) for x in [charge] + wristbands + [tv])
        return int(status, 2)

    def wristband(self, data) -> dict:
        """Build the dict data of a wristband.

        :returns: dict
        """
        wristband_data = {'wristBandId': 'BB:BB:BB:BB:01', 'rssi': -38,
                          'voltage': 4.23, 'status': 3}
        if data['status'] is not None:
            wristband_data['status'] = int(
                ''.join(str(x) for x in data['status']), 2)
        if data['id'] is not None:
            wristband_data['wristBandId'] = f'BB:BB:BB:{self.id:0>2}:{data["id"]:0>2}'
        return wristband_data

    def get(self):
        return self.snapshots

    def get_wristband(self, config=None):
        params = {
            'wristband_id': f'BB:BB:BB:{self.id:0>2}:{self.id:0>2}',
            'status': 0,
            'charge': 0,
            'battery': 1
        }
        params.update(config or {})
        return params
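
# --- Usage sketch (illustrative; the params layout below is an assumption
# inferred from MockData.__init__, not part of the module) ---

if __name__ == '__main__':
    example_params = {
        'charge': 1,
        'status_w': [1, 0, 1, 0, 1],  # one status bit per wristband
        'tv': 0,
        'wristband': {'status': [1, 1], 'id': 7},
    }
    mock = MockData(example_params)
    # set_status concatenates the bits '1' + '10101' + '0' -> 0b1101010 == 106
    assert mock.status == 0b1101010
    snapshot = mock.get()
    print(snapshot['deviceId'], snapshot['status'])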
""" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ """ Case Type : 基础功能 Case Name : 创建发布订阅并对发布表进行闪回操作(发布订阅均存在闪回表) Description : 1.设置闪回参数 2.两个集群创建表 3.创建函数获取csn及timecapsule 4.创建发布订阅 5.更新数据,并查询更新 6.闪回查询 7.闪回表 8.订阅端闪回表 Expect : 1.成功 2.成功 3.成功 4.成功 5.插入数据成功,订阅端按顺序进行更新成功 6.集群A查询结果为空,集群B查询结果不变为1,99,3 7.t1查询内容为空,第二次闪回后查询结果为1,2,3 8.集群B更新数据成功,且闪回后数据内容为1,3,99.集群Aupdate成功,集群B数据同步为100,3,99 History : """ import unittest import os from yat.test import macro from yat.test import Node from testcase.utils.Logger import Logger from testcase.utils.CommonSH import CommonSH from testcase.utils.Common import Common from testcase.utils.Constant import Constant Primary_SH = CommonSH('PrimaryDbUser') @unittest.skipIf(3 != Primary_SH.get_node_num(), '非1+2环境不执行') class Pubsubclass(unittest.TestCase): def setUp(self): self.log = Logger() self.log.info("-----------this is setup-----------") self.log.info(f"-----{os.path.basename(__file__)[:-3]} start-----") self.pri_userdb_pub = Node(node='PrimaryDbUser') self.pri_userdb_sub = Node(node='remote1_PrimaryDbUser') self.constant = Constant() self.commsh_pub = CommonSH('PrimaryDbUser') self.commsh_sub = CommonSH('remote1_PrimaryDbUser') self.com_pub = Common() self.com_sub = Common('remote1_PrimaryDbUser') self.tb_name1 = 'tb_pubsub_074_1' self.tb_name2 = 'tb_pubsub_074_2' self.subname1 = "sub_074_1" self.pubname1 = "pub_074_1" self.parent_path_pub = os.path.dirname(macro.DB_INSTANCE_PATH) self.parent_path_sub = os.path.dirname(macro.DB_INSTANCE_PATH_REMOTE1) self.port = str(int(self.pri_userdb_pub.db_port) + 1) self.wal_level = self.com_pub.show_param("wal_level") self.user_param_pub = f'-U {self.pri_userdb_pub.db_user} ' \ f'-W {self.pri_userdb_pub.db_password}' self.user_param_sub = f'-U {self.pri_userdb_sub.db_user} ' \ f'-W {self.pri_userdb_sub.db_password}' cmd = f"cp " \ f"{os.path.join(macro.DB_INSTANCE_PATH, "pg_hba.conf")} " \ f"{os.path.join(self.parent_path_pub, "pg_hba.conf")};" self.log.info(cmd) result = self.pri_userdb_pub.sh(cmd).result() self.log.info(result) cmd = f"cp " \ f"{os.path.join(macro.DB_INSTANCE_PATH_REMOTE1, "pg_hba.conf")}" \ f" {os.path.join(self.parent_path_sub, "pg_hba.conf")};" self.log.info(cmd) result = self.pri_userdb_sub.sh(cmd).result() self.log.info(result) self.undo_retention_time = self.com_pub.show_param( "undo_retention_time") self.recyclebin_retention_time = self.com_pub.show_param( "recyclebin_retention_time") self.enable_recyclebin = self.com_pub.show_param("enable_recyclebin") self.enable_default_ustore_table = self.com_pub.show_param( "enable_default_ustore_table") self.undo_retention_time_sub = self.com_sub.show_param( "undo_retention_time", macro.DB_ENV_PATH_REMOTE1) self.recyclebin_retention_time_sub = self.com_sub.show_param( "recyclebin_retention_time", macro.DB_ENV_PATH_REMOTE1) self.enable_recyclebin_sub = self.com_sub.show_param( "enable_recyclebin", macro.DB_ENV_PATH_REMOTE1) self.enable_default_ustore_table_sub = self.com_sub.show_param( "enable_default_ustore_table", macro.DB_ENV_PATH_REMOTE1) def test_pubsub(self): text = '--step:预置条件,修改pg_hba expect:成功' 
self.log.info(text) guc_res = self.commsh_pub.execute_gsguc( 'reload', self.constant.GSGUC_SUCCESS_MSG, '', 'all', False, False, '', f'host replication {self.pri_userdb_pub.db_user} ' f'{self.pri_userdb_sub.db_host}/32 sha256') self.log.info(guc_res) self.assertTrue(guc_res, '执行失败:' + text) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'wal_level=logical') self.assertTrue(result, '执行失败:' + text) guc_res = self.commsh_sub.execute_gsguc( 'reload', self.constant.GSGUC_SUCCESS_MSG, '', 'all', False, False, macro.DB_INSTANCE_PATH_REMOTE1, f'host replication {self.pri_userdb_pub.db_user} ' f'{self.pri_userdb_pub.db_host}/32 sha256', macro.DB_ENV_PATH_REMOTE1) self.log.info(guc_res) self.assertTrue(guc_res, '执行失败:' + text) text = '--step1:设置闪回参数 expect:成功--' self.log.info(text) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'undo_retention_time=3h') self.assertTrue(result, '执行失败:' + text) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'recyclebin_retention_time=3h') self.assertTrue(result, '执行失败:' + text) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'enable_recyclebin=on') self.assertTrue(result, '执行失败:' + text) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'enable_default_ustore_table=on') self.assertTrue(result, '执行失败:' + text) result = self.commsh_pub.restart_db_cluster(True) flg = self.constant.START_SUCCESS_MSG in result or 'Degrade' in result self.assertTrue(flg, '执行失败:' + text) result = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'undo_retention_time=3h', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertTrue(result, '执行失败:' + text) result = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'recyclebin_retention_time=10000', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertTrue(result, '执行失败:' + text) result = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'enable_recyclebin=on', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertTrue(result, '执行失败:' + text) result = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'enable_default_ustore_table=on', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertTrue(result, '执行失败:' + text) result = self.commsh_sub.restart_db_cluster( True, macro.DB_ENV_PATH_REMOTE1) flg = self.constant.START_SUCCESS_MSG in result or 'Degrade' in result self.assertTrue(flg, '执行失败:' + text) text = '--step2:两个集群创建表 expect:成功--' self.log.info(text) create_sql = f"create table {self.tb_name1}(a int primary key) ;" \ f"create table {self.tb_name2}(id int primary key, snaptime " \ f"timestamptz, snapcsn bigint);" result = self.commsh_pub.execut_db_sql( create_sql, sql_type=self.user_param_pub) self.log.info(result) self.assertEqual(result.count(self.constant.TABLE_CREATE_SUCCESS), 4, '执行失败:' + text) result = self.commsh_sub.execut_db_sql(create_sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertEqual(result.count(self.constant.TABLE_CREATE_SUCCESS), 4, '执行失败:' + text) text = '--step3:创建函数获取csn及timecapsule expect:成功--' self.log.info(text) sql = f"CREATE OR REPLACE FUNCTION findCsn(int8) " \ f"RETURNS INTEGER LANGUAGE plpgsql " \ f"AS \$BODY\$ declare count integer;" \ f"begin" \ f" count = (select snapcsn from {self.tb_name2} " \ 
f"where id = \$1); return count;end;\$BODY\$;" \ f"CREATE OR REPLACE FUNCTION findTime(int8) " \ f"RETURNS timestamptz" \ f" LANGUAGE plpgsql AS \$BODY\$ declare " \ f"count timestamptz;begin" \ f" count = (select snaptime from " \ f"{self.tb_name2} where id=\$1); " \ f"return count;end;\$BODY\$;" result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) self.assertEqual( 2, result.count(self.constant.CREATE_FUNCTION_SUCCESS_MSG), '执行失败:' + text) self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result, '执行失败:' + text) result = self.commsh_sub.execut_db_sql(sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertIn(self.constant.CREATE_FUNCTION_SUCCESS_MSG, result, '执行失败:' + text) self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result, '执行失败:' + text) text = "--step4:创建发布订阅 expect:成功--" self.log.info(text) sql = f"CREATE PUBLICATION {self.pubname1} for all tables ;" result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) self.assertIn(self.constant.create_pub_succ_msg, result, '执行失败:' + text) self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result, '执行失败:' + text) result = self.commsh_sub.execute_generate( macro.COMMON_PASSWD, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertIn('', result, '执行失败:' + text) sql = f"CREATE SUBSCRIPTION {self.subname1} CONNECTION " \ f"'host={self.pri_userdb_pub.db_host} " \ f"port={self.port} " \ f"user={self.pri_userdb_pub.db_user} " \ f"dbname={self.pri_userdb_pub.db_name} " \ f"password={self.pri_userdb_pub.ssh_password}' " \ f"PUBLICATION {self.pubname1};" result = self.commsh_sub.execut_db_sql(sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertEqual(result.count(self.constant.create_sub_succ_msg), 1, '执行失败:' + text) text = "--step5:更新数据,并查询更新 expect:插入数据成功,订阅端按顺序进行更新成功--" self.log.info(text) sql = f"select pg_sleep(10);insert into {self.tb_name2} " \ f"select 1, now(), int8in(xidout(next_csn)) " \ f"from gs_get_next_xid_csn();" \ f"insert into {self.tb_name1} values(1),(2),(3);" \ f"select pg_sleep(10);" \ f"insert into {self.tb_name2} select 2, now(), " \ f"int8in(xidout(next_csn)) from gs_get_next_xid_csn();" \ f"update {self.tb_name1} set a = 99 where a = 2;" \ f"select pg_sleep(10);" \ f"insert into {self.tb_name2} select 3, " \ f"now(), int8in(xidout(next_csn)) from gs_get_next_xid_csn();" result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) self.assertEqual(result.count(self.constant.INSERT_SUCCESS_MSG), 4, '执行失败:' + text) self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result, '执行失败:' + text) sql_select = f"select * from {self.tb_name1} order by a;" \ f"select * from {self.tb_name2};" result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertEqual(result.count('3 rows'), 2, '执行失败:' + text) self.assertIn('99', result, '执行失败:' + text) self.assertIn('1', result, '执行失败:' + text) self.assertIn('3', result, '执行失败:' + text) result_a = self.commsh_pub.execut_db_sql(sql_select, sql_type=self.user_param_pub) self.log.info(result_a) self.assertEqual(result, result_a, '执行失败:' + text) text = "--step6:闪回查询 expect:集群A查询结果为空,集群B查询结果不变为1,99,3--" self.log.info(text) sql = f"select * from {self.tb_name1} timecapsule " \ f"csn findCsn(1) order by a;" \ f"select * from {self.tb_name1} timecapsule " \ f"timestamp findTime(1) order by a;" result = self.commsh_pub.execut_db_sql(sql, 
sql_type=self.user_param_pub) self.log.info(result) self.assertIn('0 rows', result, '执行失败:' + text) result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.assertIn('3 rows', result, '执行失败:' + text) self.log.info(result) text = "--step7:闪回表 expect:t1查询内容为空,第二次闪回后查询结果为1,2,3--" self.log.info(text) sql = f"timecapsule table {self.tb_name1} to csn findCsn(1);" result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) self.assertIn(self.constant.timecapsule_table_succ_msg, result, '执行失败:' + text) result_a = self.commsh_pub.execut_db_sql(sql_select, sql_type=self.user_param_pub) self.log.info(result_a) result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertIn('0 rows', result, '执行失败:' + text) self.assertEqual(result, result_a, '执行失败:' + text) sql = f"timecapsule table {self.tb_name1} to timestamp findTime(2);" result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) self.assertIn(self.constant.timecapsule_table_succ_msg, result, '执行失败:' + text) result_a = self.commsh_pub.execut_db_sql(sql_select, sql_type=self.user_param_pub) self.log.info(result_a) result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertIn('3 rows', result, '执行失败:' + text) self.assertIn('1', result.splitlines()[2], '执行失败:' + text) self.assertIn('2', result.splitlines()[3], '执行失败:' + text) self.assertIn('3', result.splitlines()[4], '执行失败:' + text) self.assertEqual(result, result_a, '执行失败:' + text) text = "--step8:订阅端闪回表 expect:集群B更新数据成功,且闪回后数据内容为1,3,99." \ "集群Aupdate成功,集群B数据同步为100,3,99--" self.log.info(text) sql = f"update {self.tb_name1} set a = 99 where a = 2;" \ f"insert into {self.tb_name2} select 4, now(), " \ f"int8in(xidout(next_csn)) from gs_get_next_xid_csn();" \ f"insert into {self.tb_name1} values(4);" \ f"timecapsule table {self.tb_name1} to csn findCsn(4);" result = self.commsh_sub.execut_db_sql(sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertIn('3 rows', result, '执行失败:' + text) self.assertIn('1', result.splitlines()[2], '执行失败:' + text) self.assertIn('3', result.splitlines()[3], '执行失败:' + text) self.assertIn('99', result.splitlines()[4], '执行失败:' + text) sql = f"update {self.tb_name1} set a=100 where a=1;" result_a = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result_a) result_a = self.commsh_pub.execut_db_sql(sql_select, sql_type=self.user_param_pub) self.log.info(result_a) self.assertIn('2', result_a.splitlines()[2], '执行失败:' + text) self.assertIn('3', result_a.splitlines()[3], '执行失败:' + text) self.assertIn('100', result_a.splitlines()[4], '执行失败:' + text) result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertIn('3', result.splitlines()[2], '执行失败:' + text) self.assertIn('99', result.splitlines()[3], '执行失败:' + text) self.assertIn('100', result.splitlines()[4], '执行失败:' + text) def tearDown(self): self.log.info('------------this is tearDown-------------') text = '--清理环境--' self.log.info(text) sql = f"DROP PUBLICATION if exists {self.pubname1};" drop_pub_result = self.commsh_pub.execut_db_sql( sql, sql_type=self.user_param_pub) 
self.log.info(drop_pub_result) sql = f"DROP SUBSCRIPTION {self.subname1};" drop_sub_result = self.commsh_sub.execut_db_sql( sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(drop_sub_result) sql = f"DROP table if exists {self.tb_name2};" \ f"DROP table if exists {self.tb_name1};" \ f"drop function if exists findCsn;" \ f"drop function if exists findTime;" result = self.commsh_sub.execut_db_sql(sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) cmd = f"mv " \ f"{os.path.join(self.parent_path_pub, "pg_hba.conf")} "\ f"{os.path.join(macro.DB_INSTANCE_PATH, "pg_hba.conf")};" self.log.info(cmd) result = self.pri_userdb_pub.sh(cmd).result() self.log.info(result) cmd = f"mv " \ f"{os.path.join(self.parent_path_sub, "pg_hba.conf")} "\ f"{os.path.join(macro.DB_INSTANCE_PATH_REMOTE1, "pg_hba.conf")} " self.log.info(cmd) result = self.pri_userdb_sub.sh(cmd).result() self.log.info(result) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'wal_level={self.wal_level}') result1 = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'undo_retention_time={self.undo_retention_time}') result2 = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'recyclebin_retention_time={self.recyclebin_retention_time}') result3 = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'enable_recyclebin={self.enable_recyclebin}') result4 = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'enable_default_ustore_table={self.enable_default_ustore_table}') result5 = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'undo_retention_time={self.undo_retention_time_sub}', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) result6 = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'recyclebin_retention_time={self.recyclebin_retention_time_sub}', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) result7 = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'enable_recyclebin={self.enable_recyclebin_sub}', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) result8 = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'enable_default_ustore_table=' f'{self.enable_default_ustore_table_sub}', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertTrue(result and result1 and result2 and result3 and result4 and result5 and result6 and result7 and result8, '执行失败:' + text) self.commsh_pub.restart_db_cluster(True) self.commsh_sub.restart_db_cluster(True, macro.DB_ENV_PATH_REMOTE1) self.assertIn(self.constant.drop_pub_succ_msg, drop_pub_result, '执行失败' + text) self.assertIn(self.constant.drop_sub_succ_msg, drop_sub_result, '执行失败' + text) self.log.info(f"-----{os.path.basename(__file__)[:-3]} end-----")
""" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ """ Case Type : 基础功能 Case Name : 创建发布订阅并对发布表进行闪回操作(发布订阅均存在闪回表) Description : 1.设置闪回参数 2.两个集群创建表 3.创建函数获取csn及timecapsule 4.创建发布订阅 5.更新数据,并查询更新 6.闪回查询 7.闪回表 8.订阅端闪回表 Expect : 1.成功 2.成功 3.成功 4.成功 5.插入数据成功,订阅端按顺序进行更新成功 6.集群A查询结果为空,集群B查询结果不变为1,99,3 7.t1查询内容为空,第二次闪回后查询结果为1,2,3 8.集群B更新数据成功,且闪回后数据内容为1,3,99.集群Aupdate成功,集群B数据同步为100,3,99 History : """ import unittest import os from yat.test import macro from yat.test import Node from testcase.utils.Logger import Logger from testcase.utils.CommonSH import CommonSH from testcase.utils.Common import Common from testcase.utils.Constant import Constant Primary_SH = CommonSH('PrimaryDbUser') @unittest.skipIf(3 != Primary_SH.get_node_num(), '非1+2环境不执行') class Pubsubclass(unittest.TestCase): def setUp(self): self.log = Logger() self.log.info("-----------this is setup-----------") self.log.info(f"-----{os.path.basename(__file__)[:-3]} start-----") self.pri_userdb_pub = Node(node='PrimaryDbUser') self.pri_userdb_sub = Node(node='remote1_PrimaryDbUser') self.constant = Constant() self.commsh_pub = CommonSH('PrimaryDbUser') self.commsh_sub = CommonSH('remote1_PrimaryDbUser') self.com_pub = Common() self.com_sub = Common('remote1_PrimaryDbUser') self.tb_name1 = 'tb_pubsub_074_1' self.tb_name2 = 'tb_pubsub_074_2' self.subname1 = "sub_074_1" self.pubname1 = "pub_074_1" self.parent_path_pub = os.path.dirname(macro.DB_INSTANCE_PATH) self.parent_path_sub = os.path.dirname(macro.DB_INSTANCE_PATH_REMOTE1) self.port = str(int(self.pri_userdb_pub.db_port) + 1) self.wal_level = self.com_pub.show_param("wal_level") self.user_param_pub = f'-U {self.pri_userdb_pub.db_user} ' \ f'-W {self.pri_userdb_pub.db_password}' self.user_param_sub = f'-U {self.pri_userdb_sub.db_user} ' \ f'-W {self.pri_userdb_sub.db_password}' cmd = f"cp " \ f"{os.path.join(macro.DB_INSTANCE_PATH, 'pg_hba.conf')} " \ f"{os.path.join(self.parent_path_pub, 'pg_hba.conf')};" self.log.info(cmd) result = self.pri_userdb_pub.sh(cmd).result() self.log.info(result) cmd = f"cp " \ f"{os.path.join(macro.DB_INSTANCE_PATH_REMOTE1, 'pg_hba.conf')}" \ f" {os.path.join(self.parent_path_sub, 'pg_hba.conf')};" self.log.info(cmd) result = self.pri_userdb_sub.sh(cmd).result() self.log.info(result) self.undo_retention_time = self.com_pub.show_param( "undo_retention_time") self.recyclebin_retention_time = self.com_pub.show_param( "recyclebin_retention_time") self.enable_recyclebin = self.com_pub.show_param("enable_recyclebin") self.enable_default_ustore_table = self.com_pub.show_param( "enable_default_ustore_table") self.undo_retention_time_sub = self.com_sub.show_param( "undo_retention_time", macro.DB_ENV_PATH_REMOTE1) self.recyclebin_retention_time_sub = self.com_sub.show_param( "recyclebin_retention_time", macro.DB_ENV_PATH_REMOTE1) self.enable_recyclebin_sub = self.com_sub.show_param( "enable_recyclebin", macro.DB_ENV_PATH_REMOTE1) self.enable_default_ustore_table_sub = self.com_sub.show_param( "enable_default_ustore_table", macro.DB_ENV_PATH_REMOTE1) def test_pubsub(self): text = '--step:预置条件,修改pg_hba expect:成功' 
self.log.info(text) guc_res = self.commsh_pub.execute_gsguc( 'reload', self.constant.GSGUC_SUCCESS_MSG, '', 'all', False, False, '', f'host replication {self.pri_userdb_pub.db_user} ' f'{self.pri_userdb_sub.db_host}/32 sha256') self.log.info(guc_res) self.assertTrue(guc_res, '执行失败:' + text) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'wal_level=logical') self.assertTrue(result, '执行失败:' + text) guc_res = self.commsh_sub.execute_gsguc( 'reload', self.constant.GSGUC_SUCCESS_MSG, '', 'all', False, False, macro.DB_INSTANCE_PATH_REMOTE1, f'host replication {self.pri_userdb_pub.db_user} ' f'{self.pri_userdb_pub.db_host}/32 sha256', macro.DB_ENV_PATH_REMOTE1) self.log.info(guc_res) self.assertTrue(guc_res, '执行失败:' + text) text = '--step1:设置闪回参数 expect:成功--' self.log.info(text) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'undo_retention_time=3h') self.assertTrue(result, '执行失败:' + text) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'recyclebin_retention_time=3h') self.assertTrue(result, '执行失败:' + text) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'enable_recyclebin=on') self.assertTrue(result, '执行失败:' + text) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'enable_default_ustore_table=on') self.assertTrue(result, '执行失败:' + text) result = self.commsh_pub.restart_db_cluster(True) flg = self.constant.START_SUCCESS_MSG in result or 'Degrade' in result self.assertTrue(flg, '执行失败:' + text) result = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'undo_retention_time=3h', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertTrue(result, '执行失败:' + text) result = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'recyclebin_retention_time=10000', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertTrue(result, '执行失败:' + text) result = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'enable_recyclebin=on', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertTrue(result, '执行失败:' + text) result = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, 'enable_default_ustore_table=on', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertTrue(result, '执行失败:' + text) result = self.commsh_sub.restart_db_cluster( True, macro.DB_ENV_PATH_REMOTE1) flg = self.constant.START_SUCCESS_MSG in result or 'Degrade' in result self.assertTrue(flg, '执行失败:' + text) text = '--step2:两个集群创建表 expect:成功--' self.log.info(text) create_sql = f"create table {self.tb_name1}(a int primary key) ;" \ f"create table {self.tb_name2}(id int primary key, snaptime " \ f"timestamptz, snapcsn bigint);" result = self.commsh_pub.execut_db_sql( create_sql, sql_type=self.user_param_pub) self.log.info(result) self.assertEqual(result.count(self.constant.TABLE_CREATE_SUCCESS), 4, '执行失败:' + text) result = self.commsh_sub.execut_db_sql(create_sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertEqual(result.count(self.constant.TABLE_CREATE_SUCCESS), 4, '执行失败:' + text) text = '--step3:创建函数获取csn及timecapsule expect:成功--' self.log.info(text) sql = f"CREATE OR REPLACE FUNCTION findCsn(int8) " \ f"RETURNS INTEGER LANGUAGE plpgsql " \ f"AS \$BODY\$ declare count integer;" \ f"begin" \ f" count = (select snapcsn from {self.tb_name2} " \ 
f"where id = \$1); return count;end;\$BODY\$;" \ f"CREATE OR REPLACE FUNCTION findTime(int8) " \ f"RETURNS timestamptz" \ f" LANGUAGE plpgsql AS \$BODY\$ declare " \ f"count timestamptz;begin" \ f" count = (select snaptime from " \ f"{self.tb_name2} where id=\$1); " \ f"return count;end;\$BODY\$;" result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) self.assertEqual( 2, result.count(self.constant.CREATE_FUNCTION_SUCCESS_MSG), '执行失败:' + text) self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result, '执行失败:' + text) result = self.commsh_sub.execut_db_sql(sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertIn(self.constant.CREATE_FUNCTION_SUCCESS_MSG, result, '执行失败:' + text) self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result, '执行失败:' + text) text = "--step4:创建发布订阅 expect:成功--" self.log.info(text) sql = f"CREATE PUBLICATION {self.pubname1} for all tables ;" result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) self.assertIn(self.constant.create_pub_succ_msg, result, '执行失败:' + text) self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result, '执行失败:' + text) result = self.commsh_sub.execute_generate( macro.COMMON_PASSWD, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertIn('', result, '执行失败:' + text) sql = f"CREATE SUBSCRIPTION {self.subname1} CONNECTION " \ f"'host={self.pri_userdb_pub.db_host} " \ f"port={self.port} " \ f"user={self.pri_userdb_pub.db_user} " \ f"dbname={self.pri_userdb_pub.db_name} " \ f"password={self.pri_userdb_pub.ssh_password}' " \ f"PUBLICATION {self.pubname1};" result = self.commsh_sub.execut_db_sql(sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertEqual(result.count(self.constant.create_sub_succ_msg), 1, '执行失败:' + text) text = "--step5:更新数据,并查询更新 expect:插入数据成功,订阅端按顺序进行更新成功--" self.log.info(text) sql = f"select pg_sleep(10);insert into {self.tb_name2} " \ f"select 1, now(), int8in(xidout(next_csn)) " \ f"from gs_get_next_xid_csn();" \ f"insert into {self.tb_name1} values(1),(2),(3);" \ f"select pg_sleep(10);" \ f"insert into {self.tb_name2} select 2, now(), " \ f"int8in(xidout(next_csn)) from gs_get_next_xid_csn();" \ f"update {self.tb_name1} set a = 99 where a = 2;" \ f"select pg_sleep(10);" \ f"insert into {self.tb_name2} select 3, " \ f"now(), int8in(xidout(next_csn)) from gs_get_next_xid_csn();" result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) self.assertEqual(result.count(self.constant.INSERT_SUCCESS_MSG), 4, '执行失败:' + text) self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result, '执行失败:' + text) sql_select = f"select * from {self.tb_name1} order by a;" \ f"select * from {self.tb_name2};" result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertEqual(result.count('3 rows'), 2, '执行失败:' + text) self.assertIn('99', result, '执行失败:' + text) self.assertIn('1', result, '执行失败:' + text) self.assertIn('3', result, '执行失败:' + text) result_a = self.commsh_pub.execut_db_sql(sql_select, sql_type=self.user_param_pub) self.log.info(result_a) self.assertEqual(result, result_a, '执行失败:' + text) text = "--step6:闪回查询 expect:集群A查询结果为空,集群B查询结果不变为1,99,3--" self.log.info(text) sql = f"select * from {self.tb_name1} timecapsule " \ f"csn findCsn(1) order by a;" \ f"select * from {self.tb_name1} timecapsule " \ f"timestamp findTime(1) order by a;" result = self.commsh_pub.execut_db_sql(sql, 
sql_type=self.user_param_pub) self.log.info(result) self.assertIn('0 rows', result, '执行失败:' + text) result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.assertIn('3 rows', result, '执行失败:' + text) self.log.info(result) text = "--step7:闪回表 expect:t1查询内容为空,第二次闪回后查询结果为1,2,3--" self.log.info(text) sql = f"timecapsule table {self.tb_name1} to csn findCsn(1);" result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) self.assertIn(self.constant.timecapsule_table_succ_msg, result, '执行失败:' + text) result_a = self.commsh_pub.execut_db_sql(sql_select, sql_type=self.user_param_pub) self.log.info(result_a) result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertIn('0 rows', result, '执行失败:' + text) self.assertEqual(result, result_a, '执行失败:' + text) sql = f"timecapsule table {self.tb_name1} to timestamp findTime(2);" result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) self.assertIn(self.constant.timecapsule_table_succ_msg, result, '执行失败:' + text) result_a = self.commsh_pub.execut_db_sql(sql_select, sql_type=self.user_param_pub) self.log.info(result_a) result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertIn('3 rows', result, '执行失败:' + text) self.assertIn('1', result.splitlines()[2], '执行失败:' + text) self.assertIn('2', result.splitlines()[3], '执行失败:' + text) self.assertIn('3', result.splitlines()[4], '执行失败:' + text) self.assertEqual(result, result_a, '执行失败:' + text) text = "--step8:订阅端闪回表 expect:集群B更新数据成功,且闪回后数据内容为1,3,99." \ "集群Aupdate成功,集群B数据同步为100,3,99--" self.log.info(text) sql = f"update {self.tb_name1} set a = 99 where a = 2;" \ f"insert into {self.tb_name2} select 4, now(), " \ f"int8in(xidout(next_csn)) from gs_get_next_xid_csn();" \ f"insert into {self.tb_name1} values(4);" \ f"timecapsule table {self.tb_name1} to csn findCsn(4);" result = self.commsh_sub.execut_db_sql(sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertIn('3 rows', result, '执行失败:' + text) self.assertIn('1', result.splitlines()[2], '执行失败:' + text) self.assertIn('3', result.splitlines()[3], '执行失败:' + text) self.assertIn('99', result.splitlines()[4], '执行失败:' + text) sql = f"update {self.tb_name1} set a=100 where a=1;" result_a = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result_a) result_a = self.commsh_pub.execut_db_sql(sql_select, sql_type=self.user_param_pub) self.log.info(result_a) self.assertIn('2', result_a.splitlines()[2], '执行失败:' + text) self.assertIn('3', result_a.splitlines()[3], '执行失败:' + text) self.assertIn('100', result_a.splitlines()[4], '执行失败:' + text) result = self.commsh_sub.execut_db_sql(sql_select, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) self.assertIn('3', result.splitlines()[2], '执行失败:' + text) self.assertIn('99', result.splitlines()[3], '执行失败:' + text) self.assertIn('100', result.splitlines()[4], '执行失败:' + text) def tearDown(self): self.log.info('------------this is tearDown-------------') text = '--清理环境--' self.log.info(text) sql = f"DROP PUBLICATION if exists {self.pubname1};" drop_pub_result = self.commsh_pub.execut_db_sql( sql, sql_type=self.user_param_pub) 
self.log.info(drop_pub_result) sql = f"DROP SUBSCRIPTION {self.subname1};" drop_sub_result = self.commsh_sub.execut_db_sql( sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(drop_sub_result) sql = f"DROP table if exists {self.tb_name2};" \ f"DROP table if exists {self.tb_name1};" \ f"drop function if exists findCsn;" \ f"drop function if exists findTime;" result = self.commsh_sub.execut_db_sql(sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1) self.log.info(result) result = self.commsh_pub.execut_db_sql(sql, sql_type=self.user_param_pub) self.log.info(result) cmd = f"mv " \ f"{os.path.join(self.parent_path_pub, 'pg_hba.conf')} "\ f"{os.path.join(macro.DB_INSTANCE_PATH, 'pg_hba.conf')};" self.log.info(cmd) result = self.pri_userdb_pub.sh(cmd).result() self.log.info(result) cmd = f"mv " \ f"{os.path.join(self.parent_path_sub, 'pg_hba.conf')} "\ f"{os.path.join(macro.DB_INSTANCE_PATH_REMOTE1, 'pg_hba.conf')} " self.log.info(cmd) result = self.pri_userdb_sub.sh(cmd).result() self.log.info(result) result = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'wal_level={self.wal_level}') result1 = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'undo_retention_time={self.undo_retention_time}') result2 = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'recyclebin_retention_time={self.recyclebin_retention_time}') result3 = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'enable_recyclebin={self.enable_recyclebin}') result4 = self.commsh_pub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'enable_default_ustore_table={self.enable_default_ustore_table}') result5 = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'undo_retention_time={self.undo_retention_time_sub}', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) result6 = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'recyclebin_retention_time={self.recyclebin_retention_time_sub}', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) result7 = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'enable_recyclebin={self.enable_recyclebin_sub}', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) result8 = self.commsh_sub.execute_gsguc( 'set', self.constant.GSGUC_SUCCESS_MSG, f'enable_default_ustore_table=' f'{self.enable_default_ustore_table_sub}', dn_path=macro.DB_INSTANCE_PATH_REMOTE1, env_path=macro.DB_ENV_PATH_REMOTE1) self.assertTrue(result and result1 and result2 and result3 and result4 and result5 and result6 and result7 and result8, '执行失败:' + text) self.commsh_pub.restart_db_cluster(True) self.commsh_sub.restart_db_cluster(True, macro.DB_ENV_PATH_REMOTE1) self.assertIn(self.constant.drop_pub_succ_msg, drop_pub_result, '执行失败' + text) self.assertIn(self.constant.drop_sub_succ_msg, drop_sub_result, '执行失败' + text) self.log.info(f"-----{os.path.basename(__file__)[:-3]} end-----")
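
# --- Illustrative sketch (not part of the test above) ---
# The core pattern the test exercises, stripped of the yat framework:
# record a csn snapshot next to each change, then read the table as of
# that snapshot or rewind the table itself. Every statement below is
# taken verbatim from the test's SQL strings; how it is sent to the
# database (gsql, a driver, ...) is left open.

FLASHBACK_DEMO_SQL = """
insert into tb_pubsub_074_2
    select 1, now(), int8in(xidout(next_csn)) from gs_get_next_xid_csn();
insert into tb_pubsub_074_1 values(1),(2),(3);
-- read the table as it was at the recorded snapshot
select * from tb_pubsub_074_1 timecapsule csn findCsn(1) order by a;
-- rewind the table itself to that snapshot
timecapsule table tb_pubsub_074_1 to csn findCsn(1);
"""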
import os
from typing import Dict, IO, Iterable, Union

import pyteomics.mgf
import spectrum_utils.spectrum as sus


def get_spectra(source: Union[IO, str]) -> Iterable[sus.MsmsSpectrum]:
    """
    Get the MS/MS spectra from the given MGF file.

    Parameters
    ----------
    source : Union[IO, str]
        The MGF source (file name or open file object) from which the spectra
        are read.

    Returns
    -------
    Iterator[MsmsSpectrum]
        An iterator over the spectra in the given file.
    """
    with pyteomics.mgf.MGF(source) as f_in:
        filename = os.path.splitext(os.path.basename(f_in.name))[0]
        for spectrum_i, spectrum_dict in enumerate(f_in):
            # USI-inspired spectrum identifier.
            if 'scans' in spectrum_dict['params']:
                # Use a scan number as identifier.
                spectrum_dict['params']['title'] = \
                    f'{filename}:scan:{spectrum_dict["params"]["scans"]}'
            else:
                # Use the index in the MGF file as identifier.
                spectrum_dict['params']['title'] = \
                    f'{filename}:index:{spectrum_i}'
            try:
                yield _parse_spectrum(spectrum_dict)
            except ValueError:
                pass


def get_one_spectrum(filename, spectrum_id):
    # Not very efficient but this function should disappear
    for sp in get_spectra(filename):
        _, _, curr_id = sp.identifier.split(':')
        if int(curr_id) == spectrum_id:
            return sp
    # No matching spectrum: raise instead of silently returning None.
    raise ValueError('Spectrum not found in .mgf file')


def _parse_spectrum(spectrum_dict: Dict) -> sus.MsmsSpectrum:
    """
    Parse the Pyteomics spectrum dict.

    Parameters
    ----------
    spectrum_dict : Dict
        The Pyteomics spectrum dict to be parsed.

    Returns
    -------
    MsmsSpectrum
        The parsed spectrum.
    """
    identifier = spectrum_dict['params']['title']
    mz_array = spectrum_dict['m/z array']
    intensity_array = spectrum_dict['intensity array']
    retention_time = float(spectrum_dict['params'].get('rtinseconds', 0))
    precursor_mz = float(spectrum_dict['params']['pepmass'][0])
    if 'charge' in spectrum_dict['params']:
        precursor_charge = int(spectrum_dict['params']['charge'][0])
    else:
        raise ValueError('Unknown precursor charge')
    return sus.MsmsSpectrum(identifier, precursor_mz, precursor_charge,
                            mz_array, intensity_array, None, retention_time)


def write_spectra(filename: str, spectra: Iterable[sus.MsmsSpectrum]) -> None:
    """
    Write the given spectra to an MGF file.

    Parameters
    ----------
    filename : str
        The MGF file name where the spectra will be written.
    spectra : Iterable[MsmsSpectrum]
        The spectra to be written to the MGF file.
    """
    with open(filename, 'w') as f_out:
        pyteomics.mgf.write(_spectra_to_dicts(spectra), f_out, use_numpy=True)


def _spectra_to_dicts(spectra: Iterable[sus.MsmsSpectrum]) -> Iterable[Dict]:
    """
    Convert MsmsSpectrum objects to Pyteomics MGF spectrum dictionaries.

    Parameters
    ----------
    spectra : Iterable[MsmsSpectrum]
        The spectra to be converted to Pyteomics MGF dictionaries.

    Returns
    -------
    Iterable[Dict]
        The given spectra as Pyteomics MGF dictionaries.
    """
    for spectrum in spectra:
        params = {'title': spectrum.identifier,
                  'pepmass': spectrum.precursor_mz,
                  'charge': spectrum.precursor_charge}
        if hasattr(spectrum, 'retention_time'):
            params['rtinseconds'] = spectrum.retention_time
        if hasattr(spectrum, 'scan'):
            params['scan'] = spectrum.scan
        if hasattr(spectrum, 'cluster'):
            params['cluster'] = spectrum.cluster
        yield {'params': params,
               'm/z array': spectrum.mz,
               'intensity array': spectrum.intensity}
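
# --- Usage sketch (illustrative; 'input.mgf' and 'filtered.mgf' are
# hypothetical file names, not part of the module above) ---

if __name__ == '__main__':
    # Keep only spectra with at least 10 peaks and write them back out.
    filtered = (
        spectrum for spectrum in get_spectra('input.mgf')
        if len(spectrum.mz) >= 10
    )
    write_spectra('filtered.mgf', filtered)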
""" Author: Sara Blichner <s.m.blichner@geo.uio.no> Based on conv_ERA-interim.sh: # Author: Matthias Hummel <hummel@geo.uio.no> # and Inger Helene H. Karset <i.h.h.karset@geo.uio.no> # Date: 08.09.2016 # Modified by Moa Sporre 14.11.2017 """ import os import sys from glob import glob from pathlib import Path from subprocess import run import xarray as xr from path_defs import dic_month, data_folder, input_folder, res_file, res_file_T, vct_file, tmp_folder, out_folder out_folder.mkdir(exist_ok=True) tmp_folder.mkdir(exist_ok=True) def get_daily_fl(mon_nr, year, prefix='', folder=tmp_folder): """ Gets list of files for daily files :param mon_nr: month in format number :param year: year :param prefix: prefix before the filename :param folder: where to look. :return: """ fn = fn_raw_nc(mon_nr, year, '3D') tmpfn = folder / f'{prefix}{fn}' fls = glob(f'{str(tmpfn)[:-3]}_day*.nc') return fls def grb2nc(fn, output_folder=input_folder): """ Compute netcdf file from grb files. :param fn: grb file to be converted. :param output_folder: where to put the nc files. :return: """ fn_out = output_folder / f'{Path(fn).name[:-3]}nc' cdo_com = f'cdo -s -t ecmwf -f nc copy {fn} {fn_out}' if not fn_out.is_file(): run(cdo_com, shell=True) else: print(f'File found, using existing file {fn_out}') return def change_unit_time_and_save_final(year, mon_num, fn, day_num=None): """ Unit of time needs to be changed and then final file is saved. Original file in "hours since", but we need "days since" :param year: :param mon_num: :param fn: :param day_num: :return: """ ds = xr.open_dataset(fn) if 'units' in ds['time'].attrs.keys(): if 'days' in str(ds['time'].units): print(f'unit already in days: {ds['time'].units}') return if day_num is None: day_num = fn[-5:-3] mon_nr = '%02d' % mon_num date = f'{year}-{mon_nr}-{day_num}' com1 = f'ncap2 -s "time@units=\\"days since {date} 00:00:00\\"" {fn}' print(com1) run(com1, shell=True) final_out_fn = out_folder / f'{date}.nc' com2 = f'ncap2 -s time=time/24 {fn} {final_out_fn}' print(com2) run(com2, shell=True) return def fn_raw(month_nr, year, fieldtype): """ Filename raw input file :param month_nr: :param year: :param fieldtype: :return: """ return f'{dic_month[month_nr]}{year}{fieldtype}.grb' def fn_raw_nc(month_nr, year, fieldtype): """ Filename raw in nc format :param month_nr: :param year: :param fieldtype: :return: """ return f'{dic_month[month_nr]}{year}{fieldtype}.nc' def get_fl_raw(fieldtype, year): """ File list of raw files in year. :param fieldtype: "3D" og "surf" :param year: :return: """ fl_3D = glob(str(input_folder) + f'/*{year}{fieldtype}.nc') fl_check = [fn_raw_nc(i, year, fieldtype) for i in range(1, 13)] fl_check2 = [Path(f).name for f in fl_3D] if set(fl_check) != set(fl_check2): ms_f = [f for f in fl_check if f not in fl_check2] # print(f'Lacking files for year {year}: \n {ms_f}. Want to proceed? (y/n)') if __name__ == '__main__': ans = input(f'Lacking files for year {year}: \n {ms_f}. Want to proceed? (y/n) ') if ans.strip() in ['n', 'N']: sys.exit() return fl_3D def make_vct_file(res_fn=res_file, outfn=vct_file): """ Make vct file for re-formatting lev. 
:param res_fn: :param outfn: :return: """ res_f = xr.open_dataset(res_fn) hyai = 'hyai' hybi = 'hybi' hyai_da = res_f[hyai].values hybi_da = res_f[hybi].values outfile = open(str(outfn), 'w') info_line = '# k vct_a(k) [Pa] vct_b(k) []\n' outfile.write(info_line) # hyai needs to be multiplied by 1e5 for i in range(0, len(hyai_da)): outfile.write('%4d %25.17f %25.17f \n' % (i, 1e5 * hyai_da[i], hybi_da[i])) outfile.close() return outfn def input_fn_from_ym(year, month): """ filename from year/month input. :param year: :param month: :return: """ return f'{dic_month[month]}{year}.nc' # %% def main(year): fl_3D = get_fl_raw('3D', year) print(fl_3D) # %% # Get resolution: rf = xr.open_dataset(res_file) xinc = float(rf['lon'][1] - rf['lon'][0]) # .values[0] yinc = float(rf['lat'][1] - rf['lat'][0]) print(f'Resolution file found: dx = {xinc}, dy={yinc}') # %% # cp surface to new 3D files: daily_files = [] for mon_nr in range(1, 13): fn_3D_grb = input_folder / fn_raw(mon_nr, year, '3D') fn_surf_grb = input_folder / fn_raw(mon_nr, year, 'surf') # if not fn if not fn_3D_grb.is_file(): print(f'Could not find file {fn_3D_grb}') continue if not fn_surf_grb.is_file(): print(f'Could not find file {fn_surf_grb}') continue print(f'converting {fn_3D_grb} to nc') grb2nc(fn_3D_grb) print(f'converting {fn_surf_grb} to nc') grb2nc(fn_surf_grb) print('Done converting to nc') print('Starting to merge files:') for mon_nr in range(1, 13): fn_3D = input_folder / fn_raw_nc(mon_nr, year, '3D') fn_surf = input_folder / fn_raw_nc(mon_nr, year, 'surf') if not fn_3D.is_file(): print(f'Could not find file {fn_3D}') continue if not fn_surf.is_file(): print(f'Could not find file {fn_surf}') continue tmpfn = tmp_folder / fn_3D.name fls = glob(f'{str(tmpfn)[:-3]}_day*.nc') if len(fls) >= 28: print('Daily files already computed') daily_files = daily_files + fls continue cdo_comm = f'cdo -s merge {str(fn_surf)} {str(fn_3D)} {str(tmpfn)}' print(cdo_comm) run(cdo_comm, shell=True) cdo_comm2 = f'cdo -s splitday {str(tmpfn)} {str(tmpfn)[:-3]}_day' print(cdo_comm2) run(cdo_comm2, shell=True) os.remove(tmpfn) fls = glob(f'{str(tmpfn)[:-3]}_day*.nc') daily_files = daily_files + fls # make vct file: vct_filen = make_vct_file(res_fn=res_file) # %% daily_files.sort() # %% print(f'Remapping level:') for fd in daily_files: pd = Path(fd) pout = pd.parent / f'vert_{pd.name}' if pout.is_file(): continue comm_cdo = f'cdo -s --no_warnings remapeta,{vct_filen} -chname,SP,APS -selname,U,V,T,Q,SP {fd} {pout}' print(comm_cdo) run(comm_cdo, shell=True) # %% print('Done remapping eta/lev') print('Change horizontal resolution...') fd = daily_files[0] pd = Path(fd) vert_f = pd.parent / f'vert_{pd.name}' # change horizontal resolution print('creating weights file....') cdo_comm = f'cdo -s selname,T {res_file} {res_file_T}' p_weights = data_folder / 'weights.nc' cdo_comm2 = f'cdo -s genbil,{res_file_T} {vert_f} {p_weights}' print(cdo_comm) run(cdo_comm, shell=True) print(cdo_comm2) run(cdo_comm2, shell=True) for fd in daily_files: pd = Path(fd) v_fp = pd.parent / f'vert_{pd.name}' pout = v_fp.parent / f'horiz_{v_fp.name}' cdo_comm = f'cdo -s remap,{res_file_T},{p_weights} {v_fp} {pout}' print(cdo_comm) run(cdo_comm, shell=True) rn_comm = f'ncrename -v APS,PS {pout}' print(rn_comm) run(rn_comm, shell=True) for mon_nr in range(1, 13): fl = get_daily_fl(mon_nr, year, prefix='horiz_vert_', folder=tmp_folder) for fd in fl: change_unit_time_and_save_final(year, mon_nr, fd) return # %% if __name__ == '__main__': if len(sys.argv) == 1: sys.exit('Lacking input year 
\n Correct usage: ' 'python conv_ERA_interim.py <year>') inyear = sys.argv[1] main(inyear)
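
# --- Usage sketch (illustrative, not part of the script above) ---
# The script is driven per year from the command line:
#
#     python conv_ERA_interim.py 2015
#
# It converts the monthly <mon><year>{3D,surf}.grb inputs to netcdf, merges
# and splits them into daily files, remaps the vertical levels with the
# generated vct file, regrids horizontally onto the resolution file's grid,
# and writes one YYYY-MM-DD.nc per day with time in "days since". A
# hypothetical batch driver for several years could simply call main() in
# a loop:
#
#     for yr in range(2010, 2016):
#         main(str(yr))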
""" Author: Sara Blichner <s.m.blichner@geo.uio.no> Based on conv_ERA-interim.sh: # Author: Matthias Hummel <hummel@geo.uio.no> # and Inger Helene H. Karset <i.h.h.karset@geo.uio.no> # Date: 08.09.2016 # Modified by Moa Sporre 14.11.2017 """ import os import sys from glob import glob from pathlib import Path from subprocess import run import xarray as xr from path_defs import dic_month, data_folder, input_folder, res_file, res_file_T, vct_file, tmp_folder, out_folder out_folder.mkdir(exist_ok=True) tmp_folder.mkdir(exist_ok=True) def get_daily_fl(mon_nr, year, prefix='', folder=tmp_folder): """ Gets list of files for daily files :param mon_nr: month in format number :param year: year :param prefix: prefix before the filename :param folder: where to look. :return: """ fn = fn_raw_nc(mon_nr, year, '3D') tmpfn = folder / f'{prefix}{fn}' fls = glob(f'{str(tmpfn)[:-3]}_day*.nc') return fls def grb2nc(fn, output_folder=input_folder): """ Compute netcdf file from grb files. :param fn: grb file to be converted. :param output_folder: where to put the nc files. :return: """ fn_out = output_folder / f'{Path(fn).name[:-3]}nc' cdo_com = f'cdo -s -t ecmwf -f nc copy {fn} {fn_out}' if not fn_out.is_file(): run(cdo_com, shell=True) else: print(f'File found, using existing file {fn_out}') return def change_unit_time_and_save_final(year, mon_num, fn, day_num=None): """ Unit of time needs to be changed and then final file is saved. Original file in "hours since", but we need "days since" :param year: :param mon_num: :param fn: :param day_num: :return: """ ds = xr.open_dataset(fn) if 'units' in ds['time'].attrs.keys(): if 'days' in str(ds['time'].units): print(f'unit already in days: {ds["time"].units}') return if day_num is None: day_num = fn[-5:-3] mon_nr = '%02d' % mon_num date = f'{year}-{mon_nr}-{day_num}' com1 = f'ncap2 -s "time@units=\\"days since {date} 00:00:00\\"" {fn}' print(com1) run(com1, shell=True) final_out_fn = out_folder / f'{date}.nc' com2 = f'ncap2 -s time=time/24 {fn} {final_out_fn}' print(com2) run(com2, shell=True) return def fn_raw(month_nr, year, fieldtype): """ Filename raw input file :param month_nr: :param year: :param fieldtype: :return: """ return f'{dic_month[month_nr]}{year}{fieldtype}.grb' def fn_raw_nc(month_nr, year, fieldtype): """ Filename raw in nc format :param month_nr: :param year: :param fieldtype: :return: """ return f'{dic_month[month_nr]}{year}{fieldtype}.nc' def get_fl_raw(fieldtype, year): """ File list of raw files in year. :param fieldtype: "3D" og "surf" :param year: :return: """ fl_3D = glob(str(input_folder) + f'/*{year}{fieldtype}.nc') fl_check = [fn_raw_nc(i, year, fieldtype) for i in range(1, 13)] fl_check2 = [Path(f).name for f in fl_3D] if set(fl_check) != set(fl_check2): ms_f = [f for f in fl_check if f not in fl_check2] # print(f'Lacking files for year {year}: \n {ms_f}. Want to proceed? (y/n)') if __name__ == '__main__': ans = input(f'Lacking files for year {year}: \n {ms_f}. Want to proceed? (y/n) ') if ans.strip() in ['n', 'N']: sys.exit() return fl_3D def make_vct_file(res_fn=res_file, outfn=vct_file): """ Make vct file for re-formatting lev. 
:param res_fn: :param outfn: :return: """ res_f = xr.open_dataset(res_fn) hyai = 'hyai' hybi = 'hybi' hyai_da = res_f[hyai].values hybi_da = res_f[hybi].values outfile = open(str(outfn), 'w') info_line = '# k vct_a(k) [Pa] vct_b(k) []\n' outfile.write(info_line) # hyai needs to be multiplied by 1e5 for i in range(0, len(hyai_da)): outfile.write('%4d %25.17f %25.17f \n' % (i, 1e5 * hyai_da[i], hybi_da[i])) outfile.close() return outfn def input_fn_from_ym(year, month): """ filename from year/month input. :param year: :param month: :return: """ return f'{dic_month[month]}{year}.nc' # %% def main(year): fl_3D = get_fl_raw('3D', year) print(fl_3D) # %% # Get resolution: rf = xr.open_dataset(res_file) xinc = float(rf['lon'][1] - rf['lon'][0]) # .values[0] yinc = float(rf['lat'][1] - rf['lat'][0]) print(f'Resolution file found: dx = {xinc}, dy={yinc}') # %% # cp surface to new 3D files: daily_files = [] for mon_nr in range(1, 13): fn_3D_grb = input_folder / fn_raw(mon_nr, year, '3D') fn_surf_grb = input_folder / fn_raw(mon_nr, year, 'surf') # if not fn if not fn_3D_grb.is_file(): print(f'Could not find file {fn_3D_grb}') continue if not fn_surf_grb.is_file(): print(f'Could not find file {fn_surf_grb}') continue print(f'converting {fn_3D_grb} to nc') grb2nc(fn_3D_grb) print(f'converting {fn_surf_grb} to nc') grb2nc(fn_surf_grb) print('Done converting to nc') print('Starting to merge files:') for mon_nr in range(1, 13): fn_3D = input_folder / fn_raw_nc(mon_nr, year, '3D') fn_surf = input_folder / fn_raw_nc(mon_nr, year, 'surf') if not fn_3D.is_file(): print(f'Could not find file {fn_3D}') continue if not fn_surf.is_file(): print(f'Could not find file {fn_surf}') continue tmpfn = tmp_folder / fn_3D.name fls = glob(f'{str(tmpfn)[:-3]}_day*.nc') if len(fls) >= 28: print('Daily files already computed') daily_files = daily_files + fls continue cdo_comm = f'cdo -s merge {str(fn_surf)} {str(fn_3D)} {str(tmpfn)}' print(cdo_comm) run(cdo_comm, shell=True) cdo_comm2 = f'cdo -s splitday {str(tmpfn)} {str(tmpfn)[:-3]}_day' print(cdo_comm2) run(cdo_comm2, shell=True) os.remove(tmpfn) fls = glob(f'{str(tmpfn)[:-3]}_day*.nc') daily_files = daily_files + fls # make vct file: vct_filen = make_vct_file(res_fn=res_file) # %% daily_files.sort() # %% print(f'Remapping level:') for fd in daily_files: pd = Path(fd) pout = pd.parent / f'vert_{pd.name}' if pout.is_file(): continue comm_cdo = f'cdo -s --no_warnings remapeta,{vct_filen} -chname,SP,APS -selname,U,V,T,Q,SP {fd} {pout}' print(comm_cdo) run(comm_cdo, shell=True) # %% print('Done remapping eta/lev') print('Change horizontal resolution...') fd = daily_files[0] pd = Path(fd) vert_f = pd.parent / f'vert_{pd.name}' # change horizontal resolution print('creating weights file....') cdo_comm = f'cdo -s selname,T {res_file} {res_file_T}' p_weights = data_folder / 'weights.nc' cdo_comm2 = f'cdo -s genbil,{res_file_T} {vert_f} {p_weights}' print(cdo_comm) run(cdo_comm, shell=True) print(cdo_comm2) run(cdo_comm2, shell=True) for fd in daily_files: pd = Path(fd) v_fp = pd.parent / f'vert_{pd.name}' pout = v_fp.parent / f'horiz_{v_fp.name}' cdo_comm = f'cdo -s remap,{res_file_T},{p_weights} {v_fp} {pout}' print(cdo_comm) run(cdo_comm, shell=True) rn_comm = f'ncrename -v APS,PS {pout}' print(rn_comm) run(rn_comm, shell=True) for mon_nr in range(1, 13): fl = get_daily_fl(mon_nr, year, prefix='horiz_vert_', folder=tmp_folder) for fd in fl: change_unit_time_and_save_final(year, mon_nr, fd) return # %% if __name__ == '__main__': if len(sys.argv) == 1: sys.exit('Lacking input year 
\n Correct usage: ' 'python conv_ERA_interim.py <year>') inyear = sys.argv[1] main(inyear)
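

# --- Added illustration (not part of the original script) ---
# A hedged, pure-xarray sketch of what the two ncap2 calls in
# change_unit_time_and_save_final accomplish: rewrite the time axis from
# "hours since <date>" to "days since <date>" by dividing the values by 24.
# The file names ('in.nc', 'out.nc') and the helper name are hypothetical;
# the script itself shells out to ncap2 instead.
def hours_to_days(in_fn='in.nc', out_fn='out.nc', date='2005-01-01'):
    # decode_times=False keeps the time axis as plain numbers
    ds = xr.open_dataset(in_fn, decode_times=False)
    ds['time'] = ds['time'] / 24
    ds['time'].attrs['units'] = f'days since {date} 00:00:00'
    ds.to_netcdf(out_fn)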
import os import pickle as pkl import typing from abc import abstractmethod from functools import lru_cache, partial from pathlib import Path import numpy as np from scipy.io import loadmat from sklearn.preprocessing import LabelEncoder, StandardScaler from braincode.abstract import Object from braincode.benchmarks import ProgramBenchmark from braincode.embeddings import ProgramEmbedder class DataLoader(Object): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._datadir = self._base_path.joinpath("inputs") self._events = (12, 6) # nruns, nblocks @property def datadir(self) -> Path: return self._datadir @property def _runs(self) -> int: return self._events[0] @property def _blocks(self) -> int: return self._events[1] @property def samples(self) -> int: return np.prod(self._events) def _load_brain_data( self, subject: Path ) -> typing.Tuple[ np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray ]: if "brain" not in self._feature: raise ValueError( "Feature set incorrectly. Must be brain network to load subject data." ) mat = loadmat(subject) network = self._feature.split("-")[1] network_indices = mat[f"{network}_tags"] return ( mat["data"], network_indices, mat["problem_content"], mat["problem_lang"], mat["problem_structure"], mat["problem_ID"], ) @staticmethod def _formatcell(matcellarray: np.ndarray) -> np.ndarray: if isinstance(matcellarray[0][0], np.ndarray): return np.array([i[0][0] for i in matcellarray]) if isinstance(matcellarray[0][0], np.uint8): return np.array([i[0] for i in matcellarray]) raise TypeError("MATLAB cell array type not handled.") def _load_select_programs( self, lang: np.ndarray, ident: np.ndarray ) -> typing.Tuple[np.ndarray, np.ndarray]: programs, fnames = [], [] for i in range(ident.size): fnames.append( list( self.datadir.joinpath("python_programs", lang[i]).glob( f"{ident[i]}_*" ) )[0].as_posix() ) with open(fnames[-1], "r") as f: programs.append(f.read()) return np.array(programs), np.array(fnames) def _prep_code_reps( self, content: np.ndarray, lang: np.ndarray, structure: np.ndarray, ident: np.ndarray, encoder=LabelEncoder(), ) -> typing.Tuple[np.ndarray, np.ndarray]: code = np.array( ["sent" if i == "sent" else "code" for i in self._formatcell(lang)] ) if self._target == "test-code": Y = code mask = np.ones(code.size, dtype="bool") else: mask = code == "code" if self._target in ["task-content", "test-lang", "task-structure"]: Y = self._formatcell(locals()[self._target.split("-")[1]])[mask] else: Y, fnames = self._load_select_programs( self._formatcell(lang)[mask], self._formatcell(ident)[mask] ) if "code-" in self._target: encoder = ProgramEmbedder( self._target, self._base_path, self._code_model_dim ) elif "task-" in self._target: encoder = ProgramBenchmark(self._target, self._base_path, fnames) else: raise ValueError("Target not recognized. 
Select valid target.") return encoder.fit_transform(Y), mask def _prep_brain_reps( self, data: np.ndarray, parc: np.ndarray, mask: np.ndarray ) -> np.ndarray: data = data[:, np.flatnonzero(parc)] for i in range(self._runs): idx = np.arange(i, self.samples, self._runs) data[idx, :] = StandardScaler().fit_transform(data[idx, :]) return data[mask] @staticmethod def _prep_runs(runs: int, blocks: int) -> np.ndarray: return np.tile(np.arange(runs), blocks) def _load_all_programs( self, ) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: programs, content, lang, structure, fnames = [], [], [], [], [] files = list(self.datadir.joinpath("python_programs", "en").rglob("*.py")) for file in sorted(files): fnames.append(file.as_posix()) with open(fnames[-1], "r") as f: programs.append(f.read()) info = fnames[-1].split(os.sep)[-1].split(" ")[1].split("_") content.append(info[0]) lang.append(fnames[-1].split(os.sep)[-2]) structure.append(info[1]) return ( np.array(programs), np.array(content), np.array(lang), np.array(structure), np.array(fnames), ) def _get_fname(self, analysis: str, subject: str = "") -> Path: dim = getattr(self, "_code_model_dim") if subject != "": subject = f"_sub{subject}".replace(".mat", "") if dim != "": dim = f"_dim{dim}" fname = self._base_path.joinpath( ".cache", "representations", analysis, f"{self._feature.split('-')[1]}_{self._target.split('-')[1]}{subject}{dim}.pkl", ) if not fname.parent.exists(): fname.parent.mkdir(parents=True, exist_ok=True) return fname @abstractmethod def _prep_data( self, subject: Path ) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]: raise NotImplementedError("Handled by subclass.") def _get_loader(self, subject: Path) -> partial: return partial(self._prep_data, subject) @lru_cache(maxsize=None) def get_data( self, analysis: str, subject: Path = Path(""), ) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]: fname = self._get_fname(analysis, subject.name) if fname.exists() and not self._debug: with open(fname, "rb") as f: data = pkl.load(f) return data["X"], data["y"], data["runs"] load_data = self._get_loader(subject) X, Y, runs = load_data() if not self._debug: with open(fname, "wb") as f: pkl.dump({"X": X, "y": Y, "runs": runs}, f) self._logger.info(f"Caching '{fname.name}'.") return X, Y, runs class DataLoaderPRDA(DataLoader): def _prep_data( # type: ignore self, k: int = 5 ) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]: if "+" in self._feature or "+" in self._target: raise RuntimeError("PRDA does not support joint variables.") programs, content, lang, structure, fnames = self._load_all_programs() if self._target in ["task-content", "task-structure"]: Y = locals()[self._target.split("-")[1]] else: Y = ProgramBenchmark(self._target, self._base_path, fnames).fit_transform( programs ) X = ProgramEmbedder(self._feature, self._base_path, "").fit_transform(programs) runs = self._prep_runs(k, (Y.size // k + 1))[: Y.size] # kfold CV return X, Y, runs def _get_loader(self, subject: Path) -> partial: return partial(self._prep_data) class DataLoaderMVPA(DataLoader): def _prep_xyr( self, subject: Path ) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]: data, parc, content, lang, structure, ident = self._load_brain_data(subject) Y, mask = self._prep_code_reps(content, lang, structure, ident) X = self._prep_brain_reps(data, parc, mask) runs = self._prep_runs(self._runs, self._blocks)[mask] return X, Y, runs def _prep_xyr_joint( self, subject: Path ) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]: temp = 
getattr(self, "_feature") parts = temp.split("-") prefix, units = parts[0], parts[1] X = [] for unit in units.split("+"): setattr(self, "_feature", f"{prefix}-{unit}") x, Y, runs = self._prep_xyr(subject) X.append(x) setattr(self, "_feature", temp) X = np.concatenate(X, axis=1) return X, Y, runs def _prep_data( self, subject: Path ) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]: joint_feature = "+" in self._feature joint_target = "+" in self._target if joint_target: raise RuntimeError("Should only be using joint features.") if joint_feature: if "MVPA" not in self._name: raise RuntimeError("Only MVPA supports joint features.") return self._prep_xyr_joint(subject) return self._prep_xyr(subject)
""" A convenient class to hold: - dataset creation - model train procedure - inference on dataset - evaluating predictions - and more """ #pylint: disable=import-error, no-name-in-module, wrong-import-position, protected-access import os import gc import logging from time import perf_counter from ast import literal_eval from pprint import pformat import psutil import numpy as np import torch from ..plotters import plot_loss from ...batchflow import Config, Monitor from ...batchflow.models.torch import EncoderDecoder class BaseController: """ A common interface for train, inference, postprocessing and quality assessment. Supposed to be used in an environment with set `CUDA_VISIBLE_DEVICES` variable. At initialization, a nested configuration dict should be provided. Common parameters are defined on root level of the config: savedir : str Directory to store outputs: logs, graphs, predictions. monitor : bool Whether to track resources during execution. logger : callable Function to log messages. bar : bool Whether to show progress bars during execution. plot : bool Whether to display graphs during execution. Each of the methods retrieves parameters from the configuration by its name: - `train` - `inference` - `postprocess` - `evaluate` Each of the methods also has the `config` argument to override parameters from that configuration. Keyword arguments are used with the highest priority. """ #pylint: disable=attribute-defined-outside-init DEFAULTS = Config({ # General parameters 'savedir': None, 'monitor': True, 'logger': None, 'bar': False, 'plot': False, 'train': { 'model_class': EncoderDecoder, 'model_config': None, 'batch_size': None, 'crop_shape': None, 'rebatch_threshold': 0.8, 'rescale_batch_size': True, 'prefetch': 1, 'n_iters': 100, 'early_stopping': True, }, 'inference': {}, # Common keys for both train and inference 'common': {}, # Make predictions better 'postprocess': {}, # Compute metrics 'evaluate': {} }) def __init__(self, config=None, **kwargs): self.config = Config(self.DEFAULTS) self.config += config or {} self.config += kwargs self.monitor = self.config.monitor self.plot = self.config.plot devices = os.getenv('CUDA_VISIBLE_DEVICES') if devices: gpu_list = literal_eval(devices) self.gpu_list = list(gpu_list) if isinstance(gpu_list, tuple) else [gpu_list] else: self.gpu_list = [] self.make_filelogger() self.log(f'Initialized {self.__class__.__name__}') # Utility functions def make_savepath(self, *postfix): """ Create nested path from provided strings. Uses `savedir` config option. If `savedir` config option is None, then None is returned: that is used as signal to omit saving of, for example, metric map images, etc. """ savedir = self.config['savedir'] if savedir is not None: path = os.path.join(savedir, *postfix[:-1]) os.makedirs(path, exist_ok=True) return os.path.join(savedir, *postfix) return None # Logging def make_filelogger(self): """ Create logger inside `savedir`. Note that logging is important. """ log_path = self.make_savepath('controller.log') if log_path: handler = logging.FileHandler(log_path, mode='w') handler.setFormatter(logging.Formatter('%(asctime)s %(message)s')) logger = logging.getLogger(str(id(self))) logger.addHandler(handler) logger.setLevel(logging.INFO) self.filelogger = logger.info else: self.filelogger = None def log(self, msg): """ Log supplied message into both filelogger and supplied one. 
""" process = psutil.Process(os.getpid()) uss = process.memory_full_info().uss / (1024 ** 3) msg = f'{self.__class__.__name__} ::: {uss:2.4f} ::: {msg}' logger = self.config.logger if logger: logger = logger if isinstance(logger, (tuple, list)) else [logger] for logger_ in logger: logger_(msg) if self.filelogger: self.filelogger(msg) def log_to_file(self, msg, path): """ Log message to a separate file. """ log_path = self.make_savepath(path) if log_path: with open(log_path, 'w', encoding='utf-8') as file: print(msg, file=file) # Dataset creation def make_dataset(self, **kwargs): """ Create dataset to train/inference on. Must be implemented in inherited classes. """ _ = kwargs def make_notifier(self): """ Create notifier. """ return { 'bar': self.config.bar, 'monitors': 'loss_history', 'file': self.make_savepath('末 model_loss.log'), } # Train def train(self, dataset, sampler, config=None, **kwargs): """ Train model on a provided dataset. Uses the `get_train_template` method to create pipeline of model training. Returns ------- Model instance """ # Prepare parameters config = config or {} pipeline_config = Config({**self.config['common'], **self.config['train'], **config, **kwargs}) n_iters, prefetch, rescale = pipeline_config.pop(['n_iters', 'prefetch', 'rescale_batch_size']) notifier = self.make_notifier() if self.config['bar'] else None self.log(f'Train started on device={self.gpu_list}') # Start resource tracking if self.monitor: monitor = Monitor(['uss', 'gpu', 'gpu_memory'], frequency=0.5, gpu_list=self.gpu_list) monitor.__enter__() # Make pipeline pipeline_config['sampler'] = sampler train_pipeline = self.get_train_template(**kwargs) << pipeline_config << dataset # Log: pipeline_config to a file self.log_to_file(pformat(pipeline_config.config, depth=2), '末 train_config.txt') # Test batch to initialize model and log stats batch = train_pipeline.next_batch() model = train_pipeline.m('model') self.log(f'Target batch size: {pipeline_config['batch_size']}') self.log(f'Actual batch size: {len(batch)}') self.log(f'Cache sizes: {dataset.geometries.cache_size}') self.log(f'Cache lengths: {dataset.geometries.cache_length}') # Log: full and short model repr self.log_to_file(repr(model.model), '末 model_repr.txt') self.log_to_file(model._short_repr(), '末 model_shortrepr.txt') # Rescale batch size, if needed if rescale: scale = pipeline_config['batch_size'] / len(batch) pipeline_config['batch_size'] = int(pipeline_config['batch_size'] * scale) self.log(f'Rescaling batch size to: {pipeline_config['batch_size']}') train_pipeline.set_config(pipeline_config) # Run training procedure start_time = perf_counter() self.log(f'Train run: n_iters={n_iters}, prefetch={prefetch}') train_pipeline.run(n_iters=n_iters, prefetch=prefetch, notifier=notifier) elapsed = perf_counter() - start_time # Log: resource graphs if self.monitor: monitor.__exit__(None, None, None) monitor.visualize(savepath=self.make_savepath('末 train_resource.png'), show=self.plot) # Log: loss over iteration plot_loss(model.loss_list, show=self.plot, savepath=self.make_savepath('末 model_loss.png')) final_loss = np.mean(model.loss_list[-25:]) # Log: model train information self.log_to_file(model._information(config=True, devices=True, model=False, misc=True), '末 model_info.txt') # Log: stats self.log(f'Trained for {model.iteration} iterations in {elapsed:4.1f}s') self.log(f'Average of 25 last loss values: {final_loss:4.3f}') self.log(f'Cache sizes: {dataset.geometries.cache_size}') self.log(f'Cache lengths: {dataset.geometries.cache_length}') # 
Cleanup torch.cuda.empty_cache() gc.collect() train_pipeline.reset('variables') dataset.geometries.reset_cache() self.log('') self.train_log = { 'start_time': start_time, 'elapsed': elapsed, 'final_loss': final_loss, } return model def finetune(self, dataset, sampler, model, config=None, **kwargs): """ Train given model for a couple more iterations on a specific sampler. Used to fine-tune the model on specific range during inference stage. """ # Prepare parameters config = config or {} pipeline_config = Config({**self.config['common'], **self.config['train'], **self.config['finetune'], **config, **kwargs}) n_iters, prefetch = pipeline_config.pop(['n_iters', 'prefetch']) pipeline_config['sampler'] = sampler pipeline_config['source_model'] = model train_pipeline = self.get_train_template(**kwargs) << pipeline_config << dataset train_pipeline.run(n_iters=n_iters, prefetch=prefetch) torch.cuda.empty_cache() # Inference def inference(self, dataset, model, **kwargs): """ Inference: use trained/loaded model for making predictions on the supplied dataset. Must be implemented in inherited classes. """ _ = dataset, model, kwargs # Postprocess def postprocess(self, predictions, **kwargs): """ Optional postprocessing: algorithmic adjustments to predictions. Must be implemented in inherited classes. """ _ = predictions, kwargs # Evaluate def evaluate(self, predictions, targets=None, dataset=None, **kwargs): """ Assess quality of model generated outputs. Must be implemented in inherited classes. """ _ = predictions, targets, dataset, kwargs # Pipelines: used inside train/inference methods def get_train_template(self, **kwargs): """ Define the whole training procedure pipeline including data loading, augmentation and model training. """ _ = kwargs
""" A convenient class to hold: - dataset creation - model train procedure - inference on dataset - evaluating predictions - and more """ #pylint: disable=import-error, no-name-in-module, wrong-import-position, protected-access import os import gc import logging from time import perf_counter from ast import literal_eval from pprint import pformat import psutil import numpy as np import torch from ..plotters import plot_loss from ...batchflow import Config, Monitor from ...batchflow.models.torch import EncoderDecoder class BaseController: """ A common interface for train, inference, postprocessing and quality assessment. Supposed to be used in an environment with set `CUDA_VISIBLE_DEVICES` variable. At initialization, a nested configuration dict should be provided. Common parameters are defined on root level of the config: savedir : str Directory to store outputs: logs, graphs, predictions. monitor : bool Whether to track resources during execution. logger : callable Function to log messages. bar : bool Whether to show progress bars during execution. plot : bool Whether to display graphs during execution. Each of the methods retrieves parameters from the configuration by its name: - `train` - `inference` - `postprocess` - `evaluate` Each of the methods also has the `config` argument to override parameters from that configuration. Keyword arguments are used with the highest priority. """ #pylint: disable=attribute-defined-outside-init DEFAULTS = Config({ # General parameters 'savedir': None, 'monitor': True, 'logger': None, 'bar': False, 'plot': False, 'train': { 'model_class': EncoderDecoder, 'model_config': None, 'batch_size': None, 'crop_shape': None, 'rebatch_threshold': 0.8, 'rescale_batch_size': True, 'prefetch': 1, 'n_iters': 100, 'early_stopping': True, }, 'inference': {}, # Common keys for both train and inference 'common': {}, # Make predictions better 'postprocess': {}, # Compute metrics 'evaluate': {} }) def __init__(self, config=None, **kwargs): self.config = Config(self.DEFAULTS) self.config += config or {} self.config += kwargs self.monitor = self.config.monitor self.plot = self.config.plot devices = os.getenv('CUDA_VISIBLE_DEVICES') if devices: gpu_list = literal_eval(devices) self.gpu_list = list(gpu_list) if isinstance(gpu_list, tuple) else [gpu_list] else: self.gpu_list = [] self.make_filelogger() self.log(f'Initialized {self.__class__.__name__}') # Utility functions def make_savepath(self, *postfix): """ Create nested path from provided strings. Uses `savedir` config option. If `savedir` config option is None, then None is returned: that is used as signal to omit saving of, for example, metric map images, etc. """ savedir = self.config['savedir'] if savedir is not None: path = os.path.join(savedir, *postfix[:-1]) os.makedirs(path, exist_ok=True) return os.path.join(savedir, *postfix) return None # Logging def make_filelogger(self): """ Create logger inside `savedir`. Note that logging is important. """ log_path = self.make_savepath('controller.log') if log_path: handler = logging.FileHandler(log_path, mode='w') handler.setFormatter(logging.Formatter('%(asctime)s %(message)s')) logger = logging.getLogger(str(id(self))) logger.addHandler(handler) logger.setLevel(logging.INFO) self.filelogger = logger.info else: self.filelogger = None def log(self, msg): """ Log supplied message into both filelogger and supplied one. 
""" process = psutil.Process(os.getpid()) uss = process.memory_full_info().uss / (1024 ** 3) msg = f'{self.__class__.__name__} ::: {uss:2.4f} ::: {msg}' logger = self.config.logger if logger: logger = logger if isinstance(logger, (tuple, list)) else [logger] for logger_ in logger: logger_(msg) if self.filelogger: self.filelogger(msg) def log_to_file(self, msg, path): """ Log message to a separate file. """ log_path = self.make_savepath(path) if log_path: with open(log_path, 'w', encoding='utf-8') as file: print(msg, file=file) # Dataset creation def make_dataset(self, **kwargs): """ Create dataset to train/inference on. Must be implemented in inherited classes. """ _ = kwargs def make_notifier(self): """ Create notifier. """ return { 'bar': self.config.bar, 'monitors': 'loss_history', 'file': self.make_savepath('末 model_loss.log'), } # Train def train(self, dataset, sampler, config=None, **kwargs): """ Train model on a provided dataset. Uses the `get_train_template` method to create pipeline of model training. Returns ------- Model instance """ # Prepare parameters config = config or {} pipeline_config = Config({**self.config['common'], **self.config['train'], **config, **kwargs}) n_iters, prefetch, rescale = pipeline_config.pop(['n_iters', 'prefetch', 'rescale_batch_size']) notifier = self.make_notifier() if self.config['bar'] else None self.log(f'Train started on device={self.gpu_list}') # Start resource tracking if self.monitor: monitor = Monitor(['uss', 'gpu', 'gpu_memory'], frequency=0.5, gpu_list=self.gpu_list) monitor.__enter__() # Make pipeline pipeline_config['sampler'] = sampler train_pipeline = self.get_train_template(**kwargs) << pipeline_config << dataset # Log: pipeline_config to a file self.log_to_file(pformat(pipeline_config.config, depth=2), '末 train_config.txt') # Test batch to initialize model and log stats batch = train_pipeline.next_batch() model = train_pipeline.m('model') self.log(f'Target batch size: {pipeline_config["batch_size"]}') self.log(f'Actual batch size: {len(batch)}') self.log(f'Cache sizes: {dataset.geometries.cache_size}') self.log(f'Cache lengths: {dataset.geometries.cache_length}') # Log: full and short model repr self.log_to_file(repr(model.model), '末 model_repr.txt') self.log_to_file(model._short_repr(), '末 model_shortrepr.txt') # Rescale batch size, if needed if rescale: scale = pipeline_config['batch_size'] / len(batch) pipeline_config['batch_size'] = int(pipeline_config['batch_size'] * scale) self.log(f'Rescaling batch size to: {pipeline_config["batch_size"]}') train_pipeline.set_config(pipeline_config) # Run training procedure start_time = perf_counter() self.log(f'Train run: n_iters={n_iters}, prefetch={prefetch}') train_pipeline.run(n_iters=n_iters, prefetch=prefetch, notifier=notifier) elapsed = perf_counter() - start_time # Log: resource graphs if self.monitor: monitor.__exit__(None, None, None) monitor.visualize(savepath=self.make_savepath('末 train_resource.png'), show=self.plot) # Log: loss over iteration plot_loss(model.loss_list, show=self.plot, savepath=self.make_savepath('末 model_loss.png')) final_loss = np.mean(model.loss_list[-25:]) # Log: model train information self.log_to_file(model._information(config=True, devices=True, model=False, misc=True), '末 model_info.txt') # Log: stats self.log(f'Trained for {model.iteration} iterations in {elapsed:4.1f}s') self.log(f'Average of 25 last loss values: {final_loss:4.3f}') self.log(f'Cache sizes: {dataset.geometries.cache_size}') self.log(f'Cache lengths: {dataset.geometries.cache_length}') # 
Cleanup torch.cuda.empty_cache() gc.collect() train_pipeline.reset('variables') dataset.geometries.reset_cache() self.log('') self.train_log = { 'start_time': start_time, 'elapsed': elapsed, 'final_loss': final_loss, } return model def finetune(self, dataset, sampler, model, config=None, **kwargs): """ Train given model for a couple more iterations on a specific sampler. Used to fine-tune the model on specific range during inference stage. """ # Prepare parameters config = config or {} pipeline_config = Config({**self.config['common'], **self.config['train'], **self.config['finetune'], **config, **kwargs}) n_iters, prefetch = pipeline_config.pop(['n_iters', 'prefetch']) pipeline_config['sampler'] = sampler pipeline_config['source_model'] = model train_pipeline = self.get_train_template(**kwargs) << pipeline_config << dataset train_pipeline.run(n_iters=n_iters, prefetch=prefetch) torch.cuda.empty_cache() # Inference def inference(self, dataset, model, **kwargs): """ Inference: use trained/loaded model for making predictions on the supplied dataset. Must be implemented in inherited classes. """ _ = dataset, model, kwargs # Postprocess def postprocess(self, predictions, **kwargs): """ Optional postprocessing: algorithmic adjustments to predictions. Must be implemented in inherited classes. """ _ = predictions, kwargs # Evaluate def evaluate(self, predictions, targets=None, dataset=None, **kwargs): """ Assess quality of model generated outputs. Must be implemented in inherited classes. """ _ = predictions, targets, dataset, kwargs # Pipelines: used inside train/inference methods def get_train_template(self, **kwargs): """ Define the whole training procedure pipeline including data loading, augmentation and model training. """ _ = kwargs
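

# --- Added illustration (not part of the original module) ---
# A hedged sketch of the parameter priority used by `train` above, shown with
# plain dicts instead of batchflow's Config. The values are hypothetical; the
# point is the merge order (common < train defaults < `config` argument <
# keyword arguments), so explicit keyword arguments always win.
if __name__ == "__main__":
    common = {"batch_size": 64}                        # self.config['common']
    train_defaults = {"n_iters": 100, "prefetch": 1}   # self.config['train']
    config_arg = {"n_iters": 500}                      # `config` argument
    kwargs = {"prefetch": 4}                           # extra keyword arguments

    merged = {**common, **train_defaults, **config_arg, **kwargs}
    assert merged == {"batch_size": 64, "n_iters": 500, "prefetch": 4}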
'''This module implements specialized container datatypes providing alternatives to Python's general purpose built-in containers, dict, list, set, and tuple. * namedtuple factory function for creating tuple subclasses with named fields * deque list-like container with fast appends and pops on either end * ChainMap dict-like class for creating a single view of multiple mappings * Counter dict subclass for counting hashable objects * OrderedDict dict subclass that remembers the order entries were added * defaultdict dict subclass that calls a factory function to supply missing values * UserDict wrapper around dictionary objects for easier dict subclassing * UserList wrapper around list objects for easier list subclassing * UserString wrapper around string objects for easier string subclassing ''' __all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', 'UserString', 'Counter', 'OrderedDict', 'ChainMap'] import _collections_abc from operator import itemgetter as _itemgetter, eq as _eq from keyword import iskeyword as _iskeyword import sys as _sys import heapq as _heapq from _weakref import proxy as _proxy from itertools import repeat as _repeat, chain as _chain, starmap as _starmap from reprlib import recursive_repr as _recursive_repr try: from _collections import deque except ImportError: pass else: _collections_abc.MutableSequence.register(deque) try: from _collections import defaultdict except ImportError: pass def __getattr__(name): # For backwards compatibility, continue to make the collections ABCs # through Python 3.6 available through the collections module. # Note, no new collections ABCs were added in Python 3.7 if name in _collections_abc.__all__: obj = getattr(_collections_abc, name) import warnings warnings.warn("Using or importing the ABCs from 'collections' instead " "of from 'collections.abc' is deprecated since Python 3.3, " "and in 3.10 it will stop working", DeprecationWarning, stacklevel=2) globals()[name] = obj return obj raise AttributeError(f'module {__name__!r} has no attribute {name!r}') ################################################################################ ### OrderedDict ################################################################################ class _OrderedDictKeysView(_collections_abc.KeysView): def __reversed__(self): yield from reversed(self._mapping) class _OrderedDictItemsView(_collections_abc.ItemsView): def __reversed__(self): for key in reversed(self._mapping): yield (key, self._mapping[key]) class _OrderedDictValuesView(_collections_abc.ValuesView): def __reversed__(self): for key in reversed(self._mapping): yield self._mapping[key] class _Link(object): __slots__ = 'prev', 'next', 'key', '__weakref__' class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as regular dictionaries. # The internal self.__map dict maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # The sentinel is in self.__hardroot with a weakref proxy in self.__root. # The prev links are weakref proxies (to prevent circular references). # Individual links are kept alive by the hard reference in self.__map. # Those hard references disappear when a key is deleted from an OrderedDict. 
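    # Added worked example (not in the original notes): after
    #     od = OrderedDict.fromkeys('ab')
    # the circular list is  root <-> 'a' <-> 'b' <-> root.  Forward iteration
    # starts at root.next ('a'), popitem(last=True) unlinks root.prev ('b'),
    # and deleting 'a' just relinks root <-> 'b' via the link stored in
    # self.__map['a'].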
def __init__(self, other=(), /, **kwds): '''Initialize an ordered dictionary. The signature is the same as regular dictionaries. Keyword argument order is preserved. ''' try: self.__root except AttributeError: self.__hardroot = _Link() self.__root = root = _proxy(self.__hardroot) root.prev = root.next = root self.__map = {} self.__update(other, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link at the end of the linked list, # and the inherited dictionary is updated with the new key/value pair. if key not in self: self.__map[key] = link = Link() root = self.__root last = root.prev link.prev, link.next, link.key = last, root, key last.next = link root.prev = proxy(link) dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which gets # removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link = self.__map.pop(key) link_prev = link.prev link_next = link.next link_prev.next = link_next link_next.prev = link_prev link.prev = None link.next = None def __iter__(self): 'od.__iter__() <==> iter(od)' # Traverse the linked list in order. root = self.__root curr = root.next while curr is not root: yield curr.key curr = curr.next def __reversed__(self): 'od.__reversed__() <==> reversed(od)' # Traverse the linked list in reverse order. root = self.__root curr = root.prev while curr is not root: yield curr.key curr = curr.prev def clear(self): 'od.clear() -> None. Remove all items from od.' root = self.__root root.prev = root.next = root self.__map.clear() dict.clear(self) def popitem(self, last=True): '''Remove and return a (key, value) pair from the dictionary. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root.prev link_prev = link.prev link_prev.next = root root.prev = link_prev else: link = root.next link_next = link.next root.next = link_next link_next.prev = root key = link.key del self.__map[key] value = dict.pop(self, key) return key, value def move_to_end(self, key, last=True): '''Move an existing element to the end (or beginning if last is false). Raise KeyError if the element does not exist. 
''' link = self.__map[key] link_prev = link.prev link_next = link.next soft_link = link_next.prev link_prev.next = link_next link_next.prev = link_prev root = self.__root if last: last = root.prev link.prev = last link.next = root root.prev = soft_link last.next = link else: first = root.next link.prev = root link.next = first first.prev = soft_link root.next = link def __sizeof__(self): sizeof = _sys.getsizeof n = len(self) + 1 # number of links including root size = sizeof(self.__dict__) # instance dictionary size += sizeof(self.__map) * 2 # internal dict and inherited dict size += sizeof(self.__hardroot) * n # link objects size += sizeof(self.__root) * n # proxy objects return size update = __update = _collections_abc.MutableMapping.update def keys(self): "D.keys() -> a set-like object providing a view on D's keys" return _OrderedDictKeysView(self) def items(self): "D.items() -> a set-like object providing a view on D's items" return _OrderedDictItemsView(self) def values(self): "D.values() -> an object providing a view on D's values" return _OrderedDictValuesView(self) __ne__ = _collections_abc.MutableMapping.__ne__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): '''Insert key with a value of default if key is not in the dictionary. Return the value for key if key is in the dictionary, else default. ''' if key in self: return self[key] self[key] = default return default @_recursive_repr() def __repr__(self): 'od.__repr__() <==> repr(od)' if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self.items())) def __reduce__(self): 'Return state information for pickling' inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) return self.__class__, (), inst_dict or None, None, iter(self.items()) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''Create a new ordered dictionary with keys from iterable and values set to value. ''' self = cls() for key in iterable: self[key] = value return self def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return dict.__eq__(self, other) and all(map(_eq, self, other)) return dict.__eq__(self, other) def __ior__(self, other): self.update(other) return self def __or__(self, other): if not isinstance(other, dict): return NotImplemented new = self.__class__(self) new.update(other) return new def __ror__(self, other): if not isinstance(other, dict): return NotImplemented new = self.__class__(other) new.update(self) return new try: from _collections import OrderedDict except ImportError: # Leave the pure Python version in place. 
pass ################################################################################ ### namedtuple ################################################################################ try: from _collections import _tuplegetter except ImportError: _tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc) def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): """Returns a new subclass of tuple with named fields. >>> Point = namedtuple('Point', ['x', 'y']) >>> Point.__doc__ # docstring for the new class 'Point(x, y)' >>> p = Point(11, y=22) # instantiate with positional args or keywords >>> p[0] + p[1] # indexable like a plain tuple 33 >>> x, y = p # unpack like a regular tuple >>> x, y (11, 22) >>> p.x + p.y # fields also accessible by name 33 >>> d = p._asdict() # convert to a dictionary >>> d['x'] 11 >>> Point(**d) # convert from a dictionary Point(x=11, y=22) >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields Point(x=100, y=22) """ # Validate the field names. At the user's option, either generate an error # message or automatically replace the field name with a valid name. if isinstance(field_names, str): field_names = field_names.replace(',', ' ').split() field_names = list(map(str, field_names)) typename = _sys.intern(str(typename)) if rename: seen = set() for index, name in enumerate(field_names): if (not name.isidentifier() or _iskeyword(name) or name.startswith('_') or name in seen): field_names[index] = f'_{index}' seen.add(name) for name in [typename] + field_names: if type(name) is not str: raise TypeError('Type names and field names must be strings') if not name.isidentifier(): raise ValueError('Type names and field names must be valid ' f'identifiers: {name!r}') if _iskeyword(name): raise ValueError('Type names and field names cannot be a ' f'keyword: {name!r}') seen = set() for name in field_names: if name.startswith('_') and not rename: raise ValueError('Field names cannot start with an underscore: ' f'{name!r}') if name in seen: raise ValueError(f'Encountered duplicate field name: {name!r}') seen.add(name) field_defaults = {} if defaults is not None: defaults = tuple(defaults) if len(defaults) > len(field_names): raise TypeError('Got more default values than field names') field_defaults = dict(reversed(list(zip(reversed(field_names), reversed(defaults))))) # Variables used in the methods and docstrings field_names = tuple(map(_sys.intern, field_names)) num_fields = len(field_names) arg_list = repr(field_names).replace("'", "")[1:-1] repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' tuple_new = tuple.__new__ _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip # Create all the named tuple methods to be added to the class namespace s = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))' namespace = {'_tuple_new': tuple_new, '__name__': f'namedtuple_{typename}'} __new__ = eval(s, namespace) __new__.__doc__ = f'Create new instance of {typename}({arg_list})' if defaults is not None: __new__.__defaults__ = defaults @classmethod def _make(cls, iterable): result = tuple_new(cls, iterable) if _len(result) != num_fields: raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') return result _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' 'or iterable') def _replace(self, /, **kwds): result = self._make(_map(kwds.pop, field_names, self)) if kwds: raise ValueError(f'Got unexpected field names: {list(kwds)!r}') return result _replace.__doc__ 
= (f'Return a new {typename} object replacing specified ' 'fields with new values') def __repr__(self): 'Return a nicely formatted representation string' return self.__class__.__name__ + repr_fmt % self def _asdict(self): 'Return a new dict which maps field names to their values.' return _dict(_zip(self._fields, self)) def __getnewargs__(self): 'Return self as a plain tuple. Used by copy and pickle.' return _tuple(self) # Modify function metadata to help with introspection and debugging for method in (__new__, _make.__func__, _replace, __repr__, _asdict, __getnewargs__): method.__qualname__ = f'{typename}.{method.__name__}' # Build-up the class namespace dictionary # and use type() to build the result class class_namespace = { '__doc__': f'{typename}({arg_list})', '__slots__': (), '_fields': field_names, '_field_defaults': field_defaults, '__new__': __new__, '_make': _make, '_replace': _replace, '__repr__': __repr__, '_asdict': _asdict, '__getnewargs__': __getnewargs__, } for index, name in enumerate(field_names): doc = _sys.intern(f'Alias for field number {index}') class_namespace[name] = _tuplegetter(index, doc) result = type(typename, (tuple,), class_namespace) # For pickling to work, the __module__ variable needs to be set to the frame # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython), or where the user has # specified a particular module. if module is None: try: module = _sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass if module is not None: result.__module__ = module return result ######################################################################## ### Counter ######################################################################## def _count_elements(mapping, iterable): 'Tally elements from the iterable.' mapping_get = mapping.get for elem in iterable: mapping[elem] = mapping_get(elem, 0) + 1 try: # Load C helper function if available from _collections import _count_elements except ImportError: pass class Counter(dict): '''Dict subclass for counting hashable items. Sometimes called a bag or multiset. Elements are stored as dictionary keys and their counts are stored as dictionary values. >>> c = Counter('abcdeabcdabcaba') # count elements from a string >>> c.most_common(3) # three most common elements [('a', 5), ('b', 4), ('c', 3)] >>> sorted(c) # list all unique elements ['a', 'b', 'c', 'd', 'e'] >>> ''.join(sorted(c.elements())) # list elements with repetitions 'aaaaabbbbcccdde' >>> sum(c.values()) # total of all counts 15 >>> c['a'] # count of letter 'a' 5 >>> for elem in 'shazam': # update counts from an iterable ... 
'''This module implements specialized container datatypes providing alternatives to Python's general purpose built-in containers, dict, list, set, and tuple. * namedtuple factory function for creating tuple subclasses with named fields * deque list-like container with fast appends and pops on either end * ChainMap dict-like class for creating a single view of multiple mappings * Counter dict subclass for counting hashable objects * OrderedDict dict subclass that remembers the order entries were added * defaultdict dict subclass that calls a factory function to supply missing values * UserDict wrapper around dictionary objects for easier dict subclassing * UserList wrapper around list objects for easier list subclassing * UserString wrapper around string objects for easier string subclassing ''' __all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', 'UserString', 'Counter', 'OrderedDict', 'ChainMap'] import _collections_abc from operator import itemgetter as _itemgetter, eq as _eq from keyword import iskeyword as _iskeyword import sys as _sys import heapq as _heapq from _weakref import proxy as _proxy from itertools import repeat as _repeat, chain as _chain, starmap as _starmap from reprlib import recursive_repr as _recursive_repr try: from _collections import deque except ImportError: pass else: _collections_abc.MutableSequence.register(deque) try: from _collections import defaultdict except ImportError: pass def __getattr__(name): # For backwards compatibility, continue to make the collections ABCs # through Python 3.6 available through the collections module. # Note, no new collections ABCs were added in Python 3.7 if name in _collections_abc.__all__: obj = getattr(_collections_abc, name) import warnings warnings.warn("Using or importing the ABCs from 'collections' instead " "of from 'collections.abc' is deprecated since Python 3.3, " "and in 3.10 it will stop working", DeprecationWarning, stacklevel=2) globals()[name] = obj return obj raise AttributeError(f'module {__name__!r} has no attribute {name!r}') ################################################################################ ### OrderedDict ################################################################################ class _OrderedDictKeysView(_collections_abc.KeysView): def __reversed__(self): yield from reversed(self._mapping) class _OrderedDictItemsView(_collections_abc.ItemsView): def __reversed__(self): for key in reversed(self._mapping): yield (key, self._mapping[key]) class _OrderedDictValuesView(_collections_abc.ValuesView): def __reversed__(self): for key in reversed(self._mapping): yield self._mapping[key] class _Link(object): __slots__ = 'prev', 'next', 'key', '__weakref__' class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as regular dictionaries. # The internal self.__map dict maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # The sentinel is in self.__hardroot with a weakref proxy in self.__root. # The prev links are weakref proxies (to prevent circular references). # Individual links are kept alive by the hard reference in self.__map. # Those hard references disappear when a key is deleted from an OrderedDict. 
def __init__(self, other=(), /, **kwds): '''Initialize an ordered dictionary. The signature is the same as regular dictionaries. Keyword argument order is preserved. ''' try: self.__root except AttributeError: self.__hardroot = _Link() self.__root = root = _proxy(self.__hardroot) root.prev = root.next = root self.__map = {} self.__update(other, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link at the end of the linked list, # and the inherited dictionary is updated with the new key/value pair. if key not in self: self.__map[key] = link = Link() root = self.__root last = root.prev link.prev, link.next, link.key = last, root, key last.next = link root.prev = proxy(link) dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which gets # removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link = self.__map.pop(key) link_prev = link.prev link_next = link.next link_prev.next = link_next link_next.prev = link_prev link.prev = None link.next = None def __iter__(self): 'od.__iter__() <==> iter(od)' # Traverse the linked list in order. root = self.__root curr = root.next while curr is not root: yield curr.key curr = curr.next def __reversed__(self): 'od.__reversed__() <==> reversed(od)' # Traverse the linked list in reverse order. root = self.__root curr = root.prev while curr is not root: yield curr.key curr = curr.prev def clear(self): 'od.clear() -> None. Remove all items from od.' root = self.__root root.prev = root.next = root self.__map.clear() dict.clear(self) def popitem(self, last=True): '''Remove and return a (key, value) pair from the dictionary. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root.prev link_prev = link.prev link_prev.next = root root.prev = link_prev else: link = root.next link_next = link.next root.next = link_next link_next.prev = root key = link.key del self.__map[key] value = dict.pop(self, key) return key, value def move_to_end(self, key, last=True): '''Move an existing element to the end (or beginning if last is false). Raise KeyError if the element does not exist. 
''' link = self.__map[key] link_prev = link.prev link_next = link.next soft_link = link_next.prev link_prev.next = link_next link_next.prev = link_prev root = self.__root if last: last = root.prev link.prev = last link.next = root root.prev = soft_link last.next = link else: first = root.next link.prev = root link.next = first first.prev = soft_link root.next = link def __sizeof__(self): sizeof = _sys.getsizeof n = len(self) + 1 # number of links including root size = sizeof(self.__dict__) # instance dictionary size += sizeof(self.__map) * 2 # internal dict and inherited dict size += sizeof(self.__hardroot) * n # link objects size += sizeof(self.__root) * n # proxy objects return size update = __update = _collections_abc.MutableMapping.update def keys(self): "D.keys() -> a set-like object providing a view on D's keys" return _OrderedDictKeysView(self) def items(self): "D.items() -> a set-like object providing a view on D's items" return _OrderedDictItemsView(self) def values(self): "D.values() -> an object providing a view on D's values" return _OrderedDictValuesView(self) __ne__ = _collections_abc.MutableMapping.__ne__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): '''Insert key with a value of default if key is not in the dictionary. Return the value for key if key is in the dictionary, else default. ''' if key in self: return self[key] self[key] = default return default @_recursive_repr() def __repr__(self): 'od.__repr__() <==> repr(od)' if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self.items())) def __reduce__(self): 'Return state information for pickling' inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) return self.__class__, (), inst_dict or None, None, iter(self.items()) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''Create a new ordered dictionary with keys from iterable and values set to value. ''' self = cls() for key in iterable: self[key] = value return self def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return dict.__eq__(self, other) and all(map(_eq, self, other)) return dict.__eq__(self, other) def __ior__(self, other): self.update(other) return self def __or__(self, other): if not isinstance(other, dict): return NotImplemented new = self.__class__(self) new.update(other) return new def __ror__(self, other): if not isinstance(other, dict): return NotImplemented new = self.__class__(other) new.update(self) return new try: from _collections import OrderedDict except ImportError: # Leave the pure Python version in place. 
pass ################################################################################ ### namedtuple ################################################################################ try: from _collections import _tuplegetter except ImportError: _tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc) def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): """Returns a new subclass of tuple with named fields. >>> Point = namedtuple('Point', ['x', 'y']) >>> Point.__doc__ # docstring for the new class 'Point(x, y)' >>> p = Point(11, y=22) # instantiate with positional args or keywords >>> p[0] + p[1] # indexable like a plain tuple 33 >>> x, y = p # unpack like a regular tuple >>> x, y (11, 22) >>> p.x + p.y # fields also accessible by name 33 >>> d = p._asdict() # convert to a dictionary >>> d['x'] 11 >>> Point(**d) # convert from a dictionary Point(x=11, y=22) >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields Point(x=100, y=22) """ # Validate the field names. At the user's option, either generate an error # message or automatically replace the field name with a valid name. if isinstance(field_names, str): field_names = field_names.replace(',', ' ').split() field_names = list(map(str, field_names)) typename = _sys.intern(str(typename)) if rename: seen = set() for index, name in enumerate(field_names): if (not name.isidentifier() or _iskeyword(name) or name.startswith('_') or name in seen): field_names[index] = f'_{index}' seen.add(name) for name in [typename] + field_names: if type(name) is not str: raise TypeError('Type names and field names must be strings') if not name.isidentifier(): raise ValueError('Type names and field names must be valid ' f'identifiers: {name!r}') if _iskeyword(name): raise ValueError('Type names and field names cannot be a ' f'keyword: {name!r}') seen = set() for name in field_names: if name.startswith('_') and not rename: raise ValueError('Field names cannot start with an underscore: ' f'{name!r}') if name in seen: raise ValueError(f'Encountered duplicate field name: {name!r}') seen.add(name) field_defaults = {} if defaults is not None: defaults = tuple(defaults) if len(defaults) > len(field_names): raise TypeError('Got more default values than field names') field_defaults = dict(reversed(list(zip(reversed(field_names), reversed(defaults))))) # Variables used in the methods and docstrings field_names = tuple(map(_sys.intern, field_names)) num_fields = len(field_names) arg_list = repr(field_names).replace("'", "")[1:-1] repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' tuple_new = tuple.__new__ _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip # Create all the named tuple methods to be added to the class namespace s = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))' namespace = {'_tuple_new': tuple_new, '__name__': f'namedtuple_{typename}'} __new__ = eval(s, namespace) __new__.__doc__ = f'Create new instance of {typename}({arg_list})' if defaults is not None: __new__.__defaults__ = defaults @classmethod def _make(cls, iterable): result = tuple_new(cls, iterable) if _len(result) != num_fields: raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') return result _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' 'or iterable') def _replace(self, /, **kwds): result = self._make(_map(kwds.pop, field_names, self)) if kwds: raise ValueError(f'Got unexpected field names: {list(kwds)!r}') return result _replace.__doc__ 
= (f'Return a new {typename} object replacing specified ' 'fields with new values') def __repr__(self): 'Return a nicely formatted representation string' return self.__class__.__name__ + repr_fmt % self def _asdict(self): 'Return a new dict which maps field names to their values.' return _dict(_zip(self._fields, self)) def __getnewargs__(self): 'Return self as a plain tuple. Used by copy and pickle.' return _tuple(self) # Modify function metadata to help with introspection and debugging for method in (__new__, _make.__func__, _replace, __repr__, _asdict, __getnewargs__): method.__qualname__ = f'{typename}.{method.__name__}' # Build-up the class namespace dictionary # and use type() to build the result class class_namespace = { '__doc__': f'{typename}({arg_list})', '__slots__': (), '_fields': field_names, '_field_defaults': field_defaults, '__new__': __new__, '_make': _make, '_replace': _replace, '__repr__': __repr__, '_asdict': _asdict, '__getnewargs__': __getnewargs__, } for index, name in enumerate(field_names): doc = _sys.intern(f'Alias for field number {index}') class_namespace[name] = _tuplegetter(index, doc) result = type(typename, (tuple,), class_namespace) # For pickling to work, the __module__ variable needs to be set to the frame # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython), or where the user has # specified a particular module. if module is None: try: module = _sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass if module is not None: result.__module__ = module return result ######################################################################## ### Counter ######################################################################## def _count_elements(mapping, iterable): 'Tally elements from the iterable.' mapping_get = mapping.get for elem in iterable: mapping[elem] = mapping_get(elem, 0) + 1 try: # Load C helper function if available from _collections import _count_elements except ImportError: pass class Counter(dict): '''Dict subclass for counting hashable items. Sometimes called a bag or multiset. Elements are stored as dictionary keys and their counts are stored as dictionary values. >>> c = Counter('abcdeabcdabcaba') # count elements from a string >>> c.most_common(3) # three most common elements [('a', 5), ('b', 4), ('c', 3)] >>> sorted(c) # list all unique elements ['a', 'b', 'c', 'd', 'e'] >>> ''.join(sorted(c.elements())) # list elements with repetitions 'aaaaabbbbcccdde' >>> sum(c.values()) # total of all counts 15 >>> c['a'] # count of letter 'a' 5 >>> for elem in 'shazam': # update counts from an iterable ... 
c[elem] += 1 # by adding 1 to each element's count >>> c['a'] # now there are seven 'a' 7 >>> del c['b'] # remove all 'b' >>> c['b'] # now there are zero 'b' 0 >>> d = Counter('simsalabim') # make another counter >>> c.update(d) # add in the second counter >>> c['a'] # now there are nine 'a' 9 >>> c.clear() # empty the counter >>> c Counter() Note: If a count is set to zero or reduced to zero, it will remain in the counter until the entry is deleted or the counter is cleared: >>> c = Counter('aaabbc') >>> c['b'] -= 2 # reduce the count of 'b' by two >>> c.most_common() # 'b' is still in, but its count is zero [('a', 3), ('c', 1), ('b', 0)] ''' # References: # http://en.wikipedia.org/wiki/Multiset # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm # http://code.activestate.com/recipes/259174/ # Knuth, TAOCP Vol. II section 4.6.3 def __init__(self, iterable=None, /, **kwds): '''Create a new, empty Counter object. And if given, count elements from an input iterable. Or, initialize the count from another mapping of elements to their counts. >>> c = Counter() # a new, empty counter >>> c = Counter('gallahad') # a new counter from an iterable >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping >>> c = Counter(a=4, b=2) # a new counter from keyword args ''' super(Counter, self).__init__() self.update(iterable, **kwds) def __missing__(self, key): 'The count of elements not in the Counter is zero.' # Needed so that self[missing_item] does not raise KeyError return 0 def most_common(self, n=None): '''List the n most common elements and their counts from the most common to the least. If n is None, then list all element counts. >>> Counter('abracadabra').most_common(3) [('a', 5), ('b', 2), ('r', 2)] ''' # Emulate Bag.sortedByCount from Smalltalk if n is None: return sorted(self.items(), key=_itemgetter(1), reverse=True) return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) def elements(self): '''Iterator over elements repeating each as many times as its count. >>> c = Counter('ABCABC') >>> sorted(c.elements()) ['A', 'A', 'B', 'B', 'C', 'C'] # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) >>> product = 1 >>> for factor in prime_factors.elements(): # loop over factors ... product *= factor # and multiply them >>> product 1836 Note, if an element's count has been set to zero or is a negative number, elements() will ignore it. ''' # Emulate Bag.do from Smalltalk and Multiset.begin from C++. return _chain.from_iterable(_starmap(_repeat, self.items())) # Override dict methods where necessary @classmethod def fromkeys(cls, iterable, v=None): # There is no equivalent method for counters because the semantics # would be ambiguous in cases such as Counter.fromkeys('aaabbc', v=2). # Initializing counters to zero values isn't necessary because zero # is already the default value for counter lookups. Initializing # to one is easily accomplished with Counter(set(iterable)). For # more exotic cases, create a dictionary first using a dictionary # comprehension or dict.fromkeys(). raise NotImplementedError( 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') def update(self, iterable=None, /, **kwds): '''Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. 
>>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4 ''' # The regular dict.update() operation makes no sense here because the # replace behavior results in the some of original untouched counts # being mixed-in with all of the other counts for a mismash that # doesn't have a straight-forward interpretation in most counting # contexts. Instead, we implement straight-addition. Both the inputs # and outputs are allowed to contain zero and negative counts. if iterable is not None: if isinstance(iterable, _collections_abc.Mapping): if self: self_get = self.get for elem, count in iterable.items(): self[elem] = count + self_get(elem, 0) else: super(Counter, self).update(iterable) # fast path when counter is empty else: _count_elements(self, iterable) if kwds: self.update(kwds) def subtract(self, iterable=None, /, **kwds): '''Like dict.update() but subtracts counts instead of replacing them. Counts can be reduced below zero. Both the inputs and outputs are allowed to contain zero and negative counts. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.subtract('witch') # subtract elements from another iterable >>> c.subtract(Counter('watch')) # subtract elements from another counter >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch 0 >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch -1 ''' if iterable is not None: self_get = self.get if isinstance(iterable, _collections_abc.Mapping): for elem, count in iterable.items(): self[elem] = self_get(elem, 0) - count else: for elem in iterable: self[elem] = self_get(elem, 0) - 1 if kwds: self.subtract(kwds) def copy(self): 'Return a shallow copy.' return self.__class__(self) def __reduce__(self): return self.__class__, (dict(self),) def __delitem__(self, elem): 'Like dict.__delitem__() but does not raise KeyError for missing values.' if elem in self: super().__delitem__(elem) def __eq__(self, other): 'True if all counts agree. Missing counts are treated as zero.' if not isinstance(other, Counter): return NotImplemented return all(self[e] == other[e] for c in (self, other) for e in c) def __ne__(self, other): 'True if any counts disagree. Missing counts are treated as zero.' if not isinstance(other, Counter): return NotImplemented return not self == other def __le__(self, other): 'True if all counts in self are a subset of those in other.' if not isinstance(other, Counter): return NotImplemented return all(self[e] <= other[e] for c in (self, other) for e in c) def __lt__(self, other): 'True if all counts in self are a proper subset of those in other.' if not isinstance(other, Counter): return NotImplemented return self <= other and self != other def __ge__(self, other): 'True if all counts in self are a superset of those in other.' if not isinstance(other, Counter): return NotImplemented return all(self[e] >= other[e] for c in (self, other) for e in c) def __gt__(self, other): 'True if all counts in self are a proper superset of those in other.' 
if not isinstance(other, Counter): return NotImplemented return self >= other and self != other def __repr__(self): if not self: return '%s()' % self.__class__.__name__ try: items = ', '.join(map('%r: %r'.__mod__, self.most_common())) return '%s({%s})' % (self.__class__.__name__, items) except TypeError: # handle case where values are not orderable return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) # Multiset-style mathematical operations discussed in: # Knuth TAOCP Volume II section 4.6.3 exercise 19 # and at http://en.wikipedia.org/wiki/Multiset # # Outputs guaranteed to only include positive counts. # # To strip negative and zero counts, add-in an empty counter: # c += Counter() # # When the multiplicities are all zero or one, multiset operations # are guaranteed to be equivalent to the corresponding operations # for regular sets. # Given counter multisets such as: # cp = Counter(a=1, b=0, c=1) # cq = Counter(c=1, d=0, e=1) # The corresponding regular sets would be: # sp = {'a', 'c'} # sq = {'c', 'e'} # All of the following relations would hold: # set(cp + cq) == sp | sq # set(cp - cq) == sp - sq # set(cp | cq) == sp | sq # set(cp & cq) == sp & sq # (cp == cq) == (sp == sq) # (cp != cq) == (sp != sq) # (cp <= cq) == (sp <= sq) # (cp < cq) == (sp < sq) # (cp >= cq) == (sp >= sq) # (cp > cq) == (sp > sq) def __add__(self, other): '''Add counts from two counters. >>> Counter('abbb') + Counter('bcc') Counter({'b': 4, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): newcount = count + other[elem] if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count > 0: result[elem] = count return result def __sub__(self, other): ''' Subtract count, but keep only results with positive counts. >>> Counter('abbbc') - Counter('bccd') Counter({'b': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): newcount = count - other[elem] if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count < 0: result[elem] = 0 - count return result def __or__(self, other): '''Union is the maximum of value in either of the input counters. >>> Counter('abbb') | Counter('bcc') Counter({'b': 3, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): other_count = other[elem] newcount = other_count if count < other_count else count if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count > 0: result[elem] = count return result def __and__(self, other): ''' Intersection is the minimum of corresponding counts. >>> Counter('abbb') & Counter('bcc') Counter({'b': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): other_count = other[elem] newcount = count if count < other_count else other_count if newcount > 0: result[elem] = newcount return result def __pos__(self): 'Adds an empty counter, effectively stripping negative and zero counts' result = Counter() for elem, count in self.items(): if count > 0: result[elem] = count return result def __neg__(self): '''Subtracts from an empty counter. Strips positive and zero counts, and flips the sign on negative counts. 
''' result = Counter() for elem, count in self.items(): if count < 0: result[elem] = 0 - count return result def _keep_positive(self): '''Internal method to strip elements with a negative or zero count''' nonpositive = [elem for elem, count in self.items() if not count > 0] for elem in nonpositive: del self[elem] return self def __iadd__(self, other): '''Inplace add from another counter, keeping only positive counts. >>> c = Counter('abbb') >>> c += Counter('bcc') >>> c Counter({'b': 4, 'c': 2, 'a': 1}) ''' for elem, count in other.items(): self[elem] += count return self._keep_positive() def __isub__(self, other): '''Inplace subtract counter, but keep only results with positive counts. >>> c = Counter('abbbc') >>> c -= Counter('bccd') >>> c Counter({'b': 2, 'a': 1}) ''' for elem, count in other.items(): self[elem] -= count return self._keep_positive() def __ior__(self, other): '''Inplace union is the maximum of value from either counter. >>> c = Counter('abbb') >>> c |= Counter('bcc') >>> c Counter({'b': 3, 'c': 2, 'a': 1}) ''' for elem, other_count in other.items(): count = self[elem] if other_count > count: self[elem] = other_count return self._keep_positive() def __iand__(self, other): '''Inplace intersection is the minimum of corresponding counts. >>> c = Counter('abbb') >>> c &= Counter('bcc') >>> c Counter({'b': 1}) ''' for elem, count in self.items(): other_count = other[elem] if other_count < count: self[elem] = other_count return self._keep_positive() ######################################################################## ### ChainMap ######################################################################## class ChainMap(_collections_abc.MutableMapping): ''' A ChainMap groups multiple dicts (or other mappings) together to create a single, updateable view. The underlying mappings are stored in a list. That list is public and can be accessed or updated using the *maps* attribute. There is no other state. Lookups search the underlying mappings successively until a key is found. In contrast, writes, updates, and deletions only operate on the first mapping. ''' def __init__(self, *maps): '''Initialize a ChainMap by setting *maps* to the given mappings. If no mappings are provided, a single empty dictionary is used. ''' self.maps = list(maps) or [{}] # always at least one map def __missing__(self, key): raise KeyError(key) def __getitem__(self, key): for mapping in self.maps: try: return mapping[key] # can't use 'key in mapping' with defaultdict except KeyError: pass return self.__missing__(key) # support subclasses that define __missing__ def get(self, key, default=None): return self[key] if key in self else default def __len__(self): return len(set().union(*self.maps)) # reuses stored hash values if possible def __iter__(self): d = {} for mapping in reversed(self.maps): d.update(mapping) # reuses stored hash values if possible return iter(d) def __contains__(self, key): return any(key in m for m in self.maps) def __bool__(self): return any(self.maps) @_recursive_repr() def __repr__(self): return f'{self.__class__.__name__}({", ".join(map(repr, self.maps))})' @classmethod def fromkeys(cls, iterable, *args): 'Create a ChainMap with a single dict created from the iterable.' 
return cls(dict.fromkeys(iterable, *args)) def copy(self): 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' return self.__class__(self.maps[0].copy(), *self.maps[1:]) __copy__ = copy def new_child(self, m=None): # like Django's Context.push() '''New ChainMap with a new map followed by all previous maps. If no map is provided, an empty dict is used. ''' if m is None: m = {} return self.__class__(m, *self.maps) @property def parents(self): # like Django's Context.pop() 'New ChainMap from maps[1:].' return self.__class__(*self.maps[1:]) def __setitem__(self, key, value): self.maps[0][key] = value def __delitem__(self, key): try: del self.maps[0][key] except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def popitem(self): 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' try: return self.maps[0].popitem() except KeyError: raise KeyError('No keys found in the first mapping.') def pop(self, key, *args): 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' try: return self.maps[0].pop(key, *args) except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def clear(self): 'Clear maps[0], leaving maps[1:] intact.' self.maps[0].clear() def __ior__(self, other): self.maps[0] |= other return self def __or__(self, other): if isinstance(other, _collections_abc.Mapping): m = self.maps[0].copy() m.update(other) return self.__class__(m, *self.maps[1:]) return NotImplemented def __ror__(self, other): if isinstance(other, _collections_abc.Mapping): m = dict(other) for child in reversed(self.maps): m.update(child) return self.__class__(m) return NotImplemented ################################################################################ ### UserDict ################################################################################ class UserDict(_collections_abc.MutableMapping): # Start by filling-out the abstract methods def __init__(self, dict=None, /, **kwargs): self.data = {} if dict is not None: self.update(dict) if kwargs: self.update(kwargs) def __len__(self): return len(self.data) def __getitem__(self, key): if key in self.data: return self.data[key] if hasattr(self.__class__, "__missing__"): return self.__class__.__missing__(self, key) raise KeyError(key) def __setitem__(self, key, item): self.data[key] = item def __delitem__(self, key): del self.data[key] def __iter__(self): return iter(self.data) # Modify __contains__ to work correctly when __missing__ is present def __contains__(self, key): return key in self.data # Now, add the methods in dicts but not in MutableMapping def __repr__(self): return repr(self.data) def __or__(self, other): if isinstance(other, UserDict): return self.__class__(self.data | other.data) if isinstance(other, dict): return self.__class__(self.data | other) return NotImplemented def __ror__(self, other): if isinstance(other, UserDict): return self.__class__(other.data | self.data) if isinstance(other, dict): return self.__class__(other | self.data) return NotImplemented def __ior__(self, other): if isinstance(other, UserDict): self.data |= other.data else: self.data |= other return self def __copy__(self): inst = self.__class__.__new__(self.__class__) inst.__dict__.update(self.__dict__) # Create a copy and avoid triggering descriptors inst.__dict__["data"] = self.__dict__["data"].copy() return inst def copy(self): if self.__class__ is UserDict: return UserDict(self.data.copy()) import copy data = self.data try: 
self.data = {} c = copy.copy(self) finally: self.data = data c.update(self) return c @classmethod def fromkeys(cls, iterable, value=None): d = cls() for key in iterable: d[key] = value return d ################################################################################ ### UserList ################################################################################ class UserList(_collections_abc.MutableSequence): """A more or less complete user-defined wrapper around list objects.""" def __init__(self, initlist=None): self.data = [] if initlist is not None: # XXX should this accept an arbitrary sequence? if type(initlist) == type(self.data): self.data[:] = initlist elif isinstance(initlist, UserList): self.data[:] = initlist.data[:] else: self.data = list(initlist) def __repr__(self): return repr(self.data) def __lt__(self, other): return self.data < self.__cast(other) def __le__(self, other): return self.data <= self.__cast(other) def __eq__(self, other): return self.data == self.__cast(other) def __gt__(self, other): return self.data > self.__cast(other) def __ge__(self, other): return self.data >= self.__cast(other) def __cast(self, other): return other.data if isinstance(other, UserList) else other def __contains__(self, item): return item in self.data def __len__(self): return len(self.data) def __getitem__(self, i): if isinstance(i, slice): return self.__class__(self.data[i]) else: return self.data[i] def __setitem__(self, i, item): self.data[i] = item def __delitem__(self, i): del self.data[i] def __add__(self, other): if isinstance(other, UserList): return self.__class__(self.data + other.data) elif isinstance(other, type(self.data)): return self.__class__(self.data + other) return self.__class__(self.data + list(other)) def __radd__(self, other): if isinstance(other, UserList): return self.__class__(other.data + self.data) elif isinstance(other, type(self.data)): return self.__class__(other + self.data) return self.__class__(list(other) + self.data) def __iadd__(self, other): if isinstance(other, UserList): self.data += other.data elif isinstance(other, type(self.data)): self.data += other else: self.data += list(other) return self def __mul__(self, n): return self.__class__(self.data*n) __rmul__ = __mul__ def __imul__(self, n): self.data *= n return self def __copy__(self): inst = self.__class__.__new__(self.__class__) inst.__dict__.update(self.__dict__) # Create a copy and avoid triggering descriptors inst.__dict__["data"] = self.__dict__["data"][:] return inst def append(self, item): self.data.append(item) def insert(self, i, item): self.data.insert(i, item) def pop(self, i=-1): return self.data.pop(i) def remove(self, item): self.data.remove(item) def clear(self): self.data.clear() def copy(self): return self.__class__(self) def count(self, item): return self.data.count(item) def index(self, item, *args): return self.data.index(item, *args) def reverse(self): self.data.reverse() def sort(self, /, *args, **kwds): self.data.sort(*args, **kwds) def extend(self, other): if isinstance(other, UserList): self.data.extend(other.data) else: self.data.extend(other) ################################################################################ ### UserString ################################################################################ class UserString(_collections_abc.Sequence): def __init__(self, seq): if isinstance(seq, str): self.data = seq elif isinstance(seq, UserString): self.data = seq.data[:] else: self.data = str(seq) def __str__(self): return str(self.data) def 

################################################################################
### UserString
################################################################################

class UserString(_collections_abc.Sequence):

    def __init__(self, seq):
        if isinstance(seq, str):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)

    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)

    def __getnewargs__(self):
        return (self.data[:],)

    def __eq__(self, string):
        if isinstance(string, UserString):
            return self.data == string.data
        return self.data == string

    def __lt__(self, string):
        if isinstance(string, UserString):
            return self.data < string.data
        return self.data < string

    def __le__(self, string):
        if isinstance(string, UserString):
            return self.data <= string.data
        return self.data <= string

    def __gt__(self, string):
        if isinstance(string, UserString):
            return self.data > string.data
        return self.data > string

    def __ge__(self, string):
        if isinstance(string, UserString):
            return self.data >= string.data
        return self.data >= string

    def __contains__(self, char):
        if isinstance(char, UserString):
            char = char.data
        return char in self.data

    def __len__(self): return len(self.data)
    def __getitem__(self, index): return self.__class__(self.data[index])

    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + other)
        return self.__class__(self.data + str(other))

    def __radd__(self, other):
        if isinstance(other, str):
            return self.__class__(other + self.data)
        return self.__class__(str(other) + self.data)

    def __mul__(self, n):
        return self.__class__(self.data * n)

    __rmul__ = __mul__

    def __mod__(self, args):
        return self.__class__(self.data % args)

    def __rmod__(self, template):
        return self.__class__(str(template) % self)

    # the following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def casefold(self): return self.__class__(self.data.casefold())

    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))

    def count(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.count(sub, start, end)

    def removeprefix(self, prefix, /):
        if isinstance(prefix, UserString):
            prefix = prefix.data
        return self.__class__(self.data.removeprefix(prefix))

    def removesuffix(self, suffix, /):
        if isinstance(suffix, UserString):
            suffix = suffix.data
        return self.__class__(self.data.removesuffix(suffix))

    def encode(self, encoding='utf-8', errors='strict'):
        encoding = 'utf-8' if encoding is None else encoding
        errors = 'strict' if errors is None else errors
        return self.data.encode(encoding, errors)

    def endswith(self, suffix, start=0, end=_sys.maxsize):
        return self.data.endswith(suffix, start, end)

    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))

    def find(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.find(sub, start, end)

    def format(self, /, *args, **kwds):
        return self.data.format(*args, **kwds)

    def format_map(self, mapping):
        return self.data.format_map(mapping)

    def index(self, sub, start=0, end=_sys.maxsize):
        return self.data.index(sub, start, end)

    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isascii(self): return self.data.isascii()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def isidentifier(self): return self.data.isidentifier()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isprintable(self): return self.data.isprintable()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()

    def join(self, seq): return self.data.join(seq)

    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))

    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))

    maketrans = str.maketrans

    def partition(self, sep):
        return self.data.partition(sep)

    def replace(self, old, new, maxsplit=-1):
        if isinstance(old, UserString):
            old = old.data
        if isinstance(new, UserString):
            new = new.data
        return self.__class__(self.data.replace(old, new, maxsplit))

    def rfind(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.rfind(sub, start, end)

    def rindex(self, sub, start=0, end=_sys.maxsize):
        return self.data.rindex(sub, start, end)

    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))

    def rpartition(self, sep):
        return self.data.rpartition(sep)

    def rstrip(self, chars=None):
        return self.__class__(self.data.rstrip(chars))

    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)

    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)

    def splitlines(self, keepends=False):
        return self.data.splitlines(keepends)

    def startswith(self, prefix, start=0, end=_sys.maxsize):
        return self.data.startswith(prefix, start, end)

    def strip(self, chars=None):
        return self.__class__(self.data.strip(chars))

    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())

    def translate(self, *args):
        return self.__class__(self.data.translate(*args))

    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
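
# Illustrative note (not part of the module): as with UserList, every method
# that produces string-like data wraps the result back in ``self.__class__``,
# so a subclass stays a subclass through slicing, +, %, and the str methods.
# The ``Path`` name below is a hypothetical example, not stdlib API:
#
#     >>> class Path(UserString):
#     ...     def __truediv__(self, other):
#     ...         return self.__class__(self.data.rstrip("/") + "/" + str(other))
#     >>> p = Path("/tmp") / "logs"
#     >>> type(p) is Path, str(p)
#     (True, '/tmp/logs')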
import random
from timeit import timeit
from typing import List, Tuple

from quicksort import quicksort_my_version, quicksort_book_version


def create_random_list(numbers: int, range_: Tuple[int, int], duplicates: bool = True) -> List[int]:
    """
    Generates a list of random, unsorted numbers from the given range.

    :param numbers: how many numbers do you want?
    :param range_: the range to draw numbers from; two numbers as the
        beginning and the end of the range
    :param duplicates: do you want duplicates in the result?
    :return: list of numbers
    """
    if duplicates:
        return random.choices(range(range_[0], range_[1] + 1), k=numbers)
    return random.sample(range(range_[0], range_[1] + 1), k=numbers)


def measure_time(test_code: str, setup: str, n: int) -> str:
    """
    Executes the test code, measures its execution time and returns it formatted.

    :param test_code: code of the function to execute
    :param setup: imports the function and the input list
    :param n: how many times the function will be executed
    :return: aggregate time of executing the function n times, in milliseconds
    """
    return '{:,.0f}'.format(timeit(test_code, setup, number=n) * 1000)


if __name__ == '__main__':
    numbers = 10_000
    range_ = (0, 100_000)
    duplicates = True
    timer_reps = 1_000

    random_list = create_random_list(numbers=numbers, range_=range_, duplicates=duplicates)
    list_copy = random_list.copy()
    print(f'Random list:\n{random_list}')
    print('-' * 100)
    print(f'{numbers:,} numbers from range({range_[0]:,}; {range_[1]:,}) with{"out" * (not duplicates)} duplicates.')
    print('-' * 100)

    print('QUICKSORT')
    test_code = "quicksort_my_version(list_copy, 0, len(list_copy) - 1)"
    setup = "from __main__ import quicksort_my_version, list_copy"
    time = measure_time(test_code, setup, timer_reps)
    print(f'My version: {time} milliseconds.')

    list_copy = random_list.copy()  # my version sorts the array in place, so a fresh copy is needed
    test_code = "quicksort_book_version(list_copy)"
    setup = "from __main__ import quicksort_book_version, list_copy"
    time = measure_time(test_code, setup, timer_reps)
    print(f'Book version: {time} milliseconds.')
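
# Illustrative sketch: the ``quicksort`` module imported above is not shown in
# this document; only its call signatures are known. These are plausible
# implementations matching those interfaces (the ``_sketch`` names mark them
# as hypothetical) -- an in-place Lomuto-partition version taking
# (list, low, high), and a textbook version returning a new list:

def quicksort_my_version_sketch(arr, low, high):
    """In-place quicksort, called as f(lst, 0, len(lst) - 1)."""
    if low < high:
        pivot = arr[high]
        i = low - 1
        for j in range(low, high):
            if arr[j] <= pivot:
                i += 1
                arr[i], arr[j] = arr[j], arr[i]
        # Move the pivot into its final position, then sort both halves.
        arr[i + 1], arr[high] = arr[high], arr[i + 1]
        quicksort_my_version_sketch(arr, low, i)
        quicksort_my_version_sketch(arr, i + 2, high)


def quicksort_book_version_sketch(arr):
    """Out-of-place quicksort in the style of many textbooks: builds new lists."""
    if len(arr) < 2:
        return arr
    pivot = arr[0]
    less = [x for x in arr[1:] if x <= pivot]
    greater = [x for x in arr[1:] if x > pivot]
    return quicksort_book_version_sketch(less) + [pivot] + quicksort_book_version_sketch(greater)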
"""Command line functions for calling the root mfa command""" from __future__ import annotations import argparse import atexit import multiprocessing as mp import sys import time from datetime import datetime from typing import TYPE_CHECKING from montreal_forced_aligner.command_line.adapt import run_adapt_model from montreal_forced_aligner.command_line.align import run_align_corpus from montreal_forced_aligner.command_line.anchor import run_anchor from montreal_forced_aligner.command_line.classify_speakers import run_classify_speakers from montreal_forced_aligner.command_line.create_segments import run_create_segments from montreal_forced_aligner.command_line.g2p import run_g2p from montreal_forced_aligner.command_line.model import run_model from montreal_forced_aligner.command_line.train_acoustic_model import run_train_acoustic_model from montreal_forced_aligner.command_line.train_dictionary import run_train_dictionary from montreal_forced_aligner.command_line.train_g2p import run_train_g2p from montreal_forced_aligner.command_line.train_ivector_extractor import ( run_train_ivector_extractor, ) from montreal_forced_aligner.command_line.train_lm import run_train_lm from montreal_forced_aligner.command_line.transcribe import run_transcribe_corpus from montreal_forced_aligner.command_line.validate import run_validate_corpus from montreal_forced_aligner.config import ( load_command_history, load_global_config, update_command_history, update_global_config, ) from montreal_forced_aligner.exceptions import MFAError from montreal_forced_aligner.models import MODEL_TYPES from montreal_forced_aligner.utils import check_third_party if TYPE_CHECKING: from argparse import ArgumentParser BEGIN = time.time() BEGIN_DATE = datetime.now() __all__ = ["ExitHooks", "create_parser", "main"] class ExitHooks(object): """ Class for capturing exit information for MFA commands """ def __init__(self): self.exit_code = None self.exception = None def hook(self): """Hook for capturing information about exit code and exceptions""" self._orig_exit = sys.exit sys.exit = self.exit sys.excepthook = self.exc_handler def exit(self, code=0): """Actual exit for the program""" self.exit_code = code self._orig_exit(code) def exc_handler(self, exc_type, exc, *args): """Handle and save exceptions""" self.exception = exc self.exit_code = 1 def history_save_handler(self) -> None: """ Handler for saving history on exit. 
In addition to the command run, also saves exit code, whether an exception was encountered, when the command was executed, and how long it took to run """ from montreal_forced_aligner.utils import get_mfa_version history_data = { "command": " ".join(sys.argv), "execution_time": time.time() - BEGIN, "date": BEGIN_DATE, "version": get_mfa_version(), } if self.exit_code is not None: history_data["exit_code"] = self.exit_code history_data["exception"] = "" elif self.exception is not None: history_data["exit_code"] = 1 history_data["exception"] = str(self.exception) else: history_data["exception"] = "" history_data["exit_code"] = 0 update_command_history(history_data) if self.exception: raise self.exception def create_parser() -> ArgumentParser: """ Constructs the MFA argument parser Returns ------- :class:`~argparse.ArgumentParser` MFA argument parser """ GLOBAL_CONFIG = load_global_config() def add_global_options(subparser: argparse.ArgumentParser, textgrid_output: bool = False): """ Add a set of global options to a subparser Parameters ---------- subparser: :class:`~argparse.ArgumentParser` Subparser to augment textgrid_output: bool Flag for whether the subparser is used for a command that generates TextGrids """ subparser.add_argument( "-t", "--temp_directory", "--temporary_directory", dest="temporary_directory", type=str, default=GLOBAL_CONFIG["temporary_directory"], help=f"Temporary directory root to store MFA created files, default is {GLOBAL_CONFIG["temporary_directory"]}", ) subparser.add_argument( "--disable_mp", help=f"Disable any multiprocessing during alignment (not recommended), default is {not GLOBAL_CONFIG["use_mp"]}", action="store_true", default=not GLOBAL_CONFIG["use_mp"], ) subparser.add_argument( "-j", "--num_jobs", type=int, default=GLOBAL_CONFIG["num_jobs"], help=f"Number of data splits (and cores to use if multiprocessing is enabled), defaults " f"is {GLOBAL_CONFIG["num_jobs"]}", ) subparser.add_argument( "-v", "--verbose", help=f"Output debug messages, default is {GLOBAL_CONFIG["verbose"]}", action="store_true", default=GLOBAL_CONFIG["verbose"], ) subparser.add_argument( "--clean", help=f"Remove files from previous runs, default is {GLOBAL_CONFIG["clean"]}", action="store_true", default=GLOBAL_CONFIG["clean"], ) subparser.add_argument( "--overwrite", help=f"Overwrite output files when they exist, default is {GLOBAL_CONFIG["overwrite"]}", action="store_true", default=GLOBAL_CONFIG["overwrite"], ) subparser.add_argument( "--debug", help=f"Run extra steps for debugging issues, default is {GLOBAL_CONFIG["debug"]}", action="store_true", default=GLOBAL_CONFIG["debug"], ) if textgrid_output: subparser.add_argument( "--disable_textgrid_cleanup", help=f"Disable extra clean up steps on TextGrid output, default is {not GLOBAL_CONFIG["cleanup_textgrids"]}", action="store_true", default=not GLOBAL_CONFIG["cleanup_textgrids"], ) pretrained_acoustic = ", ".join(MODEL_TYPES["acoustic"].get_available_models()) if not pretrained_acoustic: pretrained_acoustic = ( "you can use ``mfa model download acoustic`` to get pretrained MFA models" ) pretrained_ivector = ", ".join(MODEL_TYPES["ivector"].get_available_models()) if not pretrained_ivector: pretrained_ivector = ( "you can use ``mfa model download ivector`` to get pretrained MFA models" ) pretrained_g2p = ", ".join(MODEL_TYPES["g2p"].get_available_models()) if not pretrained_g2p: pretrained_g2p = "you can use ``mfa model download g2p`` to get pretrained MFA models" pretrained_lm = ", 
".join(MODEL_TYPES["language_model"].get_available_models()) if not pretrained_lm: pretrained_lm = ( "you can use ``mfa model download language_model`` to get pretrained MFA models" ) pretrained_dictionary = ", ".join(MODEL_TYPES["dictionary"].get_available_models()) if not pretrained_dictionary: pretrained_dictionary = ( "you can use ``mfa model download dictionary`` to get MFA dictionaries" ) dictionary_path_help = f"Full path to pronunciation dictionary, or saved dictionary name ({pretrained_dictionary})" acoustic_model_path_help = ( f"Full path to pre-trained acoustic model, or saved model name ({pretrained_acoustic})" ) language_model_path_help = ( f"Full path to pre-trained language model, or saved model name ({pretrained_lm})" ) ivector_model_path_help = f"Full path to pre-trained ivector extractor model, or saved model name ({pretrained_ivector})" g2p_model_path_help = ( f"Full path to pre-trained G2P model, or saved model name ({pretrained_g2p}). " "If not specified, then orthographic transcription is split into pronunciations." ) parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(dest="subcommand") subparsers.required = True _ = subparsers.add_parser("version") align_parser = subparsers.add_parser( "align", help="Align a corpus with a pretrained acoustic model" ) align_parser.add_argument("corpus_directory", help="Full path to the directory to align") align_parser.add_argument( "dictionary_path", help=dictionary_path_help, type=str, ) align_parser.add_argument( "acoustic_model_path", type=str, help=acoustic_model_path_help, ) align_parser.add_argument( "output_directory", type=str, help="Full path to output directory, will be created if it doesn't exist", ) align_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for alignment" ) align_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) align_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) align_parser.add_argument( "--reference_directory", type=str, default="", help="Directory containing gold standard alignments to evaluate", ) align_parser.add_argument( "--custom_mapping_path", type=str, default="", help="YAML file for mapping phones across phone sets in evaluations", ) add_global_options(align_parser, textgrid_output=True) adapt_parser = subparsers.add_parser("adapt", help="Adapt an acoustic model to a new corpus") adapt_parser.add_argument("corpus_directory", help="Full path to the directory to align") adapt_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help) adapt_parser.add_argument( "acoustic_model_path", type=str, help=acoustic_model_path_help, ) adapt_parser.add_argument( "output_paths", type=str, nargs="+", help="Path to save the new acoustic model, path to export aligned TextGrids, or both", ) adapt_parser.add_argument( "-o", "--output_model_path", type=str, default="", help="Full path to save adapted acoustic model", ) adapt_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for alignment" ) adapt_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) adapt_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory 
root to use for finding audio files", ) add_global_options(adapt_parser, textgrid_output=True) train_parser = subparsers.add_parser( "train", help="Train a new acoustic model on a corpus and optionally export alignments" ) train_parser.add_argument( "corpus_directory", type=str, help="Full path to the source directory to align" ) train_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help, default="") train_parser.add_argument( "output_paths", type=str, nargs="+", help="Path to save the new acoustic model, path to export aligned TextGrids, or both", ) train_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training and alignment", ) train_parser.add_argument( "-o", "--output_model_path", type=str, default="", help="Full path to save resulting acoustic model", ) train_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of filenames to use for determining speaker, " "default is to use directory names", ) train_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) train_parser.add_argument( "--phone_set", dest="phone_set_type", type=str, help="Enable extra decision tree modeling based on the phone set", default="UNKNOWN", choices=["AUTO", "IPA", "ARPA", "PINYIN"], ) add_global_options(train_parser, textgrid_output=True) validate_parser = subparsers.add_parser("validate", help="Validate a corpus for use in MFA") validate_parser.add_argument( "corpus_directory", type=str, help="Full path to the source directory to align" ) validate_parser.add_argument( "dictionary_path", type=str, help=dictionary_path_help, default="" ) validate_parser.add_argument( "acoustic_model_path", type=str, nargs="?", default="", help=acoustic_model_path_help, ) validate_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) validate_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training and alignment", ) validate_parser.add_argument( "--test_transcriptions", help="Test accuracy of transcriptions", action="store_true" ) validate_parser.add_argument( "--ignore_acoustics", "--skip_acoustics", dest="ignore_acoustics", help="Skip acoustic feature generation and associated validation", action="store_true", ) validate_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) validate_parser.add_argument( "--phone_set", dest="phone_set_type", type=str, help="Enable extra decision tree modeling based on the phone set", default="UNKNOWN", choices=["AUTO", "IPA", "ARPA", "PINYIN"], ) add_global_options(validate_parser) g2p_parser = subparsers.add_parser( "g2p", help="Generate a pronunciation dictionary using a G2P model" ) g2p_parser.add_argument( "g2p_model_path", help=g2p_model_path_help, type=str, nargs="?", ) g2p_parser.add_argument( "input_path", type=str, help="Corpus to base word list on or a text file of words to generate pronunciations", ) g2p_parser.add_argument("output_path", type=str, help="Path to save output dictionary") g2p_parser.add_argument( "--include_bracketed", help="Included words enclosed by brackets, job_name.e. 
[...], (...), <...>", action="store_true", ) g2p_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for G2P" ) add_global_options(g2p_parser) train_g2p_parser = subparsers.add_parser( "train_g2p", help="Train a G2P model from a pronunciation dictionary" ) train_g2p_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help) train_g2p_parser.add_argument( "output_model_path", type=str, help="Desired location of generated model" ) train_g2p_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for G2P" ) train_g2p_parser.add_argument( "--evaluate", "--validate", dest="evaluation_mode", action="store_true", help="Perform an analysis of accuracy training on " "most of the data and validating on an unseen subset", ) add_global_options(train_g2p_parser) help_message = "Inspect, download, and save pretrained MFA models" model_parser = subparsers.add_parser( "model", aliases=["models"], description=help_message, help=help_message ) model_subparsers = model_parser.add_subparsers(dest="action") model_subparsers.required = True help_message = "Download a pretrained model from the MFA repository" model_download_parser = model_subparsers.add_parser( "download", description=help_message, help=help_message ) model_download_parser.add_argument( "model_type", choices=sorted(MODEL_TYPES), help="Type of model to download" ) model_download_parser.add_argument( "name", help="Name of language code to download, if not specified, " "will list all available languages", type=str, nargs="?", ) help_message = "List of saved models" model_list_parser = model_subparsers.add_parser( "list", description=help_message, help=help_message ) model_list_parser.add_argument( "model_type", choices=sorted(MODEL_TYPES), type=str, nargs="?", help="Type of model to list", ) help_message = "Inspect a model and output its metadata" model_inspect_parser = model_subparsers.add_parser( "inspect", description=help_message, help=help_message ) model_inspect_parser.add_argument( "model_type", choices=sorted(MODEL_TYPES), type=str, nargs="?", help="Type of model to download", ) model_inspect_parser.add_argument( "name", type=str, help="Name of pretrained model or path to MFA model to inspect" ) help_message = "Save a MFA model to the pretrained directory for name-based referencing" model_save_parser = model_subparsers.add_parser( "save", description=help_message, help=help_message ) model_save_parser.add_argument( "model_type", type=str, choices=sorted(MODEL_TYPES), help="Type of MFA model" ) model_save_parser.add_argument( "path", help="Path to MFA model to save for invoking with just its name" ) model_save_parser.add_argument( "--name", help="Name to use as reference (defaults to the name of the zip file", type=str, default="", ) model_save_parser.add_argument( "--overwrite", help="Flag to overwrite existing pretrained models with the same name (and model type)", action="store_true", ) train_lm_parser = subparsers.add_parser( "train_lm", help="Train a language model from a corpus" ) train_lm_parser.add_argument( "source_path", type=str, help="Full path to the source directory to train from, alternatively " "an ARPA format language model to convert for MFA use", ) train_lm_parser.add_argument( "output_model_path", type=str, help="Full path to save resulting language model" ) train_lm_parser.add_argument( "-m", "--model_path", type=str, help="Full path to existing language model to merge probabilities", ) train_lm_parser.add_argument( "-w", 
"--model_weight", type=float, default=1.0, help="Weight factor for supplemental language model, defaults to 1.0", ) train_lm_parser.add_argument( "--dictionary_path", type=str, help=dictionary_path_help, default="" ) train_lm_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training and alignment", ) add_global_options(train_lm_parser) train_dictionary_parser = subparsers.add_parser( "train_dictionary", help="Calculate pronunciation probabilities for a dictionary based on alignment results in a corpus", ) train_dictionary_parser.add_argument( "corpus_directory", help="Full path to the directory to align" ) train_dictionary_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help) train_dictionary_parser.add_argument( "acoustic_model_path", type=str, help=acoustic_model_path_help, ) train_dictionary_parser.add_argument( "output_directory", type=str, help="Full path to output directory, will be created if it doesn't exist", ) train_dictionary_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for alignment" ) train_dictionary_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) add_global_options(train_dictionary_parser) train_ivector_parser = subparsers.add_parser( "train_ivector", help="Train an ivector extractor from a corpus and pretrained acoustic model", ) train_ivector_parser.add_argument( "corpus_directory", type=str, help="Full path to the source directory to train the ivector extractor", ) train_ivector_parser.add_argument( "output_model_path", type=str, help="Full path to save resulting ivector extractor", ) train_ivector_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of filenames to use for determining speaker, " "default is to use directory names", ) train_ivector_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training" ) add_global_options(train_ivector_parser) classify_speakers_parser = subparsers.add_parser( "classify_speakers", help="Use an ivector extractor to cluster utterances into speakers" ) classify_speakers_parser.add_argument( "corpus_directory", type=str, help="Full path to the source directory to run speaker classification", ) classify_speakers_parser.add_argument( "ivector_extractor_path", type=str, default="", help=ivector_model_path_help ) classify_speakers_parser.add_argument( "output_directory", type=str, help="Full path to output directory, will be created if it doesn't exist", ) classify_speakers_parser.add_argument( "-s", "--num_speakers", type=int, default=0, help="Number of speakers if known" ) classify_speakers_parser.add_argument( "--cluster", help="Using clustering instead of classification", action="store_true" ) classify_speakers_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for ivector extraction", ) add_global_options(classify_speakers_parser) create_segments_parser = subparsers.add_parser( "create_segments", help="Create segments based on voice activity dectection (VAD)" ) create_segments_parser.add_argument( "corpus_directory", help="Full path to the source directory to run VAD segmentation" ) create_segments_parser.add_argument( "output_directory", type=str, help="Full path to output directory, will be created if it doesn't exist", ) 
create_segments_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for segmentation" ) add_global_options(create_segments_parser) transcribe_parser = subparsers.add_parser( "transcribe", help="Transcribe utterances using an acoustic model, language model, and pronunciation dictionary", ) transcribe_parser.add_argument( "corpus_directory", type=str, help="Full path to the directory to transcribe" ) transcribe_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help) transcribe_parser.add_argument( "acoustic_model_path", type=str, help=acoustic_model_path_help, ) transcribe_parser.add_argument( "language_model_path", type=str, help=language_model_path_help, ) transcribe_parser.add_argument( "output_directory", type=str, help="Full path to output directory, will be created if it doesn't exist", ) transcribe_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for transcription" ) transcribe_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) transcribe_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) transcribe_parser.add_argument( "-e", "--evaluate", dest="evaluation_mode", help="Evaluate the transcription against golden texts", action="store_true", ) add_global_options(transcribe_parser) config_parser = subparsers.add_parser( "configure", help="The configure command is used to set global defaults for MFA so " "you don't have to set them every time you call an MFA command.", ) config_parser.add_argument( "-t", "--temp_directory", "--temporary_directory", dest="temporary_directory", type=str, default="", help=f"Set the default temporary directory, default is {GLOBAL_CONFIG["temporary_directory"]}", ) config_parser.add_argument( "-j", "--num_jobs", type=int, help=f"Set the number of processes to use by default, defaults to {GLOBAL_CONFIG["num_jobs"]}", ) config_parser.add_argument( "--always_clean", help="Always remove files from previous runs by default", action="store_true", ) config_parser.add_argument( "--never_clean", help="Don't remove files from previous runs by default", action="store_true", ) config_parser.add_argument( "--always_verbose", help="Default to verbose output", action="store_true" ) config_parser.add_argument( "--never_verbose", help="Default to non-verbose output", action="store_true" ) config_parser.add_argument( "--always_debug", help="Default to running debugging steps", action="store_true" ) config_parser.add_argument( "--never_debug", help="Default to not running debugging steps", action="store_true" ) config_parser.add_argument( "--always_overwrite", help="Always overwrite output files", action="store_true" ) config_parser.add_argument( "--never_overwrite", help="Never overwrite output files (if file already exists, " "the output will be saved in the temp directory)", action="store_true", ) config_parser.add_argument( "--disable_mp", help="Disable all multiprocessing (not recommended as it will usually " "increase processing times)", action="store_true", ) config_parser.add_argument( "--enable_mp", help="Enable multiprocessing (recommended and enabled by default)", action="store_true", ) config_parser.add_argument( "--disable_textgrid_cleanup", help="Disable postprocessing of TextGrids that cleans up " "silences and recombines compound words and 
clitics", action="store_true", ) config_parser.add_argument( "--enable_textgrid_cleanup", help="Enable postprocessing of TextGrids that cleans up " "silences and recombines compound words and clitics", action="store_true", ) config_parser.add_argument( "--disable_detect_phone_set", help="Disable auto-detecting phone sets from the dictionary during training", action="store_true", ) config_parser.add_argument( "--enable_detect_phone_set", help="Enable auto-detecting phone sets from the dictionary during training", action="store_true", ) config_parser.add_argument( "--disable_terminal_colors", help="Turn off colored text in output", action="store_true" ) config_parser.add_argument( "--enable_terminal_colors", help="Turn on colored text in output", action="store_true" ) config_parser.add_argument( "--terminal_width", help=f"Set width of terminal output, " f"currently set to {GLOBAL_CONFIG["terminal_width"]}", default=GLOBAL_CONFIG["terminal_width"], type=int, ) config_parser.add_argument( "--blas_num_threads", help=f"Number of threads to use for BLAS libraries, 1 is recommended " f"due to how much MFA relies on multiprocessing. " f"Currently set to {GLOBAL_CONFIG["blas_num_threads"]}", default=GLOBAL_CONFIG["blas_num_threads"], type=int, ) history_parser = subparsers.add_parser("history", help="Show previously run mfa commands") _ = subparsers.add_parser("thirdparty", help="DEPRECATED: Please install Kaldi via conda.") _ = subparsers.add_parser( "download", help="DEPRECATED: Please use mfa model download instead." ) history_parser.add_argument( "depth", type=int, help="Number of commands to list", nargs="?", default=10 ) history_parser.add_argument( "-v", "--verbose", help=f"Output debug messages, default is {GLOBAL_CONFIG["verbose"]}", action="store_true", ) _ = subparsers.add_parser( "anchor", aliases=["annotator"], help="Launch Anchor Annotator (if installed)" ) return parser parser = create_parser() def print_history(args): depth = args.depth history = load_command_history()[-depth:] if args.verbose: print("command\tDate\tExecution time\tVersion\tExit code\tException") for h in history: execution_time = time.strftime("%H:%M:%S", time.gmtime(h["execution_time"])) d = h["date"].isoformat() print( f"{h["command"]}\t{d}\t{execution_time}\t{h["version"]}\t{h["exit_code"]}\t{h["exception"]}" ) pass else: for h in history: print(h["command"]) def main() -> None: """ Main function for the MFA command line interface """ check_third_party() hooks = ExitHooks() hooks.hook() atexit.register(hooks.history_save_handler) from colorama import init init() parser = create_parser() mp.freeze_support() args, unknown = parser.parse_known_args() for short in ["-c", "-d"]: if short in unknown: print( f"Due to the number of options that `{short}` could refer to, it is not accepted. " "Please specify the full argument", file=sys.stderr, ) sys.exit(1) try: if args.subcommand in ["g2p", "train_g2p"]: try: import pynini # noqa except ImportError: print( "There was an issue importing Pynini, please ensure that it is installed. 
If you are on Windows, " "please use the Windows Subsystem for Linux to use g2p functionality.", file=sys.stderr, ) sys.exit(1) if args.subcommand == "align": run_align_corpus(args, unknown) elif args.subcommand == "adapt": run_adapt_model(args, unknown) elif args.subcommand == "train": run_train_acoustic_model(args, unknown) elif args.subcommand == "g2p": run_g2p(args, unknown) elif args.subcommand == "train_g2p": run_train_g2p(args, unknown) elif args.subcommand == "validate": run_validate_corpus(args, unknown) elif args.subcommand in ["model", "models"]: run_model(args) elif args.subcommand == "train_lm": run_train_lm(args, unknown) elif args.subcommand == "train_dictionary": run_train_dictionary(args, unknown) elif args.subcommand == "train_ivector": run_train_ivector_extractor(args, unknown) elif args.subcommand == "classify_speakers": # pragma: no cover run_classify_speakers(args, unknown) elif args.subcommand in ["annotator", "anchor"]: run_anchor() elif args.subcommand == "transcribe": run_transcribe_corpus(args, unknown) elif args.subcommand == "create_segments": run_create_segments(args, unknown) elif args.subcommand == "configure": update_global_config(args) global GLOBAL_CONFIG GLOBAL_CONFIG = load_global_config() elif args.subcommand == "history": print_history(args) elif args.subcommand == "version": from montreal_forced_aligner.utils import get_mfa_version print(get_mfa_version()) elif args.subcommand == "thirdparty": # Deprecated command raise DeprecationWarning( "Necessary thirdparty executables are now installed via conda. Please refer to the installation docs for the updated commands." ) elif args.subcommand == "download": # Deprecated command raise DeprecationWarning( "Downloading models is now run through the `mfa model download` command, please use that instead." ) except MFAError as e: if getattr(args, "debug", False): raise print(e, file=sys.stderr) sys.exit(1) if __name__ == "__main__": import warnings warnings.warn( "Use 'python -m montreal_forced_aligner', not 'python -m montreal_forced_aligner.command_line.mfa'", DeprecationWarning, ) main()
"""Command line functions for calling the root mfa command""" from __future__ import annotations import argparse import atexit import multiprocessing as mp import sys import time from datetime import datetime from typing import TYPE_CHECKING from montreal_forced_aligner.command_line.adapt import run_adapt_model from montreal_forced_aligner.command_line.align import run_align_corpus from montreal_forced_aligner.command_line.anchor import run_anchor from montreal_forced_aligner.command_line.classify_speakers import run_classify_speakers from montreal_forced_aligner.command_line.create_segments import run_create_segments from montreal_forced_aligner.command_line.g2p import run_g2p from montreal_forced_aligner.command_line.model import run_model from montreal_forced_aligner.command_line.train_acoustic_model import run_train_acoustic_model from montreal_forced_aligner.command_line.train_dictionary import run_train_dictionary from montreal_forced_aligner.command_line.train_g2p import run_train_g2p from montreal_forced_aligner.command_line.train_ivector_extractor import ( run_train_ivector_extractor, ) from montreal_forced_aligner.command_line.train_lm import run_train_lm from montreal_forced_aligner.command_line.transcribe import run_transcribe_corpus from montreal_forced_aligner.command_line.validate import run_validate_corpus from montreal_forced_aligner.config import ( load_command_history, load_global_config, update_command_history, update_global_config, ) from montreal_forced_aligner.exceptions import MFAError from montreal_forced_aligner.models import MODEL_TYPES from montreal_forced_aligner.utils import check_third_party if TYPE_CHECKING: from argparse import ArgumentParser BEGIN = time.time() BEGIN_DATE = datetime.now() __all__ = ["ExitHooks", "create_parser", "main"] class ExitHooks(object): """ Class for capturing exit information for MFA commands """ def __init__(self): self.exit_code = None self.exception = None def hook(self): """Hook for capturing information about exit code and exceptions""" self._orig_exit = sys.exit sys.exit = self.exit sys.excepthook = self.exc_handler def exit(self, code=0): """Actual exit for the program""" self.exit_code = code self._orig_exit(code) def exc_handler(self, exc_type, exc, *args): """Handle and save exceptions""" self.exception = exc self.exit_code = 1 def history_save_handler(self) -> None: """ Handler for saving history on exit. 

def create_parser() -> ArgumentParser:
    """
    Constructs the MFA argument parser

    Returns
    -------
    :class:`~argparse.ArgumentParser`
        MFA argument parser
    """
    GLOBAL_CONFIG = load_global_config()

    def add_global_options(subparser: argparse.ArgumentParser, textgrid_output: bool = False):
        """
        Add a set of global options to a subparser

        Parameters
        ----------
        subparser: :class:`~argparse.ArgumentParser`
            Subparser to augment
        textgrid_output: bool
            Flag for whether the subparser is used for a command that generates TextGrids
        """
        subparser.add_argument(
            "-t",
            "--temp_directory",
            "--temporary_directory",
            dest="temporary_directory",
            type=str,
            default=GLOBAL_CONFIG["temporary_directory"],
            help=f"Temporary directory root to store MFA created files, default is {GLOBAL_CONFIG['temporary_directory']}",
        )
        subparser.add_argument(
            "--disable_mp",
            help=f"Disable any multiprocessing during alignment (not recommended), default is {not GLOBAL_CONFIG['use_mp']}",
            action="store_true",
            default=not GLOBAL_CONFIG["use_mp"],
        )
        subparser.add_argument(
            "-j",
            "--num_jobs",
            type=int,
            default=GLOBAL_CONFIG["num_jobs"],
            help=f"Number of data splits (and cores to use if multiprocessing is enabled), default "
            f"is {GLOBAL_CONFIG['num_jobs']}",
        )
        subparser.add_argument(
            "-v",
            "--verbose",
            help=f"Output debug messages, default is {GLOBAL_CONFIG['verbose']}",
            action="store_true",
            default=GLOBAL_CONFIG["verbose"],
        )
        subparser.add_argument(
            "--clean",
            help=f"Remove files from previous runs, default is {GLOBAL_CONFIG['clean']}",
            action="store_true",
            default=GLOBAL_CONFIG["clean"],
        )
        subparser.add_argument(
            "--overwrite",
            help=f"Overwrite output files when they exist, default is {GLOBAL_CONFIG['overwrite']}",
            action="store_true",
            default=GLOBAL_CONFIG["overwrite"],
        )
        subparser.add_argument(
            "--debug",
            help=f"Run extra steps for debugging issues, default is {GLOBAL_CONFIG['debug']}",
            action="store_true",
            default=GLOBAL_CONFIG["debug"],
        )
        if textgrid_output:
            subparser.add_argument(
                "--disable_textgrid_cleanup",
                help=f"Disable extra clean up steps on TextGrid output, default is {not GLOBAL_CONFIG['cleanup_textgrids']}",
                action="store_true",
                default=not GLOBAL_CONFIG["cleanup_textgrids"],
            )
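
    # Illustrative note (not in the original source): add_global_options() is
    # applied to every data-processing subparser below, so flags like --clean,
    # --overwrite, -j/--num_jobs and -t/--temporary_directory are accepted
    # uniformly across subcommands.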
".join(MODEL_TYPES["language_model"].get_available_models()) if not pretrained_lm: pretrained_lm = ( "you can use ``mfa model download language_model`` to get pretrained MFA models" ) pretrained_dictionary = ", ".join(MODEL_TYPES["dictionary"].get_available_models()) if not pretrained_dictionary: pretrained_dictionary = ( "you can use ``mfa model download dictionary`` to get MFA dictionaries" ) dictionary_path_help = f"Full path to pronunciation dictionary, or saved dictionary name ({pretrained_dictionary})" acoustic_model_path_help = ( f"Full path to pre-trained acoustic model, or saved model name ({pretrained_acoustic})" ) language_model_path_help = ( f"Full path to pre-trained language model, or saved model name ({pretrained_lm})" ) ivector_model_path_help = f"Full path to pre-trained ivector extractor model, or saved model name ({pretrained_ivector})" g2p_model_path_help = ( f"Full path to pre-trained G2P model, or saved model name ({pretrained_g2p}). " "If not specified, then orthographic transcription is split into pronunciations." ) parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(dest="subcommand") subparsers.required = True _ = subparsers.add_parser("version") align_parser = subparsers.add_parser( "align", help="Align a corpus with a pretrained acoustic model" ) align_parser.add_argument("corpus_directory", help="Full path to the directory to align") align_parser.add_argument( "dictionary_path", help=dictionary_path_help, type=str, ) align_parser.add_argument( "acoustic_model_path", type=str, help=acoustic_model_path_help, ) align_parser.add_argument( "output_directory", type=str, help="Full path to output directory, will be created if it doesn't exist", ) align_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for alignment" ) align_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) align_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) align_parser.add_argument( "--reference_directory", type=str, default="", help="Directory containing gold standard alignments to evaluate", ) align_parser.add_argument( "--custom_mapping_path", type=str, default="", help="YAML file for mapping phones across phone sets in evaluations", ) add_global_options(align_parser, textgrid_output=True) adapt_parser = subparsers.add_parser("adapt", help="Adapt an acoustic model to a new corpus") adapt_parser.add_argument("corpus_directory", help="Full path to the directory to align") adapt_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help) adapt_parser.add_argument( "acoustic_model_path", type=str, help=acoustic_model_path_help, ) adapt_parser.add_argument( "output_paths", type=str, nargs="+", help="Path to save the new acoustic model, path to export aligned TextGrids, or both", ) adapt_parser.add_argument( "-o", "--output_model_path", type=str, default="", help="Full path to save adapted acoustic model", ) adapt_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for alignment" ) adapt_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) adapt_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory 
root to use for finding audio files", ) add_global_options(adapt_parser, textgrid_output=True) train_parser = subparsers.add_parser( "train", help="Train a new acoustic model on a corpus and optionally export alignments" ) train_parser.add_argument( "corpus_directory", type=str, help="Full path to the source directory to align" ) train_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help, default="") train_parser.add_argument( "output_paths", type=str, nargs="+", help="Path to save the new acoustic model, path to export aligned TextGrids, or both", ) train_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training and alignment", ) train_parser.add_argument( "-o", "--output_model_path", type=str, default="", help="Full path to save resulting acoustic model", ) train_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of filenames to use for determining speaker, " "default is to use directory names", ) train_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) train_parser.add_argument( "--phone_set", dest="phone_set_type", type=str, help="Enable extra decision tree modeling based on the phone set", default="UNKNOWN", choices=["AUTO", "IPA", "ARPA", "PINYIN"], ) add_global_options(train_parser, textgrid_output=True) validate_parser = subparsers.add_parser("validate", help="Validate a corpus for use in MFA") validate_parser.add_argument( "corpus_directory", type=str, help="Full path to the source directory to align" ) validate_parser.add_argument( "dictionary_path", type=str, help=dictionary_path_help, default="" ) validate_parser.add_argument( "acoustic_model_path", type=str, nargs="?", default="", help=acoustic_model_path_help, ) validate_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) validate_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training and alignment", ) validate_parser.add_argument( "--test_transcriptions", help="Test accuracy of transcriptions", action="store_true" ) validate_parser.add_argument( "--ignore_acoustics", "--skip_acoustics", dest="ignore_acoustics", help="Skip acoustic feature generation and associated validation", action="store_true", ) validate_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) validate_parser.add_argument( "--phone_set", dest="phone_set_type", type=str, help="Enable extra decision tree modeling based on the phone set", default="UNKNOWN", choices=["AUTO", "IPA", "ARPA", "PINYIN"], ) add_global_options(validate_parser) g2p_parser = subparsers.add_parser( "g2p", help="Generate a pronunciation dictionary using a G2P model" ) g2p_parser.add_argument( "g2p_model_path", help=g2p_model_path_help, type=str, nargs="?", ) g2p_parser.add_argument( "input_path", type=str, help="Corpus to base word list on or a text file of words to generate pronunciations", ) g2p_parser.add_argument("output_path", type=str, help="Path to save output dictionary") g2p_parser.add_argument( "--include_bracketed", help="Included words enclosed by brackets, job_name.e. 
[...], (...), <...>", action="store_true", ) g2p_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for G2P" ) add_global_options(g2p_parser) train_g2p_parser = subparsers.add_parser( "train_g2p", help="Train a G2P model from a pronunciation dictionary" ) train_g2p_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help) train_g2p_parser.add_argument( "output_model_path", type=str, help="Desired location of generated model" ) train_g2p_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for G2P" ) train_g2p_parser.add_argument( "--evaluate", "--validate", dest="evaluation_mode", action="store_true", help="Perform an analysis of accuracy training on " "most of the data and validating on an unseen subset", ) add_global_options(train_g2p_parser) help_message = "Inspect, download, and save pretrained MFA models" model_parser = subparsers.add_parser( "model", aliases=["models"], description=help_message, help=help_message ) model_subparsers = model_parser.add_subparsers(dest="action") model_subparsers.required = True help_message = "Download a pretrained model from the MFA repository" model_download_parser = model_subparsers.add_parser( "download", description=help_message, help=help_message ) model_download_parser.add_argument( "model_type", choices=sorted(MODEL_TYPES), help="Type of model to download" ) model_download_parser.add_argument( "name", help="Name of language code to download, if not specified, " "will list all available languages", type=str, nargs="?", ) help_message = "List of saved models" model_list_parser = model_subparsers.add_parser( "list", description=help_message, help=help_message ) model_list_parser.add_argument( "model_type", choices=sorted(MODEL_TYPES), type=str, nargs="?", help="Type of model to list", ) help_message = "Inspect a model and output its metadata" model_inspect_parser = model_subparsers.add_parser( "inspect", description=help_message, help=help_message ) model_inspect_parser.add_argument( "model_type", choices=sorted(MODEL_TYPES), type=str, nargs="?", help="Type of model to download", ) model_inspect_parser.add_argument( "name", type=str, help="Name of pretrained model or path to MFA model to inspect" ) help_message = "Save a MFA model to the pretrained directory for name-based referencing" model_save_parser = model_subparsers.add_parser( "save", description=help_message, help=help_message ) model_save_parser.add_argument( "model_type", type=str, choices=sorted(MODEL_TYPES), help="Type of MFA model" ) model_save_parser.add_argument( "path", help="Path to MFA model to save for invoking with just its name" ) model_save_parser.add_argument( "--name", help="Name to use as reference (defaults to the name of the zip file", type=str, default="", ) model_save_parser.add_argument( "--overwrite", help="Flag to overwrite existing pretrained models with the same name (and model type)", action="store_true", ) train_lm_parser = subparsers.add_parser( "train_lm", help="Train a language model from a corpus" ) train_lm_parser.add_argument( "source_path", type=str, help="Full path to the source directory to train from, alternatively " "an ARPA format language model to convert for MFA use", ) train_lm_parser.add_argument( "output_model_path", type=str, help="Full path to save resulting language model" ) train_lm_parser.add_argument( "-m", "--model_path", type=str, help="Full path to existing language model to merge probabilities", ) train_lm_parser.add_argument( "-w", 
"--model_weight", type=float, default=1.0, help="Weight factor for supplemental language model, defaults to 1.0", ) train_lm_parser.add_argument( "--dictionary_path", type=str, help=dictionary_path_help, default="" ) train_lm_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training and alignment", ) add_global_options(train_lm_parser) train_dictionary_parser = subparsers.add_parser( "train_dictionary", help="Calculate pronunciation probabilities for a dictionary based on alignment results in a corpus", ) train_dictionary_parser.add_argument( "corpus_directory", help="Full path to the directory to align" ) train_dictionary_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help) train_dictionary_parser.add_argument( "acoustic_model_path", type=str, help=acoustic_model_path_help, ) train_dictionary_parser.add_argument( "output_directory", type=str, help="Full path to output directory, will be created if it doesn't exist", ) train_dictionary_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for alignment" ) train_dictionary_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) add_global_options(train_dictionary_parser) train_ivector_parser = subparsers.add_parser( "train_ivector", help="Train an ivector extractor from a corpus and pretrained acoustic model", ) train_ivector_parser.add_argument( "corpus_directory", type=str, help="Full path to the source directory to train the ivector extractor", ) train_ivector_parser.add_argument( "output_model_path", type=str, help="Full path to save resulting ivector extractor", ) train_ivector_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of filenames to use for determining speaker, " "default is to use directory names", ) train_ivector_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training" ) add_global_options(train_ivector_parser) classify_speakers_parser = subparsers.add_parser( "classify_speakers", help="Use an ivector extractor to cluster utterances into speakers" ) classify_speakers_parser.add_argument( "corpus_directory", type=str, help="Full path to the source directory to run speaker classification", ) classify_speakers_parser.add_argument( "ivector_extractor_path", type=str, default="", help=ivector_model_path_help ) classify_speakers_parser.add_argument( "output_directory", type=str, help="Full path to output directory, will be created if it doesn't exist", ) classify_speakers_parser.add_argument( "-s", "--num_speakers", type=int, default=0, help="Number of speakers if known" ) classify_speakers_parser.add_argument( "--cluster", help="Using clustering instead of classification", action="store_true" ) classify_speakers_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for ivector extraction", ) add_global_options(classify_speakers_parser) create_segments_parser = subparsers.add_parser( "create_segments", help="Create segments based on voice activity dectection (VAD)" ) create_segments_parser.add_argument( "corpus_directory", help="Full path to the source directory to run VAD segmentation" ) create_segments_parser.add_argument( "output_directory", type=str, help="Full path to output directory, will be created if it doesn't exist", ) 
create_segments_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for segmentation" ) add_global_options(create_segments_parser) transcribe_parser = subparsers.add_parser( "transcribe", help="Transcribe utterances using an acoustic model, language model, and pronunciation dictionary", ) transcribe_parser.add_argument( "corpus_directory", type=str, help="Full path to the directory to transcribe" ) transcribe_parser.add_argument("dictionary_path", type=str, help=dictionary_path_help) transcribe_parser.add_argument( "acoustic_model_path", type=str, help=acoustic_model_path_help, ) transcribe_parser.add_argument( "language_model_path", type=str, help=language_model_path_help, ) transcribe_parser.add_argument( "output_directory", type=str, help="Full path to output directory, will be created if it doesn't exist", ) transcribe_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for transcription" ) transcribe_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) transcribe_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) transcribe_parser.add_argument( "-e", "--evaluate", dest="evaluation_mode", help="Evaluate the transcription against golden texts", action="store_true", ) add_global_options(transcribe_parser) config_parser = subparsers.add_parser( "configure", help="The configure command is used to set global defaults for MFA so " "you don't have to set them every time you call an MFA command.", ) config_parser.add_argument( "-t", "--temp_directory", "--temporary_directory", dest="temporary_directory", type=str, default="", help=f"Set the default temporary directory, default is {GLOBAL_CONFIG['temporary_directory']}", ) config_parser.add_argument( "-j", "--num_jobs", type=int, help=f"Set the number of processes to use by default, defaults to {GLOBAL_CONFIG['num_jobs']}", ) config_parser.add_argument( "--always_clean", help="Always remove files from previous runs by default", action="store_true", ) config_parser.add_argument( "--never_clean", help="Don't remove files from previous runs by default", action="store_true", ) config_parser.add_argument( "--always_verbose", help="Default to verbose output", action="store_true" ) config_parser.add_argument( "--never_verbose", help="Default to non-verbose output", action="store_true" ) config_parser.add_argument( "--always_debug", help="Default to running debugging steps", action="store_true" ) config_parser.add_argument( "--never_debug", help="Default to not running debugging steps", action="store_true" ) config_parser.add_argument( "--always_overwrite", help="Always overwrite output files", action="store_true" ) config_parser.add_argument( "--never_overwrite", help="Never overwrite output files (if file already exists, " "the output will be saved in the temp directory)", action="store_true", ) config_parser.add_argument( "--disable_mp", help="Disable all multiprocessing (not recommended as it will usually " "increase processing times)", action="store_true", ) config_parser.add_argument( "--enable_mp", help="Enable multiprocessing (recommended and enabled by default)", action="store_true", ) config_parser.add_argument( "--disable_textgrid_cleanup", help="Disable postprocessing of TextGrids that cleans up " "silences and recombines compound words and 
clitics", action="store_true", ) config_parser.add_argument( "--enable_textgrid_cleanup", help="Enable postprocessing of TextGrids that cleans up " "silences and recombines compound words and clitics", action="store_true", ) config_parser.add_argument( "--disable_detect_phone_set", help="Disable auto-detecting phone sets from the dictionary during training", action="store_true", ) config_parser.add_argument( "--enable_detect_phone_set", help="Enable auto-detecting phone sets from the dictionary during training", action="store_true", ) config_parser.add_argument( "--disable_terminal_colors", help="Turn off colored text in output", action="store_true" ) config_parser.add_argument( "--enable_terminal_colors", help="Turn on colored text in output", action="store_true" ) config_parser.add_argument( "--terminal_width", help=f"Set width of terminal output, " f"currently set to {GLOBAL_CONFIG['terminal_width']}", default=GLOBAL_CONFIG["terminal_width"], type=int, ) config_parser.add_argument( "--blas_num_threads", help=f"Number of threads to use for BLAS libraries, 1 is recommended " f"due to how much MFA relies on multiprocessing. " f"Currently set to {GLOBAL_CONFIG['blas_num_threads']}", default=GLOBAL_CONFIG["blas_num_threads"], type=int, ) history_parser = subparsers.add_parser("history", help="Show previously run mfa commands") _ = subparsers.add_parser("thirdparty", help="DEPRECATED: Please install Kaldi via conda.") _ = subparsers.add_parser( "download", help="DEPRECATED: Please use mfa model download instead." ) history_parser.add_argument( "depth", type=int, help="Number of commands to list", nargs="?", default=10 ) history_parser.add_argument( "-v", "--verbose", help=f"Output debug messages, default is {GLOBAL_CONFIG['verbose']}", action="store_true", ) _ = subparsers.add_parser( "anchor", aliases=["annotator"], help="Launch Anchor Annotator (if installed)" ) return parser parser = create_parser() def print_history(args): depth = args.depth history = load_command_history()[-depth:] if args.verbose: print("command\tDate\tExecution time\tVersion\tExit code\tException") for h in history: execution_time = time.strftime("%H:%M:%S", time.gmtime(h["execution_time"])) d = h["date"].isoformat() print( f"{h['command']}\t{d}\t{execution_time}\t{h['version']}\t{h['exit_code']}\t{h['exception']}" ) pass else: for h in history: print(h["command"]) def main() -> None: """ Main function for the MFA command line interface """ check_third_party() hooks = ExitHooks() hooks.hook() atexit.register(hooks.history_save_handler) from colorama import init init() parser = create_parser() mp.freeze_support() args, unknown = parser.parse_known_args() for short in ["-c", "-d"]: if short in unknown: print( f"Due to the number of options that `{short}` could refer to, it is not accepted. " "Please specify the full argument", file=sys.stderr, ) sys.exit(1) try: if args.subcommand in ["g2p", "train_g2p"]: try: import pynini # noqa except ImportError: print( "There was an issue importing Pynini, please ensure that it is installed. 
If you are on Windows, " "please use the Windows Subsystem for Linux to use g2p functionality.", file=sys.stderr, ) sys.exit(1) if args.subcommand == "align": run_align_corpus(args, unknown) elif args.subcommand == "adapt": run_adapt_model(args, unknown) elif args.subcommand == "train": run_train_acoustic_model(args, unknown) elif args.subcommand == "g2p": run_g2p(args, unknown) elif args.subcommand == "train_g2p": run_train_g2p(args, unknown) elif args.subcommand == "validate": run_validate_corpus(args, unknown) elif args.subcommand in ["model", "models"]: run_model(args) elif args.subcommand == "train_lm": run_train_lm(args, unknown) elif args.subcommand == "train_dictionary": run_train_dictionary(args, unknown) elif args.subcommand == "train_ivector": run_train_ivector_extractor(args, unknown) elif args.subcommand == "classify_speakers": # pragma: no cover run_classify_speakers(args, unknown) elif args.subcommand in ["annotator", "anchor"]: run_anchor() elif args.subcommand == "transcribe": run_transcribe_corpus(args, unknown) elif args.subcommand == "create_segments": run_create_segments(args, unknown) elif args.subcommand == "configure": update_global_config(args) global GLOBAL_CONFIG GLOBAL_CONFIG = load_global_config() elif args.subcommand == "history": print_history(args) elif args.subcommand == "version": from montreal_forced_aligner.utils import get_mfa_version print(get_mfa_version()) elif args.subcommand == "thirdparty": # Deprecated command raise DeprecationWarning( "Necessary thirdparty executables are now installed via conda. Please refer to the installation docs for the updated commands." ) elif args.subcommand == "download": # Deprecated command raise DeprecationWarning( "Downloading models is now run through the `mfa model download` command, please use that instead." ) except MFAError as e: if getattr(args, "debug", False): raise print(e, file=sys.stderr) sys.exit(1) if __name__ == "__main__": import warnings warnings.warn( "Use 'python -m montreal_forced_aligner', not 'python -m montreal_forced_aligner.command_line.mfa'", DeprecationWarning, ) main()
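The main() above is a flat if/elif dispatch over args.subcommand, with parse_known_args() letting unrecognized options flow through to each command-specific runner. A minimal, runnable sketch of that pattern; the demo subcommand, parser, and runner names are illustrative stand-ins, not MFA's API:

# Sketch of the subcommand dispatch used above: parse_known_args()
# returns (known namespace, leftover argv), and ambiguous short flags
# are rejected up front. Demo names are illustrative, not MFA's.
import argparse
import sys


def run_align(args, unknown):
    print(f"aligning {args.corpus_directory}, extra options: {unknown}")


def main():
    parser = argparse.ArgumentParser(prog="demo")
    subparsers = parser.add_subparsers(dest="subcommand")
    align_parser = subparsers.add_parser("align")
    align_parser.add_argument("corpus_directory")

    args, unknown = parser.parse_known_args()
    for short in ["-c", "-d"]:  # too ambiguous to accept, as in the CLI above
        if short in unknown:
            print(f"Please spell out the full argument instead of {short}",
                  file=sys.stderr)
            sys.exit(1)
    if args.subcommand == "align":
        run_align(args, unknown)


if __name__ == "__main__":
    main()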
""" PyGMT is a library for processing geospatial and geophysical data and making publication quality maps and figures. It provides a Pythonic interface for the Generic Mapping Tools (GMT), a command-line program widely used in the Earth Sciences. Besides making GMT more accessible to new users, PyGMT aims to provide integration with the PyData ecosystem as well as support for rich display in Jupyter notebooks. Main Features ------------- Here are just a few of the things that PyGMT does well: - Easy handling of individual types of data like Cartesian, geographic, or time-series data. - Processing of (geo)spatial data including gridding, filtering, and masking - Allows plotting of a large spectrum of objects on figures including lines, vectors, polygons, and symbols (pre-defined and customized) - Generate publication-quality illustrations and make animations """ import atexit as _atexit from pkg_resources import get_distribution # Import modules to make the high-level GMT Python API from pygmt import datasets from pygmt.accessors import GMTDataArrayAccessor from pygmt.figure import Figure from pygmt.session_management import begin as _begin from pygmt.session_management import end as _end from pygmt.src import ( blockmean, blockmedian, config, grd2cpt, grdcut, grdfilter, grdinfo, grdtrack, info, makecpt, surface, which, x2sys_cross, x2sys_init, ) # Get semantic version through setuptools-scm __version__ = f'v{get_distribution('pygmt').version}' # e.g. v0.1.2.dev3+g0ab3cd78 __commit__ = __version__.split("+g")[-1] if "+g" in __version__ else "" # 0ab3cd78 # Start our global modern mode session _begin() # Tell Python to run _end when shutting down _atexit.register(_end) def print_clib_info(): """ Print information about the GMT shared library that we can find. Includes the GMT version, default values for parameters, the path to the ``libgmt`` shared library, and GMT directories. """ from pygmt.clib import Session lines = ["GMT library information:"] with Session() as ses: for key in sorted(ses.info): lines.append(" {}: {}".format(key, ses.info[key])) print("\n".join(lines)) def show_versions(): """ Prints various dependency versions useful when submitting bug reports. This includes information about: - PyGMT itself - System information (Python version, Operating System) - Core dependency versions (Numpy, Pandas, Xarray, etc) - GMT library information """ import importlib import platform import subprocess import sys def _get_module_version(modname): """ Get version information of a Python module. """ try: if modname in sys.modules: module = sys.modules[modname] else: module = importlib.import_module(modname) try: return module.__version__ except AttributeError: return module.version except ImportError: return None def _get_ghostscript_version(): """ Get ghostscript version. """ os_name = sys.platform if os_name.startswith(("linux", "freebsd", "darwin")): cmds = ["gs"] elif os_name == "win32": cmds = ["gswin64c.exe", "gswin32c.exe"] else: return None for gs_cmd in cmds: try: version = subprocess.check_output( [gs_cmd, "--version"], universal_newlines=True ).strip() return version except FileNotFoundError: continue return None def _get_gmt_version(): """ Get GMT version. 
""" try: version = subprocess.check_output( ["gmt", "--version"], universal_newlines=True ).strip() return version except FileNotFoundError: return None sys_info = { "python": sys.version.replace("\n", " "), "executable": sys.executable, "machine": platform.platform(), } deps = ["numpy", "pandas", "xarray", "netCDF4", "packaging"] print("PyGMT information:") print(f" version: {__version__}") print("System information:") for key, val in sys_info.items(): print(f" {key}: {val}") print("Dependency information:") for modname in deps: print(f" {modname}: {_get_module_version(modname)}") print(f" ghostscript: {_get_ghostscript_version()}") print(f" gmt: {_get_gmt_version()}") print_clib_info() def test(doctest=True, verbose=True, coverage=False, figures=True): """ Run the test suite. Uses `pytest <http://pytest.org/>`__ to discover and run the tests. If you haven't already, you can install it with `conda <http://conda.pydata.org/>`__ or `pip <https://pip.pypa.io/en/stable/>`__. Parameters ---------- doctest : bool If ``True``, will run the doctests as well (code examples that start with a ``>>>`` in the docs). verbose : bool If ``True``, will print extra information during the test run. coverage : bool If ``True``, will run test coverage analysis on the code as well. Requires ``pytest-cov``. figures : bool If ``True``, will test generated figures against saved baseline figures. Requires ``pytest-mpl`` and ``matplotlib``. Raises ------ AssertionError If pytest returns a non-zero error code indicating that some tests have failed. """ import pytest show_versions() package = __name__ args = [] if verbose: args.append("-vv") if coverage: args.append("--cov={}".format(package)) args.append("--cov-report=term-missing") if doctest: args.append("--doctest-modules") if figures: args.append("--mpl") args.append("--pyargs") args.append(package) status = pytest.main(args) assert status == 0, "Some tests have failed."
""" PyGMT is a library for processing geospatial and geophysical data and making publication quality maps and figures. It provides a Pythonic interface for the Generic Mapping Tools (GMT), a command-line program widely used in the Earth Sciences. Besides making GMT more accessible to new users, PyGMT aims to provide integration with the PyData ecosystem as well as support for rich display in Jupyter notebooks. Main Features ------------- Here are just a few of the things that PyGMT does well: - Easy handling of individual types of data like Cartesian, geographic, or time-series data. - Processing of (geo)spatial data including gridding, filtering, and masking - Allows plotting of a large spectrum of objects on figures including lines, vectors, polygons, and symbols (pre-defined and customized) - Generate publication-quality illustrations and make animations """ import atexit as _atexit from pkg_resources import get_distribution # Import modules to make the high-level GMT Python API from pygmt import datasets from pygmt.accessors import GMTDataArrayAccessor from pygmt.figure import Figure from pygmt.session_management import begin as _begin from pygmt.session_management import end as _end from pygmt.src import ( blockmean, blockmedian, config, grd2cpt, grdcut, grdfilter, grdinfo, grdtrack, info, makecpt, surface, which, x2sys_cross, x2sys_init, ) # Get semantic version through setuptools-scm __version__ = f'v{get_distribution("pygmt").version}' # e.g. v0.1.2.dev3+g0ab3cd78 __commit__ = __version__.split("+g")[-1] if "+g" in __version__ else "" # 0ab3cd78 # Start our global modern mode session _begin() # Tell Python to run _end when shutting down _atexit.register(_end) def print_clib_info(): """ Print information about the GMT shared library that we can find. Includes the GMT version, default values for parameters, the path to the ``libgmt`` shared library, and GMT directories. """ from pygmt.clib import Session lines = ["GMT library information:"] with Session() as ses: for key in sorted(ses.info): lines.append(" {}: {}".format(key, ses.info[key])) print("\n".join(lines)) def show_versions(): """ Prints various dependency versions useful when submitting bug reports. This includes information about: - PyGMT itself - System information (Python version, Operating System) - Core dependency versions (Numpy, Pandas, Xarray, etc) - GMT library information """ import importlib import platform import subprocess import sys def _get_module_version(modname): """ Get version information of a Python module. """ try: if modname in sys.modules: module = sys.modules[modname] else: module = importlib.import_module(modname) try: return module.__version__ except AttributeError: return module.version except ImportError: return None def _get_ghostscript_version(): """ Get ghostscript version. """ os_name = sys.platform if os_name.startswith(("linux", "freebsd", "darwin")): cmds = ["gs"] elif os_name == "win32": cmds = ["gswin64c.exe", "gswin32c.exe"] else: return None for gs_cmd in cmds: try: version = subprocess.check_output( [gs_cmd, "--version"], universal_newlines=True ).strip() return version except FileNotFoundError: continue return None def _get_gmt_version(): """ Get GMT version. 
""" try: version = subprocess.check_output( ["gmt", "--version"], universal_newlines=True ).strip() return version except FileNotFoundError: return None sys_info = { "python": sys.version.replace("\n", " "), "executable": sys.executable, "machine": platform.platform(), } deps = ["numpy", "pandas", "xarray", "netCDF4", "packaging"] print("PyGMT information:") print(f" version: {__version__}") print("System information:") for key, val in sys_info.items(): print(f" {key}: {val}") print("Dependency information:") for modname in deps: print(f" {modname}: {_get_module_version(modname)}") print(f" ghostscript: {_get_ghostscript_version()}") print(f" gmt: {_get_gmt_version()}") print_clib_info() def test(doctest=True, verbose=True, coverage=False, figures=True): """ Run the test suite. Uses `pytest <http://pytest.org/>`__ to discover and run the tests. If you haven't already, you can install it with `conda <http://conda.pydata.org/>`__ or `pip <https://pip.pypa.io/en/stable/>`__. Parameters ---------- doctest : bool If ``True``, will run the doctests as well (code examples that start with a ``>>>`` in the docs). verbose : bool If ``True``, will print extra information during the test run. coverage : bool If ``True``, will run test coverage analysis on the code as well. Requires ``pytest-cov``. figures : bool If ``True``, will test generated figures against saved baseline figures. Requires ``pytest-mpl`` and ``matplotlib``. Raises ------ AssertionError If pytest returns a non-zero error code indicating that some tests have failed. """ import pytest show_versions() package = __name__ args = [] if verbose: args.append("-vv") if coverage: args.append("--cov={}".format(package)) args.append("--cov-report=term-missing") if doctest: args.append("--doctest-modules") if figures: args.append("--mpl") args.append("--pyargs") args.append(package) status = pytest.main(args) assert status == 0, "Some tests have failed."
import os
import subprocess
import sys
import zipfile

# Empty out any previous build tree (files first, then the directories,
# deepest first), or create it if this is the first build.
if (os.path.exists("build")):
    dl=[]
    for r,ndl,fl in os.walk("build"):
        dl=[os.path.join(r,k) for k in ndl]+dl
        for f in fl:
            os.remove(os.path.join(r,f))
    for k in dl:
        os.rmdir(k)
else:
    os.mkdir("build")

# Compile every .java file under src/ into build/.
cd=os.getcwd()
os.chdir("src")
jfl=[]
for r,_,fl in os.walk("."):
    for f in fl:
        if (f[-5:]==".java"):
            jfl.append(os.path.join(r,f))
if (subprocess.run(["javac","-d","../build"]+jfl).returncode!=0):
    sys.exit(1)
os.chdir(cd)

# Package the manifest and all compiled classes into the jar; the [6:]
# slice strips the leading "build/" from each archive name.
with zipfile.ZipFile("build/jwt_auth.jar","w") as zf:
    print("Writing: META-INF/MANIFEST.MF")
    zf.write("manifest.mf",arcname="META-INF/MANIFEST.MF")
    for r,_,fl in os.walk("build"):
        for f in fl:
            if (f[-6:]==".class"):
                print(f"Writing: {os.path.join(r,f)[6:].replace(chr(92),"/")}")
                zf.write(os.path.join(r,f),os.path.join(r,f)[6:])

if ("--run" in sys.argv):
    subprocess.run(["java","-jar","build/jwt_auth.jar"])
import os
import subprocess
import sys
import zipfile

# Empty out any previous build tree (files first, then the directories,
# deepest first), or create it if this is the first build.
if (os.path.exists("build")):
    dl=[]
    for r,ndl,fl in os.walk("build"):
        dl=[os.path.join(r,k) for k in ndl]+dl
        for f in fl:
            os.remove(os.path.join(r,f))
    for k in dl:
        os.rmdir(k)
else:
    os.mkdir("build")

# Compile every .java file under src/ into build/.
cd=os.getcwd()
os.chdir("src")
jfl=[]
for r,_,fl in os.walk("."):
    for f in fl:
        if (f[-5:]==".java"):
            jfl.append(os.path.join(r,f))
if (subprocess.run(["javac","-d","../build"]+jfl).returncode!=0):
    sys.exit(1)
os.chdir(cd)

# Package the manifest and all compiled classes into the jar; the [6:]
# slice strips the leading "build/" from each archive name.
with zipfile.ZipFile("build/jwt_auth.jar","w") as zf:
    print("Writing: META-INF/MANIFEST.MF")
    zf.write("manifest.mf",arcname="META-INF/MANIFEST.MF")
    for r,_,fl in os.walk("build"):
        for f in fl:
            if (f[-6:]==".class"):
                print(f"Writing: {os.path.join(r,f)[6:].replace(chr(92),'/')}")
                zf.write(os.path.join(r,f),os.path.join(r,f)[6:])

if ("--run" in sys.argv):
    subprocess.run(["java","-jar","build/jwt_auth.jar"])
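The two copies of this build script differ only in the f-string on the Writing: line: the first nests double quotes inside a double-quoted f-string, which parses only on Python 3.12 and later (PEP 701), while the second uses single quotes inside and runs on any supported interpreter. A small, self-contained demonstration of the difference; the sample path is invented:

# Quote reuse inside f-string replacement fields was only legalized in
# Python 3.12 (PEP 701); older parsers stop at the inner quote. The
# 3.12-only form is compiled lazily via eval() so this snippet still
# runs on 3.11 and earlier.
import sys

path = "build\\com\\example\\Main.class"  # illustrative path

# Portable form: inner quote character differs from the outer one.
print(f"Writing: {path[6:].replace(chr(92), '/')}")

if sys.version_info >= (3, 12):
    # PEP 701 form: the replacement field reuses the outer quote.
    print(eval('f"Writing: {path[6:].replace(chr(92), "/")}"'))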
from argparse import ArgumentParser, Namespace
from pathlib import Path

from gwpycore import basic_cli_parser

from {{ cookiecutter.tool_name_slug }}.core.{{ cookiecutter.tool_name_slug }}_filter import {{ cookiecutter.tool_name_camel_case }}Filter


def load_command_line(version: str, args) -> Namespace:
    """
    Parses the command line arguments and returns a Namespace with the results.
    All of the usual switches are allowed (--verbose, --debug, --logfile mylog.log, ...).
    The user can also specify an optional command (count or latest, for example).
    If a command is given, then the namespace will have a command attribute with
    a corresponding value.
    """
    parser: ArgumentParser = basic_cli_parser(
        version_text=version,
        devel=True,
        trace=True,
        infile=True,
        outfile=True,
        configfile_default="local\\{{ cookiecutter.tool_name_slug }}.ini",
        logfile_default="{{ cookiecutter.tool_name_slug }}.log",
    )
    parser.add_argument(
        "command",
        choices={{ cookiecutter.tool_name_camel_case }}Filter.commands,
        nargs="?",
        default="gui",
        help=f"Filter commands available: {", ".join({{ cookiecutter.tool_name_camel_case }}Filter.commands)}",
    )
    switches = parser.parse_args(args)  # noqa F811
    if switches.logfile:
        switches.logfile = Path(switches.logfile)
    if switches.configfile:
        switches.configfile = Path(switches.configfile)
    if switches.infile:
        switches.infile = Path(switches.infile)
    if switches.outfile:
        switches.outfile = Path(switches.outfile)
    return switches  # noqa F811


__all__ = ("load_command_line",)
from argparse import ArgumentParser, Namespace
from pathlib import Path

from gwpycore import basic_cli_parser

from {{ cookiecutter.tool_name_slug }}.core.{{ cookiecutter.tool_name_slug }}_filter import {{ cookiecutter.tool_name_camel_case }}Filter


def load_command_line(version: str, args) -> Namespace:
    """
    Parses the command line arguments and returns a Namespace with the results.
    All of the usual switches are allowed (--verbose, --debug, --logfile mylog.log, ...).
    The user can also specify an optional command (count or latest, for example).
    If a command is given, then the namespace will have a command attribute with
    a corresponding value.
    """
    parser: ArgumentParser = basic_cli_parser(
        version_text=version,
        devel=True,
        trace=True,
        infile=True,
        outfile=True,
        configfile_default="local\\{{ cookiecutter.tool_name_slug }}.ini",
        logfile_default="{{ cookiecutter.tool_name_slug }}.log",
    )
    parser.add_argument(
        "command",
        choices={{ cookiecutter.tool_name_camel_case }}Filter.commands,
        nargs="?",
        default="gui",
        help=f"Filter commands available: {', '.join({{ cookiecutter.tool_name_camel_case }}Filter.commands)}",
    )
    switches = parser.parse_args(args)  # noqa F811
    if switches.logfile:
        switches.logfile = Path(switches.logfile)
    if switches.configfile:
        switches.configfile = Path(switches.configfile)
    if switches.infile:
        switches.infile = Path(switches.infile)
    if switches.outfile:
        switches.outfile = Path(switches.outfile)
    return switches  # noqa F811


__all__ = ("load_command_line",)
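Both versions of the loader convert the parsed string switches to Path objects one at a time after parse_args(). A sketch of the alternative, letting argparse coerce with type=Path during parsing; a plain ArgumentParser stands in for gwpycore's basic_cli_parser, whose defaults are not reproduced here:

# Sketch: have argparse coerce path-like options to pathlib.Path during
# parsing instead of re-assigning each switch afterwards. A plain
# ArgumentParser stands in for gwpycore's basic_cli_parser helper.
from argparse import ArgumentParser
from pathlib import Path

parser = ArgumentParser()
parser.add_argument("command", nargs="?", default="gui")
parser.add_argument("--logfile", type=Path, default=Path("tool.log"))
parser.add_argument("--configfile", type=Path)
parser.add_argument("--infile", type=Path)
parser.add_argument("--outfile", type=Path)

switches = parser.parse_args(["count", "--infile", "data.txt"])
print(switches.command, repr(switches.infile))  # e.g. count PosixPath('data.txt')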
from saltproc import DepcodeSerpent
from saltproc import Simulation
from saltproc import Materialflow
from saltproc import Process
from saltproc import Reactor
from saltproc import Sparger
from saltproc import Separator
# from depcode import Depcode
# from simulation import Simulation
# from materialflow import Materialflow
import os
import copy
import json
import jsonschema
from collections import OrderedDict
import gc
import networkx as nx
import pydotplus
import argparse
import numpy as np


def parse_arguments():
    """Parses arguments from command line.

    Returns
    -------
    n: int
        Number of nodes for use in depletion code simulation.
    d: int
        Number of cores for use in depletion code simulation.
    i: str
        Path and name of main SaltProc input file (json format).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-n',  # Number of nodes to use
                        type=int,
                        default=1,
                        help='number of cluster nodes to use in '
                             'depletion code simulation')
    parser.add_argument('-d',  # Number of cores to use
                        type=int,
                        default=1,
                        help='number of threads to use in '
                             'depletion code simulation')
    parser.add_argument('-i',  # main input file
                        type=str,
                        default=None,
                        help='path and name of SaltProc main input file')
    args = parser.parse_args()
    return int(args.n), int(args.d), str(args.i)


def read_main_input(main_inp_file):
    """Reads main SaltProc input file (json format).

    Parameters
    ----------
    main_inp_file : str
        Path to SaltProc main input file and name of this file.
    """
    input_schema = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                './input_schema.json')
    with open(main_inp_file) as f:
        j = json.load(f)
        with open(input_schema) as s:
            v = json.load(s)
            try:
                jsonschema.validate(instance=j, schema=v)
            except jsonschema.exceptions.ValidationError:
                print("Your input file is improperly structured. "
                      "Please see saltproc/tests/test.json for an example.")

        # Global input path
        path_prefix = os.getcwd()
        input_path = os.path.join(path_prefix, os.path.dirname(f.name))

        # Saltproc settings
        global spc_inp_file, dot_inp_file, output_path, num_depsteps
        spc_inp_file = os.path.join(
            os.path.dirname(f.name),
            j['proc_input_file'])
        dot_inp_file = os.path.join(
            os.path.dirname(f.name),
            j['dot_input_file'])
        output_path = j['output_path']
        num_depsteps = j['num_depsteps']

        # Global output path
        output_path = os.path.join(input_path, output_path)
        j['output_path'] = output_path

        # Class settings
        global depcode_inp, simulation_inp, reactor_inp
        depcode_inp = j['depcode']
        simulation_inp = j['simulation']
        reactor_inp = j['reactor']

        depcode_inp['template_inputfile_path'] = os.path.join(
            input_path, depcode_inp['template_inputfile_path'])
        geo_list = depcode_inp['geo_file_paths']

        # Global geometry file paths
        geo_file_paths = []
        for g in geo_list:
            geo_file_paths += [os.path.join(input_path, g)]
        depcode_inp['geo_file_paths'] = geo_file_paths

        # Global output file paths
        depcode_inp['iter_inputfile'] = os.path.join(
            output_path, depcode_inp['iter_inputfile'])
        depcode_inp['iter_matfile'] = os.path.join(
            output_path, depcode_inp['iter_matfile'])
        db_name = os.path.join(
            output_path, simulation_inp['db_name'])
        simulation_inp['db_name'] = db_name

        dep_step_length_cumulative = reactor_inp['dep_step_length_cumulative']
        power_levels = reactor_inp['power_levels']
        if num_depsteps is not None and len(dep_step_length_cumulative) == 1:
            # Reject a negative or non-integral number of depletion steps
            if num_depsteps < 0 or int(num_depsteps) != num_depsteps:
                raise ValueError(
                    'Depletion step interval cannot be negative or non-integer')
            else:
                step = int(num_depsteps)
                deptot = float(dep_step_length_cumulative[0]) * step
                dep_step_length_cumulative = \
np.linspace(float(dep_step_length_cumulative[0]), deptot, num=step) power_levels = float(power_levels[0]) * \ np.ones_like(dep_step_length_cumulative) reactor_inp['dep_step_length_cumulative'] = \ dep_step_length_cumulative reactor_inp['power_levels'] = power_levels elif num_depsteps is None and isinstance(dep_step_length_cumulative, (np.ndarray, list)): if len(dep_step_length_cumulative) != len(power_levels): raise ValueError( 'Depletion step list and power list shape mismatch') def read_processes_from_input(): """Parses ``removal`` data from `.json` file with `Process` objects description. Then returns dictionary of `Process` objects describing extraction process efficiency for each target chemical element. Returns ------- mats : dict of str to Process Dictionary that contains `Process` objects. ``key`` Name of burnable material. ``value`` `Process` object holding extraction process parameters. """ processes = OrderedDict() with open(spc_inp_file) as f: j = json.load(f) for mat, value in j.items(): processes[mat] = OrderedDict() for obj_name, obj_data in j[mat]['extraction_processes'].items(): print("Processs object data: ", obj_data) st = obj_data['efficiency'] if obj_name == 'sparger' and st == "self": processes[mat][obj_name] = Sparger(**obj_data) elif obj_name == 'entrainment_separator' and st == "self": processes[mat][obj_name] = Separator(**obj_data) else: processes[mat][obj_name] = Process(**obj_data) gc.collect() return processes def read_feeds_from_input(): """Parses ``feed`` data from `.json` file with `Materialflow` objects description. Then returns dictionary of `Materialflow` objects describing fresh fuel feeds. Returns ------- mats : dict of str to Materialflow Dictionary that contains `Materialflow` objects with feeds. ``key`` Name of burnable material. ``value`` `Materialflow` object holding composition and properties of feed. """ feeds = OrderedDict() with open(spc_inp_file) as f: j = json.load(f) # print(j['feeds']) for mat, val in j.items(): feeds[mat] = OrderedDict() for obj_name, obj_data in j[mat]['feeds'].items(): # print(obj_data) nucvec = obj_data['comp'] feeds[mat][obj_name] = Materialflow(nucvec) feeds[mat][obj_name].mass = obj_data['mass'] feeds[mat][obj_name].density = obj_data['density'] feeds[mat][obj_name].vol = obj_data['volume'] return feeds def read_dot(dot_file): """Reads directed graph that describes fuel reprocessing system structure from `*.dot` file. Parameters ---------- dot_file : str Path to `.dot` file with reprocessing system structure. Returns ------- mat_name : str Name of burnable material which reprocessing scheme described in `.dot` file. paths_list : list List of lists containing all possible paths between `core_outlet` and `core_inlet`. """ graph_pydot = pydotplus.graph_from_dot_file(dot_file) digraph = nx.drawing.nx_pydot.from_pydot(graph_pydot) mat_name = digraph.name # iterate over all possible paths between 'core_outlet' and 'core_inlet' paths_list = [] all_simple_paths = nx.all_simple_paths(digraph, source='core_outlet', target='core_inlet') for path in all_simple_paths: paths_list.append(path) return mat_name, paths_list def reprocessing(mats): """Applies reprocessing scheme to burnable materials. Parameters ---------- mats : dict of str to Materialflow Dictionary that contains `Materialflow` objects with burnable material data right after irradiation in the core. ``key`` Name of burnable material. ``value`` `Materialflow` object holding composition and properties. 
Returns ------- waste : dict of str to Materialflow ``key`` Process name. ``value`` `Materialflow` object containing waste streams data. extracted_mass: dict of str to Materialflow ``key`` Name of burnable material. ``value`` Mass removed as waste in reprocessing function for each material (g). """ inmass = {} extracted_mass = {} waste = OrderedDict() forked_mats = OrderedDict() prcs = read_processes_from_input() mats_name_dot, paths = read_dot(dot_inp_file) for mname in prcs.keys(): # iterate over materials waste[mname] = {} forked_mats[mname] = [] inmass[mname] = float(mats[mname].mass) print("Material mass before reprocessing %f g" % inmass[mname]) if mname == 'fuel' and mats_name_dot == 'fuel': w = 'waste_' ctr = 0 for path in paths: forked_mats[mname].append(copy.deepcopy(mats[mname])) print("Material mass %f" % mats[mname].mass) for p in path: # Calculate fraction of the flow going to the process p divisor = float(prcs[mname][p].mass_flowrate / prcs[mname]['core_outlet'].mass_flowrate) print('Process %s, divisor=%f' % (p, divisor)) # Update materialflow byt multiplying it by flow fraction forked_mats[mname][ctr] = \ divisor * copy.deepcopy(forked_mats[mname][ctr]) waste[mname][w + p] = \ prcs[mname][p].rem_elements(forked_mats[mname][ctr]) ctr += 1 # Sum all forked material objects together # initilize correct obj instance mats[mname] = forked_mats[mname][0] for idx in range(1, len(forked_mats[mname])): mats[mname] += forked_mats[mname][idx] print('1 Forked material mass %f' % (forked_mats[mname][0].mass)) print('2 Forked material mass %f' % (forked_mats[mname][1].mass)) print('\nMass balance %f g = %f + %f + %f + %f + %f + %f' % (inmass[mname], mats[mname].mass, waste[mname]['waste_sparger'].mass, waste[mname]['waste_entrainment_separator'].mass, waste[mname]['waste_nickel_filter'].mass, waste[mname]['waste_bypass'].mass, waste[mname]['waste_liquid_metal'].mass)) # Bootstrap for many materials if mname == 'ctrlPois': waste[mname]['removal_tb_dy'] = \ prcs[mname]['removal_tb_dy'].rem_elements(mats[mname]) extracted_mass[mname] = inmass[mname] - float(mats[mname].mass) del prcs, inmass, mname, forked_mats, mats_name_dot, paths, divisor return waste, extracted_mass def refill(mats, extracted_mass, waste_dict): """Makes up material loss in removal processes by adding fresh fuel. Parameters ---------- mats : dict of str to Materialflow ``key`` Name of burnable material. ``value`` `Materialflow` object after performing all removals. extracted_mass : dict of str to float Name of burnable material. ``value`` Mass removed as waste in reprocessing function for each material. waste_dict : dict of str to Materialflow ``key`` Process name. ``value`` `Materialflow` object containing waste streams data. Returns ------- refilled_mats: dict of str to Materialflow Dictionary that contains `Materialflow` objects. ``key`` Name of burnable material. ``value`` `Materialflow` object after adding fresh fuel. 
""" print('Fuel before refill ^^^', mats['fuel'].print_attr()) feeds = read_feeds_from_input() refill_mats = OrderedDict() for mn, v in feeds.items(): # iterate over materials refill_mats[mn] = {} for feed_n, fval in feeds[mn].items(): # works with one feed only scale = extracted_mass[mn] / feeds[mn][feed_n].mass refill_mats[mn] = scale * feeds[mn][feed_n] waste_dict[mn]['feed_' + str(feed_n)] = refill_mats[mn] mats[mn] += refill_mats[mn] print('Refilled fresh material %s %f g' % (mn, refill_mats[mn].mass)) print('Refill Material ^^^', refill_mats[mn].print_attr()) print('Fuel after refill ^^^', mats[mn].print_attr()) return waste_dict def run(): """ Inititializes main run. """ # Parse arguments from command-lines nodes, cores, sp_input = parse_arguments() # Read main input file read_main_input(sp_input) # Print out input information print('Initiating Saltproc:\n' '\tRestart = ' + str(simulation_inp['restart_flag']) + '\n' '\tTemplate File Path = ' + os.path.abspath(depcode_inp['template_inputfile_path']) + '\n' '\tInput File Path = ' + os.path.abspath(depcode_inp['iter_inputfile']) + '\n' '\tMaterial File Path = ' + os.path.abspath(depcode_inp['iter_matfile']) + '\n' '\tOutput HDF5 database Path = ' + os.path.abspath(simulation_inp['db_name']) + '\n') # Intializing objects if depcode_inp['codename'] == 'serpent': depcode = DepcodeSerpent( exec_path=depcode_inp['exec_path'], template_inputfile_path=depcode_inp['template_inputfile_path'], iter_inputfile=depcode_inp['iter_inputfile'], iter_matfile=depcode_inp['iter_matfile'], geo_files=depcode_inp['geo_file_paths'], npop=depcode_inp['npop'], active_cycles=depcode_inp['active_cycles'], inactive_cycles=depcode_inp['inactive_cycles']) else: raise ValueError( f'{depcode_inp['codename']} is not a supported depletion code') simulation = Simulation( sim_name='Super test', sim_depcode=depcode, core_number=cores, node_number=nodes, restart_flag=simulation_inp['restart_flag'], adjust_geo=simulation_inp['adjust_geo'], db_path=simulation_inp['db_name']) msr = Reactor( volume=reactor_inp['volume'], mass_flowrate=reactor_inp['mass_flowrate'], power_levels=reactor_inp['power_levels'], dep_step_length_cumulative=reactor_inp['dep_step_length_cumulative']) # Check: Restarting previous simulation or starting new? 
simulation.check_restart() # Run sequence # Start sequence for dep_step in range(len(msr.dep_step_length_cumulative)): print("\n\n\nStep #%i has been started" % (dep_step + 1)) simulation.sim_depcode.write_depcode_input(msr, dep_step, simulation.restart_flag) depcode.run_depcode(cores, nodes) if dep_step == 0 and simulation.restart_flag is False: # First step # Read general simulation data which never changes simulation.store_run_init_info() # Parse and store data for initial state (beginning of dep_step) mats = depcode.read_dep_comp(False) simulation.store_mat_data(mats, dep_step - 1, False) # Finish of First step # Main sequence mats = depcode.read_dep_comp(True) simulation.store_mat_data(mats, dep_step, False) simulation.store_run_step_info() # Reprocessing here print("\nMass and volume of fuel before reproc %f g; %f cm3" % (mats['fuel'].mass, mats['fuel'].vol)) # print("Mass and volume of ctrlPois before reproc %f g; %f cm3" % # (mats['ctrlPois'].mass, # mats['ctrlPois'].vol)) waste_st, rem_mass = reprocessing(mats) print("\nMass and volume of fuel after reproc %f g; %f cm3" % (mats['fuel'].mass, mats['fuel'].vol)) # print("Mass and volume of ctrlPois after reproc %f g; %f cm3" % # (mats['ctrlPois'].mass, # mats['ctrlPois'].vol)) waste_feed_st = refill(mats, rem_mass, waste_st) print("\nMass and volume of fuel after REFILL %f g; %f cm3" % (mats['fuel'].mass, mats['fuel'].vol)) # print("Mass and volume of ctrlPois after REFILL %f g; %f cm3" % # (mats['ctrlPois'].mass, # mats['ctrlPois'].vol)) print("Removed mass [g]:", rem_mass) # Store in DB after reprocessing and refill (right before next depl) simulation.store_after_repr(mats, waste_feed_st, dep_step) depcode.write_mat_file(mats, simulation.burn_time) del mats, waste_st, waste_feed_st, rem_mass gc.collect() # Switch to another geometry? if simulation.adjust_geo and simulation.read_k_eds_delta(dep_step): depcode.switch_to_next_geometry() print("\nTime at the end of current depletion step %fd" % simulation.burn_time) print("Simulation succeeded.\n") '''print("Reactor object data.\n", msr.mass_flowrate, msr.power_levels, msr.dep_step_length_cumulative)'''
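When num_depsteps is given together with a single cumulative step length, read_main_input() above expands them into a vector of end-of-step times and a constant power history. The same expansion in isolation, with invented numbers:

# Standalone rerun of the depletion-schedule expansion in
# read_main_input: one step length plus num_depsteps becomes cumulative
# end-of-step times and a matching power vector. Values are made up.
import numpy as np

num_depsteps = 5
dep_step_length_cumulative = [3.0]   # days, single entry
power_levels = [1.25e9]              # W, single entry

step = int(num_depsteps)
deptot = float(dep_step_length_cumulative[0]) * step
times = np.linspace(float(dep_step_length_cumulative[0]), deptot, num=step)
powers = float(power_levels[0]) * np.ones_like(times)

print(times)   # [ 3.  6.  9. 12. 15.]
print(powers)  # [1.25e+09 1.25e+09 1.25e+09 1.25e+09 1.25e+09]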
from saltproc import DepcodeSerpent
from saltproc import Simulation
from saltproc import Materialflow
from saltproc import Process
from saltproc import Reactor
from saltproc import Sparger
from saltproc import Separator
# from depcode import Depcode
# from simulation import Simulation
# from materialflow import Materialflow
import os
import copy
import json
import jsonschema
from collections import OrderedDict
import gc
import networkx as nx
import pydotplus
import argparse
import numpy as np


def parse_arguments():
    """Parses arguments from command line.

    Returns
    -------
    n: int
        Number of nodes for use in depletion code simulation.
    d: int
        Number of cores for use in depletion code simulation.
    i: str
        Path and name of main SaltProc input file (json format).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-n',  # Number of nodes to use
                        type=int,
                        default=1,
                        help='number of cluster nodes to use in '
                             'depletion code simulation')
    parser.add_argument('-d',  # Number of cores to use
                        type=int,
                        default=1,
                        help='number of threads to use in '
                             'depletion code simulation')
    parser.add_argument('-i',  # main input file
                        type=str,
                        default=None,
                        help='path and name of SaltProc main input file')
    args = parser.parse_args()
    return int(args.n), int(args.d), str(args.i)


def read_main_input(main_inp_file):
    """Reads main SaltProc input file (json format).

    Parameters
    ----------
    main_inp_file : str
        Path to SaltProc main input file and name of this file.
    """
    input_schema = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                './input_schema.json')
    with open(main_inp_file) as f:
        j = json.load(f)
        with open(input_schema) as s:
            v = json.load(s)
            try:
                jsonschema.validate(instance=j, schema=v)
            except jsonschema.exceptions.ValidationError:
                print("Your input file is improperly structured. "
                      "Please see saltproc/tests/test.json for an example.")

        # Global input path
        path_prefix = os.getcwd()
        input_path = os.path.join(path_prefix, os.path.dirname(f.name))

        # Saltproc settings
        global spc_inp_file, dot_inp_file, output_path, num_depsteps
        spc_inp_file = os.path.join(
            os.path.dirname(f.name),
            j['proc_input_file'])
        dot_inp_file = os.path.join(
            os.path.dirname(f.name),
            j['dot_input_file'])
        output_path = j['output_path']
        num_depsteps = j['num_depsteps']

        # Global output path
        output_path = os.path.join(input_path, output_path)
        j['output_path'] = output_path

        # Class settings
        global depcode_inp, simulation_inp, reactor_inp
        depcode_inp = j['depcode']
        simulation_inp = j['simulation']
        reactor_inp = j['reactor']

        depcode_inp['template_inputfile_path'] = os.path.join(
            input_path, depcode_inp['template_inputfile_path'])
        geo_list = depcode_inp['geo_file_paths']

        # Global geometry file paths
        geo_file_paths = []
        for g in geo_list:
            geo_file_paths += [os.path.join(input_path, g)]
        depcode_inp['geo_file_paths'] = geo_file_paths

        # Global output file paths
        depcode_inp['iter_inputfile'] = os.path.join(
            output_path, depcode_inp['iter_inputfile'])
        depcode_inp['iter_matfile'] = os.path.join(
            output_path, depcode_inp['iter_matfile'])
        db_name = os.path.join(
            output_path, simulation_inp['db_name'])
        simulation_inp['db_name'] = db_name

        dep_step_length_cumulative = reactor_inp['dep_step_length_cumulative']
        power_levels = reactor_inp['power_levels']
        if num_depsteps is not None and len(dep_step_length_cumulative) == 1:
            # Reject a negative or non-integral number of depletion steps
            if num_depsteps < 0 or int(num_depsteps) != num_depsteps:
                raise ValueError(
                    'Depletion step interval cannot be negative or non-integer')
            else:
                step = int(num_depsteps)
                deptot = float(dep_step_length_cumulative[0]) * step
                dep_step_length_cumulative = \
np.linspace(float(dep_step_length_cumulative[0]), deptot, num=step) power_levels = float(power_levels[0]) * \ np.ones_like(dep_step_length_cumulative) reactor_inp['dep_step_length_cumulative'] = \ dep_step_length_cumulative reactor_inp['power_levels'] = power_levels elif num_depsteps is None and isinstance(dep_step_length_cumulative, (np.ndarray, list)): if len(dep_step_length_cumulative) != len(power_levels): raise ValueError( 'Depletion step list and power list shape mismatch') def read_processes_from_input(): """Parses ``removal`` data from `.json` file with `Process` objects description. Then returns dictionary of `Process` objects describing extraction process efficiency for each target chemical element. Returns ------- mats : dict of str to Process Dictionary that contains `Process` objects. ``key`` Name of burnable material. ``value`` `Process` object holding extraction process parameters. """ processes = OrderedDict() with open(spc_inp_file) as f: j = json.load(f) for mat, value in j.items(): processes[mat] = OrderedDict() for obj_name, obj_data in j[mat]['extraction_processes'].items(): print("Processs object data: ", obj_data) st = obj_data['efficiency'] if obj_name == 'sparger' and st == "self": processes[mat][obj_name] = Sparger(**obj_data) elif obj_name == 'entrainment_separator' and st == "self": processes[mat][obj_name] = Separator(**obj_data) else: processes[mat][obj_name] = Process(**obj_data) gc.collect() return processes def read_feeds_from_input(): """Parses ``feed`` data from `.json` file with `Materialflow` objects description. Then returns dictionary of `Materialflow` objects describing fresh fuel feeds. Returns ------- mats : dict of str to Materialflow Dictionary that contains `Materialflow` objects with feeds. ``key`` Name of burnable material. ``value`` `Materialflow` object holding composition and properties of feed. """ feeds = OrderedDict() with open(spc_inp_file) as f: j = json.load(f) # print(j['feeds']) for mat, val in j.items(): feeds[mat] = OrderedDict() for obj_name, obj_data in j[mat]['feeds'].items(): # print(obj_data) nucvec = obj_data['comp'] feeds[mat][obj_name] = Materialflow(nucvec) feeds[mat][obj_name].mass = obj_data['mass'] feeds[mat][obj_name].density = obj_data['density'] feeds[mat][obj_name].vol = obj_data['volume'] return feeds def read_dot(dot_file): """Reads directed graph that describes fuel reprocessing system structure from `*.dot` file. Parameters ---------- dot_file : str Path to `.dot` file with reprocessing system structure. Returns ------- mat_name : str Name of burnable material which reprocessing scheme described in `.dot` file. paths_list : list List of lists containing all possible paths between `core_outlet` and `core_inlet`. """ graph_pydot = pydotplus.graph_from_dot_file(dot_file) digraph = nx.drawing.nx_pydot.from_pydot(graph_pydot) mat_name = digraph.name # iterate over all possible paths between 'core_outlet' and 'core_inlet' paths_list = [] all_simple_paths = nx.all_simple_paths(digraph, source='core_outlet', target='core_inlet') for path in all_simple_paths: paths_list.append(path) return mat_name, paths_list def reprocessing(mats): """Applies reprocessing scheme to burnable materials. Parameters ---------- mats : dict of str to Materialflow Dictionary that contains `Materialflow` objects with burnable material data right after irradiation in the core. ``key`` Name of burnable material. ``value`` `Materialflow` object holding composition and properties. 
Returns ------- waste : dict of str to Materialflow ``key`` Process name. ``value`` `Materialflow` object containing waste streams data. extracted_mass: dict of str to Materialflow ``key`` Name of burnable material. ``value`` Mass removed as waste in reprocessing function for each material (g). """ inmass = {} extracted_mass = {} waste = OrderedDict() forked_mats = OrderedDict() prcs = read_processes_from_input() mats_name_dot, paths = read_dot(dot_inp_file) for mname in prcs.keys(): # iterate over materials waste[mname] = {} forked_mats[mname] = [] inmass[mname] = float(mats[mname].mass) print("Material mass before reprocessing %f g" % inmass[mname]) if mname == 'fuel' and mats_name_dot == 'fuel': w = 'waste_' ctr = 0 for path in paths: forked_mats[mname].append(copy.deepcopy(mats[mname])) print("Material mass %f" % mats[mname].mass) for p in path: # Calculate fraction of the flow going to the process p divisor = float(prcs[mname][p].mass_flowrate / prcs[mname]['core_outlet'].mass_flowrate) print('Process %s, divisor=%f' % (p, divisor)) # Update materialflow byt multiplying it by flow fraction forked_mats[mname][ctr] = \ divisor * copy.deepcopy(forked_mats[mname][ctr]) waste[mname][w + p] = \ prcs[mname][p].rem_elements(forked_mats[mname][ctr]) ctr += 1 # Sum all forked material objects together # initilize correct obj instance mats[mname] = forked_mats[mname][0] for idx in range(1, len(forked_mats[mname])): mats[mname] += forked_mats[mname][idx] print('1 Forked material mass %f' % (forked_mats[mname][0].mass)) print('2 Forked material mass %f' % (forked_mats[mname][1].mass)) print('\nMass balance %f g = %f + %f + %f + %f + %f + %f' % (inmass[mname], mats[mname].mass, waste[mname]['waste_sparger'].mass, waste[mname]['waste_entrainment_separator'].mass, waste[mname]['waste_nickel_filter'].mass, waste[mname]['waste_bypass'].mass, waste[mname]['waste_liquid_metal'].mass)) # Bootstrap for many materials if mname == 'ctrlPois': waste[mname]['removal_tb_dy'] = \ prcs[mname]['removal_tb_dy'].rem_elements(mats[mname]) extracted_mass[mname] = inmass[mname] - float(mats[mname].mass) del prcs, inmass, mname, forked_mats, mats_name_dot, paths, divisor return waste, extracted_mass def refill(mats, extracted_mass, waste_dict): """Makes up material loss in removal processes by adding fresh fuel. Parameters ---------- mats : dict of str to Materialflow ``key`` Name of burnable material. ``value`` `Materialflow` object after performing all removals. extracted_mass : dict of str to float Name of burnable material. ``value`` Mass removed as waste in reprocessing function for each material. waste_dict : dict of str to Materialflow ``key`` Process name. ``value`` `Materialflow` object containing waste streams data. Returns ------- refilled_mats: dict of str to Materialflow Dictionary that contains `Materialflow` objects. ``key`` Name of burnable material. ``value`` `Materialflow` object after adding fresh fuel. 
""" print('Fuel before refill ^^^', mats['fuel'].print_attr()) feeds = read_feeds_from_input() refill_mats = OrderedDict() for mn, v in feeds.items(): # iterate over materials refill_mats[mn] = {} for feed_n, fval in feeds[mn].items(): # works with one feed only scale = extracted_mass[mn] / feeds[mn][feed_n].mass refill_mats[mn] = scale * feeds[mn][feed_n] waste_dict[mn]['feed_' + str(feed_n)] = refill_mats[mn] mats[mn] += refill_mats[mn] print('Refilled fresh material %s %f g' % (mn, refill_mats[mn].mass)) print('Refill Material ^^^', refill_mats[mn].print_attr()) print('Fuel after refill ^^^', mats[mn].print_attr()) return waste_dict def run(): """ Inititializes main run. """ # Parse arguments from command-lines nodes, cores, sp_input = parse_arguments() # Read main input file read_main_input(sp_input) # Print out input information print('Initiating Saltproc:\n' '\tRestart = ' + str(simulation_inp['restart_flag']) + '\n' '\tTemplate File Path = ' + os.path.abspath(depcode_inp['template_inputfile_path']) + '\n' '\tInput File Path = ' + os.path.abspath(depcode_inp['iter_inputfile']) + '\n' '\tMaterial File Path = ' + os.path.abspath(depcode_inp['iter_matfile']) + '\n' '\tOutput HDF5 database Path = ' + os.path.abspath(simulation_inp['db_name']) + '\n') # Intializing objects if depcode_inp['codename'] == 'serpent': depcode = DepcodeSerpent( exec_path=depcode_inp['exec_path'], template_inputfile_path=depcode_inp['template_inputfile_path'], iter_inputfile=depcode_inp['iter_inputfile'], iter_matfile=depcode_inp['iter_matfile'], geo_files=depcode_inp['geo_file_paths'], npop=depcode_inp['npop'], active_cycles=depcode_inp['active_cycles'], inactive_cycles=depcode_inp['inactive_cycles']) else: raise ValueError( f'{depcode_inp["codename"]} is not a supported depletion code') simulation = Simulation( sim_name='Super test', sim_depcode=depcode, core_number=cores, node_number=nodes, restart_flag=simulation_inp['restart_flag'], adjust_geo=simulation_inp['adjust_geo'], db_path=simulation_inp['db_name']) msr = Reactor( volume=reactor_inp['volume'], mass_flowrate=reactor_inp['mass_flowrate'], power_levels=reactor_inp['power_levels'], dep_step_length_cumulative=reactor_inp['dep_step_length_cumulative']) # Check: Restarting previous simulation or starting new? 
simulation.check_restart() # Run sequence # Start sequence for dep_step in range(len(msr.dep_step_length_cumulative)): print("\n\n\nStep #%i has been started" % (dep_step + 1)) simulation.sim_depcode.write_depcode_input(msr, dep_step, simulation.restart_flag) depcode.run_depcode(cores, nodes) if dep_step == 0 and simulation.restart_flag is False: # First step # Read general simulation data which never changes simulation.store_run_init_info() # Parse and store data for initial state (beginning of dep_step) mats = depcode.read_dep_comp(False) simulation.store_mat_data(mats, dep_step - 1, False) # Finish of First step # Main sequence mats = depcode.read_dep_comp(True) simulation.store_mat_data(mats, dep_step, False) simulation.store_run_step_info() # Reprocessing here print("\nMass and volume of fuel before reproc %f g; %f cm3" % (mats['fuel'].mass, mats['fuel'].vol)) # print("Mass and volume of ctrlPois before reproc %f g; %f cm3" % # (mats['ctrlPois'].mass, # mats['ctrlPois'].vol)) waste_st, rem_mass = reprocessing(mats) print("\nMass and volume of fuel after reproc %f g; %f cm3" % (mats['fuel'].mass, mats['fuel'].vol)) # print("Mass and volume of ctrlPois after reproc %f g; %f cm3" % # (mats['ctrlPois'].mass, # mats['ctrlPois'].vol)) waste_feed_st = refill(mats, rem_mass, waste_st) print("\nMass and volume of fuel after REFILL %f g; %f cm3" % (mats['fuel'].mass, mats['fuel'].vol)) # print("Mass and volume of ctrlPois after REFILL %f g; %f cm3" % # (mats['ctrlPois'].mass, # mats['ctrlPois'].vol)) print("Removed mass [g]:", rem_mass) # Store in DB after reprocessing and refill (right before next depl) simulation.store_after_repr(mats, waste_feed_st, dep_step) depcode.write_mat_file(mats, simulation.burn_time) del mats, waste_st, waste_feed_st, rem_mass gc.collect() # Switch to another geometry? if simulation.adjust_geo and simulation.read_k_eds_delta(dep_step): depcode.switch_to_next_geometry() print("\nTime at the end of current depletion step %fd" % simulation.burn_time) print("Simulation succeeded.\n") '''print("Reactor object data.\n", msr.mass_flowrate, msr.power_levels, msr.dep_step_length_cumulative)'''
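read_dot() above loads the .dot reprocessing scheme into a directed graph and enumerates every simple path from core_outlet to core_inlet; reprocessing() then treats each path as one branch of the flow split. The traversal on a toy graph; the edges below are illustrative, not a real SaltProc scheme:

# The path enumeration behind read_dot(), on a toy flow graph. Each
# simple path core_outlet -> core_inlet is one reprocessing branch.
import networkx as nx

digraph = nx.DiGraph(name="fuel")
digraph.add_edges_from([
    ("core_outlet", "sparger"),
    ("sparger", "entrainment_separator"),
    ("entrainment_separator", "core_inlet"),
    ("core_outlet", "bypass"),
    ("bypass", "core_inlet"),
])

paths_list = list(nx.all_simple_paths(digraph,
                                      source="core_outlet",
                                      target="core_inlet"))
print(digraph.name, paths_list)
# fuel [['core_outlet', 'sparger', 'entrainment_separator', 'core_inlet'],
#       ['core_outlet', 'bypass', 'core_inlet']]  (path order may vary)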
from typing import Any, Dict, List, Optional import aiohttp from apple.cmds.units import units from apple.consensus.block_record import BlockRecord from apple.rpc.farmer_rpc_client import FarmerRpcClient from apple.rpc.full_node_rpc_client import FullNodeRpcClient from apple.rpc.wallet_rpc_client import WalletRpcClient from apple.util.config import load_config from apple.util.default_root import DEFAULT_ROOT_PATH from apple.util.ints import uint16 from apple.util.misc import format_bytes from apple.util.misc import format_minutes from apple.util.network import is_localhost SECONDS_PER_BLOCK = (24 * 3600) / 4608 async def get_harvesters(farmer_rpc_port: Optional[int]) -> Optional[Dict[str, Any]]: try: config = load_config(DEFAULT_ROOT_PATH, "config.yaml") self_hostname = config["self_hostname"] if farmer_rpc_port is None: farmer_rpc_port = config["farmer"]["rpc_port"] farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config) plots = await farmer_client.get_harvesters() except Exception as e: if isinstance(e, aiohttp.ClientConnectorError): print(f"Connection error. Check if farmer is running at {farmer_rpc_port}") else: print(f"Exception from 'harvester' {e}") return None farmer_client.close() await farmer_client.await_closed() return plots async def get_blockchain_state(rpc_port: Optional[int]) -> Optional[Dict[str, Any]]: blockchain_state = None try: config = load_config(DEFAULT_ROOT_PATH, "config.yaml") self_hostname = config["self_hostname"] if rpc_port is None: rpc_port = config["full_node"]["rpc_port"] client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config) blockchain_state = await client.get_blockchain_state() except Exception as e: if isinstance(e, aiohttp.ClientConnectorError): print(f"Connection error. Check if full node is running at {rpc_port}") else: print(f"Exception from 'full node' {e}") client.close() await client.await_closed() return blockchain_state async def get_average_block_time(rpc_port: Optional[int]) -> float: try: blocks_to_compare = 500 config = load_config(DEFAULT_ROOT_PATH, "config.yaml") self_hostname = config["self_hostname"] if rpc_port is None: rpc_port = config["full_node"]["rpc_port"] client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config) blockchain_state = await client.get_blockchain_state() curr: Optional[BlockRecord] = blockchain_state["peak"] if curr is None or curr.height < (blocks_to_compare + 100): client.close() await client.await_closed() return SECONDS_PER_BLOCK while curr is not None and curr.height > 0 and not curr.is_transaction_block: curr = await client.get_block_record(curr.prev_hash) if curr is None: client.close() await client.await_closed() return SECONDS_PER_BLOCK past_curr = await client.get_block_record_by_height(curr.height - blocks_to_compare) while past_curr is not None and past_curr.height > 0 and not past_curr.is_transaction_block: past_curr = await client.get_block_record(past_curr.prev_hash) if past_curr is None: client.close() await client.await_closed() return SECONDS_PER_BLOCK client.close() await client.await_closed() return (curr.timestamp - past_curr.timestamp) / (curr.height - past_curr.height) except Exception as e: if isinstance(e, aiohttp.ClientConnectorError): print(f"Connection error. 
Check if full node is running at {rpc_port}") else: print(f"Exception from 'full node' {e}") client.close() await client.await_closed() return SECONDS_PER_BLOCK async def get_wallets_stats(wallet_rpc_port: Optional[int]) -> Optional[Dict[str, Any]]: amounts = None try: config = load_config(DEFAULT_ROOT_PATH, "config.yaml") self_hostname = config["self_hostname"] if wallet_rpc_port is None: wallet_rpc_port = config["wallet"]["rpc_port"] wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config) amounts = await wallet_client.get_farmed_amount() # # Don't catch any exceptions, the caller will handle it # finally: wallet_client.close() await wallet_client.await_closed() return amounts async def is_farmer_running(farmer_rpc_port: Optional[int]) -> bool: is_running = False try: config = load_config(DEFAULT_ROOT_PATH, "config.yaml") self_hostname = config["self_hostname"] if farmer_rpc_port is None: farmer_rpc_port = config["farmer"]["rpc_port"] farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config) await farmer_client.get_connections() is_running = True except Exception as e: if isinstance(e, aiohttp.ClientConnectorError): print(f"Connection error. Check if farmer is running at {farmer_rpc_port}") else: print(f"Exception from 'farmer' {e}") farmer_client.close() await farmer_client.await_closed() return is_running async def get_challenges(farmer_rpc_port: Optional[int]) -> Optional[List[Dict[str, Any]]]: signage_points = None try: config = load_config(DEFAULT_ROOT_PATH, "config.yaml") self_hostname = config["self_hostname"] if farmer_rpc_port is None: farmer_rpc_port = config["farmer"]["rpc_port"] farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config) signage_points = await farmer_client.get_signage_points() except Exception as e: if isinstance(e, aiohttp.ClientConnectorError): print(f"Connection error. 
Check if farmer is running at {farmer_rpc_port}") else: print(f"Exception from 'farmer' {e}") farmer_client.close() await farmer_client.await_closed() return signage_points async def challenges(farmer_rpc_port: Optional[int], limit: int) -> None: signage_points = await get_challenges(farmer_rpc_port) if signage_points is None: return None signage_points.reverse() if limit != 0: signage_points = signage_points[:limit] for signage_point in signage_points: print( ( f"Hash: {signage_point["signage_point"]["challenge_hash"]} " f"Index: {signage_point["signage_point"]["signage_point_index"]}" ) ) async def summary( rpc_port: Optional[int], wallet_rpc_port: Optional[int], harvester_rpc_port: Optional[int], farmer_rpc_port: Optional[int], ) -> None: all_harvesters = await get_harvesters(farmer_rpc_port) blockchain_state = await get_blockchain_state(rpc_port) farmer_running = await is_farmer_running(farmer_rpc_port) wallet_not_ready: bool = False wallet_not_running: bool = False amounts = None try: amounts = await get_wallets_stats(wallet_rpc_port) except Exception as e: if isinstance(e, aiohttp.ClientConnectorError): wallet_not_running = True else: wallet_not_ready = True print("Farming status: ", end="") if blockchain_state is None: print("Not available") elif blockchain_state["sync"]["sync_mode"]: print("Syncing") elif not blockchain_state["sync"]["synced"]: print("Not synced or not connected to peers") elif not farmer_running: print("Not running") else: print("Farming") if amounts is not None: print(f"Total apple farmed: {amounts["farmed_amount"] / units["apple"]}") print(f"User transaction fees: {amounts["fee_amount"] / units["apple"]}") print(f"Block rewards: {(amounts["farmer_reward_amount"] + amounts["pool_reward_amount"]) / units["apple"]}") print(f"Last height farmed: {amounts["last_height_farmed"]}") class PlotStats: total_plot_size = 0 total_plots = 0 if all_harvesters is not None: harvesters_local: dict = {} harvesters_remote: dict = {} for harvester in all_harvesters["harvesters"]: ip = harvester["connection"]["host"] if is_localhost(ip): harvesters_local[harvester["connection"]["node_id"]] = harvester else: if ip not in harvesters_remote: harvesters_remote[ip] = {} harvesters_remote[ip][harvester["connection"]["node_id"]] = harvester def process_harvesters(harvester_peers_in: dict): for harvester_peer_id, plots in harvester_peers_in.items(): total_plot_size_harvester = sum(map(lambda x: x["file_size"], plots["plots"])) PlotStats.total_plot_size += total_plot_size_harvester PlotStats.total_plots += len(plots["plots"]) print(f" {len(plots["plots"])} plots of size: {format_bytes(total_plot_size_harvester)}") if len(harvesters_local) > 0: print(f"Local Harvester{"s" if len(harvesters_local) > 1 else ""}") process_harvesters(harvesters_local) for harvester_ip, harvester_peers in harvesters_remote.items(): print(f"Remote Harvester{"s" if len(harvester_peers) > 1 else ""} for IP: {harvester_ip}") process_harvesters(harvester_peers) print(f"Plot count for all harvesters: {PlotStats.total_plots}") print("Total size of plots: ", end="") print(format_bytes(PlotStats.total_plot_size)) else: print("Plot count: Unknown") print("Total size of plots: Unknown") if blockchain_state is not None: print("Estimated network space: ", end="") print(format_bytes(blockchain_state["space"])) else: print("Estimated network space: Unknown") minutes = -1 if blockchain_state is not None and all_harvesters is not None: proportion = PlotStats.total_plot_size / blockchain_state["space"] if blockchain_state["space"] 
else -1 minutes = int((await get_average_block_time(rpc_port) / 60) / proportion) if proportion else -1 if all_harvesters is not None and PlotStats.total_plots == 0: print("Expected time to win: Never (no plots)") else: print("Expected time to win: " + format_minutes(minutes)) if amounts is None: if wallet_not_running: print("For details on farmed rewards and fees you should run 'apple start wallet' and 'apple wallet show'") elif wallet_not_ready: print("For details on farmed rewards and fees you should run 'apple wallet show'") else: print("Note: log into your key using 'apple wallet show' to see rewards for each key")
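The expected-time-to-win estimate above boils down to one proportion: the farm's share of the total network space, scaled by the average block interval. A minimal standalone sketch of that math (the helper name and sample numbers are illustrative, not from the source):

def expected_minutes_to_win(total_plot_size: int, network_space: int, avg_block_seconds: float) -> int:
    # Mirrors the summary() math above: share of network space -> minutes per win.
    if total_plot_size <= 0 or network_space <= 0:
        return -1
    proportion = total_plot_size / network_space
    return int((avg_block_seconds / 60) / proportion)

# 1 TiB of plots on a 35 EiB network at 24 * 3600 / 4608 = 18.75 s per block
# -> about 11.5 million minutes (roughly 22 years).
print(expected_minutes_to_win(2 ** 40, 35 * 2 ** 60, 24 * 3600 / 4608))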
import logging from traceback import format_exception from tinydb import TinyDB, Query import nextcord from nextcord.ext import commands def check_ping(guild_id_var): db = TinyDB('databases/pings.json') query = Query() values = str(list(map(lambda entry: entry["pingstate"], db.search(query.guild_id == str(guild_id_var))))[0]) return values.lower() class Errors(commands.Cog): def __init__(self, bot): self.bot = bot self.logger = logging.getLogger(__name__) @commands.Cog.listener() async def on_command_error(self, ctx, err): if hasattr(ctx.command, 'on_error'): return if isinstance(err, commands.ConversionError): await ctx.send(err) elif isinstance(err, commands.MissingRequiredArgument): await ctx.send(embed=nextcord.Embed(title=f"Missing required argument: `{err.param}`", color=nextcord.Color.random())) elif isinstance(err, commands.BadArgument): await ctx.send(err) elif isinstance(err, commands.ArgumentParsingError): await ctx.send(err) elif isinstance(err, commands.PrivateMessageOnly): await ctx.send(embed=nextcord.Embed(title="This command can only be used in DMs.", color=nextcord.Color.random())) elif isinstance(err, commands.NoPrivateMessage): await ctx.send(embed=nextcord.Embed(title="This command can only be used in Guilds.", color=nextcord.Color.random())) elif isinstance(err, commands.MissingPermissions): perms = ", ".join( f"`{perm.replace('_', ' ').title()}`" for perm in err.missing_perms ) await ctx.send(embed=nextcord.Embed(title=f"You're missing the permissions: {perms}", color=nextcord.Color.random())) elif isinstance(err, commands.BotMissingPermissions): perms = ", ".join( f"`{perm.replace('_', ' ').title()}`" for perm in err.missing_perms ) await ctx.send(embed=nextcord.Embed(title=f"I'm missing the permissions: {perms}", color=nextcord.Color.random())) elif isinstance(err, commands.DisabledCommand): await ctx.send(embed=nextcord.Embed(title=f"`{ctx.command.qualified_name}` is currently disabled.", color=nextcord.Color.random())) elif isinstance(err, nextcord.HTTPException): await ctx.send( embed=nextcord.Embed(title="An error occurred while I was trying to execute a task. Are you sure I have the correct permissions?", color=nextcord.Color.random() )) elif isinstance(err, nextcord.errors.Forbidden): pass elif isinstance(err, commands.MaxConcurrencyReached): await ctx.send( embed=nextcord.Embed(title=f"`{ctx.command.qualified_name}` can only be used {err.number} time(s) at once per {str(err.per)}", color=nextcord.Color.random() )) elif isinstance(err, commands.errors.CommandNotFound): pass self.logger.error("".join(format_exception(err, err, err.__traceback__))) def setup(bot): bot.add_cog(Errors(bot))
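The f-strings above keep the inner quotes different from the outer ones for a reason: reusing the outer quote inside a replacement field, such as f"{d["key"]}", only parses on Python 3.12+ (PEP 701), while earlier interpreters raise SyntaxError at compile time. A small demonstration:

# f-strings that reuse the outer quote inside {...} need Python 3.12+ (PEP 701).
d = {"key": "value"}

print(f"{d['key']}")  # portable: inner quotes differ from the outer ones

src = 'f"{d["key"]}"'  # same lookup, but the inner quotes match the outer ones
try:
    print(eval(compile(src, "<demo>", "eval")))  # succeeds on 3.12+
except SyntaxError as exc:
    print(f"needs Python 3.12+: {exc.msg}")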
from vkbottle.rule import FromMe from vkbottle.user import Blueprint, Message from idm_lp import const from idm_lp.database import Database from idm_lp.logger import logger_decorator from idm_lp.utils import edit_message user = Blueprint( name='auto_infection_blueprint' ) @user.on.message_handler(FromMe(), text="<prefix:service_prefix> +автозаражение") @logger_decorator async def activate_auto_infection_wrapper(message: Message, **kwargs): with Database.get_current() as db: db.auto_infection = True await edit_message( message, "✅ Автоматическое заражение включено" ) @user.on.message_handler(FromMe(), text="<prefix:service_prefix> -автозаражение") @logger_decorator async def deactivate_auto_infection_wrapper(message: Message, **kwargs): with Database.get_current() as db: db.auto_infection = False await edit_message( message, "✅ Автоматическое заражение выключено" ) @user.on.message_handler(FromMe(), text="<prefix:service_prefix> автозаражение интервал <interval:int>") @logger_decorator async def set_auto_infection_interval_wrapper(message: Message, interval: int, **kwargs): db = Database.get_current() if interval < 60: await edit_message( message, "⚠ Интервал автоматического заражения не может быть меньше 1 минуты" ) return db.auto_infection_interval = interval db.save() await edit_message( message, f"✅ Интервал автоматического заражения установлен на {interval} сек." ) @user.on.message_handler(FromMe(), text="<prefix:service_prefix> автозаражение аргумент <argument>") @logger_decorator async def set_auto_infection_argument_wrapper(message: Message, argument: str, **kwargs): db = Database.get_current() db.auto_infection_argument = argument db.save() await edit_message( message, "✅ Аргумент автоматического заражения изменен" ) @user.on.message_handler(FromMe(), text="<prefix:service_prefix> автозаражение установить чат") @logger_decorator async def set_auto_infection_chat_wrapper(message: Message, **kwargs): db = Database.get_current() db.auto_infection_peer_id = message.peer_id db.save() await edit_message( message, "✅ Чат автоматического заражения изменен" ) @user.on.message_handler(FromMe(), text="<prefix:service_prefix> автозаражение") @logger_decorator async def show_auto_infection_wrapper(message: Message, **kwargs): db = Database.get_current() jobs = const.scheduler.get_jobs() next_run_time = None for job in jobs: if job.id == 'auto_infection_timer': next_run_time = job.next_run_time text = ( f"☢ Состояние автоматического заражения:\n" f"{'Запущен' if db.auto_infection else 'Остановлен'}\n" f"⏱ Интервал заражения: {db.auto_infection_interval} сек.\n" f"🧨 Аргумент заражения: {db.auto_infection_argument}\n" f"💬 Чат заражения: {db.auto_infection_peer_id}\n\n" ) if next_run_time: text += f'⏳ Следующий запуск: {next_run_time.strftime("%Y-%m-%d %H:%M:%S")}' await edit_message(message, text)
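show_auto_infection_wrapper scans every job from get_jobs() to find a single id. Assuming const.scheduler is an APScheduler scheduler, which the get_jobs() call suggests, the same lookup can be done directly; a sketch under that assumption, not a drop-in from the source:

# Direct lookup instead of scanning get_jobs(); assumes `const.scheduler`
# is an apscheduler scheduler, whose get_job() returns None for unknown ids.
job = const.scheduler.get_job('auto_infection_timer')
next_run_time = job.next_run_time if job else None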
import logging import typing from eth_utils import remove_0x_prefix from web3.utils.events import get_event_data from ocean_lib.models import balancer_constants from .btoken import BToken from ocean_lib.ocean import util from ocean_lib.web3_internal.wallet import Wallet logger = logging.getLogger(__name__) class BPool(BToken): CONTRACT_NAME = 'BPool' def __init__(self, *args, **kwargs): BToken.__init__(self, *args, **kwargs) self._ccontract = self.contract_concise def __str__(self): s = [] s += ["BPool:"] s += [f" pool_address={self.address}"] s += [f" controller address = {self.getController()}"] s += [f" isPublicSwap = {self.isPublicSwap()}"] s += [f" isFinalized = {self.isFinalized()}"] swap_fee = util.from_base_18(self.getSwapFee()) s += [" swapFee = %.2f%%" % (swap_fee * 100.0)] s += [f" numTokens = {self.getNumTokens()}"] cur_addrs = self.getCurrentTokens() cur_symbols = [BToken(addr).symbol() for addr in cur_addrs] s += [f" currentTokens (as symbols) = {', '.join(cur_symbols)}"] if self.isFinalized(): final_addrs = self.getFinalTokens() final_symbols = [BToken(addr).symbol() for addr in final_addrs] s += [f" finalTokens (as symbols) = {final_symbols}"] s += [f" is bound:"] for addr, symbol in zip(cur_addrs, cur_symbols): s += [f" {symbol}: {self.isBound(addr)}"] s += [f" weights (fromBase):"] for addr, symbol in zip(cur_addrs, cur_symbols): denorm_w = util.from_base_18(self.getDenormalizedWeight(addr)) norm_w = util.from_base_18(self.getNormalizedWeight(addr)) s += [f" {symbol}: denorm_w={denorm_w}, norm_w={norm_w} "] total_denorm_w = util.from_base_18(self.getTotalDenormalizedWeight()) s += [f" total_denorm_w={total_denorm_w}"] s += [f" balances (fromBase):"] for addr, symbol in zip(cur_addrs, cur_symbols): balance_base = self.getBalance(addr) dec = BToken(addr).decimals() balance = util.from_base(balance_base, dec) s += [f" {symbol}: {balance}"] return "\n".join(s) def setup(self, data_token: str, data_token_amount: int, data_token_weight: int, base_token: str, base_token_amount: int, base_token_weight: int, swap_fee: int, from_wallet: Wallet) -> str: tx_id = self.send_transaction( 'setup', (data_token, data_token_amount, data_token_weight, base_token, base_token_amount, base_token_weight, swap_fee), from_wallet, {"gas": balancer_constants.GASLIMIT_BFACTORY_NEWBPOOL} ) return tx_id # ============================================================ # reflect BPool Solidity methods: everything at Balancer Interfaces "BPool" # docstrings are adapted from Balancer API # https://docs.balancer.finance/smart-contracts/api # ==== View Functions def isPublicSwap(self) -> bool: return self._ccontract.isPublicSwap() def isFinalized(self) -> bool: """ The `finalized` state lets users know that the weights, balances, and fees of this pool are immutable. In the `finalized` state, `SWAP`, `JOIN`, and `EXIT` are public. `CONTROL` capabilities are disabled. (https://docs.balancer.finance/smart-contracts/api#access-control) """ return self._ccontract.isFinalized() def isBound(self, token_address: str) -> bool: """ A bound token has a valid balance and weight. A token cannot be bound without valid parameters which will enable e.g. `getSpotPrice` in terms of other tokens. However, disabling `isSwapPublic` will disable any interaction with this token in practice (assuming there are no existing tokens in the pool, which can always `exitPool`). """ return self._ccontract.isBound(token_address) def getNumTokens(self) -> int: """ How many tokens are bound to this pool. 
""" return self._ccontract.getNumTokens() def getCurrentTokens(self) -> typing.List[str]: """@return -- list of [token_addr:str]""" return self._ccontract.getCurrentTokens() def getFinalTokens(self) -> typing.List[str]: """@return -- list of [token_addr:str]""" return self._ccontract.getFinalTokens() def getDenormalizedWeight(self, token_address: str) -> int: return self._ccontract.getDenormalizedWeight(token_address) def getTotalDenormalizedWeight(self) -> int: return self._ccontract.getTotalDenormalizedWeight() def getNormalizedWeight(self, token_address: str) -> int: """ The normalized weight of a token. The combined normalized weights of all tokens will sum up to 1. (Note: the actual sum may be 1 plus or minus a few wei due to division precision loss) """ return self._ccontract.getNormalizedWeight(token_address) def getBalance(self, token_address: str) -> int: return self._ccontract.getBalance(token_address) def getSwapFee(self) -> int: return self._ccontract.getSwapFee() def getController(self) -> str: """ Get the "controller" address, which can call `CONTROL` functions like `rebind`, `setSwapFee`, or `finalize`. """ return self._ccontract.getController() # ==== Controller Functions def setSwapFee(self, swapFee_base: int, from_wallet: Wallet): """ Caller must be controller. Pool must NOT be finalized. """ return self.send_transaction('setSwapFee', (swapFee_base,), from_wallet) def setController(self, manager_address: str, from_wallet: Wallet): return self.send_transaction('setController', (manager_address,), from_wallet) def setPublicSwap(self, public: bool, from_wallet: Wallet): """ Makes `isPublicSwap` return `_publicSwap`. Requires caller to be controller and pool not to be finalized. Finalized pools always have public swap. """ return self.send_transaction('setPublicSwap', (public, ), from_wallet) def finalize(self, from_wallet: Wallet): """ This makes the pool **finalized**. This is a one-way transition. `bind`, `rebind`, `unbind`, `setSwapFee` and `setPublicSwap` will all throw `ERR_IS_FINALIZED` after pool is finalized. This also switches `isSwapPublic` to true. """ return self.send_transaction('finalize', (), from_wallet) def bind(self, token_address: str, balance_base: int, weight_base: int, from_wallet: Wallet): """ Binds the token with address `token`. Tokens will be pushed/pulled from caller to adjust match new balance. Token must not already be bound. `balance` must be a valid balance and denorm must be a valid denormalized weight. `bind` creates the token record and then calls `rebind` for updating pool weights and token transfers. Possible errors: -`ERR_NOT_CONTROLLER` -- caller is not the controller -`ERR_IS_BOUND` -- T is already bound -`ERR_IS_FINALIZED` -- isFinalized() is true -`ERR_ERC20_FALSE` -- ERC20 token returned false -`ERR_MAX_TOKENS` -- Only 8 tokens are allowed per pool -unspecified error thrown by token """ return self.send_transaction('bind', (token_address, balance_base, weight_base), from_wallet) def rebind(self, token_address: str, balance_base: int, weight_base: int, from_wallet: Wallet): """ Changes the parameters of an already-bound token. Performs the same validation on the parameters. """ return self.send_transaction('rebind', (token_address, balance_base, weight_base), from_wallet) def unbind(self, token_address: str, from_wallet: Wallet): """ Unbinds a token, clearing all of its parameters. Exit fee is charged and the remaining balance is sent to caller. 
""" return self.send_transaction('unbind', (token_address,), from_wallet) def gulp(self, token_address: str, from_wallet: Wallet): """ This syncs the internal `balance` of `token` within a pool with the actual `balance` registered on the ERC20 contract. This is useful to wallet for airdropped tokens or any tokens sent to the pool without using the `join` or `joinSwap` methods. As an example, pools that contain `COMP` tokens can have the `COMP` balance updated with the rewards sent by Compound (https://etherscan.io/tx/0xeccd42bf2b8a180a561c026717707d9024a083059af2f22c197ee511d1010e23). In order for any airdrop balance to be gulped, the token must be bound to the pool. So if a shared pool (which is immutable) does not have a given token, any airdrops in that token will be locked in the pool forever. """ return self.send_transaction('gulp', (token_address,), from_wallet) # ==== Price Functions def getSpotPrice(self, tokenIn_address:str, tokenOut_address: str) -> int: return self._ccontract.getSpotPrice(tokenIn_address, tokenOut_address) def getSpotPriceSansFee( self, tokenIn_address: str, tokenOut_address: str) -> int: return self._ccontract.getSpotPriceSansFee( tokenIn_address, tokenOut_address) # ==== Trading and Liquidity Functions def joinPool( self, poolAmountOut_base: int, maxAmountsIn_base: typing.List[int], from_wallet: Wallet): """ Join the pool, getting `poolAmountOut` pool tokens. This will pull some of each of the currently trading tokens in the pool, meaning you must have called `approve` for each token for this pool. These values are limited by the array of `maxAmountsIn` in the order of the pool tokens. """ return self.send_transaction('joinPool', (poolAmountOut_base, maxAmountsIn_base), from_wallet) def exitPool( self, poolAmountIn_base: int, minAmountsOut_base : typing.List[int], from_wallet: Wallet): """ Exit the pool, paying `poolAmountIn` pool tokens and getting some of each of the currently trading tokens in return. These values are limited by the array of `minAmountsOut` in the order of the pool tokens. """ return self.send_transaction('exitPool', (poolAmountIn_base, minAmountsOut_base), from_wallet) def swapExactAmountIn( self, tokenIn_address: str, tokenAmountIn_base: int, tokenOut_address: str, minAmountOut_base: int, maxPrice_base: int, from_wallet: Wallet): """ Trades an exact `tokenAmountIn` of `tokenIn` taken from the caller by the pool, in exchange for at least `minAmountOut` of `tokenOut` given to the caller from the pool, with a maximum marginal price of `maxPrice`. Returns `(tokenAmountOut`, `spotPriceAfter)`, where `tokenAmountOut` is the amount of token that came out of the pool, and `spotPriceAfter` is the new marginal spot price, ie, the result of `getSpotPrice` after the call. (These values are what are limited by the arguments; you are guaranteed `tokenAmountOut >= minAmountOut` and `spotPriceAfter <= maxPrice)`. 
""" return self.send_transaction( 'swapExactAmountIn', (tokenIn_address, tokenAmountIn_base, tokenOut_address, minAmountOut_base, maxPrice_base), from_wallet ) def swapExactAmountOut( self, tokenIn_address: str, maxAmountIn_base: int, tokenOut_address: str, tokenAmountOut_base: int, maxPrice_base: int, from_wallet: Wallet): return self.send_transaction( 'swapExactAmountOut', (tokenIn_address, maxAmountIn_base, tokenOut_address, tokenAmountOut_base, maxPrice_base), from_wallet ) def joinswapExternAmountIn( self, tokenIn_address: str, tokenAmountIn_base: int, minPoolAmountOut_base: int, from_wallet: Wallet): """ Pay `tokenAmountIn` of token `tokenIn` to join the pool, getting `poolAmountOut` of the pool shares. """ return self.send_transaction('joinswapExternAmountIn', (tokenIn_address, tokenAmountIn_base, minPoolAmountOut_base), from_wallet) def joinswapPoolAmountOut( self, tokenIn_address: str, poolAmountOut_base: int, maxAmountIn_base: int, from_wallet: Wallet): """ Specify `poolAmountOut` pool shares that you want to get, and a token `tokenIn` to pay with. This costs `maxAmountIn` tokens (these went into the pool). """ return self.send_transaction('joinswapPoolAmountOut', (tokenIn_address, poolAmountOut_base, maxAmountIn_base), from_wallet) def exitswapPoolAmountIn( self, tokenOut_address: str, poolAmountIn_base: int, minAmountOut_base: int, from_wallet: Wallet): """ Pay `poolAmountIn` pool shares into the pool, getting `tokenAmountOut` of the given token `tokenOut` out of the pool. """ return self.send_transaction('exitswapPoolAmountIn', (tokenOut_address, poolAmountIn_base, minAmountOut_base), from_wallet) def exitswapExternAmountOut( self, tokenOut_address: str, tokenAmountOut_base: int, maxPoolAmountIn_base: int, from_wallet: Wallet): """ Specify `tokenAmountOut` of token `tokenOut` that you want to get out of the pool. This costs `poolAmountIn` pool shares (these went into the pool). 
""" return self.send_transaction( 'exitswapExternAmountOut', (tokenOut_address, tokenAmountOut_base, maxPoolAmountIn_base), from_wallet ) # ==== Balancer Pool as ERC20 def totalSupply(self) -> int: return self._ccontract.totalSupply() def balanceOf(self, whom_address: str) -> int: return self._ccontract.balanceOf(whom_address) def allowance(self, src_address: str, dst_address: str) -> int: return self._ccontract.allowance(src_address, dst_address) def approve(self, dst_address: str, amt_base: int, from_wallet: Wallet): return self.send_transaction('approve', (dst_address, amt_base), from_wallet) def transfer(self, dst_address: str, amt_base: int, from_wallet: Wallet): return self.send_transaction('transfer', (dst_address, amt_base), from_wallet) def transferFrom(self, src_address: str, dst_address: str, amt_base: int, from_wallet: Wallet): return self.send_transaction('transferFrom', (dst_address, src_address, amt_base), from_wallet) # ===== Calculators def calcSpotPrice( self, tokenBalanceIn_base: int, tokenWeightIn_base : int, tokenBalanceOut_base: int, tokenWeightOut_base : int, swapFee_base : int) -> int: """Returns spotPrice_base""" return self._ccontract.calcSpotPrice( tokenBalanceIn_base, tokenWeightIn_base, tokenBalanceOut_base, tokenWeightOut_base, swapFee_base) def calcOutGivenIn( self, tokenBalanceIn_base: int, tokenWeightIn_base : int, tokenBalanceOut : int, tokenWeightOut_base : int, tokenAmountIn_base : int, swapFee_base : int) -> int: """Returns tokenAmountOut_base""" return self._ccontract.calcOutGivenIn( tokenBalanceIn_base, tokenWeightIn_base, tokenBalanceOut, tokenWeightOut_base, tokenAmountIn_base, swapFee_base) def calcInGivenOut( self, tokenBalanceIn_base: int, tokenWeightIn_base : int, tokenBalanceOut_base : int, tokenWeightOut_base : int, tokenAmountOut_base: int, swapFee_base: int) -> int: """Returns tokenAmountIn_base""" return self._ccontract.calcInGivenOut( tokenBalanceIn_base, tokenWeightIn_base, tokenBalanceOut_base, tokenWeightOut_base, tokenAmountOut_base, swapFee_base) def calcPoolOutGivenSingleIn( self, tokenBalanceIn_base: int, tokenWeightIn_base: int, poolSupply_base: int, totalWeight_base: int, tokenAmountIn_base: int, swapFee_base: int) -> int: """Returns poolAmountOut_base""" return self._ccontract.calcPoolOutGivenSingleIn( tokenBalanceIn_base, tokenWeightIn_base, poolSupply_base, totalWeight_base, tokenAmountIn_base, swapFee_base) def calcSingleInGivenPoolOut( self, tokenBalanceIn_base: int, tokenWeightIn_base: int, poolSupply_base: int, totalWeight_base: int, poolAmountOut_base: int, swapFee_base: int) -> int: """Returns tokenAmountIn_base""" return self._ccontract.calcSingleInGivenPoolOut( tokenBalanceIn_base, tokenWeightIn_base, poolSupply_base, totalWeight_base, poolAmountOut_base, swapFee_base) def calcSingleOutGivenPoolIn( self, tokenBalanceOut_base: int, tokenWeightOut_base: int, poolSupply_base: int, totalWeight_base: int, poolAmountIn_base: int, swapFee_base: int) -> int: """Returns tokenAmountOut_base""" return self._ccontract.calcSingleOutGivenPoolIn( tokenBalanceOut_base, tokenWeightOut_base, poolSupply_base, totalWeight_base, poolAmountIn_base, swapFee_base) def calcPoolInGivenSingleOut( self, tokenBalanceOut_base: int, tokenWeightOut_base: int, poolSupply_base: int, totalWeight_base: int, tokenAmountOut_base: int, swapFee_base: int) -> int: """Returns poolAmountIn_base""" return self._ccontract.calcPoolInGivenSingleOut( tokenBalanceOut_base, tokenWeightOut_base, poolSupply_base, totalWeight_base, tokenAmountOut_base, swapFee_base) # 
===== Events def get_liquidity_logs(self, event_name, web3, from_block, to_block=None, user_address=None, this_pool_only=True): """ :param event_name: str, one of LOG_JOIN, LOG_EXIT, LOG_SWAP """ topic0 = self.get_event_signature(event_name) to_block = to_block or 'latest' _filter = { 'fromBlock': from_block, 'toBlock': to_block, 'topics': [topic0,] } if this_pool_only: _filter['address'] = self.address if user_address: assert web3.isChecksumAddress(user_address) _filter['topics'].append(f'0x000000000000000000000000{remove_0x_prefix(user_address).lower()}') event = getattr(self.events, event_name) event_abi = event().abi try: logs = web3.eth.getLogs(_filter) logs = [get_event_data(event_abi, l) for l in logs] except ValueError as e: logger.error(f'get_join_logs failed -> web3.eth.getLogs (filter={_filter}) failed: ' f'{e}..') logs = [] return logs def get_join_logs(self, web3, from_block, to_block=None, user_address=None, this_pool_only=True): return self.get_liquidity_logs('LOG_JOIN', web3, from_block, to_block, user_address, this_pool_only) def get_exit_logs(self, web3, from_block, to_block=None, user_address=None, this_pool_only=True): return self.get_liquidity_logs('LOG_EXIT', web3, from_block, to_block, user_address, this_pool_only) def get_swap_logs(self, web3, from_block, to_block=None, user_address=None, this_pool_only=True): return self.get_liquidity_logs('LOG_SWAP', web3, from_block, to_block, user_address, this_pool_only)
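calcSpotPrice delegates to the contract's base-18 fixed-point implementation of the Balancer spot-price formula, spotPrice = (balanceIn / weightIn) / (balanceOut / weightOut) * 1 / (1 - swapFee). A plain-float sketch of the same formula, useful only as an off-chain sanity check and not a substitute for the on-chain math:

def spot_price_float(balance_in: float, weight_in: float,
                     balance_out: float, weight_out: float,
                     swap_fee: float) -> float:
    # Balancer whitepaper formula in floats; expect rounding drift versus
    # the contract's base-18 fixed-point result.
    return (balance_in / weight_in) / (balance_out / weight_out) / (1.0 - swap_fee)

# 250 tokens in vs 1000 tokens out, 50/50 weights, 0.1% fee -> ~0.25025
print(spot_price_float(250, 0.5, 1000, 0.5, 0.001))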
""" Client ------ The database client module. """ from contextlib import contextmanager import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.engine.url import make_url from sqlalchemy.orm.exc import UnmappedError from sqlalchemy.orm.session import Session from . import core from .model import declarative_base, get_model_class_registry from .query import SQLQuery from .utils import FrozenDict, is_sequence, raise_for_class_if_not_supported try: # sqlalchemy < 1.4 from sqlalchemy.ext.declarative.clsregistry import _MultipleClassMarker except ImportError: # pragma: no cover # sqlalchemy 1.4+ from sqlalchemy.orm.clsregistry import _MultipleClassMarker class SQLClientSettings: """Container class for :class:`SQLClient` configuration options for SQLAlchemy engine and session objects.""" def __init__( self, database_uri, autocommit=None, autoflush=None, expire_on_commit=None, isolation_level=None, pool_size=None, pool_timeout=None, pool_recycle=None, pool_pre_ping=None, max_overflow=None, encoding=None, convert_unicode=None, echo=None, echo_pool=None, engine_options=None, session_options=None, ): self.database_uri = database_uri self.autocommit = autocommit self.autoflush = autoflush self.expire_on_commit = expire_on_commit self.isolation_level = isolation_level self.pool_size = pool_size self.pool_timeout = pool_timeout self.pool_recycle = pool_recycle self.pool_pre_ping = pool_pre_ping self.max_overflow = max_overflow self.encoding = encoding self.convert_unicode = convert_unicode self.echo = echo self.echo_pool = echo_pool self._extra_engine_options = engine_options or {} self._extra_session_options = session_options or {} @classmethod def from_config(cls, config, engine_options=None, session_options=None): keymap = { "SQL_DATABASE_URI": "database_uri", "SQL_AUTOCOMMIT": "autocommit", "SQL_AUTOFLUSH": "autoflush", "SQL_EXPIRE_ON_COMMIT": "expire_on_commit", "SQL_ISOLATION_LEVEL": "isolation_level", "SQL_POOL_SIZE": "pool_size", "SQL_POOL_TIMEOUT": "pool_timeout", "SQL_POOL_RECYCLE": "pool_recycle", "SQL_POOL_PRE_PING": "pool_pre_ping", "SQL_MAX_OVERFLOW": "max_overflow", "SQL_ECHO": "echo", "SQL_ECHO_POOL": "echo_pool", } settings = _make_options(config, keymap) return cls(engine_options=engine_options, session_options=session_options, **settings) @property def config(self): return { "SQL_DATABASE_URI": self.database_uri, "SQL_AUTOCOMMIT": self.autocommit, "SQL_AUTOFLUSH": self.autoflush, "SQL_EXPIRE_ON_COMMIT": self.expire_on_commit, "SQL_ISOLATION_LEVEL": self.isolation_level, "SQL_POOL_SIZE": self.pool_size, "SQL_POOL_TIMEOUT": self.pool_timeout, "SQL_POOL_RECYCLE": self.pool_recycle, "SQL_POOL_PRE_PING": self.pool_pre_ping, "SQL_MAX_OVERFLOW": self.max_overflow, "SQL_ECHO": self.echo, "SQL_ECHO_POOL": self.echo_pool, } @property def engine_options(self): opts = { "echo": self.echo, "echo_pool": self.echo_pool, "encoding": self.encoding, "convert_unicode": self.convert_unicode, "isolation_level": self.isolation_level, "pool_size": self.pool_size, "pool_timeout": self.pool_timeout, "pool_recycle": self.pool_recycle, "pool_pre_ping": self.pool_pre_ping, "max_overflow": self.max_overflow, } opts.update(self._extra_engine_options) return _make_options(opts) @property def session_options(self): opts = { "autocommit": self.autocommit, "autoflush": self.autoflush, "expire_on_commit": self.expire_on_commit, } opts.update(self._extra_session_options) return _make_options(opts) class SQLClient: """ Database client for interacting with a database. 
The following configuration values can be passed into a new :class:`SQLClient` instance as a ``dict`` or as keyword arguments (see Args below). Alternatively, this class can be subclassed and :attr:`DEFAULT_CONFIG` overridden with custom defaults. The order or precedence for configuration sources is: - :attr:`DEFAULT_CONFIG` - ``config`` - keyword arguments Where keyword arguments have the highest precedence. ======================== ====================================================================== **SQL_DATABASE_URI** URI used to connect to the database. Defaults to ``sqlite://`` (an in-memory sqlite database). **SQL_AUTOCOMMIT** When ``True``, the ``Session`` does not keep a persistent transaction running, and will acquire connections from the engine on an as-needed basis, returning them immediately after their use. Defaults to ``False``. **SQL_AUTOFLUSH** When ``True``, all query operations will issue a ``flush()`` call to the ``Session`` before proceeding. This is a convenience feature so that ``flush()`` need not be called repeatedly in order for database queries to retrieve results. Defaults to ``True``. **SQL_EXPIRE_ON_COMMIT** When ``True`` all instances will be fully expired after each ``commit()``, so that all attribute/object access subsequent to a completed transaction will load from the most recent database state. Defaults to ``True``. **SQL_ISOLATION_LEVEL** String parameter interpreted by various dialects in order to affect the transaction isolation level of the database connection. The parameter essentially accepts some subset of these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``, ``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``. Behavior here varies per backend, and individual dialects should be consulted directly. Defaults to ``None``. **SQL_POOL_SIZE** The size of the database pool. Defaults to the engine's default (usually ``5``). **SQL_POOL_TIMEOUT** Specifies the connection timeout for the pool. Defaults to ``10``. **SQL_POOL_RECYCLE** Number of seconds after which a connection is automatically recycled. **SQL_POOL_PRE_PING** When ``True` will enable SQLAlchemy's connection pool “pre-ping” feature that tests connections for liveness upon each checkout. Defaults to ``False``. Requires SQLAlchemy >= 1.2. **SQL_MAX_OVERFLOW** Controls the number of connections that can be created after the pool reached its maximum size. When those additional connections are returned to the pool, they are disconnected and discarded. **SQL_ENCODING** The string encoding used by SQLAlchemy for string encode/decode operations which occur within SQLAlchemy, outside of the DBAPI. Defaults to `utf-8`. **SQL_CONVERT_UNICODE** When ``True`` it sets the default behavior of ``convert_unicode`` on the ``String`` type to ``True``, regardless of a setting of ``False`` on an individual ``String`` type, thus causing all ``String`` -based columns to accommodate Python unicode objects. **SQL_ECHO** When ``True`` have SQLAlchemy log all SQL statements. Defaults to ``False``. **SQL_ECHO_POOL** When ``True`` have SQLAlchemy log all log all checkouts/checkins of the connection pool. Defaults to ``False``. ======================== ====================================================================== Args: config (dict|str): Database engine configuration options or database URI string. Defaults to ``None`` which uses an in-memory SQLite database. model_class (object): A SQLAlchemy ORM declarative base model. 
query_class (Query, optional): SQLAlchemy Query derived class to use as the default class when creating a new query object. session_class (Session, optional): SQLAlchemy Session derived class to use by the session maker. session_options (dict, optional): Additional session options use when creating the database session. engine_options (dict, optional): Additional engine options use when creating the database engine. database_uri (str, optional): See configuration table above. autocommit (bool, optional): See configuration table above. autoflush (bool, optional): See configuration table above. expire_on_commit (bool, optional): See configuration table above. isolation_level (str, optional): See configuration table above. pool_size (int, optional): See configuration table above. pool_timeout (int|float, optional): See configuration table above. pool_recycle (int|float, optional): See configuration table above. pool_pre_ping (bool, optional): See configuration table above. max_overflow (int, optional): See configuration table above. encoding (str, optional): See configuration table above. convert_unicode (bool, optional): See configuration table above. echo (bool, optional): See configuration table above. echo_pool (bool, optional): See configuration table above. """ #: The default client configuration for this class. Override in a subclass to set new class-wide #: defaults. DEFAULT_CONFIG = FrozenDict( { "SQL_DATABASE_URI": "sqlite://", "SQL_ECHO": False, "SQL_ECHO_POOL": False, "SQL_ENCODING": None, "SQL_CONVERT_UNICODE": None, "SQL_ISOLATION_LEVEL": None, "SQL_POOL_SIZE": None, "SQL_POOL_TIMEOUT": None, "SQL_POOL_RECYCLE": None, "SQL_MAX_OVERFLOW": None, "SQL_AUTOCOMMIT": False, "SQL_AUTOFLUSH": True, "SQL_EXPIRE_ON_COMMIT": True, "SQL_POOL_PRE_PING": None, } ) def __init__( self, config=None, model_class=None, query_class=SQLQuery, session_class=Session, session_options=None, engine_options=None, database_uri=None, autocommit=None, autoflush=None, expire_on_commit=None, isolation_level=None, pool_size=None, pool_timeout=None, pool_recycle=None, pool_pre_ping=None, max_overflow=None, sql_echo=None, sql_echo_pool=None, ): if model_class is None: # pragma: no cover model_class = declarative_base() if isinstance(config, str): config = {"SQL_DATABASE_URI": config} override_config = _make_options( { "SQL_DATABASE_URI": database_uri, "SQL_AUTOCOMMIT": autocommit, "SQL_AUTOFLUSH": autoflush, "SQL_EXPIRE_ON_COMMIT": expire_on_commit, "SQL_ISOLATION_LEVEL": isolation_level, "SQL_POOL_SIZE": pool_size, "SQL_POOL_TIMEOUT": pool_timeout, "SQL_POOL_RECYCLE": pool_recycle, "SQL_POOL_PRE_PING": pool_pre_ping, "SQL_MAX_OVERFLOW": max_overflow, "SQL_ECHO": sql_echo, "SQL_ECHO_POOL": sql_echo_pool, } ) cfg = self.DEFAULT_CONFIG.copy() cfg.update(config or {}) cfg.update(override_config) self.settings = SQLClientSettings.from_config( cfg, engine_options=engine_options, session_options=session_options ) self.model_class = model_class self.query_class = query_class self.session_class = session_class self.engine = self.create_engine(self.settings.database_uri, self.settings.engine_options) self.session = self.create_session( self.engine, self.settings.session_options, session_class=self.session_class, query_class=self.query_class, ) self.update_models_registry() def create_engine(self, uri, options=None): """ Factory function to create a database engine using `config` options. Args: uri (str): Database URI string. options (dict, optional): Engine configuration options. 
Returns: Engine: SQLAlchemy engine instance. """ if options is None: # pragma: no cover options = {} return sa.create_engine(make_url(uri), **options) def create_session(self, bind, options=None, session_class=Session, query_class=SQLQuery): """ Factory function to create a scoped session using `bind`. Args: bind (Engine|Connection): Database engine or connection instance. options (dict, optional): Session configuration options. session_class (obj, optional): Session class to use when creating new session instances. Defaults to :class:`.Session`. query_class (obj, optional): Query class used for ``session.query`` instances. Defaults to :class:`.SQLQuery`. Returns: Session: SQLAlchemy session instance bound to `bind`. """ if options is None: # pragma: no cover options = {} else: options = options.copy() if query_class: options["query_cls"] = query_class scopefunc = options.pop("scopefunc", None) session_factory = orm.sessionmaker(bind=bind, class_=session_class, **options) return orm.scoped_session(session_factory, scopefunc=scopefunc) def update_models_registry(self): """Update :attr:`models` registry as computed from :attr:`model_class`.""" self.models = self.create_models_registry(self.model_class) def create_models_registry(self, model_class): """Return model registry ``dict`` with model names as keys and corresponding model classes as values.""" models = {} class_registry = get_model_class_registry(model_class) if not class_registry: return models for name, model in class_registry.items(): if name.startswith("_sa_"): continue if isinstance(model, _MultipleClassMarker): # Handle case where there are multiple ORM models with the same # base class name but located in different submodules. model = list(model) if len(model) == 1: # pragma: no cover models[name] = model[0] else: for obj in list(model): modobj = f"{obj.__module__}.{obj.__name__}" models[modobj] = obj else: models[name] = model return models @property def config(self): """Proxy property to configuration settings.""" return self.settings.config @property def url(self): """Proxy property to database engine's database URL.""" return self.engine.url @property def database(self): """Proxy property to database engine's database name.""" return self.engine.url.database def get_metadata(self): """Return `MetaData` from :attr:`model` or raise an exception if :attr:`model` was never given.""" if self.metadata is None: # pragma: no cover raise UnmappedError("Missing declarative base model") return self.metadata @property def metadata(self): """Return `MetaData` from :attr:`model` or ``None``.""" return getattr(self.model_class, "metadata", None) @property def tables(self): """Return ``dict`` of table instances found in :attr:`metadata` with table names as keys and corresponding table objects as values.""" return self.metadata.tables def create_all(self): """Create all metadata (tables, etc) contained within :attr:`metadata`.""" self.get_metadata().create_all(self.engine) def drop_all(self): """Drop all metadata (tables, etc) contained within :attr:`metadata`.""" self.get_metadata().drop_all(self.engine) def reflect(self): """Reflect tables from database into :attr:`metadata`.""" self.get_metadata().reflect(self.engine) @property def session(self): """ Proxy to threadlocal session object returned by scoped session object. Note: Generally, the scoped session is sufficient to work with directly. However, the scoped session doesn't provide access to the custom session class used by the session factory. 
This property returns an instance of our custom session class. Multiple calls to the scoped session always returns the same active threadlocal session (i.e. ``self._Session() is self._Session()``). See Also: http://docs.sqlalchemy.org/en/latest/orm/contextual.html """ return self._Session() @session.setter def session(self, Session): """Set private :attr:`_Session`.""" self._Session = Session @property def add(self): """Proxy property to :meth:`session.add`.""" return self.session.add @property def add_all(self): """Proxy property to :meth:`session.add_all`.""" return self.session.add_all @property def delete(self): """Proxy property to :meth:`session.delete`.""" return self.session.delete @property def merge(self): """Proxy property to :meth:`session.merge`.""" return self.session.merge @property def execute(self): """Proxy property to :meth:`session.execute`.""" return self.session.execute @property def prepare(self): """Proxy property to :meth:`session.prepare`.""" return self.session.prepare @property def no_autoflush(self): """Proxy property to :meth:`session.no_autoflush`.""" return self.session.no_autoflush @property def scalar(self): """Proxy property to :meth:`session.scalar`.""" return self.session.scalar @property def close(self): """Proxy property to :meth:`session.close`.""" return self.session.close @property def close_all(self): """Proxy property to :meth:`_Session.close_all`.""" return self._Session.close_all @property def invalidate(self): """Proxy property to :meth:`session.invalidate`.""" return self.session.invalidate @property def is_active(self): """Proxy property to :attr:`session.is_active`.""" return self.session.is_active @property def is_modified(self): """Proxy property to :meth:`session.is_modified`.""" return self.session.is_modified @property def remove(self): """Proxy propery to :meth:`_Session.remove`.""" return self._Session.remove def disconnect(self): """Disconnect all database sessions and connections.""" self.remove() self.engine.dispose() def commit(self): """ Commit a session transaction but rollback if an error occurs. This helps ensure that the session is not left in an unstable state. """ try: self.session.commit() except Exception: # pragma: no cover self.session.rollback() raise @property def rollback(self): """Proxy property to :meth:`session.rollback`.""" return self.session.rollback @property def flush(self): """Proxy property to :meth:`session.flush`.""" return self.session.flush @property def refresh(self): """Proxy property to :meth:`session.refresh`.""" return self.session.refresh @property def expire(self): """Proxy property to :meth:`session.expire`.""" return self.session.expire @property def expire_all(self): """Proxy property to :meth:`session.expire`.""" return self.session.expire_all def expunge(self, *instances): """Remove all `instances` from :attr:`session`.""" for instance in instances: self.session.expunge(instance) @property def expunge_all(self): """Proxy property to :meth:`session.expunge`.""" return self.session.expunge_all @property @raise_for_class_if_not_supported def prune(self): # pragma: no cover """ Proxy property to :meth:`session.prune`. Warning: This is no longer supported in SQLAlchemy>=1.4. """ try: return self.session.prune except AttributeError: return NotImplemented @property def query(self): """Proxy property to :meth:`session.query`.""" return self.session.query def ping(self): """ Ping the database to check whether the connection is valid. Returns: bool: ``True`` when connection check passes. 
Raises: sqlalchemy.exc.SQLAlchemyError: When the connection check fails. """ conn = self.engine.connect() # Run a SELECT 1. Use a sa.select() so that the SELECT of a scalar value without a table is # appropriately formatted for the backend. try: conn.scalar(sa.select([1])) except sa.exc.DBAPIError as exc: # Catch SQLAlchemy's DBAPIError, which is a wrapper for the DBAPI's exception. It # includes a "connection_invalidated" attribute which specifies if this connection is a # "disconnect" condition, which is based on inspection of the original exception by the # dialect in use. if exc.connection_invalidated: # Run the same SELECT again. The connection will re-validate itself and establish a # new connection. The disconnect detection here also causes the whole connection # pool to be invalidated so that all stale connections are discarded. conn.scalar(sa.select([1])) else: raise conn.close() return True @contextmanager def transaction(self, commit=True, rollback=False, autoflush=None): """ Nestable session transaction context manager where only a single commit will be issued once all contexts have been exited. If an exception occurs either at commit time or before, the transaction will be rolled back and the original exception re-raised. Args: commit (bool, optional): Whether to commit the transaction or leave it open. Defaults to ``True``. rollback (bool, optional): Whether to rollback the transaction. Defaults to ``False``. WARNING: This overrides `commit`. autoflush (bool, optional): Whether to override ``session.autoflush``. Original ``session.autoflush`` will be restored after transaction. Defaults to ``None`` which doesn't modify ``session.autoflush``. Yields: :attr:`session` """ with core.transaction(self.session, commit=commit, rollback=rollback, autoflush=autoflush): yield self.session def save(self, models, before=None, after=None, identity=None): """ Save `models` into the database using insert, update, or upsert-on-primary-key. The `models` argument can be any of the following: - Model instance - ``list``/``tuple`` of Model instances Args: models (mixed): Models to save to database. before (function, optional): Function to call before each model is saved via ``session.add``. Function should have signature ``before(model, is_new)``. after (function, optional): Function to call after each model is saved via ``session.add``. Function should have signature ``after(model, is_new)``. identity (function, optional): Function used to return an idenity map for a given model. Function should have the signature ``identity(model)``. Defaults to :func:`.core.primary_identity_map`. Returns: Model: If a single item passed in. list: A ``list`` of Model instaces if multiple items passed in. """ if not is_sequence(models): models = [models] as_list = False else: models = list(models) as_list = True for idx, model in enumerate(models): if model.__class__ in self.models.values(): continue self.update_models_registry() if model.__class__ not in self.models.values(): if as_list: idx_msg = f"Item with index {idx} and value " else: idx_msg = "" raise TypeError( f"Type of value given to save() method is not a recognized SQLALchemy" f" declarative class that derives from {self.model_class}. {idx_msg} {model!r}" f" is an instance of {model.__class__!r}." 
) return core.save( self.session, models if as_list else models[0], before=before, after=after, identity=identity, ) def bulk_insert(self, mapper, mappings): """ Perform a bulk insert into table/statement represented by `mapper` while utilizing a special syntax that replaces the tradtional ``executemany()`` DBAPI call with a multi-row VALUES clause for a single INSERT statement. See :meth:`bulk_insert_many` for bulk inserts using ``executemany()``. Args: mapper: An ORM class or SQLAlchemy insert-statement object. mappings (list): List of ``dict`` objects to insert. Returns: ResultProxy """ return core.bulk_insert(self.session, mapper, mappings) def bulk_insert_many(self, mapper, mappings): """ Perform a bulk insert into table/statement represented by `mapper` while utilizing the ``executemany()`` DBAPI call. See :meth:`bulk_insert` for bulk inserts using a multi-row VALUES clause for a single INSERT statement. Args: mapper: An ORM class or SQLAlchemy insert-statement object. mappings (list): List of ``dict`` objects to insert. Returns: ResultProxy """ return core.bulk_insert_many(self.session, mapper, mappings) def bulk_common_update(self, mapper, key_columns, mappings): """ Perform a bulk UPDATE on common shared values among `mappings`. What this means is that if multiple records are being updated to the same values, then issue only a single update for that value-set using the identity of the records in the WHERE clause. Args: mapper: An ORM class or SQLAlchemy insert-statement object. key_columns (tuple): A tuple of SQLAlchemy columns that represent the identity of each row (typically this would be a table's primary key values but they can be any set of columns). mappings (list): List of ``dict`` objects to update. Returns: list[ResultProxy] """ return core.bulk_common_update(self.session, mapper, key_columns, mappings) def bulk_diff_update(self, mapper, key_columns, previous_mappings, mappings): """ Perform a bulk INSERT/UPDATE on the difference between `mappings` and `previous_mappings` such that only the values that have changed are included in the update. If a mapping in `mappings` doesn't exist in `previous_mappings`, then it will be inclued in the bulk INSERT. The bulk INSERT will be deferred to :meth:`bulk_insert`. The bulk UPDATE will be deferred to :meth:`bulk_common_update`. Args: mapper: An ORM class or SQLAlchemy insert-statement object. mappings (list): List of ``dict`` objects to update. previous_mappings (list): List of ``dict`` objects that represent the previous values of all mappings found for this update set (i.e. these are the current database records). key_columns (tuple): A tuple of SQLAlchemy columns that represent the identity of each row (typically this would be a table's primary key values but they can be any set of columns). Returns: list[ResultProxy] """ return core.bulk_diff_update(self.session, mapper, key_columns, previous_mappings, mappings) @property def bulk_insert_mappings(self): """Proxy property to :meth:`session.bulk_insert_mappings`.""" return self.session.bulk_insert_mappings @property def bulk_save_objects(self): """Proxy property to :meth:`session.bulk_save_objects`.""" return self.session.bulk_save_objects @property def bulk_update_mappings(self): """Proxy property to :meth:`session.bulk_update_mappings`.""" return self.session.bulk_update_mappings def destroy(self, data, model_class=None, synchronize_session=False): """ Delete bulk records from `data`. 
The `data` argument can be any of the following: - ``dict`` - :attr:`model_class` instance - ``list``/``tuple`` of ``dict`` objects - ``list``/``tuple`` of :attr:`model_class` instances If a ``dict`` or ``list`` of ``dict`` is passed in, then `model_class` must be provided. Args: data (mixed): Data to delete from database. synchronize_session (bool|str): Argument passed to ``Query.delete``. Returns: int: Number of deleted records. """ return core.destroy( self.session, data, model_class=model_class, synchronize_session=synchronize_session ) def __repr__(self): return f"<{self.__class__.__name__}({repr(self.url)!r})>" def __getitem__(self, item): """ Return :attr:`service_class` instance corresponding to `item`. Args: item (str): Attribute corresponding to string name of model class. Returns: :attr:`service_class`: Instance of :attr:`service_class` initialized with model class. Raises: AttributeError: When item doesn't correspond to model class name found in :attr:`metadata`. """ if not isinstance(item, str): # If anything other than a string is supplied, use the item's # __name__ as the model name to index to. item = getattr(item, "__name__", item) return getattr(self, item) def __getattr__(self, attr): """ Return :attr:`service_class` instance corresponding to `attr`. Args: attr (str): Attribute corresponding to string name of model class. Returns: :attr:`service_class`: Instance of :attr:`service_class` initialized with model class. Raises: AttributeError: When attribute doesn't correspond to model class name found in :attr:`metadata`. """ if attr not in self.models: # pragma: no cover # Potentially, this model could have been imported after creation # of this class. Since we got a bad attribute, let's go ahead and # update the registry and try again. self.update_models_registry() if attr not in self.models: # pragma: no cover raise AttributeError( f"The attribute {attr!r} is not an attribute of {self.__class__.__name__} nor is" f" it a unique model class name in the declarative model class registry of" f" {self.model_class}. Valid model names are: {", ".join(self.models)}. If a model" f" name is shown as a full module path, then that model class name is not unique" f" and cannot be referenced via attribute access." ) return self.query(self.models[attr]) def _make_options(obj, keymap=None): if keymap is None: keymap = dict(zip(obj.keys(), obj.keys())) return {new: obj[old] for old, new in keymap.items() if obj.get(old) is not None}
""" Client ------ The database client module. """ from contextlib import contextmanager import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.engine.url import make_url from sqlalchemy.orm.exc import UnmappedError from sqlalchemy.orm.session import Session from . import core from .model import declarative_base, get_model_class_registry from .query import SQLQuery from .utils import FrozenDict, is_sequence, raise_for_class_if_not_supported try: # sqlalchemy < 1.4 from sqlalchemy.ext.declarative.clsregistry import _MultipleClassMarker except ImportError: # pragma: no cover # sqlalchemy 1.4+ from sqlalchemy.orm.clsregistry import _MultipleClassMarker class SQLClientSettings: """Container class for :class:`SQLClient` configuration options for SQLAlchemy engine and session objects.""" def __init__( self, database_uri, autocommit=None, autoflush=None, expire_on_commit=None, isolation_level=None, pool_size=None, pool_timeout=None, pool_recycle=None, pool_pre_ping=None, max_overflow=None, encoding=None, convert_unicode=None, echo=None, echo_pool=None, engine_options=None, session_options=None, ): self.database_uri = database_uri self.autocommit = autocommit self.autoflush = autoflush self.expire_on_commit = expire_on_commit self.isolation_level = isolation_level self.pool_size = pool_size self.pool_timeout = pool_timeout self.pool_recycle = pool_recycle self.pool_pre_ping = pool_pre_ping self.max_overflow = max_overflow self.encoding = encoding self.convert_unicode = convert_unicode self.echo = echo self.echo_pool = echo_pool self._extra_engine_options = engine_options or {} self._extra_session_options = session_options or {} @classmethod def from_config(cls, config, engine_options=None, session_options=None): keymap = { "SQL_DATABASE_URI": "database_uri", "SQL_AUTOCOMMIT": "autocommit", "SQL_AUTOFLUSH": "autoflush", "SQL_EXPIRE_ON_COMMIT": "expire_on_commit", "SQL_ISOLATION_LEVEL": "isolation_level", "SQL_POOL_SIZE": "pool_size", "SQL_POOL_TIMEOUT": "pool_timeout", "SQL_POOL_RECYCLE": "pool_recycle", "SQL_POOL_PRE_PING": "pool_pre_ping", "SQL_MAX_OVERFLOW": "max_overflow", "SQL_ECHO": "echo", "SQL_ECHO_POOL": "echo_pool", } settings = _make_options(config, keymap) return cls(engine_options=engine_options, session_options=session_options, **settings) @property def config(self): return { "SQL_DATABASE_URI": self.database_uri, "SQL_AUTOCOMMIT": self.autocommit, "SQL_AUTOFLUSH": self.autoflush, "SQL_EXPIRE_ON_COMMIT": self.expire_on_commit, "SQL_ISOLATION_LEVEL": self.isolation_level, "SQL_POOL_SIZE": self.pool_size, "SQL_POOL_TIMEOUT": self.pool_timeout, "SQL_POOL_RECYCLE": self.pool_recycle, "SQL_POOL_PRE_PING": self.pool_pre_ping, "SQL_MAX_OVERFLOW": self.max_overflow, "SQL_ECHO": self.echo, "SQL_ECHO_POOL": self.echo_pool, } @property def engine_options(self): opts = { "echo": self.echo, "echo_pool": self.echo_pool, "encoding": self.encoding, "convert_unicode": self.convert_unicode, "isolation_level": self.isolation_level, "pool_size": self.pool_size, "pool_timeout": self.pool_timeout, "pool_recycle": self.pool_recycle, "pool_pre_ping": self.pool_pre_ping, "max_overflow": self.max_overflow, } opts.update(self._extra_engine_options) return _make_options(opts) @property def session_options(self): opts = { "autocommit": self.autocommit, "autoflush": self.autoflush, "expire_on_commit": self.expire_on_commit, } opts.update(self._extra_session_options) return _make_options(opts) class SQLClient: """ Database client for interacting with a database. 
    The following configuration values can be passed into a new :class:`SQLClient` instance as a
    ``dict`` or as keyword arguments (see Args below). Alternatively, this class can be
    subclassed and :attr:`DEFAULT_CONFIG` overridden with custom defaults.

    The order of precedence for configuration sources is:

    - :attr:`DEFAULT_CONFIG`
    - ``config``
    - keyword arguments

    Where keyword arguments have the highest precedence.

    ======================== ======================================================================
    **SQL_DATABASE_URI**     URI used to connect to the database. Defaults to ``sqlite://`` (an
                             in-memory sqlite database).
    **SQL_AUTOCOMMIT**       When ``True``, the ``Session`` does not keep a persistent transaction
                             running, and will acquire connections from the engine on an as-needed
                             basis, returning them immediately after their use. Defaults to
                             ``False``.
    **SQL_AUTOFLUSH**        When ``True``, all query operations will issue a ``flush()`` call to
                             the ``Session`` before proceeding. This is a convenience feature so
                             that ``flush()`` need not be called repeatedly in order for database
                             queries to retrieve results. Defaults to ``True``.
    **SQL_EXPIRE_ON_COMMIT** When ``True`` all instances will be fully expired after each
                             ``commit()``, so that all attribute/object access subsequent to a
                             completed transaction will load from the most recent database state.
                             Defaults to ``True``.
    **SQL_ISOLATION_LEVEL**  String parameter interpreted by various dialects in order to affect
                             the transaction isolation level of the database connection. The
                             parameter essentially accepts some subset of these string arguments:
                             ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``, ``"READ_COMMITTED"``,
                             ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``. Behavior here varies per
                             backend, and individual dialects should be consulted directly.
                             Defaults to ``None``.
    **SQL_POOL_SIZE**        The size of the database pool. Defaults to the engine's default
                             (usually ``5``).
    **SQL_POOL_TIMEOUT**     Specifies the connection timeout for the pool. Defaults to ``10``.
    **SQL_POOL_RECYCLE**     Number of seconds after which a connection is automatically recycled.
    **SQL_POOL_PRE_PING**    When ``True`` will enable SQLAlchemy's connection pool “pre-ping”
                             feature that tests connections for liveness upon each checkout.
                             Defaults to ``False``. Requires SQLAlchemy >= 1.2.
    **SQL_MAX_OVERFLOW**     Controls the number of connections that can be created after the pool
                             reached its maximum size. When those additional connections are
                             returned to the pool, they are disconnected and discarded.
    **SQL_ENCODING**         The string encoding used by SQLAlchemy for string encode/decode
                             operations which occur within SQLAlchemy, outside of the DBAPI.
                             Defaults to ``utf-8``.
    **SQL_CONVERT_UNICODE**  When ``True`` it sets the default behavior of ``convert_unicode`` on
                             the ``String`` type to ``True``, regardless of a setting of ``False``
                             on an individual ``String`` type, thus causing all ``String``-based
                             columns to accommodate Python unicode objects.
    **SQL_ECHO**             When ``True`` have SQLAlchemy log all SQL statements. Defaults to
                             ``False``.
    **SQL_ECHO_POOL**        When ``True`` have SQLAlchemy log all checkouts/checkins of the
                             connection pool. Defaults to ``False``.
    ======================== ======================================================================

    Args:
        config (dict|str): Database engine configuration options or database URI string.
            Defaults to ``None`` which uses an in-memory SQLite database.
        model_class (object): A SQLAlchemy ORM declarative base model.
        query_class (Query, optional): SQLAlchemy Query derived class to use as the default
            class when creating a new query object.
        session_class (Session, optional): SQLAlchemy Session derived class to use by the
            session maker.
        session_options (dict, optional): Additional session options to use when creating the
            database session.
        engine_options (dict, optional): Additional engine options to use when creating the
            database engine.
        database_uri (str, optional): See configuration table above.
        autocommit (bool, optional): See configuration table above.
        autoflush (bool, optional): See configuration table above.
        expire_on_commit (bool, optional): See configuration table above.
        isolation_level (str, optional): See configuration table above.
        pool_size (int, optional): See configuration table above.
        pool_timeout (int|float, optional): See configuration table above.
        pool_recycle (int|float, optional): See configuration table above.
        pool_pre_ping (bool, optional): See configuration table above.
        max_overflow (int, optional): See configuration table above.
        sql_echo (bool, optional): See configuration table above.
        sql_echo_pool (bool, optional): See configuration table above.
    """

    #: The default client configuration for this class. Override in a subclass to set new
    #: class-wide defaults.
    DEFAULT_CONFIG = FrozenDict(
        {
            "SQL_DATABASE_URI": "sqlite://",
            "SQL_ECHO": False,
            "SQL_ECHO_POOL": False,
            "SQL_ENCODING": None,
            "SQL_CONVERT_UNICODE": None,
            "SQL_ISOLATION_LEVEL": None,
            "SQL_POOL_SIZE": None,
            "SQL_POOL_TIMEOUT": None,
            "SQL_POOL_RECYCLE": None,
            "SQL_MAX_OVERFLOW": None,
            "SQL_AUTOCOMMIT": False,
            "SQL_AUTOFLUSH": True,
            "SQL_EXPIRE_ON_COMMIT": True,
            "SQL_POOL_PRE_PING": None,
        }
    )

    def __init__(
        self,
        config=None,
        model_class=None,
        query_class=SQLQuery,
        session_class=Session,
        session_options=None,
        engine_options=None,
        database_uri=None,
        autocommit=None,
        autoflush=None,
        expire_on_commit=None,
        isolation_level=None,
        pool_size=None,
        pool_timeout=None,
        pool_recycle=None,
        pool_pre_ping=None,
        max_overflow=None,
        sql_echo=None,
        sql_echo_pool=None,
    ):
        if model_class is None:  # pragma: no cover
            model_class = declarative_base()

        if isinstance(config, str):
            config = {"SQL_DATABASE_URI": config}

        override_config = _make_options(
            {
                "SQL_DATABASE_URI": database_uri,
                "SQL_AUTOCOMMIT": autocommit,
                "SQL_AUTOFLUSH": autoflush,
                "SQL_EXPIRE_ON_COMMIT": expire_on_commit,
                "SQL_ISOLATION_LEVEL": isolation_level,
                "SQL_POOL_SIZE": pool_size,
                "SQL_POOL_TIMEOUT": pool_timeout,
                "SQL_POOL_RECYCLE": pool_recycle,
                "SQL_POOL_PRE_PING": pool_pre_ping,
                "SQL_MAX_OVERFLOW": max_overflow,
                "SQL_ECHO": sql_echo,
                "SQL_ECHO_POOL": sql_echo_pool,
            }
        )

        cfg = self.DEFAULT_CONFIG.copy()
        cfg.update(config or {})
        cfg.update(override_config)

        self.settings = SQLClientSettings.from_config(
            cfg, engine_options=engine_options, session_options=session_options
        )

        self.model_class = model_class
        self.query_class = query_class
        self.session_class = session_class

        self.engine = self.create_engine(self.settings.database_uri, self.settings.engine_options)
        self.session = self.create_session(
            self.engine,
            self.settings.session_options,
            session_class=self.session_class,
            query_class=self.query_class,
        )

        self.update_models_registry()

    def create_engine(self, uri, options=None):
        """
        Factory function to create a database engine using `config` options.

        Args:
            uri (str): Database URI string.
            options (dict, optional): Engine configuration options.
Returns: Engine: SQLAlchemy engine instance. """ if options is None: # pragma: no cover options = {} return sa.create_engine(make_url(uri), **options) def create_session(self, bind, options=None, session_class=Session, query_class=SQLQuery): """ Factory function to create a scoped session using `bind`. Args: bind (Engine|Connection): Database engine or connection instance. options (dict, optional): Session configuration options. session_class (obj, optional): Session class to use when creating new session instances. Defaults to :class:`.Session`. query_class (obj, optional): Query class used for ``session.query`` instances. Defaults to :class:`.SQLQuery`. Returns: Session: SQLAlchemy session instance bound to `bind`. """ if options is None: # pragma: no cover options = {} else: options = options.copy() if query_class: options["query_cls"] = query_class scopefunc = options.pop("scopefunc", None) session_factory = orm.sessionmaker(bind=bind, class_=session_class, **options) return orm.scoped_session(session_factory, scopefunc=scopefunc) def update_models_registry(self): """Update :attr:`models` registry as computed from :attr:`model_class`.""" self.models = self.create_models_registry(self.model_class) def create_models_registry(self, model_class): """Return model registry ``dict`` with model names as keys and corresponding model classes as values.""" models = {} class_registry = get_model_class_registry(model_class) if not class_registry: return models for name, model in class_registry.items(): if name.startswith("_sa_"): continue if isinstance(model, _MultipleClassMarker): # Handle case where there are multiple ORM models with the same # base class name but located in different submodules. model = list(model) if len(model) == 1: # pragma: no cover models[name] = model[0] else: for obj in list(model): modobj = f"{obj.__module__}.{obj.__name__}" models[modobj] = obj else: models[name] = model return models @property def config(self): """Proxy property to configuration settings.""" return self.settings.config @property def url(self): """Proxy property to database engine's database URL.""" return self.engine.url @property def database(self): """Proxy property to database engine's database name.""" return self.engine.url.database def get_metadata(self): """Return `MetaData` from :attr:`model` or raise an exception if :attr:`model` was never given.""" if self.metadata is None: # pragma: no cover raise UnmappedError("Missing declarative base model") return self.metadata @property def metadata(self): """Return `MetaData` from :attr:`model` or ``None``.""" return getattr(self.model_class, "metadata", None) @property def tables(self): """Return ``dict`` of table instances found in :attr:`metadata` with table names as keys and corresponding table objects as values.""" return self.metadata.tables def create_all(self): """Create all metadata (tables, etc) contained within :attr:`metadata`.""" self.get_metadata().create_all(self.engine) def drop_all(self): """Drop all metadata (tables, etc) contained within :attr:`metadata`.""" self.get_metadata().drop_all(self.engine) def reflect(self): """Reflect tables from database into :attr:`metadata`.""" self.get_metadata().reflect(self.engine) @property def session(self): """ Proxy to threadlocal session object returned by scoped session object. Note: Generally, the scoped session is sufficient to work with directly. However, the scoped session doesn't provide access to the custom session class used by the session factory. 
        This property returns an instance of our custom session class. Multiple calls to the
        scoped session always return the same active threadlocal session (i.e.
        ``self._Session() is self._Session()``).

        See Also:
            http://docs.sqlalchemy.org/en/latest/orm/contextual.html
        """
        return self._Session()

    @session.setter
    def session(self, Session):
        """Set private :attr:`_Session`."""
        self._Session = Session

    @property
    def add(self):
        """Proxy property to :meth:`session.add`."""
        return self.session.add

    @property
    def add_all(self):
        """Proxy property to :meth:`session.add_all`."""
        return self.session.add_all

    @property
    def delete(self):
        """Proxy property to :meth:`session.delete`."""
        return self.session.delete

    @property
    def merge(self):
        """Proxy property to :meth:`session.merge`."""
        return self.session.merge

    @property
    def execute(self):
        """Proxy property to :meth:`session.execute`."""
        return self.session.execute

    @property
    def prepare(self):
        """Proxy property to :meth:`session.prepare`."""
        return self.session.prepare

    @property
    def no_autoflush(self):
        """Proxy property to :meth:`session.no_autoflush`."""
        return self.session.no_autoflush

    @property
    def scalar(self):
        """Proxy property to :meth:`session.scalar`."""
        return self.session.scalar

    @property
    def close(self):
        """Proxy property to :meth:`session.close`."""
        return self.session.close

    @property
    def close_all(self):
        """Proxy property to :meth:`_Session.close_all`."""
        return self._Session.close_all

    @property
    def invalidate(self):
        """Proxy property to :meth:`session.invalidate`."""
        return self.session.invalidate

    @property
    def is_active(self):
        """Proxy property to :attr:`session.is_active`."""
        return self.session.is_active

    @property
    def is_modified(self):
        """Proxy property to :meth:`session.is_modified`."""
        return self.session.is_modified

    @property
    def remove(self):
        """Proxy property to :meth:`_Session.remove`."""
        return self._Session.remove

    def disconnect(self):
        """Disconnect all database sessions and connections."""
        self.remove()
        self.engine.dispose()

    def commit(self):
        """
        Commit a session transaction but rollback if an error occurs. This helps ensure that
        the session is not left in an unstable state.
        """
        try:
            self.session.commit()
        except Exception:  # pragma: no cover
            self.session.rollback()
            raise

    @property
    def rollback(self):
        """Proxy property to :meth:`session.rollback`."""
        return self.session.rollback

    @property
    def flush(self):
        """Proxy property to :meth:`session.flush`."""
        return self.session.flush

    @property
    def refresh(self):
        """Proxy property to :meth:`session.refresh`."""
        return self.session.refresh

    @property
    def expire(self):
        """Proxy property to :meth:`session.expire`."""
        return self.session.expire

    @property
    def expire_all(self):
        """Proxy property to :meth:`session.expire_all`."""
        return self.session.expire_all

    def expunge(self, *instances):
        """Remove all `instances` from :attr:`session`."""
        for instance in instances:
            self.session.expunge(instance)

    @property
    def expunge_all(self):
        """Proxy property to :meth:`session.expunge_all`."""
        return self.session.expunge_all

    @property
    @raise_for_class_if_not_supported
    def prune(self):  # pragma: no cover
        """
        Proxy property to :meth:`session.prune`.

        Warning:
            This is no longer supported in SQLAlchemy>=1.4.
        """
        try:
            return self.session.prune
        except AttributeError:
            return NotImplemented

    @property
    def query(self):
        """Proxy property to :meth:`session.query`."""
        return self.session.query

    def ping(self):
        """
        Ping the database to check whether the connection is valid.

        Returns:
            bool: ``True`` when connection check passes.
        Raises:
            sqlalchemy.exc.SQLAlchemyError: When the connection check fails.
        """
        conn = self.engine.connect()

        # Run a SELECT 1. Use a sa.select() so that the SELECT of a scalar value without a
        # table is appropriately formatted for the backend.
        try:
            conn.scalar(sa.select([1]))
        except sa.exc.DBAPIError as exc:
            # Catch SQLAlchemy's DBAPIError, which is a wrapper for the DBAPI's exception. It
            # includes a "connection_invalidated" attribute which specifies if this connection
            # is a "disconnect" condition, which is based on inspection of the original
            # exception by the dialect in use.
            if exc.connection_invalidated:
                # Run the same SELECT again. The connection will re-validate itself and
                # establish a new connection. The disconnect detection here also causes the
                # whole connection pool to be invalidated so that all stale connections are
                # discarded.
                conn.scalar(sa.select([1]))
            else:
                raise
        conn.close()
        return True

    @contextmanager
    def transaction(self, commit=True, rollback=False, autoflush=None):
        """
        Nestable session transaction context manager where only a single commit will be issued
        once all contexts have been exited. If an exception occurs either at commit time or
        before, the transaction will be rolled back and the original exception re-raised.

        Args:
            commit (bool, optional): Whether to commit the transaction or leave it open.
                Defaults to ``True``.
            rollback (bool, optional): Whether to rollback the transaction. Defaults to
                ``False``. WARNING: This overrides `commit`.
            autoflush (bool, optional): Whether to override ``session.autoflush``. Original
                ``session.autoflush`` will be restored after transaction. Defaults to ``None``
                which doesn't modify ``session.autoflush``.

        Yields:
            :attr:`session`
        """
        with core.transaction(self.session, commit=commit, rollback=rollback, autoflush=autoflush):
            yield self.session

    def save(self, models, before=None, after=None, identity=None):
        """
        Save `models` into the database using insert, update, or upsert-on-primary-key.

        The `models` argument can be any of the following:

        - Model instance
        - ``list``/``tuple`` of Model instances

        Args:
            models (mixed): Models to save to database.
            before (function, optional): Function to call before each model is saved via
                ``session.add``. Function should have signature ``before(model, is_new)``.
            after (function, optional): Function to call after each model is saved via
                ``session.add``. Function should have signature ``after(model, is_new)``.
            identity (function, optional): Function used to return an identity map for a given
                model. Function should have the signature ``identity(model)``. Defaults to
                :func:`.core.primary_identity_map`.

        Returns:
            Model: If a single item was passed in.
            list: A ``list`` of Model instances if multiple items were passed in.
        """
        if not is_sequence(models):
            models = [models]
            as_list = False
        else:
            models = list(models)
            as_list = True

        for idx, model in enumerate(models):
            if model.__class__ in self.models.values():
                continue

            self.update_models_registry()

            if model.__class__ not in self.models.values():
                if as_list:
                    idx_msg = f"Item with index {idx} and value "
                else:
                    idx_msg = ""

                raise TypeError(
                    f"Type of value given to save() method is not a recognized SQLAlchemy"
                    f" declarative class that derives from {self.model_class}. {idx_msg}{model!r}"
                    f" is an instance of {model.__class__!r}."
                )

        return core.save(
            self.session,
            models if as_list else models[0],
            before=before,
            after=after,
            identity=identity,
        )

    def bulk_insert(self, mapper, mappings):
        """
        Perform a bulk insert into table/statement represented by `mapper` while utilizing a
        special syntax that replaces the traditional ``executemany()`` DBAPI call with a
        multi-row VALUES clause for a single INSERT statement.

        See :meth:`bulk_insert_many` for bulk inserts using ``executemany()``.

        Args:
            mapper: An ORM class or SQLAlchemy insert-statement object.
            mappings (list): List of ``dict`` objects to insert.

        Returns:
            ResultProxy
        """
        return core.bulk_insert(self.session, mapper, mappings)

    def bulk_insert_many(self, mapper, mappings):
        """
        Perform a bulk insert into table/statement represented by `mapper` while utilizing the
        ``executemany()`` DBAPI call.

        See :meth:`bulk_insert` for bulk inserts using a multi-row VALUES clause for a single
        INSERT statement.

        Args:
            mapper: An ORM class or SQLAlchemy insert-statement object.
            mappings (list): List of ``dict`` objects to insert.

        Returns:
            ResultProxy
        """
        return core.bulk_insert_many(self.session, mapper, mappings)

    def bulk_common_update(self, mapper, key_columns, mappings):
        """
        Perform a bulk UPDATE on common shared values among `mappings`. What this means is that
        if multiple records are being updated to the same values, then issue only a single
        update for that value-set using the identity of the records in the WHERE clause.

        Args:
            mapper: An ORM class or SQLAlchemy insert-statement object.
            key_columns (tuple): A tuple of SQLAlchemy columns that represent the identity of
                each row (typically this would be a table's primary key values but they can be
                any set of columns).
            mappings (list): List of ``dict`` objects to update.

        Returns:
            list[ResultProxy]
        """
        return core.bulk_common_update(self.session, mapper, key_columns, mappings)

    def bulk_diff_update(self, mapper, key_columns, previous_mappings, mappings):
        """
        Perform a bulk INSERT/UPDATE on the difference between `mappings` and
        `previous_mappings` such that only the values that have changed are included in the
        update. If a mapping in `mappings` doesn't exist in `previous_mappings`, then it will
        be included in the bulk INSERT.

        The bulk INSERT will be deferred to :meth:`bulk_insert`. The bulk UPDATE will be
        deferred to :meth:`bulk_common_update`.

        Args:
            mapper: An ORM class or SQLAlchemy insert-statement object.
            key_columns (tuple): A tuple of SQLAlchemy columns that represent the identity of
                each row (typically this would be a table's primary key values but they can be
                any set of columns).
            previous_mappings (list): List of ``dict`` objects that represent the previous
                values of all mappings found for this update set (i.e. these are the current
                database records).
            mappings (list): List of ``dict`` objects to update.

        Returns:
            list[ResultProxy]
        """
        return core.bulk_diff_update(self.session, mapper, key_columns, previous_mappings, mappings)

    @property
    def bulk_insert_mappings(self):
        """Proxy property to :meth:`session.bulk_insert_mappings`."""
        return self.session.bulk_insert_mappings

    @property
    def bulk_save_objects(self):
        """Proxy property to :meth:`session.bulk_save_objects`."""
        return self.session.bulk_save_objects

    @property
    def bulk_update_mappings(self):
        """Proxy property to :meth:`session.bulk_update_mappings`."""
        return self.session.bulk_update_mappings

    def destroy(self, data, model_class=None, synchronize_session=False):
        """
        Delete bulk records from `data`.
        The `data` argument can be any of the following:

        - ``dict``
        - :attr:`model_class` instance
        - ``list``/``tuple`` of ``dict`` objects
        - ``list``/``tuple`` of :attr:`model_class` instances

        If a ``dict`` or ``list`` of ``dict`` is passed in, then `model_class` must be provided.

        Args:
            data (mixed): Data to delete from database.
            model_class (object, optional): Model class to use when `data` contains ``dict``
                items.
            synchronize_session (bool|str): Argument passed to ``Query.delete``.

        Returns:
            int: Number of deleted records.
        """
        return core.destroy(
            self.session, data, model_class=model_class, synchronize_session=synchronize_session
        )

    def __repr__(self):
        return f"<{self.__class__.__name__}({self.url!r})>"

    def __getitem__(self, item):
        """
        Return a query instance for the model class corresponding to `item`.

        Args:
            item (str): Attribute corresponding to string name of model class.

        Returns:
            Query: Query instance over the model class named by `item`.

        Raises:
            AttributeError: When `item` doesn't correspond to a model class name found in
                :attr:`models`.
        """
        if not isinstance(item, str):
            # If anything other than a string is supplied, use the item's
            # __name__ as the model name to index to.
            item = getattr(item, "__name__", item)
        return getattr(self, item)

    def __getattr__(self, attr):
        """
        Return a query instance for the model class corresponding to `attr`.

        Args:
            attr (str): Attribute corresponding to string name of model class.

        Returns:
            Query: Query instance over the model class named by `attr`.

        Raises:
            AttributeError: When `attr` doesn't correspond to a model class name found in
                :attr:`models`.
        """
        if attr not in self.models:  # pragma: no cover
            # Potentially, this model could have been imported after creation
            # of this class. Since we got a bad attribute, let's go ahead and
            # update the registry and try again.
            self.update_models_registry()

        if attr not in self.models:  # pragma: no cover
            raise AttributeError(
                f"The attribute {attr!r} is not an attribute of {self.__class__.__name__} nor is"
                f" it a unique model class name in the declarative model class registry of"
                f" {self.model_class}. Valid model names are: {', '.join(self.models)}. If a model"
                f" name is shown as a full module path, then that model class name is not unique"
                f" and cannot be referenced via attribute access."
            )

        return self.query(self.models[attr])


def _make_options(obj, keymap=None):
    if keymap is None:
        keymap = dict(zip(obj.keys(), obj.keys()))
    return {new: obj[old] for old, new in keymap.items() if obj.get(old) is not None}
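# ---------------------------------------------------------------------------
# Usage sketch for SQLClient (illustrative, not part of the module above).
# Assumes the client module is importable as ``sqlservice`` -- a hypothetical
# import path; adjust it to wherever this module actually lives. The ``User``
# model and the in-memory SQLite URI are examples only.
# ---------------------------------------------------------------------------
import sqlalchemy as sa
from sqlservice import SQLClient, declarative_base  # hypothetical import path

Model = declarative_base()


class User(Model):
    __tablename__ = "users"
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(50))


db = SQLClient("sqlite://", model_class=Model)  # a URI string is accepted as config
db.create_all()  # create tables from Model.metadata

with db.transaction():  # single commit issued when the outermost context exits
    db.save([User(name="alice"), User(name="bob")])

print(db.User.all())  # attribute access returns a query over the User model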
"""build: Build documentation, requirements.txt, and run poetry build.""" from __future__ import annotations from pathlib import Path from typing import Any, cast import tomlkit import tomlkit.items from .utils import ANSI, _doSysExec, _getPyproject, _setPyproject def getProcVer(version: str) -> str: """Process a version string. This is pretty opinionated. Args: version (str): the version Returns: str: the processed version """ if version.startswith("^"): major = int(version[1:].split(".")[0]) if major > 1990 or major == 0: # if cal ver or zero ver return f"<{major + 2},>={version[1:]}" return f"<{major + 1},>={version[1:]}" return version def getDependencies() -> dict[str, Any]: """Get our dependencies as a dictionary. Returns: dict[str, str]: [description] """ return dict(**_getPyproject()["tool"]["poetry"]["dependencies"]) def subtaskGenRequirements() -> None: """Generate the requirements files.""" dependencies = getDependencies() dependencies.pop("python") requirements = [] requirementsOpt = [] for requirement in dependencies: if isinstance(dependencies[requirement], dict): dependent = dependencies[requirement] if "optional" in dependent and dependent["optional"]: requirementsOpt.append( f"{requirement}" + f"{"["+dependent["extras"][0]+"]" if "extras" in dependent else ""}" f"{getProcVer(dependent["version"])}" ) else: requirements.append( f"{requirement}" + f"{"["+dependent["extras"][0]+"]" if "extras" in dependent else ""}" f"{getProcVer(dependent["version"])}" ) else: dependent = cast(str, dependencies[requirement]) requirements.append(f"{requirement}{getProcVer(dependent)}") Path("requirements.txt").write_text("\n".join(sorted(requirements)) + "\n", encoding="utf-8") Path("requirements_optional.txt").write_text( "\n".join(sorted(requirements + requirementsOpt)) + "\n", encoding="utf-8" ) print("Done!\n") def subtaskUpdatePyproject(): """Update the pyproject.toml file with our shiny new version specifiers.""" pyproject = _getPyproject() dependencies = pyproject["tool"]["poetry"]["dependencies"] for requirement in dependencies: if requirement != "python": if isinstance(dependencies[requirement], tomlkit.items.InlineTable): dependent = dependencies[requirement]["version"] dependencies[requirement]["version"] = getProcVer(dependent) else: dependencies[requirement] = getProcVer(dependencies[requirement]) _setPyproject(pyproject) def taskBuild(kwargs: list[str]) -> None: """Run the build task. Args: kwargs (list[str]): additional args """ _ = kwargs # unused - silence pylint # Update pyproject.toml version specifiers print(f"{ANSI["CG"]}(Replacing poetry version specifiers){ANSI["CLR"]}") subtaskUpdatePyproject() # Deal with manual changes to pyproject.toml print(f"{ANSI["CG"]}(Refreshing Poetry){ANSI["CLR"]}\n") _doSysExec("poetry update") # Generate DOCS print( f"{ANSI["B"]}{ANSI["U"]}{ANSI["CB"]}Building{ANSI["CLR"]}\n\n{ANSI["B"]}" f"{ANSI["U"]}{ANSI["CG"]}Documentation{ANSI["CLR"]}" ) print(_doSysExec("fhdoc --cleanup")[1].replace("\\", "/")) # Generate requirements.txt print(f"{ANSI["B"]}{ANSI["U"]}{ANSI["CG"]}Requirements.txt{ANSI["CLR"]}") subtaskGenRequirements() # Generate dist files print(f"{ANSI["B"]}{ANSI["U"]}{ANSI["CG"]}Dist files{ANSI["CLR"]}") print(_doSysExec("poetry build")[1])
"""build: Build documentation, requirements.txt, and run poetry build.""" from __future__ import annotations from pathlib import Path from typing import Any, cast import tomlkit import tomlkit.items from .utils import ANSI, _doSysExec, _getPyproject, _setPyproject def getProcVer(version: str) -> str: """Process a version string. This is pretty opinionated. Args: version (str): the version Returns: str: the processed version """ if version.startswith("^"): major = int(version[1:].split(".")[0]) if major > 1990 or major == 0: # if cal ver or zero ver return f"<{major + 2},>={version[1:]}" return f"<{major + 1},>={version[1:]}" return version def getDependencies() -> dict[str, Any]: """Get our dependencies as a dictionary. Returns: dict[str, str]: [description] """ return dict(**_getPyproject()["tool"]["poetry"]["dependencies"]) def subtaskGenRequirements() -> None: """Generate the requirements files.""" dependencies = getDependencies() dependencies.pop("python") requirements = [] requirementsOpt = [] for requirement in dependencies: if isinstance(dependencies[requirement], dict): dependent = dependencies[requirement] if "optional" in dependent and dependent["optional"]: requirementsOpt.append( f"{requirement}" + f"{'['+dependent['extras'][0]+']' if 'extras' in dependent else ''}" f"{getProcVer(dependent['version'])}" ) else: requirements.append( f"{requirement}" + f"{'['+dependent['extras'][0]+']' if 'extras' in dependent else ''}" f"{getProcVer(dependent['version'])}" ) else: dependent = cast(str, dependencies[requirement]) requirements.append(f"{requirement}{getProcVer(dependent)}") Path("requirements.txt").write_text("\n".join(sorted(requirements)) + "\n", encoding="utf-8") Path("requirements_optional.txt").write_text( "\n".join(sorted(requirements + requirementsOpt)) + "\n", encoding="utf-8" ) print("Done!\n") def subtaskUpdatePyproject(): """Update the pyproject.toml file with our shiny new version specifiers.""" pyproject = _getPyproject() dependencies = pyproject["tool"]["poetry"]["dependencies"] for requirement in dependencies: if requirement != "python": if isinstance(dependencies[requirement], tomlkit.items.InlineTable): dependent = dependencies[requirement]["version"] dependencies[requirement]["version"] = getProcVer(dependent) else: dependencies[requirement] = getProcVer(dependencies[requirement]) _setPyproject(pyproject) def taskBuild(kwargs: list[str]) -> None: """Run the build task. Args: kwargs (list[str]): additional args """ _ = kwargs # unused - silence pylint # Update pyproject.toml version specifiers print(f"{ANSI['CG']}(Replacing poetry version specifiers){ANSI['CLR']}") subtaskUpdatePyproject() # Deal with manual changes to pyproject.toml print(f"{ANSI['CG']}(Refreshing Poetry){ANSI['CLR']}\n") _doSysExec("poetry update") # Generate DOCS print( f"{ANSI['B']}{ANSI['U']}{ANSI['CB']}Building{ANSI['CLR']}\n\n{ANSI['B']}" f"{ANSI['U']}{ANSI['CG']}Documentation{ANSI['CLR']}" ) print(_doSysExec("fhdoc --cleanup")[1].replace("\\", "/")) # Generate requirements.txt print(f"{ANSI['B']}{ANSI['U']}{ANSI['CG']}Requirements.txt{ANSI['CLR']}") subtaskGenRequirements() # Generate dist files print(f"{ANSI['B']}{ANSI['U']}{ANSI['CG']}Dist files{ANSI['CLR']}") print(_doSysExec("poetry build")[1])
# Module faq.py v1

#///---- Imports ----///
import re
import os
import logging

from discord.ext import commands
from faunadb import query as q
from faunadb.objects import Ref
from faunadb.client import FaunaClient

#///---- Log ----///
log = logging.getLogger(__name__)

#///---- Class ----///
class FAQ(commands.Cog):
    '''
    FAQ lookup and editing
    '''
    def __init__(self, bot):
        '''
        Bot __init__
        '''
        self.bot = bot
        self.db = database(bot)  # database helper presumably defined elsewhere in this module

    #! faq command
    @commands.group()
    async def faq(self, ctx):
        '''
        faq command group
        '''
        PREFIX = os.getenv("DISCORD_PREFIX")
        if ctx.invoked_subcommand is None:
            await ctx.send(f"Este comando no existe! Tipea `{PREFIX}faq help` para ver los comandos disponibles :D")

    #! help subcommand
    @faq.command()
    async def help(self, ctx):
        '''
        Description: FAQ help
        Precondition: type {PREFIX}faq help in a channel
        Postcondition: the bot posts the list of commands with descriptions
        '''
        PREFIX = os.getenv("DISCORD_PREFIX")
        lines = f'''
```
{PREFIX}faq help: Ayuda del FAQ
{PREFIX}faq all: Por DM recibís el FAQ completo
{PREFIX}faq general: Preguntas generales sobre el uso de Discord y el servidor
{PREFIX}faq english: Preguntas relacionadas a los eventos para charlar en inglés
{PREFIX}faq mentoring: Dudas sobre el sistema de mentorías
{PREFIX}faq coworking: ¿Qué es el Coworking en FEC?
{PREFIX}faq roles: Qué son y cómo se obtienen los roles
{PREFIX}faq projects: Consulta sobre los proyectos grupales de desarrollo
{PREFIX}faq studygroup: Consulta sobre los grupos de estudio
```
'''
        await ctx.send(lines)

    #! all subcommand
    @faq.command()
    async def all(self, ctx):
        '''
        Description: full FAQ via DM
        Precondition: type {PREFIX}faq all in a channel
        Postcondition: the bot DMs the full FAQ
        '''
        dataPrint = [""] * 4
        dataFAQ = self.db.load()
        if len(dataFAQ) != 0:
            for data in dataFAQ:
                if len(dataPrint[0]) < 1500:
                    dataPrint[0] += f"+{data['Question']}\n{data['Answer']}\n\n"
                elif len(dataPrint[1]) < 1500:
                    dataPrint[1] += f"+{data['Question']}\n{data['Answer']}\n\n"
                elif len(dataPrint[2]) < 1500:
                    dataPrint[2] += f"+{data['Question']}\n{data['Answer']}\n\n"
                else:
                    dataPrint[3] += f"+{data['Question']}\n{data['Answer']}\n\n"
            message = ["FAQ completo:\n```diff\n" + dataPrint[0] + "```",
                       "```diff\n" + dataPrint[1] + "```",
                       "```diff\n" + dataPrint[2] + "```",
                       "```diff\n" + dataPrint[3] + "```"]
            await ctx.author.send(message[0])
            if len(dataPrint[1]) != 0:
                await ctx.author.send(message[1])
            if len(dataPrint[2]) != 0:
                await ctx.author.send(message[2])
            if len(dataPrint[3]) != 0:
                await ctx.author.send(message[3])
        else:
            await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')
Subcomando general @faq.command() async def general(self, ctx): ''' Descripción: Consulta de DB sobre categoría General Precondición: Escribir en un canal {PREFIX}faq general Poscondición: El bot envía por DM el FAQ de general ''' dataGen = [] dataPrint = [""] * 2 dataFAQ = self.db.load() dataGen = [data for data in dataFAQ if data['Category'] == 'General'] if len(dataGen) != 0: for data in dataGen: if len(dataPrint[0]) < 1500: dataPrint[0] = dataPrint[0] + f"+{data["Question"]}\n{data["Answer"]}\n\n" else: dataPrint[1] = dataPrint[1] + f"+{data["Question"]}\n{data["Answer"]}\n\n" message = ["General:\n```diff\n" + dataPrint[0] + "```", "General (continuación):\n```diff\n" + dataPrint[1] + "```"] await ctx.author.send(message[0]) if len(dataPrint[1]) != 0: await ctx.author.send(message[1]) else: await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!') #! Subcomando english @faq.command() async def english(self, ctx): ''' Descripción: Consulta de DB sobre categoría English Precondición: Escribir en un canal {PREFIX}faq english Poscondición: El bot envía por DM el FAQ de english ''' dataGen = [] dataPrint = [""] * 2 dataFAQ = self.db.load() dataGen = [data for data in dataFAQ if data['Category'] == 'English'] if len(dataGen) != 0: for data in dataGen: if len(dataPrint[0]) < 1500: dataPrint[0] = dataPrint[0] + f"+{data["Question"]}\n{data["Answer"]}\n\n" else: dataPrint[1] = dataPrint[1] + f"+{data["Question"]}\n{data["Answer"]}\n\n" message = ["English:\n```diff\n" + dataPrint[0] + "```", "English (continuación):\n```diff\n" + dataPrint[1] + "```"] await ctx.author.send(message[0]) if len(dataPrint[1]) != 0: await ctx.author.send(message[1]) else: await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!') #! Subcomando mentoring @faq.command() async def mentoring(self, ctx): ''' Descripción: Consulta de DB sobre categoría Mentoring Precondición: Escribir en un canal {PREFIX}faq mentoring Poscondición: El bot envía por DM el FAQ de mentoring ''' dataGen = [] dataPrint = [""] * 2 dataFAQ = self.db.load() dataGen = [data for data in dataFAQ if data['Category'] == 'Mentoring'] if len(dataGen) != 0: for data in dataGen: if len(dataPrint[0]) < 1500: dataPrint[0] = dataPrint[0] + f"+{data["Question"]}\n{data["Answer"]}\n\n" else: dataPrint[1] = dataPrint[1] + f"+{data["Question"]}\n{data["Answer"]}\n\n" message = ["Mentoring:\n```diff\n" + dataPrint[0] + "```", "Mentoring (continuación):\n```diff\n" + dataPrint[1] + "```"] await ctx.author.send(message[0]) if len(dataPrint[1]) != 0: await ctx.author.send(message[1]) else: await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!') #! 
Subcomando coworking @faq.command() async def coworking(self, ctx): ''' Descripción: Consulta de DB sobre categoría Coworking Precondición: Escribir en un canal {PREFIX}faq coworking Poscondición: El bot envía por DM el FAQ de coworking ''' dataGen = [] dataPrint = [""] * 2 dataFAQ = self.db.load() dataGen = [data for data in dataFAQ if data['Category'] == 'Coworking'] if len(dataGen) != 0: for data in dataGen: if len(dataPrint[0]) < 1500: dataPrint[0] = dataPrint[0] + f"+{data["Question"]}\n{data["Answer"]}\n\n" else: dataPrint[1] = dataPrint[1] + f"+{data["Question"]}\n{data["Answer"]}\n\n" message = ["Coworking:\n```diff\n" + dataPrint[0] + "```", "Coworking (continuación):\n```diff\n" + dataPrint[1] + "```"] await ctx.author.send(message[0]) if len(dataPrint[1]) != 0: await ctx.author.send(message[1]) else: await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!') #! Subcomando roles @faq.command() async def roles(self, ctx): ''' Descripción: Consulta de DB sobre categoría Roles Precondición: Escribir en un canal {PREFIX}faq roles Poscondición: El bot envía por DM el FAQ de roles ''' dataGen = [] dataPrint = [""] * 2 dataFAQ = self.db.load() dataGen = [data for data in dataFAQ if data['Category'] == 'Roles'] if len(dataGen) != 0: for data in dataGen: if len(dataPrint[0]) < 1500: dataPrint[0] = dataPrint[0] + f"+{data["Question"]}\n{data["Answer"]}\n\n" else: dataPrint[1] = dataPrint[1] + f"+{data["Question"]}\n{data["Answer"]}\n\n" message = ["Roles:\n```diff\n" + dataPrint[0] + "```", "Roles (continuación):\n```diff\n" + dataPrint[1] + "```"] await ctx.author.send(message[0]) if len(dataPrint[1]) != 0: await ctx.author.send(message[1]) else: await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!') #! Subcomando projects @faq.command() async def projects(self, ctx): ''' Descripción: Consulta de DB sobre categoría Projects Precondición: Escribir en un canal {PREFIX}faq projects Poscondición: El bot envía por DM el FAQ de projects ''' dataGen = [] dataPrint = [""] * 2 dataFAQ = self.db.load() dataGen = [data for data in dataFAQ if data['Category'] == 'Projects'] if len(dataGen) != 0: for data in dataGen: if len(dataPrint[0]) < 1500: dataPrint[0] = dataPrint[0] + f"+{data["Question"]}\n{data["Answer"]}\n\n" else: dataPrint[1] = dataPrint[1] + f"+{data["Question"]}\n{data["Answer"]}\n\n" message = ["Projects:\n```diff\n" + dataPrint[0] + "```", "Projects (continuación):\n```diff\n" + dataPrint[1] + "```"] await ctx.author.send(message[0]) if len(dataPrint[1]) != 0: await ctx.author.send(message[1]) else: await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!') #! 
Subcomando study-group @faq.command() async def studygroup(self, ctx): ''' Descripción: Consulta de DB sobre categoría English Precondición: Escribir en un canal {PREFIX}faq english Poscondición: El bot envía por DM el FAQ de english ''' dataGen = [] dataPrint = [""] * 2 dataFAQ = self.db.load() dataGen = [data for data in dataFAQ if data['Category'] == 'Study-Group'] if len(dataGen) != 0: for data in dataGen: if len(dataPrint[0]) < 1500: dataPrint[0] = dataPrint[0] + f"+{data["Question"]}\n{data["Answer"]}\n\n" else: dataPrint[1] = dataPrint[1] + f"+{data["Question"]}\n{data["Answer"]}\n\n" message = ["Study Group:\n```diff\n" + dataPrint[0] + "```", "Study Group (continuación):\n```diff\n" + dataPrint[1] + "```"] await ctx.author.send(message[0]) if len(dataPrint[1]) != 0: await ctx.author.send(message[1]) else: await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!') class database: ''' Clase database: Realizo la consulta a FaunaDB por todos los datos que existen en la collection FAQs ''' def __init__(self, bot): ''' __init__ ''' self.bot = bot DB_KEY = os.getenv("FAUNADB_SECRET_KEY") self.client = FaunaClient(secret = DB_KEY) def load(self): ''' Descripción: Cargo todos los datos de tipo diccionario a una lista ''' listFAQ = [] # Indezacion de datos allfaqs = self.client.query( q.paginate( q.match(q.index('all_faqs')) ) ) allfaqslist = [allfaqs['data']] result = re.findall('\\d+', str(allfaqslist)) # Creación de lista de diccionarios for i in range(0, len(result), 1): faqdetails = self.client.query(q.get(q.ref(q.collection('FAQs'), result[i]))) listFAQ += [faqdetails['data']] return listFAQ
# Module faq.py v1

#///---- Imports ----///
import re
import os
import logging

from discord.ext import commands
from faunadb import query as q
from faunadb.objects import Ref
from faunadb.client import FaunaClient

#///---- Log ----///
log = logging.getLogger(__name__)

#///---- Class ----///
class FAQ(commands.Cog):
    '''
    FAQ lookup and editing
    '''
    def __init__(self, bot):
        '''
        __init__ of the bot
        '''
        self.bot = bot
        self.db = database(bot)

    #! faq command
    @commands.group()
    async def faq(self, ctx):
        '''
        faq command group
        '''
        PREFIX = os.getenv("DISCORD_PREFIX")
        if ctx.invoked_subcommand is None:
            await ctx.send(f"Este comando no existe! Tipea `{PREFIX}faq help` para ver los comandos disponibles :D")

    #! help subcommand
    @faq.command()
    async def help(self, ctx):
        '''
        Description: FAQ help
        Precondition: Write {PREFIX}faq help in a channel
        Postcondition: The bot posts the list of commands, each with a description
        '''
        PREFIX = os.getenv("DISCORD_PREFIX")
        lines = f'''
```
{PREFIX}faq help: Ayuda del FAQ
{PREFIX}faq all: Por DM recibís el FAQ completo
{PREFIX}faq general: Preguntas generales sobre el uso de Discord y el servidor
{PREFIX}faq english: Preguntas relacionadas a los eventos para charlar en inglés
{PREFIX}faq mentoring: Dudas sobre el sistema de mentorías
{PREFIX}faq coworking: ¿Qué es el Coworking en FEC?
{PREFIX}faq roles: Qué son y cómo se obtienen los roles
{PREFIX}faq projects: Consulta sobre los proyectos grupales de desarrollo
{PREFIX}faq studygroup: Consulta sobre los grupos de estudio
```
        '''
        await ctx.send(lines)

    #! all subcommand
    @faq.command()
    async def all(self, ctx):
        '''
        Description: Full FAQ via DM
        Precondition: Write {PREFIX}faq all in a channel
        Postcondition: The bot sends the full FAQ via DM
        '''
        dataPrint = [""] * 4
        dataFAQ = self.db.load()

        if len(dataFAQ) != 0:
            for data in dataFAQ:
                if len(dataPrint[0]) < 1500:
                    dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
                elif len(dataPrint[1]) < 1500:
                    dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
                elif len(dataPrint[2]) < 1500:
                    dataPrint[2] = dataPrint[2] + f"+{data['Question']}\n{data['Answer']}\n\n"
                else:
                    dataPrint[3] = dataPrint[3] + f"+{data['Question']}\n{data['Answer']}\n\n"
            message = ["FAQ completo:\n```diff\n" + dataPrint[0] + "```",
                       "```diff\n" + dataPrint[1] + "```",
                       "```diff\n" + dataPrint[2] + "```",
                       "```diff\n" + dataPrint[3] + "```"]
            await ctx.author.send(message[0])
            if len(dataPrint[1]) != 0:
                await ctx.author.send(message[1])
            if len(dataPrint[2]) != 0:
                await ctx.author.send(message[2])
            if len(dataPrint[3]) != 0:
                await ctx.author.send(message[3])
        else:
            await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')

    #! general subcommand
    @faq.command()
    async def general(self, ctx):
        '''
        Description: DB query for the General category
        Precondition: Write {PREFIX}faq general in a channel
        Postcondition: The bot sends the General FAQ via DM
        '''
        dataGen = []
        dataPrint = [""] * 2
        dataFAQ = self.db.load()
        dataGen = [data for data in dataFAQ if data['Category'] == 'General']

        if len(dataGen) != 0:
            for data in dataGen:
                if len(dataPrint[0]) < 1500:
                    dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
                else:
                    dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
            message = ["General:\n```diff\n" + dataPrint[0] + "```",
                       "General (continuación):\n```diff\n" + dataPrint[1] + "```"]
            await ctx.author.send(message[0])
            if len(dataPrint[1]) != 0:
                await ctx.author.send(message[1])
        else:
            await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')

    #! english subcommand
    @faq.command()
    async def english(self, ctx):
        '''
        Description: DB query for the English category
        Precondition: Write {PREFIX}faq english in a channel
        Postcondition: The bot sends the English FAQ via DM
        '''
        dataGen = []
        dataPrint = [""] * 2
        dataFAQ = self.db.load()
        dataGen = [data for data in dataFAQ if data['Category'] == 'English']

        if len(dataGen) != 0:
            for data in dataGen:
                if len(dataPrint[0]) < 1500:
                    dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
                else:
                    dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
            message = ["English:\n```diff\n" + dataPrint[0] + "```",
                       "English (continuación):\n```diff\n" + dataPrint[1] + "```"]
            await ctx.author.send(message[0])
            if len(dataPrint[1]) != 0:
                await ctx.author.send(message[1])
        else:
            await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')

    #! mentoring subcommand
    @faq.command()
    async def mentoring(self, ctx):
        '''
        Description: DB query for the Mentoring category
        Precondition: Write {PREFIX}faq mentoring in a channel
        Postcondition: The bot sends the Mentoring FAQ via DM
        '''
        dataGen = []
        dataPrint = [""] * 2
        dataFAQ = self.db.load()
        dataGen = [data for data in dataFAQ if data['Category'] == 'Mentoring']

        if len(dataGen) != 0:
            for data in dataGen:
                if len(dataPrint[0]) < 1500:
                    dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
                else:
                    dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
            message = ["Mentoring:\n```diff\n" + dataPrint[0] + "```",
                       "Mentoring (continuación):\n```diff\n" + dataPrint[1] + "```"]
            await ctx.author.send(message[0])
            if len(dataPrint[1]) != 0:
                await ctx.author.send(message[1])
        else:
            await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')

    #! coworking subcommand
    @faq.command()
    async def coworking(self, ctx):
        '''
        Description: DB query for the Coworking category
        Precondition: Write {PREFIX}faq coworking in a channel
        Postcondition: The bot sends the Coworking FAQ via DM
        '''
        dataGen = []
        dataPrint = [""] * 2
        dataFAQ = self.db.load()
        dataGen = [data for data in dataFAQ if data['Category'] == 'Coworking']

        if len(dataGen) != 0:
            for data in dataGen:
                if len(dataPrint[0]) < 1500:
                    dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
                else:
                    dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
            message = ["Coworking:\n```diff\n" + dataPrint[0] + "```",
                       "Coworking (continuación):\n```diff\n" + dataPrint[1] + "```"]
            await ctx.author.send(message[0])
            if len(dataPrint[1]) != 0:
                await ctx.author.send(message[1])
        else:
            await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')

    #! roles subcommand
    @faq.command()
    async def roles(self, ctx):
        '''
        Description: DB query for the Roles category
        Precondition: Write {PREFIX}faq roles in a channel
        Postcondition: The bot sends the Roles FAQ via DM
        '''
        dataGen = []
        dataPrint = [""] * 2
        dataFAQ = self.db.load()
        dataGen = [data for data in dataFAQ if data['Category'] == 'Roles']

        if len(dataGen) != 0:
            for data in dataGen:
                if len(dataPrint[0]) < 1500:
                    dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
                else:
                    dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
            message = ["Roles:\n```diff\n" + dataPrint[0] + "```",
                       "Roles (continuación):\n```diff\n" + dataPrint[1] + "```"]
            await ctx.author.send(message[0])
            if len(dataPrint[1]) != 0:
                await ctx.author.send(message[1])
        else:
            await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')

    #! projects subcommand
    @faq.command()
    async def projects(self, ctx):
        '''
        Description: DB query for the Projects category
        Precondition: Write {PREFIX}faq projects in a channel
        Postcondition: The bot sends the Projects FAQ via DM
        '''
        dataGen = []
        dataPrint = [""] * 2
        dataFAQ = self.db.load()
        dataGen = [data for data in dataFAQ if data['Category'] == 'Projects']

        if len(dataGen) != 0:
            for data in dataGen:
                if len(dataPrint[0]) < 1500:
                    dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
                else:
                    dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
            message = ["Projects:\n```diff\n" + dataPrint[0] + "```",
                       "Projects (continuación):\n```diff\n" + dataPrint[1] + "```"]
            await ctx.author.send(message[0])
            if len(dataPrint[1]) != 0:
                await ctx.author.send(message[1])
        else:
            await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')

    #! study-group subcommand
    @faq.command()
    async def studygroup(self, ctx):
        '''
        Description: DB query for the Study-Group category
        Precondition: Write {PREFIX}faq studygroup in a channel
        Postcondition: The bot sends the Study Group FAQ via DM
        '''
        dataGen = []
        dataPrint = [""] * 2
        dataFAQ = self.db.load()
        dataGen = [data for data in dataFAQ if data['Category'] == 'Study-Group']

        if len(dataGen) != 0:
            for data in dataGen:
                if len(dataPrint[0]) < 1500:
                    dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
                else:
                    dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
            message = ["Study Group:\n```diff\n" + dataPrint[0] + "```",
                       "Study Group (continuación):\n```diff\n" + dataPrint[1] + "```"]
            await ctx.author.send(message[0])
            if len(dataPrint[1]) != 0:
                await ctx.author.send(message[1])
        else:
            await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')


class database:
    '''
    database class:
    Queries FaunaDB for every document that exists in the FAQs collection
    '''
    def __init__(self, bot):
        '''
        __init__
        '''
        self.bot = bot
        DB_KEY = os.getenv("FAUNADB_SECRET_KEY")
        self.client = FaunaClient(secret=DB_KEY)

    def load(self):
        '''
        Description: Load every document, as a dict, into a list
        '''
        listFAQ = []
        # Fetch the index page of all FAQ refs
        allfaqs = self.client.query(
            q.paginate(
                q.match(q.index('all_faqs'))
            )
        )
        allfaqslist = [allfaqs['data']]
        # pull the numeric Ref ids out of the stringified page
        result = re.findall(r'\d+', str(allfaqslist))
        # Build the list of dicts
        for i in range(len(result)):
            faqdetails = self.client.query(q.get(q.ref(q.collection('FAQs'), result[i])))
            listFAQ += [faqdetails['data']]
        return listFAQ
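# Minimal wiring sketch for this cog. The setup() entry point follows the
# standard discord.py extension convention, and the sample document shape is
# inferred from the fields the commands above read (Question/Answer/Category);
# neither is taken verbatim from the original project.
def setup(bot):
    bot.add_cog(FAQ(bot))

# database.load() is expected to yield a list of dicts such as:
# [{'Question': '¿Cómo obtengo un rol?',
#   'Answer': 'Pedilo a un admin.',
#   'Category': 'Roles'}]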
import os
import shutil
from typing import Optional, Union

from client.session import Session
from common.module_base import ModuleBase
from common.share_helper import share_drive_letter
from common.update_returns import EmitError
from messages import ExecutionRequest, ExecutionDone, ExecutionResponse
from messages.workspace_response import WorkspaceResponse


class WorkspaceSetup(ModuleBase):
    def __init__(self):
        super().__init__(["WorkspaceResponse", "ExecutionResponse", "ExecutionDone"])

    def handle(self, message_name: str,
               message_value: Union[WorkspaceResponse, ExecutionResponse, ExecutionDone],
               session: Session) -> Optional[EmitError]:
        if message_name == "WorkspaceResponse":
            if 'workspace-base-path' in session:
                # local and remote directories
                base_dir = session['workspace-base-path']
                network_dir = os.path.join(share_drive_letter + "/", message_value.workspace) + "/"
                print("Received Workspace: " + network_dir)

                # copy files to remote
                shutil.copytree(base_dir, network_dir, dirs_exist_ok=True)

                # record the workspace mapping (create the field on first use)
                if 'workspaces' not in session:
                    session['workspaces'] = {}
                session['workspaces'][message_value.workspace] = base_dir

                # open instructions file & send to server
                try:
                    with open(os.path.join(base_dir, "hivemind.txt"), "r") as file:
                        tasks = file.readlines()
                        session.to_send.put(ExecutionRequest(tasks, message_value.workspace))
                except FileNotFoundError:
                    print("Error opening file")
                    return EmitError("Error: directory is invalid! Does not contain hivemind.txt")
            else:
                return EmitError("Error: received workspace response without active workspace location")
        elif message_name == "ExecutionResponse":
            # inform the user if the server refused execution
            if not message_value.accepted:
                return EmitError("Server refused execution of our script!")
        elif message_name == "ExecutionDone":
            print(f"Environment {session['workspaces'][message_value.workspace]} done")
            # make output folder
            output_dir = session['workspaces'][message_value.workspace] + '_finished'
            os.makedirs(output_dir, exist_ok=True)
            # copy remote to local folder
            shutil.copytree(os.path.join(share_drive_letter, message_value.workspace),
                            output_dir, dirs_exist_ok=True)
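# Rough sketch of the round trip this module implements. How `session` gets
# primed is outside this file and assumed here; the hivemind.txt layout (one
# task per line) follows the readlines() call above:
#
#   1. session['workspace-base-path'] points at a local folder containing
#      hivemind.txt.
#   2. WorkspaceResponse: the folder is mirrored onto the network share and an
#      ExecutionRequest with the task lines is queued on session.to_send.
#   3. ExecutionDone: the remote workspace is copied back next to the original
#      folder with a '_finished' suffix.
#
# A minimal local folder that would satisfy step 1 (purely illustrative):
import os

os.makedirs("demo_workspace", exist_ok=True)
with open(os.path.join("demo_workspace", "hivemind.txt"), "w") as f:
    f.write("task_one\ntask_two\n")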
import numpy as np
from nemf import caller
from nemf import worker
from nemf.interaction_functions import *
from copy import deepcopy
import yaml


# Model_Classes
class model_class:
    # initialization methods
    # they are only used when a new model_class is created
    def __init__(self, model_path, ref_data_path=None):
        self.init_sys_config = worker.initialize_ode_system(model_path)
        self.sanity_check_input()
        # makes a copy of the system config for further usage
        self.compartment = deepcopy(self.init_sys_config['compartment'])
        self.interactions = deepcopy(self.init_sys_config['interactions'])
        self.configuration = deepcopy(self.init_sys_config['configuration'])
        # imports reference data
        self.load_reference_data(ref_data_path)
        # imports sinks and sources
        if ('sinks' in self.configuration) and ('sources' in self.configuration):
            self.fetch_index_of_source_and_sink()
        # imports alternative interaction names and adds them to the namespace
        if 'alternative_interaction_names' in self.configuration:
            self.init_alternative_interaction_names()
        # imports constraints used in the fitting process
        if 'constraints_path' in self.configuration:
            self.load_constraints(self.configuration['constraints_path'])

    def load_reference_data(self, ref_data_path=None, **kwargs):
        """ Loads reference data used in model optimization from file

        Either the path to the reference data is provided in the yaml
        configuration file, or it is passed to this function. The latter
        overrides the path in the configuration file.

        Parameters
        ----------
        ref_data_path : string (optional)
            path to the file containing the reference data
        """
        if ref_data_path is not None:
            ref_data, ref_headers = \
                worker.import_reference_data(ref_data_path, **kwargs)
            if len(np.shape(ref_data)) == 1:
                ref_data = np.reshape(ref_data, (1, len(ref_data)))
            self.reference_data = ref_data
            self.reference_headers = ref_headers
            print('Reference data set has been added.')
        elif 'ref_data_path' in self.configuration:
            ref_data_path = self.configuration['ref_data_path']
            ref_data, ref_headers = worker.import_reference_data(ref_data_path)
            if len(np.shape(ref_data)) == 1:
                ref_data = np.reshape(ref_data, (1, len(ref_data)))
            self.reference_data = ref_data
            self.reference_headers = ref_headers
            print('Reference data set has been added.')
        else:
            self.reference_data = None
            self.reference_headers = None
            print('No reference data has been provided')

    def prep_ref_data(self):
        """ prepares the reference data set for use in the objective function

        Returns
        -------
        da : numpy.array (DataArray)
            first column contains the posix time stamps for each observation,
            the remaining columns contain the data points for the
            corresponding compartments
        compart_of_interest : list of integers
            contains the indices of the compartments in the model which are
            also represented in the reference data
        """
        ref_data = self.reference_data
        ref_names = self.reference_headers

        # datetime stamp column of the reference data
        ref_timestamps = ref_data[:, 0]
        t_eval = list(ref_timestamps)
        t_eval = np.reshape(t_eval, (len(t_eval), 1))

        # clean integers from data names (those can't be compartments)
        delete_columns = []
        for ii, item in enumerate(ref_names):
            if type(item) == int:
                delete_columns.append(ii)
        ref_data = np.delete(ref_data, delete_columns, axis=1)
        ref_names = \
            [item for ii, item in enumerate(ref_names) if ii not in delete_columns]

        # fetching the indexes of those compartments that are also modelled
        ref_idx = self.fetch_index_of_compartment(ref_names)
        # fetching the columns of the ref. data set that are also modelled
        columns_of_interest = \
            [ii for (ii, item) in enumerate(ref_idx) if type(item) == int]
        # fetching indices of compartments that are present in the ref. data set
        compart_of_interest = \
            [item for (ii, item) in enumerate(ref_idx) if type(item) == int]

        # reference data sliced down to the columns that contain the reference
        # data of the compartments that are modelled
        ref_data = ref_data[:, columns_of_interest]
        da = np.concatenate((t_eval, ref_data), axis=1)

        return da, compart_of_interest

    def load_constraints(self, path):
        """ Loads constraints from python file

        Parameters
        ----------
        path : string
            path to python file
        """
        self.configuration['constraints'] = \
            worker.import_constraints(path)

    ## Export
    def export_to_yaml(self, path='current_model.yml'):
        """ Writes model compartments and interaction configuration to yaml-file

        Parameters
        ----------
        path : string (optional)
            Path to output file. Writes into 'current_model.yml' if no path
            is given.
        """
        # reduces the model to the minimum
        export_model = {}
        export_model['compartment'] = deepcopy(self.compartment)
        export_model['interactions'] = deepcopy(self.interactions)

        # transforms numpy objects to regular python types.
        # this is necessary as the numpy objects are dumped with all the extra
        # unnecessary class information.
        compart = export_model['compartment']
        for item in compart:
            compart[item]['value'] = float(compart[item]['value'])
        inter = export_model['interactions']
        for item in list(inter):
            for ii, fkt in enumerate(list(inter[item])):
                for jj, val in enumerate(fkt['parameters']):
                    if type(val) != int:
                        inter[item][ii]['parameters'][jj] = float(val)

        # writes dict to file
        with open(path, 'w') as yaml_file:
            dump = yaml.dump(export_model, default_flow_style=False, sort_keys=False)
            yaml_file.write(dump)

    def sanity_check_input(self):
        """ checks for obvious errors in the configuration file.
            passing this test doesn't guarantee a correct configuration. """

        unit = self.init_sys_config

        # checks if compartment is well defined
        name = "Model configuration "
        worker.assert_if_exists_non_empty(
            'compartment', unit, reference='compartment')
        assert (len(list(unit['compartment'])) > 1), \
            name + "only contains a single compartment"
        ## check if all compartments are well defined
        for item in list(unit['compartment']):
            worker.assert_if_non_empty(
                item, unit['compartment'], item, reference='state')
            worker.assert_if_exists('optimise', unit['compartment'][item], item)
            worker.assert_if_exists_non_empty('value', unit['compartment'][item],
                                              item, 'value')

        # checks if interactions is well defined
        worker.assert_if_exists_non_empty('interactions', unit, 'interactions')
        ## check if all interactions are well defined
        for item in list(unit['interactions']):
            for edge in unit['interactions'][item]:
                assert edge is not None, \
                    name + "interaction {} is empty".format(item)
                worker.assert_if_exists_non_empty('fkt', edge, item)
                worker.assert_if_exists('parameters', edge, item)
        for interaction in unit['interactions']:
            for fkt in unit['interactions'][interaction]:
                allowed_items = ['fkt', 'parameters', 'optimise']
                for item in list(fkt):
                    assert item in allowed_items, \
                        f"Invalid key: {item} in interaction {interaction}"

        # checks if configuration is well defined
        worker.assert_if_exists_non_empty('configuration', unit)
        required_elements = ['time_evo_max']
        for element in required_elements:
            worker.assert_if_exists_non_empty(element, unit['configuration'])

    def init_alternative_interaction_names(self):
        """ add renamed copies defined in the yaml config to globals() """
        alt_names = self.configuration['alternative_interaction_names']

        for new_func in list(alt_names):
            orig_func = globals()[alt_names[new_func]]
            globals()[new_func] = orig_func

    def initialize_log(self, maxiter):
        max_iter = maxiter + 1
        fit_parameter = self.fetch_to_optimize_args()[0][1]
        param_log = np.full((max_iter, len(fit_parameter)), np.nan)
        cost_log = np.full((max_iter), np.nan)
        log_dict = {'parameters': param_log, 'cost': cost_log, 'iter_idx': 0}
        self.log = log_dict

    # Logging
    def construct_callback(self, method='SLSQP', debug=False):
        model = self
        if method == 'trust-constr':
            def callback(xk, opt):  # -> bool
                if debug:
                    print(f'xk: \n{xk}')
                model.to_log(xk, cost=opt.fun)
        else:
            def callback(xk):  # -> bool
                if debug:
                    print(f'xk: \n{xk}')
                model.to_log(xk)
        return callback

    def to_log(self, parameters, cost=None):
        # current iteration index
        idx = self.log['iter_idx']
        self.log['parameters'][idx] = parameters
        self.log['cost'][idx] = cost
        self.log['iter_idx'] += 1

    # Parsing
    def to_grad_method(self):
        """ fetches the parameters necessary for the gradient descent method

        Returns: free_parameters, constraints, labels """
        free_parameters = []
        constraints = []
        labels = []

        for ii in self.compartment:
            if self.compartment[ii]['optimise'] is not None:
                labels.append('{}'.format(ii))
                value = self.compartment[ii]['value']
                lower_bound = self.compartment[ii]['optimise']['lower']
                upper_bound = self.compartment[ii]['optimise']['upper']
                free_parameters.append(value)
                constraints.append([lower_bound, upper_bound])

        for ii in self.interactions:  # function
            for item in self.interactions[ii]:  # parameters
                if 'optimise' in item:
                    if item['optimise'] is not None:
                        for jj, elements in enumerate(item['optimise']):
                            labels.append('{},fkt: {} #{}'.format(
                                ii, item['fkt'], elements['parameter_no']))
                            value = item['parameters'][jj]
                            lower_bound = elements['lower']
                            upper_bound = elements['upper']
                            free_parameters.append(value)
                            constraints.append([lower_bound, upper_bound])

        free_parameters = np.array(free_parameters)
        constraints = np.array(constraints)

        return free_parameters, constraints, labels

    def update_system_with_parameters(self, parameters):
        values = list(parameters)

        for ii in self.compartment:
            if self.compartment[ii]['optimise'] is not None:
                self.compartment[ii]['value'] = values.pop(0)

        for ii in self.interactions:  # function
            for item in self.interactions[ii]:  # parameters
                if 'optimise' in item:
                    if item['optimise'] is not None:
                        for element in item['optimise']:
                            item['parameters'][element['parameter_no'] - 1] = \
                                values.pop(0)

    # Fetching
    # they retrieve some information from the model and output them in a certain
    # form. They mostly look through dicts to extract a subset of them

    def fetch_constraints(self):
        # placeholder for constraints generator
        if 'constraints' in self.configuration:
            return self.configuration['constraints']
        else:
            return None

    def fetch_index_of_interaction(self):
        """ gets the indices in the interaction matrix """
        ## separate row & column
        interactions = list(self.interactions)
        compartments = list(self.compartment)
        interaction_index = interactions.copy()
        for index, item in enumerate(interactions):
            interaction_index[index] = item.split(':')
        ## parse them with the index
        for index, item in enumerate(interaction_index):
            interaction_index[index][0] = compartments.index(item[0])
            interaction_index[index][1] = compartments.index(item[1])

        return interaction_index

    def fetch_index_of_source_and_sink(self):
        if self.configuration['sources'] is None:
            sources = None
            idx_sources = []
        else:
            sources = list(self.configuration['sources'])
            idx_sources = sources.copy()
            for ii, item in enumerate(idx_sources):
                idx_sources[ii] = list(self.compartment).index(item)
        if self.configuration['sinks'] is None:
            sinks = None
            idx_sinks = []
        else:
            sinks = list(self.configuration['sinks'])
            idx_sinks = sinks.copy()
            for ii, item in enumerate(idx_sinks):
                idx_sinks[ii] = list(self.compartment).index(item)

        self.configuration['idx_sources'] = idx_sources
        self.configuration['idx_sinks'] = idx_sinks

    def fetch_index_of_compartment(self, parameters):
        # add something to make sure all changes to parameters stay internal
        compartments = list(self.compartment)
        for nn, entry in enumerate(parameters):
            if isinstance(entry, str) and (entry in compartments):
                parameters[nn] = compartments.index(entry)

        return parameters

    def fetch_to_optimize_args(self):
        """ fetches the parameters necessary for the gradient descent method

        Returns: [fit_indices, fit_param, bnd_param], labels """
        labels = []
        idx_state = []; val_state = []; bnd_state = []
        for ii, entry in enumerate(self.compartment):
            if self.compartment[entry]['optimise'] is not None:
                labels.append('{}'.format(entry))
                idx_state.append(ii)
                val_state.append(self.compartment[entry]['value'])
                lower_bound = self.compartment[entry]['optimise']['lower']
                upper_bound = self.compartment[entry]['optimise']['upper']
                bnd_state.append([lower_bound, upper_bound])

        idx_args = []; val_args = []; bnd_args = []
        for ii, interaction in enumerate(self.interactions):
            for jj, function in enumerate(self.interactions[interaction]):
                if 'optimise' in function:
                    if function['optimise'] is not None:
                        for kk, parameter in enumerate(function['optimise']):
                            labels.append('{},fkt: {} #{}'.format(
                                interaction, function['fkt'],
                                parameter['parameter_no']))
                            current_idx_args = \
                                [ii, jj, parameter['parameter_no'] - 1]
                            current_val_args = \
                                self.fetch_arg_by_idx(current_idx_args)
                            lower_bound = parameter['lower']
                            upper_bound = parameter['upper']
                            idx_args.append(current_idx_args)
                            val_args.append(current_val_args)
                            bnd_args.append([lower_bound, upper_bound])

        fit_indices = [idx_state, idx_args]
        fit_param = val_state + val_args
        bnd_param = bnd_state + bnd_args

        return [fit_indices, fit_param, bnd_param], labels

    def fetch_states(self):
        states = []
        compartment = self.compartment
        for item in compartment:
            states.append(compartment[item]['value'])
        return states

    def fetch_args(self):
        args = []
        for interactions in self.interactions:
            args_edge = []
            for edges in self.interactions[interactions]:
                indexed_args = self.fetch_index_of_compartment(edges['parameters'])
                args_edge.append(indexed_args)
            args.append(args_edge)
        return args

    def fetch_param(self):
        states = self.fetch_states()
        args = self.fetch_args()
        return [states, args]

    def fetch_arg_by_idx(self, index):
        args = self.fetch_args()
        idx = index
        arg = args[idx[0]][idx[1]][idx[2]]
        return arg

    def de_constructor(self):
        # the benefit of constructing it like this is that:
        # * we are able to get the signature f(x,args)
        # * all non-(x,args) related objects are only evaluated once.
        # however, this for-looping is still super inefficient and a more
        # vectorized approach should be considered

        # args is expected to have the same shape as set_of_functions
        set_of_function = self.interactions
        idx_interactions = self.fetch_index_of_interaction()
        n_compartments = len(self.compartment)

        def differential_equation(t, x, args):  # (t,x) later
            y = np.zeros((n_compartments, n_compartments))
            for ii, functions in enumerate(set_of_function):
                interaction = set_of_function[functions]
                for jj, edge in enumerate(interaction):
                    kk, ll = idx_interactions[ii]
                    # print(f"{edge['fkt']} flows into "
                    #       f"{list(self.compartment)[kk]} out of {list(self.compartment)[ll]}")
                    flow = max(0, globals()[edge['fkt']](x, kk, *args[ii][jj]))
                    # flows into kk (out of ll)
                    y[kk, ll] += flow
                    # flow out of ll (into kk)
                    y[ll, kk] -= flow
            return np.sum(y, axis=1)

        return differential_equation


def import_interaction_functions(func):
    """ Adds the functions from 'func' to the globals in models

    func : list
        List containing the functions that will be added to globals
    """
    for item in func:
        name = item.__name__
        if name in globals():
            print('Warning! A function with the same name as interaction '
                  + f"function '{name}' is already known in globals!\n"
                  + 'Function will be overwritten!')
        globals()[name] = item
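# Minimal usage sketch for the model class above. 'model.yml' and my_flow are
# placeholders: the YAML file must follow the layout sanity_check_input()
# expects (compartment/interactions/configuration) and reference 'my_flow'
# (or any registered interaction) in its interactions.
def my_flow(x, idx, rate):
    # toy interaction: flow proportional to the origin compartment's state
    return rate * x[idx]

import_interaction_functions([my_flow])
model = model_class('model.yml')
rhs = model.de_constructor()
states, args = model.fetch_param()
dxdt = rhs(0.0, np.array(states), args)  # one evaluation of the ODE right-hand side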
"""Tests for the RunTaskCommand class""" from cumulusci.cli.runtime import CliRuntime from cumulusci.cli.cci import RunTaskCommand import click import pytest from unittest.mock import Mock, patch from cumulusci.cli import cci from cumulusci.core.exceptions import CumulusCIUsageError from cumulusci.cli.tests.utils import run_click_command, DummyTask color_opts = {"options": {"color": {}}} multiple_opts = {"options": {"foo": {}, "bar": {}, "baz": {}}} test_tasks = { "dummy-task": {"class_path": "cumulusci.cli.tests.utils.DummyTask"}, "dummy-derived-task": { "class_path": "cumulusci.cli.tests.test_run_task.DummyDerivedTask" }, } @pytest.fixture def runtime(): runtime = CliRuntime(load_keychain=False) runtime.project_config.config["tasks"] = {**test_tasks} runtime.keychain = Mock() runtime.keychain.get_default_org.return_value = (None, None) with patch("cumulusci.cli.cci.RUNTIME", runtime): yield runtime def test_task_run(runtime): DummyTask._run_task = Mock() multi_cmd = cci.RunTaskCommand() cmd = multi_cmd.get_command(Mock, "dummy-task") run_click_command(cmd, "dummy-task", color="blue", runtime=runtime) DummyTask._run_task.assert_called_once() def test_task_run__debug_before(runtime): DummyTask._run_task = Mock() multi_cmd = cci.RunTaskCommand() set_trace = Mock(side_effect=SetTrace) with patch("pdb.set_trace", set_trace): with pytest.raises(SetTrace): cmd = multi_cmd.get_command(Mock(), "dummy-task") run_click_command( cmd, "dummy_task", color="blue", debug_before=True, debug_after=False, runtime=runtime, ) def test_task_run__debug_after(runtime): DummyTask._run_task = Mock() multi_cmd = cci.RunTaskCommand() set_trace = Mock(side_effect=SetTrace) with patch("pdb.set_trace", set_trace): with pytest.raises(SetTrace): cmd = multi_cmd.get_command(Mock(), "dummy-task") run_click_command( cmd, "dummy-task", color="blue", debug_before=False, debug_after=True, runtime=runtime, ) def test_task_run__list_commands(runtime): multi_cmd = cci.RunTaskCommand() commands = multi_cmd.list_commands(Mock()) assert commands == ["dummy-derived-task", "dummy-task"] def test_format_help(runtime): with patch("cumulusci.cli.cci.click.echo") as echo: runtime.universal_config = Mock() RunTaskCommand().format_help(Mock(), Mock()) assert 4 == echo.call_count assert 0 == len(runtime.universal_config.method_calls) def test_get_default_command_options(): opts = RunTaskCommand()._get_default_command_options(is_salesforce_task=False) assert len(opts) == 4 opts = RunTaskCommand()._get_default_command_options(is_salesforce_task=True) assert len(opts) == 5 assert any([o.name == "org" for o in opts]) def test_collect_task_options(): new_options = {"debug-before": None} old_options = (("color", "green"),) opts = RunTaskCommand()._collect_task_options( new_options, old_options, "dummy-task", color_opts["options"] ) assert opts == {"color": "green"} def test_collect_task_options__duplicate(): new_options = {"color": "aqua"} old_options = (("color", "green"),) with pytest.raises(CumulusCIUsageError): RunTaskCommand()._collect_task_options( new_options, old_options, "dummy-task", color_opts["options"] ) def test_collect_task_options__not_in_task(): new_options = {} old_options = (("color", "green"),) with pytest.raises(CumulusCIUsageError): RunTaskCommand()._collect_task_options( new_options, old_options, "dummy-task", {"not-color": {}} ) class SetTrace(Exception): pass class DummyDerivedTask(DummyTask): def _run_task(self): click.echo(f"<{self.__class__}>\n\tcolor: {self.options["color"]}")
"""Tests for the RunTaskCommand class""" from cumulusci.cli.runtime import CliRuntime from cumulusci.cli.cci import RunTaskCommand import click import pytest from unittest.mock import Mock, patch from cumulusci.cli import cci from cumulusci.core.exceptions import CumulusCIUsageError from cumulusci.cli.tests.utils import run_click_command, DummyTask color_opts = {"options": {"color": {}}} multiple_opts = {"options": {"foo": {}, "bar": {}, "baz": {}}} test_tasks = { "dummy-task": {"class_path": "cumulusci.cli.tests.utils.DummyTask"}, "dummy-derived-task": { "class_path": "cumulusci.cli.tests.test_run_task.DummyDerivedTask" }, } @pytest.fixture def runtime(): runtime = CliRuntime(load_keychain=False) runtime.project_config.config["tasks"] = {**test_tasks} runtime.keychain = Mock() runtime.keychain.get_default_org.return_value = (None, None) with patch("cumulusci.cli.cci.RUNTIME", runtime): yield runtime def test_task_run(runtime): DummyTask._run_task = Mock() multi_cmd = cci.RunTaskCommand() cmd = multi_cmd.get_command(Mock, "dummy-task") run_click_command(cmd, "dummy-task", color="blue", runtime=runtime) DummyTask._run_task.assert_called_once() def test_task_run__debug_before(runtime): DummyTask._run_task = Mock() multi_cmd = cci.RunTaskCommand() set_trace = Mock(side_effect=SetTrace) with patch("pdb.set_trace", set_trace): with pytest.raises(SetTrace): cmd = multi_cmd.get_command(Mock(), "dummy-task") run_click_command( cmd, "dummy_task", color="blue", debug_before=True, debug_after=False, runtime=runtime, ) def test_task_run__debug_after(runtime): DummyTask._run_task = Mock() multi_cmd = cci.RunTaskCommand() set_trace = Mock(side_effect=SetTrace) with patch("pdb.set_trace", set_trace): with pytest.raises(SetTrace): cmd = multi_cmd.get_command(Mock(), "dummy-task") run_click_command( cmd, "dummy-task", color="blue", debug_before=False, debug_after=True, runtime=runtime, ) def test_task_run__list_commands(runtime): multi_cmd = cci.RunTaskCommand() commands = multi_cmd.list_commands(Mock()) assert commands == ["dummy-derived-task", "dummy-task"] def test_format_help(runtime): with patch("cumulusci.cli.cci.click.echo") as echo: runtime.universal_config = Mock() RunTaskCommand().format_help(Mock(), Mock()) assert 4 == echo.call_count assert 0 == len(runtime.universal_config.method_calls) def test_get_default_command_options(): opts = RunTaskCommand()._get_default_command_options(is_salesforce_task=False) assert len(opts) == 4 opts = RunTaskCommand()._get_default_command_options(is_salesforce_task=True) assert len(opts) == 5 assert any([o.name == "org" for o in opts]) def test_collect_task_options(): new_options = {"debug-before": None} old_options = (("color", "green"),) opts = RunTaskCommand()._collect_task_options( new_options, old_options, "dummy-task", color_opts["options"] ) assert opts == {"color": "green"} def test_collect_task_options__duplicate(): new_options = {"color": "aqua"} old_options = (("color", "green"),) with pytest.raises(CumulusCIUsageError): RunTaskCommand()._collect_task_options( new_options, old_options, "dummy-task", color_opts["options"] ) def test_collect_task_options__not_in_task(): new_options = {} old_options = (("color", "green"),) with pytest.raises(CumulusCIUsageError): RunTaskCommand()._collect_task_options( new_options, old_options, "dummy-task", {"not-color": {}} ) class SetTrace(Exception): pass class DummyDerivedTask(DummyTask): def _run_task(self): click.echo(f"<{self.__class__}>\n\tcolor: {self.options['color']}")
from typing import Any, List, Dict, Tuple

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

import nni

from ..interface import BaseTrainer
from ...utils import register_trainer


def get_default_transform(dataset: str) -> Any:
    """
    Get a default image transformation for a specific dataset.
    This is needed because transform objects cannot be directly passed as arguments.

    Parameters
    ----------
    dataset : str
        Dataset class name.

    Returns
    -------
    transform object
    """
    if dataset == 'MNIST':
        return transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
    if dataset == 'CIFAR10':
        return transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
    # unsupported dataset, return None
    return None


@register_trainer
class PyTorchImageClassificationTrainer(BaseTrainer):
    """
    Image classification trainer for PyTorch.

    A model, along with its corresponding dataset and optimizer config, is used to initialize the trainer.
    The trainer will run for a fixed number of epochs (by default 10), and report the final result.

    TODO
    Support scheduler, validate every n epochs, train/valid dataset

    Limitation induced by NNI: kwargs must be serializable to put into a JSON packed in parameters.
    """

    def __init__(self, model,
                 dataset_cls='MNIST', dataset_kwargs=None, dataloader_kwargs=None,
                 optimizer_cls='SGD', optimizer_kwargs=None, trainer_kwargs=None):
        """Initialization of image classification trainer.

        Parameters
        ----------
        model : nn.Module
            Model to train.
        dataset_cls : str, optional
            Dataset class name that is available in ``torchvision.datasets``, by default 'MNIST'
        dataset_kwargs : dict, optional
            Keyword arguments passed to initialization of dataset class, by default None
        dataloader_kwargs : dict, optional
            Keyword arguments passed to ``torch.utils.data.DataLoader``, by default None
        optimizer_cls : str, optional
            Optimizer class name that is available in ``torch.optim``, by default 'SGD'
        optimizer_kwargs : dict, optional
            Keyword arguments passed to initialization of optimizer class, by default None
        trainer_kwargs: dict, optional
            Keyword arguments passed to trainer. Will be passed to Trainer class in future. Currently,
            only the key ``max_epochs`` is useful.
""" super().__init__() self._use_cuda = torch.cuda.is_available() self.model = model if self._use_cuda: self.model.cuda() self._loss_fn = nn.CrossEntropyLoss() self._train_dataset = getattr(datasets, dataset_cls)(train=True, transform=get_default_transform(dataset_cls), **(dataset_kwargs or {})) self._val_dataset = getattr(datasets, dataset_cls)(train=False, transform=get_default_transform(dataset_cls), **(dataset_kwargs or {})) self._optimizer = getattr(torch.optim, optimizer_cls)(model.parameters(), **(optimizer_kwargs or {})) self._trainer_kwargs = trainer_kwargs or {'max_epochs': 10} self._train_dataloader = DataLoader(self._train_dataset, **(dataloader_kwargs or {})) self._val_dataloader = DataLoader(self._val_dataset, **(dataloader_kwargs or {})) def _accuracy(self, input, target): # pylint: disable=redefined-builtin _, predict = torch.max(input.data, 1) correct = predict.eq(target.data).cpu().sum().item() return correct / input.size(0) def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int) -> Dict[str, Any]: x, y = self.training_step_before_model(batch, batch_idx) y_hat = self.model(x) return self.training_step_after_model(x, y, y_hat) def training_step_before_model(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int): x, y = batch if self._use_cuda: x, y = x.cuda(torch.device('cuda:0')), y.cuda(torch.device('cuda:0')) return x, y def training_step_after_model(self, x, y, y_hat): loss = self._loss_fn(y_hat, y) return loss def validation_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int) -> Dict[str, Any]: x, y = self.validation_step_before_model(batch, batch_idx) y_hat = self.model(x) return self.validation_step_after_model(x, y, y_hat) def validation_step_before_model(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int): x, y = batch if self._use_cuda: x, y = x.cuda(), y.cuda() return x, y def validation_step_after_model(self, x, y, y_hat): acc = self._accuracy(y_hat, y) return {'val_acc': acc} def validation_epoch_end(self, outputs: List[Dict[str, Any]]) -> Dict[str, Any]: # We might need dict metrics in future? 
        avg_acc = np.mean([x['val_acc'] for x in outputs]).item()
        nni.report_intermediate_result(avg_acc)
        return {'val_acc': avg_acc}

    def _validate(self):
        validation_outputs = []
        for i, batch in enumerate(self._val_dataloader):
            validation_outputs.append(self.validation_step(batch, i))
        return self.validation_epoch_end(validation_outputs)

    def _train(self):
        for i, batch in enumerate(self._train_dataloader):
            self._optimizer.zero_grad()
            loss = self.training_step(batch, i)
            loss.backward()
            self._optimizer.step()

    def fit(self) -> None:
        for _ in range(self._trainer_kwargs['max_epochs']):
            self._train()
            self._validate()
        # assuming val_acc here
        nni.report_final_result(self._validate()['val_acc'])


class PyTorchMultiModelTrainer(BaseTrainer):
    def __init__(self, multi_model, kwargs=None):
        self.multi_model = multi_model
        self.kwargs = kwargs or {}
        self._train_dataloaders = []
        self._train_datasets = []
        self._val_dataloaders = []
        self._val_datasets = []
        self._optimizers = []
        self._trainers = []
        self._loss_fn = nn.CrossEntropyLoss()
        self.max_steps = self.kwargs['max_steps'] if 'max_steps' in self.kwargs else None
        self.n_model = len(self.kwargs['model_kwargs'])

        for m in self.kwargs['model_kwargs']:
            if m['use_input']:
                dataset_cls = m['dataset_cls']
                dataset_kwargs = m['dataset_kwargs']
                dataloader_kwargs = m['dataloader_kwargs']
                train_dataset = getattr(datasets, dataset_cls)(train=True,
                                                               transform=get_default_transform(dataset_cls),
                                                               **(dataset_kwargs or {}))
                val_dataset = getattr(datasets, dataset_cls)(train=False,
                                                             transform=get_default_transform(dataset_cls),
                                                             **(dataset_kwargs or {}))
                train_dataloader = DataLoader(train_dataset, **(dataloader_kwargs or {}))
                val_dataloader = DataLoader(val_dataset, **(dataloader_kwargs or {}))
                self._train_datasets.append(train_dataset)
                self._train_dataloaders.append(train_dataloader)
                self._val_datasets.append(val_dataset)
                self._val_dataloaders.append(val_dataloader)

            if m['use_output']:
                optimizer_cls = m['optimizer_cls']
                optimizer_kwargs = m['optimizer_kwargs']
                m_header = f"M_{m['model_id']}"
                one_model_params = []
                for name, param in multi_model.named_parameters():
                    name_prefix = '_'.join(name.split('_')[:2])
                    if m_header == name_prefix:
                        one_model_params.append(param)
                optimizer = getattr(torch.optim, optimizer_cls)(one_model_params, **(optimizer_kwargs or {}))
                self._optimizers.append(optimizer)

    def fit(self) -> None:
        torch.autograd.set_detect_anomaly(True)
        max_epochs = max([x['trainer_kwargs']['max_epochs'] for x in self.kwargs['model_kwargs']])
        for _ in range(max_epochs):
            self._train()
            self._validate()
        nni.report_final_result(self._validate())

    def _train(self):
        for batch_idx, multi_model_batch in enumerate(zip(*self._train_dataloaders)):
            for opt in self._optimizers:
                opt.zero_grad()
            xs = []
            ys = []
            for idx, batch in enumerate(multi_model_batch):
                x, y = self.training_step_before_model(batch, batch_idx, f'cuda:{idx}')
                xs.append(x)
                ys.append(y)

            y_hats = self.multi_model(*xs)
            if len(ys) != len(xs):
                raise ValueError('len(ys) should be equal to len(xs)')

            losses = []
            report_loss = {}
            for output_idx, yhat in enumerate(y_hats):
                if len(ys) == len(y_hats):
                    loss = self.training_step_after_model(xs[output_idx], ys[output_idx], yhat)
                elif len(ys) == 1:
                    loss = self.training_step_after_model(xs[0], ys[0].to(yhat.get_device()), yhat)
                else:
                    raise ValueError('len(ys) should be either 1 or len(y_hats)')
                losses.append(loss.to("cuda:0"))
                report_loss[self.kwargs['model_kwargs'][output_idx]['model_id']] = loss.item()
            summed_loss = sum(losses)
            summed_loss.backward()
            for opt in self._optimizers:
                opt.step()
            if self.max_steps and batch_idx >= self.max_steps:
                return

    def training_step_before_model(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int, device=None):
        x, y = batch
        if device:
            x, y = x.cuda(torch.device(device)), y.cuda(torch.device(device))
        return x, y

    def training_step_after_model(self, x, y, y_hat):
        loss = self._loss_fn(y_hat, y)
        return loss

    def _validate(self):
        all_val_outputs = {idx: [] for idx in range(self.n_model)}
        for batch_idx, multi_model_batch in enumerate(zip(*self._val_dataloaders)):
            xs = []
            ys = []
            for idx, batch in enumerate(multi_model_batch):
                x, y = self.training_step_before_model(batch, batch_idx, f'cuda:{idx}')
                xs.append(x)
                ys.append(y)

            if len(ys) != len(xs):
                raise ValueError('len(ys) should be equal to len(xs)')

            y_hats = self.multi_model(*xs)
            for output_idx, yhat in enumerate(y_hats):
                if len(ys) == len(y_hats):
                    acc = self.validation_step_after_model(xs[output_idx], ys[output_idx], yhat)
                elif len(ys) == 1:
                    acc = self.validation_step_after_model(xs[0], ys[0].to(yhat.get_device()), yhat)
                else:
                    raise ValueError('len(ys) should be either 1 or len(y_hats)')
                all_val_outputs[output_idx].append(acc)

        report_acc = {}
        for idx in all_val_outputs:
            avg_acc = np.mean([x['val_acc'] for x in all_val_outputs[idx]]).item()
            report_acc[self.kwargs['model_kwargs'][idx]['model_id']] = avg_acc
        nni.report_intermediate_result(report_acc)
        return report_acc

    def validation_step_before_model(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int, device=None):
        x, y = batch
        if device:
            x, y = x.cuda(torch.device(device)), y.cuda(torch.device(device))
        return x, y

    def validation_step_after_model(self, x, y, y_hat):
        acc = self._accuracy(y_hat, y)
        return {'val_acc': acc}

    def _accuracy(self, input, target):  # pylint: disable=redefined-builtin
        _, predict = torch.max(input.data, 1)
        correct = predict.eq(target.data).cpu().sum().item()
        return correct / input.size(0)
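# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module, illustrative only):
# how PyTorchImageClassificationTrainer is wired together. Assumptions: the
# module is invoked through its package (so the relative imports above
# resolve, e.g. via ``python -m``), torchvision can download MNIST into
# './data', and the tiny stand-in model below is hypothetical.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    _demo_model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
    _demo_trainer = PyTorchImageClassificationTrainer(
        _demo_model,
        dataset_cls='MNIST',
        dataset_kwargs={'root': 'data', 'download': True},
        dataloader_kwargs={'batch_size': 32, 'shuffle': True},
        optimizer_cls='SGD',
        optimizer_kwargs={'lr': 1e-3},
        trainer_kwargs={'max_epochs': 1},
    )
    _demo_trainer.fit()  # trains one epoch, then reports accuracy via nni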
import asyncio from collections import defaultdict from enum import Enum from typing import Any from typing import DefaultDict from typing import Dict from typing import Iterable from typing import List from typing import Mapping from typing import MutableMapping from typing import Optional from typing import Sequence from typing import Set from typing import Tuple import a_sync import pytz from kubernetes.client import V1Container from kubernetes.client import V1ControllerRevision from kubernetes.client import V1Pod from kubernetes.client import V1Probe from kubernetes.client import V1ReplicaSet from kubernetes.client.rest import ApiException from mypy_extensions import TypedDict from paasta_tools import cassandracluster_tools from paasta_tools import envoy_tools from paasta_tools import flink_tools from paasta_tools import kafkacluster_tools from paasta_tools import kubernetes_tools from paasta_tools import marathon_tools from paasta_tools import monkrelaycluster_tools from paasta_tools import nrtsearchservice_tools from paasta_tools import smartstack_tools from paasta_tools.cli.utils import LONG_RUNNING_INSTANCE_TYPE_HANDLERS from paasta_tools.instance.hpa_metrics_parser import HPAMetricsDict from paasta_tools.instance.hpa_metrics_parser import HPAMetricsParser from paasta_tools.kubernetes_tools import get_pod_event_messages from paasta_tools.kubernetes_tools import get_tail_lines_for_kubernetes_container from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig from paasta_tools.long_running_service_tools import LongRunningServiceConfig from paasta_tools.long_running_service_tools import ServiceNamespaceConfig from paasta_tools.smartstack_tools import KubeSmartstackEnvoyReplicationChecker from paasta_tools.smartstack_tools import match_backends_and_pods from paasta_tools.utils import calculate_tail_lines INSTANCE_TYPES_CR = {"flink", "cassandracluster", "kafkacluster"} INSTANCE_TYPES_K8S = {"kubernetes", "cassandracluster"} INSTANCE_TYPES = INSTANCE_TYPES_K8S.union(INSTANCE_TYPES_CR) INSTANCE_TYPES_WITH_SET_STATE = {"flink"} INSTANCE_TYPE_CR_ID = dict( flink=flink_tools.cr_id, cassandracluster=cassandracluster_tools.cr_id, kafkacluster=kafkacluster_tools.cr_id, nrtsearchservice=nrtsearchservice_tools.cr_id, monkrelaycluster=monkrelaycluster_tools.cr_id, ) class ServiceMesh(Enum): SMARTSTACK = "smartstack" ENVOY = "envoy" class KubernetesAutoscalingStatusDict(TypedDict): min_instances: int max_instances: int metrics: List desired_replicas: int last_scale_time: str class KubernetesVersionDict(TypedDict, total=False): name: str type: str replicas: int ready_replicas: int create_timestamp: int git_sha: str config_sha: str pods: Sequence[Dict[str, Any]] def cr_id(service: str, instance: str, instance_type: str) -> Mapping[str, str]: cr_id_fn = INSTANCE_TYPE_CR_ID.get(instance_type) if not cr_id_fn: raise RuntimeError(f"Unknown instance type {instance_type}") return cr_id_fn(service, instance) def can_handle(instance_type: str) -> bool: return instance_type in INSTANCE_TYPES def can_set_state(instance_type: str) -> bool: return instance_type in INSTANCE_TYPES_WITH_SET_STATE def set_cr_desired_state( kube_client: kubernetes_tools.KubeClient, service: str, instance: str, instance_type: str, desired_state: str, ): try: kubernetes_tools.set_cr_desired_state( kube_client=kube_client, cr_id=cr_id(service, instance, instance_type), desired_state=desired_state, ) except ApiException as e: error_message = ( f"Error while setting state {desired_state} of " f"{service}.{instance}: {e}" ) 
raise RuntimeError(error_message) def autoscaling_status( kube_client: kubernetes_tools.KubeClient, job_config: LongRunningServiceConfig, namespace: str, ) -> KubernetesAutoscalingStatusDict: try: hpa = kube_client.autoscaling.read_namespaced_horizontal_pod_autoscaler( name=job_config.get_sanitised_deployment_name(), namespace=namespace ) except ApiException as e: if e.status == 404: return KubernetesAutoscalingStatusDict( min_instances=-1, max_instances=-1, metrics=[], desired_replicas=-1, last_scale_time="unknown (could not find HPA object)", ) else: raise # Parse metrics sources, based on # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V2beta2ExternalMetricSource.md#v2beta2externalmetricsource parser = HPAMetricsParser(hpa) # https://github.com/python/mypy/issues/7217 metrics_by_name: DefaultDict[str, HPAMetricsDict] = defaultdict( lambda: HPAMetricsDict() ) if hpa.spec.metrics is not None: for metric_spec in hpa.spec.metrics: parsed = parser.parse_target(metric_spec) metrics_by_name[parsed["name"]].update(parsed) if hpa.status.current_metrics is not None: for metric_spec in hpa.status.current_metrics: parsed = parser.parse_current(metric_spec) if parsed is not None: metrics_by_name[parsed["name"]].update(parsed) metric_stats = list(metrics_by_name.values()) last_scale_time = ( hpa.status.last_scale_time.replace(tzinfo=pytz.UTC).isoformat() if getattr(hpa.status, "last_scale_time") else "N/A" ) return KubernetesAutoscalingStatusDict( min_instances=hpa.spec.min_replicas, max_instances=hpa.spec.max_replicas, metrics=metric_stats, desired_replicas=hpa.status.desired_replicas, last_scale_time=last_scale_time, ) async def pod_info( pod: V1Pod, client: kubernetes_tools.KubeClient, num_tail_lines: int, ): container_statuses = pod.status.container_statuses or [] try: pod_event_messages = await get_pod_event_messages(client, pod) except asyncio.TimeoutError: pod_event_messages = [{"error": "Could not fetch events for pod"}] containers = [ dict( name=container.name, tail_lines=await get_tail_lines_for_kubernetes_container( client, pod, container, num_tail_lines, ), ) for container in container_statuses ] return { "name": pod.metadata.name, "host": kubernetes_tools.get_pod_hostname(client, pod), "deployed_timestamp": pod.metadata.creation_timestamp.timestamp(), "phase": pod.status.phase, "ready": kubernetes_tools.is_pod_ready(pod), "containers": containers, "reason": pod.status.reason, "message": pod.status.message, "events": pod_event_messages, "git_sha": pod.metadata.labels.get("paasta.yelp.com/git_sha"), "config_sha": pod.metadata.labels.get("paasta.yelp.com/config_sha"), } @a_sync.to_blocking async def job_status( kstatus: MutableMapping[str, Any], client: kubernetes_tools.KubeClient, job_config: LongRunningServiceConfig, pod_list: Sequence[V1Pod], replicaset_list: Sequence[V1ReplicaSet], verbose: int, namespace: str, ) -> None: app_id = job_config.get_sanitised_deployment_name() kstatus["app_id"] = app_id kstatus["pods"] = [] kstatus["replicasets"] = [] if verbose > 0: num_tail_lines = calculate_tail_lines(verbose) kstatus["pods"] = await asyncio.gather( *[pod_info(pod, client, num_tail_lines) for pod in pod_list] ) for replicaset in replicaset_list: kstatus["replicasets"].append( { "name": replicaset.metadata.name, "replicas": replicaset.spec.replicas, "ready_replicas": ready_replicas_from_replicaset(replicaset), "create_timestamp": replicaset.metadata.creation_timestamp.timestamp(), "git_sha": replicaset.metadata.labels.get("paasta.yelp.com/git_sha"), "config_sha": 
replicaset.metadata.labels.get( "paasta.yelp.com/config_sha" ), } ) kstatus["expected_instance_count"] = job_config.get_instances() app = kubernetes_tools.get_kubernetes_app_by_name( name=app_id, kube_client=client, namespace=namespace ) desired_instances = ( job_config.get_instances() if job_config.get_desired_state() != "stop" else 0 ) deploy_status, message = kubernetes_tools.get_kubernetes_app_deploy_status( app=app, desired_instances=desired_instances, ) kstatus["deploy_status"] = kubernetes_tools.KubernetesDeployStatus.tostring( deploy_status ) kstatus["deploy_status_message"] = message kstatus["running_instance_count"] = ( app.status.ready_replicas if app.status.ready_replicas else 0 ) kstatus["create_timestamp"] = app.metadata.creation_timestamp.timestamp() kstatus["namespace"] = app.metadata.namespace def mesh_status( service: str, service_mesh: ServiceMesh, instance: str, job_config: LongRunningServiceConfig, service_namespace_config: ServiceNamespaceConfig, pods: Sequence[V1Pod], settings: Any, should_return_individual_backends: bool = False, ) -> Mapping[str, Any]: registration = job_config.get_registrations()[0] instance_pool = job_config.get_pool() replication_checker = KubeSmartstackEnvoyReplicationChecker( nodes=kubernetes_tools.get_all_nodes(settings.kubernetes_client), system_paasta_config=settings.system_paasta_config, ) node_hostname_by_location = replication_checker.get_allowed_locations_and_hosts( job_config ) expected_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace( service=service, namespace=job_config.get_nerve_namespace(), cluster=settings.cluster, instance_type_class=KubernetesDeploymentConfig, ) expected_count_per_location = int( expected_smartstack_count / len(node_hostname_by_location) ) mesh_status: MutableMapping[str, Any] = { "registration": registration, "expected_backends_per_location": expected_count_per_location, "locations": [], } for location, hosts in node_hostname_by_location.items(): host = replication_checker.get_first_host_in_pool(hosts, instance_pool) if service_mesh == ServiceMesh.SMARTSTACK: mesh_status["locations"].append( _build_smartstack_location_dict( synapse_host=host, synapse_port=settings.system_paasta_config.get_synapse_port(), synapse_haproxy_url_format=settings.system_paasta_config.get_synapse_haproxy_url_format(), registration=registration, pods=pods, location=location, should_return_individual_backends=should_return_individual_backends, ) ) elif service_mesh == ServiceMesh.ENVOY: mesh_status["locations"].append( _build_envoy_location_dict( envoy_host=host, envoy_admin_port=settings.system_paasta_config.get_envoy_admin_port(), envoy_admin_endpoint_format=settings.system_paasta_config.get_envoy_admin_endpoint_format(), registration=registration, pods=pods, location=location, should_return_individual_backends=should_return_individual_backends, ) ) return mesh_status def _build_envoy_location_dict( envoy_host: str, envoy_admin_port: int, envoy_admin_endpoint_format: str, registration: str, pods: Iterable[V1Pod], location: str, should_return_individual_backends: bool, ) -> MutableMapping[str, Any]: backends = envoy_tools.get_backends( registration, envoy_host=envoy_host, envoy_admin_port=envoy_admin_port, envoy_admin_endpoint_format=envoy_admin_endpoint_format, ) sorted_envoy_backends = sorted( [ backend[0] for _, service_backends in backends.items() for backend in service_backends ], key=lambda backend: backend["eds_health_status"], ) casper_proxied_backends = { (backend["address"], backend["port_value"]) 
for _, service_backends in backends.items() for backend, is_casper_proxied_backend in service_backends if is_casper_proxied_backend } matched_envoy_backends_and_pods = envoy_tools.match_backends_and_pods( sorted_envoy_backends, pods, ) return envoy_tools.build_envoy_location_dict( location, matched_envoy_backends_and_pods, should_return_individual_backends, casper_proxied_backends, ) def _build_smartstack_location_dict( synapse_host: str, synapse_port: int, synapse_haproxy_url_format: str, registration: str, pods: Iterable[V1Pod], location: str, should_return_individual_backends: bool, ) -> MutableMapping[str, Any]: sorted_backends = sorted( smartstack_tools.get_backends( registration, synapse_host=synapse_host, synapse_port=synapse_port, synapse_haproxy_url_format=synapse_haproxy_url_format, ), key=lambda backend: backend["status"], reverse=True, # put 'UP' backends above 'MAINT' backends ) matched_backends_and_pods = match_backends_and_pods(sorted_backends, pods) location_dict = smartstack_tools.build_smartstack_location_dict( location, matched_backends_and_pods, should_return_individual_backends ) return location_dict def cr_status( service: str, instance: str, verbose: int, instance_type: str, kube_client: Any, ) -> Mapping[str, Any]: status: MutableMapping[str, Any] = {} cr = ( kubernetes_tools.get_cr( kube_client=kube_client, cr_id=cr_id(service, instance, instance_type) ) or {} ) crstatus = cr.get("status") metadata = cr.get("metadata") if crstatus is not None: status["status"] = crstatus if metadata is not None: status["metadata"] = metadata return status def filter_actually_running_replicasets( replicaset_list: Sequence[V1ReplicaSet], ) -> List[V1ReplicaSet]: return [ rs for rs in replicaset_list if not (rs.spec.replicas == 0 and ready_replicas_from_replicaset(rs) == 0) ] def bounce_status( service: str, instance: str, settings: Any, ): status: Dict[str, Any] = {} job_config = kubernetes_tools.load_kubernetes_service_config( service=service, instance=instance, cluster=settings.cluster, soa_dir=settings.soa_dir, load_deployments=True, ) expected_instance_count = job_config.get_instances() status["expected_instance_count"] = expected_instance_count desired_state = job_config.get_desired_state() status["desired_state"] = desired_state kube_client = settings.kubernetes_client if kube_client is None: raise RuntimeError("Could not load Kubernetes client!") app = kubernetes_tools.get_kubernetes_app_by_name( name=job_config.get_sanitised_deployment_name(), kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) status["running_instance_count"] = ( app.status.ready_replicas if app.status.ready_replicas else 0 ) deploy_status, message = kubernetes_tools.get_kubernetes_app_deploy_status( app=app, desired_instances=(expected_instance_count if desired_state != "stop" else 0), ) status["deploy_status"] = kubernetes_tools.KubernetesDeployStatus.tostring( deploy_status ) if job_config.get_persistent_volumes(): version_objects = kubernetes_tools.controller_revisions_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) else: replicasets = kubernetes_tools.replicasets_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) version_objects = filter_actually_running_replicasets(replicasets) active_shas = kubernetes_tools.get_active_shas_for_service([app, *version_objects],) 
status["active_shas"] = list(active_shas) status["app_count"] = len(active_shas) return status def kubernetes_status_v2( service: str, instance: str, verbose: int, include_smartstack: bool, include_envoy: bool, instance_type: str, settings: Any, ): status: Dict[str, Any] = {} config_loader = LONG_RUNNING_INSTANCE_TYPE_HANDLERS[instance_type].loader job_config = config_loader( service=service, instance=instance, cluster=settings.cluster, soa_dir=settings.soa_dir, load_deployments=True, ) kube_client = settings.kubernetes_client if kube_client is None: return status if ( verbose > 1 and job_config.is_autoscaling_enabled() and job_config.get_autoscaling_params().get("decision_policy", "") != "bespoke" # type: ignore ): try: status["autoscaling_status"] = autoscaling_status( kube_client, job_config, job_config.get_kubernetes_namespace() ) except Exception as e: status[ "error_message" ] = f"Unknown error occurred while fetching autoscaling status. Please contact #compute-infra for help: {e}" desired_state = job_config.get_desired_state() status["app_name"] = job_config.get_sanitised_deployment_name() status["desired_state"] = desired_state status["desired_instances"] = ( job_config.get_instances() if desired_state != "stop" else 0 ) status["bounce_method"] = job_config.get_bounce_method() pod_list = kubernetes_tools.pods_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) service_namespace_config = kubernetes_tools.load_service_namespace_config( service=service, namespace=job_config.get_nerve_namespace(), soa_dir=settings.soa_dir, ) backends = None if "proxy_port" in service_namespace_config: envoy_status = mesh_status( service=service, service_mesh=ServiceMesh.ENVOY, instance=job_config.get_nerve_namespace(), job_config=job_config, service_namespace_config=service_namespace_config, pods=pod_list, should_return_individual_backends=True, settings=settings, ) if envoy_status.get("locations"): backends = { be["address"] for be in envoy_status["locations"][0].get("backends", []) } else: backends = set() if include_envoy: # Note we always include backends here now status["envoy"] = envoy_status update_kubernetes_status( status, kube_client, job_config, pod_list, backends, verbose, ) return status @a_sync.to_blocking async def update_kubernetes_status( status: MutableMapping[str, Any], client: kubernetes_tools.KubeClient, job_config: LongRunningServiceConfig, pod_list: List[V1Pod], backends: Optional[Set[str]], verbose: int, ): """ Updates a status object with relevant information, useful for async calls """ num_tail_lines = calculate_tail_lines(verbose) if job_config.get_persistent_volumes(): controller_revision_list = kubernetes_tools.controller_revisions_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=client, namespace=job_config.get_kubernetes_namespace(), ) status["versions"] = await get_versions_for_controller_revisions( controller_revision_list, client, pod_list, backends, num_tail_lines ) else: replicaset_list = kubernetes_tools.replicasets_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=client, namespace=job_config.get_kubernetes_namespace(), ) status["versions"] = await get_versions_for_replicasets( replicaset_list, client, pod_list, backends, num_tail_lines ) async def get_versions_for_replicasets( replicaset_list: Sequence[V1ReplicaSet], client: kubernetes_tools.KubeClient, pod_list: 
    Sequence[V1Pod],
    backends: Optional[Set[str]],
    num_tail_lines: int,
) -> List[KubernetesVersionDict]:
    # For the purpose of active_shas/app_count, don't count replicasets that
    # are at 0/0.
    actually_running_replicasets = filter_actually_running_replicasets(replicaset_list)
    pods_by_replicaset = get_pods_by_replicaset(pod_list)

    versions = await asyncio.gather(
        *[
            get_replicaset_status(
                replicaset,
                client,
                pods_by_replicaset.get(replicaset.metadata.name),
                backends,
                num_tail_lines,
            )
            for replicaset in actually_running_replicasets
        ]
    )
    return versions


def get_pods_by_replicaset(pods: Sequence[V1Pod]) -> Dict[str, List[V1Pod]]:
    pods_by_replicaset: DefaultDict[str, List[V1Pod]] = defaultdict(list)
    for pod in pods:
        for owner_reference in pod.metadata.owner_references:
            if owner_reference.kind == "ReplicaSet":
                pods_by_replicaset[owner_reference.name].append(pod)

    return pods_by_replicaset


async def get_replicaset_status(
    replicaset: V1ReplicaSet,
    client: kubernetes_tools.KubeClient,
    pods: Sequence[V1Pod],
    backends: Optional[Set[str]],
    num_tail_lines: int,
) -> KubernetesVersionDict:
    return {
        "name": replicaset.metadata.name,
        "type": "ReplicaSet",
        "replicas": replicaset.spec.replicas,
        "ready_replicas": ready_replicas_from_replicaset(replicaset),
        "create_timestamp": replicaset.metadata.creation_timestamp.timestamp(),
        "git_sha": replicaset.metadata.labels.get("paasta.yelp.com/git_sha"),
        "config_sha": replicaset.metadata.labels.get("paasta.yelp.com/config_sha"),
        "pods": await asyncio.gather(
            *[get_pod_status(pod, backends, client, num_tail_lines) for pod in pods]
        ),
    }


async def get_pod_status(
    pod: V1Pod, backends: Optional[Set[str]], client: Any, num_tail_lines: int
) -> Dict[str, Any]:
    reason = pod.status.reason
    message = pod.status.message
    scheduled = kubernetes_tools.is_pod_scheduled(pod)
    ready = kubernetes_tools.is_pod_ready(pod)
    delete_timestamp = (
        pod.metadata.deletion_timestamp.timestamp()
        if pod.metadata.deletion_timestamp
        else None
    )
    try:
        # Filter events to only last 15m
        pod_event_messages = await get_pod_event_messages(
            client, pod, max_age_in_seconds=900
        )
    except asyncio.TimeoutError:
        pod_event_messages = [{"error": "Could not retrieve events. Please try again."}]

    # if evicted, there is no condition
    if not scheduled and reason != "Evicted":
        sched_condition = kubernetes_tools.get_pod_condition(pod, "PodScheduled")
        reason = sched_condition.reason
        message = sched_condition.message

    mesh_ready = None
    if backends is not None:
        # TODO: Remove this once k8s readiness reflects mesh readiness, PAASTA-17266
        mesh_ready = pod.status.pod_ip in backends

    return {
        "name": pod.metadata.name,
        "ip": pod.status.pod_ip,
        "host": pod.status.host_ip,
        "phase": pod.status.phase,
        "reason": reason,
        "message": message,
        "scheduled": scheduled,
        "ready": ready,
        "mesh_ready": mesh_ready,
        "containers": await get_pod_containers(pod, client, num_tail_lines),
        "create_timestamp": pod.metadata.creation_timestamp.timestamp(),
        "delete_timestamp": delete_timestamp,
        "events": pod_event_messages,
    }


def get_container_healthcheck(pod_ip: str, probe: V1Probe) -> Dict[str, Any]:
    if getattr(probe, "http_get", None):
        return {
            "http_url": f"http://{pod_ip}:{probe.http_get.port}{probe.http_get.path}"
        }
    if getattr(probe, "tcp_socket", None):
        return {"tcp_port": f"{probe.tcp_socket.port}"}
    if getattr(probe, "_exec", None):
        return {"cmd": " ".join(probe._exec.command)}
    return {}


async def get_pod_containers(
    pod: V1Pod, client: Any, num_tail_lines: int
) -> List[Dict[str, Any]]:
    containers = []
    statuses = pod.status.container_statuses or []
    container_specs = pod.spec.containers
    for cs in statuses:
        specs: List[V1Container] = [c for c in container_specs if c.name == cs.name]
        healthcheck_grace_period = 0
        healthcheck = None
        if specs:
            # There should be only one matching spec
            spec = specs[0]
            if spec.liveness_probe:
                healthcheck_grace_period = (
                    spec.liveness_probe.initial_delay_seconds or 0
                )
                healthcheck = get_container_healthcheck(
                    pod.status.pod_ip, spec.liveness_probe
                )

        state_dict = cs.state.to_dict()
        state = None
        reason = None
        message = None
        start_timestamp = None
        for state_name, this_state in state_dict.items():
            # Each container has only one state populated at a time
            if this_state:
                state = state_name
                if "reason" in this_state:
                    reason = this_state["reason"]
                if "message" in this_state:
                    message = this_state["message"]
                if "started_at" in this_state:
                    start_timestamp = this_state["started_at"].timestamp()

        last_state_dict = cs.last_state.to_dict()
        last_state = None
        last_reason = None
        last_message = None
        last_duration = None
        last_timestamp = None
        for state_name, this_state in last_state_dict.items():
            if this_state:
                last_state = state_name
                if "reason" in this_state:
                    last_reason = this_state["reason"]
                if "message" in this_state:
                    last_message = this_state["message"]
                if this_state.get("started_at"):
                    if this_state.get("finished_at"):
                        last_duration = (
                            this_state["finished_at"] - this_state["started_at"]
                        ).total_seconds()
                    last_timestamp = this_state["started_at"].timestamp()

        async def get_tail_lines():
            try:
                return await get_tail_lines_for_kubernetes_container(
                    client, pod, cs, num_tail_lines, previous=False,
                )
            except asyncio.TimeoutError:
                return {"error_message": f"Could not fetch logs for {cs.name}"}

        # get previous log lines as well if this container restarted recently
        async def get_previous_tail_lines():
            if state == "running" and kubernetes_tools.recent_container_restart(
                cs.restart_count, last_state, last_timestamp
            ):
                try:
                    return await get_tail_lines_for_kubernetes_container(
                        client, pod, cs, num_tail_lines, previous=True,
                    )
                except asyncio.TimeoutError:
                    return {
                        "error_message": f"Could not fetch previous logs for {cs.name}"
                    }
            return None

        tail_lines, previous_tail_lines = await asyncio.gather(
            asyncio.ensure_future(get_tail_lines()),
            asyncio.ensure_future(get_previous_tail_lines()),
        )

        containers.append(
            {
                "name": cs.name,
                "restart_count": cs.restart_count,
                "state": state,
                "reason": reason,
                "message": message,
                "last_state": last_state,
                "last_reason": last_reason,
                "last_message": last_message,
                "last_duration": last_duration,
                "last_timestamp": last_timestamp,
                "previous_tail_lines": previous_tail_lines,
                "timestamp": start_timestamp,
                "healthcheck_grace_period": healthcheck_grace_period,
                "healthcheck_cmd": healthcheck,
                "tail_lines": tail_lines,
            }
        )
    return containers


async def get_versions_for_controller_revisions(
    controller_revisions: Sequence[V1ControllerRevision],
    client: kubernetes_tools.KubeClient,
    pods: Sequence[V1Pod],
    backends: Optional[Set[str]],
    num_tail_lines: int,
) -> List[KubernetesVersionDict]:
    versions: List[KubernetesVersionDict] = []

    cr_by_shas: Dict[Tuple[str, str], V1ControllerRevision] = {}
    for cr in controller_revisions:
        git_sha = cr.metadata.labels["paasta.yelp.com/git_sha"]
        config_sha = cr.metadata.labels["paasta.yelp.com/config_sha"]
        cr_by_shas[(git_sha, config_sha)] = cr

    pods_by_shas: DefaultDict[Tuple[str, str], List[V1Pod]] = defaultdict(list)
    for pod in pods:
        git_sha = pod.metadata.labels["paasta.yelp.com/git_sha"]
        config_sha = pod.metadata.labels["paasta.yelp.com/config_sha"]
        pods_by_shas[(git_sha, config_sha)].append(pod)

    versions = await asyncio.gather(
        *[
            get_version_for_controller_revision(
                cr, pods_by_shas[(git_sha, config_sha)], backends, num_tail_lines, client,
            )
            for (git_sha, config_sha), cr in cr_by_shas.items()
        ]
    )
    return versions


async def get_version_for_controller_revision(
    cr: V1ControllerRevision,
    pods: Sequence[V1Pod],
    backends: Optional[Set[str]],
    num_tail_lines: int,
    client: Any,
) -> KubernetesVersionDict:
    ready_pods = [pod for pod in pods if kubernetes_tools.is_pod_ready(pod)]
    return {
        "name": cr.metadata.name,
        "type": "ControllerRevision",
        "replicas": len(pods),
        "ready_replicas": len(ready_pods),
        "create_timestamp": cr.metadata.creation_timestamp.timestamp(),
        "git_sha": cr.metadata.labels.get("paasta.yelp.com/git_sha"),
        "config_sha": cr.metadata.labels.get("paasta.yelp.com/config_sha"),
        "pods": await asyncio.gather(
            *[get_pod_status(pod, backends, client, num_tail_lines) for pod in pods]
        ),
    }


def kubernetes_status(
    service: str,
    instance: str,
    verbose: int,
    include_smartstack: bool,
    include_envoy: bool,
    instance_type: str,
    settings: Any,
) -> Mapping[str, Any]:
    kstatus: Dict[str, Any] = {}
    config_loader = LONG_RUNNING_INSTANCE_TYPE_HANDLERS[instance_type].loader
    job_config = config_loader(
        service=service,
        instance=instance,
        cluster=settings.cluster,
        soa_dir=settings.soa_dir,
        load_deployments=True,
    )
    kube_client = settings.kubernetes_client
    if kube_client is None:
        return kstatus

    app = kubernetes_tools.get_kubernetes_app_by_name(
        name=job_config.get_sanitised_deployment_name(),
        kube_client=kube_client,
        namespace=job_config.get_kubernetes_namespace(),
    )
    # bouncing status can be inferred from app_count, ref get_bouncing_status
    pod_list = kubernetes_tools.pods_for_service_instance(
        service=job_config.service,
        instance=job_config.instance,
        kube_client=kube_client,
        namespace=job_config.get_kubernetes_namespace(),
    )
    replicaset_list = kubernetes_tools.replicasets_for_service_instance(
        service=job_config.service,
        instance=job_config.instance,
        kube_client=kube_client,
        namespace=job_config.get_kubernetes_namespace(),
    )
    # For the purpose of active_shas/app_count, don't count replicasets that are at 0/0.
    actually_running_replicasets = filter_actually_running_replicasets(replicaset_list)
    active_shas = kubernetes_tools.get_active_shas_for_service(
        [app, *pod_list, *actually_running_replicasets]
    )
    kstatus["app_count"] = len(active_shas)
    kstatus["desired_state"] = job_config.get_desired_state()
    kstatus["bounce_method"] = job_config.get_bounce_method()
    kstatus["active_shas"] = list(active_shas)

    job_status(
        kstatus=kstatus,
        client=kube_client,
        namespace=job_config.get_kubernetes_namespace(),
        job_config=job_config,
        verbose=verbose,
        pod_list=pod_list,
        replicaset_list=replicaset_list,
    )

    if (
        job_config.is_autoscaling_enabled() is True
        and job_config.get_autoscaling_params().get("decision_policy", "") != "bespoke"  # type: ignore
    ):
        try:
            kstatus["autoscaling_status"] = autoscaling_status(
                kube_client, job_config, job_config.get_kubernetes_namespace()
            )
        except Exception as e:
            kstatus[
                "error_message"
            ] = f"Unknown error occurred while fetching autoscaling status. Please contact #compute-infra for help: {e}"

    evicted_count = 0
    for pod in pod_list:
        if pod.status.reason == "Evicted":
            evicted_count += 1
    kstatus["evicted_count"] = evicted_count

    if include_smartstack or include_envoy:
        service_namespace_config = kubernetes_tools.load_service_namespace_config(
            service=service,
            namespace=job_config.get_nerve_namespace(),
            soa_dir=settings.soa_dir,
        )
        if "proxy_port" in service_namespace_config:
            if include_smartstack:
                kstatus["smartstack"] = mesh_status(
                    service=service,
                    service_mesh=ServiceMesh.SMARTSTACK,
                    instance=job_config.get_nerve_namespace(),
                    job_config=job_config,
                    service_namespace_config=service_namespace_config,
                    pods=pod_list,
                    should_return_individual_backends=verbose > 0,
                    settings=settings,
                )
            if include_envoy:
                kstatus["envoy"] = mesh_status(
                    service=service,
                    service_mesh=ServiceMesh.ENVOY,
                    instance=job_config.get_nerve_namespace(),
                    job_config=job_config,
                    service_namespace_config=service_namespace_config,
                    pods=pod_list,
                    should_return_individual_backends=verbose > 0,
                    settings=settings,
                )
    return kstatus


def instance_status(
    service: str,
    instance: str,
    verbose: int,
    include_smartstack: bool,
    include_envoy: bool,
    use_new: bool,
    instance_type: str,
    settings: Any,
) -> Mapping[str, Any]:
    status = {}
    if not can_handle(instance_type):
        raise RuntimeError(
            f"Unknown instance type: {instance_type!r}, "
            f"can handle: {INSTANCE_TYPES}"
        )

    if instance_type in INSTANCE_TYPES_CR:
        status[instance_type] = cr_status(
            service=service,
            instance=instance,
            instance_type=instance_type,
            verbose=verbose,
            kube_client=settings.kubernetes_client,
        )

    if instance_type in INSTANCE_TYPES_K8S:
        if use_new:
            status["kubernetes_v2"] = kubernetes_status_v2(
                service=service,
                instance=instance,
                instance_type=instance_type,
                verbose=verbose,
                include_smartstack=include_smartstack,
                include_envoy=include_envoy,
                settings=settings,
            )
        else:
            status["kubernetes"] = kubernetes_status(
                service=service,
                instance=instance,
                instance_type=instance_type,
                verbose=verbose,
                include_smartstack=include_smartstack,
                include_envoy=include_envoy,
                settings=settings,
            )

    return status


def ready_replicas_from_replicaset(replicaset: V1ReplicaSet) -> int:
    try:
        ready_replicas = replicaset.status.ready_replicas
        if ready_replicas is None:
            ready_replicas = 0
    except AttributeError:
        ready_replicas = 0

    return ready_replicas


def kubernetes_mesh_status(
    service: str,
    instance: str,
    instance_type: str,
    settings: Any,
    include_smartstack: bool = True,
    include_envoy: bool = True,
) -> Mapping[str, Any]:

    if not include_smartstack and not include_envoy:
        raise RuntimeError("No mesh types specified when requesting mesh status")
    if instance_type not in LONG_RUNNING_INSTANCE_TYPE_HANDLERS:
        raise RuntimeError(
            f"Getting mesh status for {instance_type} instances is not supported"
        )

    config_loader = LONG_RUNNING_INSTANCE_TYPE_HANDLERS[instance_type].loader
    job_config = config_loader(
        service=service,
        instance=instance,
        cluster=settings.cluster,
        soa_dir=settings.soa_dir,
        load_deployments=True,
    )
    service_namespace_config = kubernetes_tools.load_service_namespace_config(
        service=service,
        namespace=job_config.get_nerve_namespace(),
        soa_dir=settings.soa_dir,
    )
    if "proxy_port" not in service_namespace_config:
        raise RuntimeError(
            f"Instance '{service}.{instance}' is not configured for the mesh"
        )

    kube_client = settings.kubernetes_client
    pod_list = kubernetes_tools.pods_for_service_instance(
        service=job_config.service,
        instance=job_config.instance,
        kube_client=kube_client,
        namespace=job_config.get_kubernetes_namespace(),
    )

    kmesh: Dict[str, Any] = {}
    mesh_status_kwargs = dict(
        service=service,
        instance=job_config.get_nerve_namespace(),
        job_config=job_config,
        service_namespace_config=service_namespace_config,
        pods=pod_list,
        should_return_individual_backends=True,
        settings=settings,
    )
    if include_smartstack:
        kmesh["smartstack"] = mesh_status(
            service_mesh=ServiceMesh.SMARTSTACK,
            **mesh_status_kwargs,
        )
    if include_envoy:
        kmesh["envoy"] = mesh_status(
            service_mesh=ServiceMesh.ENVOY,
            **mesh_status_kwargs,
        )

    return kmesh
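# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module, illustrative only):
# get_container_healthcheck() renders a liveness probe into a human-readable
# dict. The probe below is a hypothetical HTTP probe built from the official
# kubernetes client models; only the client library is assumed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from kubernetes.client import V1HTTPGetAction

    _probe = V1Probe(http_get=V1HTTPGetAction(path="/status", port=8080))
    assert get_container_healthcheck("10.0.0.5", _probe) == {
        "http_url": "http://10.0.0.5:8080/status"
    }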
import asyncio from collections import defaultdict from enum import Enum from typing import Any from typing import DefaultDict from typing import Dict from typing import Iterable from typing import List from typing import Mapping from typing import MutableMapping from typing import Optional from typing import Sequence from typing import Set from typing import Tuple import a_sync import pytz from kubernetes.client import V1Container from kubernetes.client import V1ControllerRevision from kubernetes.client import V1Pod from kubernetes.client import V1Probe from kubernetes.client import V1ReplicaSet from kubernetes.client.rest import ApiException from mypy_extensions import TypedDict from paasta_tools import cassandracluster_tools from paasta_tools import envoy_tools from paasta_tools import flink_tools from paasta_tools import kafkacluster_tools from paasta_tools import kubernetes_tools from paasta_tools import marathon_tools from paasta_tools import monkrelaycluster_tools from paasta_tools import nrtsearchservice_tools from paasta_tools import smartstack_tools from paasta_tools.cli.utils import LONG_RUNNING_INSTANCE_TYPE_HANDLERS from paasta_tools.instance.hpa_metrics_parser import HPAMetricsDict from paasta_tools.instance.hpa_metrics_parser import HPAMetricsParser from paasta_tools.kubernetes_tools import get_pod_event_messages from paasta_tools.kubernetes_tools import get_tail_lines_for_kubernetes_container from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig from paasta_tools.long_running_service_tools import LongRunningServiceConfig from paasta_tools.long_running_service_tools import ServiceNamespaceConfig from paasta_tools.smartstack_tools import KubeSmartstackEnvoyReplicationChecker from paasta_tools.smartstack_tools import match_backends_and_pods from paasta_tools.utils import calculate_tail_lines INSTANCE_TYPES_CR = {"flink", "cassandracluster", "kafkacluster"} INSTANCE_TYPES_K8S = {"kubernetes", "cassandracluster"} INSTANCE_TYPES = INSTANCE_TYPES_K8S.union(INSTANCE_TYPES_CR) INSTANCE_TYPES_WITH_SET_STATE = {"flink"} INSTANCE_TYPE_CR_ID = dict( flink=flink_tools.cr_id, cassandracluster=cassandracluster_tools.cr_id, kafkacluster=kafkacluster_tools.cr_id, nrtsearchservice=nrtsearchservice_tools.cr_id, monkrelaycluster=monkrelaycluster_tools.cr_id, ) class ServiceMesh(Enum): SMARTSTACK = "smartstack" ENVOY = "envoy" class KubernetesAutoscalingStatusDict(TypedDict): min_instances: int max_instances: int metrics: List desired_replicas: int last_scale_time: str class KubernetesVersionDict(TypedDict, total=False): name: str type: str replicas: int ready_replicas: int create_timestamp: int git_sha: str config_sha: str pods: Sequence[Dict[str, Any]] def cr_id(service: str, instance: str, instance_type: str) -> Mapping[str, str]: cr_id_fn = INSTANCE_TYPE_CR_ID.get(instance_type) if not cr_id_fn: raise RuntimeError(f"Unknown instance type {instance_type}") return cr_id_fn(service, instance) def can_handle(instance_type: str) -> bool: return instance_type in INSTANCE_TYPES def can_set_state(instance_type: str) -> bool: return instance_type in INSTANCE_TYPES_WITH_SET_STATE def set_cr_desired_state( kube_client: kubernetes_tools.KubeClient, service: str, instance: str, instance_type: str, desired_state: str, ): try: kubernetes_tools.set_cr_desired_state( kube_client=kube_client, cr_id=cr_id(service, instance, instance_type), desired_state=desired_state, ) except ApiException as e: error_message = ( f"Error while setting state {desired_state} of " f"{service}.{instance}: {e}" ) 
raise RuntimeError(error_message) def autoscaling_status( kube_client: kubernetes_tools.KubeClient, job_config: LongRunningServiceConfig, namespace: str, ) -> KubernetesAutoscalingStatusDict: try: hpa = kube_client.autoscaling.read_namespaced_horizontal_pod_autoscaler( name=job_config.get_sanitised_deployment_name(), namespace=namespace ) except ApiException as e: if e.status == 404: return KubernetesAutoscalingStatusDict( min_instances=-1, max_instances=-1, metrics=[], desired_replicas=-1, last_scale_time="unknown (could not find HPA object)", ) else: raise # Parse metrics sources, based on # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V2beta2ExternalMetricSource.md#v2beta2externalmetricsource parser = HPAMetricsParser(hpa) # https://github.com/python/mypy/issues/7217 metrics_by_name: DefaultDict[str, HPAMetricsDict] = defaultdict( lambda: HPAMetricsDict() ) if hpa.spec.metrics is not None: for metric_spec in hpa.spec.metrics: parsed = parser.parse_target(metric_spec) metrics_by_name[parsed["name"]].update(parsed) if hpa.status.current_metrics is not None: for metric_spec in hpa.status.current_metrics: parsed = parser.parse_current(metric_spec) if parsed is not None: metrics_by_name[parsed["name"]].update(parsed) metric_stats = list(metrics_by_name.values()) last_scale_time = ( hpa.status.last_scale_time.replace(tzinfo=pytz.UTC).isoformat() if getattr(hpa.status, "last_scale_time") else "N/A" ) return KubernetesAutoscalingStatusDict( min_instances=hpa.spec.min_replicas, max_instances=hpa.spec.max_replicas, metrics=metric_stats, desired_replicas=hpa.status.desired_replicas, last_scale_time=last_scale_time, ) async def pod_info( pod: V1Pod, client: kubernetes_tools.KubeClient, num_tail_lines: int, ): container_statuses = pod.status.container_statuses or [] try: pod_event_messages = await get_pod_event_messages(client, pod) except asyncio.TimeoutError: pod_event_messages = [{"error": "Could not fetch events for pod"}] containers = [ dict( name=container.name, tail_lines=await get_tail_lines_for_kubernetes_container( client, pod, container, num_tail_lines, ), ) for container in container_statuses ] return { "name": pod.metadata.name, "host": kubernetes_tools.get_pod_hostname(client, pod), "deployed_timestamp": pod.metadata.creation_timestamp.timestamp(), "phase": pod.status.phase, "ready": kubernetes_tools.is_pod_ready(pod), "containers": containers, "reason": pod.status.reason, "message": pod.status.message, "events": pod_event_messages, "git_sha": pod.metadata.labels.get("paasta.yelp.com/git_sha"), "config_sha": pod.metadata.labels.get("paasta.yelp.com/config_sha"), } @a_sync.to_blocking async def job_status( kstatus: MutableMapping[str, Any], client: kubernetes_tools.KubeClient, job_config: LongRunningServiceConfig, pod_list: Sequence[V1Pod], replicaset_list: Sequence[V1ReplicaSet], verbose: int, namespace: str, ) -> None: app_id = job_config.get_sanitised_deployment_name() kstatus["app_id"] = app_id kstatus["pods"] = [] kstatus["replicasets"] = [] if verbose > 0: num_tail_lines = calculate_tail_lines(verbose) kstatus["pods"] = await asyncio.gather( *[pod_info(pod, client, num_tail_lines) for pod in pod_list] ) for replicaset in replicaset_list: kstatus["replicasets"].append( { "name": replicaset.metadata.name, "replicas": replicaset.spec.replicas, "ready_replicas": ready_replicas_from_replicaset(replicaset), "create_timestamp": replicaset.metadata.creation_timestamp.timestamp(), "git_sha": replicaset.metadata.labels.get("paasta.yelp.com/git_sha"), "config_sha": 
replicaset.metadata.labels.get( "paasta.yelp.com/config_sha" ), } ) kstatus["expected_instance_count"] = job_config.get_instances() app = kubernetes_tools.get_kubernetes_app_by_name( name=app_id, kube_client=client, namespace=namespace ) desired_instances = ( job_config.get_instances() if job_config.get_desired_state() != "stop" else 0 ) deploy_status, message = kubernetes_tools.get_kubernetes_app_deploy_status( app=app, desired_instances=desired_instances, ) kstatus["deploy_status"] = kubernetes_tools.KubernetesDeployStatus.tostring( deploy_status ) kstatus["deploy_status_message"] = message kstatus["running_instance_count"] = ( app.status.ready_replicas if app.status.ready_replicas else 0 ) kstatus["create_timestamp"] = app.metadata.creation_timestamp.timestamp() kstatus["namespace"] = app.metadata.namespace def mesh_status( service: str, service_mesh: ServiceMesh, instance: str, job_config: LongRunningServiceConfig, service_namespace_config: ServiceNamespaceConfig, pods: Sequence[V1Pod], settings: Any, should_return_individual_backends: bool = False, ) -> Mapping[str, Any]: registration = job_config.get_registrations()[0] instance_pool = job_config.get_pool() replication_checker = KubeSmartstackEnvoyReplicationChecker( nodes=kubernetes_tools.get_all_nodes(settings.kubernetes_client), system_paasta_config=settings.system_paasta_config, ) node_hostname_by_location = replication_checker.get_allowed_locations_and_hosts( job_config ) expected_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace( service=service, namespace=job_config.get_nerve_namespace(), cluster=settings.cluster, instance_type_class=KubernetesDeploymentConfig, ) expected_count_per_location = int( expected_smartstack_count / len(node_hostname_by_location) ) mesh_status: MutableMapping[str, Any] = { "registration": registration, "expected_backends_per_location": expected_count_per_location, "locations": [], } for location, hosts in node_hostname_by_location.items(): host = replication_checker.get_first_host_in_pool(hosts, instance_pool) if service_mesh == ServiceMesh.SMARTSTACK: mesh_status["locations"].append( _build_smartstack_location_dict( synapse_host=host, synapse_port=settings.system_paasta_config.get_synapse_port(), synapse_haproxy_url_format=settings.system_paasta_config.get_synapse_haproxy_url_format(), registration=registration, pods=pods, location=location, should_return_individual_backends=should_return_individual_backends, ) ) elif service_mesh == ServiceMesh.ENVOY: mesh_status["locations"].append( _build_envoy_location_dict( envoy_host=host, envoy_admin_port=settings.system_paasta_config.get_envoy_admin_port(), envoy_admin_endpoint_format=settings.system_paasta_config.get_envoy_admin_endpoint_format(), registration=registration, pods=pods, location=location, should_return_individual_backends=should_return_individual_backends, ) ) return mesh_status def _build_envoy_location_dict( envoy_host: str, envoy_admin_port: int, envoy_admin_endpoint_format: str, registration: str, pods: Iterable[V1Pod], location: str, should_return_individual_backends: bool, ) -> MutableMapping[str, Any]: backends = envoy_tools.get_backends( registration, envoy_host=envoy_host, envoy_admin_port=envoy_admin_port, envoy_admin_endpoint_format=envoy_admin_endpoint_format, ) sorted_envoy_backends = sorted( [ backend[0] for _, service_backends in backends.items() for backend in service_backends ], key=lambda backend: backend["eds_health_status"], ) casper_proxied_backends = { (backend["address"], backend["port_value"]) 
for _, service_backends in backends.items() for backend, is_casper_proxied_backend in service_backends if is_casper_proxied_backend } matched_envoy_backends_and_pods = envoy_tools.match_backends_and_pods( sorted_envoy_backends, pods, ) return envoy_tools.build_envoy_location_dict( location, matched_envoy_backends_and_pods, should_return_individual_backends, casper_proxied_backends, ) def _build_smartstack_location_dict( synapse_host: str, synapse_port: int, synapse_haproxy_url_format: str, registration: str, pods: Iterable[V1Pod], location: str, should_return_individual_backends: bool, ) -> MutableMapping[str, Any]: sorted_backends = sorted( smartstack_tools.get_backends( registration, synapse_host=synapse_host, synapse_port=synapse_port, synapse_haproxy_url_format=synapse_haproxy_url_format, ), key=lambda backend: backend["status"], reverse=True, # put 'UP' backends above 'MAINT' backends ) matched_backends_and_pods = match_backends_and_pods(sorted_backends, pods) location_dict = smartstack_tools.build_smartstack_location_dict( location, matched_backends_and_pods, should_return_individual_backends ) return location_dict def cr_status( service: str, instance: str, verbose: int, instance_type: str, kube_client: Any, ) -> Mapping[str, Any]: status: MutableMapping[str, Any] = {} cr = ( kubernetes_tools.get_cr( kube_client=kube_client, cr_id=cr_id(service, instance, instance_type) ) or {} ) crstatus = cr.get("status") metadata = cr.get("metadata") if crstatus is not None: status["status"] = crstatus if metadata is not None: status["metadata"] = metadata return status def filter_actually_running_replicasets( replicaset_list: Sequence[V1ReplicaSet], ) -> List[V1ReplicaSet]: return [ rs for rs in replicaset_list if not (rs.spec.replicas == 0 and ready_replicas_from_replicaset(rs) == 0) ] def bounce_status( service: str, instance: str, settings: Any, ): status: Dict[str, Any] = {} job_config = kubernetes_tools.load_kubernetes_service_config( service=service, instance=instance, cluster=settings.cluster, soa_dir=settings.soa_dir, load_deployments=True, ) expected_instance_count = job_config.get_instances() status["expected_instance_count"] = expected_instance_count desired_state = job_config.get_desired_state() status["desired_state"] = desired_state kube_client = settings.kubernetes_client if kube_client is None: raise RuntimeError("Could not load Kubernetes client!") app = kubernetes_tools.get_kubernetes_app_by_name( name=job_config.get_sanitised_deployment_name(), kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) status["running_instance_count"] = ( app.status.ready_replicas if app.status.ready_replicas else 0 ) deploy_status, message = kubernetes_tools.get_kubernetes_app_deploy_status( app=app, desired_instances=(expected_instance_count if desired_state != "stop" else 0), ) status["deploy_status"] = kubernetes_tools.KubernetesDeployStatus.tostring( deploy_status ) if job_config.get_persistent_volumes(): version_objects = kubernetes_tools.controller_revisions_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) else: replicasets = kubernetes_tools.replicasets_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) version_objects = filter_actually_running_replicasets(replicasets) active_shas = kubernetes_tools.get_active_shas_for_service([app, *version_objects],) 
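    # active_shas holds the distinct (git_sha, config_sha) pairs seen across the
    # deployment and its version objects; more than one pair means a bounce is
    # still in progress (compare the app_count hint in kubernetes_status below).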
status["active_shas"] = list(active_shas) status["app_count"] = len(active_shas) return status def kubernetes_status_v2( service: str, instance: str, verbose: int, include_smartstack: bool, include_envoy: bool, instance_type: str, settings: Any, ): status: Dict[str, Any] = {} config_loader = LONG_RUNNING_INSTANCE_TYPE_HANDLERS[instance_type].loader job_config = config_loader( service=service, instance=instance, cluster=settings.cluster, soa_dir=settings.soa_dir, load_deployments=True, ) kube_client = settings.kubernetes_client if kube_client is None: return status if ( verbose > 1 and job_config.is_autoscaling_enabled() and job_config.get_autoscaling_params().get("decision_policy", "") != "bespoke" # type: ignore ): try: status["autoscaling_status"] = autoscaling_status( kube_client, job_config, job_config.get_kubernetes_namespace() ) except Exception as e: status[ "error_message" ] = f"Unknown error occurred while fetching autoscaling status. Please contact #compute-infra for help: {e}" desired_state = job_config.get_desired_state() status["app_name"] = job_config.get_sanitised_deployment_name() status["desired_state"] = desired_state status["desired_instances"] = ( job_config.get_instances() if desired_state != "stop" else 0 ) status["bounce_method"] = job_config.get_bounce_method() pod_list = kubernetes_tools.pods_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) service_namespace_config = kubernetes_tools.load_service_namespace_config( service=service, namespace=job_config.get_nerve_namespace(), soa_dir=settings.soa_dir, ) backends = None if "proxy_port" in service_namespace_config: envoy_status = mesh_status( service=service, service_mesh=ServiceMesh.ENVOY, instance=job_config.get_nerve_namespace(), job_config=job_config, service_namespace_config=service_namespace_config, pods=pod_list, should_return_individual_backends=True, settings=settings, ) if envoy_status.get("locations"): backends = { be["address"] for be in envoy_status["locations"][0].get("backends", []) } else: backends = set() if include_envoy: # Note we always include backends here now status["envoy"] = envoy_status update_kubernetes_status( status, kube_client, job_config, pod_list, backends, verbose, ) return status @a_sync.to_blocking async def update_kubernetes_status( status: MutableMapping[str, Any], client: kubernetes_tools.KubeClient, job_config: LongRunningServiceConfig, pod_list: List[V1Pod], backends: Optional[Set[str]], verbose: int, ): """ Updates a status object with relevant information, useful for async calls """ num_tail_lines = calculate_tail_lines(verbose) if job_config.get_persistent_volumes(): controller_revision_list = kubernetes_tools.controller_revisions_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=client, namespace=job_config.get_kubernetes_namespace(), ) status["versions"] = await get_versions_for_controller_revisions( controller_revision_list, client, pod_list, backends, num_tail_lines ) else: replicaset_list = kubernetes_tools.replicasets_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=client, namespace=job_config.get_kubernetes_namespace(), ) status["versions"] = await get_versions_for_replicasets( replicaset_list, client, pod_list, backends, num_tail_lines ) async def get_versions_for_replicasets( replicaset_list: Sequence[V1ReplicaSet], client: kubernetes_tools.KubeClient, pod_list: 
Sequence[V1Pod], backends: Optional[Set[str]], num_tail_lines: int, ) -> List[KubernetesVersionDict]: # For the purpose of active_shas/app_count, don't count replicasets that # are at 0/0. actually_running_replicasets = filter_actually_running_replicasets(replicaset_list) pods_by_replicaset = get_pods_by_replicaset(pod_list) versions = await asyncio.gather( *[ get_replicaset_status( replicaset, client, pods_by_replicaset.get(replicaset.metadata.name), backends, num_tail_lines, ) for replicaset in actually_running_replicasets ] ) return versions def get_pods_by_replicaset(pods: Sequence[V1Pod]) -> Dict[str, List[V1Pod]]: pods_by_replicaset: DefaultDict[str, List[V1Pod]] = defaultdict(list) for pod in pods: for owner_reference in pod.metadata.owner_references: if owner_reference.kind == "ReplicaSet": pods_by_replicaset[owner_reference.name].append(pod) return pods_by_replicaset async def get_replicaset_status( replicaset: V1ReplicaSet, client: kubernetes_tools.KubeClient, pods: Sequence[V1Pod], backends: Optional[Set[str]], num_tail_lines: int, ) -> KubernetesVersionDict: return { "name": replicaset.metadata.name, "type": "ReplicaSet", "replicas": replicaset.spec.replicas, "ready_replicas": ready_replicas_from_replicaset(replicaset), "create_timestamp": replicaset.metadata.creation_timestamp.timestamp(), "git_sha": replicaset.metadata.labels.get("paasta.yelp.com/git_sha"), "config_sha": replicaset.metadata.labels.get("paasta.yelp.com/config_sha"), "pods": await asyncio.gather( *[get_pod_status(pod, backends, client, num_tail_lines) for pod in pods] ), } async def get_pod_status( pod: V1Pod, backends: Optional[Set[str]], client: Any, num_tail_lines: int ) -> Dict[str, Any]: reason = pod.status.reason message = pod.status.message scheduled = kubernetes_tools.is_pod_scheduled(pod) ready = kubernetes_tools.is_pod_ready(pod) delete_timestamp = ( pod.metadata.deletion_timestamp.timestamp() if pod.metadata.deletion_timestamp else None ) try: # Filter events to only last 15m pod_event_messages = await get_pod_event_messages( client, pod, max_age_in_seconds=900 ) except asyncio.TimeoutError: pod_event_messages = [{"error": "Could not retrieve events. 
Please try again."}]

    # if evicted, there is no condition
    if not scheduled and reason != "Evicted":
        sched_condition = kubernetes_tools.get_pod_condition(pod, "PodScheduled")
        reason = sched_condition.reason
        message = sched_condition.message
    mesh_ready = None
    if backends is not None:
        # TODO: Remove this once k8s readiness reflects mesh readiness, PAASTA-17266
        mesh_ready = pod.status.pod_ip in backends
    return {
        "name": pod.metadata.name,
        "ip": pod.status.pod_ip,
        "host": pod.status.host_ip,
        "phase": pod.status.phase,
        "reason": reason,
        "message": message,
        "scheduled": scheduled,
        "ready": ready,
        "mesh_ready": mesh_ready,
        "containers": await get_pod_containers(pod, client, num_tail_lines),
        "create_timestamp": pod.metadata.creation_timestamp.timestamp(),
        "delete_timestamp": delete_timestamp,
        "events": pod_event_messages,
    }


def get_container_healthcheck(pod_ip: str, probe: V1Probe) -> Dict[str, Any]:
    if getattr(probe, "http_get", None):
        return {
            "http_url": f"http://{pod_ip}:{probe.http_get.port}{probe.http_get.path}"
        }
    if getattr(probe, "tcp_socket", None):
        return {"tcp_port": f"{probe.tcp_socket.port}"}
    if getattr(probe, "_exec", None):
        return {"cmd": f"{' '.join(probe._exec.command)}"}
    return {}


async def get_pod_containers(
    pod: V1Pod, client: Any, num_tail_lines: int
) -> List[Dict[str, Any]]:
    containers = []
    statuses = pod.status.container_statuses or []
    container_specs = pod.spec.containers
    for cs in statuses:
        specs: List[V1Container] = [c for c in container_specs if c.name == cs.name]
        healthcheck_grace_period = 0
        healthcheck = None
        if specs:
            # There should be only one matching spec
            spec = specs[0]
            if spec.liveness_probe:
                healthcheck_grace_period = (
                    spec.liveness_probe.initial_delay_seconds or 0
                )
                healthcheck = get_container_healthcheck(
                    pod.status.pod_ip, spec.liveness_probe
                )
        state_dict = cs.state.to_dict()
        state = None
        reason = None
        message = None
        start_timestamp = None
        for state_name, this_state in state_dict.items():
            # Each container has only one populated state at a time
            if this_state:
                state = state_name
                if "reason" in this_state:
                    reason = this_state["reason"]
                if "message" in this_state:
                    message = this_state["message"]
                if "started_at" in this_state:
                    start_timestamp = this_state["started_at"].timestamp()
        last_state_dict = cs.last_state.to_dict()
        last_state = None
        last_reason = None
        last_message = None
        last_duration = None
        last_timestamp = None
        for state_name, this_state in last_state_dict.items():
            if this_state:
                last_state = state_name
                if "reason" in this_state:
                    last_reason = this_state["reason"]
                if "message" in this_state:
                    last_message = this_state["message"]
                if this_state.get("started_at"):
                    if this_state.get("finished_at"):
                        last_duration = (
                            this_state["finished_at"] - this_state["started_at"]
                        ).total_seconds()
                    last_timestamp = this_state["started_at"].timestamp()

        async def get_tail_lines():
            try:
                return await get_tail_lines_for_kubernetes_container(
                    client, pod, cs, num_tail_lines, previous=False,
                )
            except asyncio.TimeoutError:
                return {"error_message": f"Could not fetch logs for {cs.name}"}

        # get previous log lines as well if this container restarted recently
        async def get_previous_tail_lines():
            if state == "running" and kubernetes_tools.recent_container_restart(
                cs.restart_count, last_state, last_timestamp
            ):
                try:
                    return await get_tail_lines_for_kubernetes_container(
                        client, pod, cs, num_tail_lines, previous=True,
                    )
                except asyncio.TimeoutError:
                    return {
                        "error_message": f"Could not fetch previous logs for {cs.name}"
                    }
            return None

        tail_lines,
previous_tail_lines = await asyncio.gather( asyncio.ensure_future(get_tail_lines()), asyncio.ensure_future(get_previous_tail_lines()), ) containers.append( { "name": cs.name, "restart_count": cs.restart_count, "state": state, "reason": reason, "message": message, "last_state": last_state, "last_reason": last_reason, "last_message": last_message, "last_duration": last_duration, "last_timestamp": last_timestamp, "previous_tail_lines": previous_tail_lines, "timestamp": start_timestamp, "healthcheck_grace_period": healthcheck_grace_period, "healthcheck_cmd": healthcheck, "tail_lines": tail_lines, } ) return containers async def get_versions_for_controller_revisions( controller_revisions: Sequence[V1ControllerRevision], client: kubernetes_tools.KubeClient, pods: Sequence[V1Pod], backends: Optional[Set[str]], num_tail_lines: int, ) -> List[KubernetesVersionDict]: versions: List[KubernetesVersionDict] = [] cr_by_shas: Dict[Tuple[str, str], V1ControllerRevision] = {} for cr in controller_revisions: git_sha = cr.metadata.labels["paasta.yelp.com/git_sha"] config_sha = cr.metadata.labels["paasta.yelp.com/config_sha"] cr_by_shas[(git_sha, config_sha)] = cr pods_by_shas: DefaultDict[Tuple[str, str], List[V1Pod]] = defaultdict(list) for pod in pods: git_sha = pod.metadata.labels["paasta.yelp.com/git_sha"] config_sha = pod.metadata.labels["paasta.yelp.com/config_sha"] pods_by_shas[(git_sha, config_sha)].append(pod) versions = await asyncio.gather( *[ get_version_for_controller_revision( cr, pods_by_shas[(git_sha, config_sha)], backends, num_tail_lines, client, ) for (git_sha, config_sha), cr in cr_by_shas.items() ] ) return versions async def get_version_for_controller_revision( cr: V1ControllerRevision, pods: Sequence[V1Pod], backends: Optional[Set[str]], num_tail_lines: int, client: Any, ) -> KubernetesVersionDict: ready_pods = [pod for pod in pods if kubernetes_tools.is_pod_ready(pod)] return { "name": cr.metadata.name, "type": "ControllerRevision", "replicas": len(pods), "ready_replicas": len(ready_pods), "create_timestamp": cr.metadata.creation_timestamp.timestamp(), "git_sha": cr.metadata.labels.get("paasta.yelp.com/git_sha"), "config_sha": cr.metadata.labels.get("paasta.yelp.com/config_sha"), "pods": await asyncio.gather( *[get_pod_status(pod, backends, client, num_tail_lines) for pod in pods] ), } def kubernetes_status( service: str, instance: str, verbose: int, include_smartstack: bool, include_envoy: bool, instance_type: str, settings: Any, ) -> Mapping[str, Any]: kstatus: Dict[str, Any] = {} config_loader = LONG_RUNNING_INSTANCE_TYPE_HANDLERS[instance_type].loader job_config = config_loader( service=service, instance=instance, cluster=settings.cluster, soa_dir=settings.soa_dir, load_deployments=True, ) kube_client = settings.kubernetes_client if kube_client is None: return kstatus app = kubernetes_tools.get_kubernetes_app_by_name( name=job_config.get_sanitised_deployment_name(), kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) # bouncing status can be inferred from app_count, ref get_bouncing_status pod_list = kubernetes_tools.pods_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) replicaset_list = kubernetes_tools.replicasets_for_service_instance( service=job_config.service, instance=job_config.instance, kube_client=kube_client, namespace=job_config.get_kubernetes_namespace(), ) # For the purpose of active_shas/app_count, don't count replicasets that are at 0/0. 
actually_running_replicasets = filter_actually_running_replicasets(replicaset_list) active_shas = kubernetes_tools.get_active_shas_for_service( [app, *pod_list, *actually_running_replicasets] ) kstatus["app_count"] = len(active_shas) kstatus["desired_state"] = job_config.get_desired_state() kstatus["bounce_method"] = job_config.get_bounce_method() kstatus["active_shas"] = list(active_shas) job_status( kstatus=kstatus, client=kube_client, namespace=job_config.get_kubernetes_namespace(), job_config=job_config, verbose=verbose, pod_list=pod_list, replicaset_list=replicaset_list, ) if ( job_config.is_autoscaling_enabled() is True and job_config.get_autoscaling_params().get("decision_policy", "") != "bespoke" # type: ignore ): try: kstatus["autoscaling_status"] = autoscaling_status( kube_client, job_config, job_config.get_kubernetes_namespace() ) except Exception as e: kstatus[ "error_message" ] = f"Unknown error occurred while fetching autoscaling status. Please contact #compute-infra for help: {e}" evicted_count = 0 for pod in pod_list: if pod.status.reason == "Evicted": evicted_count += 1 kstatus["evicted_count"] = evicted_count if include_smartstack or include_envoy: service_namespace_config = kubernetes_tools.load_service_namespace_config( service=service, namespace=job_config.get_nerve_namespace(), soa_dir=settings.soa_dir, ) if "proxy_port" in service_namespace_config: if include_smartstack: kstatus["smartstack"] = mesh_status( service=service, service_mesh=ServiceMesh.SMARTSTACK, instance=job_config.get_nerve_namespace(), job_config=job_config, service_namespace_config=service_namespace_config, pods=pod_list, should_return_individual_backends=verbose > 0, settings=settings, ) if include_envoy: kstatus["envoy"] = mesh_status( service=service, service_mesh=ServiceMesh.ENVOY, instance=job_config.get_nerve_namespace(), job_config=job_config, service_namespace_config=service_namespace_config, pods=pod_list, should_return_individual_backends=verbose > 0, settings=settings, ) return kstatus def instance_status( service: str, instance: str, verbose: int, include_smartstack: bool, include_envoy: bool, use_new: bool, instance_type: str, settings: Any, ) -> Mapping[str, Any]: status = {} if not can_handle(instance_type): raise RuntimeError( f"Unknown instance type: {instance_type!r}, " f"can handle: {INSTANCE_TYPES}" ) if instance_type in INSTANCE_TYPES_CR: status[instance_type] = cr_status( service=service, instance=instance, instance_type=instance_type, verbose=verbose, kube_client=settings.kubernetes_client, ) if instance_type in INSTANCE_TYPES_K8S: if use_new: status["kubernetes_v2"] = kubernetes_status_v2( service=service, instance=instance, instance_type=instance_type, verbose=verbose, include_smartstack=include_smartstack, include_envoy=include_envoy, settings=settings, ) else: status["kubernetes"] = kubernetes_status( service=service, instance=instance, instance_type=instance_type, verbose=verbose, include_smartstack=include_smartstack, include_envoy=include_envoy, settings=settings, ) return status def ready_replicas_from_replicaset(replicaset: V1ReplicaSet) -> int: try: ready_replicas = replicaset.status.ready_replicas if ready_replicas is None: ready_replicas = 0 except AttributeError: ready_replicas = 0 return ready_replicas def kubernetes_mesh_status( service: str, instance: str, instance_type: str, settings: Any, include_smartstack: bool = True, include_envoy: bool = True, ) -> Mapping[str, Any]: if not include_smartstack and not include_envoy: raise RuntimeError("No mesh types 
specified when requesting mesh status")

    if instance_type not in LONG_RUNNING_INSTANCE_TYPE_HANDLERS:
        raise RuntimeError(
            f"Getting mesh status for {instance_type} instances is not supported"
        )

    config_loader = LONG_RUNNING_INSTANCE_TYPE_HANDLERS[instance_type].loader
    job_config = config_loader(
        service=service,
        instance=instance,
        cluster=settings.cluster,
        soa_dir=settings.soa_dir,
        load_deployments=True,
    )
    service_namespace_config = kubernetes_tools.load_service_namespace_config(
        service=service,
        namespace=job_config.get_nerve_namespace(),
        soa_dir=settings.soa_dir,
    )
    if "proxy_port" not in service_namespace_config:
        raise RuntimeError(
            f"Instance '{service}.{instance}' is not configured for the mesh"
        )

    kube_client = settings.kubernetes_client
    pod_list = kubernetes_tools.pods_for_service_instance(
        service=job_config.service,
        instance=job_config.instance,
        kube_client=kube_client,
        namespace=job_config.get_kubernetes_namespace(),
    )
    kmesh: Dict[str, Any] = {}
    mesh_status_kwargs = dict(
        service=service,
        instance=job_config.get_nerve_namespace(),
        job_config=job_config,
        service_namespace_config=service_namespace_config,
        pods=pod_list,
        should_return_individual_backends=True,
        settings=settings,
    )
    if include_smartstack:
        kmesh["smartstack"] = mesh_status(
            service_mesh=ServiceMesh.SMARTSTACK, **mesh_status_kwargs,
        )
    if include_envoy:
        kmesh["envoy"] = mesh_status(
            service_mesh=ServiceMesh.ENVOY, **mesh_status_kwargs,
        )

    return kmesh
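
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): how pods are grouped by
# their owning ReplicaSet and how "mesh_ready" is derived from the mesh backend
# set. The Owner/FakePod stand-ins below are hypothetical simplifications of
# the kubernetes-client V1Pod objects that get_pods_by_replicaset() and
# get_pod_status() operate on; the real functions use full API objects.
from collections import defaultdict
from typing import Dict, List, NamedTuple, Optional, Set


class Owner(NamedTuple):
    kind: str
    name: str


class FakePod(NamedTuple):
    name: str
    pod_ip: str
    owners: List[Owner]


def group_pods_by_replicaset(pods: List[FakePod]) -> Dict[str, List[FakePod]]:
    # Mirrors get_pods_by_replicaset(): walk each pod's owner references and
    # bucket it under the ReplicaSet that owns it.
    by_rs: Dict[str, List[FakePod]] = defaultdict(list)
    for pod in pods:
        for owner in pod.owners:
            if owner.kind == "ReplicaSet":
                by_rs[owner.name].append(pod)
    return by_rs


def mesh_ready(pod: FakePod, backends: Optional[Set[str]]) -> Optional[bool]:
    # Mirrors the check in get_pod_status(): a pod counts as mesh-ready when
    # its IP appears in the backend set reported by the service mesh.
    # None means "no mesh data available", not "not ready".
    return pod.pod_ip in backends if backends is not None else None


if __name__ == "__main__":
    pods = [
        FakePod("web-abc-1", "10.0.0.1", [Owner("ReplicaSet", "web-abc")]),
        FakePod("web-abc-2", "10.0.0.2", [Owner("ReplicaSet", "web-abc")]),
    ]
    backends = {"10.0.0.1"}
    for rs_name, rs_pods in group_pods_by_replicaset(pods).items():
        for pod in rs_pods:
            print(rs_name, pod.name, mesh_ready(pod, backends))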
from django.conf import settings from django.contrib import messages from django.core.mail import send_mail from django.db import transaction from django.db.models import Count from django.http import HttpResponse from django.shortcuts import get_object_or_404, redirect, render from django.template.defaultfilters import pluralize from django.utils.text import slugify from django.views.generic import View from net.models import Connection from peeringdb.filters import NetworkIXLanFilterSet from peeringdb.forms import NetworkIXLanFilterForm from peeringdb.models import NetworkIXLan from peeringdb.tables import NetworkContactTable, NetworkIXLanTable from utils.forms import ConfirmationForm from utils.views import ( AddOrEditView, BulkAddFromDependencyView, BulkDeleteView, BulkEditView, DeleteView, DetailsView, ModelListView, PermissionRequiredMixin, ReturnURLMixin, ) from .filters import ( AutonomousSystemFilterSet, BGPGroupFilterSet, CommunityFilterSet, ConfigurationFilterSet, DirectPeeringSessionFilterSet, EmailFilterSet, InternetExchangeFilterSet, InternetExchangePeeringSessionFilterSet, RouterFilterSet, RoutingPolicyFilterSet, ) from .forms import ( AutonomousSystemEmailForm, AutonomousSystemFilterForm, AutonomousSystemForm, BGPGroupBulkEditForm, BGPGroupFilterForm, BGPGroupForm, CommunityBulkEditForm, CommunityFilterForm, CommunityForm, ConfigurationFilterForm, ConfigurationForm, DirectPeeringSessionBulkEditForm, DirectPeeringSessionFilterForm, DirectPeeringSessionForm, EmailFilterForm, EmailForm, InternetExchangeBulkEditForm, InternetExchangeFilterForm, InternetExchangeForm, InternetExchangePeeringDBForm, InternetExchangePeeringSessionBulkEditForm, InternetExchangePeeringSessionFilterForm, InternetExchangePeeringSessionForm, RouterBulkEditForm, RouterFilterForm, RouterForm, RoutingPolicyBulkEditForm, RoutingPolicyFilterForm, RoutingPolicyForm, ) from .models import ( AutonomousSystem, BGPGroup, BGPSession, Community, Configuration, DirectPeeringSession, Email, InternetExchange, InternetExchangePeeringSession, Router, RoutingPolicy, ) from .tables import ( AutonomousSystemTable, BGPGroupTable, CommunityTable, ConfigurationTable, DirectPeeringSessionTable, EmailTable, InternetExchangeConnectionTable, InternetExchangePeeringSessionTable, InternetExchangeTable, RouterConnectionTable, RouterTable, RoutingPolicyTable, ) class ASList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_autonomoussystem" queryset = AutonomousSystem.objects.annotate( directpeeringsession_count=Count("directpeeringsession", distinct=True), internetexchangepeeringsession_count=Count( "internetexchangepeeringsession", distinct=True ), ).order_by("affiliated", "asn") filter = AutonomousSystemFilterSet filter_form = AutonomousSystemFilterForm table = AutonomousSystemTable template = "peering/autonomoussystem/list.html" class ASAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_autonomoussystem" model = AutonomousSystem form = AutonomousSystemForm return_url = "peering:autonomoussystem_list" template = "peering/autonomoussystem/add_edit.html" class ASDetails(DetailsView): permission_required = "peering.view_autonomoussystem" queryset = AutonomousSystem.objects.all() def get_context(self, request, **kwargs): instance = get_object_or_404(self.queryset, **kwargs) try: affiliated = AutonomousSystem.objects.get( pk=request.user.preferences.get("context.as") ) except AutonomousSystem.DoesNotExist: affiliated = None shared_internet_exchanges = {} for ix in 
instance.get_shared_internet_exchanges(affiliated):
            shared_internet_exchanges[ix] = instance.get_missing_peering_sessions(
                affiliated, ix
            )

        return {
            "instance": instance,
            "shared_internet_exchanges": shared_internet_exchanges,
            "active_tab": "main",
        }


class ASEdit(PermissionRequiredMixin, AddOrEditView):
    permission_required = "peering.change_autonomoussystem"
    model = AutonomousSystem
    form = AutonomousSystemForm
    template = "peering/autonomoussystem/add_edit.html"


class ASEmail(PermissionRequiredMixin, View):
    permission_required = "peering.send_email"

    def get(self, request, *args, **kwargs):
        instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
        if not instance.can_receive_email:
            return redirect(instance.get_absolute_url())

        form = AutonomousSystemEmailForm()
        form.fields["recipient"].choices = instance.get_contact_email_addresses()
        return render(
            request,
            "peering/autonomoussystem/email.html",
            {"instance": instance, "form": form, "active_tab": "email"},
        )

    def post(self, request, *args, **kwargs):
        instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
        if not instance.can_receive_email:
            return redirect(instance.get_absolute_url())

        form = AutonomousSystemEmailForm(request.POST)
        form.fields["recipient"].choices = instance.get_contact_email_addresses()
        if form.is_valid():
            sent = send_mail(
                form.cleaned_data["subject"],
                form.cleaned_data["body"],
                settings.SERVER_EMAIL,
                [form.cleaned_data["recipient"]],
            )
            if sent == 1:
                messages.success(request, "Email sent.")
            else:
                messages.error(request, "Unable to send the email.")
        return redirect(instance.get_absolute_url())


class ASDelete(PermissionRequiredMixin, DeleteView):
    permission_required = "peering.delete_autonomoussystem"
    model = AutonomousSystem
    return_url = "peering:autonomoussystem_list"


class ASBulkDelete(PermissionRequiredMixin, BulkDeleteView):
    permission_required = "peering.delete_autonomoussystem"
    model = AutonomousSystem
    filter = AutonomousSystemFilterSet
    table = AutonomousSystemTable


class AutonomousSystemContacts(PermissionRequiredMixin, ModelListView):
    permission_required = "peering.view_autonomoussystem"
    table = NetworkContactTable
    template = "peering/autonomoussystem/contacts.html"

    def build_queryset(self, request, kwargs):
        queryset = None
        if "asn" in kwargs:
            instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
            queryset = instance.peeringdb_contacts
        return queryset

    def extra_context(self, kwargs):
        extra_context = {"active_tab": "contacts"}
        if "asn" in kwargs:
            extra_context.update(
                {"instance": get_object_or_404(AutonomousSystem, asn=kwargs["asn"])}
            )
        return extra_context


class AutonomousSystemDirectPeeringSessions(PermissionRequiredMixin, ModelListView):
    permission_required = "peering.view_autonomoussystem"
    filter = DirectPeeringSessionFilterSet
    filter_form = DirectPeeringSessionFilterForm
    table = DirectPeeringSessionTable
    template = "peering/autonomoussystem/direct_peering_sessions.html"

    def build_queryset(self, request, kwargs):
        queryset = None
        # The queryset needs to be composed of DirectPeeringSession objects
        # related to the AS we are looking at.
if "asn" in kwargs: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) queryset = instance.directpeeringsession_set.order_by( "relationship", "ip_address" ) return queryset def extra_context(self, kwargs): extra_context = {"active_tab": "directsessions"} # Since we are in the context of an AS we need to keep the reference # for it if "asn" in kwargs: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) extra_context.update({"instance": instance, "asn": instance.asn}) return extra_context class AutonomousSystemInternetExchangesPeeringSessions( PermissionRequiredMixin, ModelListView ): permission_required = "peering.view_autonomoussystem" filter = InternetExchangePeeringSessionFilterSet filter_form = InternetExchangePeeringSessionFilterForm table = InternetExchangePeeringSessionTable template = "peering/autonomoussystem/internet_exchange_peering_sessions.html" hidden_filters = ["autonomous_system__id"] def build_queryset(self, request, kwargs): queryset = None # The queryset needs to be composed of InternetExchangePeeringSession objects but they # are linked to an AS. So first of all we need to retrieve the AS for # which we want to get the peering sessions. if "asn" in kwargs: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) queryset = instance.internetexchangepeeringsession_set.prefetch_related( "internet_exchange" ).order_by("internet_exchange", "ip_address") return queryset def extra_context(self, kwargs): extra_context = {"active_tab": "ixsessions"} # Since we are in the context of an AS we need to keep the reference # for it if "asn" in kwargs: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) extra_context.update({"instance": instance, "asn": instance.asn}) return extra_context class AutonomousSystemPeers(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_autonomoussystem" table = NetworkIXLanTable template = "peering/autonomoussystem/peers.html" def build_queryset(self, request, kwargs): queryset = NetworkIXLan.objects.none() try: affiliated = AutonomousSystem.objects.get( pk=request.user.preferences.get("context.as") ) except AutonomousSystem.DoesNotExist: affiliated = None if "asn" in kwargs and affiliated: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) queryset = instance.get_missing_peering_sessions(affiliated) return queryset def extra_context(self, kwargs): extra_context = {"active_tab": "peers"} if "asn" in kwargs: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) extra_context.update({"instance": instance}) return extra_context class AutonomousSystemAddFromPeeringDB( PermissionRequiredMixin, BulkAddFromDependencyView ): permission_required = "peering.add_internetexchangepeeringsession" model = InternetExchangePeeringSession dependency_model = NetworkIXLan form_model = InternetExchangePeeringSessionForm template = "peering/internetexchangepeeringsession/add_from_peeringdb.html" def process_dependency_object(self, request, dependency): try: affiliated = AutonomousSystem.objects.get( pk=request.user.preferences.get("context.as") ) except AutonomousSystem.DoesNotExist: return [] return InternetExchangePeeringSession.create_from_peeringdb( affiliated, dependency ) def sort_objects(self, object_list): objects = [] for object_couple in object_list: for o in object_couple: if o: objects.append( { "autonomous_system": o.autonomous_system, "ixp_connection": o.ixp_connection, "ip_address": o.ip_address, } ) return objects class BGPGroupList(PermissionRequiredMixin, 
ModelListView): permission_required = "peering.view_bgpgroup" queryset = BGPGroup.objects.annotate( directpeeringsession_count=Count("directpeeringsession") ).order_by("name", "slug") filter = BGPGroupFilterSet filter_form = BGPGroupFilterForm table = BGPGroupTable template = "peering/bgpgroup/list.html" class BGPGroupDetails(DetailsView): permission_required = "peering.view_bgpgroup" queryset = BGPGroup.objects.all() def get_context(self, request, **kwargs): return { "instance": get_object_or_404(self.queryset, **kwargs), "active_tab": "main", } class BGPGroupAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_bgpgroup" model = BGPGroup form = BGPGroupForm return_url = "peering:bgpgroup_list" template = "peering/bgpgroup/add_edit.html" class BGPGroupEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_bgpgroup" model = BGPGroup form = BGPGroupForm template = "peering/bgpgroup/add_edit.html" class BGPGroupBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_bgpgroup" queryset = BGPGroup.objects.all() filter = BGPGroupFilterSet table = BGPGroupTable form = BGPGroupBulkEditForm class BGPGroupDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_bgpgroup" model = BGPGroup return_url = "peering:bgpgroup_list" class BGPGroupBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_bgpgroup" model = BGPGroup filter = BGPGroupFilterSet table = BGPGroupTable class BGPGroupPeeringSessions(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_bgpgroup" filter = DirectPeeringSessionFilterSet filter_form = DirectPeeringSessionFilterForm table = DirectPeeringSessionTable template = "peering/bgpgroup/sessions.html" hidden_filters = ["bgp_group"] def build_queryset(self, request, kwargs): queryset = None if "slug" in kwargs: instance = get_object_or_404(BGPGroup, slug=kwargs["slug"]) queryset = instance.directpeeringsession_set.prefetch_related( "autonomous_system", "router" ).order_by("autonomous_system", "ip_address") return queryset def extra_context(self, kwargs): extra_context = {"active_tab": "directsessions"} if "slug" in kwargs: extra_context.update( {"instance": get_object_or_404(BGPGroup, slug=kwargs["slug"])} ) return extra_context class CommunityList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_community" queryset = Community.objects.all() filter = CommunityFilterSet filter_form = CommunityFilterForm table = CommunityTable template = "peering/community/list.html" class CommunityAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_community" model = Community form = CommunityForm return_url = "peering:community_list" template = "peering/community/add_edit.html" class CommunityDetails(DetailsView): permission_required = "peering.view_community" queryset = Community.objects.all() def get_context(self, request, **kwargs): return { "instance": get_object_or_404(self.queryset, **kwargs), "active_tab": "main", } class CommunityEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_community" model = Community form = CommunityForm template = "peering/community/add_edit.html" class CommunityDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_community" model = Community return_url = "peering:community_list" class CommunityBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = 
"peering.delete_community" model = Community filter = CommunityFilterSet table = CommunityTable class CommunityBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_community" queryset = Community.objects.all() filter = CommunityFilterSet table = CommunityTable form = CommunityBulkEditForm class ConfigurationList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_configuration" queryset = Configuration.objects.all() filter = ConfigurationFilterSet filter_form = ConfigurationFilterForm table = ConfigurationTable template = "peering/configuration/list.html" class ConfigurationAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_configuration" model = Configuration form = ConfigurationForm template = "peering/configuration/add_edit.html" return_url = "peering:configuration_list" class ConfigurationDetails(DetailsView): permission_required = "peering.view_configuration" queryset = Configuration.objects.all() def get_context(self, request, **kwargs): instance = get_object_or_404(self.queryset, **kwargs) return { "instance": instance, "routers": Router.objects.filter(configuration_template=instance), "active_tab": "main", } class ConfigurationEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_configuration" model = Configuration form = ConfigurationForm template = "peering/configuration/add_edit.html" class ConfigurationDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_configuration" model = Configuration return_url = "peering:configuration_list" class ConfigurationBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_configuration" model = Configuration filter = ConfigurationFilterSet table = ConfigurationTable class DirectPeeringSessionAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_directpeeringsession" model = DirectPeeringSession form = DirectPeeringSessionForm template = "peering/directpeeringsession/add_edit.html" class DirectPeeringSessionBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_directpeeringsession" model = DirectPeeringSession filter = DirectPeeringSessionFilterSet table = DirectPeeringSessionTable def filter_by_extra_context(self, queryset, request, kwargs): # If we are on an AutonomousSystem context, filter the session with # the given ASN if "asn" in request.POST: asn = request.POST.get("asn") autonomous_system = get_object_or_404(AutonomousSystem, asn=asn) return queryset.filter(autonomous_system=autonomous_system) # If we are on an Router context, filter the session with # the given Router ID if "router_id" in request.POST: router_id = int(request.POST.get("router_id")) router = get_object_or_404(Router, pk=router_id) return queryset.filter(router=router) return queryset class DirectPeeringSessionBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_directpeeringsession" queryset = DirectPeeringSession.objects.select_related("autonomous_system") parent_object = BGPSession filter = DirectPeeringSessionFilterSet table = DirectPeeringSessionTable form = DirectPeeringSessionBulkEditForm class DirectPeeringSessionDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_directpeeringsession" model = DirectPeeringSession class DirectPeeringSessionDetails(DetailsView): permission_required = "peering.view_directpeeringsession" queryset = 
DirectPeeringSession.objects.all() def get_context(self, request, **kwargs): return { "instance": get_object_or_404(self.queryset, **kwargs), "active_tab": "main", } class DirectPeeringSessionEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_directpeeringsession" model = DirectPeeringSession form = DirectPeeringSessionForm template = "peering/directpeeringsession/add_edit.html" class DirectPeeringSessionList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_directpeeringsession" queryset = DirectPeeringSession.objects.order_by( "local_autonomous_system", "autonomous_system", "ip_address" ) table = DirectPeeringSessionTable filter = DirectPeeringSessionFilterSet filter_form = DirectPeeringSessionFilterForm template = "peering/directpeeringsession/list.html" class EmailList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_email" queryset = Email.objects.all() filter = EmailFilterSet filter_form = EmailFilterForm table = EmailTable template = "peering/email/list.html" class EmailAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_email" model = Email form = EmailForm template = "peering/email/add_edit.html" return_url = "peering:email_list" class EmailDetails(DetailsView): permission_required = "peering.view_email" queryset = Email.objects.all() def get_context(self, request, **kwargs): return { "instance": get_object_or_404(self.queryset, **kwargs), "active_tab": "main", } class EmailEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_email" model = Email form = EmailForm template = "peering/email/add_edit.html" class EmailDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_email" model = Email return_url = "peering:email_list" class EmailBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_email" model = Email filter = EmailFilterSet table = EmailTable class InternetExchangeList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_internetexchange" queryset = InternetExchange.objects.all().order_by( "local_autonomous_system", "name", "slug" ) table = InternetExchangeTable filter = InternetExchangeFilterSet filter_form = InternetExchangeFilterForm template = "peering/internetexchange/list.html" class InternetExchangeAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_internetexchange" model = InternetExchange form = InternetExchangeForm return_url = "peering:internetexchange_list" template = "peering/internetexchange/add_edit.html" class InternetExchangePeeringDBImport(PermissionRequiredMixin, ReturnURLMixin, View): permission_required = "peering.add_internetexchange" default_return_url = "peering:internetexchange_list" def get_missing_ixps(self, request): try: affiliated = AutonomousSystem.objects.get( pk=request.user.preferences.get("context.as") ) except AutonomousSystem.DoesNotExist: messages.error( request, "Unable to import IXPs and connections without affiliated AS." 
        )
            # Callers unpack this as (affiliated, missing_ixps), so return an
            # empty mapping instead of a redirect when no affiliated AS is set.
            return None, {}

        # Get known IXPs and their connections
        netixlans = [
            c.peeringdb_netixlan.pk
            for c in Connection.objects.filter(peeringdb_netixlan__isnull=False)
        ]
        ixlans = [
            i.peeringdb_ixlan.pk
            for i in InternetExchange.objects.filter(peeringdb_ixlan__isnull=False)
        ]
        # Find missing connections
        missing_netixlans = NetworkIXLan.objects.filter(asn=affiliated.asn).exclude(
            pk__in=netixlans
        )
        # Map missing IXPs based on missing connections
        missing_ixps = {}
        for netixlan in missing_netixlans:
            ixlan = missing_ixps.setdefault(netixlan.ixlan, [])
            ixlan.append(netixlan)
        return affiliated, missing_ixps

    @transaction.atomic
    def import_ixps(self, local_as, missing_ixps):
        """
        Imports IXPs and connections in a single database transaction.
        """
        imported_ixps, imported_connections = 0, 0

        if not missing_ixps:
            return imported_ixps, imported_connections

        for ixp, connections in missing_ixps.items():
            i, created = InternetExchange.objects.get_or_create(
                slug=slugify(f"{ixp.ix.name} {ixp.ix.pk}"),
                defaults={
                    "peeringdb_ixlan": ixp,
                    "local_autonomous_system": local_as,
                    "name": ixp.ix.name,
                },
            )
            for connection in connections:
                Connection.objects.create(
                    peeringdb_netixlan=connection,
                    internet_exchange_point=i,
                    ipv4_address=connection.ipaddr4,
                    ipv6_address=connection.ipaddr6,
                )
                imported_connections += 1
            if created:
                imported_ixps += 1

        return imported_ixps, imported_connections

    def get(self, request):
        _, missing_ixps = self.get_missing_ixps(request)
        if not missing_ixps:
            messages.warning(request, "No IXPs nor connections to import.")
            return redirect(self.get_return_url(request))

        return render(
            request,
            "peering/internetexchange/import.html",
            {
                "form": ConfirmationForm(initial=request.GET),
                "missing_ixps": missing_ixps,
                "return_url": self.get_return_url(request),
            },
        )

    def post(self, request):
        local_as, missing_ixps = self.get_missing_ixps(request)
        form = ConfirmationForm(request.POST)
        if form.is_valid():
            ixp_number, connection_number = self.import_ixps(local_as, missing_ixps)
            if ixp_number == 0 and connection_number == 0:
                messages.warning(request, "No IXPs imported.")
            else:
                message = ["Imported"]
                if ixp_number > 0:
                    message.append(f"{ixp_number} IXP{pluralize(ixp_number)}")
                if connection_number > 0:
                    message.append(
                        f"{connection_number} connection{pluralize(connection_number)}"
                    )
                messages.success(request, f"{' '.join(message)}.")
        return redirect(self.get_return_url(request))


class InternetExchangeDetails(DetailsView):
    permission_required = "peering.view_internetexchange"
    queryset = InternetExchange.objects.all()

    def get_context(self, request, **kwargs):
        instance = get_object_or_404(self.queryset, **kwargs)
        if not instance.linked_to_peeringdb:
            # Try fixing the PeeringDB record references if possible
            ix = instance.link_to_peeringdb()
            if ix:
                messages.info(
                    request,
                    "PeeringDB record for this IX was invalid, it's been fixed.",
                )
        return {"instance": instance, "active_tab": "main"}


class InternetExchangeEdit(PermissionRequiredMixin, AddOrEditView):
    permission_required = "peering.change_internetexchange"
    model = InternetExchange
    form = InternetExchangeForm
    template = "peering/internetexchange/add_edit.html"


class InternetExchangeDelete(PermissionRequiredMixin, DeleteView):
    permission_required = "peering.delete_internetexchange"
    model = InternetExchange
    return_url = "peering:internetexchange_list"


class InternetExchangeBulkDelete(PermissionRequiredMixin, BulkDeleteView):
    permission_required = "peering.delete_internetexchange"
    model = InternetExchange
    filter = InternetExchangeFilterSet
    table =
InternetExchangeTable class InternetExchangeBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_internetexchange" queryset = InternetExchange.objects.all() filter = InternetExchangeFilterSet table = InternetExchangeTable form = InternetExchangeBulkEditForm class InternetExchangeConnections(PermissionRequiredMixin, ModelListView): permission_required = ("net.view_connection", "peering.view_internetexchange") table = InternetExchangeConnectionTable template = "peering/internetexchange/connections.html" def build_queryset(self, request, kwargs): return Connection.objects.filter( internet_exchange_point=get_object_or_404( InternetExchange, slug=kwargs["slug"] ) ) def extra_context(self, kwargs): return { "instance": get_object_or_404(InternetExchange, slug=kwargs["slug"]), "active_tab": "connections", } class InternetExchangePeeringSessions(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_internetexchange" filter = InternetExchangePeeringSessionFilterSet filter_form = InternetExchangePeeringSessionFilterForm table = InternetExchangePeeringSessionTable template = "peering/internetexchange/sessions.html" hidden_filters = ["internet_exchange__id"] def build_queryset(self, request, kwargs): instance = get_object_or_404(InternetExchange, slug=kwargs["slug"]) return instance.get_peering_sessions() def extra_context(self, kwargs): return { "instance": get_object_or_404(InternetExchange, slug=kwargs["slug"]), "active_tab": "sessions", } class InternetExchangePeers(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_internetexchange" filter = NetworkIXLanFilterSet filter_form = NetworkIXLanFilterForm table = NetworkIXLanTable template = "peering/internetexchange/peers.html" def build_queryset(self, request, kwargs): instance = get_object_or_404(InternetExchange, slug=kwargs["slug"]) return instance.get_available_peers() def extra_context(self, kwargs): instance = get_object_or_404(InternetExchange, slug=kwargs["slug"]) return { "active_tab": "peers", "instance": instance, "internet_exchange_id": instance.pk, } class InternetExchangePeeringSessionList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_internetexchangepeeringsession" queryset = InternetExchangePeeringSession.objects.order_by( "autonomous_system", "ip_address" ) table = InternetExchangePeeringSessionTable filter = InternetExchangePeeringSessionFilterSet filter_form = InternetExchangePeeringSessionFilterForm template = "peering/internetexchangepeeringsession/list.html" class InternetExchangePeeringSessionAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_internetexchangepeeringsession" model = InternetExchangePeeringSession form = InternetExchangePeeringSessionForm template = "peering/internetexchangepeeringsession/add_edit.html" class InternetExchangePeeringSessionBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_internetexchangepeeringsession" queryset = InternetExchangePeeringSession.objects.select_related( "autonomous_system" ) parent_object = BGPSession filter = InternetExchangePeeringSessionFilterSet table = InternetExchangePeeringSessionTable form = InternetExchangePeeringSessionBulkEditForm class InternetExchangePeeringSessionDetails(DetailsView): permission_required = "peering.view_internetexchangepeeringsession" queryset = InternetExchangePeeringSession.objects.all() def get_context(self, request, **kwargs): instance = get_object_or_404(self.queryset, 
from django.conf import settings from django.contrib import messages from django.core.mail import send_mail from django.db import transaction from django.db.models import Count from django.http import HttpResponse from django.shortcuts import get_object_or_404, redirect, render from django.template.defaultfilters import pluralize from django.utils.text import slugify from django.views.generic import View from net.models import Connection from peeringdb.filters import NetworkIXLanFilterSet from peeringdb.forms import NetworkIXLanFilterForm from peeringdb.models import NetworkIXLan from peeringdb.tables import NetworkContactTable, NetworkIXLanTable from utils.forms import ConfirmationForm from utils.views import ( AddOrEditView, BulkAddFromDependencyView, BulkDeleteView, BulkEditView, DeleteView, DetailsView, ModelListView, PermissionRequiredMixin, ReturnURLMixin, ) from .filters import ( AutonomousSystemFilterSet, BGPGroupFilterSet, CommunityFilterSet, ConfigurationFilterSet, DirectPeeringSessionFilterSet, EmailFilterSet, InternetExchangeFilterSet, InternetExchangePeeringSessionFilterSet, RouterFilterSet, RoutingPolicyFilterSet, ) from .forms import ( AutonomousSystemEmailForm, AutonomousSystemFilterForm, AutonomousSystemForm, BGPGroupBulkEditForm, BGPGroupFilterForm, BGPGroupForm, CommunityBulkEditForm, CommunityFilterForm, CommunityForm, ConfigurationFilterForm, ConfigurationForm, DirectPeeringSessionBulkEditForm, DirectPeeringSessionFilterForm, DirectPeeringSessionForm, EmailFilterForm, EmailForm, InternetExchangeBulkEditForm, InternetExchangeFilterForm, InternetExchangeForm, InternetExchangePeeringDBForm, InternetExchangePeeringSessionBulkEditForm, InternetExchangePeeringSessionFilterForm, InternetExchangePeeringSessionForm, RouterBulkEditForm, RouterFilterForm, RouterForm, RoutingPolicyBulkEditForm, RoutingPolicyFilterForm, RoutingPolicyForm, ) from .models import ( AutonomousSystem, BGPGroup, BGPSession, Community, Configuration, DirectPeeringSession, Email, InternetExchange, InternetExchangePeeringSession, Router, RoutingPolicy, ) from .tables import ( AutonomousSystemTable, BGPGroupTable, CommunityTable, ConfigurationTable, DirectPeeringSessionTable, EmailTable, InternetExchangeConnectionTable, InternetExchangePeeringSessionTable, InternetExchangeTable, RouterConnectionTable, RouterTable, RoutingPolicyTable, ) class ASList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_autonomoussystem" queryset = AutonomousSystem.objects.annotate( directpeeringsession_count=Count("directpeeringsession", distinct=True), internetexchangepeeringsession_count=Count( "internetexchangepeeringsession", distinct=True ), ).order_by("affiliated", "asn") filter = AutonomousSystemFilterSet filter_form = AutonomousSystemFilterForm table = AutonomousSystemTable template = "peering/autonomoussystem/list.html" class ASAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_autonomoussystem" model = AutonomousSystem form = AutonomousSystemForm return_url = "peering:autonomoussystem_list" template = "peering/autonomoussystem/add_edit.html" class ASDetails(DetailsView): permission_required = "peering.view_autonomoussystem" queryset = AutonomousSystem.objects.all() def get_context(self, request, **kwargs): instance = get_object_or_404(self.queryset, **kwargs) try: affiliated = AutonomousSystem.objects.get( pk=request.user.preferences.get("context.as") ) except AutonomousSystem.DoesNotExist: affiliated = None shared_internet_exchanges = {} for ix in 
instance.get_shared_internet_exchanges(affiliated):
            shared_internet_exchanges[ix] = instance.get_missing_peering_sessions(
                affiliated, ix
            )

        return {
            "instance": instance,
            "shared_internet_exchanges": shared_internet_exchanges,
            "active_tab": "main",
        }


class ASEdit(PermissionRequiredMixin, AddOrEditView):
    permission_required = "peering.change_autonomoussystem"
    model = AutonomousSystem
    form = AutonomousSystemForm
    template = "peering/autonomoussystem/add_edit.html"


class ASEmail(PermissionRequiredMixin, View):
    permission_required = "peering.send_email"

    def get(self, request, *args, **kwargs):
        instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
        if not instance.can_receive_email:
            return redirect(instance.get_absolute_url())

        form = AutonomousSystemEmailForm()
        form.fields["recipient"].choices = instance.get_contact_email_addresses()

        return render(
            request,
            "peering/autonomoussystem/email.html",
            {"instance": instance, "form": form, "active_tab": "email"},
        )

    def post(self, request, *args, **kwargs):
        instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
        if not instance.can_receive_email:
            # The redirect must be returned, otherwise execution falls
            # through and the e-mail form is processed anyway
            return redirect(instance.get_absolute_url())

        form = AutonomousSystemEmailForm(request.POST)
        form.fields["recipient"].choices = instance.get_contact_email_addresses()

        if form.is_valid():
            sent = send_mail(
                form.cleaned_data["subject"],
                form.cleaned_data["body"],
                settings.SERVER_EMAIL,
                [form.cleaned_data["recipient"]],
            )
            if sent == 1:
                messages.success(request, "Email sent.")
            else:
                messages.error(request, "Unable to send the email.")

        return redirect(instance.get_absolute_url())


class ASDelete(PermissionRequiredMixin, DeleteView):
    permission_required = "peering.delete_autonomoussystem"
    model = AutonomousSystem
    return_url = "peering:autonomoussystem_list"


class ASBulkDelete(PermissionRequiredMixin, BulkDeleteView):
    permission_required = "peering.delete_autonomoussystem"
    model = AutonomousSystem
    filter = AutonomousSystemFilterSet
    table = AutonomousSystemTable


class AutonomousSystemContacts(PermissionRequiredMixin, ModelListView):
    permission_required = "peering.view_autonomoussystem"
    table = NetworkContactTable
    template = "peering/autonomoussystem/contacts.html"

    def build_queryset(self, request, kwargs):
        queryset = None
        if "asn" in kwargs:
            instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
            queryset = instance.peeringdb_contacts
        return queryset

    def extra_context(self, kwargs):
        extra_context = {"active_tab": "contacts"}
        if "asn" in kwargs:
            extra_context.update(
                {"instance": get_object_or_404(AutonomousSystem, asn=kwargs["asn"])}
            )
        return extra_context


class AutonomousSystemDirectPeeringSessions(PermissionRequiredMixin, ModelListView):
    permission_required = "peering.view_autonomoussystem"
    filter = DirectPeeringSessionFilterSet
    filter_form = DirectPeeringSessionFilterForm
    table = DirectPeeringSessionTable
    template = "peering/autonomoussystem/direct_peering_sessions.html"

    def build_queryset(self, request, kwargs):
        queryset = None
        # The queryset needs to be composed of DirectPeeringSession objects
        # related to the AS we are looking at.
if "asn" in kwargs: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) queryset = instance.directpeeringsession_set.order_by( "relationship", "ip_address" ) return queryset def extra_context(self, kwargs): extra_context = {"active_tab": "directsessions"} # Since we are in the context of an AS we need to keep the reference # for it if "asn" in kwargs: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) extra_context.update({"instance": instance, "asn": instance.asn}) return extra_context class AutonomousSystemInternetExchangesPeeringSessions( PermissionRequiredMixin, ModelListView ): permission_required = "peering.view_autonomoussystem" filter = InternetExchangePeeringSessionFilterSet filter_form = InternetExchangePeeringSessionFilterForm table = InternetExchangePeeringSessionTable template = "peering/autonomoussystem/internet_exchange_peering_sessions.html" hidden_filters = ["autonomous_system__id"] def build_queryset(self, request, kwargs): queryset = None # The queryset needs to be composed of InternetExchangePeeringSession objects but they # are linked to an AS. So first of all we need to retrieve the AS for # which we want to get the peering sessions. if "asn" in kwargs: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) queryset = instance.internetexchangepeeringsession_set.prefetch_related( "internet_exchange" ).order_by("internet_exchange", "ip_address") return queryset def extra_context(self, kwargs): extra_context = {"active_tab": "ixsessions"} # Since we are in the context of an AS we need to keep the reference # for it if "asn" in kwargs: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) extra_context.update({"instance": instance, "asn": instance.asn}) return extra_context class AutonomousSystemPeers(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_autonomoussystem" table = NetworkIXLanTable template = "peering/autonomoussystem/peers.html" def build_queryset(self, request, kwargs): queryset = NetworkIXLan.objects.none() try: affiliated = AutonomousSystem.objects.get( pk=request.user.preferences.get("context.as") ) except AutonomousSystem.DoesNotExist: affiliated = None if "asn" in kwargs and affiliated: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) queryset = instance.get_missing_peering_sessions(affiliated) return queryset def extra_context(self, kwargs): extra_context = {"active_tab": "peers"} if "asn" in kwargs: instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"]) extra_context.update({"instance": instance}) return extra_context class AutonomousSystemAddFromPeeringDB( PermissionRequiredMixin, BulkAddFromDependencyView ): permission_required = "peering.add_internetexchangepeeringsession" model = InternetExchangePeeringSession dependency_model = NetworkIXLan form_model = InternetExchangePeeringSessionForm template = "peering/internetexchangepeeringsession/add_from_peeringdb.html" def process_dependency_object(self, request, dependency): try: affiliated = AutonomousSystem.objects.get( pk=request.user.preferences.get("context.as") ) except AutonomousSystem.DoesNotExist: return [] return InternetExchangePeeringSession.create_from_peeringdb( affiliated, dependency ) def sort_objects(self, object_list): objects = [] for object_couple in object_list: for o in object_couple: if o: objects.append( { "autonomous_system": o.autonomous_system, "ixp_connection": o.ixp_connection, "ip_address": o.ip_address, } ) return objects class BGPGroupList(PermissionRequiredMixin, 
ModelListView): permission_required = "peering.view_bgpgroup" queryset = BGPGroup.objects.annotate( directpeeringsession_count=Count("directpeeringsession") ).order_by("name", "slug") filter = BGPGroupFilterSet filter_form = BGPGroupFilterForm table = BGPGroupTable template = "peering/bgpgroup/list.html" class BGPGroupDetails(DetailsView): permission_required = "peering.view_bgpgroup" queryset = BGPGroup.objects.all() def get_context(self, request, **kwargs): return { "instance": get_object_or_404(self.queryset, **kwargs), "active_tab": "main", } class BGPGroupAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_bgpgroup" model = BGPGroup form = BGPGroupForm return_url = "peering:bgpgroup_list" template = "peering/bgpgroup/add_edit.html" class BGPGroupEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_bgpgroup" model = BGPGroup form = BGPGroupForm template = "peering/bgpgroup/add_edit.html" class BGPGroupBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_bgpgroup" queryset = BGPGroup.objects.all() filter = BGPGroupFilterSet table = BGPGroupTable form = BGPGroupBulkEditForm class BGPGroupDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_bgpgroup" model = BGPGroup return_url = "peering:bgpgroup_list" class BGPGroupBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_bgpgroup" model = BGPGroup filter = BGPGroupFilterSet table = BGPGroupTable class BGPGroupPeeringSessions(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_bgpgroup" filter = DirectPeeringSessionFilterSet filter_form = DirectPeeringSessionFilterForm table = DirectPeeringSessionTable template = "peering/bgpgroup/sessions.html" hidden_filters = ["bgp_group"] def build_queryset(self, request, kwargs): queryset = None if "slug" in kwargs: instance = get_object_or_404(BGPGroup, slug=kwargs["slug"]) queryset = instance.directpeeringsession_set.prefetch_related( "autonomous_system", "router" ).order_by("autonomous_system", "ip_address") return queryset def extra_context(self, kwargs): extra_context = {"active_tab": "directsessions"} if "slug" in kwargs: extra_context.update( {"instance": get_object_or_404(BGPGroup, slug=kwargs["slug"])} ) return extra_context class CommunityList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_community" queryset = Community.objects.all() filter = CommunityFilterSet filter_form = CommunityFilterForm table = CommunityTable template = "peering/community/list.html" class CommunityAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_community" model = Community form = CommunityForm return_url = "peering:community_list" template = "peering/community/add_edit.html" class CommunityDetails(DetailsView): permission_required = "peering.view_community" queryset = Community.objects.all() def get_context(self, request, **kwargs): return { "instance": get_object_or_404(self.queryset, **kwargs), "active_tab": "main", } class CommunityEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_community" model = Community form = CommunityForm template = "peering/community/add_edit.html" class CommunityDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_community" model = Community return_url = "peering:community_list" class CommunityBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = 
"peering.delete_community" model = Community filter = CommunityFilterSet table = CommunityTable class CommunityBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_community" queryset = Community.objects.all() filter = CommunityFilterSet table = CommunityTable form = CommunityBulkEditForm class ConfigurationList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_configuration" queryset = Configuration.objects.all() filter = ConfigurationFilterSet filter_form = ConfigurationFilterForm table = ConfigurationTable template = "peering/configuration/list.html" class ConfigurationAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_configuration" model = Configuration form = ConfigurationForm template = "peering/configuration/add_edit.html" return_url = "peering:configuration_list" class ConfigurationDetails(DetailsView): permission_required = "peering.view_configuration" queryset = Configuration.objects.all() def get_context(self, request, **kwargs): instance = get_object_or_404(self.queryset, **kwargs) return { "instance": instance, "routers": Router.objects.filter(configuration_template=instance), "active_tab": "main", } class ConfigurationEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_configuration" model = Configuration form = ConfigurationForm template = "peering/configuration/add_edit.html" class ConfigurationDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_configuration" model = Configuration return_url = "peering:configuration_list" class ConfigurationBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_configuration" model = Configuration filter = ConfigurationFilterSet table = ConfigurationTable class DirectPeeringSessionAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_directpeeringsession" model = DirectPeeringSession form = DirectPeeringSessionForm template = "peering/directpeeringsession/add_edit.html" class DirectPeeringSessionBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_directpeeringsession" model = DirectPeeringSession filter = DirectPeeringSessionFilterSet table = DirectPeeringSessionTable def filter_by_extra_context(self, queryset, request, kwargs): # If we are on an AutonomousSystem context, filter the session with # the given ASN if "asn" in request.POST: asn = request.POST.get("asn") autonomous_system = get_object_or_404(AutonomousSystem, asn=asn) return queryset.filter(autonomous_system=autonomous_system) # If we are on an Router context, filter the session with # the given Router ID if "router_id" in request.POST: router_id = int(request.POST.get("router_id")) router = get_object_or_404(Router, pk=router_id) return queryset.filter(router=router) return queryset class DirectPeeringSessionBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_directpeeringsession" queryset = DirectPeeringSession.objects.select_related("autonomous_system") parent_object = BGPSession filter = DirectPeeringSessionFilterSet table = DirectPeeringSessionTable form = DirectPeeringSessionBulkEditForm class DirectPeeringSessionDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_directpeeringsession" model = DirectPeeringSession class DirectPeeringSessionDetails(DetailsView): permission_required = "peering.view_directpeeringsession" queryset = 
DirectPeeringSession.objects.all() def get_context(self, request, **kwargs): return { "instance": get_object_or_404(self.queryset, **kwargs), "active_tab": "main", } class DirectPeeringSessionEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_directpeeringsession" model = DirectPeeringSession form = DirectPeeringSessionForm template = "peering/directpeeringsession/add_edit.html" class DirectPeeringSessionList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_directpeeringsession" queryset = DirectPeeringSession.objects.order_by( "local_autonomous_system", "autonomous_system", "ip_address" ) table = DirectPeeringSessionTable filter = DirectPeeringSessionFilterSet filter_form = DirectPeeringSessionFilterForm template = "peering/directpeeringsession/list.html" class EmailList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_email" queryset = Email.objects.all() filter = EmailFilterSet filter_form = EmailFilterForm table = EmailTable template = "peering/email/list.html" class EmailAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_email" model = Email form = EmailForm template = "peering/email/add_edit.html" return_url = "peering:email_list" class EmailDetails(DetailsView): permission_required = "peering.view_email" queryset = Email.objects.all() def get_context(self, request, **kwargs): return { "instance": get_object_or_404(self.queryset, **kwargs), "active_tab": "main", } class EmailEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_email" model = Email form = EmailForm template = "peering/email/add_edit.html" class EmailDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_email" model = Email return_url = "peering:email_list" class EmailBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_email" model = Email filter = EmailFilterSet table = EmailTable class InternetExchangeList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_internetexchange" queryset = InternetExchange.objects.all().order_by( "local_autonomous_system", "name", "slug" ) table = InternetExchangeTable filter = InternetExchangeFilterSet filter_form = InternetExchangeFilterForm template = "peering/internetexchange/list.html" class InternetExchangeAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_internetexchange" model = InternetExchange form = InternetExchangeForm return_url = "peering:internetexchange_list" template = "peering/internetexchange/add_edit.html" class InternetExchangePeeringDBImport(PermissionRequiredMixin, ReturnURLMixin, View): permission_required = "peering.add_internetexchange" default_return_url = "peering:internetexchange_list" def get_missing_ixps(self, request): try: affiliated = AutonomousSystem.objects.get( pk=request.user.preferences.get("context.as") ) except AutonomousSystem.DoesNotExist: messages.error( request, "Unable to import IXPs and connections without affiliated AS." 
)
            # Return an empty result instead of a redirect: callers unpack
            # this method's return value as a 2-tuple
            return None, {}

        # Get known IXPs and their connections
        netixlans = [
            c.peeringdb_netixlan.pk
            for c in Connection.objects.filter(peeringdb_netixlan__isnull=False)
        ]
        ixlans = [
            i.peeringdb_ixlan.pk
            for i in InternetExchange.objects.filter(peeringdb_ixlan__isnull=False)
        ]

        # Find missing connections
        missing_netixlans = NetworkIXLan.objects.filter(asn=affiliated.asn).exclude(
            pk__in=netixlans
        )

        # Map missing IXPs based on missing connections
        missing_ixps = {}
        for netixlan in missing_netixlans:
            ixlan = missing_ixps.setdefault(netixlan.ixlan, [])
            ixlan.append(netixlan)

        return affiliated, missing_ixps

    @transaction.atomic
    def import_ixps(self, local_as, missing_ixps):
        """
        Imports IXPs and connections in a single database transaction.
        """
        imported_ixps, imported_connections = 0, 0

        if not missing_ixps:
            return imported_ixps, imported_connections

        for ixp, connections in missing_ixps.items():
            i, created = InternetExchange.objects.get_or_create(
                slug=slugify(f"{ixp.ix.name} {ixp.ix.pk}"),
                defaults={
                    "peeringdb_ixlan": ixp,
                    "local_autonomous_system": local_as,
                    "name": ixp.ix.name,
                },
            )
            for connection in connections:
                Connection.objects.create(
                    peeringdb_netixlan=connection,
                    internet_exchange_point=i,
                    ipv4_address=connection.ipaddr4,
                    ipv6_address=connection.ipaddr6,
                )
                imported_connections += 1
            if created:
                imported_ixps += 1

        return imported_ixps, imported_connections

    def get(self, request):
        affiliated, missing_ixps = self.get_missing_ixps(request)
        if affiliated is None:
            # No affiliated AS, the error message has already been queued
            return redirect(self.get_return_url(request))
        if not missing_ixps:
            messages.warning(request, "No IXPs nor connections to import.")
            return redirect(self.get_return_url(request))

        return render(
            request,
            "peering/internetexchange/import.html",
            {
                "form": ConfirmationForm(initial=request.GET),
                "missing_ixps": missing_ixps,
                "return_url": self.get_return_url(request),
            },
        )

    def post(self, request):
        local_as, missing_ixps = self.get_missing_ixps(request)
        if local_as is None:
            return redirect(self.get_return_url(request))

        form = ConfirmationForm(request.POST)
        if form.is_valid():
            ixp_number, connection_number = self.import_ixps(local_as, missing_ixps)
            if ixp_number == 0 and connection_number == 0:
                messages.warning(request, "No IXPs imported.")
            else:
                message = ["Imported"]
                if ixp_number > 0:
                    message.append(f"{ixp_number} IXP{pluralize(ixp_number)}")
                if connection_number > 0:
                    message.append(
                        f"{connection_number} connection{pluralize(connection_number)}"
                    )
                messages.success(request, f"{' '.join(message)}.")

        return redirect(self.get_return_url(request))


class InternetExchangeDetails(DetailsView):
    permission_required = "peering.view_internetexchange"
    queryset = InternetExchange.objects.all()

    def get_context(self, request, **kwargs):
        instance = get_object_or_404(self.queryset, **kwargs)
        if not instance.linked_to_peeringdb:
            # Try fixing the PeeringDB record references if possible
            ix = instance.link_to_peeringdb()
            if ix:
                messages.info(
                    request,
                    "PeeringDB record for this IX was invalid, it's been fixed.",
                )
        return {"instance": instance, "active_tab": "main"}


class InternetExchangeEdit(PermissionRequiredMixin, AddOrEditView):
    permission_required = "peering.change_internetexchange"
    model = InternetExchange
    form = InternetExchangeForm
    template = "peering/internetexchange/add_edit.html"


class InternetExchangeDelete(PermissionRequiredMixin, DeleteView):
    permission_required = "peering.delete_internetexchange"
    model = InternetExchange
    return_url = "peering:internetexchange_list"


class InternetExchangeBulkDelete(PermissionRequiredMixin, BulkDeleteView):
    permission_required = "peering.delete_internetexchange"
    model = InternetExchange
    filter = InternetExchangeFilterSet
    table = 
InternetExchangeTable class InternetExchangeBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_internetexchange" queryset = InternetExchange.objects.all() filter = InternetExchangeFilterSet table = InternetExchangeTable form = InternetExchangeBulkEditForm class InternetExchangeConnections(PermissionRequiredMixin, ModelListView): permission_required = ("net.view_connection", "peering.view_internetexchange") table = InternetExchangeConnectionTable template = "peering/internetexchange/connections.html" def build_queryset(self, request, kwargs): return Connection.objects.filter( internet_exchange_point=get_object_or_404( InternetExchange, slug=kwargs["slug"] ) ) def extra_context(self, kwargs): return { "instance": get_object_or_404(InternetExchange, slug=kwargs["slug"]), "active_tab": "connections", } class InternetExchangePeeringSessions(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_internetexchange" filter = InternetExchangePeeringSessionFilterSet filter_form = InternetExchangePeeringSessionFilterForm table = InternetExchangePeeringSessionTable template = "peering/internetexchange/sessions.html" hidden_filters = ["internet_exchange__id"] def build_queryset(self, request, kwargs): instance = get_object_or_404(InternetExchange, slug=kwargs["slug"]) return instance.get_peering_sessions() def extra_context(self, kwargs): return { "instance": get_object_or_404(InternetExchange, slug=kwargs["slug"]), "active_tab": "sessions", } class InternetExchangePeers(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_internetexchange" filter = NetworkIXLanFilterSet filter_form = NetworkIXLanFilterForm table = NetworkIXLanTable template = "peering/internetexchange/peers.html" def build_queryset(self, request, kwargs): instance = get_object_or_404(InternetExchange, slug=kwargs["slug"]) return instance.get_available_peers() def extra_context(self, kwargs): instance = get_object_or_404(InternetExchange, slug=kwargs["slug"]) return { "active_tab": "peers", "instance": instance, "internet_exchange_id": instance.pk, } class InternetExchangePeeringSessionList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_internetexchangepeeringsession" queryset = InternetExchangePeeringSession.objects.order_by( "autonomous_system", "ip_address" ) table = InternetExchangePeeringSessionTable filter = InternetExchangePeeringSessionFilterSet filter_form = InternetExchangePeeringSessionFilterForm template = "peering/internetexchangepeeringsession/list.html" class InternetExchangePeeringSessionAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_internetexchangepeeringsession" model = InternetExchangePeeringSession form = InternetExchangePeeringSessionForm template = "peering/internetexchangepeeringsession/add_edit.html" class InternetExchangePeeringSessionBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_internetexchangepeeringsession" queryset = InternetExchangePeeringSession.objects.select_related( "autonomous_system" ) parent_object = BGPSession filter = InternetExchangePeeringSessionFilterSet table = InternetExchangePeeringSessionTable form = InternetExchangePeeringSessionBulkEditForm class InternetExchangePeeringSessionDetails(DetailsView): permission_required = "peering.view_internetexchangepeeringsession" queryset = InternetExchangePeeringSession.objects.all() def get_context(self, request, **kwargs): instance = get_object_or_404(self.queryset, 
**kwargs) return { "instance": instance, "is_abandoned": instance.is_abandoned(), "active_tab": "main", } class InternetExchangePeeringSessionEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_internetexchangepeeringsession" model = InternetExchangePeeringSession form = InternetExchangePeeringSessionForm template = "peering/internetexchangepeeringsession/add_edit.html" class InternetExchangePeeringSessionDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_internetexchangepeeringsession" model = InternetExchangePeeringSession class InternetExchangePeeringSessionAddFromPeeringDB( PermissionRequiredMixin, BulkAddFromDependencyView ): permission_required = "peering.add_internetexchangepeeringsession" model = InternetExchangePeeringSession dependency_model = NetworkIXLan form_model = InternetExchangePeeringSessionForm template = "peering/internetexchangepeeringsession/add_from_peeringdb.html" def process_dependency_object(self, request, dependency): try: affiliated = AutonomousSystem.objects.get( pk=request.user.preferences.get("context.as") ) except AutonomousSystem.DoesNotExist: return [] return InternetExchangePeeringSession.create_from_peeringdb( affiliated, dependency ) def sort_objects(self, object_list): objects = [] for object_couple in object_list: for o in object_couple: if o: objects.append( { "autonomous_system": o.autonomous_system, "ixp_connection": o.ixp_connection, "ip_address": o.ip_address, } ) return objects class InternetExchangePeeringSessionBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_internetexchangepeeringsession" model = InternetExchangePeeringSession filter = InternetExchangePeeringSessionFilterSet table = InternetExchangePeeringSessionTable def filter_by_extra_context(self, queryset, request, kwargs): # If we are on an Internet exchange context, filter the session with # the given IX if "internet_exchange_slug" in request.POST: internet_exchange_slug = request.POST.get("internet_exchange_slug") internet_exchange = get_object_or_404( InternetExchange, slug=internet_exchange_slug ) return queryset.filter(internet_exchange=internet_exchange) # If we are on an AutonomousSystem context, filter the session with # the given ASN if "asn" in request.POST: asn = request.POST.get("asn") autonomous_system = get_object_or_404(AutonomousSystem, asn=asn) return queryset.filter(autonomous_system=autonomous_system) # If we are on a Router context, filter the session with # the given Router ID if "router_id" in request.POST: router_id = int(request.POST.get("router_id")) router = get_object_or_404(Router, pk=router_id) return queryset.filter(internet_exchange__router=router) return queryset class RouterList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_router" queryset = ( Router.objects.annotate( connection_count=Count("connection", distinct=True), directpeeringsession_count=Count("directpeeringsession", distinct=True), ) .prefetch_related("configuration_template") .order_by("local_autonomous_system", "name") ) filter = RouterFilterSet filter_form = RouterFilterForm table = RouterTable template = "peering/router/list.html" class RouterAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_router" model = Router form = RouterForm return_url = "peering:router_list" template = "peering/router/add_edit.html" class RouterDetails(DetailsView): permission_required = "peering.view_router" queryset = Router.objects.all() def 
get_context(self, request, **kwargs): instance = get_object_or_404(Router, **kwargs) return { "instance": instance, "connections": Connection.objects.filter(router=instance), "active_tab": "main", } class RouterConfiguration(PermissionRequiredMixin, View): permission_required = "peering.view_router_configuration" def get(self, request, pk): return render( request, "peering/router/configuration.html", { "instance": get_object_or_404(Router, pk=pk), "active_tab": "configuration", }, ) class RouterEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_router" model = Router form = RouterForm template = "peering/router/add_edit.html" class RouterDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_router" model = Router return_url = "peering:router_list" class RouterBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_router" queryset = Router.objects.all() filter = RouterFilterSet table = RouterTable form = RouterBulkEditForm class RouterBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_router" model = Router filter = RouterFilterSet table = RouterTable class RouterConnections(PermissionRequiredMixin, ModelListView): permission_required = ("peering.view_router", "net.view_connection") table = RouterConnectionTable template = "peering/router/connections.html" def build_queryset(self, request, kwargs): instance = get_object_or_404(Router, pk=kwargs["pk"]) return Connection.objects.filter(router=instance) def extra_context(self, kwargs): return { "instance": get_object_or_404(Router, pk=kwargs["pk"]), "active_tab": "connections", } class RouterDirectPeeringSessions(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_router" filter = DirectPeeringSessionFilterSet filter_form = DirectPeeringSessionFilterForm table = DirectPeeringSessionTable template = "peering/router/direct_peering_sessions.html" def build_queryset(self, request, kwargs): queryset = None # The queryset needs to be composed of DirectPeeringSession objects # related to the AS we are looking at. if "pk" in kwargs: router = get_object_or_404(Router, pk=kwargs["pk"]) queryset = router.directpeeringsession_set.order_by( "relationship", "ip_address" ) return queryset def extra_context(self, kwargs): extra_context = {"active_tab": "directsessions"} # Since we are in the context of a Router we need to keep the reference # for it if "pk" in kwargs: router = get_object_or_404(Router, pk=kwargs["pk"]) extra_context.update({"router": router.pk, "instance": router}) return extra_context class RouterInternetExchangesPeeringSessions(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_router" filter = InternetExchangePeeringSessionFilterSet filter_form = InternetExchangePeeringSessionFilterForm table = InternetExchangePeeringSessionTable template = "peering/router/internet_exchange_peering_sessions.html" hidden_filters = ["router__id"] def build_queryset(self, request, kwargs): queryset = None # The queryset needs to be composed of InternetExchangePeeringSession objects # but they are linked to an AS. So first of all we need to retrieve the AS for # which we want to get the peering sessions. 
if "pk" in kwargs: queryset = InternetExchangePeeringSession.objects.filter( internet_exchange__router__id=kwargs["pk"] ).order_by("internet_exchange", "ip_address") return queryset def extra_context(self, kwargs): extra_context = {"active_tab": "ixsessions"} # Since we are in the context of a Router we need to keep the reference # for it if "pk" in kwargs: extra_context.update( {"instance": get_object_or_404(Router, pk=kwargs["pk"])} ) return extra_context class RoutingPolicyList(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_routingpolicy" queryset = RoutingPolicy.objects.all() filter = RoutingPolicyFilterSet filter_form = RoutingPolicyFilterForm table = RoutingPolicyTable template = "peering/routingpolicy/list.html" class RoutingPolicyAdd(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.add_routingpolicy" model = RoutingPolicy form = RoutingPolicyForm return_url = "peering:routingpolicy_list" template = "peering/routingpolicy/add_edit.html" class RoutingPolicyDetails(DetailsView): permission_required = "peering.view_routingpolicy" queryset = RoutingPolicy.objects.all() def get_context(self, request, **kwargs): return { "instance": get_object_or_404(RoutingPolicy, **kwargs), "active_tab": "main", } class RoutingPolicyEdit(PermissionRequiredMixin, AddOrEditView): permission_required = "peering.change_routingpolicy" model = RoutingPolicy form = RoutingPolicyForm template = "peering/routingpolicy/add_edit.html" class RoutingPolicyDelete(PermissionRequiredMixin, DeleteView): permission_required = "peering.delete_routingpolicy" model = RoutingPolicy return_url = "peering:routingpolicy_list" class RoutingPolicyBulkDelete(PermissionRequiredMixin, BulkDeleteView): permission_required = "peering.delete_routingpolicy" model = RoutingPolicy filter = RoutingPolicyFilterSet table = RoutingPolicyTable class RoutingPolicyBulkEdit(PermissionRequiredMixin, BulkEditView): permission_required = "peering.change_routingpolicy" queryset = RoutingPolicy.objects.all() filter = RoutingPolicyFilterSet table = RoutingPolicyTable form = RoutingPolicyBulkEditForm class ProvisioningAllAvailableIXPeers(PermissionRequiredMixin, ModelListView): permission_required = "peering.view_internetexchange" filter = NetworkIXLanFilterSet filter_form = NetworkIXLanFilterForm table = NetworkIXLanTable template = "peering/provisioning/peers.html" def build_queryset(self, request, kwargs): queryset = None for ixp in InternetExchange.objects.all(): if queryset is None: queryset = ixp.get_available_peers() else: queryset = queryset | ixp.get_available_peers() return queryset
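# Editor's sketch, not part of the original views: the build_queryset loop in
# ProvisioningAllAvailableIXPeers above unions per-IXP querysets with the |
# operator. functools.reduce expresses the same union more compactly, with an
# empty queryset as the identity so the no-IXP case needs no special branch
# (get_available_peers() is assumed to return NetworkIXLan querysets, as the
# NetworkIXLanTable above suggests).
import functools
import operator


def _all_available_peers_sketch():
    return functools.reduce(
        operator.or_,
        (ixp.get_available_peers() for ixp in InternetExchange.objects.all()),
        NetworkIXLan.objects.none(),
    )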
from __future__ import annotations import collections import datetime import functools import json import os import re import urllib.request from typing import Any from typing import Counter from typing import Mapping from typing import Pattern from typing import Sequence from bot.config import Config from bot.data import command from bot.data import esc from bot.data import format_msg from bot.message import Message from bot.ranking import tied_rank CHAT_ALIASES = { 'kevinsjoberg': 'kevinwritescode', 'kmjao': 'kevinwritescode', 'makayla_fox': 'marsha_socks', 'jast_lucy': 'snipsyfox', } CHAT_LOG_RE = re.compile( r'^\[[^]]+\][^<*]*(<(?P<chat_user>[^>]+)>|\* (?P<action_user>[^ ]+))', ) BONKER_RE = re.compile(r'^\[[^]]+\][^<*]*<(?P<chat_user>[^>]+)> !bonk\b') BONKED_RE = re.compile(r'^\[[^]]+\][^<*]*<[^>]+> !bonk @?(?P<chat_user>\w+)') def _alias(user: str) -> str: return CHAT_ALIASES.get(user, user) @functools.lru_cache(maxsize=None) def _counts_per_file(filename: str, reg: Pattern[str]) -> Mapping[str, int]: counts: Counter[str] = collections.Counter() with open(filename, encoding='utf8') as f: for line in f: match = reg.match(line) if match is None: assert reg is not CHAT_LOG_RE continue user = match['chat_user'] or match['action_user'] assert user, line counts[_alias(user.lower())] += 1 return counts def _chat_rank_counts(reg: Pattern[str]) -> Counter[str]: total: Counter[str] = collections.Counter() for filename in os.listdir('logs'): full_filename = os.path.join('logs', filename) if filename != f'{datetime.date.today()}.log': total.update(_counts_per_file(full_filename, reg)) else: # don't use the cached version for today's logs total.update(_counts_per_file.__wrapped__(full_filename, reg)) return total def _user_rank_by_line_type( username: str, reg: Pattern[str], ) -> tuple[int, int] | None: total = _chat_rank_counts(reg) target_username = username.lower() for rank, (count, users) in tied_rank(total.most_common()): for username, _ in users: if target_username == username: return rank, count else: return None def _top_n_rank_by_line_type(reg: Pattern[str], n: int = 10) -> list[str]: total = _chat_rank_counts(reg) user_list = [] for rank, (count, users) in tied_rank(total.most_common(n)): usernames = ', '.join(username for username, _ in users) user_list.append(f'{rank}. 
{usernames} ({count})') return user_list @functools.lru_cache(maxsize=1) def _log_start_date() -> str: logs_start = min(os.listdir('logs')) logs_start, _, _ = logs_start.partition('.') return logs_start @command('!chatrank') async def cmd_chatrank(config: Config, msg: Message) -> str: # TODO: handle display name user = msg.optional_user_arg.lower() ret = _user_rank_by_line_type(user, CHAT_LOG_RE) if ret is None: return format_msg(msg, f'user not found {esc(user)}') else: rank, n = ret return format_msg( msg, f'{esc(user)} is ranked #{rank} with {n} messages ' f'(since {_log_start_date()})', ) @command('!top10chat') async def cmd_top_10_chat(config: Config, msg: Message) -> str: top_10_s = ', '.join(_top_n_rank_by_line_type(CHAT_LOG_RE, n=10)) return format_msg(msg, f'{top_10_s} (since {_log_start_date()})') @command('!bonkrank', secret=True) async def cmd_bonkrank(config: Config, msg: Message) -> str: # TODO: handle display name user = msg.optional_user_arg.lower() ret = _user_rank_by_line_type(user, BONKER_RE) if ret is None: return format_msg(msg, f'user not found {esc(user)}') else: rank, n = ret return format_msg( msg, f'{esc(user)} is ranked #{rank}, has bonked others {n} times', ) @command('!top5bonkers', secret=True) async def cmd_top_5_bonkers(config: Config, msg: Message) -> str: top_5_s = ', '.join(_top_n_rank_by_line_type(BONKER_RE, n=5)) return format_msg(msg, top_5_s) @command('!bonkedrank', secret=True) async def cmd_bonkedrank(config: Config, msg: Message) -> str: # TODO: handle display name user = msg.optional_user_arg.lower() ret = _user_rank_by_line_type(user, BONKED_RE) if ret is None: return format_msg(msg, f'user not found {esc(user)}') else: rank, n = ret return format_msg( msg, f'{esc(user)} is ranked #{rank}, has been bonked {n} times', ) @command('!top5bonked', secret=True) async def cmd_top_5_bonked(config: Config, msg: Message) -> str: top_5_s = ', '.join(_top_n_rank_by_line_type(BONKED_RE, n=5)) return format_msg(msg, top_5_s) def lin_regr(x: Sequence[float], y: Sequence[float]) -> tuple[float, float]: sum_x = sum(x) sum_xx = sum(xi * xi for xi in x) sum_y = sum(y) sum_xy = sum(xi * yi for xi, yi in zip(x, y)) b = (sum_y * sum_xx - sum_x * sum_xy) / (len(x) * sum_xx - sum_x * sum_x) a = (sum_xy - b * sum_x) / sum_xx return a, b @command('!chatplot') async def cmd_chatplot(config: Config, msg: Message) -> str: # TODO: handle display name user_list = msg.optional_user_arg.lower().split() user_list = [_alias(user.lstrip('@')) for user in user_list] user_list = list(dict.fromkeys(user_list)) if len(user_list) > 2: return format_msg(msg, 'sorry, can only compare 2 users') min_date = datetime.date.fromisoformat(_log_start_date()) comp_users: dict[str, dict[str, list[int]]] comp_users = collections.defaultdict(lambda: {'x': [], 'y': []}) for filename in sorted(os.listdir('logs')): if filename == f'{datetime.date.today()}.log': continue filename_date = datetime.date.fromisoformat(filename.split('.')[0]) full_filename = os.path.join('logs', filename) counts = _counts_per_file(full_filename, CHAT_LOG_RE) for user in user_list: if counts[user]: comp_users[user]['x'].append((filename_date - min_date).days) comp_users[user]['y'].append(counts[user]) # create the datasets (scatter and trend line) for all users to compare PLOT_COLORS = ('#00a3ce', '#fab040') datasets: list[dict[str, Any]] = [] for user, color in zip(user_list, PLOT_COLORS): if len(comp_users[user]['x']) < 2: if len(user_list) > 1: return format_msg( msg, 'sorry, all users need at least 2 days of data', ) else: 
return format_msg( msg, f'sorry {esc(user)}, need at least 2 days of data', ) point_data = { 'label': f"{user}'s chats", 'borderColor': color, # add alpha to the point fill color 'backgroundColor': f'{color}69', 'data': [ {'x': x_i, 'y': y_i} for x_i, y_i in zip(comp_users[user]['x'], comp_users[user]['y']) if y_i ], } m, c = lin_regr(comp_users[user]['x'], comp_users[user]['y']) trend_data = { 'borderColor': color, 'type': 'line', 'fill': False, 'pointRadius': 0, 'data': [ { 'x': comp_users[user]['x'][0], 'y': m * comp_users[user]['x'][0] + c, }, { 'x': comp_users[user]['x'][-1], 'y': m * comp_users[user]['x'][-1] + c, }, ], } datasets.append(point_data) datasets.append(trend_data) # generate title checking if we are comparing users if len(user_list) > 1: title_user = "'s, ".join(user_list) title_user = f"{title_user}'s" else: title_user = f"{user_list[0]}'s" chart = { 'type': 'scatter', 'data': { 'datasets': datasets, }, 'options': { 'scales': { 'xAxes': [{'ticks': {'callback': 'CALLBACK'}}], 'yAxes': [{'ticks': {'beginAtZero': True, 'min': 0}}], }, 'title': { 'display': True, 'text': f'{title_user} chat in twitch.tv/{config.channel}', }, 'legend': { 'labels': {'filter': 'FILTER'}, }, }, } callback = ( 'x=>{' f'y=new Date({str(min_date)!r});' 'y.setDate(x+y.getDate());return y.toISOString().slice(0,10)' '}' ) # https://github.com/chartjs/Chart.js/issues/3189#issuecomment-528362213 filter = ( '(legendItem, chartData)=>{' ' return (chartData.datasets[legendItem.datasetIndex].label);' '}' ) data = json.dumps(chart, separators=(',', ':')) data = data.replace('"CALLBACK"', callback) data = data.replace('"FILTER"', filter) post_data = {'chart': data} request = urllib.request.Request( 'https://quickchart.io/chart/create', method='POST', data=json.dumps(post_data).encode(), headers={'Content-Type': 'application/json'}, ) resp = urllib.request.urlopen(request) contents = json.load(resp) user_esc = [esc(user) for user in user_list] if len(user_list) > 1: return format_msg( msg, f'comparing {", ".join(user_esc)}: {contents["url"]}', ) else: return format_msg(msg, f'{esc(user_esc[0])}: {contents["url"]}')
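# Editor's check, not part of the original module: lin_regr above solves the
# ordinary least-squares normal equations and returns (slope, intercept),
# which cmd_chatplot unpacks as m, c for the trend line. Points lying exactly
# on y = 2x + 1 recover the coefficients exactly.
assert lin_regr([0, 1, 2], [1, 3, 5]) == (2.0, 1.0)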
# Original work Copyright 2021-2022, Daniel Biehl (Apache License V2) # Original work Copyright 2016-2020 Robot Framework Foundation (Apache License V2) # See ThirdPartyNotices.txt in the project root for license information. # All modifications Copyright (c) Robocorp Technologies Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This was based on robot.utils.htmlformatters.HtmlFormatter from Robot Framework and adapted in robotCode (https://github.com/d-biehl/robotcode/blob/v0.5.5/robotcode/language_server/robotframework/utils/markdownformatter.py) to deal with Markdown. """ from __future__ import annotations import functools import itertools import re from abc import ABC, abstractmethod from typing import Any, Callable, Iterator, List, Optional, Tuple class Formatter(ABC): _strip_lines = True def __init__(self) -> None: self._lines: List[str] = [] def handles(self, line: str) -> bool: return self._handles(line.strip() if self._strip_lines else line) @abstractmethod def _handles(self, line: str) -> bool: ... def add(self, line: str) -> None: self._lines.append(line.strip() if self._strip_lines else line) def end(self) -> str: result = self.format(self._lines) self._lines = [] return result @abstractmethod def format(self, lines: List[str]) -> str: ... class MarkDownFormatter: def __init__(self) -> None: self._results: List[str] = [] self._formatters: List[Formatter] = [ TableFormatter(), PreformattedFormatter(), ListFormatter(), HeaderFormatter(), RulerFormatter(), ] self._formatters.append(ParagraphFormatter(self._formatters[:])) self._current: Optional[Formatter] = None def format(self, text: str) -> str: for line in text.splitlines(): self._process_line(line) self._end_current() return "\n".join(self._results) def _process_line(self, line: str) -> None: if not line.strip(): self._end_current() elif self._current and self._current.handles(line): self._current.add(line) else: self._end_current() self._current = self._find_formatter(line) if self._current is not None: self._current.add(line) def _end_current(self) -> None: if self._current: self._results.append(self._current.end()) self._current = None def _find_formatter(self, line: str) -> Optional[Formatter]: for formatter in self._formatters: if formatter.handles(line): return formatter return None class SingleLineFormatter(Formatter): def _handles(self, line: str) -> bool: return bool(not self._lines and self.match(line)) @abstractmethod def match(self, line: str) -> Optional[re.Match[str]]: ... def format(self, lines: List[str]) -> str: return self.format_line(lines[0]) @abstractmethod def format_line(self, line: str) -> str: ... 
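# Editor's note: each SingleLineFormatter subclass below claims exactly one
# line via match() and rewrites it in format_line(). HeaderFormatter, for
# example, maps a Robot Framework level-N heading to a level N+1 Markdown
# heading: "== Section ==" becomes "### Section".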
class HeaderFormatter(SingleLineFormatter): _regex = re.compile(r"^(={1,5})\s+(\S.*?)\s+\1$") def match(self, line: str) -> Optional[re.Match[str]]: return self._regex.match(line) def format_line(self, line: str) -> str: m = self.match(line) if m is not None: level, text = m.groups() return "%s %s\n" % ("#" * (len(level) + 1), text) return "" class LinkFormatter: _image_exts = (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".svg") _link = re.compile(r"\[(.+?\|.*?)\]") _url = re.compile( r""" ((^|\ ) ["'(\[{]*) # begin of line or space and opt. any char "'([{ ([a-z][\w+-.]*://[^\s|]+?) # url (?=[)\]}"'.,!?:;|]* ($|\ )) # opt. any char )]}"'.,!?:;| and eol or space """, re.VERBOSE | re.MULTILINE | re.IGNORECASE, ) def format_url(self, text: str) -> str: return self._format_url(text, format_as_image=False) def _format_url(self, text: str, format_as_image: bool = True) -> str: if "://" not in text: return text return self._url.sub( functools.partial(self._replace_url, format_as_image), text ) def _replace_url(self, format_as_image: bool, match: re.Match[str]) -> str: pre = match.group(1) url = match.group(3) if format_as_image and self._is_image(url): return pre + self._get_image(url) return pre + self._get_link(url) def _get_image(self, src: str, title: Optional[str] = None) -> str: return f"![{title or src}]({src})" def _get_link(self, href: str, content: Optional[str] = None) -> str: return f"[{content or href}]({href})" def _quot(self, attr: str) -> str: return attr if '"' not in attr else attr.replace('"', "&quot;") def format_link(self, text: str) -> str: # 2nd, 4th, etc. token contains link, others surrounding content tokens = self._link.split(text) formatters: Iterator[Callable[[str], Any]] = itertools.cycle( (self._format_url, self._format_link) ) return "".join(f(t) for f, t in zip(formatters, tokens)) def _format_link(self, text: str) -> str: link, content = [t.strip() for t in text.split("|", 1)] if self._is_image(content): content = self._get_image(content, link) elif self._is_image(link): return self._get_image(link, content) return self._get_link(link, content) def remove_link(self, text: str) -> str: # 2nd, 4th, etc. token contains link, others surrounding content tokens = self._link.split(text) if len(tokens) > 1: formatters: Iterator[Callable[[str], Any]] = itertools.cycle( [self._remove_link] ) return "".join(f(t) for f, t in zip(formatters, tokens)) return text def _remove_link(self, text: str) -> str: if "|" not in text: return text link, content = [t.strip() for t in text.split("|", 1)] if self._is_image(content): content = self._get_image(content, link) return content def _is_image(self, text: str) -> bool: return text.startswith("data:image/") or text.lower().endswith(self._image_exts) class LineFormatter: _bold = re.compile( r""" ( # prefix (group 1) (^|\ ) # begin of line or space ["'(]* _? # optionally any char "'( and optional begin of italic ) # \* # start of bold ([^\ ].*?) # no space and then anything (group 3) \* # end of bold (?= # start of postfix (non-capturing group) _? ["').,!?:;]* # optional end of italic and any char "').,!?:; ($|\ ) # end of line or space ) """, re.VERBOSE, ) _italic = re.compile( r""" ( (^|\ ) ["'(]* ) # begin of line or space and opt. any char "'( _ # start of italic ([^\ _].*?) # no space or underline and then anything _ # end of italic (?= ["').,!?:;]* ($|\ ) ) # opt. any char "').,!?:; and end of line or space """, re.VERBOSE, ) _code = re.compile( r""" ( (^|\ ) ["'(]* ) # same as above with _ changed to `` `` ([^\ `].*?) 
`` (?= ["').,!?:;]* ($|\ ) ) """, re.VERBOSE, ) def __init__(self) -> None: super().__init__() self._formatters: List[Tuple[str, Callable[[str], str]]] = [ ("<", self._quote_lower_then), ("#", self._quote_hash), ("*", self._format_bold), ("_", self._format_italic), ("``", self._format_code), ("", functools.partial(LinkFormatter().format_link)), ] def format(self, line: str) -> str: for marker, formatter in self._formatters: if marker in line: line = formatter(line) return line def _quote_lower_then(self, line: str) -> str: return line.replace("<", "\\<") def _quote_hash(self, line: str) -> str: return line.replace("#", "\\#") def _format_bold(self, line: str) -> str: return self._bold.sub("\\1**\\3**", line) def _format_italic(self, line: str) -> str: return self._italic.sub("\\1*\\3*", line) def _format_code(self, line: str) -> str: return self._code.sub("\\1`\\3`", line) class PreformattedFormatter(Formatter): _format_line = functools.partial(LineFormatter().format) def _handles(self, line: str) -> bool: return line.startswith("| ") or line == "|" def format(self, lines: List[str]) -> str: lines = [LinkFormatter().remove_link(line[2:]) for line in lines] return "```text\n" + "\n".join(lines) + "\n```\n" class ParagraphFormatter(Formatter): _format_line = functools.partial(LineFormatter().format) def __init__(self, other_formatters: List[Formatter]) -> None: super().__init__() self._other_formatters = other_formatters def _handles(self, line: str) -> bool: return not any(other.handles(line) for other in self._other_formatters) def format(self, lines: List[str]) -> str: return self._format_line(" ".join(lines)) + "\n\n" class ListFormatter(Formatter): _strip_lines = False _format_item = functools.partial(LineFormatter().format) def _handles(self, line: str) -> bool: return bool( line.strip().startswith("- ") or line.startswith(" ") and self._lines ) def format(self, lines: List[str]) -> str: items = [ "- %s" % self._format_item(line) for line in self._combine_lines(lines) ] return "\n".join(items) + "\n\n" def _combine_lines(self, lines: List[str]) -> Iterator[str]: current = [] for line in lines: line = line.strip() if not line.startswith("- "): current.append(line) continue if current: yield " ".join(current) current = [line[2:].strip()] yield " ".join(current) class RulerFormatter(SingleLineFormatter): regex = re.compile("^-{3,}$") def match(self, line: str) -> Optional[re.Match[str]]: return self.regex.match(line) def format_line(self, line: str) -> str: return "---" class TableFormatter(Formatter): _table_line = re.compile(r"^\| (.* |)\|$") _line_splitter = re.compile(r" \|(?= )") _format_cell_content = functools.partial(LineFormatter().format) def _handles(self, line: str) -> bool: return self._table_line.match(line) is not None def format(self, lines: List[str]) -> str: return self._format_table([self._split_to_cells(line) for line in lines]) def _split_to_cells(self, line: str) -> List[str]: return [cell.strip() for cell in self._line_splitter.split(line[1:-1])] def _format_table(self, rows: List[List[str]]) -> str: table = [] max_columns = max(len(row) for row in rows) try: header_rows = [ list( next( row for row in rows if any(cell for cell in row if cell.startswith("=")) ) ) ] except StopIteration: header_rows = [[]] body_rows = [row for row in rows if row not in header_rows] for row in header_rows or [[]]: row += [""] * (max_columns - len(row)) table.append(f'|{'|'.join(self._format_cell(cell) for cell in row)}|') row_ = [" :--- "] * max_columns 
table.append(f'|{'|'.join(row_)}|') for row in body_rows: row += [""] * (max_columns - len(row)) table.append(f'|{'|'.join(self._format_cell(cell) for cell in row)}|') return "\n".join(table) + "\n\n" def _format_cell(self, content: str) -> str: if content.startswith("=") and content.endswith("="): content = content[1:-1] return f" {self._format_cell_content(content).strip()} " def convert(robot_doc: str) -> str: return MarkDownFormatter().format(robot_doc)
# Original work Copyright 2021-2022, Daniel Biehl (Apache License V2) # Original work Copyright 2016-2020 Robot Framework Foundation (Apache License V2) # See ThirdPartyNotices.txt in the project root for license information. # All modifications Copyright (c) Robocorp Technologies Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This was based on robot.utils.htmlformatters.HtmlFormatter from Robot Framework and adapted in robotCode (https://github.com/d-biehl/robotcode/blob/v0.5.5/robotcode/language_server/robotframework/utils/markdownformatter.py) to deal with Markdown. """ from __future__ import annotations import functools import itertools import re from abc import ABC, abstractmethod from typing import Any, Callable, Iterator, List, Optional, Tuple class Formatter(ABC): _strip_lines = True def __init__(self) -> None: self._lines: List[str] = [] def handles(self, line: str) -> bool: return self._handles(line.strip() if self._strip_lines else line) @abstractmethod def _handles(self, line: str) -> bool: ... def add(self, line: str) -> None: self._lines.append(line.strip() if self._strip_lines else line) def end(self) -> str: result = self.format(self._lines) self._lines = [] return result @abstractmethod def format(self, lines: List[str]) -> str: ... class MarkDownFormatter: def __init__(self) -> None: self._results: List[str] = [] self._formatters: List[Formatter] = [ TableFormatter(), PreformattedFormatter(), ListFormatter(), HeaderFormatter(), RulerFormatter(), ] self._formatters.append(ParagraphFormatter(self._formatters[:])) self._current: Optional[Formatter] = None def format(self, text: str) -> str: for line in text.splitlines(): self._process_line(line) self._end_current() return "\n".join(self._results) def _process_line(self, line: str) -> None: if not line.strip(): self._end_current() elif self._current and self._current.handles(line): self._current.add(line) else: self._end_current() self._current = self._find_formatter(line) if self._current is not None: self._current.add(line) def _end_current(self) -> None: if self._current: self._results.append(self._current.end()) self._current = None def _find_formatter(self, line: str) -> Optional[Formatter]: for formatter in self._formatters: if formatter.handles(line): return formatter return None class SingleLineFormatter(Formatter): def _handles(self, line: str) -> bool: return bool(not self._lines and self.match(line)) @abstractmethod def match(self, line: str) -> Optional[re.Match[str]]: ... def format(self, lines: List[str]) -> str: return self.format_line(lines[0]) @abstractmethod def format_line(self, line: str) -> str: ... 
class HeaderFormatter(SingleLineFormatter): _regex = re.compile(r"^(={1,5})\s+(\S.*?)\s+\1$") def match(self, line: str) -> Optional[re.Match[str]]: return self._regex.match(line) def format_line(self, line: str) -> str: m = self.match(line) if m is not None: level, text = m.groups() return "%s %s\n" % ("#" * (len(level) + 1), text) return "" class LinkFormatter: _image_exts = (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".svg") _link = re.compile(r"\[(.+?\|.*?)\]") _url = re.compile( r""" ((^|\ ) ["'(\[{]*) # begin of line or space and opt. any char "'([{ ([a-z][\w+-.]*://[^\s|]+?) # url (?=[)\]}"'.,!?:;|]* ($|\ )) # opt. any char )]}"'.,!?:;| and eol or space """, re.VERBOSE | re.MULTILINE | re.IGNORECASE, ) def format_url(self, text: str) -> str: return self._format_url(text, format_as_image=False) def _format_url(self, text: str, format_as_image: bool = True) -> str: if "://" not in text: return text return self._url.sub( functools.partial(self._replace_url, format_as_image), text ) def _replace_url(self, format_as_image: bool, match: re.Match[str]) -> str: pre = match.group(1) url = match.group(3) if format_as_image and self._is_image(url): return pre + self._get_image(url) return pre + self._get_link(url) def _get_image(self, src: str, title: Optional[str] = None) -> str: return f"![{title or src}]({src})" def _get_link(self, href: str, content: Optional[str] = None) -> str: return f"[{content or href}]({href})" def _quot(self, attr: str) -> str: return attr if '"' not in attr else attr.replace('"', "&quot;") def format_link(self, text: str) -> str: # 2nd, 4th, etc. token contains link, others surrounding content tokens = self._link.split(text) formatters: Iterator[Callable[[str], Any]] = itertools.cycle( (self._format_url, self._format_link) ) return "".join(f(t) for f, t in zip(formatters, tokens)) def _format_link(self, text: str) -> str: link, content = [t.strip() for t in text.split("|", 1)] if self._is_image(content): content = self._get_image(content, link) elif self._is_image(link): return self._get_image(link, content) return self._get_link(link, content) def remove_link(self, text: str) -> str: # 2nd, 4th, etc. token contains link, others surrounding content tokens = self._link.split(text) if len(tokens) > 1: formatters: Iterator[Callable[[str], Any]] = itertools.cycle( [self._remove_link] ) return "".join(f(t) for f, t in zip(formatters, tokens)) return text def _remove_link(self, text: str) -> str: if "|" not in text: return text link, content = [t.strip() for t in text.split("|", 1)] if self._is_image(content): content = self._get_image(content, link) return content def _is_image(self, text: str) -> bool: return text.startswith("data:image/") or text.lower().endswith(self._image_exts) class LineFormatter: _bold = re.compile( r""" ( # prefix (group 1) (^|\ ) # begin of line or space ["'(]* _? # optionally any char "'( and optional begin of italic ) # \* # start of bold ([^\ ].*?) # no space and then anything (group 3) \* # end of bold (?= # start of postfix (non-capturing group) _? ["').,!?:;]* # optional end of italic and any char "').,!?:; ($|\ ) # end of line or space ) """, re.VERBOSE, ) _italic = re.compile( r""" ( (^|\ ) ["'(]* ) # begin of line or space and opt. any char "'( _ # start of italic ([^\ _].*?) # no space or underline and then anything _ # end of italic (?= ["').,!?:;]* ($|\ ) ) # opt. any char "').,!?:; and end of line or space """, re.VERBOSE, ) _code = re.compile( r""" ( (^|\ ) ["'(]* ) # same as above with _ changed to `` `` ([^\ `].*?) 
`` (?= ["').,!?:;]* ($|\ ) ) """, re.VERBOSE, ) def __init__(self) -> None: super().__init__() self._formatters: List[Tuple[str, Callable[[str], str]]] = [ ("<", self._quote_lower_then), ("#", self._quote_hash), ("*", self._format_bold), ("_", self._format_italic), ("``", self._format_code), ("", functools.partial(LinkFormatter().format_link)), ] def format(self, line: str) -> str: for marker, formatter in self._formatters: if marker in line: line = formatter(line) return line def _quote_lower_then(self, line: str) -> str: return line.replace("<", "\\<") def _quote_hash(self, line: str) -> str: return line.replace("#", "\\#") def _format_bold(self, line: str) -> str: return self._bold.sub("\\1**\\3**", line) def _format_italic(self, line: str) -> str: return self._italic.sub("\\1*\\3*", line) def _format_code(self, line: str) -> str: return self._code.sub("\\1`\\3`", line) class PreformattedFormatter(Formatter): _format_line = functools.partial(LineFormatter().format) def _handles(self, line: str) -> bool: return line.startswith("| ") or line == "|" def format(self, lines: List[str]) -> str: lines = [LinkFormatter().remove_link(line[2:]) for line in lines] return "```text\n" + "\n".join(lines) + "\n```\n" class ParagraphFormatter(Formatter): _format_line = functools.partial(LineFormatter().format) def __init__(self, other_formatters: List[Formatter]) -> None: super().__init__() self._other_formatters = other_formatters def _handles(self, line: str) -> bool: return not any(other.handles(line) for other in self._other_formatters) def format(self, lines: List[str]) -> str: return self._format_line(" ".join(lines)) + "\n\n" class ListFormatter(Formatter): _strip_lines = False _format_item = functools.partial(LineFormatter().format) def _handles(self, line: str) -> bool: return bool( line.strip().startswith("- ") or line.startswith(" ") and self._lines ) def format(self, lines: List[str]) -> str: items = [ "- %s" % self._format_item(line) for line in self._combine_lines(lines) ] return "\n".join(items) + "\n\n" def _combine_lines(self, lines: List[str]) -> Iterator[str]: current = [] for line in lines: line = line.strip() if not line.startswith("- "): current.append(line) continue if current: yield " ".join(current) current = [line[2:].strip()] yield " ".join(current) class RulerFormatter(SingleLineFormatter): regex = re.compile("^-{3,}$") def match(self, line: str) -> Optional[re.Match[str]]: return self.regex.match(line) def format_line(self, line: str) -> str: return "---" class TableFormatter(Formatter): _table_line = re.compile(r"^\| (.* |)\|$") _line_splitter = re.compile(r" \|(?= )") _format_cell_content = functools.partial(LineFormatter().format) def _handles(self, line: str) -> bool: return self._table_line.match(line) is not None def format(self, lines: List[str]) -> str: return self._format_table([self._split_to_cells(line) for line in lines]) def _split_to_cells(self, line: str) -> List[str]: return [cell.strip() for cell in self._line_splitter.split(line[1:-1])] def _format_table(self, rows: List[List[str]]) -> str: table = [] max_columns = max(len(row) for row in rows) try: header_rows = [ list( next( row for row in rows if any(cell for cell in row if cell.startswith("=")) ) ) ] except StopIteration: header_rows = [[]] body_rows = [row for row in rows if row not in header_rows] for row in header_rows or [[]]: row += [""] * (max_columns - len(row)) table.append(f'|{"|".join(self._format_cell(cell) for cell in row)}|') row_ = [" :--- "] * max_columns 
table.append(f'|{"|".join(row_)}|') for row in body_rows: row += [""] * (max_columns - len(row)) table.append(f'|{"|".join(self._format_cell(cell) for cell in row)}|') return "\n".join(table) + "\n\n" def _format_cell(self, content: str) -> str: if content.startswith("=") and content.endswith("="): content = content[1:-1] return f" {self._format_cell_content(content).strip()} " def convert(robot_doc: str) -> str: return MarkDownFormatter().format(robot_doc)
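# A minimal usage sketch (an assumed entry point, not part of the original
# module) showing what `convert` does with Robot Framework documentation
# markup: `= ... =` headers become `##`, `*bold*`/`_italic_` become
# `**bold**`/`*italic*`, and `[url|text]` links become `[text](url)`.
if __name__ == "__main__":
    robot_doc = "= Intro =\n\nThis is *bold*, _italic_ and a [https://example.com|link]."
    print(convert(robot_doc))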
""" ASGI config for hear_me_django_app project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/dev/howto/deployment/asgi/ """ import os import sys from pathlib import Path from django.core.asgi import get_asgi_application # This allows easy placement of apps within the interior # hear_me_django_app directory. ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent sys.path.append(str(ROOT_DIR / "hear_me_django_app")) # If DJANGO_SETTINGS_MODULE is unset, default to the local settings os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local") # This application object is used by any ASGI server configured to use this file. django_application = get_asgi_application() # Apply ASGI middleware here. # from helloworld.asgi import HelloWorldApplication # application = HelloWorldApplication(application) # Import websocket application here, so apps from django_application are loaded first from config.websocket import websocket_application # noqa isort:skip async def application(scope, receive, send): if scope["type"] == "http": await django_application(scope, receive, send) elif scope["type"] == "websocket": await websocket_application(scope, receive, send) else: raise NotImplementedError(f"Unknown scope type {scope["type"]}")
""" ASGI config for hear_me_django_app project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/dev/howto/deployment/asgi/ """ import os import sys from pathlib import Path from django.core.asgi import get_asgi_application # This allows easy placement of apps within the interior # hear_me_django_app directory. ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent sys.path.append(str(ROOT_DIR / "hear_me_django_app")) # If DJANGO_SETTINGS_MODULE is unset, default to the local settings os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local") # This application object is used by any ASGI server configured to use this file. django_application = get_asgi_application() # Apply ASGI middleware here. # from helloworld.asgi import HelloWorldApplication # application = HelloWorldApplication(application) # Import websocket application here, so apps from django_application are loaded first from config.websocket import websocket_application # noqa isort:skip async def application(scope, receive, send): if scope["type"] == "http": await django_application(scope, receive, send) elif scope["type"] == "websocket": await websocket_application(scope, receive, send) else: raise NotImplementedError(f"Unknown scope type {scope['type']}")
#!/usr/bin/env python3 from PIL import Image, ImageDraw, ImageFont, IptcImagePlugin, ExifTags from datetime import datetime from pathlib import Path import imghdr import time # Check if 'desired' directory exists, if not, create it! def dir_check_make(dir_path, dir_name): if not Path(dir_path).is_dir(): dir_path.mkdir() print(f'Directory {dir_name} Created\n') else: print(f'Directory {dir_name} already exists\n') def get_image_dimensions(pil_img): width, height = pil_img.size desired_margin = int((height / 10) + 10) desired_caption_font_size = int(desired_margin / 2) desired_date_font_size = int(desired_margin / 4) desired_date_y_value = int(desired_margin / 2.5) return desired_margin, desired_caption_font_size, desired_date_font_size, desired_date_y_value # We can user getter function to get values from specific IIM codes # https://iptc.org/std/photometadata/specification/IPTC-PhotoMetadata def get_caption(image): iptc = IptcImagePlugin.getiptcinfo(image) return iptc.get((2, 5)).decode() def get_date(image): for DateTimeOriginal in ExifTags.TAGS.keys(): if ExifTags.TAGS[DateTimeOriginal] == 'DateTimeOriginal': break exif = dict(image._getexif().items()) date = str(exif[DateTimeOriginal]) my_date = datetime.strptime(date, "%Y:%m:%d %H:%M:%S") # Return date to be printed return f'{str(my_date.strftime("%B"))} {str(my_date.year)}' def add_margin(image, top, right, bottom, left, color): width, height = image.size new_width = width + right + left new_height = height + top + bottom image_with_margin = Image.new(image.mode, (new_width, new_height), color) image_with_margin.paste(image, (left, top)) return image_with_margin def add_caption(image, caption, desired_caption_font_size, desired_margin): width, height = image.size font = ImageFont.truetype("Avenir.ttc", desired_caption_font_size) draw = ImageDraw.Draw(image) text_width, text_height = draw.textsize(caption, font=font) text_slim_count = 0 if text_width > width: while text_width > (width - 300): text_slim_count += 1 print('Text width too great for image, slimming', desired_caption_font_size) desired_caption_font_size -= 2 font = ImageFont.truetype("Avenir.ttc", desired_caption_font_size) text_width, text_height = draw.textsize(caption, font=font) image_with_caption = draw.text(((width - text_width) / 2, height - (desired_margin - text_slim_count)), caption, (0, 0, 0), font=font) else: image_with_caption = draw.text(((width - text_width) / 2, height - desired_margin), caption, (0, 0, 0), font=font) return image_with_caption def add_date(image, date, desired_date_y_value, desired_date_font_size): date = str(date) width, height = image.size font = ImageFont.truetype("Avenir.ttc", desired_date_font_size) draw = ImageDraw.Draw(image) text_width, text_height = draw.textsize(date, font=font) image_with_date = draw.text(((width - text_width) / 2, height - desired_date_y_value), date, (0, 0, 0), font=font) return image_with_date def captioneer(): # Configure variables output_dir_name = 'newly_stamped_images' output_dir = Path.cwd() / output_dir_name image_files = [] sub_dirs = [] process_iteration_counter = 1 for sub_dir in Path.cwd().iterdir(): if sub_dir.is_dir(): # Skip over pre-existing newly_stamped_images folder if sub_dir.name == output_dir_name: pass else: sub_dirs.append(sub_dir) for filename in Path(sub_dir).iterdir(): if filename.is_dir(): continue image_type = imghdr.what(filename) if image_type == 'jpeg': image_files.append(filename) total_images = len(image_files) print(f''' Subdirectories: {len(sub_dirs)} Image files: {total_images} 
''') proceed_question = input( 'Do you wish to stamp these images with Captions and dates? [Y]es/No ') # Moment to breath before execution, review the total number of files and directories if proceed_question.casefold() == 'no' or proceed_question.casefold() == 'n': exit() # Capture process start time process_start_time = time.time() # Create an output directory for the 'newly stamped images' dir_check_make(output_dir, output_dir_name) for sub_dir in sorted(sub_dirs): print( f'{"#" * ((len(str(sub_dir.name)))+38)}\n' f'Processing images from subdirectory "{sub_dir.name}"\n' f'{"#" * ((len(str(sub_dir.name)))+38)}\n') output_sub_dir = output_dir / sub_dir.name # print(outputSubDir) try: dir_check_make(output_sub_dir, sub_dir.name) sub_dir_image_files = [] except Exception: print(f'subdirectory creation failed') try: for filename in Path(sub_dir).iterdir(): image_type = imghdr.what(filename) # Optionally print identified file type for all files within subDir # print(imageType) if image_type == 'jpeg': sub_dir_image_files.append(filename) for filename in sorted(sub_dir_image_files): try: img = Image.open(filename) print(f'{filename.name}\n{"-" * len(filename.name)}\n' f'{process_iteration_counter} of {total_images}') process_iteration_counter += 1 except Exception as e: print(e) print(f'Open image {filename} failed') try: caption = get_caption(img) print(f'Caption: {caption}') except Exception: caption = sub_dir.name print( f'{"+" * (int((len(str(caption)))/2)-2)} NO IPTC DATA {"+" * (int((len(str(caption)))/2)-2)}\n' f'Using subdirectory name instead\n' f'Caption: {sub_dir.name}\n' f'{"+" * ((len(str(caption)))+10)}\n') try: date = get_date(img) print(f'Date: {date}') except Exception: print( f'------------------------------------------------------------\n' f'-------------------- NO DATE AVAILABLE ---------------------\n' f'------------------------------------------------------------\n') manual_date = input( f'Manual date entry required!\n' f'Enter date as you wish text to appear on stamped photo\n' f'"Month Year", "January 2021" or "November 1980" (for example)\n' f'Please enter date: ') date = str(manual_date) desired_margin, desired_caption_font_size, desired_date_font_size, desired_date_y_value = get_image_dimensions(img) try: image_output = add_margin(img, 0, 0, desired_margin, 0, (255, 255, 255)) add_caption(image_output, caption, desired_caption_font_size, desired_margin) add_date(image_output, date, desired_date_y_value, desired_date_font_size) except Exception as e: print( f'margin/caption/date failure\n' f'{e}') try: output_filename = f'{filename.stem}-stamped.jpg' image_output.save(output_sub_dir / output_filename, quality=95) print('\n') except Exception as e: print( f'Adding border and filename to image {filename} failed\n' f'{e}') except Exception as e: print(e) pass process_complete_time = time.time() process_duration = process_complete_time - process_start_time print(f''' ############################# PROCESS COMPLETE ############################# +=+=+=+=+ Summary +=+=+=+=+ Sub-directories: {len(sub_dirs)} Files processed: {total_images} Process duration: {int(process_duration)} seconds for an Average of: {round(process_duration / total_images, 2)} seconds per image ''') if __name__ == '__main__': captioneer()
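# A small self-check sketch (illustrative only, added here; assumes Pillow
# alone) of the layout maths above: for a 3000x2000 image,
# get_image_dimensions yields a 210px bottom margin (height / 10 + 10), and
# add_margin grows only the canvas height, never the width.
def _margin_self_check():
    img = Image.new('RGB', (3000, 2000), (128, 128, 128))
    margin, caption_size, date_size, date_y = get_image_dimensions(img)
    assert (margin, caption_size, date_size, date_y) == (210, 105, 52, 84)
    framed = add_margin(img, 0, 0, margin, 0, (255, 255, 255))
    assert framed.size == (3000, 2000 + margin)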
import os import h5py import yaml import logging import numpy as np from PIL import Image from scipy.spatial.transform import Rotation as R from progress.bar import Bar from multiprocessing import Pool, cpu_count from omegaconf import OmegaConf from tools.utils import io # from tools.visualization import Viewer from utils import DataLoader, URDFReader, DatasetName log = logging.getLogger('proc_stage1') class ProcStage1Impl: def __init__(self, cfg): self.output_path = cfg.output_path self.tmp_dir = cfg.tmp_dir self.render_cfg = cfg.render_cfg self.rest_state_data_filename = cfg.rest_state_data_filename self.width = self.render_cfg.width self.height = self.render_cfg.height self.dataset_name = cfg.dataset_name def get_metadata(self, metadata_path, frame_index, num_parts): metadata = {} if DatasetName[self.dataset_name] == DatasetName.SAPIEN or \ DatasetName[self.dataset_name] == DatasetName.SHAPE2MOTION: with open(metadata_path, "r") as meta_file: metadata_all = yaml.load(meta_file, Loader=yaml.Loader) frame_metadata = metadata_all[f'frame_{frame_index}'] metadata = { 'projMat': np.reshape(frame_metadata['projMat'], (4, 4), order='F'), 'viewMat': np.reshape(frame_metadata['viewMat'], (4, 4), order='F'), 'linkAbsPoses': [] } num_links = len(frame_metadata['obj']) if num_links < num_parts: metadata['linkAbsPoses'].append(np.eye(4)) for link_idx in range(num_links): position = frame_metadata['obj'][link_idx][4] # x,y,z,w quaternion = frame_metadata['obj'][link_idx][5] orientation = R.from_quat(quaternion).as_matrix() pose = np.eye(4) pose[:3, :3] = orientation pose[:3, 3] = position metadata['linkAbsPoses'].append(pose) return metadata def __call__(self, idx, input_data): output_filepath = os.path.splitext(self.output_path)[0] + f'_{idx}' + os.path.splitext(self.output_path)[-1] h5file = h5py.File(output_filepath, 'w') bar = Bar(f'Stage1 Processing chunk {idx}', max=len(input_data)) for index, input_each in input_data.iterrows(): depth_frame_path = os.path.join(self.render_cfg.render_dir, input_each['objectCat'], input_each['objectId'], input_each['articulationId'], self.render_cfg.depth_folder, input_each['depthFrame']) mask_frame_path = os.path.join(self.render_cfg.render_dir, input_each['objectCat'], input_each['objectId'], input_each['articulationId'], self.render_cfg.mask_folder, input_each['maskFrame']) metadata_path = os.path.join(self.render_cfg.render_dir, input_each['objectCat'], input_each['objectId'], input_each['articulationId'], input_each['metadata']) tmp_data_dir = os.path.join(self.tmp_dir, input_each['objectCat'], input_each['objectId']) rest_state_data_path = os.path.join(tmp_data_dir, self.rest_state_data_filename) frame_index = int(input_each['depthFrame'].split(self.render_cfg.depth_ext)[0]) # float32 depth buffer, range from 0 to 1 depth_data = np.array(h5py.File(depth_frame_path, "r")["data"]).flatten() # uint8 mask, invalid value is 255 mask_frame = np.asarray(Image.open(mask_frame_path)) rest_data_data = io.read_json(rest_state_data_path) num_parts = len([link for link in rest_data_data['links'] if link if not link['virtual']]) assert depth_data.size == mask_frame.size metadata = self.get_metadata(metadata_path, frame_index, num_parts) x_range = np.linspace(-1, 1, self.width) y_range = np.linspace(1, -1, self.height) x, y = np.meshgrid(x_range, y_range) x = x.flatten() y = y.flatten() z = 2.0 * depth_data - 1.0 # shape nx4 points_tmp = np.column_stack((x, y, z, np.ones(self.height * self.width))) mask_tmp = mask_frame.flatten() # points in clip space points_clip = 
points_tmp[mask_tmp < 255] link_mask = mask_tmp[mask_tmp < 255] # check if unique value in mask match num parts assert points_clip.shape[0] == link_mask.shape[0] proj_mat = metadata['projMat'] view_mat = metadata['viewMat'] # transform points from clip space to camera space # shape 4xn points_camera = np.dot(np.linalg.inv(proj_mat), points_clip.transpose()) # homogeneous normalization points_camera = points_camera / points_camera[-1, :] # shape 4xn points_world = np.dot(np.linalg.inv(view_mat), points_camera) # transform links to rest state points_rest_state = np.empty_like(points_world) parts_camera2rest_state = [] for link_idx, link in enumerate(rest_data_data['links']): if link['virtual']: continue link_points_world = points_world[:, link_mask == link_idx] # virtual link link_index is -1 current_part_pose = metadata['linkAbsPoses'][link['part_index']] rest_state_pose = np.reshape(link['abs_pose'], (4, 4), order='F') transform2rest_state = np.dot(rest_state_pose, np.linalg.inv(current_part_pose)) link_points_rest_state = np.dot(transform2rest_state, link_points_world) points_rest_state[:, link_mask == link_idx] = link_points_rest_state # points in camera space to rest state camera2rest_state = np.dot(transform2rest_state, np.linalg.inv(view_mat)) # shape num parts x 16 parts_camera2rest_state.append(camera2rest_state.flatten('F')) parts_camera2rest_state = np.asarray(parts_camera2rest_state) # shape nx3 points_camera_p3 = points_camera.transpose()[:, :3] points_world_p3 = points_world.transpose()[:, :3] points_rest_state_p3 = points_rest_state.transpose()[:, :3] camera2base_matrix = np.linalg.inv(view_mat).flatten('F') instance_name = f'{input_each["objectCat"]}_{input_each["objectId"]}_{input_each["articulationId"]}_{str(frame_index)}' h5frame = h5file.require_group(instance_name) h5frame.create_dataset("mask", shape=link_mask.shape, data=link_mask, compression="gzip") h5frame.create_dataset("points_camera", shape=points_camera_p3.shape, data=points_camera_p3, compression="gzip") h5frame.create_dataset("points_rest_state", shape=points_rest_state_p3.shape, data=points_rest_state_p3, compression="gzip") h5frame.create_dataset("parts_transformation", shape=parts_camera2rest_state.shape, data=parts_camera2rest_state, compression="gzip") h5frame.create_dataset("base_transformation", shape=camera2base_matrix.shape, data=camera2base_matrix, compression="gzip") bar.next() bar.finish() h5file.close() return output_filepath class ProcStage1: def __init__(self, cfg): self.cfg = cfg self.data_loader = DataLoader(cfg) self.data_loader.parse_input() self.input_cfg = self.cfg.paths.preprocess.stage1.input self.tmp_output = self.cfg.paths.preprocess.stage1.tmp_output self.output_cfg = self.cfg.paths.preprocess.stage1.output self.height = self.cfg.dataset.param.height self.width = self.cfg.dataset.param.width self.debug = self.cfg.debug def preprocess_motion_data(self, motion_data_df): bar = Bar('Stage1 Parse Motion Data', max=len(motion_data_df)) for index, motion_data in motion_data_df.iterrows(): motion_file_path = os.path.join(self.data_loader.motion_dir, motion_data['objectCat'], motion_data['objectId'], motion_data['motion']) assert io.file_exist(motion_file_path), f'Can not found Motion file {motion_file_path}!' 
if DatasetName[self.cfg.dataset.name] == DatasetName.SAPIEN or \ DatasetName[self.cfg.dataset.name] == DatasetName.SHAPE2MOTION: urdf_reader = URDFReader(motion_file_path) tmp_data_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name, motion_data['objectCat'], motion_data['objectId']) urdf_reader.export( result_data_path=tmp_data_dir, rest_state_data_filename=self.tmp_output.rest_state_data, rest_state_mesh_filename=self.tmp_output.rest_state_mesh ) bar.next() bar.finish() def process(self): input_data = self.data_loader.data_info io.ensure_dir_exists(self.cfg.paths.preprocess.tmp_dir) input_data.to_csv(os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.input_files)) motion_data_df = input_data.drop_duplicates(subset=['objectCat', 'objectId', 'motion']) self.preprocess_motion_data(motion_data_df) io.ensure_dir_exists(self.cfg.paths.preprocess.output_dir) num_processes = min(cpu_count(), self.cfg.num_workers) # calculate the chunk size chunk_size = max(1, int(input_data.shape[0] / num_processes)) chunks = [input_data.iloc[input_data.index[i:i + chunk_size]] for i in range(0, input_data.shape[0], chunk_size)] log.info(f'Stage1 Processing Start with {num_processes} workers and {len(chunks)} chunks') config = OmegaConf.create() config.output_path = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name, self.output_cfg.pcd_data) config.tmp_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name) render_cfg = OmegaConf.create() render_cfg.width = self.width render_cfg.height = self.height render_cfg.render_dir = self.data_loader.render_dir render_cfg.depth_ext = self.input_cfg.render.depth_ext render_cfg.mask_ext = self.input_cfg.render.mask_ext render_cfg.depth_folder = self.input_cfg.render.depth_folder render_cfg.mask_folder = self.input_cfg.render.mask_folder config.render_cfg = render_cfg config.rest_state_data_filename = self.tmp_output.rest_state_data config.dataset_name = self.cfg.dataset.name with Pool(processes=num_processes) as pool: proc_impl = ProcStage1Impl(config) output_filepath_list = pool.starmap(proc_impl, enumerate(chunks)) output_file_path = os.path.join(self.cfg.paths.preprocess.output_dir, self.output_cfg.pcd_data) h5file = h5py.File(output_file_path, 'w') for filepath in output_filepath_list: with h5py.File(filepath, 'r') as h5f: for key in h5f.keys(): h5f.copy(key, h5file) h5file.close() # if self.debug: # tmp_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name) # with h5py.File(output_file_path, 'r') as h5file: # bar = Bar('Stage1 Visualization', max=len(h5file.keys())) # for key in h5file.keys(): # h5group = h5file[key] # folder_names = key.split('_') # viz_output_dir = os.path.join(tmp_dir, folder_names[0], folder_names[1], folder_names[2]) # viz_output_filename = key # viz_output_path = os.path.join(viz_output_dir, viz_output_filename) # viewer = Viewer(h5group['points_camera'][:], mask=h5group['mask'][:]) # if self.cfg.show: # viewer.show(window_name=viz_output_filename + '_points_camera') # else: # viewer.render(fig_path=viz_output_path + '_points_camera.jpg') # if self.cfg.export: # viewer.export(mesh_path=viz_output_path + '_points_camera.ply') # viewer.reset() # viewer.add_geometry(h5group['points_rest_state'][:], mask=h5group['mask'][:]) # if self.cfg.show: # viewer.show(window_name=viz_output_filename + '_points_rest_state') # else: # viewer.render(fig_path=viz_output_path + '_points_rest_state.jpg') # if self.cfg.export: # 
viewer.export(mesh_path=viz_output_path + '_points_rest_state.ply') # del viewer # bar.next() # bar.finish()
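# A standalone sketch (illustrative only; the matrices below are stand-ins,
# not real render output) of the unprojection performed in
# ProcStage1Impl.__call__: NDC points [x, y, 2*depth-1, 1] return to camera
# space via the inverse projection matrix plus a homogeneous divide, then to
# world space via the inverse view matrix.
def _unproject_example():
    proj_mat = np.diag([1.0, 1.0, -1.0, 1.0])        # stand-in projection
    view_mat = np.eye(4)
    view_mat[2, 3] = -2.0                            # camera 2 units back
    points_clip = np.array([[0.0, 0.0, 0.5, 1.0]])   # one NDC point, shape nx4
    points_camera = np.linalg.inv(proj_mat) @ points_clip.T
    points_camera = points_camera / points_camera[-1, :]  # homogeneous divide
    points_world = np.linalg.inv(view_mat) @ points_camera
    return points_world[:3].T                        # shape nx3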
# -*- coding: utf-8 -*-
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db.models import ManyToManyRel, OneToOneRel
from django.db.models.fields.related import RelatedField, lazy_related_operation
from django.db.models.query_utils import PathInfo
from django.utils.translation import gettext_lazy as _

from .managers import CategoryManager
from .models import CategoryItem


class RestrictByContentType:
    """
    An extra restriction used for contenttype restriction in joins.
    """

    contains_aggregate = False

    def __init__(self, alias, col, content_types):
        self.alias = alias
        self.col = col
        self.content_types = content_types

    def as_sql(self, compiler, connection):
        qn = compiler.quote_name_unless_alias
        if len(self.content_types) == 1:
            extra_where = f"{qn(self.alias)}.{qn(self.col)} = %s"
        else:
            placeholders = ",".join(["%s"] * len(self.content_types))
            extra_where = f"{qn(self.alias)}.{qn(self.col)} IN ({placeholders})"
        return extra_where, self.content_types

    def relabel_aliases(self, change_map):
        self.alias = change_map.get(self.alias, self.alias)

    def clone(self):
        return type(self)(self.alias, self.col, self.content_types[:])


# noinspection PyProtectedMember
class CategoryField(RelatedField):
    # Field flags
    many_to_many = True
    many_to_one = False
    one_to_many = False
    one_to_one = False

    _related_name_counter = 0

    def __init__(
        self,
        verbose_name=_("Categories"),
        help_text=_('Selected categories'),
        through=None,
        blank=False,
        related_name=None,
        to=None,
        manager=CategoryManager,
    ):
        self.through = through or CategoryItem
        rel = ManyToManyRel(self, to, related_name=related_name, through=self.through)
        super().__init__(
            verbose_name=verbose_name,
            help_text=help_text,
            blank=blank,
            null=True,
            serialize=False,
            rel=rel,
        )
        self.swappable = False
        self.manager = manager

    def __get__(self, instance, model):
        if instance is not None and instance.pk is None:
            raise ValueError(
                f"{model.__name__} requires a primary key value before you can access its categories"
            )
        return self.manager(
            through=self.through,
            model=model,
            instance=instance,
            prefetch_cache_name=self.name,
        )

    def deconstruct(self):
        """
        Deconstruct the object, used with migrations.
        """
        name, path, args, kwargs = super().deconstruct()
        # Remove forced kwargs.
        for kwarg in ("serialize", "null"):
            del kwargs[kwarg]
        # Add arguments related to relations.
rel = self.remote_field if isinstance(rel.through, str): kwargs["through"] = rel.through elif not rel.through._meta.auto_created: kwargs["through"] = f"{rel.through._meta.app_label}.{rel.through._meta.object_name}" related_model = rel.model if isinstance(related_model, str): kwargs["to"] = related_model else: kwargs["to"] = f"{related_model._meta.app_label}.{related_model._meta.object_name}" return name, path, args, kwargs def contribute_to_class(self, cls, name, private_only=False, **kwargs): self.set_attributes_from_name(name) self.model = cls self.opts = cls._meta cls._meta.add_field(self) setattr(cls, name, self) if not cls._meta.abstract: if isinstance(self.remote_field.model, str): def resolve_related_class(cls, model, field): field.remote_field.model = model lazy_related_operation( resolve_related_class, cls, self.remote_field.model, field=self ) if isinstance(self.through, str): def resolve_related_class(cls, model, field): self.through = model self.remote_field.through = model self.post_through_setup(cls) lazy_related_operation( resolve_related_class, cls, self.through, field=self ) else: self.post_through_setup(cls) def get_internal_type(self): return "ManyToManyField" def post_through_setup(self, cls): self.use_gfk = self.through is None or issubclass(self.through, CategoryItem) if not self.remote_field.model: self.remote_field.model = self.through._meta.get_field( "category" ).remote_field.model if self.use_gfk: tagged_items = GenericRelation(self.through) tagged_items.contribute_to_class(cls, "categories") for rel in cls._meta.local_many_to_many: if rel == self or not isinstance(rel, CategoryManager): continue if rel.through == self.through: raise ValueError("You can't have two CategoryManagers with the same through model.") def save_form_data(self, instance, value): getattr(instance, self.name).set(*value) def formfield(self, **kwargs): super().formfield(**kwargs) # def formfield(self, form_class=CategoryField, **kwargs): # defaults = { # "label": capfirst(self.verbose_name), # "help_text": self.help_text, # "required": not self.blank, # } # defaults.update(kwargs) # return form_class(**defaults) def value_from_object(self, obj): if obj.pk is None: return [] qs = self.through.objects.select_related("category").filter( **self.through.lookup_kwargs(obj) ) return [ti.tag for ti in qs] def m2m_reverse_name(self): return self.through._meta.get_field("category").column def m2m_reverse_field_name(self): return self.through._meta.get_field("category").name def m2m_target_field_name(self): return self.model._meta.pk.name def m2m_reverse_target_field_name(self): return self.remote_field.model._meta.pk.name def m2m_column_name(self): if self.use_gfk: return self.through._meta.private_fields[0].fk_field return self.through._meta.get_field("content_object").column def m2m_db_table(self): return self.through._meta.db_table def bulk_related_objects(self, new_objs, using): return [] def _get_mm_case_path_info(self, direct=False, filtered_relation=None): pathinfos = [] linkfield1 = self.through._meta.get_field("content_object") linkfield2 = self.through._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = linkfield1.get_reverse_path_info( filtered_relation=filtered_relation ) join2infos = linkfield2.get_path_info( filtered_relation=filtered_relation ) else: join1infos = linkfield2.get_reverse_path_info( filtered_relation=filtered_relation ) join2infos = linkfield1.get_path_info( filtered_relation=filtered_relation ) pathinfos.extend(join1infos) pathinfos.extend(join2infos) 
return pathinfos def _get_gfk_case_path_info(self, direct=False, filtered_relation=None): pathinfos = [] from_field = self.model._meta.pk opts = self.through._meta linkfield = self.through._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = [ PathInfo( self.model._meta, opts, [from_field], self.remote_field, True, False, filtered_relation, ) ] join2infos = linkfield.get_path_info( filtered_relation=filtered_relation ) else: join1infos = linkfield.get_reverse_path_info( filtered_relation=filtered_relation ) join2infos = [ PathInfo( opts, self.model._meta, [from_field], self, True, False, filtered_relation, ) ] pathinfos.extend(join1infos) pathinfos.extend(join2infos) return pathinfos def get_path_info(self, filtered_relation=None): if self.use_gfk: return self._get_gfk_case_path_info( direct=True, filtered_relation=filtered_relation ) else: return self._get_mm_case_path_info( direct=True, filtered_relation=filtered_relation ) def get_reverse_path_info(self, filtered_relation=None): if self.use_gfk: return self._get_gfk_case_path_info( direct=False, filtered_relation=filtered_relation ) else: return self._get_mm_case_path_info( direct=False, filtered_relation=filtered_relation ) def get_joining_columns(self, reverse_join=False): if reverse_join: return (self.model._meta.pk.column, "object_id"), else: return ("object_id", self.model._meta.pk.column), def get_extra_restriction(self, where_class, alias, related_alias): extra_col = self.through._meta.get_field("content_type").column content_type_ids = [ ContentType.objects.get_for_model(subclass).pk for subclass in _get_subclasses(self.model) ] return RestrictByContentType(related_alias, extra_col, content_type_ids) def get_reverse_joining_columns(self): return self.get_joining_columns(reverse_join=True) @property def related_fields(self): return [(self.through._meta.get_field("object_id"), self.model._meta.pk)] @property def foreign_related_fields(self): return [self.related_fields[0][1]] def _get_subclasses(model): subclasses = [model] for field in model._meta.get_fields(): if isinstance(field, OneToOneRel) and getattr( field.field.remote_field, "parent_link", None ): subclasses.extend(_get_subclasses(field.related_model)) return subclasses
# -*- coding: utf-8 -*-
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.models import ContentType
# OneToOneRel is required by _get_subclasses() below.
from django.db.models import ManyToManyRel, OneToOneRel
from django.db.models.fields.related import RelatedField, lazy_related_operation
from django.db.models.query_utils import PathInfo
from django.utils.translation import gettext_lazy as _

from .managers import CategoryManager
from .models import CategoryItem


class RestrictByContentType:
    """
    An extra restriction used for contenttype restriction in joins.
    """

    contains_aggregate = False

    def __init__(self, alias, col, content_types):
        self.alias = alias
        self.col = col
        self.content_types = content_types

    def as_sql(self, compiler, connection):
        qn = compiler.quote_name_unless_alias
        if len(self.content_types) == 1:
            extra_where = f"{qn(self.alias)}.{qn(self.col)} = %s"
        else:
            extra_where = f"{qn(self.alias)}.{qn(self.col)} IN ({','.join(['%s'] * len(self.content_types))})"
        return extra_where, self.content_types

    def relabel_aliases(self, change_map):
        self.alias = change_map.get(self.alias, self.alias)

    def clone(self):
        return type(self)(self.alias, self.col, self.content_types[:])


# noinspection PyProtectedMember
class CategoryField(RelatedField):
    # Field flags
    many_to_many = True
    many_to_one = False
    one_to_many = False
    one_to_one = False

    _related_name_counter = 0

    def __init__(
        self,
        verbose_name=_("Categories"),
        help_text=_('Selected categories'),
        through=None,
        blank=False,
        related_name=None,
        to=None,
        manager=CategoryManager,
    ):
        self.through = through or CategoryItem
        rel = ManyToManyRel(self, to, related_name=related_name, through=self.through)
        super().__init__(
            verbose_name=verbose_name,
            help_text=help_text,
            blank=blank,
            null=True,
            serialize=False,
            rel=rel,
        )
        self.swappable = False
        self.manager = manager

    def __get__(self, instance, model):
        if instance is not None and instance.pk is None:
            raise ValueError(
                f"{model.__name__} requires a primary key value before you can access its categories"
            )
        return self.manager(
            through=self.through,
            model=model,
            instance=instance,
            prefetch_cache_name=self.name,
        )

    def deconstruct(self):
        """
        Deconstruct the object, used with migrations.
        """
        name, path, args, kwargs = super().deconstruct()
        # Remove forced kwargs.
        for kwarg in ("serialize", "null"):
            del kwargs[kwarg]
        # Add arguments related to relations.
        rel = self.remote_field
        if isinstance(rel.through, str):
            kwargs["through"] = rel.through
        elif not rel.through._meta.auto_created:
            kwargs["through"] = f"{rel.through._meta.app_label}.{rel.through._meta.object_name}"
        related_model = rel.model
        if isinstance(related_model, str):
            kwargs["to"] = related_model
        else:
            kwargs["to"] = f"{related_model._meta.app_label}.{related_model._meta.object_name}"
        return name, path, args, kwargs

    def contribute_to_class(self, cls, name, private_only=False, **kwargs):
        self.set_attributes_from_name(name)
        self.model = cls
        self.opts = cls._meta
        cls._meta.add_field(self)
        setattr(cls, name, self)
        if not cls._meta.abstract:
            if isinstance(self.remote_field.model, str):

                def resolve_related_class(cls, model, field):
                    field.remote_field.model = model

                lazy_related_operation(
                    resolve_related_class, cls, self.remote_field.model, field=self
                )
            if isinstance(self.through, str):

                def resolve_related_class(cls, model, field):
                    self.through = model
                    self.remote_field.through = model
                    self.post_through_setup(cls)

                lazy_related_operation(
                    resolve_related_class, cls, self.through, field=self
                )
            else:
                self.post_through_setup(cls)

    def get_internal_type(self):
        return "ManyToManyField"

    def post_through_setup(self, cls):
        self.use_gfk = self.through is None or issubclass(self.through, CategoryItem)
        if not self.remote_field.model:
            self.remote_field.model = self.through._meta.get_field(
                "category"
            ).remote_field.model
        if self.use_gfk:
            tagged_items = GenericRelation(self.through)
            tagged_items.contribute_to_class(cls, "categories")

        for rel in cls._meta.local_many_to_many:
            # local_many_to_many holds fields, so compare against the field
            # class (CategoryField), not the CategoryManager manager class.
            if rel == self or not isinstance(rel, CategoryField):
                continue
            if rel.through == self.through:
                raise ValueError("You can't have two CategoryFields with the same through model.")

    def save_form_data(self, instance, value):
        getattr(instance, self.name).set(*value)

    def formfield(self, **kwargs):
        return super().formfield(**kwargs)

    # def formfield(self, form_class=CategoryField, **kwargs):
    #     defaults = {
    #         "label": capfirst(self.verbose_name),
    #         "help_text": self.help_text,
    #         "required": not self.blank,
    #     }
    #     defaults.update(kwargs)
    #     return form_class(**defaults)

    def value_from_object(self, obj):
        if obj.pk is None:
            return []
        qs = self.through.objects.select_related("category").filter(
            **self.through.lookup_kwargs(obj)
        )
        # Each through row links to its Category via the "category" FK.
        return [ti.category for ti in qs]

    def m2m_reverse_name(self):
        return self.through._meta.get_field("category").column

    def m2m_reverse_field_name(self):
        return self.through._meta.get_field("category").name

    def m2m_target_field_name(self):
        return self.model._meta.pk.name

    def m2m_reverse_target_field_name(self):
        return self.remote_field.model._meta.pk.name

    def m2m_column_name(self):
        if self.use_gfk:
            return self.through._meta.private_fields[0].fk_field
        return self.through._meta.get_field("content_object").column

    def m2m_db_table(self):
        return self.through._meta.db_table

    def bulk_related_objects(self, new_objs, using):
        return []

    def _get_mm_case_path_info(self, direct=False, filtered_relation=None):
        pathinfos = []
        linkfield1 = self.through._meta.get_field("content_object")
        linkfield2 = self.through._meta.get_field(self.m2m_reverse_field_name())
        if direct:
            join1infos = linkfield1.get_reverse_path_info(
                filtered_relation=filtered_relation
            )
            join2infos = linkfield2.get_path_info(
                filtered_relation=filtered_relation
            )
        else:
            join1infos = linkfield2.get_reverse_path_info(
                filtered_relation=filtered_relation
            )
            join2infos = linkfield1.get_path_info(
                filtered_relation=filtered_relation
            )
        pathinfos.extend(join1infos)
        pathinfos.extend(join2infos)
return pathinfos def _get_gfk_case_path_info(self, direct=False, filtered_relation=None): pathinfos = [] from_field = self.model._meta.pk opts = self.through._meta linkfield = self.through._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = [ PathInfo( self.model._meta, opts, [from_field], self.remote_field, True, False, filtered_relation, ) ] join2infos = linkfield.get_path_info( filtered_relation=filtered_relation ) else: join1infos = linkfield.get_reverse_path_info( filtered_relation=filtered_relation ) join2infos = [ PathInfo( opts, self.model._meta, [from_field], self, True, False, filtered_relation, ) ] pathinfos.extend(join1infos) pathinfos.extend(join2infos) return pathinfos def get_path_info(self, filtered_relation=None): if self.use_gfk: return self._get_gfk_case_path_info( direct=True, filtered_relation=filtered_relation ) else: return self._get_mm_case_path_info( direct=True, filtered_relation=filtered_relation ) def get_reverse_path_info(self, filtered_relation=None): if self.use_gfk: return self._get_gfk_case_path_info( direct=False, filtered_relation=filtered_relation ) else: return self._get_mm_case_path_info( direct=False, filtered_relation=filtered_relation ) def get_joining_columns(self, reverse_join=False): if reverse_join: return (self.model._meta.pk.column, "object_id"), else: return ("object_id", self.model._meta.pk.column), def get_extra_restriction(self, where_class, alias, related_alias): extra_col = self.through._meta.get_field("content_type").column content_type_ids = [ ContentType.objects.get_for_model(subclass).pk for subclass in _get_subclasses(self.model) ] return RestrictByContentType(related_alias, extra_col, content_type_ids) def get_reverse_joining_columns(self): return self.get_joining_columns(reverse_join=True) @property def related_fields(self): return [(self.through._meta.get_field("object_id"), self.model._meta.pk)] @property def foreign_related_fields(self): return [self.related_fields[0][1]] def _get_subclasses(model): subclasses = [model] for field in model._meta.get_fields(): if isinstance(field, OneToOneRel) and getattr( field.field.remote_field, "parent_link", None ): subclasses.extend(_get_subclasses(field.related_model)) return subclasses
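

# --- Usage sketch (illustrative; not part of this module) ---
# How CategoryField is meant to hang off a model, assuming the bundled
# CategoryItem through model and a CategoryManager exposing set()/all().
# The model and field names below are hypothetical.
#
#   from django.db import models
#
#   class Article(models.Model):
#       title = models.CharField(max_length=200)
#       categories = CategoryField(related_name="articles")
#
#   article = Article.objects.create(title="Hello")    # pk required before access
#   article.categories.set(news_category)              # routed through the manager
#   names = [c.name for c in article.categories.all()]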
import datetime

from wildfirepy.coordinates.util import SinusoidalCoordinate
from wildfirepy.net.usgs.usgs_downloader import AbstractUSGSDownloader
from wildfirepy.net.util.usgs import VIIRSHtmlParser

__all__ = ['VIIRSBurntAreaDownloader', 'VIIRSFireAreaDownloader']


class Viirs(AbstractUSGSDownloader):
    """
    An Abstract Base Class Downloader for VIIRS products.
    """

    def __init__(self, product=''):
        super().__init__()
        self.product = product
        self.base_url += "VIIRS/" f'{self.product}.001/'
        self.regex_traverser = VIIRSHtmlParser(product=product)

    def _get_nearest_time(self, hours, minutes):
        if not 0 <= minutes < 60:
            raise ValueError("Minutes must be in the range 0-59")
        if not 0 <= hours < 24:
            raise ValueError("Hours must be in the range 0-23")

        # Observation timestamps fall on a 6-minute grid; round down to it.
        minutes = minutes - minutes % 6
        minutes = str(minutes) if minutes > 9 else f"0{minutes}"
        hours = str(hours) if hours > 9 else f"0{hours}"
        return f'{hours}{minutes}'

    def _get_date(self, year, month, date):
        dt = f'{year}.{month}.{date}'
        fmt = '%Y.%m.%d'
        dt = datetime.datetime.strptime(dt, fmt)
        julian_day = dt.timetuple().tm_yday
        month = str(dt.month) if dt.month > 9 else f'0{dt.month}'
        date = str(dt.day) if dt.day > 9 else f'0{dt.day}'
        return f'{year}.{month}.{date}', julian_day

    def get_h5(self, *, year, month, date, hours, minutes, **kwargs):
        """
        Downloads the `h5` file and stores it on the disk.

        Parameters
        ----------
        year: `int`
            Year of the observation.
        month: `int`
            Month of the observation.
        date: `int`
            Date of observation.
        hours: `int`
            Hour of observation. UTC time.
        minutes: `int`
            Minute of observation. UTC time.
        kwargs: `dict`
            keyword arguments to be passed to `AbstractUSGSDownloader.fetch`

        Returns
        -------
        path: `str`
            Absolute path to the downloaded `h5` file.
        """
        date, julian_day = self._get_date(year=year, month=month, date=date)
        time = self._get_nearest_time(hours=hours, minutes=minutes)
        self.regex_traverser(self.base_url + date)

        filename = f"{self.product}.A{year}{'%03d' % julian_day}.{time}.001."
        filename = self.regex_traverser.get_filename(filename)
        url = self.base_url + date + '/' + filename
        return self.fetch(url=url, filename=filename, **kwargs)

    def get_xml(self, *, year, month, date, hours, minutes, **kwargs):
        """
        Downloads the `xml` file and stores it on the disk.

        Parameters
        ----------
        year: `int`
            Year of the observation.
        month: `int`
            Month of the observation.
        date: `int`
            Date of observation.
        hours: `int`
            Hour of observation. UTC time.
        minutes: `int`
            Minute of observation. UTC time.
        kwargs: `dict`
            keyword arguments to be passed to `AbstractUSGSDownloader.fetch`

        Returns
        -------
        path: `str`
            Absolute path to the downloaded `xml` file.
        """
        date, julian_day = self._get_date(year=year, month=month, date=date)
        time = self._get_nearest_time(hours=hours, minutes=minutes)

        filename = f"{self.product}.A{year}{'%03d' % julian_day}.{time}.001."
        filename = self.regex_traverser.get_filename(filename) + '.xml'
        url = self.base_url + date + '/' + filename
        return self.fetch(url=url, filename=filename, **kwargs)


class Viirs_ext(Viirs):
    """
    A downloader for VIIRS products that are looked up by latitude and
    longitude (converted to sinusoidal tile coordinates) instead of by
    observation time.
    """

    def __init__(self, product=''):
        # Let the base class assemble base_url and the HTML parser; appending
        # to base_url again here would duplicate the product path segment.
        super().__init__(product=product)
        self.converter = SinusoidalCoordinate()

    def get_h5(self, *, year, month, date, latitude, longitude, **kwargs):
        """
        Downloads the `h5` file and stores it on the disk.

        Parameters
        ----------
        year: `int`
            Year of the observation.
        month: `int`
            Month of the observation.
        date: `int`
            Date of observation.
        latitude: `float`
            latitude of the observation.
        longitude: `float`
            longitude of the observation.
        kwargs: `dict`
            keyword arguments to be passed to `AbstractUSGSDownloader.fetch`

        Returns
        -------
        path: `str`
            Absolute path to the downloaded `h5` file.
        """
        date, julian_day = self._get_date(year=year, month=month, date=date)
        h, v = self.converter(latitude, longitude)
        self.regex_traverser(self.base_url + date)

        filename = f"{self.product}.A{year}{'%03d' % julian_day}.{h}.001.{v}"
        filename = self.regex_traverser.get_filename(filename)
        url = self.base_url + date + '/' + filename
        return self.fetch(url=url, filename=filename, **kwargs)

    def get_xml(self, *, year, month, date, latitude, longitude, **kwargs):
        """
        Downloads the `xml` file and stores it on the disk.

        Parameters
        ----------
        year: `int`
            Year of the observation.
        month: `int`
            Month of the observation.
        date: `int`
            Date of observation.
        latitude: `float`
            latitude of the observation.
        longitude: `float`
            longitude of the observation.
        kwargs: `dict`
            keyword arguments to be passed to `AbstractUSGSDownloader.fetch`

        Returns
        -------
        path: `str`
            Absolute path to the downloaded `xml` file.
        """
        date, julian_day = self._get_date(year=year, month=month, date=date)
        h, v = self.converter(latitude, longitude)

        filename = f"{self.product}.A{year}{'%03d' % julian_day}.{h}.001.{v}"
        filename = self.regex_traverser.get_filename(filename) + '.xml'
        url = self.base_url + date + '/' + filename
        return self.fetch(url=url, filename=filename, **kwargs)


class VIIRSFireAreaDownloader(Viirs_ext):
    """
    VIIRS Class for `VNP14A1`, i.e., Fire Area Data.
    """

    def __init__(self):
        super().__init__(product="VNP14A1")


class VIIRSBurntAreaDownloader(Viirs):
    """
    VIIRS Class for `VNP03MODLL`, i.e., Burnt Area.
    """

    def __init__(self):
        super().__init__(product="VNP03MODLL")
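

# --- Usage sketch (illustrative; requires network access and assumes the USGS
# directory layout that VIIRSHtmlParser was written against) ---
#
#   dl = VIIRSBurntAreaDownloader()
#   # Fetch the granule nearest 2020-01-15 10:33 UTC; _get_nearest_time rounds
#   # the minutes down to the 6-minute grid used in VIIRS filenames (10:30).
#   h5_path = dl.get_h5(year=2020, month=1, date=15, hours=10, minutes=33)
#   xml_path = dl.get_xml(year=2020, month=1, date=15, hours=10, minutes=33)
#
#   # The lat/lon variant resolves a sinusoidal tile instead of a timestamp:
#   fire = VIIRSFireAreaDownloader()
#   h5_path = fire.get_h5(year=2020, month=1, date=15, latitude=37.0, longitude=-120.0)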
import datetime
import json

import requests
from django.contrib.sessions.models import Session
from rest_framework import status
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response

from connect.utils import attempt_json_loads
from iotconnect.classes import AdHocAdapter
from uninett_api.settings._secrets import HEADERS, OWNER_ID


class HiveManagerAdapter(AdHocAdapter):
    group_id = 339207927204382

    def validate_data(self, data, **kwargs):
        # TODO: Validate all data, possibly using serializers
        # Convert to JSON
        data = attempt_json_loads(data)

        # Validation
        if data.get('deliver_by_email', None) is None:
            raise ValidationError("deliver_by_email is required")
        if not data.get('device_type', None):
            raise ValidationError("device_type is required")

        # Formatting
        if isinstance(data['deliver_by_email'], str):
            data['deliver_by_email'] = data['deliver_by_email'].upper() == 'TRUE'

        return data

    def perform_generation(self, validated_data):
        url = "https://cloud-ie.aerohive.com/xapi/v1/identity/credentials"
        params = {'ownerId': OWNER_ID}
        session_key = attempt_json_loads(self.request.data['authentication_data'])['session_key']
        session = Session.objects.get(pk=session_key).get_decoded()
        kwargs = {
            'feide_username': session['user_data']['userid_sec'][0],
            'group_id': self.group_id,
            'full_name': session['user_data']['name'],
            'organization_name': 'Uninett',
            'policy': 'PERSONAL',
            'device_type': validated_data['device_type'],
            'deliver_by_email': validated_data['deliver_by_email'],
            'email': session['user_data']['email']
        }
        data = self._get_generation_data(**kwargs)
        response = requests.post(url=url, params=params, data=json.dumps(data), headers=HEADERS)
        data, status_code = self._get_response_data(response)
        return Response(data=data, status=status_code)

    def _get_response_data(self, response):
        if response.status_code == status.HTTP_403_FORBIDDEN:
            data = {'error': 'Could not generate PSK because the user has already created one this second'}
            return data, response.status_code
        try:
            psk = attempt_json_loads(response.content)['data']['password']
            data = {'psk': psk, 'username': self.hive_user_name}
            status_code = status.HTTP_201_CREATED
        except TypeError:
            data = {'error': 'Could not generate PSK due to an unknown error with the multi-PSK service'}
            status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        return data, status_code

    def _get_generation_data(self, feide_username: str, group_id: int, full_name: str, organization_name: str,
                             policy: str, device_type: str, deliver_by_email: bool, email: str):
        # str.strip() removes a *set of characters*, not a prefix, so drop the
        # 'feide:' scheme explicitly before taking the local part.
        if feide_username.startswith('feide:'):
            feide_username = feide_username[len('feide:'):]
        feide_username = feide_username.split('@')[0]
        hive_user_name = f"{feide_username}: {datetime.datetime.now().replace(microsecond=0).strftime('%y%m%d%H%M%S')}"
        self.hive_user_name = hive_user_name
        return {
            "deliverMethod": "EMAIL" if deliver_by_email else "NO_DELIVERY",
            "firstName": full_name,
            "groupId": group_id,
            "lastName": feide_username,
            "email": email,
            "organization": organization_name,
            "policy": policy,
            "userName": hive_user_name,
            "purpose": device_type
        }
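

# --- Illustrative request body (all values hypothetical) ---
# For feide_username='feide:ola@uio.no', _get_generation_data() produces
# something like the following, which is POSTed to the Aerohive XAPI:
#
#   {
#       "deliverMethod": "EMAIL",
#       "firstName": "Ola Nordmann",
#       "groupId": 339207927204382,
#       "lastName": "ola",
#       "email": "ola@example.org",
#       "organization": "Uninett",
#       "policy": "PERSONAL",
#       "userName": "ola: 210115103000",   # local part + %y%m%d%H%M%S timestamp
#       "purpose": "laptop"
#   }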
#!/usr/bin/python
# Copyright 2021 Expedient
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# -*- coding: utf-8 -*-

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = r'''
---
module: ece_cluster
short_description: Create, modify, or delete Elasticsearch clusters in ECE
version_added: '2.9'
author: Mike Garuccio (@mgaruccio)
requirements:
- python3
description:
- "This module creates new Elasticsearch clusters in ECE"
- "supports elasticsearch, Kibana, APM, and ML clusters"
options:
  state:
    description:
    - The desired state for the module
    choices: ['present', 'absent']
    default: present
    type: str
  cluster_name:
    description:
    - Name for the cluster to create or modify
    required: True
    type: str
  elastic_settings:
    description:
    - Settings for Elastic clusters to create
    required: True
    type: list
    suboptions:
      memory_mb:
        description:
        - Amount of memory to assign cluster, in mb
        required: True
        type: int
      instance_config:
        description:
        - Name of the instance configuration to use for the elastic nodes
        required: True
        type: str
      zone_count:
        description:
        - Number of zones to deploy the elasticsearch cluster into
        required: True
        type: int
        choices: [1, 2, 3]
      roles:
        description:
        - Roles the nodes should fulfill
        required: True
        type: list
        choices: ['master', 'data', 'ingest']
  version:
    description:
    - Version of the Elastic Stack to deploy
    required: True
    type: str
  deployment_template:
    description:
    - Name of the deployment template to use when deploying the cluster
    required: True
    type: str
  elastic_user_settings:
    description:
    - Settings object to pass as overrides for the elasticsearch.yml file for the cluster
    - Supports all settings definable in elasticsearch.yml
    required: False
    type: dict
  snapshot_settings:
    description:
    - Defines which snapshot repository to use and the retention settings for snapshots
    suboptions:
      repository_name:
        description:
        - Name of the snapshot repository to use for cluster backups
        type: str
        required: True
      snapshots_to_retain:
        description:
        - number of snapshots to retain
        type: int
        default: 100
      snapshot_interval:
        description:
        - How long to wait between snapshots
        - Defined as '60m' for 60 minutes, or '5h' for 5 hours, etc
        type: str
        default: 30m
      enabled:
        description:
        - Whether or not to enable the snapshot repo
        type: bool
        default: true
  kibana_settings:
    description:
    - Settings to apply to the Kibana instances deployed for the elastic cluster
    required: True
    type: dict
    suboptions:
      memory_mb:
        description:
        - Amount of memory to assign to each Kibana instance, in mb
        required: True
        type: int
      instance_config:
        description:
        - instance configuration to use when creating the Kibana instances
        default: kibana
        type: str
      zone_count:
        description:
        - number of zones to deploy Kibana into
        default: 1
        type: int
  apm_settings:
    description:
    - Settings to apply to the APM instances deployed for the elastic cluster
    required: True
    type: dict
    suboptions:
      memory_mb:
        description:
        - Amount of memory to assign to each APM instance, in mb
        required: True
        type: int
      instance_config:
        description:
        - instance configuration to use when creating the APM instances
        default: apm
        type: str
      zone_count:
        description:
        - number of zones to deploy APM into
        default: 1
        type: int
  ml_settings:
    description:
    - Settings to apply to the ML instances deployed for the elastic cluster
    required: False
    type: dict
    suboptions:
      memory_mb:
        description:
        - Amount of memory to assign to each ML instance, in mb
        required: True
        type: int
      instance_config:
        description:
        - instance configuration to use when creating the ML instances
        default: ml
        type: str
      zone_count:
        description:
        - number of zones to deploy ML into
        default: 1
        type: int
  wait_for_completion:
    description:
    - Whether to wait for the completion of the cluster operations before exiting the module
    - Impacts how much information is returned at the end of the module run
    default: False
    type: bool
  completion_timeout:
    description:
    - How long to wait, in seconds, for operations to complete before timing out
    - only applies if wait_for_completion is True
    default: 600
    type: int

extends_documentation_fragment:
- expedient.elastic.ece_auth_options
'''

## need to support both loading as part of a collection and running in test/debug mode
try:
    from ansible_collections.expedient.elastic.plugins.module_utils.ece import ECE
except ImportError:
    import sys
    import os
    util_path = f'{os.getcwd()}/plugins/module_utils'
    sys.path.append(util_path)
    from ece import ECE

from yaml import load, dump
try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    from yaml import Loader, Dumper

from ansible.module_utils.basic import AnsibleModule


class ECE_Cluster(ECE):
    def __init__(self, module):
        super().__init__(module)
        self.cluster_name = self.module.params.get('cluster_name')
        self.elastic_settings = self.module.params.get('elastic_settings')
        self.kibana_settings = self.module.params.get('kibana_settings')
        self.apm_settings = self.module.params.get('apm_settings')
        self.ml_settings = self.module.params.get('ml_settings')
        self.version = self.module.params.get('version')
        self.deployment_template = self.module.params.get('deployment_template')
        self.wait_for_completion = self.module.params.get('wait_for_completion')
        self.completion_timeout = self.module.params.get('completion_timeout')
        self.elastic_user_settings = self.module.params.get('elastic_user_settings')
        self.snapshot_settings = self.module.params.get('snapshot_settings')
        self.traffic_rulesets = self.module.params.get('traffic_rulesets')

    def get_matching_clusters(self):
        clusters = self.get_clusters_by_name(self.cluster_name)
        return clusters

    def create_cluster(self):
        data = {
            'cluster_name': self.cluster_name,
            'plan': {
                'cluster_topology': [{
                    'size': {
                        'value': settings['memory_mb'],
                        'resource': 'memory'
                    },
                    'node_type': {
                        'master': 'master' in settings['roles'],
                        'data': 'data' in settings['roles'],
                        'ingest': 'ingest' in settings['roles']
                    },
                    'instance_configuration_id': self.get_instance_config(settings['instance_config'])['id'],
                    'elasticsearch': {
                        'enabled_built_in_plugins': [],
                        'node_attributes': {},
                        'user_settings_yaml': dump(self.elastic_user_settings, Dumper=Dumper),
                    },
                    'zone_count': settings['zone_count']
                } for settings in self.elastic_settings],
                'elasticsearch': {
                    'version': self.version
                },
                'transient': {},
                'deployment_template': {
                    'id': self.get_deployment_template(self.deployment_template)['id']
                },
            },
            'settings': {},
            'kibana': {
                'plan': {
                    'cluster_topology': [{
                        'instance_configuration_id': self.get_instance_config(self.kibana_settings['instance_config'])['id'],
                        'size': {
                            'value': self.kibana_settings['memory_mb'],
                            'resource': 'memory'
                        },
                        'zone_count': self.kibana_settings['zone_count']
                    }],
                    'kibana': {
                        ## using a default value here; if we want to extend it later it can be made configurable
                        'user_settings_yaml': "# Note that the syntax for user settings can change between major versions.\n# You might need to update these user settings before performing a major version upgrade.\n#\n# Use OpenStreetMap for tiles:\n# tilemap:\n#   options.maxZoom: 18\n#   url: http://a.tile.openstreetmap.org/{z}/{x}/{y}.png\n#\n# To learn more, see the documentation.",
                        'version': self.version
                    }
                },
            }
        }
        if self.apm_settings:
            data['apm'] = {
                'plan': {
                    'cluster_topology': [{
                        'instance_configuration_id': self.get_instance_config(self.apm_settings['instance_config'])['id'],
                        'size': {
                            'value': self.apm_settings['memory_mb'],
                            'resource': 'memory'
                        },
                        'zone_count': self.apm_settings['zone_count']
                    }],
                    'apm': {'version': self.version}
                }
            }
        ## This is technically just another ES deployment rather than its own config, but decided to follow the UI rather than API conventions
        if self.ml_settings:
            data['plan']['cluster_topology'].append({
                'instance_configuration_id': self.get_instance_config(self.ml_settings['instance_config'])['id'],
                'size': {
                    'value': self.ml_settings['memory_mb'],
                    'resource': 'memory'
                },
                'node_type': {
                    'master': False,
                    'data': False,
                    'ingest': False,
                    'ml': True
                },
                'zone_count': self.ml_settings['zone_count']
            })
        if self.snapshot_settings:
            data['settings']['snapshot'] = {
                'repository': {
                    'reference': {
                        'repository_name': self.snapshot_settings['repository_name']
                    }
                },
                'enabled': self.snapshot_settings['enabled'],
                'retention': {
                    'snapshots': self.snapshot_settings['snapshots_to_retain'],
                },
                'interval': self.snapshot_settings['snapshot_interval']
            }
        if self.traffic_rulesets:
            data['settings']['ip_filtering'] = {
                'rulesets': [self.get_traffic_ruleset_by_name(x)['id'] for x in self.traffic_rulesets]
            }
        endpoint = 'clusters/elasticsearch'
        cluster_creation_result = self.send_api_request(endpoint, 'POST', data=data)
        if self.wait_for_completion:
            elastic_result = self.wait_for_cluster_state('elasticsearch', cluster_creation_result['elasticsearch_cluster_id'], 'started', self.completion_timeout)
            kibana_result = self.wait_for_cluster_state('kibana', cluster_creation_result['kibana_cluster_id'], 'started', self.completion_timeout)
            # Fail if either cluster never reached 'started'; the previous
            # `not elastic_result and kibana_result` only checked the
            # Elasticsearch result due to operator precedence.
            if not (elastic_result and kibana_result):
                return False
        return cluster_creation_result

    def delete_cluster(self, cluster_id):
        self.terminate_cluster(cluster_id)
        endpoint = f'clusters/elasticsearch/{cluster_id}'
        delete_result = self.send_api_request(endpoint, 'DELETE')
        return delete_result

    def terminate_cluster(self, cluster_id):
        endpoint = f'clusters/elasticsearch/{cluster_id}/_shutdown'
        stop_result = self.send_api_request(endpoint, 'POST')
        wait_result = self.wait_for_cluster_state('elasticsearch', cluster_id, 'stopped', self.completion_timeout)
        if not wait_result:
            self.module.fail_json(msg=f'failed to stop cluster {self.cluster_name}')
        return stop_result


def main():
    elastic_settings_spec = dict(
        memory_mb=dict(type='int', required=True),
        instance_config=dict(type='str', default='data.logging'),
        zone_count=dict(type='int', default=2),
        roles=dict(type='list', elements='str', choices=['master', 'data', 'ingest']),
    )
    snapshot_settings_spec = dict(
        repository_name=dict(type='str', required=True),
        snapshots_to_retain=dict(type='int', default=100),
        snapshot_interval=dict(type='str', default='60m'),
        enabled=dict(type='bool', default=True),
    )
    kibana_settings_spec = dict(
        memory_mb=dict(type='int', required=True),
        instance_config=dict(type='str', default='kibana'),
        zone_count=dict(type='int', default=1),
    )
    apm_settings_spec = dict(
        memory_mb=dict(type='int', required=True),
        instance_config=dict(type='str', default='apm'),
        zone_count=dict(type='int', default=1),
    )
    ml_settings_spec = dict(
        memory_mb=dict(type='int', required=True),
        instance_config=dict(type='str', default='ml'),
        zone_count=dict(type='int', default=1),
    )
    module_args = dict(
        host=dict(type='str', required=True),
        port=dict(type='int', default=12443),
        username=dict(type='str', required=True),
        password=dict(type='str', required=True, no_log=True),
        verify_ssl_cert=dict(type='bool', default=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        cluster_name=dict(type='str', required=True),
        elastic_settings=dict(type='list', required=False, elements='dict', options=elastic_settings_spec),
        elastic_user_settings=dict(type='dict', default={}),  # does not have sub-options defined as there are far too many elastic options to capture here
        snapshot_settings=dict(type='dict', required=False, options=snapshot_settings_spec),
        traffic_rulesets=dict(type='list', required=False),
        kibana_settings=dict(type='dict', required=False, options=kibana_settings_spec),
        apm_settings=dict(type='dict', required=False, options=apm_settings_spec),
        ml_settings=dict(type='dict', required=False, options=ml_settings_spec),
        version=dict(type='str', default='7.13.0'),
        deployment_template=dict(type='str', required=True),
        wait_for_completion=dict(type='bool', default=False),
        completion_timeout=dict(type='int', default=600),
    )

    results = {'changed': False}
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    state = module.params.get('state')
    ece_cluster = ECE_Cluster(module)

    matching_clusters = ece_cluster.get_matching_clusters()
    if len(matching_clusters) > 1:
        results['msg'] = f'found multiple clusters matching name {module.params.get("cluster_name")}'
        module.fail_json(**results)

    if state == 'present':
        if len(matching_clusters) > 0:
            results['msg'] = 'cluster exists'
            ## This code handles edge cases poorly, in the interest of being able to match the data format of the cluster creation result
            results['cluster_data'] = {
                'elasticsearch_cluster_id': matching_clusters[0]['cluster_id'],
                'kibana_cluster_id': matching_clusters[0]['associated_kibana_clusters'][0]['kibana_id']
            }
            if len(matching_clusters[0]['associated_apm_clusters']) > 0:
                results['cluster_data']['apm_id'] = matching_clusters[0]['associated_apm_clusters'][0]['apm_id']
            module.exit_json(**results)
        results['changed'] = True
        results['msg'] = f'cluster {module.params.get("cluster_name")} will be created'
        if not module.check_mode:
            cluster_data = ece_cluster.create_cluster()
            if not cluster_data:
                results['msg'] = 'cluster creation failed'
                module.fail_json(**results)
            results['cluster_data'] = cluster_data
            results['msg'] = f'cluster {module.params.get("cluster_name")} created'
        module.exit_json(**results)

    if state == 'absent':
        if len(matching_clusters) == 0:
            results['msg'] = f'cluster {module.params.get("cluster_name")} does not exist'
            module.exit_json(**results)
        # Report the pending change in check mode as well.
        results['changed'] = True
        results['msg'] = f'cluster {module.params.get("cluster_name")} will be deleted'
        if not module.check_mode:
            ece_cluster.delete_cluster(matching_clusters[0]['cluster_id'])
            results['msg'] = f'cluster {module.params.get("cluster_name")} deleted'
        module.exit_json(**results)


if __name__ == '__main__':
    main()
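

# --- Example playbook task (illustrative; host, credentials, and template
# names are placeholders, and the FQCN assumes the expedient.elastic collection) ---
#
# - name: Create a logging cluster in ECE
#   expedient.elastic.ece_cluster:
#     host: ece.example.com
#     username: admin
#     password: "{{ ece_password }}"
#     cluster_name: logging-prod
#     deployment_template: default
#     version: 7.13.0
#     elastic_settings:
#       - memory_mb: 8192
#         instance_config: data.logging
#         zone_count: 2
#         roles: [master, data, ingest]
#     kibana_settings:
#       memory_mb: 2048
#     wait_for_completion: true
#     completion_timeout: 900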
from pygears import gear
from pygears.sim import sim_log
from pygears.typing import Tuple, Uint

TAddr = Uint['w_addr']
TData = Uint['xlen']
TReadRequest = TAddr
TWriteRequest = Tuple[{'addr': TAddr, 'data': TData}]


@gear
async def register_file_write(request: TWriteRequest, *, storage):
    async with request as req:
        if req['addr'] != 0:
            # quotes inside the replacement fields must differ from the f-string's
            # own quotes (a SyntaxError before Python 3.12)
            sim_log().info(f'Writing {req["data"]} to x{int(req["addr"])}')
            storage[int(req['addr'])] = req['data']


@gear
async def register_file_read(request: TReadRequest, *, storage,
                             t_dout) -> b't_dout':
    async with request as req:
        if req == 0:
            yield t_dout(0)
        else:
            yield storage[int(req)]


@gear
def register_file(read_request: TReadRequest,
                  write_request: TWriteRequest,
                  *,
                  storage,
                  xlen=b'xlen') -> TData:
    write_request | register_file_write(storage=storage)
    return read_request | register_file_read(
        storage=storage, t_dout=TData[xlen])
from pygears import gear from pygears.sim import sim_log from pygears.typing import Tuple, Uint TAddr = Uint['w_addr'] TData = Uint['xlen'] TReadRequest = TAddr TWriteRequest = Tuple[{'addr': TAddr, 'data': TData}] @gear async def register_file_write(request: TWriteRequest, *, storage): async with request as req: if req['addr'] != 0: sim_log().info(f'Writing {req["data"]} to x{int(req["addr"])}') storage[int(req['addr'])] = req['data'] @gear async def register_file_read(request: TReadRequest, *, storage, t_dout) -> b't_dout': async with request as req: if req == 0: yield t_dout(0) else: yield storage[int(req)] @gear def register_file(read_request: TReadRequest, write_request: TWriteRequest, *, storage, xlen=b'xlen') -> TData: write_request | register_file_write(storage=storage) return read_request | register_file_read( storage=storage, t_dout=TData[xlen])
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker

from .end_sponsoring_future_reserves_result_code import (
    EndSponsoringFutureReservesResultCode,
)
from ..exceptions import ValueError

__all__ = ["EndSponsoringFutureReservesResult"]


class EndSponsoringFutureReservesResult:
    """
    XDR Source Code
    ----------------------------------------------------------------
    union EndSponsoringFutureReservesResult switch
        (EndSponsoringFutureReservesResultCode code)
    {
    case END_SPONSORING_FUTURE_RESERVES_SUCCESS:
        void;
    default:
        void;
    };
    ----------------------------------------------------------------
    """

    def __init__(self, code: EndSponsoringFutureReservesResultCode,) -> None:
        self.code = code

    def pack(self, packer: Packer) -> None:
        self.code.pack(packer)
        if (
            self.code
            == EndSponsoringFutureReservesResultCode.END_SPONSORING_FUTURE_RESERVES_SUCCESS
        ):
            return
        raise ValueError("Invalid code.")

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "EndSponsoringFutureReservesResult":
        code = EndSponsoringFutureReservesResultCode.unpack(unpacker)
        if (
            code
            == EndSponsoringFutureReservesResultCode.END_SPONSORING_FUTURE_RESERVES_SUCCESS
        ):
            return cls(code)
        raise ValueError("Invalid code.")

    def to_xdr_bytes(self) -> bytes:
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "EndSponsoringFutureReservesResult":
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "EndSponsoringFutureReservesResult":
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.code == other.code

    def __str__(self):
        out = []
        out.append(f"code={self.code}")
        # the join's quotes must differ from the f-string's own quotes (pre-3.12)
        return f"<EndSponsoringFutureReservesResult {[', '.join(out)]}>"
# This is an automatically generated file. # DO NOT EDIT or your changes may be overwritten import base64 from xdrlib import Packer, Unpacker from .end_sponsoring_future_reserves_result_code import ( EndSponsoringFutureReservesResultCode, ) from ..exceptions import ValueError __all__ = ["EndSponsoringFutureReservesResult"] class EndSponsoringFutureReservesResult: """ XDR Source Code ---------------------------------------------------------------- union EndSponsoringFutureReservesResult switch (EndSponsoringFutureReservesResultCode code) { case END_SPONSORING_FUTURE_RESERVES_SUCCESS: void; default: void; }; ---------------------------------------------------------------- """ def __init__(self, code: EndSponsoringFutureReservesResultCode,) -> None: self.code = code def pack(self, packer: Packer) -> None: self.code.pack(packer) if ( self.code == EndSponsoringFutureReservesResultCode.END_SPONSORING_FUTURE_RESERVES_SUCCESS ): return raise ValueError("Invalid code.") @classmethod def unpack(cls, unpacker: Unpacker) -> "EndSponsoringFutureReservesResult": code = EndSponsoringFutureReservesResultCode.unpack(unpacker) if ( code == EndSponsoringFutureReservesResultCode.END_SPONSORING_FUTURE_RESERVES_SUCCESS ): return cls(code) raise ValueError("Invalid code.") def to_xdr_bytes(self) -> bytes: packer = Packer() self.pack(packer) return packer.get_buffer() @classmethod def from_xdr_bytes(cls, xdr: bytes) -> "EndSponsoringFutureReservesResult": unpacker = Unpacker(xdr) return cls.unpack(unpacker) def to_xdr(self) -> str: xdr_bytes = self.to_xdr_bytes() return base64.b64encode(xdr_bytes).decode() @classmethod def from_xdr(cls, xdr: str) -> "EndSponsoringFutureReservesResult": xdr_bytes = base64.b64decode(xdr.encode()) return cls.from_xdr_bytes(xdr_bytes) def __eq__(self, other: object): if not isinstance(other, self.__class__): return NotImplemented return self.code == other.code def __str__(self): out = [] out.append(f"code={self.code}") return f"<EndSponsoringFutureReservesResult {[', '.join(out)]}>"
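# A minimal round-trip sketch (illustrative, not part of the generated file above);
# it uses only the to_xdr/from_xdr helpers defined there and the enum member named
# in the XDR source shown in the docstring.
success = EndSponsoringFutureReservesResult(
    code=EndSponsoringFutureReservesResultCode.END_SPONSORING_FUTURE_RESERVES_SUCCESS
)
encoded = success.to_xdr()  # base64-encoded XDR, safe to store or transmit
decoded = EndSponsoringFutureReservesResult.from_xdr(encoded)
assert decoded == success   # __eq__ compares the wrapped result codes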
from pyzenfolio3.exceptions import APIError

VALID_ENUM = {
    'AccessMask': [
        'None',
        'HideDateCreated',
        'HideDateModified',
        'HideDateTaken',
        'HideMetaData',
        'HideUserStats',
        'HideVisits',
        'NoCollections',
        'NoPrivateSearch',
        'NoPublicSearch',
        'NoRecentList',
        'ProtectExif',
        'ProtectXXLarge',
        'ProtectExtraLarge',
        'ProtectLarge',
        'ProtectMedium',
        'ProtectOriginals',
        'ProtectGuestbook',
        'NoPublicGuestbookPosts',
        'NoPrivateGuestbookPosts',
        'NoAnonymousGuestbookPosts',
        'ProtectComments',
        'NoPublicComments',
        'NoPrivateComments',
        'NoAnonymousComments',
        'PasswordProtectOriginals',
        'UnprotectCover',
        'ProtectAll',
    ],
    'AccessType': [
        'Private',
        'Public',
        'UserList',
        'Password',
    ],
    'InformationLevel': [
        'Level1',
        'Level2',
        'Full',
    ],
    'GroupShiftOrder': [
        'CreatedAsc',
        'CreatedDesc',
        'ModifiedAsc',
        'ModifiedDesc',
        'TitleAsc',
        'TitleDesc',
        'GroupsTop',
        'GroupsBottom',
    ],
    'PhotoSetType': [
        'Gallery',
        'Collection',
    ],
    'PhotoRotation': [
        'None',
        'Rotate90',
        'Rotate180',
        'Rotate270',
        'Flip',
        'Rotate90Flip',
        'Rotate180Flip',
        'Rotate270Flip',
    ],
    'ShiftOrder': [
        'CreatedAsc',
        'CreatedDesc',
        'TakenAsc',
        'TakenDesc',
        'TitleAsc',
        'TitleDesc',
        'SizeAsc',
        'SizeDesc',
        'FileNameAsc',
        'FileNameDesc',
    ],
    'SortOrder': [
        'Date',
        'Popularity',
        'Rank',
    ],
    'VideoPlaybackMode': [
        'Flash',
        'iOS',
        'Http',
    ],
}

VALID_OBJECTS = {
    'AccessUpdater': {
        'AccessMask': 'AccessMask',
        'AccessType': 'AccessType',
        'Viewers': None,
        'Password': None,
        'IsDerived': None,
        'PasswordHint': None,
    },
    'GroupUpdater': {
        'Title': None,
        'Caption': None,
        'CustomReference': None,
    },
    'PhotoSetUpdater': {
        'Title': None,
        'Caption': None,
        'Keywords': None,
        'Categories': None,
        'CustomReference': None,
    },
    'PhotoUpdater': {
        'Title': None,
        'Caption': None,
        'Keywords': None,
        'Categories': None,
        'Copyright': None,
        'Filename': None,
    },
    'MessageUpdater': {
        'PosterName': None,
        'PosterUrl': None,
        'PosterEmail': None,
        'Body': None,
        'IsPrivate': None,
    },
}


def assert_type(value, expected_type, param, method):
    if value['$type'] != expected_type:
        raise APIError(f"Got `{value['$type']}` instead of `{expected_type}` value for `{param}` for `{method}` method.")


def validate_value(value, data_struct, method):
    if value not in VALID_ENUM[data_struct]:
        raise APIError(f"`{value}` is an invalid value for `{data_struct}` enumeration for `{method}` method.")


def validate_object(value, data_struct, method):
    if not isinstance(value, dict):
        raise APIError(f"`{data_struct}` must be a dict for `{method}` method.")
    for k, v in value.items():
        if k not in VALID_OBJECTS[data_struct]:
            # report the offending key rather than the whole dict
            raise APIError(f"`{k}` is an invalid key for `{data_struct}` object for `{method}` method.")
        enum = VALID_OBJECTS[data_struct][k]
        if enum is not None:
            validate_value(v, enum, method)
from pyzenfolio3.exceptions import APIError VALID_ENUM = { 'AccessMask': [ 'None', 'HideDateCreated', 'HideDateModified', 'HideDateTaken', 'HideMetaData', 'HideUserStats', 'HideVisits', 'NoCollections', 'NoPrivateSearch', 'NoPublicSearch', 'NoRecentList', 'ProtectExif', 'ProtectXXLarge', 'ProtectExtraLarge', 'ProtectLarge', 'ProtectMedium', 'ProtectOriginals', 'ProtectGuestbook', 'NoPublicGuestbookPosts', 'NoPrivateGuestbookPosts', 'NoAnonymousGuestbookPosts', 'ProtectComments', 'NoPublicComments', 'NoPrivateComments', 'NoAnonymousComments', 'PasswordProtectOriginals', 'UnprotectCover', 'ProtectAll', ], 'AccessType': [ 'Private', 'Public', 'UserList', 'Password', ], 'InformationLevel': [ 'Level1', 'Level2', 'Full', ], 'GroupShiftOrder': [ 'CreatedAsc', 'CreatedDesc', 'ModifiedAsc', 'ModifiedDesc', 'TitleAsc', 'TitleDesc', 'GroupsTop', 'GroupsBottom', ], 'PhotoSetType': [ 'Gallery', 'Collection', ], 'PhotoRotation': [ 'None', 'Rotate90', 'Rotate180', 'Rotate270', 'Flip', 'Rotate90Flip', 'Rotate180Flip', 'Rotate270Flip', ], 'ShiftOrder': [ 'CreatedAsc', 'CreatedDesc', 'TakenAsc', 'TakenDesc', 'TitleAsc', 'TitleDesc', 'SizeAsc', 'SizeDesc', 'FileNameAsc', 'FileNameDesc', ], 'SortOrder': [ 'Date', 'Popularity', 'Rank', ], 'VideoPlaybackMode': [ 'Flash', 'iOS', 'Http', ], } VALID_OBJECTS = { 'AccessUpdater': { 'AccessMask': 'AccessMask', 'AccessType': 'AccessType', 'Viewers': None, 'Password': None, 'IsDerived': None, 'PasswordHint': None, }, 'GroupUpdater': { 'Title': None, 'Caption': None, 'CustomReference': None, }, 'PhotoSetUpdater': { 'Title': None, 'Caption': None, 'Keywords': None, 'Categories': None, 'CustomReference': None, }, 'PhotoUpdater': { 'Title': None, 'Caption': None, 'Keywords': None, 'Categories': None, 'Copyright': None, 'Filename': None, }, 'MessageUpdater': { 'PosterName': None, 'PosterUrl': None, 'PosterEmail': None, 'Body': None, 'IsPrivate': None, }, } def assert_type(value, expected_type, param, method): if value['$type'] != expected_type: raise APIError(f"Got `{value['$type']}` instead of `{expected_type}` value for `{param}` for `{method}` method.") def validate_value(value, data_struct, method): if value not in VALID_ENUM[data_struct]: raise APIError(f"`{value}` is an invalid value for `{data_struct}` enumeration for `{method}` method.") def validate_object(value, data_struct, method): if not isinstance(value, dict): raise APIError(f"`{data_struct}` must be a dict for `{method}` method.'") for k, v in value.items(): if k not in VALID_OBJECTS[data_struct]: raise APIError(f"`{value}` is an invalid key for `{data_struct}` object for `{method}` method.") enum = VALID_OBJECTS[data_struct][k] if enum is not None: validate_value(v, enum, method)
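# A small illustrative check (the method name 'UpdatePhotoSetAccess' is a
# hypothetical call site, not part of the module above); the values come straight
# from the VALID_ENUM and VALID_OBJECTS tables defined earlier.
validate_value('Public', 'AccessType', 'UpdatePhotoSetAccess')   # passes silently
validate_object({'AccessType': 'Public', 'Password': 's3cret'},
                'AccessUpdater', 'UpdatePhotoSetAccess')          # nested enum validated too
# validate_value('Everyone', 'AccessType', 'UpdatePhotoSetAccess') would raise APIError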
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands related to the \
    Information Superhighway (yes, Internet). """

from datetime import datetime

from speedtest import Speedtest
from telethon import functions

from userbot import CMD_HELP
from userbot.events import register
from userbot.utils import humanbytes


@register(outgoing=True, pattern=r"^\.speedtest$")
async def speedtst(spd):
    """For .speed command, use SpeedTest to check server speeds."""
    await spd.edit("`Running speed test...`")
    test = Speedtest()

    test.get_best_server()
    test.download()
    test.upload()
    test.results.share()
    result = test.results.dict()

    # dictionary lookups inside the f-string use single quotes so they don't
    # terminate the surrounding double-quoted string (a SyntaxError before 3.12)
    msg = (
        f"**Started at {result['timestamp']}**\n\n"
        "**Client**\n"
        f"**ISP :** `{result['client']['isp']}`\n"
        f"**Country :** `{result['client']['country']}`\n\n"
        "**Server**\n"
        f"**Name :** `{result['server']['name']}`\n"
        f"**Country :** `{result['server']['country']}`\n"
        f"**Sponsor :** `{result['server']['sponsor']}`\n\n"
        f"**Ping :** `{result['ping']}`\n"
        f"**Upload :** `{humanbytes(result['upload'])}/s`\n"
        f"**Download :** `{humanbytes(result['download'])}/s`"
    )
    await spd.delete()
    await spd.client.send_file(
        spd.chat_id,
        result["share"],
        caption=msg,
        force_document=False,
    )


@register(outgoing=True, pattern=r"^\.dc$")
async def neardc(event):
    """For .dc command, get the nearest datacenter information."""
    result = await event.client(functions.help.GetNearestDcRequest())
    await event.edit(
        f"Country : `{result.country}`\n"
        f"Nearest Datacenter : `{result.nearest_dc}`\n"
        f"This Datacenter : `{result.this_dc}`"
    )


@register(outgoing=True, pattern=r"^\.ping$")
async def pingme(pong):
    """For .ping command, ping the userbot from any chat."""
    start = datetime.now()
    await pong.edit("`bentar!`")
    end = datetime.now()
    duration = (end - start).microseconds / 1000
    await pong.edit("`waktuku bersamanya!\n%sms`" % (duration))


CMD_HELP.update(
    {
        "speedtest": ">`.speedtest`"
        "\nUsage: Does a speedtest and shows the results.",
        "dc": ">`.dc`"
        "\nUsage: Finds the nearest datacenter from your server.",
        "ping": ">`.ping`"
        "\nUsage: Shows how long it takes to ping your bot.",
    }
)
# Copyright (C) 2019 The Raphielscape Company LLC. # # Licensed under the Raphielscape Public License, Version 1.c (the "License"); # you may not use this file except in compliance with the License. # """ Userbot module containing commands related to the \ Information Superhighway (yes, Internet). """ from datetime import datetime from speedtest import Speedtest from telethon import functions from userbot import CMD_HELP from userbot.events import register from userbot.utils import humanbytes @register(outgoing=True, pattern=r"^\.speedtest$") async def speedtst(spd): """For .speed command, use SpeedTest to check server speeds.""" await spd.edit("`Running speed test...`") test = Speedtest() test.get_best_server() test.download() test.upload() test.results.share() result = test.results.dict() msg = ( f"**Started at {result['timestamp']}**\n\n" "**Client**\n" f"**ISP :** `{result['client']['isp']}`\n" f"**Country :** `{result['client']['country']}`\n\n" "**Server**\n" f"**Name :** `{result['server']['name']}`\n" f"**Country :** `{result['server']['country']}`\n" f"**Sponsor :** `{result['server']['sponsor']}`\n\n" f"**Ping :** `{result['ping']}`\n" f"**Upload :** `{humanbytes(result['upload'])}/s`\n" f"**Download :** `{humanbytes(result['download'])}/s`" ) await spd.delete() await spd.client.send_file( spd.chat_id, result["share"], caption=msg, force_document=False, ) @register(outgoing=True, pattern=r"^\.dc$") async def neardc(event): """For .dc command, get the nearest datacenter information.""" result = await event.client(functions.help.GetNearestDcRequest()) await event.edit( f"Country : `{result.country}`\n" f"Nearest Datacenter : `{result.nearest_dc}`\n" f"This Datacenter : `{result.this_dc}`" ) @register(outgoing=True, pattern=r"^\.ping$") async def pingme(pong): """For .ping command, ping the userbot from any chat.""" start = datetime.now() await pong.edit("`bentar!`") end = datetime.now() duration = (end - start).microseconds / 1000 await pong.edit("`waktuku bersamanya!\n%sms`" % (duration)) CMD_HELP.update( { "speedtest": ">`.speedtest`" "\nUsage: Does a speedtest and shows the results.", "dc": ">`.dc`" "\nUsage: Finds the nearest datacenter from your server.", "ping": ">`.ping`" "\nUsage: Shows how long it takes to ping your bot.", } )
from __future__ import annotations import logging import os import subprocess import tempfile import textwrap import threading from logging import Logger from pathlib import Path from typing import TYPE_CHECKING, Any, Iterable, Mapping import tomli from pep517.wrappers import Pep517HookCaller from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet from pdm.exceptions import BuildError from pdm.models.in_process import get_sys_config_paths from pdm.termui import logger from pdm.utils import create_tracked_tempdir, prepare_pip_source_args if TYPE_CHECKING: from pdm.models.environment import Environment class LoggerWrapper(threading.Thread): """ Read messages from a pipe and redirect them to a logger (see python's logging module). """ def __init__(self, logger: Logger, level: int) -> None: super().__init__() self.daemon = True self.logger = logger self.level = level # create the pipe and reader self.fd_read, self.fd_write = os.pipe() self.reader = os.fdopen(self.fd_read) self.start() def fileno(self) -> int: return self.fd_write @staticmethod def remove_newline(msg: str) -> str: return msg[:-1] if msg.endswith("\n") else msg def run(self) -> None: try: for line in self.reader: self._write(self.remove_newline(line)) finally: self.reader.close() def _write(self, message: str) -> None: self.logger.log(self.level, message) def stop(self) -> None: os.close(self.fd_write) self.join() def log_subprocessor( cmd: list[str], cwd: str | Path | None = None, extra_environ: dict[str, str] | None = None, ) -> None: env = os.environ.copy() if extra_environ: env.update(extra_environ) outstream = LoggerWrapper(logger, logging.DEBUG) try: subprocess.check_call( cmd, cwd=cwd, env=env, stdout=outstream.fileno(), stderr=subprocess.STDOUT, ) except subprocess.CalledProcessError: raise BuildError(f"Call command {cmd} return non-zero status.") finally: outstream.stop() class _Prefix: def __init__(self, executable: str, shared: str, overlay: str) -> None: self.bin_dirs: list[str] = [] self.lib_dirs: list[str] = [] for path in (overlay, shared): paths = get_sys_config_paths( executable, vars={"base": path, "platbase": path} ) self.bin_dirs.append(paths["scripts"]) self.lib_dirs.extend([paths["platlib"], paths["purelib"]]) self.site_dir = os.path.join(path, "site") if not os.path.isdir(self.site_dir): os.makedirs(self.site_dir) with open(os.path.join(self.site_dir, "sitecustomize.py"), "w") as fp: fp.write( textwrap.dedent( f""" import sys, os, site original_sys_path = sys.path[:] known_paths = set() site.addusersitepackages(known_paths) site.addsitepackages(known_paths) known_paths = {{os.path.normcase(p) for p in known_paths}} original_sys_path = [ p for p in original_sys_path if os.path.normcase(p) not in known_paths] sys.path[:] = original_sys_path for lib_path in {self.lib_dirs!r}: site.addsitedir(lib_path) """ ) ) self.shared = shared self.overlay = overlay class EnvBuilder: """A simple PEP 517 builder for an isolated environment""" _shared_envs: dict[int, str] = {} _overlay_envs: dict[str, str] = {} DEFAULT_BACKEND = { "build-backend": "setuptools.build_meta:__legacy__", "requires": ["setuptools >= 40.8.0", "wheel"], } @classmethod def get_shared_env(cls, key: int) -> str: if key in cls._shared_envs: logger.debug("Reusing shared build env: %s", cls._shared_envs[key]) return cls._shared_envs[key] # Postpone the cache after installation is done return create_tracked_tempdir("-shared", "pdm-build-env-") @classmethod def get_overlay_env(cls, key: str) -> str: if key not in cls._overlay_envs: 
            cls._overlay_envs[key] = create_tracked_tempdir(
                "-overlay", "pdm-build-env-"
            )
        return cls._overlay_envs[key]

    def __init__(self, src_dir: str | Path, environment: Environment) -> None:
        self._env = environment
        self.executable = self._env.interpreter.executable
        self.src_dir = src_dir
        logger.debug("Preparing isolated env for PEP 517 build...")
        try:
            with open(os.path.join(src_dir, "pyproject.toml"), "rb") as f:
                spec = tomli.load(f)
        except FileNotFoundError:
            spec = {}
        except Exception as e:
            raise BuildError(e) from e
        self._build_system = spec.get("build-system", self.DEFAULT_BACKEND)

        if "build-backend" not in self._build_system:
            self._build_system["build-backend"] = self.DEFAULT_BACKEND["build-backend"]
        if "requires" not in self._build_system:
            raise BuildError("Missing 'build-system.requires' in pyproject.toml")

        self._backend = self._build_system["build-backend"]

        self._prefix = _Prefix(
            self.executable,
            shared=self.get_shared_env(hash(frozenset(self._build_system["requires"]))),
            overlay=self.get_overlay_env(os.path.normcase(self.src_dir).rstrip("\\/")),
        )

        self._hook = Pep517HookCaller(
            src_dir,
            self._backend,
            backend_path=self._build_system.get("backend-path"),
            runner=self.subprocess_runner,
            python_executable=self.executable,
        )

    @property
    def _env_vars(self) -> dict[str, str]:
        paths = self._prefix.bin_dirs
        if "PATH" in os.environ:
            paths.append(os.getenv("PATH", ""))
        return {
            "PYTHONPATH": self._prefix.site_dir,
            "PATH": os.pathsep.join(paths),
            "PYTHONNOUSERSITE": "1",
        }

    def subprocess_runner(
        self,
        cmd: list[str],
        cwd: str | Path | None = None,
        extra_environ: dict[str, str] | None = None,
        isolated: bool = True,
    ) -> None:
        env = self._env_vars.copy() if isolated else {}
        if extra_environ:
            env.update(extra_environ)
        return log_subprocessor(cmd, cwd, extra_environ=env)

    def check_requirements(self, reqs: Iterable[str]) -> Iterable[str]:
        missing = set()
        conflicting = set()
        if reqs:
            ws = WorkingSet(self._prefix.lib_dirs)
            for req in reqs:
                try:
                    if ws.find(Requirement.parse(req)) is None:
                        missing.add(req)
                except VersionConflict:
                    conflicting.add(req)
        if conflicting:
            # the join's quotes must differ from the f-string's own quotes (pre-3.12)
            raise BuildError(f"Conflicting requirements: {', '.join(conflicting)}")
        return missing

    def install(self, requirements: Iterable[str], shared: bool = False) -> None:
        missing = self.check_requirements(requirements)
        if not missing:
            return
        path = self._prefix.shared if shared else self._prefix.overlay

        with tempfile.NamedTemporaryFile(
            "w+", prefix="pdm-build-reqs-", suffix=".txt", delete=False
        ) as req_file:
            req_file.write(os.linesep.join(missing))
            req_file.close()
            cmd = self._env.pip_command + [
                "install",
                "--ignore-installed",
                "--prefix",
                path,
            ]
            cmd.extend(prepare_pip_source_args(self._env.project.sources))
            cmd.extend(["-r", req_file.name])
            self.subprocess_runner(cmd, isolated=False)
            os.unlink(req_file.name)

        if shared:
            # The shared env is prepared and is safe to be cached now. This is to make
            # sure no broken env is returned when run in parallel mode.
            key = hash(frozenset(requirements))
            if key not in self._shared_envs:
                self._shared_envs[key] = path

    def prepare_metadata(
        self, out_dir: str, config_settings: Mapping[str, Any] | None = None
    ) -> str:
        """Prepare metadata and store in the out_dir.
        Some backends don't provide that API; in that case the metadata will be
        retrieved from the built result.
""" raise NotImplementedError("Should be implemented in subclass") def build( self, out_dir: str, config_settings: Mapping[str, Any] | None = None, metadata_directory: str | None = None, ) -> str: """Build and store the artifact in out_dir, return the absolute path of the built result. """ raise NotImplementedError("Should be implemented in subclass")
from __future__ import annotations import logging import os import subprocess import tempfile import textwrap import threading from logging import Logger from pathlib import Path from typing import TYPE_CHECKING, Any, Iterable, Mapping import tomli from pep517.wrappers import Pep517HookCaller from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet from pdm.exceptions import BuildError from pdm.models.in_process import get_sys_config_paths from pdm.termui import logger from pdm.utils import create_tracked_tempdir, prepare_pip_source_args if TYPE_CHECKING: from pdm.models.environment import Environment class LoggerWrapper(threading.Thread): """ Read messages from a pipe and redirect them to a logger (see python's logging module). """ def __init__(self, logger: Logger, level: int) -> None: super().__init__() self.daemon = True self.logger = logger self.level = level # create the pipe and reader self.fd_read, self.fd_write = os.pipe() self.reader = os.fdopen(self.fd_read) self.start() def fileno(self) -> int: return self.fd_write @staticmethod def remove_newline(msg: str) -> str: return msg[:-1] if msg.endswith("\n") else msg def run(self) -> None: try: for line in self.reader: self._write(self.remove_newline(line)) finally: self.reader.close() def _write(self, message: str) -> None: self.logger.log(self.level, message) def stop(self) -> None: os.close(self.fd_write) self.join() def log_subprocessor( cmd: list[str], cwd: str | Path | None = None, extra_environ: dict[str, str] | None = None, ) -> None: env = os.environ.copy() if extra_environ: env.update(extra_environ) outstream = LoggerWrapper(logger, logging.DEBUG) try: subprocess.check_call( cmd, cwd=cwd, env=env, stdout=outstream.fileno(), stderr=subprocess.STDOUT, ) except subprocess.CalledProcessError: raise BuildError(f"Call command {cmd} return non-zero status.") finally: outstream.stop() class _Prefix: def __init__(self, executable: str, shared: str, overlay: str) -> None: self.bin_dirs: list[str] = [] self.lib_dirs: list[str] = [] for path in (overlay, shared): paths = get_sys_config_paths( executable, vars={"base": path, "platbase": path} ) self.bin_dirs.append(paths["scripts"]) self.lib_dirs.extend([paths["platlib"], paths["purelib"]]) self.site_dir = os.path.join(path, "site") if not os.path.isdir(self.site_dir): os.makedirs(self.site_dir) with open(os.path.join(self.site_dir, "sitecustomize.py"), "w") as fp: fp.write( textwrap.dedent( f""" import sys, os, site original_sys_path = sys.path[:] known_paths = set() site.addusersitepackages(known_paths) site.addsitepackages(known_paths) known_paths = {{os.path.normcase(p) for p in known_paths}} original_sys_path = [ p for p in original_sys_path if os.path.normcase(p) not in known_paths] sys.path[:] = original_sys_path for lib_path in {self.lib_dirs!r}: site.addsitedir(lib_path) """ ) ) self.shared = shared self.overlay = overlay class EnvBuilder: """A simple PEP 517 builder for an isolated environment""" _shared_envs: dict[int, str] = {} _overlay_envs: dict[str, str] = {} DEFAULT_BACKEND = { "build-backend": "setuptools.build_meta:__legacy__", "requires": ["setuptools >= 40.8.0", "wheel"], } @classmethod def get_shared_env(cls, key: int) -> str: if key in cls._shared_envs: logger.debug("Reusing shared build env: %s", cls._shared_envs[key]) return cls._shared_envs[key] # Postpone the cache after installation is done return create_tracked_tempdir("-shared", "pdm-build-env-") @classmethod def get_overlay_env(cls, key: str) -> str: if key not in cls._overlay_envs: 
cls._overlay_envs[key] = create_tracked_tempdir( "-overlay", "pdm-build-env-" ) return cls._overlay_envs[key] def __init__(self, src_dir: str | Path, environment: Environment) -> None: self._env = environment self.executable = self._env.interpreter.executable self.src_dir = src_dir logger.debug("Preparing isolated env for PEP 517 build...") try: with open(os.path.join(src_dir, "pyproject.toml"), "rb") as f: spec = tomli.load(f) except FileNotFoundError: spec = {} except Exception as e: raise BuildError(e) from e self._build_system = spec.get("build-system", self.DEFAULT_BACKEND) if "build-backend" not in self._build_system: self._build_system["build-backend"] = self.DEFAULT_BACKEND["build-backend"] if "requires" not in self._build_system: raise BuildError("Missing 'build-system.requires' in pyproject.toml") self._backend = self._build_system["build-backend"] self._prefix = _Prefix( self.executable, shared=self.get_shared_env(hash(frozenset(self._build_system["requires"]))), overlay=self.get_overlay_env(os.path.normcase(self.src_dir).rstrip("\\/")), ) self._hook = Pep517HookCaller( src_dir, self._backend, backend_path=self._build_system.get("backend-path"), runner=self.subprocess_runner, python_executable=self.executable, ) @property def _env_vars(self) -> dict[str, str]: paths = self._prefix.bin_dirs if "PATH" in os.environ: paths.append(os.getenv("PATH", "")) return { "PYTHONPATH": self._prefix.site_dir, "PATH": os.pathsep.join(paths), "PYTHONNOUSERSITE": "1", } def subprocess_runner( self, cmd: list[str], cwd: str | Path | None = None, extra_environ: dict[str, str] | None = None, isolated: bool = True, ) -> None: env = self._env_vars.copy() if isolated else {} if extra_environ: env.update(extra_environ) return log_subprocessor(cmd, cwd, extra_environ=env) def check_requirements(self, reqs: Iterable[str]) -> Iterable[str]: missing = set() conflicting = set() if reqs: ws = WorkingSet(self._prefix.lib_dirs) for req in reqs: try: if ws.find(Requirement.parse(req)) is None: missing.add(req) except VersionConflict: conflicting.add(req) if conflicting: raise BuildError(f"Conflicting requirements: {', '.join(conflicting)}") return missing def install(self, requirements: Iterable[str], shared: bool = False) -> None: missing = self.check_requirements(requirements) if not missing: return path = self._prefix.shared if shared else self._prefix.overlay with tempfile.NamedTemporaryFile( "w+", prefix="pdm-build-reqs-", suffix=".txt", delete=False ) as req_file: req_file.write(os.linesep.join(missing)) req_file.close() cmd = self._env.pip_command + [ "install", "--ignore-installed", "--prefix", path, ] cmd.extend(prepare_pip_source_args(self._env.project.sources)) cmd.extend(["-r", req_file.name]) self.subprocess_runner(cmd, isolated=False) os.unlink(req_file.name) if shared: # The shared env is prepared and is safe to be cached now. This is to make # sure no broken env is returned when run in parallel mode. key = hash(frozenset(requirements)) if key not in self._shared_envs: self._shared_envs[key] = path def prepare_metadata( self, out_dir: str, config_settings: Mapping[str, Any] | None = None ) -> str: """Prepare metadata and store in the out_dir. Some backends doesn't provide that API, in that case the metadata will be retreived from the built result. 
""" raise NotImplementedError("Should be implemented in subclass") def build( self, out_dir: str, config_settings: Mapping[str, Any] | None = None, metadata_directory: str | None = None, ) -> str: """Build and store the artifact in out_dir, return the absolute path of the built result. """ raise NotImplementedError("Should be implemented in subclass")
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[receive_message.py demonstrates how to retrieve messages from an Amazon SQS queue.]
# snippet-service:[sqs]
# snippet-keyword:[Amazon Simple Queue Service (Amazon SQS)]
# snippet-keyword:[Python]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[snippet]
# snippet-sourcedate:[2019-04-29]
# snippet-sourceauthor:[AWS]

# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

import logging
import boto3
from botocore.exceptions import ClientError


def retrieve_sqs_messages(sqs_queue_url, num_msgs=1, wait_time=0, visibility_time=5):
    """Retrieve messages from an SQS queue

    The retrieved messages are not deleted from the queue.

    :param sqs_queue_url: String URL of existing SQS queue
    :param num_msgs: Number of messages to retrieve (1-10)
    :param wait_time: Number of seconds to wait if no messages in queue
    :param visibility_time: Number of seconds to make retrieved messages
        hidden from subsequent retrieval requests
    :return: List of retrieved messages. If no messages are available, returned
        list is empty. If error, returns None.
    """

    # Validate number of messages to retrieve
    if num_msgs < 1:
        num_msgs = 1
    elif num_msgs > 10:
        num_msgs = 10

    # Retrieve messages from an SQS queue
    sqs_client = boto3.client('sqs')
    try:
        msgs = sqs_client.receive_message(QueueUrl=sqs_queue_url,
                                          MaxNumberOfMessages=num_msgs,
                                          WaitTimeSeconds=wait_time,
                                          VisibilityTimeout=visibility_time)
    except ClientError as e:
        logging.error(e)
        return None

    # receive_message omits the 'Messages' key entirely when the queue is empty,
    # so default to an empty list to match the docstring
    return msgs.get('Messages', [])


def delete_sqs_message(sqs_queue_url, msg_receipt_handle):
    """Delete a message from an SQS queue

    :param sqs_queue_url: String URL of existing SQS queue
    :param msg_receipt_handle: Receipt handle value of retrieved message
    """

    # Delete the message from the SQS queue
    sqs_client = boto3.client('sqs')
    sqs_client.delete_message(QueueUrl=sqs_queue_url,
                              ReceiptHandle=msg_receipt_handle)


def main():
    """Exercise retrieve_sqs_messages()"""

    # Assign this value before running the program
    sqs_queue_url = 'SQS_QUEUE_URL'
    num_messages = 2

    # Set up logging
    logging.basicConfig(level=logging.DEBUG,
                        format='%(levelname)s: %(asctime)s: %(message)s')

    # Retrieve SQS messages
    msgs = retrieve_sqs_messages(sqs_queue_url, num_messages)
    if msgs is not None:
        for msg in msgs:
            logging.info(f'SQS: Message ID: {msg["MessageId"]}, '
                         f'Contents: {msg["Body"]}')

            # Remove the message from the queue
            delete_sqs_message(sqs_queue_url, msg['ReceiptHandle'])


if __name__ == '__main__':
    main()
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.] # snippet-sourcedescription:[receive_message.py demonstrates how to retrieve messages from an Amazon SQS queue.] # snippet-service:[sqs] # snippet-keyword:[Amazon Simple Queue Service (Amazon SQS)] # snippet-keyword:[Python] # snippet-keyword:[Code Sample] # snippet-sourcetype:[snippet] # snippet-sourcedate:[2019-04-29] # snippet-sourceauthor:[AWS] # Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging import boto3 from botocore.exceptions import ClientError def retrieve_sqs_messages(sqs_queue_url, num_msgs=1, wait_time=0, visibility_time=5): """Retrieve messages from an SQS queue The retrieved messages are not deleted from the queue. :param sqs_queue_url: String URL of existing SQS queue :param num_msgs: Number of messages to retrieve (1-10) :param wait_time: Number of seconds to wait if no messages in queue :param visibility_time: Number of seconds to make retrieved messages hidden from subsequent retrieval requests :return: List of retrieved messages. If no messages are available, returned list is empty. If error, returns None. """ # Validate number of messages to retrieve if num_msgs < 1: num_msgs = 1 elif num_msgs > 10: num_msgs = 10 # Retrieve messages from an SQS queue sqs_client = boto3.client('sqs') try: msgs = sqs_client.receive_message(QueueUrl=sqs_queue_url, MaxNumberOfMessages=num_msgs, WaitTimeSeconds=wait_time, VisibilityTimeout=visibility_time) except ClientError as e: logging.error(e) return None # Return the list of retrieved messages return msgs['Messages'] def delete_sqs_message(sqs_queue_url, msg_receipt_handle): """Delete a message from an SQS queue :param sqs_queue_url: String URL of existing SQS queue :param msg_receipt_handle: Receipt handle value of retrieved message """ # Delete the message from the SQS queue sqs_client = boto3.client('sqs') sqs_client.delete_message(QueueUrl=sqs_queue_url, ReceiptHandle=msg_receipt_handle) def main(): """Exercise retrieve_sqs_messages()""" # Assign this value before running the program sqs_queue_url = 'SQS_QUEUE_URL' num_messages = 2 # Set up logging logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(asctime)s: %(message)s') # Retrieve SQS messages msgs = retrieve_sqs_messages(sqs_queue_url, num_messages) if msgs is not None: for msg in msgs: logging.info(f'SQS: Message ID: {msg["MessageId"]}, ' f'Contents: {msg["Body"]}') # Remove the message from the queue delete_sqs_message(sqs_queue_url, msg['ReceiptHandle']) if __name__ == '__main__': main()
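# A hedged usage sketch (the queue URL is a placeholder): wait_time > 0 turns on SQS
# long polling, so the call blocks for up to 20 seconds instead of returning
# immediately when the queue is empty, and visibility_time hides each retrieved
# message from other consumers for 30 seconds while it is being processed.
msgs = retrieve_sqs_messages('https://sqs.us-east-1.amazonaws.com/123456789012/my-queue',
                             num_msgs=10, wait_time=20, visibility_time=30)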
# Copyright 2020 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from autoai_ts_libs.sklearn.mvp_windowed_transformed_target_estimators import ( # type: ignore # noqa AutoaiWindowTransformedTargetRegressor as model_to_be_wrapped, ) from sklearn.pipeline import Pipeline, make_pipeline import lale.docstrings import lale.operators class _AutoaiWindowTransformedTargetRegressorImpl: def __init__( self, regressor=None, lookback_window=10, prediction_horizon=1, scaling_func=None, inverse_scaling_func=None, check_inverse=False, short_name="", one_shot=False, row_mean_center=False, estimator_prediction_type="forecast", ): if isinstance(regressor, lale.operators.TrainableIndividualOp): nested_op = make_pipeline(regressor._impl._wrapped_model) elif isinstance(regressor, lale.operators.BasePipeline): nested_op = regressor.export_to_sklearn_pipeline() elif isinstance(regressor, Pipeline): nested_op = regressor else: # TODO: What is the best way to handle this case? nested_op = None self._hyperparams = { "regressor": nested_op, "lookback_window": lookback_window, "prediction_horizon": prediction_horizon, "scaling_func": scaling_func, "inverse_scaling_func": inverse_scaling_func, "check_inverse": check_inverse, "short_name": short_name, "one_shot": one_shot, "row_mean_center": row_mean_center, "estimator_prediction_type": estimator_prediction_type, } self._wrapped_model = model_to_be_wrapped(**self._hyperparams) def fit(self, X, y): self._wrapped_model.fit(X, y) return self def predict(self, X=None, prediction_type=None, **predict_params): return self._wrapped_model.predict(X, prediction_type, **predict_params) _hyperparams_schema = { "allOf": [ { "description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.", "type": "object", "additionalProperties": False, "required": [ "regressor", "lookback_window", "prediction_horizon", "scaling_func", "inverse_scaling_func", "check_inverse", "short_name", "one_shot", "row_mean_center", "estimator_prediction_type", ], "relevantToOptimizer": ["lookback_window"], "properties": { "regressor": { "description": """Regressor object. This regressor will automatically be cloned each time prior to fitting. If regressor is None, LinearRegression() is created and used.""", "laleType": "operator", "default": None, }, "lookback_window": { "description": "The number of time points in the window of data to use as predictors in the estimator.", "type": "integer", "default": 10, }, "prediction_horizon": { "description": "The number of time points to predict into the future. The estimator(s) will be trained to predict all of these time points.", "type": "integer", "default": 1, }, "scaling_func": { "description": """(deprecated) Function to apply to y before passing to fit. The function needs to return a 2-dimensional array. 
If func is None, the function used will be the identity function.""", "laleType": "Any", "default": None, }, "inverse_scaling_func": { "description": """(deprecated) Function to apply to the prediction of the regressor. The function needs to return a 2-dimensional array. The inverse function is used to return predictions to the same space of the original training labels.""", "laleType": "Any", "default": None, }, "check_inverse": { "description": """Whether to check that transform followed by inverse_transform or func followed by inverse_func leads to the original targets.""", "type": "boolean", "default": False, }, "short_name": { "description": "Short name to be used for this estimator.", "type": "string", "default": "", }, "one_shot": { "description": "(deprecated)", "anyOf": [{"type": "boolean"}, {"enum": [None]}], "default": False, }, "row_mean_center": { "description": "Whether to apply the row mean center transformation to the data. If true, windows of the data according to lookback_window aree created and then those rows are normalized.", "type": "boolean", "default": False, }, "estimator_prediction_type": { "description": "Defines what predictions are returned by the predict functionality. Forecast: only generate predictions for the out of sample data, i.e., for the prediction window immediately following the input data. Rowwise: make predictions for each time point in the input data.", "enum": [ "forecast", "rowwise", ], "default": "forecast", }, }, } ] } _input_fit_schema = { "type": "object", "required": ["X", "y"], "additionalProperties": False, "properties": { "X": { # Handles 1-D arrays as well "anyOf": [ {"type": "array", "items": {"laleType": "Any"}}, { "type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}, }, ] }, "y": {"laleType": "Any"}, }, } _input_predict_schema = { "type": "object", "required": ["X"], "additionalProperties": False, "properties": { "X": { # Handles 1-D arrays as well "anyOf": [ {"type": "array", "items": {"laleType": "Any"}}, { "type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}, }, ] }, "prediction_type": { "enum": [ "forecast", "rowwise", ] }, }, } _output_predict_schema = { "description": "Features; the outer array is over samples.", "anyOf": [ {"type": "array", "items": {"laleType": "Any"}}, {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}, ], } _combined_schemas = { "$schema": "http://json-schema.org/draft-04/schema#", "description": """Operator from `autoai_ts_libs`_. .. _`autoai_ts_libs`: https://pypi.org/project/autoai-ts-libs""", "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_ts_libs.autoai_window_transformed_target_regressor.html", "import_from": "autoai_ts_libs.sklearn.mvp_windowed_transformed_target_estimators", "type": "object", "tags": {"pre": [], "op": ["estimator", "regressor"], "post": []}, "properties": { "hyperparams": _hyperparams_schema, "input_fit": _input_fit_schema, "input_predict": _input_predict_schema, "output_predict": _output_predict_schema, }, } AutoaiWindowTransformedTargetRegressor = lale.operators.make_operator( _AutoaiWindowTransformedTargetRegressorImpl, _combined_schemas ) lale.docstrings.set_docstrings(AutoaiWindowTransformedTargetRegressor)
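# A minimal construction sketch; actually fitting requires autoai_ts_libs and real
# time-series data, so only the assumed wiring is shown here. The hyperparameter
# names and defaults come from the schema above; lale operators accept them as
# keyword arguments and expose the usual fit/predict surface.
op = AutoaiWindowTransformedTargetRegressor(
    lookback_window=10,                    # past points used as predictors
    prediction_horizon=2,                  # points predicted into the future
    estimator_prediction_type="forecast",  # predict only beyond the input window
)
# trained = op.fit(X, y); trained.predict(X) would then follow.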
"""Make / Download Telegram Sticker Packs without installing Third Party applications Available Commands: .kang [Optional Emoji] .packinfo .loda {for get stickers in a zip file}""" from telethon import events from io import BytesIO from PIL import Image import asyncio import datetime from collections import defaultdict import math import os import random import requests import zipfile from telethon.errors.rpcerrorlist import StickersetInvalidError from telethon.errors import MessageNotModifiedError from telethon.tl.functions.account import UpdateNotifySettingsRequest from telethon.tl.functions.messages import GetStickerSetRequest from telethon.tl.types import ( DocumentAttributeFilename, DocumentAttributeSticker, InputMediaUploadedDocument, InputPeerNotifySettings, InputStickerSetID, InputStickerSetShortName, MessageMediaPhoto ) from uniborg.util import admin_cmd KANGING_STR = [ "`Using Witchery to kang this sticker...`", "`Plagiarising hehe...`", "`Aaham Brahmassami................`", "`Inviting this sticker over to my pack...`", "`Kanging this sticker...`", "`Hey that's a nice sticker!\nMind if I kang?!..`", "`hehe me stel ur stikér\nhehe.`", "`Ay look over there (☉。☉)!→\nWhile I kang this...`", "`Roses are red violets are blue, kanging this sticker so my pacc looks cool`", "`Imprisoning this sticker...`", "`Mr.Steal Your Sticker is stealing this sticker...`", "`I am Stealing your Sticker.....\nGand Marao...`", "Why u bullin me.....\nರ╭╮ರ`", "`BOOM.... HEADSHOT...\n(ノಠ益ಠ)ノ...\n(⌐■-■)`", "`Me is having sux with ur GF....\nU can't du nthing...Hehe..\nಠ∀ಠ...(≧▽≦)`", "`Aise tukur tukur kahe Dekh raha hain`", ] @borg.on(admin_cmd(pattern="kang ?(.*)")) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to a photo to add to my personal sticker pack.") return reply_message = await event.get_reply_message() sticker_emoji = "🍆" input_str = event.pattern_match.group(1) if input_str: sticker_emoji = input_str me = borg.me name = me.username userid = event.from_id packname = f"Babyツ @Baby_xD" packshortname = f"Darling_BabyxD{userid}" # format: Uni_Borg_userid is_a_s = is_it_animated_sticker(reply_message) file_ext_ns_ion = "chutiya_Sticker.png" file = await borg.download_file(reply_message.media) uploaded_sticker = None if is_a_s: file_ext_ns_ion = "AnimatedSticker.tgs" uploaded_sticker = await borg.upload_file(file, file_name=file_ext_ns_ion) packname = f"nikal_lawde_AnimatedStickers" packshortname = f"kirito6969_Animated" # format: Uni_Borg_userid elif not is_message_image(reply_message): await event.edit("Invalid message type") return else: with BytesIO(file) as mem_file, BytesIO() as sticker: resize_image(mem_file, sticker) sticker.seek(0) uploaded_sticker = await borg.upload_file(sticker, file_name=file_ext_ns_ion) await event.edit(random.choice(KANGING_STR)) async with borg.conversation("@Stickers") as bot_conv: now = datetime.datetime.now() dt = now + datetime.timedelta(minutes=1) if not await stickerset_exists(bot_conv, packshortname): await silently_send_message(bot_conv, "/cancel") if is_a_s: response = await silently_send_message(bot_conv, "/newanimated") else: response = await silently_send_message(bot_conv, "/newpack") if "Yay!" not in response.text: await event.edit(f"**FAILED**! @Stickers replied: {response.text}") return response = await silently_send_message(bot_conv, packname) if not response.text.startswith("Alright!"): await event.edit(f"**FAILED**! 
@Stickers replied: {response.text}")
                return
            w = await bot_conv.send_file(
                file=uploaded_sticker,
                allow_cache=False,
                force_document=True
            )
            response = await bot_conv.get_response()
            if "Sorry" in response.text:
                await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
                return
            await silently_send_message(bot_conv, sticker_emoji)
            await silently_send_message(bot_conv, "/publish")
            response = await silently_send_message(bot_conv, f"<{packname}>")
            await silently_send_message(bot_conv, "/skip")
            response = await silently_send_message(bot_conv, packshortname)
            if response.text == "Sorry, this short name is already taken.":
                await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
                return
        else:
            await silently_send_message(bot_conv, "/cancel")
            await silently_send_message(bot_conv, "/addsticker")
            await silently_send_message(bot_conv, packshortname)
            await bot_conv.send_file(
                file=uploaded_sticker,
                allow_cache=False,
                force_document=True
            )
            response = await bot_conv.get_response()
            if "Sorry" in response.text:
                await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
                return
            await silently_send_message(bot_conv, sticker_emoji)
            await silently_send_message(bot_conv, "/done")
    await event.edit(f"`This Sticker Is Raped! Plox Help this Sticker by Clicking` [HERE](t.me/addstickers/{packshortname})")


@borg.on(admin_cmd(pattern="packinfo"))
async def _(event):
    if event.fwd_from:
        return
    if not event.is_reply:
        await event.edit("Reply to any sticker to get its pack info.")
        return
    rep_msg = await event.get_reply_message()
    if not rep_msg.document:
        await event.edit("Reply to any sticker to get its pack info.")
        return
    stickerset_attr_s = rep_msg.document.attributes
    stickerset_attr = find_instance(stickerset_attr_s, DocumentAttributeSticker)
    if not stickerset_attr.stickerset:
        await event.edit("This sticker does not belong to a pack.")
        return
    get_stickerset = await borg(
        GetStickerSetRequest(
            InputStickerSetID(
                id=stickerset_attr.stickerset.id,
                access_hash=stickerset_attr.stickerset.access_hash
            )
        )
    )
    pack_emojis = []
    for document_sticker in get_stickerset.packs:
        if document_sticker.emoticon not in pack_emojis:
            pack_emojis.append(document_sticker.emoticon)
    # the join below uses single quotes so it doesn't close the f-string (pre-3.12)
    await event.edit(f"**Sticker Title:** `{get_stickerset.set.title}\n`"
                     f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n"
                     f"**Official:** `{get_stickerset.set.official}`\n"
                     f"**Archived:** `{get_stickerset.set.archived}`\n"
                     f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n"
                     f"**Emojis In Pack:** {' '.join(pack_emojis)}")


@borg.on(admin_cmd(pattern="loda ?(.*)"))
async def _(event):
    if event.fwd_from:
        return
    input_str = event.pattern_match.group(1)
    if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
        os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
    if event.reply_to_msg_id:
        reply_message = await event.get_reply_message()
        # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
        if not reply_message.sticker:
            return
        sticker = reply_message.sticker
        sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker)
        if not sticker_attrib.stickerset:
            await event.reply("This sticker is not part of a pack")
            return
        is_a_s = is_it_animated_sticker(reply_message)
        file_ext_ns_ion = "webp"
        file_caption = "`You are my Nigga`"
        if is_a_s:
            file_ext_ns_ion = "tgs"
            file_caption = "Forward the ZIP file to @AnimatedStickersRoBot to get Lottie JSON containing the vector information."
        sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset))
        pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, sticker_set.set.short_name, "pack.txt")
        if os.path.isfile(pack_file):
            os.remove(pack_file)
        # Sticker emojis are retrieved as a mapping of
        # <emoji>: <list of document ids that have this emoji>
        # So we need to build a mapping of <document id>: <list of emoji>
        # Thanks, Durov
        emojis = defaultdict(str)
        for pack in sticker_set.packs:
            for document_id in pack.documents:
                emojis[document_id] += pack.emoticon

        async def download(sticker, emojis, path, file):
            await borg.download_media(sticker, file=os.path.join(path, file))
            with open(pack_file, "a") as f:
                # write one dict-style entry per sticker; single quotes inside the
                # replacement fields keep the surrounding f-string intact
                f.write(f"{{'image_file': '{file}', 'emojis': '{emojis[sticker.id]}'}},")

        pending_tasks = [
            asyncio.ensure_future(
                download(document, emojis,
                         Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name,
                         f"{i:03d}.{file_ext_ns_ion}")
            ) for i, document in enumerate(sticker_set.documents)
        ]
        await event.edit(f"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}...")
        num_tasks = len(pending_tasks)
        while 1:
            done, pending_tasks = await asyncio.wait(pending_tasks,
                                                     timeout=2.5,
                                                     return_when=asyncio.FIRST_COMPLETED)
            try:
                await event.edit(
                    f"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}")
            except MessageNotModifiedError:
                pass
            if not pending_tasks:
                break
        await event.edit("Downloading to my local completed")
        # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
        directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name
        zipf = zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED)
        zipdir(directory_name, zipf)
        zipf.close()
        await borg.send_file(
            event.chat_id,
            directory_name + ".zip",
            caption=file_caption,
            force_document=True,
            allow_cache=False,
            reply_to=event.message.id,
            progress_callback=progress
        )
        try:
            os.remove(directory_name + ".zip")
            os.remove(directory_name)
        except:
            pass
        await event.edit("Task completed")
        await asyncio.sleep(3)
        await event.delete()
    else:
        await event.edit("TODO: Not Implemented")


# Helpers

def is_it_animated_sticker(message):
    try:
        if message.media and message.media.document:
            mime_type = message.media.document.mime_type
            if "tgsticker" in mime_type:
                return True
            else:
                return False
        else:
            return False
    except:
        return False


def is_message_image(message):
    if message.media:
        if isinstance(message.media, MessageMediaPhoto):
            return True
        if message.media.document:
            if message.media.document.mime_type.split("/")[0] == "image":
                return True
        return False
    return False


async def silently_send_message(conv, text):
    await conv.send_message(text)
    response = await conv.get_response()
    await conv.mark_read(message=response)
    return response


async def stickerset_exists(conv, setname):
    try:
        await borg(GetStickerSetRequest(InputStickerSetShortName(setname)))
        response = await silently_send_message(conv, "/addsticker")
        if response.text == "Invalid pack selected.":
            await silently_send_message(conv, "/cancel")
            return False
        await silently_send_message(conv, "/cancel")
        return True
    except StickersetInvalidError:
        return False


def resize_image(image, save_location):
    """
    Copyright Rhyse Simpson:
    https://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py
    """
    im = Image.open(image)
    maxsize = (512, 512)
    # "(im.width and im.height) < 512" only ever compared im.height;
    # both dimensions must be under 512 before upscaling
    if im.width < 512 and im.height < 512:
        size1 = im.width
        size2 = im.height
        if im.width > im.height:
            scale = 512 / size1
            size1new = 512
            size2new = size2 * scale
        else:
            scale = 512 / size2
            size1new = size1 * scale
            size2new = 512
size1new = math.floor(size1new) size2new = math.floor(size2new) sizenew = (size1new, size2new) im = im.resize(sizenew) else: im.thumbnail(maxsize) im.save(save_locaton, "PNG") def progress(current, total): logger.info("Uploaded: {} of {}\nCompleted {}".format(current, total, (current / total) * 100)) def find_instance(items, class_or_tuple): for item in items: if isinstance(item, class_or_tuple): return item return None def zipdir(path, ziph): # ziph is zipfile handle for root, dirs, files in os.walk(path): for file in files: ziph.write(os.path.join(root, file)) os.remove(os.path.join(root, file))
"""Make / Download Telegram Sticker Packs without installing Third Party applications Available Commands: .kang [Optional Emoji] .packinfo .loda {for get stickers in a zip file}""" from telethon import events from io import BytesIO from PIL import Image import asyncio import datetime from collections import defaultdict import math import os import random import requests import zipfile from telethon.errors.rpcerrorlist import StickersetInvalidError from telethon.errors import MessageNotModifiedError from telethon.tl.functions.account import UpdateNotifySettingsRequest from telethon.tl.functions.messages import GetStickerSetRequest from telethon.tl.types import ( DocumentAttributeFilename, DocumentAttributeSticker, InputMediaUploadedDocument, InputPeerNotifySettings, InputStickerSetID, InputStickerSetShortName, MessageMediaPhoto ) from uniborg.util import admin_cmd KANGING_STR = [ "`Using Witchery to kang this sticker...`", "`Plagiarising hehe...`", "`Aaham Brahmassami................`", "`Inviting this sticker over to my pack...`", "`Kanging this sticker...`", "`Hey that's a nice sticker!\nMind if I kang?!..`", "`hehe me stel ur stikér\nhehe.`", "`Ay look over there (☉。☉)!→\nWhile I kang this...`", "`Roses are red violets are blue, kanging this sticker so my pacc looks cool`", "`Imprisoning this sticker...`", "`Mr.Steal Your Sticker is stealing this sticker...`", "`I am Stealing your Sticker.....\nGand Marao...`", "Why u bullin me.....\nರ╭╮ರ`", "`BOOM.... HEADSHOT...\n(ノಠ益ಠ)ノ...\n(⌐■-■)`", "`Me is having sux with ur GF....\nU can't du nthing...Hehe..\nಠ∀ಠ...(≧▽≦)`", "`Aise tukur tukur kahe Dekh raha hain`", ] @borg.on(admin_cmd(pattern="kang ?(.*)")) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to a photo to add to my personal sticker pack.") return reply_message = await event.get_reply_message() sticker_emoji = "🍆" input_str = event.pattern_match.group(1) if input_str: sticker_emoji = input_str me = borg.me name = me.username userid = event.from_id packname = f"Babyツ @Baby_xD" packshortname = f"Darling_BabyxD{userid}" # format: Uni_Borg_userid is_a_s = is_it_animated_sticker(reply_message) file_ext_ns_ion = "chutiya_Sticker.png" file = await borg.download_file(reply_message.media) uploaded_sticker = None if is_a_s: file_ext_ns_ion = "AnimatedSticker.tgs" uploaded_sticker = await borg.upload_file(file, file_name=file_ext_ns_ion) packname = f"nikal_lawde_AnimatedStickers" packshortname = f"kirito6969_Animated" # format: Uni_Borg_userid elif not is_message_image(reply_message): await event.edit("Invalid message type") return else: with BytesIO(file) as mem_file, BytesIO() as sticker: resize_image(mem_file, sticker) sticker.seek(0) uploaded_sticker = await borg.upload_file(sticker, file_name=file_ext_ns_ion) await event.edit(random.choice(KANGING_STR)) async with borg.conversation("@Stickers") as bot_conv: now = datetime.datetime.now() dt = now + datetime.timedelta(minutes=1) if not await stickerset_exists(bot_conv, packshortname): await silently_send_message(bot_conv, "/cancel") if is_a_s: response = await silently_send_message(bot_conv, "/newanimated") else: response = await silently_send_message(bot_conv, "/newpack") if "Yay!" not in response.text: await event.edit(f"**FAILED**! @Stickers replied: {response.text}") return response = await silently_send_message(bot_conv, packname) if not response.text.startswith("Alright!"): await event.edit(f"**FAILED**! 
@Stickers replied: {response.text}") return w = await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if "Sorry" in response.text: await event.edit(f"**FAILED**! @Stickers replied: {response.text}") return await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/publish") response = await silently_send_message(bot_conv, f"<{packname}>") await silently_send_message(bot_conv, "/skip") response = await silently_send_message(bot_conv, packshortname) if response.text == "Sorry, this short name is already taken.": await event.edit(f"**FAILED**! @Stickers replied: {response.text}") return else: await silently_send_message(bot_conv, "/cancel") await silently_send_message(bot_conv, "/addsticker") await silently_send_message(bot_conv, packshortname) await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if "Sorry" in response.text: await event.edit(f"**FAILED**! @Stickers replied: {response.text}") return await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/done") await event.edit(f"`This Sticker Is Raped! Plox Help this Sticker by Clicking` [HERE](t.me/addstickers/{packshortname})") @borg.on(admin_cmd(pattern="packinfo")) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to any sticker to get it's pack info.") return rep_msg = await event.get_reply_message() if not rep_msg.document: await event.edit("Reply to any sticker to get it's pack info.") return stickerset_attr_s = rep_msg.document.attributes stickerset_attr = find_instance(stickerset_attr_s, DocumentAttributeSticker) if not stickerset_attr.stickerset: await event.edit("sticker does not belong to a pack.") return get_stickerset = await borg( GetStickerSetRequest( InputStickerSetID( id=stickerset_attr.stickerset.id, access_hash=stickerset_attr.stickerset.access_hash ) ) ) pack_emojis = [] for document_sticker in get_stickerset.packs: if document_sticker.emoticon not in pack_emojis: pack_emojis.append(document_sticker.emoticon) await event.edit(f"**Sticker Title:** `{get_stickerset.set.title}\n`" f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n" f"**Official:** `{get_stickerset.set.official}`\n" f"**Archived:** `{get_stickerset.set.archived}`\n" f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n" f"**Emojis In Pack:** {' '.join(pack_emojis)}") @borg.on(admin_cmd(pattern="loda ?(.*)")) async def _(event): if event.fwd_from: return input_str = event.pattern_match.group(1) if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) if event.reply_to_msg_id: reply_message = await event.get_reply_message() # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 if not reply_message.sticker: return sticker = reply_message.sticker sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker) if not sticker_attrib.stickerset: await event.reply("This sticker is not part of a pack") return is_a_s = is_it_animated_sticker(reply_message) file_ext_ns_ion = "webp" file_caption = "`You are my Nigga`" if is_a_s: file_ext_ns_ion = "tgs" file_caption = "Forward the ZIP file to @AnimatedStickersRoBot to get lottIE JSON containing the vector information." 
        sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset))
        pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, sticker_set.set.short_name, "pack.txt")
        if os.path.isfile(pack_file):
            os.remove(pack_file)
        # Sticker emojis are retrieved as a mapping of
        # <emoji>: <list of document ids that have this emoji>
        # So we need to build a mapping of <document id>: <list of emoji>
        # Thanks, Durov
        emojis = defaultdict(str)
        for pack in sticker_set.packs:
            for document_id in pack.documents:
                emojis[document_id] += pack.emoticon

        async def download(sticker, emojis, path, file):
            await borg.download_media(sticker, file=os.path.join(path, file))
            # append one dict-like entry per downloaded sticker to pack.txt
            with open(pack_file, "a") as f:
                f.write(f"{{'image_file': '{file}', 'emojis': '{emojis[sticker.id]}'}},")

        pending_tasks = [
            asyncio.ensure_future(
                download(document, emojis, Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name, f"{i:03d}.{file_ext_ns_ion}")
            ) for i, document in enumerate(sticker_set.documents)
        ]
        await event.edit(f"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}...")
        num_tasks = len(pending_tasks)
        while True:
            done, pending_tasks = await asyncio.wait(pending_tasks, timeout=2.5, return_when=asyncio.FIRST_COMPLETED)
            try:
                await event.edit(f"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}")
            except MessageNotModifiedError:
                pass
            if not pending_tasks:
                break
        await event.edit("Download to local storage completed")
        # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
        directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name
        zipf = zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED)
        zipdir(directory_name, zipf)
        zipf.close()
        await borg.send_file(
            event.chat_id,
            directory_name + ".zip",
            caption=file_caption,
            force_document=True,
            allow_cache=False,
            reply_to=event.message.id,
            progress_callback=progress
        )
        try:
            os.remove(directory_name + ".zip")
            # zipdir() already deleted the files inside, so only the empty folder is left
            os.rmdir(directory_name)
        except Exception:
            pass
        await event.edit("Task completed")
        await asyncio.sleep(3)
        await event.delete()
    else:
        await event.edit("TODO: Not Implemented")


# Helpers

def is_it_animated_sticker(message):
    try:
        if message.media and message.media.document:
            return "tgsticker" in message.media.document.mime_type
        return False
    except Exception:
        return False


def is_message_image(message):
    if message.media:
        if isinstance(message.media, MessageMediaPhoto):
            return True
        if message.media.document:
            if message.media.document.mime_type.split("/")[0] == "image":
                return True
        return False
    return False


async def silently_send_message(conv, text):
    await conv.send_message(text)
    response = await conv.get_response()
    await conv.mark_read(message=response)
    return response


async def stickerset_exists(conv, setname):
    try:
        await borg(GetStickerSetRequest(InputStickerSetShortName(setname)))
        response = await silently_send_message(conv, "/addsticker")
        if response.text == "Invalid pack selected.":
            await silently_send_message(conv, "/cancel")
            return False
        await silently_send_message(conv, "/cancel")
        return True
    except StickersetInvalidError:
        return False


def resize_image(image, save_location):
    """ Copyright Rhyse Simpson:
        https://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py
    """
    im = Image.open(image)
    maxsize = (512, 512)
    # upscale small images so that the longer side becomes exactly 512 px
    if im.width < 512 and im.height < 512:
        size1 = im.width
        size2 = im.height
        if im.width > im.height:
            scale = 512 / size1
            size1new = 512
            size2new = size2 * scale
        else:
            scale = 512 / size2
            size1new = size1 * scale
            size2new = 512
        size1new = math.floor(size1new)
        size2new = math.floor(size2new)
        sizenew = (size1new, size2new)
        im = im.resize(sizenew)
    else:
        im.thumbnail(maxsize)
    im.save(save_location, "PNG")


def progress(current, total):
    logger.info("Uploaded: {} of {}\nCompleted {}".format(current, total, (current / total) * 100))


def find_instance(items, class_or_tuple):
    for item in items:
        if isinstance(item, class_or_tuple):
            return item
    return None


def zipdir(path, ziph):
    # ziph is a zipfile handle; files are deleted after being added to the archive
    for root, dirs, files in os.walk(path):
        for file in files:
            ziph.write(os.path.join(root, file))
            os.remove(os.path.join(root, file))
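# ---------------------------------------------------------------------------
# A minimal, standalone sketch of the emoji inversion performed by the .loda
# handler above: Telegram returns <emoji> -> <document ids>, and the plugin
# needs <document id> -> <emojis>. The SimpleNamespace packs below are invented
# stand-ins for Telethon StickerPack objects, not real API data.

from collections import defaultdict
from types import SimpleNamespace

example_packs = [
    SimpleNamespace(emoticon="😀", documents=[111, 222]),  # hypothetical data
    SimpleNamespace(emoticon="🔥", documents=[222, 333]),
]

example_emojis = defaultdict(str)
for example_pack in example_packs:
    for example_document_id in example_pack.documents:
        example_emojis[example_document_id] += example_pack.emoticon

assert example_emojis[222] == "😀🔥"  # a document can carry several emojis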
""" ISA datatype See https://github.com/ISA-tools """ import json import logging import os import os.path import re import shutil import tempfile # Imports isatab after turning off warnings inside logger settings to avoid pandas warning making uploads fail. logging.getLogger("isatools.isatab").setLevel(logging.ERROR) from isatools import ( isajson, isatab_meta ) from markupsafe import escape from galaxy import util from galaxy.datatypes import data from galaxy.util.compression_utils import CompressedFile from galaxy.util.sanitize_html import sanitize_html # CONSTANTS {{{1 ################################################################ # Main files regex JSON_FILE_REGEX = re.compile(r"^.*\.json$", flags=re.IGNORECASE) INVESTIGATION_FILE_REGEX = re.compile(r"^i_\w+\.txt$", flags=re.IGNORECASE) # The name of the ISA archive (compressed file) as saved inside Galaxy ISA_ARCHIVE_NAME = "archive" # Set max number of lines of the history peek _MAX_LINES_HISTORY_PEEK = 11 # Configure logger {{{1 ################################################################ logger = logging.getLogger(__name__) # Function for opening correctly a CSV file for csv.reader() for both Python 2 and 3 {{{1 ################################################################ # ISA class {{{1 ################################################################ class _Isa(data.Data): """ Base class for implementing ISA datatypes """ composite_type = 'auto_primary_file' is_binary = True _main_file_regex = None # Make investigation instance {{{2 ################################################################ def _make_investigation_instance(self, filename): raise NotImplementedError() # Constructor {{{2 ################################################################ def __init__(self, main_file_regex, **kwd): super().__init__(**kwd) self._main_file_regex = main_file_regex # Add the archive file as the only composite file self.add_composite_file(ISA_ARCHIVE_NAME, is_binary=True, optional=True) # Get ISA folder path {{{2 ################################################################ def _get_isa_folder_path(self, dataset): isa_folder = dataset.extra_files_path if not isa_folder: raise Exception('Unvalid dataset object, or no extra files path found for this dataset.') return isa_folder # Get main file {{{2 ################################################################ def _get_main_file(self, dataset): """Get the main file of the ISA archive. Either the investigation file i_*.txt for ISA-Tab, or the JSON file for ISA-JSON.""" main_file = None isa_folder = self._get_isa_folder_path(dataset) if os.path.exists(isa_folder): # Get ISA archive older isa_files = os.listdir(isa_folder) # Try to find main file main_file = self._find_main_file_in_archive(isa_files) if main_file is None: raise Exception('Invalid ISA archive. No main file found.') # Make full path main_file = os.path.join(isa_folder, main_file) return main_file # Get investigation {{{2 ################################################################ def _get_investigation(self, dataset): """Create a contained instance specific to the exact ISA type (Tab or Json). 
We will use it to parse and access information from the archive.""" investigation = None main_file = self._get_main_file(dataset) if main_file is not None: investigation = self._make_investigation_instance(main_file) return investigation # Find main file in archive {{{2 ################################################################ def _find_main_file_in_archive(self, files_list): """Find the main file inside the ISA archive.""" found_file = None for f in files_list: match = self._main_file_regex.match(f) if match: if found_file is None: found_file = match.group() else: raise Exception('More than one file match the pattern "', str(self._main_file_regex), '" to identify the investigation file') return found_file # Set peek {{{2 ################################################################ def set_peek(self, dataset, is_multi_byte=False): """Set the peek and blurb text. Get first lines of the main file and set it as the peek.""" main_file = self._get_main_file(dataset) if main_file is None: raise RuntimeError("Unable to find the main file within the 'files_path' folder") # Read first lines of main file with open(main_file, encoding='utf-8') as f: data = [] for line in f: if len(data) < _MAX_LINES_HISTORY_PEEK: data.append(line) else: break if not dataset.dataset.purged and data: dataset.peek = json.dumps({"data": data}) dataset.blurb = 'data' else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' # Display peek {{{2 ################################################################ def display_peek(self, dataset): """Create the HTML table used for displaying peek, from the peek text found by set_peek() method.""" out = ['<table cellspacing="0" cellpadding="3">'] try: if not dataset.peek: dataset.set_peek() json_data = json.loads(dataset.peek) for line in json_data["data"]: line = line.strip() if not line: continue out.append(f"<tr><td>{escape(util.unicodify(line, "utf-8"))}</td></tr>") out.append('</table>') out = "".join(out) except Exception as exc: out = f"Can't create peek: {util.unicodify(exc)}" return out # Generate primary file {{{2 ################################################################ def generate_primary_file(self, dataset=None): """Generate the primary file. It is an HTML file containing description of the composite dataset as well as a list of the composite files that it contains.""" if dataset: rval = ['<html><head><title>ISA Dataset </title></head><p/>'] if hasattr(dataset, "extra_files_path"): rval.append('<div>ISA Dataset composed of the following files:<p/><ul>') for cmp_file in os.listdir(dataset.extra_files_path): rval.append(f'<li><a href="{cmp_file}" type="text/plain">{escape(cmp_file)}</a></li>') rval.append('</ul></div></html>') else: rval.append('<div>ISA Dataset is empty!<p/><ul>') return "\n".join(rval) return "<div>No dataset available</div>" # Dataset content needs grooming {{{2 ################################################################ def dataset_content_needs_grooming(self, file_name): """This function is called on an output dataset file after the content is initially generated.""" return os.path.basename(file_name) == ISA_ARCHIVE_NAME # Groom dataset content {{{2 ################################################################ def groom_dataset_content(self, file_name): """This method is called by Galaxy to extract files contained in a composite data type.""" # XXX Is the right place to extract files? Should this step not be a cleaning step instead? 
# Could extracting be done earlier and composite files declared as files contained inside the archive # instead of the archive itself? # extract basename and folder of the current file whose content has to be groomed basename = os.path.basename(file_name) output_path = os.path.dirname(file_name) # extract archive if the file corresponds to the ISA archive if basename == ISA_ARCHIVE_NAME: # perform extraction # For some ZIP files CompressedFile::extract() extract the file inside <output_folder>/<file_name> instead of outputing it inside <output_folder>. So we first create a temporary folder, extract inside it, and move content to final destination. temp_folder = tempfile.mkdtemp() CompressedFile(file_name).extract(temp_folder) shutil.rmtree(output_path) extracted_files = os.listdir(temp_folder) logger.debug(' '.join(extracted_files)) if len(extracted_files) == 0: os.makedirs(output_path) shutil.rmtree(temp_folder) elif len(extracted_files) == 1 and os.path.isdir(os.path.join(temp_folder, extracted_files[0])): shutil.move(os.path.join(temp_folder, extracted_files[0]), output_path) shutil.rmtree(temp_folder) else: shutil.move(temp_folder, output_path) # Display data {{{2 ################################################################ def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, offset=None, ck_size=None, **kwd): """Downloads the ISA dataset if `preview` is `False`; if `preview` is `True`, it returns a preview of the ISA dataset as a HTML page. The preview is triggered when user clicks on the eye icon of the composite dataset.""" # if it is not required a preview use the default behaviour of `display_data` if not preview: return super().display_data(trans, dataset, preview, filename, to_ext, **kwd) # prepare the preview of the ISA dataset investigation = self._get_investigation(dataset) if investigation is None: html = """<html><header><title>Error while reading ISA archive.</title></header> <body> <h1>An error occurred while reading content of ISA archive.</h1> <p>If you have tried to load your archive with the uploader by selecting isa-tab as composite data type, then try to load it again with isa-json instead. Conversely, if you have tried to load your archive with the uploader by selecting isa-json as composite data type, then try isa-tab instead.</p> <p>You may also try to look into your zip file in order to find out if this is a proper ISA archive. If you see a file i_Investigation.txt inside, then it is an ISA-Tab archive. If you see a file with extension .json inside, then it is an ISA-JSON archive. 
If you see nothing like that, then either your ISA archive is corrupted, or it is not an ISA archive.</p> </body></html>""" else: html = '<html><body>' html += f'<h1>{investigation.title} {investigation.identifier}</h1>' # Loop on all studies for study in investigation.studies: html += f'<h2>Study {study.identifier}</h2>' html += f'<h3>{study.title}</h3>' html += f'<p>{study.description}</p>' html += f'<p>Submitted the {study.submission_date}</p>' html += f'<p>Released on {study.public_release_date}</p>' html += f"<p>Experimental factors used: {", ".join(x.name for x in study.factors)}</p>" # Loop on all assays of this study for assay in study.assays: html += f'<h3>Assay {assay.filename}</h3>' html += f'<p>Measurement type: {assay.measurement_type.term}</p>' # OntologyAnnotation html += f'<p>Technology type: {assay.technology_type.term}</p>' # OntologyAnnotation html += f'<p>Technology platform: {assay.technology_platform}</p>' if assay.data_files is not None: html += '<p>Data files:</p>' html += '<ul>' for data_file in assay.data_files: if data_file.filename != '': html += f"<li>{escape(util.unicodify(str(data_file.filename), "utf-8"))} - {escape(util.unicodify(str(data_file.label), "utf-8"))}</li>" html += '</ul>' html += '</body></html>' # Set mime type mime = 'text/html' self._clean_and_set_mime_type(trans, mime) return sanitize_html(html).encode('utf-8') # ISA-Tab class {{{1 ################################################################ class IsaTab(_Isa): file_ext = "isa-tab" # Constructor {{{2 ################################################################ def __init__(self, **kwd): super().__init__(main_file_regex=INVESTIGATION_FILE_REGEX, **kwd) # Make investigation instance {{{2 ################################################################ def _make_investigation_instance(self, filename): # Parse ISA-Tab investigation file parser = isatab_meta.InvestigationParser() isa_dir = os.path.dirname(filename) with open(filename, newline='', encoding='utf8') as fp: parser.parse(fp) for study in parser.isa.studies: s_parser = isatab_meta.LazyStudySampleTableParser(parser.isa) s_parser.parse(os.path.join(isa_dir, study.filename)) for assay in study.assays: a_parser = isatab_meta.LazyAssayTableParser(parser.isa) a_parser.parse(os.path.join(isa_dir, assay.filename)) isa = parser.isa return isa # ISA-JSON class {{{1 ################################################################ class IsaJson(_Isa): file_ext = "isa-json" # Constructor {{{2 ################################################################ def __init__(self, **kwd): super().__init__(main_file_regex=JSON_FILE_REGEX, **kwd) # Make investigation instance {{{2 ################################################################ def _make_investigation_instance(self, filename): # Parse JSON file with open(filename, newline='', encoding='utf8') as fp: isa = isajson.load(fp) return isa
""" ISA datatype See https://github.com/ISA-tools """ import json import logging import os import os.path import re import shutil import tempfile # Imports isatab after turning off warnings inside logger settings to avoid pandas warning making uploads fail. logging.getLogger("isatools.isatab").setLevel(logging.ERROR) from isatools import ( isajson, isatab_meta ) from markupsafe import escape from galaxy import util from galaxy.datatypes import data from galaxy.util.compression_utils import CompressedFile from galaxy.util.sanitize_html import sanitize_html # CONSTANTS {{{1 ################################################################ # Main files regex JSON_FILE_REGEX = re.compile(r"^.*\.json$", flags=re.IGNORECASE) INVESTIGATION_FILE_REGEX = re.compile(r"^i_\w+\.txt$", flags=re.IGNORECASE) # The name of the ISA archive (compressed file) as saved inside Galaxy ISA_ARCHIVE_NAME = "archive" # Set max number of lines of the history peek _MAX_LINES_HISTORY_PEEK = 11 # Configure logger {{{1 ################################################################ logger = logging.getLogger(__name__) # Function for opening correctly a CSV file for csv.reader() for both Python 2 and 3 {{{1 ################################################################ # ISA class {{{1 ################################################################ class _Isa(data.Data): """ Base class for implementing ISA datatypes """ composite_type = 'auto_primary_file' is_binary = True _main_file_regex = None # Make investigation instance {{{2 ################################################################ def _make_investigation_instance(self, filename): raise NotImplementedError() # Constructor {{{2 ################################################################ def __init__(self, main_file_regex, **kwd): super().__init__(**kwd) self._main_file_regex = main_file_regex # Add the archive file as the only composite file self.add_composite_file(ISA_ARCHIVE_NAME, is_binary=True, optional=True) # Get ISA folder path {{{2 ################################################################ def _get_isa_folder_path(self, dataset): isa_folder = dataset.extra_files_path if not isa_folder: raise Exception('Unvalid dataset object, or no extra files path found for this dataset.') return isa_folder # Get main file {{{2 ################################################################ def _get_main_file(self, dataset): """Get the main file of the ISA archive. Either the investigation file i_*.txt for ISA-Tab, or the JSON file for ISA-JSON.""" main_file = None isa_folder = self._get_isa_folder_path(dataset) if os.path.exists(isa_folder): # Get ISA archive older isa_files = os.listdir(isa_folder) # Try to find main file main_file = self._find_main_file_in_archive(isa_files) if main_file is None: raise Exception('Invalid ISA archive. No main file found.') # Make full path main_file = os.path.join(isa_folder, main_file) return main_file # Get investigation {{{2 ################################################################ def _get_investigation(self, dataset): """Create a contained instance specific to the exact ISA type (Tab or Json). 
We will use it to parse and access information from the archive.""" investigation = None main_file = self._get_main_file(dataset) if main_file is not None: investigation = self._make_investigation_instance(main_file) return investigation # Find main file in archive {{{2 ################################################################ def _find_main_file_in_archive(self, files_list): """Find the main file inside the ISA archive.""" found_file = None for f in files_list: match = self._main_file_regex.match(f) if match: if found_file is None: found_file = match.group() else: raise Exception('More than one file match the pattern "', str(self._main_file_regex), '" to identify the investigation file') return found_file # Set peek {{{2 ################################################################ def set_peek(self, dataset, is_multi_byte=False): """Set the peek and blurb text. Get first lines of the main file and set it as the peek.""" main_file = self._get_main_file(dataset) if main_file is None: raise RuntimeError("Unable to find the main file within the 'files_path' folder") # Read first lines of main file with open(main_file, encoding='utf-8') as f: data = [] for line in f: if len(data) < _MAX_LINES_HISTORY_PEEK: data.append(line) else: break if not dataset.dataset.purged and data: dataset.peek = json.dumps({"data": data}) dataset.blurb = 'data' else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' # Display peek {{{2 ################################################################ def display_peek(self, dataset): """Create the HTML table used for displaying peek, from the peek text found by set_peek() method.""" out = ['<table cellspacing="0" cellpadding="3">'] try: if not dataset.peek: dataset.set_peek() json_data = json.loads(dataset.peek) for line in json_data["data"]: line = line.strip() if not line: continue out.append(f"<tr><td>{escape(util.unicodify(line, 'utf-8'))}</td></tr>") out.append('</table>') out = "".join(out) except Exception as exc: out = f"Can't create peek: {util.unicodify(exc)}" return out # Generate primary file {{{2 ################################################################ def generate_primary_file(self, dataset=None): """Generate the primary file. It is an HTML file containing description of the composite dataset as well as a list of the composite files that it contains.""" if dataset: rval = ['<html><head><title>ISA Dataset </title></head><p/>'] if hasattr(dataset, "extra_files_path"): rval.append('<div>ISA Dataset composed of the following files:<p/><ul>') for cmp_file in os.listdir(dataset.extra_files_path): rval.append(f'<li><a href="{cmp_file}" type="text/plain">{escape(cmp_file)}</a></li>') rval.append('</ul></div></html>') else: rval.append('<div>ISA Dataset is empty!<p/><ul>') return "\n".join(rval) return "<div>No dataset available</div>" # Dataset content needs grooming {{{2 ################################################################ def dataset_content_needs_grooming(self, file_name): """This function is called on an output dataset file after the content is initially generated.""" return os.path.basename(file_name) == ISA_ARCHIVE_NAME # Groom dataset content {{{2 ################################################################ def groom_dataset_content(self, file_name): """This method is called by Galaxy to extract files contained in a composite data type.""" # XXX Is the right place to extract files? Should this step not be a cleaning step instead? 
# Could extracting be done earlier and composite files declared as files contained inside the archive # instead of the archive itself? # extract basename and folder of the current file whose content has to be groomed basename = os.path.basename(file_name) output_path = os.path.dirname(file_name) # extract archive if the file corresponds to the ISA archive if basename == ISA_ARCHIVE_NAME: # perform extraction # For some ZIP files CompressedFile::extract() extract the file inside <output_folder>/<file_name> instead of outputing it inside <output_folder>. So we first create a temporary folder, extract inside it, and move content to final destination. temp_folder = tempfile.mkdtemp() CompressedFile(file_name).extract(temp_folder) shutil.rmtree(output_path) extracted_files = os.listdir(temp_folder) logger.debug(' '.join(extracted_files)) if len(extracted_files) == 0: os.makedirs(output_path) shutil.rmtree(temp_folder) elif len(extracted_files) == 1 and os.path.isdir(os.path.join(temp_folder, extracted_files[0])): shutil.move(os.path.join(temp_folder, extracted_files[0]), output_path) shutil.rmtree(temp_folder) else: shutil.move(temp_folder, output_path) # Display data {{{2 ################################################################ def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, offset=None, ck_size=None, **kwd): """Downloads the ISA dataset if `preview` is `False`; if `preview` is `True`, it returns a preview of the ISA dataset as a HTML page. The preview is triggered when user clicks on the eye icon of the composite dataset.""" # if it is not required a preview use the default behaviour of `display_data` if not preview: return super().display_data(trans, dataset, preview, filename, to_ext, **kwd) # prepare the preview of the ISA dataset investigation = self._get_investigation(dataset) if investigation is None: html = """<html><header><title>Error while reading ISA archive.</title></header> <body> <h1>An error occurred while reading content of ISA archive.</h1> <p>If you have tried to load your archive with the uploader by selecting isa-tab as composite data type, then try to load it again with isa-json instead. Conversely, if you have tried to load your archive with the uploader by selecting isa-json as composite data type, then try isa-tab instead.</p> <p>You may also try to look into your zip file in order to find out if this is a proper ISA archive. If you see a file i_Investigation.txt inside, then it is an ISA-Tab archive. If you see a file with extension .json inside, then it is an ISA-JSON archive. 
If you see nothing like that, then either your ISA archive is corrupted, or it is not an ISA archive.</p> </body></html>""" else: html = '<html><body>' html += f'<h1>{investigation.title} {investigation.identifier}</h1>' # Loop on all studies for study in investigation.studies: html += f'<h2>Study {study.identifier}</h2>' html += f'<h3>{study.title}</h3>' html += f'<p>{study.description}</p>' html += f'<p>Submitted the {study.submission_date}</p>' html += f'<p>Released on {study.public_release_date}</p>' html += f"<p>Experimental factors used: {', '.join(x.name for x in study.factors)}</p>" # Loop on all assays of this study for assay in study.assays: html += f'<h3>Assay {assay.filename}</h3>' html += f'<p>Measurement type: {assay.measurement_type.term}</p>' # OntologyAnnotation html += f'<p>Technology type: {assay.technology_type.term}</p>' # OntologyAnnotation html += f'<p>Technology platform: {assay.technology_platform}</p>' if assay.data_files is not None: html += '<p>Data files:</p>' html += '<ul>' for data_file in assay.data_files: if data_file.filename != '': html += f"<li>{escape(util.unicodify(str(data_file.filename), 'utf-8'))} - {escape(util.unicodify(str(data_file.label), 'utf-8'))}</li>" html += '</ul>' html += '</body></html>' # Set mime type mime = 'text/html' self._clean_and_set_mime_type(trans, mime) return sanitize_html(html).encode('utf-8') # ISA-Tab class {{{1 ################################################################ class IsaTab(_Isa): file_ext = "isa-tab" # Constructor {{{2 ################################################################ def __init__(self, **kwd): super().__init__(main_file_regex=INVESTIGATION_FILE_REGEX, **kwd) # Make investigation instance {{{2 ################################################################ def _make_investigation_instance(self, filename): # Parse ISA-Tab investigation file parser = isatab_meta.InvestigationParser() isa_dir = os.path.dirname(filename) with open(filename, newline='', encoding='utf8') as fp: parser.parse(fp) for study in parser.isa.studies: s_parser = isatab_meta.LazyStudySampleTableParser(parser.isa) s_parser.parse(os.path.join(isa_dir, study.filename)) for assay in study.assays: a_parser = isatab_meta.LazyAssayTableParser(parser.isa) a_parser.parse(os.path.join(isa_dir, assay.filename)) isa = parser.isa return isa # ISA-JSON class {{{1 ################################################################ class IsaJson(_Isa): file_ext = "isa-json" # Constructor {{{2 ################################################################ def __init__(self, **kwd): super().__init__(main_file_regex=JSON_FILE_REGEX, **kwd) # Make investigation instance {{{2 ################################################################ def _make_investigation_instance(self, filename): # Parse JSON file with open(filename, newline='', encoding='utf8') as fp: isa = isajson.load(fp) return isa
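# ---------------------------------------------------------------------------
# A quick standalone check of how the two module-level regexes above pick the
# main file out of an archive listing, mirroring what
# _Isa._find_main_file_in_archive() does; the file names are invented for
# illustration.

import re

_INVESTIGATION_FILE_REGEX = re.compile(r"^i_\w+\.txt$", flags=re.IGNORECASE)
_JSON_FILE_REGEX = re.compile(r"^.*\.json$", flags=re.IGNORECASE)

_listing = ["i_Investigation.txt", "s_study.txt", "a_assay.txt", "data.csv"]

_tab_matches = [f for f in _listing if _INVESTIGATION_FILE_REGEX.match(f)]
_json_matches = [f for f in _listing if _JSON_FILE_REGEX.match(f)]

assert _tab_matches == ["i_Investigation.txt"]  # ISA-Tab main file found
assert _json_matches == []                      # not an ISA-JSON archive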
#!/usr/bin/env python3 import logging import coloredlogs import json import requests from datetime import datetime from packaging import version log_level = None def get_logger(name=__name__, verbosity=None): ''' Colored logging :param name: logger name (use __name__ variable) :param verbosity: :return: Logger ''' global log_level if verbosity is not None: if log_level is None: log_level = verbosity else: raise RuntimeError('Verbosity has already been set.') shortname = name.replace('pytr.', '') logger = logging.getLogger(shortname) # no logging of libs logger.propagate = False if log_level == 'debug': fmt = '%(asctime)s %(name)-9s %(levelname)-8s %(message)s' datefmt = '%Y-%m-%d %H:%M:%S%z' else: fmt = '%(asctime)s %(message)s' datefmt = '%H:%M:%S' fs = { 'asctime': {'color': 'green'}, 'hostname': {'color': 'magenta'}, 'levelname': {'color': 'red', 'bold': True}, 'name': {'color': 'magenta'}, 'programname': {'color': 'cyan'}, 'username': {'color': 'yellow'}, } ls = { 'critical': {'color': 'red', 'bold': True}, 'debug': {'color': 'green'}, 'error': {'color': 'red'}, 'info': {}, 'notice': {'color': 'magenta'}, 'spam': {'color': 'green', 'faint': True}, 'success': {'color': 'green', 'bold': True}, 'verbose': {'color': 'blue'}, 'warning': {'color': 'yellow'}, } coloredlogs.install(level=log_level, logger=logger, fmt=fmt, datefmt=datefmt, level_styles=ls, field_styles=fs) return logger def preview(response, num_lines=5): lines = json.dumps(response, indent=2).splitlines() head = '\n'.join(lines[:num_lines]) tail = len(lines) - num_lines if tail <= 0: return f'{head}\n' else: return f'{head}\n{tail} more lines hidden' def check_version(installed_version): log = get_logger(__name__) try: r = requests.get('https://api.github.com/repos/marzzzello/pytr/tags', timeout=1) except Exception as e: log.error('Could not check for a newer version') log.debug(str(e)) return latest_version = r.json()[0]['name'] if version.parse(installed_version) < version.parse(latest_version): log.warning(f'Installed pytr version ({installed_version}) is outdated. Latest version is {latest_version}') else: log.info('pytr is up to date') class Timeline: def __init__(self, tr): self.tr = tr self.log = get_logger(__name__) self.received_detail = 0 self.requested_detail = 0 self.num_timeline_details = 0 self.events_without_docs = [] async def get_next_timeline(self, response=None, max_age_timestamp=0): ''' Get timelines and save time in list timelines. 
Extract timeline events and save them in list timeline_events ''' if response is None: # empty response / first timeline self.log.info('Awaiting #1 timeline') # self.timelines = [] self.num_timelines = 0 self.timeline_events = [] await self.tr.timeline() else: timestamp = response['data'][-1]['data']['timestamp'] self.num_timelines += 1 # print(json.dumps(response)) self.num_timeline_details += len(response['data']) for event in response['data']: self.timeline_events.append(event) after = response['cursors'].get('after') if after is None: # last timeline is reached self.log.info(f'Received #{self.num_timelines:<2} (last) timeline') await self._get_timeline_details(5) elif max_age_timestamp != 0 and timestamp < max_age_timestamp: self.log.info(f'Received #{self.num_timelines+1:<2} timeline') self.log.info('Reached last relevant timeline') await self._get_timeline_details(5, max_age_timestamp=max_age_timestamp) else: self.log.info( f'Received #{self.num_timelines:<2} timeline, awaiting #{self.num_timelines+1:<2} timeline' ) await self.tr.timeline(after) async def _get_timeline_details(self, num_torequest, max_age_timestamp=0): ''' request timeline details ''' while num_torequest > 0: if len(self.timeline_events) == 0: self.log.info('All timeline details requested') return False else: event = self.timeline_events.pop() action = event['data'].get('action') # icon = event['data'].get('icon') msg = '' if max_age_timestamp != 0 and event['data']['timestamp'] > max_age_timestamp: msg += 'Skip: too old' # elif icon is None: # pass # elif icon.endswith('/human.png'): # msg += 'Skip: human' # elif icon.endswith('/CashIn.png'): # msg += 'Skip: CashIn' # elif icon.endswith('/ExemptionOrderChanged.png'): # msg += 'Skip: ExemptionOrderChanged' elif action is None: if event['data'].get('actionLabel') is None: msg += 'Skip: no action' elif action.get('type') != 'timelineDetail': msg += f"Skip: action type unmatched ({action['type']})" elif action.get('payload') != event['data']['id']: msg += f"Skip: payload unmatched ({action['payload']})" if msg != '': self.events_without_docs.append(event) self.log.debug(f"{msg} {event['data']['title']}: {event['data'].get('body')} {json.dumps(event)}") self.num_timeline_details -= 1 continue num_torequest -= 1 self.requested_detail += 1 await self.tr.timeline_detail(event['data']['id']) async def timelineDetail(self, response, dl, max_age_timestamp=0): ''' process timeline response and request timelines ''' self.received_detail += 1 # when all requested timeline events are received request 5 new if self.received_detail == self.requested_detail: remaining = len(self.timeline_events) if remaining < 5: await self._get_timeline_details(remaining) else: await self._get_timeline_details(5) # print(f'len timeline_events: {len(self.timeline_events)}') isSavingsPlan = False if response['subtitleText'] == 'Sparplan': isSavingsPlan = True else: # some savingsPlan don't have the subtitleText == 'Sparplan' but there are actions just for savingsPans # but maybe these are unneeded duplicates for section in response['sections']: if section['type'] == 'actionButtons': for button in section['data']: if button['action']['type'] in ['editSavingsPlan', 'deleteSavingsPlan']: isSavingsPlan = True break if response['subtitleText'] != 'Sparplan' and isSavingsPlan is True: isSavingsPlan_fmt = ' -- SPARPLAN' else: isSavingsPlan_fmt = '' max_details_digits = len(str(self.num_timeline_details)) self.log.info( f"{self.received_detail:>{max_details_digits}}/{self.num_timeline_details}: " + 
f"{response['titleText']} -- {response['subtitleText']}{isSavingsPlan_fmt}" ) for section in response['sections']: if section['type'] == 'documents': for doc in section['documents']: try: timestamp = datetime.strptime(doc['detail'], '%d.%m.%Y').timestamp() * 1000 except ValueError: timestamp = datetime.now().timestamp() * 1000 if max_age_timestamp == 0 or max_age_timestamp < timestamp: # save all savingsplan documents in a subdirectory if isSavingsPlan: dl.dl_doc(doc, response['titleText'], response['subtitleText'], subfolder='Sparplan') else: dl.dl_doc(doc, response['titleText'], response['subtitleText']) if self.received_detail == self.num_timeline_details: self.log.info('Received all details') dl.output_path.mkdir(parents=True, exist_ok=True) with open(dl.output_path / 'other_events.json', 'w', encoding='utf-8') as f: json.dump(self.events_without_docs, f, ensure_ascii=False, indent=2) dl.work_responses()
# -*- coding: utf-8 -*-
# ███╗ ███╗ █████╗ ███╗ ██╗██╗ ██████╗ ██████╗ ███╗ ███╗██╗ ██████╗
# ████╗ ████║██╔══██╗████╗ ██║██║██╔════╝██╔═══██╗████╗ ████║██║██╔═══██╗
# ██╔████╔██║███████║██╔██╗ ██║██║██║ ██║ ██║██╔████╔██║██║██║ ██║
# ██║╚██╔╝██║██╔══██║██║╚██╗██║██║██║ ██║ ██║██║╚██╔╝██║██║██║ ██║
# ██║ ╚═╝ ██║██║ ██║██║ ╚████║██║╚██████╗╚██████╔╝██║ ╚═╝ ██║██║╚██████╔╝
# ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═════╝
# [+] @GorpoOrko 2020 - Telegram Bot and Personal Assistant [+]
# | TCXS Project Hacker Team - https://tcxsproject.com.br |
# | Telegram: @GorpoOrko Mail: gorpoorko@protonmail.com |
# [+] Github Gorpo Dev: https://github.com/gorpo [+]
import os
import sqlite3
from datetime import datetime

from bot_files.config import bot, version, bot_username, git_repo, logs, sudoers

# Telegram ids allowed to register packages (id_mit = 758600965, id_ted = 628238139)
UPLOADER_IDS = (522510051, 758600965, 628238139)


async def tcxs(msg):
    # initial variables
    conexao_sqlite = sqlite3.connect('bot_files/bot_database.db')
    conexao_sqlite.row_factory = sqlite3.Row
    cursor_sqlite = conexao_sqlite.cursor()
    data = datetime.now().strftime('%d/%m/%Y %H:%M')
    try:  # PAID DONOR STORE REGISTRATION SYSTEM | $$$$$ | $$$$$$ -------------------------------------->
        if msg['chat']['type'] == 'private' and msg.get('document') and msg['from']['id'] in UPLOADER_IDS:
            if msg['document']['file_name'].split('.')[-1] == 'pkg' and 'tcxs' in msg['document']['file_name'].lower():
                if 'free' in msg['document']['file_name'].lower():
                    pass
                else:
                    id_pkg = msg['document']['file_id']
                    nome_pkg = msg['document']['file_name']
                    uploader = msg['from']['username']
                    # parameterized query: the file name is user-controlled text
                    cursor_sqlite.execute(
                        "INSERT INTO loja_doadores(int_id,uploader,versao,pkg,data) VALUES (null,?,?,?,?)",
                        (uploader, nome_pkg, id_pkg, data))
                    conexao_sqlite.commit()
                    await bot.sendMessage(msg['chat']['id'], f'`Loja para Doadores atualizada na Database:` @{uploader} agora os usuarios irão receber a ***{nome_pkg}*** no grupo de doadores.', 'markdown', reply_to_message_id=msg['message_id'])
    except Exception:
        pass
    try:  # FREE STORE REGISTRATION SYSTEM -------------------------------------->
        if msg['chat']['type'] == 'private' and msg.get('document') and msg['from']['id'] in UPLOADER_IDS:
            if msg['document']['file_name'].split('.')[-1] == 'pkg' and 'free' in msg['document']['file_name'].lower():
                id_pkg = msg['document']['file_id']
                nome_pkg = msg['document']['file_name']
                uploader = msg['from']['username']
                cursor_sqlite.execute(
                    "INSERT INTO loja_free(int_id,uploader,versao,pkg,data) VALUES (null,?,?,?,?)",
                    (uploader, nome_pkg, id_pkg, data))
                conexao_sqlite.commit()
                await bot.sendMessage(msg['chat']['id'], f'`Loja Gratuita atualizada na Database:` @{uploader} agora os usuarios irão receber a ***{nome_pkg}*** no grupo geral', 'markdown', reply_to_message_id=msg['message_id'])
    except Exception:
        pass
    try:  # FIX HAN REGISTRATION SYSTEM -------------------------------------->
        if msg['chat']['type'] == 'private' and msg.get('document') and msg['from']['id'] in UPLOADER_IDS:
            if msg['document']['file_name'].split('.')[0].lower() == 'fix_han' and msg['document']['file_name'].split('.')[-1].lower() == 'pkg':
                id_pkg = msg['document']['file_id']
                nome_pkg = msg['document']['file_name']
                uploader = msg['from']['username']
                cursor_sqlite.execute(
                    "INSERT INTO fix_han (int_id,uploader,versao,pkg,data) VALUES (null,?,?,?,?)",
                    (uploader, nome_pkg, id_pkg, data))
                conexao_sqlite.commit()
                await bot.sendMessage(msg['chat']['id'], f'`Fix HAN atualizado na Database:` @{uploader} agora os usuarios irão receber a ***{nome_pkg}***', 'markdown', reply_to_message_id=msg['message_id'])
    except Exception:
        pass
    try:  # FIX HEN REGISTRATION SYSTEM -------------------------------------->
        if msg['chat']['type'] == 'private' and msg.get('document') and msg['from']['id'] in UPLOADER_IDS:
            if msg['document']['file_name'].split('.')[0].lower() == 'fix_hen' and msg['document']['file_name'].split('.')[-1].lower() == 'pkg':
                id_pkg = msg['document']['file_id']
                nome_pkg = msg['document']['file_name']
                uploader = msg['from']['username']
                cursor_sqlite.execute(
                    "INSERT INTO fix_hen (int_id,uploader,versao,pkg,data) VALUES (null,?,?,?,?)",
                    (uploader, nome_pkg, id_pkg, data))
                conexao_sqlite.commit()
                await bot.sendMessage(msg['chat']['id'], f'`Fix HEN atualizado na Database:` @{uploader} agora os usuarios irão receber a ***{nome_pkg}***', 'markdown', reply_to_message_id=msg['message_id'])
    except Exception:
        pass
    try:  # CFW XML FIX REGISTRATION SYSTEM -------------------------------------->
        if msg['chat']['type'] == 'private' and msg.get('document') and msg['from']['id'] in UPLOADER_IDS:
            if msg['document']['file_name'].split('.')[0].lower() == 'category_network_tool2' and msg['document']['file_name'].split('.')[-1].lower() == 'xml':
                id_pkg = msg['document']['file_id']
                nome_pkg = msg['document']['file_name']
                uploader = msg['from']['username']
                cursor_sqlite.execute(
                    "INSERT INTO fix_cfw_xml (int_id,uploader,versao,pkg,data) VALUES (null,?,?,?,?)",
                    (uploader, nome_pkg, id_pkg, data))
                conexao_sqlite.commit()
                await bot.sendMessage(msg['chat']['id'], f'`Fix XML para CFW atualizado na Database:` @{uploader} agora os usuarios irão receber a ***{nome_pkg}***', 'markdown', reply_to_message_id=msg['message_id'])
    except Exception:
        pass
    try:  # HEN XML FIX REGISTRATION SYSTEM -------------------------------------->
        if msg['chat']['type'] == 'private' and msg.get('document') and msg['from']['id'] in UPLOADER_IDS:
            if msg['document']['file_name'].split('.')[0].lower() == 'category_network' and msg['document']['file_name'].split('.')[-1].lower() == 'xml':
                id_pkg = msg['document']['file_id']
                nome_pkg = msg['document']['file_name']
                uploader = msg['from']['username']
                cursor_sqlite.execute(
                    "INSERT INTO fix_hen_xml (int_id,uploader,versao,pkg,data) VALUES (null,?,?,?,?)",
                    (uploader, nome_pkg, id_pkg, data))
                conexao_sqlite.commit()
                await bot.sendMessage(msg['chat']['id'], f'`Fix XML para HEN atualizado na Database:` @{uploader} agora os usuarios irão receber a ***{nome_pkg}***', 'markdown', reply_to_message_id=msg['message_id'])
    except Exception:
        pass
    if msg.get('text') and msg['chat']['type'] == 'supergroup':
        try:
            # ---- UPDATE ("att") FOR TCXS PROJECT DONORS ---------------------------------------->
            if msg['text'].lower() == 'att' and msg['chat']['title'] == 'Doadores TCXS 2020':
                await bot.sendMessage(msg['chat']['id'], f"🤖 ***Olá*** @{msg['from']['username']}\n```-------->> Bem vindo a TCXS Project ,agora você faz parte dela, entenda que as doações sao mensais e nossa equipe nao ganha nada por este projeto, todo dinheiro arrecadado neste grupo é para pagar os servidores dos quais dispomos jogos. Logo a PSN STUFF IRÁ ACABAR POIS OS SERVIDORES SERÃO DESLIGADOS e assim nao terá mais os jogos gratuitos por ai, restando apenas este acervo que é mantido por voces doadores! Vamos a Instalação!!! --> Espero que tenha um pendrive em mãos! --> copie os arquivos da VERSÃO 3.6 e caso use o fix de acordo com seu Exploit/Desbloqueio, se voce tem han ou CFW use o FIX HAN, caso contrário e seja o Exploit HEN em seu console use o FIX HEN, é necessaria a instalacao deste arquivo para que a loja apareca em seu console! Ative seu HAN/HEN e instale o FIX, após o FIX instalado instale a TCXS Store PKG, recomendamos reiniciar o console após este processo!!```", 'markdown')
                await bot.sendMessage(msg['chat']['id'], '🤖`TUTORIAL DE COMO INSTALAR A LOJA EXPLOIT HAN E HEN!!` https://cos.tv/videos/play/1586413688272059934', 'markdown')
                await bot.sendMessage(msg['chat']['id'], '🤖`COMO USAR A XML CATEGORY_NETWORK!` https://cos.tv/videos/play/1586411677524278797', 'markdown')
                await bot.sendMessage(msg['chat']['id'], '🤖`Tutorial Download em Segundo Plano` https://cos.tv/videos/play/1586815808334907474', 'markdown')
                await bot.sendMessage(msg['chat']['id'], '🤖`COMO USAR A XML CATEGORY_NETWORK! CONSOLES CFW ` https://cos.tv/videos/play/1586411677524278797', 'markdown')
                await bot.sendMessage(msg['chat']['id'], '🤖`PORQUE DEVE USAR PROXY NO PS3!!` https://cos.tv/videos/play/1586410545470952204', 'markdown')
                # DONOR STORE, BACKED BY THE DATABASE -------------------------------------------------------------------------------------------------->
                cursor_sqlite.execute("""SELECT * FROM loja_doadores""")
                resultados = cursor_sqlite.fetchall()
                if resultados == []:
                    await bot.sendMessage(msg['chat']['id'], f"🤖 {msg['from']['first_name']} `não tenho lojas cadastradas, insira o banco de dados com dados ou cadastre um PKG enviando ela no meu privado com nome iniciando com TCXS, exemplo:` ***TCXS_Store_3.9.pkg***", 'markdown')
                else:
                    for resultado in resultados:
                        id_pkg = resultado['pkg']
                        nome_pkg = resultado['versao']
                        data_att = resultado['data']
                        uploader_id = resultado['uploader']
                        await bot.sendDocument(msg['chat']['id'], document=id_pkg, caption=f'{nome_pkg} upada em {data_att} por @{uploader_id}')
                # FIX HAN DATABASE -------------------------------------------------------------------------------------------------->
                cursor_sqlite.execute("""SELECT * FROM fix_han""")
                resultados = cursor_sqlite.fetchall()
                if resultados == []:
                    await bot.sendMessage(msg['chat']['id'], f"🤖 {msg['from']['first_name']} `não tenho o fix han, insira o banco de dados com dados ou cadastre um PKG enviando ele no meu privado com nome de:` ***FIX_HAN.pkg***", 'markdown')
                else:
                    for resultado in resultados:
                        id_pkg = resultado['pkg']
                        await bot.sendDocument(msg['chat']['id'], document=id_pkg, caption='Fix para usuários HAN')
                # FIX HEN DATABASE -------------------------------------------------------------------------------------------------->
                cursor_sqlite.execute("""SELECT * FROM fix_hen""")
                resultados = cursor_sqlite.fetchall()
                if resultados == []:
                    await bot.sendMessage(msg['chat']['id'], f"🤖 {msg['from']['first_name']} `não tenho o fix hen, insira o banco de dados com dados ou cadastre um PKG enviando ele no meu privado com nome de:` ***FIX_HEN.pkg***", 'markdown')
                else:
                    for resultado in resultados:
                        id_pkg = resultado['pkg']
                        await bot.sendDocument(msg['chat']['id'], document=id_pkg, caption='Fix para usuários HEN')
                # XML EXCLUSIVE TO CFW -------------------------------------------------------------------------------------------------->
                cursor_sqlite.execute("""SELECT * FROM fix_cfw_xml""")
                resultados = cursor_sqlite.fetchall()
                if resultados == []:
                    await bot.sendMessage(msg['chat']['id'], f"🤖 {msg['from']['first_name']} `não tenho o fix xml para cfw, insira o banco de dados com dados ou cadastre um XML enviando ele no meu privado com nome de:` ***category_network_tool2.xml***", 'markdown')
                else:
                    for resultado in resultados:
                        id_pkg = resultado['pkg']
                        await bot.sendDocument(msg['chat']['id'], document=id_pkg, caption='XML exclusivo para quem usa CFW')
                # XML EXCLUSIVE TO HEN -------------------------------------------------------------------------------------------------->
                cursor_sqlite.execute("""SELECT * FROM fix_hen_xml""")
                resultados = cursor_sqlite.fetchall()
                if resultados == []:
                    await bot.sendMessage(msg['chat']['id'], f"🤖 {msg['from']['first_name']} `não tenho o fix xml para hen, insira o banco de dados com dados ou cadastre um XML enviando ele no meu privado com nome de:` ***category_network.xml***", 'markdown')
                else:
                    for resultado in resultados:
                        id_pkg = resultado['pkg']
                        await bot.sendDocument(msg['chat']['id'], document=id_pkg, caption='XML exclusivo para quem usa HEN')
        except Exception:
            pass
        # STORE FOR FREE USERS ------------------------------------------------------------------->
        if msg.get('text').lower() == 'att' and msg['chat']['title'] != 'Doadores TCXS 2020':  # call out people trying to steal the store lol
            await bot.sendMessage(msg['chat']['id'], f"***{msg['from']['first_name']} você esta tentando roubar a TCXS Store, cara vou pegar seu ip e te hackear agora mesmo!!!*** ", 'markdown', reply_to_message_id=msg['message_id'])
        if msg['text'] == 'loja gratis' or msg['text'] == '/freepkg' or msg['text'] == 'free pkg' or msg['text'] == f"/freepkg@{bot_username}" or msg['text'] == 'gratis' or msg['text'] == 'Gratis' or msg['text'] == 'Free pkg':
            await bot.sendMessage(msg['chat']['id'], 'Salve, venho trazer a você nossa nova att GRATUITA, espero que goste! ---- ---- ---- Caso tenha dificuldades com o download em segundo plano confira este tutorial exclusivo feito para você doador amado que contribui para este projeto se manter em pe: https://youtu.be/_21a5REKhBc', reply_to_message_id=msg['message_id'])
            await bot.sendMessage(msg['chat']['id'], 'Espero que tenha um pendrive em mãos e saiba usar a loja, não daremos suporte para USUARIOS GRATUITOS, agora copie os arquivos abaixo para a raiz de um pendrive e coloque na USB direita do seu console, caso use HAN ative o debug ou se usa HEN ative o hen. ESTA ATT NAO USA NENHUM TIPO DE PATCH OU FIX!', reply_to_message_id=msg['message_id'])
            await bot.sendDocument(msg['chat']['id'], document='BQACAgEAAx0CTd0y0QABAfACXkmA716o7XaNW82C3Mr7O2c0bX8AApEAA0oQUUaFcnOHb037rhgE', reply_to_message_id=msg['message_id'])
        # OTHER INSTRUCTIONS FOR USERS
        if msg['text'].split()[0] == 'doadores' or msg['text'].split()[0] == '/doadores' or msg['text'].split()[0] == f"/doadores@{bot_username}":
            await bot.sendMessage(msg['chat']['id'], '`Aqui tem tudo que os doadores precisam saber:` http://tcxsproject.com.br/doadores-tcxs-store-regras/', 'markdown', reply_to_message_id=msg['message_id'])
        # Ted's bluetooth video
        if msg['text'] == 'bluetooth':
            await bot.sendMessage(msg['chat']['id'], 'https://www.youtube.com/watch?v=_wYG7iMa5uY', reply_to_message_id=msg['message_id'])
        # game showcase videos
        if msg['text'] == 'ps1':
            await bot.sendVideo(msg['chat']['id'], video='BAACAgEAAx0EUYaz7wACEbVe_lDehK8EitSnLO-jP2SIqZ00PAACsgADGepZRCV_bEET9yWbGgQ', reply_to_message_id=msg['message_id'])
        if msg['text'] == 'ps2':
            await bot.sendVideo(msg['chat']['id'], video='BAACAgEAAx0EUYaz7wACEbde_lDfbzhCcTg7M1iPa0_G_rF6UQACsgADGepZRCV_bEET9yWbGgQ', reply_to_message_id=msg['message_id'])
        if msg['text'] == 'ps3':
            await bot.sendVideo(msg['chat']['id'], video='AAMCAQADHQRRhrPvAAIS0V7_9mwf8l1awkJia_vSIK-7I4a7AAKzAAMZ6llEdIdHMW_ukXk1cHhIFwADAQAHbQADKkcAAhoE', reply_to_message_id=msg['message_id'])
        if msg['text'] == 'exclusivos':
            await bot.sendVideo(msg['chat']['id'], video='BAACAgEAAx0EUYaz7wACEbhe_lDfqKXeXTKts9b5692tHUMg7gACsAADGepZRO4jb6TTGEoWGgQ', reply_to_message_id=msg['message_id'])
        if msg['text'] == 'emuladores':
            await bot.sendVideo(msg['chat']['id'], video='BAACAgEAAx0CUYaz7wACEbJe_lDe2zzPbEQaW7cmwysAAbjSkPYAAgYBAAKPeSlGO3j50bdxrn8aBA', reply_to_message_id=msg['message_id'])
        if msg['text'] == 'psp':
            await bot.sendVideo(msg['chat']['id'], video='BAACAgEAAx0CUYaz7wACEbBe_lDeo13PNB4kKRDH4sAFdn8g2AACBwEAAo95KUbzplnZj4OTAAEaBA', reply_to_message_id=msg['message_id'])
        if msg['text'] == 'tcxs' or msg['text'] == "/tcxs" or msg['text'] == f"/tcxs@{bot_username}" or msg['text'] == 'tcxs project' or msg['text'] == 'TCXS' or msg['text'] == 'TCXS Project':
            await bot.sendMessage(msg['chat']['id'], '`{} O nome TCXS foi criado com base nos botoes do
PlayStation3, TRIANGLE - CIRCLE - X - SQUARE, ou seja, triangulo, bolinha, x e quadrado, kkk. Como nosso dev era publicitario e odiava a cena vendo alguns imbecis AUTO PROMOVER seu nome criando lojas e projetos, ele decidiu entrar na cena com uma nomenclatura que lembrasse a cena hacker, ou seja, siglas! Siglas esyão no cotidiano de todo mundo e é facil sua absorção bem como dentro da parte web e publicitaria a sigla tem um forte papel facilitando a digitacao e pesquisa, entao com este intuito nos denominados de TCXS Project, a palavra Project veio da vontade de que nunca morra, sendo assim um projeto qualquer um que tiver habilidade e capacidade pode entrar na equipe e ajudar a coordenar bem como tocar o projeto, ja vimos na cena varios adms passarem pela TCXS, ela e um projeto feito a varias maos e cada um doa de forma gratuita seu tempo e conhecimento para disponibilizar tudo que temos em nossas redes e arquivos. Ficamos gratos a todos que passaram por esta equipe seja dos adms aos users e seria impossivel enumerar todos, voces que sao a TCXS Project e formam este projeto que ja esta indo para seu terceiro ano! OBRIGADO COMUNIDADE GAMER, HACKER, EXPLOITER, DEVS, USUARIOS E SIMPATIZANTES, SEM VOCES NAO EXISTIRIAMOS!`'.format(msg['from']['first_name']),'markdown',reply_to_message_id=msg['message_id']) if msg['text'] == 'proxy' or msg['text'] == f"/proxy@{bot_username}" or msg['text'] == "/proxy" or msg['text'] == 'Proxy' : await bot.sendMessage(msg['chat']['id'], '{} quer aumentar a velocidade dos downloads no seu PS3? Primeiro quero que saiba que o PS3 é de 2006 e sua placa de rede trabalha com um protocolo bem lento, logo nao adianta vc ter 100mb de net fibra full, pois sua placa de rede nao le neste tempo, bem como a gravaçao no HD do PS3 tambem é lenta, lembre que ele usa HDD e nao SSD assim eu te digo que NAO ADIANTA TUA NET SER 100MB e de fibra se seu hd antigo e ja capenga grava no maximo a 30mb/s, porem vc sabia que antes de gravar no hd tudo fica na ram e so depois passa para o HD, tendo isto como afirmaçao entenda que o ps3 tem 256mb de ram e mtos slots desta ram estao ocupados, entao nao espere que o PS3 seja uma super maquina de download, ele era do tempo do final da Internet Discada e inicio da internet a Radio e ADLS na epoca da esturura dele em 2006 a maior velocidade de internet vigente como estavel era em torno de 1mb! 
Tendo isto em mente siga nosso tutorial de proxy para melhorar sua conexao, o serviço proxy é utilizar de outra maquina para que sua conexao esteja com o IP mais proximo do servidor e um cache seja armazenado neste -proxy- fazendo assim seu download melhorar significativamente, segue tutorial: https://youtu.be/l4o8ySk1Do4'.format(msg['from']['first_name']),reply_to_message_id=msg['message_id']) if msg['text'].lower() == 'fix' or msg['text'] == f"/fix@{bot_username}" or msg['text'] == "/fix": await bot.sendMessage(msg['chat']['id'], '`{} vejo que esta precisando do fix para sua loja aparecer, instale este pkg em seu console e a loja começara aparecer.`'.format(msg['from']['first_name']),'markdown',reply_to_message_id=msg['message_id']) await bot.sendDocument(msg['chat']['id'], document='BQACAgEAAx0CWJNTSQACC7FeXTrapHT8zx-Yz6Rm85I7s6BU2gACxQADxKN4RV4960o0M9ruGAQ', reply_to_message_id=msg['message_id']) if msg['text'] == 'torrent' or msg['text'] == "/torrent" or msg['text'] == f"/torrent@{bot_username}" or msg['text'] == 'Torrent' or msg['text'] == 'torrents' or msg['text'] == 'Torrents': await bot.sendMessage(msg['chat']['id'], '{} aqui nosso canal de torrents com pkg para download: https://t.me/tcxsp'.format(msg['from']['first_name']),'markdown',reply_to_message_id=msg['message_id']) if msg['text'] == 'codigo de erro' or msg['text'] == f"/codigoerro@{bot_username}" or msg['text'] == '/codigoerro' or msg['text'] == 'lista de erros' or msg['text'] == 'erro psn' or msg['text'] == 'estou com erro' or msg['text'] == 'ta dando erro' or msg['text'] == 'deu erro' or msg['text'] == 'meu videogame ta com problema': await bot.sendMessage(msg['chat']['id'], '`Querido usúario do sistema PlayStation3 e praticamente impossivel decorar ou trazer a minha base de dados todos os erros, imagina entao se um humano saberia o erro por um codigo, entao vou te fornecer aqui o site oficial da sony e na lista voce podera encontrar seu erro e solucao, caso seu erro persista seu erro esta ocorrendo com o exploit, ai aconselho que voce refaca todo o seu exploit novamente, nao e dificil mas antes veja aqui se seu erro tem solucao:` https://www.playstation.com/pt-pt/get-help/#!/error-code/ ','markdown',reply_to_message_id=msg['message_id']) if msg['text'].lower() == 'rap' or msg['text'] == '/rap' or msg['text'] == f"/rap@{bot_username}" or msg['text'] == 'raps' or 'licença' in msg['text'] or msg['text'] == '14.000' or msg['text'] == 'assinatura': await bot.sendMessage(msg['chat']['id'], 'Agora precisamos apenas do PKG das licenças, no HEN as mesmas licenças servem para todos! Tutorial:https://www.youtube.com/watch?v=EYr_MKaL1Tg Download: https://www.mediafire.com/file/23nzljx8w83dbl0/14Mil-raps-.pkg/file',reply_to_message_id=msg['message_id']) if msg['text'] == 'desbloqueio' or msg['text'] == '/desbloqueio' or msg['text'] == f"/desbloqueio@{bot_username}" or msg['text'] == 'o meu ps3' or msg['text'] == 'Desbloqueio' or msg['text'] == 'desbloquear o ps3' or msg['text'] == 'desbloquear' or msg['text'] == 'desbloquear meu videogame' or msg['text'] == 'desbloquear o meu ps3' or msg['text'] == 'desbloquear o playstation' or msg['text'] == 'desbloquear o meu console' or msg['text'] == 'desbloqueei meu videogame' or msg['text'] == 'desbloqueei meu console': await bot.sendMessage(msg['chat']['id'], '`{} Tem certeza que ele foi feito de forma correta? 
O Joao PSX alem de fornecer arquivos bugados e ate mesmo mostrar bugs de mais ao vivo acaba nao fornecendo um material confiavel bem como ele nao tem total dominio doque faz como podemos ver nos videos a quantidade de erros ( ele podia editar os videos), enfim aconselho que veja, reveja e se possivel faça o exploit em cima deste tutorial:` https://www.youtube.com/watch?v=XUUieW4bv_Y'.format(msg['from']['first_name']),'markdown',reply_to_message_id=msg['message_id']) if 'mercado' in msg['text'] or msg['text'] == "/mercadopago" or msg['text'] == f"/mercadopago@{bot_username}" or msg['text'] == 'Mercado Pago' or msg['text'] == 'Mercado pago': await bot.sendMessage(msg['chat']['id'], 'Olá que bom que você quer doar, {} aqui esta o link de pagamento -----> https://www.mercadopago.com.br/checkout/v1/redirect?pref_id=354396246-315fce8c-d8f9-4aa0-8583-95d678936375'.format(msg['from']['first_name']),reply_to_message_id=msg['message_id']) if msg['text'] == 'tutorial segundo plano' or msg['text'] == "/segundoplano" or msg['text'] == f"/segundoplano@{bot_username}" or msg['text'] == 'download segundo plano' or msg['text'] == 'downloads em segundo plano' or msg['text'] == 'Tutorial' or msg['text'] == 'Download em segundo plano' or msg['text'] == 'Downloads em segundo plano'or msg['text'] == 'download em segundo plano': await bot.sendMessage(msg['chat']['id'], '{} O nosso admin @MsT3Dz criou um tutorial exclusivo de como fazer os downloads na TCXS Project, bem como os downloads em segundo plano. Confira o tutorial completo: https://youtu.be/_21a5REKhBc'.format(msg['from']['first_name']),reply_to_message_id=msg['message_id']) if msg['text'] == 'tutorial' or msg['text'] == "/tutorial" or msg['text'] == f"/tutorial@{bot_username}" or msg['text'] == 'instalar' or msg['text'] == 'instalar a loja' or msg['text'] == 'instalar loja' or msg['text'] == 'como instalar a loja' or msg['text'] == 'Como instalar a loja' or msg['text'] == 'Como instalo a loja'or msg['text'] == 'Instalação' or msg['text'] == 'Tutorial de instalação' or msg['text'] == 'Instalação da tcxs' or msg['text'] == 'instalar a tcxs' or msg['text'] == 'Instalação': await bot.sendMessage(msg['chat']['id'], '{} O nosso admin @MsT3Dz criou um tutorial exclusivo de como instalar a loja: https://www.youtube.com/watch?v=aG1jLj8QuBY'.format(msg['from']['first_name']),reply_to_message_id=msg['message_id']) if msg['text'].lower() == 'dev' or msg['text'] == "/dev" or msg['text'] == f"/dev@{bot_username}" : await bot.sendMessage(msg['chat']['id'], '{} aqui esta a biblioteca dev, não vá se aventurar naquela loucura satanista anarquista lá.... 
https://tcxsproject.com.br/dev/'.format(msg['from']['first_name']),reply_to_message_id=msg['message_id']) if msg['text'].lower() == 'onion' or msg['text'] == "/onion" or msg['text'] == f"/onion@{bot_username}" : await bot.sendMessage(msg['chat']['id'], '{} este é nosso site na deep web, ele esta hospedado comigo por sinal, espero que isto nao de merda...\n http://5ct542hryncrbz7x3pveukcfzwf6qlhbwemsxnu4vtx2r7icjtimj6qd.onion'.format(msg['from']['first_name']),reply_to_message_id=msg['message_id']) if msg.get('text'): #SISTEMA DE CRIAÇÃO DE XML PARA LOJA if '/ps1' in msg['text']: try: nome_xml = msg['text'].split()[1] nome = msg['text'].split()[2] descricao = msg['text'].split()[3] link = msg['text'].split()[4] # string armazena o xml a ser gravado e printado arq = (f''' <XMBML version="1.0"> <View id="ps1_items_link"> <Attributes> <Table key="ps1_item_link"> <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/PS1/download.jpg</String></Pair> <Pair key="title"><String>TCXS - {nome} </String></Pair> <Pair key="info"><String>TCXS - {descricao} </String></Pair> <Pair key="module_name"><String>webrender_plugin</String></Pair> <Pair key="module_action"><String>{link}</String></Pair> </Table> </Attributes> <Items> <Query class="type:x-xmb/module-action" key="ps1_item_link" attr="ps1_item_link"/> </Items> </View> </XMBML>''') # bot envia mensagem await bot.sendMessage(msg['chat']['id'], (f'Seu xml de PlaysTation1 meu mestre ```{arq}```'), 'markdown', reply_to_message_id=msg['message_id']) arq2 = (f'''Seu XML esta pronto, insira estas linhas no seu XML Pai: `Abaixo de <Attributes> cole:` ``` <Table key="ps1_{nome_xml}"> <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/PS1/{nome_xml}.jpg</String></Pair> <Pair key="title"><String>TCXS - {nome} - TCXS</String></Pair> <Pair key="info"><String> {descricao}- TCXS</String></Pair> </Table>``` `Abaixo de <Items> cole:` ``` <Query class="type:x-xmb/folder-pixmap" key="ps1_{nome_xml}" attr="ps1_{nome_xml}" src="xmb://localhost/dev_hdd0/game/TCXSPROJECT/USRDIR/XMLS/PS1/{nome_xml}.xml#ps1_items_link" />```''') jon = open("bot_files/arquivos/{}.xml".format(nome_xml), "w") for i in arq: j = i.replace('', '') jon.writelines(j) jon.close() await bot.sendMessage(msg['chat']['id'], arq2, 'markdown', reply_to_message_id=msg['message_id']) await bot.sendDocument(msg['chat']['id'], document=open("bot_files/arquivos/{}.xml".format(nome_xml), 'rb'), reply_to_message_id=msg['message_id']) os.remove("bot_files/arquivos/{}.xml".format(nome_xml)) except: instrucao = '''Instruções: ``` 1 - meu comando sempre começa com /xml 2 - eu não aceito espaços no nome de arquivo, nome de jogo e nem na descrição! 
3 - você pode copiar o caractere especial invisivel dentro das aspas abaixo para usar onde precisar de espaço!``` `Copie de dentro das aspas o caractere invisivel:`"⠀" **VAMOS AO COMANDO EM SI** `Exemplo com caractere invisivel:` ``` gow god⠀of⠀war descriçao⠀usando⠀caractere⠀invisivel www.linkdropbox.com``` `Exemplo sem caractere visivel:` ``` /ps1 gow god_of_war descrição_sem_caractere_visivel www.linkdropbox.com``` **Onde cada campo:** `/ps1` ```- chama comando``` `gow` ```- nome do xml``` `god_of_war` ```- nome do jogo, se quiser tirar os _ usar caractere especial no lugar``` `descrição_do_jogo` ```- descrição, se quiser tirar os _ usar caractere especial no lugar``` `www.linkdropbox.com` ```- Link do Dropbox``` ''' await bot.sendMessage(msg['chat']['id'], instrucao, 'markdown', reply_to_message_id=msg['message_id']) return True if '/ps2' in msg['text']: try: nome_xml = msg['text'].split()[1] nome = msg['text'].split()[2] descricao = msg['text'].split()[3] link = msg['text'].split()[4] # string armazena o xml a ser gravado e printado arq = (f''' <XMBML version="1.0"> <View id="ps2_items_link"> <Attributes> <Table key="ps2_item_link"> <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/PS2/download.jpg</String></Pair> <Pair key="title"><String>TCXS - {nome} </String></Pair> <Pair key="info"><String>TCXS - {descricao} </String></Pair> <Pair key="module_name"><String>webrender_plugin</String></Pair> <Pair key="module_action"><String>{link}</String></Pair> </Table> </Attributes> <Items> <Query class="type:x-xmb/module-action" key="ps2_item_link" attr="ps2_item_link"/> </Items> </View> </XMBML>''') # bot envia mensagem await bot.sendMessage(msg['chat']['id'], (f'Seu xml de PlaysTation2 meu mestre ```{arq}```'), 'markdown', reply_to_message_id=msg['message_id']) arq2 = (f'''Seu XML esta pronto, insira estas linhas no seu XML Pai: `Abaixo de <Attributes> cole:` ``` <Table key="ps2_{nome_xml}"> <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/PS2/{nome_xml}.jpg</String></Pair> <Pair key="title"><String>TCXS - {nome} - TCXS</String></Pair> <Pair key="info"><String> {descricao}- TCXS</String></Pair> </Table>``` `Abaixo de <Items> cole:` ``` <Query class="type:x-xmb/folder-pixmap" key="ps2_{nome_xml}" attr="ps2_{nome_xml}" src="xmb://localhost/dev_hdd0/game/TCXSPROJECT/USRDIR/XMLS/PS2/{nome_xml}.xml#ps2_items_link" />```''') jon = open("bot_files/arquivos/{}.xml".format(nome_xml), "w") for i in arq: j = i.replace('', '') jon.writelines(j) jon.close() await bot.sendMessage(msg['chat']['id'], arq2, 'markdown', reply_to_message_id=msg['message_id']) await bot.sendDocument(msg['chat']['id'], document=open("bot_files/arquivos/{}.xml".format(nome_xml), 'rb'), reply_to_message_id=msg['message_id']) os.remove("bot_files/arquivos/{}.xml".format(nome_xml)) except: instrucao = '''Instruções: ``` 1 - meu comando sempre começa com /xml 2 - eu não aceito espaços no nome de arquivo, nome de jogo e nem na descrição! 
3 - você pode copiar o caractere especial invisivel dentro das aspas abaixo para usar onde precisar de espaço!``` `Copie de dentro das aspas o caractere invisivel:`"⠀" **VAMOS AO COMANDO EM SI** `Exemplo com caractere invisivel:` ``` gow god⠀of⠀war descriçao⠀usando⠀caractere⠀invisivel www.linkdropbox.com``` `Exemplo sem caractere visivel:` ``` /ps2 gow god_of_war descrição_sem_caractere_visivel www.linkdropbox.com``` **Onde cada campo:** `/ps2` ```- chama comando``` `gow` ```- nome do xml``` `god_of_war` ```- nome do jogo, se quiser tirar os _ usar caractere especial no lugar``` `descrição_do_jogo` ```- descrição, se quiser tirar os _ usar caractere especial no lugar``` `www.linkdropbox.com` ```- Link do Dropbox``` ''' await bot.sendMessage(msg['chat']['id'], instrucao, 'markdown', reply_to_message_id=msg['message_id']) return True if '/psp' in msg['text']: try: nome_xml = msg['text'].split()[1] nome = msg['text'].split()[2] descricao = msg['text'].split()[3] link = msg['text'].split()[4] # string armazena o xml a ser gravado e printado arq = (f''' <XMBML version="1.0"> <View id="psp_items_link"> <Attributes> <Table key="psp_item_link"> <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/PSP/download.jpg</String></Pair> <Pair key="title"><String>TCXS - {nome} </String></Pair> <Pair key="info"><String>TCXS - {descricao} </String></Pair> <Pair key="module_name"><String>webrender_plugin</String></Pair> <Pair key="module_action"><String>{link}</String></Pair> </Table> </Attributes> <Items> <Query class="type:x-xmb/module-action" key="psp_item_link" attr="psp_item_link"/> </Items> </View> </XMBML>''') # bot envia mensagem await bot.sendMessage(msg['chat']['id'], (f'Seu xml meu mestre ```{arq}```'), 'markdown', reply_to_message_id=msg['message_id']) arq2 = (f'''Seu XML esta pronto, insira estas linhas no seu XML Pai: `Abaixo de <Attributes> cole:` ``` <Table key="psp_{nome_xml}"> <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/PSP/{nome_xml}.jpg</String></Pair> <Pair key="title"><String>TCXS - {nome} - TCXS</String></Pair> <Pair key="info"><String> {descricao}- TCXS</String></Pair> </Table>``` `Abaixo de <Items> cole:` ``` <Query class="type:x-xmb/folder-pixmap" key="psp_{nome_xml}" attr="psp_{nome_xml}" src="xmb://localhost/dev_hdd0/game/TCXSPROJECT/USRDIR/XMLS/PSP/{nome_xml}.xml#psp_items_link" />```''') jon = open("bot_files/arquivos/{}.xml".format(nome_xml), "w") for i in arq: j = i.replace('', '') jon.writelines(j) # print(i) jon.close() await bot.sendMessage(msg['chat']['id'], arq2, 'markdown', reply_to_message_id=msg['message_id']) await bot.sendDocument(msg['chat']['id'], document=open("bot_files/arquivos/{}.xml".format(nome_xml), 'rb'), reply_to_message_id=msg['message_id']) os.remove("bot_files/arquivos/{}.xml".format(nome_xml)) except: instrucao = '''Instruções: ``` 1 - meu comando sempre começa com /xml 2 - eu não aceito espaços no nome de arquivo, nome de jogo e nem na descrição! 
3 - você pode copiar o caractere especial invisivel dentro das aspas abaixo para usar onde precisar de espaço!``` `Copie de dentro das aspas o caractere invisivel:`"⠀" **VAMOS AO COMANDO EM SI** `Exemplo com caractere invisivel:` ``` gow god⠀of⠀war descriçao⠀usando⠀caractere⠀invisivel www.linkdropbox.com``` `Exemplo sem caractere visivel:` ``` /psp gow god_of_war descrição_sem_caractere_visivel www.linkdropbox.com``` **Onde cada campo:** `/psp` ```- chama comando``` `gow` ```- nome do xml``` `god_of_war` ```- nome do jogo, se quiser tirar os _ usar caractere especial no lugar``` `descrição_do_jogo` ```- descrição, se quiser tirar os _ usar caractere especial no lugar``` `www.linkdropbox.com` ```- Link do Dropbox``` ''' await bot.sendMessage(msg['chat']['id'], instrucao, 'markdown', reply_to_message_id=msg['message_id']) return True if '/ps3' in msg['text']: try: nome_xml = msg['text'].split()[1] nome = msg['text'].split()[2] descricao = msg['text'].split()[3] link1 = msg['text'].split()[4] link2 = msg['text'].split()[5] link3 = msg['text'].split()[6] # string armazena o xml a ser gravado e printado arq = (f'''<XMBML version="1.0"> <View id="ps3_items_link"> <Attributes> <Table key="ps3_item_0"> <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/DLCS/download.png</String></Pair> <Pair key="title"><String>TCXS Parte1 GAME- {nome}</String></Pair> <Pair key="info"><String>TCXS - {descricao}</String></Pair> <Pair key="module_name"><String>webrender_plugin</String></Pair> <Pair key="module_action"><String>{link1}</String></Pair> </Table> <Table key="ps3_item_1"> <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/DLCS/download.png</String></Pair> <Pair key="title"><String>TCXS Parte GAME+LIC- {nome}</String></Pair> <Pair key="info"><String>TCXS - {descricao}</String></Pair> <Pair key="module_name"><String>webrender_plugin</String></Pair> <Pair key="module_action"><String>{link2}</String></Pair> </Table> <Table key="ps3_item_2"> <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/DLCS/download.png</String></Pair> <Pair key="title"><String>TCXS Parte GAME+LIC- {nome}</String></Pair> <Pair key="info"><String>TCXS - {descricao}</String></Pair> <Pair key="module_name"><String>webrender_plugin</String></Pair> <Pair key="module_action"><String>{link3}</String></Pair> </Table> </Attributes> <Items> <Query class="type:x-xmb/module-action" key="ps3_item_0" attr="ps3_item_0"/> <Query class="type:x-xmb/module-action" key="ps3_item_1" attr="ps3_item_1"/> <Query class="type:x-xmb/module-action" key="ps3_item_2" attr="ps3_item_2"/> </Items> </View> </XMBML>''') # bot envia mensagem await bot.sendMessage(msg['chat']['id'], (f'Seu xml meu mestre ```{arq}```'), 'markdown', reply_to_message_id=msg['message_id']) arq2 = (f'''Seu XML esta pronto, insira estas linhas no seu XML Pai: `Abaixo de <Attributes> cole:` ``` <Table key="ps3_{nome_xml}"> <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/PS3/{nome_xml}.jpg</String></Pair> <Pair key="title"><String>TCXS - {nome} - TCXS</String></Pair> <Pair key="info"><String>{descricao} - TCXS</String></Pair> </Table>``` `Abaixo de <Items> cole:` ``` <Query class="type:x-xmb/folder-pixmap" key="ps3_{nome_xml}" attr="ps3_{nome_xml}" src="xmb://localhost/dev_hdd0/game/TCXSPROJECT/USRDIR/XMLS/PS3/{nome_xml}.xml#ps3_items_link" />```''') jon = open("bot_files/arquivos/{}.xml".format(nome_xml), "w") for i in arq: j = i.replace('', '') jon.writelines(j) # print(i) jon.close() await bot.sendMessage(msg['chat']['id'], arq2, 
'markdown', reply_to_message_id=msg['message_id']) await bot.sendDocument(msg['chat']['id'], document=open("bot_files/arquivos/{}.xml".format(nome_xml), 'rb'), reply_to_message_id=msg['message_id']) os.remove("bot_files/arquivos/{}.xml".format(nome_xml)) except: instrucao = '''Instruções: ``` 1 - meu comando sempre começa com /xml 2 - eu não aceito espaços no nome de arquivo, nome de jogo e nem na descrição! 3 - você pode copiar o caractere especial invisivel dentro das aspas abaixo para usar onde precisar de espaço! 4 - meu sistema para por jogos de PS3 aceitam apenas 3 links preciso deles como exemplos.``` `Copie de dentro das aspas o caractere invisivel:`"⠀" **VAMOS AO COMANDO EM SI** `Exemplo com caractere invisivel:` ``` gow god⠀of⠀war descriçao⠀usando⠀caractere⠀invisivel www.linkdropbox.com www.linkdropbox.com www.linkdropbox.com``` `Exemplo sem caractere visivel:` ``` /ps3 gow god_of_war descrição_sem_caractere_visivel www.linkdropbox.com www.linkdropbox.com www.linkdropbox.com``` **Onde cada campo:** `/ps3` ```- chama comando``` `gow` ```- nome do xml``` `god_of_war` ```- nome do jogo, se quiser tirar os _ usar caractere especial no lugar``` `descrição_do_jogo` ```- descrição, se quiser tirar os _ usar caractere especial no lugar``` `www.linkdropbox.com` ```- Link do Dropbox, preciso de 3 links separados por espaço``` ''' await bot.sendMessage(msg['chat']['id'], instrucao, 'markdown', reply_to_message_id=msg['message_id']) return True
# -*- coding: utf-8 -*-
# ███╗   ███╗ █████╗ ███╗   ██╗██╗ ██████╗ ██████╗ ███╗   ███╗██╗ ██████╗
# ████╗ ████║██╔══██╗████╗  ██║██║██╔════╝██╔═══██╗████╗ ████║██║██╔═══██╗
# ██╔████╔██║███████║██╔██╗ ██║██║██║     ██║   ██║██╔████╔██║██║██║   ██║
# ██║╚██╔╝██║██╔══██║██║╚██╗██║██║██║     ██║   ██║██║╚██╔╝██║██║██║   ██║
# ██║ ╚═╝ ██║██║  ██║██║ ╚████║██║╚██████╗╚██████╔╝██║ ╚═╝ ██║██║╚██████╔╝
# ╚═╝     ╚═╝╚═╝  ╚═╝╚═╝  ╚═══╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝     ╚═╝╚═╝ ╚═════╝
# [+] @GorpoOrko 2020 - Telegram Bot and Personal Assistant [+]
# |  TCXS Project Hacker Team - https://tcxsproject.com.br  |
# |  Telegram: @GorpoOrko  Mail: gorpoorko@protonmail.com   |
# [+]     Github Gorpo Dev: https://github.com/gorpo      [+]
import os
import sqlite3
from datetime import datetime

from bot_files.config import bot, version, bot_username, git_repo, logs, sudoers

# Telegram IDs allowed to register files via private chat. The original code
# repeated these three IDs inline in every check (id_mit = 758600965,
# id_ted = 628238139, per its comments).
ADMIN_IDS = (522510051, 758600965, 628238139)


async def tcxs(msg):
    # Initial setup: SQLite connection with dict-style rows plus a timestamp
    # that tags every upload.
    conexao_sqlite = sqlite3.connect('bot_files/bot_database.db')
    conexao_sqlite.row_factory = sqlite3.Row
    cursor_sqlite = conexao_sqlite.cursor()
    data = datetime.now().strftime('%d/%m/%Y %H:%M')

    def documento_de_admin():
        """True when an authorized admin sends a document in a private chat."""
        return (msg['chat']['type'] == 'private'
                and msg.get('document')
                and msg['from']['id'] in ADMIN_IDS)

    async def cadastrar(tabela, rotulo, complemento=''):
        """Store the received document's file_id in `tabela` and confirm to the uploader."""
        id_pkg = msg['document']['file_id']
        nome_pkg = msg['document']['file_name']
        uploader = msg['from']['username']
        # Parameterized query; the original interpolated the values straight
        # into the SQL string, which breaks on quotes and invites injection.
        cursor_sqlite.execute(
            f"INSERT INTO {tabela} (int_id, uploader, versao, pkg, data) VALUES (null, ?, ?, ?, ?)",
            (uploader, nome_pkg, id_pkg, data))
        conexao_sqlite.commit()
        await bot.sendMessage(msg['chat']['id'], f'`{rotulo}:` @{uploader} agora os usuarios irão receber a ***{nome_pkg}***{complemento}', 'markdown', reply_to_message_id=msg['message_id'])

    # REGISTRATION SYSTEM (donor store $$$, free store, fixes and XMLs) ------>
    # Admins upload a file in private chat; the bot matches it by file name
    # and stores its file_id so the groups can receive it on demand.
    if documento_de_admin():
        try:
            nome_arquivo = msg['document']['file_name'].lower()
            base = nome_arquivo.split('.')[0]
            extensao = nome_arquivo.split('.')[-1]
            if extensao == 'pkg' and 'tcxs' in nome_arquivo and 'free' not in nome_arquivo:
                await cadastrar('loja_doadores', 'Loja para Doadores atualizada na Database', ' no grupo de doadores.')
            elif extensao == 'pkg' and 'free' in nome_arquivo:
                await cadastrar('loja_free', 'Loja Gratuita atualizada na Database', ' no grupo geral')
            elif base == 'fix_han' and extensao == 'pkg':
                await cadastrar('fix_han', 'Fix HAN atualizado na Database')
            elif base == 'fix_hen' and extensao == 'pkg':
                await cadastrar('fix_hen', 'Fix HEN atualizado na Database')
            elif base == 'category_network_tool2' and extensao == 'xml':
                await cadastrar('fix_cfw_xml', 'Fix XML para CFW atualizado na Database')
            elif base == 'category_network' and extensao == 'xml':
                await cadastrar('fix_hen_xml', 'Fix XML para HEN atualizado na Database')
        except Exception:
            pass
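    # -----------------------------------------------------------------------
    # NOTE (assumed schema): the tables live in bot_files/bot_database.db and
    # are not created by this module. Reconstructed from the INSERTs and
    # SELECTs in this file, each table appears to share the same shape:
    #
    #   CREATE TABLE IF NOT EXISTS loja_doadores (  -- likewise loja_free,
    #       int_id   INTEGER PRIMARY KEY,           -- fix_han, fix_hen,
    #       uploader TEXT,  -- Telegram username    -- fix_cfw_xml and
    #       versao   TEXT,  -- original file name   -- fix_hen_xml
    #       pkg      TEXT,  -- Telegram file_id
    #       data     TEXT   -- 'dd/mm/YYYY HH:MM' upload timestamp
    #   );
    #
    # This is a sketch inferred from the queries, not the authoritative schema.
    # -----------------------------------------------------------------------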
    if msg.get('text') and msg['chat']['type'] == 'supergroup':
        primeiro_nome = msg['from']['first_name']

        async def enviar_tabela(tabela, caption, aviso):
            """Re-send every file registered in `tabela`, or warn when it is empty."""
            cursor_sqlite.execute(f"SELECT * FROM {tabela}")
            resultados = cursor_sqlite.fetchall()
            if not resultados:
                await bot.sendMessage(msg['chat']['id'], aviso, 'markdown')
            for resultado in resultados:
                await bot.sendDocument(msg['chat']['id'], document=resultado['pkg'], caption=caption)

        try:
            # ---- ATT (update) for TCXS Project donors ---------------------->
            if msg['text'].lower() == 'att' and msg['chat']['title'] == 'Doadores TCXS 2020':
                await bot.sendMessage(msg['chat']['id'], f"🤖 ***Olá*** @{msg['from']['username']}\n```-------->> Bem vindo a TCXS Project, agora você faz parte dela, entenda que as doações sao mensais e nossa equipe nao ganha nada por este projeto, todo dinheiro arrecadado neste grupo é para pagar os servidores dos quais dispomos jogos. Logo a PSN STUFF IRÁ ACABAR POIS OS SERVIDORES SERÃO DESLIGADOS e assim nao terá mais os jogos gratuitos por ai, restando apenas este acervo que é mantido por voces doadores! Vamos a Instalação!!! --> Espero que tenha um pendrive em mãos! --> copie os arquivos da VERSÃO 3.6 e use o fix de acordo com seu Exploit/Desbloqueio: se voce tem HAN ou CFW use o FIX HAN; caso contrário, se for o Exploit HEN em seu console, use o FIX HEN. É necessaria a instalacao deste arquivo para que a loja apareca em seu console! Ative seu HAN/HEN e instale o FIX; após o FIX instalado, instale a TCXS Store PKG. Recomendamos reiniciar o console após este processo!!```", 'markdown')
                await bot.sendMessage(msg['chat']['id'], '🤖`TUTORIAL DE COMO INSTALAR A LOJA EXPLOIT HAN E HEN!!` https://cos.tv/videos/play/1586413688272059934', 'markdown')
                await bot.sendMessage(msg['chat']['id'], '🤖`COMO USAR A XML CATEGORY_NETWORK!` https://cos.tv/videos/play/1586411677524278797', 'markdown')
                await bot.sendMessage(msg['chat']['id'], '🤖`Tutorial Download em Segundo Plano` https://cos.tv/videos/play/1586815808334907474', 'markdown')
                await bot.sendMessage(msg['chat']['id'], '🤖`COMO USAR A XML CATEGORY_NETWORK! CONSOLES CFW` https://cos.tv/videos/play/1586411677524278797', 'markdown')
                await bot.sendMessage(msg['chat']['id'], '🤖`PORQUE DEVE USAR PROXY NO PS3!!` https://cos.tv/videos/play/1586410545470952204', 'markdown')
                # Donor store linked to the database ------------------------->
                cursor_sqlite.execute("SELECT * FROM loja_doadores")
                resultados = cursor_sqlite.fetchall()
                if not resultados:
                    await bot.sendMessage(msg['chat']['id'], f"🤖 {primeiro_nome} `não tenho lojas cadastradas, insira o banco de dados com dados ou cadastre um PKG enviando ele no meu privado com nome iniciando com TCXS, exemplo:` ***TCXS_Store_3.9.pkg***", 'markdown')
                for resultado in resultados:
                    await bot.sendDocument(msg['chat']['id'], document=resultado['pkg'], caption=f"{resultado['versao']} upada em {resultado['data']} por @{resultado['uploader']}")
                # Fixes and XMLs pulled from the same database --------------->
                await enviar_tabela('fix_han', 'Fix para usuários HAN', f"🤖 {primeiro_nome} `não tenho o fix han, insira o banco de dados com dados ou cadastre um PKG enviando ele no meu privado com nome de:` ***FIX_HAN.pkg***")
                await enviar_tabela('fix_hen', 'Fix para usuários HEN', f"🤖 {primeiro_nome} `não tenho o fix hen, insira o banco de dados com dados ou cadastre um PKG enviando ele no meu privado com nome de:` ***FIX_HEN.pkg***")
                await enviar_tabela('fix_cfw_xml', 'XML exclusivo para quem usa CFW', f"🤖 {primeiro_nome} `não tenho o fix xml para cfw, insira o banco de dados com dados ou cadastre um XML enviando ele no meu privado com nome de:` ***category_network_tool2.xml***")
                await enviar_tabela('fix_hen_xml', 'XML exclusivo para quem usa HEN', f"🤖 {primeiro_nome} `não tenho o fix xml para hen, insira o banco de dados com dados ou cadastre um XML enviando ele no meu privado com nome de:` ***category_network.xml***")
        except Exception:
            pass
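        # Note: `document=` receives a Telegram file_id captured at upload
        # time, so the bot re-sends the cached file instead of transferring
        # the .pkg again; a file_id stays valid for the bot that received it.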
        # Free users' store ---------------------------------------------->
        # "att" outside the donor group just gets called out (the original
        # comment: "cagueta q tao roubando a loja kkkk").
        if msg['text'].lower() == 'att' and msg['chat']['title'] != 'Doadores TCXS 2020':
            await bot.sendMessage(msg['chat']['id'], f"***{primeiro_nome} você esta tentando roubar a TCXS Store, cara vou pegar seu ip e te hackear agora mesmo!!!***", 'markdown', reply_to_message_id=msg['message_id'])
        if msg['text'] in ('loja gratis', '/freepkg', 'free pkg', f"/freepkg@{bot_username}", 'gratis', 'Gratis', 'Free pkg'):
            await bot.sendMessage(msg['chat']['id'], 'Salve, venho trazer a você nossa nova att GRATUITA, espero que goste! ---- ---- ---- Caso tenha dificuldades com o download em segundo plano confira este tutorial exclusivo feito para você doador amado que contribui para este projeto se manter em pe: https://youtu.be/_21a5REKhBc', reply_to_message_id=msg['message_id'])
            await bot.sendMessage(msg['chat']['id'], 'Espero que tenha um pendrive em mãos e saiba usar a loja, não daremos suporte para USUARIOS GRATUITOS. Agora copie os arquivos abaixo para a raiz de um pendrive e coloque na USB direita do seu console; caso use HAN ative o debug, ou se usa HEN ative o hen. ESTA ATT NAO USA NENHUM TIPO DE PATCH OU FIX!', reply_to_message_id=msg['message_id'])
            # The free store is a fixed, pre-uploaded package (hard-coded file_id).
            await bot.sendDocument(msg['chat']['id'], document='BQACAgEAAx0CTd0y0QABAfACXkmA716o7XaNW82C3Mr7O2c0bX8AApEAA0oQUUaFcnOHb037rhgE', reply_to_message_id=msg['message_id'])
        # Remaining user instructions ------------------------------------>
        if msg['text'].split()[0] in ('doadores', '/doadores', f"/doadores@{bot_username}"):
            await bot.sendMessage(msg['chat']['id'], '`Aqui tem tudo que os doadores precisam saber:` http://tcxsproject.com.br/doadores-tcxs-store-regras/', 'markdown', reply_to_message_id=msg['message_id'])
        # Ted's bluetooth video
        if msg['text'] == 'bluetooth':
            await bot.sendMessage(msg['chat']['id'], 'https://www.youtube.com/watch?v=_wYG7iMa5uY', reply_to_message_id=msg['message_id'])
        # Game showcase videos, keyed by command (pre-uploaded file_ids).
        videos_demo = {
            'ps1': 'BAACAgEAAx0EUYaz7wACEbVe_lDehK8EitSnLO-jP2SIqZ00PAACsgADGepZRCV_bEET9yWbGgQ',
            'ps2': 'BAACAgEAAx0EUYaz7wACEbde_lDfbzhCcTg7M1iPa0_G_rF6UQACsgADGepZRCV_bEET9yWbGgQ',
            'ps3': 'AAMCAQADHQRRhrPvAAIS0V7_9mwf8l1awkJia_vSIK-7I4a7AAKzAAMZ6llEdIdHMW_ukXk1cHhIFwADAQAHbQADKkcAAhoE',
            'exclusivos': 'BAACAgEAAx0EUYaz7wACEbhe_lDfqKXeXTKts9b5692tHUMg7gACsAADGepZRO4jb6TTGEoWGgQ',
            'emuladores': 'BAACAgEAAx0CUYaz7wACEbJe_lDe2zzPbEQaW7cmwysAAbjSkPYAAgYBAAKPeSlGO3j50bdxrn8aBA',
            'psp': 'BAACAgEAAx0CUYaz7wACEbBe_lDeo13PNB4kKRDH4sAFdn8g2AACBwEAAo95KUbzplnZj4OTAAEaBA',
        }
        if msg['text'] in videos_demo:
            await bot.sendVideo(msg['chat']['id'], video=videos_demo[msg['text']], reply_to_message_id=msg['message_id'])
        if msg['text'] in ('tcxs', '/tcxs', f"/tcxs@{bot_username}", 'tcxs project', 'TCXS', 'TCXS Project'):
            await bot.sendMessage(msg['chat']['id'], f'`{primeiro_nome} O nome TCXS foi criado com base nos botoes do PlayStation3, TRIANGLE - CIRCLE - X - SQUARE, ou seja, triangulo, bolinha, x e quadrado, kkk. Como nosso dev era publicitario e odiava a cena vendo alguns imbecis AUTO PROMOVER seu nome criando lojas e projetos, ele decidiu entrar na cena com uma nomenclatura que lembrasse a cena hacker, ou seja, siglas! Siglas estão no cotidiano de todo mundo e é facil sua absorção, bem como dentro da parte web e publicitaria a sigla tem um forte papel, facilitando a digitacao e a pesquisa. Com este intuito nos denominamos TCXS Project; a palavra Project veio da vontade de que nunca morra: sendo um projeto, qualquer um que tiver habilidade e capacidade pode entrar na equipe e ajudar a coordenar bem como tocar o projeto. Ja vimos na cena varios adms passarem pela TCXS; ela é um projeto feito a varias maos e cada um doa de forma gratuita seu tempo e conhecimento para disponibilizar tudo que temos em nossas redes e arquivos. Ficamos gratos a todos que passaram por esta equipe, dos adms aos users, e seria impossivel enumerar todos: voces que sao a TCXS Project e formam este projeto que ja esta indo para seu terceiro ano! OBRIGADO COMUNIDADE GAMER, HACKER, EXPLOITER, DEVS, USUARIOS E SIMPATIZANTES, SEM VOCES NAO EXISTIRIAMOS!`', 'markdown', reply_to_message_id=msg['message_id'])
        if msg['text'] in ('proxy', '/proxy', f"/proxy@{bot_username}", 'Proxy'):
            await bot.sendMessage(msg['chat']['id'], f'{primeiro_nome} quer aumentar a velocidade dos downloads no seu PS3? Primeiro quero que saiba que o PS3 é de 2006 e sua placa de rede trabalha com um protocolo bem lento, logo nao adianta vc ter 100mb de net fibra full, pois sua placa de rede nao le neste tempo, bem como a gravaçao no HD do PS3 tambem é lenta; lembre que ele usa HDD e nao SSD, assim eu te digo que NAO ADIANTA TUA NET SER 100MB e de fibra se seu hd antigo e ja capenga grava no maximo a 30mb/s. Porem, vc sabia que antes de gravar no hd tudo fica na ram e so depois passa para o HD? Tendo isto como afirmaçao, entenda que o ps3 tem 256mb de ram e muitos slots desta ram estao ocupados, entao nao espere que o PS3 seja uma super maquina de download: ele era do tempo do final da Internet Discada e inicio da internet a Radio e ADSL; na epoca da estrutura dele, em 2006, a maior velocidade de internet estavel era em torno de 1mb! Tendo isto em mente, siga nosso tutorial de proxy para melhorar sua conexao. O serviço proxy utiliza outra maquina para que sua conexao esteja com o IP mais proximo do servidor e um cache seja armazenado neste -proxy-, fazendo assim seu download melhorar significativamente. Segue tutorial: https://youtu.be/l4o8ySk1Do4', reply_to_message_id=msg['message_id'])
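        # In groups, Telegram clients commonly send commands as /cmd@botname,
        # which is why each trigger tuple here also matches the
        # f"/cmd@{bot_username}" form alongside the bare keywords.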
        if msg['text'].lower() == 'fix' or msg['text'] in ("/fix", f"/fix@{bot_username}"):
            await bot.sendMessage(msg['chat']['id'], f'`{primeiro_nome} vejo que esta precisando do fix para sua loja aparecer, instale este pkg em seu console e a loja começará a aparecer.`', 'markdown', reply_to_message_id=msg['message_id'])
            await bot.sendDocument(msg['chat']['id'], document='BQACAgEAAx0CWJNTSQACC7FeXTrapHT8zx-Yz6Rm85I7s6BU2gACxQADxKN4RV4960o0M9ruGAQ', reply_to_message_id=msg['message_id'])
        if msg['text'] in ('torrent', '/torrent', f"/torrent@{bot_username}", 'Torrent', 'torrents', 'Torrents'):
            await bot.sendMessage(msg['chat']['id'], f'{primeiro_nome} aqui nosso canal de torrents com pkg para download: https://t.me/tcxsp', 'markdown', reply_to_message_id=msg['message_id'])
        if msg['text'] in ('codigo de erro', '/codigoerro', f"/codigoerro@{bot_username}", 'lista de erros', 'erro psn', 'estou com erro', 'ta dando erro', 'deu erro', 'meu videogame ta com problema'):
            await bot.sendMessage(msg['chat']['id'], '`Querido usuario do sistema PlayStation3, é praticamente impossivel decorar ou trazer a minha base de dados todos os erros; imagina entao se um humano saberia o erro por um codigo. Entao vou te fornecer aqui o site oficial da Sony e na lista voce podera encontrar seu erro e solucao. Caso seu erro persista, ele esta ocorrendo com o exploit; ai aconselho que voce refaca todo o seu exploit novamente, nao e dificil, mas antes veja aqui se seu erro tem solucao:` https://www.playstation.com/pt-pt/get-help/#!/error-code/', 'markdown', reply_to_message_id=msg['message_id'])
        if msg['text'].lower() == 'rap' or msg['text'] in ('/rap', f"/rap@{bot_username}", 'raps', '14.000', 'assinatura') or 'licença' in msg['text']:
            await bot.sendMessage(msg['chat']['id'], 'Agora precisamos apenas do PKG das licenças, no HEN as mesmas licenças servem para todos! Tutorial: https://www.youtube.com/watch?v=EYr_MKaL1Tg Download: https://www.mediafire.com/file/23nzljx8w83dbl0/14Mil-raps-.pkg/file', reply_to_message_id=msg['message_id'])
        if msg['text'] in ('desbloqueio', '/desbloqueio', f"/desbloqueio@{bot_username}", 'o meu ps3', 'Desbloqueio', 'desbloquear o ps3', 'desbloquear', 'desbloquear meu videogame', 'desbloquear o meu ps3', 'desbloquear o playstation', 'desbloquear o meu console', 'desbloqueei meu videogame', 'desbloqueei meu console'):
            await bot.sendMessage(msg['chat']['id'], f'`{primeiro_nome} Tem certeza que ele foi feito de forma correta? O Joao PSX, alem de fornecer arquivos bugados e ate mesmo mostrar bugs demais ao vivo, acaba nao fornecendo um material confiavel, bem como ele nao tem total dominio do que faz, como podemos ver nos videos pela quantidade de erros (ele podia editar os videos). Enfim, aconselho que veja, reveja e se possivel faça o exploit em cima deste tutorial:` https://www.youtube.com/watch?v=XUUieW4bv_Y', 'markdown', reply_to_message_id=msg['message_id'])
        if 'mercado' in msg['text'] or msg['text'] in ('/mercadopago', f"/mercadopago@{bot_username}", 'Mercado Pago', 'Mercado pago'):
            await bot.sendMessage(msg['chat']['id'], f'Olá, que bom que você quer doar, {primeiro_nome}! Aqui esta o link de pagamento -----> https://www.mercadopago.com.br/checkout/v1/redirect?pref_id=354396246-315fce8c-d8f9-4aa0-8583-95d678936375', reply_to_message_id=msg['message_id'])
        if msg['text'] in ('tutorial segundo plano', '/segundoplano', f"/segundoplano@{bot_username}", 'download segundo plano', 'downloads em segundo plano', 'Tutorial', 'Download em segundo plano', 'Downloads em segundo plano', 'download em segundo plano'):
            await bot.sendMessage(msg['chat']['id'], f'{primeiro_nome} O nosso admin @MsT3Dz criou um tutorial exclusivo de como fazer os downloads na TCXS Project, bem como os downloads em segundo plano. Confira o tutorial completo: https://youtu.be/_21a5REKhBc', reply_to_message_id=msg['message_id'])
        if msg['text'] in ('tutorial', '/tutorial', f"/tutorial@{bot_username}", 'instalar', 'instalar a loja', 'instalar loja', 'como instalar a loja', 'Como instalar a loja', 'Como instalo a loja', 'Instalação', 'Tutorial de instalação', 'Instalação da tcxs', 'instalar a tcxs'):
            await bot.sendMessage(msg['chat']['id'], f'{primeiro_nome} O nosso admin @MsT3Dz criou um tutorial exclusivo de como instalar a loja: https://www.youtube.com/watch?v=aG1jLj8QuBY', reply_to_message_id=msg['message_id'])
        if msg['text'].lower() == 'dev' or msg['text'] in ("/dev", f"/dev@{bot_username}"):
            await bot.sendMessage(msg['chat']['id'], f'{primeiro_nome} aqui esta a biblioteca dev, não vá se aventurar naquela loucura satanista anarquista lá.... https://tcxsproject.com.br/dev/', reply_to_message_id=msg['message_id'])
        if msg['text'].lower() == 'onion' or msg['text'] in ("/onion", f"/onion@{bot_username}"):
            await bot.sendMessage(msg['chat']['id'], f'{primeiro_nome} este é nosso site na deep web, ele esta hospedado comigo por sinal, espero que isto nao de merda...\n http://5ct542hryncrbz7x3pveukcfzwf6qlhbwemsxnu4vtx2r7icjtimj6qd.onion', reply_to_message_id=msg['message_id'])
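    # -----------------------------------------------------------------------
    # XML creation system (below): /ps1, /ps2 and /psp take four
    # space-separated fields (xml name, game name, description, link) and
    # emit an XMBML <View> file the store's parent XML can mount.
    # Illustrative example (field values are made up): sending
    #   /ps1 gow God⠀of⠀War demo⠀do⠀jogo https://dl.example.com/gow.pkg
    # writes bot_files/arquivos/gow.xml with a "ps1_items_link" view whose
    # module_action points at the given link. /ps3 is the same idea with
    # three links (game parts plus licenses), one <Table> entry per link.
    # -----------------------------------------------------------------------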
    if msg.get('text'):
        # Store XML creation system -------------------------------------->
        async def gerar_xml(plataforma, rotulo):
            """Build, send and clean up a single-link XMBML file (PS1/PS2/PSP)."""
            pasta = plataforma.upper()  # PS1 / PS2 / PSP folder on the console
            try:
                nome_xml = msg['text'].split()[1]
                nome = msg['text'].split()[2]
                descricao = msg['text'].split()[3]
                link = msg['text'].split()[4]
                # XML that gets written to disk and echoed back to the user.
                arq = f'''<XMBML version="1.0">
  <View id="{plataforma}_items_link">
    <Attributes>
      <Table key="{plataforma}_item_link">
        <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/{pasta}/download.jpg</String></Pair>
        <Pair key="title"><String>TCXS - {nome}</String></Pair>
        <Pair key="info"><String>TCXS - {descricao}</String></Pair>
        <Pair key="module_name"><String>webrender_plugin</String></Pair>
        <Pair key="module_action"><String>{link}</String></Pair>
      </Table>
    </Attributes>
    <Items>
      <Query class="type:x-xmb/module-action" key="{plataforma}_item_link" attr="{plataforma}_item_link"/>
    </Items>
  </View>
</XMBML>'''
                await bot.sendMessage(msg['chat']['id'], f'Seu xml de {rotulo} meu mestre ```{arq}```', 'markdown', reply_to_message_id=msg['message_id'])
                arq2 = f'''Seu XML esta pronto, insira estas linhas no seu XML Pai:
`Abaixo de <Attributes> cole:`
```
<Table key="{plataforma}_{nome_xml}">
  <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/{pasta}/{nome_xml}.jpg</String></Pair>
  <Pair key="title"><String>TCXS - {nome} - TCXS</String></Pair>
  <Pair key="info"><String>{descricao} - TCXS</String></Pair>
</Table>```
`Abaixo de <Items> cole:`
```
<Query class="type:x-xmb/folder-pixmap" key="{plataforma}_{nome_xml}" attr="{plataforma}_{nome_xml}" src="xmb://localhost/dev_hdd0/game/TCXSPROJECT/USRDIR/XMLS/{pasta}/{nome_xml}.xml#{plataforma}_items_link" />```'''
                caminho = f"bot_files/arquivos/{nome_xml}.xml"
                # The original wrote the string character by character; a plain
                # write() produces the same file.
                with open(caminho, 'w') as arquivo:
                    arquivo.write(arq)
                await bot.sendMessage(msg['chat']['id'], arq2, 'markdown', reply_to_message_id=msg['message_id'])
                await bot.sendDocument(msg['chat']['id'], document=open(caminho, 'rb'), reply_to_message_id=msg['message_id'])
                os.remove(caminho)
            except Exception:
                # Any missing field lands here: reply with usage instructions.
                instrucao = f'''Instruções: ```
1 - meu comando sempre começa com /{plataforma}
2 - eu não aceito espaços no nome de arquivo, nome de jogo e nem na descrição!
3 - você pode copiar o caractere especial invisivel dentro das aspas abaixo para usar onde precisar de espaço!```
`Copie de dentro das aspas o caractere invisivel:` "⠀"
**VAMOS AO COMANDO EM SI**
`Exemplo com caractere invisivel:`
``` /{plataforma} gow god⠀of⠀war descriçao⠀usando⠀caractere⠀invisivel www.linkdropbox.com```
`Exemplo sem caractere invisivel:`
``` /{plataforma} gow god_of_war descrição_sem_caractere_invisivel www.linkdropbox.com```
**Onde cada campo:**
`/{plataforma}` ```- chama o comando```
`gow` ```- nome do xml```
`god_of_war` ```- nome do jogo, se quiser tirar os _ use o caractere especial no lugar```
`descrição_do_jogo` ```- descrição, se quiser tirar os _ use o caractere especial no lugar```
`www.linkdropbox.com` ```- Link do Dropbox```
'''
                await bot.sendMessage(msg['chat']['id'], instrucao, 'markdown', reply_to_message_id=msg['message_id'])
            return True

        if '/ps1' in msg['text']:
            return await gerar_xml('ps1', 'PlayStation1')
        if '/ps2' in msg['text']:
            return await gerar_xml('ps2', 'PlayStation2')
        if '/psp' in msg['text']:
            return await gerar_xml('psp', 'PSP')
        if '/ps3' in msg['text']:
            # PS3 entries carry three download parts (game plus licenses), so
            # the generated view holds three <Table> items instead of one.
            try:
                nome_xml = msg['text'].split()[1]
                nome = msg['text'].split()[2]
                descricao = msg['text'].split()[3]
                link1 = msg['text'].split()[4]
                link2 = msg['text'].split()[5]
                link3 = msg['text'].split()[6]
                arq = f'''<XMBML version="1.0">
  <View id="ps3_items_link">
    <Attributes>
      <Table key="ps3_item_0">
        <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/DLCS/download.png</String></Pair>
        <Pair key="title"><String>TCXS Parte1 GAME- {nome}</String></Pair>
        <Pair key="info"><String>TCXS - {descricao}</String></Pair>
        <Pair key="module_name"><String>webrender_plugin</String></Pair>
        <Pair key="module_action"><String>{link1}</String></Pair>
      </Table>
      <Table key="ps3_item_1">
        <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/DLCS/download.png</String></Pair>
        <Pair key="title"><String>TCXS Parte2 GAME+LIC- {nome}</String></Pair>
        <Pair key="info"><String>TCXS - {descricao}</String></Pair>
        <Pair key="module_name"><String>webrender_plugin</String></Pair>
        <Pair key="module_action"><String>{link2}</String></Pair>
      </Table>
      <Table key="ps3_item_2">
        <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/DLCS/download.png</String></Pair>
        <Pair key="title"><String>TCXS Parte3 GAME+LIC- {nome}</String></Pair>
        <Pair key="info"><String>TCXS - {descricao}</String></Pair>
        <Pair key="module_name"><String>webrender_plugin</String></Pair>
        <Pair key="module_action"><String>{link3}</String></Pair>
      </Table>
    </Attributes>
    <Items>
      <Query class="type:x-xmb/module-action" key="ps3_item_0" attr="ps3_item_0"/>
      <Query class="type:x-xmb/module-action" key="ps3_item_1" attr="ps3_item_1"/>
      <Query class="type:x-xmb/module-action" key="ps3_item_2" attr="ps3_item_2"/>
    </Items>
  </View>
</XMBML>'''
                await bot.sendMessage(msg['chat']['id'], f'Seu xml meu mestre ```{arq}```', 'markdown', reply_to_message_id=msg['message_id'])
                arq2 = f'''Seu XML esta pronto, insira estas linhas no seu XML Pai:
`Abaixo de <Attributes> cole:`
```
<Table key="ps3_{nome_xml}">
  <Pair key="icon"><String>/dev_hdd0/game/TCXSPROJECT/USRDIR/IMAGES/PS3/{nome_xml}.jpg</String></Pair>
  <Pair key="title"><String>TCXS - {nome} - TCXS</String></Pair>
  <Pair key="info"><String>{descricao} - TCXS</String></Pair>
</Table>```
`Abaixo de <Items> cole:`
```
<Query class="type:x-xmb/folder-pixmap" key="ps3_{nome_xml}" attr="ps3_{nome_xml}" src="xmb://localhost/dev_hdd0/game/TCXSPROJECT/USRDIR/XMLS/PS3/{nome_xml}.xml#ps3_items_link" />```'''
                caminho = f"bot_files/arquivos/{nome_xml}.xml"
                with open(caminho, 'w') as arquivo:
                    arquivo.write(arq)
                await bot.sendMessage(msg['chat']['id'], arq2, 'markdown', reply_to_message_id=msg['message_id'])
                await bot.sendDocument(msg['chat']['id'], document=open(caminho, 'rb'), reply_to_message_id=msg['message_id'])
                os.remove(caminho)
            except Exception:
                instrucao = '''Instruções: ```
1 - meu comando sempre começa com /ps3
2 - eu não aceito espaços no nome de arquivo, nome de jogo e nem na descrição!
3 - você pode copiar o caractere especial invisivel dentro das aspas abaixo para usar onde precisar de espaço!
4 - para jogos de PS3 aceito apenas 3 links, preciso deles como nos exemplos.```
`Copie de dentro das aspas o caractere invisivel:` "⠀"
**VAMOS AO COMANDO EM SI**
`Exemplo com caractere invisivel:`
``` /ps3 gow god⠀of⠀war descriçao⠀usando⠀caractere⠀invisivel www.linkdropbox.com www.linkdropbox.com www.linkdropbox.com```
`Exemplo sem caractere invisivel:`
``` /ps3 gow god_of_war descrição_sem_caractere_invisivel www.linkdropbox.com www.linkdropbox.com www.linkdropbox.com```
**Onde cada campo:**
`/ps3` ```- chama o comando```
`gow` ```- nome do xml```
`god_of_war` ```- nome do jogo, se quiser tirar os _ use o caractere especial no lugar```
`descrição_do_jogo` ```- descrição, se quiser tirar os _ use o caractere especial no lugar```
`www.linkdropbox.com` ```- Link do Dropbox, preciso de 3 links separados por espaço```
'''
                await bot.sendMessage(msg['chat']['id'], instrucao, 'markdown', reply_to_message_id=msg['message_id'])
            return True
'markdown', reply_to_message_id=msg['message_id']) await bot.sendDocument(msg['chat']['id'], document=open("bot_files/arquivos/{}.xml".format(nome_xml), 'rb'), reply_to_message_id=msg['message_id']) os.remove("bot_files/arquivos/{}.xml".format(nome_xml)) except: instrucao = '''Instruções: ``` 1 - meu comando sempre começa com /xml 2 - eu não aceito espaços no nome de arquivo, nome de jogo e nem na descrição! 3 - você pode copiar o caractere especial invisivel dentro das aspas abaixo para usar onde precisar de espaço! 4 - meu sistema para por jogos de PS3 aceitam apenas 3 links preciso deles como exemplos.``` `Copie de dentro das aspas o caractere invisivel:`"⠀" **VAMOS AO COMANDO EM SI** `Exemplo com caractere invisivel:` ``` gow god⠀of⠀war descriçao⠀usando⠀caractere⠀invisivel www.linkdropbox.com www.linkdropbox.com www.linkdropbox.com``` `Exemplo sem caractere visivel:` ``` /ps3 gow god_of_war descrição_sem_caractere_visivel www.linkdropbox.com www.linkdropbox.com www.linkdropbox.com``` **Onde cada campo:** `/ps3` ```- chama comando``` `gow` ```- nome do xml``` `god_of_war` ```- nome do jogo, se quiser tirar os _ usar caractere especial no lugar``` `descrição_do_jogo` ```- descrição, se quiser tirar os _ usar caractere especial no lugar``` `www.linkdropbox.com` ```- Link do Dropbox, preciso de 3 links separados por espaço``` ''' await bot.sendMessage(msg['chat']['id'], instrucao, 'markdown', reply_to_message_id=msg['message_id']) return True
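# ---------------------------------------------------------------------------
# A minimal sketch, not part of the original bot: the handlers above assume an
# async bot object with telepot-style methods (await bot.sendMessage /
# bot.sendDocument), so this shows how such a coroutine is typically wired
# into a message loop with telepot.aio. TOKEN and on_chat_message are
# hypothetical placeholders, not names from the original code.
# ---------------------------------------------------------------------------
import asyncio
import telepot
import telepot.aio
from telepot.aio.loop import MessageLoop

TOKEN = "123456:ABC-DEF"  # hypothetical bot token

bot = telepot.aio.Bot(TOKEN)

async def on_chat_message(msg):
    # Command handlers like /ps1 above live inside a coroutine of this shape;
    # telepot awaits it once per incoming message.
    content_type, chat_type, chat_id = telepot.glance(msg)
    if content_type == 'text' and msg['text'] == '/ping':
        await bot.sendMessage(chat_id, 'pong', reply_to_message_id=msg['message_id'])

loop = asyncio.get_event_loop()
loop.create_task(MessageLoop(bot, on_chat_message).run_forever())
loop.run_forever()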
# -*- coding: utf-8 -*-
# standard library imports
import json
import os
import re
import sys
import subprocess
from glob import glob
from pathlib import Path

# first-party imports
import click
from loguru import logger
from sequencetools.tools.basic_fasta_stats import basic_fasta_stats
from sequencetools.helpers.file_helpers import return_filehandle

# module imports
from . import cli
from . import click_loguru
from . import specification_checks

# global defs
DOMAIN = "https://legumeinfo.org/data/public"
FASTA_TYPES = ("fna", "faa", "fasta", "frn")
GFF_TYPES = ("gff", "gff3")


def count_gff_features(gff):
    counts = {}
    with return_filehandle(Path(gff)) as fopen:
        for line in fopen:
            if (
                not line or line.isspace() or line.startswith("#")
            ):  # skip blank lines and comments
                continue
            line = line.rstrip()
            f = line.split("\t")  # get fields
            if f[2] not in counts:  # feature type
                counts[f[2]] = 1
                continue
            counts[f[2]] += 1
    return counts


class Detector:
    """Detect datastore file inconsistencies."""

    def __init__(
        self,
        target,
        busco,
        nodes,
        genome_main,
        gene_models_main,
        genometools,
        fasta_headers,
        disable_all
    ):
        """Record which checks to run and discover all target objects."""
        self.checks = {}  # determines which checks are skipped
        self.checks["genome_main"] = genome_main
        self.checks["gene_models_main"] = gene_models_main
        self.checks["perform_gt"] = genometools
        self.checks["fasta_headers"] = fasta_headers
        self.nodes = nodes
        self.busco = busco
        self.disable_all = disable_all
        self.options = {}
        self.canonical_types = [
            "genome_main",
            "protein_primaryTranscript",
            "protein",
            "gene_models_main",
            "gwas",
            "mrk",
            "phen"
        ]
        self.canonical_parents = {
            "genome_main": None,
            "gene_models_main": "genome_main",
            "mrk": "genome_main",
            "protein_primaryTranscript": "gene_models_main",
            "protein": "gene_models_main",
            "gwas": "mrk",
            "phen": "gwas",
            "qtl": "genome_main"
        }
        self.rank = {
            "genome_main": 0,
            "gene_models_main": 1,
            "mrk": 1,
            "qtl": 1,
            "protein": 2,
            "protein_primaryTranscript": 2,
            "gwas": 2,
            "phen": 3
        }
        self.write_me = {}
        self.passed = {}  # dictionary of passing names
        self.target_objects = {}  # store all target pairings
        self.fasta_ids = {}
        self.reporting = {}
        self.node_data = {}  # nodes for DSCensor
        self.target = Path(target)
        self.target_readme = ""
        self.target_name = os.path.basename(self.target)
        self.target_type = self.get_target_type()
        if (
            self.target_type is None
        ):  # target type was not recognized
            logger.error(f"Target type not recognized for {self.target}")
            sys.exit(1)
        logger.info(f"Target type looks like {self.target_type}")
        self.get_targets()
        logger.info("Performing Checks for the Following:\n")
        for t in self.target_objects:  # validate each object set
            logger.info(f"Parent {t}:")
            logger.debug(f"{self.target_objects[t]}")
            count = 0
            set_primary = ""
            primary = False
            for c in self.target_objects[t]["children"]:
                logger.info(f"Child {c}")
                if (
                    self.target_objects[t]["children"][c]["type"]
                    == "protein_primaryTranscript"
                ):
                    primary = True
                if self.target_objects[t]["children"][c]["type"] == "protein":
                    count += 1
                    set_primary = c
            if count == 1 and not primary:
                self.target_objects[t]["children"][set_primary][
                    "type"
                ] = "protein_primaryTranscript"
                self.target_objects[t]["children"][set_primary]["node_data"][
                    "canonical_type"
                ] = "protein_primaryTranscript"
            if count > 1 and not primary:
                logger.error(
                    f"Multiple protein files found for {t}, one must be"
                    " renamed to primary."
) sys.exit(1) logger.info("Initialized Detector\n") def get_target_type(self): """Determine whether target is file, organism directory, or data directory.""" if self.target.is_file(): target_type = "file" elif ( len(self.target_name.split("_")) == 2 and len(self.target_name.split(".")) < 3 ): target_type = "organism_dir" # will always be Genus_species elif len(self.target_name.split(".")) >= 3: # standard naming minimum target_type = "data_dir" else: target_type = None logger.warning(f"Unrecognized directory type {self.target}") return target_type def get_targets(self): """Gets and discovers target files relation to other files. If the target is a directory, the program will discover all related files that can be checked. """ if self.target_type == "file": # starting with a file self.add_target_object() return elif ( self.target_type == "data_dir" or self.target_type == "organism_dir" ): self.get_all_files() # works for both data and organism return def get_all_files(self): """Walk filetree and return all files.""" for root, directories, filenames in os.walk(self.target): for filename in filenames: # we only care about the files my_target = f"{root}/{filename}" logger.debug(f"Checking file {my_target}") self.target = my_target self.target_name = filename self.add_target_object() # add target if canonical def get_target_file_type(self, target_file): """Determines if file is fasta, gff3, vcf, etc""" file_type = target_file.split(".")[-2].lower() if file_type in FASTA_TYPES: file_type = "fasta" elif file_type in GFF_TYPES: file_type = "gff3" else: return False return file_type def add_target_object(self): """Create a structure for file objects.""" target_attributes = self.target_name.split(".") if len(target_attributes) < 3 or self.target_name[0] == "_": logger.info(f"File {self.target} does not seem to have attributes") return canonical_type = target_attributes[-3] # check content type if canonical_type not in self.canonical_types: # check for known file types if len(target_attributes) < 5: logger.info(f"No type for {self.target}. skipping") return canonical_type = target_attributes[-5] if canonical_type not in self.canonical_types: # check for mrk identifier logger.info( f"Type {canonical_type} not recognized in" f" {self.canonical_types}. 
Skipping" ) return organism_dir_path = os.path.dirname( os.path.dirname(self.target) ) # org dir organism_dir = os.path.basename( os.path.dirname(os.path.dirname(self.target)) ) # org dir target_dir = os.path.basename(os.path.dirname(self.target)) print(target_dir, organism_dir) if len(organism_dir.split("_")) != 2: return genus = organism_dir.split("_")[0].lower() species = organism_dir.split("_")[1].lower() target_ref_type = self.canonical_parents[canonical_type] logger.debug("Getting target files reference if necessary...") file_type = self.get_target_file_type(self.target_name) file_url = f"{DOMAIN}/{organism_dir}/{target_dir}/{self.target_name}" target_node_object = { "filename": self.target_name, "filetype": file_type, "canonical_type": canonical_type, "url": file_url, "counts": "", "genus": genus, "species": species, "origin": "LIS", "infraspecies": target_attributes[1], "derived_from": [], "child_of": [], } if len(target_attributes) > 7 and target_ref_type: # check parent logger.debug("Target Derived from Some Reference Searching...") ref_glob = f"{organism_dir_path}/{ '.'.join(target_attributes[1:3])}*/*{target_ref_type}.*.gz" if self.rank[canonical_type] > 1: # feature has a subtype ref_glob = f"{organism_dir_path}/{'.'.join(target_attributes[1:4])}*/*{target_ref_type}.*.gz" if canonical_type == 'gwas': # mrk is parent and needs be read from PlatformName in gwas file cmd = f'zgrep PlatformName {self.target}' proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) platformname = proc.communicate()[0].decode('utf-8').rstrip().split('\t')[1] ref_glob = f"{organism_dir_path}/*.mrk.*/*mrk.*.{platformname}.gff3.gz" elif canonical_type == 'phen': # gwas is parent ref_glob = 'gwas'.join(str(self.target).rsplit('phen', 1)) my_reference = self.get_reference(ref_glob) logger.info(my_reference) if my_reference not in self.target_objects: # new parent parent_name = os.path.basename(my_reference) file_type = self.get_target_file_type(parent_name) organism_dir = os.path.basename( os.path.dirname(os.path.dirname(my_reference)) ) ref_dir = os.path.basename(os.path.dirname(my_reference)) file_url = f"{DOMAIN}/{organism_dir}/{ref_dir}/{parent_name}" ref_node_object = { "filename": parent_name, "filetype": file_type, "canonical_type": target_ref_type, "url": file_url, "counts": "", "genus": genus, "species": species, "origin": "LIS", "infraspecies": target_attributes[1], "derived_from": [], "child_of": [], } target_node_object["child_of"].append(parent_name) target_node_object["derived_from"].append(parent_name) self.target_objects[my_reference] = { "type": target_ref_type, "node_data": ref_node_object, "readme": "", "children": {}, } self.target_objects[my_reference]["children"][self.target] = { "node_data": target_node_object, "type": canonical_type, } if self.rank[target_ref_type] > 0: self.target = my_reference self.target_name = os.path.basename(my_reference) self.add_target_object() # add target if canonical else: # the parent is already in the data structure add child if self.target not in self.target_objects[my_reference]["children"]: parent_name = os.path.basename(my_reference) target_node_object["child_of"].append(parent_name) target_node_object["derived_from"].append(parent_name) self.target_objects[my_reference]["children"][ self.target ] = { "node_data": target_node_object, "type": canonical_type, } else: # target is a reference if target_ref_type: logger.error("Reference was not found or file has <=7 fields") sys.exit(1) logger.debug("Target has no Parent, it is a Reference") 
            if self.target not in self.target_objects:
                self.target_objects[self.target] = {
                    "type": canonical_type,
                    "node_data": target_node_object,
                    "children": {},
                }

    def get_reference(self, glob_target):
        """Find the FASTA reference for some prefix."""
        if len(glob(glob_target)) > 1:  # too many references....?
            logger.error(f"Multiple references found {glob(glob_target)}")
            # sys.exit(1)
        reference = glob(glob_target)
        if not reference:  # the object's parent could not be found
            logger.error(f"Could not find ref glob: {glob_target}")
            sys.exit(1)
        reference = glob(glob_target)[0]
        if not os.path.isfile(reference):  # cannot find reference file
            logger.error(f"Could not find main target {reference}")
            sys.exit(1)
        logger.debug(f"Found reference {reference}")
        return reference

    def write_node_object(self):
        """Write DSCensor object node as JSON."""
        my_name = self.write_me["filename"]
        if self.write_me["canonical_type"] == "genome_main":
            # early return: genome_main nodes are currently not written;
            # drop the return and uncomment the next line to restore counts
            # self.write_me["counts"] = basic_fasta_stats(self.target, 10, False)
            return
        if self.write_me["canonical_type"] == "gene_models_main":
            self.write_me["counts"] = count_gff_features(self.target)
        with open(f"./{my_name}.json", "w") as my_file:
            my_file.write(json.dumps(self.write_me))

    def check_busco(self):
        """Parse a previously generated BUSCO short summary into node data."""
        target = self.target
        node_data = self.node_data  # get current target's node
        busco_parse = re.compile(  # one-line summary pattern, kept for reference
            r"C:(.+)\%\[S:(.+)\%,D:(.+)\%\],F:(.+)\%,M:(.+)\%,n:(\d+)"
        )
        # output = f"{'.'.join(file_name.split('.')[:-2])}.busco"
        # cmd = f"run_BUSCO.py --mode {mode} --lineage {'lineage'}"
        # outdir = f"./run_{output}"  # output from BUSCO
        name = f'{os.path.basename(target)}_busco'
        if node_data.get("canonical_type") == "gene_models_main":
            name = "*.protein_primaryTranscript.*_busco"
        logger.debug(node_data.get("canonical_type"))
        logger.debug(f"{os.path.dirname(target)}/busco/{name}/short_summary*_busco.txt")
        short_summary = glob(f'{os.path.dirname(target)}/busco/{name}/short_summary*_busco.txt')
        if not short_summary:
            logger.debug(f"BUSCO short summary not found for {target}")
            return
        short_summary = short_summary[0]
        node_data["busco"] = {}
        with open(short_summary) as fopen:
            for line in fopen:
                line = line.rstrip()
                if line.startswith("#") or not line or len(line.split("\t")) < 3:
                    continue
                fields = line.split("\t")
                if fields[2].startswith("Complete BUSCOs"):
                    node_data["busco"]["complete_buscos"] = fields[1]
                elif fields[2].startswith("Complete and single"):
                    node_data["busco"]["single_copy_buscos"] = fields[1]
                elif fields[2].startswith("Complete and dupli"):
                    node_data["busco"]["duplicate_buscos"] = fields[1]
                elif fields[2].startswith("Fragmented"):
                    node_data["busco"]["fragmented_buscos"] = fields[1]
                elif fields[2].startswith("Missing"):
                    node_data["busco"]["missing_buscos"] = fields[1]
                elif fields[2].startswith("Total"):
                    node_data["busco"]["total_buscos"] = fields[1]

    def detect_incongruencies(self):
        """Check consistency of all discovered objects."""
        targets = self.target_objects  # get objects from class init
        busco = self.busco  # currently unused; summaries are parsed whenever nodes is set
        nodes = self.nodes  # if true, generate nodes for DSCensor
        for reference in sorted(targets, key=lambda k: self.rank[targets[k]["type"]]):
            if reference not in self.passed:
                self.passed[reference] = 0
            self.target = reference
            ref_method = getattr(
                specification_checks,
                targets[reference]["type"],  # reads checks from spec
                None,  # default so a missing check is reported, not a crash
            )  # type, e.g. genome_main
            if (
                not ref_method
            ):  # the target isn't in the check hierarchy
                logger.debug(
                    f"Check for {targets[reference]['type']} does not"
                    " exist"
                )
                continue
            logger.debug(ref_method)
            my_detector = ref_method(self, **self.options)
            passed = True
            if not self.disable_all:
                passed = my_detector.run()
            if (
                passed
            ):  # validation passed, write object node for DSCensor
                self.passed[reference] = 1
                self.node_data = targets[reference]["node_data"]
                if nodes:
                    logger.info(f"Writing node object for {reference}")
                    # dscensor node
                    self.check_busco()
                    self.write_me = targets[reference]["node_data"]
                    self.write_node_object()  # write node for dscensor loading
            logger.debug(f"{targets[reference]}")
            if self.target_objects[reference]["children"]:  # process children
                self.parent = reference
                children = self.target_objects[reference]["children"]
                for c in children:
                    if c in self.passed:
                        logger.debug(f"Child {c} Already Passed")
                        continue
                    self.passed[c] = 0
                    logger.info(f"Performing Checks for {c}")
                    self.target = c
                    child_method = getattr(
                        specification_checks,
                        children[c]["type"],  # check for spec
                        None,
                    )  # e.g. gene_models_main
                    if not child_method:
                        logger.warning(
                            f"Check for {children[c]['type']} does not exist"
                        )
                        continue
                    logger.debug(child_method)
                    my_detector = child_method(self, **self.options)
                    passed = True
                    if not self.disable_all:
                        passed = my_detector.run()
                    if (
                        passed
                    ):  # validation passed, write object node for DSCensor
                        self.passed[c] = 1
                        self.node_data = children[c]["node_data"]
                        if nodes:
                            logger.info(f"Writing node object for {c}")
                            self.check_busco()
                            self.write_me = children[c]["node_data"]
                            self.write_node_object()
                    logger.debug(f"{c}")


@cli.command()
@click_loguru.init_logger()
@click.option(
    "--busco", is_flag=True, default=False, help="""Parse BUSCO for node object."""
)
@click.option(
    "--nodes",
    is_flag=True,
    default=False,
    help="""Generate DSCensor stats and node.""",
)
@click.option(
    "--genome_main",
    is_flag=True,
    default=True,
    help="""Verify genomic DNA files.""",
)
@click.option(
    "--gene_models_main",
    is_flag=True,
    default=True,
    help="""Verify gene model FASTA files.""",
)
@click.option(
    "--genometools",
    is_flag=True,
    help="""Run genometools checks on GFF files.""",
)
@click.option(
    "--fasta_headers",
    is_flag=True,
    help="""Check consistency of FASTA headers and GFF.""",
)
@click.option(
    "--disable_all",
    is_flag=True,
    help="""Disables all consistency checking.""",
)
@click.argument("target", nargs=1)
def consistency(
    target,
    busco,
    nodes,
    genome_main,
    gene_models_main,
    genometools,
    fasta_headers,
    disable_all
):
    """Perform consistency checks on target directory."""
    detector = Detector(
        target,
        busco=busco,
        nodes=nodes,
        genome_main=genome_main,
        gene_models_main=gene_models_main,
        genometools=genometools,
        fasta_headers=fasta_headers,
        disable_all=disable_all
    )  # initialize class
    detector.detect_incongruencies()  # run all detection methods
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
from os import path
from collections import OrderedDict
import numpy as np
from scipy import stats
import pandas as pd
from sklearn import model_selection, metrics
from . import six, utils


def standardize_within_group(X, groups, with_mean=True, with_std=True):
    '''
    This is an extension of the "mean centering" method proposed in [1].
    It can be used as a replacement for training-set-wise standardization.
    Both with_mean and with_std may provide some extra performance.

    References
    ----------
    [1] Lee, S., & Kable, J. W. (2018). Simple but robust improvement in
        multivoxel pattern classification. PloS One, 13(11), e0207083.
    '''
    X = X.copy()
    for g in np.unique(groups):
        indexer = (groups==g)
        u = X[indexer].mean(axis=0, keepdims=True)
        s = X[indexer].std(axis=0, keepdims=True) if with_std else 1
        if with_mean:
            X[indexer] = (X[indexer] - u) / s
        else:
            X[indexer] = (X[indexer] - u) / s + u
    return X


def permute_within_group(y, groups):
    y = y.copy()
    for g in np.unique(groups):
        indexer = (groups==g)
        y[indexer] = y[indexer][np.random.permutation(np.sum(indexer))]
    return y


def cross_validate_ext(model, X, y, groups=None, cv=None, pred_kws=None, method=None):
    if cv is None:
        cv = model_selection.LeaveOneGroupOut()  # one group per run
    if method is None:
        method = 'predict'
    pred_kws = {} if pred_kws is None else dict(pred_kws)
    res = []
    idx = []
    for train_index, test_index in cv.split(X, y, groups):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        model.fit(X_train, y_train)
        res.append(getattr(model, method)(X_test, **pred_kws))
        idx.extend(test_index)
    sorter = np.argsort(idx)
    if isinstance(res[0], tuple) and len(res[0]) > 1:  # predict() has more than one output
        res = tuple([np.concatenate([r[k] for r in res], axis=0)[sorter] for k in range(len(res[0]))])
    else:  # predict() has only one output
        res = np.concatenate(res, axis=0)[sorter]
    return res


def cross_validate_with_permutation(model, X, y, groups, rois=None, n_permutations=1000, scoring=None, cv=None):
    if rois is None:
        X, y, groups, rois = [X], [y], [groups], ['NA']
    if cv is None:
        cv = model_selection.LeaveOneGroupOut()  # one group per run
    if scoring is None:
        scoring = {'performance': metrics.make_scorer(metrics.accuracy_score)}
    def cross_validate(X, y, groups, roi, permute):
        if permute:
            y = permute_within_group(y, groups)
        # groups must be passed by keyword: sklearn made it keyword-only in 1.0
        scores = model_selection.cross_validate(model, X, y, groups=groups,
            scoring=scoring, cv=cv, return_train_score=True, n_jobs=1)
        res = OrderedDict(roi=roi, permute=permute,
            train=np.mean(scores['train_performance']),
            test=np.mean(scores['test_performance']))
        return res
    res = []
    for XX, yy, gg, roi in zip(X, y, groups, rois):
        for permute in range(n_permutations+1):
            res.append(cross_validate(XX, yy, gg, roi, permute))
    res = pd.DataFrame(res)
    return res


def compute_critical_value(x, y, permute='permute', data=None, alpha=0.05, tail=2):
    '''
    Get critical values based on the permutation distribution, and account for
    multiple comparisons using extreme statistics.

    Parameters
    ----------
    x : str, list of str
        Columns along which multiple comparisons occur (e.g., roi, time).
    y : str
        Column for performance measurement (e.g., test_accuracy, PC, RT).
    data : pd.DataFrame(x, y, permute)
        permute == 0 is originally observed data, >= 1 is permutation data.
    '''
    # Mean performance for each condition and each permutation
    by = [x, permute] if isinstance(x, six.string_types) else list(x) + [permute]
    df = data[data[permute]>0].groupby(by=by)[y].mean()  # a Series with a MultiIndex
    # Globally corrected critical value
    max_dist = df.groupby(permute).max().values  # max distribution
    min_dist = df.groupby(permute).min().values  # min distribution
    if tail == 2:
        gmax = np.percentile(max_dist, (1-alpha/2)*100)
        gmin = np.percentile(min_dist, alpha/2*100)
    else:
        gmax = np.percentile(max_dist, (1-alpha)*100)
        gmin = np.percentile(min_dist, alpha*100)
    # Per-comparison (uncorrected) critical value
    if tail == 2:
        pmax = df.groupby(x).quantile(1-alpha/2)
        pmin = df.groupby(x).quantile(alpha/2)
    else:
        pmax = df.groupby(x).quantile(1-alpha)
        pmin = df.groupby(x).quantile(alpha)
    bounds = pd.concat([pmin, pmax], axis=1)
    bounds.columns = ['lower', 'upper']
    bounds = pd.concat([pd.DataFrame([{'lower': gmin, 'upper': gmax}], index=['overall']), bounds], axis=0)
    # Determine significance
    obs = data[data[permute]==0]
    if obs.size > 0:  # contains originally observed data
        bounds['obs_mean'] = pd.concat([pd.Series([np.nan], ['overall']), obs.groupby(by=x)[y].mean()], axis=0)  # mean response
        bounds['obs_std'] = pd.concat([pd.Series([np.nan], ['overall']), obs.groupby(by=x)[y].std()], axis=0)
        bounds['obs_n'] = pd.concat([pd.Series([-1], ['overall']), obs.groupby(by=x)[y].count()], axis=0)
        n_comparisons = len(obs[x].unique())
        if tail == 2:
            # The two-tailed p value is twice the one-tailed p value (assuming
            # you correctly predicted the direction of the difference)
            bounds['corrected'] = (bounds.obs_mean < bounds.lower['overall']) | (bounds.upper['overall'] < bounds.obs_mean)  # significance (corrected)
            bounds['uncorrected'] = (bounds.obs_mean < bounds.lower) | (bounds.upper < bounds.obs_mean)  # significance (uncorrected)
        elif tail == 1:
            bounds['corrected'] = (bounds.upper['overall'] < bounds.obs_mean)
            bounds['p_corr'] = [np.nan] + [1-stats.percentileofscore(max_dist, v)/100 for v in bounds.obs_mean[1:]]
            bounds['uncorrected'] = (bounds.upper < bounds.obs_mean)
            bounds['p_uncorr'] = [np.nan] + [1-stats.percentileofscore(df[k], v)/100 for k, v in bounds.obs_mean[1:].items()]  # Series.iteritems() was removed in pandas 2.0
            bounds['bonferroni'] = bounds['p_uncorr'] * n_comparisons
        elif tail == -1:
            bounds['corrected'] = (bounds.obs_mean < bounds.lower['overall'])
            bounds['uncorrected'] = (bounds.obs_mean < bounds.lower)
    return bounds


# def plot_permutation(x, y, subject='subject', permute='permute', data=None, plot=None,
#     color=None, x_order=None, xtick_format=None, chance=0, alpha=0.05, tail=2, mcc='extreme',
#     figsize=None, star_shift=None, star_alpha=None,
#     dist_kws=None, scatter_kws=None, line_kws=None, bar_kws=None, point_kws=None, chance_kws=None, ref_kws=None,
#     show_mcc=True, show_n=True, show_p=False, show_ref=False, show_num=False):
#     '''
#     Parameters
#     ----------
#     x : str
#     y : str
#     data : pd.DataFrame(x, y, permute)
#         permute == 0 is originally observed data, >= 1 is permutation data.
#     '''
#     if figsize is not None:
#         fig = plt.figure(figsize=figsize)
#     else:
#         fig = plt.gcf()
#     pmt = data[data[permute]>0]
#     has_pmt = (pmt.size > 0)
#     obs = data[data[permute]==0]
#     if x_order is None:
#         x_order = data[x].unique()
#     else:
#         x_order = [x_label for x_label in x_order if x_label in data[x].values]
#     if has_pmt:
#         bounds = compute_critical_value(x=x, y=y, data=data, alpha=alpha, tail=tail)
#     df_pmt = pmt.groupby(by=[x, permute])[y].mean().reset_index()
#     df_obs = obs.groupby(by=[x, subject])[y].mean().reset_index()
#     obs_mean = df_obs.groupby(x)[y].mean()
#     obs_n = df_obs.groupby(x)[y].count()
#     x_loc = np.arange(len(df_obs[x].unique()))
#     if plot is None:
#         plot = 'violinplot' if has_pmt else 'barplot'
#     if plot == 'violinplot':
#         # Plot permutation distribution
#         dist_kws = dict(dict(color='gray', inner=None, linewidth=0), **(dist_kws if dist_kws is not None else {}))
#         sns.violinplot(x=x, y=y, data=df_pmt, order=x_order, **dist_kws)
#         # Plot originally observed data
#         scatter_kws = dict(dict(color=color, s=100, linewidths=1, edgecolors='k'), **(scatter_kws if scatter_kws is not None else {}))
#         plt.scatter(np.arange(len(x_order)), bounds.loc[x_order,'obs_mean'], **scatter_kws)
#     elif plot == 'lineplot':
#         line_kws = dict(dict(), **(line_kws if line_kws is not None else {}))
#         sns.lineplot(x=x, y=y, data=df_obs, ci=(1-alpha)*100, palette=color, **line_kws)
#         x_loc = df_obs[x].unique()
#     elif plot == 'barplot':
#         color = 'gray' if color is None else color
#         bar_kws = dict(dict(), **(bar_kws if bar_kws is not None else {}))
#         sns.barplot(x=x, y=y, data=df_obs, order=x_order, ci=(1-alpha)*100, color=color, **bar_kws)
#     elif plot == 'finalplot':
#         # Plot permutation distribution
#         dist_kws = dict(dict(color='gray', alpha=0.5, inner=None, linewidth=0), **(dist_kws if dist_kws is not None else {}))
#         sns.violinplot(x=x, y=y, data=df_pmt, order=x_order, **dist_kws)
#         # Plot bootstrap errorbars
#         color = 'k' if color is None else color
#         point_kws = dict(dict(linestyles='', scale=0.5, errwidth=2, capsize=0.1, facecolors='r'), **(point_kws if point_kws is not None else {}))
#         sns.pointplot(x=x, y=y, data=df_obs, order=x_order, ci=(1-alpha)*100, color=color, **point_kws)
#         # Plot originally observed data
#         scatter_kws = dict(dict(s=50, marker='o', linewidths=1, edgecolors=color, zorder=10), **(scatter_kws if scatter_kws is not None else {}))
#         plt.scatter(np.arange(len(x_order)), bounds.loc[x_order,'obs_mean'], **scatter_kws)
#     # Shift long ticklabels
#     if xtick_format is None:
#         xtick_format = ('normal' if plot == 'lineplot' else 'rotated')
#     if xtick_format == 'rotated':
#         plt.setp(plt.gca().get_xticklabels(), rotation=-30)
#         dx = 20/72; dy = 0/72
#         offset = matplotlib.transforms.ScaledTranslation(dx, dy, fig.dpi_scale_trans)
#         for label in plt.gca().xaxis.get_majorticklabels():
#             label.set_transform(label.get_transform() + offset)
#     elif xtick_format == 'short':
#         plt.setp(plt.gca(), xticklabels=[label.get_text().split('_')[0] for label in plt.gca().get_xticklabels()])
#     elif xtick_format == 'normal':
#         pass
#     elif xtick_format == 'final':
#         plt.setp(plt.gca(), xticklabels=[label.get_text().split('_')[0] for label in plt.gca().get_xticklabels()])
#         plt.setp(plt.gca().get_xticklabels(), rotation=45, ha='right')
#         dx = 15/72; dy = 5/72
#         offset = matplotlib.transforms.ScaledTranslation(dx, dy, fig.dpi_scale_trans)
#         for label in plt.gca().xaxis.get_majorticklabels():
#             label.set_transform(label.get_transform() + offset)
#     # Plot chance level
#     chance_kws = dict(dict(color='C3', ls='--', zorder=1), **(chance_kws if chance_kws is not None else {}))
#     plt.axhline(chance, **chance_kws)
#     # Plot reference line
#     if show_ref:
#         ref_kws = dict(dict(ref=0.55, color='gray', lw=0.5, ls='--'), **(ref_kws if ref_kws is not None else {}))
#         ref = ref_kws.pop('ref')
#         plt.axhline(ref, **ref_kws)
#     if star_alpha is None:
#         star_alpha = [0.3, 1]
#     if has_pmt:
#         # Plot multiple comparison correction band
#         if show_mcc:
#             plt.axhspan(bounds.loc['overall','lower'] if tail==2 else chance, bounds.loc['overall','upper'], color='r', alpha=0.1)
#         # Plot significant stars
#         if star_shift is None:
#             star_shift = bounds['obs_std'].iloc[1:]/np.sqrt(bounds['obs_n'].iloc[1:]) * 2.2  # Ignore first row
#         else:
#             star_shift = pd.Series(star_shift, index=bounds.index)
#         for k, x_label in enumerate(x_order):
#             if bounds.loc[x_label,'uncorrected']:
#                 plt.text(x_loc[k], bounds.loc[x_label,'obs_mean']+star_shift[x_label], '*', ha='center', alpha=star_alpha[0])
#             if bounds.loc[x_label,'corrected']:
#                 plt.text(x_loc[k], bounds.loc[x_label,'obs_mean']+star_shift[x_label], '*', ha='center', alpha=star_alpha[1])
#             if show_p:
#                 if mcc == 'none':
#                     plt.text(x_loc[k], 0.15, f"{bounds.loc[x_label,'p_uncorr']:.3f}", transform=myplot.transHDVA(), ha='center', alpha=star_alpha[0], fontsize='xx-small')
#                 if mcc == 'extreme':
#                     plt.text(x_loc[k], 0.15, f"{bounds.loc[x_label,'p_corr']:.3f}", transform=myplot.transHDVA(), ha='center', alpha=star_alpha[0], fontsize='xx-small')
#                 elif mcc == 'bonferroni':
#                     plt.text(x_loc[k], 0.15, f"{bounds.loc[x_label,'bonferroni']:.3f}", transform=myplot.transHDVA(), ha='center', alpha=star_alpha[1], fontsize='xx-small')
#     # Plot performance
#     if show_num:
#         for k, x_label in enumerate(x_order):
#             plt.text(x_loc[k], bounds.loc['overall','upper']*1.1-0.05 if has_pmt else 0.9, f"{obs_mean[x_label]:.3f}",
#                      transform=plt.gca().transData if has_pmt else myplot.transHDVA(), ha='center', alpha=star_alpha[0], fontsize='xx-small')
#     # Plot obs_n
#     if show_n:
#         if len(set(obs_n[x_order].values)) == 1:  # All equal
#             plt.text(0.95, 0.05, f"$n={obs_n[x_order[0]]}$", transform=plt.gca().transAxes, ha='right', fontsize='x-small')
#         else:
#             for k, x_label in enumerate(x_order):
#                 plt.text(x_loc[k], 0.05, f"$n={obs_n[x_label]}$" if k == 0 else f"${obs_n[x_label]}$",
#                          transform=myplot.transHDVA(), ha='center', fontsize='x-small')


if __name__ == '__main__':
    pass
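# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal end-to-end run of the permutation-testing pipeline defined above on
# synthetic data. The classifier, data shapes, and n_permutations below are
# assumptions chosen for speed, not recommendations.
if __name__ == '__main__':
    from sklearn import svm
    rng = np.random.RandomState(0)
    X = rng.randn(60, 10)                    # 60 trials x 10 features (voxels)
    y = np.tile([0, 1], 30)                  # two alternating conditions
    groups = np.repeat(np.arange(6), 10)     # 6 runs ("groups") of 10 trials each
    X[y == 1] += 0.5                         # inject a weak class difference
    X = standardize_within_group(X, groups)  # run-wise standardization
    res = cross_validate_with_permutation(svm.LinearSVC(), X, y, groups,
                                          n_permutations=50)
    # Permutation-based critical values with max-statistic correction
    print(compute_critical_value('roi', 'test', data=res))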
"""Support for monitoring emoncms feeds.""" from datetime import timedelta import logging import requests import voluptuous as vol from homeassistant.components.sensor import ( PLATFORM_SCHEMA, STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING, SensorEntity, ) from homeassistant.const import ( CONF_API_KEY, CONF_ID, CONF_SCAN_INTERVAL, CONF_UNIT_OF_MEASUREMENT, CONF_URL, CONF_VALUE_TEMPLATE, DEVICE_CLASS_ENERGY, DEVICE_CLASS_POWER, HTTP_OK, POWER_WATT, STATE_UNKNOWN, ) from homeassistant.helpers import template import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) ATTR_FEEDID = "FeedId" ATTR_FEEDNAME = "FeedName" ATTR_LASTUPDATETIME = "LastUpdated" ATTR_LASTUPDATETIMESTR = "LastUpdatedStr" ATTR_SIZE = "Size" ATTR_TAG = "Tag" ATTR_USERID = "UserId" CONF_EXCLUDE_FEEDID = "exclude_feed_id" CONF_ONLY_INCLUDE_FEEDID = "include_only_feed_id" CONF_SENSOR_NAMES = "sensor_names" DECIMALS = 2 DEFAULT_UNIT = POWER_WATT MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5) ONLY_INCL_EXCL_NONE = "only_include_exclude_or_none" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_URL): cv.string, vol.Required(CONF_ID): cv.positive_int, vol.Exclusive(CONF_ONLY_INCLUDE_FEEDID, ONLY_INCL_EXCL_NONE): vol.All( cv.ensure_list, [cv.positive_int] ), vol.Exclusive(CONF_EXCLUDE_FEEDID, ONLY_INCL_EXCL_NONE): vol.All( cv.ensure_list, [cv.positive_int] ), vol.Optional(CONF_SENSOR_NAMES): vol.All( {cv.positive_int: vol.All(cv.string, vol.Length(min=1))} ), vol.Optional(CONF_VALUE_TEMPLATE): cv.template, vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=DEFAULT_UNIT): cv.string, } ) def get_id(sensorid, feedtag, feedname, feedid, feeduserid): """Return unique identifier for feed / sensor.""" return f"emoncms{sensorid}_{feedtag}_{feedname}_{feedid}_{feeduserid}" def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Emoncms sensor.""" apikey = config.get(CONF_API_KEY) url = config.get(CONF_URL) sensorid = config.get(CONF_ID) value_template = config.get(CONF_VALUE_TEMPLATE) config_unit = config.get(CONF_UNIT_OF_MEASUREMENT) exclude_feeds = config.get(CONF_EXCLUDE_FEEDID) include_only_feeds = config.get(CONF_ONLY_INCLUDE_FEEDID) sensor_names = config.get(CONF_SENSOR_NAMES) interval = config.get(CONF_SCAN_INTERVAL) if value_template is not None: value_template.hass = hass data = EmonCmsData(hass, url, apikey, interval) data.update() if data.data is None: return False sensors = [] for elem in data.data: if exclude_feeds is not None and int(elem["id"]) in exclude_feeds: continue if include_only_feeds is not None and int(elem["id"]) not in include_only_feeds: continue name = None if sensor_names is not None: name = sensor_names.get(int(elem["id"]), None) unit = elem.get("unit") if unit: unit_of_measurement = unit else: unit_of_measurement = config_unit sensors.append( EmonCmsSensor( hass, data, name, value_template, unit_of_measurement, str(sensorid), elem, ) ) add_entities(sensors) class EmonCmsSensor(SensorEntity): """Implementation of an Emoncms sensor.""" def __init__( self, hass, data, name, value_template, unit_of_measurement, sensorid, elem ): """Initialize the sensor.""" if name is None: # Suppress ID in sensor name if it's 1, since most people won't # have more than one EmonCMS source and it's redundant to show the # ID if there's only one. 
id_for_name = "" if str(sensorid) == "1" else sensorid # Use the feed name assigned in EmonCMS or fall back to the feed ID feed_name = elem.get("name") or f"Feed {elem["id"]}" self._name = f"EmonCMS{id_for_name} {feed_name}" else: self._name = name self._identifier = get_id( sensorid, elem["tag"], elem["name"], elem["id"], elem["userid"] ) self._hass = hass self._data = data self._value_template = value_template self._unit_of_measurement = unit_of_measurement self._sensorid = sensorid self._elem = elem if unit_of_measurement == "kWh": self._attr_device_class = DEVICE_CLASS_ENERGY self._attr_state_class = STATE_CLASS_TOTAL_INCREASING elif unit_of_measurement == "W": self._attr_device_class = DEVICE_CLASS_POWER self._attr_state_class = STATE_CLASS_MEASUREMENT if self._value_template is not None: self._state = self._value_template.render_with_possible_json_value( elem["value"], STATE_UNKNOWN ) else: self._state = round(float(elem["value"]), DECIMALS) @property def name(self): """Return the name of the sensor.""" return self._name @property def native_unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def native_value(self): """Return the state of the device.""" return self._state @property def extra_state_attributes(self): """Return the attributes of the sensor.""" return { ATTR_FEEDID: self._elem["id"], ATTR_TAG: self._elem["tag"], ATTR_FEEDNAME: self._elem["name"], ATTR_SIZE: self._elem["size"], ATTR_USERID: self._elem["userid"], ATTR_LASTUPDATETIME: self._elem["time"], ATTR_LASTUPDATETIMESTR: template.timestamp_local(float(self._elem["time"])), } def update(self): """Get the latest data and updates the state.""" self._data.update() if self._data.data is None: return elem = next( ( elem for elem in self._data.data if get_id( self._sensorid, elem["tag"], elem["name"], elem["id"], elem["userid"], ) == self._identifier ), None, ) if elem is None: return self._elem = elem if self._value_template is not None: self._state = self._value_template.render_with_possible_json_value( elem["value"], STATE_UNKNOWN ) else: self._state = round(float(elem["value"]), DECIMALS) class EmonCmsData: """The class for handling the data retrieval.""" def __init__(self, hass, url, apikey, interval): """Initialize the data object.""" self._apikey = apikey self._url = f"{url}/feed/list.json" self._interval = interval self._hass = hass self.data = None @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Get the latest data from Emoncms.""" try: parameters = {"apikey": self._apikey} req = requests.get( self._url, params=parameters, allow_redirects=True, timeout=5 ) except requests.exceptions.RequestException as exception: _LOGGER.error(exception) return else: if req.status_code == HTTP_OK: self.data = req.json() else: _LOGGER.error( "Please verify if the specified configuration value " "'%s' is correct! (HTTP Status_code = %d)", CONF_URL, req.status_code, )
"""Support for monitoring emoncms feeds.""" from datetime import timedelta import logging import requests import voluptuous as vol from homeassistant.components.sensor import ( PLATFORM_SCHEMA, STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING, SensorEntity, ) from homeassistant.const import ( CONF_API_KEY, CONF_ID, CONF_SCAN_INTERVAL, CONF_UNIT_OF_MEASUREMENT, CONF_URL, CONF_VALUE_TEMPLATE, DEVICE_CLASS_ENERGY, DEVICE_CLASS_POWER, HTTP_OK, POWER_WATT, STATE_UNKNOWN, ) from homeassistant.helpers import template import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) ATTR_FEEDID = "FeedId" ATTR_FEEDNAME = "FeedName" ATTR_LASTUPDATETIME = "LastUpdated" ATTR_LASTUPDATETIMESTR = "LastUpdatedStr" ATTR_SIZE = "Size" ATTR_TAG = "Tag" ATTR_USERID = "UserId" CONF_EXCLUDE_FEEDID = "exclude_feed_id" CONF_ONLY_INCLUDE_FEEDID = "include_only_feed_id" CONF_SENSOR_NAMES = "sensor_names" DECIMALS = 2 DEFAULT_UNIT = POWER_WATT MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5) ONLY_INCL_EXCL_NONE = "only_include_exclude_or_none" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_URL): cv.string, vol.Required(CONF_ID): cv.positive_int, vol.Exclusive(CONF_ONLY_INCLUDE_FEEDID, ONLY_INCL_EXCL_NONE): vol.All( cv.ensure_list, [cv.positive_int] ), vol.Exclusive(CONF_EXCLUDE_FEEDID, ONLY_INCL_EXCL_NONE): vol.All( cv.ensure_list, [cv.positive_int] ), vol.Optional(CONF_SENSOR_NAMES): vol.All( {cv.positive_int: vol.All(cv.string, vol.Length(min=1))} ), vol.Optional(CONF_VALUE_TEMPLATE): cv.template, vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=DEFAULT_UNIT): cv.string, } ) def get_id(sensorid, feedtag, feedname, feedid, feeduserid): """Return unique identifier for feed / sensor.""" return f"emoncms{sensorid}_{feedtag}_{feedname}_{feedid}_{feeduserid}" def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Emoncms sensor.""" apikey = config.get(CONF_API_KEY) url = config.get(CONF_URL) sensorid = config.get(CONF_ID) value_template = config.get(CONF_VALUE_TEMPLATE) config_unit = config.get(CONF_UNIT_OF_MEASUREMENT) exclude_feeds = config.get(CONF_EXCLUDE_FEEDID) include_only_feeds = config.get(CONF_ONLY_INCLUDE_FEEDID) sensor_names = config.get(CONF_SENSOR_NAMES) interval = config.get(CONF_SCAN_INTERVAL) if value_template is not None: value_template.hass = hass data = EmonCmsData(hass, url, apikey, interval) data.update() if data.data is None: return False sensors = [] for elem in data.data: if exclude_feeds is not None and int(elem["id"]) in exclude_feeds: continue if include_only_feeds is not None and int(elem["id"]) not in include_only_feeds: continue name = None if sensor_names is not None: name = sensor_names.get(int(elem["id"]), None) unit = elem.get("unit") if unit: unit_of_measurement = unit else: unit_of_measurement = config_unit sensors.append( EmonCmsSensor( hass, data, name, value_template, unit_of_measurement, str(sensorid), elem, ) ) add_entities(sensors) class EmonCmsSensor(SensorEntity): """Implementation of an Emoncms sensor.""" def __init__( self, hass, data, name, value_template, unit_of_measurement, sensorid, elem ): """Initialize the sensor.""" if name is None: # Suppress ID in sensor name if it's 1, since most people won't # have more than one EmonCMS source and it's redundant to show the # ID if there's only one. 
id_for_name = "" if str(sensorid) == "1" else sensorid # Use the feed name assigned in EmonCMS or fall back to the feed ID feed_name = elem.get("name") or f"Feed {elem['id']}" self._name = f"EmonCMS{id_for_name} {feed_name}" else: self._name = name self._identifier = get_id( sensorid, elem["tag"], elem["name"], elem["id"], elem["userid"] ) self._hass = hass self._data = data self._value_template = value_template self._unit_of_measurement = unit_of_measurement self._sensorid = sensorid self._elem = elem if unit_of_measurement == "kWh": self._attr_device_class = DEVICE_CLASS_ENERGY self._attr_state_class = STATE_CLASS_TOTAL_INCREASING elif unit_of_measurement == "W": self._attr_device_class = DEVICE_CLASS_POWER self._attr_state_class = STATE_CLASS_MEASUREMENT if self._value_template is not None: self._state = self._value_template.render_with_possible_json_value( elem["value"], STATE_UNKNOWN ) else: self._state = round(float(elem["value"]), DECIMALS) @property def name(self): """Return the name of the sensor.""" return self._name @property def native_unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def native_value(self): """Return the state of the device.""" return self._state @property def extra_state_attributes(self): """Return the attributes of the sensor.""" return { ATTR_FEEDID: self._elem["id"], ATTR_TAG: self._elem["tag"], ATTR_FEEDNAME: self._elem["name"], ATTR_SIZE: self._elem["size"], ATTR_USERID: self._elem["userid"], ATTR_LASTUPDATETIME: self._elem["time"], ATTR_LASTUPDATETIMESTR: template.timestamp_local(float(self._elem["time"])), } def update(self): """Get the latest data and updates the state.""" self._data.update() if self._data.data is None: return elem = next( ( elem for elem in self._data.data if get_id( self._sensorid, elem["tag"], elem["name"], elem["id"], elem["userid"], ) == self._identifier ), None, ) if elem is None: return self._elem = elem if self._value_template is not None: self._state = self._value_template.render_with_possible_json_value( elem["value"], STATE_UNKNOWN ) else: self._state = round(float(elem["value"]), DECIMALS) class EmonCmsData: """The class for handling the data retrieval.""" def __init__(self, hass, url, apikey, interval): """Initialize the data object.""" self._apikey = apikey self._url = f"{url}/feed/list.json" self._interval = interval self._hass = hass self.data = None @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Get the latest data from Emoncms.""" try: parameters = {"apikey": self._apikey} req = requests.get( self._url, params=parameters, allow_redirects=True, timeout=5 ) except requests.exceptions.RequestException as exception: _LOGGER.error(exception) return else: if req.status_code == HTTP_OK: self.data = req.json() else: _LOGGER.error( "Please verify if the specified configuration value " "'%s' is correct! (HTTP Status_code = %d)", CONF_URL, req.status_code, )
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of the SimCenter Backend Applications
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# this file. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# Adam Zsarnóczay
# Kuanshi Zhong
#
# Based on rulesets developed by:
# Karen Angeles
# Meredith Lockhead
# Tracy Kijewski-Correa

import random
import numpy as np
import datetime


def SPMB_config(BIM):
    """
    Rules to identify a HAZUS SPMB configuration based on BIM data

    Parameters
    ----------
    BIM: dictionary
        Information about the building characteristics.

    Returns
    -------
    config: str
        A string that identifies a specific configuration within this
        building class.
    """
    year = BIM['year_built']  # just for the sake of brevity

    # Roof Deck Age (~ Roof Quality)
    if BIM['year_built'] >= (datetime.datetime.now().year - 50):
        roof_quality = 'god'
    else:
        roof_quality = 'por'

    # Shutters
    if year >= 2000:
        shutters = BIM['WBD']
    # BOCA 1996 and earlier:
    # Shutters were not required by code until the 2000 IBC. Before 2000, the
    # percentage of commercial buildings that have shutters is assumed to be
    # 46%. This value is based on a study on preparedness of small businesses
    # for hurricane disasters, which says that in Sarasota County, 46% of
    # business owners had taken action to wind-proof or flood-proof their
    # facilities. In addition to that, 46% of business owners reported boarding
    # up their businesses before Hurricane Katrina. In addition, compliance
    # rates based on the Homeowners Survey data hover between 43 and 50 percent.
    else:
        if BIM['WBD']:
            shutters = random.random() < 0.46
        else:
            shutters = False

    # Metal RDA
    # 1507.2.8.1 High Wind Attachment.
    # Underlayment applied in areas subject to high winds (Vasd greater
    # than 110 mph as determined in accordance with Section 1609.3.1) shall
    # be applied with corrosion-resistant fasteners in accordance with
    # the manufacturer’s instructions. Fasteners are to be applied along
    # the overlap not more than 36 inches on center.
    if BIM['V_ult'] > 142:
        MRDA = 'std'  # standard
    else:
        MRDA = 'sup'  # superior

    if BIM['area'] <= 4000:
        bldg_tag = 'SPMBS'
    elif BIM['area'] <= 50000:
        bldg_tag = 'SPMBM'
    else:
        bldg_tag = 'SPMBL'

    bldg_config = f"{bldg_tag}_" \
                  f"{roof_quality}_" \
                  f"{int(shutters)}_" \
                  f"{MRDA}_" \
                  f"{int(BIM['terrain'])}"
    return bldg_config
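# --- Usage sketch (added for illustration; not part of the ruleset) ---
# A hypothetical BIM dictionary with just the keys SPMB_config() reads. The
# values are invented; the returned string follows the format
# "<bldg_tag>_<roof_quality>_<shutters>_<MRDA>_<terrain>".
if __name__ == '__main__':
    BIM = {
        'year_built': 1995,  # pre-2000, so shutters are sampled at 46% if WBD
        'WBD': True,         # located in a wind-borne debris region
        'V_ult': 150,        # mph; > 142 selects the standard metal RDA
        'area': 30000,       # sq. ft; 4000 < area <= 50000 selects 'SPMBM'
        'terrain': 35,       # HAZUS terrain roughness code
    }
    print(SPMB_config(BIM))  # e.g. 'SPMBM_god_1_std_35' (shutters is random)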
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.ticker import StrMethodFormatter
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from data_visualization import *


def filter_genres(df):
    genres_to_remove = ['Audio Production', 'Photo Editing', 'Video Production',
                        'Web Publishing', 'Animation & Modeling', 'Game Development',
                        'Utilities', 'Design & Illustration']
    return df[~df['genres'].isin(genres_to_remove)]


def parse_steam(df):
    return df[df["review_score"] > 0.]


def parse_rawg(df):
    df = df[df["rating"] > 0.]
    df["rating"] *= 2  # rescale RAWG's 0-5 rating to a 0-10 scale
    return df


def parse_metacritic(df):
    df = df[df["metacritic"] != ' ']
    df["metacritic"] = df["metacritic"].astype(float)
    df["metacritic"] /= 10.  # rescale Metacritic's 0-100 score to a 0-10 scale
    return df


def avg_for_all(df):
    df = parse_steam(df)
    print(f"With a Steam review: {len(df['review_score'])}")
    df = parse_rawg(df)
    df = parse_metacritic(df)
    avg_steam = df["review_score"].mean()
    avg_meta = df["metacritic"].mean()
    avg_rawg = df["rating"].mean()
    print("All games have a rating on every portal")
    print(f"steam avg: {avg_steam}")
    print(f"rawg avg: {avg_rawg}")
    print(f"metacritic avg: {avg_meta}")
    plot_avg_game_score("ratings", "count", "Rating distribution for the three portals",
                        review_score=df["review_score"],
                        metacritic=df["metacritic"], rating=df["rating"])


def avg_RAWG_STEAM(df):
    df = parse_rawg(df)
    print(f"With a RAWG rating: {len(df['rating'])}")
    df = parse_steam(df)
    avg_steam = df["review_score"].mean()
    avg_rawg = df["rating"].mean()
    print("Ratings for RAWG and Steam")
    print(f"steam avg: {avg_steam}")
    print(f"rawg avg: {avg_rawg}")
    plot_avg_game_score("ratings", "count", "Rating distribution for RAWG and Steam",
                        review_score=df["review_score"], rating=df["rating"])


def avg_RAWG_metacritic(df):
    df = parse_metacritic(df)
    print(f"With a Metacritic score: {len(df['metacritic'])}")
    df = parse_rawg(df)
    avg_rawg = df["rating"].mean()
    avg_meta = df["metacritic"].mean()
    print("Ratings for RAWG and Metacritic")
    print(f"metacritic avg: {avg_meta}")
    print(f"rawg avg: {avg_rawg}")
    plot_avg_game_score("ratings", "count", "Rating distribution for Metacritic and RAWG",
                        metacritic=df["metacritic"], rating=df["rating"])


def avg_metacritic_STEAM(df):
    df = parse_steam(df)
    df = parse_metacritic(df)
    avg_steam = df["review_score"].mean()
    avg_meta = df["metacritic"].mean()
    print("Ratings for Metacritic and Steam")
    print(f"steam avg: {avg_steam}")
    print(f"metacritic avg: {avg_meta}")
    plot_avg_game_score("ratings", "count", "Rating distribution for Metacritic and Steam",
                        review_score=df["review_score"], metacritic=df["metacritic"])


def scenario1(df):
    avg_for_all(df)
    avg_metacritic_STEAM(df)
    avg_RAWG_metacritic(df)
    avg_RAWG_STEAM(df)


def get_top_20_publishers(df):
    developers = df[df.duplicated(subset=['publisher_steam'], keep=False)]["publisher_steam"]
    df = df[df['publisher_steam'].isin(developers)]
    df2 = df.groupby(["publisher_steam"])['total_reviews'].sum().reset_index()
    indexes = df2.sort_values(by='total_reviews', ascending=False).head(20)['publisher_steam'].values
    return df[df['publisher_steam'].isin(indexes)]


def sort_publishers_by_games_number(df):
    developers_20 = get_top_20_publishers(df)
    developers_20['num_games'] = developers_20.groupby('publisher_steam')['id'].transform('count')
    return developers_20.sort_values(['num_games', 'publisher_steam'], ascending=False)


def print_analysis_results(publisher, variance, std, mean):
    print("for developer " + publisher)
    print(f"variance: {variance}")
    print(f"standard deviation: {std}")
    print(f"mean: {mean}")
    print("\n")


def get_score_column(portal):
    if portal == 'RAWG':
        return "rating"
    elif portal == 'Steam':
        return 'review_score'
    return 'metacritic'


def parse_score(portal, df):
    if portal == 'RAWG':
        return parse_rawg(df)
    elif portal == 'Steam':
        return parse_steam(df)
    return parse_metacritic(df)


def analysis_for_games_publishers(df, portal):
    score_from = get_score_column(portal)
    df = parse_score(portal, df)
    sorted_publishers = sort_publishers_by_games_number(df)
    publishers_list = sorted_publishers['publisher_steam'].unique()
    print(publishers_list)
    x = []
    y = []
    e = []
    for i in publishers_list:
        df = sorted_publishers[sorted_publishers['publisher_steam'] == i]
        variance = df[score_from].var()
        std = df[score_from].std()
        mean = df[score_from].mean()
        print_analysis_results(i, variance, std, mean)
        x.append(mean)
        y.append(i)
        e.append(std)
    plot_publishers_analysis_results(sorted_publishers, score_from)


def scenario3(df):
    analysis_for_games_publishers(df, 'RAWG')
    analysis_for_games_publishers(df, 'Steam')
    analysis_for_games_publishers(df, 'metacritic')


def add_year(df):
    df = df[df["released"] != ' '].reset_index()
    # Keep only the year part of the release date (e.g. '2015-06-23' -> '2015')
    df['year'] = df['released'].astype(str).str[:4]
    df = df[df["year"] != '2020']
    return df.sort_values(by='released', ascending=False)


def print_years_analysis(year, variance, std, mean, game_amount):
    print("for year " + year)
    print(f"variance: {variance}")
    print(f"standard deviation: {std}")
    print(f"mean: {mean}")
    print(f"number of released games: {game_amount}")
    print("\n")


def years_analysis(df, portal):
    score_from = get_score_column(portal)
    df = parse_score(portal, df)
    games_by_year = add_year(df)
    year = games_by_year['year'].unique()
    x = []
    y = []
    e = []
    z = []
    for i in year:
        df = games_by_year[games_by_year['year'] == i]
        variance = df[score_from].var()
        std = df[score_from].std()
        mean = df[score_from].mean()
        game_amount = len(df[score_from])
        print_years_analysis(i, variance, std, mean, game_amount)
        x.append(df[score_from].mean())
        y.append(i)
        e.append(std)
        z.append(game_amount)
    y.reverse()
    x.reverse()
    z.reverse()
    plot_game_year_analysis(score_from, y, z, games_by_year)


def scenario5(df):
    years_analysis(df, 'RAWG')
    years_analysis(df, 'metacritic')
    years_analysis(df, 'Steam')


def get_list_of_genres(all_games_df):
    return list(set(list(all_games_df['genres'].dropna())))


def get_mean_scores_per_genre(all_games_df):
    non_game_genres = ['Audio Production', 'Photo Editing', 'Video Production',
                       'Web Publishing', 'Animation & Modeling', 'Game Development',
                       'Utilities', 'Design & Illustration']

    steam_reviews_df = all_games_df[['review_score', 'genres']].copy()
    steam_reviews_df.replace(0, np.nan, inplace=True)
    steam_reviews_df.dropna(inplace=True)
    mean_steam_scores = steam_reviews_df.groupby(['genres']).mean().sort_values(by=['review_score']).round(decimals=2)
    mean_steam_scores = mean_steam_scores.drop(non_game_genres)
    mean_steam_scores.to_csv('mean_steam_scores.csv')

    rawg_reviews = all_games_df[['rating', 'genres']].copy()
    rawg_reviews.replace(0.0, np.nan, inplace=True)
    rawg_reviews.dropna(inplace=True)
    mean_rawg_scores = rawg_reviews.groupby(['genres']).mean().sort_values(by=['rating']).round(decimals=2)
    mean_rawg_scores = mean_rawg_scores.drop(non_game_genres)
    mean_rawg_scores.to_csv('mean_rawg_scores.csv')

    metacritic_reviews = all_games_df[['metacritic', 'genres']].copy()
    metacritic_reviews['metacritic'].replace(' ', np.nan, inplace=True)
    metacritic_reviews.dropna(inplace=True)
    # astype() has no inplace parameter, so the converted frame must be
    # assigned back; otherwise the column stays a string and mean() fails
    metacritic_reviews = metacritic_reviews.astype({'metacritic': np.int64})
    mean_metacritic_scores = metacritic_reviews.groupby(['genres']).mean().sort_values(by=['metacritic'])
    mean_metacritic_scores = mean_metacritic_scores.drop(non_game_genres)
    mean_metacritic_scores.to_csv('mean_metacritic_scores.csv')
    return mean_steam_scores, mean_rawg_scores, mean_metacritic_scores


def merge_developer_and_publisher_data(all_games_df):
    all_games_df['developer_steam'] = np.where(all_games_df['developer_steam'].isnull()
                                               & ~all_games_df['developer_rawg'].str.isspace(),
                                               all_games_df['developer_rawg'],
                                               all_games_df['developer_steam'])
    all_games_df['publisher_steam'] = np.where(all_games_df['publisher_steam'].isnull()
                                               & ~all_games_df['publisher_rawg'].isnull(),
                                               all_games_df['publisher_rawg'],
                                               all_games_df['publisher_steam'])
    return all_games_df


def transform_data_for_regressor(all_games_df):
    all_games_df = merge_developer_and_publisher_data(all_games_df)
    games_non_zero_score = all_games_df[all_games_df['review_score'] != 0].copy()
    data = games_non_zero_score[['developer_steam', 'publisher_steam', 'genres', 'released',
                                 'tag1', 'tag2', 'tag3', 'tag4']].copy()
    data_lowercase = data.apply(lambda x: x.astype(str).str.lower())
    data_lowercase['released'] = data_lowercase['released'].apply(
        lambda x: str(x.year) if isinstance(x, pd.Timestamp) else '')
    X_raw = data_lowercase.values
    y = games_non_zero_score['review_score'].values
    enc = OneHotEncoder(handle_unknown='ignore')
    enc.fit(X_raw)
    X = enc.transform(X_raw)
    return X, y


def split_data(X, y):
    return train_test_split(X, y, test_size=0.2, random_state=42)


def train_svr(X_train, X_test, y_train, y_test):
    regr = svm.SVR()
    regr.fit(X_train, y_train)
    y_pred = regr.predict(X_test)
    return mean_absolute_error(y_test, y_pred)


def get_simple_prediction_mean_absolute_error(y_test):
    # Baseline: always predict the mean of the test scores
    y_pred_mean = np.empty(y_test.shape)
    y_pred_mean.fill(y_test.mean())
    return mean_absolute_error(y_test, y_pred_mean)


df = pd.read_csv('all_games.csv')
df = filter_genres(df)
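# --- Usage sketch (added for illustration): the intended end-to-end flow of the
# regression helpers above. It assumes 'all_games.csv' provides the columns used
# by transform_data_for_regressor(); comparing against the constant-mean
# baseline shows whether the SVR adds any predictive value.
if __name__ == '__main__':
    X, y = transform_data_for_regressor(df)
    X_train, X_test, y_train, y_test = split_data(X, y)
    svr_mae = train_svr(X_train, X_test, y_train, y_test)
    baseline_mae = get_simple_prediction_mean_absolute_error(y_test)
    print(f"SVR MAE: {svr_mae:.3f} vs mean-baseline MAE: {baseline_mae:.3f}")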
# Copyright (C) 2019 The Raphielscape Company LLC. # # Licensed under the Raphielscape Public License, Version 1.b (the "License"); # you may not use this file except in compliance with the License. # # """ Userbot module for having some fun with people. """ import asyncio import random import re import time from collections import deque import requests from telethon.tl.functions.users import GetFullUserRequest from telethon.tl.types import MessageEntityMentionName from cowpy import cow from userbot import CMD_HELP,YOUTUBE_API_KEY from userbot.utils import register,admin_cmd # ================= CONSTANT ================= RENDISTR = [ "`I Know Uh ez Rendi Bhay Dont show Your Randi Pesa Here`", "`Jag Suna suna laage Sab #maderchod bhay`", "`you talking behind meh wew uh iz my fan now bhay`", "`Wanna pass in Life Goto BRAZZER.CAM BHAY`", "`Uh iz Pro i iz noob your boob is landi uh are Randi`", "`Sellers Nasa calling Uh bhay😆`", "`Badwoo ki yojna behan bna ke ch*da uh iz badwa its your yozja?`", "`CHAND PE CHADA HAI CHANDYAAN KA GHODA TERA NAAM HAI MANSUR TU HAI BEHAN KA LOD*😂`", "`Jab se dil lga baithe tanhai me maa chu*da baithe wo kho gyi kisi aur ke pyar hum apne hi jaato me aag lga baithe`", "`Chadii ke ander se lal pani kha se ata hai ky teri masuka ka bhosda bhi paan khata hai😂`", "`Sun bhosdi ke By anonyCrew MOHABBAT KE SIWA AUR BHI GAM HAI JAMANE ME BSDK GAND PAHAT JATI HAI PAISA KAMANE ME`", "`Thaan liya tha Sayri nhi krege Unka pichwada dekha Alfaaz nikal gye`", "`Ravivaar ko dekha Chand Ka Tukra Itna Baar Dekha par Jaath na Ukra`", "`Katal kro Tir se Talwar me Ky Rkkha hai Maal Chodo Sari Me Salwar me Ky Rkkha hai`", ] NOOBSTR = [ "`YOU PRO NIMBA DONT MESS WIDH MEH`", "`Haha yes`", "`NOOB NIMBA TRYING TO BE FAMOUS KEK`", "`Sometimes one middle finger isn’t enough to let someone know how you feel. That’s why you have two hands`", "`Some Nimbas need to open their small minds instead of their big mouths`", "`UH DONT KNOW MEH SO STAY AWAY LAWDE`", "`Kysa kysaaaa haaan? Phir MAAR nhi Khayega tu?`", "`Zikr Jinka hota hai galiyo meh woh bhosdika ajj paya gya naliyo me`", ] ZALG_LIST = [["̖", " ̗", " ̘", " ̙", " ̜", " ̝", " ̞", " ̟", " ̠", " ̤", " ̥", " ̦", " ̩", " ̪", " ̫", " ̬", " ̭", " ̮", " ̯", " ̰", " ̱", " ̲", " ̳", " ̹", " ̺", " ̻", " ̼", " ͅ", " ͇", " ͈", " ͉", " ͍", " ͎", " ͓", " ͔", " ͕", " ͖", " ͙", " ͚", " ", ], [" ̍", " ̎", " ̄", " ̅", " ̿", " ̑", " ̆", " ̐", " ͒", " ͗", " ͑", " ̇", " ̈", " ̊", " ͂", " ̓", " ̈́", " ͊", " ͋", " ͌", " ̃", " ̂", " ̌", " ͐", " ́", " ̋", " ̏", " ̽", " ̉", " ͣ", " ͤ", " ͥ", " ͦ", " ͧ", " ͨ", " ͩ", " ͪ", " ͫ", " ͬ", " ͭ", " ͮ", " ͯ", " ̾", " ͛", " ͆", " ̚", ], [" ̕", " ̛", " ̀", " ́", " ͘", " ̡", " ̢", " ̧", " ̨", " ̴", " ̵", " ̶", " ͜", " ͝", " ͞", " ͟", " ͠", " ͢", " ̸", " ̷", " ͡", ]] EMOJIS = [ "😂", "😂", "👌", "✌", "💞", "👍", "👌", "💯", "🎶", "👀", "😂", "👓", "👏", "👐", "🍕", "💥", "🍴", "💦", "💦", "🍑", "🍆", "😩", "😏", "👉👌", "👀", "👅", "😩", "🚰", ] INSULT_STRINGS = [ "`Owww ... Such a stupid idiot.`", "`Don't drink and type.`", "`Command not found. Just like your brain.`", "`Bot rule 420 section 69 prevents me from replying to stupid nubfuks like you.`", "`Sorry, we do not sell brains.`", "`Believe me you are not normal.`", "`I bet your brain feels as good as new, seeing that you never use it.`", "`If I wanted to kill myself I'd climb your ego and jump to your IQ.`", "`You didn't evolve from apes, they evolved from you.`", "`What language are you speaking? 
Cause it sounds like bullshit.`", "`You are proof that evolution CAN go in reverse.`", "`I would ask you how old you are but I know you can't count that high.`", "`As an outsider, what do you think of the human race?`", "`Ordinarily people live and learn. You just live.`", "`Keep talking, someday you'll say something intelligent!.......(I doubt it though)`", "`Everyone has the right to be stupid but you are abusing the privilege.`", "`I'm sorry I hurt your feelings when I called you stupid. I thought you already knew that.`", "`You should try tasting cyanide.`", "`You should try sleeping forever.`", "`Sharam kar bsdwale,kitni bkchodi deta.`", "`Chup Madarhox, bilkul chup..`", "`Me zindagi me chunotiyo se jyda inn jese Chutiyo se pareshaan hu.`", "`Pick up a gun and shoot yourself.`", "`Try bathing with Hydrochloric Acid instead of water.`", "`Go Green! Stop inhaling Oxygen.`", "`God was searching for you. You should leave to meet him.`", "`You should Volunteer for target in an firing range.`", "`Try playing catch and throw with RDX its fun.`", "`Jaana chodu chad jake land chaat`", "`Yaar ajab tere nkhare,gazab tera style hain, gand dhone ki tameez nahi, haath main mobile hai`", "`People like you are the reason we have middle fingers.`", "`When your mom dropped you off at the school, she got a ticket for littering.`", "`You’re so ugly that when you cry, the tears roll down the back of your head…just to avoid your face.`", "`If you’re talking behind my back then you’re in a perfect position to kiss my a**!.`", ] UWUS = [ "(・`ω´・)", ";;w;;", "owo", "UwU", ">w<", "^w^", r"\(^o\) (/o^)/", "( ^ _ ^)∠☆", "(ô_ô)", "~:o", ";-;", "(*^*)", "(>_", "(♥_♥)", "*(^O^)*", "((+_+))", ] FACEREACTS = [ "ʘ‿ʘ", "ヾ(-_- )ゞ", "(っ˘ڡ˘ς)", "(´ж`ς)", "( ಠ ʖ̯ ಠ)", "(° ͜ʖ͡°)╭∩╮", "(ᵟຶ︵ ᵟຶ)", "(งツ)ว", "ʚ(•`", "(っ▀¯▀)つ", "(◠﹏◠)", "( ͡ಠ ʖ̯ ͡ಠ)", "( ఠ ͟ʖ ఠ)", "(∩`-´)⊃━☆゚.*・。゚", "(⊃。•́‿•̀。)⊃", "(._.)", "{•̃_•̃}", "(ᵔᴥᵔ)", "♨_♨", "⥀.⥀", "ح˚௰˚づ ", "(҂◡_◡)", "ƪ(ړײ)‎ƪ​​", "(っ•́。•́)♪♬", "◖ᵔᴥᵔ◗ ♪ ♫ ", "(☞゚ヮ゚)☞", "[¬º-°]¬", "(Ծ‸ Ծ)", "(•̀ᴗ•́)و ̑̑", "ヾ(´〇`)ノ♪♪♪", "(ง'̀-'́)ง", "ლ(•́•́ლ)", "ʕ •́؈•̀ ₎", "♪♪ ヽ(ˇ∀ˇ )ゞ", "щ(゚Д゚щ)", "( ˇ෴ˇ )", "눈_눈", "(๑•́ ₃ •̀๑) ", "( ˘ ³˘)♥ ", "ԅ(≖‿≖ԅ)", "♥‿♥", "◔_◔", "⁽⁽ଘ( ˊᵕˋ )ଓ⁾⁾", "乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍", "( ఠൠఠ )ノ", "٩(๏_๏)۶", "┌(ㆆ㉨ㆆ)ʃ", "ఠ_ఠ", "(づ。◕‿‿◕。)づ", "(ノಠ ∩ಠ)ノ彡( \\o°o)\\", "“ヽ(´▽`)ノ”", "༼ ༎ຶ ෴ ༎ຶ༽", "。゚( ゚இ‸இ゚)゚。", "(づ ̄ ³ ̄)づ", "(⊙.☉)7", "ᕕ( ᐛ )ᕗ", "t(-_-t)", "(ಥ⌣ಥ)", "ヽ༼ ಠ益ಠ ༽ノ", "༼∵༽ ༼⍨༽ ༼⍢༽ ༼⍤༽", "ミ●﹏☉ミ", "(⊙_◎)", "¿ⓧ_ⓧﮌ", "ಠ_ಠ", "(´・_・`)", "ᕦ(ò_óˇ)ᕤ", "⊙﹏⊙", "(╯°□°)╯︵ ┻━┻", r"¯\_(⊙︿⊙)_/¯", "٩◔̯◔۶", "°‿‿°", "ᕙ(⇀‸↼‶)ᕗ", "⊂(◉‿◉)つ", "V•ᴥ•V", "q(❂‿❂)p", "ಥ_ಥ", "ฅ^•ﻌ•^ฅ", "ಥ﹏ಥ", "( ^_^)o自自o(^_^ )", "ಠ‿ಠ", "ヽ(´▽`)/", "ᵒᴥᵒ#", "( ͡° ͜ʖ ͡°)", "┬─┬ ノ( ゜-゜ノ)", "ヽ(´ー`)ノ", "☜(⌒▽⌒)☞", "ε=ε=ε=┌(;*´Д`)ノ", "(╬ ಠ益ಠ)", "┬─┬⃰͡ (ᵔᵕᵔ͜ )", "┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻", r"¯\_(ツ)_/¯", "ʕᵔᴥᵔʔ", "(`・ω・´)", "ʕ•ᴥ•ʔ", "ლ(`ー´ლ)", "ʕʘ̅͜ʘ̅ʔ", "( ゚Д゚)", r"¯\(°_o)/¯", "(。◕‿◕。)", ] RUNSREACTS = [ "`Runs to Thanos`", "`Runs far, far away from earth`", "`Running faster than supercomputer, cuzwhynot`", "`Runs to SunnyLeone`", "Where do you think you're going?", "Huh? what? did they get away?", "ZZzzZZzz... Huh? what? 
oh, just them again, nevermind.", "Get back here!", "Not so fast...", "Look out for the wall!", "Don't leave me alone with them!!", "You run, you die.", "Jokes on you, I'm everywhere", "You're gonna regret that...", "You could also try /kickme, I hear that's fun.", "Go bother someone else, no-one here cares.", "You can run, but you can't hide.", "Is that all you've got?", "I'm behind you...", "You've got company!", "We can do this the easy way, or the hard way.", "You just don't get it, do you?", "Yeah, you better run!", "Please, remind me how much I care?", "I'd run faster if I were you.", "That's definitely the droid we're looking for.", "May the odds be ever in your favour.", "Famous last words.", "And they disappeared forever, never to be seen again.", "\"Oh, look at me! I'm so cool, I can run from a bot!\" - this person", "Yeah yeah, just tap /kickme already.", "Here, take this ring and head to Mordor while you're at it.", "Legend has it, they're still running...", "Unlike Harry Potter, your parents can't protect you from me.", "Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might " "be the next Vader.", "Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.", "Legend has it, they're still running.", "Keep it up, not sure we want you here anyway.", "You're a wiza- Oh. Wait. You're not Harry, keep moving.", "NO RUNNING IN THE HALLWAYS!", "Hasta la vista, baby.", "Who let the dogs out?", "It's funny, because no one cares.", "Ah, what a waste. I liked that one.", "Frankly, my dear, I don't give a damn.", "My milkshake brings all the boys to yard... So run faster!", "You can't HANDLE the truth!", "A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.", "Hey, look at them! They're running from the inevitable banhammer... Cute.", "Han shot first. So will I.", "What are you running after, a white rabbit?", "As The Doctor would say... RUN!", "`Running a marathon...there's an app for that.`", ] RAPE_STRINGS = [ "`Rape Done Drink The Cum`", "`EK baat yaad rkhio, Chut ka Chakkar matlab maut se takkar`", "`The user has been successfully raped`", "`Dekho Bhaiyya esa hai! Izzat bachailo apni warna Gaand maar lenge tumhari`", "`Relax your Rear, ders nothing to fear,The Rape train is finally here`", "`Rape coming... Raped! haha 😆`", "`Kitni baar Rape krvyega mujhse?`", "`Tu Randi hai Sabko pta hai😂`", "`Don't rape too much bossdk, else problem....`", "`Tu sasti rendi hai Sabko pta hai😂`", "`Lodu Andha hai kya Yaha tera rape ho raha hai aur tu abhi tak yahi gaand mara raha hai lulz`", ] ABUSE_STRINGS = [ "`Chutiya he rah jaye ga`", "`Ja be Gaandu`", "`Muh Me Lega Bhosdike ?`", "`Kro Gandu giri kam nhi toh Gand Maar lenge tumhari hum😂`", "`Suno Lodu Jyda muh na chalo be muh me lawda pel Diyaa jayega`", "`Sharam aagyi toh aakhe juka lijia land me dam nhi hai apke toh Shilajit kha lijia`", "`Kahe Rahiman Kaviraaj C**t Ki Mahima Aisi,L**d Murjha Jaaye Par Ch**t Waisi Ki Waisi`", "`Chudakkad Raand Ki Ch**T Mein Pele L*Nd Kabeer, Par Aisa Bhi Kya Choda Ki Ban Gaye Fakeer`", ] GEY_STRINGS = [ "`you gey bsdk`", "`you gey`", "`you gey in the house`", "`you chakka`", "`Bhago BC! 
Chakka aya`", "`you gey gey gey gey gey gey gey gey`", "`you gey go away`", ] PRO_STRINGS = [ "`This gey is pro as phack.`", "`Proness Lebel: 6969696969`", "`Itna pro banda dekhlia bc, ab to marna hoga.`", "`U iz pro but i iz ur DAD, KeK`", "`NOOB NIMBA TRYING TO BE FAMOUS KEK`", "`Sometimes one middle finger isn’t enough to let someone know how you feel. That’s why you have two hands`", "`Some Nimbas need to open their small minds instead of their big mouths`", "`UH DONT KNOW MEH SO STAY AWAY LAWDE`", "`Kysa kysaaaa haaan? Phir MAAR nhi Khayega tu?`", "`Zikr Jinka hota hai galiyo meh woh bhosdika ajj paya gya naliyo me`", ] CHU_STRINGS = [ "`Taare hai Asmaan me very very bright jaat na jla bskd dekh le apni hight.`", "`jindagi ki na toote lari iski lulli hoti nhi khadi`", "`Kbhi kbhi meri dil me khyaal ata hai ayse chutiyo ko kon paida kr jata hai😂.`", "`Saawan ka mahina pawan kare shor jake gand mara bskd kahi aur.`", "`Dil ke armaa ansuon me beh jaye tum bskd ke chutiye hi reh gye.`", "`Ishq Se Tabiyat Ne Zeest Ka Mazaa aya maine is lodu ko randi khane me paya.`", "`Mirza galib ki yeh khani hai tu bhosdika hai yeh sab ki jubani hai.`", ] FUK_STRINGS = [ "`It's better to let someone think you are an Idiot than to open your mouth and prove it.`", "`Talking to a liberal is like trying to explain social media to a 70 years old`", "`CHAND PE HAI APUN LAWDE.`", "`Pehle main tereko chakna dega, fir daru pilayega, fir jab aap dimag se nahi L*nd se sochoge, tab bolega..`", "`Pardhan mantri se number liya, parliament apne :__;baap ka hai...`", "`Cachaa Ooo bhosdi wale Chacha`", "`Aaisi Londiya Chodiye, L*nd Ka Aapa Khoye, Auro Se Chudi Na Ho, Biwi Wo Hi Hoye`", "`Nachoo Bhosdike Nachoo`", "`Jinda toh jaat ke baal bhi hai`", "`Sab ko pta tu randi ka baccha hai (its just a joke)`", ] THANOS_STRINGS = [ "`Mashoor Rand, Ne Arz Kiya Hai. Aane Wale Aate Hai, Jaane Wale Jaate Hai. Yaade Bas Unki Reh Jaati Hai, Jo G**Nd Sujaa Ke Jaate Hai`", "`Pani kam hai matkey me ga*d mardunga teri ek jatke me`", "`Aand kitne bhi bade ho, lund ke niche hi rehte hai`", "`Tum Ameer hum gareeb hum jhopdiwale Tum bhosiwale`", "`Sisi Bhari Gulab ki padi palang ke pass chodne wale chod gye ab q baitha udaas`", "`Phuloo Ka Raja Gulaab Kaato me Rehta hai Jeewan ka Nirmata jaato me rehta hai😂`", "`Chude hue maal ko yaad mt krna Jo Chut na de usse kabhi friyad mt karna jise chudna hai wo chud ke rhegi bekar me muth maar ke apni jindagi barbaad mt krna`", "`Gand mare gandu Chut mare Chutiya Sabse accha mutti 2 mint me chutti😛`", "`Marzi Ka Sex Pap Nahi Hota.. Piche Se Dalne Wala Kabhi Baap Nahi Hota.. Condom Zarur Lagana Mere Dost Qki.. 
Sex K Waqt Popat Ke Pass Dimag Nahi Hota.`", "`Uss Ne Hothon Se Chhu Kar Lowd* Pe Nasha Kar Diya; Lu*D Ki Baat To Aur Thi, Uss Ne To Jhato* Ko Bhi Khada Kar Diya!`", "`Tu Chadha Pahad Pr Krke Chutadd Chode, m bola Utr Bhn K Lode`", "`Iss Bachkchodi ki Duniya m Tu gya Bhatak, Gaand m chadhai mooli patte gye atak`", "`Jitna Sajegi utna Bjegi!`", ] ABUSEHARD_STRING = [ "`Madarchod Randi ke bacche.Oye bosdike madarchod bhen ke lode tere gand me lohe ka danda garam karke dalu randwe tujhetho gali ke kutte gand pe chut rakh ke katenge me bata raha hu tere lode pe madhu makkhi Katelode ke ando pe Road roller chale tu kab bathroom me muthne Jaye tho Tera loda ghir Jaye fir tere ando me se lizard ke bacche nikle teko kidnap Kare aur childporn banaye maa ke chuttad ke lode tere saat Johnny sins rape Kare aur jab wo teko anal de tab loda andar fas Jaye bkl tere jhaat pe waxing karunga me dhek lio fir jab tu chillayega na tab tere muh me Mai gai ka gobar dalunga sale tere gand ke balo pe tel laga ke jala du me teko Anaconda leke gand me dalu tho muh se nikle maa ke lode hamesha chutiyo jaisa bartav kartha he tu maa ke Dai chawal drugs tere gand Me dalunga thi tatti nahi nikle maa darchod kabhi teko Marne ka mouka mil gaya na tho bas I'll do my best to get that tatti outof you aur tere jaise chutio ko is duniya me jagaha bhi nahi maa ke lode bandarchod tere gand me chitiya Kate wo bhi bullet ants maadarchod samj nahi aaraha tere baap NE teko kya khake paida kiya Tha kesa chutiya he tu rand ke bacche teko shadi me khana khane na mile teko gand pe 4 thappad mare sab log aur blade se likhe I want anal madarchod bosdike maccharki tatte ke baal chutiye maa ke chut pe ghode ka Lund tere gand me jaltha hu koila Dale bhen ke lode MAA KI CHUT MAI TALWAR DUNGA BC CHUT FAT JAEGI AUR USME SE ITNA KHOON NIKLEGA MZA AJAEGA DEKHNE KA SALE MAA KE BHOSDE SE BAHR AJA FIR BAAP SE ZUBAN DA TERI MAA KI CHUT CHOD CHOD KE BHOSDABNADU MADARCHOD AUR USKE UPAR CENENT LAGADU KI TERE JESA GANDU INSAAN KABHI BAHR NA A SKE ESI GANDI CHUT MAI SE LODA LASUN MADRCHOD TERI MAA KI CHUT GASTI AMA KA CHUTIA BACHA TERI MAA KO CHOD CHOD K PAGAL KAR DUNGA MAA K LODY KISI SASTIII RANDII K BACHY TERI MAA KI CHOOT MAIN TEER MAARUN GANDU HARAMI TERI COLLEGE JATI BAJI KA ROAD PEY RAPE KARONGANDU KI OLAAD HARAM KI NASAL PAPA HUN TERA BHEN PESH KAR AB PAPA KO TERI MAA KKALE KUSS MAIN KIS`", "`Main roz teri behno ki banjar chut me apna lawda daalke andar haryali lata tha magar aaj unke ke baare me sunke mujhe bhut afsos huwa..ki unko ab bada loudha chahye..ab mera balatkaaari lawda lagataar 4 ghante tk apne muh me kon rakhega..vo teri behne hi thi jo apni kaali magar rasilli chut mere saamne khol deti aur zameen pe naagin ki tarah rengne lgti thi jaise ki kisine unki chut pe naariyal tod diya ho vo b bada wala mumbai ka naariyal..apni chennal maa ko b nhi bhej rahe mere paas to main kaixe tum logo se vaada karu ki main teri maa chodd dungaw..ab agar tun sach me chahta hai ki main tum dono k mc ki chut me dhammal karu to mera lawda apne muh me rakho aur kaho Sameer hamare sage papa hain... 
Aur agar tb b the apni maa ki kaali chut mere saamne nahi rakhi to tumhare ghar me ghuske tumhari maa ka balatkaar kar dungaw jaixe delhi me huwa tha...ab teri chudi hui kuttiyo ki tarah apni gaand hilaate hue mere aage kalapna mt ni to tumhari fatti bhoxdi me 100 ched karunga`", "`Taare hai Asmaan me very very bright jaat na jla bskd dekh le apni hight.`", "`Zindagi ki na toote lari iski lulli hoti nhi khadi`", "`Kbhi kbhi meri dil me khyaal ata hai ayse chutiyo ko kon paida kr jata hai😂.`", "`Saawan ka mahina pawan kare shor jake gand mara bskd kahi aur.`", "`Dil ke armaa ansuon me beh jaye tum bskd ke chutiye hi reh gye.`", "`Ishq Se Tabiyat Ne Zeest Ka Mazaa aya maine is lodu ko randi khane me paya.`", "`Mirza galib ki yeh khani hai tu bhosdika hai yeh sab ki jubani hai.`", "`Mashoor Rand, Ne Arz Kiya Hai. Aane Wale Aate Hai, Jaane Wale Jaate Hai. Yaade Bas Unki Reh Jaati Hai, Jo G**Nd Sujaa Ke Jaate Hai`", "`Pani kam hai matke me gand marlunga jhatke me.`", "`Aand kitne bhi bade ho, lund ke niche hi rehte hai`", "`Tum Ameer hum gareeb hum jhopdiwale Tum bhosiwale`", "`Sisi Bhari Gulab ki padi palang ke pass chodne wale chod gye ab q baitha udaas`", "`Phuloo Ka Raja Gulaab Kaato me Rehta hai Jeewan ka Nirmata jaato me rehta hai😂`", "`Chude hue maal ko yaad mt krna Jo Chut na de usse kabhi friyad mt karna jise chudna hai wo chud ke rhegi bekar me muth maar ke apni jindagi barbaad mt krna`", "`Gand mare gandu Chut mare Chutiya Sabse accha mutti 2 mint me chutti😛`", "`Marzi Ka Sex Pap Nahi Hota.. Piche Se Dalne Wala Kabhi Baap Nahi Hota.. Condom Zarur Lagana Mere Dost Qki.. Sex K Waqt Popat Ke Pass Dimag Nahi Hota.`", "`Uss Ne Hothon Se Chhu Kar Lowd* Pe Nasha Kar Diya; Lu*D Ki Baat To Aur Thi, Uss Ne To Jhato* Ko Bhi Khada Kar Diya!`", ] HELLOSTR = [ "`Hi !`", "`‘Ello, gov'nor!`", "`What’s crackin’?`", "`‘Sup, homeslice?`", "`Howdy, howdy ,howdy!`", "`Hello, who's there, I'm talking.`", "`You know who this is.`", "`Yo!`", "`Whaddup.`", "`Greetings and salutations!`", "`Hello, sunshine!`", "`Hey, howdy, hi!`", "`What’s kickin’, little chicken?`", "`Peek-a-boo!`", "`Howdy-doody!`", "`Hey there, freshman!`", "`I come in peace!`", "`Ahoy, matey!`", "`Hiya!`", "`Oh retarded gey! Well Hello`", ] SHGS = [ "┐(´д`)┌", "┐(´~`)┌", "┐(´ー`)┌", "┐( ̄ヘ ̄)┌", "╮(╯∀╰)╭", "╮(╯_╰)╭", "┐(´д`)┌", "┐(´∀`)┌", "ʅ(́◡◝)ʃ", "ლ(゚д゚ლ)", "┐(゚~゚)┌", "┐('д')┌", "ლ|^Д^ლ|", "ლ(╹ε╹ლ)", "ლ(ಠ益ಠ)ლ", "┐(‘~`;)┌", "ヘ(´-`;)ヘ", "┐( -“-)┌", "乁༼☯‿☯✿༽ㄏ", "ʅ(´◔౪◔)ʃ", "ლ(•ω •ლ)", "ヽ(゜~゜o)ノ", "ヽ(~~~ )ノ", "┐(~ー~;)┌", "┐(-。ー;)┌", "¯\_(ツ)_/¯", "¯\_(⊙_ʖ⊙)_/¯", "乁ʕ •̀ ۝ •́ ʔㄏ", "¯\_༼ ಥ ‿ ಥ ༽_/¯", "乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ", ] CRI = [ "أ‿أ", "╥﹏╥", "(;﹏;)", "(ToT)", "(┳Д┳)", "(ಥ﹏ಥ)", "(;へ:)", "(T_T)", "(πーπ)", "(T▽T)", "(⋟﹏⋞)", "(iДi)", "(´Д⊂ヽ", "(;Д;)", "(>﹏<)", "(TдT)", "(つ﹏⊂)", "༼☯﹏☯༽", "(ノ﹏ヽ)", "(ノAヽ)", "(╥_╥)", "(T⌓T)", "(༎ຶ⌑༎ຶ)", "(☍﹏⁰)。", "(ಥ_ʖಥ)", "(つд⊂)", "(≖͞_≖̥)", "(இ﹏இ`。)", "༼ಢ_ಢ༽", "༼ ༎ຶ ෴ ༎ຶ༽", ] SLAP_TEMPLATES = [ "{hits} {victim} with a {item}.", "{hits} {victim} in the face with a {item}.", "{hits} {victim} around a bit with a {item}.", "{throws} a {item} at {victim}.", "grabs a {item} and {throws} it at {victim}'s face.", "launches a {item} in {victim}'s general direction.", "starts slapping {victim} silly with a {item}.", "pins {victim} down and repeatedly {hits} them with a {item}.", "grabs up a {item} and {hits} {victim} with it.", "ties {victim} to a chair and {throws} a {item} at them.", "gave a friendly push to help {victim} learn to swim in lava." 
]

ITEMS = [
    "cast iron skillet",
    "large trout",
    "baseball bat",
    "cricket bat",
    "wooden cane",
    "nail",
    "printer",
    "shovel",
    "CRT monitor",
    "physics textbook",
    "toaster",
    "portrait of Richard Stallman",
    "television",
    "five ton truck",
    "roll of duct tape",
    "book",
    "laptop",
    "old television",
    "sack of rocks",
    "rainbow trout",
    "rubber chicken",
    "spiked bat",
    "fire extinguisher",
    "heavy rock",
    "chunk of dirt",
    "beehive",
    "piece of rotten meat",
    "bear",
    "ton of bricks",
]

THROW = [
    "throws",
    "flings",
    "chucks",
    "hurls",
]

HIT = [
    "hits",
    "whacks",
    "fek ke maari",
    "slaps",
    "smacks",
    "bashes",
]

# ===========================================


# @register(outgoing=True, pattern=r"^.(\w+)say (.*)")
@borg.on(admin_cmd(pattern=r"(\w+)say (.*)"))
async def univsaye(cowmsg):
    """ For .cowsay module, userbot wrapper for cow which says things. """
    if not cowmsg.text[0].isalpha() and cowmsg.text[0] not in ("/", "#", "@", "!"):
        arg = cowmsg.pattern_match.group(1).lower()
        text = cowmsg.pattern_match.group(2)

        if arg == "cow":
            arg = "default"
        if arg not in cow.COWACTERS:
            return
        cheese = cow.get_cow(arg)
        cheese = cheese()
        # Replace backticks in the cow output so the monospace block isn't broken
        await cowmsg.edit(f"`{cheese.milk(text).replace('`', '´')}`")


@register(outgoing=True, pattern="^.:/$")
async def kek(keks):
    """ Check yourself ;) """
    if not keks.text[0].isalpha() and keks.text[0] not in ("/", "#", "@", "!"):
        uio = ["/", "\\"]
        for i in range(1, 15):
            # Non-blocking sleep so the event loop stays responsive
            await asyncio.sleep(0.3)
            await keks.edit(":" + uio[i % 2])


@register(pattern="^.slap(?: |$)(.*)", outgoing=True)
async def who(event):
    """ slaps a user, or get slapped if not a reply. """
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        if event.fwd_from:
            return
        replied_user = await get_user(event)
        caption = await slap(replied_user, event)
        try:
            await event.edit(caption)
        except Exception:
            await event.edit(
                "`Can't slap this person, need to fetch some sticks and stones !!`"
            )


async def get_user(event):
    """ Get the user from argument or replied message. """
    if event.reply_to_msg_id:
        previous_message = await event.get_reply_message()
        replied_user = await event.client(
            GetFullUserRequest(previous_message.from_id)
        )
    else:
        user = event.pattern_match.group(1)

        if user.isnumeric():
            user = int(user)

        if not user:
            self_user = await event.client.get_me()
            user = self_user.id

        if event.message.entities is not None:
            probable_user_mention_entity = event.message.entities[0]

            if isinstance(probable_user_mention_entity, MessageEntityMentionName):
                user_id = probable_user_mention_entity.user_id
                replied_user = await event.client(GetFullUserRequest(user_id))
                return replied_user
        try:
            user_object = await event.client.get_entity(user)
            replied_user = await event.client(GetFullUserRequest(user_object.id))
        except (TypeError, ValueError):
            await event.edit("`I don't slap aliens, they ugly AF !!`")
            return None

    return replied_user


async def slap(replied_user, event):
    """ Construct a funny slap sentence !! """
    user_id = replied_user.user.id
    first_name = replied_user.user.first_name
    username = replied_user.user.username

    if username:
        slapped = "@{}".format(username)
    else:
        slapped = f"[{first_name}](tg://user?id={user_id})"

    temp = random.choice(SLAP_TEMPLATES)
    item = random.choice(ITEMS)
    hit = random.choice(HIT)
    throw = random.choice(THROW)

    caption = "..." + temp.format(victim=slapped, item=item, hits=hit, throws=throw)
    return caption


@register(outgoing=True, pattern="^.-_-$")
async def lol(lel):
    """ Ok... """
    if not lel.text[0].isalpha() and lel.text[0] not in ("/", "#", "@", "!"):
        okay = "-_-"
        for _ in range(10):
            okay = okay[:-1] + "_-"
            await lel.edit(okay)


@register(outgoing=True, pattern="^.;_;$")
async def fun(e):
    """ Like `-_-` but crying. """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        t = ";__;"
        for _ in range(10):
            t = t[:-1] + "_;"
            await e.edit(t)


@register(outgoing=True, pattern="^.cry$")
async def cry(e):
    """ y u du dis, i cry everytime !! """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit(random.choice(CRI))


@register(outgoing=True, pattern="^.insult$")
async def insult(e):
    """ I make you cry !! """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit(random.choice(INSULT_STRINGS))


@register(outgoing=True, pattern="^.cp(?: |$)(.*)")
async def copypasta(cp_e):
    """ Copypasta the famous meme """
    if not cp_e.text[0].isalpha() and cp_e.text[0] not in ("/", "#", "@", "!"):
        textx = await cp_e.get_reply_message()
        message = cp_e.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await cp_e.edit("`😂🅱️IvE👐sOME👅text👅for✌️Me👌tO👐MAkE👀iT💞funNy!💦`")
            return

        reply_text = random.choice(EMOJIS)
        # choose a random character in the message to be substituted with 🅱️
        b_char = random.choice(message).lower()
        for owo in message:
            if owo == " ":
                reply_text += random.choice(EMOJIS)
            elif owo in EMOJIS:
                reply_text += owo
                reply_text += random.choice(EMOJIS)
            elif owo.lower() == b_char:
                reply_text += "🅱️"
            else:
                reply_text += owo.upper() if bool(random.getrandbits(1)) else owo.lower()
        reply_text += random.choice(EMOJIS)
        await cp_e.edit(reply_text)


@register(outgoing=True, pattern="^.vapor(?: |$)(.*)")
async def vapor(vpr):
    """ Vaporize everything! """
    if not vpr.text[0].isalpha() and vpr.text[0] not in ("/", "#", "@", "!"):
        reply_text = list()
        textx = await vpr.get_reply_message()
        message = vpr.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await vpr.edit("`Give some text for vapor!`")
            return
        for charac in message:
            if 0x21 <= ord(charac) <= 0x7F:
                # Shift printable ASCII into the full-width Unicode block
                reply_text.append(chr(ord(charac) + 0xFEE0))
            elif ord(charac) == 0x20:
                # Regular space -> ideographic space
                reply_text.append(chr(0x3000))
            else:
                reply_text.append(charac)

        await vpr.edit("".join(reply_text))


@register(outgoing=True, pattern="^.repo$")
async def source(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit(
            "**Ask From Creator Of This Bot, Cozz This iz For Peru Users Only**"
            " [『𝙆𝘼𝙍𝙈𝘼』](@Karmaboii)"
        )


@register(outgoing=True, pattern="^.string$")
async def string_session(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("[𝐌𝐀𝐊𝐄 𝐒𝐓𝐑𝐈𝐍𝐆 𝐒𝐄𝐒𝐒𝐈𝐎𝐍](https://Karmaboii.karmabot.repl.run)")


@register(outgoing=True, pattern="^.str(?: |$)(.*)")
async def stretch(stret):
    """ Stretch it. """
    if not stret.text[0].isalpha() and stret.text[0] not in ("/", "#", "@", "!"):
        textx = await stret.get_reply_message()
        message = stret.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await stret.edit("`GiiiiiiiB sooooooomeeeeeee teeeeeeext!`")
            return
        count = random.randint(3, 10)
        # Repeat every vowel `count` times
        reply_text = re.sub(r"([aeiouAEIOUаеиоуюяыэё])", r"\1" * count, message)
        await stret.edit(reply_text)


@register(outgoing=True, pattern="^.zal(?: |$)(.*)")
async def zal(zgfy):
    """ Invoke the feeling of chaos. """
    if not zgfy.text[0].isalpha() and zgfy.text[0] not in ("/", "#", "@", "!"):
        reply_text = list()
        textx = await zgfy.get_reply_message()
        message = zgfy.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await zgfy.edit(
                "`gͫ ̆ i̛ ̺ v͇̆ ȅͅ a̢ͦ s̴̪ c̸̢ ä̸ rͩͣ y͖͞ t̨͚ é̠ x̢͖ t͔͛`"
            )
            return
        for charac in message:
            if not charac.isalpha():
                reply_text.append(charac)
                continue
            # Stack up to three random combining marks on each letter
            for _ in range(0, 3):
                randint = random.randint(0, 2)
                charac = charac.strip() + random.choice(ZALG_LIST[randint]).strip()
            reply_text.append(charac)
        await zgfy.edit("".join(reply_text))


@register(outgoing=True, pattern="^.hi$")
async def hoi(hello):
    """ Greet everyone! """
    if not hello.text[0].isalpha() and hello.text[0] not in ("/", "#", "@", "!"):
        await hello.edit(random.choice(HELLOSTR))


@register(outgoing=True, pattern="^.pkill$")
async def killing(killed):
    """ Dont Kill Too much -_- """
    if not killed.text[0].isalpha() and killed.text[0] not in ("/", "#", "@", "!"):
        if await killed.get_reply_message():
            await killed.edit(
                "`Targeted user killed by Headshot 😈......`\n"
                "#Sad_Reacts_Onli\n"
            )


@register(outgoing=True, pattern="^.bt$")
async def bluetext(bte):
    """ Believe me, you will find this useful. """
    if not bte.text[0].isalpha() and bte.text[0] not in ("/", "#", "@", "!"):
        if await bte.get_reply_message():
            await bte.edit(
                "`BLUETEXT MUST CLICK.`\n"
                "`Are you a stupid animal which is attracted to colours?`"
            )


@register(outgoing=True, pattern="^.rape$")
async def raping(raped):
    """ Dont Rape Too much -_- """
    if not raped.text[0].isalpha() and raped.text[0] not in ("/", "#", "@", "!"):
        await raped.edit(random.choice(RAPE_STRINGS))


@register(outgoing=True, pattern="^.pro$")
async def proo(pros):
    """ String for Pros only -_- """
    if not pros.text[0].isalpha() and pros.text[0] not in ("/", "#", "@", "!"):
        await pros.edit(random.choice(PRO_STRINGS))


@register(outgoing=True, pattern="^.fuk$")
async def fuk(fuks):
    """ String for fuk only -_- """
    if not fuks.text[0].isalpha() and fuks.text[0] not in ("/", "#", "@", "!"):
        # Pick from FUK_STRINGS (previously this indexed FUK_STRINGS with
        # an index drawn from CHU_STRINGS' length)
        await fuks.edit(random.choice(FUK_STRINGS))


@register(outgoing=True, pattern="^.chu$")
async def chutiya(chus):
    """ String for Chu only -_- """
    if not chus.text[0].isalpha() and chus.text[0] not in ("/", "#", "@", "!"):
        await chus.edit(random.choice(CHU_STRINGS))


@register(outgoing=True, pattern="^.thanos$")
async def thanos(thanos):
    """ String for thanos only -_- """
    if not thanos.text[0].isalpha() and thanos.text[0] not in ("/", "#", "@", "!"):
        await thanos.edit(random.choice(THANOS_STRINGS))


@register(outgoing=True, pattern="^.hardabuse$")
async def fuckedd(abusehard):
    """ Dont Use this Too much bsdk -_- """
    if not abusehard.text[0].isalpha() and abusehard.text[0] not in ("/", "#", "@", "!"):
        await abusehard.edit(random.choice(ABUSEHARD_STRING))


@register(outgoing=True, pattern="^.gey$")
async def geys(geyed):
    """ Use only for gey ppl -_- """
    if not geyed.text[0].isalpha() and geyed.text[0] not in ("/", "#", "@", "!"):
        await geyed.edit(random.choice(GEY_STRINGS))


@register(outgoing=True, pattern="^.abuse$")
async def abusing(abused):
    """ Dont Abuse Too much bsdk -_- """
    if not abused.text[0].isalpha() and abused.text[0] not in ("/", "#", "@", "!"):
        await abused.edit(random.choice(ABUSE_STRINGS))


@register(outgoing=True, pattern="^.owo(?: |$)(.*)")
async def faces(owo):
    """ UwU """
    if not owo.text[0].isalpha() and owo.text[0] not in ("/", "#", "@", "!"):
        textx = await owo.get_reply_message()
        message = owo.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await owo.edit("` UwU no text given! `")
            return
        reply_text = re.sub(r"(r|l)", "w", message)
        reply_text = re.sub(r"(R|L)", "W", reply_text)
        reply_text = re.sub(r"n([aeiou])", r"ny\1", reply_text)
        reply_text = re.sub(r"N([aeiouAEIOU])", r"Ny\1", reply_text)
        reply_text = re.sub(r"\!+", " " + random.choice(UWUS), reply_text)
        reply_text = reply_text.replace("ove", "uv")
        reply_text += " " + random.choice(UWUS)
        await owo.edit(reply_text)


@register(outgoing=True, pattern="^.react$")
async def react_meme(react):
    """ Make your userbot react to everything. """
    if not react.text[0].isalpha() and react.text[0] not in ("/", "#", "@", "!"):
        await react.edit(random.choice(FACEREACTS))


@register(outgoing=True, pattern="^.shg$")
async def shrugger(shg):
    r""" ¯\_(ツ)_/¯ """
    if not shg.text[0].isalpha() and shg.text[0] not in ("/", "#", "@", "!"):
        await shg.edit(random.choice(SHGS))


@register(outgoing=True, pattern="^.noob$")
async def metoo(hahayes):
    """ Haha yes """
    if not hahayes.text[0].isalpha() and hahayes.text[0] not in ("/", "#", "@", "!"):
        await hahayes.edit(random.choice(NOOBSTR))


@register(outgoing=True, pattern="^.rendi$")
async def rendi(hahayes):
    """ Haha yes """
    if not hahayes.text[0].isalpha() and hahayes.text[0] not in ("/", "#", "@", "!"):
        await hahayes.edit(random.choice(RENDISTR))


@register(outgoing=True, pattern="^.oof$")
async def Oof(e):
    """ Ooooof """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        t = "Oof"
        for _ in range(15):
            t = t[:-1] + "of"
            await e.edit(t)


@register(outgoing=True, pattern="^.10iq$")
async def iqless(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("♿")


@register(outgoing=True, pattern="^.mock(?: |$)(.*)")
async def spongemocktext(mock):
    """ Do it and find the real fun. """
    if not mock.text[0].isalpha() and mock.text[0] not in ("/", "#", "@", "!"):
        reply_text = list()
        textx = await mock.get_reply_message()
        message = mock.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await mock.edit("`gIvE sOMEtHInG tO MoCk!`")
            return
        for charac in message:
            if charac.isalpha() and random.randint(0, 1):
                to_app = charac.upper() if charac.islower() else charac.lower()
                reply_text.append(to_app)
            else:
                reply_text.append(charac)
        await mock.edit("".join(reply_text))


@register(outgoing=True, pattern="^.clap(?: |$)(.*)")
async def claptext(memereview):
    """ Praise people! """
    if not memereview.text[0].isalpha() and memereview.text[0] not in ("/", "#", "@", "!"):
        textx = await memereview.get_reply_message()
        message = memereview.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await memereview.edit("`Hah, I don't clap pointlessly!`")
            return
        reply_text = "👏 " + message.replace(" ", " 👏 ") + " 👏"
        await memereview.edit(reply_text)


@register(outgoing=True, pattern="^.smk (.*)")
async def smrk(smk):
    """ Append a smirk ツ to the given text or the replied message. """
    if not smk.text[0].isalpha() and smk.text[0] not in ("/", "#", "@", "!"):
        textx = await smk.get_reply_message()
        message = smk.text
        if message[5:]:
            message = str(message[5:])
        elif textx:
            message = str(textx.message)
        if message == "dele":
            await smk.edit(message + "te the hell" + "ツ")
            await smk.edit("ツ")
        else:
            await smk.edit(message + " ツ")


@register(outgoing=True, pattern="^.lfy (.*)")
async def let_me_google_that_for_you(lmgtfy_q):
    """ Let me Google that for you real quick !! """
    if not lmgtfy_q.text[0].isalpha() and lmgtfy_q.text[0] not in ("/", "#", "@", "!"):
        textx = await lmgtfy_q.get_reply_message()
        query = lmgtfy_q.text
        if query[5:]:
            query = str(query[5:])
        elif textx:
            query = str(textx.message)
        query_encoded = query.replace(" ", "+")
        lfy_url = f"http://lmgtfy.com/?s=g&iie=1&q={query_encoded}"
        # Shorten the lmgtfy link through is.gd
        payload = {"format": "json", "url": lfy_url}
        r = requests.get("http://is.gd/create.php", params=payload)
        await lmgtfy_q.edit(f"[{query}]({r.json()['shorturl']})")
        # BOTLOG, BOTLOG_CHATID and bot come from the `userbot` package
        if BOTLOG:
            await bot.send_message(
                BOTLOG_CHATID,
                "LMGTFY query `" + query + "` was executed successfully",
            )


CMD_HELP.update({
    "memes": ".cowsay\
\nUsage: cow which says things.\
\n\n.milksay\
\nUsage: Weird Milk that can speak\
\n\n:/\
\nUsage: Check yourself ;)\
\n\n-_-\
\nUsage: Ok...\
\n\n;_;\
\nUsage: Like `-_-` but crying.\
\n\n.cp\
\nUsage: Copypasta the famous meme\
\n\n.vapor\
\nUsage: Vaporize everything!\
\n\n.str\
\nUsage: Stretch it.\
\n\n.10iq\
\nUsage: You retard !!\
\n\n.zal\
\nUsage: Invoke the feeling of chaos.\
\n\n.oof\
\nUsage: Ooooof\
\n\n.moon\
\nUsage: kensar moon animation.\
\n\n.clock\
\nUsage: kensar clock animation.\
\n\n.earth\
\nUsage: kensar earth animation.\
\n\n.hi\
\nUsage: Greet everyone!\
\n\n.coinflip <heads/tails>\
\nUsage: Flip a coin !!\
\n\n.owo\
\nUsage: UwU\
\n\n.react\
\nUsage: Make your userbot react to everything.\
\n\n.slap\
\nUsage: reply to slap them with random objects !!\
\n\n.cry\
\nUsage: y u du dis, i cri.\
\n\n.shg\
\nUsage: Shrug at it !!\
\n\n.runs\
\nUsage: Run, run, RUNNN! [`.disable runs`: disable | `.enable runs`: enable]\
\n\n.metoo\
\nUsage: Haha yes\
\n\n.mock\
\nUsage: Do it and find the real fun.\
\n\n.clap\
\nUsage: Praise people!\
\n\n.f <emoji/character>\
\nUsage: Pay Respects.\
\n\n.bt\
\nUsage: Believe me, you will find this useful.\
\n\n.smk <text/reply>\
\nUsage: A shit module for ツ , who cares.\
\n\n.type\
\nUsage: Just a small command to make your keyboard become a typewriter!\
\n\n.lfy <query>\
\nUsage: Let me Google that for you real quick !!\
\n\n.decide\
\nUsage: Make a quick decision.\
\n\n.abusehard\
\nUsage: You already got that! Ain't?.\
\n\n.chu\
\nUsage: In case, the person in front of you is....\
\n\n.fuk\
\nUsage: The only word that can be used fucking everywhere.\
\n\n.thanos\
\nUsage: Try and then Snap.\
\n\n.noob\
\nUsage: Whadya want to know? Are you a NOOB?\
\n\n.pro\
\nUsage: If you think you're pro, try this.\
\n\n.abuse\
\nUsage: Protects you from unwanted peeps.\
\n\n\nThanks to 🅱️ottom🅱️ext🅱️ot (@NotAMemeBot) for some of these."
})
""" BaseFilter classes and exception handling """

import copy
import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Mapping

from asldro.utils.general import map_dict

logger = logging.getLogger(__name__)


class BaseFilterException(Exception):
    """ Exceptions for this module """

    def __init__(self, msg: str):
        """ :param msg: The message to display """
        super().__init__()
        self.msg = msg

    def __str__(self):
        return self.msg


class FilterInputKeyError(Exception):
    """ Used to show an error with a filter's input keys
    e.g. multiple values have been assigned to the same input """


class FilterInputValidationError(Exception):
    """ Used to show an error when running the validation on the filter's
    inputs i.e. when running _validate_inputs() """


class FilterLoopError(Exception):
    """ Used when a loop is detected in the filter chain """

    def __init__(self):
        super().__init__("A loop has been detected in the filters")


FILTER = "filter"
IO_MAP = "io_map"


class BaseFilter(ABC):
    """ An abstract base class for filters.
    All filters should inherit this class """

    def __init__(self, name: str = "Unknown"):
        self.name = name
        self.inputs = {}
        self.outputs = {}

        # A placeholder for inputs before they are bound
        self._i = {}

        # Parent filters (a list of dict {FILTER: Filter, IO_MAP: dict/None})
        self.parent_dict_list = []  # type: List[Dict]

        # Needs to be run (inputs have changed)
        self.needs_run = False

    def __str__(self):
        return self.name

    def add_input(self, key: str, value):
        """ Adds an input with a given key and value.
        If the key is already in the inputs, a FilterInputKeyError is raised
        """
        # Mark this filter as needing to be run
        self.needs_run = True

        if key in self._i:
            raise FilterInputKeyError(
                f"Key: {key} already existing in inputs for {self.name} filter."
            )
        self._i[key] = value

    def add_inputs(
        self,
        input_dict: Mapping[str, Any],
        io_map: Mapping[str, str] = None,
        io_map_optional: bool = False,
    ):
        """
        Adds multiple inputs via a dictionary. Optionally, maps the dictionary
        onto different input keys using an io_map.
        :param input_dict: The input dictionary
        :param io_map: The dictionary used to perform the mapping. All keys
          and values must be strings. For example:
          {
              "one": "two",
              "three": "four",
          }
          will map input keys of "one" to "two" AND "three" to "four". If
          io_map is None, no mapping will be performed.
        :param io_map_optional: If this is False, a KeyError will be raised
          if the keys in the io_map are not found in the input_dict.
        :raises KeyError: if keys required in the mapping are not found in
          the input_dict
        """
        if io_map is None:
            mapped_inputs = {**input_dict}
        else:
            mapped_inputs = map_dict(
                input_dict=input_dict, io_map=io_map, io_map_optional=io_map_optional
            )

        for key, value in mapped_inputs.items():
            self.add_input(key, value)

    def add_child_filter(self, child: "BaseFilter", io_map: Mapping[str, str] = None):
        """ See documentation for `add_parent_filter` """
        child.add_parent_filter(parent=self, io_map=io_map)

    def add_parent_filter(self, parent: "BaseFilter", io_map: Mapping[str, str] = None):
        """ Add a parent filter (the inputs of this filter will be connected
        to the outputs of the parents).
        By default, ALL outputs of the parent will be directly mapped to the
        inputs of this filter using the same KEY.
        This can be overridden by supplying io_map. e.g.
        io_map = {
            "output_key1": "input_key1",
            "output_key2": "input_key2",
            ...
        }
        will map the output of the parent filter with a key of "output_key1"
        to the input of this filter with a key of "input_key1" etc.
        If `io_map` is defined, ONLY those keys which are explicitly listed
        are mapped (the others are ignored)
        """
        # Mark this filter as needing to be run
        self.needs_run = True

        # Search the parents to see if this parent already exists. If so, update it.
        new_parent_dict = {FILTER: parent, IO_MAP: io_map}

        for i, old_parent_dict in enumerate(self.parent_dict_list):
            if old_parent_dict[FILTER] == parent:
                self.parent_dict_list[i] = new_parent_dict
                return

        # Otherwise, add parent as a new parent
        self.parent_dict_list.append(new_parent_dict)

    def run(self, history=None):
        """
        Calls `run` on all parents (recursively) to make sure they are
        up-to-date. Then maps the parents' outputs to inputs for this filter.
        Then calls the _run method on this filter.
        """
        # Don't run anything if the inputs haven't changed
        if not self.needs_run:
            return

        if history is None:
            history = []

        # Check we haven't already been to this filter (recursion == bad)
        if self in history:
            raise FilterLoopError()
        history.append(self)

        # Run all of the parent filters
        for parent_dict in self.parent_dict_list:
            parent_dict[FILTER].run(history=history)

        logger.info("Running %s", self)

        # Populate all of the inputs to this filter, starting with a
        # shallow copy of the inputs added with `add_input`
        self.inputs = copy.copy(self._i)

        # Map all inputs from parent filters
        for parent_dict in self.parent_dict_list:
            for output_key in parent_dict[FILTER].outputs:
                if parent_dict[IO_MAP] is None:
                    # Directly map the parent outputs to inputs
                    input_key = output_key
                else:
                    # Use the io_map lookup to check the output_key exists
                    if output_key not in parent_dict[IO_MAP]:
                        input_key = None
                    else:
                        input_key = parent_dict[IO_MAP][output_key]

                if input_key is not None:
                    # We have a mapping from output to input.
                    # Check the input_key does not already exist.
                    if input_key in self.inputs:
                        raise FilterInputKeyError(
                            f"A mapping is defined: "
                            f'from filter "{parent_dict[FILTER]}" '
                            f'with output key "{output_key}" '
                            f'to filter "{self}" '
                            f'with input key "{input_key}". '
                            "However, this input has already been defined."
                        )
                    self.inputs[input_key] = parent_dict[FILTER].outputs[output_key]

        # Validate the inputs to this filter
        self._validate_inputs()

        # Run this filter
        self._run()

        # Mark this filter as not needing to be run
        self.needs_run = False

    @abstractmethod
    def _run(self):
        """
        Takes all of the inputs and creates the outputs.
        THIS SHOULD BE OVERWRITTEN IN THE SUBCLASS
        """

    @abstractmethod
    def _validate_inputs(self):
        """
        Validates all of the inputs. Should raise a FilterInputValidationError
        if the validation is not passed.
        THIS SHOULD BE OVERWRITTEN IN THE SUBCLASS
        """
""" BaseFilter classes and exception handling """ import copy import logging from typing import List, Mapping, Any from abc import ABC, abstractmethod from asldro.utils.general import map_dict logger = logging.getLogger(__name__) class BaseFilterException(Exception): """ Exceptions for this modules """ def __init__(self, msg: str): """ :param msg: The message to display """ super().__init__() self.msg = msg def __str__(self): return self.msg class FilterInputKeyError(Exception): """ Used to show an error with a filter's input keys e.g. multiple values have been assigned to the same input """ class FilterInputValidationError(Exception): """ Used to show an error when running the validation on the filter's inputs i.e. when running _validate_inputs() """ class FilterLoopError(Exception): """ Used when a loop is detected in the filter chain """ def __init__(self): super().__init__("A loop has been detected in the filters") FILTER = "filter" IO_MAP = "io_map" class BaseFilter(ABC): """ An abstract base class for filters. All filters should inherit this class """ def __init__(self, name: str = "Unknown"): self.name = name self.inputs = {} self.outputs = {} # A placeholder for inputs before they are bound self._i = {} # Parent filters (a list of dict {FILTER: Filter, IO_MAP: dict/None}) self.parent_dict_list = [] # type: List[Dict] # Needs to be run (inputs have changed) self.needs_run = False def __str__(self): return self.name def add_input(self, key: str, value): """ Adds an input with a given key and value. If the key is already in the inputs, an RuntimeError is raised """ # Mark this filter as needing to be run self.needs_run = True if key in self._i: raise FilterInputKeyError( f"Key: {key} already existing in inputs for {self.name} filter." ) self._i[key] = value def add_inputs( self, input_dict: Mapping[str, Any], io_map: Mapping[str, str] = None, io_map_optional: bool = False, ): """ Adds multiple inputs via a dictionary. Optionally, maps the dictionary onto different input keys using an io_map. :param input_dict: The input dictionary :param io_map: The dictionary used to perform the mapping. All keys and values must be strings. For example: As an example: { "one": "two", "three": "four" } will map inputs keys of "one" to "two" AND "three" to "four". If io_map is None, no mapping with be performed. :param io_map_optional: If this is False, a KeyError will be raised if the keys in the io_map are not found in the input_dict. :raises KeyError: if keys required in the mapping are not found in the input_dict """ mapped_inputs = {} if io_map is None: mapped_inputs = {**input_dict} else: mapped_inputs = map_dict( input_dict=input_dict, io_map=io_map, io_map_optional=io_map_optional ) for key, value in mapped_inputs.items(): self.add_input(key, value) def add_child_filter(self, child: "BaseFilter", io_map: Mapping[str, str] = None): """ See documentation for `add_parent_filter` """ child.add_parent_filter(parent=self, io_map=io_map) def add_parent_filter(self, parent: "BaseFilter", io_map: Mapping[str, str] = None): """ Add a parent filter (the inputs of this filter will be connected to the output of the parents). By default, the ALL outputs of the parent will be directly mapped to the inputs of this filter using the same KEY. This can be overridden by supplying io_map. e.g. io_map = { "output_key1":"input_key1", "output_key2":"input_key2", ... } will map the output of the parent filter with a key of "output_key1" to the input of this filter with a key of "input_key1" etc. 
        If `io_map` is defined, ONLY those keys which are explicitly listed
        are mapped (the others are ignored).
        """
        # Mark this filter as needing to be run
        self.needs_run = True

        # Search the parents to see if this parent already exists. If so, update it.
        new_parent_dict = {FILTER: parent, IO_MAP: io_map}

        for i, old_parent_dict in enumerate(self.parent_dict_list):
            if old_parent_dict[FILTER] == parent:
                self.parent_dict_list[i] = new_parent_dict
                return

        # Otherwise, add parent as a new parent
        self.parent_dict_list.append(new_parent_dict)

    def run(self, history=None):
        """
        Calls `run` on all parent filters (recursively) to make sure they are
        up-to-date. Then maps the parents' outputs to inputs for this filter.
        Finally, calls the _run method on this filter.
        """
        # Don't run anything if the inputs haven't changed
        if not self.needs_run:
            return

        if history is None:
            history = []

        # Check we haven't already been to this filter (recursion == bad)
        if self in history:
            raise FilterLoopError()
        history.append(self)

        # Run all of the parent filters
        for parent_dict in self.parent_dict_list:
            parent_dict[FILTER].run(history=history)

        logger.info("Running %s", self)

        # Populate all of the inputs to this filter:
        # shallow copy the inputs added with `add_input`
        self.inputs = copy.copy(self._i)

        # Map all inputs from parent filters
        for parent_dict in self.parent_dict_list:
            for output_key in parent_dict[FILTER].outputs:
                if parent_dict[IO_MAP] is None:
                    # Directly map the parent outputs to inputs
                    input_key = output_key
                else:
                    # Use the io_map lookup to check the output_key exists
                    if output_key not in parent_dict[IO_MAP]:
                        input_key = None
                    else:
                        input_key = parent_dict[IO_MAP][output_key]

                if input_key is not None:
                    # We have a mapping from output to input.
                    # Check the input_key does not already exist.
                    if input_key in self.inputs:
                        raise FilterInputKeyError(
                            f"A mapping is defined: "
                            f'from filter "{parent_dict[FILTER]}" '
                            f'with output key "{output_key}" '
                            f'to filter "{self}" '
                            f'with input key "{input_key}". '
                            "However, this input has already been defined."
                        )
                    self.inputs[input_key] = parent_dict[FILTER].outputs[output_key]

        # Validate the inputs to this filter
        self._validate_inputs()

        # Run this filter
        self._run()

        # Mark this filter as not needing to be run
        self.needs_run = False

    @abstractmethod
    def _run(self):
        """
        Takes all of the inputs and creates the outputs.
        THIS SHOULD BE OVERRIDDEN IN THE SUBCLASS
        """

    @abstractmethod
    def _validate_inputs(self):
        """
        Validates all of the inputs. Should raise a FilterInputValidationError
        if the validation is not passed.
        THIS SHOULD BE OVERRIDDEN IN THE SUBCLASS
        """
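# --- Usage sketch (illustrative, not part of the module) ---
# A minimal concrete filter and a two-filter chain, assuming only the
# BaseFilter API defined above. `AddOneFilter` is a hypothetical example
# class, guarded so it never runs on import.
if __name__ == "__main__":

    class AddOneFilter(BaseFilter):
        def _run(self):
            self.outputs["value"] = self.inputs["value"] + 1

        def _validate_inputs(self):
            if "value" not in self.inputs:
                raise FilterInputValidationError("'value' input is required")

    first = AddOneFilter(name="first")
    first.add_input("value", 1)
    second = AddOneFilter(name="second")
    # By default the parent's "value" output maps to this filter's
    # "value" input; an io_map could rename it instead.
    second.add_parent_filter(first)
    second.run()  # runs `first` (recursively), then `second`
    assert second.outputs["value"] == 3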
import os import re WIKI_PATH = "../../../agk-steam-plugin.wiki/" error_count = 0 """ Page Tag Information: @page The rest of the line is the page name/Steam class for the file. Additional lines are for the page description. Method Tag Information @desc The method description. Can be multiple lines. @param First word is the parameter name. The rest is the description. One per parameter. @param-url The url for the parameter. (optional) @return The return description. @return-url The reference url for the return value. Only one allowed. (optional) @plugin-name The method name for Commands.txt. Used to create method overrides. (optional) @callback-type Optional, but must be: 'list' - A callback that returns a list. 'bool' - A callback that is a boolean. 'callresult' - A callresult function. @callback-getters Comma-delimited list of methods for getting callback information. Can be multiple lines. @callbacks Obsolete. @url Reference urls. Multiple tags allowed. """ _METHODS_WITH_NO_URL = [ "GetCommandLineArgsJSON", "GetSteamPath", "IsSteamEmulated", "SetFileAttributes", "DeleteCallResult", "IsSteamIDValid", "GetAccountID", "GetSteamID64", "GetHandleFromSteamID64", ] def report_error(text): global error_count error_count += 1 print("ERROR:\t" + text) class ExportedMethodLoader: CPP_TO_TIER1_VAR_TYPES = { 'void': None, 'int': 'integer', 'float': 'float', 'char*': 'string', 'constchar*': 'string', } def __init__(self, path: str): # self._filename = filename self.pages = [] self.method_count = 0 for (root, _, filenames) in os.walk(path): for filename in [f for f in filenames if f.endswith('.cpp') or f.endswith('.h')]: with open(os.path.join(root, filename), 'r') as f: # print(f"Loading {filename}...") lines = ''.join(f.readlines()) pages = self._get_page_tags(lines) methods = self._get_methods(lines) if len(methods): self.method_count += len(methods) self._validate_callback_tags(methods) self._assign_methods_to_pages(pages, methods) # check urls for method in methods: if method["name"] in _METHODS_WITH_NO_URL: if 'url' in method: report_error(f"{method['name']} has an unexpected URL.") else: if 'url' not in method: report_error(f"{method['name']} has no URL.") self._merge_page_list(pages) self.page_count = len(self.pages) # Alphabetize the pages self.pages.sort(key=lambda page: page['name']) def _merge_page_list(self, pages): for page in pages: page_index = next((i for i, p in enumerate(self.pages) if p["name"] == page['name']), None) if page_index is None: self.pages.append(page) else: self.pages[page_index]['methods'].extend(page['methods']) @classmethod def _get_tier_1_var_type(cls, cpp_var_type): return cls.CPP_TO_TIER1_VAR_TYPES[cpp_var_type.replace(' ', '')] @classmethod def _get_page_tags(cls, lines: str): pages = [] for match in re.finditer(r'(?:/\*+\s*@page\s+)(?P<tag_info>(?:(?!\*+/).)*)\*+/', lines, re.DOTALL | re.MULTILINE): name, sep, desc = match.group('tag_info').partition('\n') pages.append({ 'name': name.strip(), 'desc': desc.strip(), 'start': match.start(), 'filename': f"{name.strip().replace(' ', '-')}.md", }) return pages @classmethod def _get_methods(cls, lines: str): type_pattern = r'void\s+|int\s+|(?:const\s+)?char\s*\*\s*|float\s+' name_pattern = r'[A-Za-z0-9_]+' methods = [] last_method_name = None for match in re.finditer(r'(?:/\*+(?P<comment>(?:(?!\*+/).)*)\*+/\n)?' 
r'^extern "C" DLL_EXPORT ' # type name (params) r'(?P<type>{type_pattern})(?P<name>{id_name})\((?P<params>[^)]+?)?\)' r';?'.format(type_pattern=type_pattern, id_name=name_pattern), lines, re.DOTALL | re.MULTILINE): # Parse method name and return type. # method_declaration = declaration_pattern.match(match['method']) # Skip methods with multiple definitions due to #ifdefs. ie: To remove parameters for Linux, etc. if match['name'] == last_method_name: continue last_method_name = match['name'] method = { 'name': match['name'], 'start': match.start(), 'params': [], } return_type = cls._get_tier_1_var_type(match['type']) if return_type: method['return_type'] = return_type # Parse parameter names and types. if match['params']: for param in match['params'].split(','): param_match = re.match(r'(?P<type>{type_pattern})(?P<name>{name_pattern})'. format(type_pattern=type_pattern, name_pattern=name_pattern), param.strip()) method['params'].append({ 'name': param_match['name'], 'type': cls._get_tier_1_var_type(param_match['type']) }) # Parse comment tags if cls._load_method_tags(method, match['comment']): methods.append(method) return methods @classmethod def _load_method_tags(cls, method, comment): if not comment: report_error(f"{method['name']} has no documentation.") return True if '@ignore' in comment: # ignored export. return False def get_param_index(params, name): return next((i for i, p in enumerate(params) if p["name"] == name), None) def process_desc_tag(tag_text): if 'desc' in method: report_error(f"{method['name']} has multiple desc tags.") return method['desc'] = tag_text def process_return_tag(tag_text): if 'return_desc' in method: report_error(f"{method['name']} has multiple return-desc tags.") return method['return_desc'] = tag_text def process_return_url_tag(tag_text): if 'return_url' in method: report_error(f"{method['name']} has multiple return-url tags.") return method['return_url'] = tag_text def process_param_tag(tag_text): param_name, sep, tag_text = tag_text.partition(' ') if not tag_text: report_error(f"{method['name']} has an empty description for parameter: {param_name}") return index = get_param_index(method['params'], param_name) if index is None: report_error(f"{method['name']} has a description for an unknown parameter: {param_name}") return if 'desc' in method['params'][index]: report_error(f"{method['name']} has multiple param tags for: {param_name}") return method['params'][index]['desc'] = tag_text def process_param_url_tag(tag_text): param_name, sep, tag_text = tag_text.partition(' ') if not tag_text: report_error(f"{method['name']} has an empty URL for parameter: {param_name}") return index = get_param_index(method['params'], param_name) if index is None: report_error(f"{method['name']} has a URL for an unknown parameter: {param_name}") return if 'url' in method['params'][index]: report_error(f"{method['name']} has multiple param-url tags for: {param_name}") return method['params'][index]['url'] = tag_text # def process_api_tag(tag_text): # method['api'] = [api.strip() for api in tag_text.split(',')] def process_url_tag(tag_text): if ',' in tag_text or '\n' in tag_text: report_error(f"{method['name']} had a url tag with multiple urls.") return if 'url' not in method: method['url'] = [] if tag_text in method['url']: report_error(f"{method['name']} had a duplicate url entry: {tag_text}") return method['url'].append(tag_text) def process_plugin_name_tag(tag_text): if 'plugin_name' in method: report_error(f"{method['name']} already had a plugin-name tag.") return 
            method['plugin_name'] = tag_text

        def process_callback_type_tag(tag_text):
            if tag_text not in ['list', 'bool', 'callresult']:
                report_error("{} has an unknown callback type: {}".format(method['name'], tag_text))
                return
            if 'callback-type' in method:
                report_error(f"{method['name']} has multiple callback-type tags.")
                return
            method['callback-type'] = tag_text

        def process_callback_getters_tag(tag_text):
            if 'callback-getters' in method:
                report_error(f"{method['name']} has multiple callback-getters tags.")
                return
            method['callback-getters'] = [name.strip() for name in tag_text.split(',')]

        def process_callbacks_tag(tag_text):
            if 'callbacks' in method:
                report_error(f"{method['name']} has multiple callbacks tags.")
                return
            # callbacks that fire as a result of the current method.
            method['callbacks'] = [name.strip() for name in tag_text.split(',')]

        for tag in re.finditer(r'@(?P<name>[-a-z_0-9]+)\s+(?P<text>(?:(?!@).)*)', comment,
                               re.DOTALL | re.MULTILINE):
            tag_name = tag['name']
            process_function = locals().get(f"process_{tag_name.replace('-', '_')}_tag")
            if process_function:
                process_function(tag['text'].strip())
            else:
                report_error(f"{method['name']} has an unknown tag: {tag_name}")

        # Final validation checks
        if 'desc' not in method:
            report_error(f"{method['name']} has no description.")
        if 'return_type' in method:
            if 'return_desc' not in method:
                report_error(f"{method['name']} has a return type without a return description.")
        else:
            if 'return_desc' in method:
                report_error(f"{method['name']} has a return description without a return type.")
        for param in method['params']:
            if 'desc' not in param:
                report_error(f"{method['name']} has a parameter without a description: {param['name']}")
        return True

    @classmethod
    def _validate_callback_tags(cls, methods):
        for method in methods:
            if 'callback-getters' in method:
                if 'callback-type' not in method:
                    report_error(f"{method['name']} does not have a callback type.")
                if method['callback-getters'] == ['']:
                    # An empty tag splits to ['']; report it and skip this method.
                    report_error(f"{method['name']} has an empty callback-getters tag.")
                    continue
                for getter in method['callback-getters']:
                    method_index = next((i for i, m in enumerate(methods) if m["name"] == getter), None)
                    if method_index is None:
                        report_error(f"{method['name']} has an unknown callback getter method: {getter}")
                    else:
                        if 'callback-parents' not in methods[method_index]:
                            methods[method_index]['callback-parents'] = []
                        methods[method_index]['callback-parents'].append(method['name'])
                        methods[method_index]['callback-parent-type'] = method['callback-type']
                        # callback parents attach their @url tags to each @callback-getters method.
if 'url' in method: if 'url' not in methods[method_index]: methods[method_index]['url'] = [] for url in method['url']: if url not in methods[method_index]['url']: methods[method_index]['url'].append(url) @classmethod def _assign_methods_to_pages(cls, pages, methods): for page in pages: page['methods'] = [] for method in methods: try: page = [p for p in pages if p['start'] <= method['start']][-1] page['methods'].append(method) method['page'] = page except IndexError: report_error(f"Could not find page for method: {method['name']}") def write_commands_txt(self, out_file: str): var_type_letter = { None: '0', '': '0', 'integer': 'I', 'float': 'F', 'string': 'S', } with open(out_file, 'w') as f: f.write("#CommandName,ReturnType,ParameterTypes,Windows,Linux,Mac,Android,iOS,Windows64\n") for page in [p for p in self.pages if p['methods']]: # print(f"Page {page['name']} has {len(page['methods'])} methods") f.write("#\n") f.write(f"# {page['name']}\n") f.write("#\n") for method in page['methods']: # type: dict param_types = ''.join([var_type_letter[v['type']] for v in method['params']]) \ if method['params'] else '0' f.write('{plugin_method_name},{return_type},{param_types},' '{windows},{linux},{mac},{android},{ios},{windows64}\n' .format(plugin_method_name=method.get('plugin_name', method['name']), return_type=var_type_letter[method.get('return_type')], param_types=param_types, windows=method['name'], linux=method['name'], mac='0', android='0', ios='0', windows64=method['name'], ) ) @classmethod def _get_url_title(cls, url: str): # TODO Handle this? # [Downloadable Content (DLC)](https://partner.steamgames.com/doc/store/application/dlc) # -> Downloadable Content (DLC) base, sep, title = url.rpartition('/') # base, sep, title = title.rpartition('#') return title @classmethod def _create_comma_list(cls, text_list): if len(text_list) == 0: return "" if len(text_list) == 1: return text_list[0] return ', '.join(text_list[0:-1]) + ' or ' + text_list[-1] def _get_method_link(self, method_name): for page in self.pages: if method_name in [m['name'] for m in page['methods']]: return '[{0}]({1}#{0})'.format(method_name, page['name'].replace(' ', '-')) report_error("Could not generate method link for {}".format(method_name)) return method_name def write_wiki_files(self, base_path: str): for page in [p for p in self.pages if p['methods']]: with open(base_path + page['filename'], 'w') as f: if 'desc' in page: f.write(page['desc']) f.write('\n\n') for method in page['methods']: # type: dict public_method_name = method.get("plugin_name", method["name"]) params_list = ', '.join( ('[{name} as {type}]({url})' if 'url' in param else '{name} as {type}').format( **param) for param in method['params']) syntax = f'{public_method_name}({params_list})' \ f'{" as " + method["return_type"] if "return_type" in method else ""}' # print(syntax) # Output the method wiki. 
wiki_entry = f'## {public_method_name}\n' wiki_entry += f'> {syntax}\n' if method['params']: wiki_entry += ''.join(f'> * _{p["name"]}_ - {p["desc"]} \n' for p in method['params']) wiki_entry += '\n' if 'desc' in method: wiki_entry += '{0}\n\n'.format(method['desc']) if 'callback-parents' in method: if method['callback-parent-type'] == 'callresult': parent_links = [self._get_method_link(name) for name in method['callback-parents']] comma_list = self._create_comma_list(parent_links) wiki_entry += '_This method should only be used when the call result returned by {} has ' \ 'reported a GetCallResultCode of 1._'.format(comma_list) else: # Only one callback parent for other types. wiki_entry += '_This method should only be used ' \ 'when {} has returned 1._'.format(method['callback-parents'][0]) wiki_entry += '\n\n' if 'return_desc' in method: if 'return_url' in method: wiki_entry += '**Returns:** [{return_desc}]({return_url})\n\n'.format(**method) else: wiki_entry += '**Returns:** {return_desc}\n\n'.format(**method) if 'callback-getters' in method: wiki_entry += '**Associated Methods:** \n' wiki_entry += ' \n'.join(self._get_method_link(name) for name in method['callback-getters']) wiki_entry += '\n\n' if 'url' in method: wiki_entry += '**Reference:** \n' wiki_entry += ' \n'.join(('[{0}]({1})'.format(self._get_url_title(url), url) for url in method['url'])) wiki_entry += '\n\n' f.write(wiki_entry) # print(wiki_entry) loader = ExportedMethodLoader('../../SteamPlugin/Common/') loader.write_commands_txt('../../AGKPlugin/SteamPlugin/Commands.txt') print("Error count: {}".format(error_count)) print("Page count: {}".format(len(loader.pages))) print("Method count: {}".format(loader.method_count)) loader.write_wiki_files(WIKI_PATH)
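# --- Illustrative input (an assumption based on the tag format documented
# at the top of this file; the exported method shown is hypothetical) ---
# A C++ export that this loader would pick up might look like:
#
#     /*
#     @desc Gets the number of players for the given app.
#     @param appID The AppID to query.
#     @return The player count, or 0 on failure.
#     @url https://partner.steamgames.com/doc/api/ISteamUserStats
#     */
#     extern "C" DLL_EXPORT int GetPlayerCountExample(int appID);
#
# _get_methods would record the name "GetPlayerCountExample", an "integer"
# return type and one "integer" parameter, and _load_method_tags would
# attach the desc, param, return and url entries for the wiki output.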
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0

"""
This provides a small set of effect handlers in NumPyro that are modeled
after Pyro's `poutine <http://docs.pyro.ai/en/stable/poutine.html>`_ module.
For a tutorial on effect handlers more generally, readers are encouraged to
read `Poutine: A Guide to Programming with Effect Handlers in Pyro
<http://pyro.ai/examples/effect_handlers.html>`_. These simple effect handlers
can be composed together or new ones added to enable implementation of custom
inference utilities and algorithms.

**Example**

As an example, we are using :class:`~numpyro.handlers.seed`,
:class:`~numpyro.handlers.trace` and :class:`~numpyro.handlers.substitute`
handlers to define the `log_likelihood` function below. We first create a
logistic regression model and sample from the posterior distribution over
the regression parameters using :func:`~numpyro.infer.MCMC`. The
`log_likelihood` function uses effect handlers to run the model by
substituting sample sites with values from the posterior distribution and
computes the log density for a single data point. The
`log_predictive_density` function computes the log likelihood for each draw
from the joint posterior and aggregates the results for all the data points,
but does so by using JAX's auto-vectorize transform called `vmap` so that we
do not need to loop over all the data points.

.. doctest::

   >>> import jax.numpy as jnp
   >>> from jax import random, vmap
   >>> from jax.scipy.special import logsumexp
   >>> import numpyro
   >>> import numpyro.distributions as dist
   >>> from numpyro import handlers
   >>> from numpyro.infer import MCMC, NUTS

   >>> N, D = 3000, 3
   >>> def logistic_regression(data, labels):
   ...     coefs = numpyro.sample('coefs', dist.Normal(jnp.zeros(D), jnp.ones(D)))
   ...     intercept = numpyro.sample('intercept', dist.Normal(0., 10.))
   ...     logits = jnp.sum(coefs * data + intercept, axis=-1)
   ...     return numpyro.sample('obs', dist.Bernoulli(logits=logits), obs=labels)

   >>> data = random.normal(random.PRNGKey(0), (N, D))
   >>> true_coefs = jnp.arange(1., D + 1.)
   >>> logits = jnp.sum(true_coefs * data, axis=-1)
   >>> labels = dist.Bernoulli(logits=logits).sample(random.PRNGKey(1))

   >>> num_warmup, num_samples = 1000, 1000
   >>> mcmc = MCMC(NUTS(model=logistic_regression), num_warmup, num_samples)
   >>> mcmc.run(random.PRNGKey(2), data, labels)  # doctest: +SKIP
   sample: 100%|██████████| 1000/1000 [00:00<00:00, 1252.39it/s, 1 steps of size 5.83e-01. acc. prob=0.85]
   >>> mcmc.print_summary()  # doctest: +SKIP

                      mean         sd       5.5%      94.5%      n_eff       Rhat
       coefs[0]       0.96       0.07       0.85       1.07     455.35       1.01
       coefs[1]       2.05       0.09       1.91       2.20     332.00       1.01
       coefs[2]       3.18       0.13       2.96       3.37     320.27       1.00
      intercept      -0.03       0.02      -0.06       0.00     402.53       1.00

   >>> def log_likelihood(rng_key, params, model, *args, **kwargs):
   ...     model = handlers.substitute(handlers.seed(model, rng_key), params)
   ...     model_trace = handlers.trace(model).get_trace(*args, **kwargs)
   ...     obs_node = model_trace['obs']
   ...     return obs_node['fn'].log_prob(obs_node['value'])

   >>> def log_predictive_density(rng_key, params, model, *args, **kwargs):
   ...     n = list(params.values())[0].shape[0]
   ...     log_lk_fn = vmap(lambda rng_key, params: log_likelihood(rng_key, params, model, *args, **kwargs))
   ...     log_lk_vals = log_lk_fn(random.split(rng_key, n), params)
   ...     return jnp.sum(logsumexp(log_lk_vals, 0) - jnp.log(n))

   >>> print(log_predictive_density(random.PRNGKey(2), mcmc.get_samples(),
   ...     logistic_regression, data, labels))  # doctest: +SKIP
   -874.89813
"""

from collections import OrderedDict
import warnings

from jax import lax, random
import jax.numpy as jnp

import numpyro
from numpyro.primitives import Messenger, apply_stack
from numpyro.util import not_jax_tracer

__all__ = [
    'block',
    'condition',
    'lift',
    'mask',
    'reparam',
    'replay',
    'scale',
    'scope',
    'seed',
    'substitute',
    'trace',
    'do',
]


class trace(Messenger):
    """
    Returns a handler that records the inputs and outputs at primitive calls
    inside `fn`.

    **Example**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import seed, trace
       >>> import pprint as pp

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace()
       >>> pp.pprint(exec_trace)  # doctest: +SKIP
       OrderedDict([('a',
                     {'args': (),
                      'fn': <numpyro.distributions.continuous.Normal object at 0x7f9e689b1eb8>,
                      'is_observed': False,
                      'kwargs': {'rng_key': DeviceArray([0, 0], dtype=uint32)},
                      'name': 'a',
                      'type': 'sample',
                      'value': DeviceArray(-0.20584235, dtype=float32)})])
    """
    def __enter__(self):
        super(trace, self).__enter__()
        self.trace = OrderedDict()
        return self.trace

    def postprocess_message(self, msg):
        if 'name' not in msg:
            # skip recording helper messages e.g. `control_flow`, `to_data`,
            # `to_funsor`, which have no name
            return
        assert not (msg['type'] == 'sample' and msg['name'] in self.trace), \
            'all sites must have unique names but got `{}` duplicated'.format(msg['name'])
        self.trace[msg['name']] = msg.copy()

    def get_trace(self, *args, **kwargs):
        """
        Run the wrapped callable and return the recorded trace.

        :param `*args`: arguments to the callable.
        :param `**kwargs`: keyword arguments to the callable.
        :return: `OrderedDict` containing the execution trace.
        """
        self(*args, **kwargs)
        return self.trace


class replay(Messenger):
    """
    Given a callable `fn` and an execution trace `guide_trace`,
    return a callable which substitutes `sample` calls in `fn` with
    values from the corresponding site names in `guide_trace`.

    :param fn: Python callable with NumPyro primitives.
    :param guide_trace: an OrderedDict containing execution metadata.

    **Example**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import replay, seed, trace

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace()
       >>> print(exec_trace['a']['value'])  # doctest: +SKIP
       -0.20584235
       >>> replayed_trace = trace(replay(model, exec_trace)).get_trace()
       >>> print(exec_trace['a']['value'])  # doctest: +SKIP
       -0.20584235
       >>> assert replayed_trace['a']['value'] == exec_trace['a']['value']
    """
    def __init__(self, fn=None, guide_trace=None):
        assert guide_trace is not None
        self.guide_trace = guide_trace
        super(replay, self).__init__(fn)

    def process_message(self, msg):
        if msg['type'] in ('sample', 'plate') and msg['name'] in self.guide_trace:
            msg['value'] = self.guide_trace[msg['name']]['value']


class block(Messenger):
    """
    Given a callable `fn`, return another callable that selectively hides
    primitive sites where `hide_fn` returns True from other effect handlers
    on the stack.

    :param fn: Python callable with NumPyro primitives.
    :param hide_fn: function which when given a dictionary containing
        site-level metadata returns whether it should be blocked.

    **Example:**

    ..
doctest:: >>> from jax import random >>> import numpyro >>> from numpyro.handlers import block, seed, trace >>> import numpyro.distributions as dist >>> def model(): ... a = numpyro.sample('a', dist.Normal(0., 1.)) ... return numpyro.sample('b', dist.Normal(a, 1.)) >>> model = seed(model, random.PRNGKey(0)) >>> block_all = block(model) >>> block_a = block(model, lambda site: site['name'] == 'a') >>> trace_block_all = trace(block_all).get_trace() >>> assert not {'a', 'b'}.intersection(trace_block_all.keys()) >>> trace_block_a = trace(block_a).get_trace() >>> assert 'a' not in trace_block_a >>> assert 'b' in trace_block_a """ def __init__(self, fn=None, hide_fn=None, hide=None): if hide_fn is not None: self.hide_fn = hide_fn elif hide is not None: self.hide_fn = lambda msg: msg.get('name') in hide else: self.hide_fn = lambda msg: True super(block, self).__init__(fn) def process_message(self, msg): if self.hide_fn(msg): msg['stop'] = True class condition(Messenger): """ Conditions unobserved sample sites to values from `data` or `condition_fn`. Similar to :class:`~numpyro.handlers.substitute` except that it only affects `sample` sites and changes the `is_observed` property to `True`. :param fn: Python callable with NumPyro primitives. :param dict data: dictionary of `numpy.ndarray` values keyed by site names. :param condition_fn: callable that takes in a site dict and returns a numpy array or `None` (in which case the handler has no side effect). **Example:** .. doctest:: >>> from jax import random >>> import numpyro >>> from numpyro.handlers import condition, seed, substitute, trace >>> import numpyro.distributions as dist >>> def model(): ... numpyro.sample('a', dist.Normal(0., 1.)) >>> model = seed(model, random.PRNGKey(0)) >>> exec_trace = trace(condition(model, {'a': -1})).get_trace() >>> assert exec_trace['a']['value'] == -1 >>> assert exec_trace['a']['is_observed'] """ def __init__(self, fn=None, data=None, condition_fn=None): self.condition_fn = condition_fn self.data = data if sum((x is not None for x in (data, condition_fn))) != 1: raise ValueError('Only one of `data` or `condition_fn` ' 'should be provided.') super(condition, self).__init__(fn) def process_message(self, msg): if (msg['type'] != 'sample') or msg.get('_control_flow_done', False): if msg['type'] == 'control_flow': if self.data is not None: msg['kwargs']['substitute_stack'].append(('condition', self.data)) if self.condition_fn is not None: msg['kwargs']['substitute_stack'].append(('condition', self.condition_fn)) return if self.data is not None: value = self.data.get(msg['name']) else: value = self.condition_fn(msg) if value is not None: msg['value'] = value msg['is_observed'] = True class lift(Messenger): """ Given a stochastic function with ``param`` calls and a prior distribution, create a stochastic function where all param calls are replaced by sampling from prior. Prior should be a distribution or a dict of names to distributions. Consider the following NumPyro program: >>> import numpyro >>> import numpyro.distributions as dist >>> from numpyro.handlers import lift >>> >>> def model(x): ... s = numpyro.param("s", 0.5) ... z = numpyro.sample("z", dist.Normal(x, s)) ... return z ** 2 >>> lifted_model = lift(model, prior={"s": dist.Exponential(0.3)}) ``lift`` makes ``param`` statements behave like ``sample`` statements using the distributions in ``prior``. In this example, site `s` will now behave as if it was replaced with ``s = numpyro.sample("s", dist.Exponential(0.3))``. 
:param fn: function whose parameters will be lifted to random values :param prior: prior function in the form of a Distribution or a dict of Distributions """ def __init__(self, fn=None, prior=None): super().__init__(fn) self.prior = prior self._samples_cache = {} def __enter__(self): self._samples_cache = {} return super().__enter__() def __exit__(self, *args, **kwargs): self._samples_cache = {} return super().__exit__(*args, **kwargs) def process_message(self, msg): if msg["type"] != "param": return name = msg["name"] fn = self.prior.get(name) if isinstance(self.prior, dict) else self.prior if isinstance(fn, numpyro.distributions.Distribution): msg["type"] = "sample" msg["fn"] = fn msg["args"] = () msg["kwargs"] = {"rng_key": msg["kwargs"].get("rng_key", None), "sample_shape": msg["kwargs"].get("sample_shape", ())} msg["intermediates"] = [] else: # otherwise leave as is return if name in self._samples_cache: # Multiple pyro.param statements with the same # name. Block the site and fix the value. msg["value"] = self._samples_cache[name]["value"] msg["is_observed"] = True msg["stop"] = True else: self._samples_cache[name] = msg msg["is_observed"] = False class mask(Messenger): """ This messenger masks out some of the sample statements elementwise. :param mask: a boolean or a boolean-valued array for masking elementwise log probability of sample sites (`True` includes a site, `False` excludes a site). """ def __init__(self, fn=None, mask=True): if lax.dtype(mask) != 'bool': raise ValueError("`mask` should be a bool array.") self.mask = mask super().__init__(fn) def process_message(self, msg): if msg['type'] != 'sample': return msg['fn'] = msg['fn'].mask(self.mask) class reparam(Messenger): """ Reparametrizes each affected sample site into one or more auxiliary sample sites followed by a deterministic transformation [1]. To specify reparameterizers, pass a ``config`` dict or callable to the constructor. See the :mod:`numpyro.infer.reparam` module for available reparameterizers. Note some reparameterizers can examine the ``*args,**kwargs`` inputs of functions they affect; these reparameterizers require using ``handlers.reparam`` as a decorator rather than as a context manager. [1] Maria I. Gorinova, Dave Moore, Matthew D. Hoffman (2019) "Automatic Reparameterisation of Probabilistic Programs" https://arxiv.org/pdf/1906.03028.pdf :param config: Configuration, either a dict mapping site name to :class:`~numpyro.infer.reparam.Reparam` , or a function mapping site to :class:`~numpyro.infer.reparam.Reparam` or None. :type config: dict or callable """ def __init__(self, fn=None, config=None): assert isinstance(config, dict) or callable(config) self.config = config super().__init__(fn) def process_message(self, msg): if msg["type"] != "sample": return if isinstance(self.config, dict): reparam = self.config.get(msg["name"]) else: reparam = self.config(msg) if reparam is None: return new_fn, value = reparam(msg["name"], msg["fn"], msg["value"]) if value is not None: if new_fn is None: msg['type'] = 'deterministic' msg['value'] = value for key in list(msg.keys()): if key not in ('type', 'name', 'value'): del msg[key] return if msg["value"] is None: msg["is_observed"] = True msg["value"] = value msg["fn"] = new_fn class scale(Messenger): """ This messenger rescales the log probability score. This is typically used for data subsampling or for stratified sampling of data (e.g. in fraud detection where negatives vastly outnumber positives). 
    :param float scale: a positive scaling factor
    """
    def __init__(self, fn=None, scale=1.):
        if not_jax_tracer(scale):
            if scale <= 0:
                raise ValueError("'scale' argument should be a positive number.")
        self.scale = scale
        super().__init__(fn)

    def process_message(self, msg):
        if msg['type'] not in ('param', 'sample', 'plate'):
            return

        msg["scale"] = self.scale if msg.get('scale') is None else self.scale * msg['scale']


class scope(Messenger):
    """
    This handler prepends a prefix followed by a ``/`` to the name of sample sites.

    Example::

    .. doctest::

       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import scope, seed, trace
       >>>
       >>> def model():
       ...     with scope(prefix="a"):
       ...         with scope(prefix="b"):
       ...             return numpyro.sample("x", dist.Bernoulli(0.5))
       ...
       >>> assert "a/b/x" in trace(seed(model, 0)).get_trace()

    :param fn: Python callable with NumPyro primitives.
    :param str prefix: a string to prepend to sample names
    """
    def __init__(self, fn=None, prefix=''):
        self.prefix = prefix
        super().__init__(fn)

    def process_message(self, msg):
        if msg.get('name'):
            msg['name'] = f"{self.prefix}/{msg['name']}"


class seed(Messenger):
    """
    JAX uses a functional pseudo random number generator that requires passing
    in a seed :func:`~jax.random.PRNGKey` to every stochastic function. The
    `seed` handler allows us to initially seed a stochastic function with a
    :func:`~jax.random.PRNGKey`. Every call to the
    :func:`~numpyro.handlers.sample` primitive inside the function results in
    a splitting of this initial seed so that we use a fresh seed for each
    subsequent call without having to explicitly pass in a `PRNGKey` to each
    `sample` call.

    :param fn: Python callable with NumPyro primitives.
    :param rng_seed: a random number generator seed.
    :type rng_seed: int, jnp.ndarray scalar, or jax.random.PRNGKey

    .. note::

        Unlike in Pyro, the `numpyro.sample` primitive cannot be used without
        wrapping it in a seed handler since there is no global random state.
        As such, users need to use `seed` as a contextmanager to generate
        samples from distributions or as a decorator for their model callable
        (see below).

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> from numpyro import handlers
       >>> import numpyro.distributions as dist

       >>> # as context manager
       >>> with handlers.seed(rng_seed=1):
       ...     x = numpyro.sample('x', dist.Normal(0., 1.))

       >>> def model():
       ...     return numpyro.sample('y', dist.Normal(0., 1.))

       >>> # as function decorator (/modifier)
       >>> y = handlers.seed(model, rng_seed=1)()
       >>> assert x == y
    """
    def __init__(self, fn=None, rng_seed=None):
        if isinstance(rng_seed, int) or (isinstance(rng_seed, jnp.ndarray) and not jnp.shape(rng_seed)):
            rng_seed = random.PRNGKey(rng_seed)
        if not (isinstance(rng_seed, jnp.ndarray) and rng_seed.dtype == jnp.uint32 and
                rng_seed.shape == (2,)):
            raise TypeError('Incorrect type for rng_seed: {}'.format(type(rng_seed)))
        self.rng_key = rng_seed
        super(seed, self).__init__(fn)

    def process_message(self, msg):
        if ((msg['type'] == 'sample' and not msg['is_observed']) or
                msg['type'] in ('rng_key', 'param', 'control_flow')) and \
                msg['kwargs'].get('rng_key') is None:
            self.rng_key, rng_key_sample = random.split(self.rng_key)
            msg['kwargs']['rng_key'] = rng_key_sample


class substitute(Messenger):
    """
    Given a callable `fn` and a dict `data` keyed by site names (alternatively,
    a callable `substitute_fn`), return a callable which substitutes all primitive
    calls in `fn` with values from `data` whose key matches the site name.
    If the site name is not present in `data`, there is no side effect.

    If a `substitute_fn` is provided, then the value at the site is replaced by
    the value returned from the call to `substitute_fn` for the given site.

    :param fn: Python callable with NumPyro primitives.
    :param dict data: dictionary of `numpy.ndarray` values keyed by site names.
    :param substitute_fn: callable that takes in a site dict and returns a numpy
        array or `None` (in which case the handler has no side effect).

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> from numpyro.handlers import seed, substitute, trace
       >>> import numpyro.distributions as dist

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> model = seed(model, random.PRNGKey(0))
       >>> exec_trace = trace(substitute(model, {'a': -1})).get_trace()
       >>> assert exec_trace['a']['value'] == -1
    """
    def __init__(self, fn=None, data=None, substitute_fn=None):
        self.substitute_fn = substitute_fn
        self.data = data
        if sum((x is not None for x in (data, substitute_fn))) != 1:
            raise ValueError('Only one of `data` or `substitute_fn` '
                             'should be provided.')
        super(substitute, self).__init__(fn)

    def process_message(self, msg):
        if (msg['type'] not in ('sample', 'param')) or msg.get('_control_flow_done', False):
            if msg['type'] == 'control_flow':
                if self.data is not None:
                    msg['kwargs']['substitute_stack'].append(('substitute', self.data))
                if self.substitute_fn is not None:
                    msg['kwargs']['substitute_stack'].append(('substitute', self.substitute_fn))
            return

        if self.data is not None:
            value = self.data.get(msg['name'])
        else:
            value = self.substitute_fn(msg)

        if value is not None:
            msg['value'] = value


class do(Messenger):
    """
    Given a stochastic function with some sample statements and a dictionary
    of values at names, set the return values of those sites equal to the values
    as if they were hard-coded to those values and introduce fresh sample sites
    with the same names whose values do not propagate.

    Composes freely with :func:`~numpyro.handlers.condition` to represent
    counterfactual distributions over potential outcomes. See Single World
    Intervention Graphs [1] for additional details and theory.

    This is equivalent to replacing `z = numpyro.sample("z", ...)` with `z = 1.`
    and introducing a fresh sample site `numpyro.sample("z", ...)` whose value
    is not used elsewhere.

    References

    [1] `Single World Intervention Graphs: A Primer`,
        Thomas Richardson, James Robins

    :param fn: a stochastic function (callable containing Pyro primitive calls)
    :param data: a ``dict`` mapping sample site names to interventions

    **Example:**

    .. doctest::

       >>> import jax.numpy as jnp
       >>> import numpyro
       >>> from numpyro.handlers import do, trace, seed
       >>> import numpyro.distributions as dist

       >>> def model(x):
       ...     s = numpyro.sample("s", dist.LogNormal())
       ...     z = numpyro.sample("z", dist.Normal(x, s))
       ...     return z ** 2

       >>> intervened_model = do(model, data={"z": 1.})
       >>> with trace() as exec_trace:
       ...     z_square = seed(intervened_model, 0)(1)
       >>> assert exec_trace['z']['value'] != 1.
       >>> assert not exec_trace['z']['is_observed']
       >>> assert not exec_trace['z'].get('stop', None)
       >>> assert z_square == 1
    """
    def __init__(self, fn=None, data=None):
        self.data = data
        self._intervener_id = str(id(self))
        super(do, self).__init__(fn)

    def process_message(self, msg):
        if msg['type'] != 'sample':
            return
        if msg.get('_intervener_id', None) != self._intervener_id and \
                self.data.get(msg['name']) is not None:

            if msg.get('_intervener_id', None) is not None:
                warnings.warn(
                    "Attempting to intervene on variable {} multiple times, "
                    "this is almost certainly incorrect behavior".format(msg['name']),
                    RuntimeWarning)

            msg['_intervener_id'] = self._intervener_id

            # split node, avoid reapplying self recursively to new node
            new_msg = msg.copy()
            apply_stack(new_msg)

            intervention = self.data.get(msg['name'])
            msg['name'] = msg['name'] + "__CF"  # mangle old name
            msg['value'] = intervention
            msg['is_observed'] = True
            msg['stop'] = True
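A quick composition sketch of the handlers defined above (my addition, not part of the numpyro source; the model and data are made up for illustration): `scale` stacks multiplicatively on top of `mask`, while `seed` and `trace` drive and record the run.

from jax import random
import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
from numpyro import handlers

def model(data):
    loc = numpyro.sample("loc", dist.Normal(0., 1.))
    with numpyro.plate("data", data.shape[0]):
        numpyro.sample("obs", dist.Normal(loc, 1.), obs=data)

data = jnp.array([0.1, -0.4, 0.7])
# Mask out the middle observation, then halve every log-density term.
masked = handlers.mask(model, mask=jnp.array([True, False, True]))
scaled = handlers.scale(masked, scale=0.5)
with handlers.seed(rng_seed=0):
    tr = handlers.trace(scaled).get_trace(data)
print(tr["obs"]["scale"])  # 0.5, recorded on the site by the scale messenger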
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0

"""
This provides a small set of effect handlers in NumPyro that are modeled
after Pyro's `poutine <http://docs.pyro.ai/en/stable/poutine.html>`_ module.
For a tutorial on effect handlers more generally, readers are encouraged to
read `Poutine: A Guide to Programming with Effect Handlers in Pyro
<http://pyro.ai/examples/effect_handlers.html>`_. These simple effect handlers
can be composed together or new ones added to enable implementation of custom
inference utilities and algorithms.

**Example**

As an example, we are using :class:`~numpyro.handlers.seed`,
:class:`~numpyro.handlers.trace` and :class:`~numpyro.handlers.substitute`
handlers to define the `log_likelihood` function below. We first create a
logistic regression model and sample from the posterior distribution over
the regression parameters using :func:`~numpyro.infer.MCMC`. The
`log_likelihood` function uses effect handlers to run the model by substituting
sample sites with values from the posterior distribution and computes the log
density for a single data point. The `log_predictive_density` function computes
the log likelihood for each draw from the joint posterior and aggregates the
results for all the data points, but does so by using JAX's auto-vectorize
transform called `vmap` so that we do not need to loop over all the data points.

.. doctest::

   >>> import jax.numpy as jnp
   >>> from jax import random, vmap
   >>> from jax.scipy.special import logsumexp
   >>> import numpyro
   >>> import numpyro.distributions as dist
   >>> from numpyro import handlers
   >>> from numpyro.infer import MCMC, NUTS

   >>> N, D = 3000, 3
   >>> def logistic_regression(data, labels):
   ...     coefs = numpyro.sample('coefs', dist.Normal(jnp.zeros(D), jnp.ones(D)))
   ...     intercept = numpyro.sample('intercept', dist.Normal(0., 10.))
   ...     logits = jnp.sum(coefs * data + intercept, axis=-1)
   ...     return numpyro.sample('obs', dist.Bernoulli(logits=logits), obs=labels)

   >>> data = random.normal(random.PRNGKey(0), (N, D))
   >>> true_coefs = jnp.arange(1., D + 1.)
   >>> logits = jnp.sum(true_coefs * data, axis=-1)
   >>> labels = dist.Bernoulli(logits=logits).sample(random.PRNGKey(1))

   >>> num_warmup, num_samples = 1000, 1000
   >>> mcmc = MCMC(NUTS(model=logistic_regression), num_warmup, num_samples)
   >>> mcmc.run(random.PRNGKey(2), data, labels)  # doctest: +SKIP
   sample: 100%|██████████| 1000/1000 [00:00<00:00, 1252.39it/s, 1 steps of size 5.83e-01. acc. prob=0.85]
   >>> mcmc.print_summary()  # doctest: +SKIP

                  mean     sd    5.5%   94.5%   n_eff   Rhat
    coefs[0]      0.96   0.07    0.85    1.07  455.35   1.01
    coefs[1]      2.05   0.09    1.91    2.20  332.00   1.01
    coefs[2]      3.18   0.13    2.96    3.37  320.27   1.00
    intercept    -0.03   0.02   -0.06    0.00  402.53   1.00

   >>> def log_likelihood(rng_key, params, model, *args, **kwargs):
   ...     model = handlers.substitute(handlers.seed(model, rng_key), params)
   ...     model_trace = handlers.trace(model).get_trace(*args, **kwargs)
   ...     obs_node = model_trace['obs']
   ...     return obs_node['fn'].log_prob(obs_node['value'])

   >>> def log_predictive_density(rng_key, params, model, *args, **kwargs):
   ...     n = list(params.values())[0].shape[0]
   ...     log_lk_fn = vmap(lambda rng_key, params: log_likelihood(rng_key, params, model, *args, **kwargs))
   ...     log_lk_vals = log_lk_fn(random.split(rng_key, n), params)
   ...     return jnp.sum(logsumexp(log_lk_vals, 0) - jnp.log(n))

   >>> print(log_predictive_density(random.PRNGKey(2), mcmc.get_samples(),
   ...
logistic_regression, data, labels)) # doctest: +SKIP -874.89813 """ from collections import OrderedDict import warnings from jax import lax, random import jax.numpy as jnp import numpyro from numpyro.primitives import Messenger, apply_stack from numpyro.util import not_jax_tracer __all__ = [ 'block', 'condition', 'lift', 'mask', 'reparam', 'replay', 'scale', 'scope', 'seed', 'substitute', 'trace', 'do' ] class trace(Messenger): """ Returns a handler that records the inputs and outputs at primitive calls inside `fn`. **Example** .. doctest:: >>> from jax import random >>> import numpyro >>> import numpyro.distributions as dist >>> from numpyro.handlers import seed, trace >>> import pprint as pp >>> def model(): ... numpyro.sample('a', dist.Normal(0., 1.)) >>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace() >>> pp.pprint(exec_trace) # doctest: +SKIP OrderedDict([('a', {'args': (), 'fn': <numpyro.distributions.continuous.Normal object at 0x7f9e689b1eb8>, 'is_observed': False, 'kwargs': {'rng_key': DeviceArray([0, 0], dtype=uint32)}, 'name': 'a', 'type': 'sample', 'value': DeviceArray(-0.20584235, dtype=float32)})]) """ def __enter__(self): super(trace, self).__enter__() self.trace = OrderedDict() return self.trace def postprocess_message(self, msg): if 'name' not in msg: # skip recording helper messages e.g. `control_flow`, `to_data`, `to_funsor` # which has no name return assert not(msg['type'] == 'sample' and msg['name'] in self.trace), \ 'all sites must have unique names but got `{}` duplicated'.format(msg['name']) self.trace[msg['name']] = msg.copy() def get_trace(self, *args, **kwargs): """ Run the wrapped callable and return the recorded trace. :param `*args`: arguments to the callable. :param `**kwargs`: keyword arguments to the callable. :return: `OrderedDict` containing the execution trace. """ self(*args, **kwargs) return self.trace class replay(Messenger): """ Given a callable `fn` and an execution trace `guide_trace`, return a callable which substitutes `sample` calls in `fn` with values from the corresponding site names in `guide_trace`. :param fn: Python callable with NumPyro primitives. :param guide_trace: an OrderedDict containing execution metadata. **Example** .. doctest:: >>> from jax import random >>> import numpyro >>> import numpyro.distributions as dist >>> from numpyro.handlers import replay, seed, trace >>> def model(): ... numpyro.sample('a', dist.Normal(0., 1.)) >>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace() >>> print(exec_trace['a']['value']) # doctest: +SKIP -0.20584235 >>> replayed_trace = trace(replay(model, exec_trace)).get_trace() >>> print(exec_trace['a']['value']) # doctest: +SKIP -0.20584235 >>> assert replayed_trace['a']['value'] == exec_trace['a']['value'] """ def __init__(self, fn=None, guide_trace=None): assert guide_trace is not None self.guide_trace = guide_trace super(replay, self).__init__(fn) def process_message(self, msg): if msg['type'] in ('sample', 'plate') and msg['name'] in self.guide_trace: msg['value'] = self.guide_trace[msg['name']]['value'] class block(Messenger): """ Given a callable `fn`, return another callable that selectively hides primitive sites where `hide_fn` returns True from other effect handlers on the stack. :param fn: Python callable with NumPyro primitives. :param hide_fn: function which when given a dictionary containing site-level metadata returns whether it should be blocked. **Example:** .. 
doctest:: >>> from jax import random >>> import numpyro >>> from numpyro.handlers import block, seed, trace >>> import numpyro.distributions as dist >>> def model(): ... a = numpyro.sample('a', dist.Normal(0., 1.)) ... return numpyro.sample('b', dist.Normal(a, 1.)) >>> model = seed(model, random.PRNGKey(0)) >>> block_all = block(model) >>> block_a = block(model, lambda site: site['name'] == 'a') >>> trace_block_all = trace(block_all).get_trace() >>> assert not {'a', 'b'}.intersection(trace_block_all.keys()) >>> trace_block_a = trace(block_a).get_trace() >>> assert 'a' not in trace_block_a >>> assert 'b' in trace_block_a """ def __init__(self, fn=None, hide_fn=None, hide=None): if hide_fn is not None: self.hide_fn = hide_fn elif hide is not None: self.hide_fn = lambda msg: msg.get('name') in hide else: self.hide_fn = lambda msg: True super(block, self).__init__(fn) def process_message(self, msg): if self.hide_fn(msg): msg['stop'] = True class condition(Messenger): """ Conditions unobserved sample sites to values from `data` or `condition_fn`. Similar to :class:`~numpyro.handlers.substitute` except that it only affects `sample` sites and changes the `is_observed` property to `True`. :param fn: Python callable with NumPyro primitives. :param dict data: dictionary of `numpy.ndarray` values keyed by site names. :param condition_fn: callable that takes in a site dict and returns a numpy array or `None` (in which case the handler has no side effect). **Example:** .. doctest:: >>> from jax import random >>> import numpyro >>> from numpyro.handlers import condition, seed, substitute, trace >>> import numpyro.distributions as dist >>> def model(): ... numpyro.sample('a', dist.Normal(0., 1.)) >>> model = seed(model, random.PRNGKey(0)) >>> exec_trace = trace(condition(model, {'a': -1})).get_trace() >>> assert exec_trace['a']['value'] == -1 >>> assert exec_trace['a']['is_observed'] """ def __init__(self, fn=None, data=None, condition_fn=None): self.condition_fn = condition_fn self.data = data if sum((x is not None for x in (data, condition_fn))) != 1: raise ValueError('Only one of `data` or `condition_fn` ' 'should be provided.') super(condition, self).__init__(fn) def process_message(self, msg): if (msg['type'] != 'sample') or msg.get('_control_flow_done', False): if msg['type'] == 'control_flow': if self.data is not None: msg['kwargs']['substitute_stack'].append(('condition', self.data)) if self.condition_fn is not None: msg['kwargs']['substitute_stack'].append(('condition', self.condition_fn)) return if self.data is not None: value = self.data.get(msg['name']) else: value = self.condition_fn(msg) if value is not None: msg['value'] = value msg['is_observed'] = True class lift(Messenger): """ Given a stochastic function with ``param`` calls and a prior distribution, create a stochastic function where all param calls are replaced by sampling from prior. Prior should be a distribution or a dict of names to distributions. Consider the following NumPyro program: >>> import numpyro >>> import numpyro.distributions as dist >>> from numpyro.handlers import lift >>> >>> def model(x): ... s = numpyro.param("s", 0.5) ... z = numpyro.sample("z", dist.Normal(x, s)) ... return z ** 2 >>> lifted_model = lift(model, prior={"s": dist.Exponential(0.3)}) ``lift`` makes ``param`` statements behave like ``sample`` statements using the distributions in ``prior``. In this example, site `s` will now behave as if it was replaced with ``s = numpyro.sample("s", dist.Exponential(0.3))``. 
:param fn: function whose parameters will be lifted to random values :param prior: prior function in the form of a Distribution or a dict of Distributions """ def __init__(self, fn=None, prior=None): super().__init__(fn) self.prior = prior self._samples_cache = {} def __enter__(self): self._samples_cache = {} return super().__enter__() def __exit__(self, *args, **kwargs): self._samples_cache = {} return super().__exit__(*args, **kwargs) def process_message(self, msg): if msg["type"] != "param": return name = msg["name"] fn = self.prior.get(name) if isinstance(self.prior, dict) else self.prior if isinstance(fn, numpyro.distributions.Distribution): msg["type"] = "sample" msg["fn"] = fn msg["args"] = () msg["kwargs"] = {"rng_key": msg["kwargs"].get("rng_key", None), "sample_shape": msg["kwargs"].get("sample_shape", ())} msg["intermediates"] = [] else: # otherwise leave as is return if name in self._samples_cache: # Multiple pyro.param statements with the same # name. Block the site and fix the value. msg["value"] = self._samples_cache[name]["value"] msg["is_observed"] = True msg["stop"] = True else: self._samples_cache[name] = msg msg["is_observed"] = False class mask(Messenger): """ This messenger masks out some of the sample statements elementwise. :param mask: a boolean or a boolean-valued array for masking elementwise log probability of sample sites (`True` includes a site, `False` excludes a site). """ def __init__(self, fn=None, mask=True): if lax.dtype(mask) != 'bool': raise ValueError("`mask` should be a bool array.") self.mask = mask super().__init__(fn) def process_message(self, msg): if msg['type'] != 'sample': return msg['fn'] = msg['fn'].mask(self.mask) class reparam(Messenger): """ Reparametrizes each affected sample site into one or more auxiliary sample sites followed by a deterministic transformation [1]. To specify reparameterizers, pass a ``config`` dict or callable to the constructor. See the :mod:`numpyro.infer.reparam` module for available reparameterizers. Note some reparameterizers can examine the ``*args,**kwargs`` inputs of functions they affect; these reparameterizers require using ``handlers.reparam`` as a decorator rather than as a context manager. [1] Maria I. Gorinova, Dave Moore, Matthew D. Hoffman (2019) "Automatic Reparameterisation of Probabilistic Programs" https://arxiv.org/pdf/1906.03028.pdf :param config: Configuration, either a dict mapping site name to :class:`~numpyro.infer.reparam.Reparam` , or a function mapping site to :class:`~numpyro.infer.reparam.Reparam` or None. :type config: dict or callable """ def __init__(self, fn=None, config=None): assert isinstance(config, dict) or callable(config) self.config = config super().__init__(fn) def process_message(self, msg): if msg["type"] != "sample": return if isinstance(self.config, dict): reparam = self.config.get(msg["name"]) else: reparam = self.config(msg) if reparam is None: return new_fn, value = reparam(msg["name"], msg["fn"], msg["value"]) if value is not None: if new_fn is None: msg['type'] = 'deterministic' msg['value'] = value for key in list(msg.keys()): if key not in ('type', 'name', 'value'): del msg[key] return if msg["value"] is None: msg["is_observed"] = True msg["value"] = value msg["fn"] = new_fn class scale(Messenger): """ This messenger rescales the log probability score. This is typically used for data subsampling or for stratified sampling of data (e.g. in fraud detection where negatives vastly outnumber positives). 
:param float scale: a positive scaling factor """ def __init__(self, fn=None, scale=1.): if not_jax_tracer(scale): if scale <= 0: raise ValueError("'scale' argument should be a positive number.") self.scale = scale super().__init__(fn) def process_message(self, msg): if msg['type'] not in ('param', 'sample', 'plate'): return msg["scale"] = self.scale if msg.get('scale') is None else self.scale * msg['scale'] class scope(Messenger): """ This handler prepend a prefix followed by a ``/`` to the name of sample sites. Example:: .. doctest:: >>> import numpyro >>> import numpyro.distributions as dist >>> from numpyro.handlers import scope, seed, trace >>> >>> def model(): ... with scope(prefix="a"): ... with scope(prefix="b"): ... return numpyro.sample("x", dist.Bernoulli(0.5)) ... >>> assert "a/b/x" in trace(seed(model, 0)).get_trace() :param fn: Python callable with NumPyro primitives. :param str prefix: a string to prepend to sample names """ def __init__(self, fn=None, prefix=''): self.prefix = prefix super().__init__(fn) def process_message(self, msg): if msg.get('name'): msg['name'] = f"{self.prefix}/{msg['name']}" class seed(Messenger): """ JAX uses a functional pseudo random number generator that requires passing in a seed :func:`~jax.random.PRNGKey` to every stochastic function. The `seed` handler allows us to initially seed a stochastic function with a :func:`~jax.random.PRNGKey`. Every call to the :func:`~numpyro.handlers.sample` primitive inside the function results in a splitting of this initial seed so that we use a fresh seed for each subsequent call without having to explicitly pass in a `PRNGKey` to each `sample` call. :param fn: Python callable with NumPyro primitives. :param rng_seed: a random number generator seed. :type rng_seed: int, jnp.ndarray scalar, or jax.random.PRNGKey .. note:: Unlike in Pyro, `numpyro.sample` primitive cannot be used without wrapping it in seed handler since there is no global random state. As such, users need to use `seed` as a contextmanager to generate samples from distributions or as a decorator for their model callable (See below). **Example:** .. doctest:: >>> from jax import random >>> import numpyro >>> import numpyro.handlers >>> import numpyro.distributions as dist >>> # as context manager >>> with handlers.seed(rng_seed=1): ... x = numpyro.sample('x', dist.Normal(0., 1.)) >>> def model(): ... return numpyro.sample('y', dist.Normal(0., 1.)) >>> # as function decorator (/modifier) >>> y = handlers.seed(model, rng_seed=1)() >>> assert x == y """ def __init__(self, fn=None, rng_seed=None): if isinstance(rng_seed, int) or (isinstance(rng_seed, jnp.ndarray) and not jnp.shape(rng_seed)): rng_seed = random.PRNGKey(rng_seed) if not (isinstance(rng_seed, jnp.ndarray) and rng_seed.dtype == jnp.uint32 and rng_seed.shape == (2,)): raise TypeError('Incorrect type for rng_seed: {}'.format(type(rng_seed))) self.rng_key = rng_seed super(seed, self).__init__(fn) def process_message(self, msg): if ((msg['type'] == 'sample' and not msg['is_observed']) or msg['type'] == 'rng_key' or msg['type'] == 'param' or msg['type'] == 'control_flow') and \ msg['kwargs'].get('rng_key') is None: self.rng_key, rng_key_sample = random.split(self.rng_key) msg['kwargs']['rng_key'] = rng_key_sample class substitute(Messenger): """ Given a callable `fn` and a dict `data` keyed by site names (alternatively, a callable `substitute_fn`), return a callable which substitutes all primitive calls in `fn` with values from `data` whose key matches the site name. 
If the site name is not present in `data`, there is no side effect. If a `substitute_fn` is provided, then the value at the site is replaced by the value returned from the call to `substitute_fn` for the given site. :param fn: Python callable with NumPyro primitives. :param dict data: dictionary of `numpy.ndarray` values keyed by site names. :param substitute_fn: callable that takes in a site dict and returns a numpy array or `None` (in which case the handler has no side effect). **Example:** .. doctest:: >>> from jax import random >>> import numpyro >>> from numpyro.handlers import seed, substitute, trace >>> import numpyro.distributions as dist >>> def model(): ... numpyro.sample('a', dist.Normal(0., 1.)) >>> model = seed(model, random.PRNGKey(0)) >>> exec_trace = trace(substitute(model, {'a': -1})).get_trace() >>> assert exec_trace['a']['value'] == -1 """ def __init__(self, fn=None, data=None, substitute_fn=None): self.substitute_fn = substitute_fn self.data = data if sum((x is not None for x in (data, substitute_fn))) != 1: raise ValueError('Only one of `data` or `substitute_fn` ' 'should be provided.') super(substitute, self).__init__(fn) def process_message(self, msg): if (msg['type'] not in ('sample', 'param')) or msg.get('_control_flow_done', False): if msg['type'] == 'control_flow': if self.data is not None: msg['kwargs']['substitute_stack'].append(('substitute', self.data)) if self.substitute_fn is not None: msg['kwargs']['substitute_stack'].append(('substitute', self.substitute_fn)) return if self.data is not None: value = self.data.get(msg['name']) else: value = self.substitute_fn(msg) if value is not None: msg['value'] = value class do(Messenger): """ Given a stochastic function with some sample statements and a dictionary of values at names, set the return values of those sites equal to the values as if they were hard-coded to those values and introduce fresh sample sites with the same names whose values do not propagate. Composes freely with :func:`~numpyro.handlers.condition` to represent counterfactual distributions over potential outcomes. See Single World Intervention Graphs [1] for additional details and theory. This is equivalent to replacing `z = numpyro.sample("z", ...)` with `z = 1.` and introducing a fresh sample site `numpyro.sample("z", ...)` whose value is not used elsewhere. References [1] `Single World Intervention Graphs: A Primer`, Thomas Richardson, James Robins :param fn: a stochastic function (callable containing Pyro primitive calls) :param data: a ``dict`` mapping sample site names to interventions **Example:** .. doctest:: >>> import jax.numpy as jnp >>> import numpyro >>> from numpyro.handlers import do, trace, seed >>> import numpyro.distributions as dist >>> def model(x): ... s = numpyro.sample("s", dist.LogNormal()) ... z = numpyro.sample("z", dist.Normal(x, s)) ... return z ** 2 >>> intervened_model = handlers.do(model, data={"z": 1.}) >>> with trace() as exec_trace: ... z_square = seed(intervened_model, 0)(1) >>> assert exec_trace['z']['value'] != 1. 
>>> assert not exec_trace['z']['is_observed'] >>> assert not exec_trace['z'].get('stop', None) >>> assert z_square == 1 """ def __init__(self, fn=None, data=None): self.data = data self._intervener_id = str(id(self)) super(do, self).__init__(fn) def process_message(self, msg): if msg['type'] != 'sample': return if msg.get('_intervener_id', None) != self._intervener_id and \ self.data.get(msg['name']) is not None: if msg.get('_intervener_id', None) is not None: warnings.warn( "Attempting to intervene on variable {} multiple times," "this is almost certainly incorrect behavior".format(msg['name']), RuntimeWarning) msg['_intervener_id'] = self._intervener_id # split node, avoid reapplying self recursively to new node new_msg = msg.copy() apply_stack(new_msg) intervention = self.data.get(msg['name']) msg['name'] = msg['name'] + "__CF" # mangle old name msg['value'] = intervention msg['is_observed'] = True msg['stop'] = True
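As a hedged usage sketch of `lift` from the module above (my addition, inferred from the handler's code rather than taken from the numpyro docs): a `param` site becomes a `sample` site, which a downstream `substitute` then treats like any other sample.

from jax import random
import numpyro
import numpyro.distributions as dist
from numpyro import handlers

def model():
    s = numpyro.param("s", 0.5)
    return numpyro.sample("z", dist.Normal(0., s))

lifted = handlers.lift(model, prior={"s": dist.Exponential(0.3)})
with handlers.seed(rng_seed=0):
    tr = handlers.trace(handlers.substitute(lifted, {"s": 2.0})).get_trace()
assert tr["s"]["type"] == "sample"   # the param site was lifted
assert tr["s"]["value"] == 2.0       # then pinned by substitute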
# -*- coding: utf-8 -*-

"""Run a thermodynamics calculation in MOPAC"""

import logging

import seamm
import seamm_util.printing as printing
from seamm_util import units_class
from seamm_util.printing import FormattedText as __

import mopac_step

logger = logging.getLogger(__name__)
job = printing.getPrinter()
printer = printing.getPrinter("mopac")


class Thermodynamics(mopac_step.Energy):
    def __init__(self, flowchart=None, title="Thermodynamics", extension=None):
        """Initialize the node"""
        logger.debug("Creating Thermodynamics {}".format(self))

        super().__init__(flowchart=flowchart, title=title, extension=extension)

        self.parameters = mopac_step.ThermodynamicsParameters()
        self.description = "Thermodynamic functions"

    def description_text(self, P=None):
        """Prepare information about what this node will do"""
        if not P:
            P = self.parameters.values_to_dict()

        text = "Thermodynamics calculation using {hamiltonian}, converged to "

        # Convergence
        if P["convergence"] == "normal":
            text += "the 'normal' level of 1.0e-04 kcal/mol."
        elif P["convergence"] == "precise":
            text += "the 'precise' level of 1.0e-06 kcal/mol."
        elif P["convergence"] == "relative":
            text += "a factor of {relative} times the normal criterion."
        elif P["convergence"] == "absolute":
            text += "an absolute criterion of {absolute}."

        text += (
            "\nThe thermodynamics functions will be calculated from "
            "{Tmin} to {Tmax} in steps of {Tstep}. The {trans} lowest "
            "modes will be ignored to approximately account for {trans} "
            "internal rotations."
        )

        # Structure handling
        handling = P["structure handling"]
        text += " The structure in the standard orientation will "
        if handling == "Overwrite the current configuration":
            text += "overwrite the current configuration "
        elif handling == "Create a new configuration":
            text += "be put in a new configuration "
        else:
            raise ValueError(
                f"Do not understand how to handle the structure: '{handling}'"
            )

        confname = P["configuration name"]
        if confname == "use SMILES string":
            text += "using SMILES as its name."
        elif confname == "use Canonical SMILES string":
            text += "using canonical SMILES as its name."
        elif confname == "keep current name":
            text += "keeping the current name."
        elif confname == "vibrations with <Hamiltonian>":
            text += "with 'vibrations with {hamiltonian}' as its name."
        elif confname == "use configuration number":
            text += "using the index of the configuration (1, 2, ...) as its name."
        else:
            text += "with '{confname}' as its name."

        return self.header + "\n" + __(text, **P, indent=4 * " ").__str__()

    def get_input(self):
        """Get the input for thermodynamics in MOPAC"""
        P = self.parameters.current_values_to_dict(
            context=seamm.flowchart_variables._data
        )

        # Have to fix formatting for printing...
        PP = dict(P)
        for key in PP:
            if isinstance(PP[key], units_class):
                PP[key] = "{:~P}".format(PP[key])

        # Save the description for later printing
        self.description = []
        self.description.append(__(self.description_text(PP), **PP, indent=self.indent))

        # Convert values with units to the right units, and remove
        # the unit string.
        for key in ("Tmax", "Tmin", "Tstep"):
            P[key] = P[key].to("K").magnitude

        # Remove the 1SCF keyword from the energy setup
        keywords = []
        for keyword in super().get_input():
            if keyword == "1SCF":
                keywords.append("THERMO=({Tmin},{Tmax},{Tstep})".format(**P))
                keywords.append("TRANS={trans}".format(**P))
            else:
                keywords.append(keyword)

        return keywords

    def analyze(self, indent="", data={}, out=[]):
        """Parse the output, generate the text output, and store the
        data in variables for other stages to access.
        """
        # Update the structure
        P = self.parameters.current_values_to_dict(
            context=seamm.flowchart_variables._data
        )

        if "ORIENTATION_ATOM_X" in data:
            system, starting_configuration = self.get_system_configuration(None)
            periodicity = starting_configuration.periodicity
            if (
                "structure handling" in P
                and P["structure handling"] == "Create a new configuration"
            ):
                configuration = system.create_configuration(
                    periodicity=periodicity,
                    atomset=starting_configuration.atomset,
                    bondset=starting_configuration.bondset,
                    cell_id=starting_configuration.cell_id,
                )
            else:
                configuration = starting_configuration

            if periodicity != 0:
                raise NotImplementedError(
                    "Thermodynamics cannot yet handle periodicity"
                )
            xyz = []
            it = iter(data["ORIENTATION_ATOM_X"])
            for x in it:
                xyz.append([float(x), float(next(it)), float(next(it))])
            configuration.atoms.set_coordinates(xyz, fractionals=False)

            # And the name of the configuration.
            if "configuration name" in P:
                if P["configuration name"] == "vibrations with <Hamiltonian>":
                    configuration.name = f"vibrations with {P['hamiltonian']}"
                elif P["configuration name"] == "keep current name":
                    pass
                elif P["configuration name"] == "use SMILES string":
                    configuration.name = configuration.smiles
                elif P["configuration name"] == "use Canonical SMILES string":
                    configuration.name = configuration.canonical_smiles
                elif P["configuration name"] == "use configuration number":
                    configuration.name = str(configuration.n_configurations)

        # The results
        printer.normal(
            __(
                (
                    "The geometry converged in {NUMBER_SCF_CYCLES} "
                    "iterations to a heat of formation of {HEAT_OF_FORMATION} "
                    "kcal/mol."
                ),
                **data,
                indent=self.indent + 4 * " ",
            )
        )

        # Put any requested results into variables or tables
        self.store_results(
            data=data,
            properties=mopac_step.properties,
            results=self.parameters["results"].value,
            create_tables=self.parameters["create tables"].get(),
        )
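A self-contained sketch of the keyword rewrite that `get_input()` in the class above performs (my addition; the energy keyword list and parameter values are hypothetical stand-ins for whatever `super().get_input()` and the parameters actually provide):

P = {"Tmin": 200, "Tmax": 400, "Tstep": 10, "trans": 0}  # assumed values
energy_keywords = ["PM7", "1SCF", "PRECISE"]             # hypothetical

keywords = []
for keyword in energy_keywords:
    if keyword == "1SCF":
        # Replace the single-point keyword with a thermodynamics scan
        keywords.append("THERMO=({Tmin},{Tmax},{Tstep})".format(**P))
        keywords.append("TRANS={trans}".format(**P))
    else:
        keywords.append(keyword)

print(keywords)  # ['PM7', 'THERMO=(200,400,10)', 'TRANS=0', 'PRECISE']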
# -*- coding: utf-8 -*- """Run a thermodynamics calculation in MOPAC""" import logging import seamm import seamm_util.printing as printing from seamm_util import units_class from seamm_util.printing import FormattedText as __ import mopac_step logger = logging.getLogger(__name__) job = printing.getPrinter() printer = printing.getPrinter("mopac") class Thermodynamics(mopac_step.Energy): def __init__(self, flowchart=None, title="Thermodynamics", extension=None): """Initialize the node""" logger.debug("Creating Thermodynamics {}".format(self)) super().__init__(flowchart=flowchart, title=title, extension=extension) self.parameters = mopac_step.ThermodynamicsParameters() self.description = "Thermodynamic functions" def description_text(self, P=None): """Prepare information about what this node will do""" if not P: P = self.parameters.values_to_dict() text = "Thermodynamics calculation using {hamiltonian}, converged to " # Convergence if P["convergence"] == "normal": text += "the 'normal' level of 1.0e-04 kcal/mol." elif P["convergence"] == "precise": text += "the 'precise' level of 1.0e-06 kcal/mol." elif P["convergence"] == "relative": text += "a factor of {relative} times the " "normal criterion." elif P["convergence"] == "absolute": text += "converged to {absolute}." text += ( "\nThe thermodynamics functions will be calculated from " "{Tmin} to {Tmax} in steps of {Tstep}. {trans} lowest " "modes will be ignored to approximately account for {trans} " "internal rotations." ) # Structure handling handling = P["structure handling"] text += " The structure in the standard orientation will " if handling == "Overwrite the current configuration": text += "overwrite the current configuration " elif handling == "Create a new configuration": text += "be put in a new configuration " else: raise ValueError( f"Do not understand how to handle the structure: '{handling}'" ) confname = P["configuration name"] if confname == "use SMILES string": text += "using SMILES as its name." elif confname == "use Canonical SMILES string": text += "using canonical SMILES as its name." elif confname == "keep current name": text += "keeping the current name." elif confname == "vibrations with <Hamiltonian>": text += "with 'vibrations with {hamiltonian}' as its name." elif confname == "use configuration number": text += "using the index of the configuration (1, 2, ...) as its name." else: text += "with '{confname}' as its name." return self.header + "\n" + __(text, **P, indent=4 * " ").__str__() def get_input(self): """Get the input for thermodynamics in MOPAC""" P = self.parameters.current_values_to_dict( context=seamm.flowchart_variables._data ) # Have to fix formatting for printing... PP = dict(P) for key in PP: if isinstance(PP[key], units_class): PP[key] = "{:~P}".format(PP[key]) # Save the description for later printing self.description = [] self.description.append(__(self.description_text(PP), **PP, indent=self.indent)) # Convert values with units to the right units, and remove # the unit string. 
for key in ("Tmax", "Tmin", "Tstep"): P[key] = P[key].to("K").magnitude # Remove the 1SCF keyword from the energy setup keywords = [] for keyword in super().get_input(): if keyword == "1SCF": keywords.append("THERMO=({Tmin},{Tmax},{Tstep})".format(**P)) keywords.append("TRANS={trans}".format(**P)) else: keywords.append(keyword) return keywords def analyze(self, indent="", data={}, out=[]): """Parse the output and generating the text output and store the data in variables for other stages to access """ # Update the structure P = self.parameters.current_values_to_dict( context=seamm.flowchart_variables._data ) if "ORIENTATION_ATOM_X" in data: system, starting_configuration = self.get_system_configuration(None) periodicity = starting_configuration.periodicity if ( "structure handling" in P and P["structure handling"] == "Create a new configuration" ): configuration = system.create_configuration( periodicity=periodicity, atomset=starting_configuration.atomset, bondset=starting_configuration.bondset, cell_id=starting_configuration.cell_id, ) else: configuration = starting_configuration if periodicity != 0: raise NotImplementedError( "Thermodynamics cannot yet handle periodicity" ) xyz = [] it = iter(data["ORIENTATION_ATOM_X"]) for x in it: xyz.append([float(x), float(next(it)), float(next(it))]) configuration.atoms.set_coordinates(xyz, fractionals=False) # And the name of the configuration. if "configuration name" in P: if P["configuration name"] == "vibrations with <Hamiltonian>": configuration.name = f"vibrations with {P['hamiltonian']}" elif P["configuration name"] == "keep current name": pass elif P["configuration name"] == "use SMILES string": configuration.name = configuration.smiles elif P["configuration name"] == "use Canonical SMILES string": configuration.name = configuration.canonical_smiles elif P["configuration name"] == "use configuration number": configuration.name = str(configuration.n_configurations) # The results printer.normal( __( ( "The geometry converged in {NUMBER_SCF_CYCLES} " "iterations to a heat of formation of {HEAT_OF_FORMATION} " "kcal/mol." ), **data, indent=self.indent + 4 * " ", ) ) # Put any requested results into variables or tables self.store_results( data=data, properties=mopac_step.properties, results=self.parameters["results"].value, create_tables=self.parameters["create tables"].get(), )
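The triple-unpacking idiom in `analyze()` above is worth isolating; this standalone sketch (my addition, with made-up coordinate data) shows how a flat list [x1, y1, z1, x2, ...] is regrouped into xyz triples by pulling three values per pass from a shared iterator:

flat = ["0.0", "0.0", "0.1", "1.1", "0.0", "-0.1"]  # hypothetical ORIENTATION_ATOM_X
xyz = []
it = iter(flat)
for x in it:  # each pass consumes three values from the same iterator
    xyz.append([float(x), float(next(it)), float(next(it))])
print(xyz)  # [[0.0, 0.0, 0.1], [1.1, 0.0, -0.1]]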
import pygame
from json import load
from objects import Pawn

achievements = load(open('.achievements.json'))


class Validator:
    """
    Class used to validate movements and check for victories
    (a judge, basically)

    Parameters
    ----------
    group : list[Pawn]
        A list of Pawn objects that are actually in the game
        (weren't captured yet)

    Attributes
    ----------
    group : list[Pawn]
        The pawns inside the game
    ids : dict
        ID table used to know who's where
    stack : list[str]
        The movement call stack of the game
    move_counter : int
        Counter to keep track of how many movements were made in the game
    black_wins : int
        Counts how many times the black pawns won
    white_wins : int
        Counts how many times the white pawns won

    Methods
    -------
    reset()
        Rolls back all attributes to their original state when the object
        was created, keeping only black_wins, white_wins and the move
        stack untouched
    check()
        Checks if the given movecode can be executed; if a valid movecode
        is given, it'll update the pawns' positions and other game
        variables regarding movements or captures
    victory_validator()
        Checks if the actual game state of the board meets any of the
        requirements of the game to count as a win; returns the color who
        won or None if no victory situation was found
    get_movelist()
        Returns all possible movements for the given Pawn object, at the
        current game state
    """

    # Constructor
    def __init__(self, group):
        self.group = group
        self.ids = {}
        self.stack = []  # Move call stack for the entire game
        self.move_counter = 0
        self.black_wins = 0
        self.white_wins = 0

        # Create a dictionary containing all pawn ids
        # and its respective position in the group
        for pawn in self.group:
            self.ids.update({pawn.id: pawn})

    # Pawn id updater
    def __update_ids(self):
        """
        [Private Method] Resets and recreates the IDs table based
        on the game table

        Returns
        -------
        None
            Since it is just updating a class attribute, there's no
            need to return information
        """
        # Reset the id dictionary
        self.ids = {}

        # Fill the dict w/ valid pawn ids
        for pawn in self.group:
            self.ids.update({pawn.id: pawn})

    # Move tester
    def __move(self, pawn_id):
        """
        [Private Method] Checks if the given pawn can move ahead

        Returns
        -------
        tuple: (bool, str)
            Returns a tuple containing a boolean to describe if the movement
            was made or not, and a string containing a message to be shown
            on the python shell
        """
        # Try to fetch the pawn object
        try:
            # Fetch the pawn object by id
            pawn = self.ids[pawn_id]

            # Get the destination square by doing some maths :D
            destination = f'{pawn_id[0]}{int(pawn_id[1]) + (1 if (pawn.color == "white") else -1)}'
        except KeyError:
            return (None, "[ERROR] Pawn not found")

        # Tests if there's any pawn in the destination square
        test_list = [True if (test.id == destination) else False for test in self.group]
        if (test_list.count(True) > 0):
            # Yep, there's a pawn ahead, so don't move;
            # raise a debug message
            return (False, "[ERROR] Can't move this pawn, there's another pawn at the destination")
        else:
            # There's no pawn ahead, so it can move
            return (True, f"[MOVE] {destination}")

    # Capture tester
    def __capture(self, pawn_id, target):
        """
        [Private Method] Checks if the given pawn can capture the given target

        Returns
        -------
        tuple: (bool, str)
            Returns a tuple containing a boolean to describe if the capture
            was made or not, and a string containing a message to be shown
            on the python shell
        """
        # Try to fetch the required objects
        try:
            # Fetch the pawn object by id
            pawn = self.ids[pawn_id]

            # Fetch the target pawn object by id
            tgt = self.ids[target]
        except KeyError:
            return (None, "[ERROR] Pawn not found")

        # Check if the pawns have different colors
        if (pawn.color != tgt.color):
            return (True, f'[CAPTURE] {pawn_id[0]}x{tgt.id}')  # Can capture
        else:
            return (False, "[ERROR] The pawns have the same color")  # Can't capture

    # Reset the board state
    def reset(self, silent=False):
        """
        Resets the game variables to their initial state, keeping only
        black_wins, white_wins and the move stack untouched

        Returns
        -------
        None
            Since it is just rolling back some game variables, there's
            no need to return information
        """
        # Clear the pawn groups
        self.group = []

        # Recreate all pawns at its initial state
        self.group.append(Pawn("black", "a3", silent))
        self.group.append(Pawn("black", "b3", silent))
        self.group.append(Pawn("black", "c3", silent))
        self.group.append(Pawn("white", "a1", silent))
        self.group.append(Pawn("white", "b1", silent))
        self.group.append(Pawn("white", "c1", silent))

        # Update the id list
        self.__update_ids()

        # Reset the move counter
        self.move_counter = 0

    # Move decoder; transforms strings (movecode)
    # into pawn actions
    def check(self, movecode, silent=False):
        """
        Pawn move decoder. Checks and executes (if possible) the given
        movecodes; prints an error if an invalid movecode is given

        Conventions:
            - [destination_square] -> Moves a pawn to the specified square
            - [pawn_x]x[destination_square] -> Captures the pawn at the
              specified square

        Examples:
            a2 -> Moves a pawn to the a2 position
            axb2 -> Pawn at a1 captures a pawn at b2

        Returns
        -------
        None
            Either the game will update the pawns positions or will print
            a debug message on the python shell
        """
        if (len(movecode) == 2):
            # Pawn movement
            pawn_id = f'{movecode[0]}{int(movecode[1]) + (-1 if (self.move_counter % 2 == 0) else 1)}'
            result = self.__move(pawn_id)
            if (result[0] == True):
                self.ids[pawn_id].update_id(movecode)
                self.move_counter += 1
                self.__update_ids()
            if (silent == False):
                print(result[1])
        elif (len(movecode) == 4):
            # Pawn capture
            pawn_id = f'{movecode[0]}{int(movecode[3]) + (-1 if (self.move_counter % 2 == 0) else 1)}'
            target = f'{movecode[2]}{movecode[3]}'
            result = self.__capture(pawn_id, target)
            if (result[0] == True):
                # Remove the target pawn
                for p in self.group:
                    if (p.id == target):
                        rm_index = self.group.index(p)
                        break
                else:
                    print(achievements['0xff'])
                self.group.pop(rm_index)
                self.ids[pawn_id].update_id(target)
                self.move_counter += 1
                self.__update_ids()
            if (silent == False):
                print(result[1])  # Logs the move result

            # Since a valid movement was made, add it to
            # the game call stack
            if (result[0]):
                self.stack.append(movecode)
        else:
            print("[ERROR] Invalid movecode")

    # Checks if any of the sides have won
    # Resets the game if a victory is detected
    def victory_validator(self, silent=False):
        """
        Da Rules:

        You (Whites) win if:
            A: Cross the board first
            B: Capture all my pawns

        I (Blacks) win if:
            A: Cross the board first
            B: Capture all of your pawns
            C: There's no valid movement left

        Returns
        -------
        str: 'black'
            If a black win is detected, return a string containing its color
        str: 'white'
            If a white win is detected, return a string containing its color
        None
            If neither blacks nor whites won
        """
        # Display meme achievement
        if (self.black_wins == 10):
            print(achievements['0x77'])

        # Check if any pawn crossed the board
        for k, v in self.ids.items():
            # Check if any black pawn crossed the board
            if (k in ['a1', 'b1', 'c1'] and v.color == 'black'):
                self.black_wins += 1
                if silent:
                    self.reset(silent)
                return 'black'

            # Check if any white pawn crossed the board
            if (k in ['a3', 'b3', 'c3'] and v.color == 'white'):
                self.white_wins += 1
                if silent:
                    self.reset(silent)
                return 'white'

        # Check if there's only one pawn color present on the board
        colors = [pawn.color for pawn in self.group]
        if (colors.count('white') == 0):
            self.black_wins += 1
            if silent:
                self.reset(silent)
            return 'black'

        if (colors.count('black') == 0):
            self.white_wins += 1
            if silent:
                self.reset(silent)
            return 'white'

        # Check if there's any valid movement left
        states = []
        for pawn in self.group:
            moves = self.get_movelist(pawn)

            # Check for movement:
            # if any movecode has length 2 (a movement)
            if (any(len(x) == 2 for x in moves)):
                states.append(True)
            elif (any(len(x) == 4 for x in moves)):
                # This pawn can capture another pawn, so there's
                # still a valid move
                states.append(True)
            else:
                # No valid moves for that pawn
                states.append(False)

        # Finally, checks if there's no valid movement left
        if (states.count(True) == 0):
            self.black_wins += 1
            if silent:
                self.reset(silent)
            return 'black'

    def get_movelist(self, pawn):
        """
        Get all possible movements for a pawn at the current game state

        Returns:
            moves : list[str]
                A list of strings containing the movecodes for all
                possible movements for a pawn
        """
        # Init
        moves = []
        col_dict = {
            "a": ["b"],
            "b": ["a", "c"],
            "c": ["b"]
        }

        # Check for movements
        if (self.__move(pawn.id)[0] == True):
            moves.append(f'{pawn.id[0]}{int(pawn.id[1]) + (-1 if (pawn.color == "black") else 1)}')

        # Check for captures
        for col in col_dict[pawn.id[0]]:
            if (self.__capture(pawn.id, f'{col}{int(pawn.id[1]) + (-1 if (pawn.color == "black") else 1)}')[0] == True):
                moves.append(f'{pawn.id[0]}x{col}{int(pawn.id[1]) + (-1 if (pawn.color == "black") else 1)}')

        return moves
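A hedged usage sketch for the `Validator` above (my addition). It assumes the `objects.Pawn` constructor signature `Pawn(color, square, silent)` seen in `reset()`, and that `.achievements.json` is present next to the module.

from objects import Pawn  # assumed to exist alongside this module

pawns = [Pawn(color, square, True) for color, square in
         [("black", "a3"), ("black", "b3"), ("black", "c3"),
          ("white", "a1"), ("white", "b1"), ("white", "c1")]]
judge = Validator(pawns)

judge.check("b2", silent=True)      # white pushes b1 -> b2
for pawn in judge.group:
    if pawn.color == "black":
        print(pawn.id, judge.get_movelist(pawn))
print(judge.victory_validator())    # None: no win condition met yet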
import pygame from json import load from objects import Pawn achievements = load( open('.achievements.json') ) class Validator: """ Class used to validate movements and victory checking (a judge, basically) Parameters ---------- group : list[Pawn] A list of Pawn objects that are actually in the game (weren't captured yet) Attributes ---------- group : list[Pawn] The pawns inside the game ids : dict ID table used to know who's where stack : list[str] The movement call stack of the game move_counter: int Counter to keep track of how many movements were made in the game black_wins : int Counts how many times the black pawns won white_wins: int Counts how many times the white pawns won Methods ------- reset() Rolls back all attributes to it's original state when the object was created, keeping only move_count, black_wins and white_wins untouched check() Checks if the given movecode can be executed; if valid movecode is given, it'll update the pawns' positions and other game variables regarding movements or captures victory_validator() Checks if the actual game state of the board meets any of the requirements of the game to count as a win; returns the color who won or None if no victory situation was found get_movelist() Returns all possible movements for the given Pawn object, at the current game state """ # Constructor def __init__(self, group): self.group = group self.ids = {} self.stack = [] # Move call stack for the entire game self.move_counter = 0 self.black_wins = 0 self.white_wins = 0 # Create a dictionary containing all pawn ids # and its respective position in the group for pawn in self.group: self.ids.update({pawn.id: pawn}) # Pawn id updater def __update_ids(self): """ [Private Method] Resets and recreates the IDs table based on the game table Returns ------- None Since it is just updating a class attribute, theres no need to return information """ # Reset the id dictionary self.ids = {} # Fill the dict w/ valid pawn ids for pawn in self.group: self.ids.update({pawn.id: pawn}) # Move tester def __move(self, pawn_id): """ [Private Method] Checks if the given pawn can move ahead Returns ------- tuple: (bool, str) Returns a tuple containing a boolean to describe if the movement was made or not, and a string containing a message to be shown on the python shell """ # Try to fetch the pawn object try: # Fetch the pawn object by id pawn = self.ids[pawn_id] # Get the destination square by doing some maths :D destination = f'{pawn_id[0]}{int(pawn_id[1]) + (1 if (pawn.color == "white") else -1)}' except KeyError: return (None, "[ERROR] Pawn not found") # Tests if there's any pawn in the destination square test_list = [ True if (test.id == destination) else False for test in self.group ] if ( test_list.count(True) > 0 ): # Yep, there's a pawn ahead, so don't move # raise a debug message return (False, "[ERROR] Can't move this pawn, there's another pawn at the destination") else: # There's no pawn ahead, so it can move return [True, f"[MOVE] {destination}"] # Capture tester def __capture(self, pawn_id, target): """ [Private Method] Checks if the given pawn can capture the given target Returns ------- tuple: (bool, str) Returns a tuple containing a boolean to describe if the capture was made or not, and a string containing a message to be shown on the python shell """ # Try to fetch the required objects try: # Fetch the pawn object by id pawn = self.ids[pawn_id] # Fetch the target pawn object by id tgt = self.ids[target] except KeyError: return (None, "[ERROR] Pawn not found") # Check if the 
pawns have different colors if (pawn.color != tgt.color): return (True, f'[CAPTURE] {pawn_id[0]}x{tgt.id}') # Can capture else: return (False, "[ERROR] The pawns have the same color") # Can't capture # Reset the board state def reset(self, silent=False): """ Resets game variables to it's initial state, except moves_counter, black_wins and white_wins Returns ------- None Since it is just rolling back some game variables, there's no need to return information """ # Clear the pawn groups self.group = [] # Recreate all pawns at its initial state self.group.append( Pawn("black", "a3", silent)) self.group.append( Pawn("black", "b3", silent)) self.group.append( Pawn("black", "c3", silent)) self.group.append( Pawn("white", "a1", silent)) self.group.append( Pawn("white", "b1", silent)) self.group.append( Pawn("white", "c1", silent)) # Update the id list self.__update_ids() # Reset the move counter self.move_counter = 0 # Move decoder; transforms strings (movecode) # into pawn actions def check(self, movecode, silent=False): """ Pawn move decoder. Checks and executes (if possible) the given movecodes; raises an error if a invalid movecode is given Conventions: - [destination_square] -> Moves a pawn to the specified square - [pawn_x]x[destination_square] -> Captures the pawn at the specified square Examples: a2 -> Moves a pawn to the a2 position axb2 -> Pawn at a1 captures a pawn at b2 Returns ------- None Either the game will update the pawns positions or will print a debug message on the python shell """ if ( len(movecode) == 2 ): # Pawn movement pawn_id = f'{movecode[0]}{int(movecode[1]) + (-1 if (self.move_counter % 2 == 0) else 1)}' result = self.__move(pawn_id) if (result[0] == True): self.ids[pawn_id].update_id(movecode) self.move_counter += 1 self.__update_ids() if (silent == False): print(result[1]) elif ( len(movecode) == 4 ): # Pawn capture pawn_id = f'{movecode[0]}{int(movecode[3]) + (-1 if (self.move_counter % 2 == 0) else 1)}' target = f'{movecode[2]}{movecode[3]}' result = self.__capture(pawn_id, target) if (result[0] == True): # Remove the target pawn for p in self.group: if (p.id == target): rm_index = self.group.index(p) break else: print( achievements['0xff'] ) self.group.pop(rm_index) self.ids[pawn_id].update_id(target) self.move_counter += 1 self.__update_ids() if (silent == False): print(result[1]) # Logs the move result # Since a valid movement was made, add it to # the game call stack' if (result[0]): self.stack.append(movecode) else: print("[ERROR] Invalid movecode") # Checks if any of the sides have won # Resets the game if a victory is detected def victory_validator(self, silent=False): """ Da Rules: You (Whites) win if: A: Cross the board first B: Capture all my pawns I (Blacks) win if: A: Cross the board first B: Capture all of your pawns C: There's no valid movement left Returns ------- str: 'black' If a black win is detected, return a string containing its color str: 'white' If a white win is detected, return a string containing its color None If neither blacks or whites won """ # Display meme achievement if (self.black_wins == 10): print( achievements['0x77'] ) # Check if any pawn crossed the board for k, v in self.ids.items(): # Check if any black pawn crossed the board if (k in ['a1', 'b1', 'c1'] and v.color == 'black'): self.black_wins += 1 if silent: self.reset(silent) return 'black' # Check if any white pawn crossed the board if (k in ['a3', 'b3', 'c3'] and v.color == 'white'): self.white_wins += 1 if silent: self.reset(silent) return 'white' # Check if there's 
only one pawn color present in the board colors = [ pawn.color for pawn in self.group ] if (colors.count('white') == 0): self.black_wins += 1 if silent: self.reset(silent) return 'black' if (colors.count('black') == 0): self.white_wins += 1 if silent: self.reset(silent) return 'white' # Check if there's any valid movement left states = [] for pawn in self.group: moves = self.get_movelist(pawn) # Check for movementation # If any movecode has length 2 (a movement) if ( any([True if len(x) == 2 else False for x in moves]) ): states.append(True) elif ( any([True if len(x) == 4 else False for x in moves]) ): # This pawn can capture another pawn, so there's still a valid move states.append(True) else: # No valid moves for that pawn states.append(False) # Finally, checks if there's no valid movement left if (states.count(True) == 0): self.black_wins += 1 if silent: self.reset(silent) return 'black' def get_movelist(self, pawn): """ Get all possible movements for a pawn at the current game state Returns: moves : list[str] A list of strings containing the movecodes for all possible movements for a pawn """ # Init moves = [] col_dict = { "a": ["b"], "b": ["a", "c"], "c": ["b"] } # Check for movements if (self.__move(pawn.id)[0] == True): moves.append( f'{pawn.id[0]}{int(pawn.id[1]) + (-1 if (pawn.color == "black") else 1)}') # Check for captures for col in col_dict[pawn.id[0]]: if (self.__capture(pawn.id, f'{col}{int(pawn.id[1]) + (-1 if (pawn.color == "black") else 1)}')[0] == True): moves.append( f'{pawn.id[0]}x{col}{int(pawn.id[1]) + (-1 if (pawn.color == "black") else 1)}') return moves
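One more isolated sketch (my addition), this time of the rank arithmetic that `check()` uses to recover the moving pawn's current square from a destination movecode; the helper name is invented for illustration.

def origin_square(movecode, move_counter):
    # Even move counter: white to move, so the pawn came from one rank below;
    # odd move counter: black to move, so it came from one rank above.
    step = -1 if move_counter % 2 == 0 else 1
    return f"{movecode[0]}{int(movecode[1]) + step}"

assert origin_square("a2", 0) == "a1"  # white's pawn stood on a1
assert origin_square("a2", 1) == "a3"  # black's pawn stood on a3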
""" This module is imported in the component_gallery.py and demonstrates how to style a Dash DataTable to look better with Bootstrap themes. To keep things organized: long descriptions and code examples are in text.py cards like a list of links are created in cheatsheet.py and imported here Cards for a light (dark) theme are displayed only when a light (dark) theme is selected """ import dash_bootstrap_components as dbc import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output import dash_table import pandas as pd from apps import text, cheatsheet TABLE_DOCS = "https://dash.plotly.com/datatable/" from app import app """ ===================================================================== Bootstrap style details """ df = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/solar.csv") """ ===================================================================== Helper functions """ def make_btn_with_modal(id, title, content): """ This makes a button that opens a modal for content note: The modal callback is located in the app_galery.py id: unique identifier title: what appears on the button content: To display text, use dcc.Markdown("my text") To display a codebox that looks better with dark themes: html.Div(html.Pre(html.Code(" enter code here" )), className="codebox",) """ return html.Div( [ dbc.Button( title, id={"type": "modal_btn", "index": id}, color="primary", size="sm", outline=True, className="my-2", ), dbc.Modal( dbc.ModalBody(content), id={"type": "modal", "index": id}, scrollable=True, size="lg", ), ] ) default_table = dash_table.DataTable( columns=[{"name": i, "id": i, "deletable": True} for i in df.columns], data=df.to_dict("records"), page_size=4, editable=True, cell_selectable=True, filter_action="native", sort_action="native", style_table={"overflowX": "auto"}, ) light_table = html.Div( dash_table.DataTable( columns=[{"name": i, "id": i, "deletable": True} for i in df.columns], data=df.to_dict("records"), editable=True, page_size=4, filter_action="native", sort_action="native", style_table={"overflowX": "auto"}, style_data_conditional=[ { "if": {"state": "active"}, "border": "1px solid var(--primary)", "opacity": 0.75, }, {"if": {"state": "selected"}, "border": "1px solid", "opacity": 0.75,}, ], ), className="dbc_light", ) dark_table = html.Div( dash_table.DataTable( columns=[{"name": i, "id": i, "deletable": True} for i in df.columns], data=df.to_dict("records"), editable=True, page_size=4, filter_action="native", sort_action="native", style_table={"overflowX": "auto"}, style_data_conditional=[ { "if": {"state": "active"}, "border": "1px solid var(--primary)", "opacity": 0.75, }, {"if": {"state": "selected"}, "border": "1px solid", "opacity": 0.75,}, ], tooltip_conditional=[ { "if": {"row_index": "odd"}, "type": "markdown", "value": "odd rows have a sample tooltip", } ], ), className="dbc_dark", ) """ ===================================================================== content """ default_table_card = dbc.Card( [ dbc.CardHeader(html.H5("Dash DataTable - default style")), dbc.CardBody([dcc.Markdown(text.datatable_default_text), default_table]), ], className="m-4", ) light_theme_card = dbc.Card( [ dbc.CardHeader(html.H5("Dash DataTable - styled for light themes")), dbc.CardBody( [ dcc.Markdown(text.datatable_light_text), light_table, make_btn_with_modal( "light_theme_code", "see code", dcc.Markdown(text.datatable_light_code), ), ] ), ], id="light_theme_table", className="m-4", ) dark_theme_card = 
dbc.Card(
    [
        dbc.CardHeader(html.H5("Dash DataTable - styled for dark themes")),
        dbc.CardBody(
            [
                dcc.Markdown(text.datatable_dark_text),
                dark_table,
                make_btn_with_modal(
                    "dark_theme_code",
                    "see code",
                    dcc.Markdown(text.datatable_dark_code),
                ),
            ]
        ),
    ],
    id="dark_theme_table",
    className="m-4",
)

hover_light = dbc.Card(
    [
        dbc.CardHeader(html.H5("Dash DataTable - light theme with hover row")),
        dbc.CardBody([dcc.Markdown(text.datatable_light_hover_text), default_table]),
    ],
    className="m-4 dbc_row_hover dbc_light",
)

hover_dark = dbc.Card(
    [
        dbc.CardHeader(html.H5("Dash DataTable - dark theme with hover row")),
        dbc.CardBody([dcc.Markdown(text.datatable_dark_hover_text), default_table]),
    ],
    className="m-4 dbc_row_hover dbc_dark",
)

# Note: layout must be a component, not a tuple, so there is no trailing comma here.
layout = dbc.Container(
    [
        dbc.Card(
            [
                dcc.Markdown(text.datatable_intro_text),
                default_table_card,
                light_theme_card,
                hover_light,
                dark_theme_card,
                hover_dark,
                html.Div(cheatsheet.how_to_datatable, className="mx-4"),
            ],
            className="my-2 p-4",
        ),
    ],
    fluid=True,
)
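The make_btn_with_modal docstring says the modal callback lives elsewhere (app_galery.py); a minimal sketch of what such a pattern-matching callback could look like, assuming the {"type": ..., "index": ...} ids used above:

from dash.dependencies import Input, Output, State, MATCH


@app.callback(
    Output({"type": "modal", "index": MATCH}, "is_open"),
    Input({"type": "modal_btn", "index": MATCH}, "n_clicks"),
    State({"type": "modal", "index": MATCH}, "is_open"),
    prevent_initial_call=True,
)
def toggle_modal(n_clicks, is_open):
    # Each "see code" button toggles only the modal that shares its index.
    return not is_open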
""" This module is imported in the component_gallery.py and demonstrates how to style a Dash DataTable to look better with Bootstrap themes. To keep things organized: long descriptions and code examples are in text.py cards like a list of links are created in cheatsheet.py and imported here Cards for a light (dark) theme are displayed only when a light (dark) theme is selected """ import dash_bootstrap_components as dbc import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output import dash_table import pandas as pd from apps import text, cheatsheet TABLE_DOCS = "https://dash.plotly.com/datatable/" from app import app """ ===================================================================== Bootstrap style details """ df = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/solar.csv") """ ===================================================================== Helper functions """ def make_btn_with_modal(id, title, content): """ This makes a button that opens a modal for content note: The modal callback is located in the app_galery.py id: unique identifier title: what appears on the button content: To display text, use dcc.Markdown("my text") To display a codebox that looks better with dark themes: html.Div(html.Pre(html.Code(" enter code here" )), className="codebox",) """ return html.Div( [ dbc.Button( title, id={"type": "modal_btn", "index": id}, color="primary", size="sm", outline=True, className="my-2", ), dbc.Modal( dbc.ModalBody(content), id={"type": "modal", "index": id}, scrollable=True, size="lg", ), ] ) default_table = dash_table.DataTable( columns=[{"name": i, "id": i, "deletable": True} for i in df.columns], data=df.to_dict("records"), page_size=4, editable=True, cell_selectable=True, filter_action="native", sort_action="native", style_table={"overflowX": "auto"}, ) light_table = html.Div( dash_table.DataTable( columns=[{"name": i, "id": i, "deletable": True} for i in df.columns], data=df.to_dict("records"), editable=True, page_size=4, filter_action="native", sort_action="native", style_table={"overflowX": "auto"}, style_data_conditional=[ { "if": {"state": "active"}, "border": "1px solid var(--primary)", "opacity": 0.75, }, {"if": {"state": "selected"}, "border": "1px solid", "opacity": 0.75,}, ], ), className="dbc_light", ) dark_table = html.Div( dash_table.DataTable( columns=[{"name": i, "id": i, "deletable": True} for i in df.columns], data=df.to_dict("records"), editable=True, page_size=4, filter_action="native", sort_action="native", style_table={"overflowX": "auto"}, style_data_conditional=[ { "if": {"state": "active"}, "border": "1px solid var(--primary)", "opacity": 0.75, }, {"if": {"state": "selected"}, "border": "1px solid", "opacity": 0.75,}, ], tooltip_conditional=[ { "if": {"row_index": "odd"}, "type": "markdown", "value": "odd rows have a sample tooltip", } ], ), className="dbc_dark", ) """ ===================================================================== content """ default_table_card = dbc.Card( [ dbc.CardHeader(html.H5("Dash DataTable - default style")), dbc.CardBody([dcc.Markdown(text.datatable_default_text), default_table]), ], className="m-4", ) light_theme_card = dbc.Card( [ dbc.CardHeader(html.H5("Dash DataTable - styled for light themes")), dbc.CardBody( [ dcc.Markdown(text.datatable_light_text), light_table, make_btn_with_modal( "light_theme_code", "see code", dcc.Markdown(text.datatable_light_code), ), ] ), ], id="light_theme_table", className="m-4", ) dark_theme_card = 
dbc.Card( [ dbc.CardHeader(html.H5("Dash DataTable - styled for dark themes")), dbc.CardBody( [ dcc.Markdown(text.datatable_dark_text), dark_table, make_btn_with_modal( "dark_theme_code", "see code", dcc.Markdown(text.datatable_dark_code), ), ] ), ], id="dark_theme_table", className="m-4", ) hover_light = dbc.Card( [ dbc.CardHeader(html.H5("Dash DataTable - light theme with hover row")), dbc.CardBody([dcc.Markdown(text.datatable_light_hover_text), default_table]), ], className="m-4 dbc_row_hover dbc_light", ) hover_dark = dbc.Card( [ dbc.CardHeader(html.H5("Dash DataTable - dark theme with hover row")), dbc.CardBody([dcc.Markdown(text.datatable_dark_hover_text), default_table]), ], className="m-4 dbc_row_hover dbc_dark", ) layout = ( dbc.Container( [ dbc.Card( [ dcc.Markdown(text.datatable_intro_text), default_table_card, light_theme_card, hover_light, dark_theme_card, hover_dark, html.Div(cheatsheet.how_to_datatable, className="mx-4"), ], className="my-2 p-4", ), ], fluid=True, ), )
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Time: 2021-10-20 1:25 PM
Author: huayang

Subject:

"""
from typing import *
from itertools import islice

import torch
import torch.nn as nn

from torch import Tensor

from huaytools.pytorch.train.trainer import Trainer
from huaytools.pytorch.train.callback import Callback
from huaytools.pytorch.train.datasets import NerBertDatasets
from huaytools.nlp.ner_utils import ner_result_parse
from huaytools.nlp.bert_tokenizer import tokenizer


class MyTrainer(Trainer):

    def set_model(self):
        from huaytools.pytorch.nn import BertCRF
        self.model = BertCRF(n_classes=3)

    def training_step(self, model, batch) -> Union[Tensor, Tuple[Any, Tensor]]:
        token_ids, token_type_ids, masks, label_ids = batch
        probs, loss = model([token_ids, token_type_ids, masks], label_ids, masks)
        return probs, loss

    def set_data_loader(self, batch_size, device):
        # TODO: decouple args and NerBertDatasets
        args = self.args
        data = NerBertDatasets(args)
        args.id2label_map = data.id2label_map
        self.logger.info(data.id2label_map)

        self.train_data_loader = data.train_set
        self.val_data_loader = data.val_set


class ExampleCallback(Callback):
    """"""

    def on_after_optimize_step(self):
        T = self.trainer
        if T.global_step % 3 != 0:
            return

        model = T.model
        batch = T.current_batch
        logger = T.logger

        token_ids, token_type_ids, masks, label_ids = batch
        prob, _ = model([token_ids, token_type_ids, masks], label_ids, masks)

        token_ids, mask = batch[0], batch[2]
        tags = model.decode(prob, mask)
        tags = tags.squeeze(0).cpu().numpy().tolist()
        for tokens_id, m, ids in islice(zip(token_ids, mask, tags), 5):
            tokens_id = tokens_id[m.to(bool)].cpu().numpy().tolist()  # remove [PAD]
            ids = ids[: len(tokens_id)]
            tokens_id = tokens_id[1: -1]  # remove [CLS] and [SEP]
            ids = ids[1: -1]
            chunks = ner_result_parse(tokens_id, ids,
                                      token_id2name=tokenizer.id2token_map,
                                      label_id2name=T.args.id2label_map)
            tokens = tokenizer.convert_ids_to_tokens(tokens_id)
            # print(''.join(tokens), chunks)
            logger.info(f'\tseq={"".join(tokens)}, ret={chunks}')


def _test():
    """"""
    # doctest.testmod()

    # args = TrainConfig(src_train=r'data_files/ner_demo_100.txt', n_classes=3,
    #                    batch_size=8, val_percent=0.2, max_len=24, evaluate_per_step=3)
    trainer = MyTrainer()
    trainer.add_callback(ExampleCallback())
    trainer.train()


if __name__ == '__main__':
    """"""
    _test()
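The callback above strips padding and the [CLS]/[SEP] special tokens before parsing chunks; a minimal plain-Python restatement of that slicing, using lists instead of tensors (names and ids are illustrative):

def strip_special_tokens(token_ids, mask, label_ids):
    # Keep only non-padded positions, then drop [CLS] (first) and [SEP] (last).
    kept = [t for t, m in zip(token_ids, mask) if m]
    labels = label_ids[:len(kept)]
    return kept[1:-1], labels[1:-1]

tokens, labels = strip_special_tokens([101, 8, 9, 102, 0, 0], [1, 1, 1, 1, 0, 0], [0, 1, 2, 0, 0, 0])
assert tokens == [8, 9] and labels == [1, 2]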
import os import sys from threading import Thread from time import sleep, time from tkinter import Button, Entry, Frame, IntVar, Label, PhotoImage, StringVar, Tk from typing import Tuple from keyboard import add_hotkey, on_press_key, press_and_release, read_hotkey, remove_hotkey, unhook from mouse import click def resource_path(relative_path): """ Get absolute path to resource, works for dev and for PyInstaller """ try: # PyInstaller creates a temp folder and stores path in _MEIPASS base_path = sys._MEIPASS except Exception: base_path = os.path.abspath(".") return os.path.join(base_path, relative_path) class App: window: Tk def __init__(self, title: str, size: Tuple[int, int]) -> None: self.window = Tk() self.window.title(title) self.window.resizable(False, False) self.window.iconphoto(True, PhotoImage(file = resource_path('images/icon.png'))) App.centerilaze(self.window, *size) self.main_frame = Frame(self.window) self.main_frame.pack() self.__row = 0 self.__column = 0 self.__key = "" self.__keypress_speed = 1 self.__mouseclick_speed = 1 self.__mouseclick_flag = False self.__keypress_flag = False self.__run_flag = True self.__mhotkey = False self.__khotkey = False self.ltitle_label = Label(self.main_frame, text="Mouse", font="Helvatica 10 bold") self.mouse_speed_entry = Entry(self.main_frame, width=5, justify="center", textvariable=IntVar(self.main_frame, 10)) self.mstart_button = Button(self.main_frame, text="Başlat", fg="green", command=self.on_mouse_button_clicked) self.mstop_button = Button(self.main_frame, text="Durdur", state="disabled", foreground="red", command=self.on_mouse_button_clicked) self.mhotkey_button = Button(self.main_frame, text="Kısayol", foreground="blue", command=self.on_mouse_hotkey_button_clicked) self.rtitle_label = Label(self.main_frame, text="Keyboard", font="Helvatica 10 bold") self.key_entry = Entry(self.main_frame, width=5, justify="center", textvariable=StringVar(self.main_frame, "a")) self.speed_entry = Entry(self.main_frame, width=5, justify="center", textvariable=IntVar(self.main_frame, 10)) self.start_button = Button(self.main_frame, text="Başlat", fg="green", command=self.on_key_button_clicked) self.stop_button = Button(self.main_frame, text="Durdur", state="disabled", command=self.on_key_button_clicked) self.hotkey_button = Button(self.main_frame, text="Kısayol", foreground="blue", command=self.on_key_hotkey_button_clicked) self.minfo_label = Label(self.main_frame, text="Henüz çalışmıyor", font='Helvatica 9 italic', pady=3) self.info_label = Label(self.main_frame, text="Henüz çalışmıyor", font='Helvatica 9 italic', pady=3) self.debug_label = Label(self.main_frame, text="", font="Courier 8 italic", pady=3) self.add(self.ltitle_label, pady=(10, 0)) self.add(self.mouse_speed_entry, padx=(10, 0), pady=(10, 0)) self.add(self.mstart_button, padx=(10, 0), pady=(10, 0)) self.add(self.mstop_button, padx=(10, 0), pady=(10, 0)) self.add(self.mhotkey_button, padx=(10, 0), pady=(10, 0)) self.add_row() self.add(self.rtitle_label, pady=(10, 0)) self.add(self.key_entry, padx=(10, 0), pady=(10, 0)) self.add(self.speed_entry, padx=(10, 0), pady=(10, 0)) self.add(self.start_button, padx=(10, 0), pady=(10, 0)) self.add(self.stop_button, padx=(10, 0), pady=(10, 0)) self.add(self.hotkey_button, padx=(10, 0), pady=(10, 0)) self.add_row() self.add(self.minfo_label, columnspan=6, padx=(10, 0), pady=(10,0)) self.add_row() self.add(self.info_label, columnspan=6, padx=(10, 0)) self.add_row() self.add(self.debug_label, columnspan=6, padx=(10, 0)) @staticmethod def 
centerilaze(root: Tk, width: int, height: int):
        # Gets both half the screen width/height and window width/height
        positionRight = int(root.winfo_screenwidth() / 2 - width / 2)
        positionDown = int(root.winfo_screenheight() / 2.5 - height / 2)

        # Positions the window in the center of the screen.
        root.geometry(f"{width}x{height}+{positionRight}+{positionDown}")

    def start(self):
        Thread(target=self.key_press_functionality).start()
        Thread(target=self.mouse_click_functionality).start()
        self.window.mainloop()
        self.__run_flag = False

    def add(self, component, **kwargs):
        component.grid(column=self.__column, row=self.__row, **kwargs)
        self.__column += 1

    def add_row(self):
        self.__row += 1
        self.__column = 0

    def on_key_button_clicked(self):
        if not self.__keypress_flag:
            self.__key = self.key_entry.get()
            self.__keypress_speed = int(self.speed_entry.get())
            self.__keypress_flag = True

            self.speed_entry.configure(state="disabled")
            self.key_entry.configure(state="disabled")
            self.start_button.configure(state="disabled")
            self.hotkey_button.configure(state="disabled")
            self.stop_button.configure(state="normal")

            self.info_label.configure(foreground="green")
            self.info_label["text"] = f"{self.__key} tuşuna saniyede {self.__keypress_speed} kere basılacak"
        else:
            self.__keypress_flag = False

            self.speed_entry.configure(state="normal")
            self.key_entry.configure(state="normal")
            self.start_button.configure(state="normal")
            self.hotkey_button.configure(state="normal")
            self.stop_button.configure(state="disabled")

            self.info_label.configure(foreground="darkorange2")
            self.info_label["text"] = "Tuş basımı iptal edildi"

    def on_mouse_button_clicked(self):
        if not self.__mouseclick_flag:
            self.__mouseclick_speed = int(self.mouse_speed_entry.get())
            self.__mouseclick_flag = True

            self.mouse_speed_entry.configure(state="disabled")
            self.mstart_button.configure(state="disabled")
            self.mhotkey_button.configure(state="disabled")
            self.mstop_button.configure(state="normal")

            self.minfo_label.configure(foreground="green")
            self.minfo_label["text"] = f"Sol tuşa saniyede {self.__mouseclick_speed} kere basılacak"
        else:
            self.__mouseclick_flag = False

            self.mouse_speed_entry.configure(state="normal")
            self.mstart_button.configure(state="normal")
            self.mhotkey_button.configure(state="normal")
            self.mstop_button.configure(state="disabled")

            self.minfo_label.configure(foreground="darkorange2")
            self.minfo_label["text"] = "Mouse basımı iptal edildi"

    def set_hotkey_bg(self, hotkey_for: str):
        def set_mouse_hotkey():
            self.minfo_label["text"] = "Herhangi bir kısayola basın, iptal için ESC"
            hotkey = read_hotkey()
            if hotkey == "esc":
                self.minfo_label.configure(foreground="darkorange2")
                self.minfo_label["text"] = "Kısayol kaldırıldı"
                if self.__mhotkey:
                    remove_hotkey(self.__mhotkey)
            else:
                self.__mhotkey = add_hotkey(hotkey, self.on_mouse_button_clicked, suppress=True)
                self.minfo_label.configure(foreground="blue")
                self.minfo_label["text"] = f"{hotkey} kısayolu atandı"

        def set_key_hotkey():
            self.info_label["text"] = "Herhangi bir kısayola basın, iptal için ESC"
            hotkey = read_hotkey()
            if hotkey == "esc":
                self.info_label.configure(foreground="darkorange2")
                self.info_label["text"] = "Kısayol kaldırıldı"
                if self.__khotkey:
                    remove_hotkey(self.__khotkey)
            else:
                self.__keypress_hotkey = hotkey
                self.__khotkey = add_hotkey(hotkey, self.on_key_button_clicked, suppress=True)
                self.info_label.configure(foreground="blue")
                self.info_label["text"] = f"{hotkey} kısayolu atandı"

        Thread(target=set_mouse_hotkey if hotkey_for == "mouse" else 
set_key_hotkey).start() def on_mouse_hotkey_button_clicked(self): self.set_hotkey_bg("mouse") def on_key_hotkey_button_clicked(self): self.set_hotkey_bg("key") def key_press_functionality(self): next_time = 0 last_time = time() while self.__run_flag: current_time = time() if self.__keypress_flag and self.__key and current_time > next_time: try: press_and_release(self.__key) passed_time = current_time - last_time last_time = current_time self.debug_label["text"] = f"{self.__key:5} tuşuna {passed_time:.3f} içinde tıklandı" next_time = current_time + 1 / self.__keypress_speed except ValueError: self.info_label.configure(foreground="red") self.info_label["text"] = f"`{self.__key}` tuşu geçersiz" sleep(min([next_time - current_time, 0.3]) if next_time > current_time else 0.3) def mouse_click_functionality(self): next_time = 0 last_time = time() while self.__run_flag: current_time = time() if self.__mouseclick_flag and current_time > next_time: click("left") passed_time = current_time - last_time last_time = current_time next_time = current_time + 1 / self.__mouseclick_speed self.debug_label["text"] = f"{'Sol':5} tuşuna {passed_time:.3f} içinde tıklandı" sleep(min([next_time - current_time, 0.3]) if next_time > current_time else 0.3) app = App("YClicker", (440, 180)) app.start()
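Both worker loops above share the same pacing pattern; here is a minimal standalone sketch of it (the function and argument names are illustrative, not part of the app):

import time

def run_at_rate(action, rate_hz, should_run):
    # Fire `action` roughly `rate_hz` times per second, but never sleep more than
    # 0.3 s at once so the loop stays responsive to the start/stop flags.
    next_time = 0.0
    while should_run():
        now = time.time()
        if now > next_time:
            action()
            next_time = now + 1.0 / rate_hz
        time.sleep(min(next_time - now, 0.3) if next_time > now else 0.3)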
import os
import string
import util
import datetime
import newdb
import datafiles
#import config

database = newdb.get_db()

__CAPITALIZATION_UPDATE_THRESHOLD = 0.03  # 3%


def __capEquals(previous, new):
    if previous == 0:
        return new == 0
    else:
        ratio = abs(new / previous)
        return 1 - __CAPITALIZATION_UPDATE_THRESHOLD <= ratio <= 1 + __CAPITALIZATION_UPDATE_THRESHOLD


def __barraDateToCompact(barraDate):
    return datetime.datetime.strptime(barraDate, "%d%b%Y")


#Comma separated; string attributes enclosed in double quotes. Floats always contain ".", otherwise integers.
def __getListFromBarraLine(line):
    tokens = line.strip().split(",")
    data = []
    try:
        for token in tokens:
            if token[0] == '"' or token[-1] == '"':
                #string. Logically this should be an "and", but "or" protects against
                #compustat putting commas inside quoted strings
                data.append(token.strip('"').strip())
            elif token.find(".") < 0:
                #integer
                data.append(int(token))
            else:
                #double
                data.append(float(token))
        return data
    except ValueError, e:
        util.error("Error processing line: {}".format(line))
        util.error(str(e))
        return []


def __removeUnwantedAttributes(data):
    if "BARRID" in data:
        del data["BARRID"]
    #del data["TICKER"]
    #del data["CUSIP"]
    #del data["NAME"]
    if "INTRA_MONTH_ADDITION" in data:
        del data["INTRA_MONTH_ADDITION"]


def insertBarraAttribute(datatype, barraid, date, source, attributeName, attributeValue, born,
                         backfill=0, compareWithRecent=False, valueEquals=(lambda x, y: x == y)):
    assert date.__class__ is long and born.__class__ is long
    assert len(barraid) == 7
    assert datatype in ("n", "s")

    table = database.BARRA + datatype
    attrType = database.getAttributeType(attributeName, source, datatype, table)

    if datatype == 'n':
        value = float(attributeValue)
    elif datatype == 's':
        value = str(attributeValue)[0:database.MAX_STR_LEN]

    if not compareWithRecent:
        updates = database.insertTimelineRow(table, {"barraid": barraid, "type": attrType, "date": date},
                                             {"value": value, "backfill": backfill}, born)
        database.updateAttributeStats(attrType, *updates)
    else:
        sqlWhere = "barraid=%(barraid)s AND type=%(type)s AND date<=%(date)s"
        if born is None:
            sqlWhere = sqlWhere + " AND died IS NULL"
        else:
            sqlWhere = sqlWhere + " AND born<=%(born)s AND (died IS NULL OR died>%(born)s)"
        params = {"barraid": barraid, "type": attrType, "date": date, "born": born}
        row = database.execute("SELECT value FROM {} WHERE {} ORDER BY date DESC,born DESC LIMIT 1".format(table, sqlWhere), params).fetchone()
        if row is None or not valueEquals(row["value"], value):
            updates = database.insertTimelineRow(table, {"barraid": barraid, "type": attrType, "date": date},
                                                 {"value": value, "backfill": backfill}, born)
            database.updateAttributeStats(attrType, *updates)

    #extra processing for TICKER,CUSIP
    if attributeName == "TICKER":
        database.killOrDeleteTimelineRow(database.BARRA + "xref", {"xref_type": 2, "value": attributeValue}, date)
        database.insertTimelineRow(database.BARRA + "xref", {"barraid": barraid, "xref_type": 2},
                                   {"value": attributeValue}, date)
    elif attributeName == "CUSIP":
        database.killOrDeleteTimelineRow(database.BARRA + "xref", {"xref_type": 1, "value": util.cusip8to9(attributeValue)}, date)
        database.insertTimelineRow(database.BARRA + "xref", {"barraid": barraid, "xref_type": 1},
                                   {"value": util.cusip8to9(attributeValue)}, date)


def updateBarraRef(source, barraid, cusip, timestamp, historical):
    #get existing barraref
    refTable = database.BARRA + "ref"
    refTable = refTable + "_hist" if historical else refTable
    code = database.getAttributeType("BARRAID", source, "s", refTable)
    row = 
database.getTimelineRow(refTable, {"barraid": barraid}, timestamp)
    barraSecid = None if row is None else row["secid"]

    #get the implied mapping based on cusip
    cusipSecid = database.getSecidFromXref("CUSIP", cusip, timestamp, "compustat_idhist", newdb.xrefsolve.preferUS)

    if barraSecid is None and cusipSecid is None:
        return None
    elif barraSecid is None and cusipSecid is not None:
        #database.insertTimelineRow(refTable, {"secid":cusipSecid}, {"barraid":barraid}, timestamp)
        updates = database.killOrDeleteTimelineRow(refTable, {"secid": cusipSecid}, timestamp)
        database.updateAttributeStats(code, *updates)
        updates = database.insertTimelineRow(refTable, {"barraid": barraid}, {"secid": cusipSecid}, timestamp)
        database.updateAttributeStats(code, *updates)
        return cusipSecid
    elif barraSecid is not None and cusipSecid is not None and barraSecid == cusipSecid:
        return barraSecid
    elif barraSecid is not None and cusipSecid is not None and barraSecid != cusipSecid:
        updates = database.killOrDeleteTimelineRow(refTable, {"secid": cusipSecid}, timestamp)
        database.updateAttributeStats(code, *updates)
        updates = database.insertTimelineRow(refTable, {"barraid": barraid}, {"secid": cusipSecid}, timestamp)
        database.updateAttributeStats(code, *updates)
        return cusipSecid
    else:  #barraSecid is not None and cusipSecid is None
        updates = database.killOrDeleteTimelineRow(refTable, {"barraid": barraid}, timestamp)  #only one should be needed
        database.updateAttributeStats(code, *updates)
        return None


#remove non-printable characters that may have crept into the name
def __printableString(name):
    #check first if it is printable
    printable = all(c in string.printable for c in name)
    if printable:
        return name
    else:
        newName = [c for c in name if c in string.printable]
        newName = ''.join(newName).strip()
        return newName


def verifyMappings(filePath, source):
    return process(filePath, source, True)


def process(filePath, source, verifyOnly=False):
    #process the RSK files for now
    if filePath.find(".RSK.") < 0:
        return

    file = open(filePath, "r")

    #The first 2 lines should be the pricedate and the modeldate for daily files.
    #For the monthly files it is just the model date.
    #Check whether it is a daily or a monthly file: only daily files start with PriceDate.
    firstLine = file.readline()
    if "PriceDate" in firstLine:
        daily = True
        file.seek(0)  #get to the first line again

        tokens = file.readline().strip().split(":")
        if tokens[0] != "PriceDate":
            util.error("It doesn't seem like a barra daily format")
            raise Exception
        else:
            priceDate = __barraDateToCompact(tokens[1].strip())

        tokens = file.readline().strip().split(":")
        if tokens[0] != "ModelDate":
            util.error("It doesn't seem like a barra daily format")
            raise Exception
        else:
            modelDate = __barraDateToCompact(tokens[1].strip())
    else:
        daily = False
        file.seek(0)  #get to the first line again

        token = file.readline().strip()
        priceDate = __barraDateToCompact(token)
        modelDate = __barraDateToCompact(token)

    # If we have acquisition times, use these for real born time.
# Else, use the priceDate + 1 day
    fileInfo = datafiles.read_info_file(filePath)
    if fileInfo['date_last_absent'] is not None:
        timestamp = util.convert_date_to_millis(fileInfo['date_first_present'])
        backfill = 0
    else:
        if daily:
            date = priceDate + datetime.timedelta(days=1)
        else:
            date = priceDate + datetime.timedelta(days=2)
        timestamp = util.convert_date_to_millis(date.strftime("%Y%m%d"))
        backfill = 1

    database.setAttributeAutoCreate(True)

    priceDate = util.convert_date_to_millis(priceDate)
    modelDate = util.convert_date_to_millis(modelDate)

    #get the header names. comma separated, surrounded by double quotes
    line = file.readline()
    headers = __getListFromBarraLine(line)

    for line in file:
        data = __getListFromBarraLine(line)

        if len(data) != len(headers):
            util.warning("Skipping bad line: {}".format(line))
            continue

        data = dict(zip(headers, data))

        barraid = data["BARRID"]
        cusip = util.cusip8to9(data["CUSIP"])

        #updateBarraRef(barraid, cusip, timestamp, False)
        updateBarraRef(source, barraid, cusip, priceDate, True)

        #Now, insert barra attributes and attribute values
        __removeUnwantedAttributes(data)
        for attributeName, attributeValue in data.iteritems():
            if isinstance(attributeValue, str):
                table = "s"
            elif isinstance(attributeValue, (int, float)):
                table = "n"
            else:
                util.error("Attribute values should be either int, float or str")
                raise Exception

            #With the exception of capitalization and price, the barra attributes are
            #evaluated monthly; for them, the date should be the model date. Price we
            #ignore, while for capitalization we only create a new tuple if it has
            #changed by more than a threshold since the last date for which we have one.
            if attributeName == "PRICE":
                continue
            elif attributeName == "CAPITALIZATION":
                insertBarraAttribute("n", barraid, priceDate, source, attributeName, attributeValue, timestamp, backfill, True, __capEquals)
            elif attributeName in ("TICKER", "CUSIP", "NAME"):
                #protect against crappy names
                if attributeName == "NAME":
                    attributeValue = __printableString(attributeValue)
                insertBarraAttribute("s", barraid, priceDate, source, attributeName, attributeValue, timestamp, backfill, True)
            else:
                insertBarraAttribute(table, barraid, modelDate, source, attributeName, attributeValue, timestamp, backfill)

    file.close()


def regenerateMappings():
    #get the cusips
    rows = database.execute("SELECT * FROM {} WHERE type={} ORDER BY born,barraid".format(database.BARRA + "s", database.getAttributeType("CUSIP", "barra", None, None))).fetchall()
    for row in rows:
        #kill whoever owned the cusip
        database.killOrDeleteTimelineRow("barra_xref", {"xref_type": 1, "value": util.cusip8to9(row["value"])}, row["date"])
        database.insertTimelineRow("barra_xref", {"barraid": row["barraid"], "xref_type": 1}, {"value": util.cusip8to9(row["value"])}, row["date"])

    #get the tickers
    rows = database.execute("SELECT * FROM {} WHERE type={} ORDER BY born,barraid".format(database.BARRA + "s", database.getAttributeType("TICKER", "barra", None, None))).fetchall()
    for row in rows:
        #kill whoever owned the ticker
        database.killOrDeleteTimelineRow("barra_xref", {"xref_type": 2, "value": row["value"]}, row["date"])
        database.insertTimelineRow("barra_xref", {"barraid": row["barraid"], "xref_type": 2}, {"value": row["value"]}, row["date"])


if __name__ == "__main__":
    #amend barra data and add the INDNAME values
    newdb.init_db(os.environ["DB_CONFIG_FILE"])
    database = newdb.get_db()

    #collect all the files processed so far
    processedFiles = 
database.getProcessedFilesTimeOrdered("barra")

    #database.start_transaction()
    try:
        database.start_transaction()
        regenerateMappings()
        database.commit()
#        i = 0
#        for file in processedFiles:
#            if file=="20100401/USE3S1003.RSK.439dbb03":
#                continue
#            path="/".join((os.environ["DATA_DIR"], "barra","use3s_init_load", file))
#            print datetime.datetime.now(), file
#            if not os.path.exists(path):
#                print "Not found, looking in other directory"
#                path="/".join((os.environ["DATA_DIR"], "barra","use3s_daily", file))
#                if not os.path.exists(path):
#                    print "Not found, looking in other directory"
#                    path="/".join((os.environ["DATA_DIR"], "barra","use3s_monthly", file))
#                    if not os.path.exists(path):
#                        print "Not found"
#                        continue
#            database.start_transaction()
#            process(path, "barra")
#            database.commit()
    except Exception, e:
        print e
        database.rollback()
#    else:
#        database.commit()
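A small, self-contained restatement of the capitalization threshold logic above, with hypothetical numbers (the public name cap_equals is illustrative; the module keeps it private):

THRESHOLD = 0.03

def cap_equals(previous, new):
    # Treat the new capitalization as "unchanged" if it is within 3% of the old one.
    if previous == 0:
        return new == 0
    return 1 - THRESHOLD <= abs(new / previous) <= 1 + THRESHOLD

assert cap_equals(100.0, 103.0)      # a 3% move is still "equal": no new timeline row
assert not cap_equals(100.0, 104.0)  # a 4% move triggers a new timeline row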
import io
from datetime import datetime
from typing import Union, Any, Callable, Tuple, List, Coroutine, Optional
from uuid import UUID

import discord
from PIL import Image
from discord import Embed, User, Member, Permissions

__all__ = [
    'create_embed',
    'guess_user_nitro_status',
    'user_friendly_dt',
    'format_perms',
    'hierarchy_check',
    'shorten_below_number',
    'multi_punish',
    'punish_embed',
    'is_uuid4',
    'format_deleted_msg',
    'str_to_file',
    'fix_url',
    'solid_color_image'
]


def create_embed(user: Optional[Union[Member, User]], *, image=Embed.Empty, thumbnail=Embed.Empty, **kwargs) -> Embed:
    """Makes a discord.Embed with options for image and thumbnail URLs, and adds a footer with the author's name"""
    kwargs['color'] = kwargs.get('color', discord.Color.green())
    embed = discord.Embed(**kwargs)
    embed.set_image(url=fix_url(image))
    embed.set_thumbnail(url=fix_url(thumbnail))
    if user:
        embed.set_footer(text=f'Command sent by {user}', icon_url=fix_url(user.display_avatar))
    return embed


def guess_user_nitro_status(user: Union[User, Member]) -> bool:
    """Guess if a user or member has Discord Nitro"""
    if isinstance(user, Member):
        has_emote_status = any([a.emoji.is_custom_emoji() for a in user.activities if getattr(a, 'emoji', None)])
        return any([user.display_avatar.is_animated(), has_emote_status, user.premium_since])
    return any([user.display_avatar.is_animated(), user.banner])


def user_friendly_dt(dt: datetime):
    """Format a datetime as "short_date (relative_date)" """
    return discord.utils.format_dt(dt, style='f') + f' ({discord.utils.format_dt(dt, style="R")})'


def format_perms(permissions: Permissions) -> str:
    perms_list = [p.title().replace('_', ' ') for p, v in iter(permissions) if v]
    return '\n'.join(perms_list)


def hierarchy_check(mod: Member, user: Union[Member, User]) -> bool:
    """Check if a moderator and the bot can punish a user/member"""
    if isinstance(user, User):
        return True
    return mod.top_role > user.top_role and mod.guild.me.top_role > user.top_role and user != mod.guild.owner


def shorten_below_number(_list: List[Any], *, separator: str = '\n', number: int = 1000):
    shortened = ''
    while _list and len(shortened) + len(str(_list[0])) <= number:
        shortened += str(_list.pop(0)) + separator
    return shortened[:-len(separator)]


USER_LIST = List[Union[Member, User]]


async def multi_punish(
        mod: Member,
        users: USER_LIST,
        func: Callable[[Union[Member, User], Any], Coroutine[Any, Any, Any]],
        **kwargs
) -> Tuple[USER_LIST, USER_LIST]:
    punished = []
    not_punished = [user for user in users if not hierarchy_check(mod, user)]

    users = [user for user in users if user not in not_punished]

    for user in users:
        try:
            await func(user, **kwargs)
            punished.append(user)
        except (discord.Forbidden, discord.HTTPException):
            not_punished.append(user)

    return punished, not_punished


def punish_embed(mod: Member, punishment: str, reason: str, punish_lists: Tuple[USER_LIST, USER_LIST]) -> Embed:
    punished, not_punished = punish_lists
    punished, not_punished = punished.copy(), not_punished.copy()

    if not punished:
        return create_embed(mod,
                            title=f'Users couldn\'t be {punishment}!',
                            description=f'The bot wasn\'t able to {punishment} any users! '
                                        'Maybe their role is higher than yours, or higher than this bot\'s roles.',
                            color=discord.Color.red())

    if not_punished:
        embed = create_embed(mod,
                             title=f'Some users couldn\'t be {punishment}!',
                             description=f'{len(punished)} users were {punishment} for "{reason[:1000]}"\n'
                                         f'{len(not_punished)} users couldn\'t be punished, '
                                         f'maybe their role is higher than yours, 
or higher than this bot\'s roles.', color=discord.Color.orange()) embed.add_field(name=f'Users not {punishment}:', value=shorten_below_number(not_punished)) else: embed = create_embed(mod, title=f'Users successfully {punishment}!', description=f'{len(punished)} users were {punishment} for "{reason[:1000]}"') embed.add_field(name=f'Users {punishment}:', value=shorten_below_number(punished)) return embed def is_uuid4(string: str) -> bool: try: uuid = UUID(string, version=4) except ValueError: return False return uuid.hex == string def str_to_file(string: str, *, filename: str = 'file.txt', encoding: str = 'utf-8') -> discord.File: """Converts a given str to a discord.File ready for sending""" _bytes = bytes(string, encoding) buffer = io.BytesIO(_bytes) file = discord.File(buffer, filename=filename) return file def format_deleted_msg(message: discord.Message, title: Optional[str] = None) -> discord.Embed: emote = '<:messagedelete:887729903317946388>' reply = message.reference if reply: reply = reply.resolved reply_deleted = isinstance(reply, discord.DeletedReferencedMessage) embed = discord.Embed( title=f'{emote} {title}' if title else f'{emote} Message deleted in #{message.channel}', description=f'"{message.content}"' if message.content else '*No content*', color=discord.Color.red() ) embed.set_author(name=f'{message.author}: {message.author.id}', icon_url=fix_url(message.author.display_avatar)) if message.attachments: if message.attachments[0].filename.endswith(('png', 'jpg', 'jpeg', 'gif', 'webp')): embed.set_image(url=fix_url(message.attachments[0].proxy_url)) file_urls = [f'[{file.filename}]({file.proxy_url})' for file in message.attachments] embed.add_field(name='Deleted files:', value=f'\n'.join(file_urls)) embed.add_field( name=f'Message created at:', value=user_friendly_dt(message.created_at), inline=False ) if reply: if reply_deleted: msg = 'Replied message has been deleted.' else: msg = f'Replied to {reply.author} - [Link to replied message]({reply.jump_url} "Jump to Message")' embed.add_field(name='Message reply:', value=msg) embed.add_field(name='Message channel:', value=message.channel.mention, inline=False) return embed def fix_url(url: Any): if not url or url == discord.Embed.Empty: return discord.Embed.Empty return str(url) def solid_color_image(color: tuple): buffer = io.BytesIO() image = Image.new('RGB', (80, 80), color) image.save(buffer, 'png') buffer.seek(0) return buffer
""" MIT License Copyright (c) 2020 Myer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import aiohttp import discord import humanfriendly import lastfmpy import textwrap from PIL import Image, ImageDraw, ImageFont from bs4 import BeautifulSoup from discord.ext import commands, menus class LastFM(commands.Cog): def __init__(self, bot): self.bot = bot self.image_default = 900 self.image_default_size = 900, 900 self.template = Image.open("static/nowplaying.png") # width = 400, height = 150 # the box on the template is 10 pixels down, 10 pixels to the right self.url_cache = {} self.image_cache = {} self.font = ImageFont.truetype("static/calibri.ttf", 20) self.font_small = ImageFont.truetype("static/calibri.ttf", 14) @commands.group(aliases=["fm"], invoke_without_command=True) async def lastfm(self, ctx): return @lastfm.command(aliases=["verify", "link"]) @commands.max_concurrency(1, per=commands.BucketType.user) async def set(self, ctx, username): user = await ctx.bot.lastfm.client.user.get_info(username) ctx.bot.data.users.set(ctx.author.id, "lastfm", user.name) return await ctx.reply(embed=ctx.bot.static.embed(ctx, f"Verified your Last.FM account as `{user.name}`")) @lastfm.command(aliases=["unverify", "unlink"]) @commands.max_concurrency(1, per=commands.BucketType.user) async def unset(self, ctx): reset = ctx.bot.data.users.delete(ctx.author.id, "lastfm") return await ctx.reply(embed=ctx.bot.static.embed(ctx, f"Unverified your Last.FM account `{reset}`" if reset else "Your Last.FM account was not set!")) @lastfm.command() @commands.max_concurrency(1, per=commands.BucketType.user) async def recent(self, ctx, username=None): username = await ctx.bot.lastfm.get_username(ctx=ctx, username=username) recent = await ctx.bot.lastfm.client.user.get_recent_tracks(user=username) tracks = [f"`{track.name} - " f"{track.artist.name}: " f"{"(now playing)" if track.playing else f"({humanfriendly.format_timespan(ctx.bot.static.time() - track.played, max_units=2)} ago)'}`" for track in recent.items] await menus.MenuPages( source=ctx.bot.static.paginators.regular(tracks, ctx, discord.Embed( title=f"{username}'s Recent Tracks", color=ctx.author.color, timestamp=ctx.message.created_at ).set_footer( text="Recently played", )), clear_reactions_after=True ).start(ctx) @lastfm.command(aliases=["np"]) @commands.max_concurrency(1, per=commands.BucketType.user) async def now(self, ctx, username=None): # any raw numbers used here are simply magic numbers based on testing with the template image username = await ctx.bot.lastfm.get_username(ctx=ctx, 
username=username) now = await ctx.bot.lastfm.client.user.get_now_playing(username) if now: now_full = await self.try_get_track(artist=now.artist.name, track=now.name, username=username) cover = ctx.bot.static.image_to_pil(await ctx.bot.static.get_image(now.image[-1].url)) cover = cover.resize((129, 129)) image = self.template.copy() draw = ImageDraw.Draw(image) image.paste(cover, (11, 11)) if bool(now_full): playcount_string = f"{now_full.stats.userplaycount} plays" draw.text(( self.get_playcount_x(self.font_small, playcount_string), 125 ), playcount_string, font=self.font_small) string = f"{now.artist.name} ― {now.name}" string_wrapped = [line for line in textwrap.wrap(string, 25, break_on_hyphens=False, max_lines=6)] total_height = 0 for line in string_wrapped: _, height = self.font.getsize(line) total_height += height + 6 height, y = self.get_intial_y(total_height) for line in string_wrapped: x = self.get_x(self.font, line) draw.text((x, y,), line, font=self.font) y += self.font.size + 3 await ctx.reply(file=discord.File(ctx.bot.static.image_to_bytes(image), filename="np.png")) else: await ctx.reply(embed=ctx.bot.static.embed(ctx, description="Not currently playing anything")) @lastfm.command(aliases=["servernp"]) @commands.max_concurrency(1, per=commands.BucketType.user) async def servernow(self, ctx): async with ctx.typing(): users = self.get_server_lastfm(ctx) tracks = [] for member, user in users: now = await ctx.bot.lastfm.client.user.get_now_playing(user) if bool(now): now_full = await self.try_get_track(artist=now.artist.name, track=now.name, username=user) string = f"{member.mention}: `{now.artist.name} - {now.name}{f" ({now_full.stats.userplaycount} plays)`" if bool(now_full) else "`"}" tracks.append(string) if not tracks: return await ctx.reply(embed=ctx.bot.static.embed(ctx, f"No one in {ctx.guild} is listening to anything")) await menus.MenuPages(source=ctx.bot.static.paginators.regular(tracks, ctx, discord.Embed( title=f"{ctx.guild}'s Now Playing", color=ctx.author.color, timestamp=ctx.message.created_at ).set_footer( text="Recently played", )), clear_reactions_after=True).start(ctx) @lastfm.command(aliases=["wk"]) @commands.max_concurrency(1, per=commands.BucketType.user) async def whoknows(self, ctx, *, artist=None): async with ctx.typing(): if not artist: username = await ctx.bot.lastfm.get_username(ctx=ctx) now = await ctx.bot.lastfm.client.user.get_now_playing(username) if not bool(now): return await ctx.reply(embed=ctx.bot.static.embed(ctx, f"Not currently playing anything")) artist = now.artist else: artist = await ctx.bot.lastfm.client.artist.get_info(artist=artist) users = self.get_server_lastfm(ctx) knows = [] counts = [] for member, user in users: artist_full = await ctx.bot.lastfm.client.artist.get_info(artist=artist.name, username=user) if bool(artist_full.stats.userplaycount): string = f"{member.mention}: `{artist_full.name} ({artist_full.stats.userplaycount} plays)`" knows.append(string) counts.append(artist_full.stats.userplaycount) knows.sort(key=dict(zip(knows, counts)).get, reverse=True) if not knows: return await ctx.reply(embed=ctx.bot.static.embed(ctx, f"No one in {ctx.guild} knows `{artist}`")) await menus.MenuPages( source=ctx.bot.static.paginators.regular(knows, ctx, discord.Embed( title=f"Who In {ctx.guild} Knows {artist}", color=ctx.author.color, timestamp=ctx.message.created_at ).set_footer( text="Who Knows", ))).start(ctx) @lastfm.group(invoke_without_command=True) @commands.max_concurrency(1, per=commands.BucketType.user) async def chart(self, 
ctx, first=None, second=3): # first and second argument, if a number is in the first argument then take it as the per value # otherwise, take first argument as username and second argument as the per value async with ctx.typing(): if bool(first) and first.isdigit(): second = int(first) first = None first = await ctx.bot.lastfm.get_username(ctx=ctx, username=first) chart = await ctx.bot.lastfm.client.user.get_weekly_album_chart(first) images = await self.get_image_pil(await self.scrape_images(chart.items[:second ** 2])) # per ** 2 is the maximum amount of images that could be displayed final = ctx.bot.static.image_to_bytes(self.merge_images(images, per=second)) await ctx.reply(file=discord.File(final, filename="chart.png")) @chart.command() @commands.max_concurrency(1, per=commands.BucketType.user) async def artist(self, ctx, first=None, second=3): # first and second argument, if a number is in the first argument then take it as the per value # otherwise, take first argument as username and second argument as the per value async with ctx.typing(): if bool(first) and first.isdigit(): second = int(first) first = None first = await ctx.bot.lastfm.get_username(ctx=ctx, username=first) chart = await ctx.bot.lastfm.client.user.get_weekly_artist_chart(first) images = await self.get_image_pil(await self.scrape_images(chart.items[:second ** 2])) # per ** 2 is the maximum amount of images that could be displayed final = ctx.bot.static.image_to_bytes(self.merge_images(images, per=second)) await ctx.reply(file=discord.File(final, filename="chart.png")) async def try_get_track(self, artist=None, track=None, username=None): try: return await self.bot.lastfm.client.track.get_info(track=track, artist=artist, username=username) except lastfmpy.InvalidInputError: return async def scrape_images(self, albums: list) -> list: urls = [] async with aiohttp.ClientSession() as session: for album in albums: if url := self.url_cache.get(album.url): urls.append(url) else: html = await session.get(album.url) html = BeautifulSoup(await html.read(), "html.parser") url = html.find("meta", property="og:image")["content"] self.url_cache[album.url] = html.find("meta", property="og:image")["content"] urls.append(url) return urls async def get_image_pil(self, images: list) -> list: files = [] for image in images: if file := self.image_cache.get(image): files.append(file) else: file = self.bot.static.image_to_pil(await self.bot.static.get_image(image)) files.append(file) self.image_cache[image] = file return files def merge_images(self, images: list, per: int = 3) -> Image: final = Image.new("RGB", size=self.image_default_size) x = 0 y = 0 pixels = self.image_default // per for image in images: image = image.resize((pixels, pixels)) final.paste(image, (x, y)) if x == self.image_default or x + pixels == self.image_default: x = 0 else: x += pixels continue if y == self.image_default or y + pixels == self.image_default: y = 0 else: y += pixels return final @staticmethod def get_server_lastfm(ctx): users = [] for member in ctx.guild.members: if lastfm := ctx.bot.data.users.get(member.id).lastfm: users.append((member, lastfm)) return users # these are all magic number functions @staticmethod def get_intial_y(height): center_of_middle = 150 / 2 y = center_of_middle - (height / 2) return height, y @staticmethod def get_x(font, string): center_of_right = 145 + (250 / 2) width, height = font.getsize(string) x = center_of_right - (width / 2) return x @staticmethod def get_playcount_x(font, string): width, height = font.getsize(string) x = 
400 - 15 - width return x def setup(bot): bot.add_cog(LastFM(bot)) print("COGS > Reloaded cogs.lastfm")
""" MIT License Copyright (c) 2020 Myer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import aiohttp import discord import humanfriendly import lastfmpy import textwrap from PIL import Image, ImageDraw, ImageFont from bs4 import BeautifulSoup from discord.ext import commands, menus class LastFM(commands.Cog): def __init__(self, bot): self.bot = bot self.image_default = 900 self.image_default_size = 900, 900 self.template = Image.open("static/nowplaying.png") # width = 400, height = 150 # the box on the template is 10 pixels down, 10 pixels to the right self.url_cache = {} self.image_cache = {} self.font = ImageFont.truetype("static/calibri.ttf", 20) self.font_small = ImageFont.truetype("static/calibri.ttf", 14) @commands.group(aliases=["fm"], invoke_without_command=True) async def lastfm(self, ctx): return @lastfm.command(aliases=["verify", "link"]) @commands.max_concurrency(1, per=commands.BucketType.user) async def set(self, ctx, username): user = await ctx.bot.lastfm.client.user.get_info(username) ctx.bot.data.users.set(ctx.author.id, "lastfm", user.name) return await ctx.reply(embed=ctx.bot.static.embed(ctx, f"Verified your Last.FM account as `{user.name}`")) @lastfm.command(aliases=["unverify", "unlink"]) @commands.max_concurrency(1, per=commands.BucketType.user) async def unset(self, ctx): reset = ctx.bot.data.users.delete(ctx.author.id, "lastfm") return await ctx.reply(embed=ctx.bot.static.embed(ctx, f"Unverified your Last.FM account `{reset}`" if reset else "Your Last.FM account was not set!")) @lastfm.command() @commands.max_concurrency(1, per=commands.BucketType.user) async def recent(self, ctx, username=None): username = await ctx.bot.lastfm.get_username(ctx=ctx, username=username) recent = await ctx.bot.lastfm.client.user.get_recent_tracks(user=username) tracks = [f"`{track.name} - " f"{track.artist.name}: " f"{'(now playing)' if track.playing else f'({humanfriendly.format_timespan(ctx.bot.static.time() - track.played, max_units=2)} ago)'}`" for track in recent.items] await menus.MenuPages( source=ctx.bot.static.paginators.regular(tracks, ctx, discord.Embed( title=f"{username}'s Recent Tracks", color=ctx.author.color, timestamp=ctx.message.created_at ).set_footer( text="Recently played", )), clear_reactions_after=True ).start(ctx) @lastfm.command(aliases=["np"]) @commands.max_concurrency(1, per=commands.BucketType.user) async def now(self, ctx, username=None): # any raw numbers used here are simply magic numbers based on testing with the template image username = await ctx.bot.lastfm.get_username(ctx=ctx, 
username=username) now = await ctx.bot.lastfm.client.user.get_now_playing(username) if now: now_full = await self.try_get_track(artist=now.artist.name, track=now.name, username=username) cover = ctx.bot.static.image_to_pil(await ctx.bot.static.get_image(now.image[-1].url)) cover = cover.resize((129, 129)) image = self.template.copy() draw = ImageDraw.Draw(image) image.paste(cover, (11, 11)) if bool(now_full): playcount_string = f"{now_full.stats.userplaycount} plays" draw.text(( self.get_playcount_x(self.font_small, playcount_string), 125 ), playcount_string, font=self.font_small) string = f"{now.artist.name} ― {now.name}" string_wrapped = [line for line in textwrap.wrap(string, 25, break_on_hyphens=False, max_lines=6)] total_height = 0 for line in string_wrapped: _, height = self.font.getsize(line) total_height += height + 6 height, y = self.get_intial_y(total_height) for line in string_wrapped: x = self.get_x(self.font, line) draw.text((x, y,), line, font=self.font) y += self.font.size + 3 await ctx.reply(file=discord.File(ctx.bot.static.image_to_bytes(image), filename="np.png")) else: await ctx.reply(embed=ctx.bot.static.embed(ctx, description="Not currently playing anything")) @lastfm.command(aliases=["servernp"]) @commands.max_concurrency(1, per=commands.BucketType.user) async def servernow(self, ctx): async with ctx.typing(): users = self.get_server_lastfm(ctx) tracks = [] for member, user in users: now = await ctx.bot.lastfm.client.user.get_now_playing(user) if bool(now): now_full = await self.try_get_track(artist=now.artist.name, track=now.name, username=user) string = f"{member.mention}: `{now.artist.name} - {now.name}{f' ({now_full.stats.userplaycount} plays)`' if bool(now_full) else '`'}" tracks.append(string) if not tracks: return await ctx.reply(embed=ctx.bot.static.embed(ctx, f"No one in {ctx.guild} is listening to anything")) await menus.MenuPages(source=ctx.bot.static.paginators.regular(tracks, ctx, discord.Embed( title=f"{ctx.guild}'s Now Playing", color=ctx.author.color, timestamp=ctx.message.created_at ).set_footer( text="Recently played", )), clear_reactions_after=True).start(ctx) @lastfm.command(aliases=["wk"]) @commands.max_concurrency(1, per=commands.BucketType.user) async def whoknows(self, ctx, *, artist=None): async with ctx.typing(): if not artist: username = await ctx.bot.lastfm.get_username(ctx=ctx) now = await ctx.bot.lastfm.client.user.get_now_playing(username) if not bool(now): return await ctx.reply(embed=ctx.bot.static.embed(ctx, f"Not currently playing anything")) artist = now.artist else: artist = await ctx.bot.lastfm.client.artist.get_info(artist=artist) users = self.get_server_lastfm(ctx) knows = [] counts = [] for member, user in users: artist_full = await ctx.bot.lastfm.client.artist.get_info(artist=artist.name, username=user) if bool(artist_full.stats.userplaycount): string = f"{member.mention}: `{artist_full.name} ({artist_full.stats.userplaycount} plays)`" knows.append(string) counts.append(artist_full.stats.userplaycount) knows.sort(key=dict(zip(knows, counts)).get, reverse=True) if not knows: return await ctx.reply(embed=ctx.bot.static.embed(ctx, f"No one in {ctx.guild} knows `{artist}`")) await menus.MenuPages( source=ctx.bot.static.paginators.regular(knows, ctx, discord.Embed( title=f"Who In {ctx.guild} Knows {artist}", color=ctx.author.color, timestamp=ctx.message.created_at ).set_footer( text="Who Knows", ))).start(ctx) @lastfm.group(invoke_without_command=True) @commands.max_concurrency(1, per=commands.BucketType.user) async def chart(self, 
ctx, first=None, second=3): # first and second argument, if a number is in the first argument then take it as the per value # otherwise, take first argument as username and second argument as the per value async with ctx.typing(): if bool(first) and first.isdigit(): second = int(first) first = None first = await ctx.bot.lastfm.get_username(ctx=ctx, username=first) chart = await ctx.bot.lastfm.client.user.get_weekly_album_chart(first) images = await self.get_image_pil(await self.scrape_images(chart.items[:second ** 2])) # per ** 2 is the maximum amount of images that could be displayed final = ctx.bot.static.image_to_bytes(self.merge_images(images, per=second)) await ctx.reply(file=discord.File(final, filename="chart.png")) @chart.command() @commands.max_concurrency(1, per=commands.BucketType.user) async def artist(self, ctx, first=None, second=3): # first and second argument, if a number is in the first argument then take it as the per value # otherwise, take first argument as username and second argument as the per value async with ctx.typing(): if bool(first) and first.isdigit(): second = int(first) first = None first = await ctx.bot.lastfm.get_username(ctx=ctx, username=first) chart = await ctx.bot.lastfm.client.user.get_weekly_artist_chart(first) images = await self.get_image_pil(await self.scrape_images(chart.items[:second ** 2])) # per ** 2 is the maximum amount of images that could be displayed final = ctx.bot.static.image_to_bytes(self.merge_images(images, per=second)) await ctx.reply(file=discord.File(final, filename="chart.png")) async def try_get_track(self, artist=None, track=None, username=None): try: return await self.bot.lastfm.client.track.get_info(track=track, artist=artist, username=username) except lastfmpy.InvalidInputError: return async def scrape_images(self, albums: list) -> list: urls = [] async with aiohttp.ClientSession() as session: for album in albums: if url := self.url_cache.get(album.url): urls.append(url) else: html = await session.get(album.url) html = BeautifulSoup(await html.read(), "html.parser") url = html.find("meta", property="og:image")["content"] self.url_cache[album.url] = html.find("meta", property="og:image")["content"] urls.append(url) return urls async def get_image_pil(self, images: list) -> list: files = [] for image in images: if file := self.image_cache.get(image): files.append(file) else: file = self.bot.static.image_to_pil(await self.bot.static.get_image(image)) files.append(file) self.image_cache[image] = file return files def merge_images(self, images: list, per: int = 3) -> Image: final = Image.new("RGB", size=self.image_default_size) x = 0 y = 0 pixels = self.image_default // per for image in images: image = image.resize((pixels, pixels)) final.paste(image, (x, y)) if x == self.image_default or x + pixels == self.image_default: x = 0 else: x += pixels continue if y == self.image_default or y + pixels == self.image_default: y = 0 else: y += pixels return final @staticmethod def get_server_lastfm(ctx): users = [] for member in ctx.guild.members: if lastfm := ctx.bot.data.users.get(member.id).lastfm: users.append((member, lastfm)) return users # these are all magic number functions @staticmethod def get_intial_y(height): center_of_middle = 150 / 2 y = center_of_middle - (height / 2) return height, y @staticmethod def get_x(font, string): center_of_right = 145 + (250 / 2) width, height = font.getsize(string) x = center_of_right - (width / 2) return x @staticmethod def get_playcount_x(font, string): width, height = font.getsize(string) x = 
400 - 15 - width return x def setup(bot): bot.add_cog(LastFM(bot)) print("COGS > Reloaded cogs.lastfm")
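# --- illustrative sketch (not part of the cog above) ----------------------
# Standalone demo of the grid layout merge_images implements: a `per` x `per`
# collage where each tile is (canvas // per) pixels square. Row-major index
# arithmetic replaces the cog's running x/y counters; the tile colours are
# arbitrary sample data.
from PIL import Image

def demo_collage(colors, per=3, canvas=900):
    final = Image.new("RGB", (canvas, canvas))
    tile = canvas // per
    for idx, color in enumerate(colors[:per * per]):  # per**2 tiles max
        x, y = (idx % per) * tile, (idx // per) * tile
        final.paste(Image.new("RGB", (tile, tile), color), (x, y))
    return final

collage = demo_collage([(255, 0, 0), (0, 255, 0), (0, 0, 255)] * 3)
assert collage.size == (900, 900)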
from collections import Counter from copy import copy import json import numpy as np import re import logging from dadmatools.models.common.utils import ud_scores, harmonic_mean from dadmatools.utils.conll import CoNLL from dadmatools.models.common.doc import * logger = logging.getLogger('stanza') def load_mwt_dict(filename): if filename is not None: with open(filename, 'r') as f: mwt_dict0 = json.load(f) mwt_dict = dict() for item in mwt_dict0: (key, expansion), count = item if key not in mwt_dict or mwt_dict[key][1] < count: mwt_dict[key] = (expansion, count) return mwt_dict else: return def process_sentence(sentence, mwt_dict=None): sent = [] i = 0 for tok, p, position_info in sentence: expansion = None if (p == 3 or p == 4) and mwt_dict is not None: # MWT found, (attempt to) expand it! if tok in mwt_dict: expansion = mwt_dict[tok][0] elif tok.lower() in mwt_dict: expansion = mwt_dict[tok.lower()][0] if expansion is not None: sent.append({ID: (i+1, i+len(expansion)), TEXT: tok}) if position_info is not None: sent[-1][START_CHAR] = position_info[0] sent[-1][END_CHAR] = position_info[1] for etok in expansion: sent.append({ID: (i+1, ), TEXT: etok}) i += 1 else: if len(tok) <= 0: continue sent.append({ID: (i+1, ), TEXT: tok}) if position_info is not None: sent[-1][START_CHAR] = position_info[0] sent[-1][END_CHAR] = position_info[1] if p == 3 or p == 4:# MARK sent[-1][MISC] = 'MWT=Yes' i += 1 return sent # https://stackoverflow.com/questions/201323/how-to-validate-an-email-address-using-a-regular-expression EMAIL_RAW_RE = r"""(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(?:2(?:5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(?:2(?:5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])""" # https://stackoverflow.com/questions/3809401/what-is-a-good-regular-expression-to-match-a-url # modification: disallow " as opposed to all ^\s URL_RAW_RE = r"""(?:https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s"]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s"]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s"]{2,}|www\.[a-zA-Z0-9]+\.[^\s"]{2,})""" MASK_RE = re.compile(f"(?:{EMAIL_RAW_RE}|{URL_RAW_RE})") def find_spans(raw): """ Return spans of text which don't contain <PAD> and are split by <PAD> """ pads = [idx for idx, char in enumerate(raw) if char == '<PAD>'] if len(pads) == 0: spans = [(0, len(raw))] else: prev = 0 spans = [] for pad in pads: if pad != prev: spans.append( (prev, pad) ) prev = pad + 1 if prev < len(raw): spans.append( (prev, len(raw)) ) return spans def update_pred_regex(raw, pred): """ Update the results of a tokenization batch by checking the raw text against a couple regular expressions Currently, emails and urls are handled TODO: this might work better as a constraint on the inference for efficiency pred is modified in place """ spans = find_spans(raw) for span_begin, span_end in spans: text = "".join(raw[span_begin:span_end]) for match in MASK_RE.finditer(text): match_begin, match_end = match.span() # first, update all characters touched by the regex to not split # with the exception of the last character... 
for char in range(match_begin+span_begin, match_end+span_begin-1): pred[char] = 0 # if the last character is not currently a split, make it a word split if pred[match_end+span_begin-1] == 0: pred[match_end+span_begin-1] = 1 return pred SPACE_RE = re.compile(r'\s') SPACE_SPLIT_RE = re.compile(r'( *[^ ]+)') def output_predictions(output_file, trainer, data_generator, vocab, mwt_dict, max_seqlen=1000, orig_text=None, no_ssplit=False, use_regex_tokens=True): paragraphs = [] for i, p in enumerate(data_generator.sentences): start = 0 if i == 0 else paragraphs[-1][2] length = sum([len(x[0]) for x in p]) paragraphs += [(i, start, start+length, length)] # para idx, start idx, end idx, length paragraphs = list(sorted(paragraphs, key=lambda x: x[3], reverse=True)) all_preds = [None] * len(paragraphs) all_raw = [None] * len(paragraphs) eval_limit = max(3000, max_seqlen) batch_size = trainer.args['batch_size'] skip_newline = trainer.args['skip_newline'] batches = int((len(paragraphs) + batch_size - 1) / batch_size) for i in range(batches): # At evaluation time, each paragraph is treated as a single "sentence", and a batch of `batch_size` paragraphs # are tokenized together. `offsets` here are used by the data generator to identify which paragraphs to use # for the next batch of evaluation. batchparas = paragraphs[i * batch_size : (i + 1) * batch_size] offsets = [x[1] for x in batchparas] batch = data_generator.next(eval_offsets=offsets) raw = batch[3] N = len(batch[3][0]) if N <= eval_limit: pred = np.argmax(trainer.predict(batch), axis=2) else: idx = [0] * len(batchparas) adv = [0] * len(batchparas) Ns = [p[3] for p in batchparas] pred = [[] for _ in batchparas] while True: ens = [min(N - idx1, eval_limit) for idx1, N in zip(idx, Ns)] en = max(ens) batch1 = batch[0][:, :en], batch[1][:, :en], batch[2][:, :en], [x[:en] for x in batch[3]] pred1 = np.argmax(trainer.predict(batch1), axis=2) for j in range(len(batchparas)): sentbreaks = np.where((pred1[j] == 2) + (pred1[j] == 4))[0] if len(sentbreaks) <= 0 or idx[j] >= Ns[j] - eval_limit: advance = ens[j] else: advance = np.max(sentbreaks) + 1 pred[j] += [pred1[j, :advance]] idx[j] += advance adv[j] = advance if all([idx1 >= N for idx1, N in zip(idx, Ns)]): break # once we've made predictions on a certain number of characters for each paragraph (recorded in `adv`), # we skip the first `adv` characters to make the updated batch batch = data_generator.next(eval_offsets=adv, old_batch=batch) pred = [np.concatenate(p, 0) for p in pred] for j, p in enumerate(batchparas): len1 = len([1 for x in raw[j] if x != '<PAD>']) if pred[j][len1-1] < 2: pred[j][len1-1] = 2 elif pred[j][len1-1] > 2: pred[j][len1-1] = 4 if use_regex_tokens: all_preds[p[0]] = update_pred_regex(raw[j], pred[j][:len1]) else: all_preds[p[0]] = pred[j][:len1] all_raw[p[0]] = raw[j] offset = 0 oov_count = 0 doc = [] text = SPACE_RE.sub(' ', orig_text) if orig_text is not None else None char_offset = 0 use_la_ittb_shorthand = trainer.args['shorthand'] == 'la_ittb' UNK_ID = vocab.unit2id('<UNK>') # Once everything is fed through the tokenizer model, it's time to decode the predictions # into actual tokens and sentences that the rest of the pipeline uses for j in range(len(paragraphs)): raw = all_raw[j] pred = all_preds[j] current_tok = '' current_sent = [] for t, p in zip(raw, pred): if t == '<PAD>': break # hack la_ittb if use_la_ittb_shorthand and t in (":", ";"): p = 2 offset += 1 if vocab.unit2id(t) == UNK_ID: oov_count += 1 current_tok += t if p >= 1: tok = vocab.normalize_token(current_tok) 
assert '\t' not in tok, tok if len(tok) <= 0: current_tok = '' continue if orig_text is not None: st = -1 tok_len = 0 for part in SPACE_SPLIT_RE.split(current_tok): if len(part) == 0: continue if skip_newline: part_pattern = re.compile(r'\s*'.join(re.escape(c) for c in part)) match = part_pattern.search(text, char_offset) st0 = match.start(0) - char_offset partlen = match.end(0) - match.start(0) else: st0 = text.index(part, char_offset) - char_offset partlen = len(part) lstripped = part.lstrip() if st < 0: st = char_offset + st0 + (len(part) - len(lstripped)) char_offset += st0 + partlen position_info = (st, char_offset) else: position_info = None current_sent.append((tok, p, position_info)) current_tok = '' if (p == 2 or p == 4) and not no_ssplit: doc.append(process_sentence(current_sent, mwt_dict)) current_sent = [] assert(len(current_tok) == 0) if len(current_sent): doc.append(process_sentence(current_sent, mwt_dict)) return doc # if output_file: CoNLL.dict2conll(doc, output_file) # return oov_count, offset, all_preds, doc def eval_model(args, trainer, batches, vocab, mwt_dict): oov_count, N, all_preds, doc = output_predictions(args['conll_file'], trainer, batches, vocab, mwt_dict, args['max_seqlen']) all_preds = np.concatenate(all_preds, 0) labels = [y[1] for x in batches.data for y in x] counter = Counter(zip(all_preds, labels)) def f1(pred, gold, mapping): pred = [mapping[p] for p in pred] gold = [mapping[g] for g in gold] lastp = -1; lastg = -1 tp = 0; fp = 0; fn = 0 for i, (p, g) in enumerate(zip(pred, gold)): if p == g > 0 and lastp == lastg: lastp = i lastg = i tp += 1 elif p > 0 and g > 0: lastp = i lastg = i fp += 1 fn += 1 elif p > 0: # and g == 0 lastp = i fp += 1 elif g > 0: lastg = i fn += 1 if tp == 0: return 0 else: return 2 * tp / (2 * tp + fp + fn) f1tok = f1(all_preds, labels, {0:0, 1:1, 2:1, 3:1, 4:1}) f1sent = f1(all_preds, labels, {0:0, 1:0, 2:1, 3:0, 4:1}) f1mwt = f1(all_preds, labels, {0:0, 1:1, 2:1, 3:2, 4:2}) logger.info(f"{args['shorthand']}: token F1 = {f1tok*100:.2f}, sentence F1 = {f1sent*100:.2f}, mwt F1 = {f1mwt*100:.2f}") return harmonic_mean([f1tok, f1sent, f1mwt], [1, 1, .01])
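# --- illustrative sketch (not part of the module above) -------------------
# A toy decode loop for the per-character label scheme that
# output_predictions consumes: 0 = no boundary, 1 = end of token,
# 2 = end of token and sentence (3/4 additionally mark MWTs). Real decoding
# also handles <PAD>, vocab normalization and character offsets; this is a
# stripped-down sketch with made-up labels.
def demo_decode(chars, preds):
    sents, sent, tok = [], [], ''
    for ch, p in zip(chars, preds):
        tok += ch
        if p >= 1:           # token boundary reached
            sent.append(tok)
            tok = ''
        if p in (2, 4):      # sentence boundary reached
            sents.append(sent)
            sent = []
    if sent:                 # flush a trailing unterminated sentence
        sents.append(sent)
    return sents

assert demo_decode('Hello.', [0, 0, 0, 0, 1, 2]) == [['Hello', '.']]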
# coding: utf8 from __future__ import unicode_literals import os from csv import reader import unittest from tempfile import mkdtemp from shutil import rmtree from six import PY3, text_type, binary_type from dataset import connect from datafreeze.app import freeze from datafreeze.format.fcsv import value_to_str from .sample_data import TEST_DATA class FreezeTestCase(unittest.TestCase): def setUp(self): self.db = connect('sqlite://') self.tbl = self.db['weather'] for row in TEST_DATA: self.tbl.insert(row) self.d = mkdtemp() def tearDown(self): rmtree(self.d, ignore_errors=True) def test_freeze(self): freeze(self.tbl.all(), format='csv', filename=u'wäther.csv'.encode('utf8'), prefix=self.d) self.assertTrue(os.path.exists(os.path.join(self.d, u'wäther.csv'))) freeze(self.tbl.all(), format='csv', filename=u'wäther.csv', prefix=self.d) self.assertTrue(os.path.exists(os.path.join(self.d, u'wäther.csv'))) def test_freeze_csv(self): freeze(self.tbl.all(), format='csv', filename='weather.csv', prefix=self.d) path = os.path.join(self.d, 'weather.csv') if PY3: fh = open(path, 'rt', encoding='utf8', newline='') else: fh = open(path, 'rU') try: rows = list(reader(fh)) keys = rows[0] for i, d1 in enumerate(TEST_DATA): d2 = dict(zip(keys, rows[i + 1])) for k in d1.keys(): v2 = d2[k] if not PY3: v2 = v2.decode('utf8') v1 = value_to_str(d1[k]) if not isinstance(v1, text_type): if isinstance(v1, binary_type): v1 = text_type(v1, encoding='utf8') else: v1 = '%s' % v1 self.assertEqual(v2, v1) finally: fh.close() def test_memory_streams(self): if PY3: from io import StringIO else: from io import BytesIO as StringIO for fmt in ('csv', 'json', 'tabson'): with StringIO() as fd: freeze(self.tbl.all(), format=fmt, fileobj=fd) self.assertFalse(fd.closed, 'fileobj was closed for format %s' % fmt) fd.getvalue() # should not throw def test_freeze_json_no_wrap(self): freeze(self.tbl.all(), format='json', filename='weather.csv', prefix=self.d, wrap=False) path = os.path.join(self.d, 'weather.csv') if PY3: fh = open(path, 'rt', encoding='utf8', newline='') else: fh = open(path, 'rU') try: import json data = json.load(fh) self.assertIsInstance(data, list, 'Without wrapping, returned JSON should be a list') finally: fh.close() def test_freeze_json_wrap(self): freeze(self.tbl.all(), format='json', filename='weather.csv', prefix=self.d, wrap=True) path = os.path.join(self.d, 'weather.csv') if PY3: fh = open(path, 'rt', encoding='utf8', newline='') else: fh = open(path, 'rU') try: import json data = json.load(fh) self.assertIsInstance(data, dict, 'With wrapping, returned JSON should be a dict') self.assertIn('results', data.keys()) self.assertIn('count', data.keys()) self.assertIn('meta', data.keys()) finally: fh.close() class SerializerTestCase(unittest.TestCase): def test_serializer(self): from datafreeze.format.common import Serializer from datafreeze.config import Export from datafreeze.util import FreezeException self.assertRaises(FreezeException, Serializer, {}, {}) s = Serializer(Export({'filename': 'f'}, {'mode': 'nomode'}), '') self.assertRaises(FreezeException, getattr, s, 'wrap') s = Serializer(Export({'filename': 'f'}, {}), '') s.wrap s = Serializer(Export({'filename': '-'}, {}), '') self.assertTrue(s.fileobj) def test_value_to_str1(self): assert '2011-01-01T00:00:00' == value_to_str(TEST_DATA[0]['date']), \ value_to_str(TEST_DATA[0]['date']) def test_value_to_str2(self): if PY3: assert 'hóla' == value_to_str('\u0068\u00f3\u006c\u0061') else: assert u'hóla'.encode('utf-8') == 
value_to_str(u'\u0068\u00f3\u006c\u0061'), \ [value_to_str(u'\u0068\u00f3\u006c\u0061')] def test_value_to_str3(self): assert '' == value_to_str(None) def test_value_to_str4(self): assert [] == value_to_str([]) if __name__ == '__main__': unittest.main()
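# --- illustrative sketch (not part of the test suite above) ---------------
# Minimal end-to-end use of the API these tests exercise: insert rows into
# an in-memory `dataset` table, then freeze them to CSV in a temp directory.
# The table name and rows are made-up sample data.
import os
from tempfile import mkdtemp

from dataset import connect
from datafreeze.app import freeze

db = connect('sqlite://')
table = db['cities']
table.insert(dict(name='Berlin', temp=9))
table.insert(dict(name='Lisbon', temp=17))

out = mkdtemp()
freeze(table.all(), format='csv', filename='cities.csv', prefix=out)
assert os.path.exists(os.path.join(out, 'cities.csv'))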
from typing import ( Any, ClassVar, Dict, List, Sequence, Tuple, Type, Union, no_type_check, ) import anyio from sqlalchemy import Column, func, inspect, select from sqlalchemy.engine.base import Engine from sqlalchemy.exc import NoInspectionAvailable from sqlalchemy.ext.asyncio import AsyncEngine from sqlalchemy.orm import ( ColumnProperty, RelationshipProperty, selectinload, sessionmaker, ) from sqlalchemy.orm.attributes import InstrumentedAttribute from sqlalchemy.sql.elements import ClauseElement from starlette.requests import Request from wtforms import Form from sqladmin.exceptions import InvalidColumnError, InvalidModelError from sqladmin.forms import get_model_form from sqladmin.helpers import prettify_class_name, slugify_class_name from sqladmin.pagination import Pagination __all__ = [ "ModelAdmin", ] class ModelAdminMeta(type): """Metaclass used to specify class variables in ModelAdmin. Danger: This class should almost never be used directly. """ @no_type_check def __new__(mcls, name, bases, attrs: dict, **kwargs: Any): cls: Type["ModelAdmin"] = super().__new__(mcls, name, bases, attrs) model = kwargs.get("model") if not model: return cls try: mapper = inspect(model) except NoInspectionAvailable: raise InvalidModelError( f"Class {model.__name__} is not a SQLAlchemy model." ) assert len(mapper.primary_key) == 1, "Multiple PK columns not supported." cls.pk_column = mapper.primary_key[0] cls.identity = slugify_class_name(model.__name__) cls.model = model cls.name = attrs.get("name", prettify_class_name(cls.model.__name__)) cls.name_plural = attrs.get("name_plural", f"{cls.name}s") cls.icon = attrs.get("icon") mcls._check_conflicting_options(["column_list", "column_exclude_list"], attrs) mcls._check_conflicting_options( ["column_details_list", "column_details_exclude_list"], attrs ) return cls @classmethod def _check_conflicting_options(mcls, keys: List[str], attrs: dict) -> None: if all(k in attrs for k in keys): raise AssertionError(f"Cannot use {' and '.join(keys)} together.") class BaseModelAdmin: def is_visible(self, request: Request) -> bool: """Override this method if you want to dynamically hide or show administrative views from the SQLAdmin menu structure. By default, the item is visible in the menu. Both is_visible and is_accessible must return True for the item to be displayed in the menu. """ return True def is_accessible(self, request: Request) -> bool: """Override this method to add permission checks. SQLAdmin does not make any assumptions about the authentication system used in your application, so it is up to you to implement it. By default, it will allow access for everyone. """ return True class ModelAdmin(BaseModelAdmin, metaclass=ModelAdminMeta): """Base class for defining administrative behaviour for the model. ???+ usage ```python from sqladmin import ModelAdmin from mymodels import User # SQLAlchemy model class UserAdmin(ModelAdmin, model=User): can_create = True ``` """ model: ClassVar[type] # Internals pk_column: ClassVar[Column] identity: ClassVar[str] sessionmaker: ClassVar[sessionmaker] engine: ClassVar[Union[Engine, AsyncEngine]] async_engine: ClassVar[bool] # Metadata name: ClassVar[str] = "" """Name of ModelAdmin to display. Default value is set to Model class name. """ name_plural: ClassVar[str] = "" """Plural name of ModelAdmin. Default value is Model class name + `s`. """ icon: ClassVar[str] = "" """Display icon for ModelAdmin in the sidebar. Currently only supports FontAwesome icons.
from typing import (
    Any,
    ClassVar,
    Dict,
    List,
    Sequence,
    Tuple,
    Type,
    Union,
    no_type_check,
)

import anyio
from sqlalchemy import Column, func, inspect, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.exc import NoInspectionAvailable
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.orm import (
    ColumnProperty,
    RelationshipProperty,
    selectinload,
    sessionmaker,
)
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql.elements import ClauseElement
from starlette.requests import Request
from wtforms import Form

from sqladmin.exceptions import InvalidColumnError, InvalidModelError
from sqladmin.forms import get_model_form
from sqladmin.helpers import prettify_class_name, slugify_class_name
from sqladmin.pagination import Pagination

__all__ = [
    "ModelAdmin",
]


class ModelAdminMeta(type):
    """Metaclass used to specify class variables in ModelAdmin.

    Danger:
        This class should almost never be used directly.
    """

    @no_type_check
    def __new__(mcls, name, bases, attrs: dict, **kwargs: Any):
        cls: Type["ModelAdmin"] = super().__new__(mcls, name, bases, attrs)

        model = kwargs.get("model")
        if not model:
            return cls

        try:
            mapper = inspect(model)
        except NoInspectionAvailable:
            raise InvalidModelError(
                f"Class {model.__name__} is not a SQLAlchemy model."
            )

        assert len(mapper.primary_key) == 1, "Multiple PK columns not supported."

        cls.pk_column = mapper.primary_key[0]
        cls.identity = slugify_class_name(model.__name__)
        cls.model = model

        cls.name = attrs.get("name", prettify_class_name(cls.model.__name__))
        cls.name_plural = attrs.get("name_plural", f"{cls.name}s")
        cls.icon = attrs.get("icon")

        mcls._check_conflicting_options(["column_list", "column_exclude_list"], attrs)
        mcls._check_conflicting_options(
            ["column_details_list", "column_details_exclude_list"], attrs
        )

        return cls

    @classmethod
    def _check_conflicting_options(mcls, keys: List[str], attrs: dict) -> None:
        if all(k in attrs for k in keys):
            raise AssertionError(f"Cannot use {' and '.join(keys)} together.")


class BaseModelAdmin:
    def is_visible(self, request: Request) -> bool:
        """Override this method if you want to dynamically hide or show
        administrative views from the SQLAdmin menu structure.
        By default, the item is visible in the menu.

        Both `is_visible` and `is_accessible` must return `True`
        for the item to be displayed in the menu.
        """
        return True

    def is_accessible(self, request: Request) -> bool:
        """Override this method to add permission checks.
        SQLAdmin does not make any assumptions about the authentication system
        used in your application, so it is up to you to implement it.
        By default, it will allow access for everyone.
        """
        return True


class ModelAdmin(BaseModelAdmin, metaclass=ModelAdminMeta):
    """Base class for defining administrative behaviour for the model.

    ???+ usage
        ```python
        from sqladmin import ModelAdmin

        from mymodels import User  # SQLAlchemy model

        class UserAdmin(ModelAdmin, model=User):
            can_create = True
        ```
    """

    model: ClassVar[type]

    # Internals
    pk_column: ClassVar[Column]
    identity: ClassVar[str]
    sessionmaker: ClassVar[sessionmaker]
    engine: ClassVar[Union[Engine, AsyncEngine]]
    async_engine: ClassVar[bool]

    # Metadata
    name: ClassVar[str] = ""
    """Name of ModelAdmin to display.
    Default value is set to Model class name.
    """

    name_plural: ClassVar[str] = ""
    """Plural name of ModelAdmin.
    Default value is Model class name + `s`.
    """

    icon: ClassVar[str] = ""
    """Display icon for ModelAdmin in the sidebar.
    Currently only supports FontAwesome icons.
???+ example ```python class UserAdmin(ModelAdmin, model=User): icon = "fas fa-user" ``` """ # Permissions can_create: ClassVar[bool] = True """Permission for creating new Models. Default value is set to `True`.""" can_edit: ClassVar[bool] = True """Permission for editing Models. Default value is set to `True`.""" can_delete: ClassVar[bool] = True """Permission for deleting Models. Default value is set to `True`.""" can_view_details: ClassVar[bool] = True """Permission for viewing full details of Models. Default value is set to `True`. """ # List page column_list: ClassVar[Sequence[Union[str, InstrumentedAttribute]]] = [] """List of columns to display in `List` page. Columns can either be string names or SQLAlchemy columns. ???+ note By default only Model primary key is displayed. ???+ example ```python class UserAdmin(ModelAdmin, model=User): column_list = [User.id, User.name] ``` """ column_exclude_list: ClassVar[Sequence[Union[str, InstrumentedAttribute]]] = [] """List of columns to exclude in `List` page. Columns can either be string names or SQLAlchemy columns. ???+ example ```python class UserAdmin(ModelAdmin, model=User): column_exclude_list = [User.id, User.name] ``` """ page_size: ClassVar[int] = 10 """Default number of items to display in `List` page pagination. Default value is set to `10`. ???+ example ```python class UserAdmin(ModelAdmin, model=User): page_size = 25 ``` """ page_size_options: ClassVar[Sequence[int]] = [10, 25, 50, 100] """Pagination choices displayed in `List` page. Default value is set to `[10, 25, 50, 100]`. ???+ example ```python class UserAdmin(ModelAdmin, model=User): page_size_options = [50, 100] ``` """ # Details page column_details_list: ClassVar[Sequence[Union[str, InstrumentedAttribute]]] = [] """List of columns to display in `Detail` page. Columns can either be string names or SQLAlchemy columns. ???+ note By default all columns of Model are displayed. ???+ example ```python class UserAdmin(ModelAdmin, model=User): column_details_list = [User.id, User.name, User.mail] ``` """ column_details_exclude_list: ClassVar[ Sequence[Union[str, InstrumentedAttribute]] ] = [] """List of columns to exclude from displaying in `Detail` page. Columns can either be string names or SQLAlchemy columns. ???+ example ```python class UserAdmin(ModelAdmin, model=User): column_details_exclude_list = [User.mail] ``` """ column_labels: ClassVar[Dict[Union[str, InstrumentedAttribute], str]] = {} """A mapping of column labels, used to map column names to new names. Dictionary keys can be string names or SQLAlchemy columns with string values. ???+ example ```python class UserAdmin(ModelAdmin, model=User): column_labels = {User.mail: "Email"} ``` """ # Templates list_template: ClassVar[str] = "list.html" """List view template. Default is `list.html`.""" create_template: ClassVar[str] = "create.html" """Create view template. Default is `create.html`.""" details_template: ClassVar[str] = "details.html" """Details view template. Default is `details.html`.""" edit_template: ClassVar[str] = "edit.html" """Edit view template. 
    Default is `edit.html`."""

    def _run_query_sync(self, stmt: ClauseElement) -> Any:
        with self.sessionmaker(expire_on_commit=False) as session:
            result = session.execute(stmt)
            return result.scalars().all()

    async def _run_query(self, stmt: ClauseElement) -> Any:
        if self.async_engine:
            async with self.sessionmaker(expire_on_commit=False) as session:
                result = await session.execute(stmt)
                return result.scalars().all()
        else:
            return await anyio.to_thread.run_sync(self._run_query_sync, stmt)

    def _add_object_sync(self, obj: Any) -> None:
        with self.sessionmaker.begin() as session:
            session.add(obj)

    def _delete_object_sync(self, obj: Any) -> None:
        with self.sessionmaker.begin() as session:
            session.delete(obj)

    def _update_model_sync(self, pk: Any, data: Dict[str, Any]) -> None:
        stmt = select(self.model).where(self.pk_column == pk)
        relationships = inspect(self.model).relationships

        with self.sessionmaker.begin() as session:
            result = session.execute(stmt).scalars().first()
            for name, value in data.items():
                if name in relationships and isinstance(value, list):
                    # Load relationship objects into session
                    session.add_all(value)
                setattr(result, name, value)

    async def count(self) -> int:
        stmt = select(func.count(self.pk_column))
        rows = await self._run_query(stmt)
        return rows[0]

    async def list(self, page: int, page_size: int) -> Pagination:
        page_size = min(page_size or self.page_size, max(self.page_size_options))

        count = await self.count()
        stmt = (
            select(self.model)
            .order_by(self.pk_column)
            .limit(page_size)
            .offset((page - 1) * page_size)
        )

        for _, attr in self.get_list_columns():
            if isinstance(attr, RelationshipProperty):
                stmt = stmt.options(selectinload(attr.key))

        rows = await self._run_query(stmt)

        pagination = Pagination(
            rows=rows,
            page=page,
            page_size=page_size,
            count=count,
        )

        return pagination

    async def get_model_by_pk(self, value: Any) -> Any:
        stmt = select(self.model).where(self.pk_column == value)

        for _, attr in self.get_details_columns():
            if isinstance(attr, RelationshipProperty):
                stmt = stmt.options(selectinload(attr.key))

        rows = await self._run_query(stmt)
        if rows:
            return rows[0]
        return None

    def get_attr_value(
        self, obj: type, attr: Union[Column, ColumnProperty, RelationshipProperty]
    ) -> Any:
        if isinstance(attr, Column):
            return getattr(obj, attr.name)
        else:
            value = getattr(obj, attr.key)
            if isinstance(value, list):
                return ", ".join(map(str, value))
            return value

    def get_model_attr(
        self, attr: Union[str, InstrumentedAttribute]
    ) -> Union[ColumnProperty, RelationshipProperty]:
        assert isinstance(attr, (str, InstrumentedAttribute))

        if isinstance(attr, str):
            key = attr
        elif isinstance(attr.prop, ColumnProperty):
            key = attr.name
        elif isinstance(attr.prop, RelationshipProperty):
            key = attr.prop.key

        try:
            return inspect(self.model).attrs[key]
        except KeyError:
            raise InvalidColumnError(
                f"Model '{self.model.__name__}' has no attribute '{attr}'."
            )

    def get_model_attributes(self) -> List[Column]:
        return list(inspect(self.model).attrs)

    def get_list_columns(self) -> List[Tuple[str, Column]]:
        """Get list of columns to display in List page."""

        column_list = getattr(self, "column_list", None)
        column_exclude_list = getattr(self, "column_exclude_list", None)

        if column_list:
            attrs = [self.get_model_attr(attr) for attr in self.column_list]
        elif column_exclude_list:
            exclude_columns = [
                self.get_model_attr(attr) for attr in column_exclude_list
            ]
            all_attrs = self.get_model_attributes()
            attrs = list(set(all_attrs) - set(exclude_columns))
        else:
            attrs = [getattr(self.model, self.pk_column.name).prop]

        labels = self.get_column_labels()
        return [(labels.get(attr, attr.key), attr) for attr in attrs]

    def get_details_columns(self) -> List[Tuple[str, Column]]:
        """Get list of columns to display in Detail page."""

        column_details_list = getattr(self, "column_details_list", None)
        column_details_exclude_list = getattr(self, "column_details_exclude_list", None)

        if column_details_list:
            attrs = [self.get_model_attr(attr) for attr in column_details_list]
        elif column_details_exclude_list:
            exclude_columns = [
                self.get_model_attr(attr) for attr in column_details_exclude_list
            ]
            all_attrs = self.get_model_attributes()
            attrs = list(set(all_attrs) - set(exclude_columns))
        else:
            attrs = self.get_model_attributes()

        labels = self.get_column_labels()
        return [(labels.get(attr, attr.key), attr) for attr in attrs]

    def get_column_labels(self) -> Dict[Column, str]:
        return {
            self.get_model_attr(column_label): value
            for column_label, value in self.column_labels.items()
        }

    async def delete_model(self, obj: Any) -> None:
        if self.async_engine:
            async with self.sessionmaker.begin() as session:
                await session.delete(obj)
        else:
            await anyio.to_thread.run_sync(self._delete_object_sync, obj)

    async def insert_model(self, obj: type) -> Any:
        if self.async_engine:
            async with self.sessionmaker.begin() as session:
                session.add(obj)
        else:
            await anyio.to_thread.run_sync(self._add_object_sync, obj)

    async def update_model(self, pk: Any, data: Dict[str, Any]) -> None:
        if self.async_engine:
            stmt = select(self.model).where(self.pk_column == pk)
            relationships = inspect(self.model).relationships

            for name in relationships.keys():
                stmt = stmt.options(selectinload(name))

            async with self.sessionmaker.begin() as session:
                result = await session.execute(stmt)
                result = result.scalars().first()
                for name, value in data.items():
                    if name in relationships and isinstance(value, list):
                        # Load relationship objects into session
                        session.add_all(value)
                    setattr(result, name, value)
        else:
            await anyio.to_thread.run_sync(self._update_model_sync, pk, data)

    async def scaffold_form(self) -> Type[Form]:
        return await get_model_form(model=self.model, engine=self.engine)
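# ---------------------------------------------------------------------------
# Usage sketch (not part of the module above): how a ModelAdmin subclass is
# defined and registered. This assumes the package's Admin application class
# exposes a `register_model` hook, as in early sqladmin releases; the model,
# engine, and app below are illustrative, and your sqladmin version may use a
# different registration entry point.
# ---------------------------------------------------------------------------
from starlette.applications import Starlette
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base

from sqladmin import Admin, ModelAdmin

Base = declarative_base()
engine = create_engine("sqlite://")  # illustrative in-memory database


class User(Base):
    __tablename__ = "users"

    id = Column(Integer, primary_key=True)
    name = Column(String)
    mail = Column(String)


class UserAdmin(ModelAdmin, model=User):
    # Class-level options are picked up by ModelAdminMeta at definition time;
    # mutually exclusive options (e.g. column_list vs column_exclude_list)
    # raise an AssertionError there.
    column_list = [User.id, User.name]
    column_labels = {User.mail: "Email"}
    page_size = 25

    def is_accessible(self, request) -> bool:
        # Permission hook inherited from BaseModelAdmin; a real check would
        # inspect the request's auth state instead of allowing everyone.
        return True


Base.metadata.create_all(engine)

app = Starlette()
admin = Admin(app, engine)
admin.register_model(UserAdmin)  # assumed registration entry point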
from trading_ig.rest import IGService, IGException, ApiExceededException
from trading_ig.config import config
import pandas as pd
from datetime import datetime, timedelta
import pytest
from random import randint, choice
import logging
import time
from tenacity import Retrying, wait_exponential, retry_if_exception_type


@pytest.fixture(scope="module")
def retrying():
    """test fixture creates a tenacity.Retrying instance"""
    return Retrying(wait=wait_exponential(),
                    retry=retry_if_exception_type(ApiExceededException))


@pytest.fixture(autouse=True)
def logging_setup():
    """sets up logging for each test"""
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')


@pytest.fixture(scope="module", params=['2', '3'], ids=['v2 session', 'v3 session'])
def ig_service(request, retrying):
    """test fixture logs into IG with the configured credentials. Tests both
    v2 and v3 session types"""
    if config.acc_type == 'LIVE':
        pytest.fail('this integration test should not be executed with a LIVE account')
    ig_service = IGService(config.username, config.password, config.api_key,
                           config.acc_type, acc_number=config.acc_number,
                           retryer=retrying)
    ig_service.create_session(version=request.param)
    yield ig_service
    ig_service.logout()


@pytest.fixture()
def top_level_nodes(ig_service):
    """test fixture gets the top level navigation nodes"""
    response = ig_service.fetch_top_level_navigation_nodes()
    return response["nodes"]


@pytest.fixture()
def watchlists(ig_service):
    """test fixture gets all watchlists"""
    return ig_service.fetch_all_watchlists()


@pytest.fixture()
def watchlist_id(ig_service):
    """test fixture creates a dummy watchlist for use in tests, and returns
    the ID. In teardown it also deletes the dummy watchlist"""
    epics = ['CS.D.GBPUSD.TODAY.IP', 'IX.D.FTSE.DAILY.IP']
    now = datetime.now()
    data = ig_service.create_watchlist(f"test_{now.strftime('%Y%m%d%H%M%S')}", epics)
    watchlist_id = data['watchlistId']
    yield watchlist_id
    ig_service.delete_watchlist(watchlist_id)


class TestIntegration:

    def test_create_session_no_encryption(self, retrying):
        ig_service = IGService(config.username, config.password, config.api_key,
                               config.acc_type, retryer=retrying)
        ig_service.create_session()
        assert 'CST' in ig_service.session.headers

    def test_create_session_encrypted_password(self, retrying):
        ig_service = IGService(config.username, config.password, config.api_key,
                               config.acc_type, retryer=retrying)
        ig_service.create_session(encryption=True)
        assert 'CST' in ig_service.session.headers

    def test_fetch_accounts(self, ig_service):
        response = ig_service.fetch_accounts()
        preferred = response.loc[response["preferred"]]
        assert all(preferred["balance"] > 0)

    def test_accounts_prefs(self, ig_service):
        # turn off trailing stops
        update_status = ig_service.update_account_preferences(trailing_stops_enabled=False)
        assert update_status == 'SUCCESS'

        # check trailing stops are turned off
        enabled_status = ig_service.fetch_account_preferences()['trailingStopsEnabled']
        assert enabled_status is False

        time.sleep(5)

        # turn on trailing stops
        update_status = ig_service.update_account_preferences(trailing_stops_enabled=True)
        assert update_status == 'SUCCESS'

        time.sleep(5)

        # check trailing stops are turned on
        enabled_status = ig_service.fetch_account_preferences()['trailingStopsEnabled']
        assert enabled_status is True

    def test_fetch_account_activity_by_period(self, ig_service):
        response = ig_service.fetch_account_activity_by_period(10000)
        assert isinstance(response, pd.DataFrame)

    def test_fetch_account_activity_by_date(self,
ig_service): to_date = datetime.now() from_date = to_date - timedelta(days=7) response = ig_service.fetch_account_activity_by_date(from_date, to_date) assert isinstance(response, pd.DataFrame) def test_fetch_account_activity_v2_span(self, ig_service): period = 7 * 24 * 60 * 60 # 7 days response = ig_service.fetch_account_activity_v2(max_span_seconds=period) assert isinstance(response, pd.DataFrame) def test_fetch_account_activity_v2_dates(self, ig_service): to_date = datetime.now() from_date = to_date - timedelta(days=7) response = ig_service.fetch_account_activity_v2(from_date=from_date, to_date=to_date) assert isinstance(response, pd.DataFrame) def test_fetch_account_activity_from(self, ig_service): to_date = datetime.now() from_date = to_date - timedelta(days=7) response = ig_service.fetch_account_activity(from_date=from_date) assert isinstance(response, pd.DataFrame) assert response.shape[1] == 9 def test_fetch_account_activity_from_to(self, ig_service): to_date = datetime.now() from_date = to_date - timedelta(days=7) response = ig_service.fetch_account_activity(from_date=from_date, to_date=to_date) assert isinstance(response, pd.DataFrame) assert response.shape[1] == 9 def test_fetch_account_activity_detailed(self, ig_service): to_date = datetime.now() from_date = to_date - timedelta(days=7) response = ig_service.fetch_account_activity(from_date=from_date, to_date=to_date, detailed=True) assert isinstance(response, pd.DataFrame) assert response.shape[1] == 22 def test_fetch_account_activity_old(self, ig_service): from_date = datetime(1970, 1, 1) to_date = from_date + timedelta(days=7) response = ig_service.fetch_account_activity(from_date=from_date, to_date=to_date) assert isinstance(response, pd.DataFrame) assert response.shape[0] == 0 def test_fetch_account_activity_fiql(self, ig_service): to_date = datetime.now() from_date = to_date - timedelta(days=30) response = ig_service.fetch_account_activity(from_date=from_date, to_date=to_date, fiql_filter='channel==PUBLIC_WEB_API') assert isinstance(response, pd.DataFrame) assert response.shape[1] == 9 def test_init_bad_account_type(self, retrying): with pytest.raises(IGException): IGService(config.username, config.password, config.api_key, 'wrong', retryer=retrying) def test_fetch_transaction_history_by_type_and_period(self, ig_service): response = ig_service.fetch_transaction_history_by_type_and_period(10000, "ALL") assert isinstance(response, pd.DataFrame) def test_create_session_bad_password(self, retrying): ig_service = IGService(config.username, 'wrong', config.api_key, config.acc_type, retryer=retrying) with pytest.raises(IGException): ig_service.create_session() def test_fetch_open_positions(self, ig_service): response = ig_service.fetch_open_positions() assert isinstance(response, pd.DataFrame) def test_fetch_open_positions_v1(self, ig_service): response = ig_service.fetch_open_positions(version='1') assert isinstance(response, pd.DataFrame) def test_create_session_bad_username(self, retrying): ig_service = IGService('wrong', config.password, config.api_key, config.acc_type, retryer=retrying) with pytest.raises(IGException): ig_service.create_session() def test_fetch_working_orders(self, ig_service): response = ig_service.fetch_working_orders() assert isinstance(response, pd.DataFrame) def test_create_session_bad_api_key(self, retrying): ig_service = IGService(config.username, config.password, 'wrong', config.acc_type, retryer=retrying) with pytest.raises(IGException): ig_service.create_session() def 
test_fetch_top_level_navigation_nodes(self, top_level_nodes): assert isinstance(top_level_nodes, pd.DataFrame) def test_create_session_v3_no_acc_num(self, retrying): ig_service = IGService(config.username, config.password, config.api_key, config.acc_type, retryer=retrying) with pytest.raises(IGException): ig_service.create_session(version='3') def test_create_session_v3(self, retrying): ig_service = IGService(config.username, config.password, config.api_key, config.acc_type, acc_number=config.acc_number, retryer=retrying) ig_service.create_session(version='3') assert 'X-IG-API-KEY' in ig_service.session.headers assert 'Authorization' in ig_service.session.headers assert 'IG-ACCOUNT-ID' in ig_service.session.headers assert len(ig_service.fetch_accounts()) == 2 @pytest.mark.slow # will be skipped unless run with 'pytest --runslow' def test_session_v3_refresh(self, retrying): """ Tests refresh capability of v3 sessions. It makes repeated calls to the 'fetch_accounts' endpoint, with random sleep times in between, to show/test the different scenarios. Will take a long time to run """ ig_service = IGService(config.username, config.password, config.api_key, config.acc_type, acc_number=config.acc_number, retryer=retrying) ig_service.create_session(version='3') delay_choice = [(1, 59), (60, 650)] for count in range(1, 20): data = ig_service.fetch_accounts() logging.info(f"Account count: {len(data)}") option = choice(delay_choice) wait = randint(option[0], option[1]) logging.info(f"Waiting for {wait} seconds...") time.sleep(wait) def test_read_session(self, ig_service): ig_service.read_session() assert 'X-IG-API-KEY' in ig_service.session.headers if ig_service.session.headers['VERSION'] == '2': assert 'CST' in ig_service.session.headers assert 'X-SECURITY-TOKEN' in ig_service.session.headers assert 'Authorization' not in ig_service.session.headers assert 'IG-ACCOUNT-ID' not in ig_service.session.headers if ig_service.session.headers['VERSION'] == '3': assert 'CST' not in ig_service.session.headers assert 'X-SECURITY-TOKEN' not in ig_service.session.headers assert 'Authorization' in ig_service.session.headers assert 'IG-ACCOUNT-ID' in ig_service.session.headers def test_read_session_fetch_session_tokens(self, ig_service): ig_service.read_session(fetch_session_tokens='true') assert 'X-IG-API-KEY' in ig_service.session.headers assert 'CST' in ig_service.session.headers assert 'X-SECURITY-TOKEN' in ig_service.session.headers if ig_service.session.headers['VERSION'] == '2': assert 'Authorization' not in ig_service.session.headers assert 'IG-ACCOUNT-ID' not in ig_service.session.headers if ig_service.session.headers['VERSION'] == '3': assert 'Authorization' in ig_service.session.headers assert 'IG-ACCOUNT-ID' in ig_service.session.headers @staticmethod def get_random_market_id(): market_ids = ['US500', 'FT100', 'USTECH', 'GC', 'CL', 'W', 'GBPUSD', 'USDJPY', 'EURCHF', '10YRTND', 'FGBL', 'FLG', 'BITCOIN', 'ETHUSD', 'CS.D.LTCUSD.TODAY.IP', 'BP-UK', 'VOD-UK', 'TSLA-US'] rand_index = randint(0, len(market_ids) - 1) market_id = market_ids[rand_index] return market_id def test_fetch_client_sentiment_by_instrument(self, ig_service): market_id = self.get_random_market_id() response = ig_service.fetch_client_sentiment_by_instrument(market_id) self.assert_sentiment(response) def test_fetch_client_sentiment_by_instrument_multiple(self, ig_service): market_id_list = [] for i in range(1, 5): market_id_list.append(self.get_random_market_id()) response = ig_service.fetch_client_sentiment_by_instrument(market_id_list) for 
sentiment in response['clientSentiments']:
            self.assert_sentiment(sentiment)

    def test_fetch_related_client_sentiment_by_instrument(self, ig_service):
        market_id = self.get_random_market_id()
        df = ig_service.fetch_related_client_sentiment_by_instrument(market_id)
        rows = df.to_dict('records')
        for sentiment in rows:
            self.assert_sentiment(sentiment)

    @staticmethod
    def assert_sentiment(response):
        long = response['longPositionPercentage']
        short = response['shortPositionPercentage']
        assert isinstance(response, dict)
        assert isinstance(long, float)
        assert isinstance(short, float)
        assert long + short == 100.0

    def test_fetch_sub_nodes_by_node(self, ig_service, top_level_nodes):
        rand_index = randint(0, len(top_level_nodes) - 1)
        response = ig_service.fetch_sub_nodes_by_node(rand_index)
        assert isinstance(response["markets"], pd.DataFrame)
        assert isinstance(response["nodes"], pd.DataFrame)

    def test_fetch_all_watchlists(self, watchlists):
        assert isinstance(watchlists, pd.DataFrame)
        default = watchlists[watchlists["defaultSystemWatchlist"]]
        assert any(default["id"] == "Popular Markets")

    def test_fetch_watchlist_markets(self, ig_service, watchlists):
        rand_index = randint(0, len(watchlists) - 1)
        watchlist_id = watchlists.iloc[rand_index]["id"]
        response = ig_service.fetch_watchlist_markets(watchlist_id)
        assert isinstance(response, pd.DataFrame)

    def test_fetch_market_by_epic(self, ig_service):
        response = ig_service.fetch_market_by_epic("CS.D.EURUSD.MINI.IP")
        assert isinstance(response, dict)

    def test_fetch_markets_by_epics(self, ig_service):
        markets_list = ig_service.fetch_markets_by_epics(
            "IX.D.SPTRD.MONTH1.IP,IX.D.FTSE.MONTH1.IP", version='1')
        assert isinstance(markets_list, list)
        assert len(markets_list) == 2
        assert markets_list[0].instrument.name == 'FTSE 100'
        assert markets_list[0].dealingRules is not None
        assert markets_list[1].instrument.name == 'US 500'

        markets_list = ig_service.fetch_markets_by_epics(
            "MT.D.PL.Month2.IP,MT.D.PA.Month1.IP,MT.D.HG.Month1.IP", detailed=False)
        assert len(markets_list) == 3
        assert markets_list[0].instrument.name is None
        assert markets_list[0].snapshot.bid != 0
        assert markets_list[0].snapshot.offer != 0
        assert markets_list[0].dealingRules is None
        assert markets_list[1].instrument.name is None
        assert markets_list[1].snapshot.bid != 0
        assert markets_list[1].snapshot.offer != 0
        assert markets_list[1].dealingRules is None
        assert markets_list[2].instrument.name is None
        assert markets_list[2].snapshot.bid != 0
        assert markets_list[2].snapshot.offer != 0
        assert markets_list[2].dealingRules is None

    def test_search_markets(self, ig_service):
        search_term = "EURUSD"
        response = ig_service.search_markets(search_term)
        assert isinstance(response, pd.DataFrame)

    def test_fetch_historical_prices_by_epic_and_numpoints(self, ig_service):
        response = ig_service.fetch_historical_prices_by_epic_and_num_points(
            "CS.D.EURUSD.MINI.IP", "H", 4
        )
        assert isinstance(response["allowance"], dict)
        assert isinstance(response["prices"], pd.DataFrame)
        assert len(response["prices"]) == 4

    def test_fetch_historical_prices_by_epic_and_date_range_v1(self, ig_service):
        response = ig_service.fetch_historical_prices_by_epic_and_date_range(
            "CS.D.EURUSD.MINI.IP", "D",
            "2020:09:01-00:00:00", "2020:09:04-23:59:59",
            version='1'
        )
        assert isinstance(response["allowance"], dict)
        assert isinstance(response["prices"], pd.DataFrame)
        assert len(response["prices"]) == 4

    def test_fetch_historical_prices_by_epic_and_date_range(self, ig_service):
        response = ig_service.fetch_historical_prices_by_epic_and_date_range(
            "CS.D.EURUSD.MINI.IP",
"D", "2020-09-01 00:00:00", "2020-09-04 23:59:59" ) assert isinstance(response["allowance"], dict) assert isinstance(response["prices"], pd.DataFrame) assert len(response["prices"]) == 4 def test_fetch_historical_prices_by_epic_dates(self, ig_service): result = ig_service.fetch_historical_prices_by_epic( epic='MT.D.GC.Month2.IP', resolution='D', start_date='2020-09-01T00:00:00', end_date='2020-09-04T23:59:59') prices = result['prices'] assert isinstance(result, dict) assert isinstance(prices, pd.DataFrame) assert prices.shape[0] == 4 assert prices.shape[1] == 13 # assert time series rows are 1 day apart prices['tvalue'] = prices.index prices['delta'] = (prices['tvalue'] - prices['tvalue'].shift()) assert any(prices["delta"].dropna() == timedelta(days=1)) # assert default paging assert result['metadata']['pageData']['pageSize'] == 20 assert result['metadata']['pageData']['pageNumber'] == 1 assert result['metadata']['pageData']['totalPages'] == 1 def test_fetch_historical_prices_by_epic_numpoints(self, ig_service): result = ig_service.fetch_historical_prices_by_epic( epic='MT.D.GC.Month2.IP', resolution='W', numpoints=10) prices = result['prices'] assert isinstance(result, dict) assert isinstance(prices, pd.DataFrame) # assert DataFrame shape assert prices.shape[0] == 10 assert prices.shape[1] == 13 # assert time series rows are 1 week apart prices['tvalue'] = prices.index prices['delta'] = (prices['tvalue'] - prices['tvalue'].shift()) assert any(prices["delta"].dropna() == timedelta(weeks=1)) def test_fetch_historical_prices_by_epic_numpoints_default_paged( self, ig_service): result = ig_service.fetch_historical_prices_by_epic( epic='MT.D.GC.Month2.IP', resolution='W', numpoints=21) # assert default paged row count assert result['prices'].shape[0] == 21 assert result['metadata']['pageData']['pageNumber'] == 2 def test_fetch_historical_prices_by_epic_numpoints_custom_paged( self, ig_service): result = ig_service.fetch_historical_prices_by_epic( epic='MT.D.GC.Month2.IP', resolution='W', numpoints=6, pagesize=2) # assert default paged row count assert result['prices'].shape[0] == 6 assert result['metadata']['pageData']['pageNumber'] == 3 @pytest.mark.parametrize("ig_service", ['2'], indirect=True) def test_create_open_position(self, ig_service): epic = 'CS.D.GBPUSD.TODAY.IP' market_info = ig_service.fetch_market_by_epic(epic) status = market_info.snapshot.marketStatus bid = market_info.snapshot.bid offer = market_info.snapshot.offer if status != 'TRADEABLE': pytest.skip('Skipping open position test, market not open') open_result = ig_service.create_open_position( epic=epic, direction='BUY', currency_code='GBP', order_type='MARKET', expiry='DFB', force_open='false', guaranteed_stop='false', size=0.5, level=None, limit_level=None, limit_distance=None, quote_id=None, stop_distance=None, stop_level=None, trailing_stop=None, trailing_stop_increment=None) assert open_result['dealStatus'] == 'ACCEPTED' assert open_result['reason'] == 'SUCCESS' time.sleep(10) update_v1_result = ig_service.update_open_position(offer * 1.5, bid * 0.5, open_result['dealId'], version='1') assert update_v1_result['dealStatus'] == 'ACCEPTED' assert update_v1_result['reason'] == 'SUCCESS' time.sleep(10) update_v2_result = ig_service.update_open_position(offer * 1.4, bid * 0.4, open_result['dealId'], trailing_stop=True, trailing_stop_distance=25.0, trailing_stop_increment=10.0) assert update_v2_result['dealStatus'] == 'ACCEPTED' assert update_v2_result['reason'] == 'SUCCESS' time.sleep(10) close_result = 
ig_service.close_open_position(deal_id=open_result['dealId'], direction='SELL', epic=None, expiry='DFB', level=None, order_type='MARKET', quote_id=None, size=0.5, session=None) assert close_result['dealStatus'] == 'ACCEPTED' assert close_result['reason'] == 'SUCCESS' @pytest.mark.parametrize("ig_service", ['2'], indirect=True) def test_create_working_order(self, ig_service): epic = 'CS.D.GBPUSD.TODAY.IP' bet_info = ig_service.fetch_market_by_epic(epic) min_bet = bet_info.dealingRules.minDealSize.value offer = bet_info.snapshot.offer create_result = ig_service.create_working_order( epic=epic, direction='BUY', currency_code='GBP', order_type='LIMIT', expiry='DFB', guaranteed_stop='false', time_in_force='GOOD_TILL_CANCELLED', size=min_bet, level=offer * 0.9, limit_level=None, limit_distance=None, stop_distance=None, stop_level=None) assert create_result['dealStatus'] == 'ACCEPTED' assert create_result['reason'] == 'SUCCESS' time.sleep(10) delete_result = ig_service.delete_working_order(create_result['dealId']) assert delete_result['dealStatus'] == 'ACCEPTED' assert delete_result['reason'] == 'SUCCESS' def test_fetch_transaction_history(self, ig_service): data = ig_service.fetch_transaction_history() assert type(data) is pd.DataFrame def test_watchlist_add_market(self, ig_service, watchlist_id): response = ig_service.add_market_to_watchlist(watchlist_id, 'MT.D.GC.Month2.IP') assert response['status'] == 'SUCCESS' def test_watchlist_remove_market(self, ig_service, watchlist_id): response = ig_service.remove_market_from_watchlist(watchlist_id, 'CS.D.GBPUSD.TODAY.IP') assert response['status'] == 'SUCCESS' def test_get_client_apps(self, ig_service): apps_list = ig_service.get_client_apps() assert len(apps_list) > 0 @pytest.mark.skip(reason="endpoint throwing 500 errors - April 2021") def test_update_client_app(self, ig_service): result = ig_service.update_client_app(60, 60, config.api_key, 'ENABLED') print(result) def test_logout(self, retrying): ig_service = IGService(config.username, config.password, config.api_key, config.acc_type, retryer=retrying) ig_service.create_session() ig_service.logout() with pytest.raises(Exception) as error: print(error) ig_service.fetch_accounts()
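# ---------------------------------------------------------------------------
# Companion sketch (assumed, not shown in this file): the @pytest.mark.slow
# marker on test_session_v3_refresh only skips by default if a conftest.py
# wires up the --runslow option. This is the standard recipe from the pytest
# documentation; the project's actual conftest may differ.
# ---------------------------------------------------------------------------
import pytest


def pytest_addoption(parser):
    # Add a --runslow flag so slow tests are opt-in.
    parser.addoption(
        "--runslow", action="store_true", default=False, help="run slow tests"
    )


def pytest_configure(config):
    config.addinivalue_line("markers", "slow: mark test as slow to run")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--runslow"):
        return  # --runslow given on the command line: do not skip slow tests
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)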
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands related to android"""

import asyncio
import re
import os
import time
import math
from requests import get
from bs4 import BeautifulSoup
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
from userbot.utils import chrome, humanbytes, time_formatter, md5, human_to_bytes

GITHUB = "https://github.com"
DEVICES_DATA = (
    "https://raw.githubusercontent.com/androidtrackers/"
    "certified-android-devices/master/by_device.json"
)


@register(outgoing=True, pattern="^.magisk$")
async def magisk(request):
    """ magisk latest releases """
    magisk_dict = {
        "Stable": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/stable.json",
        "Beta": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/beta.json",
        "Canary (Release)": "https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/release.json",
        "Canary (Debug)": "https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/debug.json",
    }
    releases = "Latest Magisk Releases:\n"
    for name, release_url in magisk_dict.items():
        data = get(release_url).json()
        releases += (
            f"{name}: [ZIP v{data['magisk']['version']}]({data['magisk']['link']}) | "
            f"[APK v{data['app']['version']}]({data['app']['link']}) | "
            f"[Uninstaller]({data['uninstaller']['link']})\n"
        )
    await request.edit(releases)


@register(outgoing=True, pattern=r"^.device(?: |$)(\S*)")
async def device_info(request):
    """ get android device basic info from its codename """
    textx = await request.get_reply_message()
    device = request.pattern_match.group(1)
    if device:
        pass
    elif textx:
        device = textx.text
    else:
        return await request.edit("`Usage: .device <codename> / <model>`")
    try:
        found = get(DEVICES_DATA).json()[device]
    except KeyError:
        reply = f"`Couldn't find info about {device}!`\n"
    else:
        reply = f"Search results for {device}:\n\n"
        for item in found:
            brand = item["brand"]
            name = item["name"]
            codename = device
            model = item["model"]
            reply += (
                f"{brand} {name}\n"
                f"**Codename**: `{codename}`\n"
                f"**Model**: {model}\n\n"
            )
    await request.edit(reply)


@register(outgoing=True, pattern=r"^.codename(?: |)([\S]*)(?: |)([\s\S]*)")
async def codename_info(request):
    """ search for android codename """
    textx = await request.get_reply_message()
    brand = request.pattern_match.group(1).lower()
    device = request.pattern_match.group(2).lower()
    if brand and device:
        pass
    elif textx:
        brand = textx.text.split(" ")[0]
        device = " ".join(textx.text.split(" ")[1:])
    else:
        return await request.edit("`Usage: .codename <brand> <device>`")
    found = [
        i
        for i in get(DEVICES_DATA).json()
        if i["brand"].lower() == brand and device in i["name"].lower()
    ]
    if len(found) > 8:
        found = found[:8]
    if found:
        reply = f"Search results for {brand.capitalize()} {device.capitalize()}:\n\n"
        for item in found:
            brand = item["brand"]
            name = item["name"]
            codename = item["device"]
            model = item["model"]
            reply += (
                f"{brand} {name}\n"
                f"**Codename**: `{codename}`\n"
                f"**Model**: {model}\n\n"
            )
    else:
        reply = f"`Couldn't find {device} codename!`\n"
    await request.edit(reply)


@register(outgoing=True, pattern="^.pixeldl(?: |$)(.*)")
async def download_api(dl):
    await dl.edit("`Collecting information...`")
    URL = dl.pattern_match.group(1)
    URL_MSG = await dl.get_reply_message()
    if URL:
        pass
    elif URL_MSG:
        URL = URL_MSG.text
    else:
        await dl.edit("`Empty information...`")
        return
if not re.findall(r"\bhttps?://download.*pixelexperience.*\.org\S+", URL): await dl.edit("`Invalid information...`") return driver = await chrome() await dl.edit("`Getting information...`") driver.get(URL) error = driver.find_elements_by_class_name("swal2-content") if len(error) > 0 and error[0].text == "File Not Found.": await dl.edit(f"`FileNotFoundError`: {URL} is not found.") return datas = driver.find_elements_by_class_name("download__meta") """ - enumerate data to make sure we download the matched version - """ md5_origin = None i = None for index, value in enumerate(datas): for data in value.text.split("\n"): if data.startswith("MD5"): md5_origin = data.split(":")[1].strip() i = index break if md5_origin is not None and i is not None: break if md5_origin is None and i is None: await dl.edit("`There is no match version available...`") file_name = URL.split("/")[-2] if URL.endswith("/") else URL.split("/")[-1] file_path = TEMP_DOWNLOAD_DIRECTORY + file_name download = driver.find_elements_by_class_name("download__btn")[i] download.click() await dl.edit("`Starting download...`") file_size = human_to_bytes(download.text.split(None, 3)[-1].strip("()")) display_message = None complete = False start = time.time() while not complete: if os.path.isfile(file_path + ".crdownload"): try: downloaded = os.stat(file_path + ".crdownload").st_size status = "Downloading" except OSError: # Rare case await asyncio.sleep(1) continue elif os.path.isfile(file_path): downloaded = os.stat(file_path).st_size file_size = downloaded status = "Checking" else: await asyncio.sleep(0.3) continue diff = time.time() - start percentage = downloaded / file_size * 100 speed = round(downloaded / diff, 2) eta = round((file_size - downloaded) / speed) prog_str = "`{0}` | [{1}{2}] `{3}%`".format( status, "".join(["●" for i in range(math.floor(percentage / 10))]), "".join(["○" for i in range(10 - math.floor(percentage / 10))]), round(percentage, 2), ) current_message = ( "`[DOWNLOAD]`\n\n" f"`{file_name}`\n" f"`Status`\n{prog_str}\n" f"`{humanbytes(downloaded)} of {humanbytes(file_size)}" f" @ {humanbytes(speed)}`\n" f"`ETA` -> {time_formatter(eta)}" ) if ( round(diff % 15.00) == 0 and display_message != current_message or (downloaded == file_size) ): await dl.edit(current_message) display_message = current_message if downloaded == file_size: if not os.path.isfile(file_path): # Rare case await asyncio.sleep(1) continue MD5 = await md5(file_path) if md5_origin == MD5: complete = True else: await dl.edit("`Download corrupt...`") os.remove(file_path) driver.quit() return await dl.respond(f"`{file_name}`\n\n" f"Successfully downloaded to `{file_path}`.") await dl.delete() driver.quit() return @register(outgoing=True, pattern=r"^.specs(?: |)([\S]*)(?: |)([\s\S]*)") async def devices_specifications(request): """ Mobile devices specifications """ textx = await request.get_reply_message() brand = request.pattern_match.group(1).lower() device = request.pattern_match.group(2).lower() if brand and device: pass elif textx: brand = textx.text.split(" ")[0] device = " ".join(textx.text.split(" ")[1:]) else: return await request.edit("`Usage: .specs <brand> <device>`") all_brands = ( BeautifulSoup( get("https://www.devicespecifications.com/en/brand-more").content, "lxml" ) .find("div", {"class": "brand-listing-container-news"}) .findAll("a") ) brand_page_url = None try: brand_page_url = [ i["href"] for i in all_brands if brand == i.text.strip().lower() ][0] except IndexError: await request.edit(f"`{brand} is unknown brand!`") devices = 
BeautifulSoup(get(brand_page_url).content, "lxml").findAll( "div", {"class": "model-listing-container-80"} ) device_page_url = None try: device_page_url = [ i.a["href"] for i in BeautifulSoup(str(devices), "lxml").findAll("h3") if device in i.text.strip().lower() ] except IndexError: await request.edit(f"`can't find {device}!`") if len(device_page_url) > 2: device_page_url = device_page_url[:2] reply = "" for url in device_page_url: info = BeautifulSoup(get(url).content, "lxml") reply = "\n**" + info.title.text.split("-")[0].strip() + "**\n\n" info = info.find("div", {"id": "model-brief-specifications"}) specifications = re.findall(r"<b>.*?<br/>", str(info)) for item in specifications: title = re.findall(r"<b>(.*?)</b>", item)[0].strip() data = ( re.findall(r"</b>: (.*?)<br/>", item)[0] .replace("<b>", "") .replace("</b>", "") .strip() ) reply += f"**{title}**: {data}\n" await request.edit(reply) @register(outgoing=True, pattern=r"^.twrp(?: |$)(\S*)") async def twrp(request): """ get android device twrp """ textx = await request.get_reply_message() device = request.pattern_match.group(1) if device: pass elif textx: device = textx.text.split(" ")[0] else: return await request.edit("`Usage: .twrp <codename>`") url = get(f"https://dl.twrp.me/{device}/") if url.status_code == 404: reply = f"`Couldn't find twrp downloads for {device}!`\n" return await request.edit(reply) page = BeautifulSoup(url.content, "lxml") download = page.find("table").find("tr").find("a") dl_link = f"https://dl.twrp.me{download["href"]}" dl_file = download.text size = page.find("span", {"class": "filesize"}).text date = page.find("em").text.strip() reply = ( f"**Latest TWRP for {device}:**\n" f"[{dl_file}]({dl_link}) - __{size}__\n" f"**Updated:** __{date}__\n" ) await request.edit(reply) CMD_HELP.update( { "android": ">`.magisk`" "\nGet latest Magisk releases" "\n\n>`.device <codename>`" "\nUsage: Get info about android device codename or model." "\n\n>`.codename <brand> <device>`" "\nUsage: Search for android device codename." "\n\n>`.pixeldl` **<download.pixelexperience.org>**" "\nUsage: Download pixel experience ROM into your userbot server." "\n\n>`.specs <brand> <device>`" "\nUsage: Get device specifications info." "\n\n>`.twrp <codename>`" "\nUsage: Get latest twrp download for android device." } )
# Copyright (C) 2019 The Raphielscape Company LLC. # # Licensed under the Raphielscape Public License, Version 1.c (the "License"); # you may not use this file except in compliance with the License. # """ Userbot module containing commands related to android""" import asyncio import re import os import time import math from requests import get from bs4 import BeautifulSoup from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY from userbot.events import register from userbot.utils import chrome, humanbytes, time_formatter, md5, human_to_bytes GITHUB = "https://github.com" DEVICES_DATA = ( "https://raw.githubusercontent.com/androidtrackers/" "certified-android-devices/master/by_device.json" ) @register(outgoing=True, pattern="^.magisk$") async def magisk(request): """ magisk latest releases """ magisk_dict = { "Stable": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/stable.json", "Beta": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/beta.json", "Canary (Release)": "https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/release.json", "Canary (Debug)": "https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/debug.json", } releases = "Latest Magisk Releases:\n" for name, release_url in magisk_dict.items(): data = get(release_url).json() releases += ( f'{name}: [ZIP v{data["magisk"]["version"]}]({data["magisk"]["link"]}) | ' f'[APK v{data["app"]["version"]}]({data["app"]["link"]}) | ' f'[Uninstaller]({data["uninstaller"]["link"]})\n' ) await request.edit(releases) @register(outgoing=True, pattern=r"^.device(?: |$)(\S*)") async def device_info(request): """ get android device basic info from its codename """ textx = await request.get_reply_message() device = request.pattern_match.group(1) if device: pass elif textx: device = textx.text else: return await request.edit("`Usage: .device <codename> / <model>`") try: found = get(DEVICES_DATA).json()[device] except KeyError: reply = f"`Couldn't find info about {device}!`\n" else: reply = f"Search results for {device}:\n\n" for item in found: brand = item["brand"] name = item["name"] codename = device model = item["model"] reply += ( f"{brand} {name}\n" f"**Codename**: `{codename}`\n" f"**Model**: {model}\n\n" ) await request.edit(reply) @register(outgoing=True, pattern=r"^.codename(?: |)([\S]*)(?: |)([\s\S]*)") async def codename_info(request): """ search for android codename """ textx = await request.get_reply_message() brand = request.pattern_match.group(1).lower() device = request.pattern_match.group(2).lower() if brand and device: pass elif textx: brand = textx.text.split(" ")[0] device = " ".join(textx.text.split(" ")[1:]) else: return await request.edit("`Usage: .codename <brand> <device>`") found = [ i for i in get(DEVICES_DATA).json() if i["brand"].lower() == brand and device in i["name"].lower() ] if len(found) > 8: found = found[:8] if found: reply = f"Search results for {brand.capitalize()} {device.capitalize()}:\n\n" for item in found: brand = item["brand"] name = item["name"] codename = item["device"] model = item["model"] reply += ( f"{brand} {name}\n" f"**Codename**: `{codename}`\n" f"**Model**: {model}\n\n" ) else: reply = f"`Couldn't find {device} codename!`\n" await request.edit(reply) @register(outgoing=True, pattern="^.pixeldl(?: |$)(.*)") async def download_api(dl): await dl.edit("`Collecting information...`") URL = dl.pattern_match.group(1) URL_MSG = await dl.get_reply_message() if URL: pass elif URL_MSG: URL = URL_MSG.text else: await dl.edit("`Empty information...`") return 
if not re.findall(r"\bhttps?://download.*pixelexperience.*\.org\S+", URL): await dl.edit("`Invalid information...`") return driver = await chrome() await dl.edit("`Getting information...`") driver.get(URL) error = driver.find_elements_by_class_name("swal2-content") if len(error) > 0 and error[0].text == "File Not Found.": await dl.edit(f"`FileNotFoundError`: {URL} is not found.") return datas = driver.find_elements_by_class_name("download__meta") """ - enumerate data to make sure we download the matched version - """ md5_origin = None i = None for index, value in enumerate(datas): for data in value.text.split("\n"): if data.startswith("MD5"): md5_origin = data.split(":")[1].strip() i = index break if md5_origin is not None and i is not None: break if md5_origin is None and i is None: await dl.edit("`There is no match version available...`") file_name = URL.split("/")[-2] if URL.endswith("/") else URL.split("/")[-1] file_path = TEMP_DOWNLOAD_DIRECTORY + file_name download = driver.find_elements_by_class_name("download__btn")[i] download.click() await dl.edit("`Starting download...`") file_size = human_to_bytes(download.text.split(None, 3)[-1].strip("()")) display_message = None complete = False start = time.time() while not complete: if os.path.isfile(file_path + ".crdownload"): try: downloaded = os.stat(file_path + ".crdownload").st_size status = "Downloading" except OSError: # Rare case await asyncio.sleep(1) continue elif os.path.isfile(file_path): downloaded = os.stat(file_path).st_size file_size = downloaded status = "Checking" else: await asyncio.sleep(0.3) continue diff = time.time() - start percentage = downloaded / file_size * 100 speed = round(downloaded / diff, 2) eta = round((file_size - downloaded) / speed) prog_str = "`{0}` | [{1}{2}] `{3}%`".format( status, "".join(["●" for i in range(math.floor(percentage / 10))]), "".join(["○" for i in range(10 - math.floor(percentage / 10))]), round(percentage, 2), ) current_message = ( "`[DOWNLOAD]`\n\n" f"`{file_name}`\n" f"`Status`\n{prog_str}\n" f"`{humanbytes(downloaded)} of {humanbytes(file_size)}" f" @ {humanbytes(speed)}`\n" f"`ETA` -> {time_formatter(eta)}" ) if ( round(diff % 15.00) == 0 and display_message != current_message or (downloaded == file_size) ): await dl.edit(current_message) display_message = current_message if downloaded == file_size: if not os.path.isfile(file_path): # Rare case await asyncio.sleep(1) continue MD5 = await md5(file_path) if md5_origin == MD5: complete = True else: await dl.edit("`Download corrupt...`") os.remove(file_path) driver.quit() return await dl.respond(f"`{file_name}`\n\n" f"Successfully downloaded to `{file_path}`.") await dl.delete() driver.quit() return @register(outgoing=True, pattern=r"^.specs(?: |)([\S]*)(?: |)([\s\S]*)") async def devices_specifications(request): """ Mobile devices specifications """ textx = await request.get_reply_message() brand = request.pattern_match.group(1).lower() device = request.pattern_match.group(2).lower() if brand and device: pass elif textx: brand = textx.text.split(" ")[0] device = " ".join(textx.text.split(" ")[1:]) else: return await request.edit("`Usage: .specs <brand> <device>`") all_brands = ( BeautifulSoup( get("https://www.devicespecifications.com/en/brand-more").content, "lxml" ) .find("div", {"class": "brand-listing-container-news"}) .findAll("a") ) brand_page_url = None try: brand_page_url = [ i["href"] for i in all_brands if brand == i.text.strip().lower() ][0] except IndexError: await request.edit(f"`{brand} is unknown brand!`") devices = 
BeautifulSoup(get(brand_page_url).content, "lxml").findAll( "div", {"class": "model-listing-container-80"} ) device_page_url = None try: device_page_url = [ i.a["href"] for i in BeautifulSoup(str(devices), "lxml").findAll("h3") if device in i.text.strip().lower() ] except IndexError: await request.edit(f"`can't find {device}!`") if len(device_page_url) > 2: device_page_url = device_page_url[:2] reply = "" for url in device_page_url: info = BeautifulSoup(get(url).content, "lxml") reply = "\n**" + info.title.text.split("-")[0].strip() + "**\n\n" info = info.find("div", {"id": "model-brief-specifications"}) specifications = re.findall(r"<b>.*?<br/>", str(info)) for item in specifications: title = re.findall(r"<b>(.*?)</b>", item)[0].strip() data = ( re.findall(r"</b>: (.*?)<br/>", item)[0] .replace("<b>", "") .replace("</b>", "") .strip() ) reply += f"**{title}**: {data}\n" await request.edit(reply) @register(outgoing=True, pattern=r"^.twrp(?: |$)(\S*)") async def twrp(request): """ get android device twrp """ textx = await request.get_reply_message() device = request.pattern_match.group(1) if device: pass elif textx: device = textx.text.split(" ")[0] else: return await request.edit("`Usage: .twrp <codename>`") url = get(f"https://dl.twrp.me/{device}/") if url.status_code == 404: reply = f"`Couldn't find twrp downloads for {device}!`\n" return await request.edit(reply) page = BeautifulSoup(url.content, "lxml") download = page.find("table").find("tr").find("a") dl_link = f"https://dl.twrp.me{download['href']}" dl_file = download.text size = page.find("span", {"class": "filesize"}).text date = page.find("em").text.strip() reply = ( f"**Latest TWRP for {device}:**\n" f"[{dl_file}]({dl_link}) - __{size}__\n" f"**Updated:** __{date}__\n" ) await request.edit(reply) CMD_HELP.update( { "android": ">`.magisk`" "\nGet latest Magisk releases" "\n\n>`.device <codename>`" "\nUsage: Get info about android device codename or model." "\n\n>`.codename <brand> <device>`" "\nUsage: Search for android device codename." "\n\n>`.pixeldl` **<download.pixelexperience.org>**" "\nUsage: Download pixel experience ROM into your userbot server." "\n\n>`.specs <brand> <device>`" "\nUsage: Get device specifications info." "\n\n>`.twrp <codename>`" "\nUsage: Get latest twrp download for android device." } )
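# --- Added example (not from the original module) ----------------------------
# The `.device` handler above reduces to a dictionary lookup against the
# certified-android-devices by_device.json feed. A minimal standalone sketch,
# assuming the same JSON layout ({codename: [{brand, name, model}, ...]}) and
# using a hypothetical codename purely for illustration:
from requests import get

DEVICES_DATA = (
    "https://raw.githubusercontent.com/androidtrackers/"
    "certified-android-devices/master/by_device.json"
)

def lookup_device(codename: str) -> str:
    try:
        found = get(DEVICES_DATA).json()[codename]
    except KeyError:
        return f"Couldn't find info about {codename}!"
    lines = [f"Search results for {codename}:"]
    for item in found:
        lines.append(f"{item['brand']} {item['name']} ({item['model']})")
    return "\n".join(lines)

if __name__ == "__main__":
    print(lookup_device("whyred"))  # hypothetical codename, not from the source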
aluno = {} aluno['nome'] = input('Nome: ') aluno['media'] = float(input(f'Média de {aluno["nome"]}: ')) if aluno['media'] < 5.0: aluno['situacao'] = 'Reprovado' elif 5 <= aluno['media'] < 7: aluno['situacao'] = 'Recuperação' else: aluno['situacao'] = 'Aprovado' for k, v in aluno.items(): print(f' - {k} é igual a {v}') print(f'O nome é igual a {aluno["nome"]}.') print(f'A média é igual a {aluno["media"]}.') print(f'A situação é: {aluno["situacao"]}')
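# --- Added example (not part of the exercise) ---------------------------------
# The same cutoffs as a reusable function, so each branch can be sanity-checked
# with plain asserts; a hedged refactor sketch, assuming the intended bands are
# media < 5 -> Reprovado, 5 <= media < 7 -> Recuperação, media >= 7 -> Aprovado.
def situacao(media: float) -> str:
    if media < 5.0:
        return 'Reprovado'
    if media < 7.0:
        return 'Recuperação'
    return 'Aprovado'

assert situacao(4.9) == 'Reprovado'
assert situacao(5.0) == 'Recuperação'
assert situacao(7.0) == 'Aprovado'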
import base64 import json import falcon import time import pymongo from datetime import datetime from dateutil.parser import parse from history import conf, Logger from dojot.module import Messenger, Config, Auth from wsgiref import simple_server # NOQA import os LOGGER = Logger.Log(conf.log_level).color_log() class Persister: def __init__(self): self.db = None self.client = None def init_mongodb(self, collection_name=None): """ MongoDB initialization :type collection_name: str :param collection_name: collection to create index """ try: self.client = pymongo.MongoClient( conf.db_host, replicaSet=conf.db_replica_set) self.db = self.client['device_history'] if collection_name: self.create_indexes(collection_name) LOGGER.info("db initialized") except Exception as error: LOGGER.warn("Could not init mongo db client: %s" % error) def create_indexes(self, collection_name): """ Create index given a collection :type collection_name: str :param collection_name: collection to create index """ self.db[collection_name].create_index([('ts', pymongo.DESCENDING)]) self.db[collection_name].create_index( [('attr', pymongo.DESCENDING), ('ts', pymongo.DESCENDING)]) self.db[collection_name].create_index( 'ts', expireAfterSeconds=conf.db_expiration) def create_indexes_for_notifications(self, tenants): LOGGER.debug(f"Creating indexes for tenants: {tenants}") for tenant in tenants: self.create_index_for_tenant(tenant) def create_index_for_tenant(self, tenant): collection_name = "{}_{}".format(tenant, "notifications") self.create_indexes(collection_name) def enable_collection_sharding(self, collection_name): """ Create index given a collection :type collection_name: str :param collection_name: collection to create index """ self.db[collection_name].create_index([('attr', pymongo.HASHED)]) self.client.admin.command('enableSharding', self.db.name) self.client.admin.command( 'shardCollection', self.db[collection_name].full_name, key={'attr': 'hashed'}) def parse_message(self, data): """ Formats message to save in MongoDB :type data: dict :param data: data that will be parsed to a format """ parsed_message = dict() parsed_message['attrs'] = data['data']['attrs'] parsed_message['metadata'] = dict() if data['meta']['timestamp'] is None: parsed_message['metadata']['timestamp'] = int(time.time() * 1000) else: parsed_message['metadata']['timestamp'] = data['meta']['timestamp'] parsed_message['metadata']['deviceid'] = data['data']['id'] parsed_message['metadata']['tenant'] = data['meta']['service'] LOGGER.info("new message is: %s" % parsed_message) return json.dumps(parsed_message) def parse_datetime(self, timestamp): """ Parses date time :type timestamp: string :param timestamp: timestamp """ if timestamp is None: return datetime.utcnow() try: val = int(timestamp) if timestamp > ((2**31)-1): return datetime.utcfromtimestamp(val/1000) return datetime.utcfromtimestamp(float(timestamp)) except ValueError as error: LOGGER.error( "Failed to parse timestamp ({})\n{}".format(timestamp, error)) try: return datetime.utcfromtimestamp(float(timestamp)/1000) except ValueError as error: LOGGER.error( "Failed to parse timestamp ({})\n{}".format(timestamp, error)) try: return parse(timestamp) except TypeError as error: raise TypeError( 'Timestamp could not be parsed: {}\n{}'.format(timestamp, error)) def handle_event_data(self, tenant, message): """ Given a device data event, persist it to mongo :type tenant: str :param tenant: tenant related to the event :type message: str :param message: A device data event """ data = None try: data = 
json.loads(message) LOGGER.info("Received data: %s" % data) except Exception as error: LOGGER.error( 'Received event is not valid JSON. Ignoring.\n%s', error) return LOGGER.debug('got data event %s', message) metadata = data.get('metadata', None) if metadata is None: LOGGER.error( 'Received event has no metadata associated with it. Ignoring') return device_id = metadata.get('deviceid', None) if device_id is None: LOGGER.error( 'Received event cannot be traced to a valid device. Ignoring') return attrs = data.get('attrs', None) if attrs is None: LOGGER.error( 'Received event has no attrs associated with it. Ignoring') return del metadata['deviceid'] timestamp = self.parse_datetime(metadata.get('timestamp', None)) if "timestamp" in metadata: del metadata['timestamp'] if metadata.get('tenant', None) != None: del metadata['tenant'] docs = [] if type(data["attrs"]) is dict: for attr in data.get('attrs', {}).keys(): docs.append({ 'attr': attr, 'value': data['attrs'][attr], 'device_id': device_id, 'ts': timestamp, 'metadata': metadata }) if docs: try: collection_name = "{}_{}".format(tenant, device_id) self.db[collection_name].insert_many(docs) except Exception as error: LOGGER.warn( 'Failed to persist received information.\n%s', error) else: LOGGER.warning( f"Expected attribute dictionary, got {type(data['attrs'])}") LOGGER.warning("Bailing out") def handle_event_devices(self, tenant, message): """ Given a device management event, create (if not alredy existent) proper indexes to suppor the new device :type tenant: str :param tenant: tenant related to the event :type message: str :param message Device lifecyle message, as produced by device manager """ try: data = json.loads(message) LOGGER.info('got device event %s', data) if data['event'] == 'create' or data['event'] == 'update': if "meta" in data and "data" in data: collection_name = "{}_{}".format( data['meta']['service'], data['data']['id']) self.create_indexes(collection_name) elif data['event'] == 'configure': new_message = self.parse_message(data) self.handle_event_data(tenant, new_message) except Exception as error: LOGGER.warning('Failed to persist device event: %s', error) def handle_new_tenant(self, tenant, message): data = json.loads(message) new_tenant = data['tenant'] LOGGER.debug( f"Received a new tenant: {new_tenant}. Will create index for it.") self.create_index_for_tenant(new_tenant) def handle_notification(self, tenant, message): try: notification = json.loads(message) LOGGER.debug( f"Received a notification: {notification}. Will check if it will be persisted.") except Exception as error: LOGGER.debug(f"Invalid JSON: {error}") return notification['ts'] = self.parse_datetime(notification.get("timestamp")) del notification['timestamp'] if('shouldPersist' in notification['metaAttrsFilter']): if(notification['metaAttrsFilter']['shouldPersist']): LOGGER.debug("Notification should be persisted.") try: collection_name = "{}_{}".format(tenant, "notifications") self.db[collection_name].insert_one(notification) except Exception as error: LOGGER.debug(f"Failed to persist notification:\n{error}") else: LOGGER.debug( f"Notification should not be persisted. Discarding it.") else: LOGGER.debug( f"Notification should not be persisted. 
Discarding it.") class LoggingInterface(object): @staticmethod def on_get(req, resp): """ Returns the level attribute value of the LOGGER variable """ response = {"log_level": Logger.Log.levelToName[LOGGER.level]} resp.body = json.dumps(response) resp.status = falcon.HTTP_200 @staticmethod def on_put(req, resp): """ Set a new value to the level attribute of the LOGGER variable """ if 'level' in req.params.keys() and req.params['level'].upper() in Logger.Log.levelToName.values(): LOGGER.setLevel(req.params['level'].upper()) for handler in LOGGER.handlers: handler.setLevel(req.params['level'].upper()) response = {"new_log_level": Logger.Log.levelToName[LOGGER.level]} resp.body = json.dumps(response) resp.status = falcon.HTTP_200 else: raise falcon.HTTPInvalidParam( 'Logging level must be DEBUG, INFO, WARNING, ERROR or CRITICAL!', 'level') def str2_bool(v): """ This function converts a string to bool if this is the case. If the value received is of type bool, it is just returned. If it is a string and has one of these values "yes", "true", "t", "1" regardless of whether it is uppercase or not, it will return a bool with a true value. """ if type(v) is bool: return v return v.lower() in ("yes", "true", "t", "1") def start_dojot_messenger(config, persister, dojot_persist_notifications_only): messenger = Messenger("Persister", config) messenger.init() messenger.create_channel("dojot.notifications", "r") messenger.on(config.dojot['subjects']['tenancy'], "message", persister.handle_new_tenant) LOGGER.info("Listen to tenancy events") messenger.on("dojot.notifications", "message", persister.handle_notification) LOGGER.info('Listen to notification events') if str2_bool(dojot_persist_notifications_only) != True: messenger.create_channel(config.dojot['subjects']['devices'], "r") messenger.create_channel(config.dojot['subjects']['device_data'], "r") messenger.on(config.dojot['subjects']['devices'], "message", persister.handle_event_devices) messenger.on(config.dojot['subjects']['device_data'], "message", persister.handle_event_data) LOGGER.info("Listen to devices events") def main(): """ Main, inits mongo, messenger, create channels read channels for device and device-data topics and add callbacks to events related to that subjects """ config = Config() auth = Auth(config) LOGGER.debug("Initializing persister...") persister = Persister() persister.init_mongodb() persister.create_indexes_for_notifications(auth.get_tenants()) LOGGER.debug("... persister was successfully initialized.") LOGGER.debug("Initializing dojot messenger...") start_dojot_messenger( config, persister, conf.dojot_persist_notifications_only) LOGGER.debug("... dojot messenger was successfully initialized.") # Create falcon app app = falcon.API() app.add_route('/persister/log', LoggingInterface()) httpd = simple_server.make_server( '0.0.0.0', os.environ.get("PERSISTER_PORT", 8057), app) httpd.serve_forever() if __name__ == "__main__": main()
import numpy as np import gym import wandb from rl_credit.examples.environment import ( DISCOUNT_FACTOR, VaryGiftsGoalEnv, ) from rl_credit.examples.train import train DISCOUNT_TIMESCALE = int(np.round(1/(1 - DISCOUNT_FACTOR))) DELAY_STEPS = 0.5 * DISCOUNT_TIMESCALE #################################################### # Environments: Variance of reward in distractor phase # Mean reward is 5 (but default original was 3). class Var0_Gifts(VaryGiftsGoalEnv): """variance=0""" def __init__(self): distractor_xtra_kwargs = {'max_steps': DELAY_STEPS, 'gift_reward': [5, 5]} super().__init__(distractor_xtra_kwargs) class Var1_3_Gifts(VaryGiftsGoalEnv): """variance=1.33""" def __init__(self): distractor_xtra_kwargs = {'max_steps': DELAY_STEPS, 'gift_reward': [3, 7]} super().__init__(distractor_xtra_kwargs) class Var8_3_Gifts(VaryGiftsGoalEnv): """variance=8.33""" def __init__(self): distractor_xtra_kwargs = {'max_steps': DELAY_STEPS, 'gift_reward': [0, 10]} super().__init__(distractor_xtra_kwargs) #################################################### # Config params shared among all experiments common_train_config = dict( num_procs=16, save_interval=300, total_frames=16*600*2500, #24_000_000 log_interval=1, ) common_algo_kwargs = dict( num_frames_per_proc=600, discount=DISCOUNT_FACTOR, lr=0.001, gae_lambda=0.95, entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5, rmsprop_alpha=0.99, rmsprop_eps=1e-8, reshape_reward=None, ) mean_reward = 'mean_reward=5' delay_steps = f'delay_steps={DELAY_STEPS}' #################################################### # Experiment-specific configs ##************ experiment 1 ************ expt1a = dict( model_dir_stem='a2c_mem10_giftvar0', expt_train_config = dict( env_id='GiftDistractorVar0-v0', algo_name='a2c', recurrence=10, ), expt_algo_kwargs = {}, distractor_var = 'gift_var=0', wandb_notes = 'A2C with recurrence=10, gift reward=5, gift var=0, delay=50 steps' ) expt1b = dict( model_dir_stem='tvt_mem10_giftvar0', expt_train_config = dict( env_id='GiftDistractorVar0-v0', algo_name='tvt', recurrence=10, ), expt_algo_kwargs = dict( d_key=150, # same as fixed episode len use_tvt=True, importance_threshold=0.15, tvt_alpha=0.5, y_moving_avg_alpha=0.03, pos_weight=2, embed_actions=True, mask_future=True, ), distractor_var = 'gift_var=0', wandb_notes = 'TVT, recurrence=10, d_key=150, action embed, gift reward=5, gift var=0, delay=50 steps' ) ##************ experiment 2 ************ expt2a = dict( model_dir_stem='a2c_mem10_giftvar1_3', expt_train_config = dict( env_id='GiftDistractorVar1_3-v0', algo_name='a2c', recurrence=10, ), expt_algo_kwargs = {}, distractor_var = 'gift_var=1.33', wandb_notes = 'A2C with recurrence=10, gift reward=5, gift var=1.33 delay=50 steps' ) expt2b = dict( model_dir_stem='tvt_mem10_giftvar1_3', expt_train_config = dict( env_id='GiftDistractorVar1_3-v0', algo_name='tvt', recurrence=10, ), expt_algo_kwargs = dict( d_key=150, use_tvt=True, importance_threshold=0.15, tvt_alpha=0.5, y_moving_avg_alpha=0.03, pos_weight=2, embed_actions=True, mask_future=True, ), distractor_var = 'gift_var=1.33', wandb_notes = 'TVT, recurrence=10, d_key=150, action embed, gift reward=5, gift var=1.33, delay=50 steps' ) ##************ experiment 3 ************ expt3a = dict( model_dir_stem='a2c_mem10_giftvar8_3', expt_train_config = dict( env_id='GiftDistractorVar8_3-v0', algo_name='a2c', recurrence=10, ), expt_algo_kwargs = {}, distractor_var = 'gift_var=8.33', wandb_notes = 'A2C with recurrence=10, gift reward=5, gift var=8.33, delay=50 steps' ) expt3b = dict( 
model_dir_stem='tvt_mem10_giftvar8_3', expt_train_config = dict( env_id='GiftDistractorVar8_3-v0', algo_name='tvt', recurrence=10, ), expt_algo_kwargs = dict( d_key=150, use_tvt=True, importance_threshold=0.15, tvt_alpha=0.5, y_moving_avg_alpha=0.03, pos_weight=2, embed_actions=True, mask_future=True, ), distractor_var = 'gift_var=8.33', wandb_notes = 'TVT, recurrence=10, d_key=150, action embed, gift reward=5, gift var=8.33, delay=50 steps' ) def main(model_dir_stem, expt_train_config, expt_algo_kwargs, distractor_var, wandb_notes, seed): wandb_params = {} algo_kwargs = common_algo_kwargs algo_kwargs.update(expt_algo_kwargs) train_config = common_train_config expt_train_config['model_dir_stem'] = f"{model_dir_stem}_seed{seed}" train_config.update(expt_train_config) train_config['seed'] = seed train_config.update({'algo_kwargs': algo_kwargs}) # expt run params to record in wandb wandb_params.update(train_config) wandb_params.update(common_algo_kwargs) wandb_params.update({'env_params': str(vars(gym.make(train_config['env_id'])))}) wandb_name = f"{train_config['algo_name']}|mem={train_config['recurrence']}|{train_config['env_id']}" wandb.init( project="distractor_reward_variance", config=wandb_params, name=wandb_name, tags=[train_config['algo_name'], distractor_var, delay_steps, mean_reward, train_config['env_id'], f'discount_timescale={DISCOUNT_TIMESCALE}', f"discount_factor={DISCOUNT_FACTOR}"], notes=wandb_notes, reinit=True, group=wandb_name, job_type='training', ) wandb_dir = wandb.run.dir train_config.update({'wandb_dir': wandb_dir}) train(**train_config) wandb.join() if __name__ == '__main__': expts = [expt1b]*5 + [expt2b]*5 + [expt3b]*5 for i, expt in enumerate(expts): main(**expt, seed=i)
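# --- Added note and sketch (not the author's code) ----------------------------
# In main() above, `algo_kwargs = common_algo_kwargs` binds the shared dict
# rather than copying it, so every call in the seed loop mutates the common
# config. A hedged sketch of a copy-based variant, assuming the shared dicts
# are meant to stay pristine across experiments:
import copy

def build_run_config(common_train, common_algo, expt_train, expt_algo, seed):
    algo_kwargs = copy.deepcopy(common_algo)   # per-run copies, no leakage
    algo_kwargs.update(expt_algo)
    train_config = copy.deepcopy(common_train)
    train_config.update(expt_train)
    train_config['seed'] = seed
    train_config['algo_kwargs'] = algo_kwargs
    return train_config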
# -*- coding:utf-8 -*- import os import logging from pathlib import Path import random import numpy as np import torch import torch.optim as optim from torch.optim.lr_scheduler import ReduceLROnPlateau from torch.utils.tensorboard import SummaryWriter from .util.progressbar import ProgressBar from .util.vocab import Vocab from .util.embed import get_embed from .util.data import NerLstmDataset, NerLstmDataLoader from .util.score import get_f1 class LstmTrain: def __init__(self, config): logging.info(config) train_cfg = config["train"] model_cfg = config["model"] if train_cfg["cuda"] and \ torch.cuda.is_available(): self.device = torch.device("cuda") logging.info("在GPU上训练模型") else: self.device = torch.device("cpu") logging.info("在CPU上训练模型") input_dir = Path(train_cfg["input"]) train_file = input_dir / "train.txt" dev_file = input_dir / "dev.txt" test_file = input_dir / "test.txt" # 加载词表 delimiter = train_cfg["delimiter"] vocab = Vocab(pad="<pad>", unk="<unk>") vocab.build_vocab(train_file, dev_file, delimiter=delimiter, count=0) output_dir = Path(train_cfg["output"]) if not os.path.exists(output_dir): os.makedirs(output_dir) word_file = output_dir / "word.txt" label_file = output_dir / "label.txt" vocab.save_vocab(word_file, label_file) model_cfg['word_size'] = vocab.get_word_size() model_cfg['label_size'] = vocab.get_label_size() # 数据处理 train_data = NerLstmDataset(train_file, vocab, delimiter=delimiter) dev_data = NerLstmDataset(dev_file, vocab, delimiter=delimiter) self.train_loader = NerLstmDataLoader(train_data, train_cfg["batch"], shuffle=True, drop_last=True) self.dev_loader = NerLstmDataLoader(dev_data, train_cfg["batch"], shuffle=False, drop_last=False) # 构建word2vec model_cfg["embed"] = \ get_embed(train_cfg["embedding"], vocab, model_cfg["word_dim"]) # 构建模型 logging.info(model_cfg) model_name = config["name"].lower() if model_name == "bilstm_softmax": from .model.bilstm import BiLstmSoftmax model = BiLstmSoftmax(model_cfg) elif model_name == "bilstm_crf": from .model.bilstm import BiLstmCrf model = BiLstmCrf(model_cfg) else: raise RuntimeError(f"没有对应的模型: {config['name']}") self.model = model.to(self.device) # for name, param in model.named_parameters(): # if param.requires_grad: # print(name) summary_dir = output_dir / "summary/" self.writer = SummaryWriter(summary_dir) self.vocab = vocab self.train_cfg = train_cfg self.output_model = output_dir / f"{model_name}.pt" def train(self): logging.info("开始训练") optim_name = self.train_cfg["optim"].lower() lr = self.train_cfg["lr"] if optim_name == "adam": optimizer = optim.Adam(self.model.parameters(), lr=lr) elif optim_name == "sgd": optimizer = optim.SGD(self.model.parameters(), lr=lr) else: raise RuntimeError("当前优化器不支持") scheduler = ReduceLROnPlateau(optimizer, 'max', verbose=True, patience=5) best_f1 = 0.0 for epoch in range(self.train_cfg["epoch"]): self.model.train() bar = ProgressBar(n_total=len(self.train_loader), desc='Training') for step, batch in enumerate(self.train_loader): word_batch, label_batch = batch word_batch = word_batch.to(self.device) label_batch = label_batch.to(self.device) optimizer.zero_grad() loss = self.model(word_batch, label_batch) loss.backward() optimizer.step() bar(step=step, info={'loss': loss.item()}) # if step % 5 == 4: # f1, dloss = dev(dev_loader) # print("f1: ", f1) # print("loss: ", dloss) # model.train() train_f1, train_loss = self.dev(self.train_loader) dev_f1, dev_loss = self.dev(self.dev_loader) print() logging.info("Epoch: {} 验证集F1: {}".format(epoch + 1, dev_f1)) self.writer.add_scalars("f1", { 
"train": round(100 * train_f1, 2), "dev": round(100 * dev_f1, 2) }, epoch + 1) self.writer.add_scalars('loss', { "train": round( train_loss, 2), "dev": round(dev_loss, 2) }, epoch + 1) if dev_f1 >= best_f1: best_f1 = dev_f1 torch.save(self.model.state_dict(), self.output_model) scheduler.step(100 * dev_f1) logging.info(f"训练完成,best f1: {best_f1}") def dev(self, loader): self.model.eval() gold_lists, pred_lists = self.generate_result(loader) f1 = get_f1(gold_lists, pred_lists, self.train_cfg["tag_format"]) loss = self.get_loss(loader) return f1, loss def generate_result(self, loader): gold_list = list() pred_list = list() for batch in loader: word_batch, gold_ids_batch = batch word_batch = word_batch.to(self.device) pred_ids_batch, len_list_batch = self.model(word_batch) gold_lists_batch, pred_lists_batch = self.recover_id_to_tag( gold_ids_batch.tolist(), pred_ids_batch, len_list_batch ) gold_list.extend(gold_lists_batch) pred_list.extend(pred_lists_batch) return gold_list, pred_list def get_loss(self, loader): loss = 0.0 for batch in loader: word_batch, label_batch = batch word_batch = word_batch.to(self.device) label_batch = label_batch.to(self.device) loss_batch = self.model(word_batch, label_batch) loss += loss_batch.item() return loss def recover_id_to_tag(self, gold_ids_list, pred_ids_list, len_list): gold_tag_lists = list() pred_tag_lists = list() for gold_id_list, pred_id_list, seq_len in \ zip(gold_ids_list, pred_ids_list, len_list): tmp_gold_list = list() tmp_pred_list = list() for i in range(seq_len): tmp_gold_list.append(self.vocab.get_label(gold_id_list[i])) tmp_pred_list.append(self.vocab.get_label(pred_id_list[i])) gold_tag_lists.append(tmp_gold_list) pred_tag_lists.append(tmp_pred_list) return gold_tag_lists, pred_tag_lists
import glob import json import logging import os import re import sys import textwrap from importlib.metadata import metadata, requires from typing import Any, Dict, List, Optional import click from pydantic import Field from pydantic.dataclasses import dataclass from datahub.configuration.common import ConfigModel from datahub.ingestion.api.decorators import ( CapabilitySetting, SourceCapability, SupportStatus, ) from datahub.ingestion.api.registry import PluginRegistry from datahub.ingestion.api.source import Source logger = logging.getLogger(__name__) @dataclass class FieldRow: path: str type_name: str required: bool default: str description: str inner_fields: List["FieldRow"] = Field(default_factory=list) @staticmethod def get_checkbox(enabled: bool) -> str: return "✅" if enabled else "" def to_md_line(self) -> str: return ( f"| {self.path} | {self.get_checkbox(self.required)} | {self.type_name} | {self.description} | {self.default} |\n" + "".join([inner_field.to_md_line() for inner_field in self.inner_fields]) ) class FieldHeader(FieldRow): def to_md_line(self) -> str: return "\n".join( [ "| Field | Required | Type | Description | Default |", "| --- | --- | --- | --- | -- |", "", ] ) def __init__(self): pass def get_definition_dict_from_definition( definitions_dict: Dict[str, Any], definition_name: str ) -> Dict[str, Any]: import re m = re.search("#/definitions/(.*)$", definition_name) if m: definition_term: str = m.group(1) definition_dict = definitions_dict[definition_term] return definition_dict raise Exception("Failed to find a definition for " + definition_name) def get_prefixed_name(field_prefix: Optional[str], field_name: Optional[str]) -> str: assert ( field_prefix or field_name ), "One of field_prefix or field_name should be present" return ( f"{field_prefix}.{field_name}" # type: ignore if field_prefix and field_name else field_name if not field_prefix else field_prefix ) def gen_md_table_from_struct(schema_dict: Dict[str, Any]) -> List[str]: table_md_str: List[FieldRow] = [] # table_md_str = [ # "<table>\n<tr>\n<td>\nField\n</td>Type<td>Default</td><td>Description</td></tr>\n" # ] gen_md_table(schema_dict, schema_dict.get("definitions", {}), md_str=table_md_str) # table_md_str.append("\n</table>\n") table_md_str = [field for field in table_md_str if len(field.inner_fields) == 0] + [ field for field in table_md_str if len(field.inner_fields) > 0 ] # table_md_str.sort(key=lambda x: "z" if len(x.inner_fields) else "" + x.path) return ( [FieldHeader().to_md_line()] + [row.to_md_line() for row in table_md_str] + ["\n"] ) def get_enum_description( authored_description: Optional[str], enum_symbols: List[str] ) -> str: description = authored_description or "" missed_symbols = [symbol for symbol in enum_symbols if symbol not in description] if missed_symbols: description = ( description + "." 
if description else "" + " Allowed symbols are " + ",".join(enum_symbols) ) return description def gen_md_table( field_dict: Dict[str, Any], definitions_dict: Dict[str, Any], md_str: List[FieldRow], field_prefix: str = None, ) -> None: if "enum" in field_dict: md_str.append( FieldRow( path=get_prefixed_name(field_prefix, None), type_name="Enum", required=field_dict.get("required") or False, description=f"one of {','.join(field_dict['enum'])}", default=str(field_dict.get("default", "None")), ) ) # md_str.append( # f"| {get_prefixed_name(field_prefix, None)} | Enum | {field_dict['type']} | one of {','.join(field_dict['enum'])} |\n" # ) elif "properties" in field_dict: for field_name, value in field_dict["properties"].items(): required_field: bool = field_name in field_dict.get("required", []) if "allOf" in value: for sub_schema in value["allOf"]: reference = sub_schema["$ref"] def_dict = get_definition_dict_from_definition( definitions_dict, reference ) # special case for enum reference, we don't split up the rows if "enum" in def_dict: row = FieldRow( path=get_prefixed_name(field_prefix, field_name), type_name=f"enum({reference.split('/')[-1]})", description=get_enum_description( value.get("description"), def_dict["enum"] ), default=str(value.get("default", "")), required=required_field, ) md_str.append(row) else: # object reference row = FieldRow( path=get_prefixed_name(field_prefix, field_name), type_name=f"{reference.split('/')[-1]} (see below for fields)", description=value.get("description") or "", default=str(value.get("default", "")), required=required_field, ) md_str.append(row) # md_str.append( # f"| {get_prefixed_name(field_prefix, field_name)} | {reference.split('/')[-1]} (see below for fields) | {value.get('description') or ''} | {value.get('default') or ''} | \n" # ) gen_md_table( def_dict, definitions_dict, field_prefix=get_prefixed_name(field_prefix, field_name), md_str=row.inner_fields, ) elif "type" in value and value["type"] == "enum": # enum enum_definition = value["allOf"][0]["$ref"] def_dict = get_definition_dict_from_definition( definitions_dict, enum_definition ) print(value) print(def_dict) md_str.append( FieldRow( path=get_prefixed_name(field_prefix, field_name), type_name="Enum", description=f"one of {','.join(def_dict['enum'])}", required=required_field, default=str(value.get("default", "None")), ) # f"| {get_prefixed_name(field_prefix, field_name)} | Enum | one of {','.join(def_dict['enum'])} | {def_dict['type']} | \n" ) elif "type" in value and value["type"] == "object": # struct if "$ref" not in value: if ( "additionalProperties" in value and "$ref" in value["additionalProperties"] ): # breakpoint() value_ref = value["additionalProperties"]["$ref"] def_dict = get_definition_dict_from_definition( definitions_dict, value_ref ) row = FieldRow( path=get_prefixed_name(field_prefix, field_name), type_name=f"Dict[str, {value_ref.split('/')[-1]}]", description=value.get("description") or "", default=str(value.get("default", "")), required=required_field, ) md_str.append(row) gen_md_table( def_dict, definitions_dict, field_prefix=get_prefixed_name( field_prefix, f"{field_name}.`key`" ), md_str=row.inner_fields, ) else: value_type = value.get("additionalProperties", {}).get("type") md_str.append( FieldRow( path=get_prefixed_name(field_prefix, field_name), type_name=f"Dict[str,{value_type}]" if value_type else "Dict", description=value.get("description") or "", default=str(value.get("default", "")), required=required_field, ) ) else: object_definition = value["$ref"] row 
= FieldRow( path=get_prefixed_name(field_prefix, field_name), type_name=f"{object_definition.split('/')[-1]} (see below for fields)", description=value.get("description") or "", default=str(value.get("default", "")), required=required_field, ) md_str.append( row # f"| {get_prefixed_name(field_prefix, field_name)} | {object_definition.split('/')[-1]} (see below for fields) | {value.get('description') or ''} | {value.get('default') or ''} | \n" ) def_dict = get_definition_dict_from_definition( definitions_dict, object_definition ) gen_md_table( def_dict, definitions_dict, field_prefix=get_prefixed_name(field_prefix, field_name), md_str=row.inner_fields, ) elif "type" in value and value["type"] == "array": # array items_type = value["items"].get("type", "object") md_str.append( FieldRow( path=get_prefixed_name(field_prefix, field_name), type_name=f"Array of {items_type}", description=value.get("description") or "", default=str(value.get("default", "None")), required=required_field, ) # f"| {get_prefixed_name(field_prefix, field_name)} | Array of {items_type} | {value.get('description') or ''} | {value.get('default')} | \n" ) # TODO: Array of structs elif "type" in value: md_str.append( FieldRow( path=get_prefixed_name(field_prefix, field_name), type_name=value["type"], description=value.get("description") or "", default=str(value.get("default", "None")), required=required_field, ) # f"| {get_prefixed_name(field_prefix, field_name)} | {value['type']} | {value.get('description') or ''} | {value.get('default')} | \n" ) elif "$ref" in value: object_definition = value["$ref"] def_dict = get_definition_dict_from_definition( definitions_dict, object_definition ) row = FieldRow( path=get_prefixed_name(field_prefix, field_name), type_name=f"{object_definition.split('/')[-1]} (see below for fields)", description=value.get("description") or "", default=str(value.get("default", "")), required=required_field, ) md_str.append( row # f"| {get_prefixed_name(field_prefix, field_name)} | {object_definition.split('/')[-1]} (see below for fields) | {value.get('description') or ''} | {value.get('default') or ''} | \n" ) gen_md_table( def_dict, definitions_dict, field_prefix=get_prefixed_name(field_prefix, field_name), md_str=row.inner_fields, ) else: # print(md_str, field_prefix, field_name, value) md_str.append( FieldRow( path=get_prefixed_name(field_prefix, field_name), type_name="Generic dict", description=value.get("description", ""), default=str(value.get("default", "None")), required=required_field, ) # f"| {get_prefixed_name(field_prefix, field_name)} | Any dict | {value.get('description') or ''} | {value.get('default')} |\n" ) def get_snippet(long_string: str, max_length: int = 100) -> str: snippet = "" if len(long_string) > max_length: snippet = long_string[:max_length].strip() + "... 
" else: snippet = long_string.strip() snippet = snippet.replace("\n", " ") snippet = snippet.strip() + " " return snippet def get_support_status_badge(support_status: SupportStatus) -> str: if support_status == SupportStatus.CERTIFIED: return "![Certified](https://img.shields.io/badge/support%20status-certified-brightgreen)" if support_status == SupportStatus.INCUBATING: return "![Incubating](https://img.shields.io/badge/support%20status-incubating-blue)" if support_status == SupportStatus.TESTING: return "![Testing](https://img.shields.io/badge/support%20status-testing-lightgrey)" return "" def get_capability_supported_badge(supported: bool) -> str: return "✅" if supported else "❌" def get_capability_text(src_capability: SourceCapability) -> str: """ Returns markdown format cell text for a capability, hyperlinked to capability feature page if known """ capability_docs_mapping: Dict[SourceCapability, str] = { SourceCapability.DELETION_DETECTION: "../../../../metadata-ingestion/docs/dev_guides/stateful.md#removal-of-stale-tables-and-views", SourceCapability.DOMAINS: "../../../domains.md", SourceCapability.PLATFORM_INSTANCE: "../../../platform-instances.md", SourceCapability.DATA_PROFILING: "../../../../metadata-ingestion/docs/dev_guides/sql_profiles.md", } capability_doc = capability_docs_mapping.get(src_capability) return ( src_capability.value if not capability_doc else f"[{src_capability.value}]({capability_doc})" ) def create_or_update( something: Dict[Any, Any], path: List[str], value: Any ) -> Dict[Any, Any]: dict_under_operation = something for p in path[:-1]: if p not in dict_under_operation: dict_under_operation[p] = {} dict_under_operation = dict_under_operation[p] dict_under_operation[path[-1]] = value return something def does_extra_exist(extra_name: str) -> bool: for key, value in metadata("acryl-datahub").items(): if key == "Provides-Extra" and value == extra_name: return True return False def get_additional_deps_for_extra(extra_name: str) -> List[str]: all_requirements = requires("acryl-datahub") or [] # filter for base dependencies base_deps = set([x.split(";")[0] for x in all_requirements if "extra ==" not in x]) # filter for dependencies for this extra extra_deps = set( [x.split(";")[0] for x in all_requirements if f'extra == "{extra_name}"' in x] ) # calculate additional deps that this extra adds delta_deps = extra_deps - base_deps return list(delta_deps) def relocate_path(orig_path: str, relative_path: str, relocated_path: str) -> str: newPath = os.path.join(os.path.dirname(orig_path), relative_path) assert os.path.exists(newPath) newRelativePath = os.path.relpath(newPath, os.path.dirname(relocated_path)) return newRelativePath def rewrite_markdown(file_contents: str, path: str, relocated_path: str) -> str: def new_url(original_url: str, file_path: str) -> str: if original_url.startswith(("http://", "https://", "#")): return original_url import pathlib file_ext = pathlib.Path(original_url).suffix if file_ext.startswith(".md"): return original_url elif file_ext in [".png", ".svg", ".gif", ".pdf"]: new_url = relocate_path(path, original_url, relocated_path) return new_url return original_url # Look for the [text](url) syntax. Note that this will also capture images. # # We do a little bit of parenthesis matching here to account for parens in URLs. # See https://stackoverflow.com/a/17759264 for explanation of the second capture group. 
new_content = re.sub( r"\[(.*?)\]\(((?:[^)(]+|\((?:[^)(]+|\([^)(]*\))*\))*)\)", lambda x: f"[{x.group(1)}]({new_url(x.group(2).strip(),path)})", # type: ignore file_contents, ) new_content = re.sub( # Also look for the [text]: url syntax. r"^\[(.+?)\]\s*:\s*(.+?)\s*$", lambda x: f"[{x.group(1)}]: {new_url(x.group(2), path)}", new_content, ) return new_content @click.command() @click.option("--out-dir", type=str, required=True) @click.option("--extra-docs", type=str, required=False) @click.option("--source", type=str, required=False) def generate( out_dir: str, extra_docs: Optional[str] = None, source: Optional[str] = None ) -> None: # noqa: C901 source_documentation: Dict[str, Any] = {} metrics = {} metrics["source_platforms"] = {"discovered": 0, "generated": 0, "warnings": []} metrics["plugins"] = {"discovered": 0, "generated": 0, "failed": 0} if extra_docs: for path in glob.glob(f"{extra_docs}/**/*[.md|.yaml|.yml]", recursive=True): # breakpoint() m = re.search("/docs/sources/(.*)/(.*).md", path) if m: platform_name = m.group(1).lower() file_name = m.group(2) destination_md: str = ( f"../docs/generated/ingestion/sources/{platform_name}.md" ) with open(path, "r") as doc_file: file_contents = doc_file.read() final_markdown = rewrite_markdown( file_contents, path, destination_md ) if file_name == "README": # README goes as platform level docs # all other docs are assumed to be plugin level create_or_update( source_documentation, [platform_name, "custom_docs"], final_markdown, ) else: create_or_update( source_documentation, [platform_name, "plugins", file_name, "custom_docs"], final_markdown, ) else: yml_match = re.search("/docs/sources/(.*)/(.*)_recipe.yml", path) if yml_match: platform_name = yml_match.group(1).lower() plugin_name = yml_match.group(2) with open(path, "r") as doc_file: file_contents = doc_file.read() create_or_update( source_documentation, [platform_name, "plugins", plugin_name, "recipe"], file_contents, ) source_registry = PluginRegistry[Source]() source_registry.register_from_entrypoint("datahub.ingestion.source.plugins") # This source is always enabled for plugin_name in sorted(source_registry._mapping.keys()): if source and source != plugin_name: continue metrics["plugins"]["discovered"] = metrics["plugins"]["discovered"] + 1 # We want to attempt to load all plugins before printing a summary. 
source_type = None try: # output = subprocess.check_output( # ["/bin/bash", "-c", f"pip install -e '.[{key}]'"] # ) class_or_exception = source_registry._ensure_not_lazy(plugin_name) if isinstance(class_or_exception, Exception): raise class_or_exception logger.debug(f"Processing {plugin_name}") source_type = source_registry.get(plugin_name) logger.debug(f"Source class is {source_type}") extra_plugin = plugin_name if does_extra_exist(plugin_name) else None extra_deps = ( get_additional_deps_for_extra(extra_plugin) if extra_plugin else [] ) except Exception as e: print(f"Failed to process {plugin_name} due to exception") print(repr(e)) metrics["plugins"]["failed"] = metrics["plugins"].get("failed", 0) + 1 if source_type and hasattr(source_type, "get_config_class"): try: source_config_class: ConfigModel = source_type.get_config_class() support_status = SupportStatus.UNKNOWN capabilities = [] if hasattr(source_type, "__doc__"): source_doc = textwrap.dedent(source_type.__doc__ or "") if hasattr(source_type, "get_platform_name"): platform_name = source_type.get_platform_name() else: platform_name = ( plugin_name.title() ) # we like platform names to be human readable if hasattr(source_type, "get_platform_id"): platform_id = source_type.get_platform_id() source_documentation[platform_id] = ( source_documentation.get(platform_id) or {} ) # breakpoint() create_or_update( source_documentation, [platform_id, "plugins", plugin_name, "classname"], ".".join([source_type.__module__, source_type.__name__]), ) plugin_file_name = "src/" + "/".join(source_type.__module__.split(".")) if os.path.exists(plugin_file_name) and os.path.isdir(plugin_file_name): plugin_file_name = plugin_file_name + "/__init__.py" else: plugin_file_name = plugin_file_name + ".py" if os.path.exists(plugin_file_name): create_or_update( source_documentation, [platform_id, "plugins", plugin_name, "filename"], plugin_file_name, ) else: logger.info( f"Failed to locate filename for {plugin_name}. 
Guessed {plugin_file_name}" ) if hasattr(source_type, "get_support_status"): support_status = source_type.get_support_status() if hasattr(source_type, "get_capabilities"): capabilities = list(source_type.get_capabilities()) capabilities.sort(key=lambda x: x.capability.value) create_or_update( source_documentation, [platform_id, "plugins", plugin_name, "capabilities"], capabilities, ) create_or_update( source_documentation, [platform_id, "name"], platform_name ) create_or_update( source_documentation, [platform_id, "plugins", plugin_name, "extra_deps"], extra_deps, ) config_dir = f"{out_dir}/config_schemas" os.makedirs(config_dir, exist_ok=True) with open(f"{config_dir}/{plugin_name}_config.json", "w") as f: f.write(source_config_class.schema_json(indent=2)) create_or_update(source_documentation, [platform_id, "plugins", plugin_name, "config_schema"], source_config_class.schema_json(indent=2) or "", ) table_md = gen_md_table_from_struct(source_config_class.schema()) create_or_update( source_documentation, [platform_id, "plugins", plugin_name, "source_doc"], source_doc or "", ) create_or_update( source_documentation, [platform_id, "plugins", plugin_name, "config"], table_md, ) create_or_update( source_documentation, [platform_id, "plugins", plugin_name, "support_status"], support_status, ) except Exception as e: raise e sources_dir = f"{out_dir}/sources" os.makedirs(sources_dir, exist_ok=True) for platform_id, platform_docs in source_documentation.items(): if source and platform_id != source: continue metrics["source_platforms"]["discovered"] = ( metrics["source_platforms"]["discovered"] + 1 ) platform_doc_file = f"{sources_dir}/{platform_id}.md" if "name" not in platform_docs: # We seem to have discovered written docs that corresponds to a platform, but haven't found linkage to it from the source classes warning_msg = f"Failed to find source classes for platform {platform_id}. Did you remember to annotate your source class with @platform_name({platform_id})?" 
logger.error(warning_msg) metrics["source_platforms"]["warnings"].append(warning_msg) with open(platform_doc_file, "w") as f: if "name" in platform_docs: f.write(f"import Tabs from '@theme/Tabs';\nimport TabItem from '@theme/TabItem';\n\n") f.write(f"# {platform_docs['name']}\n") if len(platform_docs["plugins"].keys()) > 1: # More than one plugin used to provide integration with this platform f.write( f"There are {len(platform_docs['plugins'].keys())} sources that provide integration with {platform_docs['name']}\n" ) f.write("\n") f.write("<table>\n") f.write("<tr>") for col_header in ["Source Module", "Documentation"]: f.write(f"<td>{col_header}</td>") f.write("</tr>") # f.write("| Source Module | Documentation |\n") # f.write("| ------ | ---- |\n") for plugin in sorted(platform_docs["plugins"]): f.write("<tr>\n") f.write(f"<td>\n\n`{plugin}`\n\n</td>\n") f.write( f"<td>\n\n\n{platform_docs['plugins'][plugin].get('source_doc') or ''} [Read more...](#module-{plugin})\n\n\n</td>\n" ) f.write("</tr>\n") # f.write( # f"| `{plugin}` | {get_snippet(platform_docs['plugins'][plugin]['source_doc'])}[Read more...](#module-{plugin}) |\n" # ) f.write("</table>\n\n") # insert platform level custom docs before plugin section f.write(platform_docs.get("custom_docs") or "") for plugin in sorted(platform_docs["plugins"]): plugin_docs = platform_docs["plugins"][plugin] f.write(f"\n\n## Module `{plugin}`\n") if "support_status" in plugin_docs: f.write( get_support_status_badge(plugin_docs["support_status"]) + "\n\n" ) if "capabilities" in plugin_docs and len(plugin_docs["capabilities"]): f.write("\n### Important Capabilities\n") f.write("| Capability | Status | Notes |\n") f.write("| ---------- | ------ | ----- |\n") plugin_capabilities: List[CapabilitySetting] = plugin_docs[ "capabilities" ] for cap_setting in plugin_capabilities: f.write( f"| {get_capability_text(cap_setting.capability)} | {get_capability_supported_badge(cap_setting.supported)} | {cap_setting.description} |\n" ) f.write("\n") f.write(f"{plugin_docs.get('source_doc') or ''}\n") if "extra_deps" in plugin_docs: f.write("### Install the Plugin\n") if plugin_docs["extra_deps"] != []: f.write("```shell\n") f.write(f"pip install 'acryl-datahub[{plugin}]'\n") f.write("```\n") else: f.write( f"The `{plugin}` source works out of the box with `acryl-datahub`.\n" ) if "recipe" in plugin_docs: f.write("\n### Quickstart Recipe\n") f.write( "Check out the following recipe to get started with ingestion! 
See [below](#config-details) for full configuration options.\n\n\n" ) f.write( "For general pointers on writing and running a recipe, see our [main recipe guide](../../../../metadata-ingestion/README.md#recipes)\n" ) f.write("```yaml\n") f.write(plugin_docs["recipe"]) f.write("\n```\n") if "config" in plugin_docs: f.write("\n### Config Details\n") f.write("""<Tabs> <TabItem value="options" label="Options" default>\n\n""") f.write( "Note that a `.` is used to denote nested fields in the YAML recipe.\n\n" ) f.write( "\n<details open>\n<summary>View All Configuration Options</summary>\n\n" ) for doc in plugin_docs["config"]: f.write(doc) f.write("\n</details>\n\n") f.write(f"""</TabItem> <TabItem value="schema" label="Schema"> The [JSONSchema](https://json-schema.org/) for this configuration is inlined below.\n\n ```javascript {plugin_docs['config_schema']} ```\n\n </TabItem> </Tabs>\n\n""") # insert custom plugin docs after config details f.write(plugin_docs.get("custom_docs", "")) if "classname" in plugin_docs: f.write("\n### Code Coordinates\n") f.write(f"- Class Name: `{plugin_docs['classname']}`\n") if "filename" in plugin_docs: f.write( f"- Browse on [GitHub](../../../../metadata-ingestion/{plugin_docs['filename']})\n\n" ) metrics["plugins"]["generated"] = metrics["plugins"]["generated"] + 1 f.write("\n## Questions\n") f.write( f"If you've got any questions on configuring ingestion for {platform_docs.get('name',platform_id)}, feel free to ping us on [our Slack](https://slack.datahubproject.io)\n" ) metrics["source_platforms"]["generated"] = ( metrics["source_platforms"]["generated"] + 1 ) print("Ingestion Documentation Generation Complete") print("############################################") print(json.dumps(metrics, indent=2)) print("############################################") if metrics["plugins"].get("failed", 0) > 0: sys.exit(1) if __name__ == "__main__": logger.setLevel("INFO") generate()
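To see what `gen_md_table_from_struct` consumes, here is a hedged sketch that feeds it the JSON schema of a tiny pydantic model (v1-style API, matching the `schema_json` calls above); `SketchConfig` and its fields are illustrative and assume the functions defined above are available in the same module.

from pydantic import BaseModel, Field

class SketchConfig(BaseModel):
    """Hypothetical source config, for illustration only."""
    host: str = Field(description="Server hostname")  # required: no default
    port: int = 5432                                  # optional: has default

# BaseModel.schema() yields the {"properties": ..., "required": ...} dict
# that gen_md_table_from_struct() walks to emit the markdown table rows.
for line in gen_md_table_from_struct(SketchConfig.schema()):
    print(line, end="")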
"""Module with some useful decorators.""" import inspect import logging from typing import Callable, Tuple, Optional, Any, Union, Type from functools import wraps ExceptionType = Union[Type[Exception], Tuple[Type[Exception], ...]] class NonExistingException(Exception): """Stub for exception_handler.""" def exception_handler(error_message: str = 'UNSET', additional_handler: Optional[Callable] = None, logger_name: str = __name__, exception: ExceptionType = Exception, pass_exception: ExceptionType = NonExistingException, pass_exception_return: Any = None, additional_log_data: Optional[Callable] = None) -> Any: r"""Wrap error handling logic. Logging decorator with detailed log output. Can take optional handler to post processing after error. :param str error_message: default UNSET :param func additional_handler: post processing handler, called with\ args and kwargs of decorated function :param str logger_name: name of the logger in settings, default __name___ :param Exception exception: types of exceptions to handle,\ default Exception :param Exception pass_exception: types of exceptions to pass, default\ NonExistingException :param any pass_exception_return: return for ignored Exception :param func additional_log_data: func wich return some logger specific \ info spec: (*args,**kwargs) -> str :return: 0 or additional_handler return :rtype: any """ log = logging.getLogger(logger_name) def decorator(function: Callable): @wraps(function) def inner(*args, **kwargs): try: result = function(*args, **kwargs) except pass_exception: return pass_exception_return except exception as emsg: stack = inspect.trace() context_text = str(stack[-1].frame.f_locals)[:1500] new_line = '\n' trace = [(f' [FILE: {frame.filename} => FUNC: ' f'{frame.function} => LINE: {frame.lineno}]\n') for frame in stack] message = f'\n[EMSG: {error_message}]\n' if callable(additional_log_data): message += \ f'[EINFO:\n ' \ f'{additional_log_data(*args, **kwargs)}\n]\n' message += ( f'[STACK:\n{''.join(trace).strip(new_line)}\n]\n' f'[CONTEXT:\n {context_text}\n]\n' f'[DETAIL:\n {emsg}\n]' ) log.error(message) if callable(additional_handler): result = additional_handler(*args, **kwargs) return result return 0 return result return inner return decorator
"""Module with some useful decorators.""" import inspect import logging from typing import Callable, Tuple, Optional, Any, Union, Type from functools import wraps ExceptionType = Union[Type[Exception], Tuple[Type[Exception], ...]] class NonExistingException(Exception): """Stub for exception_handler.""" def exception_handler(error_message: str = 'UNSET', additional_handler: Optional[Callable] = None, logger_name: str = __name__, exception: ExceptionType = Exception, pass_exception: ExceptionType = NonExistingException, pass_exception_return: Any = None, additional_log_data: Optional[Callable] = None) -> Any: r"""Wrap error handling logic. Logging decorator with detailed log output. Can take optional handler to post processing after error. :param str error_message: default UNSET :param func additional_handler: post processing handler, called with\ args and kwargs of decorated function :param str logger_name: name of the logger in settings, default __name___ :param Exception exception: types of exceptions to handle,\ default Exception :param Exception pass_exception: types of exceptions to pass, default\ NonExistingException :param any pass_exception_return: return for ignored Exception :param func additional_log_data: func wich return some logger specific \ info spec: (*args,**kwargs) -> str :return: 0 or additional_handler return :rtype: any """ log = logging.getLogger(logger_name) def decorator(function: Callable): @wraps(function) def inner(*args, **kwargs): try: result = function(*args, **kwargs) except pass_exception: return pass_exception_return except exception as emsg: stack = inspect.trace() context_text = str(stack[-1].frame.f_locals)[:1500] new_line = '\n' trace = [(f' [FILE: {frame.filename} => FUNC: ' f'{frame.function} => LINE: {frame.lineno}]\n') for frame in stack] message = f'\n[EMSG: {error_message}]\n' if callable(additional_log_data): message += \ f'[EINFO:\n ' \ f'{additional_log_data(*args, **kwargs)}\n]\n' message += ( f'[STACK:\n{"".join(trace).strip(new_line)}\n]\n' f'[CONTEXT:\n {context_text}\n]\n' f'[DETAIL:\n {emsg}\n]' ) log.error(message) if callable(additional_handler): result = additional_handler(*args, **kwargs) return result return 0 return result return inner return decorator
"""Backup manager for the Backup integration.""" from __future__ import annotations from dataclasses import asdict, dataclass import hashlib import json from pathlib import Path import tarfile from tarfile import TarError from tempfile import TemporaryDirectory from typing import Any from securetar import SecureTarFile, atomic_contents_add from homeassistant.const import __version__ as HAVERSION from homeassistant.core import HomeAssistant from homeassistant.exceptions import HomeAssistantError from homeassistant.util import dt, json as json_util from .const import EXCLUDE_FROM_BACKUP, LOGGER @dataclass class Backup: """Backup class.""" slug: str name: str date: str path: Path size: float def as_dict(self) -> dict: """Return a dict representation of this backup.""" return {**asdict(self), "path": self.path.as_posix()} class BackupManager: """Backup manager for the Backup integration.""" def __init__(self, hass: HomeAssistant) -> None: """Initialize the backup manager.""" self.hass = hass self.backup_dir = Path(hass.config.path("backups")) self.backing_up = False self.backups: dict[str, Backup] = {} self.loaded = False async def load_backups(self) -> None: """Load data of stored backup files.""" backups = await self.hass.async_add_executor_job(self._read_backups) LOGGER.debug("Loaded %s backups", len(backups)) self.backups = backups self.loaded = True def _read_backups(self) -> dict[str, Backup]: """Read backups from disk.""" backups: dict[str, Backup] = {} for backup_path in self.backup_dir.glob("*.tar"): try: with tarfile.open(backup_path, "r:") as backup_file: if data_file := backup_file.extractfile("./backup.json"): data = json.loads(data_file.read()) backup = Backup( slug=data["slug"], name=data["name"], date=data["date"], path=backup_path, size=round(backup_path.stat().st_size / 1_048_576, 2), ) backups[backup.slug] = backup except (OSError, TarError, json.JSONDecodeError) as err: LOGGER.warning("Unable to read backup %s: %s", backup_path, err) return backups async def get_backups(self) -> dict[str, Backup]: """Return backups.""" if not self.loaded: await self.load_backups() return self.backups async def get_backup(self, slug: str) -> Backup | None: """Return a backup.""" if not self.loaded: await self.load_backups() if not (backup := self.backups.get(slug)): return None if not backup.path.exists(): LOGGER.debug( "Removing tracked backup (%s) that does not exists on the expected path %s", backup.slug, backup.path, ) self.backups.pop(slug) return None return backup async def remove_backup(self, slug: str) -> None: """Remove a backup.""" if (backup := await self.get_backup(slug)) is None: return await self.hass.async_add_executor_job(backup.path.unlink, True) LOGGER.debug("Removed backup located at %s", backup.path) self.backups.pop(slug) async def generate_backup(self) -> Backup: """Generate a backup.""" if self.backing_up: raise HomeAssistantError("Backup already in progress") try: self.backing_up = True backup_name = f"Core {HAVERSION}" date_str = dt.now().isoformat() slug = _generate_slug(date_str, backup_name) backup_data = { "slug": slug, "name": backup_name, "date": date_str, "type": "partial", "folders": ["homeassistant"], "homeassistant": {"version": HAVERSION}, "compressed": True, } tar_file_path = Path(self.backup_dir, f"{backup_data["slug"]}.tar") if not self.backup_dir.exists(): LOGGER.debug("Creating backup directory") self.hass.async_add_executor_job(self.backup_dir.mkdir) await self.hass.async_add_executor_job( self._generate_backup_contents, tar_file_path, backup_data, 
) backup = Backup( slug=slug, name=backup_name, date=date_str, path=tar_file_path, size=round(tar_file_path.stat().st_size / 1_048_576, 2), ) if self.loaded: self.backups[slug] = backup LOGGER.debug("Generated new backup with slug %s", slug) return backup finally: self.backing_up = False def _generate_backup_contents( self, tar_file_path: Path, backup_data: dict[str, Any], ) -> None: """Generate backup contents.""" with TemporaryDirectory() as tmp_dir, SecureTarFile( tar_file_path, "w", gzip=False ) as tar_file: tmp_dir_path = Path(tmp_dir) json_util.save_json( tmp_dir_path.joinpath("./backup.json").as_posix(), backup_data, ) with SecureTarFile( tmp_dir_path.joinpath("./homeassistant.tar.gz").as_posix(), "w", ) as core_tar: atomic_contents_add( tar_file=core_tar, origin_path=Path(self.hass.config.path()), excludes=EXCLUDE_FROM_BACKUP, arcname="data", ) tar_file.add(tmp_dir_path, arcname=".") def _generate_slug(date: str, name: str) -> str: """Generate a backup slug.""" return hashlib.sha1(f"{date} - {name}".lower().encode()).hexdigest()[:8]
"""Backup manager for the Backup integration.""" from __future__ import annotations from dataclasses import asdict, dataclass import hashlib import json from pathlib import Path import tarfile from tarfile import TarError from tempfile import TemporaryDirectory from typing import Any from securetar import SecureTarFile, atomic_contents_add from homeassistant.const import __version__ as HAVERSION from homeassistant.core import HomeAssistant from homeassistant.exceptions import HomeAssistantError from homeassistant.util import dt, json as json_util from .const import EXCLUDE_FROM_BACKUP, LOGGER @dataclass class Backup: """Backup class.""" slug: str name: str date: str path: Path size: float def as_dict(self) -> dict: """Return a dict representation of this backup.""" return {**asdict(self), "path": self.path.as_posix()} class BackupManager: """Backup manager for the Backup integration.""" def __init__(self, hass: HomeAssistant) -> None: """Initialize the backup manager.""" self.hass = hass self.backup_dir = Path(hass.config.path("backups")) self.backing_up = False self.backups: dict[str, Backup] = {} self.loaded = False async def load_backups(self) -> None: """Load data of stored backup files.""" backups = await self.hass.async_add_executor_job(self._read_backups) LOGGER.debug("Loaded %s backups", len(backups)) self.backups = backups self.loaded = True def _read_backups(self) -> dict[str, Backup]: """Read backups from disk.""" backups: dict[str, Backup] = {} for backup_path in self.backup_dir.glob("*.tar"): try: with tarfile.open(backup_path, "r:") as backup_file: if data_file := backup_file.extractfile("./backup.json"): data = json.loads(data_file.read()) backup = Backup( slug=data["slug"], name=data["name"], date=data["date"], path=backup_path, size=round(backup_path.stat().st_size / 1_048_576, 2), ) backups[backup.slug] = backup except (OSError, TarError, json.JSONDecodeError) as err: LOGGER.warning("Unable to read backup %s: %s", backup_path, err) return backups async def get_backups(self) -> dict[str, Backup]: """Return backups.""" if not self.loaded: await self.load_backups() return self.backups async def get_backup(self, slug: str) -> Backup | None: """Return a backup.""" if not self.loaded: await self.load_backups() if not (backup := self.backups.get(slug)): return None if not backup.path.exists(): LOGGER.debug( "Removing tracked backup (%s) that does not exists on the expected path %s", backup.slug, backup.path, ) self.backups.pop(slug) return None return backup async def remove_backup(self, slug: str) -> None: """Remove a backup.""" if (backup := await self.get_backup(slug)) is None: return await self.hass.async_add_executor_job(backup.path.unlink, True) LOGGER.debug("Removed backup located at %s", backup.path) self.backups.pop(slug) async def generate_backup(self) -> Backup: """Generate a backup.""" if self.backing_up: raise HomeAssistantError("Backup already in progress") try: self.backing_up = True backup_name = f"Core {HAVERSION}" date_str = dt.now().isoformat() slug = _generate_slug(date_str, backup_name) backup_data = { "slug": slug, "name": backup_name, "date": date_str, "type": "partial", "folders": ["homeassistant"], "homeassistant": {"version": HAVERSION}, "compressed": True, } tar_file_path = Path(self.backup_dir, f"{backup_data['slug']}.tar") if not self.backup_dir.exists(): LOGGER.debug("Creating backup directory") self.hass.async_add_executor_job(self.backup_dir.mkdir) await self.hass.async_add_executor_job( self._generate_backup_contents, tar_file_path, backup_data, 
) backup = Backup( slug=slug, name=backup_name, date=date_str, path=tar_file_path, size=round(tar_file_path.stat().st_size / 1_048_576, 2), ) if self.loaded: self.backups[slug] = backup LOGGER.debug("Generated new backup with slug %s", slug) return backup finally: self.backing_up = False def _generate_backup_contents( self, tar_file_path: Path, backup_data: dict[str, Any], ) -> None: """Generate backup contents.""" with TemporaryDirectory() as tmp_dir, SecureTarFile( tar_file_path, "w", gzip=False ) as tar_file: tmp_dir_path = Path(tmp_dir) json_util.save_json( tmp_dir_path.joinpath("./backup.json").as_posix(), backup_data, ) with SecureTarFile( tmp_dir_path.joinpath("./homeassistant.tar.gz").as_posix(), "w", ) as core_tar: atomic_contents_add( tar_file=core_tar, origin_path=Path(self.hass.config.path()), excludes=EXCLUDE_FROM_BACKUP, arcname="data", ) tar_file.add(tmp_dir_path, arcname=".") def _generate_slug(date: str, name: str) -> str: """Generate a backup slug.""" return hashlib.sha1(f"{date} - {name}".lower().encode()).hexdigest()[:8]
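A quick, self-contained check of the slug scheme implemented by `_generate_slug` above: the sha1 of "<date> - <name>", lower-cased and truncated to 8 hex characters, so slugs are deterministic and case-insensitive. The sample values are illustrative.

a = _generate_slug("2024-01-01T00:00:00", "Core 2024.1.0")
b = _generate_slug("2024-01-01T00:00:00", "core 2024.1.0")
assert a == b and len(a) == 8  # lower-casing makes the slug case-insensitive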
import os
import time

from anadroid.Types import TESTING_FRAMEWORK, PROFILER
from anadroid.testing_framework.AbstractTestingFramework import AbstractTestingFramework
from anadroid.testing_framework.work.MonkeyWorkUnit import MonkeyWorkUnit
from anadroid.testing_framework.work.WorkLoad import WorkLoad
from anadroid.utils.Utils import get_resources_dir, loge, logw, logs, execute_shell_command

#DEFAULT_RES_DIR = "resources/testingFrameworks/monkey/"
DEFAULT_RES_DIR = os.path.join(get_resources_dir(), "testingFrameworks", "monkey")
DEFAULT_SEEDS_FILE = "monkey_seeds.txt"
DEFAULT_CONFIG_FILE = "monkey_cmd.cfg"


class MonkeyFramework(AbstractTestingFramework):
    """Implements AbstractTestingFramework interface to allow executing tests using Monkey testing framework.
    Attributes:
        executable_prefix(str): prefix for test command. It is basically a call to the executable.
        workload(WorkLoad): workload object containing the work units to be executed.
        res_dir(str): directory containing app crawler resources.
    """
    def __init__(self, profiler, analyzer, default_workload=False, resdir=DEFAULT_RES_DIR):
        super(MonkeyFramework, self).__init__(id=TESTING_FRAMEWORK.MONKEY, profiler=profiler, analyzer=analyzer)
        self.executable_prefix = "adb shell monkey"
        self.workload = None
        self.res_dir = resdir
        if default_workload:
            self.init_default_workload(DEFAULT_SEEDS_FILE)

    def init_default_workload(self, pkg, seeds_file=DEFAULT_SEEDS_FILE, tests_dir=None):
        # NOTE: pkg is currently unused; the work units are built from seeds_file.
        self.workload = WorkLoad()
        wl_filename = os.path.join(self.res_dir, seeds_file)
        config = self.__load_config_file()
        ofile = open(wl_filename, "r")
        i = 0
        max_tests_per_app = self.get_config("tests_per_app", 100000000)
        for seed in ofile:
            if i >= max_tests_per_app:
                break
            wk = MonkeyWorkUnit(self.executable_prefix)
            wk.config(seed=seed.strip(), **config)
            self.workload.add_unit(wk)
            i = i + 1
        ofile.close()

    def execute_test(self, package, wunit=None, timeout=None, *args, **kwargs):
        if wunit is None:
            wunit = self.workload.consume()
        if timeout or self.get_config("test_timeout", None):
            timeout_val = timeout if timeout is not None else self.get_config("test_timeout", None)
            wunit.add_timeout(timeout_val)
        if self.profiler.profiler == PROFILER.GREENSCALER:
            cmd = wunit.build_command(package, *args, **kwargs)
            self.profiler.exec_greenscaler(package, cmd)
        else:
            wunit.execute(package, *args, **kwargs)
        if 'log_filename' in kwargs:
            execute_shell_command(f"adb logcat -d > {kwargs['log_filename']}").validate(Exception("Unable to extract device log"))

    def init(self):
        pass

    def install(self):
        pass

    def uninstall(self):
        pass

    def __load_config_file(self, cfg_filename=DEFAULT_CONFIG_FILE):
        cfg_file = os.path.join(self.res_dir, cfg_filename)
        cfg = {}
        ofile = open(cfg_file, "r")
        for aline in ofile:
            key, pair = aline.split("=")
            cfg[key] = pair.strip()
        ofile.close()
        return cfg

    def test_app(self, device, app):
        """test a given app on a given device.
        Executes each work unit of workload on app running on device.
        Args:
            device(Device): device.
            app(App): app.
        """
        retries_per_test = self.get_config("test_fail_retries", 1)
        for i, wk_unit in enumerate(self.workload.work_units):
            self.exec_one_test(i, device, app, wk_unit, n_retries=retries_per_test)

    def exec_one_test(self, test_id, device, app, wk_unit, n_retries=1):
        """executes one test identified by test_id of a given app on a given device.
        Args:
            test_id: test uuid.
            device(Device): device.
            app(App): app.
            wk_unit(WorkUnit): work unit to be executed.
            n_retries(int): number of times to try run the test in case it fails.
        """
        if n_retries < 0:
            loge(f"Validation failed. Ignoring test {test_id}")
            return
        device.unlock_screen()
        time.sleep(1)
        self.profiler.init(**{'app': app})
        log_file = os.path.join(app.curr_local_dir, f"test_{test_id}.logcat")
        # log device state
        self.profiler.start_profiling()
        app.start()
        self.execute_test(app.package_name, wk_unit, **{'log_filename': log_file})
        app.stop()
        self.profiler.stop_profiling()
        # log device state
        device.clear_logcat()
        self.profiler.export_results(test_id)
        self.profiler.pull_results(test_id, app.curr_local_dir)
        app.clean_cache()
        if not self.analyzer.validate_test(app, test_id, **{'log_filename': log_file}):
            logw("Validation failed. Retrying")
            self.exec_one_test(test_id, device, app, wk_unit, n_retries=n_retries - 1)
        else:
            logs(f"Test {test_id} PASSED")
import os
import time

from anadroid.Types import TESTING_FRAMEWORK, PROFILER
from anadroid.testing_framework.AbstractTestingFramework import AbstractTestingFramework
from anadroid.testing_framework.work.MonkeyWorkUnit import MonkeyWorkUnit
from anadroid.testing_framework.work.WorkLoad import WorkLoad
from anadroid.utils.Utils import get_resources_dir, loge, logw, logs, execute_shell_command

#DEFAULT_RES_DIR = "resources/testingFrameworks/monkey/"
DEFAULT_RES_DIR = os.path.join(get_resources_dir(), "testingFrameworks", "monkey")
DEFAULT_SEEDS_FILE = "monkey_seeds.txt"
DEFAULT_CONFIG_FILE = "monkey_cmd.cfg"


class MonkeyFramework(AbstractTestingFramework):
    """Implements AbstractTestingFramework interface to allow executing tests using Monkey testing framework.
    Attributes:
        executable_prefix(str): prefix for test command. It is basically a call to the executable.
        workload(WorkLoad): workload object containing the work units to be executed.
        res_dir(str): directory containing app crawler resources.
    """
    def __init__(self, profiler, analyzer, default_workload=False, resdir=DEFAULT_RES_DIR):
        super(MonkeyFramework, self).__init__(id=TESTING_FRAMEWORK.MONKEY, profiler=profiler, analyzer=analyzer)
        self.executable_prefix = "adb shell monkey"
        self.workload = None
        self.res_dir = resdir
        if default_workload:
            self.init_default_workload(DEFAULT_SEEDS_FILE)

    def init_default_workload(self, pkg, seeds_file=DEFAULT_SEEDS_FILE, tests_dir=None):
        # NOTE: pkg is currently unused; the work units are built from seeds_file.
        self.workload = WorkLoad()
        wl_filename = os.path.join(self.res_dir, seeds_file)
        config = self.__load_config_file()
        ofile = open(wl_filename, "r")
        i = 0
        max_tests_per_app = self.get_config("tests_per_app", 100000000)
        for seed in ofile:
            if i >= max_tests_per_app:
                break
            wk = MonkeyWorkUnit(self.executable_prefix)
            wk.config(seed=seed.strip(), **config)
            self.workload.add_unit(wk)
            i = i + 1
        ofile.close()

    def execute_test(self, package, wunit=None, timeout=None, *args, **kwargs):
        if wunit is None:
            wunit = self.workload.consume()
        if timeout or self.get_config("test_timeout", None):
            timeout_val = timeout if timeout is not None else self.get_config("test_timeout", None)
            wunit.add_timeout(timeout_val)
        if self.profiler.profiler == PROFILER.GREENSCALER:
            cmd = wunit.build_command(package, *args, **kwargs)
            self.profiler.exec_greenscaler(package, cmd)
        else:
            wunit.execute(package, *args, **kwargs)
        if 'log_filename' in kwargs:
            execute_shell_command(f"adb logcat -d > {kwargs['log_filename']}").validate(Exception("Unable to extract device log"))

    def init(self):
        pass

    def install(self):
        pass

    def uninstall(self):
        pass

    def __load_config_file(self, cfg_filename=DEFAULT_CONFIG_FILE):
        cfg_file = os.path.join(self.res_dir, cfg_filename)
        cfg = {}
        ofile = open(cfg_file, "r")
        for aline in ofile:
            key, pair = aline.split("=")
            cfg[key] = pair.strip()
        ofile.close()
        return cfg

    def test_app(self, device, app):
        """test a given app on a given device.
        Executes each work unit of workload on app running on device.
        Args:
            device(Device): device.
            app(App): app.
        """
        retries_per_test = self.get_config("test_fail_retries", 1)
        for i, wk_unit in enumerate(self.workload.work_units):
            self.exec_one_test(i, device, app, wk_unit, n_retries=retries_per_test)

    def exec_one_test(self, test_id, device, app, wk_unit, n_retries=1):
        """executes one test identified by test_id of a given app on a given device.
        Args:
            test_id: test uuid.
            device(Device): device.
            app(App): app.
            wk_unit(WorkUnit): work unit to be executed.
            n_retries(int): number of times to try run the test in case it fails.
        """
        if n_retries < 0:
            loge(f"Validation failed. Ignoring test {test_id}")
            return
        device.unlock_screen()
        time.sleep(1)
        self.profiler.init(**{'app': app})
        log_file = os.path.join(app.curr_local_dir, f"test_{test_id}.logcat")
        # log device state
        self.profiler.start_profiling()
        app.start()
        self.execute_test(app.package_name, wk_unit, **{'log_filename': log_file})
        app.stop()
        self.profiler.stop_profiling()
        # log device state
        device.clear_logcat()
        self.profiler.export_results(test_id)
        self.profiler.pull_results(test_id, app.curr_local_dir)
        app.clean_cache()
        if not self.analyzer.validate_test(app, test_id, **{'log_filename': log_file}):
            logw("Validation failed. Retrying")
            self.exec_one_test(test_id, device, app, wk_unit, n_retries=n_retries - 1)
        else:
            logs(f"Test {test_id} PASSED")
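

# For context, MonkeyWorkUnit ultimately shells out to Android's standard monkey
# tool. A minimal sketch of the kind of command line that implies, using adb's
# documented monkey flags (-p package, -s seed, -v, trailing event count). The
# exact flags used in practice come from monkey_cmd.cfg, so the values below are
# illustrative only.
demo_package = "com.example.app"   # hypothetical package name
demo_seed = 42                     # e.g. one line of monkey_seeds.txt
demo_event_count = 1000            # assumed; the real value comes from the config
demo_cmd = f"adb shell monkey -p {demo_package} -s {demo_seed} -v {demo_event_count}"
assert demo_cmd == "adb shell monkey -p com.example.app -s 42 -v 1000"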
# Smallest Difference

# def smallestDifference(arrayOne=[], arrayTwo=[]):
#     '''
#     Solution 1 - Brute force (aka: Naive approach)
#
#     O(n^2) time | O(1) space
#
#     arrayOne: a list of integers
#     arrayTwo: a list of integers
#     return: a list of two integers
#     '''
#     closestPair = []
#     closestNumber = float('inf')
#
#     for firstNumber in arrayOne:
#         for secondNumber in arrayTwo:
#             absoluteDiff = abs(firstNumber - secondNumber)
#
#             if (absoluteDiff < closestNumber):
#                 closestNumber = absoluteDiff
#                 closestPair = [firstNumber, secondNumber]
#
#     return closestPair


def smallestDifference(arrayOne=[], arrayTwo=[]):
    '''
    Solution 2 - Sorting along with the two-pointer sliding window approach

    O(n log(n) + m log(m)) time | O(1) space

    arrayOne: a list of integers
    arrayTwo: a list of integers
    return: a list of two integers
    '''
    arrayOne.sort()
    arrayTwo.sort()

    closestPair = []
    closestNumber = float('inf')

    arrayOnePointer = 0;
    arrayTwoPointer = 0;

    while (arrayOnePointer < len(arrayOne) and arrayTwoPointer < len(arrayTwo)):
        firstNumber = arrayOne[arrayOnePointer]
        secondNumber = arrayTwo[arrayTwoPointer]
        currentAbsDiff = abs(firstNumber - secondNumber)

        if (firstNumber == secondNumber):
            closestPair = [firstNumber, secondNumber]
            break

        if (currentAbsDiff < closestNumber):
            closestNumber = currentAbsDiff
            closestPair = [firstNumber, secondNumber]

        if (firstNumber < secondNumber):
            arrayOnePointer += 1
        elif (secondNumber < firstNumber):
            arrayTwoPointer += 1

    return closestPair


# Test cases (black box - unit testing)
testCases = [
    { 'assert': smallestDifference([-1, 5, 10, 20, 28, 3], [26, 134, 135, 15, 17]), 'expected': [28, 26] },
    { 'assert': smallestDifference([-1, 5, 10, 20, 3], [26, 134, 135, 15, 17]), 'expected': [20, 17] },
    { 'assert': smallestDifference([10, 0, 20, 25], [1005, 1006, 1014, 1032, 1031]), 'expected': [25, 1005] },
    # Boundary conditions (empty lists, singleton list, large numbers, small numbers)
    { 'assert': smallestDifference(), 'expected': [] },
    { 'assert': smallestDifference([]), 'expected': [] },
    { 'assert': smallestDifference([], []), 'expected': [] },
    { 'assert': smallestDifference([1], [1]), 'expected': [1, 1] },
    { 'assert': smallestDifference([1, 2, 3, 4]), 'expected': [] },
    { 'assert': smallestDifference([-1, -1, -1], [-1, -1, -1]), 'expected': [-1, -1] },
    # Extremes
]

# Run tests
for (index, test) in enumerate(testCases):
    print(f'# Test {index + 1}')
    print(f'Actual: {test["assert"]}')
    print(f'Expected: {test["expected"]}')
    print('🤘 Test PASSED 🤘' if test["assert"] == test["expected"] else '👎 Test FAILED 👎', '\n')
# Smallest Difference # def smallestDifference(arrayOne=[], arrayTwo=[]): # ''' # Solution 1 - Brute force (aka: Naive approach) # # O(n^2) time | O(1) space # # arrayOne: a list of integers # arrayTwo: a list of integers # return: a list of two integers # ''' # closestPair = [] # closestNumber = float('inf') # # for firstNumber in arrayOne: # for secondNumber in arrayTwo: # absoluteDiff = abs(firstNumber - secondNumber) # # if (absoluteDiff < closestNumber): # closestNumber = absoluteDiff # closestPair = [firstNumber, secondNumber] # # return closestPair def smallestDifference(arrayOne=[], arrayTwo=[]): ''' Solution 2 - Sorting along with the two-pointer sliding window approach O(n log(n) + m log(m)) time | O(1) space arrayOne: a list of integers arrayTwo: a list of integers return: a list of two integers ''' arrayOne.sort() arrayTwo.sort() closestPair = [] closestNumber = float('inf') arrayOnePointer = 0; arrayTwoPointer = 0; while (arrayOnePointer < len(arrayOne) and arrayTwoPointer < len(arrayTwo)): firstNumber = arrayOne[arrayOnePointer] secondNumber = arrayTwo[arrayTwoPointer] currentAbsDiff = abs(firstNumber - secondNumber) if (firstNumber == secondNumber): closestPair = [firstNumber, secondNumber] break if (currentAbsDiff < closestNumber): closestNumber = currentAbsDiff closestPair = [firstNumber, secondNumber] if (firstNumber < secondNumber): arrayOnePointer += 1 elif (secondNumber < firstNumber): arrayTwoPointer += 1 return closestPair # Test cases (black box - unit testing) testCases = [ { 'assert': smallestDifference([-1, 5, 10, 20, 28, 3], [26, 134, 135, 15, 17]), 'expected': [28, 26] }, { 'assert': smallestDifference([-1, 5, 10, 20, 3], [26, 134, 135, 15, 17]), 'expected': [20, 17] }, { 'assert': smallestDifference([10, 0, 20, 25], [1005, 1006, 1014, 1032, 1031]), 'expected': [25, 1005] }, # Boundary conditions (empty lists, singleton list, large numbers, small numbers) { 'assert': smallestDifference(), 'expected': [] }, { 'assert': smallestDifference([]), 'expected': [] }, { 'assert': smallestDifference([], []), 'expected': [] }, { 'assert': smallestDifference([1], [1]), 'expected': [1, 1] }, { 'assert': smallestDifference([1, 2, 3, 4]), 'expected': [] }, { 'assert': smallestDifference([-1, -1, -1], [-1, -1, -1]), 'expected': [-1, -1] }, # Extremes ] # Run tests for (index, test) in enumerate(testCases): print(f'# Test {index + 1}') print(f'Actual: {test["assert"]}') print(f'Expected: {test["expected"]}') print('🤘 Test PASSED 🤘' if test["assert"] == test["expected"] else '👎 Test FAILED 👎', '\n')
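

# Since both arrays end up sorted anyway, the same answer can also be found by
# binary-searching one array for each element of the other. A minimal sketch of
# that alternative using the standard-library bisect module (same overall
# O(n log(n) + m log(m)) bound; the function name here is invented):
from bisect import bisect_left

def smallestDifferenceBisect(arrayOne=[], arrayTwo=[]):
    arrayOne = sorted(arrayOne)
    arrayTwo = sorted(arrayTwo)
    closestPair = []
    closestNumber = float('inf')
    for firstNumber in arrayOne:
        i = bisect_left(arrayTwo, firstNumber)
        # The nearest candidates in arrayTwo are the neighbours of the insertion point.
        for j in (i - 1, i):
            if 0 <= j < len(arrayTwo):
                absoluteDiff = abs(firstNumber - arrayTwo[j])
                if absoluteDiff < closestNumber:
                    closestNumber = absoluteDiff
                    closestPair = [firstNumber, arrayTwo[j]]
    return closestPair

assert smallestDifferenceBisect([-1, 5, 10, 20, 28, 3], [26, 134, 135, 15, 17]) == [28, 26]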
from rest_framework.views import APIView
import rest_framework.status as status
from rest_framework.response import Response
from rest_framework.request import Request
from podcaststore_api.utils import json_or_raise
from podcaststore_api.models.user import UserSerializer
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from rest_framework_simplejwt.tokens import RefreshToken
from drf_yasg.utils import swagger_auto_schema


class IdentifyView(APIView):
    """IdentifyView."""

    @swagger_auto_schema(request_body=UserSerializer)
    def post(self, request: Request) -> Response:
        """Post a new Login."""
        data = json_or_raise(request)

        user_serd = UserSerializer(data=data)
        if not user_serd.is_valid():
            return Response(user_serd.errors, status=status.HTTP_400_BAD_REQUEST)

        try:
            user = User.objects.create_user(**user_serd.data)
        except (TypeError, ValidationError) as e:
            return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
        except IntegrityError:
            return Response(
                {"username": [f"'{user_serd.data['username']}' is already registered"]},
                status=status.HTTP_400_BAD_REQUEST,
            )

        return Response(
            {"token": str(RefreshToken.for_user(user))}, status=status.HTTP_201_CREATED
        )
from rest_framework.views import APIView import rest_framework.status as status from rest_framework.response import Response from rest_framework.request import Request from podcaststore_api.utils import json_or_raise from podcaststore_api.models.user import UserSerializer from django.contrib.auth.models import User from django.core.exceptions import ValidationError from django.db.utils import IntegrityError from rest_framework_simplejwt.tokens import RefreshToken from drf_yasg.utils import swagger_auto_schema class IdentifyView(APIView): """IdentifyView.""" @swagger_auto_schema(request_body=UserSerializer) def post(self, request: Request) -> Response: """Post a new Login.""" data = json_or_raise(request) user_serd = UserSerializer(data=data) if not user_serd.is_valid(): return Response(user_serd.errors, status=status.HTTP_400_BAD_REQUEST) try: user = User.objects.create_user(**user_serd.data) except (TypeError, ValidationError) as e: return Response(str(e), status=status.HTTP_400_BAD_REQUEST) except IntegrityError: return Response( {"username": [f"'{user_serd.data['username']}' is already registered"]}, status=status.HTTP_400_BAD_REQUEST, ) return Response( {"token": str(RefreshToken.for_user(user))}, status=status.HTTP_201_CREATED )
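

# A minimal usage sketch against this view with DRF's test client. The
# "/identify/" route and the username/password fields are assumptions here;
# the project's urls.py and UserSerializer definition are not shown above.
from rest_framework.test import APIClient

client = APIClient()
response = client.post(
    "/identify/",                                  # hypothetical route
    {"username": "alice", "password": "s3cret"},   # assumed serializer fields
    format="json",
)
# 201 with {"token": "..."} on success; 400 with field errors otherwise.
assert response.status_code in (201, 400)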
"""Add License, Header. Use pkglts Problems: - name of a model unit? """ from __future__ import print_function from __future__ import absolute_import from path import Path import numpy import os.path import six class Model2Package(object): """ TODO """ num = 0 def __init__(self, models, dir=None, pkg_name=None): """TODO.""" self.models = models self.dir = dir self.with_import = True if pkg_name is None: self.pkg_name = "CropModel" else: self.pkg_name = pkg_name def run(self): """TODO.""" self.write_tests() # documentation def generate_function_doc(self, model_unit): doc=''' %s '''%comment(generate_doc(model_unit)) return doc def generate_test(self, model_unit, package=None): tab = ' '*4 m = model_unit inputs = m.inputs outputs = m.outputs model_name = signature(m) psets = m.parametersets import_test = f'source("../../src/{'r'}/{package}/{m.name.capitalize()}.r")\n' import_test += "library(assertthat)\n" codetest = [import_test] for v_tests in m.testsets: test_runs = v_tests.test # different run in the thest test_paramsets = v_tests.parameterset # name of paramsets # map the paramsets params = {} if test_paramsets.strip() != "" and test_paramsets not in list(psets.keys()): print(' Unknow parameter %s' % test_paramsets) else: if test_paramsets.strip() != "": params.update(psets[test_paramsets].params) for each_run in test_runs : test_codes = [] # make a function that transforms a title into a function name tname = list(each_run.keys())[0].replace(' ', '_') tname = tname.replace('-', '_') (run, inouts) = list(each_run.items())[0] ins = inouts['inputs'] outs = inouts['outputs'] code = '\n' test_codes.append(code) code = "test_%s<-function(){"%(tname) test_codes.append(code) code = " params= model_%s("%model_name test_codes.append(code) run_param = params.copy() run_param.update(ins) n=0 for k, v in six.iteritems(run_param): type_v = [inp.datatype for inp in inputs if inp.name==k][0] code = tab*2 + "%s = %s"%(k,transf(type_v, v)) if n!=len(run_param)-1: code +="," n=n+1 test_codes.append(code) code =tab*2 + ")" test_codes.append(code) for k, v in six.iteritems(outs): type_v = [out.datatype for out in outputs if out.name==k][0] code = tab + "%s_estimated = params$%s"%(k,k) test_codes.append(code) code = tab + "%s_computed = %s"%(k,transf(type_v,outs[k][0])) test_codes.append(code) if type_v.strip() in ("STRINGLIST", "DATELIST", "STRINGARRAY", "DATEARRAY", "BOOL","STRING", "DATE","INT", "INTLIST","INTARRAY") : # without precision code = tab+ "assert_that(all.equal(%s_estimated, %s_computed)==TRUE)"%(k,k) test_codes.append(code) if type_v.strip() in ( "DOUBLE", "DOUBLELIST", "DOUBLEARRAY"): # with precision code = tab+ "assert_that(all.equal(%s_estimated, %s_computed, scale=1, tol=0.%s)==TRUE)"%(k,k, outs[k][1]) test_codes.append(code) code = '\n'.join(test_codes) code += '\n}\n' code += "test_%s()"%tname codetest.append(code) return codetest def generate_func_test(self, model_unit): pass def write_tests(self): pass def signature(model): name = model.name name = name.strip() name = name.replace(' ', '_').lower() return name def generate_doc(model): desc = model.description _doc = """ %s Author: %s Reference: %s Institution: %s ExtendedDescription: %s ShortDescription: %s """ %(desc.Title, desc.Authors, desc.Reference, desc.Institution, desc.ExtendedDescription, desc.ShortDescription) code = '\n' code += _doc return code def comment(line): list_com = ['#'+x for x in line.split('\n')] com = '\n'.join(list_com) return com DATATYPE = {} DATATYPE['INT'] = "int" DATATYPE['STRING'] = "string" 
DATATYPE['DOUBLE'] = "double"
DATATYPE['BOOLEAN'] = "bool"
DATATYPE['DATE'] = "string"
DATATYPE['STRINGLIST'] = "c(%s)"
DATATYPE['DOUBLELIST'] = "c(%s)"
DATATYPE['INTLIST'] = "c(%s)"
DATATYPE['DATELIST'] = "c(%s)"


def transf(type_v, elem):
    """Render a test value as an R literal for the given datatype."""
    if type_v == "BOOLEAN":
        return elem.upper()
    if type_v in ["STRING", "DATE"]:
        return ('"%s"' % elem).replace('""', '"')
    if type_v in ["DOUBLE", "INT"]:
        return str(elem)
    elif "LIST" in type_v:
        # eval() turns the textual list into a Python list; map() stops at the
        # shorter iterable, so repeating the element type len(elem) times (the
        # length of the *string*) is always enough for every list item.
        return DATATYPE[type_v.strip()] % ",".join(
            list(map(transf, [type_v.split("LIST")[0]] * len(elem), eval(elem)))
        )
"""Add License, Header. Use pkglts Problems: - name of a model unit? """ from __future__ import print_function from __future__ import absolute_import from path import Path import numpy import os.path import six class Model2Package(object): """ TODO """ num = 0 def __init__(self, models, dir=None, pkg_name=None): """TODO.""" self.models = models self.dir = dir self.with_import = True if pkg_name is None: self.pkg_name = "CropModel" else: self.pkg_name = pkg_name def run(self): """TODO.""" self.write_tests() # documentation def generate_function_doc(self, model_unit): doc=''' %s '''%comment(generate_doc(model_unit)) return doc def generate_test(self, model_unit, package=None): tab = ' '*4 m = model_unit inputs = m.inputs outputs = m.outputs model_name = signature(m) psets = m.parametersets import_test = f'source("../../src/{"r"}/{package}/{m.name.capitalize()}.r")\n' import_test += "library(assertthat)\n" codetest = [import_test] for v_tests in m.testsets: test_runs = v_tests.test # different run in the thest test_paramsets = v_tests.parameterset # name of paramsets # map the paramsets params = {} if test_paramsets.strip() != "" and test_paramsets not in list(psets.keys()): print(' Unknow parameter %s' % test_paramsets) else: if test_paramsets.strip() != "": params.update(psets[test_paramsets].params) for each_run in test_runs : test_codes = [] # make a function that transforms a title into a function name tname = list(each_run.keys())[0].replace(' ', '_') tname = tname.replace('-', '_') (run, inouts) = list(each_run.items())[0] ins = inouts['inputs'] outs = inouts['outputs'] code = '\n' test_codes.append(code) code = "test_%s<-function(){"%(tname) test_codes.append(code) code = " params= model_%s("%model_name test_codes.append(code) run_param = params.copy() run_param.update(ins) n=0 for k, v in six.iteritems(run_param): type_v = [inp.datatype for inp in inputs if inp.name==k][0] code = tab*2 + "%s = %s"%(k,transf(type_v, v)) if n!=len(run_param)-1: code +="," n=n+1 test_codes.append(code) code =tab*2 + ")" test_codes.append(code) for k, v in six.iteritems(outs): type_v = [out.datatype for out in outputs if out.name==k][0] code = tab + "%s_estimated = params$%s"%(k,k) test_codes.append(code) code = tab + "%s_computed = %s"%(k,transf(type_v,outs[k][0])) test_codes.append(code) if type_v.strip() in ("STRINGLIST", "DATELIST", "STRINGARRAY", "DATEARRAY", "BOOL","STRING", "DATE","INT", "INTLIST","INTARRAY") : # without precision code = tab+ "assert_that(all.equal(%s_estimated, %s_computed)==TRUE)"%(k,k) test_codes.append(code) if type_v.strip() in ( "DOUBLE", "DOUBLELIST", "DOUBLEARRAY"): # with precision code = tab+ "assert_that(all.equal(%s_estimated, %s_computed, scale=1, tol=0.%s)==TRUE)"%(k,k, outs[k][1]) test_codes.append(code) code = '\n'.join(test_codes) code += '\n}\n' code += "test_%s()"%tname codetest.append(code) return codetest def generate_func_test(self, model_unit): pass def write_tests(self): pass def signature(model): name = model.name name = name.strip() name = name.replace(' ', '_').lower() return name def generate_doc(model): desc = model.description _doc = """ %s Author: %s Reference: %s Institution: %s ExtendedDescription: %s ShortDescription: %s """ %(desc.Title, desc.Authors, desc.Reference, desc.Institution, desc.ExtendedDescription, desc.ShortDescription) code = '\n' code += _doc return code def comment(line): list_com = ['#'+x for x in line.split('\n')] com = '\n'.join(list_com) return com DATATYPE = {} DATATYPE['INT'] = "int" DATATYPE['STRING'] = "string" 
DATATYPE['DOUBLE'] = "double"
DATATYPE['BOOLEAN'] = "bool"
DATATYPE['DATE'] = "string"
DATATYPE['STRINGLIST'] = "c(%s)"
DATATYPE['DOUBLELIST'] = "c(%s)"
DATATYPE['INTLIST'] = "c(%s)"
DATATYPE['DATELIST'] = "c(%s)"


def transf(type_v, elem):
    """Render a test value as an R literal for the given datatype."""
    if type_v == "BOOLEAN":
        return elem.upper()
    if type_v in ["STRING", "DATE"]:
        return ('"%s"' % elem).replace('""', '"')
    if type_v in ["DOUBLE", "INT"]:
        return str(elem)
    elif "LIST" in type_v:
        # eval() turns the textual list into a Python list; map() stops at the
        # shorter iterable, so repeating the element type len(elem) times (the
        # length of the *string*) is always enough for every list item.
        return DATATYPE[type_v.strip()] % ",".join(
            list(map(transf, [type_v.split("LIST")[0]] * len(elem), eval(elem)))
        )
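

# A few worked examples of transf's Python-to-R literal mapping (input values
# invented for illustration; note the LIST case eval()s the string and emits an
# R c(...) call):
assert transf("DOUBLE", "3.14") == "3.14"
assert transf("STRING", "maize") == '"maize"'
assert transf("BOOLEAN", "true") == "TRUE"
assert transf("DOUBLELIST", "[1.0, 2.5]") == "c(1.0,2.5)"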
from django.contrib import messages
from django.shortcuts import render, redirect

from scanner.scan import scan_code


def home(request):
    return render(request, 'home.html')


def scan(request):
    context = scan_code(request.user.profile)
    print(context)
    if context:
        print(context['unmatched_labels'])
        if context['unmatched_labels'] == set():
            messages.success(request, f'The Product matches your preferences')
        else:
            labels = ''
            for label in context['unmatched_labels']:
                labels += label + ', '
            messages.info(request, f'Sorry! This product is not {labels[:-2].replace("_", " ").capitalize()}')
    return redirect(home)
from django.contrib import messages
from django.shortcuts import render, redirect

from scanner.scan import scan_code


def home(request):
    return render(request, 'home.html')


def scan(request):
    context = scan_code(request.user.profile)
    print(context)
    if context:
        print(context['unmatched_labels'])
        if context['unmatched_labels'] == set():
            messages.success(request, f'The Product matches your preferences')
        else:
            labels = ''
            for label in context['unmatched_labels']:
                labels += label + ', '
            messages.info(request, f'Sorry! This product is not {labels[:-2].replace("_", " ").capitalize()}')
    return redirect(home)
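

# The concatenation loop above can also be expressed with str.join; a small
# sketch of the equivalent message formatting (label values invented for the demo):
unmatched = ['vegan', 'gluten_free']
labels = ', '.join(unmatched)
assert f'Sorry! This product is not {labels.replace("_", " ").capitalize()}' == \
    'Sorry! This product is not Vegan, gluten free'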
#!/usr/bin/env python
import uuid
import time
import tempfile
from plaster.tools.utils import tmp

"""
Plaster Generator (gen) architecture

Gen consists of one plumbum cli application and many "generators".

Each run of gen invokes one generator.

The Generators provide:
    * A schema.
    * A generate method which generates the jobs & runs & reports.

The main of the Gen app:
    * Creates the requested generator
    * Assembles a variety of switches into a generator request.
    * The generator request fields might not have been specified on the CLI
      and, if allowed, the GenApp will ask the user.
    * Some of those switches are global switches that are available to all
      generators; others are added dynamically upon the request of the "cli"
      field in the userdata of the generator's schema.
    * The generator request is passed as kwargs into the constructor
      of the Generator; the constructor then validates.
    * The generator then is asked to "generate()"

Notes

There's tension in this design. On the one hand the generators feel like
Plumbum's sub-commands. But the vast majority of the switches are tools
switches. If I treat each generator as a subcommand then plumbum's rules
mean that you'd have to organize the calls like:
    gen --protein=XXX --protein=YYY ptm --ptm_specific_switch
Which really is weird because the "ptm" statement belongs first.
So to resolve that I bypass plumbum's subcommand concept and
I dynamically inject switches into the Gen App before it is instantiated.
"""
import sys
import re
from munch import Munch
from plumbum import cli, local, colors, FG
from plaster.tools.log.log import (
    error,
    important,
    info,
    confirm_yn,
    input_request,
    debug,
)
from plaster.tools.utils import utils
from plaster.tools.utils import data
from plaster.tools.aaseq.proteolyze import proteases as protease_dict
from plaster.tools.uniprot import uniprot
from plaster.gen import helpers
from plaster.run.run import RunExecutor
from plaster.tools.schema.schema import SchemaValidationFailed
from plaster.gen.sigproc_v1_generator import SigprocV1Generator
from plaster.gen.sigproc_v2_generator import SigprocV2Generator
from plaster.gen.calib_nn_generator import CalibNNGenerator
from plaster.gen.sigproc_v2_calib_generator import SigprocV2CalibGenerator
from plaster.gen.classify_generator import ClassifyGenerator
from plaster.gen.survey_generator import SurveyGenerator
from plaster.gen.ptm_generator import PTMGenerator
from plaster.gen.errors import ValidationError

VERSION = "0.2"


# The following help has magic markup that is parsed in the help()
def help_template(generators):
    return utils.smart_wrap(
        f"""
        PGEN -- The plaster run generator
        VERSION: {VERSION}

        TASK: License, version, etc.

        Usage
        ------------------------------------------------------------------------------
        gen <GENERATOR> <SWITCHES>

        Example Usage:
        --------------
        gen classify \\
            --protein_uniprot=P01308 \\
            --n_edmans=10 \\
            --label_set='DE,C' \\
            --job=example \\
            --sample=insulin

        #SWITCHES
        ===============================================================================

        @--job='./my_run'                # (See: GENERATORS)
        @--sample='a modified protein'   # (See: GENERATORS)

        Protein import (All are Repeatable; 1+ Required)...
------------------------------------------------------------------------------ @--protein_fasta='local.fasta' # Local file (See: FASTA) ^--protein_fasta='http://a.com/a.fasta' # URL of same @--protein_csv='//jobs_folder/local.csv' # Local-file (See: CSV) @--protein_csv='http://a.com/a.csv' # URL of same @--protein_csv='s3://bucket/folder/a.csv' # S3 source of same @--protein_seq='Insulin:MALWMRLLPLL' # Sequence in-line (See SEQS) @--protein_uniprot='P01308' # Lookup by Uniprot AC @--protein_uniprot='Insulin:P01308' # Lookup AC and change name ^ Protein options (All are Repeatable; All Optional)... ------------------------------------------------------------------------------ @--protein_random=N # Of proteins added, pick N @--protein_of_interest='P10636-8' # Affects classify reporting ^--protein_exclude='MyName2' # Exclude name ^--protein_abundance='P37840-1:10000.0' # Specify abundance by name ^--protein_abundance-csv='some.csv' # Specify abundance (See: CSV) ^--protein_abundance-csv='http://a.com/a.csv' # URL of same Common Generator Switches: (See: GENERATORS)... ------------------------------------------------------------------------------ @--label_set='DE,Y,C,K:2+S' # Repeatable (See: LABELS) @--protease='trypsin' # Repeatable (See: PROTEASES) @--n_edmans=10 # Edman cycles (See: LABELS) @--n_pres=1 # default: 1 (See: LABELS) @--n_mocks=0 # default: 0 (See: LABELS) @--decoys='reverse' # default: None. See (DECOYS) @--random_seed=123 # default: None @--report_prec=.9 # classifier precision to report Error Model: (See: ERROR_MODEL)... ------------------------------------------------------------------------------ @--err_p_edman_failure=0.06 # Edman miss @--err_p_detach=0.05 # Surface detach # The following probabilities # are specified per-dye like: # "dye|prob" where dye count # starts at zero. @--err_dye_beta=0|7500 # Brightness @--err_dye_sigma=0|0.16 # Log-normal variance @--err_p_bleach_per_cycle=0|0.05 # Bleach rate @--err_p_non_fluorescent=0|0.07 # Dud rate ^ # The following probabilities ^ # are specified per-aa-label ^ # like: "aa:prob" where aa ^ # matches a --label_set ^--err_p_failure_to_bind_amino_acid=0.0 # Failure to bind AA ^--err_p_failure_to_attach_to_dye=0.0 # Failure to attach to dye Sigproc Setup (Optional)... ------------------------------------------------------------------------------ @--sigproc_source='s3://bucket/folder' # S3 source (See: SIGPROC) ^--sigproc_source='http://a.com/a' # URL of same ^--sigproc_source='./folder' # Local path of same @--anomaly_iqr_cutoff # [0,100] default: 95 @--lnfit_name # Repeatable (See: LNFIT) @--lnfit_params # Repeatable (See: LNFIT) @--lnfit_dye_on_threshold # Repeatable (See: LNFIT) @--peak_find_n_cycles # [1,ncycles] default: 4 @--peak_find_start # [0,ncycles-1] default: 0 @--radial_filter # [0,1.0] or default: None Less-frequently used switches... ------------------------------------------------------------------------------ @--cache_folder='...' # default: # $ERISYON_TMP/gen_cache @--force # Force clean @--overwrite # Force overwrite (danger) @--run_name='a' # Force run name (danger) @--prop='a.b=1=int' # Modify a property (danger) @--skip_report # Do not gen. report #GENERATORS & JOBS & SAMPLES =============================================================================== Generators are a mode under which this script creates job instructions. All executions of this script require a generator be specified in the first argument. 
        Generators emit "JOBS" into Job folders as named with the --job= switch
        into the ./jobs_folder folder. Note that ./jobs_folder might be
        a sym-link to somewhere else.

        Current generators are:
        {colors.yellow|generators}

        Each Generator may require specific switches which may be enumerated
        with "gen <GENNAME> --help"

        When a Generator is not given a required input, it will ask for it manually.

        Generators may choose to emit more than one RUN into the job folder
        in which case there may be more than one sub-folder of the job.

        A sample is a required human-readable string that describes the
        biological sample this came from.

        #ERROR_MODEL
        ===============================================================================

        All of the error model probabilities can be swept in the form:
            --err_p_bleach_per_cycle=0|0.05:0.07:3
        which means "The probability of bleach per cycle for dye 0
        shall be swept from 0.05 to 0.07 in 3 steps."

        Note that for --err_p_edman_failure and --err_p_detach you do not
        prefix with a "dye:". Example "--err_p_detach=0.01:0.02:3"

        Be careful when you use the iterators as the number of permutations
        can grow extremely quickly and thus generate a very large number of runs.

        #URLs
        ===============================================================================
        Any switch which accepts a file will also accept an http, https, or s3 URL.

        #FASTA
        ===============================================================================
        .fasta files should be in the Uniprot form.
        See https://www.uniprot.org/help/fasta-headers

        #CSV
        ===============================================================================
        .csv files require a mandatory single line header as follows in any order:
            Name, Seq, Abundance, UniprotAC, PTM, POI

        If UniprotAC is given the Seq will be filled from the UniprotAC.
        If UniprotAC is given but Name isn't, it will use the AC as the Name.

        Abundance is optional. In the case that the abundance alone is given
        then it can be used to assign abundances to proteins that were imported
        in the --protein_* commands.

        PTM is optional. It is a semi-colon-separated list of 1-based aa-locations
        at which PTM can be performed (e.g. phosphorylation).

        POI is optional and contains a 0 or a 1.
        Used to specify "proteins of interest"

        Quoted and un-quoted fields are legal and columns are separated by commas.

        #SEQS
        ===============================================================================
        Protein and peptide sequences are specified in IUPAC; N to C order.
        (http://publications.iupac.org/pac/1984/pdf/5605x0595.pdf)

        Special rules:
            * Whitespace is ignored
                "AB CD" = "ABCD"
            * "." can be used in place of "X"
                "AB..CD" = "ABXXCD"
            * Anything wrapped in () is dropped.
                "AB(a comment)CD" = "ABCD"
            * Square brackets are modifications of the previous amino-acid,
              usually used to indicate a Post-Translational-Modification (PTM)
                "AS[p]D" = "A" + "S[p]" + "D"
            * Curly brackets are reserved for future use

        #LABELS
        ===============================================================================
        Examples:
            "C,K"           = Label C in channel 0, K in ch. 1.
            "DE,C,K"        = Label D and E in channel 0, C in ch. 1, K in ch. 2.
            "DE,C,K: 2"     = Choose all 2 label permutations, eg: (DE,C) (DE,K) (C,K)
            "DE,C,K: 2+S,T" = Choose all 2 label permutations and add label(s)
                              e.g. (DE,C,S,T) (DE,K,S,T) (C,K,S,T)
            "DE,C[p]"       = Label D and E in channel 0, and phospho C in ch. 1.

        Peptides are degraded by Edman degradation one amino acid at a time
        from the N-terminus. When a labelled amino-acid is cleaved the loss
        in fluorescence is what guides identification.
        The --n_edmans=X parameter specifies the number of Edman cycles.
        More cycles will sequence deeper into the peptides but also adds
        more time to the experiment.

        #PROTEASES
        ===============================================================================
        Proteolyze the proteins and any decoys with one or more of:
        {colors.yellow|", ".join(list(protease_dict.keys())[0:5])}
        {colors.yellow|", ".join(list(protease_dict.keys())[5:])}

        You may also proteolyze with more than one protease simultaneously
        using the syntax e.g. --protease=lysc+endopro

        #DECOYS
        ===============================================================================
        Decoys are protein sequences which are expected to *not* be present
        in a sample and are used to estimate the "False Discovery Rate"
        (ie. the rate at which the classifier makes incorrect calls.)

        In cases where decoys are helpful, this option will generate decoys
        automatically. Options available for decoys are:
            "none", "reverse", "shuffle"

        These options are applied before proteolysis.

        #SIGPROC
        ===============================================================================
        When one or more --sigproc_source= are given, the data from an instrument
        run will be added into the analysis.

        #LNFIT
        ===============================================================================
        When one or more --lnfit_params are given, lnfit tasks will be
        executed on each sigproc_source dataset. The --lnfit_params string
        specified will be passed directly to the pflib lnfit routine.

        The --lnfit_dye_on_threshold parameter is used to convert sigproc
        intensities at each cycle to the "ON/OFF" track_photometries.csv
        input format required by pflib's lnfit routine. An intensity above
        this threshold is considered "ON". You may specify a single
        --lnfit_dye_on_threshold to be used for all lnfit tasks, or specify
        a separate threshold for each.

        --lnfit_name may optionally be specified for each parameter set to
        assign a user-specified folder name for the lnfit task. Otherwise,
        the tasks will be auto-numbered in the case there is more than one,
        e.g. lnfit_0, lnfit_1...

        Examples:
        --lnfit_name=lnfit_647_t4000_b7000_p1r10a95
        --lnfit_params='-c 1 -w 647 -m 4 -o 0 -e 10 -s HLAA --beta 7000 --truncate 2'
        --lnfit_dye_on_threshold=4000
        """,
        assert_if_exceeds_width=True,
    )


def add_switches_to_cli_application_from_schema(app, schema, reserved_field_names=[]):
    """
    Add plumbum switches into app from schema top-level fields.

    Arguments:
        app: cli.Application. to add switches into
        schema: Schema. From which switches will be created.
        reserved_field_names: List[str]. Any field in the schema that is
            in the reserved_field_names is skipped; those are handled
            by special-case code. This applies to --protein and other
            switches that are derived by more complicated assemblies.
""" fields = schema.top_level_fields() for field_name, field_type, field_help, field_userdata, field_subtype in fields: if field_name not in reserved_field_names: is_list = field_type is list if is_list: field_type = field_subtype is_bool = field_type is bool if is_bool: switch = cli.Flag([f"--{field_name}"], help=field_help) else: switch = cli.SwitchAttr( [f"--{field_name}"], field_type, help=field_help, list=is_list ) setattr(app, field_name, switch) class GenFuncs: def _request_field_from_user(self, field_name, type_, default): """Mock point""" headless = ValueError(f"Attempt to request field {field_name} in headless mode") while True: resp = input_request( f"Enter {field_name} ({type_.__name__} default={default}): ", default_when_headless=headless, ) try: if resp == "": resp = default if resp is None: val = None else: val = type_(resp) except Exception: important(f"Unable to convert '{resp}' to {type_}. Try again.") else: break return val def _write_runs(self, job_folder, run_descs, props=[]): """ Convert the munch run_descs into folders """ if not job_folder.exists(): job_folder.mkdir() found_run_names = {} for i, run in enumerate(run_descs): # FIND or OVERRIDE run_name run_name = run.get("run_name") assert run_name not in found_run_names found_run_names[run_name] = True # SETUP _erisyon block if "_erisyon" not in run: run._erisyon = Munch() run._erisyon.run_i = i run._erisyon.run_i_of = len(run_descs) run._erisyon.run_name = run_name # OVERRIDE with props for prop in props: k, v, t = prop.split("=") if t == "bool": v = True if v == "true" else False elif t == "int": v = int(v) elif t == "float": v = float(v) elif t == "int_list": v = [int(i) for i in v.split(",")] elif t == "int_dict": v = v.split(",") v = {v[i]: int(v[i + 1]) for i in range(0, len(v), 2)} else: raise TypeError(f"Unknown type in prop coversion '{t}") utils.block_update(run, k, v) # Keep the run_name out run.pop("run_name", None) folder = job_folder / run_name folder.mkdir() RunExecutor(folder, tasks=run).save() info(f"Wrote run to {folder}") class GenApp(cli.Application, GenFuncs): PROGNAME = colors.green | "gen" VERSION = VERSION COLOR_GROUPS = {"Switches": colors.yellow} DESCRIPTION = colors.green | "Generate plaster run instructions" # derived_vals are switch-like elements that are assembled from more-primitive switches derived_vals = Munch(protein=[]) # Global switches that are always available in all generators # ------------------------------------------------------------------------------------- job = cli.SwitchAttr(["--job"], str, help="Name of job folder under ./jobs_folder/") sample = cli.SwitchAttr( ["--sample"], str, help="Human-readable description of the biological sample" ) jobs_folder = cli.SwitchAttr( ["--jobs_folder"], str, help="Location of jobs folder", default="./jobs_folder", ) cache_folder = cli.SwitchAttr( ["--cache_folder"], str, default=str(tmp.cache_folder()), help="Where to cache files", ) force = cli.Flag(["--force"], default=False, help="Force deletion of existing job") overwrite = cli.Flag( ["--overwrite"], default=False, help="Overwrite into existing job, (dangerous)" ) prop = cli.SwitchAttr(["--prop"], str, list=True, help="Set a property (dangerous)") run_name = cli.SwitchAttr(["--run_name"], str, help="Set run_name (dangerous)") protein_random = cli.SwitchAttr( ["--protein_random"], int, help="Pick N random proteins from set" ) skip_report = cli.Flag( ["--skip_report"], default=False, help="Skip report generation" ) generator_klass_by_name = Munch( classify=ClassifyGenerator, 
        ptm=PTMGenerator,
        sigproc_v1=SigprocV1Generator,
        sigproc_v2=SigprocV2Generator,
        calib_nn=CalibNNGenerator,
        calib_sigproc_v2=SigprocV2CalibGenerator,
        survey=SurveyGenerator,
    )

    # files spec'd to gen will be copied here for this job, and moved to
    # the job folder if the generator succeeds.
    job_uuid = uuid.uuid4().hex
    local_sources_tmp_folder = local.path(tempfile.gettempdir()) / job_uuid
    local_sources_tmp_folder.mkdir()

    def _print(self, line):
        """Mock-point"""
        print(line)

    @cli.switch(["--readme"], help="A comprehensive guide for this tool")
    def readme(self):
        """
        Parse the help file
            ^ = Feature not yet implemented
            @ = Feature implemented
            # = Header line
            @--switch='value:1'  # comment
        """
        switch_pat = re.compile(
            r"""
            ^(?P<leading_char>[\^\@\#]?)
            (?P<switch>[^= ]+)?
            (?P<value>=\S*)?
            (?P<comment>.*\#.*)?
            """,
            re.VERBOSE,
        )
        generators = ", ".join(self._subcommands.keys())
        for line in help_template(generators).split("\n"):
            m = switch_pat.match(line)
            if m:
                if m.group("leading_char") == "^":
                    continue
                elif m.group("leading_char") == "@":
                    self._print(
                        f"{colors.yellow & colors.bold | m.group('switch') or ''}"
                        f"{m.group('value') or ''}{colors.yellow | m.group('comment') or ''}"
                    )
                    continue
                elif m.group("leading_char") == "#":
                    self._print(colors.blue & colors.bold | m.group("switch"))
                    continue
            self._print(line)
        sys.exit(0)

    @cli.switch(["--protein_seq"], str, list=True)
    def protein_seq(self, seqs):
        """
        Include protein(s) specified inline in the form 'Name:SEQUENCE' (See: SEQS).
        """
        for seq in seqs:
            parts = seq.split(":")
            if len(parts) != 2:
                raise ValidationError(
                    f"--protein_seq arguments must be in form 'id:XXXXX' but found '{seq}'"
                )
            self.derived_vals.protein += [dict(seqstr=parts[1], id=parts[0])]

    @cli.switch(["--protein_csv"], str, list=True)
    def protein_csv(self, sources):
        """
        Include protein(s) in csv format (Must have a header row with 'Name', 'Seq'
        and optional 'Abundance', 'UniprotAC', and 'PTM' columns.). May be a local
        File-path or URL.
        """
        for source in sources:
            source = helpers.cache_source(
                self.cache_folder, source, self.local_sources_tmp_folder
            )
            proteins_df = helpers.protein_csv_df(source)
            self.derived_vals.protein += proteins_df.to_dict("records")

    @cli.switch(["--protein_uniprot"], str, list=True)
    def protein_uniprot(self, uniprot_acs):
        """
        Include protein by uniprot AC (will fetch from uniprot.org)
        """
        for uniprot_ac in uniprot_acs:
            pro_dict = helpers.split_protein_name(uniprot_ac)
            # This is a little ugly because the get_ac call caches to the
            # default erisyon caching folder (see utils.cache) and therefore
            # bypasses --cache_folder option. This will need a re-work
            # perhaps a "with tmp.cache_folder()" type operation.
            source = uniprot.get_ac_fasta(pro_dict["seqstr"])
            self.derived_vals.protein += helpers.protein_fasta(source, pro_dict["id"])

    @cli.switch(["--protein_fasta"], str, list=True)
    def protein_fasta(self, file_paths):
        for file_path in file_paths:
            source = helpers.cache_source(
                self.cache_folder, file_path, self.local_sources_tmp_folder
            )
            self.derived_vals.protein += helpers.protein_fasta(source)

    @classmethod
    def run(cls, argv=None, exit=True):
        """
        ZBS: Plumbum subcommand startup sequence is complicated.
        But, during the default run() it instantiates this class and passes
        only the next argument which prevents me from jamming dynamic
        switches into the class. So here I duplicate the argument I need
        argv[1] into the head of the list.
        And then later I have to overload _parse_args() in order
        to pop those arguments back off.

        Also, if you pass in "--help" that would normally be handled by
        plumbum correctly, but these hacks prevent that so I have to keep
        track of the construct_fail and let it proceed so that an instance
        is correctly allocated, because the "help" commands only work on a
        functional instance (i.e. you cannot raise the Help exception
        during construction).
        """
        cls.construct_fail = False
        if not argv or len(argv) < 2 or argv[1].startswith("--"):
            if argv is not None and len(argv) > 1 and argv[1] == "--readme":
                # This is a crazy work-around to get the app instance
                # to construct so I can print the readme.
                cls.construct_fail = True
                inst = super(GenApp, cls).run(
                    argv=["", "calib", "--job=foo"], exit=False
                )
                inst[0].readme()
                return 0

            cls.construct_fail = True
            error(
                "You must specify a generator as the first argument after 'gen'.\n"
                f"Options are {', '.join(GenApp.generator_klass_by_name.keys())}"
            )
            argv = ["gen", "--help"]

        if argv is not None:
            return super(GenApp, cls).run(
                argv=[utils.safe_list_get(argv, 1)] + argv, exit=exit
            )
        else:
            return super(GenApp, cls).run(argv=argv, exit=exit)

    def validate_job_name_and_folder(self):
        """
        Validate the job name and compute job_folder path.
        Optionally delete the job_folder if it exists.

        Returns:
            job_folder path
        """
        if self.job is None:
            raise ValidationError("job not specified.")

        self.job = self.job.lower()

        if not utils.is_symbol(self.job):
            raise ValidationError(
                "job should be a symbol (a-z, 0-9, and _) are allowed."
            )

        job_folder = local.path(self.jobs_folder) / self.job

        delete_job = False
        if self.overwrite:
            delete_job = False
        elif self.force:
            delete_job = True
        elif job_folder.exists():
            delete_job = confirm_yn(
                (
                    colors.red & colors.bold
                    | f"Do you really want to remove ALL contents of "
                )
                + (
                    colors.yellow
                    | f"'{job_folder}'?\nIf no, then job may be in an inconsistent state.\n"
                ),
                "y",
            )

        if delete_job:
            important(f"Deleting all of {job_folder}.")
            job_folder.delete()

        return job_folder

    def _parse_args(self, argv):
        """See above for why this crazy code pops. Undoing the list munging."""
        if self.construct_fail:
            return super()._parse_args(argv)

        argv.pop(0)
        argv.pop(0)
        return super()._parse_args(argv)

    def _validate_args(self, swfuncs, tailargs):
        """See above for why this is overloaded."""
        if self.construct_fail:
            tailargs = []
        return super()._validate_args(swfuncs, tailargs)

    def __init__(self, generator_name):
        if self.construct_fail:
            return super().__init__(generator_name)

        klass = GenApp.generator_klass_by_name.get(generator_name)
        self.generator_klass = klass
        if klass is None:
            raise ValidationError(
                f"Unknown generator '{generator_name}'. Options are: "
                f"{', '.join(list(GenApp.generator_klass_by_name.keys()))}"
            )

        # Dynamically create plumbum switches based on the generator
        add_switches_to_cli_application_from_schema(
            GenApp, klass.schema, reserved_field_names=list(self.derived_vals.keys())
        )

        super().__init__(generator_name)

    def main(self):
        if self.construct_fail:
            return

        with local.cwd("/erisyon"):
            assert local.path("erisyon_root").exists()
            job_folder = self.validate_job_name_and_folder()

            schema = self.generator_klass.schema
            defaults = self.generator_klass.defaults

            requirements = schema.requirements()
            # APPLY defaults and then ask user for any elements that are not declared

            generator_args = {}
            switches = self._switches_by_name

            if self.protein_random is not None:
                info(
                    f"Sampling {self.protein_random} random proteins from imported set"
                )
                n = len(self.derived_vals.protein)
                assert n >= self.protein_random
                self.derived_vals.protein = data.subsample(
                    self.derived_vals.protein, self.protein_random
                )
                assert len(self.derived_vals.protein) == self.protein_random

            for arg_name, arg_type, arg_help, arg_userdata in requirements:
                if (
                    arg_name in self.derived_vals
                    and self.derived_vals.get(arg_name) is not None
                ):
                    # Load from a derived switch (eg: protein)
                    generator_args[arg_name] = self.derived_vals[arg_name]
                elif arg_name in switches and switches.get(arg_name) is not None:
                    # Load from a switch
                    generator_args[arg_name] = getattr(self, arg_name)
                else:
                    # If the schema allows the user to enter manually
                    if arg_userdata.get("allowed_to_be_entered_manually"):
                        generator_args[arg_name] = self._request_field_from_user(
                            arg_name, arg_type, default=defaults.get(arg_name)
                        )

            # Intentionally run the generate before the job folder is written
            # so that if generate fails it doesn't leave around a partial job.
            try:
                generator_args["force_run_name"] = self.run_name
                generator = self.generator_klass(**generator_args)
                run_descs = generator.generate()
            except (SchemaValidationFailed, ValidationError) as e:
                # Emit clean failure and exit 1
                error(str(e))
                return 1

            # WRITE the job & copy any file sources
            self._write_runs(job_folder, run_descs, props=self.prop)
            (job_folder / "_gen_sources").delete()
            self.local_sources_tmp_folder.move(job_folder / "_gen_sources")

            if not self.skip_report:
                report = generator.report_assemble()
                utils.json_save(job_folder / "report.ipynb", report)

            utils.yaml_write(
                job_folder / "job_manifest.yaml",
                uuid=self.job_uuid,
                localtime=time.strftime("%Y-%m-%d, %H:%M:%S", time.localtime()),
                # Note: it seems localtime inside our container is UTC
                who=local.env.get("RUN_USER", "Unknown"),
                cmdline_args=sys.argv,
            )


if __name__ == "__main__":
    # This is ONLY executed if you do not launch via main.py, because
    # main.py imports this file as a subcommand
    GenApp.run()
#!/usr/bin/env python import uuid import time import tempfile from plaster.tools.utils import tmp """ Plaster Generator (gen) architecture Gen consists of one plumbum cli application and many "generators". Each run of gen invokes one generator. The Generators provide: * A schema. * A generate method which generates the jobs & runs & reports. The main of the Gen app: * Creates the requested generator * Assembles a variety of switches into a state generator request. * The generator request fields might no have been specified on the CLI and, if allowed, the GenApp will ask the user. * Some of those switches are global switches that are available to all generators; others are added dynamically upon the request of the "cli" field in the userdata of the generator's scehema. * The generator request is passed as kwargs into the constructor of the Generator; the constructor then validates. * The generator then is asked to "generate()" Notes There's tension in this design. One the one hand the generators feel like Plumbum's sub-commands. But the vast majority of the switches are tools switches. If I treat each generator as a subcommand then plumbums rules mean that you'd have to organize the calls like: gen --protein=XXX --protein=YYY ptm --ptm_specific_switch Which really is weird because the "ptm" statement belongs first. So to resolve that I bypass plumbum's subcommand concept and I dynamically inject switches into the Gen App before it is instanciated. """ import sys import re from munch import Munch from plumbum import cli, local, colors, FG from plaster.tools.log.log import ( error, important, info, confirm_yn, input_request, debug, ) from plaster.tools.utils import utils from plaster.tools.utils import data from plaster.tools.aaseq.proteolyze import proteases as protease_dict from plaster.tools.uniprot import uniprot from plaster.gen import helpers from plaster.run.run import RunExecutor from plaster.tools.schema.schema import SchemaValidationFailed from plaster.gen.sigproc_v1_generator import SigprocV1Generator from plaster.gen.sigproc_v2_generator import SigprocV2Generator from plaster.gen.calib_nn_generator import CalibNNGenerator from plaster.gen.sigproc_v2_calib_generator import SigprocV2CalibGenerator from plaster.gen.classify_generator import ClassifyGenerator from plaster.gen.survey_generator import SurveyGenerator from plaster.gen.ptm_generator import PTMGenerator from plaster.gen.errors import ValidationError VERSION = "0.2" # The following help has magic markup that is parsed in the help() def help_template(generators): return utils.smart_wrap( f""" PGEN -- The plaster run generator VERSION: {VERSION} TASK: License, version, etc. Usage ------------------------------------------------------------------------------ gen <GENERATOR> <SWITCHES> Example Usage: -------------- gen classify \\ --protein_uniprot=P01308 \\ --n_edmans=10 \\ --label_set='DE,C' \\ --job=example \\ --sample=insulin #SWITCHES =============================================================================== @--job='./my_run' # (See: GENERATORS) @--sample='a modified protein' # (See: GENERATORS) Protein import (All are Repeatable; 1+ Required)... 
------------------------------------------------------------------------------ @--protein_fasta='local.fasta' # Local file (See: FASTA) ^--protein_fasta='http://a.com/a.fasta' # URL of same @--protein_csv='//jobs_folder/local.csv' # Local-file (See: CSV) @--protein_csv='http://a.com/a.csv' # URL of same @--protein_csv='s3://bucket/folder/a.csv' # S3 source of same @--protein_seq='Insulin:MALWMRLLPLL' # Sequence in-line (See SEQS) @--protein_uniprot='P01308' # Lookup by Uniprot AC @--protein_uniprot='Insulin:P01308' # Lookup AC and change name ^ Protein options (All are Repeatable; All Optional)... ------------------------------------------------------------------------------ @--protein_random=N # Of proteins added, pick N @--protein_of_interest='P10636-8' # Affects classify reporting ^--protein_exclude='MyName2' # Exclude name ^--protein_abundance='P37840-1:10000.0' # Specify abundance by name ^--protein_abundance-csv='some.csv' # Specify abundance (See: CSV) ^--protein_abundance-csv='http://a.com/a.csv' # URL of same Common Generator Switches: (See: GENERATORS)... ------------------------------------------------------------------------------ @--label_set='DE,Y,C,K:2+S' # Repeatable (See: LABELS) @--protease='trypsin' # Repeatable (See: PROTEASES) @--n_edmans=10 # Edman cycles (See: LABELS) @--n_pres=1 # default: 1 (See: LABELS) @--n_mocks=0 # default: 0 (See: LABELS) @--decoys='reverse' # default: None. See (DECOYS) @--random_seed=123 # default: None @--report_prec=.9 # classifier precision to report Error Model: (See: ERROR_MODEL)... ------------------------------------------------------------------------------ @--err_p_edman_failure=0.06 # Edman miss @--err_p_detach=0.05 # Surface detach # The following probabilities # are specified per-dye like: # "dye|prob" where dye count # starts at zero. @--err_dye_beta=0|7500 # Brightness @--err_dye_sigma=0|0.16 # Log-normal variance @--err_p_bleach_per_cycle=0|0.05 # Bleach rate @--err_p_non_fluorescent=0|0.07 # Dud rate ^ # The following probabilities ^ # are specified per-aa-label ^ # like: "aa:prob" where aa ^ # matches a --label_set ^--err_p_failure_to_bind_amino_acid=0.0 # Failure to bind AA ^--err_p_failure_to_attach_to_dye=0.0 # Failure to attach to dye Sigproc Setup (Optional)... ------------------------------------------------------------------------------ @--sigproc_source='s3://bucket/folder' # S3 source (See: SIGPROC) ^--sigproc_source='http://a.com/a' # URL of same ^--sigproc_source='./folder' # Local path of same @--anomaly_iqr_cutoff # [0,100] default: 95 @--lnfit_name # Repeatable (See: LNFIT) @--lnfit_params # Repeatable (See: LNFIT) @--lnfit_dye_on_threshold # Repeatable (See: LNFIT) @--peak_find_n_cycles # [1,ncycles] default: 4 @--peak_find_start # [0,ncycles-1] default: 0 @--radial_filter # [0,1.0] or default: None Less-frequently used switches... ------------------------------------------------------------------------------ @--cache_folder='...' # default: # $ERISYON_TMP/gen_cache @--force # Force clean @--overwrite # Force overwrite (danger) @--run_name='a' # Force run name (danger) @--prop='a.b=1=int' # Modify a property (danger) @--skip_report # Do not gen. report #GENERATORS & JOBS & SAMPLES =============================================================================== Generators are a mode under which this script creates job instructions. All executions of this script require a generator be specified in the first argument. 
        Generators emit "JOBS" into job folders as named with the --job=
        switch into the ./jobs_folder folder.
        Note that ./jobs_folder might be a sym-link to somewhere else.

        Current generators are: {colors.yellow|generators}

        Each Generator may require specific switches which may be enumerated
        with "gen <GENNAME> --help"

        When a Generator is not given a required input, it will ask the user
        for it interactively.

        Generators may choose to emit more than one RUN into the job folder
        in which case there may be more than one sub-folder of the job.

        A sample is a required human-readable string that describes the
        biological sample this came from.

        #ERROR_MODEL
        ===============================================================================
        All of the error model probabilities can be swept in the form:
            --err_p_bleach_per_cycle=0|0.05:0.07:3
        which means "The probability of bleach per cycle for dye 0 shall be
        swept from 0.05 to 0.07 in 3 steps."

        Note that for --err_p_edman_failure and --err_p_detach you do not
        prefix with a "dye|". Example: "--err_p_detach=0.01:0.02:3"

        Be careful when you use the iterators as the number of permutations
        can grow extremely quickly and thus generate a very large number of
        runs.

        #URLs
        ===============================================================================
        Any switch which accepts a file will also accept an http, https, or
        s3 URL.

        #FASTA
        ===============================================================================
        .fasta files should be in the Uniprot form.
        See https://www.uniprot.org/help/fasta-headers

        #CSV
        ===============================================================================
        .csv files require a mandatory single line header as follows in any order:
            Name, Seq, Abundance, UniprotAC, PTM, POI

        If UniprotAC is given the Seq will be filled from the UniprotAC.
        If UniprotAC is given but Name isn't, it will use the AC as the Name.

        Abundance is optional. In the case that the abundance alone is given
        then it can be used to assign abundances to proteins that were
        imported in the --protein_* commands.

        PTM is optional. It is a semicolon-separated list of 1-based
        aa-locations at which a PTM can be performed (e.g. phosphorylation).

        POI is optional and contains a 0 or a 1.
        Used to specify "proteins of interest"

        Quoted and un-quoted fields are legal and columns are separated by
        commas.

        #SEQS
        ===============================================================================
        Protein and peptide sequences are specified in IUPAC; N to C order.
        (http://publications.iupac.org/pac/1984/pdf/5605x0595.pdf)

        Special rules:
            * Whitespace is ignored
                "AB CD" = "ABCD"
            * "." can be used in place of "X"
                "AB..CD" = "ABXXCD"
            * Anything wrapped in () is dropped.
                "AB(a comment)CD" = "ABCD"
            * Square brackets are modifications of the previous amino-acid,
              usually used to indicate a Post-Translational-Modification (PTM)
                "AS[p]D" = "A" + "S[p]" + "D"
            * Curly brackets are reserved for future use

        #LABELS
        ===============================================================================
        Examples:
            "C,K"            = Label C in channel 0, K in ch. 1.
            "DE,C,K"         = Label D and E in channel 0, C in ch. 1, K in ch. 2.
            "DE,C,K: 2"      = Choose all 2 label permutations, eg:
                               (DE,C) (DE,K) (C,K)
            "DE,C,K: 2+S,T"  = Choose all 2 label permutations and add label(s)
                               e.g. (DE,C,S,T) (DE,K,S,T) (C,K,S,T)
            "DE,C[p]"        = Label D and E in channel 0, and phospho C in ch. 1.

        Peptides are degraded by Edman degradation one amino acid at a time
        from the N-terminus. When a labelled amino-acid is cleaved the loss
        in fluorescence is what guides identification.
        The --n_edmans=X parameter specifies the number of Edman cycles.
        More cycles will sequence deeper into the peptides but also adds
        more time to the experiment.

        #PROTEASES
        ===============================================================================
        Proteolyze the proteins and any decoys with one or more of:
            {colors.yellow|", ".join(list(protease_dict.keys())[0:5])}
            {colors.yellow|", ".join(list(protease_dict.keys())[5:])}

        You may also proteolyze with more than one protease simultaneously
        using the syntax e.g. --protease=lysc+endopro

        #DECOYS
        ===============================================================================
        Decoys are protein sequences which are expected to *not* be present
        in a sample and are used to estimate the "False Discovery Rate"
        (i.e. the rate at which the classifier makes incorrect calls.)

        In cases where decoys are helpful, this option will generate decoys
        automatically. Options available for decoys are:
            "none", "reverse", "shuffle"

        These options are applied before proteolysis.

        #SIGPROC
        ===============================================================================
        When one or more --sigproc_source= are given, the data from an
        instrument run will be added into the analysis.

        #LNFIT
        ===============================================================================
        When one or more --lnfit_params are given, lnfit tasks will be
        executed on each sigproc_source dataset. The --lnfit_params string
        specified will be passed directly to the pflib lnfit routine.

        The --lnfit_dye_on_threshold parameter is used to convert sigproc
        intensities at each cycle to the "ON/OFF" track_photometries.csv
        input format required by pflib's lnfit routine. An intensity above
        this threshold is considered "ON". You may specify a single
        --lnfit_dye_on_threshold to be used for all lnfit tasks, or specify
        a separate threshold for each.

        --lnfit_name may optionally be specified for each parameter set to
        assign a user-specified folder name for the lnfit task. Otherwise,
        the tasks will be auto-numbered in the case there is more than one,
        e.g. lnfit_0, lnfit_1...

        Examples:
            --lnfit_name=lnfit_647_t4000_b7000_p1r10a95
            --lnfit_params='-c 1 -w 647 -m 4 -o 0 -e 10 -s HLAA --beta 7000 --truncate 2'
            --lnfit_dye_on_threshold=4000
        """,
        assert_if_exceeds_width=True,
    )


def add_switches_to_cli_application_from_schema(app, schema, reserved_field_names=[]):
    """
    Add plumbum switches into app from schema top-level fields.

    Arguments:
        app: cli.Application. to add switches into
        schema: Schema. From which switches will be created.
        reserved_field_names: List[str].
            Any field in the schema that is in the reserved_field_names is
            skipped; those are handled by special-case code. This applies to
            --protein and other switches that are derived by more complicated
            assemblies.
""" fields = schema.top_level_fields() for field_name, field_type, field_help, field_userdata, field_subtype in fields: if field_name not in reserved_field_names: is_list = field_type is list if is_list: field_type = field_subtype is_bool = field_type is bool if is_bool: switch = cli.Flag([f"--{field_name}"], help=field_help) else: switch = cli.SwitchAttr( [f"--{field_name}"], field_type, help=field_help, list=is_list ) setattr(app, field_name, switch) class GenFuncs: def _request_field_from_user(self, field_name, type_, default): """Mock point""" headless = ValueError(f"Attempt to request field {field_name} in headless mode") while True: resp = input_request( f"Enter {field_name} ({type_.__name__} default={default}): ", default_when_headless=headless, ) try: if resp == "": resp = default if resp is None: val = None else: val = type_(resp) except Exception: important(f"Unable to convert '{resp}' to {type_}. Try again.") else: break return val def _write_runs(self, job_folder, run_descs, props=[]): """ Convert the munch run_descs into folders """ if not job_folder.exists(): job_folder.mkdir() found_run_names = {} for i, run in enumerate(run_descs): # FIND or OVERRIDE run_name run_name = run.get("run_name") assert run_name not in found_run_names found_run_names[run_name] = True # SETUP _erisyon block if "_erisyon" not in run: run._erisyon = Munch() run._erisyon.run_i = i run._erisyon.run_i_of = len(run_descs) run._erisyon.run_name = run_name # OVERRIDE with props for prop in props: k, v, t = prop.split("=") if t == "bool": v = True if v == "true" else False elif t == "int": v = int(v) elif t == "float": v = float(v) elif t == "int_list": v = [int(i) for i in v.split(",")] elif t == "int_dict": v = v.split(",") v = {v[i]: int(v[i + 1]) for i in range(0, len(v), 2)} else: raise TypeError(f"Unknown type in prop coversion '{t}") utils.block_update(run, k, v) # Keep the run_name out run.pop("run_name", None) folder = job_folder / run_name folder.mkdir() RunExecutor(folder, tasks=run).save() info(f"Wrote run to {folder}") class GenApp(cli.Application, GenFuncs): PROGNAME = colors.green | "gen" VERSION = VERSION COLOR_GROUPS = {"Switches": colors.yellow} DESCRIPTION = colors.green | "Generate plaster run instructions" # derived_vals are switch-like elements that are assembled from more-primitive switches derived_vals = Munch(protein=[]) # Global switches that are always available in all generators # ------------------------------------------------------------------------------------- job = cli.SwitchAttr(["--job"], str, help="Name of job folder under ./jobs_folder/") sample = cli.SwitchAttr( ["--sample"], str, help="Human-readable description of the biological sample" ) jobs_folder = cli.SwitchAttr( ["--jobs_folder"], str, help="Location of jobs folder", default="./jobs_folder", ) cache_folder = cli.SwitchAttr( ["--cache_folder"], str, default=str(tmp.cache_folder()), help="Where to cache files", ) force = cli.Flag(["--force"], default=False, help="Force deletion of existing job") overwrite = cli.Flag( ["--overwrite"], default=False, help="Overwrite into existing job, (dangerous)" ) prop = cli.SwitchAttr(["--prop"], str, list=True, help="Set a property (dangerous)") run_name = cli.SwitchAttr(["--run_name"], str, help="Set run_name (dangerous)") protein_random = cli.SwitchAttr( ["--protein_random"], int, help="Pick N random proteins from set" ) skip_report = cli.Flag( ["--skip_report"], default=False, help="Skip report generation" ) generator_klass_by_name = Munch( classify=ClassifyGenerator, 
        ptm=PTMGenerator,
        sigproc_v1=SigprocV1Generator,
        sigproc_v2=SigprocV2Generator,
        calib_nn=CalibNNGenerator,
        calib_sigproc_v2=SigprocV2CalibGenerator,
        survey=SurveyGenerator,
    )

    # files spec'd to gen will be copied here for this job, and moved to
    # the job folder if the generator succeeds.
    job_uuid = uuid.uuid4().hex
    local_sources_tmp_folder = local.path(tempfile.gettempdir()) / job_uuid
    local_sources_tmp_folder.mkdir()

    def _print(self, line):
        """Mock-point"""
        print(line)

    @cli.switch(["--readme"], help="A comprehensive guide for this tool")
    def readme(self):
        """
        Parse the help file
            ^ = Feature not yet implemented
            @ = Feature implemented
            # = Header line
            @--switch='value:1'  # comment
        """
        switch_pat = re.compile(
            r"""
            ^(?P<leading_char>[\^\@\#]?)
            (?P<switch>[^= ]+)?
            (?P<value>=\S*)?
            (?P<comment>.*\#.*)?
            """,
            re.VERBOSE,
        )

        generators = ", ".join(self._subcommands.keys())
        for line in help_template(generators).split("\n"):
            m = switch_pat.match(line)
            if m:
                if m.group("leading_char") == "^":
                    continue
                elif m.group("leading_char") == "@":
                    self._print(
                        f"{colors.yellow & colors.bold | m.group('switch') or ''}"
                        f"{m.group('value') or ''}{colors.yellow | m.group('comment') or ''}"
                    )
                    continue
                elif m.group("leading_char") == "#":
                    self._print(colors.blue & colors.bold | m.group("switch"))
                    continue
            self._print(line)
        sys.exit(0)

    @cli.switch(["--protein_seq"], str, list=True)
    def protein_seq(self, seqs):
        """
        Include protein(s) specified inline in the form 'id:SEQUENCE'
        (See: SEQS). Repeatable.
        """
        for seq in seqs:
            parts = seq.split(":")
            if len(parts) != 2:
                raise ValidationError(
                    f"--protein_seq arguments must be in form 'id:XXXXX' but found '{seq}'"
                )
            self.derived_vals.protein += [dict(seqstr=parts[1], id=parts[0])]

    @cli.switch(["--protein_csv"], str, list=True)
    def protein_csv(self, sources):
        """
        Include protein(s) in csv format (Must have a header row with 'Name',
        'Seq' and optional 'Abundance', 'UniprotAC', and 'PTM' columns.).
        May be a local File-path or URL.
        """
        for source in sources:
            source = helpers.cache_source(
                self.cache_folder, source, self.local_sources_tmp_folder
            )
            proteins_df = helpers.protein_csv_df(source)
            self.derived_vals.protein += proteins_df.to_dict("records")

    @cli.switch(["--protein_uniprot"], str, list=True)
    def protein_uniprot(self, uniprot_acs):
        """
        Include protein by uniprot AC (will fetch from uniprot.org)
        """
        for uniprot_ac in uniprot_acs:
            pro_dict = helpers.split_protein_name(uniprot_ac)
            # This is a little ugly because the get_ac_fasta call caches to the
            # default erisyon caching folder (see utils.cache) and therefore
            # bypasses the --cache_folder option. This will need a re-work,
            # perhaps a "with tmp.cache_folder()" type operation.
            source = uniprot.get_ac_fasta(pro_dict["seqstr"])
            self.derived_vals.protein += helpers.protein_fasta(source, pro_dict["id"])

    @cli.switch(["--protein_fasta"], str, list=True)
    def protein_fasta(self, file_paths):
        for file_path in file_paths:
            source = helpers.cache_source(
                self.cache_folder, file_path, self.local_sources_tmp_folder
            )
            self.derived_vals.protein += helpers.protein_fasta(source)

    @classmethod
    def run(cls, argv=None, exit=True):
        """
        ZBS: Plumbum's subcommand startup sequence is complicated. During the
        default run() it instantiates this class and passes only the next
        argument, which prevents me from jamming dynamic switches into the
        class. So here I duplicate the argument I need (argv[1]) onto the
        head of the list.
        And then later I have to overload _parse_args() in order to pop those
        arguments back off.

        Also, if you pass in "--help" that would normally be handled by
        plumbum correctly, but these hacks prevent that so I have to keep
        track of construct_fail and let it proceed so that an instance is
        correctly allocated, because the "help" commands only work on a
        functional instance (i.e. you cannot raise the Help exception during
        construction).
        """
        cls.construct_fail = False
        if not argv or len(argv) < 2 or argv[1].startswith("--"):
            if argv is not None and len(argv) > 1 and argv[1] == "--readme":
                # This is a crazy work-around to get the app instance
                # to construct so I can print the readme.
                cls.construct_fail = True
                inst = super(GenApp, cls).run(
                    argv=["", "calib", "--job=foo"], exit=False
                )
                inst[0].readme()
                return 0

            cls.construct_fail = True
            error(
                "You must specify a generator as the first argument after 'gen'.\n"
                f"Options are {', '.join(GenApp.generator_klass_by_name.keys())}"
            )
            argv = ["gen", "--help"]

        if argv is not None:
            return super(GenApp, cls).run(
                argv=[utils.safe_list_get(argv, 1)] + argv, exit=exit
            )
        else:
            return super(GenApp, cls).run(argv=argv, exit=exit)

    def validate_job_name_and_folder(self):
        """
        Validate the job name and compute job_folder path.
        Optionally delete the job_folder if it exists.

        Returns:
            job_folder path
        """
        if self.job is None:
            raise ValidationError("job not specified.")

        self.job = self.job.lower()

        if not utils.is_symbol(self.job):
            raise ValidationError(
                "job should be a symbol; only a-z, 0-9, and _ are allowed."
            )

        job_folder = local.path(self.jobs_folder) / self.job

        delete_job = False
        if self.overwrite:
            delete_job = False
        elif self.force:
            delete_job = True
        elif job_folder.exists():
            delete_job = confirm_yn(
                (
                    colors.red & colors.bold
                    | f"Do you really want to remove ALL contents of "
                )
                + (
                    colors.yellow
                    | f"'{job_folder}'?\nIf no, then job may be in an inconsistent state.\n"
                ),
                "y",
            )

        if delete_job:
            important(f"Deleting all of {job_folder}.")
            job_folder.delete()

        return job_folder

    def _parse_args(self, argv):
        """See run() above for why this crazy code pops. Undoing the list munging."""
        if self.construct_fail:
            return super()._parse_args(argv)

        argv.pop(0)
        argv.pop(0)
        return super()._parse_args(argv)

    def _validate_args(self, swfuncs, tailargs):
        """See run() above for why this is overloaded."""
        if self.construct_fail:
            tailargs = []
        return super()._validate_args(swfuncs, tailargs)

    def __init__(self, generator_name):
        if self.construct_fail:
            return super().__init__(generator_name)

        klass = GenApp.generator_klass_by_name.get(generator_name)
        self.generator_klass = klass
        if klass is None:
            raise ValidationError(
                f"Unknown generator '{generator_name}'. Options are: "
                f"{', '.join(list(GenApp.generator_klass_by_name.keys()))}"
            )

        # Dynamically create plumbum switches based on the generator
        add_switches_to_cli_application_from_schema(
            GenApp, klass.schema, reserved_field_names=list(self.derived_vals.keys())
        )

        super().__init__(generator_name)

    def main(self):
        if self.construct_fail:
            return

        with local.cwd("/erisyon"):
            assert local.path("erisyon_root").exists()

            job_folder = self.validate_job_name_and_folder()

            schema = self.generator_klass.schema
            defaults = self.generator_klass.defaults

            requirements = schema.requirements()
            # APPLY defaults and then ask user for any elements that are not declared

            generator_args = {}
            switches = self._switches_by_name

            if self.protein_random is not None:
                info(
                    f"Sampling {self.protein_random} random proteins from imported set"
                )
                n = len(self.derived_vals.protein)
                assert n >= self.protein_random
                self.derived_vals.protein = data.subsample(
                    self.derived_vals.protein, self.protein_random
                )
                assert len(self.derived_vals.protein) == self.protein_random

            for arg_name, arg_type, arg_help, arg_userdata in requirements:
                if (
                    arg_name in self.derived_vals
                    and self.derived_vals.get(arg_name) is not None
                ):
                    # Load from a derived switch (eg: protein)
                    generator_args[arg_name] = self.derived_vals[arg_name]
                elif arg_name in switches and switches.get(arg_name) is not None:
                    # Load from a switch
                    generator_args[arg_name] = getattr(self, arg_name)
                else:
                    # If the schema allows the user to enter manually
                    if arg_userdata.get("allowed_to_be_entered_manually"):
                        generator_args[arg_name] = self._request_field_from_user(
                            arg_name, arg_type, default=defaults.get(arg_name)
                        )

            # Intentionally run the generate before the job folder is written
            # so that if generate fails it doesn't leave around a partial job.
            try:
                generator_args["force_run_name"] = self.run_name
                generator = self.generator_klass(**generator_args)
                run_descs = generator.generate()
            except (SchemaValidationFailed, ValidationError) as e:
                # Emit clean failure and exit 1
                error(str(e))
                return 1

            # WRITE the job & copy any file sources
            self._write_runs(job_folder, run_descs, props=self.prop)
            (job_folder / "_gen_sources").delete()
            self.local_sources_tmp_folder.move(job_folder / "_gen_sources")

            if not self.skip_report:
                report = generator.report_assemble()
                utils.json_save(job_folder / "report.ipynb", report)

            utils.yaml_write(
                job_folder / "job_manifest.yaml",
                uuid=self.job_uuid,
                localtime=time.strftime("%Y-%m-%d, %H:%M:%S", time.localtime()),
                # Note: it seems localtime inside our container is UTC
                who=local.env.get("RUN_USER", "Unknown"),
                cmdline_args=sys.argv,
            )


if __name__ == "__main__":
    # This is ONLY executed if you do not run via "main.py" because
    # main.py imports this file as a subcommand.
    GenApp.run()
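# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): _write_runs() above
# decodes "--prop" strings of the form "key=value=type" before patching them
# into a run description. This is a minimal standalone version of that
# conversion, assuming the type tags are exactly the ones handled there; the
# helper name `convert_prop` is hypothetical.

def convert_prop(prop: str):
    """Decode 'key=value=type' into (key, typed_value)."""
    k, v, t = prop.split("=")
    if t == "bool":
        val = v == "true"
    elif t == "int":
        val = int(v)
    elif t == "float":
        val = float(v)
    elif t == "int_list":
        val = [int(i) for i in v.split(",")]
    elif t == "int_dict":
        parts = v.split(",")
        val = {parts[i]: int(parts[i + 1]) for i in range(0, len(parts), 2)}
    else:
        raise TypeError(f"Unknown type in prop conversion '{t}'")
    return k, val

assert convert_prop("a.b=1=int") == ("a.b", 1)
assert convert_prop("x.y=5,10=int_list") == ("x.y", [5, 10])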
# Import a speaker from the submission form
#
# Based on the quickstart template: https://developers.google.com/sheets/api/quickstart/python
#
# Logic:
# - if the speaker submission is valid, not spam, and not already accepted
#   - extract important data into a yaml format
#   - download the avatar to file
#
# use in combination with the Makefile
import os.path
import pickle
import shutil
from pathlib import Path

import requests
import yaml
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build

# If modifying these scopes, delete the file token.pickle.
SCOPES = ["https://www.googleapis.com/auth/spreadsheets.readonly"]

# The ID and range of a sample spreadsheet.
SAMPLE_SPREADSHEET_ID = "1jziwRYrrYB_NsZ4Te1EMOxbsuYSwsCqr8RG2iT9mD-A"
SAMPLE_RANGE_NAME = "Form Responses 1!A1:L"


def main():
    """Shows basic usage of the Sheets API.
    Prints values from a sample spreadsheet.
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists("token.pickle"):
        with open("token.pickle", "rb") as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file("credentials.json", SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open("token.pickle", "wb") as token:
            pickle.dump(creds, token)

    service = build("sheets", "v4", credentials=creds)

    # Call the Sheets API
    sheet = service.spreadsheets()
    result = (
        sheet.values()
        .get(spreadsheetId=SAMPLE_SPREADSHEET_ID, range=SAMPLE_RANGE_NAME)
        .execute()
    )
    values = result.get("values", [])

    speakers = []
    if not values:
        print("No data found.")
    else:
        # Pop the header row only after confirming there is data;
        # popping an empty list would raise IndexError.
        header = values.pop(0)
        for r in values:
            row = dict(zip(header, r))
            if row["I understand the conditions, and confirm I am a Katie"] != "Agree":
                continue
            if row["Action"] == "Spam":
                continue
            if row["Action"] == "Accepted":
                # prevent reprocessing
                continue
            data = {
                "name": row["Your name"],
                "twitter": row["Your Twitter handle"].strip("@"),
                "link": row["Your talk video recording link"],
                "title": row["Your talk title"],
            }
            avurl = row["A link to your avatar (square)"]
            avatar = f"{data['twitter']}.png"
            data["avatar"] = avatar

            resp = requests.get(avurl, stream=True)
            with open(Path.cwd() / "img" / avatar, "wb") as out:
                shutil.copyfileobj(resp.raw, out)
            del resp

            # Trailing empty cells are omitted by the Sheets API, so use
            # .get() for the optional LinkedIn column.
            linkedin = row.get("Your LinkedIn URL (if you'd prefer this over Twitter)")
            if linkedin:
                data["linkedin"] = linkedin
            speakers.append(data)

    print(yaml.dump(speakers))


if __name__ == "__main__":
    main()
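# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the avatar download
# above streams resp.raw straight to disk with no status check or timeout.
# A slightly more defensive variant of the same idea, reusing the requests,
# shutil, and Path imports already at the top of this script; the helper name
# `download_avatar` is hypothetical.

def download_avatar(url, dest):
    """Stream `url` to the path `dest`; return True on success."""
    try:
        resp = requests.get(url, stream=True, timeout=30)
        resp.raise_for_status()  # treat 4xx/5xx as failures
    except requests.RequestException:
        return False
    with open(dest, "wb") as out:
        shutil.copyfileobj(resp.raw, out)
    return True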
from django.shortcuts import render, redirect
from django.views.generic import FormView, TemplateView
from django.urls import reverse, reverse_lazy
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model, login
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.core import mail
from django.template.loader import render_to_string
from django.contrib import messages
from django.conf import settings

from user.models import UserProfile, Candidate
from . import forms
from user.forms import ResetPasswordEmailForm

User = get_user_model()


def send_verification_email(user, url, request):
    context = {
        'user': user,
        'activation_url': url,
    }
    message = render_to_string("emails/email-verification.txt", context=context, request=request)
    html_message = render_to_string("emails/email-verification.html", context=context, request=request)
    subject = 'Verify your account! - TalentAlps'
    from_email = settings.DEFAULT_FROM_EMAIL
    to = user.email

    mail.send_mail(subject, message, from_email, [to], html_message=html_message)


# Create your views here.
class CandidateRegisterView(FormView):
    template_name = 'registration/candidate-registration.html'
    form_class = forms.UserProfileForm
    success_url = reverse_lazy('login')

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['candidate_form'] = forms.CandidateRegisterForm()
        return context

    def form_valid(self, form):
        candidate_form = forms.CandidateRegisterForm(self.request.POST)
        if candidate_form.is_valid():
            # Create User, UserProfile, Candidate models
            try:
                with transaction.atomic():
                    user = User.objects.create(
                        username=form.cleaned_data.get('username'),
                        email=form.cleaned_data.get('email'),
                        first_name=form.cleaned_data.get('name')
                    )
                    user.set_password(form.cleaned_data.get('password'))
                    user.save()

                    userprofile = UserProfile.objects.create(
                        name=form.cleaned_data.get('name'),
                        contact=form.cleaned_data.get('contact'),
                        state=form.cleaned_data.get('state'),
                        country=form.cleaned_data.get('country'),
                        user=user
                    )

                    candidate = candidate_form.save(commit=False)
                    candidate.state = form.cleaned_data.get('state')
                    candidate.nationality = form.cleaned_data.get('country')
                    candidate.userprofile = userprofile
                    candidate.save()

                    token_generator = PasswordResetTokenGenerator()
                    token = token_generator.make_token(user)
                    url = self.request.build_absolute_uri(reverse('registration:email-verify', args=(user.pk, token)))
                    send_verification_email(user, url, self.request)

                messages.info(self.request, _(f"A verification email has been sent to - {user.email}, you must verify your account before you can log in."))
                return super().form_valid(form)
            except Exception:
                # Catch Exception rather than a bare except, which would also
                # swallow SystemExit/KeyboardInterrupt.
                messages.error(self.request, _("Something went wrong, please try again."))
                return super().get(self.request)
        else:
            return super().form_invalid(candidate_form)


class EmployerRegisterView(FormView):
    template_name = 'registration/employer-registration.html'
    form_class = forms.UserProfileForm
    success_url = reverse_lazy('registration:employer-register-success')

    def form_valid(self, form):
        try:
            with transaction.atomic():
                user = User.objects.create(
                    username=form.cleaned_data.get('email'),
                    email=form.cleaned_data.get('email'),
                    first_name=form.cleaned_data.get('name'),
                )
                user.set_password(form.cleaned_data.get('password'))
                user.save()

                userprofile = user.userprofile
                userprofile.is_employer = True
                userprofile.receive_updates = form.cleaned_data.get('receive_updates')
                userprofile.save()

                token_generator = PasswordResetTokenGenerator()
                token = token_generator.make_token(user)
                url = self.request.build_absolute_uri(reverse('registration:email-verify', args=(user.pk, token)))
                send_verification_email(user, url, self.request)

            messages.info(self.request, _(f"A verification email has been sent to - {user.email}, you must verify your account before you can log in."))
            return super().form_valid(form)
        except Exception:
            messages.error(self.request, _("Something went wrong, please try again."))
            return super().get(self.request)


class EmployerRegistrationSuccessView(TemplateView):
    template_name = 'registration/employer-registration-success.html'


class EmployerResendVerificationEmailView(TemplateView):
    template_name = 'registration/employer-resend-verification-email.html'


class UserEmailVerificationView(TemplateView):
    template_name = 'registration/user-email-verification.html'

    def get(self, request, *args, **kwargs):
        self.user = User.objects.get(pk=self.kwargs.get('pk'))
        token_generator = PasswordResetTokenGenerator()
        url_token = self.kwargs.get('token')
        self.valid = False
        self.verified = False
        if token_generator.check_token(self.user, url_token):
            self.valid = True
            # Only mark the profile verified when the token actually checks
            # out; doing this unconditionally would verify any user whose pk
            # appears in the URL.
            if self.user.userprofile.verified:
                self.verified = True
            self.user.userprofile.verified = True
            self.user.userprofile.save()
        return super().get(request, *args, **kwargs)


class ResendVerificationEmail(FormView):
    template_name = 'registration/resend-verification.html'
    form_class = ResetPasswordEmailForm
    success_url = reverse_lazy('login')

    def form_valid(self, form):
        messages.info(self.request, _(f"Verification email has been sent to - {form.cleaned_data.get('email')}, please check your inbox."))
        try:
            user = User.objects.get(email=form.cleaned_data.get('email'))
            if not user.userprofile.verified:
                token_generator = PasswordResetTokenGenerator()
                token = token_generator.make_token(user)
                url = self.request.build_absolute_uri(reverse('registration:email-verify', args=(user.pk, token)))
                send_verification_email(user, url, self.request)
        except User.DoesNotExist:
            return super().form_valid(form)
        return super().form_valid(form)
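# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the token/URL
# construction above is repeated verbatim in three views. It could be factored
# into a helper like this one (the name `make_verification_url` is
# hypothetical); it uses only imports already present at the top of this
# module.

def make_verification_url(request, user):
    """Return the absolute email-verification URL for `user`."""
    token = PasswordResetTokenGenerator().make_token(user)
    return request.build_absolute_uri(
        reverse('registration:email-verify', args=(user.pk, token))
    )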
""" Copyright (C) 2019-2020 Cisco Systems, Inc. and/or its affiliates. All rights reserved. This module provides an assortment of helper functions that Mussels depends on. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from collections import defaultdict, namedtuple import platform NVC = namedtuple("NVC", "name version cookbook") def version_keys(s): """ `key` function enabling python's `sort` function to sort version strings. """ import re keys = [] for u in s.split("."): for v in re.split(r"(\d+)", u): try: val = int(v) except: val = str(v) keys.append(val) return keys def sort_cookbook_by_version(items) -> defaultdict: """ Sort items, and determine the highest versions. """ sorted_items: defaultdict = defaultdict(list) for item in items: versions_list = list(items[item].keys()) versions_list.sort(key=version_keys) versions_list.reverse() for version in versions_list: sorted_items[item].append(version) return sorted_items PLATFORMS = { "posix": [ "linux", "darwin", "macos", "osx", "freebsd", "openbsd", "sunos", "aix", "hp-ux", ], "unix": ["darwin", "macos", "osx", "freebsd", "openbsd", "sunos", "aix", "hp-ux"], } def platform_matches(requested_platform: str, specific_platform) -> bool: """ Compare two platforms. Common platforms: - Windows - macos / darwin / osx - linux - unix (macos, sunos, bsd unix's) - *nix / posix (not windows) :return: True if current platform matches requested platform. :return: False otherwise. """ specific_platform = specific_platform.lower() requested_platform = requested_platform.lower() if requested_platform == specific_platform: return True elif ( requested_platform == "mac" or requested_platform == "macos" or requested_platform == "osx" ) and specific_platform == "darwin": return True if (requested_platform == "unix") and ( specific_platform == "darwin" or specific_platform == "sunos" or "bsd" in specific_platform ): return True elif requested_platform == "*nix" or requested_platform == "posix": if specific_platform != "windows": return True else: return False return False def platform_is(requested_platform: str) -> bool: """ Compare requested platform with current platform. Common platforms: - Win / Windows - Mac / macOS / Darwin - Linux - Unix (Mac, SunOS, BSD unix's) - *nix / posix (Not Windows) :return: True if current platform matches requested platform. :return: False otherwise. """ return platform_matches(requested_platform, platform.system()) def pick_platform(requested_platform: str, platform_options: list) -> str: """ Given a list of platforms, pick the one that most closely matches the current platform. Prefer exact, allow superset. :return: string name of selected platform. """ if requested_platform in platform_options: return requested_platform for option in platform_options: if platform_matches(option, requested_platform): return option return "" def compare_versions(version_a: str, version_b: str) -> int: """ Evaluate version strings of two versions. Compare if version A against version B. 
:return: -1 if A < B :return: 0 if A == B :return: 1 if A > B """ if version_a == version_b: return 0 versions_list = [version_a, version_b] versions_list.sort(key=version_keys) if versions_list[0] == version_a: return -1 else: return 1 def get_item_version(item_name: str, sorted_items: dict, target: str = "", logger = None) -> NVC: """ Convert a item name in the below format to a (name, version) tuple: [cookbook:]name[>=,<=,>,<,(==|=|@)version] Examples: - meepioux - blarghus>=1.2.3 - wheeple@0.2.0 - pyplo==5.1.0g - scrapbook:sasquatch<2.0.0 - scrapbook: minnow < 0.1.12 The highest available version will be selected if one is not specified. Version requirements will whittle down the list of available versions in the sorted_items list. If a specific version is specified, all other versions will be disqualified (pruned). If no versions remain that satisfy build qualifications, an exception will be raised. :return: named tuple describing the highest qualified version: NVC( "name"->str, "version"->str, "cookbook"->str, ) """ def select_cookbook_version(nvc, item_version, target: str = "") -> bool: cookbook_selected = False def cookbook_has_build_target(cookbooks_item: dict, target) -> bool: if target == "": return True for each_platform in cookbooks_item: # Note: sorted_items has been filtered down to compatible platform. # No need to check with platform_is() if target in cookbooks_item[each_platform]: return True return False # Prefer local over all else, enabling monkey-patching of recipes. if "local" in item_version["cookbooks"] and cookbook_has_build_target( item_version["cookbooks"]["local"], target ): if nvc["cookbook"] != "" and nvc["cookbook"] != "local": if logger: logger.debug(f"Overriding {nvc_str(nvc["name"], nvc["version"], nvc["cookbook"])} with {nvc_str(nvc["name"], item_version["version"], "local")}") nvc["version"] = item_version["version"] nvc["cookbook"] = "local" cookbook_selected = True else: if nvc["cookbook"] == "": # Any cookbook will do. for cookbook in item_version["cookbooks"]: if cookbook_has_build_target( item_version["cookbooks"][cookbook], target ): nvc["version"] = item_version["version"] nvc["cookbook"] = cookbook cookbook_selected = True break else: # Check for requested cookbook. for cookbook in item_version["cookbooks"]: if cookbook == nvc["cookbook"] and cookbook_has_build_target( item_version["cookbooks"][cookbook], target ): nvc["version"] = item_version["version"] cookbook_selected = True break # Remove all other cookbooks for this item version. if cookbook_selected: item_version["cookbooks"] = { nvc["cookbook"]: item_version["cookbooks"][nvc["cookbook"]] } return cookbook_selected nvc = {"name": "", "version": "", "cookbook": ""} requested_item = item_name item_selected = False # Identify cookbook name, if provided. if ":" in item_name: cookbook, item = item_name.split(":") nvc["cookbook"] = cookbook.strip() item_name = item.strip() if ">=" in item_name: # GTE requirement found. name, version = item_name.split(">=") nvc["name"] = name.strip() version = version.strip() for i, item_version in enumerate(sorted_items[name]): cmp = compare_versions(item_version["version"], version) if cmp >= 0: # Version is good. if item_selected != True: item_selected = select_cookbook_version(nvc, item_version, target) else: # Version is too low. Remove it, and subsequent versions. 
if logger != None and len(sorted_items[name][:i]) < len(sorted_items[name]): logger.debug(f"{name} limited to version: {", ".join([item["version"] for item in sorted_items[name][:i]])}") sorted_items[name] = sorted_items[name][:i] break elif ">" in item_name: # GT requirement found. name, version = item_name.split(">") nvc["name"] = name.strip() version = version.strip() for i, item_version in enumerate(sorted_items[name]): cmp = compare_versions(item_version["version"], version) if cmp > 0: # Version is good. if item_selected != True: item_selected = select_cookbook_version(nvc, item_version, target) else: # Version is too low. Remove it, and subsequent versions. if logger != None and len(sorted_items[name][:i]) < len(sorted_items[name]): logger.debug(f"{name} limited to version: {", ".join([item["version"] for item in sorted_items[name][:i]])}") sorted_items[name] = sorted_items[name][:i] break elif "<=" in item_name: # LTE requirement found. name, version = item_name.split("<=") nvc["name"] = name.strip() version = version.strip() pruned = False # First, prune down to highest tolerable version if len(sorted_items[name]) > 0: while ( len(sorted_items[name]) > 0 and compare_versions(sorted_items[name][0]["version"], version) > 0 ): # Remove a version from the sorted_items. sorted_items[name].remove(sorted_items[name][0]) pruned = True # Then, prune down to the highest version provided by a the requested cookbook if len(sorted_items[name]) > 0: while len(sorted_items[name]) > 0 and not item_selected: item_selected = select_cookbook_version( nvc, sorted_items[name][0], target ) if not item_selected: # Remove a version from the sorted_items. sorted_items[name].remove(sorted_items[name][0]) pruned = True if logger != None and pruned: logger.debug(f"{name} limited to version: {", ".join([item["version"] for item in sorted_items[name]])}") elif "<" in item_name: # LT requirement found. name, version = item_name.split("<") nvc["name"] = name.strip() version = version.strip() pruned = False # First, prune down to highest tolerable version if len(sorted_items[name]) > 0: while ( len(sorted_items[name]) > 0 and compare_versions(sorted_items[name][0]["version"], version) >= 0 ): # Remove a version from the sorted_items. sorted_items[name].remove(sorted_items[name][0]) pruned = True # Then, prune down to the highest version provided by a the requested cookbook if len(sorted_items[name]) > 0: while len(sorted_items[name]) > 0 and not item_selected: item_selected = select_cookbook_version( nvc, sorted_items[name][0], target ) if not item_selected: # Remove a version from the sorted_items. sorted_items[name].remove(sorted_items[name][0]) pruned = True if logger != None and pruned: logger.debug(f"{name} limited to version: {", ".join([item["version"] for item in sorted_items[name]])}") else: eq_cond = False if "==" in item_name: name, version = item_name.split("==") eq_cond = True elif "=" in item_name: name, version = item_name.split("=") eq_cond = True elif "-" in item_name: name, version = item_name.split("-") eq_cond = True elif "@" in item_name: name, version = item_name.split("@") eq_cond = True if eq_cond == True: nvc["name"] = name.strip() nvc["version"] = version.strip() # EQ requirement found. # Try to find the specific version, and remove all others. 
item_selected = False for item_version in sorted_items[nvc["name"]]: if version == item_version["version"]: item_selected = select_cookbook_version(nvc, item_version, target) if item_selected: if logger != None and len(sorted_items[nvc["name"]]) > 1: logger.debug(f"{name} limited to version: {version}") sorted_items[nvc["name"]] = [item_version] break else: # No version requirement found. nvc["name"] = item_name.strip() for item_version in sorted_items[nvc["name"]]: item_selected = select_cookbook_version(nvc, item_version, target) if item_selected: break if not item_selected: if target == "": raise Exception( f"No versions available to satisfy requirement for {requested_item}.\nThe requested version may have been filtered out by requirements for another recipe." ) else: raise Exception( f"No versions available to satisfy requirement for {requested_item} ({target}).\nThe requested version may have been filtered out by requirements for another recipe." ) return NVC(nvc["name"], nvc["version"], nvc["cookbook"]) def nvc_str(name, version, cookbook: str = ""): def nv_str(name, version): if version != "": return f"{name}-{version}" else: return name if cookbook != "": return f"{cookbook}:{nv_str(name, version)}" else: return nv_str(name, version)
""" Copyright (C) 2019-2020 Cisco Systems, Inc. and/or its affiliates. All rights reserved. This module provides an assortment of helper functions that Mussels depends on. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from collections import defaultdict, namedtuple import platform NVC = namedtuple("NVC", "name version cookbook") def version_keys(s): """ `key` function enabling python's `sort` function to sort version strings. """ import re keys = [] for u in s.split("."): for v in re.split(r"(\d+)", u): try: val = int(v) except: val = str(v) keys.append(val) return keys def sort_cookbook_by_version(items) -> defaultdict: """ Sort items, and determine the highest versions. """ sorted_items: defaultdict = defaultdict(list) for item in items: versions_list = list(items[item].keys()) versions_list.sort(key=version_keys) versions_list.reverse() for version in versions_list: sorted_items[item].append(version) return sorted_items PLATFORMS = { "posix": [ "linux", "darwin", "macos", "osx", "freebsd", "openbsd", "sunos", "aix", "hp-ux", ], "unix": ["darwin", "macos", "osx", "freebsd", "openbsd", "sunos", "aix", "hp-ux"], } def platform_matches(requested_platform: str, specific_platform) -> bool: """ Compare two platforms. Common platforms: - Windows - macos / darwin / osx - linux - unix (macos, sunos, bsd unix's) - *nix / posix (not windows) :return: True if current platform matches requested platform. :return: False otherwise. """ specific_platform = specific_platform.lower() requested_platform = requested_platform.lower() if requested_platform == specific_platform: return True elif ( requested_platform == "mac" or requested_platform == "macos" or requested_platform == "osx" ) and specific_platform == "darwin": return True if (requested_platform == "unix") and ( specific_platform == "darwin" or specific_platform == "sunos" or "bsd" in specific_platform ): return True elif requested_platform == "*nix" or requested_platform == "posix": if specific_platform != "windows": return True else: return False return False def platform_is(requested_platform: str) -> bool: """ Compare requested platform with current platform. Common platforms: - Win / Windows - Mac / macOS / Darwin - Linux - Unix (Mac, SunOS, BSD unix's) - *nix / posix (Not Windows) :return: True if current platform matches requested platform. :return: False otherwise. """ return platform_matches(requested_platform, platform.system()) def pick_platform(requested_platform: str, platform_options: list) -> str: """ Given a list of platforms, pick the one that most closely matches the current platform. Prefer exact, allow superset. :return: string name of selected platform. """ if requested_platform in platform_options: return requested_platform for option in platform_options: if platform_matches(option, requested_platform): return option return "" def compare_versions(version_a: str, version_b: str) -> int: """ Evaluate version strings of two versions. Compare if version A against version B. 
    :return: -1 if A < B
    :return: 0 if A == B
    :return: 1 if A > B
    """
    if version_a == version_b:
        return 0

    versions_list = [version_a, version_b]
    versions_list.sort(key=version_keys)

    if versions_list[0] == version_a:
        return -1
    else:
        return 1


def get_item_version(item_name: str, sorted_items: dict, target: str = "", logger=None) -> NVC:
    """
    Convert an item name in the below format to an NVC (name, version, cookbook) tuple:

        [cookbook:]name[>=,<=,>,<,(==|=|@)version]

    Examples:
        - meepioux
        - blarghus>=1.2.3
        - wheeple@0.2.0
        - pyplo==5.1.0g
        - scrapbook:sasquatch<2.0.0
        - scrapbook: minnow < 0.1.12

    The highest available version will be selected if one is not specified.
    Version requirements will whittle down the list of available versions
    in the sorted_items list.

    If a specific version is specified, all other versions will be
    disqualified (pruned).

    If no versions remain that satisfy build qualifications, an exception
    will be raised.

    :return: named tuple describing the highest qualified version:
        NVC(
            "name"->str,
            "version"->str,
            "cookbook"->str,
        )
    """

    def select_cookbook_version(nvc, item_version, target: str = "") -> bool:
        cookbook_selected = False

        def cookbook_has_build_target(cookbooks_item: dict, target) -> bool:
            if target == "":
                return True

            for each_platform in cookbooks_item:
                # Note: sorted_items has been filtered down to compatible platform.
                # No need to check with platform_is()
                if target in cookbooks_item[each_platform]:
                    return True

            return False

        # Prefer local over all else, enabling monkey-patching of recipes.
        if "local" in item_version["cookbooks"] and cookbook_has_build_target(
            item_version["cookbooks"]["local"], target
        ):
            if nvc["cookbook"] != "" and nvc["cookbook"] != "local":
                if logger:
                    logger.debug(f"Overriding {nvc_str(nvc['name'], nvc['version'], nvc['cookbook'])} with {nvc_str(nvc['name'], item_version['version'], 'local')}")
            nvc["version"] = item_version["version"]
            nvc["cookbook"] = "local"
            cookbook_selected = True
        else:
            if nvc["cookbook"] == "":
                # Any cookbook will do.
                for cookbook in item_version["cookbooks"]:
                    if cookbook_has_build_target(
                        item_version["cookbooks"][cookbook], target
                    ):
                        nvc["version"] = item_version["version"]
                        nvc["cookbook"] = cookbook
                        cookbook_selected = True
                        break
            else:
                # Check for requested cookbook.
                for cookbook in item_version["cookbooks"]:
                    if cookbook == nvc["cookbook"] and cookbook_has_build_target(
                        item_version["cookbooks"][cookbook], target
                    ):
                        nvc["version"] = item_version["version"]
                        cookbook_selected = True
                        break

        # Remove all other cookbooks for this item version.
        if cookbook_selected:
            item_version["cookbooks"] = {
                nvc["cookbook"]: item_version["cookbooks"][nvc["cookbook"]]
            }

        return cookbook_selected

    nvc = {"name": "", "version": "", "cookbook": ""}

    requested_item = item_name
    item_selected = False

    # Identify cookbook name, if provided.
    if ":" in item_name:
        cookbook, item = item_name.split(":")
        nvc["cookbook"] = cookbook.strip()
        item_name = item.strip()

    if ">=" in item_name:
        # GTE requirement found.
        name, version = item_name.split(">=")
        nvc["name"] = name.strip()
        version = version.strip()

        for i, item_version in enumerate(sorted_items[name]):
            cmp = compare_versions(item_version["version"], version)
            if cmp >= 0:
                # Version is good.
                if not item_selected:
                    item_selected = select_cookbook_version(nvc, item_version, target)
            else:
                # Version is too low. Remove it, and subsequent versions.
                if logger is not None and len(sorted_items[name][:i]) < len(sorted_items[name]):
                    logger.debug(f"{name} limited to version: {', '.join([item['version'] for item in sorted_items[name][:i]])}")
                sorted_items[name] = sorted_items[name][:i]
                break

    elif ">" in item_name:
        # GT requirement found.
        name, version = item_name.split(">")
        nvc["name"] = name.strip()
        version = version.strip()

        for i, item_version in enumerate(sorted_items[name]):
            cmp = compare_versions(item_version["version"], version)
            if cmp > 0:
                # Version is good.
                if not item_selected:
                    item_selected = select_cookbook_version(nvc, item_version, target)
            else:
                # Version is too low. Remove it, and subsequent versions.
                if logger is not None and len(sorted_items[name][:i]) < len(sorted_items[name]):
                    logger.debug(f"{name} limited to version: {', '.join([item['version'] for item in sorted_items[name][:i]])}")
                sorted_items[name] = sorted_items[name][:i]
                break

    elif "<=" in item_name:
        # LTE requirement found.
        name, version = item_name.split("<=")
        nvc["name"] = name.strip()
        version = version.strip()

        pruned = False

        # First, prune down to highest tolerable version
        if len(sorted_items[name]) > 0:
            while (
                len(sorted_items[name]) > 0
                and compare_versions(sorted_items[name][0]["version"], version) > 0
            ):
                # Remove a version from the sorted_items.
                sorted_items[name].remove(sorted_items[name][0])
                pruned = True

        # Then, prune down to the highest version provided by the requested cookbook
        if len(sorted_items[name]) > 0:
            while len(sorted_items[name]) > 0 and not item_selected:
                item_selected = select_cookbook_version(
                    nvc, sorted_items[name][0], target
                )
                if not item_selected:
                    # Remove a version from the sorted_items.
                    sorted_items[name].remove(sorted_items[name][0])
                    pruned = True

        if logger is not None and pruned:
            logger.debug(f"{name} limited to version: {', '.join([item['version'] for item in sorted_items[name]])}")

    elif "<" in item_name:
        # LT requirement found.
        name, version = item_name.split("<")
        nvc["name"] = name.strip()
        version = version.strip()

        pruned = False

        # First, prune down to highest tolerable version
        if len(sorted_items[name]) > 0:
            while (
                len(sorted_items[name]) > 0
                and compare_versions(sorted_items[name][0]["version"], version) >= 0
            ):
                # Remove a version from the sorted_items.
                sorted_items[name].remove(sorted_items[name][0])
                pruned = True

        # Then, prune down to the highest version provided by the requested cookbook
        if len(sorted_items[name]) > 0:
            while len(sorted_items[name]) > 0 and not item_selected:
                item_selected = select_cookbook_version(
                    nvc, sorted_items[name][0], target
                )
                if not item_selected:
                    # Remove a version from the sorted_items.
                    sorted_items[name].remove(sorted_items[name][0])
                    pruned = True

        if logger is not None and pruned:
            logger.debug(f"{name} limited to version: {', '.join([item['version'] for item in sorted_items[name]])}")

    else:
        eq_cond = False
        if "==" in item_name:
            name, version = item_name.split("==")
            eq_cond = True
        elif "=" in item_name:
            name, version = item_name.split("=")
            eq_cond = True
        elif "-" in item_name:
            name, version = item_name.split("-")
            eq_cond = True
        elif "@" in item_name:
            name, version = item_name.split("@")
            eq_cond = True

        if eq_cond:
            nvc["name"] = name.strip()
            nvc["version"] = version.strip()

            # EQ requirement found.
            # Try to find the specific version, and remove all others.
            item_selected = False
            for item_version in sorted_items[nvc["name"]]:
                if version == item_version["version"]:
                    item_selected = select_cookbook_version(nvc, item_version, target)
                    if item_selected:
                        if logger is not None and len(sorted_items[nvc["name"]]) > 1:
                            logger.debug(f"{name} limited to version: {version}")
                        sorted_items[nvc["name"]] = [item_version]
                        break
        else:
            # No version requirement found.
            nvc["name"] = item_name.strip()

            for item_version in sorted_items[nvc["name"]]:
                item_selected = select_cookbook_version(nvc, item_version, target)
                if item_selected:
                    break

    if not item_selected:
        if target == "":
            raise Exception(
                f"No versions available to satisfy requirement for {requested_item}.\n"
                "The requested version may have been filtered out by requirements for another recipe."
            )
        else:
            raise Exception(
                f"No versions available to satisfy requirement for {requested_item} ({target}).\n"
                "The requested version may have been filtered out by requirements for another recipe."
            )

    return NVC(nvc["name"], nvc["version"], nvc["cookbook"])


def nvc_str(name, version, cookbook: str = ""):
    def nv_str(name, version):
        if version != "":
            return f"{name}-{version}"
        else:
            return name

    if cookbook != "":
        return f"{cookbook}:{nv_str(name, version)}"
    else:
        return nv_str(name, version)
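# Illustrative sketch (not part of the original module): how get_item_version()
# consumes and prunes a sorted_items mapping. The nesting of the "cookbooks"
# value (cookbook -> platform -> build targets) is inferred from
# cookbook_has_build_target() above and is an assumption, as are the recipe
# names and versions; NVC and compare_versions() come from earlier in this module.
if __name__ == "__main__":
    sorted_items = {
        "blarghus": [
            {"version": "1.3.0", "cookbooks": {"scrapbook": {"linux": ["host"]}}},
            {"version": "1.2.3", "cookbooks": {"scrapbook": {"linux": ["host"]}}},
            {"version": "1.0.0", "cookbooks": {"scrapbook": {"linux": ["host"]}}},
        ],
    }

    nvc = get_item_version("blarghus>=1.2.3", sorted_items)
    print(nvc_str(*nvc))  # -> scrapbook:blarghus-1.3.0 (highest qualifying version)

    # Side effect: versions below 1.2.3 were pruned from sorted_items.
    print([v["version"] for v in sorted_items["blarghus"]])  # -> ['1.3.0', '1.2.3']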
#!/usr/bin/env python

# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)

"""pytorch dataset and dataloader implementation for chainer training."""

import logging
import re

import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data


class TransformDataset(torch.utils.data.Dataset):
    """Transform Dataset for pytorch backend.

    Args:
        data: list object from make_batchset
        transform: transform function

    """

    def __init__(self, data, transform):
        """Init function."""
        super().__init__()
        self.data = data
        self.transform = transform

    def __len__(self):
        """Len function."""
        return len(self.data)

    def __getitem__(self, idx):
        """[] operator."""
        # self.data[idx] is a whole minibatch; apply the batch transform once.
        return self.transform(self.data[idx])


# NOTE: no flipping is implemented yet; currently identical to TransformDataset.
class TransformDatasetRandomFlip(torch.utils.data.Dataset):
    """Transform Dataset for pytorch backend.

    Args:
        data: list object from make_batchset
        transform: transform function

    """

    def __init__(self, data, transform):
        """Init function."""
        super().__init__()
        self.data = data
        self.transform = transform

    def __len__(self):
        """Len function."""
        return len(self.data)

    def __getitem__(self, idx):
        """[] operator."""
        return self.transform(self.data[idx])


# NOTE: this extended definition shadows the plain TransformDataset above;
# it is the one visible at module level.
class TransformDataset(torch.utils.data.Dataset):
    """Transform Dataset for pytorch backend.
    Args:
        data: list object from make_batchset
        transform: transform function

    """

    def __init__(
            self, data, transform, utt=False, lang_label=False,
            lang=False, lang_onehot=False, speech_type=False,
            num_langs=None, all_lang=None):
        """Init function."""
        super().__init__()
        self.data = data
        self.transform = transform
        self.utt, self.lang, self.lang_onehot = utt, lang, lang_onehot
        self.lang_label = lang_label
        self.speech_type = speech_type
        if all_lang is not None:
            # passing all the languages manually
            self.num_langs = len(all_lang)
            self.all_lang = all_lang
            self.lang2int = {l: i for i, l in enumerate(sorted(self.all_lang))}
            self.int2lang = {i: l for l, i in self.lang2int.items()}
            logging.warning(f'TransformDataset [all_lang] {self.all_lang}')
            logging.warning(f'TransformDataset [lang2int] {self.lang2int}')
        elif self.lang or self.lang_onehot or self.lang_label:
            self.all_lang = set()
            for dt in self.data:
                self.all_lang.update([self.get_lang(d) for d in dt])

            from collections import Counter
            cnt = Counter()
            for dt in self.data:
                cnt.update([d[0].split('_')[0] for d in dt])
            # logging.warning(f'TransformDataset [counter] {cnt}')

            self.num_langs = num_langs
            self.lang2int = {l: i for i, l in enumerate(sorted(self.all_lang))}
            self.int2lang = {i: l for l, i in self.lang2int.items()}
            logging.warning(f'TransformDataset [all_lang] {self.all_lang}')
            logging.warning(f'TransformDataset [lang2int] {self.lang2int}')

        if self.speech_type:
            assert self.lang or self.lang_onehot
            self.lang2type = {
                '101': 0, '103': 0, '107': 0, '203': 0, '206': 0,
                '307': 0, '402': 0, '404': 0,
                'BG': 1, 'CH': 1, 'CR': 1, 'CZ': 1, 'FR': 1, 'GE': 1,
                'N': 0,
                'PL': 1, 'PO': 1, 'SP': 1, 'TH': 1, 'TU': 1,
            }  # 0 is spontaneous speech; 1 is read speech

    def __len__(self):
        """Len function."""
        return len(self.data)

    def get_lang(self, d):
        """Extract the language ID prefix from an utterance name."""
        s = d[0].split('_')[0]
        s = re.sub(r'\d+$', '', s.split('-')[0]) if re.search('[a-zA-Z]+', s) else s
        return s

    def __getitem__(self, idx):
        """[] operator."""
        return self.custom_transform(self.data[idx])

    def custom_transform(self, data):
        """Apply the batch transform and prepend any requested metadata items."""
        items = []
        xs_pad, ilens, ys_pad = self.transform(data)
        if self.lang_label:
            lang_labels = [self.get_lang(d) for d in data]
            items.append(lang_labels)
        if self.utt:
            utts = [d[0] for d in data]
            items.append(utts)
        if self.lang:
            langs = torch.from_numpy(np.array([
                self.lang2int[self.get_lang(d)] for d in data
            ])).long()
            items.append(langs)
        if self.lang_onehot:
            langs_onehot = F.one_hot(torch.from_numpy(np.array([
                self.lang2int[self.get_lang(d)] for d in data
            ])), num_classes=self.num_langs).float()
            items.append(langs_onehot)
        if self.speech_type:
            speech_type = torch.from_numpy(np.array([
                self.lang2type[self.get_lang(d)] for d in data
            ])).long()
            items.append(speech_type)
        items.extend([xs_pad, ilens, ys_pad])
        return tuple(items)


class TransformDatasetEar(torch.utils.data.Dataset):
    """Transform Dataset for pytorch backend.
    Args:
        data: list object from make_batchset
        transform: transform function

    """

    def get_lang(self, d):
        """Extract the language ID prefix from an utterance name."""
        s = d[0].split('_')[0]
        s = re.sub(r'\d+$', '', s.split('-')[0]) if re.search('[a-zA-Z]+', s) else s
        return s

    def __init__(self, data, transform):
        """Init function."""
        super().__init__()
        self.data = data
        self.transform = transform
        self.all_lang = set()
        for dt in self.data:
            self.all_lang.update([self.get_lang(d) for d in dt])

        from collections import Counter
        cnt = Counter()
        for dt in self.data:
            cnt.update([d[0].split('_')[0] for d in dt])
        # logging.warning(f'TransformDataset [counter] {cnt}')

        self.lang2int = {l: i for i, l in enumerate(sorted(self.all_lang))}
        self.int2lang = {i: l for l, i in self.lang2int.items()}
        logging.warning(f'TransformDatasetEar [all_lang] {self.all_lang}')
        logging.warning(f'TransformDatasetEar [lang2int] {self.lang2int}')

    def __len__(self):
        """Len function."""
        return len(self.data)

    def __getitem__(self, idx):
        """[] operator."""
        xs_pad, ilens, ys_pad = self.transform(self.data[idx])
        # Per-utterance integer language IDs for this minibatch.
        lang = torch.from_numpy(np.array([
            self.lang2int[self.get_lang(d)] for d in self.data[idx]
        ])).long()
        return lang, xs_pad, ilens, ys_pad


class TransformDatasetEval(torch.utils.data.Dataset):
    """Transform Dataset for pytorch backend.
    Args:
        data: list object from make_batchset
        transform: transform function

    """

    def get_lang(self, d):
        """Extract the language ID prefix from an utterance name."""
        s = d[0].split('_')[0]
        s = re.sub(r'\d+$', '', s.split('-')[0]) if re.search('[a-zA-Z]+', s) else s
        return s

    def __init__(self, data, transform):
        """Init function."""
        super().__init__()
        self.data = data
        self.transform = transform
        self.all_lang = set()
        for dt in self.data:
            self.all_lang.update([self.get_lang(d) for d in dt])

        from collections import Counter
        cnt = Counter()
        for dt in self.data:
            cnt.update([d[0].split('_')[0] for d in dt])
        # logging.warning(f'TransformDataset [counter] {cnt}')

        self.lang2int = {l: i for i, l in enumerate(sorted(self.all_lang))}
        self.int2lang = {i: l for l, i in self.lang2int.items()}

    def __len__(self):
        """Len function."""
        return len(self.data)

    def __getitem__(self, idx):
        """[] operator."""
        xs_pad, ilens, ys_pad = self.transform(self.data[idx])
        # Return the utterance names alongside the batch for evaluation-time bookkeeping.
        utts = [d[0] for d in self.data[idx]]
        return utts, xs_pad, ilens, ys_pad


class ChainerDataLoader(object):
    """Pytorch dataloader in chainer style.

    Args:
        all args for torch.utils.data.DataLoader

    """

    def __init__(self, **kwargs):
        """Init function."""
        self.loader = torch.utils.data.DataLoader(**kwargs)
        self.len = len(kwargs["dataset"])
        self.current_position = 0
        self.epoch = 0
        self.iter = None
        self.kwargs = kwargs

    def next(self):
        """Implement next function."""
        if self.iter is None:
            self.iter = iter(self.loader)
        try:
            ret = next(self.iter)
        except StopIteration:
            self.iter = None
            return self.next()
        self.current_position += 1
        if self.current_position == self.len:
            self.epoch = self.epoch + 1
            self.current_position = 0
        return ret

    def synchronize(self, epoch):
        """Reset the iteration state to the start of the given epoch."""
        self.current_position = 0
        self.epoch = epoch

    def __iter__(self):
        """Implement iter function."""
        for batch in self.loader:
            yield batch

    @property
    def epoch_detail(self):
        """Epoch_detail required by chainer."""
        return self.epoch + self.current_position / self.len

    def serialize(self, serializer):
        """Serialize and deserialize function."""
        epoch = serializer("epoch", self.epoch)
        current_position = serializer("current_position", self.current_position)
        self.epoch = epoch
        self.current_position = current_position

    def start_shuffle(self):
        """Shuffle function for sortagrad."""
        self.kwargs["shuffle"] = True
        self.loader = torch.utils.data.DataLoader(**self.kwargs)

    def finalize(self):
        """Implement finalize function."""
        if hasattr(self, 'loader'):
            del self.loader
        else:
            logging.warning('dataset has already been deleted')
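# Illustrative sketch (not part of the original module): wiring a toy batchset
# through TransformDataset and ChainerDataLoader. dummy_transform stands in for
# the real ESPnet batch converter, which returns (xs_pad, ilens, ys_pad) per
# minibatch; the utterance names and tensor shapes below are invented.
if __name__ == "__main__":
    def dummy_transform(batch):
        n = len(batch)
        xs_pad = torch.zeros(n, 10, 5)                   # padded input features
        ilens = torch.full((n,), 10, dtype=torch.long)   # true input lengths
        ys_pad = torch.zeros(n, 4, dtype=torch.long)     # padded target labels
        return xs_pad, ilens, ys_pad

    # Two minibatches of (utterance_name, metadata) pairs, as make_batchset yields.
    batchset = [
        [("FR_0001_utt1", {}), ("FR_0002_utt2", {})],
        [("101_0003_utt1", {})],
    ]

    dataset = TransformDataset(batchset, dummy_transform, lang=True)
    loader = ChainerDataLoader(
        dataset=dataset, batch_size=1, shuffle=False,
        collate_fn=lambda x: x[0],  # each dataset item is already a whole minibatch
    )
    langs, xs_pad, ilens, ys_pad = loader.next()
    print(langs)  # per-utterance language IDs, e.g. tensor([1, 1])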
import argparse
from datetime import datetime
import sqlite3
from sqlite3 import Error
import re


class ScratchPad:
    def __init__(self, db_file, args):
        self.db_file = db_file
        self.args = args
        self.create_connection()

    def handle_args(self):
        # If a command was specified, use it. Otherwise, assume the list command.
        if self.args.command:
            return getattr(self, self.args.command)
        else:
            return self.list

    def create_connection(self):
        try:
            conn = sqlite3.connect(self.db_file)
            self.connection = conn
        except Error as e:
            print(e)
            self.connection = None

    def close_connection(self):
        self.connection.close()

    def run(self):
        self.setup()
        action = self.handle_args()
        if action:
            action()
        self.close_connection()

    def setup(self):
        if not self.table_exists("notes"):
            self.create_table("notes", "id integer PRIMARY KEY, timestamp text, category text, content text")
        return self

    def table_exists(self, table_name):
        cursor = self.connection.cursor()
        # Parameterized to avoid interpolating user-supplied values into SQL.
        cursor.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
        # if the count is 1, then table exists
        return cursor.fetchone()[0] == 1

    def create_table(self, name, table_string):
        cursor = self.connection.cursor()
        query = f"CREATE TABLE {name}({table_string})"
        cursor.execute(query)
        self.connection.commit()

    def insert_into_table(self, table_name, note):
        cursor = self.connection.cursor()
        cursor.execute(f"INSERT INTO {table_name} (id, timestamp, category, content) VALUES (:id, :timestamp, :category, :content)", note.dict)
        self.connection.commit()

    def add(self):
        note_data = {"category": self.args.category, "content": ' '.join(self.args.content)}
        cursor = self.connection.cursor()
        cursor.execute("SELECT id FROM notes ORDER BY id DESC LIMIT 1")
        try:
            most_recent_id = cursor.fetchone()[0]
        except TypeError:
            most_recent_id = 0
        note = Note(most_recent_id + 1, note_data["category"], note_data["content"])
        self.insert_into_table("notes", note)
        self.connection.commit()

    def list(self):
        cursor = self.connection.cursor()
        query = "SELECT * FROM notes"
        params = []
        if hasattr(self.args, 'category') and self.args.category is not None:
            query += " WHERE category=?"
            params.append(self.args.category)
        query += " ORDER BY id DESC"
        if hasattr(self.args, "quantity") and not self.args.all:
            query += " LIMIT ?"
            params.append(self.args.quantity)
        cursor.execute(query, params)
        for item in cursor.fetchall():
            note = Note(item[0], item[2], item[3], date_time=datetime.strptime(item[1], "%m-%d-%y %H:%M:%S"))
            print(note)

    def delete(self):
        ids_to_delete = range_parser(self.args.delete_criteria)
        cursor = self.connection.cursor()
        for tagged_id in ids_to_delete:
            id = tagged_id[0]
            tag = tagged_id[1]
            if tag == 'exact':
                cursor.execute('DELETE FROM notes WHERE id=?', (id,))
            elif tag == 'below':
                # Delete the given id and everything below it.
                cursor.execute('DELETE FROM notes WHERE id<=?', (id,))
            elif tag == 'above':
                # Delete the given id and everything above it.
                cursor.execute('DELETE FROM notes WHERE id>=?', (id,))
        self.connection.commit()

    def erase(self):
        # Handles the 'erase' subcommand.
        cursor = self.connection.cursor()
        cursor.execute('DELETE FROM notes')
        self.connection.commit()

    def search(self):
        if getattr(self.args, 'search_criteria', None):
            regex = f".*{self.args.search_criteria[0]}.*"
        else:
            regex = ".*"
        cursor = self.connection.cursor()
        query = "SELECT * FROM notes"
        params = []
        if hasattr(self.args, 'category') and self.args.category is not None:
            query += " WHERE category=?"
            params.append(self.args.category)
        query += " ORDER BY id DESC"
        if hasattr(self.args, "quantity") and not self.args.all:
            query += " LIMIT ?"
            params.append(self.args.quantity)
        cursor.execute(query, params)
for item in cursor.fetchall(): id = item[0] category = item[2] content = item[3] match = re.search(regex.lower(), content.lower()) if match: note = Note(id, category, content, date_time=datetime.strptime(item[1], "%m-%d-%y %H:%M:%S")) print(note) def fetch(self): notes = [] cursor = self.connection.cursor() query = "SELECT * FROM notes ORDER BY id DESC" cursor.execute(query) for item in cursor.fetchall(): note = Note(item[0], item[2], item[3], date_time=datetime.strptime(item[1], "%m-%d-%y %H:%M:%S")) notes.append(note) return notes class Note: def __init__(self, id, category, content, date_time=None): self.id = id self.category = category self.content = content if not date_time: self.date_time = datetime.now() else: self.date_time = date_time self.timestamp = datetime.strftime(self.date_time, "%m-%d-%y %H:%M:%S") @property def dict(self): return { "id": self.id, "category": self.category, "content": self.content, "timestamp": self.timestamp } @property def values(self): return [self.id, self.timestamp, self.category, self.content] def __str__(self): return f"[{self.id}] [{datetime.strftime(self.date_time, '%m-%d-%Y %H:%M:%S')}] [{self.category}] - {self.content}" def __repr__(self): return self.__str__() def parse_args(): # Read environment from command line args parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(dest='command', help='Commands') # Test/Dev argument parser.add_argument('-t', '--test', action='store_true', help='Run the program against a test database for testing features') # Add command parser_add = subparsers.add_parser('add', help='Add a note to the database') parser_add.add_argument('content', nargs='*', action='store', type=str, default=None, help="Content of note") parser_add.add_argument('-c', '--category', default='General', action='store', help="Choose a category for the note to be added under") # List command parser_list = subparsers.add_parser('list', help='List notes in the database') parser_list.add_argument('-a', '--all', action='store_true', help="List all notes under given criteria") parser_list.add_argument('-q', '--quantity', nargs='?', action='store', default=5, type=int, help="Specify the amount of results to list") parser_list.add_argument('-c', '--category', nargs='?', default=None, action='store', help="Choose a category of notes to list") # Delete command parser_delete = subparsers.add_parser('delete', help='Delete one or multiple notes from the database') parser_delete.add_argument('delete_criteria', nargs='*', action='store', type=str) # Erase command parser_erase = subparsers.add_parser('erase', help='Delete all notes from the database') # Help command parser_help = subparsers.add_parser('help', help='Display help text') # parser_help.add_argument('help', nargs='?', action='store', default=False) # Search command parser_search = subparsers.add_parser('search', help='List notes matching search term') parser_search.add_argument('search_criteria', nargs='*', action='store', type=str) args = parser.parse_args() if args.test: print(args) if args.command == "help": parser.print_help() exit() return args def range_parser(item_list): regex_modifiers = { r"([0-9]+)\-([0-9]+)": "exact", r"([0-9]+)\:": "above", r"\:([0-9]+)": "below" } new_list = [] for item in item_list: try: new_list.append((int(item), 'exact')) except ValueError as e: for regex in regex_modifiers.keys(): match = re.search(regex, item) if match: modifier = regex_modifiers[regex] if modifier == 'exact': minimum = int(match.group(1)) maximum = int(match.group(2)) for i in 
range(minimum, maximum+1):
                            new_list.append((i, modifier))
                    else:
                        val = int(match.group(1))
                        new_list.append((val, modifier))

    return new_list


if __name__ == "__main__":
    args = parse_args()

    if args.test:
        db_file = r"C:\sqlite\db\notes_test.db"
    else:
        db_file = r"C:\sqlite\db\notes.db"

    scratchpad = ScratchPad(db_file, args)
    scratchpad.run()
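# Illustrative examples (not in the original script) of the delete-criteria
# grammar accepted by range_parser(): bare ids, "low-high" ranges,
# "n:" (id n and above) and ":n" (id n and below).
#
#   >>> range_parser(["4"])
#   [(4, 'exact')]
#   >>> range_parser(["2-4"])
#   [(2, 'exact'), (3, 'exact'), (4, 'exact')]
#   >>> range_parser(["7:"])
#   [(7, 'above')]
#   >>> range_parser([":3"])
#   [(3, 'below')]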
# Copyright 2021 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Face detection class using mtcnn model to find face bboxes
"""

import logging
from pathlib import Path
from typing import Any, Dict, List, Tuple

import numpy as np
import tensorflow as tf

from peekingduck.pipeline.nodes.model.mtcnnv1.mtcnn_files.graph_functions import (
    load_graph,
)


class Detector:  # pylint: disable=too-many-instance-attributes
    """Face detection class using MTCNN model to find bboxes and landmarks"""

    def __init__(self, config: Dict[str, Any], model_dir: Path) -> None:
        self.logger = logging.getLogger(__name__)

        self.config = config
        self.model_dir = model_dir

        self.min_size = self.config["mtcnn_min_size"]
        self.factor = self.config["mtcnn_factor"]
        self.thresholds = self.config["mtcnn_thresholds"]
        self.score = self.config["mtcnn_score"]

        self.mtcnn = self._create_mtcnn_model()

    def _create_mtcnn_model(self) -> tf.compat.v1.GraphDef:
        """
        Creates MTCNN model for face detection
        """
        model_path = self.model_dir / self.config["weights"]["model_file"]

        self.logger.info(
            "MTCNN model loaded with following configs: \n\t"
            f"Min size: {self.config['mtcnn_min_size']}, \n\t"
            f"Scale Factor: {self.config['mtcnn_factor']}, \n\t"
            f"Steps Thresholds: {self.config['mtcnn_thresholds']}, \n\t"
            f"Score Threshold: {self.config['mtcnn_score']}"
        )

        return self._load_mtcnn_graph(model_path)

    @staticmethod
    def _load_mtcnn_graph(model_path: Path) -> tf.compat.v1.GraphDef:
        if model_path.is_file():
            return load_graph(str(model_path))

        raise ValueError(
            f"Graph file does not exist. Please check that {model_path} exists"
        )

    def predict_bbox_landmarks(
        self, image: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Predicts face bboxes, scores and landmarks

        Args:
            image (np.ndarray): image in numpy array

        Returns:
            bboxes (np.ndarray): numpy array of detected bboxes
            scores (np.ndarray): numpy array of confidence scores
            landmarks (np.ndarray): numpy array of facial landmarks
            labels (np.ndarray): numpy array of class labels (i.e. face)
        """
        # 1. process inputs
        image = self.process_image(image)
        min_size, factor, thresholds = self.process_params(
            self.min_size, self.factor, self.thresholds
        )

        # 2. evaluate image
        bboxes, scores, landmarks = self.mtcnn(image, min_size, factor, thresholds)

        # 3. process outputs
        bboxes, scores, landmarks = self.process_outputs(
            image, bboxes, scores, landmarks
        )

        # 4. create bbox labels
        classes = np.array(["face"] * len(bboxes))

        return bboxes, scores, landmarks, classes

    @staticmethod
    def process_image(image: np.ndarray) -> tf.Tensor:
        """Processes input image

        Args:
            image (np.ndarray): image in numpy array

        Returns:
            image (tf.Tensor): image converted to a float32 tensor
        """
        image = image.astype(np.float32)
        image = tf.convert_to_tensor(image)

        return image

    @staticmethod
    def process_params(
        min_size: int, factor: float, thresholds: List[float]
    ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
        """Processes model input parameters

        Args:
            min_size (int): minimum face size
            factor (float): scale factor
            thresholds (list): steps thresholds

        Returns:
            min_size (tf.Tensor): processed minimum face size
            factor (tf.Tensor): processed scale factor
            thresholds (tf.Tensor): processed steps thresholds
        """
        min_size = tf.convert_to_tensor(float(min_size))
        factor = tf.convert_to_tensor(float(factor))
        thresholds = [float(threshold) for threshold in thresholds]
        thresholds = tf.convert_to_tensor(thresholds)

        return min_size, factor, thresholds

    def process_outputs(
        self,
        image: np.ndarray,
        bboxes: tf.Tensor,
        scores: tf.Tensor,
        landmarks: tf.Tensor,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Processes MTCNN model outputs

        Args:
            image (np.ndarray): image in numpy array
            bboxes (tf.Tensor): tensor array of detected bboxes
            scores (tf.Tensor): tensor array of confidence scores
            landmarks (tf.Tensor): tensor array of facial landmarks

        Returns:
            bboxes (np.ndarray): processed numpy array of detected bboxes
            scores (np.ndarray): processed numpy array of confidence scores
            landmarks (np.ndarray): processed numpy array of facial landmarks
        """
        bboxes, scores, landmarks = bboxes.numpy(), scores.numpy(), landmarks.numpy()

        # Filter bboxes by confidence score
        indices = np.where(scores > self.score)[0]
        bboxes = bboxes[indices]
        scores = scores[indices]
        landmarks = landmarks[indices]

        # Swap position of x, y coordinates (the model returns y1, x1, y2, x2)
        bboxes[:, [0, 1]] = bboxes[:, [1, 0]]
        bboxes[:, [2, 3]] = bboxes[:, [3, 2]]

        # Express image coordinates as a percentage of image height and width
        bboxes[:, [0, 2]] = bboxes[:, [0, 2]] / image.shape[1]
        bboxes[:, [1, 3]] = bboxes[:, [1, 3]] / image.shape[0]

        return bboxes, scores, landmarks
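# Illustrative sketch (not part of the PeekingDuck source): constructing the
# Detector directly with the config keys read above. The threshold values and
# the weights file name are placeholders, not the packaged defaults, and the
# weights file must exist on disk for _load_mtcnn_graph() to succeed.
if __name__ == "__main__":
    config = {
        "mtcnn_min_size": 40,
        "mtcnn_factor": 0.709,
        "mtcnn_thresholds": [0.6, 0.7, 0.7],
        "mtcnn_score": 0.7,
        "weights": {"model_file": "mtcnn_weights.pb"},  # hypothetical file name
    }
    detector = Detector(config, model_dir=Path("weights"))

    frame = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # stand-in image
    bboxes, scores, landmarks, labels = detector.predict_bbox_landmarks(frame)
    # bboxes come back as (x1, y1, x2, y2) normalized to image width/height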
#!/usr/bin/env python3

import pathlib

from more_itertools import pairwise

import matplotlib.pyplot as plt
import matplotlib.dates as mdates

from analysis.parser.edge_challenge_response import main as parse_cr
from analysis.parser.wsn_pyterm import main as parse_pyterm
from analysis.graph.util import squash_generic_seq, savefig

from common.names import hostname_to_name, eui64_to_name

plt.rcParams['text.usetex'] = True
plt.rcParams['font.size'] = 12

def find_status_at_time(status, t):
    # All pairs of changes
    for ((at, av), (bt, bv)) in pairwise(status):
        if t >= at and t < bt:
            return av

    if t >= status[-1][0]:
        return status[-1][1]

    # Unknown
    return None

def belief_correct(belief, actual):
    # These lists contain the times at which the status changed.

    # All the times the belief or the actual status changed
    changes = list(sorted(list(zip(*belief))[0] + list(zip(*actual))[0]))

    result = []

    for t in changes:
        # Find the actual and believed values at this time
        a = find_status_at_time(actual, t)
        b = find_status_at_time(belief, t)

        r = None
        if a is None or b is None:
            r = None

        # Correct
        elif a and b:
            r = "TP"
        elif not a and not b:
            r = "TN"

        # Incorrect, believed to be bad
        elif a and not b:
            r = "FN"

        # Incorrect, believed to be good
        elif not a and b:
            r = "FP"

        result.append((t, r))

    return result

def main(log_dir: pathlib.Path):
    (log_dir / "graphs").mkdir(parents=True, exist_ok=True)

    results = parse_cr(log_dir)
    pyterm_results = parse_pyterm(log_dir)

    print([r.behaviour_changes for r in results.values()])

    earliest = min(t for r in results.values() for (t, v) in r.behaviour_changes)
    latest = max(t for r in results.values() for (t, v) in r.behaviour_changes)

    # Find the latest time a task was submitted.
    # Sometimes we might not have much behaviour changing to go on.
    latest_task = max(t.time for r in pyterm_results.values() for t in r.tasks)
    latest = max(latest, latest_task)

    # Create a graph showing when tasks were offloaded to nodes and that node was bad.
    # Need to create some data ranges for well-behaved nodes, as they don't say when they are being bad.
    actual = {
        hostname_to_name(hostname):
            result.behaviour_changes + [(latest, result.behaviour_changes[-1][1])]
            if result.behaviour_changes
            else [(earliest, True), (latest, True)]

        for (hostname, result) in results.items()
    }

    edge_labels = {up.edge_id for result in pyterm_results.values() for up in result.tm_updates}

    believed = {
        (hostname, eui64_to_name(edge_label)): [
            (up.time, not up.tm_to.bad)
            for up in result.tm_updates
            if up.edge_id == edge_label
        ]
        for (hostname, result) in pyterm_results.items()
        for edge_label in edge_labels
    }

    # Translate believed into whether the belief was correct or not
    correct = {
        (wsn, edge): belief_correct(results, actual[edge])
        for ((wsn, edge), results) in believed.items()
    }

    targets = {task.target for result in pyterm_results.values() for task in result.tasks}

    data = {
        target: [
            task.time
            for result in pyterm_results.values()
            for task in result.tasks
            if task.target == target
        ]
        for target in targets
    }

    fig = plt.figure()
    ax = fig.gca()

    y = 0
    yticks = []
    ytick_labels = []

    legend = True

    cmap = plt.cm.get_cmap('tab10')  # new tab10

    # Alternative fixed palette:
    # tp_colour = "#59a14f"; tn_colour = "#4e79a7"; fp_colour = "#b07aa1"
    # fn_colour = "#9c755f"; u_colour = "#bab0ac"
    tp_colour = cmap(2)
    tn_colour = cmap(0)
    fp_colour = cmap(4)
    fn_colour = cmap(5)
    u_colour = cmap(7)

    summaries = {}

    for (hostname, XY) in sorted(correct.items(), key=lambda x: x[0]):
        result = squash_generic_seq(XY, ("TP", "TN", "FP", "FN", None))

        summary = {k:
sum(v[1].total_seconds() for v in vv) for (k, vv) in result.items() if k is not None} summary_total = sum(summary.values()) summary_pc = {k: round(v/summary_total, 2) for (k, v) in summary.items()} print(hostname, summary_pc) summaries[hostname] = f"\\ConfusionMatrix{{{summary_pc['TP']}}}{{{summary_pc['TN']}}}{{{summary_pc['FP']}}}{{{summary_pc['FN']}}}" ax.broken_barh(result["TP"], (y,0.9), color=tp_colour, label="TP" if legend else None) ax.broken_barh(result["TN"], (y,0.9), color=tn_colour, label="TN" if legend else None) ax.broken_barh(result["FP"], (y,0.9), color=fp_colour, label="FP" if legend else None) ax.broken_barh(result["FN"], (y,0.9), color=fn_colour, label="FN" if legend else None) #ax.broken_barh(result[None], (y,0.9), color=u_colour, label="U" if legend else None) yticks.append(y) ytick_labels.append(f"{hostname[0]}\\newline eval {hostname[1]}") y += 1 legend = False ax.set_yticks([x+0.45 for x in yticks]) ax.set_yticklabels(ytick_labels) ax.set_xlabel('Time') ax.set_ylabel('Status') ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M')) ax.legend() savefig(fig, log_dir / "graphs" / "cr_correctly_evaluated.pdf") print("\\begin{table}[H]") wsns = list(sorted({k[0] for k in summaries.keys()})) rrs = list(sorted({k[1] for k in summaries.keys()})) print("\\centering") print("\\begin{tabular}{l c c c}") print(" & ".join(['~'] + wsns) + "\\\\") for rr in rrs: print(rr) for wsn in wsns: summary = summaries[(wsn, rr)] print("&", summary) print("\\\\") print("\\end{tabular}") print("\\end{table}") if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='Graph Correctly Evaluated') parser.add_argument('--log-dir', type=pathlib.Path, default="results", nargs='+', help='The directory which contains the log output') args = parser.parse_args() for log_dir in args.log_dir: print(f"Graphing for {log_dir}") main(log_dir)
""" contains class and methods for reading avro files and dirs """ import copy import os import json from avro_to_python.classes.node import Node from avro_to_python.classes.file import File from avro_to_python.utils.paths import ( get_system_path, get_avsc_files, verify_path_exists ) from avro_to_python.utils.exceptions import ( NoFileOrDir, MissingFileError, NoFilesError ) from avro_to_python.utils.avro.helpers import _get_namespace from avro_to_python.utils.avro.files.enum import _enum_file from avro_to_python.utils.avro.files.record import _record_file class AvscReader(object): """ reader object for avro avsc files Should contain all logic for reading and formatting information within a dir of avsc files or a single file """ file_tree = None def __init__(self, directory: str=None, file: str=None) -> None: """ Initializer should just create a list of files to process Parameters ---------- directory: str Directory of files to read Cannot be used with "file" param file: str path of avsc file to compile Cannot be used with "directory" param Returns ------- None """ # initialize cental object self.obj = {} self.file_tree = None if directory: if os.path.isfile(directory): raise OSError(f'{directory} is a file!') files = get_avsc_files(directory) if files: self.files = files self.obj['root_dir'] = get_system_path(directory) self.obj['read_type'] = 'directory' else: raise NoFilesError(f'No avsc files found in {directory}') elif file: if not verify_path_exists(file): raise MissingFileError(f'{file} does not exist!') if os.path.isdir(file): raise IsADirectoryError(f'{file} is a directory!') syspath = get_system_path(file) self.files = [syspath] self.obj['read_type'] = 'file' else: raise NoFileOrDir self.obj['avsc'] = [] def read(self): """ runner method for AvscReader object """ self._read_files() self._build_namespace_tree() def _traverse_tree(self, root_node: dict, namespace: str='') -> dict: """ Traverses the namespace tree to add files to namespace paths Parameters ---------- root_node: dict root_node node to start tree traversal namespace: str (period seperated) namespace representing the tree path Returns ------- current_node: dict child node in tree representing namespace destination """ current_node = root_node namespaces = namespace.split('.') # empty namespace if namespace == '': return current_node for name in namespaces: # create node if it doesn't exist if name not in current_node.children: current_node.children[name] = Node( name=name, children={}, files={} ) # move through tree current_node = current_node.children[name] return current_node def _read_files(self) -> None: """ reads and serializes avsc files to central object """ for file in self.files: with open(file, 'r') as f: serialized = json.load(f) self.obj['avsc'].append(serialized) def _build_namespace_tree(self) -> None: """ builds tree structure on namespace """ # initialize empty node with empty string name root_node = Node(name='') # populate queue prior to tree building queue = copy.deepcopy(self.obj['avsc']) while queue: # get first item in queue item = queue.pop(0) # impute namespace item['namespace'] = _get_namespace(item) # traverse to namespace starting from root_node current_node = self._traverse_tree( root_node=root_node, namespace=item['namespace'] ) # initialize empty file obj for mutation file = File( name=item['name'], avrotype=item['type'], namespace=item['namespace'], schema=item, fields={}, imports=[], enum_sumbols=[] ) # handle record type if file.avrotype == 'record': _record_file(file, item, queue) # 
handle enum type file elif file.avrotype == 'enum': _enum_file(file, item) else: raise ValueError( f"{file["type"]} is currently not supported." ) current_node.files[item['name']] = file self.file_tree = root_node
""" contains class and methods for reading avro files and dirs """ import copy import os import json from avro_to_python.classes.node import Node from avro_to_python.classes.file import File from avro_to_python.utils.paths import ( get_system_path, get_avsc_files, verify_path_exists ) from avro_to_python.utils.exceptions import ( NoFileOrDir, MissingFileError, NoFilesError ) from avro_to_python.utils.avro.helpers import _get_namespace from avro_to_python.utils.avro.files.enum import _enum_file from avro_to_python.utils.avro.files.record import _record_file class AvscReader(object): """ reader object for avro avsc files Should contain all logic for reading and formatting information within a dir of avsc files or a single file """ file_tree = None def __init__(self, directory: str=None, file: str=None) -> None: """ Initializer should just create a list of files to process Parameters ---------- directory: str Directory of files to read Cannot be used with "file" param file: str path of avsc file to compile Cannot be used with "directory" param Returns ------- None """ # initialize cental object self.obj = {} self.file_tree = None if directory: if os.path.isfile(directory): raise OSError(f'{directory} is a file!') files = get_avsc_files(directory) if files: self.files = files self.obj['root_dir'] = get_system_path(directory) self.obj['read_type'] = 'directory' else: raise NoFilesError(f'No avsc files found in {directory}') elif file: if not verify_path_exists(file): raise MissingFileError(f'{file} does not exist!') if os.path.isdir(file): raise IsADirectoryError(f'{file} is a directory!') syspath = get_system_path(file) self.files = [syspath] self.obj['read_type'] = 'file' else: raise NoFileOrDir self.obj['avsc'] = [] def read(self): """ runner method for AvscReader object """ self._read_files() self._build_namespace_tree() def _traverse_tree(self, root_node: dict, namespace: str='') -> dict: """ Traverses the namespace tree to add files to namespace paths Parameters ---------- root_node: dict root_node node to start tree traversal namespace: str (period seperated) namespace representing the tree path Returns ------- current_node: dict child node in tree representing namespace destination """ current_node = root_node namespaces = namespace.split('.') # empty namespace if namespace == '': return current_node for name in namespaces: # create node if it doesn't exist if name not in current_node.children: current_node.children[name] = Node( name=name, children={}, files={} ) # move through tree current_node = current_node.children[name] return current_node def _read_files(self) -> None: """ reads and serializes avsc files to central object """ for file in self.files: with open(file, 'r') as f: serialized = json.load(f) self.obj['avsc'].append(serialized) def _build_namespace_tree(self) -> None: """ builds tree structure on namespace """ # initialize empty node with empty string name root_node = Node(name='') # populate queue prior to tree building queue = copy.deepcopy(self.obj['avsc']) while queue: # get first item in queue item = queue.pop(0) # impute namespace item['namespace'] = _get_namespace(item) # traverse to namespace starting from root_node current_node = self._traverse_tree( root_node=root_node, namespace=item['namespace'] ) # initialize empty file obj for mutation file = File( name=item['name'], avrotype=item['type'], namespace=item['namespace'], schema=item, fields={}, imports=[], enum_sumbols=[] ) # handle record type if file.avrotype == 'record': _record_file(file, item, queue) # 
handle enum type file elif file.avrotype == 'enum': _enum_file(file, item) else: raise ValueError( f"{file['type']} is currently not supported." ) current_node.files[item['name']] = file self.file_tree = root_node
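# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module). The 'avro/schemas' path is
# hypothetical, and the import path within the package is assumed here; point
# the reader at any directory containing .avsc files.
# ---------------------------------------------------------------------------
# reader = AvscReader(directory='avro/schemas')
# reader.read()
# root = reader.file_tree           # Node tree keyed by namespace components
# print(root.children.keys())       # top-level namespace segments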
from discord.ext import commands
from .utils import checks, db, fuzzy, cache, time

import asyncio
import discord
import re
import lxml.etree as etree
from collections import Counter

DISCORD_API_ID = 81384788765712384
DISCORD_BOTS_ID = 110373943822540800
USER_BOTS_ROLE = 178558252869484544
CONTRIBUTORS_ROLE = 111173097888993280
DISCORD_PY_ID = 84319995256905728
DISCORD_PY_GUILD = 336642139381301249


def is_discord_api():
    return checks.is_in_guilds(DISCORD_API_ID, DISCORD_PY_GUILD)


def contributor_or_higher():
    def predicate(ctx):
        guild = ctx.guild
        if guild is None:
            return False

        role = discord.utils.find(lambda r: r.id == CONTRIBUTORS_ROLE, guild.roles)
        if role is None:
            return False

        return ctx.author.top_role >= role
    return commands.check(predicate)


class Feeds(db.Table):
    id = db.PrimaryKeyColumn()
    channel_id = db.Column(db.Integer(big=True))
    role_id = db.Column(db.Integer(big=True))
    name = db.Column(db.String)


class RTFM(db.Table):
    id = db.PrimaryKeyColumn()
    user_id = db.Column(db.Integer(big=True), unique=True, index=True)
    count = db.Column(db.Integer, default=1)


class API:
    """Discord API exclusive things."""

    def __init__(self, bot):
        self.bot = bot
        self.issue = re.compile(r'##(?P<number>[0-9]+)')
        self._recently_blocked = set()

    async def on_member_join(self, member):
        if member.guild.id != DISCORD_API_ID:
            return

        if member.bot:
            role = discord.Object(id=USER_BOTS_ROLE)
            await member.add_roles(role)

    async def on_message(self, message):
        channel = message.channel
        author = message.author
        if channel.id != DISCORD_PY_ID:
            return

        if author.status is discord.Status.offline:
            fmt = f'{author.mention} has been blocked for being invisible until they change their status or for 5 minutes.'
            try:
                await channel.set_permissions(author, read_messages=False, reason='invisible block')
                self._recently_blocked.add(author.id)
                await channel.send(fmt)
                msg = f'Heya. You have been automatically blocked from <#{DISCORD_PY_ID}> for 5 minutes for being ' \
                      'invisible.\nTry chatting again in 5 minutes or when you change your status. If you\'re curious ' \
                      'why invisible users are blocked, it is because they tend to break the client and cause them to ' \
                      'be hard to mention. Since we want to help you usually, we expect mentions to work without ' \
                      'headaches.\n\nSorry for the trouble.'
                await author.send(msg)
            except discord.HTTPException:
                pass
            await asyncio.sleep(300)
            self._recently_blocked.discard(author.id)
            await channel.set_permissions(author, overwrite=None, reason='invisible unblock')
            return

        m = self.issue.search(message.content)
        if m is not None:
            url = 'https://github.com/Rapptz/discord.py/issues/'
            await channel.send(url + m.group('number'))

    async def on_member_update(self, before, after):
        if after.guild.id != DISCORD_API_ID:
            return

        if before.status is discord.Status.offline and after.status is not discord.Status.offline:
            if after.id in self._recently_blocked:
                self._recently_blocked.discard(after.id)
                channel = after.guild.get_channel(DISCORD_PY_ID)
                await channel.set_permissions(after, overwrite=None, reason='invisible unblock')

    async def build_rtfm_lookup_table(self):
        cache = {}

        page_types = {
            'rewrite': (
                'http://discordpy.rtfd.io/en/rewrite/api.html',
                'http://discordpy.rtfd.io/en/rewrite/ext/commands/api.html'
            ),
            'latest': (
                'http://discordpy.rtfd.io/en/latest/api.html',
            )
        }

        for key, pages in page_types.items():
            sub = cache[key] = {}
            for page in pages:
                async with self.bot.session.get(page) as resp:
                    if resp.status != 200:
                        raise RuntimeError('Cannot build rtfm lookup table, try again later.')

                    text = await resp.text(encoding='utf-8')
                    root = etree.fromstring(text, etree.HTMLParser())
                    nodes = root.findall(".//dt/a[@class='headerlink']")

                    for node in nodes:
                        href = node.get('href', '')
                        as_key = href.replace('#discord.', '').replace('ext.commands.', '')
                        sub[as_key] = page + href

        self._rtfm_cache = cache

    async def do_rtfm(self, ctx, key, obj):
        base_url = f'http://discordpy.rtfd.io/en/{key}/'

        if obj is None:
            await ctx.send(base_url)
            return

        if not hasattr(self, '_rtfm_cache'):
            await ctx.trigger_typing()
            await self.build_rtfm_lookup_table()

        # identifiers don't have spaces
        obj = obj.replace(' ', '_')

        if key == 'rewrite':
            pit_of_success_helpers = {
                'vc': 'VoiceClient',
                'msg': 'Message',
                'color': 'Colour',
                'perm': 'Permissions',
                'channel': 'TextChannel',
                'chan': 'TextChannel',
            }

            # point the abc.Messageable types properly:
            q = obj.lower()
            for name in dir(discord.abc.Messageable):
                if name[0] == '_':
                    continue
                if q == name:
                    obj = f'abc.Messageable.{name}'
                    break

            def replace(o):
                return pit_of_success_helpers.get(o.group(0), '')

            pattern = re.compile('|'.join(fr'\b{k}\b' for k in pit_of_success_helpers.keys()))
            obj = pattern.sub(replace, obj)

        cache = list(self._rtfm_cache[key].items())

        def transform(tup):
            return tup[0]

        matches = fuzzy.finder(obj, cache, key=lambda t: t[0], lazy=False)[:5]

        e = discord.Embed(colour=discord.Colour.blurple())
        if len(matches) == 0:
            return await ctx.send('Could not find anything. Sorry.')

        e.description = '\n'.join(f'[{key}]({url})' for key, url in matches)
        await ctx.send(embed=e)

        if ctx.guild and ctx.guild.id == DISCORD_API_ID:
            query = 'INSERT INTO rtfm (user_id) VALUES ($1) ON CONFLICT (user_id) DO UPDATE SET count = rtfm.count + 1;'
            await ctx.db.execute(query, ctx.author.id)

    @commands.group(aliases=['rtfd'], invoke_without_command=True)
    async def rtfm(self, ctx, *, obj: str = None):
        """Gives you a documentation link for a discord.py entity.

        Events, objects, and functions are all supported through
        a cruddy fuzzy algorithm.
        """
        await self.do_rtfm(ctx, 'latest', obj)

    @rtfm.command(name='rewrite')
    async def rtfm_rewrite(self, ctx, *, obj: str = None):
        """Gives you a documentation link for a rewrite discord.py entity."""
        await self.do_rtfm(ctx, 'rewrite', obj)

    async def _member_stats(self, ctx, member, total_uses):
        e = discord.Embed(title='RTFM Stats')
        e.set_author(name=str(member), icon_url=member.avatar_url)

        query = 'SELECT count FROM rtfm WHERE user_id=$1;'
        record = await ctx.db.fetchrow(query, member.id)

        if record is None:
            count = 0
        else:
            count = record['count']

        e.add_field(name='Uses', value=count)
        e.add_field(name='Percentage', value=f'{count/total_uses:.2%} out of {total_uses}')
        e.colour = discord.Colour.blurple()
        await ctx.send(embed=e)

    @rtfm.command()
    async def stats(self, ctx, *, member: discord.Member = None):
        """Tells you stats about the ?rtfm command."""
        query = 'SELECT SUM(count) AS total_uses FROM rtfm;'
        record = await ctx.db.fetchrow(query)
        total_uses = record['total_uses']

        if member is not None:
            return await self._member_stats(ctx, member, total_uses)

        query = 'SELECT user_id, count FROM rtfm ORDER BY count DESC LIMIT 10;'
        records = await ctx.db.fetch(query)

        output = []
        output.append(f'**Total uses**: {total_uses}')

        # first we get the most used users
        if records:
            output.append(f'**Top {len(records)} users**:')

            for rank, (user_id, count) in enumerate(records, 1):
                user = self.bot.get_user(user_id)
                if rank != 10:
                    output.append(f'{rank}\u20e3 {user}: {count}')
                else:
                    output.append(f'\N{KEYCAP TEN} {user}: {count}')

        await ctx.send('\n'.join(output))

    def library_name(self, channel):
        # language_<name>
        name = channel.name
        index = name.find('_')
        if index != -1:
            name = name[index + 1:]
        return name.replace('-', '.')

    @commands.command()
    @commands.has_permissions(manage_roles=True)
    @is_discord_api()
    async def block(self, ctx, *, member: discord.Member):
        """Blocks a user from your channel."""
        reason = f'Block by {ctx.author} (ID: {ctx.author.id})'

        try:
            await ctx.channel.set_permissions(member, send_messages=False, reason=reason)
        except:
            await ctx.send('\N{THUMBS DOWN SIGN}')
        else:
            await ctx.send('\N{THUMBS UP SIGN}')

    @commands.command()
    @commands.has_permissions(manage_roles=True)
    @is_discord_api()
    async def tempblock(self, ctx, duration: time.FutureTime, *, member: discord.Member):
        """Temporarily blocks a user from your channel.

        The duration can be a short time form, e.g. 30d, a more human
        duration such as "until thursday at 3PM", or a more concrete time
        such as "2017-12-31".

        Note that times are in UTC.
        """

        reminder = self.bot.get_cog('Reminder')
        if reminder is None:
            return await ctx.send('Sorry, this functionality is currently unavailable. Try again later?')

        timer = await reminder.create_timer(duration.dt, 'tempblock', ctx.guild.id, ctx.author.id,
                                            ctx.channel.id, member.id, connection=ctx.db)

        reason = f'Tempblock by {ctx.author} (ID: {ctx.author.id}) until {duration.dt}'

        try:
            await ctx.channel.set_permissions(member, send_messages=False, reason=reason)
        except:
            await ctx.send('\N{THUMBS DOWN SIGN}')
        else:
            await ctx.send(f'Blocked {member} for {time.human_timedelta(duration.dt)}.')

    async def on_tempblock_timer_complete(self, timer):
        guild_id, mod_id, channel_id, member_id = timer.args

        guild = self.bot.get_guild(guild_id)
        if guild is None:
            # RIP
            return

        channel = guild.get_channel(channel_id)
        if channel is None:
            # RIP x2
            return

        to_unblock = guild.get_member(member_id)
        if to_unblock is None:
            # RIP x3
            return

        moderator = guild.get_member(mod_id)
        if moderator is None:
            try:
                moderator = await self.bot.get_user_info(mod_id)
            except:
                # request failed somehow
                moderator = f'Mod ID {mod_id}'
            else:
                moderator = f'{moderator} (ID: {mod_id})'
        else:
            moderator = f'{moderator} (ID: {mod_id})'

        reason = f'Automatic unblock from timer made on {timer.created_at} by {moderator}.'

        try:
            await channel.set_permissions(to_unblock, send_messages=None, reason=reason)
        except:
            pass

    @cache.cache()
    async def get_feeds(self, channel_id, *, connection=None):
        con = connection or self.bot.pool
        query = 'SELECT name, role_id FROM feeds WHERE channel_id=$1;'
        feeds = await con.fetch(query, channel_id)
        return {f['name']: f['role_id'] for f in feeds}

    @commands.group(name='feeds', invoke_without_command=True)
    @commands.guild_only()
    async def _feeds(self, ctx):
        """Shows the list of feeds that the channel has.

        A feed is something that users can opt into in order to receive
        news about a certain topic, by running the `sub` command (and
        opt out of with the `unsub` command).
        You can publish to a feed by using the `publish` command.
        """

        feeds = await self.get_feeds(ctx.channel.id)

        if len(feeds) == 0:
            await ctx.send('This channel has no feeds.')
            return

        names = '\n'.join(f'- {r}' for r in feeds)
        await ctx.send(f'Found {len(feeds)} feeds.\n{names}')

    @_feeds.command(name='create')
    @commands.has_permissions(manage_roles=True)
    @commands.guild_only()
    async def feeds_create(self, ctx, *, name: str):
        """Creates a feed with the specified name.

        You need Manage Roles permissions to create a feed.
        """
        name = name.lower()

        if name in ('@everyone', '@here'):
            return await ctx.send('That is an invalid feed name.')

        query = 'SELECT role_id FROM feeds WHERE channel_id=$1 AND name=$2;'

        exists = await ctx.db.fetchrow(query, ctx.channel.id, name)
        if exists is not None:
            await ctx.send('This feed already exists.')
            return

        # create the role
        if ctx.guild.id == DISCORD_API_ID:
            role_name = self.library_name(ctx.channel) + ' ' + name
        else:
            role_name = name

        role = await ctx.guild.create_role(name=role_name, permissions=discord.Permissions.none())
        query = 'INSERT INTO feeds (role_id, channel_id, name) VALUES ($1, $2, $3);'
        await ctx.db.execute(query, role.id, ctx.channel.id, name)
        self.get_feeds.invalidate(self, ctx.channel.id)
        await ctx.send(f'{ctx.tick(True)} Successfully created feed.')

    @_feeds.command(name='delete', aliases=['remove'])
    @commands.has_permissions(manage_roles=True)
    @commands.guild_only()
    async def feeds_delete(self, ctx, *, feed: str):
        """Removes a feed from the channel.

        This will also delete the associated role, so this
        action is irreversible.
        """
        query = 'DELETE FROM feeds WHERE channel_id=$1 AND name=$2 RETURNING *;'
        records = await ctx.db.fetch(query, ctx.channel.id, feed)
        self.get_feeds.invalidate(self, ctx.channel.id)

        if len(records) == 0:
            return await ctx.send('This feed does not exist.')

        for record in records:
            role = discord.utils.find(lambda r: r.id == record['role_id'], ctx.guild.roles)
            if role is not None:
                try:
                    await role.delete()
                except discord.HTTPException:
                    continue

        await ctx.send(f'{ctx.tick(True)} Removed feed.')

    async def do_subscription(self, ctx, feed, action):
        feeds = await self.get_feeds(ctx.channel.id)
        if len(feeds) == 0:
            await ctx.send('This channel has no feeds set up.')
            return

        if feed not in feeds:
            await ctx.send(f'This feed does not exist.\nValid feeds: {", ".join(feeds)}')
            return

        role_id = feeds[feed]
        role = discord.utils.find(lambda r: r.id == role_id, ctx.guild.roles)
        if role is not None:
            await action(role)
            await ctx.message.add_reaction(ctx.tick(True).strip('<:>'))
        else:
            await ctx.message.add_reaction(ctx.tick(False).strip('<:>'))

    @commands.command()
    @commands.guild_only()
    async def sub(self, ctx, *, feed: str):
        """Subscribes to the publication of a feed.

        This will allow you to receive updates from the channel
        owner. To unsubscribe, see the `unsub` command.
        """
        await self.do_subscription(ctx, feed, ctx.author.add_roles)

    @commands.command()
    @commands.guild_only()
    async def unsub(self, ctx, *, feed: str):
        """Unsubscribes from the publication of a feed.

        This will remove you from notifications of a feed you
        are no longer interested in. You can always sub back by
        using the `sub` command.
        """
        await self.do_subscription(ctx, feed, ctx.author.remove_roles)

    @commands.command()
    @commands.has_permissions(manage_roles=True)
    @commands.guild_only()
    async def publish(self, ctx, feed: str, *, content: str):
        """Publishes content to a feed.

        Everyone who is subscribed to the feed will be notified
        with the content. Use this to notify people of important
        events or changes.
        """
        feeds = await self.get_feeds(ctx.channel.id)
        feed = feed.lower()
        if feed not in feeds:
            await ctx.send('This feed does not exist.')
            return

        role = discord.utils.get(ctx.guild.roles, id=feeds[feed])
        if role is None:
            fmt = 'Uh.. a fatal error occurred here. The role associated with ' \
                  'this feed has been removed or not found. ' \
                  'Please recreate the feed.'
            await ctx.send(fmt)
            return

        # delete the message we used to invoke it
        try:
            await ctx.message.delete()
        except:
            pass

        # make the role mentionable
        await role.edit(mentionable=True)

        # then send the message..
        await ctx.send(f'{role.mention}: {content}'[:2000])

        # then make the role unmentionable
        await role.edit(mentionable=False)

    async def refresh_faq_cache(self):
        self.faq_entries = {}
        base_url = 'http://discordpy.readthedocs.io/en/latest/faq.html'
        async with self.bot.session.get(base_url) as resp:
            text = await resp.text(encoding='utf-8')

            root = etree.fromstring(text, etree.HTMLParser())
            nodes = root.findall(".//div[@id='questions']/ul[@class='simple']//ul/li/a")
            for node in nodes:
                self.faq_entries[''.join(node.itertext()).strip()] = base_url + node.get('href').strip()

    @commands.command()
    async def faq(self, ctx, *, query: str = None):
        """Shows an FAQ entry from the discord.py documentation"""
        if not hasattr(self, 'faq_entries'):
            await self.refresh_faq_cache()

        if query is None:
            return await ctx.send('http://discordpy.readthedocs.io/en/latest/faq.html')

        matches = fuzzy.extract_matches(query, self.faq_entries, scorer=fuzzy.partial_ratio, score_cutoff=40)
        if len(matches) == 0:
            return await ctx.send('Nothing found...')

        fmt = '\n'.join(f'**{key}**\n{value}' for key, _, value in matches)
        await ctx.send(fmt)


def setup(bot):
    bot.add_cog(API(bot))
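# ---------------------------------------------------------------------------
# Loading sketch (not part of the original cog). The module path 'cogs.api'
# is hypothetical; adjust it to wherever this file lives in the bot package.
# ---------------------------------------------------------------------------
# bot = commands.Bot(command_prefix='?')
# bot.load_extension('cogs.api')   # invokes setup(bot) above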
#!/usr/bin/env python
# -*- coding: utf-8 -*-

""" chipS2Extract.py -- A routine to extract a small subset from
    imagery in S3 object storage.
    Essential part of DIAS functionality for CAP Checks by Monitoring

    Author: Guido Lemoine, European Commission, Joint Research Centre
    License: see git repository
    Version 1.4 - 2020-03-17

    Revisions in 1.1:
    - Implementation of parallel multi-band retrieval
    - Using python3 formatted strings
    Revisions in 1.2:
    - Refactored, including VM transfer and HTML preparation
    Revisions in 1.3:
    - lut and bands parameter handling
    Revisions in 1.4 by Konstantinos Anastasakis:
    - Remove parallel processing
"""

import os
import glob
import time
import sys
import logging
from datetime import datetime
# from concurrent.futures import ProcessPoolExecutor, as_completed

from scripts.dias import creodiasCARDchips as ccc
from scripts import chipRipper2

# logging.basicConfig(filename=os.path.basename(sys.argv[0]).replace(
#     '.py', '.log'), filemode='w',
#     format='%(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)


def parallelExtract(lon, lat, start_date, end_date, unique_dir, lut, bands,
                    plevel):
    start = time.time()
    logging.debug(start)
    # Read in chip list, make sure to skip duplicates that have
    # least complete overlap or lower version numbers
    chiplist = ccc.getS2Chips(float(lon), float(lat), start_date, end_date,
                              1280, plevel)
    logging.debug(chiplist)
    chiplist = ccc.rinseAndDryS2(chiplist)
    logging.debug("INITIAL CHIPS")
    logging.debug(chiplist)
    logging.debug(unique_dir)

    if os.path.exists(unique_dir):
        # Check which chips are already in the unique_dir
        # (simple form of caching)
        cachedList = glob.glob(f"{unique_dir}/*.png")
        cachedList = [f.split('/')[-1].replace('.png', '')
                      for f in cachedList]
        logging.debug("CACHED")
        logging.debug(cachedList)
        for f in cachedList:
            if f in chiplist:
                chiplist.remove(f)
        logging.debug("FINAL CHIPS")
        logging.debug(chiplist)
        if len(chiplist) == 0:
            logging.debug("No new chips to be processed")
            return 0
    else:
        logging.debug(f"Creating {unique_dir} on host")
        os.makedirs(unique_dir)

    logging.debug(f"Processing {len(chiplist)} chips")
    if len(chiplist) > 24:
        logging.debug("Too many chips requested")
        return -1

    # chip_set = {}
    for i in range(len(chiplist)):
        reference = chiplist[i]
        chipRipper2.main(lon, lat, reference, unique_dir, lut, bands, plevel)

    logging.debug(
        f"Total time required for {len(chiplist)} images with {3} bands: "
        f"{time.time() - start} seconds")
    logging.debug(f"Generated {len(chiplist)} chips")
    return len(chiplist)


def calendarCheck(klist, start_date, end_date):
    # Check whether keys are within the time window
    s_date = datetime.strptime(start_date, '%Y-%m-%d')
    e_date = datetime.strptime(end_date, '%Y-%m-%d')
    dlist = [datetime.strptime(k, '%Y%m%dT%H%M%S') for k in klist]
    slist = [d.strftime('%Y%m%dT%H%M%S') for d in dlist if s_date < d < e_date]
    return slist


def buildHTML(unique_dir, start_date, end_date, columns=8):
    flist = glob.glob(f"{unique_dir}/*.png")
    # Map acquisition timestamps to chip file names
    # (renamed from "dict" to avoid shadowing the builtin)
    chips = {}
    for f in flist:
        chips[f.split('_')[-5]] = f

    keys = sorted(chips.keys())
    logging.debug("Before calendar check: ")
    logging.debug(keys)
    keys = calendarCheck(keys, start_date, end_date)
    logging.debug("After calendar check: ")
    logging.debug(keys)

    html = []
    html.append("<!DOCTYPE html><html><head>")
    html.append("<style>table { border-spacing: 10px; }")
    html.append("</style></head><body>")
    html.append("<table style=\"width:100%\">")

    for i in range(len(keys)):
        if i % columns == 0:
            html.append("<tr>")
        html.append(
            f"""<td><label><img id = "{keys[i]}" src="{chips[keys[i]].replace('dump', '/dump')}"/><br/>{keys[i]}</label></td>""")
        if i % columns == columns - 1:
            html.append("</tr><br/>")

    html.append("</tr></table></body></html>")

    with open(f"{unique_dir}/dump.html", "w") as out:
        out.write("".join(html))
    return True
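# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script). The coordinates, dates,
# output directory, lut and bands values below are hypothetical placeholders.
# ---------------------------------------------------------------------------
# n = parallelExtract(5.15, 52.25, '2019-06-01', '2019-06-30',
#                     'dump/abc123', lut='5_95', bands='B08_B04_B03',
#                     plevel='LEVEL2A')
# if n > 0:
#     buildHTML('dump/abc123', '2019-06-01', '2019-06-30')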
import re
from pathlib import Path

import markdown
from markdown.extensions.fenced_code import FencedBlockPreprocessor

# highlightJS expects the class "language-*" but markdown default is "*"
FencedBlockPreprocessor.LANG_TAG = ' class="language-%s"'

CONTENT = Path(__file__).parent / "content"
DEST = Path(__file__).parent / "templates" / "generated"

DOCS_HTML_TEMPLATE = """{% extends "docs.html" %}
{% block title %}<title><TITLE></title>{% endblock %}
{% block content %}<CONTENT>{% endblock %}
"""  # noqa

CHANGELOG_HTML_TEMPLATE = """{% extends "changelog.html" %}
{% block title %}<title><TITLE></title>{% endblock %}
{% block content %}<CONTENT>{% endblock %}
"""  # noqa

TAB_OUTER_TEMPLATE = """<div class="card mb-3">
<div class="card-header">
<ul class="nav nav-tabs card-header-tabs{classes}"{id_value} role="tablist">{tabs}</ul>
</div>
<div class="card-body tab-content">{tab_panes}</div>
</div>
"""  # noqa

TAB_TEMPLATE = """<li class="nav-item" role="presentation">
<a class="nav-link{2}" id="{0}-tab" data-toggle="tab" href="#{0}" role="tab" aria-controls="{0}" aria-selected="{3}">{1}</a>
</li>
"""  # noqa

TAB_PANE_TEMPLATE = """<div class="tab-pane fade show{2}" id="{0}" role="tabpanel" aria-labelledby="{0}-tab">{1}</div>"""  # noqa

TAB_COUNT = 0


def tab_formatter(source, language, class_name, options, md, **kwargs):
    """Format source as tabs."""
    global TAB_COUNT
    TAB_COUNT += 1
    source = [chunk.split("\n", 1) for chunk in source.split("-----\n")]

    classes = kwargs["classes"]
    id_value = kwargs["id_value"]
    attrs = kwargs["attrs"]

    if class_name:
        classes.insert(0, class_name)

    id_value = ' id="{}"'.format(id_value) if id_value else ""
    classes = " {}".format(" ".join(classes)) if classes else ""
    attrs = (
        " " + " ".join('{k}="{v}"'.format(k=k, v=v) for k, v in attrs.items())
        if attrs
        else ""
    )

    tabs = ""
    tab_panes = ""
    for i, (tab_name, tab_content) in enumerate(source):
        tab_id = re.sub(r"\s", "_", tab_name).lower() + str(TAB_COUNT)
        tabs += TAB_TEMPLATE.format(
            tab_id,
            tab_name,
            " active" if i == 0 else "",
            "true" if i == 0 else "false",
        )
        tab_panes += TAB_PANE_TEMPLATE.format(
            tab_id,
            markdown.markdown(tab_content, extensions=["fenced_code", "meta"]),
            " active" if i == 0 else "",
        )

    return TAB_OUTER_TEMPLATE.format(
        tabs=tabs, tab_panes=tab_panes, id_value=id_value, classes=classes
    )


extension_configs = {
    "pymdownx.superfences": {
        "custom_fences": [
            {
                "name": "bootstrap-tabs",
                "class": "bootstrap-tabs",
                "format": tab_formatter,
            }
        ]
    }
}


def convert_all_markdown_files():
    for path in CONTENT.glob("docs/*.md"):
        template = template_from_markdown(path, title_suffix=" - dbc docs")
        with open(DEST / "docs" / path.name.replace(".md", ".html"), "w") as f:
            f.write(template)
    for path in CONTENT.glob("*.md"):
        template = template_from_markdown(
            path, template=CHANGELOG_HTML_TEMPLATE
        )
        with open(DEST / path.name.replace(".md", ".html"), "w") as f:
            f.write(template)


def template_from_markdown(path, title_suffix="", template=DOCS_HTML_TEMPLATE):
    md = markdown.Markdown(
        extensions=["fenced_code", "meta", "pymdownx.superfences"],
        extension_configs=extension_configs,
    )
    text = path.read_text()
    template = template.replace("<CONTENT>", md.convert(text))
    # Honour the caller-supplied suffix instead of hardcoding " - dbc docs",
    # which previously left the title_suffix parameter unused.
    return template.replace("<TITLE>", f"{md.Meta['title'][0]}{title_suffix}")


if __name__ == "__main__":
    convert_all_markdown_files()
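# ---------------------------------------------------------------------------
# Input sketch (not part of the original script): the minimal shape of a
# markdown file this converter expects, with a "Title:" meta line that
# template_from_markdown reads via md.Meta. The file name is hypothetical.
# ---------------------------------------------------------------------------
# content/docs/quickstart.md:
#
#   Title: Quickstart
#
#   Some markdown content...
#
# Running convert_all_markdown_files() would then emit
# templates/generated/docs/quickstart.html extending docs.html.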
import re

import numpy as np

from .exceptions import expected, typename
from .formats import parse_number
from .matrix import create_array
from .preprocessors import planner, preprocessor, _drop_weak_columns

int_type = (int, np.int64)


def clean_numeric_labels(name, values, receiver):
    """Coerces the label column to numeric values, warning when any must be converted."""
    if values.dtype in (float, int, bool):
        return values

    NumType = (float, int, np.int64, np.float64)
    if all(isinstance(i, NumType) for i in values):
        return values

    count = sum(1 for i in values if not isinstance(i, NumType))
    suffix = '' if count == 1 else 's'
    receiver.warn(
        f'Converting {count} value{suffix} in the "{name}" column'
        f' to number{suffix}.'
    )

    def conv(x):
        if isinstance(x, NumType):
            return x
        if isinstance(x, str):
            try:
                return parse_number(x)
            except Exception:
                pass
        return 0.0

    return np.array([conv(x) for x in values])


@planner
def clean_dataset(ctx):
    """Cleans the context's dataset, and records the steps for later playback.

    This function performs a handful of steps:

    - Drops columns that contain unexpected values. A value is unexpected if
      it is not a boolean, a number, a string, or None.
    - Drops columns that only contain None values.
    - Whenever possible, coerces columns to have numeric types.
    - In columns of strings, replaces None values with the empty string.
    - In a column of numbers, if any numbers are missing, replaces the
      missing number with zero, and adds a second column of booleans to
      indicate which values were missing.
    - Replaces a column of mixed strings and numbers with two columns: one
      for the string values and another for the number values.

    After running these steps, each column in the dataset should have a
    uniform type for all the values in that column. For example, if a column
    has one boolean value, then all the values in the column will be
    booleans.

    Parameters:
        ctx (RecordingContext): The current context.
    """
    for col in ctx.matrix.columns:
        _clean_column(ctx, col)


def _clean_column(ctx, col):
    # If numpy inferred a real dtype for this column, then it's clean enough.
    # TODO: Consider bipartitioning columns with nan values.
    if col.dtype != object:
        return

    index = ctx.matrix.columns.index(col)
    values = col.values
    num_values = len(values)

    num_bools = sum(1 for i in values if isinstance(i, bool))
    num_floats = sum(1 for i in values if isinstance(i, float))
    num_ints = sum(1 for i in values if isinstance(i, int_type) and not isinstance(i, bool))
    num_none = sum(1 for i in values if i is None)
    num_strings = sum(1 for i in values if isinstance(i, str))

    computed_total = num_bools + num_floats + num_ints + num_none + num_strings
    if computed_total != num_values:
        try:
            found = {typename(i) for i in values
                     if i is not None
                     and not isinstance(i, (bool, int, float, str, np.int64))}
        except Exception:
            found = 'unexpected values'
        ctx.receiver.warn(f'Dropping column "{col.name}". A column must only'
                          f' contain booleans, numbers, and strings. Received: {found}.')
        _drop_weak_columns(ctx, [index])
        return

    # Record the number of values for each type.
    counts = {bool: num_bools, float: num_floats, int: num_ints}

    # If we somehow got an array of primitives with dtype == object, then just
    # coerce it to the appropriate type.
    for typ, num in counts.items():
        if num == num_values:
            _coerce_column(ctx, index, typ)
            return

    # If we have all None values, then drop this column.
    if num_none == num_values:
        ctx.receiver.warn(f'Dropping column of all None values: {col.name}')
        _drop_weak_columns(ctx, [index])
        return

    # If we have some strings, see if we can convert them all to numbers.
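    # (For example, values like '$5', '3%', and '1,000' are all coercible,
    # while 'foo' is not; the exact rules live in
    # _can_coerce_all_strings_to_numbers below.)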
    if num_strings > 0 and _can_coerce_all_strings_to_numbers(values):
        _coerce_strings_to_numbers(ctx, index)
        _clean_column(ctx, col)
        return

    # If we have all strings, then we're clean. Just leave this column alone.
    if num_strings == num_values:
        assert all(isinstance(i, str) for i in values)
        return

    # If we have all strings, but some are None, then use the empty string.
    # (And don't warn the user about the None values in this case.)
    if num_strings + num_none == num_values:
        _replace_none_values(ctx, index, '')
        return

    # If we have some ints and some floats, then coerce the ints to floats
    # and recur.
    if num_ints > 0 and num_floats > 0:
        _coerce_ints_to_floats(ctx, index)
        _clean_column(ctx, col)
        return

    # If any strings are blank, then replace them with the empty string and
    # recur.
    if any(i.isspace() for i in values if isinstance(i, str)):
        _replace_blank_strings(ctx, index)
        _clean_column(ctx, col)
        return

    # If all of the strings are the empty string, then replace the empty
    # strings with None and recur.
    if num_strings > 0 and all(i == '' for i in values if isinstance(i, str)):
        _replace_empty_strings(ctx, index, None)
        _clean_column(ctx, col)
        return

    # If we have all primitive values, but some are None, then replace None
    # values with the appropriate zero value. Add a boolean column that records
    # which values were missing.
    for typ, num in counts.items():
        if num + num_none == num_values:
            ctx.receiver.warn(
                f'Column {repr(col.name)} has {num_none} missing'
                f' value{"" if num_none == 1 else "s"}.'
            )
            _flag_missing_values(ctx, index, typ(0))
            return

    # Now we know we have some strings, some numbers, and maybe some Nones.
    # We also know that the nonempty strings cannot all be coerced into
    # numbers (but maybe some can be coerced).

    # First, coerce as many of the strings as we can into numbers. Then split
    # this column into two columns: one for numbers and another for strings.
    _coerce_strings_to_numbers(ctx, index)
    _bipartition_strings(ctx, index)


@preprocessor
def _coerce_column(ctx, index, to_type):
    """Converts all the items in a column to a specific data type."""
    col = ctx.matrix.columns[index]
    col.coerce(to_type)


_string_to_number_regex = re.compile(r'''
    ^\s*          # leading spaces
    \$*           # any number of dollar sign characters
    (
        [\-\+]?   # optional sign
        (
            \.[0-9_]+        # decimal part without integral part
            |                # OR
            [0-9,_]+         # integral part
            (\.[0-9_]*)?     # and optional decimal part
        )
    )
    \%*           # any number of percent characters
    \s*$          # trailing spaces
''', re.VERBOSE)


def _can_coerce_all_strings_to_numbers(values):
    """Tests if each string in the collection can be coerced to a number.

    >>> _can_coerce_all_strings_to_numbers(['1', '2', '3'])
    True
    >>> _can_coerce_all_strings_to_numbers(['1.0', '$2', '3%', '.4', '5.', '-6', '+7'])
    True
    >>> _can_coerce_all_strings_to_numbers(['0,000', '1_234_567', '$-8.%', '_9'])
    True
    >>> _can_coerce_all_strings_to_numbers([None, 1, 2, '3'])
    True
    >>> _can_coerce_all_strings_to_numbers([None, 1, 2, '3', 'foo'])
    False
    >>> _can_coerce_all_strings_to_numbers([])
    True
    >>> _can_coerce_all_strings_to_numbers([None, (), object()])
    True
    >>> _can_coerce_all_strings_to_numbers(['1.2.3'])
    False
    >>> _can_coerce_all_strings_to_numbers(['1.2,'])
    False
    >>> _can_coerce_all_strings_to_numbers(['1+2'])
    False
    """
    return all(_string_to_number_regex.match(i) for i in values if isinstance(i, str))


@preprocessor
def _coerce_strings_to_numbers(ctx, index):
    """
    Converts textual data to numerical data whenever possible.

    For example, this function converts text like "$5" to the number 5.
""" col = ctx.matrix.columns[index] new_values = [_coerce_string_to_number(i) if isinstance(i, str) else i for i in col.values] col.values = create_array(new_values) def _coerce_string_to_number(obj): assert isinstance(obj, str) # Remove leading and trailing whitespace. obj = obj.strip() # Replace the empty string with None. if obj == '': return None try: return parse_number(obj) except Exception: pass # Pull out the number part and try parsing it. try: m = _string_to_number_regex.match(obj) return parse_number(m.group(1)) if m else obj except Exception: # If we still can't parse it, then just admit defeat. return obj @preprocessor def _replace_none_values(ctx, index, replacement): """Treats missing data as blank text fields.""" col = ctx.matrix.columns[index] new_values = [replacement if i is None else i for i in col.values] col.values = create_array(new_values) @preprocessor def _coerce_ints_to_floats(ctx, index): """Turns integer values into decimals, to make columns more uniform.""" col = ctx.matrix.columns[index] new_values = [float(i) if isinstance(i, int_type) else i for i in col.values] col.values = create_array(new_values) @preprocessor def _replace_blank_strings(ctx, index): """Treats text that only contains space characters as blank fields.""" col = ctx.matrix.columns[index] new_values = [ '' if isinstance(i, str) and i.isspace() else i for i in col.values ] col.values = create_array(new_values) @preprocessor def _replace_empty_strings(ctx, index, replacement): """Treats blank text fields as missing data.""" col = ctx.matrix.columns[index] new_values = [ replacement if isinstance(i, str) and i == '' else i for i in col.values ] col.values = create_array(new_values) @preprocessor def _flag_missing_values(ctx, index, replacement): """ Takes a column that contains missing data, and adds a new column of true/false values to indicate which rows contain missing data. """ col = ctx.matrix.columns[index] new_values = [replacement if i is None else i for i in col.values] ctx.matrix.drop_columns_by_index([index]) ctx.matrix.columns.append(col.copy_with(create_array(new_values))) ctx.matrix.append_column( values=col.values != None, formula=['is-defined', col], role='encoded', is_original=True, ) @preprocessor def _bipartition_strings(ctx, index): """ Takes a column that contains both text and numbers and turns it into two columns: one that contains only the text values, and another that contains only the numbers. """ # For None values, just put a 0 in the number-column and an empty string # in the string-column. col = ctx.matrix.columns[index] ctx.matrix.drop_columns_by_index([index]) is_num = lambda x: x is not None and not isinstance(x, str) numbers = [i if is_num(i) else 0 for i in col.values] strings = [i if isinstance(i, str) else '' for i in col.values] ctx.matrix.append_column( values=create_array(numbers), formula=['number', col], role=None, is_original=True, ) ctx.matrix.append_column( values=create_array(strings), formula=['string', col], role=None, is_original=True, )
from abc import abstractmethod, ABC
from copy import copy
from datetime import datetime
from pathlib import Path
from shutil import copyfileobj
from typing import TYPE_CHECKING, Optional

from . import Stream

if TYPE_CHECKING:
    from . import File


class Storage(ABC):
    @abstractmethod
    def get_stream(self, file: "File") -> Stream:
        """
        :raises OSError
        """

    @abstractmethod
    def store(self, file: "File") -> None:
        """
        :raises OSError
        """

    @abstractmethod
    def mark_as_deleted(self, file: "File", missing_ok=False) -> None:
        """
        :raises OSError
        """

    @abstractmethod
    def delete_marked(self, file: "File", missing_ok=False) -> None:
        """
        :raises OSError
        """

    @abstractmethod
    def delete(self, file: "File", missing_ok=False) -> None:
        """
        :raises OSError
        """

    @abstractmethod
    def unmark_delete(self, file: "File", missing_ok=True) -> None:
        """
        :raises OSError
        """

    @abstractmethod
    def rename(self, file: "File", path: str) -> "File":
        """
        :raises OSError
        """


class FileStorage(Storage):
    _base_path: Path
    _tmp_deleted_path: Path
    _deleted_path: Optional[Path]
    _seek_to_start_before_store: bool
    _rename_instead_delete: bool
    _clean_empty_sub_directory: bool
    _base_directories: set[Path]

    def __init__(
        self,
        base_path: Path,
        seek_to_start_before_store=True,
        rename_instead_delete: bool = False,
        clean_empty_sub_directory: bool = True
    ):
        self._base_path = self.create_directory(base_path)
        self._tmp_deleted_path = self.create_directory(base_path / "tmp_deleted")
        self._base_directories = {self._base_path, self._tmp_deleted_path}
        if rename_instead_delete:
            self._deleted_path = self.create_directory(base_path / "deleted")
            self._base_directories.add(self._deleted_path)
        self._seek_to_start_before_store = seek_to_start_before_store
        self._rename_instead_delete = rename_instead_delete
        self._clean_empty_sub_directory = clean_empty_sub_directory

    def create_directory(self, target: Path) -> Path:
        target.mkdir(parents=True, exist_ok=True)
        return target

    def get_stream(self, file: "File") -> Stream:
        mode = 'rb' if file.is_binary else 'r'
        return (self._base_path / file.get_name()).open(mode)

    def store(self, file: "File") -> None:
        mode = 'wb' if file.is_binary else 'w'
        fn = self._base_path / file.get_name()
        self.create_directory(fn.parent)
        if fn.exists():
            raise FileExistsError(fn)
        if self._seek_to_start_before_store and file.get_stream().seekable():
            file.get_stream().seek(0)
        with fn.open(mode) as fp:
            copyfileobj(file.get_stream(), fp)

    def mark_as_deleted(self, file: "File", missing_ok=False) -> None:
        try:
            deleted_path = self._tmp_deleted_path / file.get_name()
            self.create_directory(deleted_path.parent)
            (self._base_path / file.get_name()).rename(deleted_path)
        except FileNotFoundError:
            if not missing_ok:
                raise

    def delete_marked(self, file: "File", missing_ok=False) -> None:
        self._delete(self._tmp_deleted_path / file.get_name(), file, missing_ok)
        self._clean_directory_if_empty((self._base_path / file.get_name()).parent)

    def delete(self, file: "File", missing_ok=False) -> None:
        self._delete(self._base_path / file.get_name(), file, missing_ok)

    def _delete(self, fn: Path, file: "File", missing_ok=False) -> None:
        try:
            if self._rename_instead_delete:
                # Prefix the stem with a timestamp so repeated deletes of the
                # same name do not collide. Path.with_stem returns a new path
                # rather than mutating, so the result must be assigned.
                deleted_path = self._deleted_path / file.get_name()
                deleted_path = deleted_path.with_stem(
                    f"deleted_at_{datetime.now().isoformat(timespec='seconds')}_{deleted_path.stem}"
                )
                self.create_directory(deleted_path.parent)
                fn.rename(deleted_path)
            else:
                fn.unlink()
            self._clean_directory_if_empty(fn.parent)
        except FileNotFoundError:
            if not missing_ok:
                raise

    def unmark_delete(self, file: "File", missing_ok=True) -> None:
        try:
            fn = self._base_path / file.get_name()
            self.create_directory(fn.parent)
            tmp_deleted_path = self._tmp_deleted_path / file.get_name()
            tmp_deleted_path.rename(fn)
            self._clean_directory_if_empty(tmp_deleted_path.parent)
        except FileNotFoundError:
            if not missing_ok:
                raise

    def rename(self, file: "File", path: str) -> "File":
        origin_path = self._base_path / file.get_name()
        new_path = self._base_path / path
        self.create_directory(new_path.parent)
        origin_path.rename(new_path)
        self._clean_directory_if_empty(origin_path.parent)
        new_file = copy(file)
        new_file.rename(path)
        file.get_model().file = new_file
        return new_file

    def _clean_directory_if_empty(self, directory: Path) -> None:
        """
        Recursively delete the directory if it is empty and is not one of the
        base directories.
        """
        if (
            directory.is_dir()
            and self._clean_empty_sub_directory
            and directory not in self._base_directories
        ):
            for _ in directory.iterdir():
                return  # directory is not empty
            else:
                parent_dir = directory.parent
                try:
                    directory.rmdir()
                except FileNotFoundError:
                    pass  # ignore
                self._clean_directory_if_empty(parent_dir)
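# A minimal usage sketch for FileStorage. The File protocol is only
# type-checked above, so the stub here -- get_name(), get_stream(), and
# is_binary -- is an assumption about its interface, not the real File class.
if __name__ == '__main__':
    import io
    import tempfile

    class _StubFile:
        is_binary = False

        def __init__(self, name: str, text: str):
            self._name = name
            self._stream = io.StringIO(text)

        def get_name(self) -> str:
            return self._name

        def get_stream(self) -> io.StringIO:
            return self._stream

    storage = FileStorage(Path(tempfile.mkdtemp()), rename_instead_delete=True)
    f = _StubFile('notes/hello.txt', 'hello world')
    storage.store(f)            # writes <base>/notes/hello.txt
    storage.mark_as_deleted(f)  # moves it under <base>/tmp_deleted/
    storage.unmark_delete(f)    # moves it back
    storage.delete(f)           # renames it into <base>/deleted/ with a timestamped stem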
from abc import ABC import pygame import numpy as np from tkinter import Tk from tkinter import messagebox from typing import Tuple, Optional, Dict from .maze import Maze from .base import Viewport, BoundType from ...enums import Colors from ...database import Database from ...sprites.object_to_color import ObjectToColor class MazeEditable(Maze, ABC): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.buttons_coordinates_matrix = np.empty((len(self.objects), 4)) self.manipulation_buttons: Dict[str, pygame.Rect] = {} def draw(self): self.draw_grid() self.draw_toolbox() toolbox_vp = self.viewports['toolbox'] toolbox_width = toolbox_vp.end_x - toolbox_vp.origin_x self.adjust_screen(self.draw, right_margin=toolbox_width) pygame.display.update() def get_toolbox_buttons_size(self) -> Tuple[int, int]: button_width = 100 button_height = button_width // 2 return button_width, button_height def get_toolbox_margins(self) -> Tuple[int, int, int, int]: left_margin = 50 top_margin = 0 right_margin = 50 bottom_margin = 0 return left_margin, top_margin, right_margin, bottom_margin def draw_toolbox(self) -> None: """ Creates the toolbar. """ button_width, button_height = self.get_toolbox_buttons_size() left_margin, top_margin, right_margin, bottom_margin = self.get_toolbox_margins() total_side_length = left_margin + button_width + right_margin # Reset buttons coordinates matrix self.buttons_coordinates_matrix = np.empty((len(self.objects), 4)) # Get toolbox viewport coordinates. # We want to stick this viewport to the right side of the grid. grid_viewport = self.viewports['grid'] # Define this viewport viewport_name = 'toolbox' viewport_origin_x = grid_viewport.end_x viewport_origin_y = grid_viewport.origin_y viewport_end_x = grid_viewport.end_x + total_side_length viewport_end_y = grid_viewport.end_y # Update viewport toolbox_viewport = Viewport( viewport_name, (viewport_origin_x, viewport_origin_y, viewport_end_x, viewport_end_y) ) self.viewports[viewport_name] = toolbox_viewport x = viewport_origin_x + left_margin # Add labels label = pygame.font.SysFont('calibri', 20) menu_label = label.render("ToolBox", True, Colors.BLACK) self.screen.blit(menu_label, (x, 5)) for i, item in enumerate(self.objects): # Compute position on the vertical axis y = viewport_origin_y + (i + 1) * button_height # Display label for this item title = label.render(item.name, True, Colors.WHITE) # Draw the rectangle. We'll keep it in order to know when the user clicks on it. rectangle = pygame.Rect(x, y, button_width, title.get_height()) color = ObjectToColor[item.name].value pygame.draw.rect(self.screen, color, rectangle) self.screen.blit(title, (x, y)) # Get coordinates origin_x = x origin_y = y end_x = origin_x + button_width end_y = origin_y + button_height # And update the matrix self.buttons_coordinates_matrix[i] = np.array([origin_x, origin_y, end_x, end_y]) # Starting from the window lower edge, we'll display the labels up `n` pixels n = 100 manipulation_labels_y = viewport_end_y - n save_label = label.render("Save", True, Colors.BLACK) save_label_x = x # Display label save_button = self.screen.blit(save_label, (save_label_x, manipulation_labels_y)) # And add the button to the manipulation buttons list. # We'll use the method `collidepoint` on it. 
        self.manipulation_buttons.update({'save': save_button})

        cancel_label = label.render("Cancel", True, Colors.BLACK)
        cancel_label_x = viewport_end_x - cancel_label.get_width() - right_margin
        cancel_button = self.screen.blit(cancel_label, (cancel_label_x, manipulation_labels_y))
        self.manipulation_buttons.update({'cancel': cancel_button})

        # We'll also display two dates:
        # 1. The level creation date
        # 2. The last time it was modified
        creation_intro_label = label.render("Created on", True, Colors.BLACK)
        creation_label = label.render(f'{self.level.creation_date}', True, Colors.BLACK)
        creation_intro_label_y = viewport_end_y - n - 100
        creation_label_y = creation_intro_label_y + creation_intro_label.get_height()

        last_modified_intro_label = label.render("Last modified on", True, Colors.BLACK)
        last_modified_label = label.render(f"{self.level.last_modification_date}", True, Colors.BLACK)
        last_modified_intro_label_y = creation_label_y + creation_label.get_height()
        last_modified_label_y = last_modified_intro_label_y + last_modified_intro_label.get_height()

        self.screen.blit(creation_intro_label, (x, creation_intro_label_y))
        self.screen.blit(creation_label, (x, creation_label_y))
        self.screen.blit(last_modified_intro_label, (x, last_modified_intro_label_y))
        self.screen.blit(last_modified_label, (x, last_modified_label_y))

    def get_button_bounds(self, x: int, y: int, z: Tuple[int, int]) -> BoundType:
        """
        Given `x` and `y` (coordinates), return a 4-tuple of integers
        delimiting the bounds of a toolbox button.
        """
        button_width, button_height = self.get_toolbox_buttons_size()
        left_margin, top_margin, _, _ = self.get_toolbox_margins()

        toolbox_viewport = self.viewports['toolbox']
        x_offset = toolbox_viewport.origin_x

        z_x, z_y = z
        remainder_x = x % x_offset % z_x
        remainder_y = y % z_y

        origin_x = x - remainder_x + left_margin
        origin_y = y - remainder_y + top_margin
        end_x = origin_x + button_width
        end_y = origin_y + button_height

        return origin_x, origin_y, end_x, end_y

    def get_clicked_button_index(self, x: int, y: int) -> Optional[int]:
        """
        Iterates through the buttons matrix and returns the index of the one
        the user clicked on, based on the coordinates of their input.

        :param int x:
        :param int y:
        :return int: The index of the button clicked on.
        """
        b = self.get_toolbox_buttons_size()
        searching_for = self.get_button_bounds(x, y, z=b)
        cell_index = np.where((self.buttons_coordinates_matrix == searching_for).all(axis=1))
        try:
            index = int(cell_index[0])
            return index
        except (TypeError, IndexError):
            return

    def get_clicked_cell_index(self, x: int, y: int) -> Tuple[int, int]:
        """
        Iterates through the cells matrix and returns the coordinates of the
        one the user clicked on, based on the coordinates of their input.
        """
        raise NotImplementedError()

    def save(self) -> None:
        anomalies = self.level.get_objects_occurrences_anomalies(self.objects)
        if len(anomalies) == 0:
            db = Database()
            db.update_level_content(self.level)
            self._running = False
        else:
            # Add popup to signal the issue
            Tk().wm_withdraw()
            formatted_anomalies = '\n'.join(
                [
                    f"{v['min']} < {object_name} ({v['current']}) < {v['max']}"
                    for object_name, v in anomalies.items()
                ]
            )
            messagebox.showwarning('Anomalies', f'Got errors: \n{formatted_anomalies}')

    def cancel(self) -> None:
        self._running = False

    def run(self) -> None:
        """
        Main loop.
""" self.draw() # Select the first item of the list as default (should be object `Empty`) selected_object = self.objects[0] while self._running: for event in pygame.event.get(): if event.type == pygame.QUIT: self._running = False break if event.type == pygame.VIDEORESIZE: self.resize(event.w, event.h, self.draw) mouse_x, mouse_y = pygame.mouse.get_pos() selected_viewport = self.get_selected_viewport(mouse_x, mouse_y) if selected_viewport.name == 'toolbox': # If in the toolbox area. if event.type == pygame.MOUSEBUTTONDOWN: for label, button in self.manipulation_buttons.items(): if button.collidepoint(mouse_x, mouse_y): if label == 'save': self.save() elif label == 'cancel': self.cancel() idx = self.get_clicked_button_index(mouse_x, mouse_y) if isinstance(idx, int): selected_object = self.objects[idx] if selected_viewport.name == 'grid': # If in the maze - grid - area. if event.type == pygame.MOUSEBUTTONDOWN: x, y = self.get_clicked_cell_index(mouse_x, mouse_y) # Set the cell's object in the level content if within limits. self.level.set_cell_object(x, y, selected_object) self.draw() pygame.display.update()
""" Module contains all functions working on users page. Functions: users_page() edit_user(id) delete_user(id) check_session() """ import os import sys import urllib.parse from flask_login import login_user, login_required from flask import render_template, request, redirect, Blueprint, session sys.path.append(os.path.abspath(os.path.join('..'))) from models.users import User ADMIN = User.query.get(1).login BASE_URL = 'http://127.0.0.1:5000/' api_users = Blueprint('api_users', __name__) @api_users.route('/users', methods=['POST', 'GET']) @login_required def users_page(): """ Function working on departments page: 1) adding new users if method "POST" received and session is used by admin 2) showing the table of the users :return: the template of the departments page """ if session.get('user') and session.get('user')[0] == ADMIN: users = User.query.all() if request.method == 'POST': login = request.form.get('login') password = request.form.get('password') data = f'?login={session['user'][0]}&password={session['user'][1]}' \ f'&new_login={urllib.parse.quote(login)}&new_password={urllib.parse.quote(password)}&page=True' return redirect('/api/users/add' + data) return render_template('users_for_admin.html', users=users) user = User.query.filter_by(login=session.get('user')[0]).first() return render_template('users.html', user=user) @api_users.route('/users/<int:id>/edit', methods=['GET', 'POST']) @login_required def edit_user(id): """ Function editing information about specific users :param id: id of the specific user an admin wants to change information about :return: return template of the users page or redirects to users page """ if session.get('user') and session.get('user')[0] == ADMIN: users = User.query.all() if User.query.get(id): if request.method == 'POST': login = request.form.get('new_login') password = request.form.get('new_password') data = f'?login={session['user'][0]}&password={session['user'][1]}' \ f'&id={id}&new_login={urllib.parse.quote(login)}&new_password={urllib.parse.quote(password)}&page=True' return redirect('/api/users/edit' + data) return render_template('users_for_admin.html', id=id, users=users) return redirect('/users') @api_users.route('/users/<int:id>/del') @login_required def delete_user(id): """ Function deleting specific user by its id :param id: id of the specific user an admin wants to delete :return: redirects user to the users page """ if session.get('user') and session.get('user')[0] == ADMIN: data = f'?login={session['user'][0]}&password={session['user'][1]}' \ f'&id={id}&page=True' return redirect('/api/users/del' + data) @api_users.before_request def check_session(): """ Function logging in user to the page if session has been already created. Else redirects to the main page. :return: None or redirect """ if session.get('user'): users = User.query.filter_by(login=session.get('user')[0]).all() login_user(users[0]) session.permanent = False else: return redirect('/')
""" Module contains all functions working on users page. Functions: users_page() edit_user(id) delete_user(id) check_session() """ import os import sys import urllib.parse from flask_login import login_user, login_required from flask import render_template, request, redirect, Blueprint, session sys.path.append(os.path.abspath(os.path.join('..'))) from models.users import User ADMIN = User.query.get(1).login BASE_URL = 'http://127.0.0.1:5000/' api_users = Blueprint('api_users', __name__) @api_users.route('/users', methods=['POST', 'GET']) @login_required def users_page(): """ Function working on departments page: 1) adding new users if method "POST" received and session is used by admin 2) showing the table of the users :return: the template of the departments page """ if session.get('user') and session.get('user')[0] == ADMIN: users = User.query.all() if request.method == 'POST': login = request.form.get('login') password = request.form.get('password') data = f'?login={session["user"][0]}&password={session["user"][1]}' \ f'&new_login={urllib.parse.quote(login)}&new_password={urllib.parse.quote(password)}&page=True' return redirect('/api/users/add' + data) return render_template('users_for_admin.html', users=users) user = User.query.filter_by(login=session.get('user')[0]).first() return render_template('users.html', user=user) @api_users.route('/users/<int:id>/edit', methods=['GET', 'POST']) @login_required def edit_user(id): """ Function editing information about specific users :param id: id of the specific user an admin wants to change information about :return: return template of the users page or redirects to users page """ if session.get('user') and session.get('user')[0] == ADMIN: users = User.query.all() if User.query.get(id): if request.method == 'POST': login = request.form.get('new_login') password = request.form.get('new_password') data = f'?login={session["user"][0]}&password={session["user"][1]}' \ f'&id={id}&new_login={urllib.parse.quote(login)}&new_password={urllib.parse.quote(password)}&page=True' return redirect('/api/users/edit' + data) return render_template('users_for_admin.html', id=id, users=users) return redirect('/users') @api_users.route('/users/<int:id>/del') @login_required def delete_user(id): """ Function deleting specific user by its id :param id: id of the specific user an admin wants to delete :return: redirects user to the users page """ if session.get('user') and session.get('user')[0] == ADMIN: data = f'?login={session["user"][0]}&password={session["user"][1]}' \ f'&id={id}&page=True' return redirect('/api/users/del' + data) @api_users.before_request def check_session(): """ Function logging in user to the page if session has been already created. Else redirects to the main page. :return: None or redirect """ if session.get('user'): users = User.query.filter_by(login=session.get('user')[0]).all() login_user(users[0]) session.permanent = False else: return redirect('/')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 22/03/2020
"""

import copy
from collections import namedtuple
from enum import Enum
from pathlib import Path
from typing import Any, List, Mapping, Sequence, Tuple, Union

import torch
import torch.utils.data
import torchvision
from numpy.core.multiarray import ndarray
from pycocotools import mask
from pycocotools.coco import COCO
from torch.utils.data import Dataset
from torchvision.transforms import Compose

__all__ = [
    "FilterAndRemapCocoCategories",
    "convert_coco_poly_to_mask",
    "ConvertCocoPolysToMask",
    "_coco_remove_images_without_annotations",
    "convert_to_coco_api",
    "get_coco_api_from_dataset",
    "CocoDetection",
    "get_coco_ins",
    "get_coco_kp",
    "CocoMask",
    "CocoPolyAnnotation",
    "CocoModeEnum",
]

from draugr.torch_utilities import Split
from draugr.torch_utilities.tensors.tensor_container import NamedTensorTuple


class CocoModeEnum(Enum):
    instances = "instances"
    person_keypoints = "person_keypoints"


CocoPolyAnnotation = namedtuple(
    "CocoPolyAno",
    (
        "image_id",
        "bbox",
        "category_id",
        "area",
        "iscrowd",
        "id",
        "segmentation",
        "keypoints",
        "num_keypoints",
    ),
)

CocoMask = namedtuple(
    "CocoMask", ("boxes", "labels", "masks", "image_id", "area", "iscrowd", "keypoints")
)


class FilterAndRemapCocoCategories(object):
    """ """

    def __init__(self, categories: List[str], remap: bool = True):
        self._categories = categories
        self._remap = remap

    def __call__(self, image, target: Mapping[str, Any]) -> Tuple:
        anno = target["annotations"]
        anno = [obj for obj in anno if obj["category_id"] in self._categories]
        if not self._remap:
            target["annotations"] = anno
            return image, target
        anno = copy.deepcopy(anno)
        for obj in anno:
            obj["category_id"] = self._categories.index(obj["category_id"])
        target["annotations"] = anno
        return image, target


def convert_coco_poly_to_mask(
    segmentations: Sequence, height: int, width: int
) -> NamedTensorTuple:
    """

    :param segmentations:
    :type segmentations:
    :param height:
    :type height:
    :param width:
    :type width:
    :return:
    :rtype:
    """
    masks = []
    for polygons in segmentations:
        rles = mask.frPyObjects(polygons, height, width)
        # Keep the decoded array under a local name so the pycocotools `mask`
        # module stays accessible on later iterations.
        m = mask.decode(rles)
        if len(m.shape) < 3:
            m = m[..., None]
        m = torch.as_tensor(m, dtype=torch.uint8)
        m = m.any(dim=2)
        masks.append(m)
    if masks:
        masks = torch.stack(masks, dim=0)
    else:
        masks = torch.zeros((0, height, width), dtype=torch.uint8)
    return NamedTensorTuple(masks=masks)


class ConvertCocoPolysToMask(object):
    def __call__(self, image: ndarray, target: Mapping[str, Any]) -> Tuple:
        w, h = image.size

        image_id = torch.tensor([target["image_id"]])

        # The raw COCO annotations are plain dicts as loaded by pycocotools.
        anno = [obj for obj in target["annotations"] if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        # guard against no boxes via resizing
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
        boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)

        classes = torch.tensor([obj["category_id"] for obj in anno], dtype=torch.int64)

        masks = convert_coco_poly_to_mask([obj["segmentation"] for obj in anno], h, w)

        keypoints = None
        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
            num_keypoints = keypoints.shape[0]
            if num_keypoints:
                keypoints = keypoints.view(num_keypoints, -1, 3)

        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
        target = CocoMask(
            boxes=boxes[keep],
            labels=classes[keep],
            masks=masks[keep],
            image_id=image_id,
            area=torch.tensor([obj["area"] for obj in anno]),
            iscrowd=torch.tensor([obj["iscrowd"] for obj in anno]),
            keypoints=keypoints,
        )

        return image, target


def _coco_remove_images_without_annotations(
    dataset: Dataset,
    category_list: Sequence[int] = None,
    min_keypoints_per_image: int = 10,
) -> Dataset:
    """Return a Subset of ``dataset`` containing only images with usable
    annotations (non-degenerate boxes and, for keypoint data, enough visible
    keypoints)."""

    def _has_only_empty_bbox(anno: List[dict]) -> bool:
        return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)

    def _count_visible_keypoints(anno: List[dict]) -> int:
        return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)

    def _has_valid_annotation(anno: List[dict]) -> bool:
        # if it's empty, there is no annotation
        if len(anno) == 0:
            return False
        # if all boxes have close to zero area, there is no annotation
        if _has_only_empty_bbox(anno):
            return False
        # the keypoints task has a slightly different criterion for
        # considering an annotation valid
        if "keypoints" not in anno[0]:
            return True
        # for keypoint detection tasks, only consider images valid if they
        # contain at least min_keypoints_per_image visible keypoints
        if _count_visible_keypoints(anno) >= min_keypoints_per_image:
            return True
        return False

    assert isinstance(dataset, torchvision.datasets.CocoDetection)
    ids = []
    for ds_idx, img_id in enumerate(dataset.ids):
        ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)
        anno = dataset.coco.loadAnns(ann_ids)  # pycocotools returns plain dicts
        if category_list:
            anno = [obj for obj in anno if obj["category_id"] in category_list]
        if _has_valid_annotation(anno):
            ids.append(ds_idx)

    dataset = torch.utils.data.Subset(dataset, ids)
    return dataset


def convert_to_coco_api(ds):
    """Build a pycocotools COCO object from a dataset yielding CocoMask targets.

    :param ds: dataset whose items are (image_tensor, CocoMask) pairs
    :return: a COCO api object indexing the dataset's annotations
    """
    coco_ds = COCO()
    ann_id = 0
    dataset = {"images": [], "categories": [], "annotations": []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets.image_id.item()
        dataset["images"].append(
            {"id": image_id, "height": img.shape[-2], "width": img.shape[-1]}
        )
        bboxes = targets.boxes
        bboxes[:, 2:] -= bboxes[:, :2]  # (x0, y0, x1, y1) -> (x, y, w, h)
        bboxes = bboxes.tolist()
        labels = targets.labels.tolist()
        areas = targets.area.tolist()
        iscrowd = targets.iscrowd.tolist()
        masks = targets.masks
        # make masks Fortran contiguous for coco_mask
        masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        keypoints = targets.keypoints
        if keypoints is not None:
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = CocoPolyAnnotation(
                image_id=image_id,
                bbox=bboxes[i],
                category_id=labels[i],
                area=areas[i],
                iscrowd=iscrowd[i],
                id=ann_id,
                segmentation=coco_mask.encode(masks[i].numpy()),
                keypoints=keypoints[i] if keypoints is not None else None,
                num_keypoints=(
                    sum(k != 0 for k in keypoints[i][2::3])
                    if keypoints is not None
                    else None
                ),
            )
            categories.add(labels[i])
            # pycocotools indexes annotations by key, so store plain dicts
            dataset["annotations"].append(ann._asdict())
            ann_id += 1
    dataset["categories"] = [{"id": i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds


def get_coco_api_from_dataset(
    dataset: Union[torch.utils.data.Subset, torchvision.datasets.CocoDetection]
) -> COCO:
    """Return the COCO api backing ``dataset``, unwrapping nested Subsets, or
    build one with convert_to_coco_api otherwise.

    :param dataset: a CocoDetection dataset, possibly wrapped in Subsets
    :return: a pycocotools COCO api object
    """
    for _ in range(10):
        if isinstance(dataset, torchvision.datasets.CocoDetection):
            break
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
    if isinstance(dataset, torchvision.datasets.CocoDetection):
        return dataset.coco
    return convert_to_coco_api(dataset)


class CocoDetection(torchvision.datasets.CocoDetection):
    """CocoDetection variant that packs the image id into the target and
    applies transforms to the (image, target) pair."""

    def __init__(self, img_folder, ann_file, transforms):
        super().__init__(img_folder, ann_file)
        self._transforms = transforms

    def __getitem__(self, idx):
        img, target = super().__getitem__(idx)
        image_id = self.ids[idx]
        target = NamedTensorTuple(image_id=image_id, annotations=target)
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target


def get_coco_ins(
    root_path: Path,
    image_set: Split,
    transforms,
    mode: CocoModeEnum = CocoModeEnum.instances,
):
    """Build a COCO2017 detection dataset for the given split.

    :param root_path: COCO root directory containing the image folders and
        the ``annotations`` directory
    :param image_set: split to load (Training or Validation)
    :param transforms: extra transforms applied after ConvertCocoPolysToMask
    :param mode: annotation flavour, instances or person_keypoints
    :return: a (possibly filtered) CocoDetection dataset
    """
    assert image_set in Split
    assert image_set != Split.Testing
    annotations_path = Path("annotations")
    # use mode.value: formatting the enum member itself would yield
    # "CocoModeEnum.instances" instead of "instances"
    PATHS = {
        Split.Training: (
            "train2017",
            annotations_path / f"{mode.value}_train2017.json",
        ),
        Split.Validation: (
            "val2017",
            annotations_path / f"{mode.value}_val2017.json",
        ),
    }

    t = [ConvertCocoPolysToMask()]

    if transforms is not None:
        t.append(transforms)
    transforms = Compose(t)

    img_folder, ann_file = PATHS[image_set]
    dataset = CocoDetection(
        root_path / img_folder, root_path / ann_file, transforms=transforms
    )

    if image_set == Split.Training:
        dataset = _coco_remove_images_without_annotations(dataset)

    # dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])

    return dataset


def get_coco_kp(root, image_set, transforms):
    """Build the COCO2017 person-keypoints dataset for the given split.

    :param root: COCO root directory
    :param image_set: split to load
    :param transforms: extra transforms applied after ConvertCocoPolysToMask
    :return: the keypoints dataset
    """
    return get_coco_ins(root, image_set, transforms, mode=CocoModeEnum.person_keypoints)
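
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the module): builds the
# instance-segmentation dataset and fetches the COCO api used for evaluation.
# The root path below is a hypothetical placeholder; it assumes the standard
# COCO2017 layout (train2017/, val2017/, annotations/).

if __name__ == "__main__":
    coco_root = Path("/data/coco2017")  # hypothetical path, adapt as needed
    train_ds = get_coco_ins(coco_root, Split.Training, transforms=None)
    print(f"{len(train_ds)} training images with usable annotations")
    img, tgt = train_ds[0]  # tgt is a CocoMask namedtuple of tensors
    print(f"first sample: {tgt.boxes.shape[0]} boxes, masks {tuple(tgt.masks.shape)}")
    coco_api = get_coco_api_from_dataset(train_ds)  # for pycocotools evaluators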
import torch
import ipywidgets as widgets
import numpy as np
import matplotlib.pyplot as plt

plt.rcParams.update({'font.size': 30, 'legend.fontsize': 20})
plt.rc('font', size=25)
plt.rc('axes', titlesize=25)


def format_attention(attention):
    squeezed = []
    for layer_attention in attention:
        # 1 x num_heads x seq_len x seq_len
        if len(layer_attention.shape) != 4:
            raise ValueError("The attention tensor does not have the correct number of dimensions. Make sure you set "
                             "output_attentions=True when initializing your model.")
        squeezed.append(layer_attention.squeeze(0))
    # num_layers x num_heads x seq_len x seq_len
    return torch.stack(squeezed)


def format_special_chars(tokens):
    return [t.replace('Ġ', '').replace('▁', ' ').replace('</w>', '') for t in tokens]


def drop_down(options, value=None, description='Select:', disabled=False):
    widget = widgets.Dropdown(
        options=options,
        value=value,
        description=description,
        disabled=disabled,
    )
    return widget


def print_sentence(sent_a, sent_b=None):
    if sent_b is None:
        print(f"Sentence: {sent_a}")
    else:
        print(f"Sentence: {sent_a}\nSentence b: {sent_b}")


def sentence_index(luke_data, sentence_selected, entity):
    # returns the index of the example matching both the sentence and the
    # entity, or an empty list if no example matches
    index = []
    for i, example in enumerate(luke_data):
        if sentence_selected == luke_data[example]["sentence"] and luke_data[example]["entity"] == entity:
            index = i
    return index


def get_entity_string(data):
    entity_vector = [data[sent]["entity_position_ids"][0][0] for sent in data]
    entity_index = [vector[vector > 0] for vector in entity_vector]
    tokens = [format_special_chars(data[sent]["tokens"]) for sent in data]
    sentences = [data[sent]["sentence"] for sent in data.keys()]
    for i, sent in enumerate(data):
        data[sent]["entity"] = " ".join(tokens[i][entity_index[i][1]:entity_index[i][-1]])
        data[sent]["sentence_with_entity"] = sentences[i] + f' [entity:{data[sent]["entity"]}]'
    return data


def only_mask_attention(output_attention):
    # note: modifies the attention tensors in place; zeroes every attention
    # row except the second-to-last position
    zero_output_attention = output_attention
    for i, attention in enumerate(zero_output_attention):
        for ii, layer in enumerate(attention):
            zero_output_attention[i][ii][:-2] = zero_output_attention[i][ii][:-2]*0
            zero_output_attention[i][ii][-1] = zero_output_attention[i][ii][-1]*0
    return zero_output_attention


def attention_token2token(tokens, attention, token1, token2):
    index_token1 = index_token2 = None
    for i, token in enumerate(tokens):
        if token == token1:
            index_token1 = i
        if token == token2:
            index_token2 = i
    if index_token1 is None or index_token2 is None:
        raise ValueError(f"Could not find {token1!r} and/or {token2!r} in the token list.")

    attn_token2token = []
    for layer in attention:
        attention_temp = []
        for head in layer:
            attention_temp.append(head[index_token1, index_token2])
        attn_token2token.append(attention_temp)
    return attn_token2token


def plot_attention_token2token(tokens, attention, token1, token2, color="blue"):
    attention_scores = attention_token2token(tokens, attention, token1, token2)
    attn_token2token_mean = [np.mean(attn_scores) for attn_scores in attention_scores]
    number_of_layers = len(attention_scores)
    number_of_heads = len(attention_scores[0])
    labels = np.arange(1, number_of_heads+1)
    colors = ["b", "g", "r", "c", "m", "y", "k", "brown", "pink", "gray", "olive", "purple", "tan", "lightcoral",
              "lime", "navy"]
    if len(colors) < number_of_heads:
        colors = ["b", "orange", "g", "r", "purple", "saddlebrown", "m", "grey", "olive", "c"]*number_of_heads

    # Begin figure:
    figure, ax = plt.subplots(figsize=(14, 9))

    # Plot attention heads:
    for i in range(number_of_layers):
        for ii in range(len(attention_scores[i])):
            ax.plot([i], attention_scores[i][ii], "*", color=colors[ii])
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width*0.6, box.height])
    legend1 = ax.legend(labels,
                        loc='center left', bbox_to_anchor=(1, 0.5), edgecolor="white",
                        title="Attention head", fontsize="medium")
    ax.add_artist(legend1)

    # Plot average line (raw string so the LaTeX arrow survives escaping):
    plt.plot(range(len(attention_scores)), attn_token2token_mean, "o-", color=color,
             label=rf"[{token1}] $\longrightarrow$ [{token2}]")
    plt.legend()

    # Finalize plot:
    ax.set_title("Token-to-Token attention", size="x-large")
    ax.set_xlabel("Layer", fontsize="large")
    ax.set_ylabel("Attention score", fontsize="large")
    ax.set_xticks(range(0, number_of_layers, 2))
    ax.set_xticklabels(range(0, number_of_layers, 2))
    plt.tick_params(axis='x', labelsize="large")
    plt.tick_params(axis='y', labelsize="large")
    plt.grid()
    plt.tight_layout(rect=[0, 0, 0.8, 1])
    return figure
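
# ---------------------------------------------------------------------------
# Self-contained demo (illustrative only): exercises the plotting helpers with
# random attention tensors standing in for real model output; the tokens,
# layer/head counts, and shapes below are made up.

if __name__ == "__main__":
    demo_tokens = ["The", "capital", "of", "Italy", "is", "[MASK]", "."]
    n_layers, n_heads, seq_len = 4, 8, len(demo_tokens)
    # one (1 x heads x seq x seq) tensor per layer, as a model would return
    raw_attention = [torch.rand(1, n_heads, seq_len, seq_len) for _ in range(n_layers)]
    demo_attention = format_attention(raw_attention)
    fig = plot_attention_token2token(demo_tokens, demo_attention, "Italy", "[MASK]")
    plt.show()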