# Olas-predict-dataset / scripts / tools_metrics.py
# NOTE: provenance of this copy — author cyberosa, commit e93a0eb
# ("updating scripts and fixing tools dataset formatting").
import pandas as pd
from utils import TMP_DIR, INC_TOOLS, ROOT_DIR
def get_error_data_by_market(tools_df: pd.DataFrame) -> pd.DataFrame:
    """Compute weekly error percentages per tool and market creator.

    Rows with ``error == -1`` are excluded. The result has one row per
    (tool, request_month_year_week, market_creator) with the counts of
    error code 0 and error code 1 as columns, plus the derived
    ``error_perc`` and ``total_requests`` columns.
    """
    known_status = tools_df[tools_df["error"] != -1]
    counts = known_status.groupby(
        ["tool", "request_month_year_week", "market_creator", "error"], sort=False
    ).size()
    # Pivot the error code level into columns 0 and 1; absent combinations
    # become zero counts.
    error = counts.unstack().fillna(0).reset_index()
    successes = error[0]
    errors = error[1]
    error["error_perc"] = (errors / (successes + errors)) * 100
    error["total_requests"] = successes + errors
    return error
def get_tool_winning_rate_by_market(tools_df: pd.DataFrame) -> pd.DataFrame:
    """Compute the weekly winning percentage per tool and market creator.

    Only rows with ``error == 0`` and a definite "Yes"/"No" in both
    ``currentAnswer`` and ``vote`` are considered; a row "wins" when the
    vote matches the current answer. Returns one row per
    (tool, request_month_year_week, market_creator) with loss/win counts
    (columns "0"/"1"), ``win_perc`` and ``total_request``. All column
    labels are strings (for Altair compatibility).
    """
    # Explicit copy so the column assignments below do not raise pandas
    # chained-assignment warnings (and stay correct under copy-on-write).
    tools_non_error = tools_df[tools_df["error"] == 0].copy()
    # Normalize lowercase answers before restricting to binary outcomes.
    tools_non_error["currentAnswer"] = tools_non_error["currentAnswer"].replace(
        {"no": "No", "yes": "Yes"}
    )
    tools_non_error = tools_non_error[
        tools_non_error["currentAnswer"].isin(["Yes", "No"])
    ]
    tools_non_error = tools_non_error[
        tools_non_error["vote"].isin(["Yes", "No"])
    ].copy()
    tools_non_error["win"] = (
        tools_non_error["currentAnswer"] == tools_non_error["vote"]
    ).astype(int)
    tools_non_error.columns = tools_non_error.columns.astype(str)
    wins = (
        tools_non_error.groupby(
            ["tool", "request_month_year_week", "market_creator", "win"], sort=False
        )
        .size()
        .unstack()
        .fillna(0)
    )
    # Guard against a week where every request won (or every one lost):
    # unstack would then produce only one of the 0/1 columns and the
    # arithmetic below would raise a KeyError.
    for outcome in (0, 1):
        if outcome not in wins.columns:
            wins[outcome] = 0.0
    wins["win_perc"] = (wins[1] / (wins[0] + wins[1])) * 100
    wins.reset_index(inplace=True)
    wins["total_request"] = wins[0] + wins[1]
    # Stringify all labels (incl. the 0/1 count columns) for Altair.
    wins.columns = wins.columns.astype(str)
    return wins
def prepare_tools(tools: pd.DataFrame, total_included: bool = True) -> pd.DataFrame:
    """Filter and enrich the raw tools dataframe for the metric computations.

    - keeps only tools listed in INC_TOOLS
    - parses ``request_time`` as UTC datetimes and sorts chronologically
    - adds ``request_date`` and ``request_month_year_week`` (week start,
      formatted "%b-%d-%Y")
    - when ``total_included``, appends a duplicate of every row with
      ``market_creator`` set to "all" so grouped metrics also get an
      overall series
    """
    # Copy after filtering so the column assignments below cannot warn
    # about chained assignment or write through to the caller's frame.
    tools = tools[tools["tool"].isin(INC_TOOLS)].copy()
    tools["request_time"] = pd.to_datetime(tools["request_time"], utc=True)
    tools = tools.sort_values(by="request_time", ascending=True)
    tools["request_date"] = tools["request_time"].dt.date
    # request_time is already datetime after the conversion above, so no
    # second to_datetime pass is needed before deriving the week label.
    tools["request_month_year_week"] = (
        tools["request_time"].dt.to_period("W").dt.start_time.dt.strftime("%b-%d-%Y")
    )
    if total_included:
        # Duplicate all rows under the synthetic "all" market creator.
        tools_all = tools.copy(deep=True)
        tools_all["market_creator"] = "all"
        tools = pd.concat([tools, tools_all], ignore_index=True)
        tools = tools.sort_values(by="request_time", ascending=True)
    return tools
def get_error_category(error_value: int):
    """Map an error code to its category label.

    0 -> "non_error", 1 -> "tool_error", anything else -> "request_error".
    """
    categories = {0: "non_error", 1: "tool_error"}
    return categories.get(error_value, "request_error")
def get_errors_by_mech_address(tools_df: pd.DataFrame) -> pd.DataFrame:
    """Weekly distribution of error categories per mech address.

    Returns one row per (week, mech_address, error code) with the request
    count, its category label, the address's weekly total and that row's
    share of the total as ``errors_percentage``.
    """
    # Per-week, per-address counts broken down by raw error code.
    weekly_errors = (
        tools_df.groupby(
            ["request_month_year_week", "mech_address", "error"], sort=False
        )
        .size()
        .reset_index(name="requests")
    )
    weekly_errors["error_cat"] = weekly_errors["error"].apply(get_error_category)

    # Per-week, per-address totals across all error codes.
    total_requests_errors = (
        tools_df.groupby(["request_month_year_week", "mech_address"], sort=False)
        .size()
        .reset_index(name="total_requests")
    )

    all_errors = weekly_errors.merge(
        total_requests_errors, on=["request_month_year_week", "mech_address"]
    )
    all_errors["errors_percentage"] = (
        all_errors["requests"] / all_errors["total_requests"]
    ) * 100
    return all_errors
def compute_tools_based_datasets():
    """Build and persist every tools-derived dataset under ROOT_DIR.

    Reads the raw tools parquet once, then derives and writes:
    error_by_markets.parquet, winning_df.parquet, errors_by_mech.parquet
    and the daily mech-request datasets. Returns None; on a read failure
    it logs the error and aborts.
    """
    print("Computing tools based datasets")
    try:
        # Read the source exactly once; the previous version re-read this
        # file before every derivation to shield against prepare_tools
        # mutating its input, which the copies below handle instead.
        raw_tools = pd.read_parquet(TMP_DIR / "tools.parquet")
    except Exception as e:
        print(f"Error reading tools parquet file {e}")
        return None

    # One prepared frame per total_included flavour, built from copies so
    # raw_tools stays pristine between derivations.
    tools_with_total = prepare_tools(raw_tools.copy(deep=True))
    tools_per_market = prepare_tools(raw_tools.copy(deep=True), total_included=False)

    # mech tool errors by markets
    print("Computing mech tool errors by markets")
    tool_error_by_markets = get_error_data_by_market(tools_df=tools_with_total)
    tool_error_by_markets.to_parquet(ROOT_DIR / "error_by_markets.parquet", index=False)

    winning_df = get_tool_winning_rate_by_market(tools_with_total)
    winning_df.to_parquet(ROOT_DIR / "winning_df.parquet", index=False)

    # all errors by mech address
    errors_by_mech = get_errors_by_mech_address(tools_df=tools_per_market)
    errors_by_mech.to_parquet(ROOT_DIR / "errors_by_mech.parquet", index=False)

    generate_daily_mech_requests_per_tool(tools_df=tools_per_market)
    generate_daily_mech_request_for_pearl_agents(tools_df=tools_per_market)
def generate_daily_mech_requests_per_tool(tools_df: pd.DataFrame) -> None:
    """Count mech requests per day, tool and market creator, and write the
    result to daily_mech_requests.parquet under ROOT_DIR."""
    grouping = ["request_date", "tool", "market_creator"]
    daily_counts = (
        tools_df.groupby(grouping)["request_id"]
        .count()
        .reset_index(name="total_mech_requests")
    )
    daily_counts.to_parquet(ROOT_DIR / "daily_mech_requests.parquet", index=False)
def generate_daily_mech_request_for_pearl_agents(tools_df: pd.DataFrame) -> None:
    """Count daily mech requests made by pearl agents only.

    Pearl agent safe addresses are read from pearl_agents.parquet under
    ROOT_DIR; tools rows whose ``trader_address`` is one of them are
    counted per (request_date, tool) and written to
    daily_mech_requests_by_pearl_agents.parquet.
    """
    pearl_agents = pd.read_parquet(ROOT_DIR / "pearl_agents.parquet")
    pearl_addresses = pearl_agents["safe_address"].unique()
    # Keep only requests issued by the pearl agent traders.
    is_pearl_trader = tools_df["trader_address"].isin(pearl_addresses)
    pearl_tools = tools_df[is_pearl_trader].copy()
    daily_counts = (
        pearl_tools.groupby(["request_date", "tool"])["request_id"]
        .count()
        .reset_index(name="total_mech_requests")
    )
    daily_counts.to_parquet(
        ROOT_DIR / "daily_mech_requests_by_pearl_agents.parquet", index=False
    )
# Script entry point: rebuild all tools-based datasets from the parquet files.
if __name__ == "__main__":
    compute_tools_based_datasets()