File size: 6,803 Bytes
cd451ea 1a99ad4 cd451ea 1a99ad4 cd451ea 2f097fe cd451ea 1a99ad4 cd451ea 1a99ad4 cd451ea e93a0eb 1a99ad4 e93a0eb 23d3748 cd451ea 23d3748 cd451ea e93a0eb cd451ea 2f097fe 1a99ad4 ca32d33 1a99ad4 ca32d33 1a99ad4 ca32d33 2f097fe ca32d33 1a99ad4 ca32d33 cd451ea 66dc719 cd451ea 23d3748 cd451ea 23d3748 2f097fe 1a99ad4 e93a0eb 2f097fe cd451ea 23d3748 cd451ea 1a99ad4 cd451ea 23d3748 2f097fe ca32d33 e93a0eb ca32d33 1a99ad4 ca32d33 1a99ad4 e93a0eb 1a99ad4 bdd8f0a 1a99ad4 e8b0cb2 1a99ad4 23d3748 e8b0cb2 23d3748 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 |
import pandas as pd
from utils import TMP_DIR, INC_TOOLS, ROOT_DIR
def get_error_data_by_market(tools_df: pd.DataFrame) -> pd.DataFrame:
    """Compute weekly error counts and error percentage per tool and market.

    Rows with ``error == -1`` (no error information) are excluded; the
    remaining requests are counted per (tool, week, market_creator) split
    into successes (error == 0) and failures (error == 1).

    :param tools_df: prepared tools dataframe (see ``prepare_tools``).
    :return: dataframe with one row per group, plus ``error_perc`` and
        ``total_requests`` columns.
    """
    mech_tool_errors = tools_df[tools_df["error"] != -1]
    error = (
        mech_tool_errors.groupby(
            ["tool", "request_month_year_week", "market_creator", "error"], sort=False
        )
        .size()
        .unstack()
        .fillna(0)
        .reset_index()
    )
    # Guarantee both outcome columns exist even when the data contains only
    # successes (0) or only failures (1); otherwise the arithmetic below
    # would raise a KeyError on the missing column.
    for outcome in (0, 1):
        if outcome not in error.columns:
            error[outcome] = 0
    error["error_perc"] = (error[1] / (error[0] + error[1])) * 100
    error["total_requests"] = error[0] + error[1]
    return error
def get_tool_winning_rate_by_market(tools_df: pd.DataFrame) -> pd.DataFrame:
    """Compute the weekly winning rate per tool and market creator.

    Only non-error requests (``error == 0``) whose market answer and vote
    are both "Yes"/"No" are considered; lowercase answers are normalised
    first. A request is a win when the vote equals the final answer.

    :param tools_df: prepared tools dataframe (see ``prepare_tools``).
    :return: dataframe with loss/win counts ("0"/"1"), ``win_perc`` and
        ``total_request`` per (tool, week, market_creator).
    """
    # Work on an explicit copy so the assignments below neither trigger
    # pandas' SettingWithCopyWarning nor silently fail to stick.
    tools_non_error = tools_df[tools_df["error"] == 0].copy()
    tools_non_error["currentAnswer"] = tools_non_error["currentAnswer"].replace(
        {"no": "No", "yes": "Yes"}
    )
    tools_non_error = tools_non_error[
        tools_non_error["currentAnswer"].isin(["Yes", "No"])
    ]
    tools_non_error = tools_non_error[tools_non_error["vote"].isin(["Yes", "No"])]
    tools_non_error["win"] = (
        tools_non_error["currentAnswer"] == tools_non_error["vote"]
    ).astype(int)
    tools_non_error.columns = tools_non_error.columns.astype(str)
    wins = (
        tools_non_error.groupby(
            ["tool", "request_month_year_week", "market_creator", "win"], sort=False
        )
        .size()
        .unstack()
        .fillna(0)
    )
    # Guarantee both outcome columns (0 = loss, 1 = win) exist even when the
    # data only contains one of them; otherwise a KeyError is raised below.
    for outcome in (0, 1):
        if outcome not in wins.columns:
            wins[outcome] = 0
    wins["win_perc"] = (wins[1] / (wins[0] + wins[1])) * 100
    wins.reset_index(inplace=True)
    # NOTE: kept as "total_request" (singular) for backward compatibility
    # with downstream consumers of winning_df.parquet.
    wins["total_request"] = wins[0] + wins[1]
    # String column names so the frame serialises cleanly (e.g. for Altair).
    wins.columns = wins.columns.astype(str)
    return wins
def prepare_tools(tools: pd.DataFrame, total_included: bool = True) -> pd.DataFrame:
    """Filter, sort and enrich the raw tools dataframe for analysis.

    Keeps only the relevant tools (``INC_TOOLS``), parses request timestamps
    as UTC, and adds ``request_date`` and ``request_month_year_week``
    columns. When ``total_included`` is True, every row is duplicated with
    ``market_creator`` set to "all" so downstream groupbys produce an
    overall total alongside per-market figures.

    :param tools: raw tools dataframe.
    :param total_included: whether to append the synthetic "all" market rows.
    :return: prepared dataframe sorted by ``request_time`` ascending.
    """
    # Copy after filtering so the column assignments below do not mutate the
    # caller's dataframe (and do not raise SettingWithCopyWarning).
    tools = tools[tools["tool"].isin(INC_TOOLS)].copy()
    tools["request_time"] = pd.to_datetime(tools["request_time"], utc=True)
    tools = tools.sort_values(by="request_time", ascending=True)
    tools["request_date"] = tools["request_time"].dt.date
    # Label each request with the start date of its week, e.g. "Jan-06-2025".
    tools["request_month_year_week"] = (
        pd.to_datetime(tools["request_time"])
        .dt.to_period("W")
        .dt.start_time.dt.strftime("%b-%d-%Y")
    )
    if total_included:
        # Duplicate all rows under a synthetic "all" market creator.
        tools_all = tools.copy(deep=True)
        tools_all["market_creator"] = "all"
        tools = pd.concat([tools, tools_all], ignore_index=True)
        tools = tools.sort_values(by="request_time", ascending=True)
    return tools
def get_error_category(error_value: int) -> str:
    """Map a numeric error code to its category name.

    0 -> "non_error", 1 -> "tool_error", anything else -> "request_error".
    """
    known_categories = {0: "non_error", 1: "tool_error"}
    return known_categories.get(error_value, "request_error")
def get_errors_by_mech_address(tools_df: pd.DataFrame) -> pd.DataFrame:
    """Gets the tool errors distribution by mech address in a weekly fashion.

    :param tools_df: prepared tools dataframe.
    :return: one row per (week, mech_address, error code) with the request
        count, error category, weekly total and the percentage of the total.
    """
    group_keys = ["request_month_year_week", "mech_address"]
    # Requests per error code within each (week, mech) group.
    per_error = (
        tools_df.groupby(group_keys + ["error"], sort=False)
        .size()
        .reset_index(name="requests")
    )
    per_error["error_cat"] = per_error["error"].map(
        lambda code: "non_error"
        if code == 0
        else ("tool_error" if code == 1 else "request_error")
    )
    # Total requests per (week, mech) group, regardless of error code.
    totals = (
        tools_df.groupby(group_keys, sort=False)
        .size()
        .reset_index(name="total_requests")
    )
    merged = per_error.merge(totals, on=group_keys)
    merged["errors_percentage"] = 100 * merged["requests"] / merged["total_requests"]
    return merged
def _load_prepared_tools(total_included: bool = True):
    """Read the raw tools parquet and prepare it; return None on read failure."""
    try:
        tools_df = pd.read_parquet(TMP_DIR / "tools.parquet")
    except Exception as e:
        print(f"Error reading tools parquet file {e}")
        return None
    return prepare_tools(tools_df, total_included=total_included)


def compute_tools_based_datasets():
    """Compute and persist every tools-based dataset as parquet files.

    Produces error_by_markets, winning_df, errors_by_mech and the two daily
    mech-request datasets under ROOT_DIR. Returns None early if the raw
    tools parquet cannot be read.
    """
    print("Computing tools based datasets")

    # mech tool errors by markets
    print("Computing mech tool errors by markets")
    tools_df = _load_prepared_tools()
    if tools_df is None:
        return None
    tool_error_by_markets = get_error_data_by_market(tools_df=tools_df)
    tool_error_by_markets.to_parquet(ROOT_DIR / "error_by_markets.parquet", index=False)

    # tool winning rate by market (re-read so each dataset starts from the raw file)
    tools_df = _load_prepared_tools()
    if tools_df is None:
        return None
    winning_df = get_tool_winning_rate_by_market(tools_df)
    winning_df.to_parquet(ROOT_DIR / "winning_df.parquet", index=False)

    # all errors by mech address (without the synthetic "all" market rows)
    tools_df = _load_prepared_tools(total_included=False)
    if tools_df is None:
        return None
    errors_by_mech = get_errors_by_mech_address(tools_df=tools_df)
    errors_by_mech.to_parquet(ROOT_DIR / "errors_by_mech.parquet", index=False)

    # daily mech request datasets (also without the "all" market rows)
    tools_df = _load_prepared_tools(total_included=False)
    if tools_df is None:
        return None
    generate_daily_mech_requests_per_tool(tools_df=tools_df)
    generate_daily_mech_request_for_pearl_agents(tools_df=tools_df)
def generate_daily_mech_requests_per_tool(tools_df: pd.DataFrame) -> None:
    """Generates the daily mech requests per tool.

    Counts request_ids per (request_date, tool, market_creator) and writes
    the result to daily_mech_requests.parquet under ROOT_DIR.
    """
    grouping = ["request_date", "tool", "market_creator"]
    daily_counts = (
        tools_df.groupby(grouping)["request_id"]
        .count()
        .reset_index(name="total_mech_requests")
    )
    daily_counts.to_parquet(ROOT_DIR / "daily_mech_requests.parquet", index=False)
def generate_daily_mech_request_for_pearl_agents(tools_df: pd.DataFrame) -> None:
    """Generate daily mech request counts restricted to Pearl agents.

    Loads the Pearl agent safe addresses from pearl_agents.parquet, keeps
    only requests made by those traders, and writes per-day/per-tool counts
    to daily_mech_requests_by_pearl_agents.parquet under ROOT_DIR.
    """
    # Known Pearl agent safe addresses.
    pearl_agents = pd.read_parquet(ROOT_DIR / "pearl_agents.parquet")
    pearl_addresses = pearl_agents["safe_address"].unique()
    # Restrict the tools data to requests from those traders only.
    selected = tools_df[tools_df["trader_address"].isin(pearl_addresses)].copy()
    daily_counts = (
        selected.groupby(["request_date", "tool"])["request_id"]
        .count()
        .reset_index(name="total_mech_requests")
    )
    daily_counts.to_parquet(
        ROOT_DIR / "daily_mech_requests_by_pearl_agents.parquet", index=False
    )
if __name__ == "__main__":
    # Script entry point: rebuild all tools-based parquet datasets.
    compute_tools_based_datasets()
|