|
|
import os |
|
|
import pandas as pd |
|
|
from typing import Tuple, List, Dict |
|
|
import ipfshttpclient |
|
|
from utils import INC_TOOLS |
|
|
from typing import List |
|
|
from utils import TMP_DIR, ROOT_DIR |
|
|
from cloud_storage import ( |
|
|
initialize_client, |
|
|
download_tools_historical_files, |
|
|
FILES_IN_TWO_MONTHS, |
|
|
FILES_IN_FOUR_MONTHS, |
|
|
FILES_IN_SIX_MONTHS, |
|
|
FILES_IN_EIGHT_MONTHS, |
|
|
FILES_IN_TEN_MONTHS, |
|
|
) |
|
|
|
|
|
# Name of the CSV file where the per-tool accuracy table is written.
ACCURACY_FILENAME = "tools_accuracy.csv"

# Multiaddr of the IPFS node used to publish the accuracy file.
IPFS_SERVER = "/dns/registry.autonolas.tech/tcp/443/https"

# Reference number of answered markets (questions) each tool should reach.
NR_ANSWERED_MARKETS = 1000

# Maximum number of backfill passes over the historical cloud files.
MAX_ATTEMPTS = 5

# Maps the attempt number (1-based) to the historical-files count to use;
# each successive attempt reaches two more months back in time.
historical_files_count_map = {
    1: FILES_IN_TWO_MONTHS,
    2: FILES_IN_FOUR_MONTHS,
    3: FILES_IN_SIX_MONTHS,
    4: FILES_IN_EIGHT_MONTHS,
    5: FILES_IN_TEN_MONTHS,
}

# Accuracy assigned to tools without enough data to compute a real value.
DEFAULT_ACCURACY = 0.50

# Accuracy assigned to tools considered unreliable.
DEFAULT_BAD_ACCURACY = 0.00
|
|
|
|
|
|
|
|
def keep_last_answer_per_question_per_tool(
    clean_tools_df: pd.DataFrame,
) -> pd.DataFrame:
    """
    Keep only the most recent answer (by request_time) that each tool gave
    to each question (title).

    :param clean_tools_df: tools DataFrame with at least the "tool",
        "title" and "request_time" columns.
    :return: filtered DataFrame with one row per (tool, title) pair and a
        fresh integer index.
    """
    ordered = clean_tools_df.sort_values(by=["tool", "title", "request_time"])
    # After the ascending sort, tail(1) picks the row with the latest
    # request_time within each (tool, title) group.
    latest = ordered.groupby(["tool", "title"], as_index=False).tail(1)
    return latest.reset_index(drop=True)
|
|
|
|
|
|
|
|
def push_csv_file_to_ipfs(filename: str = ACCURACY_FILENAME) -> str:
    """Push the tools accuracy CSV file to IPFS and return its content hash.

    :param filename: name of the CSV file under ROOT_DIR to upload.
    :return: the IPFS hash of the uploaded file.
    """
    # Use a context manager so the HTTP connection to the IPFS daemon is
    # always released, even if the upload raises (the original leaked it).
    with ipfshttpclient.connect(IPFS_SERVER) as client:
        result = client.add(ROOT_DIR / filename)
    print(f"HASH of the tools accuracy file: {result['Hash']}")
    return result["Hash"]
|
|
|
|
|
|
|
|
def clean_tools_dataset(tools_df: pd.DataFrame) -> pd.DataFrame:
    """
    Filter the raw tools DataFrame down to valid, answered rows and add a
    binary "win" column.

    Keeps only rows that:
      * do not belong to the placeholder tools ("tool_name", "TEMP_TOOL"),
      * have no error (error == 0),
      * have a normalized market answer of "Yes"/"No",
      * have a tool vote of "Yes"/"No".

    :param tools_df: raw tools DataFrame.
    :return: cleaned copy with an integer "win" column (1 when the tool's
        vote matched the market's current answer, else 0).
    """
    # Drop placeholder/temporary tool rows (idiomatic ~isin instead of == False).
    tools_non_error = tools_df[
        ~tools_df["tool"].isin(["tool_name", "TEMP_TOOL"])
    ].copy()

    tools_non_error = tools_non_error[tools_non_error["error"] == 0]
    # Normalize the market answer casing before filtering on it.
    tools_non_error.loc[:, "currentAnswer"] = tools_non_error["currentAnswer"].replace(
        {"no": "No", "yes": "Yes"}
    )
    tools_non_error = tools_non_error[
        tools_non_error["currentAnswer"].isin(["Yes", "No"])
    ]
    tools_non_error = tools_non_error[tools_non_error["vote"].isin(["Yes", "No"])]
    # A "win" is when the tool's vote agrees with the market outcome.
    tools_non_error["win"] = (
        tools_non_error["currentAnswer"] == tools_non_error["vote"]
    ).astype(int)
    tools_non_error.columns = tools_non_error.columns.astype(str)
    return tools_non_error
|
|
|
|
|
|
|
|
def take_toptool_name(tools_df: pd.DataFrame) -> str:
    """Return the name of the tool with the most rows in the DataFrame."""
    # value_counts() sorts descending by count, so the first index label
    # is the highest-volume tool.
    return tools_df["tool"].value_counts().index[0]
|
|
|
|
|
|
|
|
def compute_nr_questions_per_tool(clean_tools_df: pd.DataFrame) -> dict:
    """
    Count, per included tool, how many unique questions were answered and
    which markets received both "Yes" and "No" responses from that tool.

    :param clean_tools_df: cleaned tools DataFrame with "tool", "title",
        "request_time" and "currentAnswer" columns.
    :return: dict keyed by tool name; each value has
        "total_answered_questions" (int) and "markets_different_answers"
        (dict of title -> yes/no response counts).
    """
    answered_questions: Dict = {}

    for tool in INC_TOOLS:
        print(f"processing tool {tool}")
        per_tool = clean_tools_df[clean_tools_df["tool"] == tool].copy()
        per_tool = per_tool.sort_values(by="request_time", ascending=True)

        titles = per_tool.title.unique()
        split_markets = {}
        for title in titles:
            counts = per_tool[per_tool["title"] == title].currentAnswer.value_counts()
            yes_total = counts.get("Yes", 0)
            no_total = counts.get("No", 0)
            # Track markets whose recorded answer flipped over time.
            if yes_total > 0 and no_total > 0:
                split_markets[title] = {
                    "yes_responses": yes_total,
                    "no_responses": no_total,
                }

        answered_questions[tool] = {
            "total_answered_questions": len(titles),
            "markets_different_answers": split_markets,
        }
    return answered_questions
|
|
|
|
|
|
|
|
def classify_tools_by_responses(
    answered_questions: dict, ref_nr_questions: int
) -> List[str]:
    """
    Return the tools whose number of answered questions is below the
    reference threshold, i.e. the tools that still need more data.

    :param answered_questions: per-tool stats as produced by
        compute_nr_questions_per_tool.
    :param ref_nr_questions: minimum required number of answered questions.
    :return: list of tool names that need more questions.
    """
    # Fixed return annotation: the function returns a list, not a Tuple.
    return [
        tool
        for tool, stats in answered_questions.items()
        if stats["total_answered_questions"] < ref_nr_questions
    ]
|
|
|
|
|
|
|
|
def add_historical_data(
    tools_historical_file: str,
    tools_df: pd.DataFrame,
    more_questions_tools: list,
    recent_nr_questions: int,
    completed_tools: List[str],
) -> pd.DataFrame:
    """
    Merge one downloaded historical tools file into tools_df to gather more
    samples for the tools that still lack questions.

    Side effects (in-place mutation of caller-owned lists):
      * appends to completed_tools every tool that reaches
        recent_nr_questions after the merge,
      * removes those completed tools from more_questions_tools.

    :param tools_historical_file: filename of a parquet file already
        downloaded into TMP_DIR.
    :param tools_df: current (cleaned) tools DataFrame.
    :param more_questions_tools: tools still needing more questions.
    :param recent_nr_questions: reference number of questions per tool.
    :param completed_tools: output list of tools completed by this merge.
    :return: the concatenated and de-duplicated tools DataFrame.
    :raises ValueError: if no historical file was provided.
    """
    if not tools_historical_file:
        raise ValueError(
            "No historical tools file found, skipping adding historical data."
        )

    print(f"Downloaded historical file into the tmp folder: {tools_historical_file}")

    historical_tools_df = pd.read_parquet(TMP_DIR / tools_historical_file)

    # Keep only the tools tracked by this pipeline, then apply the same
    # cleaning and per-question de-duplication used for the recent data.
    historical_tools_df = historical_tools_df[
        historical_tools_df["tool"].isin(INC_TOOLS) == True
    ]
    historical_tools_df = clean_tools_dataset(historical_tools_df)
    historical_tools_df = get_unique_recent_samples(tools_df=historical_tools_df)

    tools_df = pd.concat([tools_df, historical_tools_df], ignore_index=True)

    # A (request_id, request_block) pair identifies a unique request; drop
    # rows that appear in both the recent and historical data.
    tools_df.drop_duplicates(
        subset=["request_id", "request_block"], keep="first", inplace=True
    )

    # Re-count questions after the merge to see which tools are now done.
    answered_questions = compute_nr_questions_per_tool(clean_tools_df=tools_df)
    for tool in more_questions_tools:
        new_count = answered_questions[tool]["total_answered_questions"]
        if new_count >= recent_nr_questions:
            completed_tools.append(tool)

    for tool in completed_tools:
        print(f"Tool {tool} with enough questions now, removing from list")
        if tool in more_questions_tools:
            more_questions_tools.remove(tool)
    return tools_df
|
|
|
|
|
|
|
|
def check_historical_samples(
    client,
    tools_df: pd.DataFrame,
    more_questions_tools: list,
    ref_nr_questions: int,
    attempt_nr: int,
) -> Tuple:
    """
    Download historical data for the tools and update the list of tools
    that need more questions.

    :param client: initialized cloud-storage client.
    :param tools_df: current tools DataFrame.
    :param more_questions_tools: tools still lacking questions; mutated in
        place by add_historical_data.
    :param ref_nr_questions: reference number of questions per tool.
    :param attempt_nr: current attempt (1-based); selects how far back in
        time to look via historical_files_count_map.
    :return: tuple of (updated DataFrame, list of the tools for which we
        managed to complete the requirement in this attempt).
    """
    print(f"Tools with not enough samples: {more_questions_tools}")
    completed_tools = []

    # Each attempt reaches further back in the historical files (two more
    # months per attempt, per historical_files_count_map).
    files_count = historical_files_count_map[attempt_nr]
    tools_historical_file = download_tools_historical_files(
        client, skip_files_count=files_count
    )

    tools_df = add_historical_data(
        tools_historical_file,
        tools_df,
        more_questions_tools,
        ref_nr_questions,
        completed_tools,
    )

    # Re-apply the one-answer-per-(tool, question) policy after the merge.
    tools_df = get_unique_recent_samples(tools_df=tools_df)
    print("Current count of answered questions per tool after adding historical data:")
    print(tools_df.groupby("tool")["title"].nunique())
    return tools_df, completed_tools
|
|
|
|
|
|
|
|
def get_unique_recent_samples(
    tools_df: pd.DataFrame, recent_samples_size: int = None
) -> pd.DataFrame:
    """
    Keep, for each tool, only the most recent answer per question (title),
    optionally capped at the most recent N questions per tool.

    :param tools_df: tools DataFrame with "tool", "title" and
        "request_time" columns.
    :param recent_samples_size: optional cap on questions kept per tool.
    :return: filtered DataFrame with a fresh integer index.
    """
    # Sort newest-first within each (tool, title) group so head(1) keeps
    # exactly the latest answer per question.
    deduped = (
        tools_df.sort_values(
            by=["tool", "title", "request_time"], ascending=[True, True, False]
        )
        .groupby(["tool", "title"], as_index=False)
        .head(1)
    )

    # Order each tool's remaining rows newest-first for the optional cap.
    deduped = deduped.sort_values(by=["tool", "request_time"], ascending=[True, False])

    if recent_samples_size is not None:
        deduped = deduped.groupby("tool").head(recent_samples_size)
    return deduped.reset_index(drop=True)
|
|
|
|
|
|
|
|
def sampled_accuracy(
    tools_data: pd.DataFrame, n: int = None, sampling_percentage: float = 0.30
) -> float:
    """Estimate a tool's accuracy from a random sample of its answers.

    :param tools_data: DataFrame with a binary "win" column.
    :param n: exact number of rows to sample; when given, overrides
        sampling_percentage.
    :param sampling_percentage: fraction of rows to sample when n is None.
    :return: accuracy over the sample, rounded to 5 decimals.
    :raises ValueError: when n is None and sampling_percentage is outside
        the (0, 1] interval.
    """
    if n is not None:
        # Fixed seed keeps the estimate reproducible across runs.
        subset = tools_data.sample(n=n, random_state=42)
    else:
        if sampling_percentage <= 0 or sampling_percentage > 1:
            raise ValueError("Sampling percentage must be between 0 and 1.")
        subset = tools_data.sample(frac=sampling_percentage, random_state=42)

    hits = int(subset.win.sum())
    return round(hits / len(subset), 5)
|
|
|
|
|
|
|
|
def evenly_distributed_sampling(
    tools_data: pd.DataFrame, group_size: int, sampling_percentage: float = 0.30
) -> pd.DataFrame:
    """
    Sample the tools data by taking sequential groups of size group_size
    evenly distributed along the time axis.

    The data is sorted by request_time and split into num_groups evenly
    spaced sections; the first group_size rows of each section are taken
    until the desired sampling percentage is (approximately) reached.

    :param tools_data: DataFrame with a "request_time" column.
    :param group_size: number of consecutive rows per sampled group.
    :param sampling_percentage: target fraction of rows to sample, in (0, 1].
    :return: the sampled rows with a fresh integer index.
    :raises ValueError: on invalid group size or percentage, or when the
        combination cannot yield at least one full group.
    """
    if group_size > len(tools_data):
        raise ValueError(
            "Group size cannot be larger than the number of rows in the DataFrame."
        )
    if sampling_percentage <= 0 or sampling_percentage > 1:
        raise ValueError("Sampling percentage must be between 0 and 1.")

    # Groups must be consecutive in time, so sort first.
    sorted_data = tools_data.sort_values(by="request_time")

    total_samples = int(len(sorted_data) * sampling_percentage)

    num_groups = total_samples // group_size
    print(f"Number of groups we need to reach the total samples: {num_groups}")

    if num_groups == 0:
        raise ValueError(
            "Not enough data to sample with the given group size and sampling percentage."
        )
    if len(sorted_data) < num_groups * group_size:
        raise ValueError(
            "Not enough data to sample with the given group size and sampling percentage."
        )

    sampled_data = pd.DataFrame()

    # Spread the group start positions evenly across the sorted data.
    section_length = len(sorted_data) // num_groups

    for i in range(num_groups):
        # NOTE(review): when section_length < group_size, consecutive groups
        # can overlap and duplicate rows may be sampled — confirm intended.
        start_index = i * section_length
        end_index = start_index + group_size
        if end_index > len(sorted_data):
            end_index = len(sorted_data)

        group_sample = sorted_data.iloc[start_index:end_index]
        sampled_data = pd.concat([sampled_data, group_sample], ignore_index=True)

    if len(sampled_data) > total_samples:
        # Trim the overshoot down to the exact target size.
        sampled_data = sampled_data.sample(n=total_samples, random_state=42)
    elif len(sampled_data) < total_samples:
        print(
            f"Warning: Sampled data has fewer rows ({len(sampled_data)}) than requested ({total_samples})."
        )

    return sampled_data.reset_index(drop=True)
|
|
|
|
|
|
|
|
def get_accuracy_values(tools_df: pd.DataFrame, more_q_tools: list) -> list:
    """
    Build the per-tool accuracy records.

    Tools listed in more_q_tools (not enough data) receive the default
    accuracy; every other tool gets the ratio of winning answers.

    :param tools_df: cleaned tools DataFrame with "tool", "win" and
        "request_time" columns.
    :return: list of dicts with keys "tool", "tool_accuracy",
        "nr_responses", "min" and "max" (timestamp range of the data).
    """
    global_accuracies = []

    for tool in tools_df.tool.unique():
        print(f"Processing tool: {tool}")
        tool_rows = tools_df[tools_df["tool"] == tool]
        first_ts = tool_rows.request_time.min().strftime("%Y-%m-%d %H:%M:%S")
        last_ts = tool_rows.request_time.max().strftime("%Y-%m-%d %H:%M:%S")

        if tool in more_q_tools:
            # Not enough data for this tool: fall back to the defaults.
            record = {
                "tool": tool,
                "tool_accuracy": DEFAULT_ACCURACY,
                "nr_responses": NR_ANSWERED_MARKETS,
                "min": first_ts,
                "max": last_ts,
            }
        else:
            wins = int(tool_rows.win.sum())
            record = {
                "tool": tool,
                "tool_accuracy": round(wins / len(tool_rows), 5),
                "nr_responses": len(tool_rows),
                "min": first_ts,
                "max": last_ts,
            }
        global_accuracies.append(record)
    return global_accuracies
|
|
|
|
|
|
|
|
def global_tool_accuracy():
    """
    Compute and publish the global accuracy of every tool.

    Pipeline:
      1. Read and clean the recent tools parquet file.
      2. Find tools without enough answered questions and backfill them
         from historical cloud files (up to MAX_ATTEMPTS passes).
      3. Restrict all tools to the set of market questions they answered
         in common.
      4. Compute per-tool accuracies, save them to CSV and push the file
         to IPFS.

    :raises ValueError: when some tools still lack data after all attempts.
    """
    print("Reading tools parquet file")
    tools_df = pd.read_parquet(TMP_DIR / "tools.parquet")

    clean_tools_df = clean_tools_dataset(tools_df)
    print("Current count of answered questions per tool after cleaning:")
    print(clean_tools_df.groupby("tool")["title"].nunique())

    answered_questions = compute_nr_questions_per_tool(clean_tools_df=clean_tools_df)
    ref_nr_questions = NR_ANSWERED_MARKETS

    more_q_tools = classify_tools_by_responses(answered_questions, ref_nr_questions)

    clean_tools_df = get_unique_recent_samples(tools_df=clean_tools_df)

    print(
        "Current count of answered questions per tool after selecting the global population:"
    )
    print(clean_tools_df.groupby("tool")["title"].nunique())

    nr_attempts = 0
    client = initialize_client()
    # Fix: initialize updated_tools so it is defined even when every tool
    # already has enough questions and the while loop never runs.
    updated_tools = []
    while len(more_q_tools) > 0 and nr_attempts < MAX_ATTEMPTS:
        nr_attempts += 1
        print(f"Attempt {nr_attempts} to reach the reference number of questions")
        clean_tools_df, updated_tools = check_historical_samples(
            client=client,
            tools_df=clean_tools_df,
            more_questions_tools=more_q_tools,
            ref_nr_questions=ref_nr_questions,
            attempt_nr=nr_attempts,
        )
        print(f"Tools that were completed with historical data {updated_tools}")
        print(f"More tools with not enough questions {more_q_tools}")

    # Best-effort checkpoint of the merged dataset; failures here must not
    # stop the accuracy computation.
    try:
        if "request_block" in clean_tools_df.columns:
            clean_tools_df["request_block"] = pd.to_numeric(
                clean_tools_df["request_block"], errors="coerce"
            ).astype("Int64")
        clean_tools_df.to_parquet(TMP_DIR / "clean_tools.parquet", index=False)
    except Exception as e:
        print(f"Error saving clean tools parquet file: {e}")

    print(
        "Current count of answered questions per tool after reading the parquet file:"
    )
    print(
        clean_tools_df.groupby("tool")["title"].nunique().sort_values(ascending=False)
    )

    if len(more_q_tools) > 0:
        raise ValueError(
            f"Not enough data for the tools: {more_q_tools}. "
            "Please check the historical data or increase the number of attempts."
        )

    # Reference tool: the last one completed by the backfill, or — when the
    # backfill never ran — the tool with the fewest unique questions.
    if len(updated_tools) > 0:
        last_tool = updated_tools[-1]
        print(f"last tool with enough questions: {last_tool}")
    else:
        last_tool = (
            clean_tools_df.groupby("tool")["title"]
            .nunique()
            .sort_values(ascending=True)
            .index[0]
        )

    clean_tools_df = clean_tools_df[clean_tools_df["tool"].isin(INC_TOOLS) == True]

    last_tool_data = clean_tools_df[clean_tools_df["tool"] == last_tool].copy()
    last_tool_data = last_tool_data.sort_values(by="request_time", ascending=False)
    last_tool_data = last_tool_data.head(NR_ANSWERED_MARKETS)
    print("Extracting the final list of market questions from the last tool")

    last_tool_titles = last_tool_data.title.unique()
    print("Current count of answered questions per tool before filtering:")
    print(clean_tools_df.groupby("tool")["title"].nunique())
    common_titles = []
    for tool in clean_tools_df.tool.unique():
        tool_data = clean_tools_df[clean_tools_df["tool"] == tool]
        tool_titles_set = set(
            tool_data[tool_data["title"].isin(last_tool_titles)]["title"].unique()
        )
        print(f"Tool: {tool}, Count of titles: {len(tool_titles_set)}")
        common_titles.append(tool_titles_set)

    common_titles_set = set.intersection(*common_titles)
    print(f"Common titles across all tools: {len(common_titles_set)}")

    clean_tools_df = clean_tools_df[clean_tools_df["title"].isin(common_titles_set)]

    print(
        "Current count of answered questions per tool after selecting common titles:"
    )
    print(clean_tools_df.groupby("tool")["title"].nunique())

    print("Computing the global accuracies for the tools")
    # NOTE(review): this hard-coded list overrides more_q_tools (which is
    # guaranteed empty at this point by the ValueError above) — confirm these
    # two tools are meant to always receive the default accuracy.
    global_accuracies = get_accuracy_values(
        tools_df=clean_tools_df,
        more_q_tools=["prediction-offline-sme", "prediction-url-cot-claude"],
    )

    if len(more_q_tools) > 0:
        # Fix: the accuracy records use the "tool_accuracy" key, not
        # "accuracy" — the original would raise a KeyError here.
        total_accuracy = sum(item["tool_accuracy"] for item in global_accuracies)
        avg_accuracy = (
            round(total_accuracy / len(global_accuracies), 5)
            if len(global_accuracies) > 0
            else DEFAULT_ACCURACY
        )
        # Fix: global_accuracies is a list of dicts, so look each tool up by
        # its "tool" field instead of indexing the list with the tool name.
        for item in global_accuracies:
            if item["tool"] in more_q_tools:
                item["tool_accuracy"] = avg_accuracy

    print(f"global accuracies {global_accuracies}")

    computed_accuracy_df = pd.DataFrame(global_accuracies)
    computed_accuracy_df = computed_accuracy_df.sort_values(
        by="tool_accuracy", ascending=False, ignore_index=True
    )
    print(computed_accuracy_df.head())
    print("Saving into a csv file")
    computed_accuracy_df.to_csv(ROOT_DIR / ACCURACY_FILENAME, index=False)

    push_csv_file_to_ipfs()
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Script entry point: run the full accuracy pipeline end to end.
    global_tool_accuracy()
|
|
|
|
|
|