"""Compute per-tool accuracy over answered prediction markets and publish to IPFS.

Pipeline: clean the raw tools dataset, top up tools that lack enough answered
markets with historical cloud data (up to ``MAX_ATTEMPTS`` download rounds),
restrict the population to the market questions common to every tool, compute
each tool's accuracy, save it as CSV and push the file to IPFS.
"""

import pandas as pd
from typing import Dict, List, Optional, Tuple

import ipfshttpclient

from utils import INC_TOOLS, ROOT_DIR, TMP_DIR
from cloud_storage import (
    initialize_client,
    download_tools_historical_files,
    FILES_IN_TWO_MONTHS,
    FILES_IN_FOUR_MONTHS,
    FILES_IN_SIX_MONTHS,
    FILES_IN_EIGHT_MONTHS,
    FILES_IN_TEN_MONTHS,
)

ACCURACY_FILENAME = "tools_accuracy.csv"
IPFS_SERVER = "/dns/registry.autonolas.tech/tcp/443/https"
# In two months the max we can reach is 1000 for top tools
NR_ANSWERED_MARKETS = 1000
MAX_ATTEMPTS = 5
# attempt number -> how far back (in files) to reach into the historical data
historical_files_count_map = {
    1: FILES_IN_TWO_MONTHS,
    2: FILES_IN_FOUR_MONTHS,
    3: FILES_IN_SIX_MONTHS,
    4: FILES_IN_EIGHT_MONTHS,
    5: FILES_IN_TEN_MONTHS,
}
DEFAULT_ACCURACY = 0.50
DEFAULT_BAD_ACCURACY = 0.00


def keep_last_answer_per_question_per_tool(
    clean_tools_df: pd.DataFrame,
) -> pd.DataFrame:
    """
    For each tool, keep only the last answer for each question (title)
    based on request_time.

    :param clean_tools_df: cleaned tools dataset with at least the columns
        ``tool``, ``title`` and ``request_time``.
    :return: filtered DataFrame with one (the latest) row per (tool, title).
    """
    # Sort so the most recent request ends up last within each group
    sorted_df = clean_tools_df.sort_values(by=["tool", "title", "request_time"])
    # Keep the last answer for each tool and question
    last_answers = sorted_df.groupby(["tool", "title"], as_index=False).tail(1)
    return last_answers.reset_index(drop=True)


def push_csv_file_to_ipfs(filename: str = ACCURACY_FILENAME) -> str:
    """Push the tools accuracy CSV file to IPFS and return its content hash."""
    # Context manager ensures the IPFS client connection is closed
    with ipfshttpclient.connect(IPFS_SERVER) as client:
        result = client.add(ROOT_DIR / filename)
    print(f"HASH of the tools accuracy file: {result['Hash']}")
    return result["Hash"]


def clean_tools_dataset(tools_df: pd.DataFrame) -> pd.DataFrame:
    """
    Clean the raw tools dataset: drop placeholder tools and errored rows,
    normalize answers to Yes/No and add a binary ``win`` column
    (1 when the tool's vote matches the market's final answer).
    """
    # Remove placeholder rows (tool_name / TEMP_TOOL)
    tools_non_error = tools_df[~tools_df["tool"].isin(["tool_name", "TEMP_TOOL"])].copy()
    # Keep only rows without errors
    tools_non_error = tools_non_error[tools_non_error["error"] == 0]
    # Normalize lowercase answers
    tools_non_error.loc[:, "currentAnswer"] = tools_non_error["currentAnswer"].replace(
        {"no": "No", "yes": "Yes"}
    )
    # Keep only resolved binary markets and binary votes
    tools_non_error = tools_non_error[
        tools_non_error["currentAnswer"].isin(["Yes", "No"])
    ]
    tools_non_error = tools_non_error[tools_non_error["vote"].isin(["Yes", "No"])]
    # win == 1 means the tool's vote agrees with the market outcome
    tools_non_error["win"] = (
        tools_non_error["currentAnswer"] == tools_non_error["vote"]
    ).astype(int)
    tools_non_error.columns = tools_non_error.columns.astype(str)
    return tools_non_error


def take_toptool_name(tools_df: pd.DataFrame) -> str:
    """Return the name of the tool with the highest number of responses."""
    volumes = tools_df.tool.value_counts().reset_index()
    return volumes.iloc[0].tool


def compute_nr_questions_per_tool(clean_tools_df: pd.DataFrame) -> dict:
    """
    For every tool in INC_TOOLS, count the unique answered questions and
    detect markets where the same tool gave contradictory answers.

    :return: mapping ``tool -> {"total_answered_questions": int,
        "markets_different_answers": {title: {"yes_responses", "no_responses"}}}``
    """
    answered_questions: Dict[str, dict] = {}
    for tool in INC_TOOLS:
        print(f"processing tool {tool}")
        tool_data = clean_tools_df[clean_tools_df["tool"] == tool].copy()
        # sort tool_data by request date in ascending order
        tool_data = tool_data.sort_values(by="request_time", ascending=True)
        # count unique prediction markets
        unique_questions = tool_data.title.unique()
        answered_questions[tool] = {}
        answered_questions[tool]["total_answered_questions"] = len(unique_questions)
        markets_different_answer = {}
        for question in unique_questions:
            market_data = tool_data[tool_data["title"] == question]
            different_responses = market_data.currentAnswer.value_counts()
            # Extract yes and no counts, defaulting to 0 if not present
            yes_count = different_responses.get("Yes", 0)
            no_count = different_responses.get("No", 0)
            if yes_count > 0 and no_count > 0:
                # found a market with different responses from the same tool
                markets_different_answer[question] = {
                    "yes_responses": yes_count,
                    "no_responses": no_count,
                }
        answered_questions[tool]["markets_different_answers"] = markets_different_answer
    return answered_questions


def classify_tools_by_responses(
    answered_questions: dict, ref_nr_questions: int
) -> List[str]:
    """Return the tools whose answered-question count is below the reference."""
    # NOTE: original annotation said Tuple but a list has always been returned
    return [
        tool
        for tool, stats in answered_questions.items()
        if stats["total_answered_questions"] < ref_nr_questions
    ]


def add_historical_data(
    tools_historical_file: str,
    tools_df: pd.DataFrame,
    more_questions_tools: list,
    recent_nr_questions: int,
    completed_tools: List[str],
) -> pd.DataFrame:
    """
    Merge one historical cloud file into ``tools_df`` to get more samples
    for the tools that need them.

    Side effects: appends to ``completed_tools`` and removes the completed
    tools from ``more_questions_tools`` (both mutated in place).

    :raises ValueError: when ``tools_historical_file`` is empty/None.
    """
    if not tools_historical_file:
        raise ValueError(
            "No historical tools file found, skipping adding historical data."
        )
    print(f"Downloaded historical file into the tmp folder: {tools_historical_file}")
    # Load the historical tools data
    historical_tools_df = pd.read_parquet(TMP_DIR / tools_historical_file)
    # Keep only the tools we track, then clean and deduplicate per question
    historical_tools_df = historical_tools_df[
        historical_tools_df["tool"].isin(INC_TOOLS)
    ]
    historical_tools_df = clean_tools_dataset(historical_tools_df)
    historical_tools_df = get_unique_recent_samples(tools_df=historical_tools_df)
    # adding all responses for all tools
    tools_df = pd.concat([tools_df, historical_tools_df], ignore_index=True)
    # remove duplicates (same request seen in both recent and historical data)
    tools_df.drop_duplicates(
        subset=["request_id", "request_block"], keep="first", inplace=True
    )
    # check the new total of answered questions per tool
    answered_questions = compute_nr_questions_per_tool(clean_tools_df=tools_df)
    for tool in more_questions_tools:
        new_count = answered_questions[tool]["total_answered_questions"]
        if new_count >= recent_nr_questions:
            completed_tools.append(tool)
    # remove the tools in completed_tools list from more_questions_tools
    for tool in completed_tools:
        print(f"Tool {tool} with enough questions now, removing from list")
        if tool in more_questions_tools:
            more_questions_tools.remove(tool)
    return tools_df


def check_historical_samples(
    client,
    tools_df: pd.DataFrame,
    more_questions_tools: list,
    ref_nr_questions: int,
    attempt_nr: int,
) -> Tuple:
    """
    Download historical data for the tools and update the list of tools
    that need more questions.

    :return: ``(updated_tools_df, completed_tools)`` where ``completed_tools``
        lists the tools that reached the requirement in this attempt.
    """
    print(f"Tools with not enough samples: {more_questions_tools}")
    completed_tools: List[str] = []
    files_count = historical_files_count_map[attempt_nr]
    tools_historical_file = download_tools_historical_files(
        client, skip_files_count=files_count
    )
    tools_df = add_historical_data(
        tools_historical_file,
        tools_df,
        more_questions_tools,
        ref_nr_questions,
        completed_tools,
    )
    # keep the unique responses for all tools
    tools_df = get_unique_recent_samples(tools_df=tools_df)
    print("Current count of answered questions per tool after adding historical data:")
    print(tools_df.groupby("tool")["title"].nunique())
    return tools_df, completed_tools


def get_unique_recent_samples(
    tools_df: pd.DataFrame, recent_samples_size: Optional[int] = None
) -> pd.DataFrame:
    """
    For each tool, keep the most recent answer for each question (title),
    and optionally limit to the most recent N questions per tool.

    :param recent_samples_size: when given, cap each tool at this many rows
        (the most recent ones).
    """
    # Sort by tool, title, and request_time descending
    tools_df = tools_df.sort_values(
        by=["tool", "title", "request_time"], ascending=[True, True, False]
    )
    # Keep the most recent answer for each (tool, title)
    tools_df = tools_df.groupby(["tool", "title"], as_index=False).head(1)
    # For each tool, keep up to recent_samples_size most recent questions
    tools_df = tools_df.sort_values(
        by=["tool", "request_time"], ascending=[True, False]
    )
    if recent_samples_size is not None:
        tools_df = tools_df.groupby("tool").head(recent_samples_size)
    return tools_df.reset_index(drop=True)


def sampled_accuracy(
    tools_data: pd.DataFrame, n: int = None, sampling_percentage: float = 0.30
) -> float:
    """
    Estimate a tool's accuracy on a random sample of its responses.

    :param n: when given, sample exactly ``n`` rows (takes precedence).
    :param sampling_percentage: otherwise sample this fraction (0 < p <= 1).
    :raises ValueError: when the sampling percentage is out of range.
    """
    if n is not None:
        # fixed random_state keeps the estimate reproducible
        sampled_data = tools_data.sample(n=n, random_state=42)
    elif sampling_percentage <= 0 or sampling_percentage > 1:
        raise ValueError("Sampling percentage must be between 0 and 1.")
    else:
        # Sample a percentage of the tools data without replacement
        sampled_data = tools_data.sample(frac=sampling_percentage, random_state=42)
    # win column == 1 is a correct answer
    correct_answers = int(sampled_data.win.sum())
    return round(correct_answers / len(sampled_data), 5)


def evenly_distributed_sampling(
    tools_data: pd.DataFrame, group_size: int, sampling_percentage: float = 0.30
) -> pd.DataFrame:
    """
    Sample the time-sorted tools data by taking sequential groups of size
    ``group_size`` evenly distributed along the time axis, until reaching
    the desired sampling percentage.

    :raises ValueError: when the group size or percentage make sampling
        impossible.
    """
    if group_size > len(tools_data):
        raise ValueError(
            "Group size cannot be larger than the number of rows in the DataFrame."
        )
    if sampling_percentage <= 0 or sampling_percentage > 1:
        raise ValueError("Sampling percentage must be between 0 and 1.")
    # Sort the data by request_time
    sorted_data = tools_data.sort_values(by="request_time")
    # Calculate the number of samples that we need to take
    total_samples = int(len(sorted_data) * sampling_percentage)
    # Calculate the number of groups we need to reach the desired sample size
    num_groups = total_samples // group_size
    print(f"Number of groups we need to reach the total samples: {num_groups}")
    if num_groups == 0:
        raise ValueError(
            "Not enough data to sample with the given group size and sampling percentage."
        )
    if len(sorted_data) < num_groups * group_size:
        raise ValueError(
            "Not enough data to sample with the given group size and sampling percentage."
        )
    sampled_data = pd.DataFrame()
    # divide the sorted data length into num_groups sections
    section_length = len(sorted_data) // num_groups
    # from each section take samples of size group_size
    for i in range(num_groups):
        # jump into the section
        start_index = i * section_length
        end_index = min(start_index + group_size, len(sorted_data))
        group_sample = sorted_data.iloc[start_index:end_index]
        sampled_data = pd.concat([sampled_data, group_sample], ignore_index=True)
    if len(sampled_data) > total_samples:
        # If we have more samples than needed, randomly sample down to total_samples
        sampled_data = sampled_data.sample(n=total_samples, random_state=42)
    elif len(sampled_data) < total_samples:
        # Fewer samples than requested: warn and return what we have
        print(
            f"Warning: Sampled data has fewer rows ({len(sampled_data)}) than requested ({total_samples})."
        )
    return sampled_data.reset_index(drop=True)


def get_accuracy_values(tools_df: pd.DataFrame, more_q_tools: list) -> list:
    """
    Compute the accuracy of every tool in ``tools_df``.

    Tools listed in ``more_q_tools`` (insufficient data) get the
    DEFAULT_ACCURACY placeholder instead of a computed value.

    :return: list of dicts with keys ``tool``, ``tool_accuracy``,
        ``nr_responses``, ``min`` and ``max`` (response time range).
    """
    global_accuracies = []
    for tool in tools_df.tool.unique():
        print(f"Processing tool: {tool}")
        tools_data = tools_df[tools_df["tool"] == tool]
        min_timestamp = tools_data.request_time.min().strftime("%Y-%m-%d %H:%M:%S")
        max_timestamp = tools_data.request_time.max().strftime("%Y-%m-%d %H:%M:%S")
        if tool in more_q_tools:
            # not enough data: report the neutral default accuracy
            global_accuracies.append(
                {
                    "tool": tool,
                    "tool_accuracy": DEFAULT_ACCURACY,
                    "nr_responses": NR_ANSWERED_MARKETS,
                    "min": min_timestamp,
                    "max": max_timestamp,
                }
            )
            continue
        # accuracy over the full population (no sampling)
        correct_answers = int(tools_data.win.sum())
        tool_accuracy = round(correct_answers / len(tools_data), 5)
        global_accuracies.append(
            {
                "tool": tool,
                "tool_accuracy": tool_accuracy,
                "nr_responses": len(tools_data),
                "min": min_timestamp,
                "max": max_timestamp,
            }
        )
    return global_accuracies


def global_tool_accuracy():
    """
    End-to-end accuracy computation: clean the dataset, top up with
    historical data, filter to the common market questions, compute the
    accuracies, save the CSV and push it to IPFS.

    :raises ValueError: when some tools still lack enough data after all
        historical download attempts.
    """
    # read the tools df
    print("Reading tools parquet file")
    tools_df = pd.read_parquet(TMP_DIR / "tools.parquet")
    # clean the tools df
    clean_tools_df = clean_tools_dataset(tools_df)
    print("Current count of answered questions per tool after cleaning:")
    print(clean_tools_df.groupby("tool")["title"].nunique())
    # extract the number of questions answered from each tool
    answered_questions = compute_nr_questions_per_tool(clean_tools_df=clean_tools_df)
    ref_nr_questions = NR_ANSWERED_MARKETS
    # classify tools between those with enough questions and those that need more data
    more_q_tools = classify_tools_by_responses(answered_questions, ref_nr_questions)
    clean_tools_df = get_unique_recent_samples(tools_df=clean_tools_df)
    print(
        "Current count of answered questions per tool after selecting the global population:"
    )
    print(clean_tools_df.groupby("tool")["title"].nunique())
    # go for historical data if needed up to a maximum of MAX_ATTEMPTS attempts.
    # BUGFIX: updated_tools must exist even when the loop body never runs.
    updated_tools: List[str] = []
    nr_attempts = 0
    client = initialize_client()
    while len(more_q_tools) > 0 and nr_attempts < MAX_ATTEMPTS:
        nr_attempts += 1
        print(f"Attempt {nr_attempts} to reach the reference number of questions")
        clean_tools_df, updated_tools = check_historical_samples(
            client=client,
            tools_df=clean_tools_df,
            more_questions_tools=more_q_tools,
            ref_nr_questions=ref_nr_questions,
            attempt_nr=nr_attempts,
        )
        print(f"Tools that were completed with historical data {updated_tools}")
        print(f"More tools with not enough questions {more_q_tools}")
    # save cleaned tools df into a parquet file (best-effort)
    try:
        if "request_block" in clean_tools_df.columns:
            clean_tools_df["request_block"] = pd.to_numeric(
                clean_tools_df["request_block"], errors="coerce"
            ).astype("Int64")
        clean_tools_df.to_parquet(TMP_DIR / "clean_tools.parquet", index=False)
    except Exception as e:
        print(f"Error saving clean tools parquet file: {e}")
    print(
        "Current count of answered questions per tool after reading the parquet file:"
    )
    print(
        clean_tools_df.groupby("tool")["title"].nunique().sort_values(ascending=False)
    )
    # abort when historical data could not complete every tool
    if len(more_q_tools) > 0:
        raise ValueError(
            f"Not enough data for the tools: {more_q_tools}. "
            "Please check the historical data or increase the number of attempts."
        )
    # take the name of the last tool in the completed_tools list
    if len(updated_tools) > 0:
        last_tool = updated_tools[-1]
        print(f"last tool with enough questions: {last_tool}")
    else:
        # the last tool from the one with the lowest count of titles
        last_tool = (
            clean_tools_df.groupby("tool")["title"]
            .nunique()
            .sort_values(ascending=True)
            .index[0]
        )
    # Remove non relevant tools
    clean_tools_df = clean_tools_df[clean_tools_df["tool"].isin(INC_TOOLS)]
    # take only the most recent NR_ANSWERED_MARKETS rows for the last tool
    last_tool_data = clean_tools_df[clean_tools_df["tool"] == last_tool].copy()
    last_tool_data = last_tool_data.sort_values(by="request_time", ascending=False)
    last_tool_data = last_tool_data.head(NR_ANSWERED_MARKETS)
    print("Extracting the final list of market questions from the last tool")
    # extract the title values in last_tool_data
    last_tool_titles = last_tool_data.title.unique()
    print("Current count of answered questions per tool before filtering:")
    print(clean_tools_df.groupby("tool")["title"].nunique())
    common_titles = []
    for tool in clean_tools_df.tool.unique():
        tool_data = clean_tools_df[clean_tools_df["tool"] == tool]
        tool_titles_set = set(
            tool_data[tool_data["title"].isin(last_tool_titles)]["title"].unique()
        )
        print(f"Tool: {tool}, Count of titles: {len(tool_titles_set)}")
        common_titles.append(tool_titles_set)
    # create a list with the titles that appear in all sets
    common_titles_set = set.intersection(*common_titles)
    print(f"Common titles across all tools: {len(common_titles_set)}")
    # filter clean_tools_df to include only the titles from the common set
    clean_tools_df = clean_tools_df[clean_tools_df["title"].isin(common_titles_set)]
    print(
        "Current count of answered questions per tool after selecting common titles:"
    )
    print(clean_tools_df.groupby("tool")["title"].nunique())
    # compute the accuracy
    print("Computing the global accuracies for the tools")
    # NOTE(review): this hard-coded list overrides the computed more_q_tools,
    # forcing DEFAULT_ACCURACY on these two tools — confirm this is intended.
    global_accuracies = get_accuracy_values(
        tools_df=clean_tools_df,
        more_q_tools=["prediction-offline-sme", "prediction-url-cot-claude"],
    )
    # new tools: assign them the average accuracy of the established tools.
    # Unreachable in practice (we raise above when more_q_tools is non-empty),
    # but BUGFIX anyway: the key is "tool_accuracy" (not "accuracy"), and
    # global_accuracies is a list of dicts, not indexable by tool name.
    if len(more_q_tools) > 0:
        total_accuracy = sum(item["tool_accuracy"] for item in global_accuracies)
        avg_accuracy = (
            round(total_accuracy / len(global_accuracies), 5)
            if len(global_accuracies) > 0
            else DEFAULT_ACCURACY
        )
        for entry in global_accuracies:
            if entry["tool"] in more_q_tools:
                entry["tool_accuracy"] = avg_accuracy
    print(f"global accuracies {global_accuracies}")
    # create a dataframe from global_accuracies
    computed_accuracy_df = pd.DataFrame(global_accuracies)
    # sort by accuracy descending
    computed_accuracy_df = computed_accuracy_df.sort_values(
        by="tool_accuracy", ascending=False, ignore_index=True
    )
    print(computed_accuracy_df.head())
    print("Saving into a csv file")
    computed_accuracy_df.to_csv(ROOT_DIR / ACCURACY_FILENAME, index=False)
    # save the data into IPFS
    push_csv_file_to_ipfs()


if __name__ == "__main__":
    global_tool_accuracy()