cyberosa committed on
Commit
d4de675
·
1 Parent(s): 02cbfc5

updating tools accuracy computation and csv file

Browse files
Files changed (2) hide show
  1. scripts/global_tool_accuracy.py +96 -23
  2. tools_accuracy.csv +13 -13
scripts/global_tool_accuracy.py CHANGED
@@ -15,6 +15,9 @@ from cloud_storage import (
15
  FILES_IN_TEN_MONTHS,
16
  )
17
 
 
 
 
18
  MAX_ATTEMPTS = 5
19
  historical_files_count_map = {
20
  1: FILES_IN_TWO_MONTHS,
@@ -23,6 +26,23 @@ historical_files_count_map = {
23
  4: FILES_IN_EIGHT_MONTHS,
24
  5: FILES_IN_TEN_MONTHS,
25
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
 
28
  def push_csv_file_to_ipfs(filename: str = ACCURACY_FILENAME) -> str:
@@ -60,19 +80,6 @@ def take_toptool_name(tools_df: pd.DataFrame) -> str:
60
  return volumes.iloc[0].tool
61
 
62
 
63
- def keep_last_answer_per_question_per_tool(clean_tools_df: pd.DataFrame) -> None:
64
- for tool in INC_TOOLS:
65
- print(f"checking answers from tool {tool}")
66
- tool_data = clean_tools_df[clean_tools_df["tool"] == tool]
67
- # sort tool_data by request date in ascending order
68
- tool_data = tool_data.sort_values(by="request_time", ascending=True)
69
-
70
- unique_questions = tool_data.title.unique()
71
- for question in unique_questions:
72
- market_data = tool_data[tool_data["title"] == question]
73
- market_data = market_data.sort_values(by="request_time", ascending=True)
74
-
75
-
76
  def compute_nr_questions_per_tool(clean_tools_df: pd.DataFrame) -> dict:
77
  answered_questions = {}
78
 
@@ -111,7 +118,7 @@ def classify_tools_by_responses(
111
  more_questions_tools = []
112
  total_tools = answered_questions.keys()
113
  for tool in total_tools:
114
- if answered_questions[tool] >= ref_nr_questions:
115
  enough_questions_tools.append(tool)
116
  else:
117
  more_questions_tools.append(tool)
@@ -153,7 +160,11 @@ def add_historical_data(
153
  new_count = answered_questions[tool]["total_answered_questions"]
154
  if new_count >= recent_nr_questions:
155
  completed_tools.append(tool)
156
- # TODO remove the tools in completed_tools list from more_questions_tools
 
 
 
 
157
  return tools_df
158
 
159
 
@@ -166,7 +177,7 @@ def check_historical_samples(
166
  ) -> Tuple:
167
  """
168
  Function to download historical data from tools and to update the list
169
- of tools that need more questions. It returns a list of the tools that we
170
  managed to complete the requirement
171
  """
172
  print(f"Tools with not enough samples: {more_questions_tools}")
@@ -184,10 +195,47 @@ def check_historical_samples(
184
  ref_nr_questions,
185
  completed_tools,
186
  )
187
- # TODO for each tool in tools_df, take the last answer only for each question based on request_time
 
188
  return tools_df, completed_tools
189
 
190
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191
  def global_tool_accuracy():
192
  # read the tools df
193
  print("Reading tools parquet file")
@@ -201,15 +249,15 @@ def global_tool_accuracy():
201
 
202
  # extract the number of questions answered from the top tool
203
  answered_questions = compute_nr_questions_per_tool(clean_tools_df=clean_tools_df)
204
- ref_nr_questions = answered_questions[top_tool]["total_answered_questions"]
205
 
206
  # classify tools between those with enough questions and those that need more data
207
  enough_q_tools, more_q_tools = classify_tools_by_responses(
208
  answered_questions, ref_nr_questions
209
  )
210
 
211
- # TODO for each tool in clean_tools_df, take the last answer only for each question based on request_time
212
-
213
  # go for historical data if needed up to a maximum of 5 attempts
214
  nr_attempts = 0
215
  client = initialize_client()
@@ -218,13 +266,38 @@ def global_tool_accuracy():
218
  print(f"Attempt {nr_attempts} to reach the reference number of questions")
219
  clean_tools_df, updated_tools = check_historical_samples(
220
  client=client,
221
- tools_df=tools_df,
222
  more_questions_tools=more_q_tools,
223
  ref_nr_questions=ref_nr_questions,
224
  attempt_nr=nr_attempts,
225
  )
226
- print(f"Updated tools {updated_tools}")
227
- print(f"more tools with missing data {more_q_tools}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
228
 
229
 
230
  if __name__ == "__main__":
 
15
  FILES_IN_TEN_MONTHS,
16
  )
17
 
18
+ ACCURACY_FILENAME = "tools_accuracy.csv"
19
+ IPFS_SERVER = "/dns/registry.autonolas.tech/tcp/443/https"
20
+ NR_ANSWERED_MARKETS = 800 # In two months the max we can reach is 1000 for top tools
21
  MAX_ATTEMPTS = 5
22
  historical_files_count_map = {
23
  1: FILES_IN_TWO_MONTHS,
 
26
  4: FILES_IN_EIGHT_MONTHS,
27
  5: FILES_IN_TEN_MONTHS,
28
  }
29
+ DEFAULT_ACCURACY = 0.50
30
+
31
+
32
def keep_last_answer_per_question_per_tool(
    clean_tools_df: pd.DataFrame,
) -> pd.DataFrame:
    """
    For each tool, keep only the last answer for each question (title) based on request_time.
    Returns a filtered DataFrame.
    """
    # Order rows so that, within every (tool, title) pair, the most recent
    # request_time comes last — then deduplicate keeping that last row.
    ordered = clean_tools_df.sort_values(by=["tool", "title", "request_time"])
    deduped = ordered.drop_duplicates(subset=["tool", "title"], keep="last")
    # Fresh 0..n-1 index for the filtered frame
    return deduped.reset_index(drop=True)
46
 
47
 
48
  def push_csv_file_to_ipfs(filename: str = ACCURACY_FILENAME) -> str:
 
80
  return volumes.iloc[0].tool
81
 
82
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  def compute_nr_questions_per_tool(clean_tools_df: pd.DataFrame) -> dict:
84
  answered_questions = {}
85
 
 
118
  more_questions_tools = []
119
  total_tools = answered_questions.keys()
120
  for tool in total_tools:
121
+ if answered_questions[tool]["total_answered_questions"] >= ref_nr_questions:
122
  enough_questions_tools.append(tool)
123
  else:
124
  more_questions_tools.append(tool)
 
160
  new_count = answered_questions[tool]["total_answered_questions"]
161
  if new_count >= recent_nr_questions:
162
  completed_tools.append(tool)
163
+ # remove the tools in completed_tools list from more_questions_tools
164
+ for tool in completed_tools:
165
+ print(f"Tool {tool} with enough questions now, removing from list")
166
+ if tool in more_questions_tools:
167
+ more_questions_tools.remove(tool)
168
  return tools_df
169
 
170
 
 
177
  ) -> Tuple:
178
  """
179
  Function to download historical data from tools and to update the list
180
+ of tools that need more questions. It returns the updated dataframe and a list of the tools that we
181
  managed to complete the requirement
182
  """
183
  print(f"Tools with not enough samples: {more_questions_tools}")
 
195
  ref_nr_questions,
196
  completed_tools,
197
  )
198
+ # for each tool in tools_df, take the last answer only for each question ("title" column) based on request_time column
199
+ tools_df = keep_last_answer_per_question_per_tool(tools_df)
200
  return tools_df, completed_tools
201
 
202
 
203
def get_accuracy_values(tools_df: pd.DataFrame, more_q_tools: list) -> list:
    """
    Build one accuracy record per tool found in tools_df.

    Tools listed in more_q_tools (not enough samples yet) get accuracy=None;
    every other tool gets its win rate, floored at DEFAULT_ACCURACY.
    Each record carries the tool's first/last request timestamps.
    """
    global_accuracies = []
    for tool_name in tools_df.tool.unique():
        subset = tools_df[tools_df["tool"] == tool_name]
        record = {
            "tool": tool_name,
            "accuracy": None,
            "nr_responses": NR_ANSWERED_MARKETS,
            "min": subset.request_time.min().strftime("%Y-%m-%d %H:%M:%S"),
            "max": subset.request_time.max().strftime("%Y-%m-%d %H:%M:%S"),
        }
        if tool_name not in more_q_tools:
            # win column == 1 is a correct answer
            correct = int(subset.win.sum())
            # no values under 50%
            record["accuracy"] = max(
                DEFAULT_ACCURACY, round(correct / len(subset), 5)
            )
        global_accuracies.append(record)
    return global_accuracies
237
+
238
+
239
  def global_tool_accuracy():
240
  # read the tools df
241
  print("Reading tools parquet file")
 
249
 
250
  # extract the number of questions answered from the top tool
251
  answered_questions = compute_nr_questions_per_tool(clean_tools_df=clean_tools_df)
252
+ ref_nr_questions = NR_ANSWERED_MARKETS
253
 
254
  # classify tools between those with enough questions and those that need more data
255
  enough_q_tools, more_q_tools = classify_tools_by_responses(
256
  answered_questions, ref_nr_questions
257
  )
258
 
259
+ # for each tool in clean_tools_df, take the last answer only for each question ("title" column) based on request_time column
260
+ clean_tools_df = keep_last_answer_per_question_per_tool(clean_tools_df)
261
  # go for historical data if needed up to a maximum of 5 attempts
262
  nr_attempts = 0
263
  client = initialize_client()
 
266
  print(f"Attempt {nr_attempts} to reach the reference number of questions")
267
  clean_tools_df, updated_tools = check_historical_samples(
268
  client=client,
269
+ tools_df=clean_tools_df,
270
  more_questions_tools=more_q_tools,
271
  ref_nr_questions=ref_nr_questions,
272
  attempt_nr=nr_attempts,
273
  )
274
+ print(f"Tools that were completed with historical data {updated_tools}")
275
+ print(f"More tools with not enough questions {more_q_tools}")
276
+
277
+ # compute the accuracy
278
+ global_accuracies = get_accuracy_values(
279
+ tools_df=clean_tools_df, more_q_tools=more_q_tools
280
+ )
281
+ # new tools + not enough samples
282
+ if len(more_q_tools) > 0:
283
+ # compute the average accuracy for the new tools
284
+ total_accuracy = sum(item["accuracy"] for item in global_accuracies)
285
+ avg_accuracy = (
286
+ round(total_accuracy / len(global_accuracies), 5)
287
+ if len(global_accuracies) > 0
288
+ else DEFAULT_ACCURACY
289
+ )
290
+ for tool in more_q_tools:
291
+ global_accuracies[tool]["accuracy"] = avg_accuracy
292
+
293
+ print(f"global accuracies {global_accuracies}")
294
+ # create a dataframe from global_accuracies
295
+ computed_accuracy_df = pd.DataFrame(global_accuracies)
296
+ print(computed_accuracy_df.head())
297
+ print("Saving into a csv file")
298
+ computed_accuracy_df.to_csv(ROOT_DIR / ACCURACY_FILENAME, index=False)
299
+ # save the data into IPFS
300
+ # push_csv_file_to_ipfs()
301
 
302
 
303
  if __name__ == "__main__":
tools_accuracy.csv CHANGED
@@ -1,13 +1,13 @@
1
- tool,tool_accuracy,total_requests,min,max
2
- prediction-offline,61.97,500,2025-06-03 00:00:05,2025-08-03 23:44:55
3
- prediction-online-sme,52.33,500,2025-06-03 00:04:30,2025-08-03 22:49:45
4
- prediction-online,59.38,500,2025-06-03 00:00:05,2025-08-03 22:31:35
5
- prediction-request-reasoning,58.02,500,2025-06-03 00:00:30,2025-08-03 23:44:40
6
- claude-prediction-offline,57.92,500,2025-06-06 00:13:05,2025-08-03 21:46:10
7
- claude-prediction-online,57.92,500,2025-06-11 07:23:05,2025-08-03 23:02:15
8
- superforcaster,57.92,500,2025-06-03 01:15:10,2025-08-03 22:50:05
9
- prediction-request-reasoning-claude,57.92,500,2025-06-16 11:02:15,2025-08-03 22:59:10
10
- prediction-request-rag-claude,57.92,500,2025-06-03 17:51:10,2025-08-03 22:52:10
11
- prediction-offline-sme,57.92,500,2025-06-03 11:55:10,2025-08-02 07:55:50
12
- prediction-url-cot-claude,57.92,500,2025-06-12 20:36:25,2025-07-27 08:15:15
13
- prediction-request-rag,57.92,500,2025-06-03 18:59:40,2025-08-03 21:23:40
 
1
+ tool,accuracy,nr_responses,min,max
2
+ claude-prediction-offline,0.59785,800,2025-06-11 22:43:20,2025-08-10 23:28:40
3
+ claude-prediction-online,0.57552,800,2025-06-12 07:00:35,2025-08-11 02:47:25
4
+ prediction-offline,0.65566,800,2025-06-10 19:23:15,2025-08-10 23:44:45
5
+ prediction-offline-sme,0.5,800,2025-03-18 00:34:45,2025-08-09 09:24:30
6
+ prediction-online,0.65628,800,2025-06-10 09:02:55,2025-08-10 23:30:00
7
+ prediction-online-sme,0.56816,800,2025-06-10 04:40:00,2025-08-10 23:39:40
8
+ prediction-request-rag,0.59852,800,2025-06-11 02:24:45,2025-08-10 23:38:35
9
+ prediction-request-rag-claude,0.5,800,2025-03-18 00:08:15,2025-08-10 19:05:15
10
+ prediction-request-reasoning,0.66068,800,2025-06-10 00:56:45,2025-08-11 02:47:50
11
+ prediction-request-reasoning-claude,0.5,800,2025-03-18 16:35:45,2025-08-10 22:41:50
12
+ prediction-url-cot-claude,0.5,800,2025-01-12 01:30:50,2025-07-27 08:15:15
13
+ superforcaster,0.66273,800,2025-06-10 02:00:30,2025-08-10 19:59:40