cyberosa committed
Commit eb8aafc · 1 Parent(s): e219afb

updating weekly data

active_traders.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7727e9fbae3a653b41f9192248638ce7b7ac8b533310f061a29512e573218821
-size 17602207
+oid sha256:d850739f444bb9337e9209e0e9621ceda5ae4c6f8f227ffcdf053cc76c22fdaf
+size 16389159
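Note on the entries in this commit: every parquet/pkl file in the repo is stored via Git LFS, so each diff shows only the pointer file changing, i.e. the object hash and byte size of the new weekly snapshot. For reference, a complete LFS pointer file is just these three lines (values taken from the new version of active_traders.parquet above):

version https://git-lfs.github.com/spec/v1
oid sha256:d850739f444bb9337e9209e0e9621ceda5ae4c6f8f227ffcdf053cc76c22fdaf
size 16389159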
all_trades_profitability.parquet.gz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b8bbfb567e9806ae7997948e3ee71b4290e65ef030380444c2f8512a7657de57
-size 18568500
+oid sha256:e9a2f70b74be670b84461caf773c34199c066c41aca4dca68c0f570f3a911d6b
+size 17759795
closed_market_metrics.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:272d78629ef4d784e7a467a82ca1993ae867d6fb2fb17ee77d911e48fdbd3c36
-size 154444
+oid sha256:3300c5cb6fd336357991b0897bc22b3b201da5ac978e4b210c11ebdaf2c12fe8
+size 155930
closed_markets_div.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:60360de64451798ed9912159646a76b49ff12f512de35ea273c34a43905ce457
-size 89956
+oid sha256:170e7c48d104389d76844d21669751025c3477c1626ebbf7ff04f00c0a589029
+size 90284
daily_info.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d0a4e76bed48ed636d0c89ec84d498185334723e4ac46d96211d3382fec4315e
-size 4177342
+oid sha256:a84ecf43b7588282449ba622377bb8cb3f302680989b7d7f803dabfd62110fbe
+size 4355346
daily_mech_requests.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:201c9ac7225d22d0be03358c2a3081f8564d5144aae98e9178fe677ba3a9a992
-size 8176
+oid sha256:084a769b47ae76332f7fe8f6289041898b49884edb6b5b5a2c04765b05ed036a
+size 7946
daily_mech_requests_by_pearl_agents.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4049f30daf7cbead7673dc34d25a99185791c0e52933a6d49d970e3bdc9fd3fe
-size 4518
+oid sha256:5ef0d2ec6bf25d9764bb345eb38a3721e0772ffad87ff229dd18a0d27448a94c
+size 4347
error_by_markets.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f518dbd3033a1eab0373781417f480396e4c83698a33e39ccc636d0720171b75
-size 11916
+oid sha256:87488e719ad9e43f2114aae23ae9d22c383cd70754001e2d1bd6297759b0da37
+size 11793
errors_by_mech.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a3c31fd76e9df1b93bc1d9fed3c94c46ae2c0b6ce83ddd2a51abdf2aa79ce8aa
-size 6110
+oid sha256:890fcc3f2330c1cb55c0d767264e09445783ee3cf9836e1e1a709b2305a4757a
+size 6114
invalid_trades.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a4683a24a13839ab001ca8bfe15991f0a496dbd21385f6188808f67032f6f42b
-size 306391
+oid sha256:b493859d8ada47fa68ee286e03dcb1d1e2bb8c578e39121ed0c49e7171474e7f
+size 263410
latest_result_DAA_Pearl.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0e832c9c43b641ef6bc14921c11f893a86cd37f0d5eb5058603e2e8b6359fdf4
-size 5571
+oid sha256:823b220ddf235c79c3b2cd74991db666a5d58ebf4acc21c3afeadd74012b17a2
+size 5667
latest_result_DAA_QS.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1133eeb1575145c40dc3880fe477d7526b5c743ffb519ff3cb6f6400ff1d2bac
-size 6228
+oid sha256:d752333680a50caa1dcd96186b6e2bf9669a2792a114e46b788d67a3ac026f41
+size 6298
pearl_agents.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:85b35dc498ad596d0ee0c26145f757f24b9bc65e37440770ba389a7fd6e50ed9
-size 47542
+oid sha256:596c31bf6e5b66a4bc144a02b92be5c5dc8c9be81038fed8174f283d47fc38bf
+size 47869
retention_activity.parquet.gz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c18b461909c4edad27cdae323194baaabe3b8be901d9d9afa7ab0493bf15dfa3
-size 9546971
+oid sha256:fdabb575fa84e470fde74908a1e05a5fe81f7119fbe1aeeee54cbbde094c4b8e
+size 4106895
scripts/global_tool_accuracy.py CHANGED
@@ -17,7 +17,7 @@ from cloud_storage import (

 ACCURACY_FILENAME = "tools_accuracy.csv"
 IPFS_SERVER = "/dns/registry.autonolas.tech/tcp/443/https"
-NR_ANSWERED_MARKETS = 800  # In two months the max we can reach is 1000 for top tools
+NR_ANSWERED_MARKETS = 1000  # In two months the max we can reach is 1000 for top tools
 MAX_ATTEMPTS = 5
 historical_files_count_map = {
     1: FILES_IN_TWO_MONTHS,
@@ -27,6 +27,7 @@ historical_files_count_map = {
     5: FILES_IN_TEN_MONTHS,
 }
 DEFAULT_ACCURACY = 0.50
+DEFAULT_BAD_ACCURACY = 0.00


 def keep_last_answer_per_question_per_tool(
@@ -314,8 +315,6 @@ def get_accuracy_values(tools_df: pd.DataFrame, more_q_tools: list) -> list:
     # print("Using evenly distributed sampling for accuracy computation")
     for tool in tools:
         print(f"Processing tool: {tool}")
-        if tool == "prediction-offline-sme" or tool == "prediction-url-cot-claude":
-            continue
         tools_data = tools_df[tools_df["tool"] == tool]
         min_timestamp = tools_data.request_time.min().strftime("%Y-%m-%d %H:%M:%S")
         max_timestamp = tools_data.request_time.max().strftime("%Y-%m-%d %H:%M:%S")
@@ -323,7 +322,7 @@ def get_accuracy_values(tools_df: pd.DataFrame, more_q_tools: list) -> list:
             global_accuracies.append(
                 {
                     "tool": tool,
-                    "accuracy": None,
+                    "accuracy": DEFAULT_ACCURACY,
                     "nr_responses": NR_ANSWERED_MARKETS,
                     "min": min_timestamp,
                     "max": max_timestamp,
@@ -332,14 +331,14 @@ def get_accuracy_values(tools_df: pd.DataFrame, more_q_tools: list) -> list:
             continue

         # tool_accuracy = sampled_accuracy(tools_data, sampling_percentage=0.50)
-        tool_accuracy = sampled_accuracy(tools_data, n=500)
+        # tool_accuracy = sampled_accuracy(tools_data, n=500)

         # sampled_data = evenly_distributed_sampling(
         #     tools_data, group_size=5, sampling_percentage=0.30
         # )
         # print(f"length of sampled data for tool {tool}: {len(sampled_data)}")
-        # correct_answers = int(tools_data.win.sum())
-        # tool_accuracy = round(correct_answers / len(tools_data), 5)
+        correct_answers = int(tools_data.win.sum())
+        tool_accuracy = round(correct_answers / len(tools_data), 5)

         global_accuracies.append(
             {
@@ -355,56 +354,56 @@

 def global_tool_accuracy():
     # read the tools df
-    # print("Reading tools parquet file")
-    # tools_df = pd.read_parquet(TMP_DIR / "tools.parquet")
-
-    # # clean the tools df
-    # clean_tools_df = clean_tools_dataset(tools_df)
-    # print("Current count of answered questions per tool after cleaning:")
-    # print(clean_tools_df.groupby("tool")["title"].nunique())
-
-    # # extract the number of questions answered from the top tool
-    # answered_questions = compute_nr_questions_per_tool(clean_tools_df=clean_tools_df)
-    # ref_nr_questions = NR_ANSWERED_MARKETS
-
-    # # classify tools between those with enough questions and those that need more data
-    # more_q_tools = classify_tools_by_responses(answered_questions, ref_nr_questions)
-
-    # clean_tools_df = get_unique_recent_samples(tools_df=clean_tools_df)
-
-    # print(
-    #     "Current count of answered questions per tool after selecting the global population:"
-    # )
-    # print(clean_tools_df.groupby("tool")["title"].nunique())
-
-    # # go for historical data if needed up to a maximum of 5 attempts
-    # nr_attempts = 0
-    # client = initialize_client()
-    # while len(more_q_tools) > 0 and nr_attempts < MAX_ATTEMPTS:
-    #     nr_attempts += 1
-    #     print(f"Attempt {nr_attempts} to reach the reference number of questions")
-    #     clean_tools_df, updated_tools = check_historical_samples(
-    #         client=client,
-    #         tools_df=clean_tools_df,
-    #         more_questions_tools=more_q_tools,
-    #         ref_nr_questions=ref_nr_questions,
-    #         attempt_nr=nr_attempts,
-    #     )
-    #     print(f"Tools that were completed with historical data {updated_tools}")
-    #     print(f"More tools with not enough questions {more_q_tools}")
-
-    # # save cleaned tools df into a parquet file
-    # try:
-    #     if "request_block" in clean_tools_df.columns:
-    #         clean_tools_df["request_block"] = pd.to_numeric(
-    #             clean_tools_df["request_block"], errors="coerce"
-    #         ).astype("Int64")
-    #     clean_tools_df.to_parquet(TMP_DIR / "clean_tools.parquet", index=False)
-    # except Exception as e:
-    #     print(f"Error saving clean tools parquet file: {e}")
+    print("Reading tools parquet file")
+    tools_df = pd.read_parquet(TMP_DIR / "tools.parquet")
+
+    # clean the tools df
+    clean_tools_df = clean_tools_dataset(tools_df)
+    print("Current count of answered questions per tool after cleaning:")
+    print(clean_tools_df.groupby("tool")["title"].nunique())
+
+    # extract the number of questions answered from each tool
+    answered_questions = compute_nr_questions_per_tool(clean_tools_df=clean_tools_df)
+    ref_nr_questions = NR_ANSWERED_MARKETS
+
+    # classify tools between those with enough questions and those that need more data
+    more_q_tools = classify_tools_by_responses(answered_questions, ref_nr_questions)
+
+    clean_tools_df = get_unique_recent_samples(tools_df=clean_tools_df)
+
+    print(
+        "Current count of answered questions per tool after selecting the global population:"
+    )
+    print(clean_tools_df.groupby("tool")["title"].nunique())
+
+    # go for historical data if needed up to a maximum of 5 attempts
+    nr_attempts = 0
+    client = initialize_client()
+    while len(more_q_tools) > 0 and nr_attempts < MAX_ATTEMPTS:
+        nr_attempts += 1
+        print(f"Attempt {nr_attempts} to reach the reference number of questions")
+        clean_tools_df, updated_tools = check_historical_samples(
+            client=client,
+            tools_df=clean_tools_df,
+            more_questions_tools=more_q_tools,
+            ref_nr_questions=ref_nr_questions,
+            attempt_nr=nr_attempts,
+        )
+        print(f"Tools that were completed with historical data {updated_tools}")
+        print(f"More tools with not enough questions {more_q_tools}")
+
+    # save cleaned tools df into a parquet file
+    try:
+        if "request_block" in clean_tools_df.columns:
+            clean_tools_df["request_block"] = pd.to_numeric(
+                clean_tools_df["request_block"], errors="coerce"
+            ).astype("Int64")
+        clean_tools_df.to_parquet(TMP_DIR / "clean_tools.parquet", index=False)
+    except Exception as e:
+        print(f"Error saving clean tools parquet file: {e}")

     # read the cleaned tools df
-    clean_tools_df = pd.read_parquet(TMP_DIR / "clean_tools.parquet")
+    # clean_tools_df = pd.read_parquet(TMP_DIR / "clean_tools.parquet")

     print(
         "Current count of answered questions per tool after reading the parquet file:"
@@ -413,86 +412,88 @@ def global_tool_accuracy():
         clean_tools_df.groupby("tool")["title"].nunique().sort_values(ascending=False)
     )
     # check the last tool
-    # if len(more_q_tools) > 0:
-    #     raise ValueError(
-    #         f"Not enough data for the tools: {more_q_tools}. "
-    #         "Please check the historical data or increase the number of attempts."
-    #     )
+    if len(more_q_tools) > 0:
+        raise ValueError(
+            f"Not enough data for the tools: {more_q_tools}. "
+            "Please check the historical data or increase the number of attempts."
+        )

     # take the name of the last tool in the completed_tools list
-    # if len(updated_tools) > 0:
-    #     last_tool = updated_tools[-1]
-    #     print(f"last tool with enough questions: {last_tool}")
-    # else:
-    #     # the last tool from the one with the lowest count of titles
-    #     last_tool = (
-    #         clean_tools_df.groupby("tool")["title"]
-    #         .nunique()
-    #         .sort_values(ascending=True)
-    #         .index[0]
-    #     )
-    # last_tool = "prediction-request-reasoning-claude"
-    # # take only the last recent_samples_size for the last tool
-    # last_tool_data = clean_tools_df[clean_tools_df["tool"] == last_tool].copy()
-    # # sort by request_time in descending order
-    # last_tool_data = last_tool_data.sort_values(by="request_time", ascending=False)
-    # # take the last recent_samples_size rows
-    # last_tool_data = last_tool_data.head(NR_ANSWERED_MARKETS)
-    # print("Extracting the final list of market questions from the last tool")
-    # # extract the title values in last_tool_data
-    # last_tool_titles = last_tool_data.title.unique()
-    # print("Current count of answered questions per tool before filtering:")
-    # print(clean_tools_df.groupby("tool")["title"].nunique())
-    # common_titles = []
-    # for tool in clean_tools_df.tool.unique():
-    #     if tool in INC_TOOLS == False:
-    #         continue
-    #     # if tool == "prediction-request-reasoning-claude":
-    #     #     continue  # skip the last tool as we already have its titles
-    #     tool_data = clean_tools_df[clean_tools_df["tool"] == tool]
-    #     tool_titles_set = set(
-    #         tool_data[tool_data["title"].isin(last_tool_titles)]["title"].unique()
-    #     )
-    #     print(f"Tool: {tool}, Count of titles: {len(tool_titles_set)}")
-    #     common_titles.append(tool_titles_set)
-
-    # # create a list with the titles that appear in all sets
-    # common_titles_set = set.intersection(*common_titles)
+    if len(updated_tools) > 0:
+        last_tool = updated_tools[-1]
+        print(f"last tool with enough questions: {last_tool}")
+    else:
+        # the last tool from the one with the lowest count of titles
+        last_tool = (
+            clean_tools_df.groupby("tool")["title"]
+            .nunique()
+            .sort_values(ascending=True)
+            .index[0]
+        )
+
+    # Remove non relevant tools
+    clean_tools_df = clean_tools_df[clean_tools_df["tool"].isin(INC_TOOLS) == True]
+    # take only the last recent_samples_size for the last tool
+    last_tool_data = clean_tools_df[clean_tools_df["tool"] == last_tool].copy()
+    # sort by request_time in descending order
+    last_tool_data = last_tool_data.sort_values(by="request_time", ascending=False)
+    # take the last recent_samples_size rows
+    last_tool_data = last_tool_data.head(NR_ANSWERED_MARKETS)
+    print("Extracting the final list of market questions from the last tool")
+    # extract the title values in last_tool_data
+    last_tool_titles = last_tool_data.title.unique()
+    print("Current count of answered questions per tool before filtering:")
+    print(clean_tools_df.groupby("tool")["title"].nunique())
+    common_titles = []
+    for tool in clean_tools_df.tool.unique():
+        tool_data = clean_tools_df[clean_tools_df["tool"] == tool]
+        tool_titles_set = set(
+            tool_data[tool_data["title"].isin(last_tool_titles)]["title"].unique()
+        )
+        print(f"Tool: {tool}, Count of titles: {len(tool_titles_set)}")
+        common_titles.append(tool_titles_set)
+
+    # create a list with the titles that appear in all sets
+    common_titles_set = set.intersection(*common_titles)
+    print(f"Common titles across all tools: {len(common_titles_set)}")

     # filter clean_tools_df to include only the titles from the common set
-    # clean_tools_df = clean_tools_df[clean_tools_df["title"].isin(common_titles_set)]
+    clean_tools_df = clean_tools_df[clean_tools_df["title"].isin(common_titles_set)]

-    # getting the most NR_ANSWERED_MARKETS answered questions per tool
-    clean_tools_df = get_unique_recent_samples(
-        clean_tools_df, recent_samples_size=NR_ANSWERED_MARKETS
+    print(
+        f"Current count of answered questions per tool after selecting common titles:"
     )
-    # print(
-    #     f"Current count of answered questions per tool after selecting common titles {len(common_titles_set)}:"
-    # )
     print(clean_tools_df.groupby("tool")["title"].nunique())
     # compute the accuracy
     print("Computing the global accuracies for the tools")
-    global_accuracies = get_accuracy_values(tools_df=clean_tools_df, more_q_tools=[])
+    global_accuracies = get_accuracy_values(
+        tools_df=clean_tools_df,
+        more_q_tools=["prediction-offline-sme", "prediction-url-cot-claude"],
+    )
     # new tools
-    # if len(more_q_tools) > 0:
-    #     # compute the average accuracy for the new tools
-    #     total_accuracy = sum(item["accuracy"] for item in global_accuracies)
-    #     avg_accuracy = (
-    #         round(total_accuracy / len(global_accuracies), 5)
-    #         if len(global_accuracies) > 0
-    #         else DEFAULT_ACCURACY
-    #     )
-    #     for tool in more_q_tools:
-    #         global_accuracies[tool]["accuracy"] = avg_accuracy
+    if len(more_q_tools) > 0:
+        # compute the average accuracy for the new tools
+        total_accuracy = sum(item["accuracy"] for item in global_accuracies)
+        avg_accuracy = (
+            round(total_accuracy / len(global_accuracies), 5)
+            if len(global_accuracies) > 0
+            else DEFAULT_ACCURACY
+        )
+        for tool in more_q_tools:
+            global_accuracies[tool]["accuracy"] = avg_accuracy

     print(f"global accuracies {global_accuracies}")
     # create a dataframe from global_accuracies
     computed_accuracy_df = pd.DataFrame(global_accuracies)
+    # sort by accuracy descending
+    computed_accuracy_df = computed_accuracy_df.sort_values(
+        by="accuracy", ascending=False, ignore_index=True
+    )
     print(computed_accuracy_df.head())
     print("Saving into a csv file")
     computed_accuracy_df.to_csv(ROOT_DIR / ACCURACY_FILENAME, index=False)
     # save the data into IPFS
-    # push_csv_file_to_ipfs()
+    push_csv_file_to_ipfs()


 if __name__ == "__main__":
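Net effect of the hunks above: accuracy is no longer estimated via sampled_accuracy(tools_data, n=500); it is now the plain win rate computed over the set of market questions shared by every included tool. A minimal standalone sketch of that logic, assuming a frame with the script's tool/title/win columns (the example frame itself is illustrative, not real data):

import pandas as pd

# illustrative stand-in for clean_tools.parquet: one row per (tool, question)
tools_df = pd.DataFrame(
    {
        "tool": ["t1", "t1", "t2", "t2", "t2"],
        "title": ["q1", "q2", "q1", "q2", "q3"],
        "win": [1, 0, 1, 1, 0],
    }
)

# titles answered by every tool (the set.intersection step in the diff)
common_titles = set.intersection(
    *(set(group["title"]) for _, group in tools_df.groupby("tool"))
)
tools_df = tools_df[tools_df["title"].isin(common_titles)]

# plain win-rate accuracy per tool, as in the newly enabled lines
for tool, tools_data in tools_df.groupby("tool"):
    correct_answers = int(tools_data.win.sum())
    tool_accuracy = round(correct_answers / len(tools_data), 5)
    print(tool, tool_accuracy)

Restricting to the common titles before computing the win rate keeps the per-tool accuracies comparable, since every tool is then graded on the same questions.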
scripts/pull_data.py CHANGED
@@ -22,7 +22,7 @@ from get_mech_info import (
     get_mech_events_since_last_run,
     update_json_files,
 )
-from update_tools_accuracy import compute_tools_accuracy
+from global_tool_accuracy import global_tool_accuracy
 from cleaning_old_info import clean_old_data_from_parquet_files
 from web3_utils import get_timestamp_two_weeks_ago
 from manage_space_files import move_files
@@ -136,16 +136,17 @@ def only_new_weekly_analysis():

     save_historical_data()
     try:
-        clean_old_data_from_parquet_files("2025-06-10")
+        clean_old_data_from_parquet_files("2025-06-19")
         clean_old_data_from_json_files()
     except Exception as e:
         print("Error cleaning the oldest information from parquet files")
         print(f"reason = {e}")
-    # compute_tools_accuracy()
+
     compute_tools_based_datasets()
     # move to tmp folder the new generated files
     move_files()
     prepare_daa_data()
+    global_tool_accuracy()
     logging.info("Weekly analysis files generated and saved")
scripts/utils.py CHANGED
@@ -41,11 +41,11 @@ INC_TOOLS = [
     "prediction-offline",
     "claude-prediction-online",
     "claude-prediction-offline",
-    "prediction-offline-sme",
+    # "prediction-offline-sme",
     "prediction-online-sme",
     "prediction-request-rag",
     "prediction-request-reasoning",
-    "prediction-url-cot-claude",
+    # "prediction-url-cot-claude",
     "prediction-request-rag-claude",
     "prediction-request-reasoning-claude",
     "superforcaster",
service_map.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8a8e1dc04106d040f27211a539da58807fe72cdb1142eef615bd2c32eb5ae770
-size 173007
+oid sha256:67cabc3346e428edad8119d8bdba4fde1190c39f50c5cc960977945bf62ba143
+size 173883
tools_accuracy.csv CHANGED
@@ -1,13 +1,11 @@
-tool,tool_accuracy,nr_responses,min,max
-claude-prediction-offline,0.59785,800,2025-06-11 22:43:20,2025-08-10 23:28:40
-claude-prediction-online,0.57552,800,2025-06-12 07:00:35,2025-08-11 02:47:25
-prediction-offline,0.65566,800,2025-06-10 19:23:15,2025-08-10 23:44:45
-prediction-offline-sme,0.5,800,2025-03-18 00:34:45,2025-08-09 09:24:30
-prediction-online,0.65628,800,2025-06-10 09:02:55,2025-08-10 23:30:00
-prediction-online-sme,0.56816,800,2025-06-10 04:40:00,2025-08-10 23:39:40
-prediction-request-rag,0.59852,800,2025-06-11 02:24:45,2025-08-10 23:38:35
-prediction-request-rag-claude,0.5,800,2025-03-18 00:08:15,2025-08-10 19:05:15
-prediction-request-reasoning,0.66068,800,2025-06-10 00:56:45,2025-08-11 02:47:50
-prediction-request-reasoning-claude,0.5,800,2025-03-18 16:35:45,2025-08-10 22:41:50
-prediction-url-cot-claude,0.5,800,2025-01-12 01:30:50,2025-07-27 08:15:15
-superforcaster,0.66273,800,2025-06-10 02:00:30,2025-08-10 19:59:40
+tool,accuracy,nr_responses,min,max
+prediction-request-reasoning,0.66411,521,2025-01-09 11:05:10,2025-08-19 22:53:50
+superforcaster,0.66027,521,2025-01-08 23:10:20,2025-08-19 18:43:45
+prediction-request-reasoning-claude,0.62572,521,2025-01-09 10:59:05,2025-08-19 15:25:30
+prediction-online,0.62188,521,2025-01-08 19:29:40,2025-08-20 01:06:25
+prediction-offline,0.60845,521,2025-01-08 20:59:40,2025-08-19 22:41:10
+claude-prediction-online,0.58541,521,2025-01-09 09:01:50,2025-08-19 23:40:20
+claude-prediction-offline,0.57965,521,2025-01-09 09:39:50,2025-08-19 23:13:00
+prediction-request-rag,0.57965,521,2025-01-08 23:18:55,2025-08-19 23:44:40
+prediction-request-rag-claude,0.57582,521,2025-01-07 19:31:55,2025-08-19 23:34:25
+prediction-online-sme,0.54894,521,2025-01-08 17:52:20,2025-08-19 22:33:25
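A quick sanity check one could run on the regenerated CSV, reflecting the renamed accuracy column and the new descending sort added in global_tool_accuracy.py (the relative path is an assumption):

import pandas as pd

df = pd.read_csv("tools_accuracy.csv")
assert "accuracy" in df.columns  # header renamed from tool_accuracy
# rows are written already sorted by accuracy, highest first
assert df["accuracy"].is_monotonic_decreasing
print(df[["tool", "accuracy", "nr_responses"]].head())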
traders_weekly_metrics.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:937a5868a8332291641aa2cfe7ab63877cfb83ea2479f8589ae65f9382cba87b
-size 196259
+oid sha256:f60dc21c00fe0ab69d7ec223193ca450dd8abc88b3d13b019eb59d8fa5f55c26
+size 189890
two_weeks_avg_roi_pearl_agents.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a7f09036eb0fe07cf48bf919a7e3127766b58f1634958f0a0bc90fcd85020513
-size 3057
+oid sha256:73d880c1af37f02e214f8a68d332ce4351f4080caf99651a37b034e9d1d913f1
+size 3045
unknown_traders.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ecf4b3d8852d5674b319d33d3e39c9feaeaf73524c55d6e878ccd75c830fc57c
-size 1415132
+oid sha256:b8c66af325ff60dc4406be8380da0ed9f518badac372831ecc3eab8a5c749244
+size 1448774
weekly_avg_roi_pearl_agents.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bec740a63395c83f61bb5ed1918e5f5cbfb33e1e273760928c1c020f107cc4f9
-size 2413
+oid sha256:ab1ad5dcd7bd80e54cd086775273c59037e02da57d84cdf5e9627ba2a3ef6a74
+size 2396
weekly_mech_calls.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6489039f9de5677e6448cbf1ae1dc59b9460ee4d42637461d9482481dd852695
-size 52100
+oid sha256:e70b88cd1fa079f73515c2bb6a909df694bc4f094ca5e17fa80d1905077d16c5
+size 51049
winning_df.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e8658874826dcfa2ebc690902a77745f329e550571655dba6981830a2ceb81b4
-size 12527
+oid sha256:1f1a1655b720483e0b2561e759ae6683fe6fb26c1a86ee60321982b1c74ca695
+size 12198