Jay-Rajput committed on
Commit
95c6c66
·
1 Parent(s): 3120659

using datasets

Browse files
Files changed (2) hide show
  1. app.py +15 -32
  2. requirements.txt +1 -0
app.py CHANGED
@@ -7,7 +7,7 @@ from datetime import datetime
7
  from pathlib import Path
8
  import pandas as pd
9
  import pytz
10
- import requests
11
  import streamlit as st
12
  from huggingface_hub import CommitScheduler, HfApi
13
 
@@ -20,7 +20,7 @@ PLAYERS_JSON = 'players.json'
20
  image_path = 'ipl_image.png'
21
 
22
 
23
- PREDICTIONS_FOLDER = Path("ipl_predictions")
24
  PREDICTIONS_FOLDER.mkdir(parents=True, exist_ok=True)
25
 
26
  users_file = Path("leaders") / f"users.json"
@@ -32,7 +32,7 @@ scheduler = CommitScheduler(
32
  repo_id="DIS_IPL_Dataset",
33
  repo_type="dataset",
34
  folder_path=PREDICTIONS_FOLDER, # Local folder where predictions are saved temporarily
35
- path_in_repo="ipl_predictions", # Path in dataset repo where predictions will be saved
36
  every=5, # Push every 5 minutes (CommitScheduler's `every` is in minutes)
37
  )
38
 
@@ -347,25 +347,13 @@ with st.expander("Leaderboard 🏆"):
347
  ############################# Admin Panel ##################################
348
  ADMIN_PASSPHRASE = "admin123"
349
 
350
- from huggingface_hub import Repository
351
-
352
- # Define the local path to clone the repository
353
- local_repo_path = "DIS_IPL_Dataset"
354
- # Define your dataset repository name on Hugging Face Hub
355
- repo_id = "datasets/Jay-Rajput/DIS_IPL_Dataset"
356
- # Clone the repository (this only needs to be done once)
357
- repo = Repository(local_dir=local_repo_path, clone_from=repo_id)
358
-
359
 
360
  def fetch_latest_predictions(match_id):
361
- # Assuming predictions are stored in the "ipl_predictions" directory in your dataset repo
362
- predictions_path = Path("ipl_predictions")
363
- latest_predictions = []
364
- for prediction_file in predictions_path.glob(f"prediction_{match_id}_*.json"):
365
- with open(prediction_file, 'r') as file:
366
- prediction_data = json.load(file)
367
- latest_predictions.append(prediction_data)
368
- return latest_predictions
369
 
370
 
371
  def save_match_outcomes(outcomes):
@@ -378,9 +366,9 @@ def update_leaderboard_and_outcomes(match_id, winning_team, man_of_the_match):
378
  predictions = fetch_latest_predictions(match_id)
379
 
380
  outcomes = load_data(OUTCOMES_JSON) # Load existing match outcomes
381
- users_file_path = repo.git_pull("leaders/users.json")
382
- with open(users_file_path, 'r') as file:
383
- users = json.load(file)
384
 
385
  # Directly update or add the match outcome
386
  outcome_exists = False
@@ -395,7 +383,9 @@ def update_leaderboard_and_outcomes(match_id, winning_team, man_of_the_match):
395
  # Update user points based on prediction accuracy
396
  for prediction in predictions:
397
  user_name = prediction['user_name']
398
- users[user_name] = users.get(user_name, 0) # Initialize user points if not present
 
 
399
 
400
  # Update points based on prediction accuracy
401
  if prediction['predicted_winner'] == winning_team:
@@ -407,14 +397,7 @@ def update_leaderboard_and_outcomes(match_id, winning_team, man_of_the_match):
407
  users[user_name] -= 200 + prediction['bid_points'] # Penalty for wrong team prediction
408
 
409
  save_match_outcomes(outcomes)
410
- # Save updated users data back to the dataset repo
411
- with open(users_file_path, 'w') as file:
412
- json.dump(users, file)
413
-
414
- # Commit changes to the dataset repo
415
- repo.git_add(users_file_path)
416
- repo.git_commit("Update leaderboard")
417
- repo.git_push()
418
 
419
 
420
  with st.sidebar:
 
7
  from pathlib import Path
8
  import pandas as pd
9
  import pytz
10
+ from datasets import load_dataset
11
  import streamlit as st
12
  from huggingface_hub import CommitScheduler, HfApi
13
 
 
20
  image_path = 'ipl_image.png'
21
 
22
 
23
+ PREDICTIONS_FOLDER = Path("predictions")
24
  PREDICTIONS_FOLDER.mkdir(parents=True, exist_ok=True)
25
 
26
  users_file = Path("leaders") / f"users.json"
 
32
  repo_id="DIS_IPL_Dataset",
33
  repo_type="dataset",
34
  folder_path=PREDICTIONS_FOLDER, # Local folder where predictions are saved temporarily
35
+ path_in_repo="predictions", # Path in dataset repo where predictions will be saved
36
+ every=5, # Push every 5 minutes (CommitScheduler's `every` is in minutes)
37
  )
38
 
 
347
  ############################# Admin Panel ##################################
348
  ADMIN_PASSPHRASE = "admin123"
349
 
350
def load_dataset_repo():
    """Load the IPL predictions dataset from the Hugging Face Hub.

    Returns the ``DatasetDict`` produced by ``datasets.load_dataset`` for
    the ``Jay-Rajput/DIS_IPL_Dataset`` repo.

    NOTE(review): this is invoked on every call site (no memoization here);
    presumably the `datasets` library's local cache keeps repeat calls cheap —
    confirm, or cache the result if reloads prove slow.
    """
    return load_dataset("Jay-Rajput/DIS_IPL_Dataset")
 
 
 
 
 
 
 
352
 
353
def fetch_latest_predictions(match_id):
    """Return all stored predictions for one match.

    Parameters
    ----------
    match_id :
        Identifier matched against each example's ``match_id`` field.

    Returns
    -------
    A filtered ``datasets.Dataset`` (iterable of dict-like rows), NOT a plain
    list — callers that only iterate it (e.g. the leaderboard update loop)
    are unaffected, but list-specific operations would break.
    """
    dataset = load_dataset_repo()
    # NOTE(review): assumes predictions live in the 'train' split and that
    # every example carries a 'match_id' key — verify against the dataset
    # schema pushed by the CommitScheduler.
    predictions = dataset['train'].filter(lambda example: example['match_id'] == match_id)
    return predictions
 
 
 
 
 
357
 
358
 
359
  def save_match_outcomes(outcomes):
 
366
  predictions = fetch_latest_predictions(match_id)
367
 
368
  outcomes = load_data(OUTCOMES_JSON) # Load existing match outcomes
369
+ # Load existing match outcomes and user data from the test split
370
+ dataset = load_dataset_repo()
371
+ users = {item['user_name']: item for item in dataset['test']}
372
 
373
  # Directly update or add the match outcome
374
  outcome_exists = False
 
383
  # Update user points based on prediction accuracy
384
  for prediction in predictions:
385
  user_name = prediction['user_name']
386
+ # Initialize user points if not present
387
+ if user_name not in users:
388
+ users[user_name] = {'user_name': user_name, 'points': 0}
389
 
390
  # Update points based on prediction accuracy
391
  if prediction['predicted_winner'] == winning_team:
 
397
  users[user_name] -= 200 + prediction['bid_points'] # Penalty for wrong team prediction
398
 
399
  save_match_outcomes(outcomes)
400
+ users.save_to_disk(USERS_JSON)
 
 
 
 
 
 
 
401
 
402
 
403
  with st.sidebar:
requirements.txt CHANGED
@@ -1,3 +1,4 @@
 
1
  huggingface_hub
2
  pandas
3
  pytz
 
1
+ datasets
2
  huggingface_hub
3
  pandas
4
  pytz