# EveryCricket / app.py — Gradio app (Hugging Face Space)
# Author: AK97GAMERZ — commit "Update app.py" (dab95a5, verified)
import gradio as gr
import requests
import pandas as pd
import google.generativeai as genai
import json
from datetime import datetime
import re
import os
# --- App Title ---
APP_TITLE = "🏏 EveryCricket by Anand"  # used as the browser/page title and main heading
APP_CAPTION = "Live Scores, Twitter Buzz & AI Insights"  # subtitle rendered under the heading
# --- API Key Setup ---
# Secrets are read from environment variables (Hugging Face Space secrets).
# A missing key is logged but NOT fatal here: the variables stay bound (to None)
# and the UI reports the problem to the user later.
try:
    RAPIDAPI_KEY_CRICKET = os.getenv("RAPIDAPI_KEY_CRICKET")
    RAPIDAPI_KEY_TWITTER = os.getenv("RAPIDAPI_KEY_TWITTER")
    GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
    if not RAPIDAPI_KEY_CRICKET:
        raise ValueError("RAPIDAPI_KEY_CRICKET secret not found.")
    if not RAPIDAPI_KEY_TWITTER:
        raise ValueError("RAPIDAPI_KEY_TWITTER secret not found.")
    if not GEMINI_API_KEY:
        raise ValueError("GEMINI_API_KEY secret not found.")
except ValueError as e:
    print(f"ERROR: Missing API Key: {e}")
# --- Mock Cricket API Functions ---
def get_ongoing_matches():
    """ MOCK FUNCTION: Fetches ongoing matches list. """
    # Static fixture standing in for the live cricket API.
    print("ℹ️ Using MOCK data for ongoing matches.")
    fixtures = [
        {"id": "mock123", "name": "IND vs PAK", "status": "Live", "venue": "Dubai", "series": "Asia Cup", "hashtags": ["#INDvPAK", "#AsiaCup"]},
        {"id": "mock456", "name": "AUS vs ENG", "status": "Live", "venue": "Lord's", "series": "Ashes", "hashtags": ["#AUSvENG", "#Ashes"]},
        {"id": "mock789", "name": "NZ vs SA", "status": "Innings Break", "venue": "Wellington", "series": "Test Series", "hashtags": ["#NZvSA"]},
        {"id": "mock101", "name": "SL vs BAN", "status": "Scheduled", "venue": "Colombo", "series": "ODI Series", "hashtags": ["#SLvBAN"]},
    ]
    return fixtures
def get_match_score(match_id):
    """ MOCK FUNCTION: Fetches score details.

    Returns None for a falsy match_id, a score dict for the known mock ids,
    and an error dict otherwise.
    """
    if not match_id:
        return None
    # Fix: this print originally followed `return None` on the same line and was unreachable.
    print(f"ℹ️ Using MOCK data for match score (ID: {match_id}).")
    if match_id == "mock123":
        return { "summary": "IND 180/3 (18.2 ov)", "status_text": "Target 181 | PAK need 1 run in 10 balls", "batsmen": [{"name": "Kohli", "runs": "75*", "balls": "40"}, {"name": "Pandya", "runs": "22*", "balls": "10"}], "bowlers": [{"name": "Rauf", "overs": "3.2", "wickets": "1", "runs": "35"}], "last_wicket": "Rohit Sharma c Azam b Rauf 55", "run_rate": "9.82", "required_run_rate": "0.60", "update_time": datetime.now().strftime("%H:%M:%S") }
    elif match_id == "mock456":
        return { "summary": "AUS 310/8 (85 ov)", "status_text": "Stumps - Day 1", "batsmen": [{"name": "Smith", "runs": "110*", "balls": "205"}, {"name": "Starc", "runs": "15*", "balls": "30"}], "bowlers": [{"name": "Anderson", "overs": "22", "wickets": "3", "runs": "60"}], "last_wicket": "Carey lbw Broad 25", "run_rate": "3.65", "required_run_rate": "N/A", "update_time": datetime.now().strftime("%H:%M:%S") }
    else:
        return {"error": "Mock score data not available for this ID."}
# --- Twitter API Function ---
def get_twitter_user_feed(user_id=DEFAULT_TWITTER_USER_ID, count=10):
    """ Fetches tweets for a SPECIFIC user ID using the twitter-x API.

    Returns a list of {"user", "text", "url"} dicts on success, [] when the
    feed is empty, or an {"error": ...} dict on any failure.
    """
    if not RAPIDAPI_KEY_TWITTER:
        return {"error": "Twitter API Key Missing"}
    endpoint = TWITTER_USER_TWEETS_ENDPOINT
    headers = {"X-RapidAPI-Key": RAPIDAPI_KEY_TWITTER, "X-RapidAPI-Host": TWITTER_API_HOST}
    params = {"user_id": str(user_id), "limit": str(count)}
    print(f"Attempting to fetch tweets for User ID: {user_id} from {TWITTER_API_HOST}{endpoint}")
    data = fetch_api_data_internal(f"{TWITTER_BASE_URL}{endpoint}", headers=headers, params=params)
    if data is None:
        return {"error": "API fetch returned None."}
    if isinstance(data, dict) and 'error' in data:
        return data
    parsed_tweets = []
    try:
        print("Attempting to parse Twitter response...")
        # The twitter-x API response shape varies: a bare list, or a dict with
        # the list under 'results' or 'data' — probe each in turn.
        tweets_raw = []
        if isinstance(data, list):
            tweets_raw = data
            print(f"Parsing direct list of {len(tweets_raw)} items.")
        elif isinstance(data, dict):
            if 'results' in data and isinstance(data['results'], list):
                tweets_raw = data['results']
                print(f"Parsing list under 'results' key ({len(tweets_raw)} items).")
            elif 'data' in data and isinstance(data['data'], list):
                tweets_raw = data['data']
                print(f"Parsing list under 'data' key ({len(tweets_raw)} items).")
            else:
                print("Could not find a list of tweets in the response dictionary. Keys found:", list(data.keys()))
                return {"error": "Tweet list not found in expected format."}
        else:
            print(f"Unexpected API response type: {type(data)}")
            return {"error": f"Unexpected API response type: {type(data)}"}
        if not tweets_raw:
            print("Parsed tweet list is empty.")
            return []
        for i, tweet in enumerate(tweets_raw):
            if isinstance(tweet, dict):
                # Field names also vary between API versions; try the known alternatives.
                text = tweet.get('full_text') or tweet.get('text', '')
                tweet_id = tweet.get('id_str') or tweet.get('rest_id') or tweet.get('id')
                screen_name = "UnknownUser"
                user_info = tweet.get('user')
                if isinstance(user_info, dict):
                    screen_name = user_info.get('screen_name', 'UnknownUser')
                url = f"https://twitter.com/{screen_name}/status/{tweet_id}" if screen_name != 'UnknownUser' and tweet_id else "#"
                # Strip embedded links; the canonical URL is attached separately.
                cleaned_text = re.sub(r'http\S+', '', text).strip()
                if cleaned_text and tweet_id:
                    print(f" Parsed tweet {i+1}: ID={tweet_id}, User={screen_name}")
                    parsed_tweets.append({"user": screen_name, "text": cleaned_text, "url": url})
                else:
                    print(f" Skipped tweet {i+1}: Missing text or ID. Raw keys: {list(tweet.keys())}")
            else:
                print(f" Skipped item {i+1}: Not a dictionary.")
        print(f"Successfully parsed {len(parsed_tweets)} tweets.")
        return parsed_tweets
    except Exception as e:
        print(f"🔴 Error during parsing twitter-x API response: {e}")
        return {"error": f"Failed to process tweets: {e}"}
# Configure Gemini AI.
# gemini_model stays None (and gemini_config_error carries the reason) when
# the key is absent or configuration fails; the UI checks both later.
gemini_model = None
gemini_config_error = None
if GEMINI_API_KEY:
    try:
        genai.configure(api_key=GEMINI_API_KEY)
        gemini_model = genai.GenerativeModel('gemini-1.5-flash')
    except Exception as e:
        gemini_config_error = f"🔴 Error configuring Gemini AI: {e}"
        print(gemini_config_error)
else:
    gemini_config_error = "🔴 Gemini API Key not found. AI features disabled."
    print(gemini_config_error)
# --- API Constants ---
# NOTE(review): the CRICKET_* endpoints appear unused in this file — the mock
# functions below stand in for the live cricket API. Verify before removing.
CRICKET_API_HOST = "cricbuzz-cricket.p.rapidapi.com" # EXAMPLE - REPLACE
CRICKET_BASE_URL = f"https://{CRICKET_API_HOST}"
CRICKET_LIVE_MATCHES_ENDPOINT = "/matches/v1/live" # EXAMPLE - REPLACE
CRICKET_SCORE_ENDPOINT_TEMPLATE = "/mcenter/v1/{matchId}/hscrd" # EXAMPLE - REPLACE
TWITTER_API_HOST = "twitter-x.p.rapidapi.com" # HOST based on user cURL
TWITTER_BASE_URL = f"https://{TWITTER_API_HOST}"
TWITTER_USER_TWEETS_ENDPOINT = "/user/tweetsandreplies" # Endpoint based on user cURL
DEFAULT_TWITTER_USER_ID = "17438364" # Example: ESPNcricinfo - VERIFY & REPLACE
# --- Helper Function to Fetch RapidAPI Data ---
def fetch_api_data_internal(url, headers, params=None):
    """Fetches data from a RapidAPI endpoint with basic error handling. Returns JSON or error dict."""
    try:
        print(f"Fetching URL: {url} | Params: {params}")
        response = requests.get(url, headers=headers, params=params, timeout=20)
        print(f"Response Status Code: {response.status_code}")
        response.raise_for_status()
        return response.json()
    # Timeout errors report only the endpoint tail (last path segment), not the full URL.
    except requests.exceptions.Timeout: print(f"⏳ API Timeout accessing {url}"); return {"error": f"API Timeout accessing {url.split('?')[0].split('/')[-1]}"}
    except requests.exceptions.HTTPError as e:
        # Build a detailed console log line; prefer the JSON body when the server sent one.
        error_detail = f"Status Code: {e.response.status_code}"; response_text = e.response.text
        try: error_detail += f", Response JSON: {e.response.json()}"
        except json.JSONDecodeError: error_detail += f", Response Text: {response_text}"
        print(f"🚨 API HTTP Error ({url}): {error_detail}")
        if e.response.status_code == 404: return {"error": f"API Endpoint Not Found ({e.response.status_code})"}
        elif e.response.status_code in [401, 403]: return {"error": f"API Authorization Failed ({e.response.status_code}). Check API Key or Subscription."}
        # RapidAPI returns these literal body strings when the app/endpoint is misconfigured.
        elif "API doesn't exists" in response_text or "Application doesn't exist" in response_text: return {"error": "API/Endpoint configuration error on RapidAPI."}
        return {"error": f"API HTTP Error ({e.response.status_code}). See console logs."}
    except requests.exceptions.RequestException as e: print(f"🚨 API Connection Error ({url}): {e}"); return {"error": f"API Connection Error: {e}"}
    # NOTE(review): on requests>=2.27, response.json() raises
    # requests.exceptions.JSONDecodeError, which the RequestException branch above
    # catches first — this handler may only fire on older requests. Confirm.
    except json.JSONDecodeError as err: print(f"🚨 API Error: Could not decode JSON response from {url}. Error: {err}"); return {"error": "API returned invalid JSON."}
    except Exception as e: print(f"🚨 An unexpected error occurred during API fetch: {e}"); return {"error": f"Unexpected API fetch error: {e}"}
# --- Mock Cricket API Functions ---
def get_ongoing_matches():
    """ MOCK FUNCTION: Fetches ongoing matches list. """
    print("ℹ️ Using MOCK data for ongoing matches.")
    return [
        {"id": "mock123", "name": "IND vs PAK", "status": "Live", "venue": "Dubai", "series": "Asia Cup", "hashtags": ["#INDvPAK", "#AsiaCup"]},
        {"id": "mock456", "name": "AUS vs ENG", "status": "Live", "venue": "Lord's", "series": "Ashes", "hashtags": ["#AUSvENG", "#Ashes"]},
        {"id": "mock789", "name": "NZ vs SA", "status": "Innings Break", "venue": "Wellington", "series": "Test Series", "hashtags": ["#NZvSA"]},
        {"id": "mock101", "name": "SL vs BAN", "status": "Scheduled", "venue": "Colombo", "series": "ODI Series", "hashtags": ["#SLvBAN"]},
    ]
def get_match_score(match_id):
    """ MOCK FUNCTION: Fetches score details.

    Returns None for a falsy match_id, a score dict for the known mock ids,
    and an error dict otherwise.
    """
    if not match_id:
        return None
    print(f"ℹ️ Using MOCK data for match score (ID: {match_id}).")
    if match_id == "mock123":
        return { "summary": "IND 180/3 (18.2 ov)", "status_text": "Target 181 | PAK need 1 run in 10 balls", "batsmen": [{"name": "Kohli", "runs": "75*", "balls": "40"}, {"name": "Pandya", "runs": "22*", "balls": "10"}], "bowlers": [{"name": "Rauf", "overs": "3.2", "wickets": "1", "runs": "35"}], "last_wicket": "Rohit Sharma c Azam b Rauf 55", "run_rate": "9.82", "required_run_rate": "0.60", "update_time": datetime.now().strftime("%H:%M:%S") }
    elif match_id == "mock456":
        return { "summary": "AUS 310/8 (85 ov)", "status_text": "Stumps - Day 1", "batsmen": [{"name": "Smith", "runs": "110*", "balls": "205"}, {"name": "Starc", "runs": "15*", "balls": "30"}], "bowlers": [{"name": "Anderson", "overs": "22", "wickets": "3", "runs": "60"}], "last_wicket": "Carey lbw Broad 25", "run_rate": "3.65", "required_run_rate": "N/A", "update_time": datetime.now().strftime("%H:%M:%S") }
    else:
        return {"error": "Mock score data not available for this ID."}
# --- Gemini AI Functions ---
def ask_gemini(prompt):
    """ Sends prompt to Gemini and returns the text answer or an error string. """
    if not gemini_model:
        return gemini_config_error or "Gemini model not configured."
    # Fix: this print originally followed the `return` above on one line and was unreachable.
    print("🤖 Sending prompt to Gemini...")
    try:
        response = gemini_model.generate_content(prompt)
        if response.parts:
            return response.text
        elif response.prompt_feedback.block_reason:
            # Safety filters rejected the prompt/response.
            return f"⚠️ Gemini response blocked: {response.prompt_feedback.block_reason.name}"
        else:
            return "⚠️ Gemini returned an empty response."
    except Exception as e:
        print(f"🔴 Gemini AI Error: {e}")
        return f"🔴 Gemini AI Error: {e}"
def get_match_prediction(match_info, score_info):
    """ Generates match analysis using Gemini from the match dict and score dict. """
    if not gemini_model:
        return gemini_config_error or "Gemini model not configured."
    if not match_info or not score_info or (isinstance(score_info, dict) and 'error' in score_info):
        return "Need valid match and live score information for AI analysis."
    prompt = f"""Analyze the current situation in the cricket match: {match_info.get('name', 'N/A')} ({match_info.get('series', 'N/A')}). Status: {score_info.get('status_text', 'N/A')} Score: {score_info.get('summary', 'N/A')} Batsmen: {', '.join([f"{b['name']} ({b.get('runs','?')}*/{b.get('balls','?')})" for b in score_info.get('batsmen', [])]) if score_info.get('batsmen') else 'N/A'} Bowlers: {', '.join([f"{b['name']} ({b.get('wickets','?')}/{b.get('runs','?')} in {b.get('overs','?')} ov)" for b in score_info.get('bowlers', [])]) if score_info.get('bowlers') else 'N/A'}. Provide a brief analysis of which team seems better positioned and why: """
    return ask_gemini(prompt)
# --- Gradio Interface Logic ---
def load_matches():
    """ Function to load initial matches into dropdown.

    Returns (dropdown update, matches_state list, status message).
    """
    print("Loading initial matches...")
    matches_data = get_ongoing_matches()
    if isinstance(matches_data, dict) and 'error' in matches_data:
        print(f"Error loading matches: {matches_data['error']}")
        return gr.update(choices=["-- Select a Match --"], value="-- Select a Match --"), [], f"Error loading matches: {matches_data['error']}"
    elif matches_data:
        match_display_list = [f"{match['name']} ({match['status']})" for match in matches_data]
        choices = ["-- Select a Match --"] + match_display_list
        return gr.update(choices=choices, value="-- Select a Match --"), matches_data, ""
    else:
        return gr.update(choices=["-- Select a Match --"], value="-- Select a Match --"), [], "No ongoing matches found or API failed."
def update_match_details(selected_match_name, matches_state):
    """ Function triggered when dropdown selection changes.

    Returns the 14-tuple matching outputs_on_change: match id, header, caption,
    score fields, tweets HTML, cleared AI fields, and details-row visibility.
    """
    print(f"Match selected: {selected_match_name}")
    empty_df = pd.DataFrame()
    outputs_reset = [None, "", "", "", "", empty_df, empty_df, "", "", "", "", "", "", gr.update(visible=False)]
    if selected_match_name == "-- Select a Match --" or not matches_state:
        return outputs_reset
    # The dropdown shows "Name (Status)"; match it back to the stored dict.
    selected_match_info = next((match for match in matches_state if f"{match['name']} ({match['status']})" == selected_match_name), None)
    if not selected_match_info:
        print("Error: Could not find selected match info in state.")
        return None, "Error", "Match data not found.", "", "", pd.DataFrame(), pd.DataFrame(), "", "", "", "", "", "", gr.update(visible=True)
    match_id = selected_match_info['id']
    header_text = f"🏏 {selected_match_info['name']}"
    caption_text = f"Series: {selected_match_info.get('series', 'N/A')} | Venue: {selected_match_info.get('venue', 'N/A')} | Status: {selected_match_info.get('status', 'N/A')}"
    # --- Score tab content ---
    score_data = get_match_score(match_id)
    score_summary, score_status, df_batsmen, df_bowlers, score_footer, score_status_msg = "", "Fetching score...", pd.DataFrame(), pd.DataFrame(), "", ""
    if isinstance(score_data, dict) and 'error' in score_data:
        score_status_msg = f"⚠️ Score Error: {score_data['error']}"
    elif score_data:
        score_summary = score_data.get('summary', 'N/A')
        score_status = score_data.get('status_text', 'N/A')
        batsmen = score_data.get('batsmen', [])
        bowlers = score_data.get('bowlers', [])
        df_batsmen = pd.DataFrame(batsmen) if batsmen else pd.DataFrame()
        df_bowlers = pd.DataFrame(bowlers) if bowlers else pd.DataFrame()
        score_footer = f"Run Rate: {score_data.get('run_rate', 'N/A')} | Required RR: {score_data.get('required_run_rate', 'N/A')} | Last Wicket: {score_data.get('last_wicket', 'N/A')}"
    else:
        score_status_msg = "⚠️ Could not fetch score data."
    # --- Twitter tab content ---
    tweets_data = get_twitter_user_feed(user_id=DEFAULT_TWITTER_USER_ID, count=5)
    tweets_html = ""
    if isinstance(tweets_data, dict) and 'error' in tweets_data:
        # Fix: original error HTML left the <p> tag unclosed.
        tweets_html = f"<p>⚠️ Twitter Error: {tweets_data['error']}</p>"
    elif isinstance(tweets_data, list):
        if not tweets_data:
            tweets_html = f"<p><i>No recent tweets found for user ID {DEFAULT_TWITTER_USER_ID}.</i></p>"
        else:
            tweets_html += f"<p><small><i>Showing recent tweets from User ID {DEFAULT_TWITTER_USER_ID}.</i></small></p>"
            for tweet in tweets_data:
                handle = f"<b>@{tweet['user']}</b>" if tweet['user'] != "UnknownUser" else ""
                tweets_html += f"""<div style="border-left: 3px solid #1DA1F2; padding-left: 10px; margin-bottom: 8px; font-size: 0.9em;">{handle}<br>{tweet['text']}<small> <a href="{tweet['url']}" target="_blank">[link]</a></small></div>"""
    else:
        tweets_html = "<p>⚠️ Could not fetch or process user tweets (unexpected data type).</p>"
        print(f"Unexpected data type from get_twitter_user_feed: {type(tweets_data)}")
    return (match_id, header_text, caption_text, score_summary, score_status, df_batsmen, df_bowlers, score_footer, score_status_msg, tweets_html, "", "", "", gr.update(visible=True))
def handle_ai_question(question, match_id, matches_state):
    """ Function for AI Q&A Button: answers a free-form question in match context. """
    if not question:
        return "Please enter a question."
    if not match_id or not matches_state:
        return "Please select a match first."
    match_info = next((m for m in matches_state if m['id'] == match_id), None)
    if not match_info:
        return "Error: Match context not found."
    score_data = get_match_score(match_id)
    score_context = "Score context unavailable."
    if score_data and not (isinstance(score_data, dict) and 'error' in score_data):
        score_context = f"Status is \"{score_data.get('status_text', 'N/A')}\", Score is \"{score_data.get('summary', 'N/A')}\"."
    context_prompt = f"Context: Cricket match: {match_info['name']} ({match_info.get('series', 'N/A')}). {score_context}\nUser Question: {question}\nAnswer based ONLY on provided context and general cricket knowledge:"
    return ask_gemini(context_prompt)
def handle_ai_prediction(match_id, matches_state):
    """ Function for AI Prediction Button: Gemini analysis of the current match. """
    if not match_id or not matches_state:
        return "Please select a match first."
    match_info = next((m for m in matches_state if m['id'] == match_id), None)
    if not match_info:
        return "Error: Match context not found."
    score_data = get_match_score(match_id)
    return get_match_prediction(match_info, score_data)
# --- Build Gradio Interface ---
css = """
.gradio-container { font-family: 'IBM Plex Sans', sans-serif; } .gr-button { background-color: #FF4B4B; color: white; }
.gr-button:hover { background-color: #DC3545; } #match_header { font-size: 1.5em; font-weight: bold; margin-bottom: 0; color: #FF4B4B; }
#match_caption { font-size: 0.9em; color: gray; margin-top: 0; margin-bottom: 10px; } #score_status_msg { color: orange; font-weight: bold; }
.tweet-container { border-left: 3px solid #1DA1F2; padding-left: 10px; margin-bottom: 8px; font-size: 0.9em; }
.error-message { color: red; font-weight: bold; padding: 10px; border: 1px solid red; border-radius: 5px; margin-bottom:10px;}
"""
with gr.Blocks(css=css, title=APP_TITLE) as demo:
    gr.Markdown(f"<h1 style='text-align: center; color: #FF4B4B;'>{APP_TITLE}</h1>")
    gr.Markdown(f"<p style='text-align: center;'>{APP_CAPTION}</p>")
    matches_state = gr.State([])
    selected_match_id_state = gr.State(None)
    # Surface configuration problems prominently at the top of the page.
    api_key_error = None
    if not RAPIDAPI_KEY_CRICKET or not RAPIDAPI_KEY_TWITTER or not GEMINI_API_KEY:
        api_key_error = "ERROR: One or more API keys are missing in secrets."
    elif gemini_config_error and "API Key not found" not in gemini_config_error:
        api_key_error = gemini_config_error
    if api_key_error:
        gr.Markdown(f"<p class='error-message'>{api_key_error}</p>")
    with gr.Row():
        match_dropdown = gr.Dropdown(label="Select a Match", choices=["-- Select a Match --"], value="-- Select a Match --", interactive=True, scale=3)
        status_message_text = gr.Markdown("")
    with gr.Column(visible=False) as details_row:
        match_header = gr.Markdown("", elem_id="match_header")
        match_caption = gr.Markdown("", elem_id="match_caption")
        with gr.Tabs():
            with gr.TabItem("📊 Live Score"):
                score_status_message = gr.Markdown("", elem_id="score_status_msg")
                with gr.Row():
                    score_summary_text = gr.Textbox(label="Score Summary", interactive=False, scale=2)
                    score_status_text = gr.Textbox(label="Match Status", interactive=False, scale=1)
                gr.Markdown("**Batsmen**")
                score_batsmen_df = gr.DataFrame(interactive=False, headers=["Name", "Runs", "Balls"])
                gr.Markdown("**Bowlers**")
                score_bowlers_df = gr.DataFrame(interactive=False, headers=["Name", "Overs", "Wickets", "Runs"])
                score_footer_text = gr.Textbox(label="Details", interactive=False)
            with gr.TabItem("🐦 Twitter Feed"):
                gr.Markdown(f"**Recent Tweets from User ID {DEFAULT_TWITTER_USER_ID}**")
                twitter_feed_html = gr.HTML("<p><i>Select a match to load feed.</i></p>")
            with gr.TabItem("🤖 AI Insights"):
                if not gemini_model:
                    gr.Markdown(f"<p class='error-message'>{gemini_config_error}</p>")
                    # Fix: outputs_on_change below references these components, so they
                    # must exist even when Gemini is unavailable — create them hidden.
                    ai_question_textbox = gr.Textbox(visible=False)
                    ai_answer_output = gr.Textbox(visible=False)
                    prediction_output = gr.Textbox(visible=False)
                else:
                    with gr.Row():
                        with gr.Column(scale=2):
                            ai_question_textbox = gr.Textbox(label="Ask AI", placeholder="e.g., Who has the advantage?", lines=3)
                            ask_ai_button = gr.Button("Ask Gemini")
                        with gr.Column(scale=3):
                            ai_answer_output = gr.Textbox(label="Gemini's Answer", interactive=False, lines=6)
                    gr.Markdown("---")
                    with gr.Row():
                        with gr.Column(scale=2):
                            predict_button = gr.Button("Get AI Analysis")
                        with gr.Column(scale=3):
                            prediction_output = gr.Textbox(label="Gemini's Analysis", interactive=False, lines=6)
    # Order must match the 14-tuple returned by update_match_details.
    outputs_on_change = [selected_match_id_state, match_header, match_caption, score_summary_text, score_status_text, score_batsmen_df, score_bowlers_df, score_footer_text, score_status_message, twitter_feed_html, ai_question_textbox, ai_answer_output, prediction_output, details_row]
    demo.load(fn=load_matches, inputs=[], outputs=[match_dropdown, matches_state, status_message_text])
    match_dropdown.change(fn=update_match_details, inputs=[match_dropdown, matches_state], outputs=outputs_on_change)
    if gemini_model:
        ask_ai_button.click(fn=handle_ai_question, inputs=[ai_question_textbox, selected_match_id_state, matches_state], outputs=[ai_answer_output])
        predict_button.click(fn=handle_ai_prediction, inputs=[selected_match_id_state, matches_state], outputs=[prediction_output])
demo.launch()