import os
import gradio as gr
import requests
import pandas as pd
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
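
# Dependencies: gradio, requests, pandas and transformers; loading the model
# locally also needs torch, and device_map="auto" relies on accelerate.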

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
DEFAULT_HF_MODEL = "mistralai/Mistral-7B-Instruct-v0.1"
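# DEFAULT_API_URL is the Agents Course Unit 4 scoring service; DEFAULT_HF_MODEL
# is only loaded by BasicAgent when an HF token is supplied.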


# --- Basic Agent Definition ---
class BasicAgent:
    def __init__(self, hf_token=None, model_name=DEFAULT_HF_MODEL):
        print("Initializing BasicAgent with LLM...")
        self.hf_token = hf_token
        self.model_name = model_name
        self.llm = None

        if hf_token:
            try:
                print(f"Loading model: {model_name}")
                self.tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
                self.model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
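                # Wrap the loaded model in a text-generation pipeline;
                # device_map="auto" relies on the accelerate package for
                # automatic device placement.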
                self.llm = pipeline(
                    "text-generation",
                    model=self.model,
                    tokenizer=self.tokenizer,
                    device_map="auto"
                )
                print("Model loaded successfully")
            except Exception as e:
                print(f"Error loading model: {e}")
                raise Exception(f"Could not load model: {e}")
        else:
            print("No HF token provided - agent will use default answers")

    def __call__(self, question: str) -> str:
        if not self.llm:
            return "This is a default answer (no LLM initialized)"
        try:
            print(f"Generating answer for question: {question[:50]}...")
            response = self.llm(
                question,
                max_new_tokens=150,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
                return_full_text=False  # return only the completion, not the echoed prompt
            )
            return response[0]['generated_text']
        except Exception as e:
            print(f"Error generating answer: {e}")
            return f"Error generating answer: {e}"


def run_and_submit_all(hf_token: str, request: gr.Request):
    """Main function to run the evaluation and submit the answers."""
    # Get user info from the request
    if not request.username:
        return "Please log in to Hugging Face with the button.", None
    username = request.username

    space_id = os.getenv("SPACE_ID")
    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"
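    # The scoring service is expected to expose GET /questions and POST /submit,
    # which are the two endpoints used below.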

    # Initialize agent
    try:
        agent = BasicAgent(hf_token=hf_token)
    except Exception as e:
        return f"Error initializing agent: {e}", None
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

    # Fetch questions
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            return "Fetched questions list is empty or invalid format.", None
    except Exception as e:
        return f"Error fetching questions: {e}", None

    # Process questions
    results_log = []
    answers_payload = []
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # Submit answers
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload
    }
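    # The payload carries the username, a link to this Space's code, and one
    # answer per task_id collected above.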
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        return final_status, pd.DataFrame(results_log)
    except Exception as e:
        return f"Submission Failed: {e}", pd.DataFrame(results_log)

# --- Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("# LLM Agent Evaluation Runner")
    gr.Markdown("""
    **Instructions:**
    1. Get your Hugging Face API token from [your settings](https://huggingface.co/settings/tokens)
    2. Enter your token below
    3. Log in to your Hugging Face account
    4. Click 'Run Evaluation & Submit All Answers'
    """)
    with gr.Row():
        hf_token_input = gr.Textbox(
            label="Hugging Face API Token",
            type="password",
            placeholder="hf_xxxxxxxxxxxxxxxx",
            info="Required for LLM access"
        )
        gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status", lines=5)
    results_table = gr.DataFrame(label="Results", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        inputs=[hf_token_input],
        outputs=[status_output, results_table]
    )
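    # Note: the `request: gr.Request` parameter of run_and_submit_all is injected
    # automatically by Gradio because of its type annotation, so only the token
    # textbox appears in `inputs`.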

if __name__ == "__main__":
    demo.launch()