Spaces: Build error
import os
import gradio as gr
import requests
import pandas as pd
from smolagents import CodeAgent, DuckDuckGoSearchTool
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# Custom model wrapper that handles the chat template explicitly (GPT-Neo ships without one)
class CustomTransformersModel:
    def __init__(self, model_id="EleutherAI/gpt-neo-125m"):
        self.model_id = model_id
        # Create the tokenizer and explicitly set the chat template
        self.tokenizer = AutoTokenizer.from_pretrained(model_id)
        # GPT-Neo has no pad token; reuse EOS so generate() has one to work with
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
        # Set the chat template directly on the tokenizer
        simple_template = (
            "{% for message in messages %}\n{% if message['role'] == 'user' %}\nUser: {{ message['content'] }}\n"
            "{% elif message['role'] == 'assistant' %}\nAssistant: {{ message['content'] }}\n"
            "{% elif message['role'] == 'system' %}\nSystem: {{ message['content'] }}\n"
            "{% endif %}\n{% endfor %}\n{% if add_generation_prompt %}\nAssistant: {% endif %}"
        )
        self.tokenizer.chat_template = simple_template
        # Load the model
        self.model = AutoModelForCausalLM.from_pretrained(model_id)
    def __call__(self, prompt, **kwargs):
        # Extract and handle stop_sequences if present
        stop_sequences = kwargs.pop("stop_sequences", None)
        # Format the prompt using our chat template, appending the "Assistant: " cue
        messages = [{"role": "user", "content": prompt}]
        formatted_prompt = self.tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        # Tokenize the prompt
        inputs = self.tokenizer(formatted_prompt, return_tensors="pt")
        # Generate the response
        outputs = self.model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            pad_token_id=self.tokenizer.eos_token_id,
            max_new_tokens=256,
            do_sample=True,
            temperature=0.7,
            **kwargs,
        )
        # Decode the response
        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Apply stop sequences manually if provided
        if stop_sequences:
            for stop_seq in stop_sequences:
                if stop_seq in response:
                    response = response.split(stop_seq)[0]
        # Keep only the text after the last "Assistant: " marker
        # (str.split never raises, so no try/except is needed here)
        return response.split("Assistant: ")[-1]

    # Alias generate() to __call__ to match the interface expected by CodeAgent
    def generate(self, prompt, **kwargs):
        return self(prompt, **kwargs)
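# A minimal sketch for sanity-checking the wrapper outside the agent loop,
# assuming the 125M checkpoint loads locally (the question is illustrative):
#
#     model = CustomTransformersModel()
#     print(model("What is the capital of France?", stop_sequences=["User:"]))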
# --- Define Agent ---
class SmolAgentWrapper:
    def __init__(self):
        # Use our custom model wrapper with GPT-Neo
        self.model = CustomTransformersModel(model_id="EleutherAI/gpt-neo-125m")
        self.tools = [DuckDuckGoSearchTool()]
        self.agent = CodeAgent(model=self.model, tools=self.tools)

    def __call__(self, question: str) -> str:
        return self.agent.run(question)
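# A quick end-to-end check of the agent, as a sketch: this downloads the model
# and may hit DuckDuckGo over the network, so keep it out of the deployed app:
#
#     agent = SmolAgentWrapper()
#     print(agent("What year was the Eiffel Tower completed?"))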
# --- Evaluation Logic ---
def run_and_submit_all(profile: gr.OAuthProfile | None):
    space_id = os.getenv("SPACE_ID")
    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please log in to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # Create the agent
    try:
        agent = SmolAgentWrapper()
    except Exception as e:
        return f"Error initializing agent: {e}", None
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

    # Fetch questions
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        return f"Error fetching questions: {e}", None

    # Run the agent on each question
    results_log = []
    answers_payload = []
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # Submit answers
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload,
    }
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        return f"Submission Failed: {e}", pd.DataFrame(results_log)
# --- Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("# SmolAgent Evaluation Runner (GPT-Neo Implementation)")
    gr.Markdown(
        """
        **Instructions:**

        1. Log in to Hugging Face with the button below.
        2. Click the button to run all GAIA questions through the SmolAgent.
        3. Results will be submitted automatically and your score will be shown.

        **Note:** Uses GPT-Neo 125M with a custom chat template implementation.
        """
    )
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table],
    )

if __name__ == "__main__":
    print("-" * 60)
    print("Launching SmolAgent Space...")
    print("-" * 60)
    demo.launch(debug=True, share=False)