File size: 3,674 Bytes
10e9b7d
 
eccf8e4
5889dfe
10e9b7d
e80aab9
3db6293
5889dfe
e80aab9
5889dfe
31243f4
5889dfe
 
 
 
3c4371f
5889dfe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7e4a06b
31243f4
 
e80aab9
5889dfe
31243f4
5889dfe
31243f4
 
3c4371f
5889dfe
eccf8e4
31243f4
7d65c66
31243f4
 
5889dfe
7d65c66
5889dfe
e80aab9
5889dfe
7d65c66
5889dfe
31243f4
5889dfe
 
31243f4
 
 
7d65c66
 
 
31243f4
5889dfe
31243f4
 
5889dfe
31243f4
5889dfe
 
 
 
e80aab9
7d65c66
e80aab9
7d65c66
e80aab9
 
5889dfe
 
 
 
 
 
7d65c66
5889dfe
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import os
import gradio as gr
import requests
from transformers import AutoModelForCausalLM, AutoTokenizer

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
MODEL_NAME = "meta-llama/Llama-4-Scout-17B-16E-Instruct"

# --- Basic Agent Using Llama 4 ---
# --- Basic Agent Using Llama 4 ---
class BasicAgent:
    """Minimal question-answering agent backed by a Llama 4 causal LM.

    Parameters
    ----------
    hf_token : str
        Hugging Face access token used to download the gated model weights.
    """

    def __init__(self, hf_token: str):
        print("Initializing Llama 4 Agent...")
        # `use_auth_token` is deprecated in recent transformers releases;
        # `token` is the supported replacement and accepts the same value.
        self.tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=hf_token)
        self.model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=hf_token)

    def __call__(self, question: str) -> str:
        """Generate and return a short answer string for *question*."""
        # Drop any code points that cannot round-trip through UTF-8 so the
        # tokenizer never sees malformed input.
        question = question.encode('utf-8', errors='ignore').decode('utf-8')
        inputs = self.tokenizer(question, return_tensors="pt")
        outputs = self.model.generate(**inputs, max_new_tokens=50)
        answer = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Return only the answer, not the echoed prompt.  Causal LMs prepend
        # the prompt to the decoded output, so strip it from the *front*;
        # a blanket replace() would also delete legitimate repetitions of the
        # question inside the answer text.  Fall back to replace() only when
        # the prompt was not a strict prefix (e.g. re-tokenized whitespace).
        if answer.startswith(question):
            answer = answer[len(question):].strip()
        elif question in answer:
            answer = answer.replace(question, '').strip()
        return answer

# --- Function to run and submit ---
# --- Function to run and submit ---
def _fetch_questions(questions_url: str):
    """GET the evaluation questions; return ``(questions_data, error)`` where
    exactly one of the two is ``None``."""
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return None, f"Error fetching questions: {e}"
    if not questions_data:
        return None, "No questions fetched."
    return questions_data, None


def _run_agent(agent, questions_data):
    """Run *agent* over every question; return ``(answers_payload, results_log)``."""
    answers_payload = []
    results_log = []
    for item in questions_data:
        task_id = item.get('task_id')
        question_text = item.get('question')
        # Skip malformed entries rather than submitting garbage.
        if not task_id or question_text is None:
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # Record the failure per task so one bad question does not abort the run.
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
    return answers_payload, results_log


def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Run the BasicAgent on every scored question and submit the answers.

    Parameters
    ----------
    profile : gr.OAuthProfile | None
        Logged-in Hugging Face profile from the Gradio OAuth button;
        ``None`` when the user is not logged in.

    Returns
    -------
    tuple
        ``(status_message, results_log)`` — a human-readable status string
        and the per-question results log (``None`` when the run aborted
        before any question was attempted).
    """
    hf_token = os.getenv('HF_TOKEN')
    if not hf_token:
        return "HF_TOKEN not set!", None

    if not profile:
        return "Please login to Hugging Face.", None

    username = profile.username
    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Create agent (downloads model weights — may take a while).
    try:
        agent = BasicAgent(hf_token)
    except Exception as e:
        return f"Error initializing agent: {e}", None

    # 2. Fetch questions
    questions_data, error = _fetch_questions(questions_url)
    if error:
        return error, None

    # 3. Run agent on questions
    answers_payload, results_log = _run_agent(agent, questions_data)
    if not answers_payload:
        return "No answers generated.", results_log

    # 4. Prepare submission; agent_code links the scorer back to this Space's source.
    space_id = os.getenv('SPACE_ID', 'YOUR_SPACE_NAME')
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    submission_data = {"username": username, "agent_code": agent_code, "answers": answers_payload}

    # 5. Submit
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (f"Submission Successful!\n"
                        f"User: {result_data.get('username')}\n"
                        f"Overall Score: {result_data.get('score', 'N/A')}%"
                        f" ({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
                        f"Message: {result_data.get('message', '')}")
        return final_status, results_log
    except Exception as e:
        return f"Submission Failed: {e}", results_log