alexputhiyadom committed on
Commit
bb601b1
·
verified ·
1 Parent(s): f44cbc9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -49
app.py CHANGED
@@ -3,42 +3,30 @@ import gradio as gr
3
  import requests
4
  import pandas as pd
5
  import json
6
- import torch
7
- from transformers import AutoTokenizer, AutoModelForCausalLM
8
 
9
  # --- Constants ---
10
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
11
 
12
- # --- Smart Agent using Mistral-7B ---
13
- class SmartLLMAgent:
14
  def __init__(self):
15
- print("🔍 Loading SmartLLMAgent with Mistral-7B...")
16
- self.tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
17
- self.model = AutoModelForCausalLM.from_pretrained(
18
- "HuggingFaceH4/zephyr-7b-beta",
19
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
20
- device_map="auto"
 
 
21
  )
22
 
23
  def __call__(self, question: str) -> str:
24
- prompt = f"Answer the following question clearly and precisely:\n\nQuestion: {question}\n\nAnswer:"
25
- inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
26
- inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
27
-
28
- with torch.no_grad():
29
- outputs = self.model.generate(
30
- **inputs,
31
- max_new_tokens=150,
32
- do_sample=True,
33
- top_p=0.9,
34
- temperature=0.7,
35
- pad_token_id=self.tokenizer.eos_token_id
36
- )
37
-
38
- decoded = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
39
- final_answer = decoded.split("Answer:")[-1].strip()
40
- print(f"✅ Generated answer: {final_answer}")
41
- return final_answer or "Not Enough Information"
42
 
43
  # --- Run Agent & Submit ---
44
  def run_and_submit_all(profile: gr.OAuthProfile | None):
@@ -46,39 +34,35 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
46
 
47
  if profile:
48
  username = f"{profile.username}"
49
- print(f"👤 User logged in: {username}")
50
  else:
51
- return "Please Login to Hugging Face with the button.", None
52
 
 
53
  questions_url = f"{DEFAULT_API_URL}/questions"
54
  submit_url = f"{DEFAULT_API_URL}/submit"
55
 
56
  try:
57
- agent = SmartLLMAgent()
58
  except Exception as e:
59
- return f"❌ Error loading agent: {e}", None
60
-
61
- agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
62
- print(f"🔗 Agent Code: {agent_code}")
63
 
64
  try:
65
  response = requests.get(questions_url, timeout=15)
66
  response.raise_for_status()
67
- questions = response.json()
68
- print(f"📥 Fetched {len(questions)} questions.")
69
  except Exception as e:
70
- return f"❌ Failed to fetch questions: {e}", None
71
 
72
- answers_payload = []
73
  results_log = []
 
74
 
75
- for item in questions:
76
  task_id = item.get("task_id")
77
  question_text = item.get("question")
78
-
79
  if not task_id or not question_text:
80
  continue
81
-
82
  try:
83
  answer = agent(question_text)
84
  answers_payload.append({"task_id": task_id, "submitted_answer": answer})
@@ -87,7 +71,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
87
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"ERROR: {e}"})
88
 
89
  if not answers_payload:
90
- return "❌ No answers produced.", pd.DataFrame(results_log)
91
 
92
  submission_data = {
93
  "username": username.strip(),
@@ -95,7 +79,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
95
  "answers": answers_payload
96
  }
97
 
98
- print("📤 Submission Payload:")
99
  print(json.dumps(submission_data, indent=2)[:1000])
100
 
101
  try:
@@ -115,13 +99,13 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
115
 
116
  # --- Gradio UI ---
117
  with gr.Blocks() as demo:
118
- gr.Markdown("# 🧠 Smart Agent Evaluation Runner (Mistral-7B)")
119
  gr.Markdown(
120
  """
121
  ✅ **Instructions:**
122
- 1. Login to your Hugging Face account.
123
- 2. Click **"Run Evaluation & Submit All Answers"**.
124
- 3. Wait 1–2 mins. You need at least **30% correct** to pass Level 1 and get the certificate.
125
  """
126
  )
127
 
@@ -133,5 +117,5 @@ with gr.Blocks() as demo:
133
  run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
134
 
135
  if __name__ == "__main__":
136
- print("🚀 Launching Smart Agent with Mistral-7B...")
137
  demo.launch(debug=True, share=False)
 
3
  import requests
4
  import pandas as pd
5
  import json
6
+ from transformers import pipeline
 
7
 
8
  # --- Constants ---
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
11
+ # --- Hugging Face Agent using Transformers Pipeline ---
12
+ class HFTransformersAgent:
13
  def __init__(self):
14
+ print("🤖 Loading text-generation agent...")
15
+ self.generator = pipeline(
16
+ "text-generation",
17
+ model="tiiuae/falcon-7b-instruct",
18
+ max_new_tokens=100,
19
+ do_sample=True,
20
+ temperature=0.7,
21
+ top_p=0.9
22
  )
23
 
24
  def __call__(self, question: str) -> str:
25
+ prompt = f"Answer the following question:\n\n{question}\n\nAnswer:"
26
+ result = self.generator(prompt)[0]["generated_text"]
27
+ answer = result.split("Answer:")[-1].strip()
28
+ print(f"✅ Generated answer: {answer}")
29
+ return answer if answer else "Not Enough Information"
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
  # --- Run Agent & Submit ---
32
  def run_and_submit_all(profile: gr.OAuthProfile | None):
 
34
 
35
  if profile:
36
  username = f"{profile.username}"
37
+ print(f"👤 Logged in: {username}")
38
  else:
39
+ return "Please log in to Hugging Face using the button.", None
40
 
41
+ agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
42
  questions_url = f"{DEFAULT_API_URL}/questions"
43
  submit_url = f"{DEFAULT_API_URL}/submit"
44
 
45
  try:
46
+ agent = HFTransformersAgent()
47
  except Exception as e:
48
+ return f"❌ Error initializing agent: {e}", None
 
 
 
49
 
50
  try:
51
  response = requests.get(questions_url, timeout=15)
52
  response.raise_for_status()
53
+ questions_data = response.json()
54
+ print(f"📥 Fetched {len(questions_data)} questions.")
55
  except Exception as e:
56
+ return f"❌ Error fetching questions: {e}", None
57
 
 
58
  results_log = []
59
+ answers_payload = []
60
 
61
+ for item in questions_data:
62
  task_id = item.get("task_id")
63
  question_text = item.get("question")
 
64
  if not task_id or not question_text:
65
  continue
 
66
  try:
67
  answer = agent(question_text)
68
  answers_payload.append({"task_id": task_id, "submitted_answer": answer})
 
71
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"ERROR: {e}"})
72
 
73
  if not answers_payload:
74
+ return "❌ Agent did not produce any answers.", pd.DataFrame(results_log)
75
 
76
  submission_data = {
77
  "username": username.strip(),
 
79
  "answers": answers_payload
80
  }
81
 
82
+ print("📤 Submission preview:")
83
  print(json.dumps(submission_data, indent=2)[:1000])
84
 
85
  try:
 
99
 
100
  # --- Gradio UI ---
101
  with gr.Blocks() as demo:
102
+ gr.Markdown("# 🤖 Smart Agent Runner (HF Transformers)")
103
  gr.Markdown(
104
  """
105
  ✅ **Instructions:**
106
+ 1. Log in to your Hugging Face account.
107
+ 2. Click **“Run Evaluation & Submit All Answers”**.
108
+ 3. Wait ~1–2 mins. You need at least **30% correct** to unlock the certificate.
109
  """
110
  )
111
 
 
117
  run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
118
 
119
  if __name__ == "__main__":
120
+ print("🚀 Launching agent...")
121
  demo.launch(debug=True, share=False)