pathmohd123 committed on
Commit
f63b186
·
verified ·
1 Parent(s): 709c6a9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -16
app.py CHANGED
@@ -6,33 +6,34 @@ import gradio as gr
6
  import pandas as pd
7
  from typing import Optional
8
  from dotenv import load_dotenv
9
- from transformers import pipeline
10
 
11
- # ──────────────── Load Environment ────────────────
12
  load_dotenv()
13
  hf_token = os.getenv("HF_TOKEN")
14
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
15
 
16
- # ──────────────── Load Falcon Model ────────────────
17
  try:
18
- pipe = pipeline("text-generation", model="tiiuae/falcon-rw-1b", token=hf_token)
19
  except Exception as e:
20
  raise RuntimeError(f"Model loading failed: {e}")
21
 
22
- # ──────────────── Agent Class ────────────────
23
- class FalconAgent:
24
  def __init__(self):
25
- print("βœ… FalconAgent initialized.")
26
 
27
  def __call__(self, question: str) -> str:
28
  try:
29
- prompt = f"{question}"
30
- output = pipe(prompt, max_new_tokens=100, do_sample=False)
31
- return output[0]["generated_text"].replace(prompt, "").strip()
32
  except Exception as e:
33
  return f"LLM Error: {e}"
34
 
35
- # ──────────────── API Helpers ────────────────
36
  def get_all_questions(api_url: str) -> list[dict]:
37
  resp = requests.get(f"{api_url}/questions", timeout=15)
38
  resp.raise_for_status()
@@ -48,8 +49,8 @@ def submit_answers(api_url: str, username: str, code_link: str, answers: list[di
48
  resp.raise_for_status()
49
  return resp.json()
50
 
51
- # ──────────────── Gradio Evaluation Logic ────────────────
52
- def run_and_submit_all(profile: gr.OAuthProfile | None):
53
  if not profile:
54
  return "❌ Please log in to Hugging Face using the button above.", None
55
  username = profile.username.strip()
@@ -58,7 +59,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
58
  code_link = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else ""
59
 
60
  try:
61
- agent = FalconAgent()
62
  except Exception as e:
63
  return f"❌ Error initializing agent: {e}", None
64
 
@@ -109,9 +110,9 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
109
  )
110
  return final_status, pd.DataFrame(results_log)
111
 
112
- # ──────────────── Gradio UI ────────────────
113
  with gr.Blocks() as demo:
114
- gr.Markdown("# 🧠 Falcon-RW-1B Agent Evaluation")
115
  gr.Markdown(
116
  """
117
  **Instructions:**
 
6
  import pandas as pd
7
  from typing import Optional
8
  from dotenv import load_dotenv
9
+ from huggingface_hub import InferenceClient
10
 
11
+ # ──────────────── Load Environment ────────────────
12
  load_dotenv()
13
  hf_token = os.getenv("HF_TOKEN")
14
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
+ MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.1"
16
 
17
+ # ──────────────── Load Inference Client ────────────────
18
  try:
19
+ client = InferenceClient(model=MODEL_ID, token=hf_token)
20
  except Exception as e:
21
  raise RuntimeError(f"Model loading failed: {e}")
22
 
23
+ # ──────────────── Agent Class ────────────────
24
+ class MistralAgent:
25
  def __init__(self):
26
+ print("βœ… MistralAgent (Inference API) initialized.")
27
 
28
  def __call__(self, question: str) -> str:
29
  try:
30
+ prompt = f"[INST] {question.strip()} [/INST]"
31
+ response = client.text_generation(prompt, max_new_tokens=100, temperature=0.0, do_sample=False)
32
+ return response.strip()
33
  except Exception as e:
34
  return f"LLM Error: {e}"
35
 
36
+ # ──────────────── API Helpers ────────────────
37
  def get_all_questions(api_url: str) -> list[dict]:
38
  resp = requests.get(f"{api_url}/questions", timeout=15)
39
  resp.raise_for_status()
 
49
  resp.raise_for_status()
50
  return resp.json()
51
 
52
+ # ──────────────── Gradio Evaluation Logic ────────────────
53
+ def run_and_submit_all(profile: Optional[gr.OAuthProfile]):
54
  if not profile:
55
  return "❌ Please log in to Hugging Face using the button above.", None
56
  username = profile.username.strip()
 
59
  code_link = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else ""
60
 
61
  try:
62
+ agent = MistralAgent()
63
  except Exception as e:
64
  return f"❌ Error initializing agent: {e}", None
65
 
 
110
  )
111
  return final_status, pd.DataFrame(results_log)
112
 
113
+ # ──────────────── Gradio UI ────────────────
114
  with gr.Blocks() as demo:
115
+ gr.Markdown("# 🧠 Mistral-7B Agent Evaluation (via Inference API)")
116
  gr.Markdown(
117
  """
118
  **Instructions:**