Ragulvasanth66 committed on
Commit
e688a84
·
verified ·
1 Parent(s): 0614efa
Files changed (1) hide show
  1. app.py +5 -10
app.py CHANGED
@@ -20,22 +20,17 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
20
  class BasicAgent:
21
  def __init__(self):
22
  self.token = os.getenv("token")
23
- self.model_id = "mistralai/Mistral-7B-Instruct-v0.2"
24
  self.client = InferenceClient(model=self.model_id, token=self.token)
25
 
26
  def __call__(self, question: str) -> str:
27
  prompt = f"Answer this question concisely and clearly. Only return the final answer.\nQuestion: {question}"
28
  try:
29
- response = self.client.conversational(
30
- messages=[{"role": "user", "content": prompt}]
31
- )
32
- print("✅ Raw response:", response)
33
- return response[0]["generated_text"].strip()
34
  except Exception as e:
35
- print("Error calling inference API")
36
- traceback.print_exc()
37
- return f"error: {str(e)}"
38
-
39
 
40
 
41
  def run_and_submit_all(profile: gr.OAuthProfile | None):
 
20
class BasicAgent:
    """Minimal LLM agent: sends a single question to a hosted inference model
    and returns the model's text answer."""

    def __init__(self):
        # NOTE(review): the env var is literally named "token" — unusual
        # (HF_TOKEN is the common convention); confirm against the Space's
        # secrets configuration.
        self.token = os.getenv("token")
        self.model_id = "meta-llama/Meta-Llama-3-70B-Instruct"
        self.client = InferenceClient(model=self.model_id, token=self.token)

    def __call__(self, question: str) -> str:
        """Return the model's concise answer to `question`.

        On any inference failure, returns a string of the form "error: <exc>"
        instead of raising, so callers always get a printable answer.
        """
        prompt = f"Answer this question concisely and clearly. Only return the final answer.\nQuestion: {question}"
        try:
            response = self.client.text_generation(prompt, max_new_tokens=100)
            return response.strip()
        except Exception as e:
            # SECURITY FIX: the previous version appended `self.token` to the
            # returned error string, leaking the secret API token to anyone
            # reading the agent's output. Report only the exception.
            print(f"Error calling inference API: {e}")
            return f"error: {e}"
 
 
34
 
35
 
36
  def run_and_submit_all(profile: gr.OAuthProfile | None):