d16
Browse files
app.py
CHANGED
|
@@ -20,22 +20,17 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
|
| 20 |
class BasicAgent:
|
| 21 |
def __init__(self):
|
| 22 |
self.token = os.getenv("token")
|
| 23 |
-
self.model_id = "
|
| 24 |
self.client = InferenceClient(model=self.model_id, token=self.token)
|
| 25 |
|
| 26 |
def __call__(self, question: str) -> str:
|
| 27 |
prompt = f"Answer this question concisely and clearly. Only return the final answer.\nQuestion: {question}"
|
| 28 |
try:
|
| 29 |
-
response = self.client.
|
| 30 |
-
|
| 31 |
-
)
|
| 32 |
-
print("✅ Raw response:", response)
|
| 33 |
-
return response[0]["generated_text"].strip()
|
| 34 |
except Exception as e:
|
| 35 |
-
print("
|
| 36 |
-
|
| 37 |
-
return f"error: {str(e)}"
|
| 38 |
-
|
| 39 |
|
| 40 |
|
| 41 |
def run_and_submit_all(profile: gr.OAuthProfile | None):
|
|
|
|
class BasicAgent:
    """Minimal question-answering agent backed by a Hugging Face Inference endpoint.

    Calling the instance with a question string returns the model's answer,
    or an ``"error: ..."`` string if the inference call fails.
    """

    def __init__(self):
        # NOTE(review): env var name is lowercase "token" — unusual; confirm it
        # matches the Space's secret configuration before renaming.
        self.token = os.getenv("token")
        self.model_id = "meta-llama/Meta-Llama-3-70B-Instruct"
        self.client = InferenceClient(model=self.model_id, token=self.token)

    def __call__(self, question: str) -> str:
        """Return a concise answer to *question*.

        On any inference failure the exception is logged and an
        ``"error: <exc>"`` string is returned instead of raising, so the
        surrounding submission loop keeps running.
        """
        prompt = f"Answer this question concisely and clearly. Only return the final answer.\nQuestion: {question}"
        try:
            response = self.client.text_generation(prompt, max_new_tokens=100)
            return response.strip()
        except Exception as e:
            print(f"Error calling inference API: {e}")
            # SECURITY FIX: the previous version appended
            # "| token used: {self.token}" here, leaking the API secret into
            # the agent's visible/submitted output. Never echo credentials.
            return f"error: {e}"
|
|
|
|
|
|
|
| 34 |
|
| 35 |
|
| 36 |
def run_and_submit_all(profile: gr.OAuthProfile | None):
|