chatbot fix2 sherika
Browse files
phase/Student_view/chatbot.py
CHANGED
|
@@ -64,21 +64,13 @@ def _reply_with_hf():

Before (old lines 64–84; the call on old line 67 and its arguments on old
lines 68–71 were truncated/lost in extraction and are marked as such):

    prompt = f"{TUTOR_PROMPT}\n\n{convo}\n\nTutor:"

    try:
        response = client.⟨call name truncated in extraction⟩(
            ⟨arguments lost in extraction⟩
        )

        if isinstance(response, list) and "generated_text" in response[0]:
            return response[0]["generated_text"].strip()
        elif isinstance(response, dict) and "generated_text" in response:
            return response["generated_text"].strip()
        elif isinstance(response, str):
            return response.strip()
        else:
            raise ValueError(f"Unexpected response format: {response}")
    except Exception as e:
        err_text = ''.join(traceback.format_exception_only(type(e), e)).strip()
        raise RuntimeError(f"Hugging Face API Error: {err_text}")

After (new lines 64–76):

    prompt = f"{TUTOR_PROMPT}\n\n{convo}\n\nTutor:"

    try:
        response = client.text_generation(
            prompt,
            max_new_tokens=220,
            temperature=0.2,
            return_full_text=False  # only return the completion
        )
        return response.strip()
    except Exception as e:
        err_text = ''.join(traceback.format_exception_only(type(e), e)).strip()
        raise RuntimeError(f"Hugging Face API Error: {err_text}")