lanna_lalala;- committed on
Commit ·
83c08a0
1
Parent(s): edeae54
update chatbot
Browse files
phase/Student_view/chatbot.py
CHANGED
|
@@ -14,7 +14,7 @@ GEN_MODEL = os.getenv("GEN_MODEL", "google/flan-t5-base") # ✅ CPU-friendly se
|
|
| 14 |
if not HF_TOKEN:
|
| 15 |
st.error("⚠️ HF_TOKEN is not set. In your Space, add a Secret named HF_TOKEN.")
|
| 16 |
else:
|
| 17 |
-
client = InferenceClient(model=GEN_MODEL, token=HF_TOKEN)
|
| 18 |
|
| 19 |
|
| 20 |
# -------------------------------
|
|
@@ -65,7 +65,7 @@ def _reply_with_hf():
|
|
| 65 |
prompt = f"{TUTOR_PROMPT}\n\n{convo}\n\nTutor:"
|
| 66 |
|
| 67 |
try:
|
| 68 |
-
response = client.
|
| 69 |
prompt,
|
| 70 |
max_new_tokens=220,
|
| 71 |
temperature=0.2
|
|
|
|
| 14 |
if not HF_TOKEN:
|
| 15 |
st.error("⚠️ HF_TOKEN is not set. In your Space, add a Secret named HF_TOKEN.")
|
| 16 |
else:
|
| 17 |
+
client = InferenceClient(model=GEN_MODEL, token=HF_TOKEN, provider="hf-inference")
|
| 18 |
|
| 19 |
|
| 20 |
# -------------------------------
|
|
|
|
| 65 |
prompt = f"{TUTOR_PROMPT}\n\n{convo}\n\nTutor:"
|
| 66 |
|
| 67 |
try:
|
| 68 |
+
response = client.text_generation(
|
| 69 |
prompt,
|
| 70 |
max_new_tokens=220,
|
| 71 |
temperature=0.2
|