Spaces:
Sleeping
Sleeping
Fix Gradio Chatbot type error and HF token env var
Browse files
README.md
CHANGED
|
@@ -4,7 +4,7 @@ emoji: 🔍
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: purple
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: "4.44.0"
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
|
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: purple
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: "4.44.1"
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
app.py
CHANGED
|
@@ -74,11 +74,13 @@ def structure_meeting(project_name, meeting_title, meeting_date, participants, m
|
|
| 74 |
|
| 75 |
try:
|
| 76 |
# Use HF Inference API
|
|
|
|
|
|
|
| 77 |
endpoint = HuggingFaceEndpoint(
|
| 78 |
repo_id="meta-llama/Llama-3.2-3B-Instruct",
|
| 79 |
temperature=0.3,
|
| 80 |
max_new_tokens=1024,
|
| 81 |
-
huggingfacehub_api_token=
|
| 82 |
)
|
| 83 |
llm = ChatHuggingFace(llm=endpoint)
|
| 84 |
|
|
@@ -201,7 +203,6 @@ with gr.Blocks(title="Sherlock: AI Project Assistant", theme=gr.themes.Soft(), c
|
|
| 201 |
chatbot = gr.Chatbot(
|
| 202 |
label="Chat",
|
| 203 |
height=350,
|
| 204 |
-
type="messages",
|
| 205 |
show_label=False
|
| 206 |
)
|
| 207 |
|
|
@@ -220,16 +221,13 @@ with gr.Blocks(title="Sherlock: AI Project Assistant", theme=gr.themes.Soft(), c
|
|
| 220 |
if not message:
|
| 221 |
return chat_history, ""
|
| 222 |
|
| 223 |
-
# Add user message to history
|
| 224 |
-
chat_history.append({"role": "user", "content": message})
|
| 225 |
-
|
| 226 |
# Get bot response
|
| 227 |
bot_message = ""
|
| 228 |
for response_chunk in chat(message, chat_history, project):
|
| 229 |
bot_message = response_chunk
|
| 230 |
|
| 231 |
-
# Add to history
|
| 232 |
-
chat_history.append({"role": "assistant", "content": bot_message})
|
| 233 |
|
| 234 |
return chat_history, ""
|
| 235 |
|
|
|
|
| 74 |
|
| 75 |
try:
|
| 76 |
# Use HF Inference API
|
| 77 |
+
# HF Spaces provides token as HF_TOKEN or HUGGING_FACE_HUB_TOKEN
|
| 78 |
+
hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGING_FACE_HUB_TOKEN")
|
| 79 |
endpoint = HuggingFaceEndpoint(
|
| 80 |
repo_id="meta-llama/Llama-3.2-3B-Instruct",
|
| 81 |
temperature=0.3,
|
| 82 |
max_new_tokens=1024,
|
| 83 |
+
huggingfacehub_api_token=hf_token
|
| 84 |
)
|
| 85 |
llm = ChatHuggingFace(llm=endpoint)
|
| 86 |
|
|
|
|
| 203 |
chatbot = gr.Chatbot(
|
| 204 |
label="Chat",
|
| 205 |
height=350,
|
|
|
|
| 206 |
show_label=False
|
| 207 |
)
|
| 208 |
|
|
|
|
| 221 |
if not message:
|
| 222 |
return chat_history, ""
|
| 223 |
|
|
|
|
|
|
|
|
|
|
| 224 |
# Get bot response
|
| 225 |
bot_message = ""
|
| 226 |
for response_chunk in chat(message, chat_history, project):
|
| 227 |
bot_message = response_chunk
|
| 228 |
|
| 229 |
+
# Add to history as tuple
|
| 230 |
+
chat_history.append((message, bot_message))
|
| 231 |
|
| 232 |
return chat_history, ""
|
| 233 |
|