Spaces:
Paused
Paused
chatbot
Browse files
app.py
CHANGED
|
@@ -55,6 +55,7 @@ llm = HuggingFaceEndpoint(
|
|
| 55 |
|
| 56 |
model = ChatHuggingFace(llm=llm, verbose=True)
|
| 57 |
|
|
|
|
| 58 |
def predict(message, history):
|
| 59 |
history_langchain_format = []
|
| 60 |
for msg in history:
|
|
@@ -65,7 +66,28 @@ def predict(message, history):
|
|
| 65 |
history_langchain_format.append(HumanMessage(content=message))
|
| 66 |
gpt_response = model.invoke(history_langchain_format)
|
| 67 |
return gpt_response.content
|
|
|
|
| 68 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
|
| 70 |
# setup agents
|
| 71 |
|
|
|
|
| 55 |
|
| 56 |
model = ChatHuggingFace(llm=llm, verbose=True)
|
| 57 |
|
| 58 |
+
"""
|
| 59 |
def predict(message, history):
|
| 60 |
history_langchain_format = []
|
| 61 |
for msg in history:
|
|
|
|
| 66 |
history_langchain_format.append(HumanMessage(content=message))
|
| 67 |
gpt_response = model.invoke(history_langchain_format)
|
| 68 |
return gpt_response.content
|
| 69 |
+
"""
|
| 70 |
|
| 71 |
+
def predict(message, history):
    """Gradio chat handler that routes the conversation through the Alfred agent.

    Args:
        message: The new user message (str).
        history: Gradio "messages"-format chat history — a list of dicts with
            'role' ("user" / "assistant") and 'content' keys.

    Returns:
        The content string of the agent's final message.
    """
    # Convert Gradio history to LangChain message format
    history_langchain_format = []
    for msg in history:
        if msg['role'] == "user":
            history_langchain_format.append(HumanMessage(content=msg['content']))
        elif msg['role'] == "assistant":
            # FIX: original read msg['content"] (mismatched quotes — SyntaxError);
            # the key is 'content', same as the user branch above.
            history_langchain_format.append(AIMessage(content=msg['content']))

    # Add new user message
    history_langchain_format.append(HumanMessage(content=message))

    # Invoke Alfred agent with full message history; the raised recursion_limit
    # allows long tool-use loops before LangGraph aborts the run.
    response = alfred.invoke(
        input={"messages": history_langchain_format},
        config={"recursion_limit": 100}
    )

    # Extract final assistant message from the agent's state
    return response["messages"][-1].content
|
| 91 |
|
| 92 |
# setup agents
|
| 93 |
|