Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -505,55 +505,46 @@
|
|
| 505 |
|
| 506 |
import os
|
| 507 |
import gradio as gr
|
| 508 |
-
from
|
| 509 |
-
from langchain.
|
| 510 |
-
from langchain.memory import ConversationBufferMemory
|
| 511 |
|
| 512 |
# Set OpenAI API Key
|
| 513 |
-
OPENAI_API_KEY =
|
| 514 |
-
|
| 515 |
-
# Define the template for the chatbot's response
|
| 516 |
-
template = """You are a helpful assistant to answer all user queries.
|
| 517 |
-
{chat_history}
|
| 518 |
-
User: {user_message}
|
| 519 |
-
Chatbot:"""
|
| 520 |
-
|
| 521 |
-
# Define the prompt template
|
| 522 |
-
prompt = PromptTemplate(
|
| 523 |
-
input_variables=["chat_history", "user_message"],
|
| 524 |
-
template=template
|
| 525 |
-
)
|
| 526 |
|
| 527 |
-
# Initialize
|
| 528 |
-
|
| 529 |
|
| 530 |
-
# Function to
|
| 531 |
-
def get_text_response(
|
| 532 |
# Ensure history is a list
|
| 533 |
if history is None:
|
| 534 |
history = []
|
| 535 |
-
|
| 536 |
-
# Prepare the conversation history
|
| 537 |
-
chat_history = history + [f"User: {user_message}"]
|
| 538 |
|
| 539 |
-
#
|
| 540 |
-
|
| 541 |
-
|
| 542 |
-
|
|
|
|
| 543 |
|
| 544 |
-
#
|
| 545 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 546 |
|
| 547 |
# Return the response and updated history
|
| 548 |
-
return
|
| 549 |
-
|
| 550 |
-
# Create a Gradio chat interface
|
| 551 |
-
|
| 552 |
-
|
| 553 |
-
|
| 554 |
-
|
| 555 |
-
|
| 556 |
-
|
| 557 |
if __name__ == "__main__":
|
| 558 |
demo.launch()
|
| 559 |
|
|
@@ -566,3 +557,4 @@ if __name__ == "__main__":
|
|
| 566 |
|
| 567 |
|
| 568 |
|
|
|
|
|
|
| 505 |
|
| 506 |
import os

import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage

# --- OpenAI credentials -----------------------------------------------------
# SECURITY FIX: the original hard-coded a literal "sk-..." API key here.
# A key committed to a (public) Space is compromised and must be revoked.
# Read it from the environment instead; on Hugging Face Spaces, set it as a
# repository secret named OPENAI_API_KEY.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError(
        "OPENAI_API_KEY is not set; export it (or add it as a Space secret) "
        "before launching the app."
    )

# Chat model used for every turn; ChatOpenAI reads OPENAI_API_KEY from the
# environment automatically, so no key is passed in code.
llm = ChatOpenAI(temperature=1.0, model="gpt-3.5-turbo-0613")
|
| 516 |
|
| 517 |
def get_text_response(message, history=None):
    """Reply to ``message`` with the chat model, carrying forward prior turns.

    Parameters
    ----------
    message : str
        The new user utterance.
    history : list | None
        Gradio state: a list of ``(user, assistant)`` string pairs, or
        ``None`` on the first call.

    Returns
    -------
    tuple
        ``(reply_text, updated_history)`` so Gradio can display the answer
        and persist the state. NOTE: mutates ``history`` in place when a
        list is supplied.
    """
    if history is None:
        history = []

    # Rebuild the transcript as LangChain message objects: one Human/AI
    # pair per completed turn, then the fresh user message at the end.
    transcript = []
    for user_turn, ai_turn in history:
        transcript.extend(
            (HumanMessage(content=user_turn), AIMessage(content=ai_turn))
        )
    transcript.append(HumanMessage(content=message))

    # Invoke the chat model on the whole transcript.
    gpt_response = llm(transcript)

    # Record the completed turn, then hand both pieces back to Gradio.
    history.append((message, gpt_response.content))
    return gpt_response.content, history
|
| 540 |
# Wire the responder into a Gradio interface: a text box for the user
# message plus a state slot carrying the running history, mirrored on the
# output side so the updated history flows back in on the next turn.
demo = gr.Interface(
    fn=get_text_response,
    inputs=["text", "state"],
    outputs=["text", "state"],
)

# Start the web app only when executed as a script, not when imported.
if __name__ == "__main__":
    demo.launch()
|
| 550 |
|
|
|
|
| 557 |
|
| 558 |
|
| 559 |
|
| 560 |
+
|