Update app.py
app.py CHANGED
@@ -292,6 +292,57 @@
 # if __name__ == "__main__":
 #     demo.launch()
 
+# import os
+# import subprocess
+# import gradio as gr
+
+# # Install necessary packages
+# subprocess.check_call(["pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])
+
+# from langchain_openai import ChatOpenAI
+# from langchain.prompts import PromptTemplate
+# from langchain.memory import ConversationBufferMemory
+# from langchain.chains import Runnable, RunnableSequence
+
+# # Set OpenAI API Key
+# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
+
+# # Define the template for the chatbot's response
+# template = """You are a helpful assistant to answer all user queries.
+# {chat_history}
+# User: {user_message}
+# Chatbot:"""
+
+# # Define the prompt template
+# prompt = PromptTemplate(
+#     input_variables=["chat_history", "user_message"],
+#     template=template
+# )
+
+# # Initialize conversation memory
+# memory = ConversationBufferMemory(memory_key="chat_history")
+
+# # Define the runnable sequence
+# chatbot_runnable = RunnableSequence(prompt | ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"))
+
+# # Function to get chatbot response
+# def get_text_response(user_message, history=None):
+#     # Ensure history is a list
+#     if history is None:
+#         history = []
+
+#     # Prepare the conversation history
+#     chat_history = history + [f"User: {user_message}"]
+#     response = chatbot_runnable.invoke({"chat_history": "\n".join(chat_history), "user_message": user_message})
+
+#     return response
+
+# # Create a Gradio chat interface
+# demo = gr.Interface(fn=get_text_response, inputs=["text", "state"], outputs="text")
+
+# if __name__ == "__main__":
+#     demo.launch()
+
 import os
 import subprocess
 import gradio as gr
@@ -302,7 +353,6 @@ subprocess.check_call(["pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])
 from langchain_openai import ChatOpenAI
 from langchain.prompts import PromptTemplate
 from langchain.memory import ConversationBufferMemory
-from langchain.chains import Runnable, RunnableSequence
 
 # Set OpenAI API Key
 OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
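The deleted import is likely what broke the Space in the first place: `langchain.chains` does not export `Runnable` or `RunnableSequence`, so this line raises an ImportError. In current LangChain releases those classes live in `langchain_core.runnables`; a minimal sketch of the import that would have worked, assuming a recent `langchain-core` is installed:

# Runnable and RunnableSequence are exported by langchain_core.runnables,
# not langchain.chains; importing them from langchain.chains fails.
from langchain_core.runnables import Runnable, RunnableSequence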
@@ -322,9 +372,6 @@ prompt = PromptTemplate(
 # Initialize conversation memory
 memory = ConversationBufferMemory(memory_key="chat_history")
 
-# Define the runnable sequence
-chatbot_runnable = RunnableSequence(prompt | ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"))
-
 # Function to get chatbot response
 def get_text_response(user_message, history=None):
     # Ensure history is a list
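The removed wrapper was redundant even apart from the import problem: the pipe expression `prompt | ChatOpenAI(...)` already evaluates to a `RunnableSequence`, and in recent `langchain_core` releases constructing `RunnableSequence` from a single step raises a `ValueError` (a sequence needs at least two steps). Note also that `memory` is created but nothing in the diff shown here ever wires it into the chain. A sketch of the idiomatic composition, reusing the `prompt` defined earlier in app.py:

from langchain_openai import ChatOpenAI

# The pipe operator alone builds the runnable sequence; no wrapper class needed.
llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
chatbot_runnable = prompt | llm  # prompt is the PromptTemplate defined above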
@@ -333,9 +380,10 @@ def get_text_response(user_message, history=None):
 
     # Prepare the conversation history
     chat_history = history + [f"User: {user_message}"]
-    response = chatbot_runnable.invoke({"chat_history": "\n".join(chat_history), "user_message": user_message})
+    llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
+    response = llm({"chat_history": "\n".join(chat_history), "user_message": user_message})
 
-    return response
+    return response['choices'][0]['message']['content']
 
 # Create a Gradio chat interface
 demo = gr.Interface(fn=get_text_response, inputs=["text", "state"], outputs="text")
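The replacement probably still fails at runtime: a `ChatOpenAI` instance is not called with a dict of template variables (its input is a string, a prompt value, or a list of messages), and it returns an `AIMessage`, not the raw OpenAI-SDK dict, so `response['choices'][0]['message']['content']` would raise a `TypeError`. A sketch of what this function presumably intends, assuming the `prompt` template defined earlier in app.py:

def get_text_response(user_message, history=None):
    # Ensure history is a list
    if history is None:
        history = []

    # Prepare the conversation history
    chat_history = history + [f"User: {user_message}"]

    llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
    # Format the prompt, then invoke the model; the generated text lives on
    # the AIMessage's .content attribute.
    response = (prompt | llm).invoke({
        "chat_history": "\n".join(chat_history),
        "user_message": user_message,
    })
    return response.content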
@@ -348,3 +396,4 @@ if __name__ == "__main__":
 
 
 
+
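One more wrinkle the commit leaves in place: `gr.Interface(fn=get_text_response, inputs=["text", "state"], outputs="text")` declares a "state" input but never returns an updated state, so `history` stays empty on every turn. If multi-turn memory is the goal, Gradio's `gr.ChatInterface` passes the accumulated history in for you; a sketch, assuming the pair-format history of Gradio 3.x/4.x and the `prompt` and `llm` objects above:

import gradio as gr

def respond(message, history):
    # history arrives as [[user, bot], ...] pairs; flatten it into the
    # plain-text transcript the prompt template expects.
    lines = []
    for user_turn, bot_turn in history:
        lines.append(f"User: {user_turn}")
        lines.append(f"Chatbot: {bot_turn}")
    response = (prompt | llm).invoke({
        "chat_history": "\n".join(lines),
        "user_message": message,
    })
    return response.content

demo = gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()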