| import os |
| import gradio as gr |
| from langchain.chat_models import ChatOpenAI |
| from langchain import LLMChain, PromptTemplate |
| from langchain.memory import ConversationBufferMemory |
|
|
# API key for OpenAI, read from the environment (None when unset);
# langchain's ChatOpenAI picks it up from the environment as well.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
|
|
# Prompt template sent to the model on every turn: a fixed system-style
# instruction, then the accumulated chat history, then the new message.
# The {chat_history} and {user_message} placeholders are filled by the
# chain at call time.
template = """You are a helpful assistant to answer all user queries.
{chat_history}
User: {user_message}
Chatbot:"""
|
|
# Prompt object that substitutes the conversation history and the
# latest user message into the raw template string.
prompt = PromptTemplate(
    template=template,
    input_variables=["chat_history", "user_message"],
)
|
|
# Conversation buffer that accumulates all prior turns; its contents
# are exposed to the prompt under the "chat_history" variable.
memory = ConversationBufferMemory(memory_key="chat_history")
|
|
# Chain wiring the chat model, the prompt, and the conversation memory
# together; verbose=True logs each rendered prompt for debugging.
llm_chain = LLMChain(
    # temperature must be a float — it was previously passed as the
    # string '0.5' and only worked via implicit pydantic coercion.
    llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
    prompt=prompt,
    verbose=True,
    memory=memory,
)
|
|
def get_text_response(user_message, history):
    """Return the chatbot's reply to *user_message*.

    *history* is supplied by gr.ChatInterface but is unused here:
    the chain maintains its own conversation memory.
    """
    return llm_chain.predict(user_message=user_message)
|
|
# Chat UI: gradio calls the handler with (message, history) per turn.
demo = gr.ChatInterface(get_text_response)
|
|
| if __name__ == "__main__": |
| demo.launch() |
|
|