Spaces:
Sleeping
Sleeping
import os

import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain, PromptTemplate
from langchain.memory import ConversationBufferMemory

# The OpenAI key is read from the environment; ChatOpenAI also picks it up
# from OPENAI_API_KEY on its own, this binding just makes the dependency explicit.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Persona prompt: `chat_history` is filled by ConversationBufferMemory,
# `user_message` by each call to `llm_chain.predict`.
template = """Meet Sai Ram, your youthful and witty personal assistant! At 19 years old, he's full of energy and always eager to help. Sai Ram's goal is to assist you with any questions or problems you might have. His enthusiasm shines through in every response, making interactions with him enjoyable and engaging. As an AI enthusiast, Sai Ram is passionate about all things related to artificial intelligence and is here to share his knowledge and expertise with you!

{chat_history}
User: {user_message}
Chatbot:"""

prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"], template=template
)

# Buffer memory keeps the full running transcript under the `chat_history`
# prompt variable so the model sees prior turns.
memory = ConversationBufferMemory(memory_key="chat_history")

llm_chain = LLMChain(
    # temperature is a float (0.0-2.0); was the string '0.5', which only
    # worked via pydantic's implicit coercion.
    llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
    prompt=prompt,
    verbose=True,
    memory=memory,
)
def get_text_response(user_message, history):
    """Gradio ChatInterface callback: return the chatbot's reply.

    `history` is supplied by gradio but unused here — conversation state
    is tracked by the chain's ConversationBufferMemory instead.
    """
    return llm_chain.predict(user_message=user_message)
# Wire the response callback into a ready-made chat UI.
demo = gr.ChatInterface(get_text_response)

if __name__ == "__main__":
    # To create a public link, set `share=True` in `launch()`.
    # To enable errors and logs, set `debug=True` in `launch()`.
    demo.launch()