"""Streamlit chatbot with per-session conversation memory, built on LangChain.

Requires OPENAI_API_KEY in the environment (or in a .env file loaded by
python-dotenv). Run with: streamlit run <this file>.
"""

import os

import streamlit as st
from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_community.chat_message_histories import StreamlitChatMessageHistory

load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Initialize Streamlit app
st.set_page_config(page_title="LangChain Chatbot with Memory")
st.title("🤖 LangChain Chatbot with Memory")

# Fail fast with a visible message instead of an opaque auth error
# the first time the chain actually calls OpenAI.
if not OPENAI_API_KEY:
    st.error("OPENAI_API_KEY is not set. Add it to your environment or .env file.")
    st.stop()

# Chat history persisted in Streamlit session state under "chat_messages",
# so it survives the script re-running on every user interaction.
history = StreamlitChatMessageHistory(key="chat_messages")

# Replay prior turns so the transcript stays visible across reruns.
for msg in history.messages:
    role = "user" if msg.type == "human" else "assistant"
    st.chat_message(role).write(msg.content)

# Memory backed by the same Streamlit-persisted history; injected into the
# prompt as the {chat_history} variable.
memory = ConversationBufferMemory(
    memory_key="chat_history",
    chat_memory=history,
    return_messages=True,
)

# Prompt with system role + instruction
prompt = PromptTemplate(
    input_variables=["chat_history", "input"],
    template="""
You are a friendly and knowledgeable assistant. Always reply in a complete sentence using no more than 50 words.

Conversation so far:
{chat_history}

User: {input}
Assistant:""",
)

# LLM and chain.
# max_tokens=120 gives headroom for the prompt's "no more than 50 words"
# instruction: 50 English words is roughly 65+ tokens, so the previous cap
# of 50 tokens truncated replies mid-sentence.
llm = ChatOpenAI(
    openai_api_key=OPENAI_API_KEY,
    model_name="gpt-4o-mini",
    temperature=0.7,
    max_tokens=120,
)

conversation = LLMChain(
    llm=llm,
    prompt=prompt,
    memory=memory,
)

# Handle a new user turn. LLMChain appends both the user message and the
# model reply to `history` via the attached memory.
if user_input := st.chat_input("Say something..."):
    st.chat_message("user").write(user_input)
    # Let LangChain handle history
    response = conversation.run(input=user_input)
    st.chat_message("assistant").write(response)