File size: 3,340 Bytes
bd07837
 
 
 
 
 
 
 
 
 
 
 
 
 
 
368dfa9
bd07837
 
 
 
 
 
 
368dfa9
bd07837
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ca39bef
bd07837
 
 
 
 
 
 
 
 
 
368dfa9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import os

import streamlit as st
from dotenv import load_dotenv
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

# Load environment variables from a local .env file (expects OPENAI_API_KEY;
# langchain_openai picks it up from the environment automatically)
load_dotenv()

# Build the ChatOpenAI client once and reuse it across Streamlit reruns.
@st.cache_resource
def init_llm():
    """Return a cached ChatOpenAI client configured for short, low-variance answers."""
    client = ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0.1,
        max_tokens=300,  # Increased for better context explanations
    )
    return client

# Ensure the conversation buffer survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Page header.
st.title("🤖 Simple Chatbot using openai")
st.write("Ask me anything and I'll give you a short, simple answer!")

# Replay the stored conversation on every rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

# Chat input: handle one user turn per rerun.
if prompt := st.chat_input("What would you like to know?"):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Get bot response
    with st.chat_message("assistant"):
        try:
            llm = init_llm()

            # The instruction prompt must be a SystemMessage — the original
            # sent it as a HumanMessage, so the model treated it as a user turn.
            system_prompt = "You are a helpful assistant. Always give short, simple, and direct answers. Keep responses under 100 words. Pay attention to the conversation history to maintain context."
            messages = [SystemMessage(content=system_prompt)]

            # The current prompt was already appended to session state above,
            # so the last-10 slice includes it; do NOT append it again
            # (the original sent the user's message to the model twice).
            # A [-10:] slice is safe on lists shorter than 10.
            for msg in st.session_state.messages[-10:]:
                if msg["role"] == "user":
                    messages.append(HumanMessage(content=msg["content"]))
                else:
                    messages.append(AIMessage(content=msg["content"]))

            # Get response from the model and render it.
            response = llm.invoke(messages)
            bot_response = response.content
            st.markdown(bot_response)

            # Persist the assistant's reply for future turns.
            st.session_state.messages.append({"role": "assistant", "content": bot_response})

        except Exception as e:
            # Surface the failure in the UI and keep it in history so the
            # transcript stays consistent with what the user saw.
            error_msg = f"Error: {str(e)}"
            st.error(error_msg)
            st.session_state.messages.append({"role": "assistant", "content": error_msg})

# Sidebar: chat controls plus setup instructions.
with st.sidebar:
    st.header("Chat Controls")
    # Wipe the stored conversation and redraw the page.
    if st.button("Clear Chat History"):
        st.session_state.messages = []
        st.rerun()

    st.markdown("---")
    st.markdown("**Instructions:**")
    for step in (
        "1. Make sure you have your OpenAI API key in a `.env` file",
        "2. The format should be: `OPENAI_API_KEY=your_api_key_here`",
        "3. Install required packages: `pip install streamlit langchain-openai python-dotenv`",
    ):
        st.markdown(step)