File size: 2,186 Bytes
5acbd0d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1014e76
 
5acbd0d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import streamlit as st
from streamlit_chat import message
from transformers import AutoModelForCausalLM, AutoTokenizer
import random

# Title and UI Customization
st.set_page_config(page_title="AI Chatbot", page_icon="🤖", layout="wide")

# Inject a small stylesheet: light page background, purple rounded buttons,
# and a fade-in animation on the chat container rendered by chatbot_ui().
_PAGE_CSS = """
    <style>
    .main {background-color: #f5f7fa;}
    .stButton button {background-color: #6200EE; color: white; border-radius: 12px;}
    #chat-container {animation: fadeIn 2s ease-in;}
    @keyframes fadeIn {from {opacity: 0;} to {opacity: 1;}}
    </style>
    """
st.markdown(_PAGE_CSS, unsafe_allow_html=True)

# Load Hugging Face Model
@st.cache_resource
def load_model():
    """Load and cache the GPT-2 tokenizer and causal-LM model.

    Streamlit re-executes this script top-to-bottom on every user
    interaction; without caching, the (expensive) model download and
    initialization would repeat on each rerun. ``st.cache_resource``
    runs the body once per server process and reuses the result.

    Returns:
        tuple: ``(tokenizer, model)`` ready for text generation.
    """
    model_name = "gpt2"  # Replace with your preferred Hugging Face model
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model

tokenizer, model = load_model()

# Store Chat History
# Initialize once per browser session; st.session_state is dict-like, so
# setdefault leaves an existing history untouched across script reruns.
st.session_state.setdefault("chat_history", [])

# User Interface
def chatbot_ui():
    """Render the chat page: header banner, text input, send button, and
    the stored conversation (user turns on the right, bot on the left)."""
    header_html = """
        <div id="chat-container" style="text-align: center;">
            <h1>🚀 AI Chatbot</h1>
            <p>Ask me anything, and I'll do my best to help you!</p>
        </div>
        """
    st.markdown(header_html, unsafe_allow_html=True)

    user_input = st.text_input("Type your question:", "")

    # Render the button unconditionally; only generate when it was clicked
    # AND the input box is non-empty.
    send_clicked = st.button("Send")
    if send_clicked and user_input:
        generate_response(user_input)

    # Display Chat History — replay every stored turn in order.
    for idx, turn in enumerate(st.session_state.chat_history):
        if turn['role'] == 'user':
            message(turn['content'], is_user=True, key=f'user_{idx}')
        else:
            message(turn['content'], key=f'bot_{idx}')

# Generate Response
def generate_response(user_input):
    """Generate a model reply for *user_input* and append both turns to
    ``st.session_state.chat_history``.

    Fixes over the naive version:
    - ``model.generate`` returns the prompt ids followed by the
      continuation, so decoding ``outputs[0]`` whole made the bot echo
      the user's message; decode only the newly generated tokens.
    - GPT-2 has no pad token, so ``pad_token_id`` is set explicitly to
      the EOS id to avoid the transformers warning and undefined padding.

    Args:
        user_input: The raw text the user typed.
    """
    inputs = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    outputs = model.generate(
        inputs,
        max_length=200,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Slice off the prompt; keep only the model's continuation.
    bot_reply = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

    # Update Chat History
    st.session_state.chat_history.append({"role": "user", "content": user_input})
    st.session_state.chat_history.append({"role": "bot", "content": bot_reply})

# Entry point: Streamlit reruns this whole script on every interaction,
# so rendering happens by calling the UI function at module level.
chatbot_ui()