# chatBot / app.py
# Hugging Face Space "chatBot" by istiak101 (commit 0b81ab4, verified).
import os
import subprocess
import streamlit as st
from dotenv import load_dotenv
from xhtml2pdf import pisa
import io
from textwrap import dedent
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Load Model Resources
def load_resources():
    """Load the fine-tuned TinyLlama chat model and its tokenizer.

    Reads HUGGINGFACE_TOKEN from the environment (populated from .env) and
    uses it to authenticate the Hub download.

    Returns:
        tuple: (model, tokenizer) for the
        "istiak101/TinyLlama-1.1B-Chat-v0.6-rag-finetunedv2.0" checkpoint.
    """
    load_dotenv()
    huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
    # Only attempt the CLI login when a token is actually set: passing None
    # as a subprocess argument raises TypeError before the model ever loads.
    if huggingface_token:
        subprocess.run(
            ["huggingface-cli", "login", "--token", huggingface_token],
            capture_output=True,
        )
    model_id = "istiak101/TinyLlama-1.1B-Chat-v0.6-rag-finetunedv2.0"
    # Also pass the token directly so the download works even if the CLI
    # login step failed or huggingface-cli is not installed.
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=huggingface_token)
    model = AutoModelForCausalLM.from_pretrained(model_id, token=huggingface_token)
    return model, tokenizer
# Chat Prompt
def create_test_prompt(question, context, tokenizer):
    """Wrap a question and its supporting context into the model's chat template.

    The user turn embeds *context* in a fenced block beneath the question; a
    fixed system turn instructs the model to answer from that information only.

    Returns:
        str: the rendered (untokenized) prompt with the generation prompt
        appended, ready to feed a text-generation pipeline.
    """
    user_turn = "\n".join(
        ["", question, "Information:", "```", context, "```", ""]
    )
    conversation = [
        {
            "role": "system",
            "content": "Use only the information to answer the question",
        },
        {"role": "user", "content": user_turn},
    ]
    return tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=True
    )
# Store model and tokenizer in session state
# Streamlit reruns this script top-to-bottom on every interaction, so the
# model/tokenizer are loaded once per browser session and cached here to
# avoid re-downloading on each rerun.
if "llama_model" not in st.session_state or "llama_tokenizer" not in st.session_state:
    model, tokenizer = load_resources()
    st.session_state.llama_model = model
    st.session_state.llama_tokenizer = tokenizer
# Page-level configuration (tab title, icon, wide layout).
st.set_page_config(page_title="Tell Me Why", page_icon="❓", layout="wide")
def get_llama_response(query):
    """Generate a model completion for a fully rendered prompt string.

    Builds a text-generation pipeline from the session-cached model and
    tokenizer on first use and reuses it afterwards — the original rebuilt
    the pipeline on every call, which is needlessly expensive.

    Args:
        query: prompt string (already chat-templated).

    Returns:
        str: only the newly generated text (return_full_text=False).
    """
    if "llama_pipe" not in st.session_state:
        st.session_state.llama_pipe = pipeline(
            task="text-generation",
            model=st.session_state.llama_model,
            tokenizer=st.session_state.llama_tokenizer,
            max_new_tokens=128,
            return_full_text=False,
        )
    outputs = st.session_state.llama_pipe(query)
    return outputs[0]["generated_text"]
# PDF Generation
def generate_pdf(convo, topic):
    """Render the conversation as a simple HTML transcript and convert it to PDF.

    Args:
        convo: list of {"role": "user"|"assistant", "text": str} messages.
        topic: conversation title used as the document heading.

    Returns:
        io.BytesIO positioned at the start of the PDF data, or None when
        xhtml2pdf reports a rendering error.
    """
    html = f"<h2>{topic}</h2><hr>"
    for msg in convo:
        if msg["role"] == "user":
            html += f"<p><strong>You:</strong> {msg['text']}</p>"
        elif msg["role"] == "assistant":
            html += f"<p><strong>AI Assistant:</strong> {msg['text']}</p>"
    result = io.BytesIO()
    pisa_status = pisa.CreatePDF(io.StringIO(html), dest=result)
    if pisa_status.err:
        return None
    # Rewind: CreatePDF leaves the buffer at EOF, and downstream readers
    # (e.g. st.download_button) read from the current position.
    result.seek(0)
    return result
# Session Init
# Per-session UI state:
#   chat_sessions        -> {conversation title: list of {"role", "text"} messages}
#   current_conversation -> title of the conversation being viewed, or None
#   edit_mode            -> {message index: bool} flags for in-place editing of user turns
if "chat_sessions" not in st.session_state:
    st.session_state.chat_sessions = {}
if "current_conversation" not in st.session_state:
    st.session_state.current_conversation = None
if "edit_mode" not in st.session_state:
    st.session_state.edit_mode = {}
# App Title
st.title("πŸ’¬ Tell Me Why")
# Custom CSS
# Chat-bubble styling injected once per rerun. Colors use translucent rgba
# plus the theme's --text-color variable so bubbles render sensibly in both
# light and dark Streamlit themes.
st.markdown("""
<style>
.chat-wrapper {
display: flex;
flex-direction: column;
}
.message-row {
display: flex;
align-items: flex-end;
justify-content: flex-end;
margin-top: 10px;
}
.user-bubble {
background-color: rgba(0, 200, 83, 0.1);
color: var(--text-color);
padding: 15px;
border-radius: 15px;
max-width: 80%;
border: 1px solid rgba(0, 200, 83, 0.4);
position: relative;
margin-bottom: 20px;
}
.assistant-bubble {
background-color: rgba(3, 169, 244, 0.1);
color: var(--text-color);
padding: 15px;
border-radius: 15px;
max-width: 80%;
align-self: flex-start;
margin-right: auto;
border: 1px solid rgba(3, 169, 244, 0.3);
margin-top: 10px;
margin-bottom: 40px;
}
.bubble-header {
font-size: 14px;
font-weight: bold;
margin-bottom: 5px;
display: flex;
align-items: center;
}
.bubble-header span {
margin-left: 5px;
}
.icon-col {
display: flex;
flex-direction: column;
gap: 5px;
align-items: center;
}
</style>
""", unsafe_allow_html=True)
# Sidebar: Conversations
# One row per saved conversation: a select button and a delete button.
st.sidebar.title("Conversations")
conversation_names = list(st.session_state.chat_sessions.keys())
if not conversation_names:
    st.sidebar.write("No conversations yet. Start one below!")
else:
    for name in conversation_names:
        select_col, delete_col = st.sidebar.columns([0.8, 0.2])
        if select_col.button(f"🎨 {name}", key=f"select_{name}"):
            st.session_state.current_conversation = name
            st.rerun()
        if delete_col.button("πŸ—‘", key=f"delete_{name}"):
            del st.session_state.chat_sessions[name]
            # Clear the selection when the open conversation is deleted.
            if st.session_state.current_conversation == name:
                st.session_state.current_conversation = None
            st.rerun()
# New Conversation
# Sidebar form to start a fresh, uniquely named conversation.
with st.sidebar.form(key='new_conversation_form', clear_on_submit=True):
    topic_name = st.text_input("New Conversation Name")
    if st.form_submit_button("Start New Conversation"):
        # Guard clauses: reject blank names first, then duplicates.
        if not topic_name.strip():
            st.sidebar.warning("Please enter a name.")
        elif topic_name in st.session_state.chat_sessions:
            st.sidebar.warning("Conversation already exists!")
        else:
            st.session_state.chat_sessions[topic_name] = []
            st.session_state.current_conversation = topic_name
            st.sidebar.success(f"Started new conversation: {topic_name}")
            st.rerun()
# Main Chat Area
# Renders the selected conversation, supports in-place editing of user turns,
# generates the pending assistant reply, and offers PDF export plus the
# question/context submission form.
if st.session_state.current_conversation:
    convo = st.session_state.chat_sessions[st.session_state.current_conversation]
    st.markdown('<div class="chat-wrapper">', unsafe_allow_html=True)
    for idx, msg in enumerate(convo):
        with st.container():
            if msg["role"] == "user":
                if st.session_state.edit_mode.get(idx, False):
                    # Split the stored message back into question and context
                    # and strip the display labels before editing.
                    question_input, context_input = msg["text"].split("<br><br>")
                    question_input = question_input.replace("Question: ", "")
                    context_input = context_input.replace("Context: ", "")
                    new_question = st.text_input("Edit your question:", value=question_input, key=f"edit_question_{idx}")
                    new_context = st.text_area("Edit your context:", value=context_input, key=f"edit_context_{idx}")
                    prompt = create_test_prompt(new_question, new_context, st.session_state.llama_tokenizer)
                    col1, col2 = st.columns([1, 1])
                    with col1:
                        if st.button("βœ… Save", key=f"save_{idx}"):
                            # Store without the "Question:"/"Context:" labels.
                            msg["text"] = f"{new_question}<br><br>{new_context}"
                            with st.spinner("Generating response..."):
                                try:
                                    new_response = get_llama_response(prompt)
                                # Was a bare `except:`; Exception keeps the UI
                                # alive without masking SystemExit/KeyboardInterrupt.
                                except Exception:
                                    new_response = "Failed to retrieve response."
                            # Overwrite the assistant reply that follows this turn.
                            if idx + 1 < len(convo) and convo[idx + 1]["role"] == "assistant":
                                convo[idx + 1]["text"] = new_response
                            st.session_state.edit_mode[idx] = False
                            st.session_state.chat_sessions[st.session_state.current_conversation] = convo
                            st.rerun()
                    with col2:
                        if st.button("❌ Cancel", key=f"cancel_{idx}"):
                            st.session_state.edit_mode[idx] = False
                            st.rerun()
                else:
                    col1, col2 = st.columns([0.1, 0.9])
                    with col1:
                        if st.button("✏️", key=f"edit_btn_{idx}"):
                            st.session_state.edit_mode[idx] = True
                            st.rerun()
                    with col2:
                        st.markdown(f'''
                        <div class="user-bubble">
                            <div class="bubble-header">πŸ‘€ <span>You</span></div>
                            {msg["text"]}
                        </div>
                        ''', unsafe_allow_html=True)
            elif msg["role"] == "assistant":
                st.markdown(f'''
                <div class="assistant-bubble">
                    <div class="bubble-header">πŸ“˜ <span>AI Assistant</span></div>
                    {msg["text"]}
                </div>
                ''', unsafe_allow_html=True)
    st.markdown('</div>', unsafe_allow_html=True)
    # User Prompt
    # An odd message count means the last turn is a user message still waiting
    # for its assistant reply: generate it now, then rerun to render it.
    if len(convo) % 2 == 1:
        last_user_msg = convo[-1]["text"]
        question_input, context_input = last_user_msg.split("<br><br>")
        question_input = question_input.replace("Question: ", "")
        context_input = context_input.replace("Context: ", "")
        prompt = create_test_prompt(question_input, context_input, st.session_state.llama_tokenizer)
        with st.spinner("Generating response..."):
            try:
                assistant_reply = get_llama_response(prompt)
            # Was `except Exception as e` with an unused `e` and a
            # placeholder-less f-string; message text is unchanged.
            except Exception:
                assistant_reply = "⚠️ Failed to generate response"
        convo.append({"role": "assistant", "text": assistant_reply})
        st.session_state.chat_sessions[st.session_state.current_conversation] = convo
        st.rerun()
    st.markdown("---")
    # Export PDF
    if st.button("πŸ“₯ Export Conversation as PDF"):
        pdf_bytes = generate_pdf(convo, st.session_state.current_conversation)
        if pdf_bytes:
            st.download_button("Download PDF", pdf_bytes, file_name="TellMeWhy_Conversation.pdf", mime="application/pdf")
        else:
            st.error("❌ Failed to generate PDF.")
    # Input form for the next question/context pair.
    with st.form(key="submit_form", clear_on_submit=True):
        question_input = st.text_input("Enter your question:")
        context_input = st.text_area("Enter your context:")
        submit_button = st.form_submit_button("Submit")
        if submit_button:
            if question_input and context_input:
                combined_input = f"Question: {question_input}<br><br>Context: {context_input}"
                convo.append({"role": "user", "text": combined_input})
                st.session_state.chat_sessions[st.session_state.current_conversation] = convo
                st.rerun()