# Agentic / src/app.py — by sahilmayekar
# Commit e05f15c: "User support and Thinking UI added"
import streamlit as st
from streamlit.components.v1 import html
from langchain_ollama.chat_models import ChatOllama
from langchain_ollama.embeddings import OllamaEmbeddings
from langchain.schema import HumanMessage, SystemMessage, BaseMessage, AIMessage
from langchain_core.prompts import MessagesPlaceholder, ChatPromptTemplate
# Configure the page in a single call: Streamlit allows only one
# st.set_page_config() per script run on most versions — the original called
# it twice, which raises StreamlitAPIException. All options are merged here.
st.set_page_config(layout="wide", page_title="Agentic", page_icon=":robot_face:")
st.header(":red[CineGuide] the AI Agent\n  ", divider='rainbow')
@st.cache_resource
def get_chat_model():
    """Build the Ollama-backed chat LLM; cached once per server process."""
    chat_llm = ChatOllama(model="deepseek-r1:8b")
    return chat_llm
@st.cache_resource
def get_embedding_model():
    """Build the Ollama embedding model; cached once per server process."""
    embedder = OllamaEmbeddings(model='nomic-embed-text:latest')
    return embedder
# Lazily populate per-session resources and the running chat transcript.
# The factories are @st.cache_resource-backed, so the heavy construction
# happens at most once per server process regardless of session count.
for _key, _factory in (
    ("chat_model", get_chat_model),
    ("embedding_model", get_embedding_model),
    ("messages", list),
):
    if _key not in st.session_state:
        st.session_state[_key] = _factory()
# Persona/policy prompt prepended to every model call: instructs the model to
# build a user profile conversationally (name, location, format, genres, era)
# before recommending, and sets the tone rules. NOTE(review): the "available
# tools" it mentions are not wired up anywhere in this file — confirm intent.
system_message = SystemMessage(
    content="""
You are “CineGuide,” a friendly and intelligent AI assistant specializing in personalized film and TV show recommendations.
Before performing any recommendations, searches, or actions:
- Check if the user's profile includes:
1. Name (optional but good for personalization)
2. Location (for local theaters and premieres)
3. Preference: Movies, TV shows, or both
4. Favorite genres
5. Preferred time period (classics, recent releases, specific decades)
- If any of these are missing, ask in a conversational way, one at a time, without overwhelming the user.
- If the user has not yet mentioned whether they prefer movies or TV shows, their favorite genres, or the period they enjoy most, naturally discover this before proceeding.
Your mission:
- Build and update a dynamic user profile purely from conversation.
- Recommend both classic and new releases tailored to their tastes.
- Use available tools to:
- Search movies and shows with filters (genre, release year, rating, etc.).
- Find local theaters or events where these are premiering.
- Identify OTT platforms where suggested content is available.
- Provide short summaries and available viewing options for each recommendation.
- Ask follow-up questions to refine recommendations (e.g., “Do you feel like watching a drama or a comedy tonight?”).
Tone & style:
- Be conversational and engaging.
- Keep recommendations relevant.
- Avoid asking for unnecessary personal data unless it clearly improves suggestions.
"""
)
# Two-pane layout: narrow left column (profile form), wide right column (chat).
col1, col2 = st.columns([0.3, 0.7])
# Global CSS injected via markdown: pads the main container, gives the profile
# column a dark background, and makes the chat column independently
# scrollable at 70% of the viewport height (the autoscroll JS below targets
# the same ".stColumn:nth-child(2) > div" selector).
st.markdown("""
<style>
.stMainBlockContainer{
padding: 4rem;
}
.stColumn:nth-child(1) {
padding: 2rem;
width: 100%;
background-color: #06080c;
}
.stColumn:nth-child(2)>div {
overflow-y: scroll;
height: 70vh;
}
</style>
""", unsafe_allow_html=True)
with col1:
    # User-profile form. The original widgets had no `key=`, so every value
    # the user typed was discarded on rerun and unreachable from the rest of
    # the app; each widget now persists its value under a stable
    # st.session_state key (backward compatible — labels/options unchanged).
    st.markdown("<h4 style='text-align:right'>User Information</h4>", unsafe_allow_html=True)
    st.text_input('Name', placeholder="", key="profile_name")
    st.text_input('Location', placeholder="", key="profile_location")
    st.multiselect('Preferred Language', options=['English', 'Hindi'], key="profile_languages")
    st.multiselect("Preferred Genres", options=['Action', 'Comedy', 'Drama', 'Horror', 'Romance', 'Sci-Fi', 'Thriller', 'Documentary'], key="profile_genres")
    st.text_area('Preferred Plots', placeholder="", key="profile_plots")
    # Placeholders for future "liked" lists; nothing is rendered inside yet.
    with st.expander("Liked Movies", expanded=True):
        pass
    with st.expander("Liked TV Shows", expanded=True):
        pass
with col2:
    # Replay the saved transcript so chat history survives Streamlit reruns.
    # Only Human/AI messages are rendered; the system prompt is never stored
    # in st.session_state.messages.
    for past in st.session_state.messages:
        if isinstance(past, HumanMessage):
            st.chat_message("user").write(past.content)
        elif isinstance(past, AIMessage):
            st.chat_message("assistant").write(past.content)
# Bottom-pinned input box. The CSS overrides Streamlit's default placement so
# the input floats over the chat (right) column; the 32% left offset roughly
# matches the 30/70 column split above.
user_input = st.chat_input("Type your message here...")
st.markdown("""
<style>
.stChatInput {
position: fixed;
bottom: 4rem;
left: 32%;
width: 65%;
z-index: 999;
}
</style>
""", unsafe_allow_html=True)
# Auto-scroll: every 500 ms, pin the chat column's scroll position to the
# bottom so streamed replies stay in view. The script runs inside a Streamlit
# component iframe, hence `window.parent.document` to reach the app DOM; the
# selector matches the scrollable column styled in the CSS block above.
scroll_js = """
<script>
function scrollToBottom(){
const el = window.parent.document.querySelector(".stColumn:nth-child(2) > div");
if (el) {
el.scrollTop = el.scrollHeight;
}
}
setInterval(scrollToBottom, 500);
</script>
"""
# height/width 0: the component is invisible; it exists only for the script.
html(scroll_js, height=0, width=0, scrolling=False)
if user_input:
    # --- Record and echo the user's turn. ---
    user_msg = HumanMessage(content=user_input)
    st.chat_message("user").write(user_input)
    st.session_state.messages.append(user_msg)

    # --- Build the prompt: system persona + full stored transcript. ---
    prompt = ChatPromptTemplate.from_messages(
        [system_message, MessagesPlaceholder(variable_name="messages")]
    )
    formatted_prompt = prompt.invoke({"messages": st.session_state.messages})
    response = st.session_state.chat_model.stream(formatted_prompt)

    # --- Stream the reply. ---
    # deepseek-r1 emits its chain of thought between literal "<think>" /
    # "</think>" marker chunks; show that inside a st.status box and stream
    # only the final answer into the chat bubble.
    #
    # The original implementation drove the stream through a `global`-flag
    # generator consumed by TWO loops: when the model emitted no <think>
    # block the first answer chunk was consumed by the status loop and never
    # displayed, and `status`/`thinking_placeholder` were bound only on an
    # exact "<think>" chunk (NameError risk on a malformed stream). This
    # version makes a single pass with the same visible behavior.
    thinking = False                 # currently inside a <think> section?
    thinking_data = ""               # accumulated chain-of-thought text
    assistant_response_text = ""     # accumulated visible answer
    status = None                    # st.status box, created lazily
    thinking_placeholder = None      # markdown slot inside the status box
    answer_placeholder = st.chat_message("assistant").empty()

    for chunk in response:
        text = chunk.content
        if text == "<think>":
            # Enter the reasoning section: open the status box lazily.
            thinking = True
            status = st.status("Thinking...", expanded=True)
            thinking_placeholder = status.empty()
            continue
        if text == "</think>":
            # Leave the reasoning section and collapse the status box.
            thinking = False
            if status is not None:
                status.update(label="Thinking complete.", state="complete", expanded=False)
            continue
        if thinking:
            thinking_data += text
            thinking_placeholder.markdown(thinking_data)
        else:
            assistant_response_text += text
            answer_placeholder.markdown(assistant_response_text)

    # Persist only the visible answer (never the chain of thought), so the
    # replayed transcript and the next prompt exclude <think> content.
    st.session_state.messages.append(AIMessage(content=assistant_response_text))