"""CineGuide — a Streamlit chat UI over a local Ollama deepseek-r1 model.

Left column collects a lightweight user profile; right column hosts the chat.
deepseek-r1 emits its chain of thought wrapped in <think>...</think> tags,
which this app routes into a collapsible st.status box while streaming the
final answer into the chat bubble.
"""

import streamlit as st
from streamlit.components.v1 import html
from langchain_ollama.chat_models import ChatOllama
from langchain_ollama.embeddings import OllamaEmbeddings
from langchain.schema import HumanMessage, SystemMessage, BaseMessage, AIMessage
from langchain_core.prompts import MessagesPlaceholder, ChatPromptTemplate

# st.set_page_config may be called only ONCE per script run; the original
# called it twice, which raises StreamlitAPIException. All options merged.
st.set_page_config(
    layout="wide",
    page_title="Agentic",
    page_icon=":robot_face:",
)
st.header(":red[CineGuide] the AI Agent\n  ", divider='rainbow')


@st.cache_resource
def get_chat_model():
    """Return the (process-cached) ChatOllama chat model."""
    return ChatOllama(model="deepseek-r1:8b")


@st.cache_resource
def get_embedding_model():
    """Return the (process-cached) Ollama embedding model."""
    return OllamaEmbeddings(model='nomic-embed-text:latest')


# ---- Session state: models and chat history survive Streamlit reruns ----
if "chat_model" not in st.session_state:
    st.session_state.chat_model = get_chat_model()
if "embedding_model" not in st.session_state:
    st.session_state.embedding_model = get_embedding_model()
if "messages" not in st.session_state:
    st.session_state.messages = []

system_message = SystemMessage(
    content="""
You are “CineGuide,” a friendly and intelligent AI assistant specializing in
personalized film and TV show recommendations.

Before performing any recommendations, searches, or actions:
- Check if the user's profile includes:
  1. Name (optional but good for personalization)
  2. Location (for local theaters and premieres)
  3. Preference: Movies, TV shows, or both
  4. Favorite genres
  5. Preferred time period (classics, recent releases, specific decades)
- If any of these are missing, ask in a conversational way, one at a time,
  without overwhelming the user.
- If the user has not yet mentioned whether they prefer movies or TV shows,
  their favorite genres, or the period they enjoy most, naturally discover
  this before proceeding.

Your mission:
- Build and update a dynamic user profile purely from conversation.
- Recommend both classic and new releases tailored to their tastes.
- Use available tools to:
  - Search movies and shows with filters (genre, release year, rating, etc.).
  - Find local theaters or events where these are premiering.
  - Identify OTT platforms where suggested content is available.
- Provide short summaries and available viewing options for each recommendation.
- Ask follow-up questions to refine recommendations
  (e.g., “Do you feel like watching a drama or a comedy tonight?”).

Tone & style:
- Be conversational and engaging.
- Keep recommendations relevant.
- Avoid asking for unnecessary personal data unless it clearly improves
  suggestions.
"""
)

col1, col2 = st.columns([0.3, 0.7])

with col1:
    # NOTE(review): the original rendered an HTML heading via
    # st.markdown(..., unsafe_allow_html=True); the markup was stripped from
    # this copy of the file, so a plain markdown heading is used instead.
    st.markdown("### User Information")
    st.text_input('Name', placeholder="")
    st.text_input('Location', placeholder="")
    st.multiselect('Preferred Language', options=['English', 'Hindi'])
    st.multiselect(
        "Preferred Genres",
        options=['Action', 'Comedy', 'Drama', 'Horror', 'Romance',
                 'Sci-Fi', 'Thriller', 'Documentary'],
    )
    st.text_area('Preferred Plots', placeholder="")
    with st.expander("Liked Movies", expanded=True):
        pass  # placeholder — liked-movies widget not implemented yet
    with st.expander("Liked TV Shows", expanded=True):
        pass  # placeholder — liked-shows widget not implemented yet

with col2:
    # Replay the stored conversation on every rerun.
    for msg in st.session_state.messages:
        if isinstance(msg, HumanMessage):
            st.chat_message("user").write(msg.content)
        if isinstance(msg, AIMessage):
            st.chat_message("assistant").write(msg.content)

    user_input = st.chat_input("Type your message here...")

    # NOTE(review): the original injected custom CSS and an auto-scroll
    # script here; both payloads were stripped from this copy of the file,
    # leaving empty strings. Restore the markup from version control.
    st.markdown(""" """, unsafe_allow_html=True)
    scroll_js = """ """
    html(scroll_js, height=0, width=0, scrolling=False)

    if user_input:
        user_msg = HumanMessage(content=user_input)
        st.chat_message("user").write(user_input)
        st.session_state.messages.append(user_msg)

        prompt = ChatPromptTemplate.from_messages(
            [system_message, MessagesPlaceholder(variable_name="messages")]
        )
        formatted_prompt = prompt.invoke({"messages": st.session_state.messages})
        response = st.session_state.chat_model.stream(formatted_prompt)

        thinking = False              # currently inside a <think> ... </think> span
        thinking_available = False    # model emitted a <think> tag at all
        thinking_data = ""            # accumulated chain-of-thought text
        assistant_response_text = ""  # accumulated final-answer text (saved to history)

        def stream_saver(response):
            """Yield chunks while routing text into thinking vs. answer buffers.

            deepseek-r1 brackets its reasoning with <think>/<\u200bthink> tags.
            (The original compared against empty strings — the tag literals
            had been stripped from the file — so the state machine never
            toggled; the literals are restored here.)
            """
            global assistant_response_text, thinking, thinking_data, thinking_available
            for chunk in response:
                text = chunk.content
                if text == "<think>":
                    if not thinking:
                        thinking = True
                        thinking_available = True
                if text == "</think>":
                    if thinking:
                        thinking = False
                    continue  # closing tag itself is never displayed
                if not thinking:
                    assistant_response_text += text
                else:
                    thinking_data += text
                yield text

        # Phase 1: consume the reasoning span, rendering it inside a status
        # box. The status widget is created exactly once, when the opening
        # tag arrives. The first post-reasoning chunk is kept as `leftover`
        # so it is not dropped from the displayed answer (the original lost
        # that token on `break`).
        status = None
        thinking_placeholder = None
        thinking_text_accumulated = ""
        leftover = None
        gen = stream_saver(response)
        for chunk in gen:
            if chunk == "<think>":
                status = st.status("Thinking...", expanded=True)
                thinking_placeholder = status.empty()
                continue
            if thinking:
                thinking_text_accumulated += chunk
                thinking_placeholder.markdown(thinking_text_accumulated)
            else:
                if status is not None:
                    status.update(label="Thinking complete.",
                                  state="complete", expanded=False)
                leftover = chunk
                break

        # Phase 2: stream the remainder of the answer into the chat bubble,
        # continuing the SAME generator (the original built a second
        # stream_saver over the same response iterator).
        def _answer_stream():
            if leftover:
                yield leftover
            yield from gen

        assistant_message_container = st.chat_message("assistant")
        assistant_message_container.write_stream(_answer_stream())
        st.session_state.messages.append(AIMessage(content=assistant_response_text))