File size: 6,904 Bytes
72d957a
e05f15c
72d957a
30d6a18
 
 
 
1de2db2
e05f15c
 
30d6a18
e05f15c
72d957a
30d6a18
 
9dc9e28
72d957a
30d6a18
 
 
f67351d
30d6a18
 
 
 
 
 
f67351d
30d6a18
e05f15c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30d6a18
f67351d
ae4a110
e05f15c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0aba0b9
e05f15c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ae4a110
e05f15c
 
 
30d6a18
e05f15c
ae4a110
e05f15c
ae4a110
e05f15c
 
 
 
 
 
 
 
 
ae4a110
e05f15c
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
import streamlit as st
from streamlit.components.v1 import html

from langchain_ollama.chat_models import ChatOllama
from langchain_ollama.embeddings import OllamaEmbeddings
from langchain.schema import HumanMessage, SystemMessage, BaseMessage, AIMessage
from langchain_core.prompts import MessagesPlaceholder, ChatPromptTemplate

# Streamlit allows only one set_page_config() call per script run; the
# original called it twice (the duplicate raises StreamlitAPIException,
# and would otherwise drop the layout="wide" setting). Merge both calls.
st.set_page_config(page_title="Agentic", page_icon=":robot_face:", layout="wide")

st.header(":red[CineGuide] the AI Agent\n  ", divider='rainbow')

@st.cache_resource
def get_chat_model():
    """Build the chat LLM once per server process and reuse it across reruns."""
    chat_llm = ChatOllama(model="deepseek-r1:8b")
    return chat_llm

@st.cache_resource
def get_embedding_model():
    """Build the embedding model once per server process and reuse it across reruns."""
    embedder = OllamaEmbeddings(model='nomic-embed-text:latest')
    return embedder

# Populate session state lazily on the first run; later reruns keep the
# cached resources and the accumulated chat transcript. Factories are only
# invoked when the key is missing, matching the original one-by-one checks.
for _key, _factory in (
    ("chat_model", get_chat_model),
    ("embedding_model", get_embedding_model),
    ("messages", list),
):
    if _key not in st.session_state:
        st.session_state[_key] = _factory()

# Static system prompt: fixes the assistant's persona ("CineGuide"), lists
# the user-profile fields it should elicit conversationally, and sets the
# tone. It is prepended to every model call in the chat handler below.
system_message = SystemMessage(
    content="""
You are “CineGuide,” a friendly and intelligent AI assistant specializing in personalized film and TV show recommendations.

Before performing any recommendations, searches, or actions:
- Check if the user's profile includes:
    1. Name (optional but good for personalization)
    2. Location (for local theaters and premieres)
    3. Preference: Movies, TV shows, or both
    4. Favorite genres
    5. Preferred time period (classics, recent releases, specific decades)
- If any of these are missing, ask in a conversational way, one at a time, without overwhelming the user.
- If the user has not yet mentioned whether they prefer movies or TV shows, their favorite genres, or the period they enjoy most, naturally discover this before proceeding.

Your mission:
- Build and update a dynamic user profile purely from conversation.
- Recommend both classic and new releases tailored to their tastes.
- Use available tools to:
    - Search movies and shows with filters (genre, release year, rating, etc.).
    - Find local theaters or events where these are premiering.
    - Identify OTT platforms where suggested content is available.
- Provide short summaries and available viewing options for each recommendation.
- Ask follow-up questions to refine recommendations (e.g., “Do you feel like watching a drama or a comedy tonight?”).

Tone & style:
- Be conversational and engaging.
- Keep recommendations relevant.
- Avoid asking for unnecessary personal data unless it clearly improves suggestions.
"""
)


# Two-column layout: narrow profile panel on the left, chat on the right.
col1, col2 = st.columns([0.3, 0.7])

# Page-level CSS: pad the main container, style the left column as a dark
# panel, and make the right column's inner div independently scrollable.
_layout_css = """
                <style>
                .stMainBlockContainer{
                    padding: 4rem;
                }
                .stColumn:nth-child(1) {
                    padding: 2rem;
                    width: 100%;
                    background-color: #06080c;
                }
                .stColumn:nth-child(2)>div {
                    overflow-y: scroll;
                    height: 70vh;
                }
                </style>
            """
st.markdown(_layout_css, unsafe_allow_html=True)

with col1:
    # Profile panel. NOTE(review): every widget's return value is discarded
    # and no `key=` is set, so these inputs do not yet feed the prompt —
    # presumably a placeholder for future profile wiring; confirm intent.
    st.markdown("<h4 style='text-align:right'>User Information</h4>", unsafe_allow_html=True)
    st.text_input('Name', placeholder="")
    st.text_input('Location', placeholder="")
    language_choices = ['English', 'Hindi']
    genre_choices = ['Action', 'Comedy', 'Drama', 'Horror', 'Romance', 'Sci-Fi', 'Thriller', 'Documentary']
    st.multiselect('Preferred Language', options=language_choices)
    st.multiselect("Preferred Genres", options=genre_choices)
    st.text_area('Preferred Plots', placeholder="")
    with st.expander("Liked Movies", expanded=True):
        pass
    with st.expander("Liked TV Shows", expanded=True):
        pass

with col2:
    # Replay the stored transcript so the conversation survives reruns.
    # HumanMessage and AIMessage are disjoint types, so elif is equivalent
    # to the original pair of independent isinstance checks.
    for past in st.session_state.messages:
        if isinstance(past, HumanMessage):
            st.chat_message("user").write(past.content)
        elif isinstance(past, AIMessage):
            st.chat_message("assistant").write(past.content)

    user_input = st.chat_input("Type your message here...")
    # Pin the chat input near the bottom of the right-hand column.
    _chat_input_css = """
    <style>
    .stChatInput {
        position: fixed;
        bottom: 4rem;
        left: 32%;
        width: 65%;
        z-index: 999;
    }
    </style>
    """
    st.markdown(_chat_input_css, unsafe_allow_html=True)

    
    # JavaScript injected into the parent page: every 500 ms, scroll the
    # chat column (second .stColumn inner div — same selector as the CSS
    # above) to its bottom so newly streamed tokens stay visible.
    scroll_js = """
    <script>
    function scrollToBottom(){
        const el = window.parent.document.querySelector(".stColumn:nth-child(2) > div");
        if (el) {
            el.scrollTop = el.scrollHeight;
        }
    }
    setInterval(scrollToBottom, 500);
    </script>
    """

    # Zero-sized component: exists only to run the script in the page.
    html(scroll_js, height=0, width=0, scrolling=False)

    if user_input:
        # Echo the user's message immediately and persist it in the transcript.
        user_msg = HumanMessage(content=user_input)
        st.chat_message("user").write(user_input)
        st.session_state.messages.append(user_msg)

        # Prepend the static system prompt to the full history for this turn.
        prompt = ChatPromptTemplate.from_messages(
            [system_message, MessagesPlaceholder(variable_name="messages")]
        )
        formatted_prompt = prompt.invoke({"messages": st.session_state.messages})

        # Token stream from the model; deepseek-r1 emits its reasoning wrapped
        # in literal "<think>" / "</think>" chunks, filtered out below.
        response = st.session_state.chat_model.stream(formatted_prompt)

        thinking = False            # currently inside a <think> section
        thinking_available = False  # a <think> section was seen at all
        thinking_data = ""          # accumulated hidden reasoning text

        assistant_response_text = ""
        def stream_saver(response):
            # Pass-through generator that also splits chunks into the visible
            # answer (assistant_response_text) vs. hidden reasoning
            # (thinking_data). NOTE(review): these names are module-level in
            # this script, so `global` works here; it would silently break if
            # this code were moved inside a function.
            for chunk in response:
                global assistant_response_text, thinking, thinking_data, thinking_available
                text = chunk.content

                if text == "<think>":
                    if not thinking:
                        thinking = True
                        thinking_available = True
                if text == "</think>":
                    if thinking:
                        thinking = False
                        continue  # drop the closing tag from both buffers
                
                if not thinking:
                    assistant_response_text += text
                else:
                    # NOTE(review): the opening "<think>" tag itself lands in
                    # thinking_data, since `thinking` is already True here.
                    thinking_data += text

                yield text

        # First pass: render a "Thinking..." status while the model reasons.
        # NOTE(review): this loop and the write_stream call below consume the
        # SAME underlying stream — the loop breaks once thinking ends and
        # write_stream resumes from the next chunk. If the model emits no
        # <think> block at all, the very first chunk is consumed here and is
        # absent from the live display (it is still saved to the transcript).
        for chunk in stream_saver(response):
            if chunk == "<think>":
                status = st.status("Thinking...", expanded=True) if thinking_available else None
                thinking_placeholder = status.empty() if thinking_available else None
                thinking_text_accumulated = ""
            if thinking:
                if chunk == "<think>":
                    status.update(label="Thinking...", state="running", expanded=True)
                else:
                    thinking_text_accumulated += chunk
                    thinking_placeholder.markdown(thinking_text_accumulated)
            else:
                if thinking_available:
                    status.update(label="Thinking complete.", state="complete", expanded=False)
                break
        # Second pass: stream the visible answer into an assistant bubble.
        if not thinking:
            assistant_message_container = st.chat_message("assistant")
            assistant_message_container.write_stream(stream_saver(response))

        # Persist the full (think-stripped) answer for replay on rerun.
        st.session_state.messages.append(AIMessage(content=assistant_response_text))