stevafernandes committed on
Commit
928442a
·
verified ·
1 Parent(s): 0fddf97

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +183 -0
app.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import google.generativeai as genai
3
+ import os
4
+ import tempfile
5
+ import time
6
+ import mimetypes
7
+ from pathlib import Path
8
+
9
# Gemini API key: read from the environment so the secret never lives in
# source control. NOTE(review): the original hard-coded a literal key here —
# if that key was ever committed or published, rotate it immediately.
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY", "")
if not GEMINI_API_KEY:
    # Fail fast with a clear message rather than letting every API call 401.
    raise RuntimeError("Set the GEMINI_API_KEY environment variable before running.")
genai.configure(api_key=GEMINI_API_KEY)
12
+
13
# Page config — must run before any other Streamlit call in the script.
st.set_page_config(
    page_icon="🎬",
    page_title="Video RAG with Gemini",
    layout="wide",
)
19
+
20
# ===========================
# Video Processing Class
# ===========================
class VideoProcessor:
    """Thin wrapper around the Gemini Files + generation APIs for video Q&A.

    All failures are reported to the Streamlit UI via ``st.error`` and
    surfaced to callers as ``None`` return values.
    """

    def __init__(self, model_name: str = "gemini-2.0-flash"):
        """Create the generative model.

        Args:
            model_name: Gemini model id. Defaults to the flash model used for
                video context; parameterized so callers can swap models
                without editing this class.
        """
        self.model = genai.GenerativeModel(model_name)

    def upload_video(self, video_path: str, display_name: str = None):
        """Upload a local video file to the Gemini Files API.

        Returns the uploaded file handle, or None (after showing a UI error)
        if the upload fails.
        """
        try:
            return genai.upload_file(
                path=video_path,
                display_name=display_name or "uploaded_video",
            )
        except Exception as e:
            st.error(f"Video upload error: {e}")
            return None

    def wait_for_file_processing(self, video_file):
        """Block until the uploaded file leaves the PROCESSING state.

        Polls the Files API every 2 seconds. Returns the refreshed file
        handle, or None (after showing a UI error) if processing failed.
        """
        try:
            while video_file.state.name == "PROCESSING":
                time.sleep(2)  # server-side transcode; poll gently
                video_file = genai.get_file(video_file.name)
            if video_file.state.name == "FAILED":
                raise ValueError("Video failed to process")
            return video_file
        except Exception as e:
            st.error(f"Processing error: {e}")
            return None

    def chat_with_video(self, video_file, prompt: str):
        """Generate an answer to *prompt* grounded in the uploaded video.

        Returns the response text, or None (after showing a UI error) if
        generation fails.
        """
        try:
            resp = self.model.generate_content([video_file, prompt])
            return resp.text
        except Exception as e:
            st.error(f"Generation error: {e}")
            return None
54
+
55
+ # Helper functions
56
def is_video_file(file) -> bool:
    """Return True if the uploaded file's name maps to a video/* MIME type.

    Fixes a crash in the original: ``mimetypes.guess_type`` returns
    ``(None, None)`` for unknown extensions, so calling ``.startswith`` on
    the first element raised AttributeError. Also treats a falsy *file*
    as "not a video".
    """
    if not file:
        return False
    mime, _ = mimetypes.guess_type(file.name)
    return bool(mime and mime.startswith("video/"))
58
+
59
def get_size_mb(file) -> float:
    """Return the size of an in-memory uploaded file in mebibytes."""
    n_bytes = len(file.getvalue())
    return n_bytes / (1024 * 1024)
61
+
62
def reset_chat():
    """Clear the chat history and release the remote video + session keys.

    Fix: the original used a bare ``except:``, which also swallows
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    st.session_state.messages = []
    if st.session_state.get("video_file"):
        try:
            # Best-effort remote cleanup; uploaded files also expire
            # server-side, so a failure here is not fatal.
            genai.delete_file(st.session_state.video_file.name)
        except Exception:
            pass  # deliberately best-effort — never block the UI reset
        del st.session_state["video_file"]
    for key in ("proc", "video_name"):
        if key in st.session_state:
            del st.session_state[key]
73
+
74
def display_video(bytes_, name):
    """Render an uploaded video with its filename shown as a heading."""
    heading = f"### 🎬 {name}"
    st.markdown(heading)
    st.video(bytes_)
77
+
78
# Initialize session state — one defaults table instead of four if-blocks.
_SESSION_DEFAULTS = {
    "messages": [],      # chat transcript: list of {"role", "content"} dicts
    "video_file": None,  # Gemini Files API handle for the active video
    "proc": None,        # lazily-created VideoProcessor instance
    "video_name": None,  # original filename of the uploaded video
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
87
+
88
# Sidebar for upload and controls
with st.sidebar:
    st.header("📹 Upload a Video")
    # Lazily build the processor once per session.
    if st.session_state.proc is None:
        st.session_state.proc = VideoProcessor()

    upload = st.file_uploader("Video file", type=['mp4', 'mov', 'avi', 'mkv', 'webm'])
    if upload:
        if not is_video_file(upload):
            st.error("Invalid video type.")
        else:
            size = get_size_mb(upload)
            st.info(f"File size: {size:.2f} MB")
            if size > 100:
                st.warning("Large files may fail or take longer to process.")

            # Re-upload only when the selected file actually changed.
            if (st.session_state.video_file is None
                    or st.session_state.video_name != upload.name):
                with st.spinner("Uploading & processing…"):
                    # Gemini's upload API takes a filesystem path, so spill
                    # the in-memory upload to a temp file first.
                    tmp = tempfile.NamedTemporaryFile(
                        delete=False, suffix=Path(upload.name).suffix
                    )
                    try:
                        tmp.write(upload.getvalue())
                        tmp.flush()
                        tmp.close()
                        vf = st.session_state.proc.upload_video(tmp.name, upload.name)
                        if vf:
                            pf = st.session_state.proc.wait_for_file_processing(vf)
                            if pf:
                                st.session_state.video_file = pf
                                st.session_state.video_name = upload.name
                                st.session_state.messages = []
                                st.success("✅ Video ready!")
                    finally:
                        # Fix: original leaked the temp file when upload or
                        # processing raised — always remove the local copy.
                        os.unlink(tmp.name)

            if st.session_state.video_file:
                display_video(upload.getvalue(), upload.name)

    st.markdown("---")
    col1, col2 = st.columns(2)
    with col1:
        if st.button("🗑️ Clear Chat"):
            st.session_state.messages = []
            st.rerun()
    with col2:
        if st.button("🔄 Reset All"):
            reset_chat()
            st.rerun()
134
+
135
# Main interface
st.title("🎬 Video RAG with Gemini")
st.markdown("Upload video & chat with its contents!")

if st.session_state.video_file is None:
    # No video yet — point the user at the sidebar uploader.
    st.info("👈 Upload a video to start chatting.")
else:
    st.success(f"✅ Chatting on: **{st.session_state.video_name}**")
    # Replay the transcript: Streamlit reruns the whole script on every
    # interaction, so the history must be re-rendered each pass.
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"]):
            st.markdown(msg["content"])

    # Show starter suggestions only while the chat is still empty.
    if not st.session_state.messages:
        st.markdown("### 💡 Examples:")
        for ex in [
            "What is happening in this video?",
            "Summarize the main events",
            "Who is present?"
        ]:
            # key=ex keeps the three buttons distinct; clicking queues the
            # example as a user message and reruns (answered on next pass).
            if st.button(ex, key=ex):
                st.session_state.messages.append({"role": "user", "content": ex})
                st.rerun()

    if prompt := st.chat_input("Ask a question..."):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message("assistant"):
            ph = st.empty()  # placeholder reused for the typing animation
            with st.spinner("Thinking…"):
                resp = st.session_state.proc.chat_with_video(
                    st.session_state.video_file, prompt
                )
            if resp:
                # Fake "typing" effect: reveal the answer word by word,
                # with a trailing cursor glyph, then render the final text.
                full = ""
                for word in resp.split():
                    full += word + " "
                    ph.markdown(full + "▌")
                    time.sleep(0.02)
                ph.markdown(resp)
                st.session_state.messages.append({"role":"assistant","content":resp})
            else:
                # chat_with_video already showed the underlying error.
                st.error("No response — please try again.")
178
+
179
# Footer
_FOOTER_HTML = """---
<p style='text-align:center; color:#666'>
Built with ❤️ using Gemini Video API
</p>"""
st.markdown(_FOOTER_HTML, unsafe_allow_html=True)