Hug0endob committed on
Commit
751a25b
·
verified ·
1 Parent(s): 3f8bbcc

Update streamlit_app.py

Browse files
Files changed (1) hide show
  1. streamlit_app.py +25 -17
streamlit_app.py CHANGED
@@ -26,11 +26,10 @@ import snscrape.modules.twitter as sntwitter
26
  DATA_DIR = Path("./data")
27
  DATA_DIR.mkdir(exist_ok=True)
28
 
29
- DEFAULT_MODEL = "gemini-2.0-flash-lite"
30
  DEFAULT_PROMPT = (
31
- "Analyze the video and summarize the overall themes and patterns observed, "
32
- "focusing on abstract representations of human actions without explicit details. "
33
- "Describe the emotional context, dynamics of movement, and interactions using metaphorical language."
34
  )
35
 
36
  MODEL_OPTIONS = [
@@ -193,22 +192,32 @@ def generate_report(video_path: Path, prompt: str, model_id: str, timeout: int =
193
  video_part = {"inline_data": {"mime_type": "video/mp4", "data": b64}}
194
  model = genai.GenerativeModel(model_name=model_id)
195
 
196
- resp = model.generate_content(
197
- [prompt, video_part],
198
- generation_config={"max_output_tokens": 1024},
199
- request_options={"timeout": timeout},
200
- )
 
201
 
202
- # Applying the filter right after generating the response
203
- output_text = getattr(resp, "text", str(resp))
204
-
205
- # Define keywords that you want to filter out
 
 
 
 
 
 
 
 
 
 
 
206
  keywords = ["explicit", "graphic", "violence", "nudity"] # Add any other terms if necessary
207
-
208
- filtered_text = filter_output(output_text, keywords) # Use the filtering function here
209
  return filtered_text
210
 
211
-
212
  def _strip_prompt_echo(prompt: str, text: str, threshold: float = 0.68) -> str:
213
  if not prompt or not text:
214
  return text
@@ -404,7 +413,6 @@ def main() -> None:
404
  with st.expander("Show error details"):
405
  st.code(st.session_state["last_error_detail"], language="text")
406
 
407
-
408
  # ----------------------------------------------------------------------
409
  # Entry point
410
  # ----------------------------------------------------------------------
 
26
  DATA_DIR = Path("./data")
27
  DATA_DIR.mkdir(exist_ok=True)
28
 
29
+ DEFAULT_MODEL = "gemini-2.5-flash-lite"
30
  DEFAULT_PROMPT = (
31
+ "Analyze the video and provide an overview of the general actions and interactions observed. "
32
+ "Describe the emotional tone and patterns of movement without referencing specific or explicit content."
 
33
  )
34
 
35
  MODEL_OPTIONS = [
 
192
  video_part = {"inline_data": {"mime_type": "video/mp4", "data": b64}}
193
  model = genai.GenerativeModel(model_name=model_id)
194
 
195
+ try:
196
+ resp = model.generate_content(
197
+ [prompt, video_part],
198
+ generation_config={"max_output_tokens": 1024},
199
+ request_options={"timeout": timeout},
200
+ )
201
 
202
+ # Check if the response contains valid candidates
203
+ if not resp.candidates:
204
+ feedback = getattr(resp, 'prompt_feedback', None)
205
+ if feedback:
206
+ st.warning(f"Feedback on the prompt: {feedback.block_reason}")
207
+ return "Analysis could not be generated due to content restrictions."
208
+
209
+ return "No valid analysis could be generated."
210
+ output_text = getattr(resp, "text", str(resp))
211
+
212
+ except Exception as e:
213
+ st.error(f"An error occurred during report generation: {e}")
214
+ return "Failed to generate report."
215
+
216
+ # Define keywords to filter out
217
  keywords = ["explicit", "graphic", "violence", "nudity"] # Add any other terms if necessary
218
+ filtered_text = filter_output(output_text, keywords)
 
219
  return filtered_text
220
 
 
221
  def _strip_prompt_echo(prompt: str, text: str, threshold: float = 0.68) -> str:
222
  if not prompt or not text:
223
  return text
 
413
  with st.expander("Show error details"):
414
  st.code(st.session_state["last_error_detail"], language="text")
415
 
 
416
  # ----------------------------------------------------------------------
417
  # Entry point
418
  # ----------------------------------------------------------------------