Hug0endob committed on
Commit
4d0af4e
·
verified ·
1 Parent(s): 751a25b

Update streamlit_app.py

Browse files
Files changed (1) hide show
  1. streamlit_app.py +9 -33
streamlit_app.py CHANGED
@@ -28,8 +28,9 @@ DATA_DIR.mkdir(exist_ok=True)
28
 
29
  DEFAULT_MODEL = "gemini-2.5-flash-lite"
30
  DEFAULT_PROMPT = (
31
- "Analyze the video and provide an overview of the general actions and interactions observed. "
32
- "Describe the emotional tone and patterns of movement without referencing specific or explicit content."
 
33
  )
34
 
35
  MODEL_OPTIONS = [
@@ -192,31 +193,12 @@ def generate_report(video_path: Path, prompt: str, model_id: str, timeout: int =
192
  video_part = {"inline_data": {"mime_type": "video/mp4", "data": b64}}
193
  model = genai.GenerativeModel(model_name=model_id)
194
 
195
- try:
196
- resp = model.generate_content(
197
- [prompt, video_part],
198
- generation_config={"max_output_tokens": 1024},
199
- request_options={"timeout": timeout},
200
- )
201
-
202
- # Check if the response contains valid candidates
203
- if not resp.candidates:
204
- feedback = getattr(resp, 'prompt_feedback', None)
205
- if feedback:
206
- st.warning(f"Feedback on the prompt: {feedback.block_reason}")
207
- return "Analysis could not be generated due to content restrictions."
208
-
209
- return "No valid analysis could be generated."
210
- output_text = getattr(resp, "text", str(resp))
211
-
212
- except Exception as e:
213
- st.error(f"An error occurred during report generation: {e}")
214
- return "Failed to generate report."
215
-
216
- # Define keywords to filter out
217
- keywords = ["explicit", "graphic", "violence", "nudity"] # Add any other terms if necessary
218
- filtered_text = filter_output(output_text, keywords)
219
- return filtered_text
220
 
221
  def _strip_prompt_echo(prompt: str, text: str, threshold: float = 0.68) -> str:
222
  if not prompt or not text:
@@ -228,12 +210,6 @@ def _strip_prompt_echo(prompt: str, text: str, threshold: float = 0.68) -> str:
228
  return text[cut:].lstrip(" \n:-")
229
  return text
230
 
231
- def filter_output(text: str, keywords: list) -> str:
232
- for keyword in keywords:
233
- if keyword.lower() in text.lower():
234
- text = text.replace(keyword, "[REDACTED]")
235
- return text
236
-
237
  # ----------------------------------------------------------------------
238
  # UI helpers
239
  # ----------------------------------------------------------------------
 
28
 
29
  DEFAULT_MODEL = "gemini-2.5-flash-lite"
30
  DEFAULT_PROMPT = (
31
+ "Watch the video and provide a detailed behavioral report focusing on human actions, "
32
+ "interactions, posture, movement, and apparent intent. Keep language professional. "
33
+ "Include a list of observations for notable events."
34
  )
35
 
36
  MODEL_OPTIONS = [
 
193
  video_part = {"inline_data": {"mime_type": "video/mp4", "data": b64}}
194
  model = genai.GenerativeModel(model_name=model_id)
195
 
196
+ resp = model.generate_content(
197
+ [prompt, video_part],
198
+ generation_config={"max_output_tokens": 1024},
199
+ request_options={"timeout": timeout},
200
+ )
201
+ return getattr(resp, "text", str(resp))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
 
203
  def _strip_prompt_echo(prompt: str, text: str, threshold: float = 0.68) -> str:
204
  if not prompt or not text:
 
210
  return text[cut:].lstrip(" \n:-")
211
  return text
212
 
 
 
 
 
 
 
213
  # ----------------------------------------------------------------------
214
  # UI helpers
215
  # ----------------------------------------------------------------------