Spaces status: Sleeping
Commit: Update streamlit_app.py
Changed file: streamlit_app.py (+24, -21)
|
@@ -188,39 +188,42 @@ def _encode_video_b64(path: Path) -> str:
|
|
| 188 |
return base64.b64encode(path.read_bytes()).decode()
|
| 189 |
|
| 190 |
|
| 191 |
-
def generate_report(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 192 |
b64 = _encode_video_b64(video_path)
|
| 193 |
video_part = {"inline_data": {"mime_type": "video/mp4", "data": b64}}
|
| 194 |
-
model = genai.GenerativeModel(model_name=model_id)
|
| 195 |
|
| 196 |
-
#
|
| 197 |
safety_settings = [
|
| 198 |
-
{
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
},
|
| 202 |
-
{
|
| 203 |
-
"category": "HARM_CATEGORY_HATE_SPEECH",
|
| 204 |
-
"threshold": "BLOCK_NONE"
|
| 205 |
-
},
|
| 206 |
-
{
|
| 207 |
-
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
|
| 208 |
-
"threshold": "BLOCK_NONE"
|
| 209 |
-
},
|
| 210 |
-
{
|
| 211 |
-
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
|
| 212 |
-
"threshold": "BLOCK_NONE"
|
| 213 |
-
}
|
| 214 |
]
|
| 215 |
|
| 216 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 217 |
resp = model.generate_content(
|
| 218 |
[prompt, video_part],
|
| 219 |
-
generation_config=
|
| 220 |
request_options={"timeout": timeout},
|
| 221 |
)
|
| 222 |
return getattr(resp, "text", str(resp))
|
| 223 |
|
|
|
|
| 224 |
def _strip_prompt_echo(prompt: str, text: str, threshold: float = 0.68) -> str:
|
| 225 |
if not prompt or not text:
|
| 226 |
return text
|
|
|
|
| 188 |
return base64.b64encode(path.read_bytes()).decode()
|
| 189 |
|
| 190 |
|
| 191 |
def generate_report(
    video_path: Path,
    prompt: str,
    model_id: str,
    timeout: int = 300,
) -> str:
    """Generate a text report for a video using a Gemini model.

    Args:
        video_path: Path to the MP4 file to analyze.
        prompt: Instruction text sent alongside the video.
        model_id: Gemini model identifier (e.g. "gemini-1.5-pro").
        timeout: Per-request timeout in seconds (default 300).

    Returns:
        The model's text response; falls back to ``str(resp)`` if the
        response object has no ``text`` attribute (e.g. blocked output).
    """
    # Encode the video as base-64 and wrap it as an inline-data part.
    b64 = _encode_video_b64(video_path)
    video_part = {"inline_data": {"mime_type": "video/mp4", "data": b64}}

    # Safety settings belong on the model itself, NOT inside
    # generation_config (passing them there raises a TypeError in the SDK).
    safety_settings = [
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
    ]

    # Create the model with the safety settings attached.
    model = genai.GenerativeModel(
        model_name=model_id,
        safety_settings=safety_settings,
    )

    # Generation-specific config only (no safety_settings here).
    gen_config = {"max_output_tokens": 1024}

    # Send the request.
    resp = model.generate_content(
        [prompt, video_part],
        generation_config=gen_config,
        request_options={"timeout": timeout},
    )
    # resp.text raises/is absent when the candidate was blocked; degrade
    # gracefully to the repr so callers always get a string.
    return getattr(resp, "text", str(resp))
|
| 225 |
|
| 226 |
+
|
| 227 |
def _strip_prompt_echo(prompt: str, text: str, threshold: float = 0.68) -> str:
|
| 228 |
if not prompt or not text:
|
| 229 |
return text
|