Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -2,39 +2,28 @@ import gradio as gr
|
|
| 2 |
import google.generativeai as genai
|
| 3 |
from pathlib import Path
|
| 4 |
import tempfile
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
def summarize_video(video_path):
    """Summarize an uploaded video with the Gemini 1.5 Pro model.

    Args:
        video_path: A file-like object from the Gradio upload widget
            (exposes ``.name`` and ``.read()``), or None if nothing
            was uploaded.

    Returns:
        The model's text summary, or a human-readable error message.
    """
    if video_path is None:
        return "Please upload a video file."
    try:
        # Persist the uploaded file-like object to disk so the SDK's
        # File API can read it from a real path.
        with tempfile.NamedTemporaryFile(suffix=Path(video_path.name).suffix, delete=False) as tmp_file:
            tmp_file.write(video_path.read())
            video_file_path = tmp_file.name

        # Create the prompt
        prompt = "Summarize this video"

        # The API key is configured globally on the SDK;
        # GenerativeModel() takes no api_key keyword argument.
        genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
        model = genai.GenerativeModel(model_name="models/gemini-1.5-pro")

        # Video must be uploaded through the File API before inference;
        # uploads are processed asynchronously, so poll until ACTIVE.
        video_file = genai.upload_file(path=video_file_path)
        import time
        while video_file.state.name == "PROCESSING":
            time.sleep(2)
            video_file = genai.get_file(video_file.name)
        if video_file.state.name == "FAILED":
            return "An error occurred: video processing failed."

        # Make the LLM request
        print("Making LLM inference request...")
        response = model.generate_content([prompt, video_file],
                                          request_options={"timeout": 2000})

        return response.text

    except Exception as e:
        return f"An error occurred: {str(e)}"

    finally:
        # Clean up temporary file
        if 'video_file_path' in locals():
            Path(video_file_path).unlink(missing_ok=True)
|
| 38 |
|
| 39 |
# Create Gradio interface
|
| 40 |
iface = gr.Interface(
|
|
|
|
| 2 |
import google.generativeai as genai
|
| 3 |
from pathlib import Path
|
| 4 |
import tempfile
|
| 5 |
+
import os
|
|
|
|
|
|
|
| 6 |
def summarize_video(video_path):
    """Summarize a video with the Gemini 1.5 Pro model.

    Args:
        video_path: Filesystem path to the video (Gradio passes the
            upload as a plain path string), or None if nothing was
            uploaded.

    Returns:
        The model's text summary, or a human-readable error message.
    """
    if video_path is None:
        return "Please upload a video file."
    try:
        # Since Gradio passes the path as a string, we can use it directly

        # Create the prompt
        prompt = "Summarize this video"

        # The API key is configured globally on the SDK;
        # GenerativeModel() takes no api_key keyword argument.
        genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
        model = genai.GenerativeModel(model_name="models/gemini-1.5-pro")

        # A bare local path cannot be passed inside the contents list;
        # the video must be uploaded via the File API, and uploads are
        # processed asynchronously, so poll until the file is ACTIVE.
        video_file = genai.upload_file(path=video_path)
        import time
        while video_file.state.name == "PROCESSING":
            time.sleep(2)
            video_file = genai.get_file(video_file.name)
        if video_file.state.name == "FAILED":
            return "An error occurred: video processing failed."

        # Make the LLM request
        print("Making LLM inference request...")
        response = model.generate_content([prompt, video_file],
                                          request_options={"timeout": 2000})

        return response.text

    except Exception as e:
        return f"An error occurred: {str(e)}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
# Create Gradio interface
|
| 29 |
iface = gr.Interface(
|