Spaces:
Runtime error
Runtime error
Miquel Farre
committed on
Commit
·
b7a96e8
1
Parent(s):
312644d
fix
Browse files
app.py
CHANGED
|
@@ -57,7 +57,12 @@ class VideoHighlightDetector:
|
|
| 57 |
|
| 58 |
def analyze_video_content(self, video_path: str) -> str:
|
| 59 |
"""Analyze video content to determine its type and description."""
|
|
|
|
| 60 |
messages = [
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
{
|
| 62 |
"role": "user",
|
| 63 |
"content": [
|
|
@@ -76,7 +81,7 @@ class VideoHighlightDetector:
|
|
| 76 |
).to(self.device)
|
| 77 |
|
| 78 |
outputs = self.model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.7)
|
| 79 |
-
return self.processor.decode(outputs[0], skip_special_tokens=True)
|
| 80 |
|
| 81 |
def determine_highlights(self, video_description: str) -> str:
|
| 82 |
"""Determine what constitutes highlights based on video description."""
|
|
@@ -104,7 +109,7 @@ class VideoHighlightDetector:
|
|
| 104 |
).to(self.device)
|
| 105 |
|
| 106 |
outputs = self.model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7)
|
| 107 |
-
return self.processor.decode(outputs[0], skip_special_tokens=True)
|
| 108 |
|
| 109 |
def process_segment(self, video_path: str, highlight_types: str) -> bool:
|
| 110 |
"""Process a video segment and determine if it contains highlights."""
|
|
@@ -132,7 +137,7 @@ class VideoHighlightDetector:
|
|
| 132 |
).to(self.device)
|
| 133 |
|
| 134 |
outputs = self.model.generate(**inputs, max_new_tokens=64, do_sample=False)
|
| 135 |
-
response = self.processor.decode(outputs[0], skip_special_tokens=True).lower()
|
| 136 |
|
| 137 |
return "yes" in response
|
| 138 |
|
|
|
|
| 57 |
|
| 58 |
def analyze_video_content(self, video_path: str) -> str:
|
| 59 |
"""Analyze video content to determine its type and description."""
|
| 60 |
+
system_message = "You are a helpful assistant that can understand videos. Describe what type of video this is and what's happening in it."
|
| 61 |
messages = [
|
| 62 |
+
{
|
| 63 |
+
"role": "system",
|
| 64 |
+
"content": [{"type": "text", "text": system_message}]
|
| 65 |
+
},
|
| 66 |
{
|
| 67 |
"role": "user",
|
| 68 |
"content": [
|
|
|
|
| 81 |
).to(self.device)
|
| 82 |
|
| 83 |
outputs = self.model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.7)
|
| 84 |
+
return self.processor.decode(outputs[0], skip_special_tokens=True).lower().split("Assistant: ")[1]
|
| 85 |
|
| 86 |
def determine_highlights(self, video_description: str) -> str:
|
| 87 |
"""Determine what constitutes highlights based on video description."""
|
|
|
|
| 109 |
).to(self.device)
|
| 110 |
|
| 111 |
outputs = self.model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7)
|
| 112 |
+
return self.processor.decode(outputs[0], skip_special_tokens=True).lower().split("Assistant: ")[1]
|
| 113 |
|
| 114 |
def process_segment(self, video_path: str, highlight_types: str) -> bool:
|
| 115 |
"""Process a video segment and determine if it contains highlights."""
|
|
|
|
| 137 |
).to(self.device)
|
| 138 |
|
| 139 |
outputs = self.model.generate(**inputs, max_new_tokens=64, do_sample=False)
|
| 140 |
+
response = self.processor.decode(outputs[0], skip_special_tokens=True).lower().split("Assistant: ")[1]
|
| 141 |
|
| 142 |
return "yes" in response
|
| 143 |
|