Update README.md

README.md (CHANGED):

````diff
@@ -40,13 +40,6 @@ pip install qwen-vl-utils livecc-utils
 Here we show a code snippet to show you how to do **real-time video commentary** with `transformers` and the above utils:
 
 ```python
-import functools, torch, os, tqdm
-from liger_kernel.transformers import apply_liger_kernel_to_qwen2_vl
-apply_liger_kernel_to_qwen2_vl() # important. our model is trained with this. keep consistency
-from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, LogitsProcessor, logging
-from livecc_utils import prepare_multiturn_multimodal_inputs_for_generation, get_smart_resized_clip, get_smart_resized_video_reader
-from qwen_vl_utils import process_vision_info
-
 class LiveCCDemoInfer:
     fps = 2
     initial_fps_frames = 6
@@ -61,7 +54,6 @@
             attn_implementation='flash_attention_2'
         )
         self.processor = AutoProcessor.from_pretrained(model_path, use_fast=False)
-        self.streaming_eos_token_id = self.processor.tokenizer(' ...').input_ids[-1]
         self.model.prepare_inputs_for_generation = functools.partial(prepare_multiturn_multimodal_inputs_for_generation, self.model)
         message = {
             "role": "user",
@@ -73,7 +65,7 @@
         self.system_prompt_offset = texts.index('<|im_start|>user')
         self._cached_video_readers_with_hw = {}
 
-
+
     def live_cc(
         self,
         query: str,
@@ -82,8 +74,6 @@
         default_query: str = 'Please describe the video.',
         do_sample: bool = False,
         repetition_penalty: float = 1.05,
-        streaming_eos_base_threshold: float = None,
-        streaming_eos_threshold_step: float = None,
         **kwargs,
     ):
         """
@@ -94,6 +84,8 @@
             last_video_pts_index: int, last processed video frame index
             video_pts: np.ndarray, video pts
             last_history: list, last processed history
+            past_key_values: llm past_key_values
+            past_ids: past generated ids
         """
         # 1. preparation: video_reader, and last processing info
         video_timestamp, last_timestamp = state.get('video_timestamp', 0), state.get('last_timestamp', -1 / self.fps)
@@ -147,7 +139,7 @@
             }
         if not query and not state.get('query', None):
             query = default_query
-
+            print(f'No query provided, use default_query={default_query}')
         if query and state.get('query', None) != query:
             message['content'].append({"type": "text", "text": query})
             state['query'] = query
@@ -165,23 +157,18 @@
         inputs.to('cuda')
         if past_ids is not None:
             inputs['input_ids'] = torch.cat([past_ids, inputs.input_ids], dim=1)
-        if streaming_eos_base_threshold is not None:
-            logits_processor = [ThresholdLogitsProcessor(self.streaming_eos_token_id, streaming_eos_base_threshold, streaming_eos_threshold_step)]
-        else:
-            logits_processor = None
         outputs = self.model.generate(
             **inputs, past_key_values=state.get('past_key_values', None),
             return_dict_in_generate=True, do_sample=do_sample,
             repetition_penalty=repetition_penalty,
-            logits_processor=logits_processor,
         )
         state['past_key_values'] = outputs.past_key_values
         state['past_ids'] = outputs.sequences[:, :-1]
         yield (start_timestamp, stop_timestamp), self.processor.decode(outputs.sequences[0, inputs.input_ids.size(1):], skip_special_tokens=True), state
 
 model_path = 'chenjoya/LiveCC-7B-Instruct'
-video_path = "
-query = "
+video_path = "demo/sources/howto_fix_laptop_mute_1080p.mp4"
+query = "Please describe the video."
 
 infer = LiveCCDemoInfer(model_path=model_path)
 state = {'video_path': video_path}
@@ -191,7 +178,7 @@ for t in range(31):
     state['video_timestamp'] = t
     for (start_t, stop_t), response, state in infer.live_cc(
         query=query, state=state,
-        max_pixels =
+        max_pixels = 384 * 28 * 28, repetition_penalty=1.05,
         streaming_eos_base_threshold=0.0, streaming_eos_threshold_step=0
     ):
         print(f'{start_t}s-{stop_t}s: {response}')
````
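The removed branch above references a `ThresholdLogitsProcessor` that is not defined anywhere in this excerpt. Below is a minimal sketch of what such a processor could look like, assuming it suppresses the streaming EOS token (the `' ...'` token whose id the deleted constructor line cached) while that token's probability sits at or below a threshold that grows by `step` with each generated token; under this reading the demo call's `base_threshold=0.0, step=0` is a no-op. The semantics here are an assumption, not the repository's actual implementation:

```python
import torch
from transformers import LogitsProcessor

class ThresholdLogitsProcessor(LogitsProcessor):
    """Hypothetical reconstruction: mask the streaming EOS token until the
    model assigns it more than `threshold` probability."""
    def __init__(self, token_id: int, base_threshold: float, step: float):
        self.token_id = token_id           # id of the ' ...' streaming EOS token
        self.base_threshold = base_threshold
        self.step = step                   # threshold growth per generated token
        self.count = 0                     # tokens generated so far in this call

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        threshold = self.base_threshold + self.step * self.count
        eos_prob = scores.softmax(dim=-1)[:, self.token_id]
        # suppress the streaming EOS token wherever it is not confident enough
        scores[eos_prob <= threshold, self.token_id] = float('-inf')
        self.count += 1
        return scores
```

Under that reading, raising `streaming_eos_base_threshold` makes the commentary run longer before the model is allowed to fall silent.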
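One detail worth unpacking from the constructor: `self.model.prepare_inputs_for_generation = functools.partial(prepare_multiturn_multimodal_inputs_for_generation, self.model)` replaces the model's bound method with the multiturn-aware function from `livecc_utils`, using `functools.partial` to pre-bind the instance that a normal bound method would receive implicitly. A toy illustration of the same instance-level patch (all names here are hypothetical):

```python
import functools

class Greeter:
    def greet(self, name: str) -> str:
        return f'hello {name}'

def shouting_greet(obj: Greeter, name: str) -> str:
    # the replacement receives the instance explicitly; delegate to the
    # original class method to avoid recursing into the patched attribute
    return Greeter.greet(obj, name).upper()

g = Greeter()
g.greet = functools.partial(shouting_greet, g)  # patch this instance only
print(g.greet('livecc'))  # HELLO LIVECC
```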
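The streaming behaviour of `live_cc` comes from threading `generate`'s outputs back in through `state`: `past_key_values` carries the attention cache between calls, and `past_ids` (everything generated so far except the final token, which the cache has not yet absorbed) is concatenated in front of the next chunk's `input_ids` so positions line up with the cached keys and values. A stripped-down, text-only sketch of the same pattern, assuming any causal LM checkpoint (the model name below is only an example):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

name = 'Qwen/Qwen2-0.5B-Instruct'  # example checkpoint, not LiveCC itself
tok = AutoTokenizer.from_pretrained(name)
lm = AutoModelForCausalLM.from_pretrained(name)

state = {'past_key_values': None, 'past_ids': None}
for chunk in ['The laptop is on mute, so first', ' open the sound settings and']:
    ids = tok(chunk, return_tensors='pt').input_ids
    if state['past_ids'] is not None:
        # replay earlier ids so positions match the cached keys/values
        ids = torch.cat([state['past_ids'], ids], dim=1)
    out = lm.generate(
        input_ids=ids, past_key_values=state['past_key_values'],
        return_dict_in_generate=True, max_new_tokens=16,
    )
    state['past_key_values'] = out.past_key_values  # reuse the cache next call
    state['past_ids'] = out.sequences[:, :-1]       # last token is uncached
    print(tok.decode(out.sequences[0, ids.size(1):], skip_special_tokens=True))
```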
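As for `max_pixels = 384 * 28 * 28` in the driving loop: Qwen2-VL-family processors resize each frame so that its area stays under `max_pixels` and its sides are multiples of 28 pixels, and each 28×28 block corresponds to one 2×2-merged vision token. Under that scheme (documented for Qwen2-VL, assumed to carry over here), the cap amounts to a per-frame token budget:

```python
# interpreting the max_pixels cap as a vision-token budget (Qwen2-VL packs
# each 28x28 pixel block into one 2x2-merged vision token)
max_pixels = 384 * 28 * 28             # 301,056 pixels per resized frame
pixels_per_token = 28 * 28             # one merged vision token per block
print(max_pixels // pixels_per_token)  # -> 384 vision tokens per frame, max
```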