Update app.py
app.py CHANGED
@@ -6,11 +6,10 @@ from decord import cpu, VideoReader, bridge
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import BitsAndBytesConfig

-MODEL_PATH = "THUDM/cogvlm2-
+MODEL_PATH = "THUDM/cogvlm2-llama3-caption"
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
TORCH_TYPE = torch.bfloat16 if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8 else torch.float16

-
def get_step_info(step_number):
    """Returns detailed information about a manufacturing step."""
    step_details = {

@@ -125,6 +124,87 @@ def get_step_info(step_number):

    return step_details.get(step_number, {"Error": "Invalid step number. Please provide a valid step number."})

+def load_video(video_data, strategy='chat'):
+    """Loads and processes video data into a format suitable for model input."""
+    bridge.set_bridge('torch')
+    num_frames = 24
+
+    if isinstance(video_data, str):
+        decord_vr = VideoReader(video_data, ctx=cpu(0))
+    else:
+        decord_vr = VideoReader(io.BytesIO(video_data), ctx=cpu(0))
+
+    frame_id_list = []
+    total_frames = len(decord_vr)
+    timestamps = [i[0] for i in decord_vr.get_frame_timestamp(np.arange(total_frames))]
+    max_second = round(max(timestamps)) + 1
+
+    for second in range(max_second):
+        closest_num = min(timestamps, key=lambda x: abs(x - second))
+        index = timestamps.index(closest_num)
+        frame_id_list.append(index)
+        if len(frame_id_list) >= num_frames:
+            break
+
+    video_data = decord_vr.get_batch(frame_id_list)
+    video_data = video_data.permute(3, 0, 1, 2)
+    return video_data
+
+def load_model():
+    """Loads the pre-trained model and tokenizer with quantization configurations."""
+    quantization_config = BitsAndBytesConfig(
+        load_in_4bit=True,
+        bnb_4bit_compute_dtype=TORCH_TYPE,
+        bnb_4bit_use_double_quant=True,
+        bnb_4bit_quant_type="nf4"
+    )
+
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
+    model = AutoModelForCausalLM.from_pretrained(
+        MODEL_PATH,
+        torch_dtype=TORCH_TYPE,
+        trust_remote_code=True,
+        quantization_config=quantization_config,
+        device_map="auto"
+    ).eval()
+
+    return model, tokenizer
+
+def predict(prompt, video_data, temperature, model, tokenizer):
+    """Generates predictions based on the video and textual prompt."""
+    video = load_video(video_data, strategy='chat')
+
+    inputs = model.build_conversation_input_ids(
+        tokenizer=tokenizer,
+        query=prompt,
+        images=[video],
+        history=[],
+        template_version='chat'
+    )
+
+    inputs = {
+        'input_ids': inputs['input_ids'].unsqueeze(0).to(DEVICE),
+        'token_type_ids': inputs['token_type_ids'].unsqueeze(0).to(DEVICE),
+        'attention_mask': inputs['attention_mask'].unsqueeze(0).to(DEVICE),
+        'images': [[inputs['images'][0].to(DEVICE).to(TORCH_TYPE)]],
+    }
+
+    gen_kwargs = {
+        "max_new_tokens": 2048,
+        "pad_token_id": 128002,
+        "top_k": 1,
+        "do_sample": False,
+        "top_p": 0.1,
+        "temperature": temperature,
+    }
+
+    with torch.no_grad():
+        outputs = model.generate(**inputs, **gen_kwargs)
+        outputs = outputs[:, inputs['input_ids'].shape[1]:]
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    return response
+
def get_analysis_prompt(step_number):
    """Constructs the prompt for analyzing delay reasons based on the selected step."""
    step_info = get_step_info(step_number)

@@ -172,41 +252,6 @@ Output:

model, tokenizer = load_model()

-def predict(prompt, video_data, temperature, model, tokenizer):
-    """Generates predictions based on the video and textual prompt."""
-    video = load_video(video_data, strategy='chat')
-
-    inputs = model.build_conversation_input_ids(
-        tokenizer=tokenizer,
-        query=prompt,
-        images=[video],
-        history=[],
-        template_version='chat'
-    )
-
-    inputs = {
-        'input_ids': inputs['input_ids'].unsqueeze(0).to(DEVICE),
-        'token_type_ids': inputs['token_type_ids'].unsqueeze(0).to(DEVICE),
-        'attention_mask': inputs['attention_mask'].unsqueeze(0).to(DEVICE),
-        'images': [[inputs['images'][0].to(DEVICE).to(TORCH_TYPE)]],
-    }
-
-    gen_kwargs = {
-        "max_new_tokens": 2048,
-        "pad_token_id": 128002,
-        "top_k": 1,
-        "do_sample": False,
-        "top_p": 0.1,
-        "temperature": temperature,
-    }
-
-    with torch.no_grad():
-        outputs = model.generate(**inputs, **gen_kwargs)
-        outputs = outputs[:, inputs['input_ids'].shape[1]:]
-        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-    return response
-
def inference(video, step_number):
    """Analyzes video to predict possible issues based on the manufacturing step."""
    try:
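Taken together, the hunks move the model plumbing ahead of its first use: load_video, load_model, and predict are now defined at new lines 127-206, before the top-level `model, tokenizer = load_model()` call at new line 253, whereas predict previously sat after that call. Python executes module-level statements in file order, so any name called at import time must already be bound. A minimal, self-contained illustration of that rule (the stub below is hypothetical, not from app.py):

def load_model_stub():
    # Stands in for the real load_model(); returns dummy placeholders.
    return "model", "tokenizer"

# Works: the def statement above has already executed by this point.
model, tokenizer = load_model_stub()

# Reversing the order fails the moment the module is imported:
#   model, tokenizer = late_stub()  # NameError: name 'late_stub' is not defined
#   def late_stub(): ...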