Update modeling_minicpmv.py
Changed file: modeling_minicpmv.py (+3 lines, −0 lines)
|
Reconstructed unified diff (indentation inferred; the scraped page had lost original whitespace):

```diff
@@ -239,6 +239,7 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
         **kwargs
     ):
         batch = []
+        counter = 0
         for model_inputs in model_inputs_batch:
             bs = len(model_inputs["input_ids"])
             img_list = model_inputs["pixel_values"]
@@ -265,6 +266,8 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
                 input_embeds,
                 vision_hidden_states,
             ) = self.get_vllm_embedding(model_inputs)
+            print(input_embeds.shape, f"INPUT_EMBEDS {counter}")
+            counter += 1
             batch.append(input_embeds)

         batch = torch.stack(batch)
```

Summary of the change: a `counter` variable is initialized before the loop over `model_inputs_batch`, and a debug `print` of `input_embeds.shape` (tagged with the running counter) is emitted after each `get_vllm_embedding` call, with the counter incremented per iteration. Three lines added, none removed.