andrewqian123 committed on
Commit
b891c8c
·
verified ·
1 Parent(s): a9aab54

Update modeling_minicpmv.py

Browse files
Files changed (1) hide show
  1. modeling_minicpmv.py +4 -2
modeling_minicpmv.py CHANGED
@@ -267,7 +267,7 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
267
  ) = self.get_vllm_embedding(model_inputs)
268
  batch.append(input_embeds)
269
 
270
-
271
  # output_ids = self._decode(input_embeds, tokenizer, **kwargs)
272
  if stream:
273
  kwargs.pop("decode_text")
@@ -333,7 +333,9 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
333
  batchM.append(copy_msgs)
334
  batchI.append(imagelist)
335
  prompt = processor.tokenizer.apply_chat_template(batchM, tokenize=False, add_generation_prompt=True)
336
- inputs = processor(prompt, batchI, return_tensors="pt", max_length=max_inp_length).to(self.device)
 
 
337
 
338
  if sampling:
339
  generation_config = {
 
267
  ) = self.get_vllm_embedding(model_inputs)
268
  batch.append(input_embeds)
269
 
270
+ batch = torch.stack(batch)
271
  # output_ids = self._decode(input_embeds, tokenizer, **kwargs)
272
  if stream:
273
  kwargs.pop("decode_text")
 
333
  batchM.append(copy_msgs)
334
  batchI.append(imagelist)
335
  prompt = processor.tokenizer.apply_chat_template(batchM, tokenize=False, add_generation_prompt=True)
336
+ inputs = processor(prompt, batchI, return_tensors="pt", max_length=max_inp_length)
337
+ for input in inputs:
338
+ input = input.to(self.device)
339
 
340
  if sampling:
341
  generation_config = {