andrewqian123 committed on
Commit
8cd6f4f
·
verified ·
1 Parent(s): e4bc263

Update modeling_minicpmv.py

Browse files
Files changed (1) hide show
  1. modeling_minicpmv.py +3 -1
modeling_minicpmv.py CHANGED
@@ -300,8 +300,10 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
300
  for pl in range(tensor.shape[1]):
301
  to_add.append(1)
302
  for pl in range(tensor.shape[1], max_x):
 
303
  tensor = torch.cat((tensor, vector_reshaped), dim=1)
304
  to_add.append(0)
 
305
  attention_mask.append(to_add)
306
  attention_mask = torch.tensor(attention_mask)
307
 
@@ -310,7 +312,7 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
310
  # Step 3: Stack the padded tensors into a single batch
311
  for stuff in batch:
312
  print(stuff.shape, "SHAPE")
313
- batch = torch.cat(padded_tensors, dim=0)
314
  print(batch.shape)
315
  print(batch)
316
  # output_ids = self._decode(input_embeds, tokenizer, **kwargs)
 
300
  for pl in range(tensor.shape[1]):
301
  to_add.append(1)
302
  for pl in range(tensor.shape[1], max_x):
303
+ print("here")
304
  tensor = torch.cat((tensor, vector_reshaped), dim=1)
305
  to_add.append(0)
306
+ print(tensor.shape, "UPDATED_SHAPE")
307
  attention_mask.append(to_add)
308
  attention_mask = torch.tensor(attention_mask)
309
 
 
312
  # Step 3: Stack the padded tensors into a single batch
313
  for stuff in batch:
314
  print(stuff.shape, "SHAPE")
315
+ batch = torch.cat(batch, dim=0)
316
  print(batch.shape)
317
  print(batch)
318
  # output_ids = self._decode(input_embeds, tokenizer, **kwargs)