ZhouZJ36DL committed on
Commit
63432be
·
1 Parent(s): 3c6ed71

modified: src/flux/modules/conditioner.py

Browse files
src/flux/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/__init__.cpython-310.pyc and b/src/flux/__pycache__/__init__.cpython-310.pyc differ
 
src/flux/__pycache__/_version.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/_version.cpython-310.pyc and b/src/flux/__pycache__/_version.cpython-310.pyc differ
 
src/flux/__pycache__/math.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/math.cpython-310.pyc and b/src/flux/__pycache__/math.cpython-310.pyc differ
 
src/flux/__pycache__/model.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/model.cpython-310.pyc and b/src/flux/__pycache__/model.cpython-310.pyc differ
 
src/flux/__pycache__/sampling.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/sampling.cpython-310.pyc and b/src/flux/__pycache__/sampling.cpython-310.pyc differ
 
src/flux/__pycache__/util.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/util.cpython-310.pyc and b/src/flux/__pycache__/util.cpython-310.pyc differ
 
src/flux/modules/__pycache__/autoencoder.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc and b/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc differ
 
src/flux/modules/__pycache__/conditioner.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/conditioner.cpython-310.pyc and b/src/flux/modules/__pycache__/conditioner.cpython-310.pyc differ
 
src/flux/modules/__pycache__/layers.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/layers.cpython-310.pyc and b/src/flux/modules/__pycache__/layers.cpython-310.pyc differ
 
src/flux/modules/conditioner.py CHANGED
@@ -96,11 +96,32 @@ class HFEmbedder(nn.Module):
96
 
97
  print(f"Input IDs device: {input_ids.device}")
98
  print(f"Attention Mask device: {attention_mask.device}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
99
  try:
100
  outputs = self.hf_module(
101
  input_ids=input_ids,
102
  attention_mask=attention_mask,
103
  output_hidden_states=False,
 
104
  )
105
  except IndexError as e:
106
  # 捕获并提供更详细的错误上下文
 
96
 
97
  print(f"Input IDs device: {input_ids.device}")
98
  print(f"Attention Mask device: {attention_mask.device}")
99
+
100
+ # --- FIX FOR CLIP POSITION IDs ---
101
+ # Prepare arguments for the model call
102
+ model_kwargs = {
103
+ "input_ids": input_ids,
104
+ "attention_mask": attention_mask,
105
+ "output_hidden_states": False,
106
+ }
107
+
108
+ # If it's a CLIP model, explicitly generate and pass position_ids
109
+ if self.is_clip:
110
+ # Generate position_ids: [0, 1, 2, ..., max_length-1] for each item in the batch
111
+ # Shape: (batch_size, max_length)
112
+ position_ids = torch.arange(self.max_length, dtype=torch.long, device=input_ids.device).expand(input_ids.size(0), -1)
113
+ print(f"Generated CLIP position_ids: shape={position_ids.shape}, range=[{position_ids.min().item()}, {position_ids.max().item()}]")
114
+ # Check if generated position_ids are within the model's limit
115
+ max_pos_emb = getattr(self.hf_module.config, 'max_position_embeddings', -1)
116
+ if max_pos_emb > 0 and position_ids.max() >= max_pos_emb:
117
+ raise ValueError(f"Generated position_ids max ({position_ids.max().item()}) >= model's max_position_embeddings ({max_pos_emb})")
118
+
119
  try:
120
  outputs = self.hf_module(
121
  input_ids=input_ids,
122
  attention_mask=attention_mask,
123
  output_hidden_states=False,
124
+ position_ids=position_ids
125
  )
126
  except IndexError as e:
127
  # 捕获并提供更详细的错误上下文