ZhouZJ36DL committed on
Commit
c524e88
·
1 Parent(s): 84a1d6d

modified: src/flux/modules/conditioner.py

Browse files
src/flux/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/__init__.cpython-310.pyc and b/src/flux/__pycache__/__init__.cpython-310.pyc differ
 
src/flux/__pycache__/_version.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/_version.cpython-310.pyc and b/src/flux/__pycache__/_version.cpython-310.pyc differ
 
src/flux/__pycache__/math.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/math.cpython-310.pyc and b/src/flux/__pycache__/math.cpython-310.pyc differ
 
src/flux/__pycache__/model.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/model.cpython-310.pyc and b/src/flux/__pycache__/model.cpython-310.pyc differ
 
src/flux/__pycache__/sampling.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/sampling.cpython-310.pyc and b/src/flux/__pycache__/sampling.cpython-310.pyc differ
 
src/flux/__pycache__/util.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/util.cpython-310.pyc and b/src/flux/__pycache__/util.cpython-310.pyc differ
 
src/flux/modules/__pycache__/autoencoder.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc and b/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc differ
 
src/flux/modules/__pycache__/conditioner.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/conditioner.cpython-310.pyc and b/src/flux/modules/__pycache__/conditioner.cpython-310.pyc differ
 
src/flux/modules/__pycache__/layers.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/layers.cpython-310.pyc and b/src/flux/modules/__pycache__/layers.cpython-310.pyc differ
 
src/flux/modules/conditioner.py CHANGED
@@ -1,6 +1,6 @@
1
  from torch import Tensor, nn
2
  from transformers import (CLIPTextModel, CLIPTokenizer, T5EncoderModel,
3
- T5Tokenizer, AutoConfig)
4
  import os
5
 
6
  class HFEmbedder(nn.Module):
@@ -12,9 +12,7 @@ class HFEmbedder(nn.Module):
12
 
13
  if self.is_clip:
14
  self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
15
- config = AutoConfig.from_pretrained(version, max_position_embeddings=77)
16
- self.hf_module = CLIPTextModel.from_pretrained(version, config=config, **hf_kwargs)
17
- #self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)
18
  else:
19
  self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length)
20
  self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs)
@@ -33,8 +31,12 @@ class HFEmbedder(nn.Module):
33
  return_tensors="pt",
34
  )
35
 
 
 
 
 
36
  outputs = self.hf_module(
37
- input_ids=batch_encoding["input_ids"].to(self.hf_module.device),
38
  attention_mask=None,
39
  output_hidden_states=False,
40
  )
 
1
  from torch import Tensor, nn
2
  from transformers import (CLIPTextModel, CLIPTokenizer, T5EncoderModel,
3
+ T5Tokenizer)
4
  import os
5
 
6
  class HFEmbedder(nn.Module):
 
12
 
13
  if self.is_clip:
14
  self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
15
+ self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)
 
 
16
  else:
17
  self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length)
18
  self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs)
 
31
  return_tensors="pt",
32
  )
33
 
34
+ input_ids = batch_encoding["input_ids"]
35
+ print(f"input_ids shape: {input_ids.shape}, max_length: {self.max_length}") # Debug
36
+ assert input_ids.shape[1] == 77, f"Sequence length {input_ids.shape[1]} exceeds max_length 77"
37
+
38
  outputs = self.hf_module(
39
+ input_ids=input_ids.to(self.hf_module.device),
40
  attention_mask=None,
41
  output_hidden_states=False,
42
  )