ZhouZJ36DL committed
Commit ff3110c · Parent(s): f322615

modified: src/flux/modules/conditioner.py

src/flux/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/__init__.cpython-310.pyc and b/src/flux/__pycache__/__init__.cpython-310.pyc differ
 
src/flux/__pycache__/_version.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/_version.cpython-310.pyc and b/src/flux/__pycache__/_version.cpython-310.pyc differ
 
src/flux/__pycache__/math.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/math.cpython-310.pyc and b/src/flux/__pycache__/math.cpython-310.pyc differ
 
src/flux/__pycache__/model.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/model.cpython-310.pyc and b/src/flux/__pycache__/model.cpython-310.pyc differ
 
src/flux/__pycache__/sampling.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/sampling.cpython-310.pyc and b/src/flux/__pycache__/sampling.cpython-310.pyc differ
 
src/flux/__pycache__/util.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/util.cpython-310.pyc and b/src/flux/__pycache__/util.cpython-310.pyc differ
 
src/flux/modules/__pycache__/autoencoder.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc and b/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc differ
 
src/flux/modules/__pycache__/conditioner.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/conditioner.cpython-310.pyc and b/src/flux/modules/__pycache__/conditioner.cpython-310.pyc differ
 
src/flux/modules/__pycache__/layers.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/layers.cpython-310.pyc and b/src/flux/modules/__pycache__/layers.cpython-310.pyc differ
 
src/flux/modules/conditioner.py CHANGED
@@ -11,25 +11,52 @@ class HFEmbedder(nn.Module):
         self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"

         if self.is_clip:
-            #self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
-            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained("/home/user/app/models/tokenizer", max_length=max_length, truncation=True)
+            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length, truncation=True)
+            #self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained("/home/user/app/models/tokenizer", max_length=max_length, truncation=True)
             self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)

-            # --- DEBUG ---
-            print(f"CLIP Tokenizer max length: {self.tokenizer.model_max_length}")
-            print(f"CLIP max position embeddings: {self.hf_module.config.max_position_embeddings}")
+            # --- DEBUG info ---
+            print(f"--- CLIP Model Info ---")
+            print(f"  Requested version/path: {version}")
+            print(f"  Tokenizer loaded from: {getattr(self.tokenizer, 'name_or_path', 'Unknown')}")
+            print(f"  Model loaded from: {getattr(self.hf_module, 'name_or_path', 'Unknown')}")
+            print(f"  Tokenizer max length: {getattr(self.tokenizer, 'model_max_length', 'N/A')}")
+            print(f"  Model max position embeddings: {getattr(self.hf_module.config, 'max_position_embeddings', 'N/A')}")
+
+            # Key debug info: vocabulary size
+            tokenizer_vocab_size = len(self.tokenizer.get_vocab()) if hasattr(self.tokenizer, 'get_vocab') else getattr(self.tokenizer, 'vocab_size', 'Unknown')
+            print(f"  Tokenizer vocab size (len(get_vocab())): {tokenizer_vocab_size}")
+            print(f"  Tokenizer vocab size (attribute): {getattr(self.tokenizer, 'vocab_size', 'N/A')}")
+            print(f"  Model config vocab size: {self.hf_module.config.vocab_size}")
+            print(f"  Actual model embedding weight shape: {self.hf_module.text_model.embeddings.token_embedding.weight.shape}")
+            print(f"-------------------------")

         else:
             self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length, truncation=True)
             self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs)
-            # --- DEBUG ---
-            '''print(f"T5 Tokenizer max length: {T5Tokenizer.model_max_length}")
-            print(f"T5 max position embeddings: {T5EncoderModel.config.max_position_embeddings}")'''
+
+            # --- DEBUG info ---
+            print(f"--- T5 Model Info ---")
+            print(f"  Requested version/path: {version}")
+            print(f"  Tokenizer loaded from: {getattr(self.tokenizer, 'name_or_path', 'Unknown')}")
+            print(f"  Model loaded from: {getattr(self.hf_module, 'name_or_path', 'Unknown')}")
+            print(f"  Tokenizer max length: {getattr(self.tokenizer, 'model_max_length', 'N/A')}")
+            print(f"  Model max position embeddings: {getattr(self.hf_module.config, 'd_model', 'N/A (T5 uses relative pos)')}")  # T5 uses relative position embeddings
+            tokenizer_vocab_size = len(self.tokenizer.get_vocab()) if hasattr(self.tokenizer, 'get_vocab') else getattr(self.tokenizer, 'vocab_size', 'Unknown')
+            print(f"  Tokenizer vocab size (len(get_vocab())): {tokenizer_vocab_size}")
+            print(f"  Tokenizer vocab size (attribute): {getattr(self.tokenizer, 'vocab_size', 'N/A')}")
+            print(f"  Model config vocab size: {self.hf_module.config.vocab_size}")
+            print(f"  Actual model embedding weight shape: {self.hf_module.encoder.embed_tokens.weight.shape}")
+            print(f"----------------------")

         self.hf_module = self.hf_module.eval().requires_grad_(False)


     def forward(self, text: list[str]) -> Tensor:
+        # Ensure text is a list
+        if isinstance(text, str):
+            text = [text]
+
         batch_encoding = self.tokenizer(
             text,
             truncation=True,
@@ -40,22 +67,53 @@ class HFEmbedder(nn.Module):
             return_tensors="pt",
         )

-        if self.is_clip:
-            flag = 'clip'
-        else:
-            flag = 't5'
-        print(f'foward {flag}')
+        encoder_type = 'clip' if self.is_clip else 't5'
+        print(f'Forward pass for {encoder_type}')
         input_ids = batch_encoding["input_ids"]
-        print(f"input_ids shape: {input_ids.shape}, max_length: {self.max_length}") # Debug
-        assert input_ids.shape[1] == self.max_length, f"Sequence length {input_ids.shape[1]} does not match max_length {self.max_length}"
-        print(input_ids)
+        print(f"Input IDs shape: {input_ids.shape}, Max Length: {self.max_length}")
+
+        # Stricter assertion: check the batch dimension as well
+        assert input_ids.shape == (len(text), self.max_length), f"Input IDs shape {input_ids.shape} does not match expected ({len(text)}, {self.max_length})"
+        print(f"Input IDs:\n{input_ids}")

-        print(f"{flag} self.tokenizer.vocab_size: {self.tokenizer.vocab_size}") # Debug
-        print(f"{flag} self.hf_module.config.vocab_size: {self.hf_module.config.vocab_size}") # Debug
+        # --- Key debug: check the input ID range ---
+        min_id, max_id = input_ids.min().item(), input_ids.max().item()
+        print(f"Input IDs range: [{min_id}, {max_id}]")
+
+        vocab_source = "tokenizer" if self.is_clip else "model_config"
+        vocab_size = len(self.tokenizer.get_vocab()) if self.is_clip and hasattr(self.tokenizer, 'get_vocab') else self.hf_module.config.vocab_size
+        print(f"Vocab size (from {vocab_source}): {vocab_size}")
+
+        if max_id >= vocab_size:
+            raise IndexError(f"Found input ID ({max_id}) >= vocab size ({vocab_size}). This will cause an embedding error.")
+
+        if min_id < 0:
+            raise IndexError(f"Found negative input ID ({min_id}). This is invalid.")
+
+        # Make sure inputs are on the same device as the model
+        input_ids = input_ids.to(self.hf_module.device)
+        attention_mask = batch_encoding["attention_mask"].to(self.hf_module.device)
+
+        print(f"Input IDs device: {input_ids.device}")
+        print(f"Attention Mask device: {attention_mask.device}")
+        print(f"Model device: {next(self.hf_module.parameters()).device}")
+
+        try:
+            outputs = self.hf_module(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                output_hidden_states=False,
+            )
+        except IndexError as e:
+            # Catch the error and log more detailed context before re-raising
+            print(f"*** IndexError caught during model forward pass ***")
+            print(f"Error: {e}")
+            print(f"Input IDs shape: {input_ids.shape}")
+            print(f"Input IDs range: [{input_ids.min().item()}, {input_ids.max().item()}]")
+            print(f"Model vocab size: {self.hf_module.config.vocab_size}")
+            if self.is_clip:
+                print(f"Tokenizer vocab size: {len(self.tokenizer.get_vocab()) if hasattr(self.tokenizer, 'get_vocab') else 'N/A'}")
+                print(f"Embedding layer weight shape: {self.hf_module.text_model.embeddings.token_embedding.weight.shape}")
+            raise  # Re-raise the error after logging

         return outputs[self.output_key]
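
The out-of-range guard added above exists because torch.nn.Embedding raises an IndexError as soon as any token ID equals or exceeds the size of the embedding table, which is exactly the failure this commit is instrumenting for. A minimal standalone sketch of that failure mode (illustration only, not part of the commit; the 49408 vocab size is CLIP's, assumed here for concreteness):

import torch
import torch.nn as nn

vocab_size = 49408                        # CLIP text vocab size (assumed for illustration)
embedding = nn.Embedding(vocab_size, 512)

ok_ids = torch.tensor([[0, 42, 49407]])   # all IDs < vocab_size
print(embedding(ok_ids).shape)            # torch.Size([1, 3, 512])

bad_ids = torch.tensor([[0, 42, 49408]])  # 49408 == vocab_size, one past the end
try:
    embedding(bad_ids)                    # raises IndexError on CPU
except IndexError as e:
    print(f"Caught, as the new guard predicts: {e}")

Raising the IndexError before the model call, as the commit does, pins the blame on a tokenizer/checkpoint vocab mismatch instead of an opaque failure deep inside the text encoder (or a device-side assert on CUDA).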
src/flux/util.py CHANGED
@@ -136,8 +136,8 @@ def load_t5(device: str | torch.device = "cuda", max_length: int = 77) -> HFEmbe
136
 
137
 
138
  def load_clip(device: str | torch.device = "cuda") -> HFEmbedder:
139
- #return HFEmbedder("openai/clip-vit-base-patch32", max_length=77, is_clip=True, torch_dtype=torch.bfloat16).to(device)
140
- return HFEmbedder("/home/user/app/models/text_encoder", max_length=77, is_clip=True, torch_dtype=torch.bfloat16).to(device)
141
 
142
 
143
  def load_ae(name: str, device: str | torch.device = "cuda", hf_download: bool = True) -> AutoEncoder:
 
136
 
137
 
138
  def load_clip(device: str | torch.device = "cuda") -> HFEmbedder:
139
+ return HFEmbedder("openai/clip-vit-base-patch32", max_length=77, is_clip=True, torch_dtype=torch.bfloat16).to(device)
140
+ #return HFEmbedder("/home/user/app/models/text_encoder", max_length=77, is_clip=True, torch_dtype=torch.bfloat16).to(device)
141
 
142
 
143
  def load_ae(name: str, device: str | torch.device = "cuda", hf_download: bool = True) -> AutoEncoder:
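
Since this hunk flips load_clip back to the Hub checkpoint, a quick smoke test can confirm that the tokenizer and text encoder now agree. The snippet below is hypothetical (not in the repository) and assumes the flux package is installed, the openai/clip-vit-base-patch32 weights are downloadable, and that bfloat16 inference on CPU is acceptable for a test:

import torch
from flux.util import load_clip

clip = load_clip(device="cpu")  # use "cuda" if a GPU is available
vec = clip(["a photo of an astronaut riding a horse"])
print(vec.shape, vec.dtype)     # pooler_output: (1, 512) for this checkpoint, torch.bfloat16

With the debug prints added in conditioner.py, this single call also dumps the tokenizer and model vocab sizes, so a mismatch becomes visible before the embedding lookup can fail.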