Commit · 9db2fe3
1 Parent(s): c731de3
modified: src/flux/modules/conditioner.py

Files changed:
- src/flux/__pycache__/__init__.cpython-310.pyc +0 -0
- src/flux/__pycache__/_version.cpython-310.pyc +0 -0
- src/flux/__pycache__/math.cpython-310.pyc +0 -0
- src/flux/__pycache__/model.cpython-310.pyc +0 -0
- src/flux/__pycache__/sampling.cpython-310.pyc +0 -0
- src/flux/__pycache__/util.cpython-310.pyc +0 -0
- src/flux/modules/__pycache__/autoencoder.cpython-310.pyc +0 -0
- src/flux/modules/__pycache__/conditioner.cpython-310.pyc +0 -0
- src/flux/modules/__pycache__/layers.cpython-310.pyc +0 -0
- src/flux/modules/conditioner.py +23 -0
src/flux/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/__init__.cpython-310.pyc and b/src/flux/__pycache__/__init__.cpython-310.pyc differ

src/flux/__pycache__/_version.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/_version.cpython-310.pyc and b/src/flux/__pycache__/_version.cpython-310.pyc differ

src/flux/__pycache__/math.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/math.cpython-310.pyc and b/src/flux/__pycache__/math.cpython-310.pyc differ

src/flux/__pycache__/model.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/model.cpython-310.pyc and b/src/flux/__pycache__/model.cpython-310.pyc differ

src/flux/__pycache__/sampling.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/sampling.cpython-310.pyc and b/src/flux/__pycache__/sampling.cpython-310.pyc differ

src/flux/__pycache__/util.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/util.cpython-310.pyc and b/src/flux/__pycache__/util.cpython-310.pyc differ

src/flux/modules/__pycache__/autoencoder.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc and b/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc differ

src/flux/modules/__pycache__/conditioner.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/conditioner.cpython-310.pyc and b/src/flux/modules/__pycache__/conditioner.cpython-310.pyc differ

src/flux/modules/__pycache__/layers.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/layers.cpython-310.pyc and b/src/flux/modules/__pycache__/layers.cpython-310.pyc differ
src/flux/modules/conditioner.py CHANGED
@@ -19,6 +19,29 @@ class HFEmbedder(nn.Module):
 
         self.hf_module = self.hf_module.eval().requires_grad_(False)
 
+        # In the HFEmbedder __init__, after the CLIP model has been loaded.
+        # Assumes 'self.hf_module' is the CLIPTextModel instance of the CLIP embedder.
+        if self.is_clip:  # guard, since HFEmbedder is used for both T5 and CLIP
+            clip_model_config = self.hf_module.config
+            print(f"CLIP Model Version: {self.hf_module.name_or_path}")
+            print(f"CLIP Config max_position_embeddings: {clip_model_config.max_position_embeddings}")
+
+            # Access the CLIPTextEmbeddings module; for a CLIPTextModel it lives
+            # at model.text_model.embeddings, not model.embeddings.
+            text_embeddings_module = self.hf_module.text_model.embeddings
+
+            print(f"CLIP Position Embedding Layer num_embeddings: {text_embeddings_module.position_embedding.num_embeddings}")
+            print(f"CLIP Position IDs buffer 'position_ids' (from CLIPTextEmbeddings) shape: {text_embeddings_module.position_ids.shape}")
+
+            if text_embeddings_module.position_ids.shape[1] != text_embeddings_module.position_embedding.num_embeddings:
+                print("CRITICAL WARNING: Mismatch between position_ids buffer length and actual embedding layer size!")
+            if clip_model_config.max_position_embeddings != text_embeddings_module.position_embedding.num_embeddings:
+                print("CRITICAL WARNING: Mismatch between config.max_position_embeddings and actual embedding layer size!")
+            if self.max_length != text_embeddings_module.position_embedding.num_embeddings:
+                print(f"WARNING: Tokenizer max_length ({self.max_length}) "
+                      f"does not match position embedding size ({text_embeddings_module.position_embedding.num_embeddings})")
+
+
     def forward(self, text: list[str]) -> Tensor:
         batch_encoding = self.tokenizer(
             text,
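For reference, the same consistency check can be reproduced outside HFEmbedder. The sketch below is illustrative only: it assumes the Hugging Face transformers CLIPTextModel API with attribute names as in recent releases, and the checkpoint id and max_length value are placeholders, not values taken from this commit.

# Standalone sketch of the position-embedding consistency check.
from transformers import CLIPTextModel

version = "openai/clip-vit-large-patch14"  # placeholder checkpoint id, not from this commit
max_length = 77                            # placeholder for HFEmbedder's tokenizer max_length

model = CLIPTextModel.from_pretrained(version).eval().requires_grad_(False)

# For a CLIPTextModel, CLIPTextEmbeddings lives under model.text_model
embeddings = model.text_model.embeddings
num_pos = embeddings.position_embedding.num_embeddings

print(f"config.max_position_embeddings: {model.config.max_position_embeddings}")
print(f"position_embedding.num_embeddings: {num_pos}")
print(f"position_ids buffer shape: {embeddings.position_ids.shape}")

# All three sizes should agree for a stock CLIP text encoder.
assert embeddings.position_ids.shape[1] == num_pos
assert model.config.max_position_embeddings == num_pos
assert max_length == num_pos

If the sizes disagree, the usual cause is a resized or non-standard checkpoint, or a tokenizer configured for a different sequence length than the position-embedding table.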
|