Commit 749bc4b
Parent: a25edb3
modified: src/flux/modules/conditioner.py
- src/flux/__pycache__/__init__.cpython-310.pyc +0 -0
- src/flux/__pycache__/_version.cpython-310.pyc +0 -0
- src/flux/__pycache__/math.cpython-310.pyc +0 -0
- src/flux/__pycache__/model.cpython-310.pyc +0 -0
- src/flux/__pycache__/sampling.cpython-310.pyc +0 -0
- src/flux/__pycache__/util.cpython-310.pyc +0 -0
- src/flux/modules/__pycache__/autoencoder.cpython-310.pyc +0 -0
- src/flux/modules/__pycache__/conditioner.cpython-310.pyc +0 -0
- src/flux/modules/__pycache__/layers.cpython-310.pyc +0 -0
- src/flux/modules/conditioner.py +8 -2
src/flux/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/__init__.cpython-310.pyc and b/src/flux/__pycache__/__init__.cpython-310.pyc differ

src/flux/__pycache__/_version.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/_version.cpython-310.pyc and b/src/flux/__pycache__/_version.cpython-310.pyc differ

src/flux/__pycache__/math.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/math.cpython-310.pyc and b/src/flux/__pycache__/math.cpython-310.pyc differ

src/flux/__pycache__/model.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/model.cpython-310.pyc and b/src/flux/__pycache__/model.cpython-310.pyc differ

src/flux/__pycache__/sampling.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/sampling.cpython-310.pyc and b/src/flux/__pycache__/sampling.cpython-310.pyc differ

src/flux/__pycache__/util.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/util.cpython-310.pyc and b/src/flux/__pycache__/util.cpython-310.pyc differ

src/flux/modules/__pycache__/autoencoder.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc and b/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc differ

src/flux/modules/__pycache__/conditioner.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/conditioner.cpython-310.pyc and b/src/flux/modules/__pycache__/conditioner.cpython-310.pyc differ

src/flux/modules/__pycache__/layers.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/layers.cpython-310.pyc and b/src/flux/modules/__pycache__/layers.cpython-310.pyc differ
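The `__pycache__` entries above are compiled bytecode caches that are regenerated on every run; they are commonly excluded from version control with a `__pycache__/` line in `.gitignore` and dropped from tracking once via `git rm -r --cached`.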
src/flux/modules/conditioner.py CHANGED

@@ -14,9 +14,17 @@ class HFEmbedder(nn.Module):
             #self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
             self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained("/home/user/app/models/tokenizer", max_length=max_length, truncation=True)
             self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)
+
+            # --- DEBUG ---
+            print(f"CLIP Tokenizer max length: {CLIPTokenizer.model_max_length}")
+            print(f"CLIP max position embeddings: {CLIPTextModel.config.max_position_embeddings}")
+
         else:
             self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length, truncation=True)
             self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs)
+            # --- DEBUG ---
+            '''print(f"T5 Tokenizer max length: {T5Tokenizer.model_max_length}")
+            print(f"T5 max position embeddings: {T5EncoderModel.config.max_position_embeddings}")'''
 
         self.hf_module = self.hf_module.eval().requires_grad_(False)
 
@@ -42,8 +50,6 @@ class HFEmbedder(nn.Module):
         assert input_ids.shape[1] == self.max_length, f"Sequence length {input_ids.shape[1]} does not match max_length {self.max_length}"
         print(input_ids)
 
-        print(f"self.tokenizer.vocab_size: {self.tokenizer.vocab_size}") # Debug
-        print(f"self.hf_module.config.vocab_size: {self.hf_module.config.vocab_size}") # Debug
         print(f"self.tokenizer.vocab_size: {self.tokenizer.vocab_size}") # Debug
         print(f"self.hf_module.config.vocab_size: {self.hf_module.config.vocab_size}") # Debug
 
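Note that the newly added CLIP debug prints look up `model_max_length` and `config` on the `CLIPTokenizer` and `CLIPTextModel` classes rather than on the instances constructed just above, so they would raise `AttributeError` at runtime; the commented-out T5 block has the same problem, and `T5Config` defines no `max_position_embeddings` at all (T5 uses relative position embeddings). A minimal corrected sketch of the CLIP branch's debug lines, assuming the `HFEmbedder.__init__` context shown in the diff:

            # --- DEBUG --- (hypothetical fix: query the instances created above, not the classes)
            print(f"CLIP Tokenizer max length: {self.tokenizer.model_max_length}")
            print(f"CLIP max position embeddings: {self.hf_module.config.max_position_embeddings}")

For the T5 branch, `self.tokenizer.model_max_length` is the analogous instance attribute to print; there is no direct positional-embedding counterpart in its config.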