Commit
·
3e7ee7c
1
Parent(s):
b3b8cf5
Update default device setting to CPU in encoder modules for improved compatibility across systems without GPU support.
Browse files
imagedream/ldm/modules/encoders/modules.py
CHANGED
|
@@ -62,7 +62,7 @@ class FrozenT5Embedder(AbstractEncoder):
|
|
| 62 |
"""Uses the T5 transformer encoder for text"""
|
| 63 |
|
| 64 |
def __init__(
|
| 65 |
-
self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True
|
| 66 |
): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
|
| 67 |
super().__init__()
|
| 68 |
self.tokenizer = T5Tokenizer.from_pretrained(version)
|
|
@@ -106,7 +106,7 @@ class FrozenCLIPEmbedder(AbstractEncoder):
|
|
| 106 |
def __init__(
|
| 107 |
self,
|
| 108 |
version="openai/clip-vit-large-patch14",
|
| 109 |
-
device="cuda",
|
| 110 |
max_length=77,
|
| 111 |
freeze=True,
|
| 112 |
layer="last",
|
|
@@ -173,7 +173,7 @@ class FrozenOpenCLIPEmbedder(AbstractEncoder, nn.Module):
|
|
| 173 |
self,
|
| 174 |
arch="ViT-H-14",
|
| 175 |
version="laion2b_s32b_b79k",
|
| 176 |
-
device="cuda",
|
| 177 |
max_length=77,
|
| 178 |
freeze=True,
|
| 179 |
layer="last",
|
|
@@ -306,7 +306,7 @@ class FrozenCLIPT5Encoder(AbstractEncoder):
|
|
| 306 |
self,
|
| 307 |
clip_version="openai/clip-vit-large-patch14",
|
| 308 |
t5_version="google/t5-v1_1-xl",
|
| 309 |
-
device="cuda",
|
| 310 |
clip_max_length=77,
|
| 311 |
t5_max_length=77,
|
| 312 |
):
|
|
|
|
| 62 |
"""Uses the T5 transformer encoder for text"""
|
| 63 |
|
| 64 |
def __init__(
|
| 65 |
+
self, version="google/t5-v1_1-large", device="cpu", max_length=77, freeze=True
|
| 66 |
): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
|
| 67 |
super().__init__()
|
| 68 |
self.tokenizer = T5Tokenizer.from_pretrained(version)
|
|
|
|
| 106 |
def __init__(
|
| 107 |
self,
|
| 108 |
version="openai/clip-vit-large-patch14",
|
| 109 |
+
device="cpu",
|
| 110 |
max_length=77,
|
| 111 |
freeze=True,
|
| 112 |
layer="last",
|
|
|
|
| 173 |
self,
|
| 174 |
arch="ViT-H-14",
|
| 175 |
version="laion2b_s32b_b79k",
|
| 176 |
+
device="cpu",
|
| 177 |
max_length=77,
|
| 178 |
freeze=True,
|
| 179 |
layer="last",
|
|
|
|
| 306 |
self,
|
| 307 |
clip_version="openai/clip-vit-large-patch14",
|
| 308 |
t5_version="google/t5-v1_1-xl",
|
| 309 |
+
device="cpu",
|
| 310 |
clip_max_length=77,
|
| 311 |
t5_max_length=77,
|
| 312 |
):
|