Commit: Upload pipeline.py with huggingface_hub
File changed: pipeline.py (+4, −2)
@@ -121,12 +121,13 @@ class StableDiffusionT5Pipeline(StableDiffusionPipeline):
         prompt,
         negative_prompt, #can be None
         device,
+        padding=None,
     ):
         def _tok(text):
             out = self.tokenizer(
                 text,
                 return_tensors="pt",
-                padding=
+                padding=padding,
                 max_length=self.tokenizer.model_max_length,
                 truncation=True,
             )
@@ -149,13 +150,14 @@ class StableDiffusionT5Pipeline(StableDiffusionPipeline):
         num_images_per_prompt,
         do_classifier_free_guidance,
         negative_prompt=None,
+        padding=None,
         **kwargs,
     ):


         scaling_factor = self.t5_projection.config.scaling_factor

-        pos_hidden, neg_hidden = self.encode_prompt_t5(prompt, negative_prompt, device)
+        pos_hidden, neg_hidden = self.encode_prompt_t5(prompt, negative_prompt, device, padding=padding)

         pos_embeds = self.t5_projection.text_projection(pos_hidden)
         pos_embeds = pos_embeds * scaling_factor

NOTE(review): the removed line at old line 129 is rendered only as "padding=" in the
extracted page — its right-hand side (presumably a fixed value such as "max_length")
appears truncated by the extraction; verify against the original commit before relying
on the old-side text.