Spaces:
Running
on
Zero
Running
on
Zero
<fix> Initialize base models at the beginning.
Browse files
app.py
CHANGED
|
@@ -83,11 +83,7 @@ def init_basemodel():
|
|
| 83 |
|
| 84 |
@spaces.GPU
|
| 85 |
def process_image_and_text(condition_image, target_prompt, condition_image_prompt, task, random_seed, inpainting, fill_x1, fill_x2, fill_y1, fill_y2):
|
| 86 |
-
# set up
|
| 87 |
-
required_models = [transformer, scheduler, vae, text_encoder, text_encoder_2, tokenizer, tokenizer_2, image_processor]
|
| 88 |
-
if any(model is None for model in required_models):
|
| 89 |
-
init_basemodel()
|
| 90 |
-
|
| 91 |
if pipe is None or current_task != task:
|
| 92 |
# insert LoRA
|
| 93 |
lora_config = LoraConfig(
|
|
@@ -322,4 +318,5 @@ def create_app():
|
|
| 322 |
|
| 323 |
|
| 324 |
if __name__ == "__main__":
|
|
|
|
| 325 |
create_app().launch(debug=True, ssr_mode=False)
|
|
|
|
| 83 |
|
| 84 |
@spaces.GPU
|
| 85 |
def process_image_and_text(condition_image, target_prompt, condition_image_prompt, task, random_seed, inpainting, fill_x1, fill_x2, fill_y1, fill_y2):
|
| 86 |
+
# set up the model
|
|
|
|
|
|
|
|
|
|
|
|
|
| 87 |
if pipe is None or current_task != task:
|
| 88 |
# insert LoRA
|
| 89 |
lora_config = LoraConfig(
|
|
|
|
| 318 |
|
| 319 |
|
| 320 |
if __name__ == "__main__":
|
| 321 |
+
init_basemodel()
|
| 322 |
create_app().launch(debug=True, ssr_mode=False)
|