Spaces:
Running on Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -16,21 +16,25 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
| 16 |
MAX_SEED = np.iinfo(np.int32).max
|
| 17 |
MAX_IMAGE_SIZE = 1024
|
| 18 |
|
| 19 |
-
def remote_text_encoder(prompts):
|
| 20 |
-
import
|
| 21 |
-
|
| 22 |
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
# Load model
|
| 36 |
repo_id = "black-forest-labs/FLUX.2-dev"
|
|
@@ -80,7 +84,7 @@ def update_dimensions_from_image(image_list):
|
|
| 80 |
def get_duration(prompt_embeds, image_list, width, height, num_inference_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
|
| 81 |
num_images = 0 if image_list is None else len(image_list)
|
| 82 |
step_duration = 1 + 0.8 * num_images
|
| 83 |
-
return max(
|
| 84 |
|
| 85 |
@spaces.GPU(duration=get_duration)
|
| 86 |
def generate_image(prompt_embeds, image_list, width, height, num_inference_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
|
|
|
|
| 16 |
MAX_SEED = np.iinfo(np.int32).max
|
| 17 |
MAX_IMAGE_SIZE = 1024
|
| 18 |
|
| 19 |
+
def remote_text_encoder(prompts, max_retries=3):
    """Encode *prompts* into prompt embeddings via a remote Gradio Space.

    Calls the hosted "multimodalart/mistral-text-encoder" Space's
    ``/encode_text`` endpoint and deserializes the returned file with
    ``torch.load``. Transient failures are retried with a short pause.

    Args:
        prompts: Text prompt(s) forwarded to the remote endpoint.
        max_retries: Number of attempts before giving up (default 3).

    Returns:
        The deserialized prompt embeddings (presumably a torch.Tensor —
        confirm against the Space's API).

    Raises:
        Exception: When all ``max_retries`` attempts fail (or when
            ``max_retries`` is not positive); chained to the last
            underlying error for a full traceback.
    """
    from gradio_client import Client
    import time

    last_error = None
    for attempt in range(max_retries):
        try:
            # Fresh client per attempt, in case a previous connection went stale.
            client = Client("multimodalart/mistral-text-encoder")
            result = client.predict(
                prompt=prompts,
                api_name="/encode_text"
            )
            # NOTE(security): torch.load on a file fetched from a remote
            # service can execute arbitrary code via pickle. The Space is
            # trusted here, but consider weights_only=True if the payload
            # is plain tensors.
            prompt_embeds = torch.load(result[0])
            return prompt_embeds
        except Exception as e:
            last_error = e
            print(f"Text encoder attempt {attempt + 1}/{max_retries} failed: {e}")
            if attempt < max_retries - 1:
                time.sleep(2)  # brief fixed backoff before retrying
    # Keep the exception type callers already catch (Exception), but chain
    # the underlying cause instead of discarding its traceback. Raising here
    # also guards against max_retries <= 0, which previously returned None.
    raise Exception(
        f"Text encoder failed after {max_retries} attempts: {last_error}"
    ) from last_error
|
| 38 |
|
| 39 |
# Load model
|
| 40 |
repo_id = "black-forest-labs/FLUX.2-dev"
|
|
|
|
| 84 |
def get_duration(prompt_embeds, image_list, width, height, num_inference_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
    """Estimate the GPU time budget (seconds) for one generate_image call.

    Each inference step is costed at 1s plus 0.8s per conditioning image;
    10s of fixed overhead is added, and the estimate is floored at 45s.
    """
    if image_list is None:
        image_count = 0
    else:
        image_count = len(image_list)
    per_step_cost = 1 + 0.8 * image_count
    estimate = num_inference_steps * per_step_cost + 10
    return max(45, estimate)
|
| 88 |
|
| 89 |
@spaces.GPU(duration=get_duration)
|
| 90 |
def generate_image(prompt_embeds, image_list, width, height, num_inference_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
|