Update app.py
app.py CHANGED

@@ -9,6 +9,7 @@ from diffusers import Flux2Pipeline, Flux2Transformer2DModel
 import requests
 from PIL import Image
 import base64
+from gradio_client import Client
 
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -16,15 +17,15 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
+# Initialize text encoder client ONCE at module level to avoid thread exhaustion
+text_encoder_client = Client("multimodalart/mistral-text-encoder")
+
 def remote_text_encoder(prompts):
-
-
-    client = Client("multimodalart/mistral-text-encoder")
-    result = client.predict(
+    result = text_encoder_client.predict(
         prompt=prompts,
         api_name="/encode_text"
     )
-
+
     prompt_embeds = torch.load(result[0])
     return prompt_embeds
 
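For reference, here is the affected part of app.py as it reads after this commit, assembled from the two hunks above. The inline comments are added here for explanation and are not part of the commit; the surrounding imports (torch, numpy, the Flux2Pipeline setup) live elsewhere in the file and are unchanged.

import requests
from PIL import Image
import base64
from gradio_client import Client

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

# Initialize text encoder client ONCE at module level to avoid thread exhaustion
text_encoder_client = Client("multimodalart/mistral-text-encoder")

def remote_text_encoder(prompts):
    # Call the remote Space's /encode_text endpoint; the first element of the
    # result is a path to a serialized tensor, which torch.load deserializes
    # back into the prompt embeddings.
    result = text_encoder_client.predict(
        prompt=prompts,
        api_name="/encode_text"
    )

    prompt_embeds = torch.load(result[0])
    return prompt_embeds

The design choice matches the commit comment: each gradio_client.Client instance carries its own background threads, so constructing a fresh client inside remote_text_encoder on every request can exhaust threads under concurrent Gradio traffic. Creating the client once at import time and reusing it avoids that; elsewhere in the file the returned embeddings are presumably passed to the Flux2Pipeline in place of a raw text prompt.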