Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -16,25 +16,21 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
| 16 |
MAX_SEED = np.iinfo(np.int32).max
|
| 17 |
MAX_IMAGE_SIZE = 1024
|
| 18 |
|
| 19 |
-
def remote_text_encoder(prompts
|
| 20 |
-
|
| 21 |
-
import
|
| 22 |
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
if attempt < max_retries - 1:
|
| 35 |
-
time.sleep(2)
|
| 36 |
-
else:
|
| 37 |
-
raise Exception(f"Text encoder failed after {max_retries} attempts: {e}")
|
| 38 |
|
| 39 |
# Load model
|
| 40 |
repo_id = "black-forest-labs/FLUX.2-dev"
|
|
|
|
| 16 |
MAX_SEED = np.iinfo(np.int32).max
|
| 17 |
MAX_IMAGE_SIZE = 1024
|
| 18 |
|
| 19 |
+
def remote_text_encoder(prompts):
    """Encode *prompts* using the hosted FLUX.2 remote text-encoder service.

    Sends the prompt(s) to the Hugging Face-operated endpoint and deserializes
    the returned bytes into prompt embeddings with ``torch.load``.

    Parameters
    ----------
    prompts : str | list[str]
        Prompt text forwarded verbatim in the JSON payload.
        # assumes the endpoint accepts either a single string or a list — TODO confirm

    Returns
    -------
    The deserialized prompt embeddings (whatever object the service pickled,
    presumably a tensor or tuple of tensors).

    Raises
    ------
    requests.HTTPError
        If the service returns a non-2xx status.
    requests.Timeout
        If the request exceeds the 120-second timeout.
    """
    # Local imports keep these optional deps off the module import path.
    import requests
    from huggingface_hub import get_token

    response = requests.post(
        "https://remote-text-encoder-flux-2.huggingface.co/predict",
        json={"prompt": prompts},
        headers={
            "Authorization": f"Bearer {get_token()}",
            "Content-Type": "application/json"
        },
        timeout=120
    )
    # Fail fast with a clear HTTP error instead of letting torch.load choke
    # on an HTML/JSON error body from a failed request.
    response.raise_for_status()
    # NOTE(review): torch.load unpickles arbitrary bytes — acceptable only
    # because this endpoint is trusted (HF-operated). Never point this at an
    # untrusted host.
    prompt_embeds = torch.load(io.BytesIO(response.content))
    return prompt_embeds
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
# Load model
|
| 36 |
repo_id = "black-forest-labs/FLUX.2-dev"
|