Gemini899 committed on
Commit
59d61f0
·
verified ·
1 Parent(s): 4d8d54b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -18
app.py CHANGED
@@ -16,25 +16,21 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
16
  MAX_SEED = np.iinfo(np.int32).max
17
  MAX_IMAGE_SIZE = 1024
18
 
19
def remote_text_encoder(prompts, max_retries=3):
    """Encode ``prompts`` via the remote mistral text-encoder Gradio Space.

    Args:
        prompts: Prompt text (or whatever payload the ``/encode_text``
            endpoint accepts) to encode remotely.
        max_retries: Number of attempts before giving up (default 3).

    Returns:
        The prompt embeddings deserialized with ``torch.load`` from the
        file path returned by the Space.

    Raises:
        Exception: if every attempt fails, or if ``max_retries`` is not
            positive (the original silently returned None in that case).
    """
    from gradio_client import Client
    import time

    last_error = None
    for attempt in range(max_retries):
        try:
            # A fresh Client per attempt, so a broken connection from a
            # failed attempt is not reused.
            client = Client("multimodalart/mistral-text-encoder")
            result = client.predict(
                prompt=prompts,
                api_name="/encode_text"
            )
            # The Space returns a path to a serialized tensor file.
            return torch.load(result[0])
        except Exception as e:
            last_error = e
            print(f"Text encoder attempt {attempt + 1}/{max_retries} failed: {e}")
            if attempt < max_retries - 1:
                time.sleep(2)  # brief backoff before retrying
    # Reached when all attempts fail OR max_retries <= 0; always raise
    # instead of falling through and returning None.
    raise Exception(
        f"Text encoder failed after {max_retries} attempts: {last_error}"
    ) from last_error
38
 
39
  # Load model
40
  repo_id = "black-forest-labs/FLUX.2-dev"
 
16
  MAX_SEED = np.iinfo(np.int32).max
17
  MAX_IMAGE_SIZE = 1024
18
 
19
def remote_text_encoder(prompts):
    """Encode ``prompts`` via the hosted FLUX.2 remote text-encoder endpoint.

    Args:
        prompts: Prompt text (sent as the JSON field ``"prompt"``).

    Returns:
        The prompt embeddings deserialized with ``torch.load`` from the
        raw response body.

    Raises:
        requests.HTTPError: if the endpoint returns an error status.
        requests.Timeout: if the request exceeds the 120 s timeout.
    """
    import io  # imported locally so the function is self-contained
    import requests
    from huggingface_hub import get_token

    response = requests.post(
        "https://remote-text-encoder-flux-2.huggingface.co/predict",
        json={"prompt": prompts},
        headers={
            "Authorization": f"Bearer {get_token()}",
            "Content-Type": "application/json"
        },
        timeout=120
    )
    # Fail fast on HTTP errors; otherwise torch.load would choke on an
    # error page with a cryptic unpickling message.
    response.raise_for_status()
    # NOTE(review): torch.load deserializes pickled data. The endpoint is
    # a trusted HF service, but weights_only=True would be safer if the
    # installed torch version supports it for this payload — confirm.
    prompt_embeds = torch.load(io.BytesIO(response.content))
    return prompt_embeds
 
 
 
 
34
 
35
  # Load model
36
  repo_id = "black-forest-labs/FLUX.2-dev"