Gemini899 committed on
Commit
950e246
·
verified ·
1 Parent(s): f901488

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -17
app.py CHANGED
@@ -16,25 +16,17 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
16
  MAX_SEED = np.iinfo(np.int32).max
17
  MAX_IMAGE_SIZE = 1024
18
 
19
def remote_text_encoder(prompts, max_retries=3):
    """Encode prompts via the remote mistral-text-encoder Space, with retries.

    Args:
        prompts: Prompt text (or batch of prompts) forwarded to the Space's
            ``/encode_text`` endpoint.
        max_retries: Number of attempts before giving up (default 3).

    Returns:
        The object loaded with ``torch.load`` from the file the remote
        encoder returns (presumably prompt embeddings — confirm with the
        Space's API).

    Raises:
        RuntimeError: If every attempt fails; chained to the last error.
    """
    from gradio_client import Client
    import time

    for attempt in range(max_retries):
        try:
            # Re-create the client on every attempt so state from a
            # failed connection is never reused.
            client = Client("multimodalart/mistral-text-encoder")
            result = client.predict(
                prompt=prompts,
                api_name="/encode_text"
            )
            # NOTE(review): result[0] is a file downloaded from a remote
            # Space and torch.load unpickles it (arbitrary-code risk).
            # Prefer torch.load(..., weights_only=True) if the payload is
            # a plain tensor — confirm with the Space owner.
            prompt_embeds = torch.load(result[0])
            return prompt_embeds
        except Exception as e:
            print(f"Text encoder attempt {attempt + 1}/{max_retries} failed: {e}")
            if attempt < max_retries - 1:
                time.sleep(2)  # brief fixed backoff before retrying
            else:
                # Chain the final underlying error for easier debugging;
                # RuntimeError is still caught by `except Exception`.
                raise RuntimeError(
                    f"Text encoder failed after {max_retries} attempts: {e}"
                ) from e
38
 
39
  # Load model
40
  repo_id = "black-forest-labs/FLUX.2-dev"
@@ -266,3 +258,4 @@ FLUX.2 [dev] is a 32B model rectified flow capable of generating, editing and co
266
  outputs=[result, seed]
267
  )
268
 
 
 
16
  MAX_SEED = np.iinfo(np.int32).max
17
  MAX_IMAGE_SIZE = 1024
18
 
19
def remote_text_encoder(prompts):
    """Encode prompts with the remote mistral-text-encoder Space.

    Sends *prompts* to the Space's ``/encode_text`` endpoint and loads the
    returned file with ``torch.load``.

    Args:
        prompts: Prompt text (or batch of prompts) to encode remotely.

    Returns:
        The object deserialized from the file the endpoint returns
        (presumably prompt embeddings — confirm with the Space's API).
    """
    from gradio_client import Client

    encoder_client = Client("multimodalart/mistral-text-encoder")
    prediction = encoder_client.predict(
        prompt=prompts,
        api_name="/encode_text"
    )

    # NOTE(review): prediction[0] is a file fetched from a remote Space and
    # torch.load unpickles it — consider weights_only=True if the payload
    # is a plain tensor.
    embeddings = torch.load(prediction[0])
    return embeddings
 
 
 
 
 
 
 
30
 
31
  # Load model
32
  repo_id = "black-forest-labs/FLUX.2-dev"
 
258
  outputs=[result, seed]
259
  )
260
 
261
+ demo.launch(css=css)