Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
|
@@ -84,8 +84,8 @@ def load_text_summarization_model():
|
|
| 84 |
tokenizer, model = load_text_summarization_model()
|
| 85 |
|
| 86 |
|
| 87 |
-
|
| 88 |
-
def
|
| 89 |
text: str,
|
| 90 |
seed: int = 42,
|
| 91 |
width: int = 1024,
|
|
@@ -109,7 +109,6 @@ def generate_image_with_flux(
|
|
| 109 |
# Initialize FLUX pipeline here
|
| 110 |
dtype = torch.bfloat16
|
| 111 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 112 |
-
|
| 113 |
torch.cuda.empty_cache() # Clear cache
|
| 114 |
gc.collect() # Run garbage collection
|
| 115 |
flux_pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
|
|
@@ -128,6 +127,37 @@ def generate_image_with_flux(
|
|
| 128 |
print("DEBUG: Image generated successfully.")
|
| 129 |
return image
|
| 130 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 131 |
# --------- End of MinDalle Functions ---------
|
| 132 |
# Merge audio files
|
| 133 |
|
|
|
|
| 84 |
tokenizer, model = load_text_summarization_model()
|
| 85 |
|
| 86 |
|
| 87 |
+
#@spaces.GPU()
|
| 88 |
+
def generate_image_with_flux_old(
|
| 89 |
text: str,
|
| 90 |
seed: int = 42,
|
| 91 |
width: int = 1024,
|
|
|
|
| 109 |
# Initialize FLUX pipeline here
|
| 110 |
dtype = torch.bfloat16
|
| 111 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
|
| 112 |
torch.cuda.empty_cache() # Clear cache
|
| 113 |
gc.collect() # Run garbage collection
|
| 114 |
flux_pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
|
|
|
|
| 127 |
print("DEBUG: Image generated successfully.")
|
| 128 |
return image
|
| 129 |
|
| 130 |
+
@spaces.GPU()
def generate_image_with_flux(
        text: str,
        seed: int = 42,
        width: int = 1024,
        height: int = 1024,
        num_inference_steps: int = 4,
        randomize_seed: bool = True):
    """
    Generate an image from ``text`` using the globally initialized FLUX pipeline.

    Args:
        text: Prompt to render.
        seed: RNG seed used when ``randomize_seed`` is False.
        width: Output image width in pixels.
        height: Output image height in pixels.
        num_inference_steps: Diffusion steps (FLUX.1-schnell targets ~4).
        randomize_seed: When True, replace ``seed`` with a random value
            in [0, MAX_SEED].

    Returns:
        The first generated PIL image from the pipeline output.

    Raises:
        RuntimeError: If the module-level ``flux_pipe`` was never initialized
            (e.g. CUDA unavailable at startup).
    """
    print(f"DEBUG: Generating image with FLUX for text: '{text}'")

    # Use the global flux_pipe (which was already initialized at startup)
    global flux_pipe
    if flux_pipe is None:
        raise RuntimeError("FLUX pipeline not initialized because CUDA is unavailable.")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Fix: determine the device locally. The original referenced a bare
    # `device` name that was a local of the removed old function, not a
    # module global — calling this would raise NameError. Mirrors the
    # device selection used when flux_pipe itself was constructed.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(device=device).manual_seed(seed)

    image = flux_pipe(
        prompt=text,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=0.0  # schnell variant is distilled for CFG-free sampling
    ).images[0]
    print("DEBUG: Image generated successfully.")
    return image
|
| 161 |
# --------- End of MinDalle Functions ---------
|
| 162 |
# Merge audio files
|
| 163 |
|