Update app.py
Browse files
app.py
CHANGED
|
@@ -11,6 +11,9 @@ import time
|
|
| 11 |
from PIL import Image
|
| 12 |
import io
|
| 13 |
import requests
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
# Configuration des modèles disponibles
|
| 16 |
TEXT_MODELS = {
|
|
@@ -90,14 +93,34 @@ class PresentationGenerator:
|
|
| 90 |
token=self.token
|
| 91 |
)
|
| 92 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
def load_image_model(self, model_name):
|
| 94 |
"""Charge le modèle de génération d'images"""
|
| 95 |
model_id = IMAGE_MODELS[model_name]
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 101 |
|
| 102 |
def generate_text(self, prompt, temperature=0.7, max_tokens=4096):
|
| 103 |
"""Génère le texte de la présentation"""
|
|
@@ -122,18 +145,45 @@ class PresentationGenerator:
|
|
| 122 |
)
|
| 123 |
return self.text_tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 124 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 125 |
def generate_image(self, prompt, negative_prompt="", num_inference_steps=30):
|
| 126 |
-
"""Génère une image pour la diapositive"""
|
| 127 |
-
try:
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 137 |
|
| 138 |
def parse_presentation_content(self, content):
|
| 139 |
"""Parse le contenu généré en sections pour les diapositives"""
|
|
@@ -379,5 +429,4 @@ with gr.Blocks(theme=gr.themes.Default(), css=css) as demo:
|
|
| 379 |
)
|
| 380 |
|
| 381 |
if __name__ == "__main__":
|
| 382 |
-
demo.launch()
|
| 383 |
-
|
|
|
|
| 11 |
from PIL import Image
|
| 12 |
import io
|
| 13 |
import requests
|
| 14 |
+
from diffusers import FluxPipeline
|
| 15 |
+
|
| 16 |
+
|
| 17 |
|
| 18 |
# Configuration des modèles disponibles
|
| 19 |
TEXT_MODELS = {
|
|
|
|
| 93 |
token=self.token
|
| 94 |
)
|
| 95 |
|
| 96 |
+
##Modif01 : Correction Pour Flux Non Chargé sur HFSpace
def load_image_model(self, model_name):
    """Load the image-generation pipeline for *model_name*.

    Looks up the Hugging Face model id in the module-level IMAGE_MODELS
    mapping and stores the loaded pipeline on ``self.image_pipeline``.

    FLUX.1-schnell cannot be loaded through the generic transformers
    ``pipeline`` helper, so it gets a dedicated diffusers ``FluxPipeline``
    (Modif01: fix for FLUX not loading on the HF Space).

    Args:
        model_name: key into the IMAGE_MODELS configuration dict.
    """
    model_id = IMAGE_MODELS[model_name]
    if model_id == "black-forest-labs/FLUX.1-schnell":
        # Diffusers-native loading; bfloat16 halves the weight footprint.
        self.image_pipeline = FluxPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.bfloat16,
        )
        # Saves VRAM by offloading idle sub-modules to the CPU.
        self.image_pipeline.enable_model_cpu_offload()
        print(f"Modèle d'image FLUX chargé : {model_id}")
    else:
        # NOTE(review): transformers' pipeline() does not define a
        # "text-to-image" task — confirm this branch is actually
        # reachable with one of the configured IMAGE_MODELS.
        self.image_pipeline = pipeline(
            "text-to-image",
            model=model_id,
            token=self.token,
        )
        print(f"Modèle d'image chargé : {model_id}")
|
| 123 |
+
|
| 124 |
|
| 125 |
def generate_text(self, prompt, temperature=0.7, max_tokens=4096):
|
| 126 |
"""Génère le texte de la présentation"""
|
|
|
|
| 145 |
)
|
| 146 |
return self.text_tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 147 |
|
| 148 |
+
##Modif01 : Correction Pour Flux Non Chargé sur HFSpace
def generate_image(self, prompt, negative_prompt="", num_inference_steps=30):
    """Generate one image for a slide.

    Dispatches on the type of ``self.image_pipeline``: the diffusers
    ``FluxPipeline`` takes FLUX-specific arguments, any other pipeline is
    called with the generic text-to-image signature.

    Args:
        prompt: text description of the desired image.
        negative_prompt: things to avoid (ignored by the FLUX branch,
            which does guidance-free sampling).
        num_inference_steps: number of denoising steps.

    Returns:
        The generated image, or ``None`` if generation failed.
    """
    try:
        if isinstance(self.image_pipeline, FluxPipeline):
            # FLUX.1-schnell is distilled for guidance-free sampling
            # (guidance_scale=0.0) and short prompts (max 256 tokens).
            # NOTE(review): the hard-coded manual_seed(0) makes output
            # deterministic — identical prompts always yield identical
            # images. Confirm this is intended.
            image = self.image_pipeline(
                prompt=prompt,
                guidance_scale=0.0,
                num_inference_steps=num_inference_steps,
                max_sequence_length=256,
                generator=torch.Generator("cpu").manual_seed(0),
            ).images[0]
        else:
            image = self.image_pipeline(
                prompt=prompt,
                negative_prompt=negative_prompt,
                num_inference_steps=num_inference_steps,
            )[0]  # the pipeline returns a list of images; take the first
        return image
    except Exception as e:
        # Best-effort: log and return None so one failed image does not
        # abort the whole presentation build.
        print(f"Erreur lors de la génération de l'image: {str(e)}")
        return None
|
| 185 |
+
|
| 186 |
+
|
| 187 |
|
| 188 |
def parse_presentation_content(self, content):
|
| 189 |
"""Parse le contenu généré en sections pour les diapositives"""
|
|
|
|
| 429 |
)
|
| 430 |
|
| 431 |
# Script entry point: start the Gradio app only when run directly,
# not when app.py is imported as a module.
if __name__ == "__main__":
    demo.launch()
|
|
|