Spaces:
Sleeping
Sleeping
Upload 5 files
Browse files — App for presentations (PowerPoint-like)
- ai_enhance.py +171 -0
- app.py +172 -0
- model_loader.py +29 -0
- presentation_generator.py +151 -0
- requirements.txt +11 -0
ai_enhance.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
import string
from typing import Dict, List, Any
from collections import Counter

# Project-local lazy loader for the shared summarization pipeline.
from model_loader import get_summarizer

# Module-level logger for this file.
logger = logging.getLogger(__name__)
|
| 8 |
+
|
| 9 |
+
class AIEnhance:
    """Text-analysis helpers for slide generation.

    Provides model-backed summarization (with a heuristic extractive
    fallback), keyword extraction, complexity scoring, and a recommended
    slide structure for a piece of input text.
    """

    def __init__(self):
        # Placeholders for future services; not used by any current method.
        self.sentence_model = None
        self.llm_service = None

        # Eagerly try to load the summarizer so the first request is fast;
        # degrade gracefully to the heuristic fallback when it fails.
        try:
            logger.info("📦 Chargement du modèle de résumé IA (summarizer)...")
            self.summarizer = get_summarizer()
            logger.info("✅ Modèle summarizer chargé avec succès.")
        except Exception as e:
            logger.warning(f"❌ Échec du chargement du summarizer : {e}")
            self.summarizer = None

        logger.info("✅ AIEnhance initialisé.")

    def smart_summarize(self, texte: str, max_length: int = 150) -> str:
        """Summarize *texte* with the transformer pipeline when available.

        Falls back to `_fallback_summary` when the model cannot be loaded,
        fails at inference time, or the input is too short (<= 200 chars)
        to be worth running through the model.

        BUGFIX: this method was previously defined twice in the class; the
        second definition silently shadowed the first. The two variants are
        merged here: use the instance's cached pipeline, lazily (re)loading
        it when the constructor's attempt failed.
        """
        summarizer = self.summarizer
        if summarizer is None:
            try:
                summarizer = get_summarizer()
                self.summarizer = summarizer  # cache for subsequent calls
            except Exception as e:
                logger.warning(f"❌ Chargement summarizer échoué : {e}")
                return self._fallback_summary(texte)

        if summarizer and len(texte) > 200:
            try:
                input_length = len(texte.split())
                summary_max = min(max_length, input_length // 2)
                # NOTE(review): for short inputs summary_min may exceed
                # summary_max; the pipeline then errors and we fall back.
                summary_min = max(50, summary_max // 3)

                summary = summarizer(
                    texte,
                    max_length=summary_max,
                    min_length=summary_min,
                    do_sample=False
                )
                return summary[0]['summary_text']
            except Exception as e:
                logger.warning(f"Résumé IA échoué, fallback: {e}")

        return self._fallback_summary(texte)

    def _fallback_summary(self, texte: str) -> str:
        """Heuristic extractive summary: first sentence plus up to two of the
        next three sentences that contain a top keyword."""
        sentences = [s.strip() for s in texte.split('.') if s.strip()]
        if len(sentences) <= 2:
            # Too short to shrink meaningfully; return the input unchanged.
            return texte

        important_sentences = [sentences[0]]
        keywords = self._extract_keywords(texte, 5)
        for sentence in sentences[1:4]:
            if any(keyword in sentence.lower() for keyword in keywords):
                important_sentences.append(sentence)

        return '. '.join(important_sentences[:3]) + '.'

    def _extract_keywords(self, texte: str, top_n: int = 10) -> List[str]:
        """Return the *top_n* most frequent meaningful words (len > 3, not in
        a small French stop-word list), most frequent first."""
        try:
            words = texte.translate(str.maketrans('', '', string.punctuation)).lower().split()
            stop_words = {'le', 'la', 'les', 'de', 'des', 'du', 'et', 'est', 'son', 'ses', 'dans', 'pour', 'par'}
            meaningful_words = [word for word in words if len(word) > 3 and word not in stop_words]
            word_freq = Counter(meaningful_words)
            return [word for word, count in word_freq.most_common(top_n)]
        except Exception as e:
            logger.warning(f"Erreur extraction keywords: {e}")
            return []

    def _analyze_complexity(self, texte: str) -> float:
        """Score text complexity in [0, 1] from average word length and
        lexical diversity (unique/total words). Empty input scores 0.0."""
        words = texte.split()
        if not words:
            return 0.0
        avg_word_length = sum(len(word) for word in words) / len(words)
        unique_words = len(set(words))
        lexical_diversity = unique_words / len(words)
        return min(1.0, (avg_word_length * 0.1 + lexical_diversity) / 2)

    def _suggest_structure(self, paragraphs: List[str], sentences: List[str]) -> Dict[str, Any]:
        """Build a slide plan: intro (first sentence), up to five content
        slides (one per paragraph), and a conclusion when there is more
        than one sentence. Duration estimate is 2 minutes per slide."""
        slides = []
        if sentences:
            slides.append({
                "title": "Introduction",
                "content": sentences[0][:100] + "..." if len(sentences[0]) > 100 else sentences[0],
                "type": "introduction"
            })

        for i, para in enumerate(paragraphs[:5]):
            slides.append({
                "title": f"Point {i+1}",
                "content": para[:150] + "..." if len(para) > 150 else para,
                "type": "content"
            })

        if len(sentences) > 1:
            slides.append({
                "title": "Conclusion",
                "content": "Synthèse des points principaux et perspectives",
                "type": "conclusion"
            })

        return {
            "total_slides": len(slides),
            "slides": slides,
            "recommended_style": "professionnel",
            "estimated_duration": len(slides) * 2
        }

    def _analyze_content_type(self, texte: str) -> str:
        """Classify the text by keyword presence into 'technique',
        'commercial', 'éducatif', or 'général' (checked in that order)."""
        texte_lower = texte.lower()
        keywords_technique = {'technique', 'technologie', 'code', 'programmation', 'algorithme', 'données'}
        keywords_commercial = {'vente', 'marketing', 'client', 'business', 'profit', 'stratégie'}
        keywords_educatif = {'apprentissage', 'éducation', 'enseignement', 'cours', 'étudiant'}

        if any(keyword in texte_lower for keyword in keywords_technique):
            return "technique"
        elif any(keyword in texte_lower for keyword in keywords_commercial):
            return "commercial"
        elif any(keyword in texte_lower for keyword in keywords_educatif):
            return "éducatif"
        else:
            return "général"

    def _basic_analysis(self, texte: str) -> Dict[str, Any]:
        """Cheap, model-free analysis: word/sentence/paragraph statistics
        plus a fixed default keyword list, slide structure, and content
        label. Used by the app as the primary analysis entry point."""
        words = texte.split()
        sentences = [s.strip() for s in texte.split('.') if s.strip()]

        return {
            "statistics": {
                "word_count": len(words),
                "sentence_count": len(sentences),
                "paragraph_count": len([p for p in texte.split('\n') if p.strip()]),
                "avg_sentence_length": len(words) / len(sentences) if sentences else 0,
                "complexity_score": 0.5
            },
            "keywords": ["analyse", "texte", "présentation"],
            "recommended_structure": {
                "total_slides": 4,
                "slides": [
                    {"title": "Introduction", "content": "Présentation du sujet", "type": "introduction"},
                    {"title": "Développement", "content": "Points principaux", "type": "content"},
                    {"title": "Exemples", "content": "Illustrations concrètes", "type": "content"},
                    {"title": "Conclusion", "content": "Synthèse et perspectives", "type": "conclusion"}
                ],
                "recommended_style": "professionnel",
                "estimated_duration": 8
            },
            "content_analysis": "général"
        }
|
app.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import os
|
| 3 |
+
import tempfile
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
import logging
|
| 6 |
+
import json
|
| 7 |
+
|
| 8 |
+
from ai_enhance import AIEnhance
|
| 9 |
+
from presentation_generator import PresentationGenerator
|
| 10 |
+
|
| 11 |
+
# Logging configuration (INFO level for the whole app).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Service initialisation: the AI analysis helper and the .pptx builder are
# created once at import time and shared by all Gradio callbacks.
ai_service = AIEnhance()
presentation_generator = PresentationGenerator()
|
| 18 |
+
|
| 19 |
+
def create_presentation_structure(texte):
    """Run the AI analysis over *texte* and return (structure, summary).

    The structure dict bundles the recommended slides, the top key points,
    a style recommendation, and the raw analysis metadata.

    NOTE(review): depends on AIEnhance's private `_basic_analysis` API.
    """
    analysis = ai_service._basic_analysis(texte)
    summary = ai_service.smart_summarize(texte)

    recommended = analysis["recommended_structure"]
    structure = {
        "title": f"Présentation: {analysis['content_analysis'].capitalize()}",
        "slides": recommended["slides"],
        "key_points": analysis["keywords"][:8],
        "style_recommendation": recommended["recommended_style"],
        "analysis_metadata": analysis,
    }
    return structure, summary
|
| 32 |
+
|
| 33 |
+
def generate_presentation_gradio(texte, style="professionnel"):
    """Gradio entry point: turn raw text into a downloadable .pptx file.

    Returns a (file_path, status_message) pair; file_path is None when the
    input is rejected or generation fails.
    """
    try:
        # Guard clause: refuse inputs too short to build slides from.
        if not texte or len(texte.strip()) < 50:
            return None, "❌ Veuillez entrer au moins 50 caractères."

        logger.info(f"🚀 Génération IA pour {len(texte)} caractères")

        structure, summary = create_presentation_structure(texte)
        filename = presentation_generator.generate_presentation(structure, style)

        # BUGFIX: removed dead code that computed a timestamped download
        # filename (`presentation_ia_<ts>.pptx`) but never used it.

        logger.info("✅ Présentation générée avec succès!")
        return filename, f"🎉 Présentation générée! ({len(structure['slides'])} slides)"

    except Exception as e:
        logger.error(f"❌ Erreur lors de la génération: {e}")
        return None, f"❌ Erreur: {str(e)}"
|
| 54 |
+
|
| 55 |
+
def analyze_text_gradio(texte):
    """Gradio-facing analysis: run the AI pipeline on *texte* and render a
    Markdown report (statistics, themes, summary, proposed slide outline).
    Returns an error string for inputs shorter than 10 characters or on
    any analysis failure.
    """
    try:
        if not texte or len(texte.strip()) < 10:
            return "❌ Texte trop court. Minimum 10 caractères."

        structure, summary = create_presentation_structure(texte)

        # Render the report; hoist the stats dict instead of repeating the
        # long nested lookup in every line.
        stats = structure['analysis_metadata']['statistics']
        report = f"""
## 📊 Analyse IA du Texte

**Statistiques:**
- {stats['word_count']} mots
- {stats['sentence_count']} phrases
- {stats['paragraph_count']} paragraphes

**🎯 Thèmes identifiés:**
{', '.join(structure['key_points'][:8])}

**📝 Résumé:**
{summary}

**🏗️ Structure proposée:**
"""
        outline = [
            f"\n{idx}. **{slide['title']}** - {slide['content'][:100]}..."
            for idx, slide in enumerate(structure['slides'], start=1)
        ]
        return report + "".join(outline)

    except Exception as e:
        return f"❌ Erreur d'analyse: {str(e)}"
|
| 88 |
+
|
| 89 |
+
def summarize_text_gradio(texte):
    """Gradio entry point for the standalone summary tab.

    Returns the summary as Markdown, or an error message string.
    """
    try:
        # BUGFIX: guard against texte being None — the previous bare
        # `texte.strip()` raised AttributeError, which was then surfaced as
        # a generic "❌ Erreur résumé" instead of the missing-text message.
        if not texte or not texte.strip():
            return "❌ Texte manquant"

        summary = ai_service.smart_summarize(texte)
        return f"**📄 Résumé:**\n\n{summary}"

    except Exception as e:
        return f"❌ Erreur résumé: {str(e)}"
|
| 101 |
+
|
| 102 |
+
# GRADIO INTERFACE (unchanged)
with gr.Blocks(theme=gr.themes.Soft(), title="Générateur de Présentation IA") as demo:
    gr.Markdown("# 🧠 Générateur de Présentation IA Intelligente")
    gr.Markdown("Powered by Lab_Math_and Labhp & CIE Label_Bertoua")

    # Tab 1: full text -> downloadable .pptx generation.
    with gr.Tab("🚀 Générer Présentation"):
        with gr.Row():
            with gr.Column():
                text_input = gr.Textbox(
                    label="📝 Collez votre texte ici",
                    placeholder="Collez ou tapez votre texte, article, rapport...",
                    lines=10,
                    max_lines=20
                )
                style_dropdown = gr.Dropdown(
                    choices=["professionnel", "moderne", "creatif"],
                    label="🎨 Style de présentation",
                    value="professionnel"
                )
                generate_btn = gr.Button("🚀 Générer la Présentation", variant="primary")

            with gr.Column():
                output_file = gr.File(label="📥 Présentation Générée")
                output_message = gr.Textbox(label="📋 Statut", interactive=False)

        # Wire the button to the generation callback.
        generate_btn.click(
            fn=generate_presentation_gradio,
            inputs=[text_input, style_dropdown],
            outputs=[output_file, output_message]
        )

    # Tab 2: AI analysis report rendered as Markdown.
    with gr.Tab("🔍 Analyser le Texte"):
        with gr.Row():
            with gr.Column():
                analyze_text_input = gr.Textbox(
                    label="📝 Texte à analyser",
                    placeholder="Collez votre texte pour l'analyse IA...",
                    lines=8
                )
                analyze_btn = gr.Button("🔍 Analyser avec IA", variant="secondary")

            with gr.Column():
                analysis_output = gr.Markdown(label="📊 Résultats de l'analyse")

        analyze_btn.click(
            fn=analyze_text_gradio,
            inputs=[analyze_text_input],
            outputs=[analysis_output]
        )

    # Tab 3: standalone summarisation.
    with gr.Tab("📄 Résumer le Texte"):
        with gr.Row():
            with gr.Column():
                summarize_text_input = gr.Textbox(
                    label="📝 Texte à résumer",
                    placeholder="Collez votre texte pour le résumé IA...",
                    lines=8
                )
                summarize_btn = gr.Button("📊 Générer Résumé", variant="secondary")

            with gr.Column():
                summary_output = gr.Markdown(label="📋 Résumé généré")

        summarize_btn.click(
            fn=summarize_text_gradio,
            inputs=[summarize_text_input],
            outputs=[summary_output]
        )

# Launch when run directly; 0.0.0.0:7860 is the standard HF Spaces binding.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
|
model_loader.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from transformers import pipeline
|
| 2 |
+
|
| 3 |
+
# Candidate summarization checkpoints, ordered by preference; the loader walks
# this list and keeps the first one that loads successfully.
MODEL_OPTIONS = [
    "Falconsai/text_summarization",  # original choice
    "facebook/bart-large-cnn",  # alternative 1
    "t5-small",  # alternative 2 (lightweight)
    "mrm8488/bert-mini-finetuned-cnn_daily_mail-summarization"  # alternative 3
]

# Lazily-initialised module-level singleton (see get_summarizer()).
summarizer = None


def load_model_with_fallback():
    """Try each checkpoint in MODEL_OPTIONS and return the first summarization
    pipeline that loads.

    Raises:
        RuntimeError: when none of the candidate models could be loaded.
        (Previously a bare `Exception`; RuntimeError is a subclass, so
        existing `except Exception` callers are unaffected.)
    """
    for model_name in MODEL_OPTIONS:
        try:
            print(f"Tentative de chargement: {model_name}")
            model = pipeline("summarization", model=model_name)
            print(f"Succès avec: {model_name}")
            return model
        except Exception as e:
            # Log the failure and fall through to the next candidate.
            print(f"Échec avec {model_name}: {e}")
    raise RuntimeError("Aucun modèle n'a pu être chargé")


def get_summarizer():
    """Return the process-wide summarization pipeline, loading it on first use."""
    global summarizer
    if summarizer is None:
        summarizer = load_model_with_fallback()
    return summarizer
|
presentation_generator.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pptx import Presentation
|
| 2 |
+
from pptx.util import Inches, Pt
|
| 3 |
+
from pptx.enum.text import PP_ALIGN
|
| 4 |
+
from pptx.dml.color import RGBColor
|
| 5 |
+
import tempfile
|
| 6 |
+
import logging
|
| 7 |
+
|
| 8 |
+
logger = logging.getLogger(__name__)
|
| 9 |
+
|
| 10 |
+
class PresentationGenerator:
    """Builds a styled PowerPoint (.pptx) file from an AI-generated
    presentation structure (title, slides, key points)."""

    def __init__(self):
        # Per-style colour/font palettes consumed by _apply_style().
        self.styles = {
            "professionnel": {
                "title_color": RGBColor(0, 0, 128),
                "text_color": RGBColor(0, 0, 0),
                "background_color": RGBColor(255, 255, 255),
                "font_name": "Calibri"
            },
            "moderne": {
                "title_color": RGBColor(220, 20, 60),
                "text_color": RGBColor(50, 50, 50),
                "background_color": RGBColor(240, 240, 240),
                "font_name": "Segoe UI"
            },
            "creatif": {
                "title_color": RGBColor(75, 0, 130),
                "text_color": RGBColor(0, 0, 0),
                "background_color": RGBColor(255, 250, 240),
                "font_name": "Arial"
            }
        }

    def generate_presentation(self, structure, style="professionnel"):
        """Generate a PowerPoint file from the AI structure.

        Args:
            structure: dict with 'title', 'slides' and optional 'key_points'.
            style: one of self.styles keys; unknown styles fall back to
                "professionnel".

        Returns:
            Path to the saved temporary .pptx file.

        Raises:
            Exception: re-raises any failure after logging it.
        """
        try:
            logger.info("📄 Initialisation de la présentation...")
            prs = Presentation()
            style_config = self.styles.get(style, self.styles["professionnel"])

            logger.info("🎯 Ajout de la slide de titre...")
            self._add_title_slide(prs, structure, style_config, style)

            logger.info("📄 Ajout des slides de contenu...")
            for i, slide_data in enumerate(structure.get('slides', [])):
                logger.info(f" ➕ Slide {i+1}: {slide_data.get('title', 'Sans titre')}")
                self._add_content_slide(prs, slide_data, style_config)

            if structure.get('key_points'):
                logger.info("🧠 Ajout des points clés...")
                self._add_keypoints_slide(prs, structure, style_config)

            logger.info("🏁 Ajout de la conclusion...")
            self._add_conclusion_slide(prs, style_config)

            logger.info("💾 Sauvegarde de la présentation...")
            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
            # Close the handle before saving so prs.save() can reopen the path
            # (writing to a still-open NamedTemporaryFile fails on Windows).
            temp_file.close()
            prs.save(temp_file.name)

            logger.info(f"✅ Présentation enregistrée dans : {temp_file.name}")
            return temp_file.name

        except Exception as e:
            logger.error(f"❌ Erreur génération présentation: {e}")
            raise

    def _add_title_slide(self, prs, structure, style_config, style_name="professionnel"):
        """Add the opening title slide.

        BUGFIX: the subtitle previously interpolated the whole style_config
        dict (f"Style: {style_config}"), rendering its repr on the slide; it
        now shows the human-readable style name.
        """
        slide_layout = prs.slide_layouts[0]
        slide = prs.slides.add_slide(slide_layout)
        title = slide.shapes.title
        subtitle = slide.placeholders[1]

        title.text = structure.get('title', 'Présentation Générée par IA')
        subtitle.text = f"Style: {style_name}\nGénéré intelligemment par IA"

    def _get_content_shape(self, slide):
        """Return the slide's body placeholder, or a manually created textbox
        when the layout has no placeholder 1.

        Extracted helper: this fallback was duplicated verbatim in the
        content, key-points and conclusion slide builders. Catches KeyError
        as well as IndexError because python-pptx placeholder lookup raises
        KeyError for a missing idx.
        """
        try:
            return slide.placeholders[1]
        except (IndexError, KeyError):
            logger.warning("⚠️ Placeholder 1 non trouvé. Création d'une textbox manuelle.")
            shape = slide.shapes.add_textbox(Inches(1), Inches(2), Inches(8), Inches(5))
            shape.text_frame.text = ""
            return shape

    def _add_content_slide(self, prs, slide_data, style_config):
        """Add one standard title+body slide from a structure entry."""
        slide = prs.slides.add_slide(prs.slide_layouts[1])
        title_shape = slide.shapes.title
        content_shape = self._get_content_shape(slide)

        title_shape.text = slide_data.get('title', 'Slide')
        content_shape.text = slide_data.get('content', '')

        self._apply_style(title_shape, style_config, is_title=True)
        self._apply_style(content_shape, style_config, is_title=False)

    def _add_keypoints_slide(self, prs, structure, style_config):
        """Add the bullet-list slide of AI-identified key points."""
        slide = prs.slides.add_slide(prs.slide_layouts[1])
        title_shape = slide.shapes.title
        content_shape = self._get_content_shape(slide)

        title_shape.text = "Points Clés Identifiés par IA"
        content_shape.text = "\n".join([f"• {point}" for point in structure['key_points']])

        self._apply_style(title_shape, style_config, is_title=True)
        self._apply_style(content_shape, style_config, is_title=False)

    def _add_conclusion_slide(self, prs, style_config):
        """Add the fixed-text closing slide."""
        slide = prs.slides.add_slide(prs.slide_layouts[1])
        title_shape = slide.shapes.title
        content_shape = self._get_content_shape(slide)

        title_shape.text = "Conclusion"
        content_shape.text = "Présentation générée automatiquement avec IA\nAnalyse sémantique avancée\nMerci pour votre attention !"

        self._apply_style(title_shape, style_config, is_title=True)
        self._apply_style(content_shape, style_config, is_title=False)

    def _apply_style(self, shape, style_config, is_title=True):
        """Apply font name, colour, size and weight from style_config to every
        run in the shape; silently skips shapes without a text frame."""
        if not hasattr(shape, "text_frame") or shape.text_frame is None:
            logger.warning("⚠️ Shape sans text_frame, style non appliqué.")
            return

        try:
            for paragraph in shape.text_frame.paragraphs:
                for run in paragraph.runs:
                    if run.font:
                        run.font.name = style_config["font_name"]
                        run.font.color.rgb = (
                            style_config["title_color"] if is_title else style_config["text_color"]
                        )
                        run.font.size = Pt(32 if is_title else 18)
                        run.font.bold = is_title
        except Exception as e:
            logger.warning(f"❌ Échec de l'application du style: {e}")
            # BUGFIX: removed a stray `print("présentation générée")` that
            # announced success from inside this failure handler.
|
requirements.txt
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio==4.19.1
|
| 2 |
+
flask==2.3.3
|
| 3 |
+
python-pptx==0.6.21
|
| 4 |
+
transformers==4.30.2
|
| 5 |
+
torch==2.0.1
|
| 6 |
+
numpy==1.24.3
|
| 7 |
+
scikit-learn==1.2.2
|
| 8 |
+
sentence-transformers==2.2.2
|
| 9 |
+
nltk==3.8.1
|
| 10 |
+
requests==2.28.2
|
| 11 |
+
python-dotenv==1.0.0
|