Spaces:
Sleeping
Sleeping
Commit ·
23f994a
1
Parent(s): bc00e0f
Ajout de l'API JANGG complète avec génération de cours, quiz, audio et vidéo
Browse files
- app/main.py +28 -1
- app/prompts/complete_course_prompt.py +47 -0
- app/routers/integrated.py +130 -0
- app/routers/video.py +72 -0
- app/schemas/complete_course.py +25 -0
- app/schemas/video.py +22 -0
- app/services/learning_agent.py +70 -1
- app/services/video_service.py +123 -0
- app/untitled0.py +160 -0
- requirements.txt +27 -5
- test_api.py +88 -0
- total_api.py +702 -0
- total_api_standalone.py +175 -0
app/main.py
CHANGED
|
@@ -2,7 +2,7 @@ from fastapi import FastAPI
|
|
| 2 |
from fastapi.staticfiles import StaticFiles
|
| 3 |
from fastapi.responses import HTMLResponse
|
| 4 |
|
| 5 |
-
from app.routers import chat, quiz, audio
|
| 6 |
|
| 7 |
app = FastAPI(title="JANGG AI API", description="Intelligent API for interactive learning with AI")
|
| 8 |
|
|
@@ -11,6 +11,8 @@ app.mount("/static", StaticFiles(directory="app/static"), name="static")
|
|
| 11 |
app.include_router(chat.router)
|
| 12 |
app.include_router(quiz.router)
|
| 13 |
app.include_router(audio.router)
|
|
|
|
|
|
|
| 14 |
|
| 15 |
@app.get("/", response_class=HTMLResponse)
|
| 16 |
async def root():
|
|
@@ -72,6 +74,31 @@ async def root():
|
|
| 72 |
<div class="description">Convert speech to text</div>
|
| 73 |
</div>
|
| 74 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
<div style="text-align: center; margin-top: 30px;">
|
| 76 |
<a href="/docs" class="docs-link">📖 Interactive Documentation (Swagger)</a>
|
| 77 |
<br><br>
|
|
|
|
| 2 |
from fastapi.staticfiles import StaticFiles
|
| 3 |
from fastapi.responses import HTMLResponse
|
| 4 |
|
| 5 |
+
from app.routers import chat, quiz, audio, video, integrated
|
| 6 |
|
| 7 |
app = FastAPI(title="JANGG AI API", description="Intelligent API for interactive learning with AI")
|
| 8 |
|
|
|
|
| 11 |
app.include_router(chat.router)
|
| 12 |
app.include_router(quiz.router)
|
| 13 |
app.include_router(audio.router)
|
| 14 |
+
app.include_router(video.router)
|
| 15 |
+
app.include_router(integrated.router)
|
| 16 |
|
| 17 |
@app.get("/", response_class=HTMLResponse)
|
| 18 |
async def root():
|
|
|
|
| 74 |
<div class="description">Convert speech to text</div>
|
| 75 |
</div>
|
| 76 |
|
| 77 |
+
<div class="endpoint">
|
| 78 |
+
<span class="method">POST</span> <span class="path">/video/generate</span>
|
| 79 |
+
<div class="description">Generate course videos with AI images and text-to-speech</div>
|
| 80 |
+
</div>
|
| 81 |
+
|
| 82 |
+
<div class="endpoint">
|
| 83 |
+
<span class="method">GET</span> <span class="path">/video/health</span>
|
| 84 |
+
<div class="description">Check video generation service status</div>
|
| 85 |
+
</div>
|
| 86 |
+
|
| 87 |
+
<div class="endpoint">
|
| 88 |
+
<span class="method">POST</span> <span class="path">/integrated/complete-course</span>
|
| 89 |
+
<div class="description">Generate complete learning package (course + quiz + video scenes) in one LLM call</div>
|
| 90 |
+
</div>
|
| 91 |
+
|
| 92 |
+
<div class="endpoint">
|
| 93 |
+
<span class="method">POST</span> <span class="path">/integrated/video-from-scenes</span>
|
| 94 |
+
<div class="description">Generate video from pre-generated scenes</div>
|
| 95 |
+
</div>
|
| 96 |
+
|
| 97 |
+
<div class="endpoint">
|
| 98 |
+
<span class="method">POST</span> <span class="path">/integrated/full-pipeline</span>
|
| 99 |
+
<div class="description">Complete pipeline: generate content AND create video in one call</div>
|
| 100 |
+
</div>
|
| 101 |
+
|
| 102 |
<div style="text-align: center; margin-top: 30px;">
|
| 103 |
<a href="/docs" class="docs-link">📖 Interactive Documentation (Swagger)</a>
|
| 104 |
<br><br>
|
app/prompts/complete_course_prompt.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Single-shot prompt: asks the LLM for course text, quiz questions and
# video-ready scenes in one JSON payload so all three stay consistent.
COMPLETE_COURSE_TEMPLATE = """
You are a friendly AI tutor and content creator.

Generate a complete learning package about {topic} for the {sector} sector.

Parameters:
- Tone: {tone}
- Style: {style}
- Length: {length}
- Language: French

Requirements:
1. Generate engaging course content
2. Create relevant quiz questions
3. Structure content for video generation with scenes

Format your response as JSON:

{{
    "course": "Complete course content with clear sections and examples",
    "quiz": [
        {{
            "question": "Clear question about the content",
            "options": ["Option A", "Option B", "Option C", "Option D"],
            "answer": 2
        }}
    ],
    "video_scenes": [
        {{
            "title": "Scene title",
            "content": "Narration text for this scene (2-3 sentences)",
            "duration": 8,
            "visual_prompt": "Visual description for AI image generation"
        }}
    ]
}}

Rules:
- Course: Clear, structured, beginner-friendly with practical examples
- Quiz: 5 questions testing key concepts
- Video Scenes: 5-8 scenes covering the main topics
- Each scene content should be speakable (natural for text-to-speech)
- Visual prompts should be descriptive for image generation
- Duration per scene: 5-12 seconds based on content length

Return only valid JSON.
"""
|
app/routers/integrated.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, HTTPException
|
| 2 |
+
from app.schemas.chat import LearnRequest
|
| 3 |
+
from app.schemas.complete_course import CompleteCourseResponse, VideoFromScenesRequest
|
| 4 |
+
from app.services.learning_agent import generate_complete_learning_package
|
| 5 |
+
from app.services.video_service import video_service
|
| 6 |
+
import time
|
| 7 |
+
|
| 8 |
+
router = APIRouter(prefix="/integrated", tags=["Integrated Learning"])
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@router.post("/complete-course", response_model=CompleteCourseResponse)
def generate_complete_learning_experience(req: LearnRequest):
    """
    Generate a complete learning package in a single LLM inference.

    Produces, in one coherent LLM call:
      1. Course content
      2. Quiz questions
      3. Video-ready scenes with visual prompts

    Raises:
        HTTPException: 500 when content generation fails.
    """
    try:
        # One unified inference keeps course, quiz and scenes consistent.
        # (Dead timing code removed: processing_time was computed but never
        # returned — CompleteCourseResponse has no such field.)
        complete_data = generate_complete_learning_package(req.dict())

        return CompleteCourseResponse(
            course=complete_data["course"],
            quiz=complete_data["quiz"],
            video_scenes=complete_data["video_scenes"],
            message="Complete learning package generated successfully",
            generation_method="unified_llm",
        )
    except HTTPException:
        # Don't re-wrap deliberately raised HTTP errors into a generic 500.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Complete course generation failed: {str(e)}"
        )
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@router.post("/video-from-scenes")
def generate_video_from_scenes(req: VideoFromScenesRequest):
    """
    Generate a video from pre-generated scenes.

    Use this with scenes from /integrated/complete-course for optimal results.

    Returns:
        dict with video_path, scenes_used, processing_time and a status message.

    Raises:
        HTTPException: 500 when video generation fails.
    """
    try:
        start_time = time.time()

        # Convert the pydantic scene models to the plain-dict format the
        # video service expects (visual_prompt is not consumed downstream).
        video_data = {
            "topic": req.topic,
            "style": req.style,
            "tone": req.tone,
            "language": req.language,
            "scenes": [
                {
                    "title": scene.title,
                    "content": scene.content,
                    "duration": scene.duration,
                }
                for scene in req.scenes
            ],
        }

        video_path = video_service.generate_course_video(video_data)

        processing_time = time.time() - start_time

        return {
            "video_path": video_path,
            "scenes_used": len(req.scenes),
            "processing_time": processing_time,
            "message": "Video generated from scenes successfully",
        }
    except HTTPException:
        # Preserve deliberately raised HTTP errors instead of re-wrapping.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Video from scenes generation failed: {str(e)}"
        )
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
@router.post("/full-pipeline")
def generate_full_learning_pipeline(req: LearnRequest):
    """
    Complete pipeline: generate content AND create video in one call.

    Step 1 produces the course/quiz/scenes package in a single LLM call;
    step 2 renders a video from those scenes.

    Raises:
        HTTPException: 500 when either step fails.
    """
    try:
        start_time = time.time()

        # Step 1: generate the complete content package.
        complete_data = generate_complete_learning_package(req.dict())

        # Step 2: render the video from the generated scenes.
        video_data = {
            "topic": req.topic,
            "style": req.style,
            "tone": req.tone,
            "language": "fr",  # the prompt generates French content
            "scenes": complete_data["video_scenes"],
        }

        video_path = video_service.generate_course_video(video_data)

        total_time = time.time() - start_time

        return {
            "course": complete_data["course"],
            "quiz": complete_data["quiz"],
            "video_scenes": complete_data["video_scenes"],
            "video_path": video_path,
            "total_processing_time": total_time,
            "generation_method": "unified_llm_plus_video",
            "message": "Full learning pipeline completed successfully",
        }
    except HTTPException:
        # Preserve deliberately raised HTTP errors instead of re-wrapping.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Full pipeline generation failed: {str(e)}"
        )
|
app/routers/video.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
from fastapi import APIRouter, HTTPException
|
| 3 |
+
from app.schemas.video import VideoRequest, VideoResponse
|
| 4 |
+
from app.services.video_service import video_service
|
| 5 |
+
|
| 6 |
+
router = APIRouter(prefix="/video", tags=["Video"])
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@router.post("/generate", response_model=VideoResponse)
def generate_course_video(request: VideoRequest):
    """
    Generate a course video based on the provided scenes and settings.

    This endpoint creates a video by:
    1. Generating images for each scene using Stable Diffusion
    2. Converting text to speech using gTTS
    3. Creating video segments with FFmpeg
    4. Concatenating all segments into a final video

    Raises:
        HTTPException: 500 when video generation fails.
    """
    try:
        start_time = time.time()

        # Flatten the request into the plain-dict shape the service expects.
        course_data = {
            "topic": request.topic,
            "style": request.style,
            "tone": request.tone,
            "language": request.language,
            "scenes": [
                {
                    "title": scene.title,
                    "content": scene.content,
                    "duration": scene.duration,
                }
                for scene in request.scenes
            ],
        }

        video_path = video_service.generate_course_video(course_data)

        processing_time = time.time() - start_time

        return VideoResponse(
            video_path=video_path,
            message="Video generated successfully",
            processing_time=processing_time,
        )
    except HTTPException:
        # Preserve deliberately raised HTTP errors instead of re-wrapping.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Video generation failed: {str(e)}"
        )
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@router.get("/health")
def video_health_check():
    """Report whether the video generation service (image model) is available."""
    try:
        # A loaded Stable Diffusion pipeline is the service's readiness signal.
        model_loaded = video_service.pipe is not None
        status = "healthy" if model_loaded else "degraded"
        return {
            "status": status,
            "model_loaded": model_loaded,
            "service": "video_generation",
        }
    except Exception as e:
        raise HTTPException(
            status_code=503,
            detail=f"Video service unavailable: {str(e)}"
        )
|
app/schemas/complete_course.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field
|
| 2 |
+
from typing import List, Dict
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class VideoScene(BaseModel):
    """One narrated scene of a generated course video."""

    title: str = Field(..., description="Title of the video scene")
    content: str = Field(..., description="Narration text for the scene")
    duration: int = Field(..., description="Duration in seconds")
    visual_prompt: str = Field(..., description="Visual description for AI image generation")
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class CompleteCourseResponse(BaseModel):
    """Full learning package returned by /integrated/complete-course."""

    course: str = Field(..., description="Complete course content")
    quiz: List[Dict] = Field(..., description="Quiz questions with options and answers")
    video_scenes: List[VideoScene] = Field(..., description="Scenes ready for video generation")
    message: str = Field(default="Complete learning package generated successfully")
    generation_method: str = Field(default="unified", description="How the content was generated")
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class VideoFromScenesRequest(BaseModel):
    """Request body for rendering a video from already-generated scenes."""

    topic: str = Field(..., description="Topic of the course")
    style: str = Field(default="cartoon", description="Visual style")
    tone: str = Field(default="educational", description="Content tone")
    language: str = Field(default="fr", description="Language for audio")
    scenes: List[VideoScene] = Field(..., description="Pre-generated scenes")
|
app/schemas/video.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class Scene(BaseModel):
    """A single scene (title + narration + target length) of a video request."""

    title: str = Field(..., description="Title of the scene")
    content: str = Field(..., description="Content/narration for the scene")
    duration: int = Field(..., description="Duration in seconds")
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class VideoRequest(BaseModel):
    """Request body for the /video/generate endpoint."""

    topic: str = Field(..., description="Topic of the course")
    style: str = Field(default="cartoon", description="Visual style for video generation")
    tone: str = Field(default="fun", description="Tone of the content")
    language: str = Field(default="fr", description="Language for audio generation")
    scenes: List[Scene] = Field(..., description="List of scenes for the video")
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class VideoResponse(BaseModel):
    """Response payload describing a finished video generation run."""

    video_path: str = Field(..., description="Path to the generated video file")
    message: str = Field(..., description="Status message")
    processing_time: Optional[float] = Field(None, description="Total processing time in seconds")
|
app/services/learning_agent.py
CHANGED
|
@@ -2,12 +2,13 @@ from langchain_core.prompts import PromptTemplate
|
|
| 2 |
from app.core.llm import llm
|
| 3 |
from app.prompts.course_prompt import COURSE_TEMPLATE
|
| 4 |
from app.prompts.quiz_prompt import QUIZ_TEMPLATE
|
|
|
|
| 5 |
import json
|
| 6 |
import re
|
| 7 |
|
| 8 |
|
| 9 |
def generate_course_and_quiz(params):
|
| 10 |
-
|
| 11 |
course_prompt = PromptTemplate.from_template(COURSE_TEMPLATE)
|
| 12 |
|
| 13 |
course = llm.invoke(
|
|
@@ -36,3 +37,71 @@ def generate_course_and_quiz(params):
|
|
| 36 |
]
|
| 37 |
|
| 38 |
return course, quiz
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
from app.core.llm import llm
|
| 3 |
from app.prompts.course_prompt import COURSE_TEMPLATE
|
| 4 |
from app.prompts.quiz_prompt import QUIZ_TEMPLATE
|
| 5 |
+
from app.prompts.complete_course_prompt import COMPLETE_COURSE_TEMPLATE
|
| 6 |
import json
|
| 7 |
import re
|
| 8 |
|
| 9 |
|
| 10 |
def generate_course_and_quiz(params):
|
| 11 |
+
"""Legacy function - generates course and quiz separately"""
|
| 12 |
course_prompt = PromptTemplate.from_template(COURSE_TEMPLATE)
|
| 13 |
|
| 14 |
course = llm.invoke(
|
|
|
|
| 37 |
]
|
| 38 |
|
| 39 |
return course, quiz
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def generate_complete_learning_package(params):
    """
    Generate course, quiz, and video-ready scenes in a single LLM inference.

    Args:
        params: dict of prompt-template parameters (topic, sector, tone,
            style, length).

    Returns:
        dict: {
            "course": str,
            "quiz": list,
            "video_scenes": list
        }
    """
    complete_prompt = PromptTemplate.from_template(COMPLETE_COURSE_TEMPLATE)

    response = llm.invoke(
        complete_prompt.format(**params)
    ).content

    # The model is asked for bare JSON but may wrap it in prose; grab the
    # outermost {...} span.
    json_match = re.search(r'\{.*\}', response, re.DOTALL)
    if not json_match:
        print("No JSON found in LLM response")
        return _fallback_generation(params)

    try:
        complete_data = json.loads(json_match.group(0))
    except json.JSONDecodeError as e:
        print(f"JSON parsing error: {e}")
        return _fallback_generation(params)

    # Validate structure. Fix: a payload with missing keys previously raised
    # ValueError out of this function instead of using the fallback path,
    # unlike malformed JSON which did fall back — now both degrade gracefully.
    required_keys = ("course", "quiz", "video_scenes")
    if all(key in complete_data for key in required_keys):
        return complete_data

    print("Missing required keys in LLM response")
    return _fallback_generation(params)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _fallback_generation(params):
    """Fallback: build the package from separate course/quiz generation,
    deriving rough video scenes by splitting the course text into sections."""
    print("Falling back to separate generation...")
    course, quiz = generate_course_and_quiz(params)

    # Split on numbered headings ("1. ...") or blank lines, drop empties.
    raw_sections = re.split(r'\n\d+\.\s*|\n\n', course)
    sections = [part.strip() for part in raw_sections if part.strip()]

    style = params.get('style', 'cartoon')
    scenes = []
    for section in sections[:8]:  # cap at 8 scenes, matching the prompt's rule
        sentences = section.split('.')
        title = sentences[0].strip() if sentences else section[:50]
        # ~0.5s per word, clamped to a 5-15 second window.
        duration = max(5, min(15, len(section.split()) * 0.5))
        scenes.append({
            "title": title,
            "content": section,
            "duration": int(duration),
            "visual_prompt": f"{style}, {title}, educational content",
        })

    return {
        "course": course,
        "quiz": quiz,
        "video_scenes": scenes,
    }
|
app/services/video_service.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import uuid
|
| 3 |
+
import subprocess
|
| 4 |
+
import time
|
| 5 |
+
from typing import Dict
|
| 6 |
+
from gtts import gTTS
|
| 7 |
+
from pydub import AudioSegment
|
| 8 |
+
import torch
|
| 9 |
+
from diffusers import StableDiffusionPipeline
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class VideoService:
    """Create course videos: Stable Diffusion images + gTTS narration per
    scene, assembled into a single MP4 with FFmpeg."""

    def __init__(self):
        # None means the image model is unavailable; /video/health reports
        # that state as "degraded".
        self.pipe = None
        self._load_model()

    def _load_model(self):
        """Load Stable Diffusion once; try GPU, then CPU, else stay degraded."""
        try:
            self.pipe = StableDiffusionPipeline.from_pretrained(
                "runwayml/stable-diffusion-v1-5",
                torch_dtype=torch.float16
            ).to("cuda")
        except Exception as e:
            print(f"Warning: Could not load GPU model, falling back to CPU: {e}")
            try:
                self.pipe = StableDiffusionPipeline.from_pretrained(
                    "runwayml/stable-diffusion-v1-5"
                )
            except Exception as e:
                # Fix: previously an exception here propagated out of the
                # module-level VideoService() instantiation and crashed the
                # whole app at import time. Leave pipe as None instead so the
                # health endpoint can report "degraded".
                print(f"Warning: Could not load CPU model either: {e}")
                self.pipe = None

    def _ensure_directories(self):
        """Create working and output directories if they don't exist."""
        os.makedirs("tmp/images", exist_ok=True)
        os.makedirs("tmp/audio", exist_ok=True)
        os.makedirs("output/videos", exist_ok=True)

    def _generate_image(self, prompt: str, idx: int) -> str:
        """Generate one scene image and return its file path."""
        if self.pipe is None:
            # Explicit message instead of an opaque NoneType call error.
            raise RuntimeError("Image model not loaded; video generation unavailable")
        image = self.pipe(prompt, num_inference_steps=20).images[0]
        img_path = f"tmp/images/scene_{idx}.png"
        image.save(img_path)
        return img_path

    def _generate_audio(self, text: str, language: str, idx: int) -> tuple:
        """Synthesize narration with gTTS; return (wav_path, duration_seconds)."""
        audio_mp3 = f"tmp/audio/scene_{idx}.mp3"
        tts = gTTS(text=text, lang=language)
        tts.save(audio_mp3)

        # Convert to wav for reliable downstream muxing.
        audio_wav = audio_mp3.replace(".mp3", ".wav")
        subprocess.run(["ffmpeg", "-y", "-i", audio_mp3, audio_wav], check=True)

        # The exact narration length drives the video segment duration.
        audio_duration = AudioSegment.from_file(audio_wav).duration_seconds

        return audio_wav, audio_duration

    def _create_video_segment(self, img_path: str, audio_wav: str, duration: float, idx: int) -> str:
        """Build one still-image-plus-audio MP4 segment for a scene."""
        segment_path = f"tmp/segment_{idx}.mp4"
        subprocess.run([
            "ffmpeg", "-y",
            "-loop", "1", "-i", img_path,
            "-i", audio_wav,
            "-c:v", "libx264",
            "-t", str(duration),
            "-pix_fmt", "yuv420p",
            "-c:a", "aac",
            segment_path
        ], check=True)
        return segment_path

    def _concatenate_segments(self, segments: list) -> str:
        """Concatenate segments (FFmpeg concat demuxer) into the final video."""
        concat_file = "tmp/segments.txt"
        with open(concat_file, "w") as f:
            for seg in segments:
                f.write(f"file '{os.path.abspath(seg)}'\n")

        output_video = f"output/videos/course_{uuid.uuid4().hex}.mp4"
        subprocess.run([
            "ffmpeg", "-y", "-f", "concat", "-safe", "0",
            "-i", concat_file,
            "-c", "copy",
            output_video
        ], check=True)

        return output_video

    def generate_course_video(self, course_data: Dict) -> str:
        """Generate a complete course video from course data.

        Args:
            course_data: dict with "scenes" (each having "title" and
                "content"), plus optional "style" and "language".

        Returns:
            Path to the generated MP4 file.
        """
        start_time = time.time()

        self._ensure_directories()
        segments = []

        for idx, scene in enumerate(course_data["scenes"]):
            # Image prompt combines the visual style with the scene text.
            prompt = f"{course_data.get('style', 'cartoon')}, {scene['title']}, {scene['content']}"
            img_path = self._generate_image(prompt, idx)

            # Segment length follows the real audio duration, not the
            # scene's declared "duration" field.
            audio_wav, audio_duration = self._generate_audio(
                scene["content"],
                course_data.get("language", "fr"),
                idx
            )

            segment_path = self._create_video_segment(img_path, audio_wav, audio_duration, idx)
            segments.append(segment_path)

        output_video = self._concatenate_segments(segments)

        processing_time = time.time() - start_time
        print(f"Video generated in {processing_time:.2f} seconds")

        return output_video


# Global instance shared by the routers (model loads once at import).
video_service = VideoService()
|
app/untitled0.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""Untitled0.ipynb
|
| 3 |
+
|
| 4 |
+
Automatically generated by Colab.
|
| 5 |
+
|
| 6 |
+
Original file is located at
|
| 7 |
+
https://colab.research.google.com/drive/1PI3sFVWthKkbYWJSEs4RblwLkq5Q46Uw
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
!pip install pydub
|
| 11 |
+
|
| 12 |
+
!apt-get update -qq
|
| 13 |
+
!apt-get install -y ffmpeg
|
| 14 |
+
!pip install diffusers transformers accelerate scipy
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
import os
|
| 18 |
+
from diffusers import StableDiffusionPipeline
|
| 19 |
+
import subprocess
|
| 20 |
+
|
| 21 |
+
os.makedirs("images", exist_ok=True)
|
| 22 |
+
os.makedirs("output", exist_ok=True)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
course_data = {
|
| 27 |
+
"topic": "Finance personnelle",
|
| 28 |
+
"style": "cartoon",
|
| 29 |
+
"tone": "fun",
|
| 30 |
+
"language": "fr",
|
| 31 |
+
"scenes": [
|
| 32 |
+
{
|
| 33 |
+
"title": "Bienvenue dans le monde de la finance",
|
| 34 |
+
"content": "La finance personnelle, c’est apprendre à gérer ton argent intelligemment, même si tu débutes.",
|
| 35 |
+
"duration": 8
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"title": "Pourquoi l’argent est important",
|
| 39 |
+
"content": "L’argent te permet de payer tes besoins, réaliser tes projets et sécuriser ton avenir.",
|
| 40 |
+
"duration": 8
|
| 41 |
+
},
|
| 42 |
+
{
|
| 43 |
+
"title": "Revenus et dépenses",
|
| 44 |
+
"content": "Tes revenus sont l’argent que tu gagnes, et tes dépenses sont l’argent que tu dépenses chaque jour.",
|
| 45 |
+
"duration": 8
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"title": "Épargner, même un peu",
|
| 49 |
+
"content": "Mettre de côté une petite somme régulièrement t’aide à faire face aux imprévus.",
|
| 50 |
+
"duration": 8
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"title": "Éviter les dettes inutiles",
|
| 54 |
+
"content": "Emprunter peut être utile, mais trop de dettes peuvent devenir un problème.",
|
| 55 |
+
"duration": 8
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"title": "Investir pour le futur",
|
| 59 |
+
"content": "Investir, c’est utiliser ton argent pour en gagner davantage avec le temps.",
|
| 60 |
+
"duration": 8
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"title": "Objectifs financiers",
|
| 64 |
+
"content": "Fixe-toi des objectifs simples comme acheter quelque chose ou économiser pour un projet.",
|
| 65 |
+
"duration": 8
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"title": "Bravo, tu progresses",
|
| 69 |
+
"content": "Félicitations ! Tu viens de faire tes premiers pas vers une meilleure gestion de ton argent.",
|
| 70 |
+
"duration": 8
|
| 71 |
+
}
|
| 72 |
+
]
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
from pydub import AudioSegment
|
| 76 |
+
|
| 77 |
+
def get_audio_duration(audio_path: str) -> float:
    """Return the duration of the audio file at *audio_path* in seconds."""
    audio = AudioSegment.from_file(audio_path)
    return audio.duration_seconds  # duration in seconds
|
| 80 |
+
|
| 81 |
+
!pip install gtts
|
| 82 |
+
|
| 83 |
+
import os
|
| 84 |
+
import uuid
|
| 85 |
+
import subprocess
|
| 86 |
+
from gtts import gTTS
|
| 87 |
+
import torch
|
| 88 |
+
from diffusers import StableDiffusionPipeline
|
| 89 |
+
|
| 90 |
+
# Charger Stable Diffusion UNE SEULE FOIS
|
| 91 |
+
pipe = StableDiffusionPipeline.from_pretrained(
|
| 92 |
+
"runwayml/stable-diffusion-v1-5",
|
| 93 |
+
torch_dtype=torch.float16
|
| 94 |
+
).to("cuda")
|
| 95 |
+
|
| 96 |
+
def generate_course_video(course_data: dict) -> str:
    """Render a narrated MP4 course video from ``course_data``.

    For each entry of ``course_data["scenes"]`` (dicts with at least
    ``title`` and ``content`` keys) this:
      1. generates an illustration with the module-level Stable Diffusion
         ``pipe``,
      2. synthesizes French narration with gTTS (needs network access),
      3. builds a still-image video segment trimmed to the narration length,
    then concatenates all segments (stream copy, no re-encode) into a
    single MP4 under ``output/videos/`` and returns its path.

    Raises:
        ValueError: if no scenes are provided.
        subprocess.CalledProcessError: if any ffmpeg invocation fails.
    """
    import os
    import subprocess
    import uuid
    from gtts import gTTS
    from pydub import AudioSegment

    scenes = course_data.get("scenes") or []
    if not scenes:
        # Without this guard the final ffmpeg concat would fail with an
        # obscure error on an empty segment list.
        raise ValueError("course_data must contain at least one scene")

    # Unique per-run workspace: the previous fixed scene_<idx> names meant
    # two concurrent runs would clobber each other's intermediate files.
    run_id = uuid.uuid4().hex
    work_dir = f"tmp/{run_id}"
    img_dir = f"{work_dir}/images"
    audio_dir = f"{work_dir}/audio"
    os.makedirs(img_dir, exist_ok=True)
    os.makedirs(audio_dir, exist_ok=True)
    os.makedirs("output/videos", exist_ok=True)

    segments = []

    for idx, scene in enumerate(scenes):
        # 1. Illustration for this scene.
        prompt = f"Cute cartoon, {scene['title']}, {scene['content']}"
        image = pipe(prompt, num_inference_steps=20).images[0]
        img_path = f"{img_dir}/scene_{idx}.png"
        image.save(img_path)

        # 2. French narration via gTTS.
        audio_mp3 = f"{audio_dir}/scene_{idx}.mp3"
        gTTS(text=scene["content"], lang="fr").save(audio_mp3)

        # 3. Convert to WAV so pydub can measure the exact duration.
        audio_wav = audio_mp3.replace(".mp3", ".wav")
        subprocess.run(["ffmpeg", "-y", "-i", audio_mp3, audio_wav], check=True)
        audio_duration = AudioSegment.from_file(audio_wav).duration_seconds

        # 4. Still image + narration -> one segment, clipped to the exact
        # narration length (-loop 1 would otherwise loop indefinitely).
        segment_path = f"{work_dir}/segment_{idx}.mp4"
        subprocess.run([
            "ffmpeg", "-y",
            "-loop", "1", "-i", img_path,
            "-i", audio_wav,
            "-c:v", "libx264",
            "-t", str(audio_duration),
            "-pix_fmt", "yuv420p",
            "-c:a", "aac",
            segment_path
        ], check=True)

        segments.append(segment_path)

    # 5. Concatenate all segments without re-encoding (they share codecs,
    # so the concat demuxer with "-c copy" is lossless and fast).
    concat_file = f"{work_dir}/segments.txt"
    with open(concat_file, "w") as f:
        for seg in segments:
            f.write(f"file '{os.path.abspath(seg)}'\n")

    output_video = f"output/videos/course_{uuid.uuid4().hex}.mp4"
    subprocess.run([
        "ffmpeg", "-y", "-f", "concat", "-safe", "0",
        "-i", concat_file,
        "-c", "copy",
        output_video
    ], check=True)

    return output_video
|
| 156 |
+
|
| 157 |
+
!apt-get update -qq
|
| 158 |
+
!apt-get install -y espeak
|
| 159 |
+
|
| 160 |
+
video_path = generate_course_video(course_data)
|
requirements.txt
CHANGED
|
@@ -1,23 +1,45 @@
|
|
|
|
|
| 1 |
fastapi>=0.104.0
|
| 2 |
uvicorn[standard]>=0.24.0
|
| 3 |
python-dotenv>=1.0.0
|
| 4 |
pydantic>=2.5.0
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
sqlalchemy>=2.0.0
|
| 6 |
asyncpg>=0.29.0
|
| 7 |
alembic>=1.13.0
|
| 8 |
redis>=5.0.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
openai>=1.3.0
|
| 10 |
langchain>=0.0.350
|
| 11 |
langchain-groq>=0.0.1
|
| 12 |
langchain-community>=0.4.0
|
| 13 |
crewai>=0.1.4
|
| 14 |
crewai-tools>=0.2.0
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
pandas>=2.1.0
|
| 20 |
numpy>=1.25.0
|
|
|
|
|
|
|
| 21 |
pytest>=7.4.0
|
| 22 |
pytest-asyncio>=0.21.0
|
| 23 |
-
gtts>=2.4.0
|
|
|
|
| 1 |
+
# Core API Framework
|
| 2 |
fastapi>=0.104.0
|
| 3 |
uvicorn[standard]>=0.24.0
|
| 4 |
python-dotenv>=1.0.0
|
| 5 |
pydantic>=2.5.0
|
| 6 |
+
python-multipart>=0.0.6
|
| 7 |
+
httpx>=0.25.0
|
| 8 |
+
|
| 9 |
+
# Database & ORM
|
| 10 |
sqlalchemy>=2.0.0
|
| 11 |
asyncpg>=0.29.0
|
| 12 |
alembic>=1.13.0
|
| 13 |
redis>=5.0.0
|
| 14 |
+
|
| 15 |
+
# Authentication & Security
|
| 16 |
+
python-jose[cryptography]>=3.3.0
|
| 17 |
+
passlib[bcrypt]>=1.7.4
|
| 18 |
+
|
| 19 |
+
# AI & ML Libraries
|
| 20 |
openai>=1.3.0
|
| 21 |
langchain>=0.0.350
|
| 22 |
langchain-groq>=0.0.1
|
| 23 |
langchain-community>=0.4.0
|
| 24 |
crewai>=0.1.4
|
| 25 |
crewai-tools>=0.2.0
|
| 26 |
+
|
| 27 |
+
# Video Generation
|
| 28 |
+
torch>=2.1.0
|
| 29 |
+
torchvision>=0.16.0
|
| 30 |
+
diffusers>=0.24.0
|
| 31 |
+
transformers>=4.35.0
|
| 32 |
+
accelerate>=0.24.0
|
| 33 |
+
scipy>=1.11.0
|
| 34 |
+
|
| 35 |
+
# Audio Processing
|
| 36 |
+
gtts>=2.4.0
|
| 37 |
+
pydub>=0.25.0
|
| 38 |
+
|
| 39 |
+
# Data Processing
|
| 40 |
pandas>=2.1.0
|
| 41 |
numpy>=1.25.0
|
| 42 |
+
|
| 43 |
+
# Testing
|
| 44 |
pytest>=7.4.0
|
| 45 |
pytest-asyncio>=0.21.0
|
|
|
test_api.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""
Simple smoke-test script for the JANGG API.

Spins up a minimal stand-alone FastAPI app (without the full router
stack) so the runtime environment can be validated quickly.
"""
import sys
import os

# Make the project package importable no matter where the script is run from.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

try:
    # Only import what is actually used (the unused JSONResponse import
    # was removed).
    from fastapi import FastAPI

    app = FastAPI(title="JANGG API Test", version="1.0.0")

    @app.get("/")
    async def root():
        """Liveness probe for the test app."""
        return {"message": "JANGG API is running!", "status": "healthy"}

    @app.get("/health")
    async def health():
        """Health-check endpoint mirroring the production API's contract."""
        return {"status": "healthy", "api": "JANGG API", "version": "1.0.0"}

    @app.get("/test-endpoints")
    async def test_endpoints():
        """Describe the endpoints exposed by the real JANGG API."""
        endpoints = {
            "chat": {
                "path": "/chat/learn",
                "method": "POST",
                "description": "Générer cours et quiz",
                "input": {
                    "topic": "Finance personnelle",
                    "sector": "finance",
                    "tone": "friendly",
                    "style": "simple",
                    "length": "full"
                }
            },
            "quiz": {
                "path": "/quiz/evaluate",
                "method": "POST",
                "description": "Évaluer les réponses du quiz"
            },
            "audio": {
                "path": "/audio/generate",
                "method": "POST",
                "description": "Convertir texte en audio"
            },
            "video": {
                "path": "/video/generate",
                "method": "POST",
                "description": "Générer vidéo avec IA"
            },
            "integrated": {
                "complete-course": {
                    "path": "/integrated/complete-course",
                    "method": "POST",
                    "description": "Générer package complet (cours + quiz + scènes vidéo) en une inference"
                },
                "video-from-scenes": {
                    "path": "/integrated/video-from-scenes",
                    "method": "POST",
                    "description": "Générer vidéo à partir des scènes"
                },
                "full-pipeline": {
                    "path": "/integrated/full-pipeline",
                    "method": "POST",
                    "description": "Pipeline complet (contenu + vidéo) en un appel"
                }
            }
        }
        return {"endpoints": endpoints, "total_count": len(endpoints)}

    if __name__ == "__main__":
        import uvicorn
        print("🚀 Lancement de l'API JANGG en mode test...")
        print("📖 Documentation: http://localhost:8000/docs")
        print("🔍 Test endpoints: http://localhost:8000/test-endpoints")
        uvicorn.run(app, host="0.0.0.0", port=8000)

except ImportError as e:
    # Best-effort bootstrap: if FastAPI is missing, install the minimal
    # dependencies and ask the user to re-run.
    print(f"❌ Erreur d'import: {e}")
    print("📦 Installation des dépendances de base...")
    os.system("pip3 install fastapi uvicorn --break-system-packages")
    print("✅ Installation terminée. Relancez le script.")
|
total_api.py
ADDED
|
@@ -0,0 +1,702 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
🤖 JANGG AI API - Version Complète pour Colab
|
| 4 |
+
API exhaustive dans un seul fichier - Pas de GPU requis
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from fastapi import FastAPI, HTTPException
|
| 8 |
+
from fastapi.responses import HTMLResponse
|
| 9 |
+
from pydantic import BaseModel, Field
|
| 10 |
+
from typing import List, Dict, Optional
|
| 11 |
+
import uuid
|
| 12 |
+
import time
|
| 13 |
+
import json
|
| 14 |
+
import random
|
| 15 |
+
from datetime import datetime
|
| 16 |
+
|
| 17 |
+
# FastAPI application instance; interactive docs are served at /docs
# (Swagger) and /redoc (ReDoc).
app = FastAPI(
    title="🤖 JANGG AI API - Complete Version",
    description="API complète pour l'apprentissage interactif avec IA (Mode Colab)",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc"
)
|
| 25 |
+
|
| 26 |
+
# ==================== SCHÉMAS DE DONNÉES ====================
|
| 27 |
+
|
| 28 |
+
class LearnRequest(BaseModel):
    """Request payload for the course/quiz generation endpoints."""

    topic: str = Field(..., description="Sujet à apprendre")
    sector: str = Field(..., description="Secteur cible (finance, tech, health)")
    tone: str = Field(..., description="Ton du contenu (friendly, formal, casual)")
    style: str = Field(..., description="Style d'explication (simple, detailed, technical)")
    length: str = Field(..., description="Longueur du cours (short, full)")
|
| 34 |
+
|
| 35 |
+
class Scene(BaseModel):
    """One narrated scene of a generated course video."""

    title: str = Field(..., description="Titre de la scène")
    content: str = Field(..., description="Contenu narratif de la scène")
    duration: int = Field(..., description="Durée en secondes")
    visual_prompt: Optional[str] = Field(None, description="Prompt visuel pour IA")
|
| 40 |
+
|
| 41 |
+
class VideoRequest(BaseModel):
    """Request payload for the video-generation endpoints."""

    topic: str = Field(..., description="Sujet de la vidéo")
    style: str = Field(default="cartoon", description="Style visuel")
    tone: str = Field(default="fun", description="Ton du contenu")
    language: str = Field(default="fr", description="Langue pour l'audio")
    scenes: List[Scene] = Field(..., description="Liste des scènes")
|
| 47 |
+
|
| 48 |
+
class QuizEvaluation(BaseModel):
    """Quiz questions plus the user's selected option indices."""

    quiz: List[Dict] = Field(..., description="Questions du quiz")
    answers: List[int] = Field(..., description="Réponses de l'utilisateur")
|
| 51 |
+
|
| 52 |
+
class AudioRequest(BaseModel):
    """Text-to-speech request payload."""

    text: str = Field(..., description="Texte à convertir en audio")
|
| 54 |
+
|
| 55 |
+
class CompleteCourseResponse(BaseModel):
    """Unified response bundling course text, quiz and video scenes."""

    course: str = Field(..., description="Contenu du cours généré")
    quiz: List[Dict] = Field(..., description="Quiz généré")
    video_scenes: List[Scene] = Field(..., description="Scènes pour vidéo")
    message: str = Field(default="Package complet généré avec succès")
    generation_method: str = Field(default="unified_llm")
|
| 61 |
+
|
| 62 |
+
# ==================== FONCTIONS UTILITAIRES ====================
|
| 63 |
+
|
| 64 |
+
def generate_mock_course(topic: str, sector: str, tone: str, style: str, length: str) -> str:
    """Build a canned (non-LLM) course text for *topic*.

    Two templates exist ("finance" and "tech"); any other sector falls
    back to the finance template. *length* is accepted for interface
    compatibility but does not alter the templates.
    """
    finance_course = f"""
# 📚 {topic.title()} - Cours Complet

## Introduction
La {topic.lower()} est essentielle dans le secteur {sector}.
Avec une approche {tone} et un style {style}, vous allez maîtriser ce sujet.

## Concepts Fondamentaux
1. **Définition**: Comprendre les bases de {topic.lower()}
2. **Applications**: Comment appliquer ces connaissances en pratique
3. **Bonnes pratiques**: Les meilleures stratégies à adopter

## Cas Pratique
Voici un exemple concret dans le contexte {sector}:
- Étape 1: Analyse de la situation
- Étape 2: Mise en œuvre des solutions
- Étape 3: Évaluation des résultats

## Conclusion
Félicitations ! Vous avez maintenant les bases solides en {topic.lower()}.
"""
    tech_course = f"""
# 💻 {topic.title()} - Guide Technique

## Vue d'Ensemble
{topic} est un domaine crucial dans la technologie moderne.
Ce cours {tone} vous permettra de comprendre les concepts {style}.

## Architecture Principale
- **Composants**: Éléments essentiels de {topic}
- **Intégration**: Comment tout s'assemble
- **Optimisation**: Meilleures pratiques de performance

## Implémentation
```python
# Exemple de code pour {topic.lower()}
def implement_{topic.lower().replace(' ', '_')}():
    return "Solution {topic} avec style {style}"
```

## Prochaines Étapes
Continuez votre apprentissage avec des projets pratiques !
"""
    # "tech" gets its own template; everything else (including unknown
    # sectors) receives the finance template, as before.
    if sector.lower() == "tech":
        return tech_course
    return finance_course
|
| 113 |
+
|
| 114 |
+
def generate_mock_quiz(topic: str) -> List[Dict]:
    """Return three canned multiple-choice questions about *topic*.

    Option 0 is always the correct answer in this mock implementation.
    """
    def make_question(text: str, options: List[str]) -> Dict:
        # Mock convention: the first option is the right one.
        return {"question": text, "options": options, "answer": 0}

    return [
        make_question(
            f"Quelle est l'importance de {topic} dans le secteur professionnel ?",
            [
                "Très importante, c'est essentiel",
                "Importante mais pas critique",
                "Optionnel selon le contexte",
                "Pas vraiment nécessaire",
            ],
        ),
        make_question(
            f"Quelle est la meilleure approche pour apprendre {topic} ?",
            [
                "Pratique régulière",
                "Théorie seulement",
                "Une seule fois suffit",
                "Sans méthode particulière",
            ],
        ),
        make_question(
            f"Comment appliquer {topic} dans un projet réel ?",
            [
                "Étape par étape avec planification",
                "Directement sans préparation",
                "Seulement avec un expert",
                "Jamais en pratique",
            ],
        ),
    ]
|
| 148 |
+
|
| 149 |
+
def generate_mock_scenes(topic: str, style: str) -> List[Scene]:
    """Return four canned video scenes (intro → concepts → practice → outro)."""
    specs = [
        (
            "Introduction",
            f"Bienvenue dans ce cours sur {topic}. Découvrons ensemble les concepts fondamentaux.",
            8,
            f"{style} style, educational introduction about {topic}, bright and engaging",
        ),
        (
            "Concepts Clés",
            f"Les concepts essentiels de {topic} sont simples à comprendre avec la bonne approche.",
            10,
            f"{style} style, key concepts visualization for {topic}, clear and structured",
        ),
        (
            "Application Pratique",
            f"Appliquons maintenant {topic} dans un contexte réel et concret.",
            12,
            f"{style} style, practical application of {topic}, hands-on demonstration",
        ),
        (
            "Conclusion",
            f"Félicitations ! Vous maîtrisez maintenant les bases de {topic}. Continuez à pratiquer.",
            8,
            f"{style} style, celebration and achievement, learning success",
        ),
    ]
    return [
        Scene(title=title, content=content, duration=secs, visual_prompt=prompt)
        for title, content, secs, prompt in specs
    ]
|
| 177 |
+
|
| 178 |
+
def generate_mock_audio_url(text: str) -> str:
    """Return a placeholder MP3 URL; *text* is not actually synthesized."""
    token = uuid.uuid4().hex[:8]
    return f"https://example.com/audio/{token}.mp3"
|
| 181 |
+
|
| 182 |
+
def generate_mock_video_path(topic: str) -> str:
    """Return a placeholder output path for a video about *topic*.

    Spaces in the topic are replaced with underscores and a short random
    suffix keeps paths unique.
    """
    slug = topic.replace(" ", "_")
    return f"output/videos/{slug}_{uuid.uuid4().hex[:8]}.mp4"
|
| 185 |
+
|
| 186 |
+
# ==================== ENDPOINTS PRINCIPAUX ====================
|
| 187 |
+
|
| 188 |
+
@app.get("/", response_class=HTMLResponse)
|
| 189 |
+
async def root():
|
| 190 |
+
"""Page d'accueil de l'API"""
|
| 191 |
+
return HTMLResponse(content="""
|
| 192 |
+
<!DOCTYPE html>
|
| 193 |
+
<html>
|
| 194 |
+
<head>
|
| 195 |
+
<title>🤖 JANGG AI API - Complete Version</title>
|
| 196 |
+
<style>
|
| 197 |
+
body { font-family: Arial, sans-serif; max-width: 1000px; margin: 0 auto; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; }
|
| 198 |
+
.header { text-align: center; margin-bottom: 40px; }
|
| 199 |
+
.endpoint { background: rgba(255,255,255,0.1); padding: 20px; margin: 15px 0; border-radius: 10px; backdrop-filter: blur(10px); }
|
| 200 |
+
.method { color: #4ade80; font-weight: bold; font-size: 18px; }
|
| 201 |
+
.path { color: #60a5fa; font-family: monospace; font-size: 16px; }
|
| 202 |
+
.description { color: #e2e8f0; margin-top: 8px; }
|
| 203 |
+
.docs-link { background: #4ade80; color: #1e293b; padding: 15px 30px; text-decoration: none; border-radius: 8px; display: inline-block; margin: 10px; font-weight: bold; }
|
| 204 |
+
.feature { background: rgba(255,255,255,0.05); padding: 15px; border-radius: 8px; margin: 10px 0; }
|
| 205 |
+
</style>
|
| 206 |
+
</head>
|
| 207 |
+
<body>
|
| 208 |
+
<div class="header">
|
| 209 |
+
<h1>🤖 JANGG AI API - Complete Version</h1>
|
| 210 |
+
<p>API exhaustive pour l'apprentissage interactif avec IA (Mode Colab)</p>
|
| 211 |
+
</div>
|
| 212 |
+
|
| 213 |
+
<div class="feature">
|
| 214 |
+
<h3>🚀 Fonctionnalités Complètes</h3>
|
| 215 |
+
<ul>
|
| 216 |
+
<li>📚 Génération de cours personnalisés</li>
|
| 217 |
+
<li>🎯 Création de quiz interactifs</li>
|
| 218 |
+
<li>🎥 Génération de vidéos avec IA</li>
|
| 219 |
+
<li>🔊 Synthèse vocale (TTS)</li>
|
| 220 |
+
<li>🔄 Pipeline intégré complet</li>
|
| 221 |
+
</ul>
|
| 222 |
+
</div>
|
| 223 |
+
|
| 224 |
+
<h2>📋 Endpoints Disponibles</h2>
|
| 225 |
+
|
| 226 |
+
<div class="endpoint">
|
| 227 |
+
<span class="method">POST</span> <span class="path">/chat/learn</span>
|
| 228 |
+
<div class="description">Générer cours et quiz personnalisés</div>
|
| 229 |
+
</div>
|
| 230 |
+
|
| 231 |
+
<div class="endpoint">
|
| 232 |
+
<span class="method">POST</span> <span class="path">/quiz/evaluate</span>
|
| 233 |
+
<div class="description">Évaluer les réponses du quiz</div>
|
| 234 |
+
</div>
|
| 235 |
+
|
| 236 |
+
<div class="endpoint">
|
| 237 |
+
<span class="method">POST</span> <span class="path">/audio/generate</span>
|
| 238 |
+
<div class="description">Convertir texte en audio (TTS)</div>
|
| 239 |
+
</div>
|
| 240 |
+
|
| 241 |
+
<div class="endpoint">
|
| 242 |
+
<span class="method">POST</span> <span class="path">/video/generate</span>
|
| 243 |
+
<div class="description">Générer vidéo avec IA et narration</div>
|
| 244 |
+
</div>
|
| 245 |
+
|
| 246 |
+
<div class="endpoint">
|
| 247 |
+
<span class="method">POST</span> <span class="path">/integrated/complete-course</span>
|
| 248 |
+
<div class="description">Package complet (cours + quiz + scènes vidéo) en une seule requête</div>
|
| 249 |
+
</div>
|
| 250 |
+
|
| 251 |
+
<div class="endpoint">
|
| 252 |
+
<span class="method">POST</span> <span class="path">/integrated/full-pipeline</span>
|
| 253 |
+
<div class="description">Pipeline complet avec génération vidéo finale</div>
|
| 254 |
+
</div>
|
| 255 |
+
|
| 256 |
+
<div class="endpoint">
|
| 257 |
+
<span class="method">POST</span> <span class="path">/integrated/video-from-scenes</span>
|
| 258 |
+
<div class="description">Générer vidéo à partir de scènes pré-existantes</div>
|
| 259 |
+
</div>
|
| 260 |
+
|
| 261 |
+
<div style="text-align: center; margin-top: 40px;">
|
| 262 |
+
<a href="/docs" class="docs-link">📖 Documentation Interactive (Swagger)</a>
|
| 263 |
+
<a href="/redoc" class="docs-link">📋 Documentation Alternative (ReDoc)</a>
|
| 264 |
+
</div>
|
| 265 |
+
|
| 266 |
+
<div style="text-align: center; margin-top: 30px; opacity: 0.8;">
|
| 267 |
+
<p>© 2026 JANGG AI API - Mode Colab Optimisé</p>
|
| 268 |
+
</div>
|
| 269 |
+
</body>
|
| 270 |
+
</html>
|
| 271 |
+
""")
|
| 272 |
+
|
| 273 |
+
@app.get("/health")
async def health_check():
    """API health check."""
    # Static payload: this only proves the process is serving requests;
    # no downstream dependencies are probed.
    return {
        "status": "healthy",
        "api": "JANGG AI API - Complete Version",
        "version": "1.0.0",
        "mode": "colab_optimized",
        "features": ["chat", "quiz", "audio", "video", "integrated"],
        "timestamp": datetime.now().isoformat()
    }
|
| 284 |
+
|
| 285 |
+
# ==================== ENDPOINTS CHAT ====================
|
| 286 |
+
|
| 287 |
+
@app.post("/chat/learn")
async def learn_course(req: LearnRequest):
    """
    Generate a complete course and a personalized quiz.

    Creates educational content tailored to the request parameters.
    """
    try:
        start_time = time.time()

        # Content generation (mock implementations, no real LLM/TTS).
        course = generate_mock_course(req.topic, req.sector, req.tone, req.style, req.length)
        quiz = generate_mock_quiz(req.topic)
        audio_url = generate_mock_audio_url(req.topic)

        processing_time = time.time() - start_time

        return {
            "course": course.strip(),
            "quiz": quiz,
            "audio_url": audio_url,
            "processing_time": processing_time,
            "message": f"Cours généré avec succès pour {req.topic}",
            # Echo the request parameters back so the client can correlate
            # responses with what was asked for.
            "parameters": {
                "topic": req.topic,
                "sector": req.sector,
                "tone": req.tone,
                "style": req.style,
                "length": req.length
            }
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur lors de la génération du cours: {str(e)}")
|
| 321 |
+
|
| 322 |
+
# ==================== ENDPOINTS QUIZ ====================
|
| 323 |
+
|
| 324 |
+
@app.post("/quiz/evaluate")
async def evaluate_quiz(req: QuizEvaluation):
    """
    Evaluate a user's quiz answers.

    Computes the score and returns per-question feedback plus a
    personalized message.

    Raises:
        HTTPException 400: empty quiz, mismatched answer count, or an
            out-of-range option index.
        HTTPException 500: unexpected evaluation failure.
    """
    # Validation up front. These cases previously surfaced as generic 500s:
    # an empty quiz triggered ZeroDivisionError, mismatched list lengths
    # were silently truncated by zip(), and an out-of-range answer index
    # raised IndexError.
    if not req.quiz:
        raise HTTPException(status_code=400, detail="Le quiz ne contient aucune question")
    if len(req.answers) != len(req.quiz):
        raise HTTPException(
            status_code=400,
            detail=f"Nombre de réponses ({len(req.answers)}) différent du nombre de questions ({len(req.quiz)})"
        )

    try:
        correct_answers = 0
        feedback = []

        for i, (question, user_answer) in enumerate(zip(req.quiz, req.answers)):
            correct = question["answer"]
            options = question["options"]
            if not 0 <= user_answer < len(options):
                raise HTTPException(
                    status_code=400,
                    detail=f"Réponse invalide pour la question {i+1}: {user_answer}"
                )

            if user_answer == correct:
                correct_answers += 1
                feedback.append(f"✅ Question {i+1}: Correct! '{options[user_answer]}'")
            else:
                correct_option = options[correct]
                user_option = options[user_answer]
                feedback.append(f"❌ Question {i+1}: Incorrect. Votre réponse: '{user_option}'. Correct: '{correct_option}'")

        score = (correct_answers / len(req.quiz)) * 100

        # Motivational message tier based on the final percentage.
        if score >= 80:
            message = "🎉 Excellent travail! Vous maîtrisez bien le sujet!"
        elif score >= 60:
            message = "👍 Bon travail! Quelques améliorations possibles."
        elif score >= 40:
            message = "📚 Pas mal! Continuez à pratiquer."
        else:
            message = "💪 Continuez vos efforts! La pratique fait la perfection."

        return {
            "score": round(score, 1),
            "correct_answers": correct_answers,
            "total_questions": len(req.quiz),
            "feedback": feedback,
            "message": message,
            "evaluation_time": datetime.now().isoformat()
        }

    except HTTPException:
        # Re-raise validation errors unchanged instead of wrapping them
        # in a 500 below.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur lors de l'évaluation: {str(e)}")
|
| 370 |
+
|
| 371 |
+
# ==================== ENDPOINTS AUDIO ====================
|
| 372 |
+
|
| 373 |
+
@app.post("/audio/generate")
async def generate_audio(req: AudioRequest):
    """
    Convert text to audio (Text-to-Speech).

    Generates an audio file descriptor from the supplied text.
    """
    try:
        start_time = time.time()

        # Simulated audio generation: only a placeholder URL is produced,
        # no real TTS engine runs in Colab mode.
        audio_url = generate_mock_audio_url(req.text)
        duration = len(req.text.split()) * 0.5  # Estimate: 0.5s per word

        processing_time = time.time() - start_time

        return {
            "audio_url": audio_url,
            "text": req.text,
            "duration": round(duration, 2),
            "processing_time": processing_time,
            "message": "Audio généré avec succès",
            "format": "mp3",
            "language": "fr"
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur lors de la génération audio: {str(e)}")
|
| 401 |
+
|
| 402 |
+
# ==================== ENDPOINTS VIDÉO ====================
|
| 403 |
+
|
| 404 |
+
@app.post("/video/generate")
async def generate_video(req: VideoRequest):
    """
    Generate an educational video with AI.

    Builds a complete video descriptor from the supplied scenes.
    """
    try:
        start_time = time.time()

        # Simulated video generation: a plausible output path is produced
        # but no frames are actually rendered in Colab mode.
        video_path = generate_mock_video_path(req.topic)
        total_duration = sum(scene.duration for scene in req.scenes)

        processing_time = time.time() - start_time

        return {
            "video_path": video_path,
            "topic": req.topic,
            "style": req.style,
            "language": req.language,
            "scenes_processed": len(req.scenes),
            "total_duration": total_duration,
            "processing_time": processing_time,
            "message": "Vidéo générée avec succès",
            # Hard-coded metadata describing the (simulated) output format.
            "resolution": "1920x1080",
            "fps": 30,
            "format": "mp4"
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur lors de la génération vidéo: {str(e)}")
|
| 436 |
+
|
| 437 |
+
@app.get("/video/health")
async def video_health_check():
    """Health check for the video service (static, simulated values)."""
    return {
        "status": "healthy",
        "service": "video_generation",
        "model_loaded": True,
        "gpu_available": False,
        "mode": "cpu_simulation",
        "supported_formats": ["mp4", "avi", "mov"],
        "max_duration": 600  # 10 minutes max
    }
|
| 449 |
+
|
| 450 |
+
# ==================== ENDPOINTS INTÉGRÉS ====================
|
| 451 |
+
|
| 452 |
+
@app.post("/integrated/complete-course", response_model=CompleteCourseResponse)
async def generate_complete_course(req: LearnRequest):
    """
    Generate a complete learning package in a single request.

    This unified endpoint produces, coherently and in one operation:
    - a structured course
    - an interactive quiz
    - ready-to-use video scenes
    """
    try:
        start_time = time.time()

        # Unified generation: every artifact derives from the same
        # request parameters so course, quiz and scenes stay consistent.
        course = generate_mock_course(req.topic, req.sector, req.tone, req.style, req.length)
        quiz = generate_mock_quiz(req.topic)
        video_scenes = generate_mock_scenes(req.topic, req.style)

        # NOTE(review): processing_time is computed but not included in
        # the response model — kept for parity with the other endpoints.
        processing_time = time.time() - start_time

        return CompleteCourseResponse(
            course=course.strip(),
            quiz=quiz,
            video_scenes=video_scenes,
            message="Package complet généré avec succès",
            generation_method="unified_llm_simulation"
        )

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur lors de la génération du package: {str(e)}")
|
| 484 |
+
|
| 485 |
+
@app.post("/integrated/video-from-scenes")
|
| 486 |
+
async def generate_video_from_scenes(req: VideoRequest):
|
| 487 |
+
"""
|
| 488 |
+
Générer une vidéo à partir de scènes pré-existantes
|
| 489 |
+
|
| 490 |
+
Utilisez cet endpoint avec les scènes de /integrated/complete-course
|
| 491 |
+
"""
|
| 492 |
+
try:
|
| 493 |
+
start_time = time.time()
|
| 494 |
+
|
| 495 |
+
video_path = generate_mock_video_path(req.topic)
|
| 496 |
+
total_duration = sum(scene.duration for scene in req.scenes)
|
| 497 |
+
|
| 498 |
+
# Ajout de prompts visuels si manquants
|
| 499 |
+
enhanced_scenes = []
|
| 500 |
+
for scene in req.scenes:
|
| 501 |
+
if not scene.visual_prompt:
|
| 502 |
+
scene.visual_prompt = f"{req.style} style, {scene.title}, educational content"
|
| 503 |
+
enhanced_scenes.append(scene)
|
| 504 |
+
|
| 505 |
+
processing_time = time.time() - start_time
|
| 506 |
+
|
| 507 |
+
return {
|
| 508 |
+
"video_path": video_path,
|
| 509 |
+
"scenes_used": len(req.scenes),
|
| 510 |
+
"total_duration": total_duration,
|
| 511 |
+
"processing_time": processing_time,
|
| 512 |
+
"enhanced_scenes": enhanced_scenes,
|
| 513 |
+
"message": "Vidéo générée à partir des scènes avec succès"
|
| 514 |
+
}
|
| 515 |
+
|
| 516 |
+
except Exception as e:
|
| 517 |
+
raise HTTPException(status_code=500, detail=f"Erreur lors de la génération vidéo: {str(e)}")
|
| 518 |
+
|
| 519 |
+
@app.post("/integrated/full-pipeline")
|
| 520 |
+
async def generate_full_pipeline(req: LearnRequest):
|
| 521 |
+
"""
|
| 522 |
+
Pipeline complet: génération de contenu ET création vidéo
|
| 523 |
+
|
| 524 |
+
C'est l'endpoint le plus complet - il fait tout!
|
| 525 |
+
"""
|
| 526 |
+
try:
|
| 527 |
+
start_time = time.time()
|
| 528 |
+
|
| 529 |
+
# Étape 1: Génération du contenu complet
|
| 530 |
+
course = generate_mock_course(req.topic, req.sector, req.tone, req.style, req.length)
|
| 531 |
+
quiz = generate_mock_quiz(req.topic)
|
| 532 |
+
video_scenes = generate_mock_scenes(req.topic, req.style)
|
| 533 |
+
|
| 534 |
+
# Étape 2: Génération vidéo
|
| 535 |
+
video_path = generate_mock_video_path(req.topic)
|
| 536 |
+
total_duration = sum(scene.duration for scene in video_scenes)
|
| 537 |
+
|
| 538 |
+
# Étape 3: Génération audio
|
| 539 |
+
audio_url = generate_mock_audio_url(req.topic)
|
| 540 |
+
|
| 541 |
+
total_time = time.time() - start_time
|
| 542 |
+
|
| 543 |
+
return {
|
| 544 |
+
# Contenu généré
|
| 545 |
+
"course": course.strip(),
|
| 546 |
+
"quiz": quiz,
|
| 547 |
+
"video_scenes": video_scenes,
|
| 548 |
+
|
| 549 |
+
# Fichiers générés
|
| 550 |
+
"video_path": video_path,
|
| 551 |
+
"audio_url": audio_url,
|
| 552 |
+
|
| 553 |
+
# Métadonnées
|
| 554 |
+
"total_duration": total_duration,
|
| 555 |
+
"total_processing_time": total_time,
|
| 556 |
+
"generation_method": "full_pipeline_simulation",
|
| 557 |
+
"message": "🎉 Pipeline complet terminé avec succès!",
|
| 558 |
+
|
| 559 |
+
# Paramètres
|
| 560 |
+
"parameters": {
|
| 561 |
+
"topic": req.topic,
|
| 562 |
+
"sector": req.sector,
|
| 563 |
+
"tone": req.tone,
|
| 564 |
+
"style": req.style,
|
| 565 |
+
"length": req.length
|
| 566 |
+
},
|
| 567 |
+
|
| 568 |
+
# Statistiques
|
| 569 |
+
"stats": {
|
| 570 |
+
"course_words": len(course.split()),
|
| 571 |
+
"quiz_questions": len(quiz),
|
| 572 |
+
"video_scenes": len(video_scenes),
|
| 573 |
+
"estimated_video_size": f"{round(total_duration * 2)}MB"
|
| 574 |
+
}
|
| 575 |
+
}
|
| 576 |
+
|
| 577 |
+
except Exception as e:
|
| 578 |
+
raise HTTPException(status_code=500, detail=f"Erreur lors du pipeline: {str(e)}")
|
| 579 |
+
|
| 580 |
+
# ==================== UTILITY ENDPOINTS ====================

@app.get("/endpoints")
async def list_endpoints():
    """List every available endpoint with its details (static catalogue)."""
    video_endpoints = {
        "generate": {
            "path": "/video/generate",
            "method": "POST",
            "description": "Générer vidéo avec IA",
            "features": ["scene_processing", "video_compilation", "multiple_formats"],
        },
        "health": {
            "path": "/video/health",
            "method": "GET",
            "description": "Vérifier l'état du service vidéo",
            "features": ["service_status", "capabilities_check"],
        },
    }
    integrated_endpoints = {
        "complete_course": {
            "path": "/integrated/complete-course",
            "method": "POST",
            "description": "Package complet en une requête",
            "features": ["unified_generation", "coherent_content", "video_ready_scenes"],
        },
        "video_from_scenes": {
            "path": "/integrated/video-from-scenes",
            "method": "POST",
            "description": "Générer vidéo depuis scènes",
            "features": ["scene_enhancement", "video_generation", "custom_scenes"],
        },
        "full_pipeline": {
            "path": "/integrated/full-pipeline",
            "method": "POST",
            "description": "Pipeline complet avec vidéo",
            "features": ["everything_included", "complete_solution", "all_files"],
        },
    }
    return {
        "api_info": {
            "title": "JANGG AI API - Complete Version",
            "version": "1.0.0",
            "mode": "colab_optimized",
            "total_endpoints": 9,
        },
        "endpoints": {
            "chat": {
                "path": "/chat/learn",
                "method": "POST",
                "description": "Générer cours et quiz personnalisés",
                "features": ["course_generation", "quiz_creation", "audio_url"],
            },
            "quiz": {
                "path": "/quiz/evaluate",
                "method": "POST",
                "description": "Évaluer les réponses du quiz",
                "features": ["score_calculation", "detailed_feedback", "performance_analysis"],
            },
            "audio": {
                "path": "/audio/generate",
                "method": "POST",
                "description": "Convertir texte en audio (TTS)",
                "features": ["text_to_speech", "duration_estimation", "mp3_format"],
            },
            "video": video_endpoints,
            "integrated": integrated_endpoints,
        },
        "features": {
            "no_gpu_required": True,
            "colab_optimized": True,
            "simulated_responses": True,
            "full_documentation": True,
            "interactive_testing": True,
        },
    }
|
| 655 |
+
@app.get("/test/sample-data")
|
| 656 |
+
async def get_sample_data():
|
| 657 |
+
"""Données de test pour tous les endpoints"""
|
| 658 |
+
return {
|
| 659 |
+
"learn_request": {
|
| 660 |
+
"topic": "Finance personnelle",
|
| 661 |
+
"sector": "finance",
|
| 662 |
+
"tone": "friendly",
|
| 663 |
+
"style": "simple",
|
| 664 |
+
"length": "full"
|
| 665 |
+
},
|
| 666 |
+
"video_request": {
|
| 667 |
+
"topic": "Finance personnelle",
|
| 668 |
+
"style": "cartoon",
|
| 669 |
+
"tone": "fun",
|
| 670 |
+
"language": "fr",
|
| 671 |
+
"scenes": [
|
| 672 |
+
{
|
| 673 |
+
"title": "Introduction",
|
| 674 |
+
"content": "Bienvenue dans ce cours sur la finance personnelle",
|
| 675 |
+
"duration": 8
|
| 676 |
+
}
|
| 677 |
+
]
|
| 678 |
+
},
|
| 679 |
+
"quiz_evaluation": {
|
| 680 |
+
"quiz": [
|
| 681 |
+
{
|
| 682 |
+
"question": "Question test?",
|
| 683 |
+
"options": ["A", "B", "C", "D"],
|
| 684 |
+
"answer": 0
|
| 685 |
+
}
|
| 686 |
+
],
|
| 687 |
+
"answers": [0]
|
| 688 |
+
},
|
| 689 |
+
"audio_request": {
|
| 690 |
+
"text": "Bonjour, ceci est un test de génération audio."
|
| 691 |
+
}
|
| 692 |
+
}
|
| 693 |
+
|
| 694 |
+
# ==================== LAUNCH ====================

if __name__ == "__main__":
    import uvicorn

    # Startup banner (messages kept verbatim).
    for banner_line in (
        "🚀 Lancement de JANGG AI API - Version Complète",
        "📖 Documentation: http://localhost:8000/docs",
        "🎯 Test rapide: http://localhost:8000/endpoints",
        "💡 Mode Colab Optimisé - Pas de GPU requis!",
    ):
        print(banner_line)
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
total_api_standalone.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
🤖 JANGG AI API - 100% Autonome pour Colab
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from fastapi import FastAPI, HTTPException
|
| 7 |
+
from fastapi.responses import HTMLResponse
|
| 8 |
+
from pydantic import BaseModel, Field
|
| 9 |
+
from typing import List, Dict, Optional
|
| 10 |
+
import uuid
|
| 11 |
+
import time
|
| 12 |
+
from fastapi.openapi.utils import get_openapi
|
| 13 |
+
|
| 14 |
+
# FastAPI application instance for the standalone JANGG AI API.
# Swagger UI at /docs, ReDoc at /redoc.
app = FastAPI(
    title="🤖 JANGG AI API - Standalone",
    description="API complète pour l'apprentissage avec IA - Version autonome",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
)
|
| 22 |
+
# Request/response schemas.
class LearnRequest(BaseModel):
    """Parameters for generating a personalized course."""

    topic: str   # subject to teach
    sector: str  # industry/domain context
    tone: str    # desired tone, e.g. "friendly"
    style: str   # presentation style, e.g. "simple"
    length: str  # course length selector, e.g. "full"
|
| 30 |
+
class Scene(BaseModel):
    """A single video scene: title, narration text and duration in seconds."""

    title: str
    content: str
    duration: int  # seconds
| 35 |
+
class VideoRequest(BaseModel):
    """Parameters for generating a video from a list of scenes."""

    topic: str
    style: str = "cartoon"  # visual style
    tone: str = "fun"
    language: str = "fr"    # ISO language code
    scenes: List[Scene]
| 42 |
+
class QuizEvaluation(BaseModel):
    """A quiz (list of question dicts with an "answer" index) and the user's answers."""

    quiz: List[Dict]
    answers: List[int]  # selected option index per question
| 46 |
+
class AudioRequest(BaseModel):
    """Text payload for text-to-speech generation."""

    text: str
|
| 49 |
+
# Services
|
| 50 |
+
class LearningService:
|
| 51 |
+
@staticmethod
|
| 52 |
+
def generate_course(topic: str, sector: str, tone: str, style: str, length: str) -> str:
|
| 53 |
+
return f"""
|
| 54 |
+
# {topic.title()} - Cours Complet
|
| 55 |
+
|
| 56 |
+
## Introduction
|
| 57 |
+
La {topic.lower()} est essentielle dans le secteur {sector}.
|
| 58 |
+
Approche {tone} avec style {style}.
|
| 59 |
+
|
| 60 |
+
## Concepts
|
| 61 |
+
1. Principes fondamentaux
|
| 62 |
+
2. Applications pratiques
|
| 63 |
+
3. Meilleures pratiques
|
| 64 |
+
4. Études de cas
|
| 65 |
+
|
| 66 |
+
## Conclusion
|
| 67 |
+
Félicitations! Vous maîtrisez {topic}.
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
@staticmethod
|
| 71 |
+
def generate_quiz(topic: str) -> List[Dict]:
|
| 72 |
+
return [{
|
| 73 |
+
"question": f"Importance de {topic}?",
|
| 74 |
+
"options": ["Essentielle", "Importante", "Optionnelle", "Non nécessaire"],
|
| 75 |
+
"answer": 0
|
| 76 |
+
}]
|
| 77 |
+
|
| 78 |
+
@staticmethod
|
| 79 |
+
def generate_scenes(topic: str, style: str) -> List[Scene]:
|
| 80 |
+
return [
|
| 81 |
+
Scene(title="Intro", content=f"Découvrons {topic}", duration=8),
|
| 82 |
+
Scene(title="Concepts", content=f"Concepts de {topic}", duration=10),
|
| 83 |
+
Scene(title="Conclusion", content=f"Bravo pour {topic}!", duration=8)
|
| 84 |
+
]
|
| 85 |
+
|
| 86 |
+
# Endpoints
|
| 87 |
+
@app.get("/", include_in_schema=False)
|
| 88 |
+
async def root():
|
| 89 |
+
return HTMLResponse("""
|
| 90 |
+
<h1>🤖 JANGG AI API - Standalone</h1>
|
| 91 |
+
<p>Bienvenue sur l'API autonome JANGG AI !</p>
|
| 92 |
+
<p>📚 <a href="/docs">Documentation Swagger</a></p>
|
| 93 |
+
<p>🔍 <a href="/redoc">Documentation ReDoc</a></p>
|
| 94 |
+
<p>🔄 <a href="/health">Vérifier l'état</a></p>
|
| 95 |
+
""")
|
| 96 |
+
|
| 97 |
+
@app.get("/health",
|
| 98 |
+
summary="Vérifier l'état de l'API",
|
| 99 |
+
description="Vérifie que l'API est opérationnelle",
|
| 100 |
+
response_description="État de santé de l'API")
|
| 101 |
+
async def health():
|
| 102 |
+
return {"status": "healthy", "api": "JANGG Standalone"}
|
| 103 |
+
|
| 104 |
+
@app.post("/chat/learn",
|
| 105 |
+
summary="Générer un cours et un quiz",
|
| 106 |
+
description="Crée un contenu éducatif personnalisé avec quiz intégré",
|
| 107 |
+
response_description="Retourne le cours généré et un quiz")
|
| 108 |
+
async def learn(req: LearnRequest):
|
| 109 |
+
course = LearningService.generate_course(req.topic, req.sector, req.tone, req.style, req.length)
|
| 110 |
+
quiz = LearningService.generate_quiz(req.topic)
|
| 111 |
+
return {"course": course, "quiz": quiz, "audio_url": f"audio_{uuid.uuid4().hex[:8]}.mp3"}
|
| 112 |
+
|
| 113 |
+
@app.post("/quiz/evaluate",
|
| 114 |
+
summary="Évaluer un quiz",
|
| 115 |
+
description="Calcule le score et fournit un feedback",
|
| 116 |
+
response_description="Résultats de l'évaluation")
|
| 117 |
+
async def evaluate_quiz(req: QuizEvaluation):
|
| 118 |
+
correct = sum(1 for q, a in zip(req.quiz, req.answers) if q["answer"] == a)
|
| 119 |
+
score = (correct / len(req.quiz)) * 100
|
| 120 |
+
return {"score": score, "correct": correct, "total": len(req.quiz)}
|
| 121 |
+
|
| 122 |
+
@app.post("/audio/generate",
|
| 123 |
+
summary="Générer un audio",
|
| 124 |
+
description="Convertit du texte en parole (TTS)",
|
| 125 |
+
response_description="URL de l'audio généré")
|
| 126 |
+
async def generate_audio(req: AudioRequest):
|
| 127 |
+
return {"audio_url": f"audio_{uuid.uuid4().hex[:8]}.mp3", "duration": len(req.text.split()) * 0.5}
|
| 128 |
+
|
| 129 |
+
@app.post("/video/generate",
|
| 130 |
+
summary="Générer une vidéo",
|
| 131 |
+
description="Crée une vidéo à partir de scènes",
|
| 132 |
+
response_description="Détails de la vidéo générée")
|
| 133 |
+
async def generate_video(req: VideoRequest):
|
| 134 |
+
return {
|
| 135 |
+
"video_path": f"video_{req.topic.replace(' ', '_')}_{uuid.uuid4().hex[:8]}.mp4",
|
| 136 |
+
"duration": sum(s.duration for s in req.scenes)
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
@app.post("/integrated/complete-course",
|
| 140 |
+
summary="Générer un package complet",
|
| 141 |
+
description="Cours + Quiz + Scènes vidéo en une seule requête",
|
| 142 |
+
response_description="Package complet d'apprentissage")
|
| 143 |
+
async def complete_course(req: LearnRequest):
|
| 144 |
+
course = LearningService.generate_course(req.topic, req.sector, req.tone, req.style, req.length)
|
| 145 |
+
quiz = LearningService.generate_quiz(req.topic)
|
| 146 |
+
scenes = LearningService.generate_scenes(req.topic, req.style)
|
| 147 |
+
return {
|
| 148 |
+
"course": course,
|
| 149 |
+
"quiz": quiz,
|
| 150 |
+
"video_scenes": scenes,
|
| 151 |
+
"message": "Package complet généré"
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
@app.post("/integrated/full-pipeline",
|
| 155 |
+
summary="Pipeline complet",
|
| 156 |
+
description="Génère tout le contenu + vidéo + audio",
|
| 157 |
+
response_description="Résultats du pipeline complet")
|
| 158 |
+
async def full_pipeline(req: LearnRequest):
|
| 159 |
+
course = LearningService.generate_course(req.topic, req.sector, req.tone, req.style, req.length)
|
| 160 |
+
quiz = LearningService.generate_quiz(req.topic)
|
| 161 |
+
scenes = LearningService.generate_scenes(req.topic, req.style)
|
| 162 |
+
return {
|
| 163 |
+
"course": course,
|
| 164 |
+
"quiz": quiz,
|
| 165 |
+
"video_scenes": scenes,
|
| 166 |
+
"video_path": f"full_{req.topic.replace(' ', '_')}_{uuid.uuid4().hex[:8]}.mp4",
|
| 167 |
+
"audio_url": f"audio_{uuid.uuid4().hex[:8]}.mp3"
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
if __name__ == "__main__":
|
| 171 |
+
import uvicorn
|
| 172 |
+
print("🚀 JANGG API Standalone - Prêt pour Colab!")
|
| 173 |
+
print("📖 Documentation complète: http://localhost:8000/docs")
|
| 174 |
+
print("🔍 Documentation alternative: http://localhost:8000/redoc")
|
| 175 |
+
uvicorn.run(app, host="0.0.0.0", port=8000)
|