.env.example ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # API Keys
2
+ GROQ_API_KEY=your_groq_api_key_here
3
+ OPENAI_API_KEY=your_openai_api_key_here
4
+
5
+ # Database
6
+ DATABASE_URL=sqlite:///./brain_platform.db
7
+
8
+ # Other settings
9
+ ENVIRONMENT=production
.gitignore CHANGED
@@ -1,19 +1,75 @@
 
1
  __pycache__/
2
- .venv/
3
- venv/
4
- env/
5
- .env
6
- *.pyc
7
- *.pyo
8
- *.pyd
9
  .Python
10
- .mypy_cache/
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  .pytest_cache/
 
 
 
 
 
 
 
 
 
12
  *.mp3
13
- *.mp4
14
  *.wav
15
- *.jpg
16
- *.png
17
- *.gif
18
- *.log
19
-
 
1
+ # Python
2
  __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
 
 
 
 
6
  .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+ MANIFEST
23
+
24
+ # Virtual environments
25
+ .env
26
+ .venv
27
+ env/
28
+ venv/
29
+ ENV/
30
+ env.bak/
31
+ venv.bak/
32
+ demo-env/
33
+
34
+ # IDE
35
+ .vscode/
36
+ .idea/
37
+ *.swp
38
+ *.swo
39
+ *~
40
+
41
+ # OS
42
+ .DS_Store
43
+ .DS_Store?
44
+ ._*
45
+ .Spotlight-V100
46
+ .Trashes
47
+ ehthumbs.db
48
+ Thumbs.db
49
+
50
+ # Logs
51
+ *.log
52
+ logs/
53
+
54
+ # Database
55
+ *.db
56
+ *.sqlite3
57
+
58
+ # Jupyter Notebook
59
+ .ipynb_checkpoints
60
+
61
+ # pytest
62
+ .coverage
63
  .pytest_cache/
64
+ htmlcov/
65
+
66
+ # mypy
67
+ .mypy_cache/
68
+ .dmypy.json
69
+ dmypy.json
70
+
71
+ # Audio files
72
+ app/static/audio/
73
  *.mp3
 
74
  *.wav
75
+ *.ogg
 
 
 
 
README.md CHANGED
@@ -1,44 +1,11 @@
1
  ---
2
  title: JANGG AI API
3
- emoji: 🤖
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 6.5.1
8
- python_version: '3.10'
9
- app_file: main.py
10
  pinned: false
 
11
  ---
12
 
13
- # JANGG AI API
14
-
15
- API complète pour la génération de contenu éducatif avec IA
16
-
17
- ## Fonctionnalités
18
-
19
- - Génération de cours personnalisés
20
- - Création de quiz interactifs
21
- - Synthèse vocale (TTS)
22
- - Génération de vidéos éducatives
23
-
24
- ## Configuration requise
25
-
26
- - Python 3.10+
27
- - Voir [requirements.txt](cci:7://file:///home/fredy/T%C3%A9l%C3%A9chargements/JANG%20API/Jangg-/requirements.txt:0:0-0:0) pour les dépendances
28
-
29
- ## Installation locale
30
-
31
- ```bash
32
- # Cloner le dépôt
33
- git clone [https://huggingface.co/spaces/FredyHoundayi/JANGG_AI_API](https://huggingface.co/spaces/FredyHoundayi/JANGG_AI_API)
34
- cd JANGG_AI_API
35
-
36
- # Créer un environnement virtuel
37
- python -m venv venv
38
- source venv/bin/activate # Sur Windows: venv\Scripts\activate
39
-
40
- # Installer les dépendances
41
- pip install -r requirements.txt
42
-
43
- # Lancer l'API
44
- uvicorn main:app --reload
 
1
  ---
2
  title: JANGG AI API
3
+ emoji: 🌖
4
+ colorFrom: indigo
5
+ colorTo: pink
6
+ sdk: docker
 
 
 
7
  pinned: false
8
+ short_description: Demo - Friday 30/01/2026
9
  ---
10
 
11
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/main.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""FastAPI application entry point for the JANGG AI API."""

from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse

from app.routers import chat, quiz, audio

# Single source of truth for the API version: surfaced both in the OpenAPI
# metadata (/docs) and in the /health payload, which previously hard-coded
# "1.0.0" while FastAPI() declared no version at all.
API_VERSION = "1.0.0"

app = FastAPI(
    title="JANGG AI API",
    description="Intelligent API for interactive learning with AI",
    version=API_VERSION,
)

# Serve static assets (e.g. generated audio files) from app/static at /static.
app.mount("/static", StaticFiles(directory="app/static"), name="static")

# Routers are expected to declare their own URL prefixes (/chat, /quiz, /audio)
# — presumably via APIRouter(prefix=...); verify against the router modules.
app.include_router(chat.router)
app.include_router(quiz.router)
app.include_router(audio.router)

# The landing page is completely static, so build the markup once at import
# time instead of re-creating the string literal on every request.
_INDEX_HTML = """
<!DOCTYPE html>
<html>
<head>
    <title>JANGG AI API</title>
    <style>
        body { font-family: Arial, sans-serif; max-width: 800px; margin: 0 auto; padding: 20px; }
        .header { text-align: center; color: #2c3e50; margin-bottom: 30px; }
        .endpoint { background: #f8f9fa; padding: 15px; margin: 10px 0; border-radius: 5px; border-left: 4px solid #3498db; }
        .method { color: #27ae60; font-weight: bold; }
        .path { color: #2980b9; font-family: monospace; }
        .description { color: #34495e; margin-top: 5px; }
        .docs-link { background: #3498db; color: white; padding: 10px 20px; text-decoration: none; border-radius: 5px; display: inline-block; margin-top: 20px; }
    </style>
</head>
<body>
    <div class="header">
        <h1>🤖 JANGG AI API</h1>
        <p>Intelligent API for interactive learning with AI</p>
    </div>

    <h2>📚 Available Endpoints</h2>

    <div class="endpoint">
        <span class="method">GET</span> <span class="path">/</span>
        <div class="description">API home page</div>
    </div>

    <div class="endpoint">
        <span class="method">GET</span> <span class="path">/health</span>
        <div class="description">API health check</div>
    </div>

    <div class="endpoint">
        <span class="method">POST</span> <span class="path">/chat/</span>
        <div class="description">AI chat interface for interactive conversations</div>
    </div>

    <div class="endpoint">
        <span class="method">POST</span> <span class="path">/quiz/generate</span>
        <div class="description">Generate personalized quizzes based on a topic</div>
    </div>

    <div class="endpoint">
        <span class="method">POST</span> <span class="path">/quiz/validate</span>
        <div class="description">Validate quiz answers</div>
    </div>

    <div class="endpoint">
        <span class="method">POST</span> <span class="path">/audio/text-to-speech</span>
        <div class="description">Convert text to speech</div>
    </div>

    <div class="endpoint">
        <span class="method">POST</span> <span class="path">/audio/speech-to-text</span>
        <div class="description">Convert speech to text</div>
    </div>

    <div style="text-align: center; margin-top: 30px;">
        <a href="/docs" class="docs-link">📖 Interactive Documentation (Swagger)</a>
        <br><br>
        <a href="/redoc" class="docs-link">📋 Alternative Documentation (ReDoc)</a>
    </div>

    <div style="text-align: center; margin-top: 30px; color: #7f8c8d;">
        <p>© 2026 JANGG AI API - Intelligent Learning Platform</p>
    </div>
</body>
</html>
"""


@app.get("/", response_class=HTMLResponse)
async def root():
    """Serve the static HTML landing page listing the available endpoints."""
    return HTMLResponse(content=_INDEX_HTML)


@app.get("/health")
async def health_check():
    """Lightweight liveness probe.

    Returns a small JSON payload; suitable for container/platform health checks.
    """
    return {"status": "healthy", "api": "JANGG AI API", "version": API_VERSION}
app/prompts/complete_course_prompt.py DELETED
@@ -1,47 +0,0 @@
1
- COMPLETE_COURSE_TEMPLATE = """
2
- You are a friendly AI tutor and content creator.
3
-
4
- Generate a complete learning package about {topic} for the {sector} sector.
5
-
6
- Parameters:
7
- - Tone: {tone}
8
- - Style: {style}
9
- - Length: {length}
10
- - Language: French
11
-
12
- Requirements:
13
- 1. Generate engaging course content
14
- 2. Create relevant quiz questions
15
- 3. Structure content for video generation with scenes
16
-
17
- Format your response as JSON:
18
-
19
- {{
20
- "course": "Complete course content with clear sections and examples",
21
- "quiz": [
22
- {{
23
- "question": "Clear question about the content",
24
- "options": ["Option A", "Option B", "Option C", "Option D"],
25
- "answer": 2
26
- }}
27
- ],
28
- "video_scenes": [
29
- {{
30
- "title": "Scene title",
31
- "content": "Narration text for this scene (2-3 sentences)",
32
- "duration": 8,
33
- "visual_prompt": "Visual description for AI image generation"
34
- }}
35
- ]
36
- }}
37
-
38
- Rules:
39
- - Course: Clear, structured, beginner-friendly with practical examples
40
- - Quiz: 5 questions testing key concepts
41
- - Video Scenes: 5-8 scenes covering the main topics
42
- - Each scene content should be speakable (natural for text-to-speech)
43
- - Visual prompts should be descriptive for image generation
44
- - Duration per scene: 5-12 seconds based on content length
45
-
46
- Return only valid JSON.
47
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/routers/integrated.py DELETED
@@ -1,130 +0,0 @@
1
- from fastapi import APIRouter, HTTPException
2
- from app.schemas.chat import LearnRequest
3
- from app.schemas.complete_course import CompleteCourseResponse, VideoFromScenesRequest
4
- from app.services.learning_agent import generate_complete_learning_package
5
- from app.services.video_service import video_service
6
- import time
7
-
8
- router = APIRouter(prefix="/integrated", tags=["Integrated Learning"])
9
-
10
-
11
- @router.post("/complete-course", response_model=CompleteCourseResponse)
12
- def generate_complete_learning_experience(req: LearnRequest):
13
- """
14
- Generate a complete learning package in a single LLM inference.
15
-
16
- This endpoint generates:
17
- 1. Course content
18
- 2. Quiz questions
19
- 3. Video-ready scenes with visual prompts
20
-
21
- All content is generated coherently in one LLM call for better consistency.
22
- """
23
- try:
24
- start_time = time.time()
25
-
26
- # Generate everything in one inference
27
- complete_data = generate_complete_learning_package(req.dict())
28
-
29
- processing_time = time.time() - start_time
30
-
31
- return CompleteCourseResponse(
32
- course=complete_data["course"],
33
- quiz=complete_data["quiz"],
34
- video_scenes=complete_data["video_scenes"],
35
- message="Complete learning package generated successfully",
36
- generation_method="unified_llm"
37
- )
38
-
39
- except Exception as e:
40
- raise HTTPException(
41
- status_code=500,
42
- detail=f"Complete course generation failed: {str(e)}"
43
- )
44
-
45
-
46
- @router.post("/video-from-scenes")
47
- def generate_video_from_scenes(req: VideoFromScenesRequest):
48
- """
49
- Generate video from pre-generated scenes.
50
-
51
- Use this with scenes from /integrated/complete-course for optimal results.
52
- """
53
- try:
54
- start_time = time.time()
55
-
56
- # Convert scenes to video service format
57
- video_data = {
58
- "topic": req.topic,
59
- "style": req.style,
60
- "tone": req.tone,
61
- "language": req.language,
62
- "scenes": [
63
- {
64
- "title": scene.title,
65
- "content": scene.content,
66
- "duration": scene.duration
67
- }
68
- for scene in req.scenes
69
- ]
70
- }
71
-
72
- video_path = video_service.generate_course_video(video_data)
73
-
74
- processing_time = time.time() - start_time
75
-
76
- return {
77
- "video_path": video_path,
78
- "scenes_used": len(req.scenes),
79
- "processing_time": processing_time,
80
- "message": "Video generated from scenes successfully"
81
- }
82
-
83
- except Exception as e:
84
- raise HTTPException(
85
- status_code=500,
86
- detail=f"Video from scenes generation failed: {str(e)}"
87
- )
88
-
89
-
90
- @router.post("/full-pipeline")
91
- def generate_full_learning_pipeline(req: LearnRequest):
92
- """
93
- Complete pipeline: generate content AND create video in one call.
94
-
95
- This combines content generation and video creation for maximum convenience.
96
- """
97
- try:
98
- start_time = time.time()
99
-
100
- # Step 1: Generate complete content package
101
- complete_data = generate_complete_learning_package(req.dict())
102
-
103
- # Step 2: Generate video from the scenes
104
- video_data = {
105
- "topic": req.topic,
106
- "style": req.style,
107
- "tone": req.tone,
108
- "language": "fr",
109
- "scenes": complete_data["video_scenes"]
110
- }
111
-
112
- video_path = video_service.generate_course_video(video_data)
113
-
114
- total_time = time.time() - start_time
115
-
116
- return {
117
- "course": complete_data["course"],
118
- "quiz": complete_data["quiz"],
119
- "video_scenes": complete_data["video_scenes"],
120
- "video_path": video_path,
121
- "total_processing_time": total_time,
122
- "generation_method": "unified_llm_plus_video",
123
- "message": "Full learning pipeline completed successfully"
124
- }
125
-
126
- except Exception as e:
127
- raise HTTPException(
128
- status_code=500,
129
- detail=f"Full pipeline generation failed: {str(e)}"
130
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/routers/video.py DELETED
@@ -1,72 +0,0 @@
1
- import time
2
- from fastapi import APIRouter, HTTPException
3
- from app.schemas.video import VideoRequest, VideoResponse
4
- from app.services.video_service import video_service
5
-
6
- router = APIRouter(prefix="/video", tags=["Video"])
7
-
8
-
9
- @router.post("/generate", response_model=VideoResponse)
10
- def generate_course_video(request: VideoRequest):
11
- """
12
- Generate a course video based on the provided scenes and settings.
13
-
14
- This endpoint creates a video by:
15
- 1. Generating images for each scene using Stable Diffusion
16
- 2. Converting text to speech using gTTS
17
- 3. Creating video segments with FFmpeg
18
- 4. Concatenating all segments into a final video
19
- """
20
- try:
21
- start_time = time.time()
22
-
23
- # Convert request to dict for service
24
- course_data = {
25
- "topic": request.topic,
26
- "style": request.style,
27
- "tone": request.tone,
28
- "language": request.language,
29
- "scenes": [
30
- {
31
- "title": scene.title,
32
- "content": scene.content,
33
- "duration": scene.duration
34
- }
35
- for scene in request.scenes
36
- ]
37
- }
38
-
39
- # Generate video
40
- video_path = video_service.generate_course_video(course_data)
41
-
42
- processing_time = time.time() - start_time
43
-
44
- return VideoResponse(
45
- video_path=video_path,
46
- message="Video generated successfully",
47
- processing_time=processing_time
48
- )
49
-
50
- except Exception as e:
51
- raise HTTPException(
52
- status_code=500,
53
- detail=f"Video generation failed: {str(e)}"
54
- )
55
-
56
-
57
- @router.get("/health")
58
- def video_health_check():
59
- """Check if video generation service is available"""
60
- try:
61
- # Check if the model is loaded
62
- model_loaded = video_service.pipe is not None
63
- return {
64
- "status": "healthy" if model_loaded else "degraded",
65
- "model_loaded": model_loaded,
66
- "service": "video_generation"
67
- }
68
- except Exception as e:
69
- raise HTTPException(
70
- status_code=503,
71
- detail=f"Video service unavailable: {str(e)}"
72
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/schemas/complete_course.py DELETED
@@ -1,25 +0,0 @@
1
- from pydantic import BaseModel, Field
2
- from typing import List, Dict
3
-
4
-
5
- class VideoScene(BaseModel):
6
- title: str = Field(..., description="Title of the video scene")
7
- content: str = Field(..., description="Narration text for the scene")
8
- duration: int = Field(..., description="Duration in seconds")
9
- visual_prompt: str = Field(..., description="Visual description for AI image generation")
10
-
11
-
12
- class CompleteCourseResponse(BaseModel):
13
- course: str = Field(..., description="Complete course content")
14
- quiz: List[Dict] = Field(..., description="Quiz questions with options and answers")
15
- video_scenes: List[VideoScene] = Field(..., description="Scenes ready for video generation")
16
- message: str = Field(default="Complete learning package generated successfully")
17
- generation_method: str = Field(default="unified", description="How the content was generated")
18
-
19
-
20
- class VideoFromScenesRequest(BaseModel):
21
- topic: str = Field(..., description="Topic of the course")
22
- style: str = Field(default="cartoon", description="Visual style")
23
- tone: str = Field(default="educational", description="Content tone")
24
- language: str = Field(default="fr", description="Language for audio")
25
- scenes: List[VideoScene] = Field(..., description="Pre-generated scenes")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/schemas/video.py DELETED
@@ -1,22 +0,0 @@
1
- from pydantic import BaseModel, Field
2
- from typing import List, Optional
3
-
4
-
5
- class Scene(BaseModel):
6
- title: str = Field(..., description="Title of the scene")
7
- content: str = Field(..., description="Content/narration for the scene")
8
- duration: int = Field(..., description="Duration in seconds")
9
-
10
-
11
- class VideoRequest(BaseModel):
12
- topic: str = Field(..., description="Topic of the course")
13
- style: str = Field(default="cartoon", description="Visual style for video generation")
14
- tone: str = Field(default="fun", description="Tone of the content")
15
- language: str = Field(default="fr", description="Language for audio generation")
16
- scenes: List[Scene] = Field(..., description="List of scenes for the video")
17
-
18
-
19
- class VideoResponse(BaseModel):
20
- video_path: str = Field(..., description="Path to the generated video file")
21
- message: str = Field(..., description="Status message")
22
- processing_time: Optional[float] = Field(None, description="Total processing time in seconds")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/services/learning_agent.py CHANGED
@@ -2,13 +2,12 @@ from langchain_core.prompts import PromptTemplate
2
  from app.core.llm import llm
3
  from app.prompts.course_prompt import COURSE_TEMPLATE
4
  from app.prompts.quiz_prompt import QUIZ_TEMPLATE
5
- from app.prompts.complete_course_prompt import COMPLETE_COURSE_TEMPLATE
6
  import json
7
  import re
8
 
9
 
10
  def generate_course_and_quiz(params):
11
- """Legacy function - generates course and quiz separately"""
12
  course_prompt = PromptTemplate.from_template(COURSE_TEMPLATE)
13
 
14
  course = llm.invoke(
@@ -37,71 +36,3 @@ def generate_course_and_quiz(params):
37
  ]
38
 
39
  return course, quiz
40
-
41
-
42
- def generate_complete_learning_package(params):
43
- """
44
- Generate course, quiz, and video-ready scenes in a single LLM inference.
45
-
46
- Returns:
47
- dict: {
48
- "course": str,
49
- "quiz": list,
50
- "video_scenes": list
51
- }
52
- """
53
- complete_prompt = PromptTemplate.from_template(COMPLETE_COURSE_TEMPLATE)
54
-
55
- response = llm.invoke(
56
- complete_prompt.format(**params)
57
- ).content
58
-
59
- # Extract JSON from response
60
- json_match = re.search(r'\{.*\}', response, re.DOTALL)
61
- if json_match:
62
- try:
63
- complete_data = json.loads(json_match.group(0))
64
-
65
- # Validate structure
66
- required_keys = ["course", "quiz", "video_scenes"]
67
- if all(key in complete_data for key in required_keys):
68
- return complete_data
69
- else:
70
- raise ValueError("Missing required keys in LLM response")
71
-
72
- except json.JSONDecodeError as e:
73
- print(f"JSON parsing error: {e}")
74
- return _fallback_generation(params)
75
- else:
76
- print("No JSON found in LLM response")
77
- return _fallback_generation(params)
78
-
79
-
80
- def _fallback_generation(params):
81
- """Fallback to separate generation if unified approach fails"""
82
- print("Falling back to separate generation...")
83
- course, quiz = generate_course_and_quiz(params)
84
-
85
- # Basic scene parsing from course
86
- scenes = []
87
- sections = re.split(r'\n\d+\.\s*|\n\n', course)
88
- sections = [s.strip() for s in sections if s.strip()]
89
-
90
- for i, section in enumerate(sections[:8]):
91
- sentences = section.split('.')
92
- title = sentences[0].strip() if sentences else section[:50]
93
- content = section.strip()
94
- duration = max(5, min(15, len(content.split()) * 0.5))
95
-
96
- scenes.append({
97
- "title": title,
98
- "content": content,
99
- "duration": int(duration),
100
- "visual_prompt": f"{params.get('style', 'cartoon')}, {title}, educational content"
101
- })
102
-
103
- return {
104
- "course": course,
105
- "quiz": quiz,
106
- "video_scenes": scenes
107
- }
 
2
  from app.core.llm import llm
3
  from app.prompts.course_prompt import COURSE_TEMPLATE
4
  from app.prompts.quiz_prompt import QUIZ_TEMPLATE
 
5
  import json
6
  import re
7
 
8
 
9
  def generate_course_and_quiz(params):
10
+
11
  course_prompt = PromptTemplate.from_template(COURSE_TEMPLATE)
12
 
13
  course = llm.invoke(
 
36
  ]
37
 
38
  return course, quiz
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/services/video_service.py DELETED
@@ -1,123 +0,0 @@
1
- import os
2
- import uuid
3
- import subprocess
4
- import time
5
- from typing import Dict
6
- from gtts import gTTS
7
- from pydub import AudioSegment
8
- import torch
9
- from diffusers import StableDiffusionPipeline
10
-
11
-
12
- class VideoService:
13
- def __init__(self):
14
- self.pipe = None
15
- self._load_model()
16
-
17
- def _load_model(self):
18
- """Load Stable Diffusion model once"""
19
- try:
20
- self.pipe = StableDiffusionPipeline.from_pretrained(
21
- "runwayml/stable-diffusion-v1-5",
22
- torch_dtype=torch.float16
23
- ).to("cuda")
24
- except Exception as e:
25
- print(f"Warning: Could not load GPU model, falling back to CPU: {e}")
26
- self.pipe = StableDiffusionPipeline.from_pretrained(
27
- "runwayml/stable-diffusion-v1-5"
28
- )
29
-
30
- def _ensure_directories(self):
31
- """Create necessary directories"""
32
- os.makedirs("tmp/images", exist_ok=True)
33
- os.makedirs("tmp/audio", exist_ok=True)
34
- os.makedirs("output/videos", exist_ok=True)
35
-
36
- def _generate_image(self, prompt: str, idx: int) -> str:
37
- """Generate image for a scene"""
38
- image = self.pipe(prompt, num_inference_steps=20).images[0]
39
- img_path = f"tmp/images/scene_{idx}.png"
40
- image.save(img_path)
41
- return img_path
42
-
43
- def _generate_audio(self, text: str, language: str, idx: int) -> tuple:
44
- """Generate audio for a scene"""
45
- audio_mp3 = f"tmp/audio/scene_{idx}.mp3"
46
- tts = gTTS(text=text, lang=language)
47
- tts.save(audio_mp3)
48
-
49
- # Convert to wav
50
- audio_wav = audio_mp3.replace(".mp3", ".wav")
51
- subprocess.run(["ffmpeg", "-y", "-i", audio_mp3, audio_wav], check=True)
52
-
53
- # Get duration
54
- audio_duration = AudioSegment.from_file(audio_wav).duration_seconds
55
-
56
- return audio_wav, audio_duration
57
-
58
- def _create_video_segment(self, img_path: str, audio_wav: str, duration: float, idx: int) -> str:
59
- """Create a video segment from image and audio"""
60
- segment_path = f"tmp/segment_{idx}.mp4"
61
- subprocess.run([
62
- "ffmpeg", "-y",
63
- "-loop", "1", "-i", img_path,
64
- "-i", audio_wav,
65
- "-c:v", "libx264",
66
- "-t", str(duration),
67
- "-pix_fmt", "yuv420p",
68
- "-c:a", "aac",
69
- segment_path
70
- ], check=True)
71
- return segment_path
72
-
73
- def _concatenate_segments(self, segments: list) -> str:
74
- """Concatenate all video segments"""
75
- concat_file = "tmp/segments.txt"
76
- with open(concat_file, "w") as f:
77
- for seg in segments:
78
- f.write(f"file '{os.path.abspath(seg)}'\n")
79
-
80
- output_video = f"output/videos/course_{uuid.uuid4().hex}.mp4"
81
- subprocess.run([
82
- "ffmpeg", "-y", "-f", "concat", "-safe", "0",
83
- "-i", concat_file,
84
- "-c", "copy",
85
- output_video
86
- ], check=True)
87
-
88
- return output_video
89
-
90
- def generate_course_video(self, course_data: Dict) -> str:
91
- """Generate a complete course video from course data"""
92
- start_time = time.time()
93
-
94
- self._ensure_directories()
95
- segments = []
96
-
97
- for idx, scene in enumerate(course_data["scenes"]):
98
- # Generate image
99
- prompt = f"{course_data.get('style', 'cartoon')}, {scene['title']}, {scene['content']}"
100
- img_path = self._generate_image(prompt, idx)
101
-
102
- # Generate audio
103
- audio_wav, audio_duration = self._generate_audio(
104
- scene["content"],
105
- course_data.get("language", "fr"),
106
- idx
107
- )
108
-
109
- # Create video segment
110
- segment_path = self._create_video_segment(img_path, audio_wav, audio_duration, idx)
111
- segments.append(segment_path)
112
-
113
- # Concatenate all segments
114
- output_video = self._concatenate_segments(segments)
115
-
116
- processing_time = time.time() - start_time
117
- print(f"Video generated in {processing_time:.2f} seconds")
118
-
119
- return output_video
120
-
121
-
122
- # Global instance
123
- video_service = VideoService()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
main.py CHANGED
@@ -1,34 +1,5 @@
1
- from fastapi import FastAPI
2
- from fastapi.middleware.cors import CORSMiddleware
3
- from app.routers import chat, quiz, audio, video, integrated
4
-
5
- app = FastAPI(
6
- title="🤖 JANGG AI API",
7
- description="API complète pour la génération de contenu éducatif avec IA",
8
- version="1.0.0"
9
- )
10
-
11
- # Configuration CORS
12
- app.add_middleware(
13
- CORSMiddleware,
14
- allow_origins=["*"],
15
- allow_credentials=True,
16
- allow_methods=["*"],
17
- allow_headers=["*"],
18
- )
19
-
20
- # Inclusion des routeurs
21
- app.include_router(chat.router, prefix="/chat", tags=["Chat"])
22
- app.include_router(quiz.router, prefix="/quiz", tags=["Quiz"])
23
- app.include_router(audio.router, prefix="/audio", tags=["Audio"])
24
- app.include_router(video.router, prefix="/video", tags=["Video"])
25
- app.include_router(integrated.router, prefix="/integrated", tags=["Integrated"])
26
-
27
- @app.get("/", include_in_schema=False)
28
- async def root():
29
- return {"message": "Bienvenue sur l'API JANGG AI"}
30
 
31
  if __name__ == "__main__":
32
  import uvicorn
33
- uvicorn.run("main:app", host="0.0.0.0", port=7860, reload=True)
34
-
 
1
# Compatibility shim: the application implementation moved to app/main.py.
# Re-exporting `app` here keeps existing entry points such as
# `uvicorn main:app` working unchanged.
from app.main import app

if __name__ == "__main__":
    import uvicorn

    # Run directly for local development. Port 7860 matches the previous
    # entry point (and is the conventional Hugging Face Spaces port —
    # confirm against the deployment config).
    uvicorn.run(app, host="0.0.0.0", port=7860)
 
requirements.txt CHANGED
@@ -1,16 +1,23 @@
1
- fastapi>=0.68.0
2
- uvicorn>=0.15.0
3
- pydantic>=1.8.0
4
- gtts>=2.3.0
5
- pydub>=0.25.1
6
- python-dotenv>=0.19.0
7
- diffusers>=0.10.0
8
- torch>=1.12.0
9
- torchvision>=0.13.0
10
- transformers>=4.21.0
11
- accelerate>=0.12.0
12
- python-multipart>=0.0.5
13
- langchain-core
14
- langchain-groq
15
- pydantic-settings
16
-
 
 
 
 
 
 
 
 
1
+ fastapi>=0.104.0
2
+ uvicorn[standard]>=0.24.0
3
+ python-dotenv>=1.0.0
4
+ pydantic>=2.5.0
5
+ sqlalchemy>=2.0.0
6
+ asyncpg>=0.29.0
7
+ alembic>=1.13.0
8
+ redis>=5.0.0
9
+ openai>=1.3.0
10
+ langchain>=0.0.350
11
+ langchain-groq>=0.0.1
12
+ langchain-community>=0.4.0
13
+ crewai>=0.1.4
14
+ crewai-tools>=0.2.0
15
+ python-jose[cryptography]>=3.3.0
16
+ passlib[bcrypt]>=1.7.4
17
+ python-multipart>=0.0.6
18
+ httpx>=0.25.0
19
+ pandas>=2.1.0
20
+ numpy>=1.25.0
21
+ pytest>=7.4.0
22
+ pytest-asyncio>=0.21.0
23
+ gtts>=2.4.0