sushilideaclan01 commited on
Commit
91d209c
·
0 Parent(s):

first push

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .dockerignore +54 -0
  2. .env.example +26 -0
  3. .gitignore +61 -0
  4. AUTH_SETUP.md +194 -0
  5. Dockerfile.hf +64 -0
  6. HUGGINGFACE_DEPLOY.md +294 -0
  7. README.md +108 -0
  8. api/__init__.py +2 -0
  9. api/auth.py +137 -0
  10. api/frame_extraction.py +232 -0
  11. api/image_service.py +61 -0
  12. api/prompt_generation.py +350 -0
  13. api/replicate_service.py +289 -0
  14. api/video_export.py +250 -0
  15. api/video_generation.py +614 -0
  16. api/whisper_service.py +166 -0
  17. frontend/.gitignore +24 -0
  18. frontend/FLOW.md +512 -0
  19. frontend/README.md +127 -0
  20. frontend/eslint.config.js +23 -0
  21. frontend/index.html +21 -0
  22. frontend/package-lock.json +2776 -0
  23. frontend/package.json +27 -0
  24. frontend/postcss.config.js +7 -0
  25. frontend/public/vite.svg +12 -0
  26. frontend/src/App.css +42 -0
  27. frontend/src/App.tsx +192 -0
  28. frontend/src/assets/react.svg +1 -0
  29. frontend/src/components/ErrorDisplay.tsx +70 -0
  30. frontend/src/components/GenerationComplete.tsx +375 -0
  31. frontend/src/components/GenerationForm.tsx +1362 -0
  32. frontend/src/components/GenerationProgress.tsx +411 -0
  33. frontend/src/components/Icons.tsx +277 -0
  34. frontend/src/components/Login.tsx +179 -0
  35. frontend/src/components/ProviderSelect.tsx +158 -0
  36. frontend/src/components/index.ts +8 -0
  37. frontend/src/context/AuthContext.tsx +80 -0
  38. frontend/src/context/GenerationContext.tsx +171 -0
  39. frontend/src/index.css +263 -0
  40. frontend/src/main.tsx +10 -0
  41. frontend/src/types/index.ts +215 -0
  42. frontend/src/utils/api.ts +611 -0
  43. frontend/src/vite-env.d.ts +10 -0
  44. frontend/tailwind.config.js +91 -0
  45. frontend/tsconfig.app.json +28 -0
  46. frontend/tsconfig.json +25 -0
  47. frontend/tsconfig.node.json +10 -0
  48. frontend/vite.config.ts +21 -0
  49. main.py +180 -0
  50. models/__init__.py +2 -0
.dockerignore ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ venv/
8
+ env/
9
+ ENV/
10
+ .venv
11
+
12
+ # Frontend
13
+ frontend/node_modules/
14
+ frontend/dist/
15
+ frontend/.vite/
16
+
17
+ # IDE
18
+ .vscode/
19
+ .idea/
20
+ *.swp
21
+ *.swo
22
+ *~
23
+
24
+ # Environment
25
+ .env
26
+ .env.local
27
+ .env.*.local
28
+
29
+ # Storage
30
+ storage/images/*
31
+ storage/videos/*
32
+ !storage/images/.gitkeep
33
+ !storage/videos/.gitkeep
34
+
35
+ # Git
36
+ .git/
37
+ .gitignore
38
+
39
+ # Documentation
40
+ *.md
41
+ !README.md
42
+
43
+ # Docker
44
+ Dockerfile
45
+ docker-compose.yml
46
+ .dockerignore
47
+
48
+ # Output
49
+ output_videos/
50
+
51
+ # OS
52
+ .DS_Store
53
+ Thumbs.db
54
+
.env.example ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Environment Configuration
2
+ # Copy this file to .env.local for local development or set these in your deployment platform
3
+
4
+ # Server Configuration
5
+ SERVER_PORT=4000
6
+ PUBLIC_URL=http://localhost:4000
7
+ ENVIRONMENT=production
8
+
9
+ # API Keys (Required)
10
+ # Get your KIE API key at: https://kie.ai/api-key
11
+ KIE_API_KEY=your_kie_api_key_here
12
+
13
+ # Optional API Keys
14
+ # Get your Gemini API key at: https://makersuite.google.com/app/apikey
15
+ VITE_GEMINI_API_KEY=your_gemini_api_key_here
16
+
17
+ # Get your OpenAI API key at: https://platform.openai.com/api-keys
18
+ OPENAI_API_KEY=your_openai_api_key_here
19
+
20
+ # Get your Replicate API token at: https://replicate.com/account/api-tokens
21
+ REPLICATE_API_TOKEN=your_replicate_token_here
22
+
23
+ # Frontend Configuration (for build)
24
+ # Set this to your backend URL when building frontend
25
+ # Example: VITE_API_BASE_URL=https://api.yourdomain.com
26
+ VITE_API_BASE_URL=http://localhost:4000
.gitignore ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+
23
+ # Virtual Environment
24
+ venv/
25
+ env/
26
+ ENV/
27
+ .venv
28
+
29
+ # Environment Variables
30
+ .env
31
+ .env.local
32
+ .env.*.local
33
+
34
+ # Storage
35
+ storage/images/*
36
+ storage/videos/*
37
+ !storage/images/.gitkeep
38
+ !storage/videos/.gitkeep
39
+
40
+ # IDE
41
+ .vscode/
42
+ .idea/
43
+ *.swp
44
+ *.swo
45
+ *~
46
+
47
+ # OS
48
+ .DS_Store
49
+ Thumbs.db
50
+
51
+ # Testing
52
+ .pytest_cache/
53
+ .coverage
54
+ htmlcov/
55
+
56
+ # Logs
57
+ *.log
58
+ logs/
59
+
60
+ # Embedded repositories
61
+ Video_AdGenesis_App/
AUTH_SETUP.md ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Authentication Setup Guide
2
+
3
+ Your Video Genesis Studio now has authentication enabled to restrict access to authorized users only.
4
+
5
+ ## How It Works
6
+
7
+ 1. **Login Required**: Users must log in before accessing the app
8
+ 2. **JWT Tokens**: Authentication uses JWT (JSON Web Tokens) for secure, stateless authentication
9
+ 3. **Protected Routes**: All API endpoints require a valid token (except `/health` and `/api/auth/*`)
10
+
11
+ ## Setting Up Users
12
+
13
+ ### Option 1: Environment Variable (Recommended for Production)
14
+
15
+ Set the `ALLOWED_USERS` environment variable with your user credentials:
16
+
17
+ ```bash
18
+ # Format: "username1:password1,username2:password2"
19
+ ALLOWED_USERS="admin:your-secure-password,user1:password123,user2:password456"
20
+ ```
21
+
22
+ **For Hugging Face Spaces:**
23
+ 1. Go to your Space settings
24
+ 2. Navigate to "Variables and secrets"
25
+ 3. Add a new variable:
26
+ - **Key**: `ALLOWED_USERS`
27
+ - **Value**: `admin:your-password,user1:pass1,user2:pass2`
28
+ 4. Save and restart your Space
29
+
30
+ ### Option 2: Default Credentials (Development Only)
31
+
32
+ If `ALLOWED_USERS` is not set, the app uses default credentials:
33
+ - **Username**: `admin`
34
+ - **Password**: `admin`
35
+
36
+ ⚠️ **Warning**: Never use default credentials in production!
37
+
38
+ ## Security Configuration
39
+
40
+ ### JWT Secret Key
41
+
42
+ Set a strong secret key for JWT token signing:
43
+
44
+ ```bash
45
+ JWT_SECRET_KEY=your-very-secure-random-secret-key-here
46
+ ```
47
+
48
+ **For Hugging Face Spaces:**
49
+ - Add `JWT_SECRET_KEY` to your Space environment variables
50
+ - Use a long, random string (at least 32 characters)
51
+
52
+ ### Generating a Secure Secret Key
53
+
54
+ You can generate a secure key using Python:
55
+
56
+ ```python
57
+ import secrets
58
+ print(secrets.token_urlsafe(32))
59
+ ```
60
+
61
+ Or using OpenSSL:
62
+
63
+ ```bash
64
+ openssl rand -base64 32
65
+ ```
66
+
67
+ ## User Management
68
+
69
+ ### Adding Users
70
+
71
+ Add users by updating the `ALLOWED_USERS` environment variable:
72
+
73
+ ```bash
74
+ # Add a new user
75
+ ALLOWED_USERS="admin:pass1,user1:pass2,newuser:newpass"
76
+ ```
77
+
78
+ ### Removing Users
79
+
80
+ Remove users by updating `ALLOWED_USERS`:
81
+
82
+ ```bash
83
+ # Remove user1
84
+ ALLOWED_USERS="admin:pass1,user2:pass2"
85
+ ```
86
+
87
+ ### Changing Passwords
88
+
89
+ Update the password in `ALLOWED_USERS`:
90
+
91
+ ```bash
92
+ # Change admin password
93
+ ALLOWED_USERS="admin:newpassword,user1:pass1"
94
+ ```
95
+
96
+ ## API Endpoints
97
+
98
+ ### Public Endpoints (No Auth Required)
99
+ - `GET /health` - Health check
100
+ - `POST /api/auth/login` - Login endpoint
101
+ - `GET /api/auth/verify` - Verify token (note: unlike the endpoints above, this one still requires a valid token in the Authorization header)
102
+
103
+ ### Protected Endpoints (Auth Required)
104
+ All other `/api/*` endpoints require a valid JWT token in the Authorization header:
105
+ ```
106
+ Authorization: Bearer <token>
107
+ ```
108
+
109
+ ## Frontend Authentication
110
+
111
+ The frontend automatically:
112
+ 1. Shows login page if not authenticated
113
+ 2. Stores JWT token in localStorage
114
+ 3. Includes token in all API requests
115
+ 4. Redirects to login on 401 errors
116
+ 5. Provides logout functionality
117
+
118
+ ## Token Expiration
119
+
120
+ Tokens expire after **7 days** by default. Users will need to log in again after expiration.
121
+
122
+ To change expiration, modify `ACCESS_TOKEN_EXPIRE_HOURS` in `api/auth.py`.
123
+
124
+ ## Troubleshooting
125
+
126
+ ### Can't Log In
127
+
128
+ 1. **Check credentials**: Verify username and password are correct
129
+ 2. **Check environment variable**: Ensure `ALLOWED_USERS` is set correctly
130
+ 3. **Check logs**: Look for authentication errors in backend logs
131
+
132
+ ### Token Expired
133
+
134
+ - Users will be automatically logged out
135
+ - They need to log in again
136
+
137
+ ### 401 Unauthorized Errors
138
+
139
+ - Token may be expired or invalid
140
+ - Clear browser localStorage and log in again
141
+ - Check that token is being sent in requests
142
+
143
+ ### Multiple Users
144
+
145
+ To add multiple users, separate them with commas:
146
+
147
+ ```bash
148
+ ALLOWED_USERS="admin:admin123,john:john456,jane:jane789"
149
+ ```
150
+
151
+ ## Security Best Practices
152
+
153
+ 1. ✅ **Use strong passwords** for all users
154
+ 2. ✅ **Set JWT_SECRET_KEY** to a secure random value
155
+ 3. ✅ **Never commit credentials** to version control
156
+ 4. ✅ **Use environment variables** for all secrets
157
+ 5. ✅ **Rotate secrets** periodically
158
+ 6. ✅ **Use HTTPS** in production (Hugging Face Spaces provides this)
159
+
160
+ ## Example Configuration
161
+
162
+ For Hugging Face Spaces, set these environment variables:
163
+
164
+ ```
165
+ ALLOWED_USERS=admin:SecurePass123!,user1:AnotherPass456!
166
+ JWT_SECRET_KEY=your-super-secret-key-minimum-32-characters-long
167
+ ```
168
+
169
+ ## Testing
170
+
171
+ 1. **Test Login**:
172
+ ```bash
173
+ curl -X POST http://localhost:4000/api/auth/login \
174
+ -H "Content-Type: application/json" \
175
+ -d '{"username":"admin","password":"admin"}'
176
+ ```
177
+
178
+ 2. **Test Protected Endpoint**:
179
+ ```bash
180
+ curl -X GET http://localhost:4000/api/auth/me \
181
+ -H "Authorization: Bearer <your-token>"
182
+ ```
183
+
184
+ ## Next Steps
185
+
186
+ 1. Set `ALLOWED_USERS` with your actual credentials
187
+ 2. Set `JWT_SECRET_KEY` to a secure random value
188
+ 3. Deploy and test login functionality
189
+ 4. Share credentials only with authorized users
190
+
191
+ ---
192
+
193
+ **Your app is now secured! 🔒**
194
+
Dockerfile.hf ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Hugging Face Spaces Dockerfile
# Multi-stage build for Hugging Face deployment

# Stage 1: Build frontend
FROM node:20-alpine AS frontend-builder

WORKDIR /app/frontend

# Copy package files first so dependency install is cached independently of source changes
COPY frontend/package*.json ./

# Install dependencies (npm ci = reproducible install from package-lock.json)
RUN npm ci

# Copy frontend source
COPY frontend/ ./

# Build arguments for environment variables
# Hugging Face Spaces will provide the public URL
ARG VITE_API_BASE_URL
ENV VITE_API_BASE_URL=${VITE_API_BASE_URL}

# Build frontend
RUN npm run build

# Stage 2: Python backend with frontend
FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Install system dependencies including ffmpeg
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    ffmpeg \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first for better caching
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Copy built frontend from builder stage
COPY --from=frontend-builder /app/frontend/dist ./frontend/dist

# Create storage directories
RUN mkdir -p storage/images storage/videos

# Hugging Face Spaces uses port 7860 by default
# But we'll use PORT env var if provided, or default to 7860
EXPOSE 7860

# Health check
# FIX: previously hard-coded port 7860 here, which made the healthcheck fail
# whenever the PORT env var selected a different port than the app bound to.
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD python -c "import os, urllib.request; urllib.request.urlopen('http://localhost:%s/health' % os.getenv('PORT', '7860'))"

# Run the application
# Hugging Face Spaces provides PORT env var, default to 7860
CMD python -c "import os; port = int(os.getenv('PORT', 7860)); import uvicorn; uvicorn.run('main:app', host='0.0.0.0', port=port)"
HUGGINGFACE_DEPLOY.md ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Deploying to Hugging Face Spaces
2
+
3
+ This guide will help you deploy Video Genesis Studio to Hugging Face Spaces.
4
+
5
+ ## Prerequisites
6
+
7
+ 1. **Hugging Face Account**
8
+ - Sign up at https://huggingface.co/join
9
+ - Verify your email
10
+
11
+ 2. **API Keys**
12
+ - KIE API Key (required): https://kie.ai/api-key
13
+ - Optional: Gemini, OpenAI, Replicate keys
14
+
15
+ 3. **GitHub Repository**
16
+ - Your code should be in a GitHub repository
17
+ - Or you can upload directly to Hugging Face
18
+
19
+ ## Step-by-Step Deployment
20
+
21
+ ### Option 1: Deploy from GitHub (Recommended)
22
+
23
+ 1. **Prepare Your Repository**:
24
+ ```bash
25
+ # Make sure Dockerfile.hf is in the root
26
+ # Make sure README_HF.md exists (or rename it to README.md)
27
+ git add Dockerfile.hf README_HF.md
28
+ git commit -m "Add Hugging Face deployment files"
29
+ git push
30
+ ```
31
+
32
+ 2. **Create a New Space**:
33
+ - Go to https://huggingface.co/spaces
34
+ - Click "Create new Space"
35
+ - Fill in:
36
+ - **Space name**: `video-genesis-studio` (or your choice)
37
+ - **SDK**: Select **Docker**
38
+ - **Visibility**: Public or Private
39
+ - **Hardware**: CPU Basic (free) or upgrade for better performance
40
+ - Click "Create Space"
41
+
42
+ 3. **Connect Repository**:
43
+ - In Space settings, go to "Repository" tab
44
+ - Click "Connect to GitHub"
45
+ - Select your repository
46
+ - Set **Dockerfile path**: `Dockerfile.hf`
47
+ - Save
48
+
49
+ 4. **Set Environment Variables**:
50
+ - Go to "Variables and secrets" tab
51
+ - Add the following **required** variables:
52
+ ```
53
+ KIE_API_KEY=your_kie_api_key_here
54
+ VITE_API_BASE_URL=https://your-username-video-genesis-studio.hf.space
55
+ ENVIRONMENT=production
56
+ ALLOWED_USERS=admin:your-secure-password,user1:password123
57
+ JWT_SECRET_KEY=your-very-secure-random-secret-key-minimum-32-chars
58
+ ```
59
+ - **Authentication Setup** (Required):
60
+ - `ALLOWED_USERS`: Format is `username1:password1,username2:password2`
61
+ - `JWT_SECRET_KEY`: Generate a secure random key (see AUTH_SETUP.md)
62
+ - Optional variables:
63
+ ```
64
+ VITE_GEMINI_API_KEY=your_gemini_key
65
+ OPENAI_API_KEY=your_openai_key
66
+ REPLICATE_API_TOKEN=your_replicate_token
67
+ ```
68
+ - Click "Save" after adding each variable
69
+
70
+ **Note**: See `AUTH_SETUP.md` for detailed authentication configuration.
71
+
72
+ 5. **Build Arguments**:
73
+ - Go to "Settings" → "Build arguments"
74
+ - Add:
75
+ ```
76
+ VITE_API_BASE_URL=https://your-username-video-genesis-studio.hf.space
77
+ ```
78
+ - This ensures the frontend knows the correct API URL
79
+
80
+ 6. **Deploy**:
81
+ - Hugging Face will automatically build and deploy
82
+ - Watch the build logs in the "Logs" tab
83
+ - Wait for "Your Space is live!" message
84
+
85
+ ### Option 2: Upload Directly to Hugging Face
86
+
87
+ 1. **Create Space**:
88
+ - Go to https://huggingface.co/spaces
89
+ - Click "Create new Space"
90
+ - Select **Docker** SDK
91
+ - Create the Space
92
+
93
+ 2. **Clone and Upload**:
94
+ ```bash
95
+ # Clone your Hugging Face Space
96
+ git clone https://huggingface.co/spaces/your-username/video-genesis-studio
97
+ cd video-genesis-studio
98
+
99
+ # Copy your files
100
+ cp -r /path/to/your/project/* .
101
+
102
+ # Make sure Dockerfile.hf is named correctly
103
+ # Rename README_HF.md to README.md
104
+ mv README_HF.md README.md
105
+
106
+ # Commit and push
107
+ git add .
108
+ git commit -m "Initial deployment"
109
+ git push
110
+ ```
111
+
112
+ 3. **Set Environment Variables** (same as Option 1, step 4)
113
+
114
+ ## Configuration
115
+
116
+ ### Space Settings
117
+
118
+ 1. **Hardware**:
119
+ - **CPU Basic** (Free): Good for testing, limited resources
120
+ - **CPU Upgrade** ($0.60/hour): Better performance
121
+ - **GPU** (if needed): For heavy video processing
122
+
123
+ 2. **Storage**:
124
+ - Default: 50GB
125
+ - Upgrade if you need more for video storage
126
+
127
+ 3. **Environment Variables**:
128
+ - Set in "Variables and secrets" tab
129
+ - Required: `KIE_API_KEY`
130
+ - Recommended: `VITE_API_BASE_URL` (your Space URL)
131
+
132
+ ### Build Configuration
133
+
134
+ The `Dockerfile.hf` is configured to:
135
+ - Build React frontend with correct API URL
136
+ - Install Python dependencies
137
+ - Install FFmpeg for video processing
138
+ - Serve on port 7860 (Hugging Face default)
139
+
140
+ ### Frontend Configuration
141
+
142
+ The frontend is built with `VITE_API_BASE_URL` pointing to your Space URL. This is set via:
143
+ - Build argument in Dockerfile
144
+ - Environment variable during build
145
+
146
+ ## Verifying Deployment
147
+
148
+ 1. **Check Build Logs**:
149
+ - Go to "Logs" tab in your Space
150
+ - Look for successful build messages
151
+ - Check for any errors
152
+
153
+ 2. **Test Health Endpoint**:
154
+ - Visit: `https://your-space.hf.space/health`
155
+ - Should return JSON with status "healthy"
156
+
157
+ 3. **Test Frontend**:
158
+ - Visit: `https://your-space.hf.space`
159
+ - Should load the React application
160
+
161
+ 4. **Test API Docs**:
162
+ - Visit: `https://your-space.hf.space/docs`
163
+ - Should show Swagger UI
164
+
165
+ ## Troubleshooting
166
+
167
+ ### Build Fails
168
+
169
+ **Issue**: Build fails with npm errors
170
+ - **Solution**: Check that `frontend/package.json` exists and is valid
171
+ - Check build logs for specific error messages
172
+
173
+ **Issue**: Frontend build fails
174
+ - **Solution**: Ensure `VITE_API_BASE_URL` is set correctly
175
+ - Check that all frontend dependencies are in `package.json`
176
+
177
+ ### App Not Loading
178
+
179
+ **Issue**: Frontend shows blank page
180
+ - **Solution**:
181
+ - Check browser console for errors
182
+ - Verify `VITE_API_BASE_URL` matches your Space URL
183
+ - Check that frontend was built successfully (check logs)
184
+
185
+ **Issue**: API calls fail
186
+ - **Solution**:
187
+ - Verify `KIE_API_KEY` is set correctly
188
+ - Check API logs in Space logs
189
+ - Ensure CORS is configured (already set to allow all)
190
+
191
+ ### Port Issues
192
+
193
+ **Issue**: App doesn't start
194
+ - **Solution**:
195
+ - Hugging Face uses port 7860 by default
196
+ - The Dockerfile is configured to use `PORT` env var
197
+ - Check logs for port binding errors
198
+
199
+ ### Storage Issues
200
+
201
+ **Issue**: Videos not saving
202
+ - **Solution**:
203
+ - Check storage directory permissions
204
+ - Verify storage path exists
205
+ - Check available disk space
206
+
207
+ ## Updating Your Space
208
+
209
+ 1. **Make Changes**:
210
+ ```bash
211
+ # Make your code changes
212
+ git add .
213
+ git commit -m "Update app"
214
+ git push
215
+ ```
216
+
217
+ 2. **Hugging Face Auto-Rebuilds**:
218
+ - Spaces automatically rebuild on push
219
+ - Watch the logs for build progress
220
+
221
+ 3. **Manual Rebuild**:
222
+ - Go to Space settings
223
+ - Click "Rebuild" if needed
224
+
225
+ ## Environment Variables Reference
226
+
227
+ | Variable | Required | Description |
228
+ |----------|----------|-------------|
229
+ | `KIE_API_KEY` | Yes | KIE API key for video generation |
230
+ | `ALLOWED_USERS` | Yes | User credentials: `username1:pass1,username2:pass2` |
231
+ | `JWT_SECRET_KEY` | Yes | Secure random key for JWT signing (min 32 chars) |
232
+ | `VITE_API_BASE_URL` | Recommended | Your Space URL (for frontend) |
233
+ | `ENVIRONMENT` | No | Set to `production` |
234
+ | `VITE_GEMINI_API_KEY` | No | Gemini API key |
235
+ | `OPENAI_API_KEY` | No | OpenAI API key |
236
+ | `REPLICATE_API_TOKEN` | No | Replicate API token |
237
+ | `PORT` | Auto | Set by Hugging Face (7860) |
238
+
239
+ ## Cost Considerations
240
+
241
+ - **Free Tier**: CPU Basic, 50GB storage
242
+ - **Paid Tiers**:
243
+ - CPU Upgrade: $0.60/hour
244
+ - GPU options available
245
+ - **Storage**: Additional storage can be purchased
246
+
247
+ ## Security Notes
248
+
249
+ 1. **API Keys**:
250
+ - Never commit API keys to your repository
251
+ - Use Hugging Face's "Variables and secrets" feature
252
+ - Keys are encrypted and only accessible to your Space
253
+
254
+ 2. **CORS**:
255
+ - Currently set to allow all origins
256
+ - For production, consider restricting to your domain
257
+
258
+ 3. **Rate Limiting**:
259
+ - Consider implementing rate limiting for public Spaces
260
+ - Monitor API usage
261
+
262
+ ## Authentication
263
+
264
+ Your app now requires login! See `AUTH_SETUP.md` for:
265
+ - Setting up user credentials
266
+ - Configuring JWT secret key
267
+ - Managing multiple users
268
+ - Security best practices
269
+
270
+ **Quick Setup:**
271
+ 1. Set `ALLOWED_USERS` environment variable: `admin:your-password,user1:pass1`
272
+ 2. Set `JWT_SECRET_KEY` to a secure random string (32+ characters)
273
+ 3. Users will see a login page before accessing the app
274
+
275
+ ## Support
276
+
277
+ - **Hugging Face Docs**: https://huggingface.co/docs/hub/spaces
278
+ - **Space Community**: https://huggingface.co/spaces
279
+ - **Issues**: Check Space logs for errors
280
+ - **Authentication**: See `AUTH_SETUP.md` for auth configuration
281
+
282
+ ## Next Steps
283
+
284
+ 1. ✅ Deploy your Space
285
+ 2. ✅ Set environment variables (including `ALLOWED_USERS` and `JWT_SECRET_KEY`)
286
+ 3. ✅ Test login functionality
287
+ 4. ✅ Test the deployment
288
+ 5. ✅ Share credentials with authorized users only
289
+ 6. ✅ Monitor usage and performance
290
+
291
+ ---
292
+
293
+ **Your Video Genesis Studio is now live and secured on Hugging Face! 🚀🔒**
294
+
README.md ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Video AdGenesis Studio
3
+ emoji: 🎬
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: docker
7
+ sdk_version: latest
8
+ app_file: Dockerfile.hf
9
+ pinned: false
10
+ license: mit
11
+ ---
12
+
13
+ # Video Genesis Studio
14
+
15
+ AI-powered video generation studio with React frontend and FastAPI backend. Create professional videos from text prompts and images using cutting-edge AI models.
16
+
17
+ ## Features
18
+
19
+ ✨ **AI Video Generation**
20
+ - KIE Veo 3.1 integration for high-quality video generation
21
+ - Text-to-video and image-to-video support
22
+ - Real-time progress tracking
23
+
24
+ 🖼️ **Image Processing**
25
+ - Intelligent image compression
26
+ - Multiple format support
27
+ - Automatic optimization
28
+
29
+ 🎬 **Video Processing**
30
+ - Frame extraction
31
+ - Video trimming and concatenation
32
+ - Full ffmpeg integration
33
+
34
+ ## Setup
35
+
36
+ ### Environment Variables
37
+
38
+ Set these in your Hugging Face Space settings:
39
+
40
+ **Required:**
41
+ - `KIE_API_KEY` - Get from https://kie.ai/api-key
42
+
43
+ **Optional:**
44
+ - `VITE_GEMINI_API_KEY` - For Gemini AI features
45
+ - `OPENAI_API_KEY` - For GPT-4o prompt generation
46
+ - `REPLICATE_API_TOKEN` - For Replicate video generation
47
+ - `VITE_API_BASE_URL` - Your Space URL (auto-set by HF, but can override)
48
+ - `ENVIRONMENT` - Set to `production`
49
+
50
+ ### Getting Your API Keys
51
+
52
+ 1. **KIE API Key** (Required):
53
+ - Visit https://kie.ai/api-key
54
+ - Sign up or log in
55
+ - Copy your API key
56
+
57
+ 2. **Gemini API Key** (Optional):
58
+ - Visit https://makersuite.google.com/app/apikey
59
+ - Create a new API key
60
+ - Copy the key
61
+
62
+ 3. **OpenAI API Key** (Optional):
63
+ - Visit https://platform.openai.com/api-keys
64
+ - Create a new secret key
65
+ - Copy the key
66
+
67
+ 4. **Replicate Token** (Optional):
68
+ - Visit https://replicate.com/account/api-tokens
69
+ - Create a new token
70
+ - Copy the token
71
+
72
+ ## Usage
73
+
74
+ 1. **Set Environment Variables**:
75
+ - Go to your Space settings
76
+ - Add all required environment variables
77
+ - Save and restart the Space
78
+
79
+ 2. **Access the App**:
80
+ - Your Space will be available at: `https://your-username-video-genesis-studio.hf.space`
81
+ - The frontend loads automatically
82
+ - API docs available at: `/docs`
83
+
84
+ 3. **Generate Videos**:
85
+ - Enter a text prompt
86
+ - Optionally upload images
87
+ - Select video model and settings
88
+ - Click generate and wait for results
89
+
90
+ ## API Endpoints
91
+
92
+ - `GET /health` - Health check
93
+ - `GET /docs` - API documentation (Swagger UI)
94
+ - `POST /api/veo/generate` - Start video generation
95
+ - `GET /api/veo/status/{task_id}` - Check generation status
96
+ - `POST /api/prompts/generate` - Generate prompts with AI
97
+
98
+ ## Technical Details
99
+
100
+ - **Backend**: FastAPI (Python 3.11)
101
+ - **Frontend**: React + TypeScript + Vite
102
+ - **Video Processing**: FFmpeg
103
+ - **AI Models**: KIE Veo 3.1, Gemini, GPT-4o
104
+
105
+ ## License
106
+
107
+ MIT License
108
+
api/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # API package
2
+
api/auth.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Authentication endpoints for user login and access control
3
+ """
4
from datetime import datetime, timedelta, timezone
from typing import Optional
import hashlib
import hmac
import os

from fastapi import APIRouter, HTTPException, Depends, status
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from jose import JWTError, jwt
from pydantic import BaseModel
12
+
13
router = APIRouter()
security = HTTPBearer()  # parses "Authorization: Bearer <token>" headers

# JWT Configuration
# NOTE(review): the fallback secret is publicly known — JWT_SECRET_KEY must be
# set in production or anyone can forge valid tokens.
SECRET_KEY = os.getenv("JWT_SECRET_KEY", "your-secret-key-change-in-production")
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_HOURS = 24 * 7  # 7 days

# User credentials (in production, use a database)
# Format: username -> hashed_password
# You can set these via environment variables or use a simple hash
# Placeholder only: rebound via load_allowed_users() at import time further down.
ALLOWED_USERS = {}
25
+
26
def load_allowed_users():
    """Build the username -> hashed-password map from the ALLOWED_USERS env var.

    ALLOWED_USERS is a comma-separated list of "username:password" pairs.
    When unset, falls back to a single admin/admin account (development only).
    """
    raw = os.getenv("ALLOWED_USERS", "")
    if not raw:
        # Development fallback — never rely on this in production.
        print("⚠️ Using default credentials (admin/admin). Set ALLOWED_USERS env var for production.")
        return {"admin": hash_password("admin")}

    parsed = {}
    for entry in raw.split(","):
        name, sep, secret = entry.partition(":")
        # Entries without a ":" separator are silently skipped, as before.
        if sep:
            parsed[name.strip()] = hash_password(secret.strip())
    print(f"✅ Loaded {len(parsed)} user(s) from ALLOWED_USERS")
    return parsed
42
+
43
def hash_password(password: str) -> str:
    """Return the hex SHA-256 digest of *password* (simple, unsalted hashing)."""
    digest = hashlib.sha256(password.encode())
    return digest.hexdigest()
46
+
47
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Check *plain_password* against a stored SHA-256 hex digest.

    Uses hmac.compare_digest for a constant-time comparison so the check does
    not leak matching-prefix information through timing differences (the
    original used a plain ``==`` on the digest strings).
    """
    candidate = hashlib.sha256(plain_password.encode()).hexdigest()
    return hmac.compare_digest(candidate, hashed_password)
50
+
51
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
    """Create a signed JWT carrying *data* plus an "exp" expiry claim.

    Args:
        data: Claims to embed (e.g. ``{"sub": username}``); copied, not mutated.
        expires_delta: Optional custom lifetime. Defaults to
            ACCESS_TOKEN_EXPIRE_HOURS when None.

    Returns:
        The encoded JWT string.
    """
    to_encode = data.copy()
    # FIX: datetime.utcnow() is deprecated and returns a naive datetime;
    # use an explicit timezone-aware UTC timestamp instead.
    now = datetime.now(timezone.utc)
    if expires_delta is None:
        # FIX: test against None rather than truthiness, so an explicit
        # timedelta(0) is honored instead of falling back to the default.
        expires_delta = timedelta(hours=ACCESS_TOKEN_EXPIRE_HOURS)
    to_encode.update({"exp": now + expires_delta})
    encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
    return encoded_jwt
61
+
62
def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Validate the bearer JWT and return the username ("sub" claim) it names.

    Raises HTTPException 401 when the token is malformed, expired, or lacks a
    "sub" claim.
    """
    auth_error = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid authentication credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        payload = jwt.decode(credentials.credentials, SECRET_KEY, algorithms=[ALGORITHM])
    except JWTError:
        raise auth_error
    username = payload.get("sub")
    if username is None:
        raise auth_error
    return username
81
+
82
# Load users on module import
# NOTE: this runs once at import time, so changes to the ALLOWED_USERS env var
# require a process restart to take effect.
ALLOWED_USERS = load_allowed_users()
84
+
85
+ # Request/Response Models
86
class LoginRequest(BaseModel):
    """Request body for POST /auth/login."""
    username: str
    password: str
89
+
90
class LoginResponse(BaseModel):
    """Successful login payload: a bearer JWT plus the authenticated username."""
    access_token: str
    token_type: str = "bearer"
    username: str
94
+
95
class VerifyResponse(BaseModel):
    """Token-verification payload returned by /auth/verify and /auth/me."""
    authenticated: bool
    username: Optional[str] = None
98
+
99
@router.post("/auth/login", response_model=LoginResponse)
async def login(request: LoginRequest):
    """Exchange a username/password pair for a JWT bearer token.

    Responds 401 with the same vague message whether the username is unknown
    or the password is wrong, so the error does not reveal which part failed.
    """
    stored_hash = ALLOWED_USERS.get(request.username)
    if stored_hash is None or not verify_password(request.password, stored_hash):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password"
        )

    return LoginResponse(
        access_token=create_access_token(data={"sub": request.username}),
        token_type="bearer",
        username=request.username
    )
127
+
128
@router.get("/auth/verify", response_model=VerifyResponse)
async def verify_token_endpoint(username: str = Depends(verify_token)):
    """Report that the presented bearer token is valid (401 is raised by the dependency otherwise)."""
    result = VerifyResponse(authenticated=True, username=username)
    return result
132
+
133
@router.get("/auth/me", response_model=VerifyResponse)
async def get_current_user(username: str = Depends(verify_token)):
    """Return the identity behind the presented bearer token."""
    current = VerifyResponse(authenticated=True, username=username)
    return current
137
+
api/frame_extraction.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Frame Extraction API endpoints
3
+ Intelligent frame selection using Whisper
4
+ """
5
+
6
+ from fastapi import APIRouter, HTTPException, UploadFile, File, Form
7
+ from fastapi.responses import JSONResponse
8
+ from pydantic import BaseModel
9
+ from typing import List, Optional
10
+ import tempfile
11
+ import os
12
+
13
+ from utils.whisper_trim import (
14
+ extract_post_speech_frames,
15
+ find_last_word_timestamp,
16
+ trim_video_to_last_word,
17
+ is_whisper_available
18
+ )
19
+
20
+ router = APIRouter()
21
+
22
+
23
class FrameExtractionRequest(BaseModel):
    """Request body for POST /extract-frames."""
    video_url: str  # URL of the video to download and analyze
    script: str  # spoken script, passed to the Whisper word-timestamp lookup
    buffer_time: Optional[float] = 0.3  # buffer after the last word (presumably seconds — confirm)
    num_frames: Optional[int] = 3  # number of candidate frames to extract
    model_size: Optional[str] = "base"  # Whisper model size to load
29
+
30
+
31
class FrameExtractionResponse(BaseModel):
    """Response for POST /extract-frames."""
    frames: List[dict]  # [{timestamp, frame_data_url, label}]
    # Optional: the endpoint can legitimately produce None here (the fallback
    # path assigns None when no fallback timestamps exist); a required float
    # would make response validation fail in exactly that error-recovery case.
    last_word_time: Optional[float] = None
    total_duration: float
35
+
36
+
37
@router.post("/extract-frames", response_model=FrameExtractionResponse)
async def extract_frames_api(request: FrameExtractionRequest):
    """
    Extract transition frames using Whisper to detect the last spoken word.

    Downloads the video, runs Whisper-based post-speech detection, and falls
    back to fixed timestamps near the end of the clip if Whisper fails.
    The downloaded temp file is always removed.

    Raises:
        HTTPException 501: Whisper is not installed.
        HTTPException 400: the video could not be downloaded.
        HTTPException 500: both Whisper and the fallback extraction failed.
    """
    if not is_whisper_available():
        raise HTTPException(
            status_code=501,
            detail="Whisper not installed. Install with: pip install openai-whisper moviepy"
        )

    import httpx

    # mkstemp creates the file atomically; tempfile.mktemp is deprecated and
    # race-prone (another process could claim the predicted name first).
    fd, temp_video = tempfile.mkstemp(suffix='.mp4')
    os.close(fd)

    try:
        # Download video temporarily
        async with httpx.AsyncClient() as client:
            response = await client.get(request.video_url)
            if response.status_code != 200:
                raise HTTPException(
                    status_code=400,
                    detail=f"Failed to download video: {response.status_code}"
                )

        with open(temp_video, 'wb') as f:
            f.write(response.content)

        frames = []
        last_word_time = None

        try:
            # Prefer Whisper-based post-speech detection
            frames = extract_post_speech_frames(
                temp_video,
                request.script,
                buffer_time=request.buffer_time,
                num_frames=request.num_frames,
                model_size=request.model_size
            )

            # Timestamp of the last word actually spoken in the clip
            last_word_time = find_last_word_timestamp(
                temp_video,
                request.script,
                model_size=request.model_size
            )
        except Exception as whisper_err:
            # Fallback: simple fixed timestamps near the end of the video
            print(f"⚠️ Whisper-based frame extraction failed: {whisper_err}")
            try:
                from moviepy.editor import VideoFileClip
                from utils.video_processor import extract_frame

                clip = VideoFileClip(temp_video)
                duration = clip.duration
                clip.close()

                fallback_timestamps = [
                    max(0, duration - 1.5),
                    max(0, duration - 1.0),
                    max(0, duration - 0.5),
                ]
                labels = ["Early End", "Mid End", "Final Frame"]

                for ts, label in zip(fallback_timestamps, labels):
                    frame_data = extract_frame(temp_video, ts, return_base64=True)
                    frames.append((ts, frame_data, label))

                last_word_time = fallback_timestamps[-1] if fallback_timestamps else None
                print("✅ Returned fallback frames near video end.")
            except Exception as fallback_err:
                print(f"❌ Fallback frame extraction failed: {fallback_err}")
                raise HTTPException(
                    status_code=500,
                    detail=f"Frame extraction failed: {str(whisper_err)}"
                )

        # Get video duration for the response
        from moviepy.editor import VideoFileClip
        clip = VideoFileClip(temp_video)
        duration = clip.duration
        clip.close()

        # Format response
        frames_data = [
            {
                "timestamp": timestamp,
                "frame_data_url": frame_data,
                "label": label
            }
            for timestamp, frame_data, label in frames
        ]

        return FrameExtractionResponse(
            frames=frames_data,
            last_word_time=last_word_time,
            total_duration=duration
        )

    except HTTPException:
        # Preserve deliberate statuses (400 download error, 500 fallback
        # failure) — the previous code remapped them all to a generic 500.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Frame extraction failed: {str(e)}"
        )
    finally:
        # Always remove the downloaded temp file, success or failure.
        if os.path.exists(temp_video):
            os.remove(temp_video)
148
+
149
+
150
@router.post("/trim-video")
async def trim_video_api(
    video_url: str = Form(...),
    script: str = Form(...),
    padding: float = Form(0.5),
    model_size: str = Form("base")
):
    """
    Trim video to end after the last spoken word.

    Downloads the source video, trims it via Whisper word timestamps, and
    returns the trimmed MP4 as an attachment. Temp files are always removed.

    Raises:
        HTTPException 501: Whisper is not installed.
        HTTPException 400: the video could not be downloaded.
        HTTPException 500: trimming failed.
    """
    if not is_whisper_available():
        raise HTTPException(
            status_code=501,
            detail="Whisper not installed. Install with: pip install openai-whisper moviepy"
        )

    import httpx

    # mkstemp creates files atomically (tempfile.mktemp is deprecated and
    # race-prone). Holding concrete names also lets the finally-block clean
    # up without the fragile locals() lookup the original used.
    fd, temp_video = tempfile.mkstemp(suffix='.mp4')
    os.close(fd)
    fd, output_video = tempfile.mkstemp(suffix='_trimmed.mp4')
    os.close(fd)

    try:
        # Download video temporarily
        async with httpx.AsyncClient() as client:
            response = await client.get(video_url)
            if response.status_code != 200:
                raise HTTPException(
                    status_code=400,
                    detail=f"Failed to download video: {response.status_code}"
                )

        with open(temp_video, 'wb') as f:
            f.write(response.content)

        # Trim video so it ends `padding` after the last detected word
        output_path = trim_video_to_last_word(
            temp_video,
            script,
            output_video,
            padding=padding,
            model_size=model_size
        )

        # Read trimmed video into memory before the temp files are deleted
        with open(output_path, 'rb') as f:
            video_data = f.read()

        # Return trimmed video as a download
        from fastapi.responses import Response
        return Response(
            content=video_data,
            media_type="video/mp4",
            headers={
                "Content-Disposition": "attachment; filename=trimmed_video.mp4"
            }
        )

    except HTTPException:
        # Keep the intended status (e.g. 400) instead of remapping to 500.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Video trimming failed: {str(e)}"
        )
    finally:
        # Remove both temp files regardless of outcome.
        for path in (temp_video, output_video):
            if os.path.exists(path):
                os.remove(path)
220
+
221
+
222
@router.get("/whisper-status")
async def whisper_status():
    """Report whether the optional Whisper dependency is installed."""
    # Probe availability once and reuse the result for both fields.
    available = is_whisper_available()
    hint = (
        "Whisper is available"
        if available
        else "Install with: pip install openai-whisper moviepy"
    )
    return {"available": available, "message": hint}
232
+
api/image_service.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Image Service API endpoints
3
+ Handles image compression, storage, and serving
4
+ """
5
+
6
+ from fastapi import APIRouter, HTTPException, Response, UploadFile, File
7
+ from fastapi.responses import JSONResponse
8
+ from utils.storage import temp_images
9
+ from utils.image_processor import compress_and_store_image
10
+ import os
11
+
12
+ router = APIRouter()
13
+
14
@router.post("/upload-image")
async def upload_image(file: UploadFile = File(...)):
    """
    Upload and host an image, returns public URL
    """
    try:
        import base64

        raw_bytes = await file.read()

        # Wrap the bytes in a data URL so the shared compressor can consume it.
        encoded = base64.b64encode(raw_bytes).decode('utf-8')
        data_url = f"data:{file.content_type};base64,{encoded}"

        # Base URL under which stored images will be served.
        public_url = os.getenv('PUBLIC_URL', 'http://localhost:4000')

        # Compress, store, and obtain the hosted URL.
        hosted_url = await compress_and_store_image(data_url, public_url)

        return JSONResponse(content={
            "url": hosted_url,
            "filename": file.filename
        })

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Image upload failed: {str(e)}")
41
+
42
+
43
@router.get("/images/{image_id}")
async def serve_image(image_id: str):
    """
    Serve a temporarily stored image.
    Images are compressed and cached for 1 hour.
    """
    record = temp_images.get(image_id)
    if record is None:
        raise HTTPException(status_code=404, detail="Image not found")

    return Response(
        content=record['buffer'],
        media_type=record['content_type'],
        headers={
            'Cache-Control': 'public, max-age=3600'
        }
    )
61
+
api/prompt_generation.py ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ GPT-4o Prompt Generation API
3
+ Structured, validated segment generation for video prompts
4
+ """
5
+
6
+ from fastapi import APIRouter, HTTPException, UploadFile, File, Form
7
+ from fastapi.responses import JSONResponse
8
+ from pydantic import BaseModel
9
+ from typing import Optional
10
+ import base64
11
+
12
+ from utils.prompt_generator import (
13
+ VeoInputs,
14
+ generate_segments_payload,
15
+ split_script_into_segments
16
+ )
17
+ from openai import OpenAI
18
+ import os
19
+ import json
20
+
21
+ router = APIRouter()
22
+
23
+
24
class PromptGenerationRequest(BaseModel):
    """Request for prompt generation.

    NOTE(review): this model appears unused in this module — /generate-prompts
    reads its fields from multipart Form data instead. Confirm external usage
    before removing.
    """
    script: str
    style: str = "clean, lifestyle UGC"
    jsonFormat: str = "standard"
    continuationMode: bool = True
    voiceType: Optional[str] = None
    energyLevel: Optional[str] = None
    settingMode: str = "single"
    cameraStyle: Optional[str] = "handheld steadicam"
    energyArc: Optional[str] = None
    narrativeStyle: Optional[str] = "direct address"
    accentRegion: Optional[str] = None
    model: str = "gpt-4o"
38
+
39
+
40
@router.post("/generate-prompts")
async def generate_prompts_api(
    script: str = Form(...),
    style: str = Form("clean, lifestyle UGC"),
    jsonFormat: str = Form("standard"),
    continuationMode: str = Form("true"),
    voiceType: Optional[str] = Form(None),
    energyLevel: Optional[str] = Form(None),
    settingMode: str = Form("single"),
    cameraStyle: Optional[str] = Form("handheld steadicam"),
    energyArc: Optional[str] = Form(None),
    narrativeStyle: Optional[str] = Form("direct address"),
    accentRegion: Optional[str] = Form(None),
    model: str = Form("gpt-4o"),
    image: UploadFile = File(...)
):
    """
    Generate structured video prompts using GPT-4o.

    Splits the script into 8-second segments, generates detailed production
    prompts, validates the output (non-blocking), and returns structured JSON
    for video generation.

    Accepts multipart/form-data: the script text, a visual style description,
    a required character reference image, and optional fine-tuning fields.
    """
    try:
        # Read the character reference image
        reference_image = await image.read()
        print(f"📷 Received reference image: {len(reference_image)} bytes")

        # Assemble the generator inputs. `x or None` normalizes empty form
        # strings to None exactly like the original `x if x else None`.
        veo_inputs = VeoInputs(
            script=script,
            style=style,
            jsonFormat=jsonFormat,
            continuationMode=(continuationMode.lower() == "true"),
            voiceType=voiceType or None,
            energyLevel=energyLevel or None,
            settingMode=settingMode,
            cameraStyle=cameraStyle or None,
            energyArc=energyArc or None,
            narrativeStyle=narrativeStyle or None,
            accentRegion=accentRegion or None
        )

        # Dev mode caps the number of segments generated downstream.
        environment = os.getenv('ENVIRONMENT', 'dev').lower()
        is_dev_mode = environment in ('dev', 'development')

        payload = generate_segments_payload(
            inputs=veo_inputs,
            image_bytes=reference_image,
            model=model
        )

        # Surface environment mode to the caller.
        payload['environment'] = environment
        payload['is_dev_mode'] = is_dev_mode
        payload['max_segments'] = 2 if is_dev_mode else None

        # Validation warnings (if any) are logged to console but don't block
        return JSONResponse(content=payload)

    except Exception as e:
        # API/network errors only (validation is non-blocking now)
        raise HTTPException(
            status_code=500,
            detail=f"Prompt generation failed: {str(e)}"
        )
122
+
123
+
124
@router.post("/split-script")
async def split_script_api(
    script: str = Form(...),
    seconds_per_segment: int = Form(8),
    words_per_second: float = Form(2.2)
):
    """
    Split a script into segments for preview.

    Useful for checking how the script will be divided before generation.
    """
    try:
        pieces = split_script_into_segments(
            script,
            seconds_per_segment=seconds_per_segment,
            words_per_second=words_per_second
        )

        word_total = sum(len(piece.split()) for piece in pieces)
        return {
            "segments": pieces,
            "count": len(pieces),
            "total_words": word_total
        }

    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Script splitting failed: {str(e)}"
        )
153
+
154
+
155
@router.post("/validate-payload")
async def validate_payload_api(payload: dict):
    """
    Validate a segments payload against strict rules.

    Use this to check whether a manually created or modified payload is valid.
    """
    try:
        from utils.prompt_generator import validate_segments_payload

        segment_count = len(payload.get("segments", []))
        problems = validate_segments_payload(payload, segment_count)

        if problems:
            return {"valid": False, "errors": problems}

        return {"valid": True, "message": "Payload is valid"}

    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Validation failed: {str(e)}"
        )
184
+
185
+
186
@router.get("/prompt-status")
async def prompt_status():
    """
    Check if GPT-4o prompt generation is available.

    Returns:
        dict with ``available`` (bool) and a human-readable ``message``.
    """
    # `os` is already imported at module level; the previous function-local
    # re-import was redundant and has been removed.
    openai_key = os.getenv('OPENAI_API_KEY')

    return {
        "available": bool(openai_key),
        "message": "GPT-4o is configured" if openai_key
                   else "Add OPENAI_API_KEY to .env.local"
    }
200
+
201
+
202
@router.post("/refine-prompt-continuity")
async def refine_prompt_for_continuity(
    segmentPrompt: str = Form(...),  # JSON string of the next segment
    lastFrame: UploadFile = File(...),  # Last frame image from previous video
    transcribedDialogue: str = Form(default=""),  # Whisper transcription from previous segment
    expectedDialogue: str = Form(default="")  # Expected dialogue from previous segment
):
    """
    Refine a segment prompt to match the actual visual AND audio from the previous segment.

    This ensures perfect continuity by having GPT-4o analyze:
    1. The last frame (visual consistency)
    2. The transcribed dialogue (audio consistency - what was actually said)

    Raises:
        HTTPException 400: segmentPrompt is not valid JSON.
        HTTPException 500: the OpenAI call or response parsing failed.
    """
    try:
        # Read the image
        image_bytes = await lastFrame.read()
        encoded_image = base64.b64encode(image_bytes).decode('utf-8')

        # Parse the segment prompt
        try:
            segment_data = json.loads(segmentPrompt)
        except json.JSONDecodeError:
            raise HTTPException(
                status_code=400,
                detail="Invalid JSON in segmentPrompt"
            )

        # Initialize OpenAI client
        client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

        # Build audio context if available
        audio_context = ""
        if transcribedDialogue.strip():
            audio_context = f"""

═══════════════════════════════════════════════════════════
AUDIO CONTINUITY CONTEXT (WHAT WAS ACTUALLY SPOKEN)
═══════════════════════════════════════════════════════════

Previous segment's dialogue (from Whisper transcription):
\"{transcribedDialogue.strip()}\"

Expected dialogue was:
\"{expectedDialogue.strip() if expectedDialogue.strip() else 'Not provided'}\"

IMPORTANT: The next segment should continue naturally from what was ACTUALLY said.
If there are differences between expected and transcribed dialogue, use the TRANSCRIBED version
as the ground truth for continuity (it's what the viewer actually heard).
"""

        # Build the refinement prompt
        refinement_instructions = f"""
You are a video continuity expert. Your task is to UPDATE the provided segment prompt to ensure PERFECT VISUAL AND AUDIO CONTINUITY with the previous video segment.

═══════════════════════════════════════════════════════════
VISUAL CONTINUITY (from attached image)
═══════════════════════════════════════════════════════════

Analyze the image carefully - this is the ACTUAL last frame from the previous video.

1. Update the character_description to match the ACTUAL person in the image:
- Physical appearance (EXACT age, hair color/style, facial features, skin tone)
- Clothing (EXACTLY what they're wearing - color, style, pattern)
- Current state (their actual expression and posture at this moment)
- Voice matching (adjust to match their appearance)

2. Update the scene_continuity to match the ACTUAL environment:
- Environment (describe what you see - bedroom, office, outdoor, etc.)
- Camera position (maintain the SAME angle/framing)
- Lighting state (match the EXACT lighting conditions in the image)
- Props and background elements (describe what's actually visible)
- Spatial relationships (match the actual layout)
{audio_context}
═══════════════════════════════════════════════════════════
ORIGINAL PROMPT TO UPDATE
═══════════════════════════════════════════════════════════

{json.dumps(segment_data, indent=2)}

═══════════════════════════════════════════════════════════
CRITICAL RULES
═══════════════════════════════════════════════════════════

- Be EXTREMELY specific about what you see in the image
- If the image shows a young woman with red hair, describe EXACTLY that
- If it's a sunset beach scene, describe EXACTLY that setting
- If they're wearing a beige blazer, describe EXACTLY that clothing
- Match colors, styles, and details PRECISELY to what's visible
- Maintain the SAME camera angle and distance
- Keep the action_timeline.dialogue EXACTLY as provided (this is the NEXT segment's dialogue)
- Update segment_info.continuity_markers to reflect the visual state
- Adjust synchronized_actions to fit the actual character appearance

🚨 CRITICAL: NO BLUR TRANSITIONS AT SEGMENT START 🚨
- The video MUST start immediately at 0:00 with a SHARP, CLEAR, IN-FOCUS frame
- NO fade-in, NO blur transition, NO gradual focus effect at the start
- The first frame (0:00) must be as clear and sharp as any other frame
- camera_movement MUST describe movement that starts from a clear, sharp state


The goal is SEAMLESS video extension with ZERO visual or audio discontinuity.

Return ONLY the updated JSON segment object with the same structure. No explanation, just the corrected JSON.
"""

        print(f"🔄 Refining prompt for visual continuity...")

        # Call GPT-4o with vision
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": refinement_instructions
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{encoded_image}"
                            }
                        }
                    ]
                }
            ],
            response_format={"type": "json_object"},
            temperature=0.3,  # Lower temperature for precise matching
        )

        # Parse the response
        refined_prompt = json.loads(response.choices[0].message.content)

        print(f"✅ Prompt refined for visual continuity")

        return JSONResponse(content={
            "refined_prompt": refined_prompt,
            "original_prompt": segment_data
        })

    except HTTPException:
        # Re-raise as-is: without this clause, the 400 raised above for
        # invalid JSON was swallowed by the generic handler and surfaced
        # to clients as a misleading 500.
        raise
    except Exception as e:
        print(f"❌ Prompt refinement error: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Prompt refinement failed: {str(e)}"
        )
350
+
api/replicate_service.py ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Replicate API endpoints
3
+ Handles video generation via Replicate's Python SDK
4
+
5
+ Based on standalone_video_creator.py flow:
6
+ - Uses replicate.run() for synchronous generation
7
+ - Sends prompt as stringified JSON (like the standalone script)
8
+ - Supports image input for frame continuity
9
+ """
10
+
11
+ from fastapi import APIRouter, HTTPException, BackgroundTasks
12
+ from fastapi.responses import JSONResponse
13
+ from pydantic import BaseModel
14
+ from typing import Optional, Dict, Any
15
+ import os
16
+ import asyncio
17
+ import uuid
18
+ import json
19
+ from concurrent.futures import ThreadPoolExecutor
20
+
21
+ router = APIRouter()
22
+
23
+ # Try importing replicate
24
+ try:
25
+ import replicate
26
+ REPLICATE_AVAILABLE = True
27
+ except ImportError:
28
+ REPLICATE_AVAILABLE = False
29
+ print("⚠️ Replicate package not installed. Run: pip install replicate")
30
+
31
+ # Thread pool for running blocking replicate.run() calls
32
+ executor = ThreadPoolExecutor(max_workers=4)
33
+
34
+ # In-memory store for prediction status (in production, use Redis)
35
+ predictions: Dict[str, Dict[str, Any]] = {}
36
+
37
+
38
+ # Request/Response Models
39
class ReplicateGenerateRequest(BaseModel):
    """Request body for POST /replicate/generate."""
    prompt: str  # text prompt forwarded verbatim to the model
    imageUrl: Optional[str] = None  # optional reference image for frame continuity
    model: Optional[str] = "google/veo-3"  # Replicate model identifier
    aspectRatio: Optional[str] = "9:16"  # forwarded as `aspect_ratio`
    seed: Optional[int] = None  # forwarded only when provided
45
+
46
+
47
class ReplicateGenerateResponse(BaseModel):
    """Immediate response: an id to poll via /replicate/status/{id}."""
    id: str
    status: str
50
+
51
+
52
class ReplicateStatusResponse(BaseModel):
    """Polled status; url/output are set on success, error on failure."""
    status: str
    output: Optional[str] = None
    url: Optional[str] = None
    error: Optional[str] = None
57
+
58
+
59
def get_replicate_api_key():
    """Return the Replicate API token from the environment.

    Raises:
        HTTPException 500: REPLICATE_API_TOKEN is unset or empty.
    """
    token = os.getenv('REPLICATE_API_TOKEN')
    if token:
        return token
    raise HTTPException(
        status_code=500,
        detail="REPLICATE_API_TOKEN not configured. Add REPLICATE_API_TOKEN to .env.local"
    )
68
+
69
+
70
def run_replicate_sync(
    prediction_id: str,
    model: str,
    input_data: Dict[str, Any]
):
    """
    Run replicate.run() synchronously in a worker thread and record the
    outcome in the module-level ``predictions`` store.

    This mirrors the standalone_video_creator.py approach: replicate.run()
    blocks until the model finishes, so it is scheduled on the thread pool.
    """
    try:
        # Re-export the token for the replicate SDK. Guarded: assigning None
        # into os.environ raises TypeError, which previously masked the real
        # problem (missing REPLICATE_API_TOKEN) with a confusing error.
        api_key = os.getenv('REPLICATE_API_TOKEN')
        if api_key:
            os.environ['REPLICATE_API_TOKEN'] = api_key

        print(f"🎬 Running replicate.run('{model}')...")
        print(f"📦 Input keys: {list(input_data.keys())}")

        # Run the model (blocking call)
        output = replicate.run(model, input=input_data)

        # Normalize the SDK's possible return shapes to a single URL string
        # (same handling as standalone_video_creator.py).
        video_url = None
        if isinstance(output, str):
            video_url = output
        elif hasattr(output, 'url'):
            # url is a property, not a method
            video_url = output.url
        elif hasattr(output, '__iter__'):
            # Could be a generator or list — take the first string item
            for item in output:
                if isinstance(item, str):
                    video_url = item
                    break
        else:
            video_url = str(output)

        print(f"✅ Replicate completed: {video_url[:80] if video_url else 'no url'}...")

        predictions[prediction_id] = {
            "status": "succeeded",
            "url": video_url,
            "output": video_url,
            "error": None
        }

    except Exception as e:
        error_msg = str(e)
        print(f"❌ Replicate error: {error_msg}")

        predictions[prediction_id] = {
            "status": "failed",
            "url": None,
            "output": None,
            "error": error_msg
        }
127
+
128
+
129
@router.post("/replicate/generate", response_model=ReplicateGenerateResponse)
async def generate_video(request: ReplicateGenerateRequest, background_tasks: BackgroundTasks):
    """
    Generate video using Replicate Python SDK.

    Mirrors standalone_video_creator.py:
    - Uses replicate.run() (blocking) on a worker thread
    - Sends prompt as-is (frontend should send text prompt)
    - Supports image URL for frame continuity

    Returns immediately with a prediction id; poll /replicate/status/{id}
    for the result.
    """
    if not REPLICATE_AVAILABLE:
        raise HTTPException(
            status_code=500,
            detail="Replicate package not installed. Run: pip install replicate"
        )

    try:
        # Verify API key is set (raises 500 if missing)
        get_replicate_api_key()

        model_id = request.model or "google/veo-3"

        # Build input params (matching standalone_video_creator.py)
        input_data: Dict[str, Any] = {
            "prompt": request.prompt,
        }

        # Add aspect ratio
        if request.aspectRatio:
            input_data["aspect_ratio"] = request.aspectRatio

        # Add seed if provided (0 is a valid seed, hence the explicit check)
        if request.seed is not None:
            input_data["seed"] = request.seed

        # Add image URL if provided
        if request.imageUrl:
            input_data["image"] = request.imageUrl

        print(f"🎬 Starting Replicate generation with model: {model_id}")
        print(f"📝 Prompt: {request.prompt[:100]}...")
        if request.imageUrl:
            print(f"🖼️ Using reference image: {request.imageUrl[:50]}...")
        print(f"⚙️ Input params: {list(input_data.keys())}")

        # Create prediction ID and mark it in-flight before scheduling work
        prediction_id = f"rep_{uuid.uuid4().hex[:12]}"
        predictions[prediction_id] = {
            "status": "processing",
            "url": None,
            "output": None,
            "error": None
        }

        # Run in background thread (replicate.run() is blocking).
        # get_running_loop() is the correct call inside a coroutine;
        # get_event_loop() here is deprecated since Python 3.10.
        loop = asyncio.get_running_loop()
        loop.run_in_executor(
            executor,
            run_replicate_sync,
            prediction_id,
            model_id,
            input_data
        )

        return ReplicateGenerateResponse(
            id=prediction_id,
            status="processing"
        )

    except HTTPException:
        raise
    except Exception as e:
        print(f"❌ Replicate generation error: {str(e)}")
        import traceback
        traceback.print_exc()
        raise HTTPException(
            status_code=500,
            detail=f"Replicate generation failed: {str(e)}"
        )
210
+
211
+
212
@router.get("/replicate/status/{prediction_id}", response_model=ReplicateStatusResponse)
async def get_prediction_status(prediction_id: str):
    """
    Return the current status of a tracked Replicate prediction.
    """
    record = predictions.get(prediction_id)
    if record is None:
        raise HTTPException(
            status_code=404,
            detail=f"Prediction not found: {prediction_id}"
        )

    return ReplicateStatusResponse(
        status=record["status"],
        output=record.get("output"),
        url=record.get("url"),
        error=record.get("error")
    )
231
+
232
+
233
@router.get("/replicate/models")
async def list_available_models():
    """List available video generation models"""
    # (id, name, description) — all entries are text-to-video and accept an image.
    catalog = [
        ("google/veo-3", "Google Veo 3 (Recommended)",
         "High-quality text/image-to-video generation"),
        ("minimax/video-01", "MiniMax Video-01",
         "High-quality text-to-video generation"),
        ("luma/ray", "Luma Ray",
         "Cinematic video generation"),
    ]
    return {
        "models": [
            {
                "id": model_id,
                "name": display_name,
                "description": description,
                "type": "text-to-video",
                "supports_image": True
            }
            for model_id, display_name, description in catalog
        ]
    }
261
+
262
+
263
@router.post("/replicate/cancel/{prediction_id}")
async def cancel_prediction(prediction_id: str):
    """Cancel a running prediction (marks as cancelled in our store)"""
    record = predictions.get(prediction_id)
    if record is not None:
        # Mark as failed locally; the remote run itself is not interrupted.
        record["status"] = "failed"
        record["error"] = "Cancelled by user"

    return JSONResponse(
        status_code=200,
        content={"message": "Prediction cancelled", "id": prediction_id}
    )
274
+
275
+
276
@router.get("/replicate/health")
async def check_replicate_health():
    """
    Check if Replicate is configured: the API token must be set and the
    `replicate` package importable.
    """
    api_key = os.getenv('REPLICATE_API_TOKEN')

    if api_key and REPLICATE_AVAILABLE:
        message = "Replicate is ready"
    else:
        # Build an explicit list instead of the original string concatenation,
        # which produced "Missing:  replicate package" (double space) when only
        # the package was absent and ran items together with no separator when
        # both were missing.
        missing = []
        if not api_key:
            missing.append("REPLICATE_API_TOKEN")
        if not REPLICATE_AVAILABLE:
            missing.append("replicate package")
        message = "Missing: " + ", ".join(missing)

    return {
        "configured": bool(api_key),
        "package_installed": REPLICATE_AVAILABLE,
        "message": message
    }
api/video_export.py ADDED
@@ -0,0 +1,250 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Video Export API
3
+ Handles merging multiple video clips into a single output video
4
+ """
5
+
6
+ from fastapi import APIRouter, HTTPException, UploadFile, File, Form
7
+ from fastapi.responses import FileResponse, StreamingResponse
8
+ from typing import List, Optional, Tuple
9
+ import os
10
+ import tempfile
11
+ import subprocess
12
+ import json
13
+ from pathlib import Path
14
+
15
+ router = APIRouter()
16
+
17
+
18
def get_video_dimensions(video_path: Path) -> Tuple[int, int]:
    """Probe *video_path* with ffprobe and return (width, height).

    Falls back to 1080x1920 (9:16 portrait) when ffprobe is unavailable,
    exits non-zero, or reports no video stream.
    """
    probe_cmd = [
        'ffprobe',
        '-v', 'error',
        '-select_streams', 'v:0',
        '-show_entries', 'stream=width,height',
        '-of', 'json',
        str(video_path)
    ]
    try:
        proc = subprocess.run(probe_cmd, capture_output=True, text=True, timeout=10)
        if proc.returncode == 0:
            streams = json.loads(proc.stdout).get('streams', [])
            if streams:
                return (streams[0].get('width', 1080), streams[0].get('height', 1920))
    except Exception as e:
        print(f"⚠️ Could not detect video dimensions: {e}")

    # Default to 9:16 portrait if detection fails
    return (1080, 1920)
42
+
43
+
44
@router.post("/export/merge")
async def merge_videos(
    clips_data: str = Form(...),  # JSON string with clip metadata
    files: List[UploadFile] = File(...)
):
    """
    Merge multiple video clips into a single output video

    clips_data: JSON string containing array of clip objects with:
        - index: order in timeline
        - startTime: start time in clip (seconds)
        - endTime: end time in clip (seconds)
        - type: 'video' or 'image'
        - duration: duration for images (seconds)

    files: Video/image files in the same order as clips_data

    All clips are scaled/padded to the first file's dimensions so aspect
    ratio is preserved; clips without audio get a silent stereo track so
    the ffmpeg concat filter always receives matched video/audio pairs.

    Raises 400 on bad input, 500 on ffmpeg failure, 504 on timeout.
    """
    try:
        # Parse clips data
        clips = json.loads(clips_data)

        if len(clips) != len(files):
            raise HTTPException(
                status_code=400,
                detail=f"Mismatch: {len(clips)} clips but {len(files)} files"
            )

        if len(clips) == 0:
            raise HTTPException(status_code=400, detail="No clips to merge")

        # Create temporary directory for processing
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)

            # Save all uploaded files, keeping the extension so ffmpeg can
            # sniff the container format.
            file_paths = []
            for i, file in enumerate(files):
                extension = file.filename.split('.')[-1] if '.' in file.filename else 'mp4'
                file_path = temp_path / f"input_{i}.{extension}"

                with open(file_path, 'wb') as f:
                    content = await file.read()
                    f.write(content)

                file_paths.append(file_path)

            # Detect dimensions from first video to preserve aspect ratio
            target_width, target_height = get_video_dimensions(file_paths[0])
            print(f"📐 Detected video dimensions: {target_width}x{target_height}")

            output_path = temp_path / "output.mp4"

            # Helper function to check if video has audio stream
            def has_audio_stream(video_path: Path) -> bool:
                """Check if video file has an audio stream"""
                try:
                    cmd = [
                        'ffprobe',
                        '-v', 'error',
                        '-select_streams', 'a',
                        '-show_entries', 'stream=codec_type',
                        '-of', 'json',
                        str(video_path)
                    ]
                    result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
                    if result.returncode == 0:
                        # Use the module-level json import (no need to re-import).
                        data = json.loads(result.stdout)
                        return len(data.get('streams', [])) > 0
                    return False
                except Exception:
                    return False

            # Build filter complex - process clips in order
            filter_parts = []
            input_args = []
            concat_inputs = []

            # Process all clips in order
            input_index = 0
            for clip_idx, clip in enumerate(clips):
                file_path = file_paths[clip_idx]

                if clip['type'] == 'video':
                    clip_duration = clip['endTime'] - clip['startTime']
                    input_args.extend(['-i', str(file_path)])

                    # Check if video has audio
                    has_audio = has_audio_stream(file_path)

                    # Trim video and scale to match first video's dimensions
                    # Using scale with force_original_aspect_ratio to handle any size differences
                    filter_parts.append(
                        f"[{input_index}:v]trim=start={clip['startTime']}:end={clip['endTime']},"
                        f"setpts=PTS-STARTPTS,"
                        f"scale={target_width}:{target_height}:force_original_aspect_ratio=decrease,"
                        f"pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2,"
                        f"setsar=1[v{clip_idx}];"
                    )

                    if has_audio:
                        # Use existing audio stream
                        filter_parts.append(
                            f"[{input_index}:a]atrim=start={clip['startTime']}:end={clip['endTime']},"
                            f"asetpts=PTS-STARTPTS[a{clip_idx}];"
                        )
                    else:
                        # Generate silent audio for videos without audio
                        filter_parts.append(
                            f"anullsrc=channel_layout=stereo:sample_rate=44100,atrim=0:{clip_duration},"
                            f"asetpts=PTS-STARTPTS[a{clip_idx}];"
                        )

                    input_index += 1
                else:
                    # Image clip
                    clip_duration = clip.get('duration', 3.0)  # Default 3 seconds for images
                    input_args.extend(['-loop', '1', '-t', str(clip_duration), '-i', str(file_path)])

                    # Scale image to match video dimensions
                    filter_parts.append(
                        f"[{input_index}:v]scale={target_width}:{target_height}:force_original_aspect_ratio=decrease,"
                        f"pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2,"
                        f"setsar=1,format=yuv420p[v{clip_idx}];"
                    )
                    # Generate silent audio
                    filter_parts.append(
                        f"anullsrc=channel_layout=stereo:sample_rate=44100,atrim=0:{clip_duration},"
                        f"asetpts=PTS-STARTPTS[a{clip_idx}];"
                    )

                    input_index += 1

                # Add to concat inputs in order
                concat_inputs.append(f"[v{clip_idx}][a{clip_idx}]")

            # Build complete filter complex
            filter_complex = ''.join(filter_parts)
            filter_complex += f"{''.join(concat_inputs)}concat=n={len(clips)}:v=1:a=1[outv][outa]"

            # Build FFmpeg command
            ffmpeg_cmd = [
                'ffmpeg',
                *input_args,
                '-filter_complex', filter_complex,
                '-map', '[outv]',
                '-map', '[outa]',
                '-c:v', 'libx264',
                '-c:a', 'aac',
                '-movflags', '+faststart',
                '-y',  # Overwrite output
                str(output_path)
            ]

            print(f"🎬 Running FFmpeg merge with dimensions: {target_width}x{target_height}")

            # Run FFmpeg
            result = subprocess.run(
                ffmpeg_cmd,
                capture_output=True,
                text=True,
                timeout=300  # 5 minute timeout
            )

            if result.returncode != 0:
                print(f"❌ FFmpeg error: {result.stderr}")
                raise HTTPException(
                    status_code=500,
                    detail=f"FFmpeg failed: {result.stderr[:500]}"
                )

            if not output_path.exists():
                raise HTTPException(status_code=500, detail="Output file was not created")

            # Read the entire file into memory before the TemporaryDirectory
            # (and the output file with it) is deleted on context exit.
            print(f"📦 Reading merged video file ({output_path.stat().st_size / 1024 / 1024:.2f} MB)...")
            with open(output_path, 'rb') as f:
                video_content = f.read()

        # Temp directory is released here; only the in-memory bytes remain.
        print(f"✅ Video merged successfully: {target_width}x{target_height}")

        # Return the merged video file
        def generate():
            # Feed the already-buffered bytes to the client in small chunks.
            # NOTE: the whole file IS in memory at this point; chunking only
            # shapes the response stream, it does not reduce peak memory.
            chunk_size = 8192
            for i in range(0, len(video_content), chunk_size):
                yield video_content[i:i + chunk_size]

        return StreamingResponse(
            generate(),
            media_type="video/mp4",
            headers={
                "Content-Disposition": "attachment; filename=exported-video.mp4",
                "Content-Type": "video/mp4",
                "Content-Length": str(len(video_content))
            }
        )

    except json.JSONDecodeError as e:
        raise HTTPException(status_code=400, detail=f"Invalid JSON: {str(e)}")
    except subprocess.TimeoutExpired:
        raise HTTPException(status_code=504, detail="Video processing timed out")
    except Exception as e:
        print(f"❌ Export error: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Export failed: {str(e)}")
api/video_generation.py ADDED
@@ -0,0 +1,614 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Video Generation API endpoints
3
+ Handles KIE API integration with SSE support for real-time updates
4
+ """
5
+
6
+ from fastapi import APIRouter, HTTPException, Request
7
+ from fastapi.responses import StreamingResponse, JSONResponse, Response
8
+ from pydantic import BaseModel
9
+ from typing import List, Optional, Dict, Any
10
+ import httpx
11
+ import asyncio
12
+ import json
13
+ import os
14
+ from datetime import datetime
15
+
16
+ from utils.image_processor import compress_and_store_image
17
+ from utils.storage import video_results, sse_clients, cleanup_old_results
18
+
19
+ router = APIRouter()
20
+
21
+ KIE_API_BASE = "https://api.kie.ai"
22
+
23
+ # Request/Response Models
24
class VideoGenerationRequest(BaseModel):
    """Request body for POST /veo/generate."""
    # Can be string (legacy) or dict/object (structured JSON)
    prompt: Any
    # Reference images; non-http(s) entries are treated as data URLs and re-hosted
    imageUrls: Optional[List[str]] = []
    # KIE model identifier
    model: Optional[str] = "veo3_fast"
    # Output aspect ratio (portrait by default)
    aspectRatio: Optional[str] = "9:16"
    # If unset, inferred from image presence (FIRST_AND_LAST_FRAMES_2_VIDEO vs TEXT_2_VIDEO)
    generationType: Optional[str] = None
    # Seed for consistent lighting/style (e.g., 12005)
    seeds: Optional[int] = None
    # Voice type for audio generation (e.g., "Deep", "Warm", "Crisp", "None")
    voiceType: Optional[str] = None
32
+
33
class VideoExtendRequest(BaseModel):
    """Request body for POST /veo/extend."""
    # KIE task ID of the previously generated video to extend
    taskId: str
    # Can be string or structured JSON
    prompt: Any
    # Seed for consistent lighting/style
    seeds: Optional[int] = None
    # Optional watermark text forwarded to KIE
    watermark: Optional[str] = None
    # Voice type for audio generation
    voiceType: Optional[str] = None
39
+
40
class VideoGenerationResponse(BaseModel):
    """Immediate response for generate/extend: the job handle plus coarse status."""
    # KIE task identifier, used for SSE subscription and status polling
    taskId: str
    # Always "processing" at submission time
    status: str
43
+
44
class CallbackData(BaseModel):
    """Envelope KIE POSTs to /veo/callback when a job finishes."""
    # 200 indicates success; anything else is treated as failure
    code: int
    # Human-readable status/error message
    msg: str
    # Carries taskId, info (resultUrls, originUrls, resolution) and fallbackFlag
    data: Optional[Dict[str, Any]] = None
48
+
49
+ # Helper functions
50
def get_kie_api_key():
    """Return the KIE API key from the environment, or abort with HTTP 500."""
    key = os.getenv('KIE_API_KEY')
    if key:
        return key
    raise HTTPException(
        status_code=500,
        detail="KIE_API_KEY not configured on server."
    )
59
+
60
async def send_sse_event(task_id: str, data: dict):
    """Push *data* onto the SSE queue of the client watching *task_id*, if any."""
    queue = sse_clients.get(task_id)
    if queue is not None:
        await queue.put(data)
65
+
66
+ # Endpoints
67
@router.post("/veo/generate", response_model=VideoGenerationResponse)
async def generate_video(request: VideoGenerationRequest, req: Request):
    """
    Generate video using KIE Veo 3.1 API
    Supports text-to-video and image-to-video generation

    Flow: re-host any data-URL images publicly, choose a generation type,
    then POST to KIE with a callback URL. KIE reports completion
    asynchronously to /veo/callback; this endpoint returns the taskId
    immediately with status "processing".

    Raises HTTPException 502 when KIE is unreachable or answers with an
    HTML error page; otherwise forwards KIE's own error code/message.
    """
    try:
        api_key = get_kie_api_key()

        # Build public URL for callback (KIE must be able to reach this server)
        public_url = os.getenv('PUBLIC_URL', f"http://localhost:{os.getenv('SERVER_PORT', 4000)}")
        callback_url = f"{public_url}/api/veo/callback"

        # Process image URLs: external http(s) URLs pass through unchanged,
        # anything else is treated as a data URL and re-hosted.
        public_image_urls = []
        if request.imageUrls:
            print(f"📷 Processing {len(request.imageUrls)} images...")
            for image_url in request.imageUrls:
                # If it's already a public URL, use it as-is
                if image_url.startswith(('http://', 'https://')):
                    print(f" Using external URL: {image_url}")
                    public_image_urls.append(image_url)
                else:
                    # Compress and host the data URL
                    hosted_url = await compress_and_store_image(image_url, public_url)
                    print(f" Hosted image: {hosted_url}")
                    public_image_urls.append(hosted_url)

        # Determine generation type: image inputs imply first/last-frame mode
        generation_type = request.generationType
        if not generation_type:
            generation_type = "FIRST_AND_LAST_FRAMES_2_VIDEO" if public_image_urls else "TEXT_2_VIDEO"

        # Log prompt format and seed
        if isinstance(request.prompt, dict):
            print(f"📝 Sending structured JSON prompt to Veo 3.1")
        else:
            print(f"📝 Sending text prompt to Veo 3.1")

        if request.seeds:
            print(f"🎲 Using seed: {request.seeds} (warm, flattering lighting)")

        # Call KIE API
        async with httpx.AsyncClient(timeout=30.0) as client:
            payload = {
                "prompt": request.prompt,  # Can be string or structured JSON object
                "imageUrls": public_image_urls,
                "model": request.model,
                "aspectRatio": request.aspectRatio,
                "generationType": generation_type,
                "enableTranslation": True,
                "callBackUrl": callback_url
            }

            # Add optional seed parameter
            if request.seeds is not None:
                payload["seeds"] = request.seeds

            # Add voice type for audio generation (if not "None")
            if request.voiceType and request.voiceType.lower() != "none":
                payload["voiceType"] = request.voiceType
                print(f"🎤 Using voice type: {request.voiceType}")
            else:
                print(f"🔇 No voice/audio requested (voiceType: {request.voiceType})")

            response = await client.post(
                f"{KIE_API_BASE}/api/v1/veo/generate",
                headers={
                    "Authorization": f"Bearer {api_key}",
                    "Content-Type": "application/json"
                },
                json=payload
            )

            # Log raw response for debugging
            print(f"📡 KIE API response status: {response.status_code}")

            # Check HTTP status first
            if response.status_code != 200:
                error_text = response.text
                content_type = response.headers.get('content-type', '').lower()

                # Handle HTML error responses (like 502 Bad Gateway pages)
                if 'text/html' in content_type or error_text.strip().startswith('<!DOCTYPE') or error_text.strip().startswith('<html'):
                    # Extract meaningful error from HTML if possible
                    error_message = f"KIE API service unavailable (HTTP {response.status_code})"

                    # Try to extract title or error message from HTML
                    if '<title>' in error_text:
                        import re
                        title_match = re.search(r'<title>(.*?)</title>', error_text, re.IGNORECASE | re.DOTALL)
                        if title_match:
                            title = title_match.group(1).strip()
                            # Extract just the error part (e.g., "502: Bad gateway" from "kie.ai | 502: Bad gateway")
                            if ':' in title:
                                error_message = f"KIE API error: {title.split('|')[-1].strip()}"
                            else:
                                error_message = f"KIE API error: {title}"

                    print(f"❌ KIE API HTTP error: {response.status_code} - {error_message}")
                    raise HTTPException(
                        status_code=502,  # Bad Gateway - the KIE API is down/unavailable
                        detail=error_message
                    )
                else:
                    # Non-HTML error response, try to extract JSON error if possible
                    try:
                        error_data = response.json()
                        error_message = error_data.get('msg') or error_data.get('message') or error_data.get('detail') or f"KIE API error (HTTP {response.status_code})"
                    except (json.JSONDecodeError, ValueError):
                        # Not JSON, use text (truncated)
                        error_message = error_text[:200] if len(error_text) > 200 else error_text

                    print(f"❌ KIE API HTTP error: {response.status_code} - {error_message[:200]}")
                    raise HTTPException(
                        status_code=response.status_code,
                        detail=f"KIE API error: {error_message}"
                    )

            result = response.json()
            print(f"📡 KIE API result code: {result.get('code')}, msg: {result.get('msg')}")

            # KIE wraps its real status in the JSON body's `code` field.
            if result.get('code') != 200:
                raise HTTPException(
                    status_code=result.get('code', 500),
                    detail=result.get('msg', 'KIE API request failed')
                )

            # NOTE(review): raises KeyError (caught below as 500) if KIE
            # returns code 200 without data.taskId.
            task_id = result['data']['taskId']
            print(f"✅ Video generation started: {task_id}")

            return VideoGenerationResponse(
                taskId=task_id,
                status="processing"
            )

    except HTTPException:
        raise
    except httpx.HTTPStatusError as e:
        # NOTE(review): httpx only raises HTTPStatusError from
        # raise_for_status(), which is never called above — this branch
        # appears defensive/unreachable.
        error_text = e.response.text
        content_type = e.response.headers.get('content-type', '').lower()

        # Handle HTML error responses
        if 'text/html' in content_type or error_text.strip().startswith('<!DOCTYPE') or error_text.strip().startswith('<html'):
            error_msg = f"KIE API service unavailable (HTTP {e.response.status_code})"
            # Try to extract meaningful error from HTML
            if '<title>' in error_text:
                import re
                title_match = re.search(r'<title>(.*?)</title>', error_text, re.IGNORECASE | re.DOTALL)
                if title_match:
                    title = title_match.group(1).strip()
                    if ':' in title:
                        error_msg = f"KIE API error: {title.split('|')[-1].strip()}"
        else:
            # Try to extract JSON error if possible
            try:
                error_data = e.response.json()
                error_msg = error_data.get('msg') or error_data.get('message') or error_data.get('detail') or f"KIE API error (HTTP {e.response.status_code})"
            except (json.JSONDecodeError, ValueError):
                error_msg = error_text[:200] if len(error_text) > 200 else error_text

        print(f"❌ {error_msg}")
        raise HTTPException(status_code=502 if 'text/html' in content_type else e.response.status_code, detail=error_msg)
    except httpx.RequestError as e:
        # Network-level failure (DNS, connect, timeout) — surface as 502.
        error_msg = f"KIE API request error: {type(e).__name__} - {str(e)}"
        print(f"❌ {error_msg}")
        raise HTTPException(status_code=502, detail=error_msg)
    except json.JSONDecodeError as e:
        error_msg = f"Invalid JSON response from KIE API. The service may be unavailable."
        print(f"❌ JSON decode error: {str(e)}")
        raise HTTPException(status_code=502, detail=error_msg)
    except Exception as e:
        import traceback
        error_msg = f"{type(e).__name__}: {str(e)}"
        print(f"❌ Video generation error: {error_msg}")
        traceback.print_exc()
        raise HTTPException(
            status_code=500,
            detail=f"Video generation request failed: {error_msg}"
        )
247
+
248
@router.post("/veo/callback")
async def veo_callback(callback_data: CallbackData):
    """
    Callback endpoint for KIE API
    Receives video generation status updates

    Stores the result in the in-memory video_results store (so late SSE
    subscribers and status polls can still see it) and forwards a
    succeeded/failed event to any SSE client waiting on this taskId.
    """
    try:
        data = callback_data.data or {}
        # NOTE(review): task_id may be None if KIE omits it; the result is
        # then stored under key None — confirm KIE always sends taskId.
        task_id = data.get('taskId')
        info = data.get('info', {})
        fallback_flag = data.get('fallbackFlag')

        print(f"📥 Callback received for task {task_id}: code={callback_data.code}, msg={callback_data.msg}")

        # Store result
        video_results[task_id] = {
            'code': callback_data.code,
            'msg': callback_data.msg,
            'taskId': task_id,
            'info': info,
            'fallbackFlag': fallback_flag,
            'timestamp': datetime.now().timestamp()
        }

        # Send SSE update to client
        if callback_data.code == 200 and info:
            await send_sse_event(task_id, {
                'status': 'succeeded',
                'url': info.get('resultUrls', [None])[0],
                'resultUrls': info.get('resultUrls', []),
                'originUrls': info.get('originUrls', []),
                'resolution': info.get('resolution'),
                'fallbackFlag': fallback_flag
            })
        else:
            await send_sse_event(task_id, {
                'status': 'failed',
                'error': callback_data.msg,
                'code': callback_data.code
            })

        # Clean up old results
        cleanup_old_results()

        # Acknowledge so KIE does not keep retrying this callback.
        return JSONResponse(
            status_code=200,
            content={'code': 200, 'msg': 'success'}
        )

    except Exception as e:
        print(f"❌ Callback processing error: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail="Failed to process callback"
        )
302
+ )
303
+
304
+
305
@router.post("/veo/extend", response_model=VideoGenerationResponse)
async def extend_video(request: VideoExtendRequest):
    """
    Extend an existing video using KIE Veo 3.1 extend API
    Takes an existing taskId and extends it with new prompt

    Completion is reported asynchronously to /veo/callback (same flow as
    /veo/generate); the returned taskId identifies the extension job.
    """
    try:
        api_key = get_kie_api_key()

        # Build public URL for callback
        public_url = os.getenv('PUBLIC_URL', f"http://localhost:{os.getenv('SERVER_PORT', 4000)}")
        callback_url = f"{public_url}/api/veo/callback"

        print(f"🎬 Extending video from task: {request.taskId}")

        # Log prompt format and seed
        if isinstance(request.prompt, dict):
            print(f"📝 Extending with structured JSON prompt")
        else:
            print(f"📝 Extending with text prompt")

        if request.seeds:
            print(f"🎲 Using seed: {request.seeds} (consistent lighting)")

        # Call KIE extend API
        async with httpx.AsyncClient(timeout=30.0) as client:
            payload = {
                "taskId": request.taskId,
                "prompt": request.prompt,
                "callBackUrl": callback_url
            }

            # Add optional parameters
            if request.seeds is not None:
                payload["seeds"] = request.seeds
            if request.watermark:
                payload["watermark"] = request.watermark
            if request.voiceType and request.voiceType.lower() != "none":
                payload["voiceType"] = request.voiceType
                print(f"🎤 Using voice type: {request.voiceType}")
            else:
                print(f"🔇 No voice/audio requested (voiceType: {request.voiceType})")

            response = await client.post(
                f"{KIE_API_BASE}/api/v1/veo/extend",
                headers={
                    "Authorization": f"Bearer {api_key}",
                    "Content-Type": "application/json"
                },
                json=payload
            )

            # Check for HTML error responses
            # NOTE(review): unlike /veo/generate, a non-200 response that is
            # NOT HTML falls through to response.json() below instead of
            # raising here — that JSON's `code` check handles it, but a
            # non-JSON body would surface as a JSONDecodeError (502).
            if response.status_code != 200:
                error_text = response.text
                content_type = response.headers.get('content-type', '').lower()

                if 'text/html' in content_type or error_text.strip().startswith('<!DOCTYPE') or error_text.strip().startswith('<html'):
                    error_message = f"KIE API service unavailable (HTTP {response.status_code})"
                    if '<title>' in error_text:
                        import re
                        title_match = re.search(r'<title>(.*?)</title>', error_text, re.IGNORECASE | re.DOTALL)
                        if title_match:
                            title = title_match.group(1).strip()
                            if ':' in title:
                                error_message = f"KIE API error: {title.split('|')[-1].strip()}"
                    raise HTTPException(status_code=502, detail=error_message)

            result = response.json()

            if result.get('code') != 200:
                raise HTTPException(
                    status_code=result.get('code', 500),
                    detail=result.get('msg', 'KIE extend API request failed')
                )

            new_task_id = result['data']['taskId']
            print(f"✅ Video extension started: {new_task_id}")

            return VideoGenerationResponse(
                taskId=new_task_id,
                status="processing"
            )

    except HTTPException:
        raise
    except httpx.HTTPStatusError as e:
        # NOTE(review): likely defensive/unreachable — raise_for_status()
        # is never called on the response above.
        error_text = e.response.text
        content_type = e.response.headers.get('content-type', '').lower()

        if 'text/html' in content_type or error_text.strip().startswith('<!DOCTYPE') or error_text.strip().startswith('<html'):
            error_msg = f"KIE API service unavailable (HTTP {e.response.status_code})"
            if '<title>' in error_text:
                import re
                title_match = re.search(r'<title>(.*?)</title>', error_text, re.IGNORECASE | re.DOTALL)
                if title_match:
                    title = title_match.group(1).strip()
                    if ':' in title:
                        error_msg = f"KIE API error: {title.split('|')[-1].strip()}"
        else:
            try:
                error_data = e.response.json()
                error_msg = error_data.get('msg') or error_data.get('message') or error_data.get('detail') or f"KIE API error (HTTP {e.response.status_code})"
            except (json.JSONDecodeError, ValueError):
                error_msg = error_text[:200] if len(error_text) > 200 else error_text

        print(f"❌ {error_msg}")
        raise HTTPException(status_code=502 if 'text/html' in content_type else e.response.status_code, detail=error_msg)
    except httpx.RequestError as e:
        # Network-level failure — surface as 502.
        error_msg = f"KIE API request error: {type(e).__name__} - {str(e)}"
        print(f"❌ {error_msg}")
        raise HTTPException(status_code=502, detail=error_msg)
    except json.JSONDecodeError as e:
        error_msg = f"Invalid JSON response from KIE API. The service may be unavailable."
        print(f"❌ JSON decode error: {str(e)}")
        raise HTTPException(status_code=502, detail=error_msg)
    except Exception as e:
        import traceback
        error_msg = f"{type(e).__name__}: {str(e)}"
        print(f"❌ Video extension error: {error_msg}")
        traceback.print_exc()
        raise HTTPException(
            status_code=500,
            detail=f"Video extension error: {error_msg}"
        )
430
+
431
+
432
@router.get("/veo/events/{task_id}")
async def sse_events(task_id: str):
    """
    Server-Sent Events endpoint for real-time updates

    Registers an asyncio.Queue under task_id in sse_clients, immediately
    replays an already-stored result (if the KIE callback arrived before
    the client connected), then streams queued events until disconnect.
    """
    async def event_generator():
        # Create queue for this client
        # NOTE(review): a second subscriber for the same task_id replaces
        # this queue, so only the most recent client receives events.
        queue = asyncio.Queue()
        sse_clients[task_id] = queue

        print(f"🔌 SSE client connected for task {task_id}")

        try:
            # Check if result already exists (callback may have beaten us)
            if task_id in video_results:
                result = video_results[task_id]
                if result['code'] == 200 and result.get('info'):
                    info = result['info']
                    event_data = {
                        # NOTE(review): [None] default guards a missing key,
                        # but an explicitly empty resultUrls list would raise
                        # IndexError here — confirm KIE never sends [].
                        'status': 'succeeded',
                        'url': info.get('resultUrls', [None])[0],
                        'resultUrls': info.get('resultUrls', []),
                        'originUrls': info.get('originUrls', []),
                        'resolution': info.get('resolution'),
                        'fallbackFlag': result.get('fallbackFlag')
                    }
                else:
                    event_data = {
                        'status': 'failed',
                        'error': result['msg'],
                        'code': result['code']
                    }
                yield f"data: {json.dumps(event_data)}\n\n"

            # Stream events until the client disconnects (CancelledError)
            while True:
                data = await queue.get()
                yield f"data: {json.dumps(data)}\n\n"

        except asyncio.CancelledError:
            print(f"🔌 SSE client disconnected for task {task_id}")
        finally:
            # Deregister so the callback path stops queueing for us.
            if task_id in sse_clients:
                del sse_clients[task_id]

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive"
        }
    )
485
+
486
@router.get("/veo/status/{task_id}")
async def get_video_status(task_id: str):
    """
    Get video generation status from KIE API

    Polling fallback for clients not using SSE: queries KIE's record
    endpoint and maps its status onto 'succeeded'/'failed'/'processing'.
    """
    try:
        api_key = get_kie_api_key()

        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.get(
                f"{KIE_API_BASE}/api/v1/veo/video/{task_id}",
                headers={
                    "Authorization": f"Bearer {api_key}"
                }
            )

            # Check for HTML error responses
            # NOTE(review): non-HTML non-200 responses fall through to
            # response.json() below; the JSON `code` check handles them.
            if response.status_code != 200:
                error_text = response.text
                content_type = response.headers.get('content-type', '').lower()

                if 'text/html' in content_type or error_text.strip().startswith('<!DOCTYPE') or error_text.strip().startswith('<html'):
                    error_message = f"KIE API service unavailable (HTTP {response.status_code})"
                    if '<title>' in error_text:
                        import re
                        title_match = re.search(r'<title>(.*?)</title>', error_text, re.IGNORECASE | re.DOTALL)
                        if title_match:
                            title = title_match.group(1).strip()
                            if ':' in title:
                                error_message = f"KIE API error: {title.split('|')[-1].strip()}"
                    raise HTTPException(status_code=502, detail=error_message)

            result = response.json()

            if result.get('code') != 200:
                raise HTTPException(
                    status_code=result.get('code', 500),
                    detail=result.get('msg', 'Failed to get video status')
                )

            # Transform response: anything that is neither 'completed' nor
            # 'failed' is reported as still processing.
            status = result['data'].get('status')
            video_url = result['data'].get('videoUrl')

            return {
                'status': 'succeeded' if status == 'completed' else 'failed' if status == 'failed' else 'processing',
                'output': video_url if status == 'completed' else None,
                'url': video_url if status == 'completed' else None
            }

    except HTTPException:
        raise
    except httpx.HTTPStatusError as e:
        # NOTE(review): likely defensive/unreachable — raise_for_status()
        # is never called on the response above.
        error_text = e.response.text
        content_type = e.response.headers.get('content-type', '').lower()

        if 'text/html' in content_type or error_text.strip().startswith('<!DOCTYPE') or error_text.strip().startswith('<html'):
            error_msg = f"KIE API service unavailable (HTTP {e.response.status_code})"
            if '<title>' in error_text:
                import re
                title_match = re.search(r'<title>(.*?)</title>', error_text, re.IGNORECASE | re.DOTALL)
                if title_match:
                    title = title_match.group(1).strip()
                    if ':' in title:
                        error_msg = f"KIE API error: {title.split('|')[-1].strip()}"
        else:
            try:
                error_data = e.response.json()
                error_msg = error_data.get('msg') or error_data.get('message') or error_data.get('detail') or f"KIE API error (HTTP {e.response.status_code})"
            except (json.JSONDecodeError, ValueError):
                error_msg = error_text[:200] if len(error_text) > 200 else error_text

        print(f"❌ {error_msg}")
        raise HTTPException(status_code=502 if 'text/html' in content_type else e.response.status_code, detail=error_msg)
    except httpx.RequestError as e:
        # Network-level failure — surface as 502.
        error_msg = f"KIE API request error: {type(e).__name__} - {str(e)}"
        print(f"❌ {error_msg}")
        raise HTTPException(status_code=502, detail=error_msg)
    except json.JSONDecodeError as e:
        error_msg = f"Invalid JSON response from KIE API. The service may be unavailable."
        print(f"❌ JSON decode error: {str(e)}")
        raise HTTPException(status_code=502, detail=error_msg)
    except Exception as e:
        import traceback
        error_msg = f"{type(e).__name__}: {str(e)}"
        print(f"❌ Status check error: {error_msg}")
        traceback.print_exc()
        raise HTTPException(
            status_code=500,
            detail=f"Failed to check video status: {error_msg}"
        )
577
+
578
@router.get("/veo/download")
async def download_video(url: str):
    """
    Download video from external URL and relay it to the client as an
    attachment, avoiding CORS issues. The full body is buffered in memory
    before being returned.
    """
    if not url:
        raise HTTPException(status_code=400, detail="Missing url query parameter")

    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            upstream = await client.get(url)

            if upstream.status_code != 200:
                raise HTTPException(
                    status_code=upstream.status_code,
                    detail="Failed to download asset"
                )

            body = upstream.content
            return Response(
                content=body,
                media_type=upstream.headers.get('content-type', 'video/mp4'),
                headers={
                    'Content-Disposition': 'attachment; filename="video.mp4"',
                    'Content-Length': str(len(body))
                }
            )

    except HTTPException:
        raise
    except Exception as e:
        print(f"❌ Download error: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to download asset: {str(e)}"
        )
614
+
api/whisper_service.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Whisper-based Video Analysis Service
Optimized endpoint that finds trim point and extracts frame in one call
"""

from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from typing import Optional
import tempfile
import os
import httpx

router = APIRouter()

# Check Whisper availability at import time.
# The helpers live in project-local utils modules; if they (or the optional
# openai-whisper dependency behind them) are missing, WHISPER_AVAILABLE is
# left False and the endpoints fall back to end-of-video behavior instead
# of failing to import.
try:
    from utils.whisper_trim import find_last_word_timestamp, transcribe_video, is_whisper_available
    from utils.video_processor import extract_frame, get_video_info
    WHISPER_AVAILABLE = is_whisper_available()
except ImportError:
    WHISPER_AVAILABLE = False
22
+
23
+
24
class WhisperAnalyzeRequest(BaseModel):
    """Request payload for POST /whisper/analyze-and-extract."""
    video_url: str  # URL of the generated video segment to analyze
    dialogue: str  # The expected dialogue/script for this segment
    buffer_time: float = 0.3  # Time after last word for frame extraction
    model_size: str = "base"  # Whisper model size
29
+
30
+
31
class WhisperAnalyzeResponse(BaseModel):
    """Response payload for POST /whisper/analyze-and-extract."""
    success: bool
    last_word_timestamp: Optional[float] = None  # When last word ends
    trim_point: Optional[float] = None  # Recommended trim point (last_word + buffer)
    frame_timestamp: Optional[float] = None  # Where frame was extracted
    frame_base64: Optional[str] = None  # Base64 encoded frame image
    video_duration: float = 0  # Total video duration
    transcribed_text: Optional[str] = None  # What Whisper actually heard (for consistency check)
    error: Optional[str] = None  # Populated when success is False
40
+
41
+
42
@router.post("/whisper/analyze-and-extract", response_model=WhisperAnalyzeResponse)
async def analyze_and_extract_frame(request: WhisperAnalyzeRequest):
    """
    Analyze video with Whisper to find last spoken word,
    then extract frame at that point for visual continuity.

    This is the optimized flow:
    1. Download video
    2. Use Whisper to find last spoken word timestamp
    3. Extract frame at (last_word_time + buffer)
    4. Return frame + trim metadata

    The trim metadata can be used later during final merge.
    Falls back to a near-end frame when Whisper is unavailable,
    fails, or detects no speech. Errors are reported in the response
    body (success=False) rather than as HTTP errors.
    """
    temp_video = None

    try:
        # Download video to a temp file.
        # NamedTemporaryFile(delete=False) creates the file atomically,
        # fixing the race/symlink vulnerability of the deprecated
        # tempfile.mktemp() the original used.
        print("🎤 Downloading video for Whisper analysis...")
        with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp:
            temp_video = tmp.name

        async with httpx.AsyncClient(timeout=120.0) as client:
            response = await client.get(request.video_url)
            if response.status_code != 200:
                return WhisperAnalyzeResponse(
                    success=False,
                    error=f"Failed to download video: {response.status_code}"
                )

            with open(temp_video, 'wb') as f:
                f.write(response.content)

        # Probe the container for total duration (used to clamp timestamps).
        video_info = get_video_info(temp_video)
        video_duration = float(video_info['format']['duration'])
        print(f"📹 Video duration: {video_duration:.2f}s")

        # Try Whisper-based analysis; all of these stay None on fallback.
        last_word_time = None
        frame_base64 = None
        trim_point = None
        frame_timestamp = None
        transcribed_text = None

        if WHISPER_AVAILABLE:
            try:
                print(f"🎤 Running Whisper transcription (model: {request.model_size})...")

                # Get full transcription and last word timestamp
                transcribed_text, last_word_time = transcribe_video(
                    video_path=temp_video,
                    model_size=request.model_size
                )

                if last_word_time and last_word_time > 0:
                    print(f"✅ Last spoken word at: {last_word_time:.2f}s")

                    # Trim just after the last word; grab the frame slightly
                    # before the end so the decoder always has a frame.
                    # max(0.0, ...) guards against sub-0.1s clips producing
                    # a negative seek target.
                    trim_point = min(last_word_time + request.buffer_time, video_duration)
                    frame_timestamp = max(
                        0.0,
                        min(last_word_time + request.buffer_time, video_duration - 0.1)
                    )

                    print(f"📍 Trim point: {trim_point:.2f}s, Frame at: {frame_timestamp:.2f}s")
                else:
                    print(f"⚠️ Could not find last word, using fallback")

            except Exception as whisper_err:
                # Whisper failure is non-fatal: fall through to the
                # end-of-video fallback below.
                print(f"⚠️ Whisper analysis failed: {str(whisper_err)}")
        else:
            print("⚠️ Whisper not available, using fallback")

        # Fallback: use end of video
        if frame_timestamp is None:
            frame_timestamp = max(0, video_duration - 0.5)
            trim_point = video_duration
            print(f"📍 Fallback: Frame at {frame_timestamp:.2f}s (near end)")

        # Extract frame at the calculated timestamp (uncompressed for continuity)
        print(f"📸 Extracting frame at {frame_timestamp:.2f}s")
        frame_base64 = extract_frame(
            video_path=temp_video,
            timestamp=frame_timestamp,
            return_base64=True,
            compress=False  # No compression for continuity frames
        )
        print(f"✅ Frame extracted successfully")

        return WhisperAnalyzeResponse(
            success=True,
            last_word_timestamp=last_word_time,
            trim_point=trim_point,
            frame_timestamp=frame_timestamp,
            frame_base64=frame_base64,
            video_duration=video_duration,
            transcribed_text=transcribed_text,
            error=None
        )

    except Exception as e:
        # Boundary handler: report the failure in the response body so the
        # frontend can fall back gracefully instead of seeing a 500.
        print(f"❌ Whisper analyze error: {str(e)}")
        import traceback
        traceback.print_exc()

        return WhisperAnalyzeResponse(
            success=False,
            error=str(e)
        )

    finally:
        # Clean up temp file; catch only OSError so real bugs aren't hidden
        # (the original bare `except:` also swallowed KeyboardInterrupt).
        if temp_video and os.path.exists(temp_video):
            try:
                os.remove(temp_video)
            except OSError:
                pass
156
+
157
+
158
@router.get("/whisper/status")
async def whisper_status():
    """Report whether the Whisper transcription backend is available."""
    if WHISPER_AVAILABLE:
        message = "Whisper is ready"
    else:
        message = "Whisper not installed. Run: pip install openai-whisper moviepy"
    return {"available": WHISPER_AVAILABLE, "message": message}
166
+
frontend/.gitignore ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Logs
2
+ logs
3
+ *.log
4
+ npm-debug.log*
5
+ yarn-debug.log*
6
+ yarn-error.log*
7
+ pnpm-debug.log*
8
+ lerna-debug.log*
9
+
10
+ node_modules
11
+ dist
12
+ dist-ssr
13
+ *.local
14
+
15
+ # Editor directories and files
16
+ .vscode/*
17
+ !.vscode/extensions.json
18
+ .idea
19
+ .DS_Store
20
+ *.suo
21
+ *.ntvs*
22
+ *.njsproj
23
+ *.sln
24
+ *.sw?
frontend/FLOW.md ADDED
@@ -0,0 +1,512 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Video Generation Flows
2
+
3
+ ## Overview
4
+
5
+ ```
6
+ ┌─────────────────────────────────────────────────────────────────────────────┐
7
+ │ VIDEO GENESIS STUDIO │
8
+ ├─────────────────────────────────────────────────────────────────────────────┤
9
+ │ │
10
+ │ ┌─────────────┐ ┌─────────────┐ │
11
+ │ │ │ │ │ │
12
+ │ │ KLING AI │ OR │ REPLICATE │ │
13
+ │ │ (KIE) │ │ │ │
14
+ │ └──────┬──────┘ └──────┬──────┘ │
15
+ │ │ │ │
16
+ │ ▼ ▼ │
17
+ │ ┌─────────────┐ ┌─────────────┐ │
18
+ │ │ GPT-4o │ │ Simple │ │
19
+ │ │ Segmentation│ │ Prompt │ │
20
+ │ └──────┬──────┘ └──────┬──────┘ │
21
+ │ │ │ │
22
+ │ ▼ ▼ │
23
+ │ ┌─────────────┐ ┌─────────────┐ │
24
+ │ │ Multi-Video │ │ Single │ │
25
+ │ │ Generation │ │ Video Gen │ │
26
+ │ └─────────────┘ └─────────────┘ │
27
+ │ │
28
+ └─────────────────────────────────────────────────────────────────────────────┘
29
+ ```
30
+
31
+ ---
32
+
33
+ ## Flow 1: Kling AI (Recommended)
34
+
35
+ This is the **advanced flow** for professional UGC and talking head videos.
36
+
37
+ ### Two Generation Modes
38
+
39
+ #### Mode A: 🎯 Frame Continuity (Recommended)
40
+ **Best for visual consistency** - mirrors the Replicate approach from `standalone_video_creator.py`
41
+
42
+ ```
43
+ ┌─────────────────────────────────────────────────────────────────────────────┐
44
+ │ FRAME CONTINUITY FLOW │
45
+ ├─────────────────────────────────────────────────────────────────────────────┤
46
+ │ │
47
+ │ Segment 1: │
48
+ │ ┌────────────────┐ ┌────────────────┐ ┌────────────────┐ │
49
+ │ │ Original Image │ ───► │ Generate Video │ ───► │ Extract Last │ │
50
+ │ │ (your upload) │ │ Segment 1 │ │ Frame │ │
51
+ │ └────────────────┘ └────────────────┘ └───────┬────────┘ │
52
+ │ │ │
53
+ │ ▼ │
54
+ │ Segment 2: ┌────────────────┐ │
55
+ │ ┌────────────────┐ ┌────────────────┐ │ Last Frame │ │
56
+ │ │ Frame from │ ◄─── │ Generate Video │ ◄─│ from Seg 1 │ │
57
+ │ │ Segment 1 │ │ Segment 2 │ │ (new reference)│ │
58
+ │ └────────────────┘ └───────┬────────┘ └────────────────┘ │
59
+ │ │ │
60
+ │ ▼ │
61
+ │ Segment 3: ┌────────────────┐ │
62
+ │ ┌────────────────┐ │ Extract Last │ │
63
+ │ │ Frame from │ ◄────│ Frame │ │
64
+ │ │ Segment 2 │ └────────────────┘ │
65
+ │ └────────────────┘ │
66
+ │ │ │
67
+ │ ▼ │
68
+ │ ... continues until all segments complete ... │
69
+ │ │
70
+ └─────────────────────────────────────────────────────────────────────────────┘
71
+
72
+ ✅ BENEFITS:
73
+ • Perfect visual continuity - each segment starts exactly where previous ended
74
+ • Character appearance stays consistent across all segments
75
+ • Scene/lighting matches between segments
76
+ • Same approach that makes Replicate flow work so well
77
+ ```
78
+
79
+ #### Mode B: ➕ Extend API
80
+ **Faster but potentially less consistent** - uses Kling's native extend functionality
81
+
82
+ ```
83
+ ┌─────────────────────────────────────────────────────────────────────────────┐
84
+ │ EXTEND API FLOW │
85
+ ├─────────────────────────────────────────────────────────────────────────────┤
86
+ │ │
87
+ │ ┌────────────────┐ ┌────────────────┐ │
88
+ │ │ Original Image │ ───► │ Generate Video │ ───► taskId_1 │
89
+ │ │ │ │ Segment 1 │ │
90
+ │ └────────────────┘ └────────────────┘ │
91
+ │ │ │
92
+ │ │ extend(taskId_1) │
93
+ │ ▼ │
94
+ │ ┌────────────────┐ │
95
+ │ │ Extend Video │ ───► taskId_2 │
96
+ │ │ Segment 2 │ │
97
+ │ └────────────────┘ │
98
+ │ │ │
99
+ │ │ extend(taskId_2) │
100
+ │ ▼ │
101
+ │ ┌────────────────┐ │
102
+ │ │ Extend Video │ ───► taskId_3 │
103
+ │ │ Segment 3 │ │
104
+ │ └────────────────┘ │
105
+ │ │
106
+ └─────────────────────────────────────────────────────────────────────────────┘
107
+
108
+ ⚠️ TRADEOFFS:
109
+ • Faster (no frame extraction step)
110
+ • May have slight visual drift between segments
111
+ • Relies on Kling's internal continuity handling
112
+ ```
113
+
114
+ ### Complete Step-by-Step Flow
115
+
116
+ ```
117
+ USER INPUT BACKEND EXTERNAL APIs
118
+ ─────────────────────────────────────────────────────────────────────────────
119
+
120
+ ┌──────────────────┐
121
+ │ 1. Enter Script │
122
+ │ + Upload Image│
123
+ │ + Settings │
124
+ │ + SELECT MODE │ ◄── NEW: Choose Frame Continuity or Extend
125
+ └────────┬─────────┘
126
+
127
+
128
+ ┌──────────────────┐ ┌──────────────────┐
129
+ │ 2. Submit Form │────────►│ /api/generate- │
130
+ │ │ │ prompts │
131
+ └──────────────────┘ └────────┬─────────┘
132
+
133
+
134
+ ┌──────────────────┐ ┌──────────────────┐
135
+ │ 3. GPT-4o │────────►│ OpenAI API │
136
+ │ Analysis │ │ (GPT-4o Vision) │
137
+ └────────┬─────────┘ └──────────────────┘
138
+
139
+ │ Returns structured
140
+ │ segments with:
141
+ │ - Character description
142
+ │ - Scene continuity
143
+ │ - Action timeline
144
+ │ - Dialogue sync
145
+
146
+ ┌──────────────────┐ ┌──────────────────┐
147
+ │ 4. See segments │◄────────│ Segments payload │
148
+ │ preview │ │ (2-10 segments) │
149
+ └────────┬─────────┘ └──────────────────┘
150
+
151
+ │ For each segment:
152
+
153
+ ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐
154
+ │ 5. Generating... │────────►│ /api/veo/ │────────►│ KIE Veo 3.1 API │
155
+ │ (progress UI) │ │ generate │ │ │
156
+ └──────────────────┘ └────────┬─────────┘ └────────┬─────────┘
157
+ │ │
158
+ │ SSE Events │
159
+ │◄───────────────────────────┘
160
+
161
+
162
+ ┌──────────────────┐ ┌──────────────────┐
163
+ │ 6. Video Ready │◄────────│ /api/veo/events/ │
164
+ │ Download │ │ {taskId} │
165
+ └──────────────────┘ └──────────────────┘
166
+
167
+ │ IF Frame Continuity mode:
168
+ │ └── Extract last frame → Use as next reference
169
+ │ IF Extend mode:
170
+ │ └── Use extend API with current taskId
171
+
172
+ ┌──────────────────┐
173
+ │ 7. All Complete │
174
+ │ Download All │
175
+ └──────────────────┘
176
+ ```
177
+
178
+ ### Detailed Steps
179
+
180
+ #### Step 1: User Input
181
+ ```
182
+ ┌─────────────────────────────────────────────────────────────┐
183
+ │ GENERATION FORM │
184
+ ├─────────────────────────────────────────────────────────────┤
185
+ │ │
186
+ │ 📝 Script (Required) │
187
+ │ ┌─────────────────────────────────────────────────────┐ │
188
+ │ │ "Hey everyone! Today I want to share something │ │
189
+ │ │ amazing with you. This product changed my life... │ │
190
+ │ │ Let me show you how it works..." │ │
191
+ │ └─────────────────────────────────────────────────────┘ │
192
+ │ Word count: 85 words → ~5 segments estimated │
193
+ │ │
194
+ │ 🖼️ Character Image (Required) │
195
+ │ ┌───────────────────┐ │
196
+ │ │ [Drag & Drop] │ │
197
+ │ │ Your photo │ │
198
+ │ └───────────────────┘ │
199
+ │ │
200
+ │ ⚙️ Settings │
201
+ │ ┌────────────┐ ┌────────────┐ ┌────────────┐ │
202
+ │ │ Voice: Deep│ │Energy: Med │ │Camera: Std │ │
203
+ │ └────────────┘ └────────────┘ └────────────┘ │
204
+ │ ┌────────────┐ ┌────────────┐ ┌────────────┐ │
205
+ │ │Ratio: 9:16 │ │Seed: 12005 │ │Style: UGC │ │
206
+ │ └────────────┘ └────────────┘ └────────────┘ │
207
+ │ │
208
+ │ [🚀 Generate Video with AI] │
209
+ │ │
210
+ └─────────────────────────────────────────────────────────────┘
211
+ ```
212
+
213
+ #### Step 2-3: GPT-4o Prompt Generation
214
+ The backend sends your script + image to GPT-4o which:
215
+
216
+ 1. **Analyzes the script** for natural breakpoints
217
+ 2. **Splits into ~8 second segments** (based on speaking pace)
218
+ 3. **Generates detailed prompts** for each segment:
219
+
220
+ ```json
221
+ {
222
+ "segments": [
223
+ {
224
+ "segment_info": {
225
+ "segment_number": 1,
226
+ "total_segments": 5,
227
+ "duration": "8s",
228
+ "continuity_markers": {
229
+ "start_position": "facing camera, centered",
230
+ "end_position": "slight lean forward",
231
+ "start_expression": "neutral, friendly",
232
+ "end_expression": "engaged, excited"
233
+ }
234
+ },
235
+ "character_description": {
236
+ "current_state": "Young woman, warm smile, casual attire",
237
+ "voice_matching": "Deep, confident, natural pace"
238
+ },
239
+ "scene_continuity": {
240
+ "environment": "Modern living room, natural light",
241
+ "camera_position": "Medium shot, eye level",
242
+ "lighting_state": "Warm, soft shadows"
243
+ },
244
+ "action_timeline": {
245
+ "dialogue": "Hey everyone! Today I want to share...",
246
+ "synchronized_actions": {
247
+ "0:00-0:02": "Wave hello, direct eye contact",
248
+ "0:02-0:04": "Hands gesture outward",
249
+ "0:04-0:06": "Touch chest (sincerity)",
250
+ "0:06-0:08": "Lean slightly forward"
251
+ }
252
+ }
253
+ }
254
+ ]
255
+ }
256
+ ```
257
+
258
+ #### Step 4-5: Video Generation Loop
259
+ For each segment:
260
+
261
+ ```
262
+ ┌─────────────────────────────────────────────────────────────┐
263
+ │ GENERATION PROGRESS │
264
+ ├─────────────────────────────────────────────────────────────┤
265
+ │ │
266
+ │ ╭──────────╮ │
267
+ │ ( ) │
268
+ │ │ 45% │ │
269
+ │ ( ) │
270
+ │ ╰──────────╯ │
271
+ │ │
272
+ │ 🎬 Generating video 2 of 5... │
273
+ │ │
274
+ │ ●────────────●────────────○────────────○────────────○ │
275
+ │ ✓ ✓ ⟳ │
276
+ │ Seg 1 Seg 2 Seg 3 Seg 4 Seg 5 │
277
+ │ │
278
+ │ ┌─────────────────────────────────────────────────────┐ │
279
+ │ │ Processing: Uploaded image → KIE API → Rendering... │ │
280
+ │ └─────────────────────────────────────────────────────┘ │
281
+ │ │
282
+ │ ⏱️ Estimated time: 1-2 minutes per segment │
283
+ │ │
284
+ └─────────────────────────────────────────────────────────────┘
285
+ ```
286
+
287
+ #### Step 6-7: Completion & Download
288
+
289
+ ```
290
+ ┌─────────────────────────────────────────────────────────────┐
291
+ │ GENERATION COMPLETE! ✅ │
292
+ ├─────────────────────────────────────────────────────────────┤
293
+ │ │
294
+ │ 5 videos generated successfully │
295
+ │ │
296
+ │ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐
297
+ │ │ Seg 1 │ │ Seg 2 │ │ Seg 3 │ │ Seg 4 │ │ Seg 5 │
298
+ │ │ ▶️ │ │ ▶️ │ │ ▶️ │ │ ▶️ │ │ ▶️ │
299
+ │ │ ~8s │ │ ~8s │ │ ~8s │ │ ~8s │ │ ~8s │
300
+ │ │ [⬇️] │ │ [⬇️] │ │ [⬇️] │ │ [⬇️] │ │ [⬇️] │
301
+ │ └─────────┘ └─────────┘ └─────────┘ └─────────┘ └─────────┘
302
+ │ │
303
+ │ [📥 Download All Videos] [🔄 Generate More] │
304
+ │ │
305
+ └─────────────────────────────────────────────────────────────┘
306
+ ```
307
+
308
+ ---
309
+
310
+ ## Flow 2: Replicate (Flexible)
311
+
312
+ This is the **simple flow** for creative experimentation.
313
+
314
+ ### Step-by-Step Flow
315
+
316
+ ```
317
+ USER INPUT BACKEND EXTERNAL APIs
318
+ ─────────────────────────────────────────────────────────────────────────────
319
+
320
+ ┌──────────────────┐
321
+ │ 1. Enter Prompt │
322
+ │ + Settings │
323
+ └────────┬─────────┘
324
+
325
+
326
+ ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐
327
+ │ 2. Submit │────────►│ /api/replicate/ │────────►│ Replicate API │
328
+ │ │ │ generate │ │ (Various Models) │
329
+ └──────────────────┘ └────────┬─────────┘ └────────┬─────────┘
330
+ │ │
331
+ │ │
332
+ ▼ │
333
+ ┌──────────────────┐ ┌──────────────────┐ │
334
+ │ 3. Polling │◄────────│ /api/replicate/ │◄────────────────┘
335
+ │ for status │ │ status/{id} │
336
+ └────────┬─────────┘ └──────────────────┘
337
+
338
+
339
+ ┌──────────────────┐
340
+ │ 4. Video Ready │
341
+ │ Download │
342
+ └──────────────────┘
343
+ ```
344
+
345
+ ### Available Models
346
+
347
+ ```
348
+ ┌─────────────────────────────────────────────────────────────┐
349
+ │ REPLICATE MODELS │
350
+ ├─────────────────────────────────────────────────────────────┤
351
+ │ │
352
+ │ ┌─────────────────────────────────────────────────────┐ │
353
+ │ │ 🎬 minimax/video-01 │ │
354
+ │ │ High-quality text-to-video generation │ │
355
+ │ │ Duration: 5s | Best for: General content │ │
356
+ │ └─────────────────────────────────────────────────────┘ │
357
+ │ │
358
+ │ ┌─────────────────────────────────────────────────────┐ │
359
+ │ │ 🎥 luma/ray │ │
360
+ │ │ Cinematic video generation │ │
361
+ │ │ Duration: Variable | Best for: Artistic content │ │
362
+ │ └─────────────────────────────────────────────────────┘ │
363
+ │ │
364
+ │ ┌─────────────────────────────────────────────────────┐ │
365
+ │ │ 🖼️ stability-ai/stable-video-diffusion │ │
366
+ │ │ Image-to-video generation │ │
367
+ │ │ Duration: 4s | Best for: Animating images │ │
368
+ │ └─────────────────────────────────────────────────────┘ │
369
+ │ │
370
+ └─────────────────────────────────────────────────────────────┘
371
+ ```
372
+
373
+ ---
374
+
375
+ ## API Endpoints Reference
376
+
377
+ ### Kling AI Endpoints
378
+
379
+ | Endpoint | Method | Description |
380
+ |----------|--------|-------------|
381
+ | `/api/generate-prompts` | POST | GPT-4o script analysis & segmentation |
382
+ | `/api/upload-image` | POST | Upload character reference image |
383
+ | `/api/veo/generate` | POST | Start video generation |
384
+ | `/api/veo/extend` | POST | Extend existing video |
385
+ | `/api/veo/events/{taskId}` | GET | SSE stream for real-time updates |
386
+ | `/api/veo/status/{taskId}` | GET | Check generation status |
387
+ | `/api/veo/download` | GET | Download generated video |
388
+
389
+ ### Replicate Endpoints
390
+
391
+ | Endpoint | Method | Description |
392
+ |----------|--------|-------------|
393
+ | `/api/replicate/generate` | POST | Start video generation |
394
+ | `/api/replicate/status/{id}` | GET | Check prediction status |
395
+ | `/api/replicate/models` | GET | List available models |
396
+ | `/api/replicate/cancel/{id}` | POST | Cancel running prediction |
397
+
398
+ ---
399
+
400
+ ## Comparison
401
+
402
+ | Feature | Kling (Frame Continuity) | Kling (Extend) | Replicate |
403
+ |---------|--------------------------|----------------|-----------|
404
+ | Script Segmentation | ✅ GPT-4o auto | ✅ GPT-4o auto | ✅ GPT-4o auto |
405
+ | Multi-segment | ✅ Yes | ✅ Yes | ✅ Yes |
406
+ | Visual Consistency | ✅✅ Best | ⚠️ Good | ✅✅ Best |
407
+ | Speed | ⚠️ Slower | ✅ Fast | ⚠️ Slower |
408
+ | Frame Extraction | ✅ Yes | ❌ No | ✅ Yes |
409
+ | Voice/Audio | ✅ Yes | ✅ Yes | ❌ No |
410
+ | Best for | **Consistent UGC** | Quick drafts | Creative work |
411
+
412
+ ### Why Frame Continuity Works Better
413
+
414
+ ```
415
+ The secret from standalone_video_creator.py:
416
+
417
+ ┌────────────────────────────────────────────────────────────────────────────┐
418
+ │ │
419
+ │ PROBLEM with Extend API: │
420
+ │ • AI tries to "continue" but may drift visually │
421
+ │ • Character appearance can subtly change │
422
+ │ • Lighting/scene may shift between segments │
423
+ │ │
424
+ │ SOLUTION with Frame Continuity: │
425
+ │ • Extract the EXACT last frame of each video │
426
+ │ • Use it as the reference image for next segment │
427
+ │ • AI sees exactly what it needs to continue from │
428
+ │ • Result: Perfect visual match between segments │
429
+ │ │
430
+ │ This is why the Replicate flow in standalone_video_creator.py │
431
+ │ produces such consistent results! │
432
+ │ │
433
+ └────────────────────────────────────────────────────────────────────────────┘
434
+ ```
435
+
436
+ ---
437
+
438
+ ## Technical Architecture
439
+
440
+ ```
441
+ ┌──────────────────────────────────────────────────────────────────────────┐
442
+ │ FRONTEND │
443
+ │ (React + Vite + TS) │
444
+ │ │
445
+ │ ┌────────────────┐ ┌────────────────┐ ┌────────────────┐ │
446
+ │ │ ProviderSelect │ │ GenerationForm │ │ GenerationProg │ │
447
+ │ └───────┬────────┘ └───────┬────────┘ └───────┬────────┘ │
448
+ │ │ │ │ │
449
+ │ └───────────────────┼───────────────────┘ │
450
+ │ │ │
451
+ │ ┌─────────▼─────────┐ │
452
+ │ │ GenerationContext │ │
453
+ │ │ (State Mgmt) │ │
454
+ │ └─────────┬─────────┘ │
455
+ │ │ │
456
+ │ ┌─────────▼─────────┐ │
457
+ │ │ API Client │ │
458
+ │ │ (utils/api.ts) │ │
459
+ │ └─────────┬─────────┘ │
460
+ └──────────────────────────────┼───────────────────────────────────────────┘
461
+ │ HTTP / SSE
462
+
463
+ ┌──────────────────────────────────────────────────────────────────────────┐
464
+ │ BACKEND │
465
+ │ (FastAPI + Python) │
466
+ │ │
467
+ │ ┌────────────────┐ ┌────────────────┐ ┌────────────────┐ │
468
+ │ │ video_ │ │ replicate_ │ │ prompt_ │ │
469
+ │ │ generation.py │ │ service.py │ │ generation.py │ │
470
+ │ └───────┬────────┘ └───────┬────────┘ └───────┬────────┘ │
471
+ │ │ │ │ │
472
+ │ └───────────────────┼───────────────────┘ │
473
+ │ │ │
474
+ │ ┌─────────▼─────────┐ │
475
+ │ │ main.py │ │
476
+ │ │ (FastAPI App) │ │
477
+ │ └─────────┬─────────┘ │
478
+ └──────────────────────────────┼───────────────────────────────────────────┘
479
+ │ HTTPS
480
+
481
+ ┌──────────────────────────────────────────────────────────────────────────┐
482
+ │ EXTERNAL APIs │
483
+ │ │
484
+ │ ┌────────────────┐ ┌────────────────┐ ┌────────────────┐ │
485
+ │ │ KIE/Kling │ │ Replicate │ │ OpenAI │ │
486
+ │ │ Veo 3.1 │ │ (Models) │ │ GPT-4o │ │
487
+ │ └────────────────┘ └────────────────┘ └────────────────┘ │
488
+ │ │
489
+ └──────────────────────────────────────────────────────────────────────────┘
490
+ ```
491
+
492
+ ---
493
+
494
+ ## Quick Start
495
+
496
+ 1. **Start Backend**:
497
+ ```bash
498
+ cd python-backend   # path to your backend checkout
499
+ source venv/bin/activate
500
+ python main.py
501
+ ```
502
+
503
+ 2. **Start Frontend**:
504
+ ```bash
505
+ cd frontend
506
+ npm run dev
507
+ ```
508
+
509
+ 3. **Open Browser**: http://localhost:3000
510
+
511
+ 4. **Choose Provider** → Enter script → Generate!
512
+
frontend/README.md ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Video Genesis Studio
2
+
3
+ A modern, beautiful frontend for automatic AI video generation with dual provider support (Kling AI & Replicate).
4
+
5
+ ## Features
6
+
7
+ - **Dual Provider Support**: Choose between Kling AI (Veo 3.1) and Replicate for video generation
8
+ - **Automatic Script Segmentation**: GPT-4o analyzes your script and creates optimal video segments
9
+ - **Beautiful UI**: Modern, distinctive design with ocean-dark theme
10
+ - **Real-time Progress**: Watch your videos generate with live status updates
11
+ - **Download & Preview**: Preview generated videos and download them individually or all at once
12
+
13
+ ## Getting Started
14
+
15
+ ### Prerequisites
16
+
17
+ - Node.js 18+
18
+ - Python backend running on port 4000
19
+
20
+ ### Installation
21
+
22
+ ```bash
23
+ # Navigate to frontend directory
24
+ cd frontend
25
+
26
+ # Install dependencies
27
+ npm install
28
+
29
+ # Start development server
30
+ npm run dev
31
+ ```
32
+
33
+ The frontend will be available at `http://localhost:3000`.
34
+
35
+ ### Environment Variables
36
+
37
+ Create a `.env.local` file in the frontend directory:
38
+
39
+ ```env
40
+ VITE_API_BASE_URL=http://localhost:4000
41
+ ```
42
+
43
+ ## Architecture
44
+
45
+ ```
46
+ frontend/
47
+ ├── src/
48
+ │ ├── components/ # React components
49
+ │ │ ├── Icons.tsx # SVG icons
50
+ │ │ ├── ProviderSelect.tsx # Provider selection screen
51
+ │ │ ├── GenerationForm.tsx # Video generation form
52
+ │ │ ├── GenerationProgress.tsx # Progress display
53
+ │ │ ├── GenerationComplete.tsx # Results screen
54
+ │ │ └── ErrorDisplay.tsx # Error handling
55
+ │ ├── context/ # React context
56
+ │ │ └── GenerationContext.tsx
57
+ │ ├── types/ # TypeScript types
58
+ │ ├── utils/ # Utilities & API client
59
+ │ │ └── api.ts
60
+ │ ├── App.tsx # Main application
61
+ │ ├── main.tsx # Entry point
62
+ │ └── index.css # Tailwind styles
63
+ ├── public/ # Static assets
64
+ ├── package.json
65
+ ├── tailwind.config.js
66
+ └── vite.config.ts
67
+ ```
68
+
69
+ ## Provider Flows
70
+
71
+ ### Kling AI (Recommended)
72
+
73
+ 1. Upload character reference image
74
+ 2. Enter your full script
75
+ 3. Configure generation settings (voice, camera, style)
76
+ 4. GPT-4o analyzes and segments your script
77
+ 5. Videos are generated segment by segment
78
+ 6. Download all segments when complete
79
+
80
+ ### Replicate
81
+
82
+ 1. Enter your prompt
83
+ 2. Select from available models
84
+ 3. Configure aspect ratio and duration
85
+ 4. Video is generated via Replicate API
86
+ 5. Download when complete
87
+
88
+ ## Design System
89
+
90
+ The UI uses a custom design system with:
91
+
92
+ - **Colors**: Ocean-inspired dark theme with coral and electric accent colors
93
+ - **Typography**: Clash Display (headings) + Satoshi (body)
94
+ - **Components**: Glass morphism effects, smooth animations via Framer Motion
95
+ - **Layout**: Responsive grid with fluid animations
96
+
97
+ ## Development
98
+
99
+ ```bash
100
+ # Development server with hot reload
101
+ npm run dev
102
+
103
+ # Build for production
104
+ npm run build
105
+
106
+ # Preview production build
107
+ npm run preview
108
+ ```
109
+
110
+ ## Tech Stack
111
+
112
+ - **React 19** - UI framework
113
+ - **TypeScript** - Type safety
114
+ - **Vite** - Build tool
115
+ - **Tailwind CSS** - Styling
116
+ - **Framer Motion** - Animations
117
+
118
+ ## API Integration
119
+
120
+ The frontend communicates with the Python backend at `/api/*`:
121
+
122
+ - `POST /api/generate-prompts` - Generate video prompts with GPT-4o
123
+ - `POST /api/veo/generate` - Start Kling video generation
124
+ - `GET /api/veo/events/:taskId` - SSE for generation progress
125
+ - `POST /api/replicate/generate` - Start Replicate generation
126
+ - `GET /api/replicate/status/:id` - Check Replicate status
127
+ - `GET /health` - Backend health check
frontend/eslint.config.js ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Flat ESLint configuration for the Vite + React + TypeScript frontend.
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'
import { defineConfig, globalIgnores } from 'eslint/config'

export default defineConfig([
  // Never lint build output.
  globalIgnores(['dist']),
  {
    // Apply the TS/React rule sets to all TypeScript sources.
    files: ['**/*.{ts,tsx}'],
    extends: [
      js.configs.recommended,
      tseslint.configs.recommended,
      reactHooks.configs.flat.recommended,
      reactRefresh.configs.vite,
    ],
    languageOptions: {
      ecmaVersion: 2020,
      // Predefine browser globals (window, document, ...).
      globals: globals.browser,
    },
  },
])
frontend/index.html ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <link rel="icon" type="image/svg+xml" href="/vite.svg" />
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
7
+ <title>Video AdGenesis Studio</title>
8
+
9
+ <!-- Fonts -->
10
+ <link rel="preconnect" href="https://fonts.googleapis.com">
11
+ <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
12
+ <link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@400;500;600&display=swap" rel="stylesheet">
13
+
14
+ <!-- Clash Display & Satoshi from Fontshare -->
15
+ <link href="https://api.fontshare.com/v2/css?f[]=clash-display@400,500,600,700&f[]=satoshi@400,500,700&display=swap" rel="stylesheet">
16
+ </head>
17
+ <body>
18
+ <div id="root"></div>
19
+ <script type="module" src="/src/main.tsx"></script>
20
+ </body>
21
+ </html>
frontend/package-lock.json ADDED
@@ -0,0 +1,2776 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "video-genesis-studio",
3
+ "version": "1.0.0",
4
+ "lockfileVersion": 3,
5
+ "requires": true,
6
+ "packages": {
7
+ "": {
8
+ "name": "video-genesis-studio",
9
+ "version": "1.0.0",
10
+ "dependencies": {
11
+ "framer-motion": "^11.15.0",
12
+ "react": "^19.0.0",
13
+ "react-dom": "^19.0.0"
14
+ },
15
+ "devDependencies": {
16
+ "@types/node": "^22.14.0",
17
+ "@types/react": "^19.0.0",
18
+ "@types/react-dom": "^19.0.0",
19
+ "@vitejs/plugin-react": "^4.3.4",
20
+ "autoprefixer": "^10.4.20",
21
+ "postcss": "^8.4.49",
22
+ "tailwindcss": "^3.4.17",
23
+ "typescript": "~5.6.0",
24
+ "vite": "^6.0.0"
25
+ }
26
+ },
27
+ "node_modules/@alloc/quick-lru": {
28
+ "version": "5.2.0",
29
+ "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz",
30
+ "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==",
31
+ "dev": true,
32
+ "license": "MIT",
33
+ "engines": {
34
+ "node": ">=10"
35
+ },
36
+ "funding": {
37
+ "url": "https://github.com/sponsors/sindresorhus"
38
+ }
39
+ },
40
+ "node_modules/@babel/code-frame": {
41
+ "version": "7.27.1",
42
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
43
+ "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
44
+ "dev": true,
45
+ "license": "MIT",
46
+ "dependencies": {
47
+ "@babel/helper-validator-identifier": "^7.27.1",
48
+ "js-tokens": "^4.0.0",
49
+ "picocolors": "^1.1.1"
50
+ },
51
+ "engines": {
52
+ "node": ">=6.9.0"
53
+ }
54
+ },
55
+ "node_modules/@babel/compat-data": {
56
+ "version": "7.28.5",
57
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz",
58
+ "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==",
59
+ "dev": true,
60
+ "license": "MIT",
61
+ "engines": {
62
+ "node": ">=6.9.0"
63
+ }
64
+ },
65
+ "node_modules/@babel/core": {
66
+ "version": "7.28.5",
67
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz",
68
+ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
69
+ "dev": true,
70
+ "license": "MIT",
71
+ "peer": true,
72
+ "dependencies": {
73
+ "@babel/code-frame": "^7.27.1",
74
+ "@babel/generator": "^7.28.5",
75
+ "@babel/helper-compilation-targets": "^7.27.2",
76
+ "@babel/helper-module-transforms": "^7.28.3",
77
+ "@babel/helpers": "^7.28.4",
78
+ "@babel/parser": "^7.28.5",
79
+ "@babel/template": "^7.27.2",
80
+ "@babel/traverse": "^7.28.5",
81
+ "@babel/types": "^7.28.5",
82
+ "@jridgewell/remapping": "^2.3.5",
83
+ "convert-source-map": "^2.0.0",
84
+ "debug": "^4.1.0",
85
+ "gensync": "^1.0.0-beta.2",
86
+ "json5": "^2.2.3",
87
+ "semver": "^6.3.1"
88
+ },
89
+ "engines": {
90
+ "node": ">=6.9.0"
91
+ },
92
+ "funding": {
93
+ "type": "opencollective",
94
+ "url": "https://opencollective.com/babel"
95
+ }
96
+ },
97
+ "node_modules/@babel/generator": {
98
+ "version": "7.28.5",
99
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz",
100
+ "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==",
101
+ "dev": true,
102
+ "license": "MIT",
103
+ "dependencies": {
104
+ "@babel/parser": "^7.28.5",
105
+ "@babel/types": "^7.28.5",
106
+ "@jridgewell/gen-mapping": "^0.3.12",
107
+ "@jridgewell/trace-mapping": "^0.3.28",
108
+ "jsesc": "^3.0.2"
109
+ },
110
+ "engines": {
111
+ "node": ">=6.9.0"
112
+ }
113
+ },
114
+ "node_modules/@babel/helper-compilation-targets": {
115
+ "version": "7.27.2",
116
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz",
117
+ "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==",
118
+ "dev": true,
119
+ "license": "MIT",
120
+ "dependencies": {
121
+ "@babel/compat-data": "^7.27.2",
122
+ "@babel/helper-validator-option": "^7.27.1",
123
+ "browserslist": "^4.24.0",
124
+ "lru-cache": "^5.1.1",
125
+ "semver": "^6.3.1"
126
+ },
127
+ "engines": {
128
+ "node": ">=6.9.0"
129
+ }
130
+ },
131
+ "node_modules/@babel/helper-globals": {
132
+ "version": "7.28.0",
133
+ "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
134
+ "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
135
+ "dev": true,
136
+ "license": "MIT",
137
+ "engines": {
138
+ "node": ">=6.9.0"
139
+ }
140
+ },
141
+ "node_modules/@babel/helper-module-imports": {
142
+ "version": "7.27.1",
143
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz",
144
+ "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==",
145
+ "dev": true,
146
+ "license": "MIT",
147
+ "dependencies": {
148
+ "@babel/traverse": "^7.27.1",
149
+ "@babel/types": "^7.27.1"
150
+ },
151
+ "engines": {
152
+ "node": ">=6.9.0"
153
+ }
154
+ },
155
+ "node_modules/@babel/helper-module-transforms": {
156
+ "version": "7.28.3",
157
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz",
158
+ "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==",
159
+ "dev": true,
160
+ "license": "MIT",
161
+ "dependencies": {
162
+ "@babel/helper-module-imports": "^7.27.1",
163
+ "@babel/helper-validator-identifier": "^7.27.1",
164
+ "@babel/traverse": "^7.28.3"
165
+ },
166
+ "engines": {
167
+ "node": ">=6.9.0"
168
+ },
169
+ "peerDependencies": {
170
+ "@babel/core": "^7.0.0"
171
+ }
172
+ },
173
+ "node_modules/@babel/helper-plugin-utils": {
174
+ "version": "7.27.1",
175
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz",
176
+ "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==",
177
+ "dev": true,
178
+ "license": "MIT",
179
+ "engines": {
180
+ "node": ">=6.9.0"
181
+ }
182
+ },
183
+ "node_modules/@babel/helper-string-parser": {
184
+ "version": "7.27.1",
185
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
186
+ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
187
+ "dev": true,
188
+ "license": "MIT",
189
+ "engines": {
190
+ "node": ">=6.9.0"
191
+ }
192
+ },
193
+ "node_modules/@babel/helper-validator-identifier": {
194
+ "version": "7.28.5",
195
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
196
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
197
+ "dev": true,
198
+ "license": "MIT",
199
+ "engines": {
200
+ "node": ">=6.9.0"
201
+ }
202
+ },
203
+ "node_modules/@babel/helper-validator-option": {
204
+ "version": "7.27.1",
205
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
206
+ "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
207
+ "dev": true,
208
+ "license": "MIT",
209
+ "engines": {
210
+ "node": ">=6.9.0"
211
+ }
212
+ },
213
+ "node_modules/@babel/helpers": {
214
+ "version": "7.28.4",
215
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
216
+ "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
217
+ "dev": true,
218
+ "license": "MIT",
219
+ "dependencies": {
220
+ "@babel/template": "^7.27.2",
221
+ "@babel/types": "^7.28.4"
222
+ },
223
+ "engines": {
224
+ "node": ">=6.9.0"
225
+ }
226
+ },
227
+ "node_modules/@babel/parser": {
228
+ "version": "7.28.5",
229
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
230
+ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
231
+ "dev": true,
232
+ "license": "MIT",
233
+ "dependencies": {
234
+ "@babel/types": "^7.28.5"
235
+ },
236
+ "bin": {
237
+ "parser": "bin/babel-parser.js"
238
+ },
239
+ "engines": {
240
+ "node": ">=6.0.0"
241
+ }
242
+ },
243
+ "node_modules/@babel/plugin-transform-react-jsx-self": {
244
+ "version": "7.27.1",
245
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz",
246
+ "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==",
247
+ "dev": true,
248
+ "license": "MIT",
249
+ "dependencies": {
250
+ "@babel/helper-plugin-utils": "^7.27.1"
251
+ },
252
+ "engines": {
253
+ "node": ">=6.9.0"
254
+ },
255
+ "peerDependencies": {
256
+ "@babel/core": "^7.0.0-0"
257
+ }
258
+ },
259
+ "node_modules/@babel/plugin-transform-react-jsx-source": {
260
+ "version": "7.27.1",
261
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz",
262
+ "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==",
263
+ "dev": true,
264
+ "license": "MIT",
265
+ "dependencies": {
266
+ "@babel/helper-plugin-utils": "^7.27.1"
267
+ },
268
+ "engines": {
269
+ "node": ">=6.9.0"
270
+ },
271
+ "peerDependencies": {
272
+ "@babel/core": "^7.0.0-0"
273
+ }
274
+ },
275
+ "node_modules/@babel/template": {
276
+ "version": "7.27.2",
277
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
278
+ "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
279
+ "dev": true,
280
+ "license": "MIT",
281
+ "dependencies": {
282
+ "@babel/code-frame": "^7.27.1",
283
+ "@babel/parser": "^7.27.2",
284
+ "@babel/types": "^7.27.1"
285
+ },
286
+ "engines": {
287
+ "node": ">=6.9.0"
288
+ }
289
+ },
290
+ "node_modules/@babel/traverse": {
291
+ "version": "7.28.5",
292
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz",
293
+ "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==",
294
+ "dev": true,
295
+ "license": "MIT",
296
+ "dependencies": {
297
+ "@babel/code-frame": "^7.27.1",
298
+ "@babel/generator": "^7.28.5",
299
+ "@babel/helper-globals": "^7.28.0",
300
+ "@babel/parser": "^7.28.5",
301
+ "@babel/template": "^7.27.2",
302
+ "@babel/types": "^7.28.5",
303
+ "debug": "^4.3.1"
304
+ },
305
+ "engines": {
306
+ "node": ">=6.9.0"
307
+ }
308
+ },
309
+ "node_modules/@babel/types": {
310
+ "version": "7.28.5",
311
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
312
+ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
313
+ "dev": true,
314
+ "license": "MIT",
315
+ "dependencies": {
316
+ "@babel/helper-string-parser": "^7.27.1",
317
+ "@babel/helper-validator-identifier": "^7.28.5"
318
+ },
319
+ "engines": {
320
+ "node": ">=6.9.0"
321
+ }
322
+ },
323
+ "node_modules/@esbuild/aix-ppc64": {
324
+ "version": "0.25.12",
325
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz",
326
+ "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==",
327
+ "cpu": [
328
+ "ppc64"
329
+ ],
330
+ "dev": true,
331
+ "license": "MIT",
332
+ "optional": true,
333
+ "os": [
334
+ "aix"
335
+ ],
336
+ "engines": {
337
+ "node": ">=18"
338
+ }
339
+ },
340
+ "node_modules/@esbuild/android-arm": {
341
+ "version": "0.25.12",
342
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz",
343
+ "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==",
344
+ "cpu": [
345
+ "arm"
346
+ ],
347
+ "dev": true,
348
+ "license": "MIT",
349
+ "optional": true,
350
+ "os": [
351
+ "android"
352
+ ],
353
+ "engines": {
354
+ "node": ">=18"
355
+ }
356
+ },
357
+ "node_modules/@esbuild/android-arm64": {
358
+ "version": "0.25.12",
359
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz",
360
+ "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==",
361
+ "cpu": [
362
+ "arm64"
363
+ ],
364
+ "dev": true,
365
+ "license": "MIT",
366
+ "optional": true,
367
+ "os": [
368
+ "android"
369
+ ],
370
+ "engines": {
371
+ "node": ">=18"
372
+ }
373
+ },
374
+ "node_modules/@esbuild/android-x64": {
375
+ "version": "0.25.12",
376
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz",
377
+ "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==",
378
+ "cpu": [
379
+ "x64"
380
+ ],
381
+ "dev": true,
382
+ "license": "MIT",
383
+ "optional": true,
384
+ "os": [
385
+ "android"
386
+ ],
387
+ "engines": {
388
+ "node": ">=18"
389
+ }
390
+ },
391
+ "node_modules/@esbuild/darwin-arm64": {
392
+ "version": "0.25.12",
393
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz",
394
+ "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==",
395
+ "cpu": [
396
+ "arm64"
397
+ ],
398
+ "dev": true,
399
+ "license": "MIT",
400
+ "optional": true,
401
+ "os": [
402
+ "darwin"
403
+ ],
404
+ "engines": {
405
+ "node": ">=18"
406
+ }
407
+ },
408
+ "node_modules/@esbuild/darwin-x64": {
409
+ "version": "0.25.12",
410
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz",
411
+ "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==",
412
+ "cpu": [
413
+ "x64"
414
+ ],
415
+ "dev": true,
416
+ "license": "MIT",
417
+ "optional": true,
418
+ "os": [
419
+ "darwin"
420
+ ],
421
+ "engines": {
422
+ "node": ">=18"
423
+ }
424
+ },
425
+ "node_modules/@esbuild/freebsd-arm64": {
426
+ "version": "0.25.12",
427
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz",
428
+ "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==",
429
+ "cpu": [
430
+ "arm64"
431
+ ],
432
+ "dev": true,
433
+ "license": "MIT",
434
+ "optional": true,
435
+ "os": [
436
+ "freebsd"
437
+ ],
438
+ "engines": {
439
+ "node": ">=18"
440
+ }
441
+ },
442
+ "node_modules/@esbuild/freebsd-x64": {
443
+ "version": "0.25.12",
444
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz",
445
+ "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==",
446
+ "cpu": [
447
+ "x64"
448
+ ],
449
+ "dev": true,
450
+ "license": "MIT",
451
+ "optional": true,
452
+ "os": [
453
+ "freebsd"
454
+ ],
455
+ "engines": {
456
+ "node": ">=18"
457
+ }
458
+ },
459
+ "node_modules/@esbuild/linux-arm": {
460
+ "version": "0.25.12",
461
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz",
462
+ "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==",
463
+ "cpu": [
464
+ "arm"
465
+ ],
466
+ "dev": true,
467
+ "license": "MIT",
468
+ "optional": true,
469
+ "os": [
470
+ "linux"
471
+ ],
472
+ "engines": {
473
+ "node": ">=18"
474
+ }
475
+ },
476
+ "node_modules/@esbuild/linux-arm64": {
477
+ "version": "0.25.12",
478
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz",
479
+ "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==",
480
+ "cpu": [
481
+ "arm64"
482
+ ],
483
+ "dev": true,
484
+ "license": "MIT",
485
+ "optional": true,
486
+ "os": [
487
+ "linux"
488
+ ],
489
+ "engines": {
490
+ "node": ">=18"
491
+ }
492
+ },
493
+ "node_modules/@esbuild/linux-ia32": {
494
+ "version": "0.25.12",
495
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz",
496
+ "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==",
497
+ "cpu": [
498
+ "ia32"
499
+ ],
500
+ "dev": true,
501
+ "license": "MIT",
502
+ "optional": true,
503
+ "os": [
504
+ "linux"
505
+ ],
506
+ "engines": {
507
+ "node": ">=18"
508
+ }
509
+ },
510
+ "node_modules/@esbuild/linux-loong64": {
511
+ "version": "0.25.12",
512
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz",
513
+ "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==",
514
+ "cpu": [
515
+ "loong64"
516
+ ],
517
+ "dev": true,
518
+ "license": "MIT",
519
+ "optional": true,
520
+ "os": [
521
+ "linux"
522
+ ],
523
+ "engines": {
524
+ "node": ">=18"
525
+ }
526
+ },
527
+ "node_modules/@esbuild/linux-mips64el": {
528
+ "version": "0.25.12",
529
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz",
530
+ "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==",
531
+ "cpu": [
532
+ "mips64el"
533
+ ],
534
+ "dev": true,
535
+ "license": "MIT",
536
+ "optional": true,
537
+ "os": [
538
+ "linux"
539
+ ],
540
+ "engines": {
541
+ "node": ">=18"
542
+ }
543
+ },
544
+ "node_modules/@esbuild/linux-ppc64": {
545
+ "version": "0.25.12",
546
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz",
547
+ "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==",
548
+ "cpu": [
549
+ "ppc64"
550
+ ],
551
+ "dev": true,
552
+ "license": "MIT",
553
+ "optional": true,
554
+ "os": [
555
+ "linux"
556
+ ],
557
+ "engines": {
558
+ "node": ">=18"
559
+ }
560
+ },
561
+ "node_modules/@esbuild/linux-riscv64": {
562
+ "version": "0.25.12",
563
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz",
564
+ "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==",
565
+ "cpu": [
566
+ "riscv64"
567
+ ],
568
+ "dev": true,
569
+ "license": "MIT",
570
+ "optional": true,
571
+ "os": [
572
+ "linux"
573
+ ],
574
+ "engines": {
575
+ "node": ">=18"
576
+ }
577
+ },
578
+ "node_modules/@esbuild/linux-s390x": {
579
+ "version": "0.25.12",
580
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz",
581
+ "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==",
582
+ "cpu": [
583
+ "s390x"
584
+ ],
585
+ "dev": true,
586
+ "license": "MIT",
587
+ "optional": true,
588
+ "os": [
589
+ "linux"
590
+ ],
591
+ "engines": {
592
+ "node": ">=18"
593
+ }
594
+ },
595
+ "node_modules/@esbuild/linux-x64": {
596
+ "version": "0.25.12",
597
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz",
598
+ "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==",
599
+ "cpu": [
600
+ "x64"
601
+ ],
602
+ "dev": true,
603
+ "license": "MIT",
604
+ "optional": true,
605
+ "os": [
606
+ "linux"
607
+ ],
608
+ "engines": {
609
+ "node": ">=18"
610
+ }
611
+ },
612
+ "node_modules/@esbuild/netbsd-arm64": {
613
+ "version": "0.25.12",
614
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz",
615
+ "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==",
616
+ "cpu": [
617
+ "arm64"
618
+ ],
619
+ "dev": true,
620
+ "license": "MIT",
621
+ "optional": true,
622
+ "os": [
623
+ "netbsd"
624
+ ],
625
+ "engines": {
626
+ "node": ">=18"
627
+ }
628
+ },
629
+ "node_modules/@esbuild/netbsd-x64": {
630
+ "version": "0.25.12",
631
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz",
632
+ "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==",
633
+ "cpu": [
634
+ "x64"
635
+ ],
636
+ "dev": true,
637
+ "license": "MIT",
638
+ "optional": true,
639
+ "os": [
640
+ "netbsd"
641
+ ],
642
+ "engines": {
643
+ "node": ">=18"
644
+ }
645
+ },
646
+ "node_modules/@esbuild/openbsd-arm64": {
647
+ "version": "0.25.12",
648
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz",
649
+ "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==",
650
+ "cpu": [
651
+ "arm64"
652
+ ],
653
+ "dev": true,
654
+ "license": "MIT",
655
+ "optional": true,
656
+ "os": [
657
+ "openbsd"
658
+ ],
659
+ "engines": {
660
+ "node": ">=18"
661
+ }
662
+ },
663
+ "node_modules/@esbuild/openbsd-x64": {
664
+ "version": "0.25.12",
665
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz",
666
+ "integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==",
667
+ "cpu": [
668
+ "x64"
669
+ ],
670
+ "dev": true,
671
+ "license": "MIT",
672
+ "optional": true,
673
+ "os": [
674
+ "openbsd"
675
+ ],
676
+ "engines": {
677
+ "node": ">=18"
678
+ }
679
+ },
680
+ "node_modules/@esbuild/openharmony-arm64": {
681
+ "version": "0.25.12",
682
+ "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz",
683
+ "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==",
684
+ "cpu": [
685
+ "arm64"
686
+ ],
687
+ "dev": true,
688
+ "license": "MIT",
689
+ "optional": true,
690
+ "os": [
691
+ "openharmony"
692
+ ],
693
+ "engines": {
694
+ "node": ">=18"
695
+ }
696
+ },
697
+ "node_modules/@esbuild/sunos-x64": {
698
+ "version": "0.25.12",
699
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz",
700
+ "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==",
701
+ "cpu": [
702
+ "x64"
703
+ ],
704
+ "dev": true,
705
+ "license": "MIT",
706
+ "optional": true,
707
+ "os": [
708
+ "sunos"
709
+ ],
710
+ "engines": {
711
+ "node": ">=18"
712
+ }
713
+ },
714
+ "node_modules/@esbuild/win32-arm64": {
715
+ "version": "0.25.12",
716
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz",
717
+ "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==",
718
+ "cpu": [
719
+ "arm64"
720
+ ],
721
+ "dev": true,
722
+ "license": "MIT",
723
+ "optional": true,
724
+ "os": [
725
+ "win32"
726
+ ],
727
+ "engines": {
728
+ "node": ">=18"
729
+ }
730
+ },
731
+ "node_modules/@esbuild/win32-ia32": {
732
+ "version": "0.25.12",
733
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz",
734
+ "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==",
735
+ "cpu": [
736
+ "ia32"
737
+ ],
738
+ "dev": true,
739
+ "license": "MIT",
740
+ "optional": true,
741
+ "os": [
742
+ "win32"
743
+ ],
744
+ "engines": {
745
+ "node": ">=18"
746
+ }
747
+ },
748
+ "node_modules/@esbuild/win32-x64": {
749
+ "version": "0.25.12",
750
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz",
751
+ "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==",
752
+ "cpu": [
753
+ "x64"
754
+ ],
755
+ "dev": true,
756
+ "license": "MIT",
757
+ "optional": true,
758
+ "os": [
759
+ "win32"
760
+ ],
761
+ "engines": {
762
+ "node": ">=18"
763
+ }
764
+ },
765
+ "node_modules/@jridgewell/gen-mapping": {
766
+ "version": "0.3.13",
767
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
768
+ "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
769
+ "dev": true,
770
+ "license": "MIT",
771
+ "dependencies": {
772
+ "@jridgewell/sourcemap-codec": "^1.5.0",
773
+ "@jridgewell/trace-mapping": "^0.3.24"
774
+ }
775
+ },
776
+ "node_modules/@jridgewell/remapping": {
777
+ "version": "2.3.5",
778
+ "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
779
+ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
780
+ "dev": true,
781
+ "license": "MIT",
782
+ "dependencies": {
783
+ "@jridgewell/gen-mapping": "^0.3.5",
784
+ "@jridgewell/trace-mapping": "^0.3.24"
785
+ }
786
+ },
787
+ "node_modules/@jridgewell/resolve-uri": {
788
+ "version": "3.1.2",
789
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
790
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
791
+ "dev": true,
792
+ "license": "MIT",
793
+ "engines": {
794
+ "node": ">=6.0.0"
795
+ }
796
+ },
797
+ "node_modules/@jridgewell/sourcemap-codec": {
798
+ "version": "1.5.5",
799
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
800
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
801
+ "dev": true,
802
+ "license": "MIT"
803
+ },
804
+ "node_modules/@jridgewell/trace-mapping": {
805
+ "version": "0.3.31",
806
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
807
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
808
+ "dev": true,
809
+ "license": "MIT",
810
+ "dependencies": {
811
+ "@jridgewell/resolve-uri": "^3.1.0",
812
+ "@jridgewell/sourcemap-codec": "^1.4.14"
813
+ }
814
+ },
815
+ "node_modules/@nodelib/fs.scandir": {
816
+ "version": "2.1.5",
817
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
818
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
819
+ "dev": true,
820
+ "license": "MIT",
821
+ "dependencies": {
822
+ "@nodelib/fs.stat": "2.0.5",
823
+ "run-parallel": "^1.1.9"
824
+ },
825
+ "engines": {
826
+ "node": ">= 8"
827
+ }
828
+ },
829
+ "node_modules/@nodelib/fs.stat": {
830
+ "version": "2.0.5",
831
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
832
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
833
+ "dev": true,
834
+ "license": "MIT",
835
+ "engines": {
836
+ "node": ">= 8"
837
+ }
838
+ },
839
+ "node_modules/@nodelib/fs.walk": {
840
+ "version": "1.2.8",
841
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
842
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
843
+ "dev": true,
844
+ "license": "MIT",
845
+ "dependencies": {
846
+ "@nodelib/fs.scandir": "2.1.5",
847
+ "fastq": "^1.6.0"
848
+ },
849
+ "engines": {
850
+ "node": ">= 8"
851
+ }
852
+ },
853
+ "node_modules/@rolldown/pluginutils": {
854
+ "version": "1.0.0-beta.27",
855
+ "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz",
856
+ "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==",
857
+ "dev": true,
858
+ "license": "MIT"
859
+ },
860
+ "node_modules/@rollup/rollup-android-arm-eabi": {
861
+ "version": "4.53.3",
862
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.3.tgz",
863
+ "integrity": "sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w==",
864
+ "cpu": [
865
+ "arm"
866
+ ],
867
+ "dev": true,
868
+ "license": "MIT",
869
+ "optional": true,
870
+ "os": [
871
+ "android"
872
+ ]
873
+ },
874
+ "node_modules/@rollup/rollup-android-arm64": {
875
+ "version": "4.53.3",
876
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.3.tgz",
877
+ "integrity": "sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==",
878
+ "cpu": [
879
+ "arm64"
880
+ ],
881
+ "dev": true,
882
+ "license": "MIT",
883
+ "optional": true,
884
+ "os": [
885
+ "android"
886
+ ]
887
+ },
888
+ "node_modules/@rollup/rollup-darwin-arm64": {
889
+ "version": "4.53.3",
890
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.3.tgz",
891
+ "integrity": "sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==",
892
+ "cpu": [
893
+ "arm64"
894
+ ],
895
+ "dev": true,
896
+ "license": "MIT",
897
+ "optional": true,
898
+ "os": [
899
+ "darwin"
900
+ ]
901
+ },
902
+ "node_modules/@rollup/rollup-darwin-x64": {
903
+ "version": "4.53.3",
904
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.3.tgz",
905
+ "integrity": "sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==",
906
+ "cpu": [
907
+ "x64"
908
+ ],
909
+ "dev": true,
910
+ "license": "MIT",
911
+ "optional": true,
912
+ "os": [
913
+ "darwin"
914
+ ]
915
+ },
916
+ "node_modules/@rollup/rollup-freebsd-arm64": {
917
+ "version": "4.53.3",
918
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.3.tgz",
919
+ "integrity": "sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==",
920
+ "cpu": [
921
+ "arm64"
922
+ ],
923
+ "dev": true,
924
+ "license": "MIT",
925
+ "optional": true,
926
+ "os": [
927
+ "freebsd"
928
+ ]
929
+ },
930
+ "node_modules/@rollup/rollup-freebsd-x64": {
931
+ "version": "4.53.3",
932
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.3.tgz",
933
+ "integrity": "sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==",
934
+ "cpu": [
935
+ "x64"
936
+ ],
937
+ "dev": true,
938
+ "license": "MIT",
939
+ "optional": true,
940
+ "os": [
941
+ "freebsd"
942
+ ]
943
+ },
944
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
945
+ "version": "4.53.3",
946
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.3.tgz",
947
+ "integrity": "sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==",
948
+ "cpu": [
949
+ "arm"
950
+ ],
951
+ "dev": true,
952
+ "license": "MIT",
953
+ "optional": true,
954
+ "os": [
955
+ "linux"
956
+ ]
957
+ },
958
+ "node_modules/@rollup/rollup-linux-arm-musleabihf": {
959
+ "version": "4.53.3",
960
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.3.tgz",
961
+ "integrity": "sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==",
962
+ "cpu": [
963
+ "arm"
964
+ ],
965
+ "dev": true,
966
+ "license": "MIT",
967
+ "optional": true,
968
+ "os": [
969
+ "linux"
970
+ ]
971
+ },
972
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
973
+ "version": "4.53.3",
974
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.3.tgz",
975
+ "integrity": "sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==",
976
+ "cpu": [
977
+ "arm64"
978
+ ],
979
+ "dev": true,
980
+ "license": "MIT",
981
+ "optional": true,
982
+ "os": [
983
+ "linux"
984
+ ]
985
+ },
986
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
987
+ "version": "4.53.3",
988
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.3.tgz",
989
+ "integrity": "sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==",
990
+ "cpu": [
991
+ "arm64"
992
+ ],
993
+ "dev": true,
994
+ "license": "MIT",
995
+ "optional": true,
996
+ "os": [
997
+ "linux"
998
+ ]
999
+ },
1000
+ "node_modules/@rollup/rollup-linux-loong64-gnu": {
1001
+ "version": "4.53.3",
1002
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.3.tgz",
1003
+ "integrity": "sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==",
1004
+ "cpu": [
1005
+ "loong64"
1006
+ ],
1007
+ "dev": true,
1008
+ "license": "MIT",
1009
+ "optional": true,
1010
+ "os": [
1011
+ "linux"
1012
+ ]
1013
+ },
1014
+ "node_modules/@rollup/rollup-linux-ppc64-gnu": {
1015
+ "version": "4.53.3",
1016
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.3.tgz",
1017
+ "integrity": "sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==",
1018
+ "cpu": [
1019
+ "ppc64"
1020
+ ],
1021
+ "dev": true,
1022
+ "license": "MIT",
1023
+ "optional": true,
1024
+ "os": [
1025
+ "linux"
1026
+ ]
1027
+ },
1028
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
1029
+ "version": "4.53.3",
1030
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.3.tgz",
1031
+ "integrity": "sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==",
1032
+ "cpu": [
1033
+ "riscv64"
1034
+ ],
1035
+ "dev": true,
1036
+ "license": "MIT",
1037
+ "optional": true,
1038
+ "os": [
1039
+ "linux"
1040
+ ]
1041
+ },
1042
+ "node_modules/@rollup/rollup-linux-riscv64-musl": {
1043
+ "version": "4.53.3",
1044
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.3.tgz",
1045
+ "integrity": "sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==",
1046
+ "cpu": [
1047
+ "riscv64"
1048
+ ],
1049
+ "dev": true,
1050
+ "license": "MIT",
1051
+ "optional": true,
1052
+ "os": [
1053
+ "linux"
1054
+ ]
1055
+ },
1056
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
1057
+ "version": "4.53.3",
1058
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.3.tgz",
1059
+ "integrity": "sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==",
1060
+ "cpu": [
1061
+ "s390x"
1062
+ ],
1063
+ "dev": true,
1064
+ "license": "MIT",
1065
+ "optional": true,
1066
+ "os": [
1067
+ "linux"
1068
+ ]
1069
+ },
1070
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
1071
+ "version": "4.53.3",
1072
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.3.tgz",
1073
+ "integrity": "sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==",
1074
+ "cpu": [
1075
+ "x64"
1076
+ ],
1077
+ "dev": true,
1078
+ "license": "MIT",
1079
+ "optional": true,
1080
+ "os": [
1081
+ "linux"
1082
+ ]
1083
+ },
1084
+ "node_modules/@rollup/rollup-linux-x64-musl": {
1085
+ "version": "4.53.3",
1086
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.3.tgz",
1087
+ "integrity": "sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==",
1088
+ "cpu": [
1089
+ "x64"
1090
+ ],
1091
+ "dev": true,
1092
+ "license": "MIT",
1093
+ "optional": true,
1094
+ "os": [
1095
+ "linux"
1096
+ ]
1097
+ },
1098
+ "node_modules/@rollup/rollup-openharmony-arm64": {
1099
+ "version": "4.53.3",
1100
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.3.tgz",
1101
+ "integrity": "sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==",
1102
+ "cpu": [
1103
+ "arm64"
1104
+ ],
1105
+ "dev": true,
1106
+ "license": "MIT",
1107
+ "optional": true,
1108
+ "os": [
1109
+ "openharmony"
1110
+ ]
1111
+ },
1112
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
1113
+ "version": "4.53.3",
1114
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.3.tgz",
1115
+ "integrity": "sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==",
1116
+ "cpu": [
1117
+ "arm64"
1118
+ ],
1119
+ "dev": true,
1120
+ "license": "MIT",
1121
+ "optional": true,
1122
+ "os": [
1123
+ "win32"
1124
+ ]
1125
+ },
1126
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
1127
+ "version": "4.53.3",
1128
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.3.tgz",
1129
+ "integrity": "sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==",
1130
+ "cpu": [
1131
+ "ia32"
1132
+ ],
1133
+ "dev": true,
1134
+ "license": "MIT",
1135
+ "optional": true,
1136
+ "os": [
1137
+ "win32"
1138
+ ]
1139
+ },
1140
+ "node_modules/@rollup/rollup-win32-x64-gnu": {
1141
+ "version": "4.53.3",
1142
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.3.tgz",
1143
+ "integrity": "sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==",
1144
+ "cpu": [
1145
+ "x64"
1146
+ ],
1147
+ "dev": true,
1148
+ "license": "MIT",
1149
+ "optional": true,
1150
+ "os": [
1151
+ "win32"
1152
+ ]
1153
+ },
1154
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
1155
+ "version": "4.53.3",
1156
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.3.tgz",
1157
+ "integrity": "sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==",
1158
+ "cpu": [
1159
+ "x64"
1160
+ ],
1161
+ "dev": true,
1162
+ "license": "MIT",
1163
+ "optional": true,
1164
+ "os": [
1165
+ "win32"
1166
+ ]
1167
+ },
1168
+ "node_modules/@types/babel__core": {
1169
+ "version": "7.20.5",
1170
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
1171
+ "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
1172
+ "dev": true,
1173
+ "license": "MIT",
1174
+ "dependencies": {
1175
+ "@babel/parser": "^7.20.7",
1176
+ "@babel/types": "^7.20.7",
1177
+ "@types/babel__generator": "*",
1178
+ "@types/babel__template": "*",
1179
+ "@types/babel__traverse": "*"
1180
+ }
1181
+ },
1182
+ "node_modules/@types/babel__generator": {
1183
+ "version": "7.27.0",
1184
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
1185
+ "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
1186
+ "dev": true,
1187
+ "license": "MIT",
1188
+ "dependencies": {
1189
+ "@babel/types": "^7.0.0"
1190
+ }
1191
+ },
1192
+ "node_modules/@types/babel__template": {
1193
+ "version": "7.4.4",
1194
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
1195
+ "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
1196
+ "dev": true,
1197
+ "license": "MIT",
1198
+ "dependencies": {
1199
+ "@babel/parser": "^7.1.0",
1200
+ "@babel/types": "^7.0.0"
1201
+ }
1202
+ },
1203
+ "node_modules/@types/babel__traverse": {
1204
+ "version": "7.28.0",
1205
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
1206
+ "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
1207
+ "dev": true,
1208
+ "license": "MIT",
1209
+ "dependencies": {
1210
+ "@babel/types": "^7.28.2"
1211
+ }
1212
+ },
1213
+ "node_modules/@types/estree": {
1214
+ "version": "1.0.8",
1215
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
1216
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
1217
+ "dev": true,
1218
+ "license": "MIT"
1219
+ },
1220
+ "node_modules/@types/node": {
1221
+ "version": "22.19.1",
1222
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.1.tgz",
1223
+ "integrity": "sha512-LCCV0HdSZZZb34qifBsyWlUmok6W7ouER+oQIGBScS8EsZsQbrtFTUrDX4hOl+CS6p7cnNC4td+qrSVGSCTUfQ==",
1224
+ "dev": true,
1225
+ "license": "MIT",
1226
+ "peer": true,
1227
+ "dependencies": {
1228
+ "undici-types": "~6.21.0"
1229
+ }
1230
+ },
1231
+ "node_modules/@types/react": {
1232
+ "version": "19.2.7",
1233
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz",
1234
+ "integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==",
1235
+ "dev": true,
1236
+ "license": "MIT",
1237
+ "peer": true,
1238
+ "dependencies": {
1239
+ "csstype": "^3.2.2"
1240
+ }
1241
+ },
1242
+ "node_modules/@types/react-dom": {
1243
+ "version": "19.2.3",
1244
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz",
1245
+ "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==",
1246
+ "dev": true,
1247
+ "license": "MIT",
1248
+ "peerDependencies": {
1249
+ "@types/react": "^19.2.0"
1250
+ }
1251
+ },
1252
+ "node_modules/@vitejs/plugin-react": {
1253
+ "version": "4.7.0",
1254
+ "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz",
1255
+ "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==",
1256
+ "dev": true,
1257
+ "license": "MIT",
1258
+ "dependencies": {
1259
+ "@babel/core": "^7.28.0",
1260
+ "@babel/plugin-transform-react-jsx-self": "^7.27.1",
1261
+ "@babel/plugin-transform-react-jsx-source": "^7.27.1",
1262
+ "@rolldown/pluginutils": "1.0.0-beta.27",
1263
+ "@types/babel__core": "^7.20.5",
1264
+ "react-refresh": "^0.17.0"
1265
+ },
1266
+ "engines": {
1267
+ "node": "^14.18.0 || >=16.0.0"
1268
+ },
1269
+ "peerDependencies": {
1270
+ "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0"
1271
+ }
1272
+ },
1273
+ "node_modules/any-promise": {
1274
+ "version": "1.3.0",
1275
+ "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
1276
+ "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==",
1277
+ "dev": true,
1278
+ "license": "MIT"
1279
+ },
1280
+ "node_modules/anymatch": {
1281
+ "version": "3.1.3",
1282
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
1283
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
1284
+ "dev": true,
1285
+ "license": "ISC",
1286
+ "dependencies": {
1287
+ "normalize-path": "^3.0.0",
1288
+ "picomatch": "^2.0.4"
1289
+ },
1290
+ "engines": {
1291
+ "node": ">= 8"
1292
+ }
1293
+ },
1294
+ "node_modules/anymatch/node_modules/picomatch": {
1295
+ "version": "2.3.1",
1296
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
1297
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
1298
+ "dev": true,
1299
+ "license": "MIT",
1300
+ "engines": {
1301
+ "node": ">=8.6"
1302
+ },
1303
+ "funding": {
1304
+ "url": "https://github.com/sponsors/jonschlinkert"
1305
+ }
1306
+ },
1307
+ "node_modules/arg": {
1308
+ "version": "5.0.2",
1309
+ "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz",
1310
+ "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==",
1311
+ "dev": true,
1312
+ "license": "MIT"
1313
+ },
1314
+ "node_modules/autoprefixer": {
1315
+ "version": "10.4.22",
1316
+ "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.22.tgz",
1317
+ "integrity": "sha512-ARe0v/t9gO28Bznv6GgqARmVqcWOV3mfgUPn9becPHMiD3o9BwlRgaeccZnwTpZ7Zwqrm+c1sUSsMxIzQzc8Xg==",
1318
+ "dev": true,
1319
+ "funding": [
1320
+ {
1321
+ "type": "opencollective",
1322
+ "url": "https://opencollective.com/postcss/"
1323
+ },
1324
+ {
1325
+ "type": "tidelift",
1326
+ "url": "https://tidelift.com/funding/github/npm/autoprefixer"
1327
+ },
1328
+ {
1329
+ "type": "github",
1330
+ "url": "https://github.com/sponsors/ai"
1331
+ }
1332
+ ],
1333
+ "license": "MIT",
1334
+ "dependencies": {
1335
+ "browserslist": "^4.27.0",
1336
+ "caniuse-lite": "^1.0.30001754",
1337
+ "fraction.js": "^5.3.4",
1338
+ "normalize-range": "^0.1.2",
1339
+ "picocolors": "^1.1.1",
1340
+ "postcss-value-parser": "^4.2.0"
1341
+ },
1342
+ "bin": {
1343
+ "autoprefixer": "bin/autoprefixer"
1344
+ },
1345
+ "engines": {
1346
+ "node": "^10 || ^12 || >=14"
1347
+ },
1348
+ "peerDependencies": {
1349
+ "postcss": "^8.1.0"
1350
+ }
1351
+ },
1352
+ "node_modules/baseline-browser-mapping": {
1353
+ "version": "2.9.3",
1354
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.3.tgz",
1355
+ "integrity": "sha512-8QdH6czo+G7uBsNo0GiUfouPN1lRzKdJTGnKXwe12gkFbnnOUaUKGN55dMkfy+mnxmvjwl9zcI4VncczcVXDhA==",
1356
+ "dev": true,
1357
+ "license": "Apache-2.0",
1358
+ "bin": {
1359
+ "baseline-browser-mapping": "dist/cli.js"
1360
+ }
1361
+ },
1362
+ "node_modules/binary-extensions": {
1363
+ "version": "2.3.0",
1364
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
1365
+ "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==",
1366
+ "dev": true,
1367
+ "license": "MIT",
1368
+ "engines": {
1369
+ "node": ">=8"
1370
+ },
1371
+ "funding": {
1372
+ "url": "https://github.com/sponsors/sindresorhus"
1373
+ }
1374
+ },
1375
+ "node_modules/braces": {
1376
+ "version": "3.0.3",
1377
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
1378
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
1379
+ "dev": true,
1380
+ "license": "MIT",
1381
+ "dependencies": {
1382
+ "fill-range": "^7.1.1"
1383
+ },
1384
+ "engines": {
1385
+ "node": ">=8"
1386
+ }
1387
+ },
1388
+ "node_modules/browserslist": {
1389
+ "version": "4.28.1",
1390
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz",
1391
+ "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==",
1392
+ "dev": true,
1393
+ "funding": [
1394
+ {
1395
+ "type": "opencollective",
1396
+ "url": "https://opencollective.com/browserslist"
1397
+ },
1398
+ {
1399
+ "type": "tidelift",
1400
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
1401
+ },
1402
+ {
1403
+ "type": "github",
1404
+ "url": "https://github.com/sponsors/ai"
1405
+ }
1406
+ ],
1407
+ "license": "MIT",
1408
+ "peer": true,
1409
+ "dependencies": {
1410
+ "baseline-browser-mapping": "^2.9.0",
1411
+ "caniuse-lite": "^1.0.30001759",
1412
+ "electron-to-chromium": "^1.5.263",
1413
+ "node-releases": "^2.0.27",
1414
+ "update-browserslist-db": "^1.2.0"
1415
+ },
1416
+ "bin": {
1417
+ "browserslist": "cli.js"
1418
+ },
1419
+ "engines": {
1420
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
1421
+ }
1422
+ },
1423
+ "node_modules/camelcase-css": {
1424
+ "version": "2.0.1",
1425
+ "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
1426
+ "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
1427
+ "dev": true,
1428
+ "license": "MIT",
1429
+ "engines": {
1430
+ "node": ">= 6"
1431
+ }
1432
+ },
1433
+ "node_modules/caniuse-lite": {
1434
+ "version": "1.0.30001759",
1435
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001759.tgz",
1436
+ "integrity": "sha512-Pzfx9fOKoKvevQf8oCXoyNRQ5QyxJj+3O0Rqx2V5oxT61KGx8+n6hV/IUyJeifUci2clnmmKVpvtiqRzgiWjSw==",
1437
+ "dev": true,
1438
+ "funding": [
1439
+ {
1440
+ "type": "opencollective",
1441
+ "url": "https://opencollective.com/browserslist"
1442
+ },
1443
+ {
1444
+ "type": "tidelift",
1445
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
1446
+ },
1447
+ {
1448
+ "type": "github",
1449
+ "url": "https://github.com/sponsors/ai"
1450
+ }
1451
+ ],
1452
+ "license": "CC-BY-4.0"
1453
+ },
1454
+ "node_modules/chokidar": {
1455
+ "version": "3.6.0",
1456
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
1457
+ "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
1458
+ "dev": true,
1459
+ "license": "MIT",
1460
+ "dependencies": {
1461
+ "anymatch": "~3.1.2",
1462
+ "braces": "~3.0.2",
1463
+ "glob-parent": "~5.1.2",
1464
+ "is-binary-path": "~2.1.0",
1465
+ "is-glob": "~4.0.1",
1466
+ "normalize-path": "~3.0.0",
1467
+ "readdirp": "~3.6.0"
1468
+ },
1469
+ "engines": {
1470
+ "node": ">= 8.10.0"
1471
+ },
1472
+ "funding": {
1473
+ "url": "https://paulmillr.com/funding/"
1474
+ },
1475
+ "optionalDependencies": {
1476
+ "fsevents": "~2.3.2"
1477
+ }
1478
+ },
1479
+ "node_modules/chokidar/node_modules/glob-parent": {
1480
+ "version": "5.1.2",
1481
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
1482
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
1483
+ "dev": true,
1484
+ "license": "ISC",
1485
+ "dependencies": {
1486
+ "is-glob": "^4.0.1"
1487
+ },
1488
+ "engines": {
1489
+ "node": ">= 6"
1490
+ }
1491
+ },
1492
+ "node_modules/commander": {
1493
+ "version": "4.1.1",
1494
+ "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
1495
+ "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
1496
+ "dev": true,
1497
+ "license": "MIT",
1498
+ "engines": {
1499
+ "node": ">= 6"
1500
+ }
1501
+ },
1502
+ "node_modules/convert-source-map": {
1503
+ "version": "2.0.0",
1504
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
1505
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
1506
+ "dev": true,
1507
+ "license": "MIT"
1508
+ },
1509
+ "node_modules/cssesc": {
1510
+ "version": "3.0.0",
1511
+ "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
1512
+ "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
1513
+ "dev": true,
1514
+ "license": "MIT",
1515
+ "bin": {
1516
+ "cssesc": "bin/cssesc"
1517
+ },
1518
+ "engines": {
1519
+ "node": ">=4"
1520
+ }
1521
+ },
1522
+ "node_modules/csstype": {
1523
+ "version": "3.2.3",
1524
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
1525
+ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
1526
+ "dev": true,
1527
+ "license": "MIT"
1528
+ },
1529
+ "node_modules/debug": {
1530
+ "version": "4.4.3",
1531
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
1532
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
1533
+ "dev": true,
1534
+ "license": "MIT",
1535
+ "dependencies": {
1536
+ "ms": "^2.1.3"
1537
+ },
1538
+ "engines": {
1539
+ "node": ">=6.0"
1540
+ },
1541
+ "peerDependenciesMeta": {
1542
+ "supports-color": {
1543
+ "optional": true
1544
+ }
1545
+ }
1546
+ },
1547
+ "node_modules/didyoumean": {
1548
+ "version": "1.2.2",
1549
+ "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",
1550
+ "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==",
1551
+ "dev": true,
1552
+ "license": "Apache-2.0"
1553
+ },
1554
+ "node_modules/dlv": {
1555
+ "version": "1.1.3",
1556
+ "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz",
1557
+ "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==",
1558
+ "dev": true,
1559
+ "license": "MIT"
1560
+ },
1561
+ "node_modules/electron-to-chromium": {
1562
+ "version": "1.5.266",
1563
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.266.tgz",
1564
+ "integrity": "sha512-kgWEglXvkEfMH7rxP5OSZZwnaDWT7J9EoZCujhnpLbfi0bbNtRkgdX2E3gt0Uer11c61qCYktB3hwkAS325sJg==",
1565
+ "dev": true,
1566
+ "license": "ISC"
1567
+ },
1568
+ "node_modules/esbuild": {
1569
+ "version": "0.25.12",
1570
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz",
1571
+ "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==",
1572
+ "dev": true,
1573
+ "hasInstallScript": true,
1574
+ "license": "MIT",
1575
+ "bin": {
1576
+ "esbuild": "bin/esbuild"
1577
+ },
1578
+ "engines": {
1579
+ "node": ">=18"
1580
+ },
1581
+ "optionalDependencies": {
1582
+ "@esbuild/aix-ppc64": "0.25.12",
1583
+ "@esbuild/android-arm": "0.25.12",
1584
+ "@esbuild/android-arm64": "0.25.12",
1585
+ "@esbuild/android-x64": "0.25.12",
1586
+ "@esbuild/darwin-arm64": "0.25.12",
1587
+ "@esbuild/darwin-x64": "0.25.12",
1588
+ "@esbuild/freebsd-arm64": "0.25.12",
1589
+ "@esbuild/freebsd-x64": "0.25.12",
1590
+ "@esbuild/linux-arm": "0.25.12",
1591
+ "@esbuild/linux-arm64": "0.25.12",
1592
+ "@esbuild/linux-ia32": "0.25.12",
1593
+ "@esbuild/linux-loong64": "0.25.12",
1594
+ "@esbuild/linux-mips64el": "0.25.12",
1595
+ "@esbuild/linux-ppc64": "0.25.12",
1596
+ "@esbuild/linux-riscv64": "0.25.12",
1597
+ "@esbuild/linux-s390x": "0.25.12",
1598
+ "@esbuild/linux-x64": "0.25.12",
1599
+ "@esbuild/netbsd-arm64": "0.25.12",
1600
+ "@esbuild/netbsd-x64": "0.25.12",
1601
+ "@esbuild/openbsd-arm64": "0.25.12",
1602
+ "@esbuild/openbsd-x64": "0.25.12",
1603
+ "@esbuild/openharmony-arm64": "0.25.12",
1604
+ "@esbuild/sunos-x64": "0.25.12",
1605
+ "@esbuild/win32-arm64": "0.25.12",
1606
+ "@esbuild/win32-ia32": "0.25.12",
1607
+ "@esbuild/win32-x64": "0.25.12"
1608
+ }
1609
+ },
1610
+ "node_modules/escalade": {
1611
+ "version": "3.2.0",
1612
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
1613
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
1614
+ "dev": true,
1615
+ "license": "MIT",
1616
+ "engines": {
1617
+ "node": ">=6"
1618
+ }
1619
+ },
1620
+ "node_modules/fast-glob": {
1621
+ "version": "3.3.3",
1622
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz",
1623
+ "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==",
1624
+ "dev": true,
1625
+ "license": "MIT",
1626
+ "dependencies": {
1627
+ "@nodelib/fs.stat": "^2.0.2",
1628
+ "@nodelib/fs.walk": "^1.2.3",
1629
+ "glob-parent": "^5.1.2",
1630
+ "merge2": "^1.3.0",
1631
+ "micromatch": "^4.0.8"
1632
+ },
1633
+ "engines": {
1634
+ "node": ">=8.6.0"
1635
+ }
1636
+ },
1637
+ "node_modules/fast-glob/node_modules/glob-parent": {
1638
+ "version": "5.1.2",
1639
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
1640
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
1641
+ "dev": true,
1642
+ "license": "ISC",
1643
+ "dependencies": {
1644
+ "is-glob": "^4.0.1"
1645
+ },
1646
+ "engines": {
1647
+ "node": ">= 6"
1648
+ }
1649
+ },
1650
+ "node_modules/fastq": {
1651
+ "version": "1.19.1",
1652
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz",
1653
+ "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==",
1654
+ "dev": true,
1655
+ "license": "ISC",
1656
+ "dependencies": {
1657
+ "reusify": "^1.0.4"
1658
+ }
1659
+ },
1660
+ "node_modules/fdir": {
1661
+ "version": "6.5.0",
1662
+ "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
1663
+ "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
1664
+ "dev": true,
1665
+ "license": "MIT",
1666
+ "engines": {
1667
+ "node": ">=12.0.0"
1668
+ },
1669
+ "peerDependencies": {
1670
+ "picomatch": "^3 || ^4"
1671
+ },
1672
+ "peerDependenciesMeta": {
1673
+ "picomatch": {
1674
+ "optional": true
1675
+ }
1676
+ }
1677
+ },
1678
+ "node_modules/fill-range": {
1679
+ "version": "7.1.1",
1680
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
1681
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
1682
+ "dev": true,
1683
+ "license": "MIT",
1684
+ "dependencies": {
1685
+ "to-regex-range": "^5.0.1"
1686
+ },
1687
+ "engines": {
1688
+ "node": ">=8"
1689
+ }
1690
+ },
1691
+ "node_modules/fraction.js": {
1692
+ "version": "5.3.4",
1693
+ "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz",
1694
+ "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==",
1695
+ "dev": true,
1696
+ "license": "MIT",
1697
+ "engines": {
1698
+ "node": "*"
1699
+ },
1700
+ "funding": {
1701
+ "type": "github",
1702
+ "url": "https://github.com/sponsors/rawify"
1703
+ }
1704
+ },
1705
+ "node_modules/framer-motion": {
1706
+ "version": "11.18.2",
1707
+ "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-11.18.2.tgz",
1708
+ "integrity": "sha512-5F5Och7wrvtLVElIpclDT0CBzMVg3dL22B64aZwHtsIY8RB4mXICLrkajK4G9R+ieSAGcgrLeae2SeUTg2pr6w==",
1709
+ "license": "MIT",
1710
+ "dependencies": {
1711
+ "motion-dom": "^11.18.1",
1712
+ "motion-utils": "^11.18.1",
1713
+ "tslib": "^2.4.0"
1714
+ },
1715
+ "peerDependencies": {
1716
+ "@emotion/is-prop-valid": "*",
1717
+ "react": "^18.0.0 || ^19.0.0",
1718
+ "react-dom": "^18.0.0 || ^19.0.0"
1719
+ },
1720
+ "peerDependenciesMeta": {
1721
+ "@emotion/is-prop-valid": {
1722
+ "optional": true
1723
+ },
1724
+ "react": {
1725
+ "optional": true
1726
+ },
1727
+ "react-dom": {
1728
+ "optional": true
1729
+ }
1730
+ }
1731
+ },
1732
+ "node_modules/fsevents": {
1733
+ "version": "2.3.3",
1734
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
1735
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
1736
+ "dev": true,
1737
+ "hasInstallScript": true,
1738
+ "license": "MIT",
1739
+ "optional": true,
1740
+ "os": [
1741
+ "darwin"
1742
+ ],
1743
+ "engines": {
1744
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
1745
+ }
1746
+ },
1747
+ "node_modules/function-bind": {
1748
+ "version": "1.1.2",
1749
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
1750
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
1751
+ "dev": true,
1752
+ "license": "MIT",
1753
+ "funding": {
1754
+ "url": "https://github.com/sponsors/ljharb"
1755
+ }
1756
+ },
1757
+ "node_modules/gensync": {
1758
+ "version": "1.0.0-beta.2",
1759
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
1760
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
1761
+ "dev": true,
1762
+ "license": "MIT",
1763
+ "engines": {
1764
+ "node": ">=6.9.0"
1765
+ }
1766
+ },
1767
+ "node_modules/glob-parent": {
1768
+ "version": "6.0.2",
1769
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
1770
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
1771
+ "dev": true,
1772
+ "license": "ISC",
1773
+ "dependencies": {
1774
+ "is-glob": "^4.0.3"
1775
+ },
1776
+ "engines": {
1777
+ "node": ">=10.13.0"
1778
+ }
1779
+ },
1780
+ "node_modules/hasown": {
1781
+ "version": "2.0.2",
1782
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
1783
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
1784
+ "dev": true,
1785
+ "license": "MIT",
1786
+ "dependencies": {
1787
+ "function-bind": "^1.1.2"
1788
+ },
1789
+ "engines": {
1790
+ "node": ">= 0.4"
1791
+ }
1792
+ },
1793
+ "node_modules/is-binary-path": {
1794
+ "version": "2.1.0",
1795
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
1796
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
1797
+ "dev": true,
1798
+ "license": "MIT",
1799
+ "dependencies": {
1800
+ "binary-extensions": "^2.0.0"
1801
+ },
1802
+ "engines": {
1803
+ "node": ">=8"
1804
+ }
1805
+ },
1806
+ "node_modules/is-core-module": {
1807
+ "version": "2.16.1",
1808
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz",
1809
+ "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==",
1810
+ "dev": true,
1811
+ "license": "MIT",
1812
+ "dependencies": {
1813
+ "hasown": "^2.0.2"
1814
+ },
1815
+ "engines": {
1816
+ "node": ">= 0.4"
1817
+ },
1818
+ "funding": {
1819
+ "url": "https://github.com/sponsors/ljharb"
1820
+ }
1821
+ },
1822
+ "node_modules/is-extglob": {
1823
+ "version": "2.1.1",
1824
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
1825
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
1826
+ "dev": true,
1827
+ "license": "MIT",
1828
+ "engines": {
1829
+ "node": ">=0.10.0"
1830
+ }
1831
+ },
1832
+ "node_modules/is-glob": {
1833
+ "version": "4.0.3",
1834
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
1835
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
1836
+ "dev": true,
1837
+ "license": "MIT",
1838
+ "dependencies": {
1839
+ "is-extglob": "^2.1.1"
1840
+ },
1841
+ "engines": {
1842
+ "node": ">=0.10.0"
1843
+ }
1844
+ },
1845
+ "node_modules/is-number": {
1846
+ "version": "7.0.0",
1847
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
1848
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
1849
+ "dev": true,
1850
+ "license": "MIT",
1851
+ "engines": {
1852
+ "node": ">=0.12.0"
1853
+ }
1854
+ },
1855
+ "node_modules/jiti": {
1856
+ "version": "1.21.7",
1857
+ "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz",
1858
+ "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==",
1859
+ "dev": true,
1860
+ "license": "MIT",
1861
+ "peer": true,
1862
+ "bin": {
1863
+ "jiti": "bin/jiti.js"
1864
+ }
1865
+ },
1866
+ "node_modules/js-tokens": {
1867
+ "version": "4.0.0",
1868
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
1869
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
1870
+ "dev": true,
1871
+ "license": "MIT"
1872
+ },
1873
+ "node_modules/jsesc": {
1874
+ "version": "3.1.0",
1875
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
1876
+ "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
1877
+ "dev": true,
1878
+ "license": "MIT",
1879
+ "bin": {
1880
+ "jsesc": "bin/jsesc"
1881
+ },
1882
+ "engines": {
1883
+ "node": ">=6"
1884
+ }
1885
+ },
1886
+ "node_modules/json5": {
1887
+ "version": "2.2.3",
1888
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
1889
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
1890
+ "dev": true,
1891
+ "license": "MIT",
1892
+ "bin": {
1893
+ "json5": "lib/cli.js"
1894
+ },
1895
+ "engines": {
1896
+ "node": ">=6"
1897
+ }
1898
+ },
1899
+ "node_modules/lilconfig": {
1900
+ "version": "3.1.3",
1901
+ "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz",
1902
+ "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==",
1903
+ "dev": true,
1904
+ "license": "MIT",
1905
+ "engines": {
1906
+ "node": ">=14"
1907
+ },
1908
+ "funding": {
1909
+ "url": "https://github.com/sponsors/antonk52"
1910
+ }
1911
+ },
1912
+ "node_modules/lines-and-columns": {
1913
+ "version": "1.2.4",
1914
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
1915
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
1916
+ "dev": true,
1917
+ "license": "MIT"
1918
+ },
1919
+ "node_modules/lru-cache": {
1920
+ "version": "5.1.1",
1921
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
1922
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
1923
+ "dev": true,
1924
+ "license": "ISC",
1925
+ "dependencies": {
1926
+ "yallist": "^3.0.2"
1927
+ }
1928
+ },
1929
+ "node_modules/merge2": {
1930
+ "version": "1.4.1",
1931
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
1932
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
1933
+ "dev": true,
1934
+ "license": "MIT",
1935
+ "engines": {
1936
+ "node": ">= 8"
1937
+ }
1938
+ },
1939
+ "node_modules/micromatch": {
1940
+ "version": "4.0.8",
1941
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
1942
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
1943
+ "dev": true,
1944
+ "license": "MIT",
1945
+ "dependencies": {
1946
+ "braces": "^3.0.3",
1947
+ "picomatch": "^2.3.1"
1948
+ },
1949
+ "engines": {
1950
+ "node": ">=8.6"
1951
+ }
1952
+ },
1953
+ "node_modules/micromatch/node_modules/picomatch": {
1954
+ "version": "2.3.1",
1955
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
1956
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
1957
+ "dev": true,
1958
+ "license": "MIT",
1959
+ "engines": {
1960
+ "node": ">=8.6"
1961
+ },
1962
+ "funding": {
1963
+ "url": "https://github.com/sponsors/jonschlinkert"
1964
+ }
1965
+ },
1966
+ "node_modules/motion-dom": {
1967
+ "version": "11.18.1",
1968
+ "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-11.18.1.tgz",
1969
+ "integrity": "sha512-g76KvA001z+atjfxczdRtw/RXOM3OMSdd1f4DL77qCTF/+avrRJiawSG4yDibEQ215sr9kpinSlX2pCTJ9zbhw==",
1970
+ "license": "MIT",
1971
+ "dependencies": {
1972
+ "motion-utils": "^11.18.1"
1973
+ }
1974
+ },
1975
+ "node_modules/motion-utils": {
1976
+ "version": "11.18.1",
1977
+ "resolved": "https://registry.npmjs.org/motion-utils/-/motion-utils-11.18.1.tgz",
1978
+ "integrity": "sha512-49Kt+HKjtbJKLtgO/LKj9Ld+6vw9BjH5d9sc40R/kVyH8GLAXgT42M2NnuPcJNuA3s9ZfZBUcwIgpmZWGEE+hA==",
1979
+ "license": "MIT"
1980
+ },
1981
+ "node_modules/ms": {
1982
+ "version": "2.1.3",
1983
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
1984
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
1985
+ "dev": true,
1986
+ "license": "MIT"
1987
+ },
1988
+ "node_modules/mz": {
1989
+ "version": "2.7.0",
1990
+ "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
1991
+ "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
1992
+ "dev": true,
1993
+ "license": "MIT",
1994
+ "dependencies": {
1995
+ "any-promise": "^1.0.0",
1996
+ "object-assign": "^4.0.1",
1997
+ "thenify-all": "^1.0.0"
1998
+ }
1999
+ },
2000
+ "node_modules/nanoid": {
2001
+ "version": "3.3.11",
2002
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
2003
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
2004
+ "dev": true,
2005
+ "funding": [
2006
+ {
2007
+ "type": "github",
2008
+ "url": "https://github.com/sponsors/ai"
2009
+ }
2010
+ ],
2011
+ "license": "MIT",
2012
+ "bin": {
2013
+ "nanoid": "bin/nanoid.cjs"
2014
+ },
2015
+ "engines": {
2016
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
2017
+ }
2018
+ },
2019
+ "node_modules/node-releases": {
2020
+ "version": "2.0.27",
2021
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
2022
+ "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
2023
+ "dev": true,
2024
+ "license": "MIT"
2025
+ },
2026
+ "node_modules/normalize-path": {
2027
+ "version": "3.0.0",
2028
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
2029
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
2030
+ "dev": true,
2031
+ "license": "MIT",
2032
+ "engines": {
2033
+ "node": ">=0.10.0"
2034
+ }
2035
+ },
2036
+ "node_modules/normalize-range": {
2037
+ "version": "0.1.2",
2038
+ "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
2039
+ "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==",
2040
+ "dev": true,
2041
+ "license": "MIT",
2042
+ "engines": {
2043
+ "node": ">=0.10.0"
2044
+ }
2045
+ },
2046
+ "node_modules/object-assign": {
2047
+ "version": "4.1.1",
2048
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
2049
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
2050
+ "dev": true,
2051
+ "license": "MIT",
2052
+ "engines": {
2053
+ "node": ">=0.10.0"
2054
+ }
2055
+ },
2056
+ "node_modules/object-hash": {
2057
+ "version": "3.0.0",
2058
+ "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz",
2059
+ "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==",
2060
+ "dev": true,
2061
+ "license": "MIT",
2062
+ "engines": {
2063
+ "node": ">= 6"
2064
+ }
2065
+ },
2066
+ "node_modules/path-parse": {
2067
+ "version": "1.0.7",
2068
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
2069
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
2070
+ "dev": true,
2071
+ "license": "MIT"
2072
+ },
2073
+ "node_modules/picocolors": {
2074
+ "version": "1.1.1",
2075
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
2076
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
2077
+ "dev": true,
2078
+ "license": "ISC"
2079
+ },
2080
+ "node_modules/picomatch": {
2081
+ "version": "4.0.3",
2082
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
2083
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
2084
+ "dev": true,
2085
+ "license": "MIT",
2086
+ "peer": true,
2087
+ "engines": {
2088
+ "node": ">=12"
2089
+ },
2090
+ "funding": {
2091
+ "url": "https://github.com/sponsors/jonschlinkert"
2092
+ }
2093
+ },
2094
+ "node_modules/pify": {
2095
+ "version": "2.3.0",
2096
+ "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz",
2097
+ "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==",
2098
+ "dev": true,
2099
+ "license": "MIT",
2100
+ "engines": {
2101
+ "node": ">=0.10.0"
2102
+ }
2103
+ },
2104
+ "node_modules/pirates": {
2105
+ "version": "4.0.7",
2106
+ "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz",
2107
+ "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==",
2108
+ "dev": true,
2109
+ "license": "MIT",
2110
+ "engines": {
2111
+ "node": ">= 6"
2112
+ }
2113
+ },
2114
+ "node_modules/postcss": {
2115
+ "version": "8.5.6",
2116
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
2117
+ "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
2118
+ "dev": true,
2119
+ "funding": [
2120
+ {
2121
+ "type": "opencollective",
2122
+ "url": "https://opencollective.com/postcss/"
2123
+ },
2124
+ {
2125
+ "type": "tidelift",
2126
+ "url": "https://tidelift.com/funding/github/npm/postcss"
2127
+ },
2128
+ {
2129
+ "type": "github",
2130
+ "url": "https://github.com/sponsors/ai"
2131
+ }
2132
+ ],
2133
+ "license": "MIT",
2134
+ "peer": true,
2135
+ "dependencies": {
2136
+ "nanoid": "^3.3.11",
2137
+ "picocolors": "^1.1.1",
2138
+ "source-map-js": "^1.2.1"
2139
+ },
2140
+ "engines": {
2141
+ "node": "^10 || ^12 || >=14"
2142
+ }
2143
+ },
2144
+ "node_modules/postcss-import": {
2145
+ "version": "15.1.0",
2146
+ "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz",
2147
+ "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==",
2148
+ "dev": true,
2149
+ "license": "MIT",
2150
+ "dependencies": {
2151
+ "postcss-value-parser": "^4.0.0",
2152
+ "read-cache": "^1.0.0",
2153
+ "resolve": "^1.1.7"
2154
+ },
2155
+ "engines": {
2156
+ "node": ">=14.0.0"
2157
+ },
2158
+ "peerDependencies": {
2159
+ "postcss": "^8.0.0"
2160
+ }
2161
+ },
2162
+ "node_modules/postcss-js": {
2163
+ "version": "4.1.0",
2164
+ "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz",
2165
+ "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==",
2166
+ "dev": true,
2167
+ "funding": [
2168
+ {
2169
+ "type": "opencollective",
2170
+ "url": "https://opencollective.com/postcss/"
2171
+ },
2172
+ {
2173
+ "type": "github",
2174
+ "url": "https://github.com/sponsors/ai"
2175
+ }
2176
+ ],
2177
+ "license": "MIT",
2178
+ "dependencies": {
2179
+ "camelcase-css": "^2.0.1"
2180
+ },
2181
+ "engines": {
2182
+ "node": "^12 || ^14 || >= 16"
2183
+ },
2184
+ "peerDependencies": {
2185
+ "postcss": "^8.4.21"
2186
+ }
2187
+ },
2188
+ "node_modules/postcss-load-config": {
2189
+ "version": "6.0.1",
2190
+ "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz",
2191
+ "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==",
2192
+ "dev": true,
2193
+ "funding": [
2194
+ {
2195
+ "type": "opencollective",
2196
+ "url": "https://opencollective.com/postcss/"
2197
+ },
2198
+ {
2199
+ "type": "github",
2200
+ "url": "https://github.com/sponsors/ai"
2201
+ }
2202
+ ],
2203
+ "license": "MIT",
2204
+ "dependencies": {
2205
+ "lilconfig": "^3.1.1"
2206
+ },
2207
+ "engines": {
2208
+ "node": ">= 18"
2209
+ },
2210
+ "peerDependencies": {
2211
+ "jiti": ">=1.21.0",
2212
+ "postcss": ">=8.0.9",
2213
+ "tsx": "^4.8.1",
2214
+ "yaml": "^2.4.2"
2215
+ },
2216
+ "peerDependenciesMeta": {
2217
+ "jiti": {
2218
+ "optional": true
2219
+ },
2220
+ "postcss": {
2221
+ "optional": true
2222
+ },
2223
+ "tsx": {
2224
+ "optional": true
2225
+ },
2226
+ "yaml": {
2227
+ "optional": true
2228
+ }
2229
+ }
2230
+ },
2231
+ "node_modules/postcss-nested": {
2232
+ "version": "6.2.0",
2233
+ "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz",
2234
+ "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==",
2235
+ "dev": true,
2236
+ "funding": [
2237
+ {
2238
+ "type": "opencollective",
2239
+ "url": "https://opencollective.com/postcss/"
2240
+ },
2241
+ {
2242
+ "type": "github",
2243
+ "url": "https://github.com/sponsors/ai"
2244
+ }
2245
+ ],
2246
+ "license": "MIT",
2247
+ "dependencies": {
2248
+ "postcss-selector-parser": "^6.1.1"
2249
+ },
2250
+ "engines": {
2251
+ "node": ">=12.0"
2252
+ },
2253
+ "peerDependencies": {
2254
+ "postcss": "^8.2.14"
2255
+ }
2256
+ },
2257
+ "node_modules/postcss-selector-parser": {
2258
+ "version": "6.1.2",
2259
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz",
2260
+ "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==",
2261
+ "dev": true,
2262
+ "license": "MIT",
2263
+ "dependencies": {
2264
+ "cssesc": "^3.0.0",
2265
+ "util-deprecate": "^1.0.2"
2266
+ },
2267
+ "engines": {
2268
+ "node": ">=4"
2269
+ }
2270
+ },
2271
+ "node_modules/postcss-value-parser": {
2272
+ "version": "4.2.0",
2273
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
2274
+ "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==",
2275
+ "dev": true,
2276
+ "license": "MIT"
2277
+ },
2278
+ "node_modules/queue-microtask": {
2279
+ "version": "1.2.3",
2280
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
2281
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
2282
+ "dev": true,
2283
+ "funding": [
2284
+ {
2285
+ "type": "github",
2286
+ "url": "https://github.com/sponsors/feross"
2287
+ },
2288
+ {
2289
+ "type": "patreon",
2290
+ "url": "https://www.patreon.com/feross"
2291
+ },
2292
+ {
2293
+ "type": "consulting",
2294
+ "url": "https://feross.org/support"
2295
+ }
2296
+ ],
2297
+ "license": "MIT"
2298
+ },
2299
+ "node_modules/react": {
2300
+ "version": "19.2.1",
2301
+ "resolved": "https://registry.npmjs.org/react/-/react-19.2.1.tgz",
2302
+ "integrity": "sha512-DGrYcCWK7tvYMnWh79yrPHt+vdx9tY+1gPZa7nJQtO/p8bLTDaHp4dzwEhQB7pZ4Xe3ok4XKuEPrVuc+wlpkmw==",
2303
+ "license": "MIT",
2304
+ "peer": true,
2305
+ "engines": {
2306
+ "node": ">=0.10.0"
2307
+ }
2308
+ },
2309
+ "node_modules/react-dom": {
2310
+ "version": "19.2.1",
2311
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.1.tgz",
2312
+ "integrity": "sha512-ibrK8llX2a4eOskq1mXKu/TGZj9qzomO+sNfO98M6d9zIPOEhlBkMkBUBLd1vgS0gQsLDBzA+8jJBVXDnfHmJg==",
2313
+ "license": "MIT",
2314
+ "peer": true,
2315
+ "dependencies": {
2316
+ "scheduler": "^0.27.0"
2317
+ },
2318
+ "peerDependencies": {
2319
+ "react": "^19.2.1"
2320
+ }
2321
+ },
2322
+ "node_modules/react-refresh": {
2323
+ "version": "0.17.0",
2324
+ "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz",
2325
+ "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==",
2326
+ "dev": true,
2327
+ "license": "MIT",
2328
+ "engines": {
2329
+ "node": ">=0.10.0"
2330
+ }
2331
+ },
2332
+ "node_modules/read-cache": {
2333
+ "version": "1.0.0",
2334
+ "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz",
2335
+ "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==",
2336
+ "dev": true,
2337
+ "license": "MIT",
2338
+ "dependencies": {
2339
+ "pify": "^2.3.0"
2340
+ }
2341
+ },
2342
+ "node_modules/readdirp": {
2343
+ "version": "3.6.0",
2344
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
2345
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
2346
+ "dev": true,
2347
+ "license": "MIT",
2348
+ "dependencies": {
2349
+ "picomatch": "^2.2.1"
2350
+ },
2351
+ "engines": {
2352
+ "node": ">=8.10.0"
2353
+ }
2354
+ },
2355
+ "node_modules/readdirp/node_modules/picomatch": {
2356
+ "version": "2.3.1",
2357
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
2358
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
2359
+ "dev": true,
2360
+ "license": "MIT",
2361
+ "engines": {
2362
+ "node": ">=8.6"
2363
+ },
2364
+ "funding": {
2365
+ "url": "https://github.com/sponsors/jonschlinkert"
2366
+ }
2367
+ },
2368
+ "node_modules/resolve": {
2369
+ "version": "1.22.11",
2370
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz",
2371
+ "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==",
2372
+ "dev": true,
2373
+ "license": "MIT",
2374
+ "dependencies": {
2375
+ "is-core-module": "^2.16.1",
2376
+ "path-parse": "^1.0.7",
2377
+ "supports-preserve-symlinks-flag": "^1.0.0"
2378
+ },
2379
+ "bin": {
2380
+ "resolve": "bin/resolve"
2381
+ },
2382
+ "engines": {
2383
+ "node": ">= 0.4"
2384
+ },
2385
+ "funding": {
2386
+ "url": "https://github.com/sponsors/ljharb"
2387
+ }
2388
+ },
2389
+ "node_modules/reusify": {
2390
+ "version": "1.1.0",
2391
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz",
2392
+ "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==",
2393
+ "dev": true,
2394
+ "license": "MIT",
2395
+ "engines": {
2396
+ "iojs": ">=1.0.0",
2397
+ "node": ">=0.10.0"
2398
+ }
2399
+ },
2400
+ "node_modules/rollup": {
2401
+ "version": "4.53.3",
2402
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.3.tgz",
2403
+ "integrity": "sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==",
2404
+ "dev": true,
2405
+ "license": "MIT",
2406
+ "dependencies": {
2407
+ "@types/estree": "1.0.8"
2408
+ },
2409
+ "bin": {
2410
+ "rollup": "dist/bin/rollup"
2411
+ },
2412
+ "engines": {
2413
+ "node": ">=18.0.0",
2414
+ "npm": ">=8.0.0"
2415
+ },
2416
+ "optionalDependencies": {
2417
+ "@rollup/rollup-android-arm-eabi": "4.53.3",
2418
+ "@rollup/rollup-android-arm64": "4.53.3",
2419
+ "@rollup/rollup-darwin-arm64": "4.53.3",
2420
+ "@rollup/rollup-darwin-x64": "4.53.3",
2421
+ "@rollup/rollup-freebsd-arm64": "4.53.3",
2422
+ "@rollup/rollup-freebsd-x64": "4.53.3",
2423
+ "@rollup/rollup-linux-arm-gnueabihf": "4.53.3",
2424
+ "@rollup/rollup-linux-arm-musleabihf": "4.53.3",
2425
+ "@rollup/rollup-linux-arm64-gnu": "4.53.3",
2426
+ "@rollup/rollup-linux-arm64-musl": "4.53.3",
2427
+ "@rollup/rollup-linux-loong64-gnu": "4.53.3",
2428
+ "@rollup/rollup-linux-ppc64-gnu": "4.53.3",
2429
+ "@rollup/rollup-linux-riscv64-gnu": "4.53.3",
2430
+ "@rollup/rollup-linux-riscv64-musl": "4.53.3",
2431
+ "@rollup/rollup-linux-s390x-gnu": "4.53.3",
2432
+ "@rollup/rollup-linux-x64-gnu": "4.53.3",
2433
+ "@rollup/rollup-linux-x64-musl": "4.53.3",
2434
+ "@rollup/rollup-openharmony-arm64": "4.53.3",
2435
+ "@rollup/rollup-win32-arm64-msvc": "4.53.3",
2436
+ "@rollup/rollup-win32-ia32-msvc": "4.53.3",
2437
+ "@rollup/rollup-win32-x64-gnu": "4.53.3",
2438
+ "@rollup/rollup-win32-x64-msvc": "4.53.3",
2439
+ "fsevents": "~2.3.2"
2440
+ }
2441
+ },
2442
+ "node_modules/run-parallel": {
2443
+ "version": "1.2.0",
2444
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
2445
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
2446
+ "dev": true,
2447
+ "funding": [
2448
+ {
2449
+ "type": "github",
2450
+ "url": "https://github.com/sponsors/feross"
2451
+ },
2452
+ {
2453
+ "type": "patreon",
2454
+ "url": "https://www.patreon.com/feross"
2455
+ },
2456
+ {
2457
+ "type": "consulting",
2458
+ "url": "https://feross.org/support"
2459
+ }
2460
+ ],
2461
+ "license": "MIT",
2462
+ "dependencies": {
2463
+ "queue-microtask": "^1.2.2"
2464
+ }
2465
+ },
2466
+ "node_modules/scheduler": {
2467
+ "version": "0.27.0",
2468
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz",
2469
+ "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==",
2470
+ "license": "MIT"
2471
+ },
2472
+ "node_modules/semver": {
2473
+ "version": "6.3.1",
2474
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
2475
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
2476
+ "dev": true,
2477
+ "license": "ISC",
2478
+ "bin": {
2479
+ "semver": "bin/semver.js"
2480
+ }
2481
+ },
2482
+ "node_modules/source-map-js": {
2483
+ "version": "1.2.1",
2484
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
2485
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
2486
+ "dev": true,
2487
+ "license": "BSD-3-Clause",
2488
+ "engines": {
2489
+ "node": ">=0.10.0"
2490
+ }
2491
+ },
2492
+ "node_modules/sucrase": {
2493
+ "version": "3.35.1",
2494
+ "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz",
2495
+ "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==",
2496
+ "dev": true,
2497
+ "license": "MIT",
2498
+ "dependencies": {
2499
+ "@jridgewell/gen-mapping": "^0.3.2",
2500
+ "commander": "^4.0.0",
2501
+ "lines-and-columns": "^1.1.6",
2502
+ "mz": "^2.7.0",
2503
+ "pirates": "^4.0.1",
2504
+ "tinyglobby": "^0.2.11",
2505
+ "ts-interface-checker": "^0.1.9"
2506
+ },
2507
+ "bin": {
2508
+ "sucrase": "bin/sucrase",
2509
+ "sucrase-node": "bin/sucrase-node"
2510
+ },
2511
+ "engines": {
2512
+ "node": ">=16 || 14 >=14.17"
2513
+ }
2514
+ },
2515
+ "node_modules/supports-preserve-symlinks-flag": {
2516
+ "version": "1.0.0",
2517
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
2518
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
2519
+ "dev": true,
2520
+ "license": "MIT",
2521
+ "engines": {
2522
+ "node": ">= 0.4"
2523
+ },
2524
+ "funding": {
2525
+ "url": "https://github.com/sponsors/ljharb"
2526
+ }
2527
+ },
2528
+ "node_modules/tailwindcss": {
2529
+ "version": "3.4.18",
2530
+ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.18.tgz",
2531
+ "integrity": "sha512-6A2rnmW5xZMdw11LYjhcI5846rt9pbLSabY5XPxo+XWdxwZaFEn47Go4NzFiHu9sNNmr/kXivP1vStfvMaK1GQ==",
2532
+ "dev": true,
2533
+ "license": "MIT",
2534
+ "dependencies": {
2535
+ "@alloc/quick-lru": "^5.2.0",
2536
+ "arg": "^5.0.2",
2537
+ "chokidar": "^3.6.0",
2538
+ "didyoumean": "^1.2.2",
2539
+ "dlv": "^1.1.3",
2540
+ "fast-glob": "^3.3.2",
2541
+ "glob-parent": "^6.0.2",
2542
+ "is-glob": "^4.0.3",
2543
+ "jiti": "^1.21.7",
2544
+ "lilconfig": "^3.1.3",
2545
+ "micromatch": "^4.0.8",
2546
+ "normalize-path": "^3.0.0",
2547
+ "object-hash": "^3.0.0",
2548
+ "picocolors": "^1.1.1",
2549
+ "postcss": "^8.4.47",
2550
+ "postcss-import": "^15.1.0",
2551
+ "postcss-js": "^4.0.1",
2552
+ "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0",
2553
+ "postcss-nested": "^6.2.0",
2554
+ "postcss-selector-parser": "^6.1.2",
2555
+ "resolve": "^1.22.8",
2556
+ "sucrase": "^3.35.0"
2557
+ },
2558
+ "bin": {
2559
+ "tailwind": "lib/cli.js",
2560
+ "tailwindcss": "lib/cli.js"
2561
+ },
2562
+ "engines": {
2563
+ "node": ">=14.0.0"
2564
+ }
2565
+ },
2566
+ "node_modules/thenify": {
2567
+ "version": "3.3.1",
2568
+ "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz",
2569
+ "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==",
2570
+ "dev": true,
2571
+ "license": "MIT",
2572
+ "dependencies": {
2573
+ "any-promise": "^1.0.0"
2574
+ }
2575
+ },
2576
+ "node_modules/thenify-all": {
2577
+ "version": "1.6.0",
2578
+ "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
2579
+ "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==",
2580
+ "dev": true,
2581
+ "license": "MIT",
2582
+ "dependencies": {
2583
+ "thenify": ">= 3.1.0 < 4"
2584
+ },
2585
+ "engines": {
2586
+ "node": ">=0.8"
2587
+ }
2588
+ },
2589
+ "node_modules/tinyglobby": {
2590
+ "version": "0.2.15",
2591
+ "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz",
2592
+ "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==",
2593
+ "dev": true,
2594
+ "license": "MIT",
2595
+ "dependencies": {
2596
+ "fdir": "^6.5.0",
2597
+ "picomatch": "^4.0.3"
2598
+ },
2599
+ "engines": {
2600
+ "node": ">=12.0.0"
2601
+ },
2602
+ "funding": {
2603
+ "url": "https://github.com/sponsors/SuperchupuDev"
2604
+ }
2605
+ },
2606
+ "node_modules/to-regex-range": {
2607
+ "version": "5.0.1",
2608
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
2609
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
2610
+ "dev": true,
2611
+ "license": "MIT",
2612
+ "dependencies": {
2613
+ "is-number": "^7.0.0"
2614
+ },
2615
+ "engines": {
2616
+ "node": ">=8.0"
2617
+ }
2618
+ },
2619
+ "node_modules/ts-interface-checker": {
2620
+ "version": "0.1.13",
2621
+ "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz",
2622
+ "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==",
2623
+ "dev": true,
2624
+ "license": "Apache-2.0"
2625
+ },
2626
+ "node_modules/tslib": {
2627
+ "version": "2.8.1",
2628
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
2629
+ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
2630
+ "license": "0BSD"
2631
+ },
2632
+ "node_modules/typescript": {
2633
+ "version": "5.6.3",
2634
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz",
2635
+ "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==",
2636
+ "dev": true,
2637
+ "license": "Apache-2.0",
2638
+ "peer": true,
2639
+ "bin": {
2640
+ "tsc": "bin/tsc",
2641
+ "tsserver": "bin/tsserver"
2642
+ },
2643
+ "engines": {
2644
+ "node": ">=14.17"
2645
+ }
2646
+ },
2647
+ "node_modules/undici-types": {
2648
+ "version": "6.21.0",
2649
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
2650
+ "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
2651
+ "dev": true,
2652
+ "license": "MIT"
2653
+ },
2654
+ "node_modules/update-browserslist-db": {
2655
+ "version": "1.2.2",
2656
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.2.tgz",
2657
+ "integrity": "sha512-E85pfNzMQ9jpKkA7+TJAi4TJN+tBCuWh5rUcS/sv6cFi+1q9LYDwDI5dpUL0u/73EElyQ8d3TEaeW4sPedBqYA==",
2658
+ "dev": true,
2659
+ "funding": [
2660
+ {
2661
+ "type": "opencollective",
2662
+ "url": "https://opencollective.com/browserslist"
2663
+ },
2664
+ {
2665
+ "type": "tidelift",
2666
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
2667
+ },
2668
+ {
2669
+ "type": "github",
2670
+ "url": "https://github.com/sponsors/ai"
2671
+ }
2672
+ ],
2673
+ "license": "MIT",
2674
+ "dependencies": {
2675
+ "escalade": "^3.2.0",
2676
+ "picocolors": "^1.1.1"
2677
+ },
2678
+ "bin": {
2679
+ "update-browserslist-db": "cli.js"
2680
+ },
2681
+ "peerDependencies": {
2682
+ "browserslist": ">= 4.21.0"
2683
+ }
2684
+ },
2685
+ "node_modules/util-deprecate": {
2686
+ "version": "1.0.2",
2687
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
2688
+ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
2689
+ "dev": true,
2690
+ "license": "MIT"
2691
+ },
2692
+ "node_modules/vite": {
2693
+ "version": "6.4.1",
2694
+ "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.1.tgz",
2695
+ "integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==",
2696
+ "dev": true,
2697
+ "license": "MIT",
2698
+ "peer": true,
2699
+ "dependencies": {
2700
+ "esbuild": "^0.25.0",
2701
+ "fdir": "^6.4.4",
2702
+ "picomatch": "^4.0.2",
2703
+ "postcss": "^8.5.3",
2704
+ "rollup": "^4.34.9",
2705
+ "tinyglobby": "^0.2.13"
2706
+ },
2707
+ "bin": {
2708
+ "vite": "bin/vite.js"
2709
+ },
2710
+ "engines": {
2711
+ "node": "^18.0.0 || ^20.0.0 || >=22.0.0"
2712
+ },
2713
+ "funding": {
2714
+ "url": "https://github.com/vitejs/vite?sponsor=1"
2715
+ },
2716
+ "optionalDependencies": {
2717
+ "fsevents": "~2.3.3"
2718
+ },
2719
+ "peerDependencies": {
2720
+ "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0",
2721
+ "jiti": ">=1.21.0",
2722
+ "less": "*",
2723
+ "lightningcss": "^1.21.0",
2724
+ "sass": "*",
2725
+ "sass-embedded": "*",
2726
+ "stylus": "*",
2727
+ "sugarss": "*",
2728
+ "terser": "^5.16.0",
2729
+ "tsx": "^4.8.1",
2730
+ "yaml": "^2.4.2"
2731
+ },
2732
+ "peerDependenciesMeta": {
2733
+ "@types/node": {
2734
+ "optional": true
2735
+ },
2736
+ "jiti": {
2737
+ "optional": true
2738
+ },
2739
+ "less": {
2740
+ "optional": true
2741
+ },
2742
+ "lightningcss": {
2743
+ "optional": true
2744
+ },
2745
+ "sass": {
2746
+ "optional": true
2747
+ },
2748
+ "sass-embedded": {
2749
+ "optional": true
2750
+ },
2751
+ "stylus": {
2752
+ "optional": true
2753
+ },
2754
+ "sugarss": {
2755
+ "optional": true
2756
+ },
2757
+ "terser": {
2758
+ "optional": true
2759
+ },
2760
+ "tsx": {
2761
+ "optional": true
2762
+ },
2763
+ "yaml": {
2764
+ "optional": true
2765
+ }
2766
+ }
2767
+ },
2768
+ "node_modules/yallist": {
2769
+ "version": "3.1.1",
2770
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
2771
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
2772
+ "dev": true,
2773
+ "license": "ISC"
2774
+ }
2775
+ }
2776
+ }
frontend/package.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "video-genesis-studio",
3
+ "private": true,
4
+ "version": "1.0.0",
5
+ "type": "module",
6
+ "scripts": {
7
+ "dev": "vite",
8
+ "build": "tsc && vite build",
9
+ "preview": "vite preview"
10
+ },
11
+ "dependencies": {
12
+ "react": "^19.0.0",
13
+ "react-dom": "^19.0.0",
14
+ "framer-motion": "^11.15.0"
15
+ },
16
+ "devDependencies": {
17
+ "@types/node": "^22.14.0",
18
+ "@types/react": "^19.0.0",
19
+ "@types/react-dom": "^19.0.0",
20
+ "@vitejs/plugin-react": "^4.3.4",
21
+ "autoprefixer": "^10.4.20",
22
+ "postcss": "^8.4.49",
23
+ "tailwindcss": "^3.4.17",
24
+ "typescript": "~5.6.0",
25
+ "vite": "^6.0.0"
26
+ }
27
+ }
frontend/postcss.config.js ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ export default {
2
+ plugins: {
3
+ tailwindcss: {},
4
+ autoprefixer: {},
5
+ },
6
+ }
7
+
frontend/public/vite.svg ADDED
frontend/src/App.css ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #root {
2
+ max-width: 1280px;
3
+ margin: 0 auto;
4
+ padding: 2rem;
5
+ text-align: center;
6
+ }
7
+
8
+ .logo {
9
+ height: 6em;
10
+ padding: 1.5em;
11
+ will-change: filter;
12
+ transition: filter 300ms;
13
+ }
14
+ .logo:hover {
15
+ filter: drop-shadow(0 0 2em #646cffaa);
16
+ }
17
+ .logo.react:hover {
18
+ filter: drop-shadow(0 0 2em #61dafbaa);
19
+ }
20
+
21
+ @keyframes logo-spin {
22
+ from {
23
+ transform: rotate(0deg);
24
+ }
25
+ to {
26
+ transform: rotate(360deg);
27
+ }
28
+ }
29
+
30
+ @media (prefers-reduced-motion: no-preference) {
31
+ a:nth-of-type(2) .logo {
32
+ animation: logo-spin infinite 20s linear;
33
+ }
34
+ }
35
+
36
+ .card {
37
+ padding: 2em;
38
+ }
39
+
40
+ .read-the-docs {
41
+ color: #888;
42
+ }
frontend/src/App.tsx ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useEffect, useState } from 'react';
2
+ import { AnimatePresence, motion } from 'framer-motion';
3
+ import { GenerationProvider, useGeneration } from '@/context/GenerationContext';
4
+ import { AuthProvider, useAuth } from '@/context/AuthContext';
5
+ import {
6
+ ProviderSelect,
7
+ GenerationForm,
8
+ GenerationProgress,
9
+ GenerationComplete,
10
+ ErrorDisplay,
11
+ Login,
12
+ LogoIcon
13
+ } from '@/components';
14
+ import { checkHealth } from '@/utils/api';
15
+ import type { HealthStatus } from '@/types';
16
+
17
+ // Main App Content (uses context)
18
+ function AppContent() {
19
+ const { isAuthenticated, loading: authLoading, logout } = useAuth();
20
+ const { state, selectProvider, reset } = useGeneration();
21
+ const [healthStatus, setHealthStatus] = useState<HealthStatus | null>(null);
22
+ const [healthError, setHealthError] = useState<string | null>(null);
23
+
24
+ // Check backend health on mount (must be called before any conditional returns)
25
+ useEffect(() => {
26
+ if (isAuthenticated) {
27
+ checkHealth()
28
+ .then(setHealthStatus)
29
+ .catch((err) => setHealthError(err.message));
30
+ }
31
+ }, [isAuthenticated]);
32
+
33
+ // Show login if not authenticated
34
+ if (authLoading) {
35
+ return (
36
+ <div className="min-h-screen flex items-center justify-center bg-mesh-pattern">
37
+ <div className="text-center">
38
+ <div className="animate-spin rounded-full h-12 w-12 border-b-2 border-coral-500 mx-auto mb-4"></div>
39
+ <p className="text-void-400">Loading...</p>
40
+ </div>
41
+ </div>
42
+ );
43
+ }
44
+
45
+ if (!isAuthenticated) {
46
+ return <Login />;
47
+ }
48
+
49
+ // Render based on current step
50
+ const renderContent = () => {
51
+ switch (state.step) {
52
+ case 'idle':
53
+ return (
54
+ <ProviderSelect
55
+ onSelect={(provider) => selectProvider(provider)}
56
+ />
57
+ );
58
+
59
+ case 'configuring':
60
+ return (
61
+ <GenerationForm
62
+ provider={state.provider!}
63
+ onBack={() => reset()}
64
+ />
65
+ );
66
+
67
+ case 'generating_prompts':
68
+ case 'generating_video':
69
+ case 'processing':
70
+ return <GenerationProgress />;
71
+
72
+ case 'completed':
73
+ return <GenerationComplete />;
74
+
75
+ case 'error':
76
+ return <ErrorDisplay />;
77
+
78
+ default:
79
+ return (
80
+ <ProviderSelect
81
+ onSelect={(provider) => selectProvider(provider)}
82
+ />
83
+ );
84
+ }
85
+ };
86
+
87
+ return (
88
+ <div className="min-h-screen flex flex-col bg-mesh-pattern">
89
+ {/* Header */}
90
+ <header className="glass-dark sticky top-0 z-50">
91
+ <div className="max-w-7xl mx-auto px-6 py-4">
92
+ <div className="flex items-center justify-between">
93
+ {/* Logo */}
94
+ <button
95
+ onClick={reset}
96
+ className="flex items-center gap-3 hover:opacity-80 transition-opacity"
97
+ >
98
+ <LogoIcon size={36} />
99
+ <div>
100
+ <h1 className="text-lg font-display font-bold text-void-100">
101
+ Video AdGenesis
102
+ </h1>
103
+ <p className="text-xs text-void-500 -mt-0.5">Studio</p>
104
+ </div>
105
+ </button>
106
+
107
+ {/* Status Indicator */}
108
+ <div className="flex items-center gap-4">
109
+ {/* Provider Badge */}
110
+ {state.provider && state.step !== 'idle' && (
111
+ <motion.span
112
+ initial={{ opacity: 0, scale: 0.9 }}
113
+ animate={{ opacity: 1, scale: 1 }}
114
+ className={`
115
+ px-3 py-1 rounded-full text-xs font-semibold
116
+ ${state.provider === 'kling'
117
+ ? 'bg-coral-500/20 text-coral-300'
118
+ : 'bg-electric-500/20 text-electric-300'
119
+ }
120
+ `}
121
+ >
122
+ {state.provider === 'kling' ? 'KIE API' : 'Replicate'}
123
+ </motion.span>
124
+ )}
125
+
126
+ {/* Backend Status */}
127
+ <div className="flex items-center gap-2">
128
+ <div
129
+ className={`w-2 h-2 rounded-full ${
130
+ healthError ? 'bg-red-500' :
131
+ healthStatus ? 'bg-green-500' : 'bg-amber-500 animate-pulse'
132
+ }`}
133
+ />
134
+ <span className="text-xs text-void-400">
135
+ {healthError ? 'Backend Offline' :
136
+ healthStatus ? (healthStatus.is_dev_mode ? 'Dev Mode' : 'Production') :
137
+ 'Connecting...'}
138
+ </span>
139
+ </div>
140
+
141
+ {/* Logout Button */}
142
+ <button
143
+ onClick={logout}
144
+ className="px-3 py-1.5 text-xs font-medium text-void-400 hover:text-void-200
145
+ hover:bg-void-800 rounded-lg transition-colors"
146
+ title="Logout"
147
+ >
148
+ Logout
149
+ </button>
150
+ </div>
151
+ </div>
152
+ </div>
153
+ </header>
154
+
155
+ {/* Main Content */}
156
+ <main className="flex-grow">
157
+ <AnimatePresence mode="wait">
158
+ <motion.div
159
+ key={state.step}
160
+ initial={{ opacity: 0, y: 10 }}
161
+ animate={{ opacity: 1, y: 0 }}
162
+ exit={{ opacity: 0, y: -10 }}
163
+ transition={{ duration: 0.3 }}
164
+ >
165
+ {renderContent()}
166
+ </motion.div>
167
+ </AnimatePresence>
168
+ </main>
169
+
170
+ {/* Footer */}
171
+ <footer className="py-6 text-center">
172
+ <p className="text-xs text-void-600">
173
+ Powered by {' '}
174
+ <span className="text-coral-400">AdGenesis</span>
175
+ </p>
176
+ </footer>
177
+ </div>
178
+ );
179
+ }
180
+
181
+ // App Wrapper with Providers
182
+ function App() {
183
+ return (
184
+ <AuthProvider>
185
+ <GenerationProvider>
186
+ <AppContent />
187
+ </GenerationProvider>
188
+ </AuthProvider>
189
+ );
190
+ }
191
+
192
+ export default App;
frontend/src/assets/react.svg ADDED
frontend/src/components/ErrorDisplay.tsx ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { motion } from 'framer-motion';
2
+ import { useGeneration } from '@/context/GenerationContext';
3
+ import { XIcon, RefreshIcon, ArrowLeftIcon } from './Icons';
4
+
5
+ export const ErrorDisplay: React.FC = () => {
6
+ const { state, reset, setStep } = useGeneration();
7
+ const { error } = state;
8
+
9
+ return (
10
+ <motion.div
11
+ initial={{ opacity: 0 }}
12
+ animate={{ opacity: 1 }}
13
+ className="min-h-[60vh] flex flex-col items-center justify-center p-8"
14
+ >
15
+ <div className="max-w-lg w-full text-center">
16
+ {/* Error Icon */}
17
+ <motion.div
18
+ initial={{ scale: 0 }}
19
+ animate={{ scale: 1 }}
20
+ transition={{ type: 'spring', duration: 0.5 }}
21
+ className="w-20 h-20 mx-auto rounded-full bg-red-500/20 flex items-center justify-center mb-6"
22
+ >
23
+ <XIcon size={40} className="text-red-400" />
24
+ </motion.div>
25
+
26
+ {/* Error Message */}
27
+ <h1 className="text-3xl font-display font-bold text-void-100 mb-4">
28
+ Generation Failed
29
+ </h1>
30
+
31
+ <div className="card bg-red-500/10 border-red-500/30 mb-8">
32
+ <p className="text-red-300 text-sm">
33
+ {error || 'An unexpected error occurred during video generation.'}
34
+ </p>
35
+ </div>
36
+
37
+ {/* Actions */}
38
+ <div className="flex flex-col sm:flex-row items-center justify-center gap-4">
39
+ <button
40
+ onClick={() => setStep('configuring')}
41
+ className="btn-primary flex items-center gap-2"
42
+ >
43
+ <RefreshIcon size={20} />
44
+ Try Again
45
+ </button>
46
+
47
+ <button
48
+ onClick={reset}
49
+ className="btn-secondary flex items-center gap-2"
50
+ >
51
+ <ArrowLeftIcon size={20} />
52
+ Start Over
53
+ </button>
54
+ </div>
55
+
56
+ {/* Help Text */}
57
+ <div className="mt-8 p-4 bg-void-900/50 rounded-xl border border-void-700">
58
+ <h3 className="text-sm font-semibold text-void-300 mb-2">Troubleshooting Tips</h3>
59
+ <ul className="text-xs text-void-400 space-y-1 text-left">
60
+ <li>• Check that your API keys are properly configured</li>
61
+ <li>• Ensure your image is under 10MB and in JPG/PNG format</li>
62
+ <li>• Try a shorter script if timeouts occur</li>
63
+ <li>• Check the backend server logs for more details</li>
64
+ </ul>
65
+ </div>
66
+ </div>
67
+ </motion.div>
68
+ );
69
+ };
70
+
frontend/src/components/GenerationComplete.tsx ADDED
@@ -0,0 +1,375 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import { motion } from 'framer-motion';
3
+ import { useGeneration } from '@/context/GenerationContext';
4
+ import { CheckIcon, DownloadIcon, PlayIcon, RefreshIcon, VideoIcon } from './Icons';
5
+ import { mergeVideos, ClipMetadata } from '@/utils/api';
6
+
7
+ export const GenerationComplete: React.FC = () => {
8
+ const { state, reset } = useGeneration();
9
+ const { generatedVideos, provider } = state;
10
+ const [playingIndex, setPlayingIndex] = useState<number | null>(null);
11
+ const [isMerging, setIsMerging] = useState(false);
12
+ const [mergeError, setMergeError] = useState<string | null>(null);
13
+ const [mergedVideoUrl, setMergedVideoUrl] = useState<string | null>(null);
14
+ const [isPlayingMerged, setIsPlayingMerged] = useState(false);
15
+
16
+ const accentColor = provider === 'kling' ? 'coral' : 'electric';
17
+
18
+ const handleDownload = async (video: typeof generatedVideos[0], index: number) => {
19
+ try {
20
+ const url = video.blobUrl || video.url;
21
+ const response = await fetch(url);
22
+ const blob = await response.blob();
23
+
24
+ const downloadUrl = URL.createObjectURL(blob);
25
+ const a = document.createElement('a');
26
+ a.href = downloadUrl;
27
+ a.download = `video-segment-${index + 1}.mp4`;
28
+ document.body.appendChild(a);
29
+ a.click();
30
+ document.body.removeChild(a);
31
+ URL.revokeObjectURL(downloadUrl);
32
+ } catch (error) {
33
+ console.error('Download failed:', error);
34
+ }
35
+ };
36
+
37
+ const handleDownloadAll = async () => {
38
+ for (let i = 0; i < generatedVideos.length; i++) {
39
+ await handleDownload(generatedVideos[i], i);
40
+ // Small delay between downloads
41
+ await new Promise(r => setTimeout(r, 500));
42
+ }
43
+ };
44
+
45
+ // Merge all videos into a single file
46
+ const handleMergeAndExport = async () => {
47
+ if (generatedVideos.length === 0) return;
48
+
49
+ setIsMerging(true);
50
+ setMergeError(null);
51
+
52
+ try {
53
+ // Collect all video blobs
54
+ const videoBlobs: Blob[] = [];
55
+ const clipMetadata: ClipMetadata[] = [];
56
+
57
+ for (let i = 0; i < generatedVideos.length; i++) {
58
+ const video = generatedVideos[i];
59
+ const url = video.blobUrl || video.url;
60
+
61
+ // Fetch blob from URL
62
+ const response = await fetch(url);
63
+ const blob = await response.blob();
64
+ videoBlobs.push(blob);
65
+
66
+ // Create clip metadata
67
+ // Use Whisper-detected trim point if available, otherwise use full duration
68
+ // No start trimming - keep full video from beginning
69
+ const trimStart = 0; // Always start from beginning (no overlap removal)
70
+ const trimEnd = video.trimPoint || video.duration; // Use Whisper trim point if available
71
+
72
+ clipMetadata.push({
73
+ index: i,
74
+ startTime: trimStart,
75
+ endTime: trimEnd,
76
+ type: 'video',
77
+ duration: trimEnd - trimStart,
78
+ });
79
+ }
80
+
81
+ console.log('🎬 Merging videos...', clipMetadata);
82
+
83
+ // Call merge API
84
+ const mergedBlob = await mergeVideos(videoBlobs, clipMetadata);
85
+
86
+ // Create URL for preview (don't auto-download)
87
+ const previewUrl = URL.createObjectURL(mergedBlob);
88
+ setMergedVideoUrl(previewUrl);
89
+
90
+ console.log('✅ Merged video ready for preview!');
91
+
92
+ } catch (error) {
93
+ console.error('Merge failed:', error);
94
+ setMergeError(error instanceof Error ? error.message : 'Failed to merge videos');
95
+ } finally {
96
+ setIsMerging(false);
97
+ }
98
+ };
99
+
100
+ // Download the merged video
101
+ const handleDownloadMerged = () => {
102
+ if (!mergedVideoUrl) return;
103
+
104
+ const a = document.createElement('a');
105
+ a.href = mergedVideoUrl;
106
+ a.download = `final-video-${Date.now()}.mp4`;
107
+ document.body.appendChild(a);
108
+ a.click();
109
+ document.body.removeChild(a);
110
+ };
111
+
112
+ return (
113
+ <motion.div
114
+ initial={{ opacity: 0 }}
115
+ animate={{ opacity: 1 }}
116
+ className="min-h-[60vh] flex flex-col items-center justify-center p-8"
117
+ >
118
+ <div className="max-w-4xl w-full">
119
+ {/* Success Header */}
120
+ <div className="text-center mb-12">
121
+ <motion.div
122
+ initial={{ scale: 0 }}
123
+ animate={{ scale: 1 }}
124
+ transition={{ type: 'spring', duration: 0.5 }}
125
+ className={`
126
+ w-20 h-20 mx-auto rounded-full flex items-center justify-center mb-6
127
+ ${accentColor === 'coral' ? 'bg-coral-500/20' : 'bg-electric-500/20'}
128
+ `}
129
+ >
130
+ <CheckIcon
131
+ size={40}
132
+ className={accentColor === 'coral' ? 'text-coral-400' : 'text-electric-400'}
133
+ />
134
+ </motion.div>
135
+
136
+ <motion.h1
137
+ initial={{ opacity: 0, y: 20 }}
138
+ animate={{ opacity: 1, y: 0 }}
139
+ transition={{ delay: 0.2 }}
140
+ className="text-4xl font-display font-bold mb-4"
141
+ >
142
+ <span className={accentColor === 'coral' ? 'gradient-text' : 'gradient-text-electric'}>
143
+ Generation Complete!
144
+ </span>
145
+ </motion.h1>
146
+
147
+ <motion.p
148
+ initial={{ opacity: 0 }}
149
+ animate={{ opacity: 1 }}
150
+ transition={{ delay: 0.3 }}
151
+ className="text-void-400 text-lg"
152
+ >
153
+ {generatedVideos.length} video{generatedVideos.length !== 1 ? 's' : ''} generated successfully
154
+ </motion.p>
155
+ </div>
156
+
157
+ {/* Video Grid */}
158
+ <motion.div
159
+ initial={{ opacity: 0, y: 20 }}
160
+ animate={{ opacity: 1, y: 0 }}
161
+ transition={{ delay: 0.4 }}
162
+ className="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 gap-6 mb-8"
163
+ >
164
+ {generatedVideos.map((video, index) => (
165
+ <div
166
+ key={video.id}
167
+ className="card group relative overflow-hidden"
168
+ >
169
+ {/* Video Preview */}
170
+ <div className="relative aspect-[9/16] bg-void-950 rounded-lg overflow-hidden mb-4">
171
+ <video
172
+ src={video.blobUrl || video.url}
173
+ className="w-full h-full object-cover"
174
+ controls={playingIndex === index}
175
+ poster={video.thumbnails[0]}
176
+ onEnded={() => setPlayingIndex(null)}
177
+ />
178
+
179
+ {playingIndex !== index && (
180
+ <button
181
+ onClick={() => setPlayingIndex(index)}
182
+ className="absolute inset-0 flex items-center justify-center bg-black/40 group-hover:bg-black/50 transition-colors"
183
+ >
184
+ <div className={`
185
+ w-14 h-14 rounded-full flex items-center justify-center
186
+ ${accentColor === 'coral' ? 'bg-coral-500' : 'bg-electric-500'}
187
+ group-hover:scale-110 transition-transform
188
+ `}>
189
+ <PlayIcon size={24} className="text-white ml-1" />
190
+ </div>
191
+ </button>
192
+ )}
193
+ </div>
194
+
195
+ {/* Video Info */}
196
+ <div className="flex items-center justify-between">
197
+ <div>
198
+ <h3 className="font-semibold text-void-200">
199
+ Segment {index + 1}
200
+ </h3>
201
+ <p className="text-xs text-void-400">
202
+ ~{Math.round(video.duration)}s duration
203
+ </p>
204
+ </div>
205
+ <button
206
+ onClick={() => handleDownload(video, index)}
207
+ className={`
208
+ p-2 rounded-lg transition-colors
209
+ ${accentColor === 'coral'
210
+ ? 'hover:bg-coral-500/20 text-coral-400'
211
+ : 'hover:bg-electric-500/20 text-electric-400'
212
+ }
213
+ `}
214
+ >
215
+ <DownloadIcon size={20} />
216
+ </button>
217
+ </div>
218
+ </div>
219
+ ))}
220
+ </motion.div>
221
+
222
+ {/* Merged Video Preview */}
223
+ {mergedVideoUrl && (
224
+ <motion.div
225
+ initial={{ opacity: 0, y: 20 }}
226
+ animate={{ opacity: 1, y: 0 }}
227
+ className="mb-8"
228
+ >
229
+ <div className={`card border-2 ${accentColor === 'coral' ? 'border-coral-500/50 bg-coral-500/5' : 'border-electric-500/50 bg-electric-500/5'}`}>
230
+ <div className="flex items-center gap-3 mb-4">
231
+ <div className={`p-2 rounded-lg ${accentColor === 'coral' ? 'bg-coral-500/20' : 'bg-electric-500/20'}`}>
232
+ <VideoIcon size={24} className={accentColor === 'coral' ? 'text-coral-400' : 'text-electric-400'} />
233
+ </div>
234
+ <div>
235
+ <h3 className="font-bold text-lg text-void-100">Final Exported Video</h3>
236
+ <p className="text-sm text-void-400">All segments merged into one video</p>
237
+ </div>
238
+ </div>
239
+
240
+ {/* Video Player */}
241
+ <div className="relative aspect-[9/16] max-w-md mx-auto bg-void-950 rounded-xl overflow-hidden mb-4">
242
+ <video
243
+ src={mergedVideoUrl}
244
+ className="w-full h-full object-contain"
245
+ controls={isPlayingMerged}
246
+ onEnded={() => setIsPlayingMerged(false)}
247
+ />
248
+
249
+ {!isPlayingMerged && (
250
+ <button
251
+ onClick={() => setIsPlayingMerged(true)}
252
+ className="absolute inset-0 flex items-center justify-center bg-black/40 hover:bg-black/50 transition-colors"
253
+ >
254
+ <div className={`
255
+ w-16 h-16 rounded-full flex items-center justify-center
256
+ ${accentColor === 'coral' ? 'bg-coral-500' : 'bg-electric-500'}
257
+ hover:scale-110 transition-transform shadow-lg
258
+ `}>
259
+ <PlayIcon size={28} className="text-white ml-1" />
260
+ </div>
261
+ </button>
262
+ )}
263
+ </div>
264
+
265
+ {/* Download Button */}
266
+ <button
267
+ onClick={handleDownloadMerged}
268
+ className={`
269
+ w-full flex items-center justify-center gap-2 py-3 font-semibold rounded-xl
270
+ ${accentColor === 'coral' ? 'btn-primary' : 'btn-electric'}
271
+ `}
272
+ >
273
+ <DownloadIcon size={20} />
274
+ Download Final Video
275
+ </button>
276
+ </div>
277
+ </motion.div>
278
+ )}
279
+
280
+ {/* Merge Error */}
281
+ {mergeError && (
282
+ <motion.div
283
+ initial={{ opacity: 0 }}
284
+ animate={{ opacity: 1 }}
285
+ className="mb-6 p-4 bg-red-500/10 border border-red-500/30 rounded-xl text-center"
286
+ >
287
+ <p className="text-red-300 text-sm">{mergeError}</p>
288
+ </motion.div>
289
+ )}
290
+
291
+ {/* Actions */}
292
+ <motion.div
293
+ initial={{ opacity: 0 }}
294
+ animate={{ opacity: 1 }}
295
+ transition={{ delay: 0.6 }}
296
+ className="flex flex-col sm:flex-row items-center justify-center gap-4"
297
+ >
298
+ {/* Primary: Merge & Export */}
299
+ {!mergedVideoUrl && (
300
+ <button
301
+ onClick={handleMergeAndExport}
302
+ disabled={isMerging || generatedVideos.length < 2}
303
+ className={`
304
+ flex items-center gap-2 px-6 py-3 font-semibold rounded-xl
305
+ ${accentColor === 'coral' ? 'btn-primary' : 'btn-electric'}
306
+ disabled:opacity-50 disabled:cursor-not-allowed
307
+ `}
308
+ >
309
+ {isMerging ? (
310
+ <>
311
+ <div className="w-5 h-5 border-2 border-white/30 border-t-white rounded-full animate-spin" />
312
+ <span>Merging...</span>
313
+ </>
314
+ ) : (
315
+ <>
316
+ <VideoIcon size={20} />
317
+ <span>Export Final Video</span>
318
+ </>
319
+ )}
320
+ </button>
321
+ )}
322
+
323
+ {/* Re-merge option if already merged */}
324
+ {mergedVideoUrl && (
325
+ <button
326
+ onClick={() => {
327
+ URL.revokeObjectURL(mergedVideoUrl);
328
+ setMergedVideoUrl(null);
329
+ handleMergeAndExport();
330
+ }}
331
+ disabled={isMerging}
332
+ className="btn-secondary flex items-center gap-2"
333
+ >
334
+ <RefreshIcon size={20} />
335
+ Re-merge Video
336
+ </button>
337
+ )}
338
+
339
+ {/* Secondary: Download All */}
340
+ <button
341
+ onClick={handleDownloadAll}
342
+ className="btn-secondary flex items-center gap-2"
343
+ >
344
+ <DownloadIcon size={20} />
345
+ Download Segments
346
+ </button>
347
+
348
+ {/* Tertiary: Generate More */}
349
+ <button
350
+ onClick={reset}
351
+ className="btn-secondary flex items-center gap-2"
352
+ >
353
+ <RefreshIcon size={20} />
354
+ Generate More
355
+ </button>
356
+ </motion.div>
357
+
358
+ {/* Tip */}
359
+ <motion.p
360
+ initial={{ opacity: 0 }}
361
+ animate={{ opacity: 1 }}
362
+ transition={{ delay: 0.8 }}
363
+ className="text-center text-void-500 text-sm mt-8"
364
+ >
365
+ {mergedVideoUrl
366
+ ? 'Your final video is ready! Download it or re-merge with different settings.'
367
+ : generatedVideos.length >= 2
368
+ ? '"Export Final Video" will merge all segments into a single video file with Whisper-optimized trim points.'
369
+ : 'Videos are ready to use in your video editor or social media.'
370
+ }
371
+ </motion.p>
372
+ </div>
373
+ </motion.div>
374
+ );
375
+ };
frontend/src/components/GenerationForm.tsx ADDED
@@ -0,0 +1,1362 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React, { useState, useCallback, useEffect, useRef } from 'react';
2
+ import { motion } from 'framer-motion';
3
+ import { useGeneration } from '@/context/GenerationContext';
4
+ import type { GenerationInputs, VideoProvider, GeneratedVideo, VeoSegment } from '@/types';
5
+ import {
6
+ SparklesIcon,
7
+ ArrowLeftIcon,
8
+ ImageIcon
9
+ } from './Icons';
10
+ import {
11
+ generatePrompts,
12
+ uploadImage,
13
+ klingGenerate,
14
+ klingExtend,
15
+ waitForKlingVideo,
16
+ generateVideoWithRetry,
17
+ downloadVideo,
18
+ getVideoDuration,
19
+ generateThumbnails,
20
+ replicateGenerate,
21
+ waitForReplicateVideo,
22
+ whisperAnalyzeAndExtract
23
+ } from '@/utils/api';
24
+
25
+ interface GenerationFormProps {
26
+ provider: VideoProvider;
27
+ onBack: () => void;
28
+ }
29
+
30
+ const voiceTypes = ['Deep', 'Warm', 'Crisp', 'None'];
31
+ const energyLevels = ['Low', 'Medium', 'High'];
32
+ const cameraStyles = ['Standard', 'Handheld', 'Steadicam', 'FPV Drone'];
33
+ const narrativeStyles = ['Standard', 'Documentary', 'Action', 'Introspective'];
34
+ const aspectRatios = ['9:16', '16:9', '1:1'];
35
+
36
+ // Generation modes
37
+ type GenerationMode = 'extend' | 'frame-continuity';
38
+
39
+ export const GenerationForm: React.FC<GenerationFormProps> = ({ provider, onBack }) => {
40
+ const { startGeneration, updateProgress, addVideo, setStep, setError, setRetryState, updateSegments, state } = useGeneration();
41
+ const { retryState, generatedVideos, segments } = state;
42
+
43
+ // Draft storage key
44
+ const draftKey = `video-gen-draft-${provider}`;
45
+
46
+ // Load draft on mount - initialize state from localStorage
47
+ const loadDraft = useCallback(() => {
48
+ try {
49
+ const savedDraft = localStorage.getItem(draftKey);
50
+ if (savedDraft) {
51
+ const draft = JSON.parse(savedDraft);
52
+ return draft;
53
+ }
54
+ } catch (error) {
55
+ console.warn('Failed to load draft:', error);
56
+ }
57
+ return null;
58
+ }, [draftKey]);
59
+
60
+ const draft = loadDraft();
61
+ const [draftRestored, setDraftRestored] = useState(!!draft);
62
+
63
+ const [formState, setFormState] = useState<GenerationInputs>(
64
+ draft?.formState || {
65
+ script: '',
66
+ style: '',
67
+ voiceType: 'Deep',
68
+ energyLevel: 'Medium',
69
+ cameraStyle: 'Standard',
70
+ narrativeStyle: 'Standard',
71
+ seedValue: 12005,
72
+ aspectRatio: '9:16',
73
+ model: provider === 'kling' ? 'veo3_fast' : 'google/veo-3',
74
+ }
75
+ );
76
+
77
+ const [imageFile, setImageFile] = useState<File | null>(null);
78
+ const [imagePreview, setImagePreview] = useState<string | null>(draft?.imagePreview || null);
79
+ const [isDragging, setIsDragging] = useState(false);
80
+ const [isGenerating, setIsGenerating] = useState(false);
81
+
82
+ // Generation mode selection
83
+ const [generationMode, setGenerationMode] = useState<GenerationMode>(draft?.generationMode || 'frame-continuity');
84
+
85
+ // Retry editing state
86
+ const [retryDialogue, setRetryDialogue] = useState('');
87
+ const [retryEnvironment, setRetryEnvironment] = useState('');
88
+ const [retryAction, setRetryAction] = useState('');
89
+
90
+ // Initialize retry fields when error occurs
91
+ useEffect(() => {
92
+ if (retryState && segments[retryState.failedSegmentIndex]) {
93
+ const seg = segments[retryState.failedSegmentIndex];
94
+ setRetryDialogue(seg.action_timeline?.dialogue || '');
95
+ setRetryEnvironment(seg.scene_continuity?.environment || '');
96
+ setRetryAction(seg.character_description?.current_state || '');
97
+ }
98
+ }, [retryState, segments]);
99
+
100
+ const handleRetrySubmit = () => {
101
+ if (!retryState) return;
102
+
103
+ const idx = retryState.failedSegmentIndex;
104
+ const updatedSegments = [...segments];
105
+
106
+ // Update the segment with edited values
107
+ if (updatedSegments[idx]) {
108
+ updatedSegments[idx] = {
109
+ ...updatedSegments[idx],
110
+ action_timeline: {
111
+ ...updatedSegments[idx].action_timeline,
112
+ dialogue: retryDialogue
113
+ },
114
+ scene_continuity: {
115
+ ...updatedSegments[idx].scene_continuity,
116
+ environment: retryEnvironment
117
+ },
118
+ character_description: {
119
+ ...updatedSegments[idx].character_description,
120
+ current_state: retryAction
121
+ }
122
+ };
123
+
124
+ updateSegments(updatedSegments);
125
+ }
126
+
127
+ // Clear error and resume
128
+ setRetryState(null);
129
+ setStep('generating_video');
130
+ setIsGenerating(true);
131
+
132
+ // Resume generation based on provider
133
+ if (provider === 'kling') {
134
+ if (generationMode === 'frame-continuity') {
135
+ handleKlingFrameContinuityFlow();
136
+ } else {
137
+ handleKlingExtendFlow();
138
+ }
139
+ } else {
140
+ handleReplicateGeneration();
141
+ }
142
+ };
143
+
144
+ const handleCancelRetry = () => {
145
+ setRetryState(null);
146
+ setIsGenerating(false);
147
+ };
148
+
149
+ // Show notification if draft was restored
150
+ useEffect(() => {
151
+ if (draftRestored) {
152
+ console.log('📝 Draft restored from localStorage');
153
+ // Auto-hide notification after 5 seconds
154
+ const timer = setTimeout(() => setDraftRestored(false), 5000);
155
+ return () => clearTimeout(timer);
156
+ }
157
+ }, [draftRestored]);
158
+
159
+ // Save draft whenever formState, imagePreview, or generationMode changes
160
+ // Skip saving on initial mount to avoid overwriting with default values
161
+ const isInitialMount = useRef(true);
162
+ useEffect(() => {
163
+ if (isInitialMount.current) {
164
+ isInitialMount.current = false;
165
+ return;
166
+ }
167
+
168
+ try {
169
+ const draft = {
170
+ formState,
171
+ imagePreview,
172
+ generationMode,
173
+ savedAt: new Date().toISOString(),
174
+ };
175
+ localStorage.setItem(draftKey, JSON.stringify(draft));
176
+ } catch (error) {
177
+ console.warn('Failed to save draft:', error);
178
+ }
179
+ }, [formState, imagePreview, generationMode, draftKey]);
180
+
181
+ // Clear draft function
182
+ const clearDraft = useCallback(() => {
183
+ try {
184
+ localStorage.removeItem(draftKey);
185
+ setDraftRestored(false);
186
+ console.log('🗑️ Draft cleared');
187
+ } catch (error) {
188
+ console.warn('Failed to clear draft:', error);
189
+ }
190
+ }, [draftKey]);
191
+
192
+ // Calculate estimated segments
193
+ const wordCount = formState.script.trim().split(/\s+/).filter(w => w).length;
194
+ const estimatedSegments = wordCount > 0 ? Math.max(1, Math.min(Math.ceil(wordCount / 17), 10)) : 0;
195
+
196
+ // Handle input changes
197
+ const handleChange = (e: React.ChangeEvent<HTMLInputElement | HTMLTextAreaElement | HTMLSelectElement>) => {
198
+ const { name, value } = e.target;
199
+ setFormState(prev => ({ ...prev, [name]: value }));
200
+ };
201
+
202
+ // Handle image upload
203
+ const handleImageUpload = useCallback((file: File) => {
204
+ if (file.type.startsWith('image/')) {
205
+ setImageFile(file);
206
+ const reader = new FileReader();
207
+ reader.onloadend = () => setImagePreview(reader.result as string);
208
+ reader.readAsDataURL(file);
209
+ }
210
+ }, []);
211
+
212
+ // Drag and drop handlers
213
+ const handleDragOver = (e: React.DragEvent) => {
214
+ e.preventDefault();
215
+ setIsDragging(true);
216
+ };
217
+
218
+ const handleDragLeave = () => setIsDragging(false);
219
+
220
+ const handleDrop = (e: React.DragEvent) => {
221
+ e.preventDefault();
222
+ setIsDragging(false);
223
+ const file = e.dataTransfer.files[0];
224
+ if (file) handleImageUpload(file);
225
+ };
226
+
227
  /**
   * Capture the final frame of a video blob as a JPEG File.
   *
   * Creates an off-DOM <video>, seeks to (duration - 0.1)s, draws the frame
   * onto a canvas, and encodes it as JPEG at 0.95 quality. The returned File
   * is used as the reference image for the next segment's generation.
   *
   * Rejects if the video fails to load, the 2D canvas context is unavailable,
   * or JPEG encoding produces no blob. The object URL is revoked on every
   * exit path. NOTE(review): there is no timeout — if neither onseeked nor
   * onerror fires, the promise never settles; consider a watchdog.
   */
  const extractLastFrame = async (videoBlob: Blob): Promise<File> => {
    return new Promise((resolve, reject) => {
      const video = document.createElement('video');
      video.preload = 'metadata';
      video.muted = true;
      video.src = URL.createObjectURL(videoBlob);

      video.onloadedmetadata = async () => {
        // Seek to near the end of the video; 0.1s back avoids landing past
        // the final decodable frame.
        const targetTime = Math.max(0, video.duration - 0.1);
        video.currentTime = targetTime;
      };

      video.onseeked = () => {
        // Create canvas and draw current frame at the video's native size.
        const canvas = document.createElement('canvas');
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;
        const ctx = canvas.getContext('2d');

        if (!ctx) {
          URL.revokeObjectURL(video.src);
          reject(new Error('Could not get canvas context'));
          return;
        }

        ctx.drawImage(video, 0, 0);

        // Convert to blob then to file
        canvas.toBlob((blob) => {
          URL.revokeObjectURL(video.src);

          if (!blob) {
            reject(new Error('Could not extract frame'));
            return;
          }

          const file = new File([blob], `frame-${Date.now()}.jpg`, { type: 'image/jpeg' });
          resolve(file);
        }, 'image/jpeg', 0.95);
      };

      video.onerror = () => {
        URL.revokeObjectURL(video.src);
        reject(new Error('Failed to load video for frame extraction'));
      };
    });
  };
276
+
277
+ // ============================================
278
+ // KIE GENERATION - FRAME CONTINUITY FLOW
279
+ // ============================================
280
+ // This mirrors the Replicate flow from standalone_video_creator.py:
281
+ // 1. Generate first video with original reference image
282
+ // 2. Extract last frame using the whisper analysis from generated video
283
+ // 3. Use that frame as reference for next segment
284
+ // 4. Repeat for all segments
285
+
286
+ const handleKlingFrameContinuityFlow = async () => {
287
+ if (!imageFile || !formState.script.trim()) return;
288
+
289
+ setIsGenerating(true);
290
+ setError(null);
291
+
292
+ try {
293
+ // Step 1: Generate prompts using GPT-4o
294
+ updateProgress('Analyzing script with GPT-4o...');
295
+
296
+ const formData = new FormData();
297
+ formData.append('script', formState.script);
298
+ formData.append('style', formState.style || 'clean, lifestyle UGC');
299
+ formData.append('jsonFormat', 'standard');
300
+ formData.append('continuationMode', 'true');
301
+ formData.append('voiceType', formState.voiceType || '');
302
+ formData.append('energyLevel', formState.energyLevel || '');
303
+ formData.append('settingMode', 'single');
304
+ formData.append('cameraStyle', formState.cameraStyle || '');
305
+ formData.append('narrativeStyle', formState.narrativeStyle || '');
306
+ formData.append('image', imageFile);
307
+
308
+ const payload = await generatePrompts(formData);
309
+
310
+ if (!payload?.segments?.length) {
311
+ throw new Error('No segments generated from script');
312
+ }
313
+
314
+ const segments = payload.segments;
315
+ updateProgress(`Generated ${segments.length} segments. Starting video generation...`);
316
+ startGeneration(segments);
317
+
318
+ // Track current reference image (starts with original)
319
+ let currentImageFile = imageFile;
320
+ const generatedVideos: GeneratedVideo[] = [];
321
+
322
+ // Step 2: Generate videos segment by segment with frame continuity
323
+ for (let i = 0; i < segments.length; i++) {
324
+ const segment = segments[i];
325
+ const isLastSegment = i === segments.length - 1;
326
+
327
+ updateProgress(
328
+ `Generating video ${i + 1} of ${segments.length}...${i > 0 ? ' (using last frame from previous)' : ''}`,
329
+ i,
330
+ segments.length
331
+ );
332
+
333
+ // Upload current reference image
334
+ updateProgress(`Uploading reference image for segment ${i + 1}...`);
335
+ const uploadResult = await uploadImage(currentImageFile);
336
+ const hostedImageUrl = uploadResult.url;
337
+
338
+ console.log(`🖼️ Segment ${i + 1} using image: ${i === 0 ? 'original' : 'last frame from previous'}`);
339
+
340
+ // Generate video with current reference image
341
+ updateProgress(`Submitting segment ${i + 1} to KIE Veo 3.1...`);
342
+ const generateResult = await klingGenerate({
343
+ prompt: segment,
344
+ imageUrls: [hostedImageUrl],
345
+ model: 'veo3_fast',
346
+ aspectRatio: formState.aspectRatio,
347
+ generationType: 'FIRST_AND_LAST_FRAMES_2_VIDEO',
348
+ seeds: formState.seedValue,
349
+ voiceType: formState.voiceType,
350
+ });
351
+
352
+ // Wait for completion
353
+ updateProgress(`Processing video ${i + 1}... (this may take 1-2 minutes)`);
354
+ const videoUrl = await waitForKlingVideo(generateResult.taskId);
355
+
356
+ // Download video
357
+ updateProgress(`Downloading video ${i + 1}...`);
358
+ const videoBlob = await downloadVideo(videoUrl);
359
+ const blobUrl = URL.createObjectURL(videoBlob);
360
+
361
+ // Get video duration
362
+ const videoFile = new File([videoBlob], `segment-${i + 1}.mp4`, { type: 'video/mp4' });
363
+ const duration = await getVideoDuration(videoFile);
364
+ const thumbnails = await generateThumbnails(videoFile);
365
+
366
+ // Use Whisper to find optimal trim point, extract frame, and get transcription
367
+ let trimPoint = duration; // Default to full duration
368
+ let transcribedText = ''; // What Whisper actually heard
369
+
370
+ if (!isLastSegment) {
371
+ updateProgress(`Analyzing video ${i + 1} with Whisper for optimal continuity...`);
372
+ try {
373
+ // Get dialogue from segment for Whisper analysis
374
+ const dialogue = segment.action_timeline?.dialogue || '';
375
+
376
+ const whisperResult = await whisperAnalyzeAndExtract({
377
+ video_url: videoUrl,
378
+ dialogue: dialogue,
379
+ buffer_time: 0.3,
380
+ model_size: 'base'
381
+ });
382
+
383
+ if (whisperResult.success && whisperResult.frame_base64) {
384
+ // Convert base64 frame to File for next segment
385
+ const base64Data = whisperResult.frame_base64.split(',')[1] || whisperResult.frame_base64;
386
+ const byteCharacters = atob(base64Data);
387
+ const byteNumbers = new Array(byteCharacters.length);
388
+ for (let j = 0; j < byteCharacters.length; j++) {
389
+ byteNumbers[j] = byteCharacters.charCodeAt(j);
390
+ }
391
+ const byteArray = new Uint8Array(byteNumbers);
392
+ const frameBlob = new Blob([byteArray], { type: 'image/jpeg' });
393
+ currentImageFile = new File([frameBlob], `whisper-frame-${i + 1}.jpg`, { type: 'image/jpeg' });
394
+
395
+ // Store trim point for later merge
396
+ if (whisperResult.trim_point) {
397
+ trimPoint = whisperResult.trim_point;
398
+ }
399
+
400
+ // Store transcribed text for prompt refinement
401
+ if (whisperResult.transcribed_text) {
402
+ transcribedText = whisperResult.transcribed_text;
403
+ console.log(`📝 Whisper transcription: "${transcribedText.substring(0, 100)}..."`);
404
+ }
405
+
406
+ console.log(`✅ Whisper: Last word at ${whisperResult.last_word_timestamp?.toFixed(2)}s, frame at ${whisperResult.frame_timestamp?.toFixed(2)}s, trim at ${trimPoint.toFixed(2)}s`);
407
+
408
+ // REFINE NEXT SEGMENT PROMPT with frame + transcription
409
+ const nextSegment = segments[i + 1];
410
+ if (nextSegment && currentImageFile) {
411
+ updateProgress(`Refining segment ${i + 2} prompt with visual and audio context...`);
412
+ try {
413
+ const { refinePromptWithContext } = await import('@/utils/api');
414
+ const refined = await refinePromptWithContext(
415
+ nextSegment,
416
+ currentImageFile,
417
+ transcribedText,
418
+ dialogue
419
+ );
420
+ // Update the next segment with refined prompt
421
+ segments[i + 1] = refined.refined_prompt as typeof nextSegment;
422
+ console.log(`✅ Refined segment ${i + 2} prompt for consistency`);
423
+ } catch (refineError) {
424
+ console.warn(`⚠️ Prompt refinement failed, using original:`, refineError);
425
+ }
426
+ }
427
+ } else {
428
+ // Fallback to simple last frame extraction
429
+ console.log(`⚠️ Whisper failed (${whisperResult.error}), falling back to last frame extraction`);
430
+ const lastFrameFile = await extractLastFrame(videoBlob);
431
+ currentImageFile = lastFrameFile;
432
+ }
433
+ } catch (frameError) {
434
+ console.error(`⚠️ Whisper analysis failed, using fallback:`, frameError);
435
+ try {
436
+ const lastFrameFile = await extractLastFrame(videoBlob);
437
+ currentImageFile = lastFrameFile;
438
+ } catch {
439
+ // Continue with current image if all extraction fails
440
+ }
441
+ }
442
+ }
443
+
444
+ // Add to generated videos with trim metadata
445
+ const generatedVideo: GeneratedVideo = {
446
+ id: `video-${Date.now()}-${i}`,
447
+ url: videoUrl,
448
+ blobUrl,
449
+ segment,
450
+ duration,
451
+ thumbnails,
452
+ trimPoint, // Store trim point for merge
453
+ };
454
+ generatedVideos.push(generatedVideo);
455
+ addVideo(generatedVideo);
456
+
457
+ updateProgress(`Completed video ${i + 1} of ${segments.length}`, i + 1, segments.length);
458
+ }
459
+
460
+ // All done!
461
+ clearDraft(); // Clear draft on successful generation
462
+ clearDraft(); // Clear draft on successful generation
463
+ setStep('completed');
464
+ updateProgress('All videos generated successfully!');
465
+
466
+ } catch (err) {
467
+ console.error('Generation error:', err);
468
+ const errorMessage = err instanceof Error ? err.message : 'Generation failed';
469
+
470
+ // Enable retry mode
471
+ setRetryState({
472
+ failedSegmentIndex: generatedVideos.length, // Current segment that failed
473
+ error: errorMessage
474
+ });
475
+ setStep('configuring'); // Go back to form, but with retry overlay
476
+
477
+ } finally {
478
+ setIsGenerating(false);
479
+ }
480
+ };
481
+
482
  // ============================================
  // KIE GENERATION - EXTEND API FLOW
  // ============================================
  // Original flow using KIE's extend API. Unlike the frame-continuity flow,
  // this generates the first segment from the reference image and then calls
  // the extend endpoint for each subsequent segment. Supports resuming from
  // a partial failure via the outer-scope `retryState`, `segments`, and
  // `generatedVideos` (context state), picking up at the first missing video.

  const handleKlingExtendFlow = async () => {
    if (!imageFile || !formState.script.trim()) return;

    setIsGenerating(true);
    setError(null);

    try {
      // Step 1: Generate prompts using GPT-4o.
      // NOTE(review): this FormData is also built in retry mode even though
      // generatePrompts is skipped there — harmless but redundant.
      updateProgress('Analyzing script with GPT-4o...');

      const formData = new FormData();
      formData.append('script', formState.script);
      formData.append('style', formState.style || 'clean, lifestyle UGC');
      formData.append('jsonFormat', 'standard');
      formData.append('continuationMode', 'true');
      formData.append('voiceType', formState.voiceType || '');
      formData.append('energyLevel', formState.energyLevel || '');
      formData.append('settingMode', 'single');
      formData.append('cameraStyle', formState.cameraStyle || '');
      formData.append('narrativeStyle', formState.narrativeStyle || '');
      formData.append('image', imageFile);

      // Use existing segments if retrying, otherwise generate new ones
      let payload: { segments: VeoSegment[] };
      if (retryState && segments.length > 0) {
        // Retry mode: use existing segments (they may have been edited)
        payload = { segments };
        updateProgress(`Using existing ${segments.length} segments for retry...`);
      } else {
        // Normal mode: generate new segments
        payload = await generatePrompts(formData);
        if (!payload?.segments?.length) {
          throw new Error('No segments generated from script');
        }
        updateProgress(`Generated ${payload.segments.length} segments. Starting video generation...`);
        startGeneration(payload.segments);
      }

      // Step 2: Upload reference image once (extend calls reuse the task chain)
      updateProgress('Uploading reference image...');
      const uploadResult = await uploadImage(imageFile);
      const hostedImageUrl = uploadResult.url;

      // Step 3: Generate videos (resume from where we left off if retrying).
      // startIndex counts already-completed videos from the previous attempt.
      const startIndex = generatedVideos.length;
      let currentTaskId: string | null = null;
      let currentImageUrl = hostedImageUrl; // Start with original image

      // If resuming, extract last frame from previous video for continuity
      if (startIndex > 0 && generatedVideos[startIndex - 1]?.blobUrl) {
        updateProgress(`Extracting last frame from segment ${startIndex} for continuity...`);
        try {
          const lastVideoBlob = await fetch(generatedVideos[startIndex - 1].blobUrl!).then(r => r.blob());
          const lastFrameFile = await extractLastFrame(lastVideoBlob);
          const frameUploadResult = await uploadImage(lastFrameFile);
          currentImageUrl = frameUploadResult.url;
          updateProgress(`Using frame from segment ${startIndex} for segment ${startIndex + 1}...`);
        } catch (frameError) {
          console.warn('Failed to extract frame, using original image:', frameError);
          // Continue with original image
        }
      }

      for (let i = startIndex; i < payload.segments.length; i++) {
        const segment = payload.segments[i];

        updateProgress(`Generating video ${i + 1} of ${payload.segments.length}...`, i, payload.segments.length);

        // Generate video with automatic retry (retries once on failure)
        updateProgress(`Processing video ${i + 1}... (this may take 1-2 minutes)`);
        const videoUrl = await generateVideoWithRetry(async () => {
          // NOTE(review): this condition is equivalent to `i === startIndex`
          // (first iteration of this run) — kept as written for clarity of
          // the two cases it was meant to cover.
          if (i === 0 || (i === startIndex && startIndex > 0)) {
            // First segment OR resuming after failure: use generate API with current image
            const generateResult = await klingGenerate({
              prompt: segment,
              imageUrls: [currentImageUrl],
              model: 'veo3_fast',
              aspectRatio: formState.aspectRatio,
              generationType: 'FIRST_AND_LAST_FRAMES_2_VIDEO',
              seeds: formState.seedValue,
              voiceType: formState.voiceType,
            });
            currentTaskId = generateResult.taskId;
            return generateResult;
          } else {
            // Subsequent segments: use extend API chained off the prior task
            const extendResult = await klingExtend(
              currentTaskId!,
              segment,
              formState.seedValue,
              formState.voiceType
            );
            currentTaskId = extendResult.taskId;
            return extendResult;
          }
        }, 300000, (attempt) => {
          updateProgress(`Retrying video ${i + 1}... (attempt ${attempt}/2)`);
        });

        // Download and save
        updateProgress(`Downloading video ${i + 1}...`);
        const videoBlob = await downloadVideo(videoUrl);
        const blobUrl = URL.createObjectURL(videoBlob);

        const videoFile = new File([videoBlob], `segment-${i + 1}.mp4`, { type: 'video/mp4' });
        const duration = await getVideoDuration(videoFile);
        const thumbnails = await generateThumbnails(videoFile);

        addVideo({
          id: `video-${Date.now()}-${i}`,
          url: videoUrl,
          blobUrl,
          segment,
          duration,
          thumbnails,
        });

        updateProgress(`Completed video ${i + 1} of ${payload.segments.length}`, i + 1, payload.segments.length);
      }

      clearDraft(); // Clear draft on successful generation
      setStep('completed');
      updateProgress('All videos generated successfully!');

    } catch (err) {
      console.error('Generation error:', err);
      const errorMessage = err instanceof Error ? err.message : 'Generation failed';

      // Enable retry mode; outer-scope generatedVideos reflects completed
      // segments because addVideo() updated it as each one finished.
      setRetryState({
        failedSegmentIndex: generatedVideos.length, // Current segment that failed
        error: errorMessage
      });
      setStep('configuring'); // Go back to form, but with retry overlay

    } finally {
      setIsGenerating(false);
    }
  };
626
+
627
+ // ============================================
628
+ // REPLICATE GENERATION - FRAME CONTINUITY FLOW
629
+ // ============================================
630
+ // This mirrors the approach from standalone_video_creator.py:
631
+ // 1. Generate prompts using GPT-4o
632
+ // 2. For each segment, generate video with current reference image
633
+ // 3. Extract last frame from generated video
634
+ // 4. Use that frame as reference for next segment
635
+ // 5. Result: Perfect visual continuity across all segments
636
+
637
+ const handleReplicateGeneration = async () => {
638
+ if (!formState.script.trim()) return;
639
+
640
+ setIsGenerating(true);
641
+ setError(null);
642
+
643
+ try {
644
+ // Step 1: Generate prompts using GPT-4o
645
+ // Note: Replicate can work without an image, but for consistency we encourage one
646
+ updateProgress('Analyzing script with GPT-4o...');
647
+
648
+ const formData = new FormData();
649
+ formData.append('script', formState.script);
650
+ formData.append('style', formState.style || 'clean, lifestyle UGC');
651
+ formData.append('jsonFormat', 'standard');
652
+ formData.append('continuationMode', 'true');
653
+ formData.append('voiceType', formState.voiceType || '');
654
+ formData.append('energyLevel', formState.energyLevel || '');
655
+ formData.append('settingMode', 'single');
656
+ formData.append('cameraStyle', formState.cameraStyle || '');
657
+ formData.append('narrativeStyle', formState.narrativeStyle || '');
658
+
659
+ // If image provided, include it for GPT-4o analysis
660
+ if (imageFile) {
661
+ formData.append('image', imageFile);
662
+ } else {
663
+ // Create a placeholder image for GPT-4o (it needs one for analysis)
664
+ // In production, you might want to handle this differently
665
+ const placeholderBlob = new Blob(['placeholder'], { type: 'image/jpeg' });
666
+ formData.append('image', placeholderBlob, 'placeholder.jpg');
667
+ }
668
+
669
+ const payload = await generatePrompts(formData);
670
+
671
+ if (!payload?.segments?.length) {
672
+ throw new Error('No segments generated from script');
673
+ }
674
+
675
+ const segments = payload.segments;
676
+ updateProgress(`Generated ${segments.length} segments. Starting Replicate generation...`);
677
+ startGeneration(segments);
678
+
679
+ // Track current reference image (starts with original if provided)
680
+ let currentImageFile = imageFile;
681
+ const generatedVideos: GeneratedVideo[] = [];
682
+
683
+ // Step 2: Generate videos segment by segment with frame continuity
684
+ for (let i = 0; i < segments.length; i++) {
685
+ const segment = segments[i];
686
+ const isLastSegment = i === segments.length - 1;
687
+
688
+ updateProgress(
689
+ `Generating video ${i + 1} of ${segments.length} with Replicate...${i > 0 ? ' (using last frame)' : ''}`,
690
+ i,
691
+ segments.length
692
+ );
693
+
694
+ // Convert structured segment to text prompt for Replicate
695
+ // Replicate models typically expect text prompts
696
+ const textPrompt = convertSegmentToTextPrompt(segment);
697
+
698
+ console.log(`🎬 Segment ${i + 1} prompt:`, textPrompt.substring(0, 100) + '...');
699
+
700
+ // Upload current reference image if available
701
+ let imageUrl: string | undefined;
702
+ if (currentImageFile) {
703
+ updateProgress(`Uploading reference image for segment ${i + 1}...`);
704
+ const uploadResult = await uploadImage(currentImageFile);
705
+ imageUrl = uploadResult.url;
706
+ console.log(`🖼️ Segment ${i + 1} using image: ${i === 0 ? 'original' : 'last frame from previous'}`);
707
+ }
708
+
709
+ // Generate video with Replicate
710
+ updateProgress(`Submitting segment ${i + 1} to Replicate...`);
711
+ const generateResult = await replicateGenerate({
712
+ prompt: textPrompt,
713
+ imageUrl: imageUrl,
714
+ model: formState.model || 'google/veo-3',
715
+ aspectRatio: formState.aspectRatio,
716
+ });
717
+
718
+ // Wait for completion (polling)
719
+ updateProgress(`Processing video ${i + 1}... (this may take 2-5 minutes)`);
720
+ const videoUrl = await waitForReplicateVideo(generateResult.id);
721
+
722
+ // Download video
723
+ updateProgress(`Downloading video ${i + 1}...`);
724
+ const videoBlob = await downloadVideo(videoUrl);
725
+ const blobUrl = URL.createObjectURL(videoBlob);
726
+
727
+ // Get video duration and thumbnails
728
+ const videoFile = new File([videoBlob], `segment-${i + 1}.mp4`, { type: 'video/mp4' });
729
+ const duration = await getVideoDuration(videoFile);
730
+ const thumbnails = await generateThumbnails(videoFile);
731
+
732
+ // Use Whisper to find optimal trim point, extract frame, and get transcription
733
+ // This is more accurate than extracting the very last frame
734
+ let trimPoint = duration; // Default to full duration
735
+ let transcribedText = ''; // What Whisper actually heard
736
+
737
+ if (!isLastSegment) {
738
+ updateProgress(`Analyzing video ${i + 1} with Whisper for optimal continuity...`);
739
+ try {
740
+ // Get dialogue from segment for Whisper analysis
741
+ const dialogue = segment.action_timeline?.dialogue || textPrompt;
742
+
743
+ const whisperResult = await whisperAnalyzeAndExtract({
744
+ video_url: videoUrl,
745
+ dialogue: dialogue,
746
+ buffer_time: 0.3,
747
+ model_size: 'base'
748
+ });
749
+
750
+ if (whisperResult.success && whisperResult.frame_base64) {
751
+ // Convert base64 frame to File for next segment
752
+ const base64Data = whisperResult.frame_base64.split(',')[1] || whisperResult.frame_base64;
753
+ const byteCharacters = atob(base64Data);
754
+ const byteNumbers = new Array(byteCharacters.length);
755
+ for (let j = 0; j < byteCharacters.length; j++) {
756
+ byteNumbers[j] = byteCharacters.charCodeAt(j);
757
+ }
758
+ const byteArray = new Uint8Array(byteNumbers);
759
+ const frameBlob = new Blob([byteArray], { type: 'image/jpeg' });
760
+ currentImageFile = new File([frameBlob], `whisper-frame-${i + 1}.jpg`, { type: 'image/jpeg' });
761
+
762
+ // Store trim point for later merge
763
+ if (whisperResult.trim_point) {
764
+ trimPoint = whisperResult.trim_point;
765
+ }
766
+
767
+ // Store transcribed text for prompt refinement
768
+ if (whisperResult.transcribed_text) {
769
+ transcribedText = whisperResult.transcribed_text;
770
+ console.log(`📝 Whisper transcription: "${transcribedText.substring(0, 100)}..."`);
771
+ }
772
+
773
+ console.log(`✅ Whisper: Last word at ${whisperResult.last_word_timestamp?.toFixed(2)}s, frame at ${whisperResult.frame_timestamp?.toFixed(2)}s, trim at ${trimPoint.toFixed(2)}s`);
774
+
775
+ // REFINE NEXT SEGMENT PROMPT with frame + transcription
776
+ const nextSegment = segments[i + 1];
777
+ if (nextSegment && currentImageFile) {
778
+ updateProgress(`Refining segment ${i + 2} prompt with visual and audio context...`);
779
+ try {
780
+ const { refinePromptWithContext } = await import('@/utils/api');
781
+ const refined = await refinePromptWithContext(
782
+ nextSegment,
783
+ currentImageFile,
784
+ transcribedText,
785
+ dialogue
786
+ );
787
+ // Update the next segment with refined prompt
788
+ segments[i + 1] = refined.refined_prompt as typeof nextSegment;
789
+ console.log(`✅ Refined segment ${i + 2} prompt for consistency`);
790
+ } catch (refineError) {
791
+ console.warn(`⚠️ Prompt refinement failed, using original:`, refineError);
792
+ }
793
+ }
794
+ } else {
795
+ // Fallback to simple last frame extraction
796
+ console.log(`⚠️ Whisper failed (${whisperResult.error}), falling back to last frame extraction`);
797
+ const lastFrameFile = await extractLastFrame(videoBlob);
798
+ currentImageFile = lastFrameFile;
799
+ }
800
+ } catch (frameError) {
801
+ console.error(`⚠️ Whisper analysis failed, using fallback:`, frameError);
802
+ try {
803
+ const lastFrameFile = await extractLastFrame(videoBlob);
804
+ currentImageFile = lastFrameFile;
805
+ } catch {
806
+ // Continue with current image if all extraction fails
807
+ }
808
+ }
809
+ }
810
+
811
+ // Add to generated videos with trim metadata
812
+ const generatedVideo: GeneratedVideo = {
813
+ id: `video-${Date.now()}-${i}`,
814
+ url: videoUrl,
815
+ blobUrl,
816
+ segment,
817
+ duration,
818
+ thumbnails,
819
+ trimPoint, // Store trim point for merge
820
+ };
821
+ generatedVideos.push(generatedVideo);
822
+ addVideo(generatedVideo);
823
+
824
+ updateProgress(`Completed video ${i + 1} of ${segments.length}`, i + 1, segments.length);
825
+ }
826
+
827
+ // All done!
828
+ setStep('completed');
829
+ updateProgress('All videos generated successfully with Replicate!');
830
+
831
+ } catch (err) {
832
+ console.error('Replicate generation error:', err);
833
+ const errorMessage = err instanceof Error ? err.message : 'Replicate generation failed';
834
+
835
+ // Enable retry mode
836
+ setRetryState({
837
+ failedSegmentIndex: generatedVideos.length, // Current segment that failed
838
+ error: errorMessage
839
+ });
840
+ setStep('configuring'); // Go back to form, but with retry overlay
841
+
842
+ } finally {
843
+ setIsGenerating(false);
844
+ }
845
+ };
846
+
847
+ // Helper: Convert structured segment JSON to text prompt for Replicate
848
+ // Replicate models typically expect plain text, not structured JSON
849
+ const convertSegmentToTextPrompt = (segment: VeoSegment): string => {
850
+ const parts: string[] = [];
851
+
852
+ // Extract dialogue
853
+ const dialogue = segment.action_timeline?.dialogue;
854
+ if (dialogue) {
855
+ parts.push(`"${dialogue}"`);
856
+ }
857
+
858
+ // Extract character description
859
+ const character = segment.character_description;
860
+ if (character?.current_state) {
861
+ parts.push(`Character: ${character.current_state}`);
862
+ }
863
+
864
+ // Extract scene description
865
+ const scene = segment.scene_continuity;
866
+ if (scene?.environment) {
867
+ parts.push(`Scene: ${scene.environment}`);
868
+ }
869
+ if (scene?.lighting_state) {
870
+ parts.push(`Lighting: ${scene.lighting_state}`);
871
+ }
872
+ if (scene?.camera_position) {
873
+ parts.push(`Camera: ${scene.camera_position}`);
874
+ }
875
+ if (scene?.camera_movement) {
876
+ parts.push(`Movement: ${scene.camera_movement}`);
877
+ }
878
+
879
+ // Extract synchronized actions
880
+ const syncedActions = segment.action_timeline?.synchronized_actions;
881
+ if (syncedActions) {
882
+ const actionsList = Object.entries(syncedActions)
883
+ .filter(([, value]) => value)
884
+ .map(([key, value]) => `${key}: ${value}`)
885
+ .join('; ');
886
+ if (actionsList) {
887
+ parts.push(`Actions: ${actionsList}`);
888
+ }
889
+ }
890
+
891
+ // Add instruction to not include captions/subtitles
892
+ parts.push('Do not include any captions, subtitles, or text overlays in the video');
893
+
894
+ return parts.join('. ');
895
+ };
896
+
897
+ // Main submit handler
898
+ const handleSubmit = (e: React.FormEvent) => {
899
+ e.preventDefault();
900
+
901
+ if (provider === 'kling') {
902
+ if (generationMode === 'frame-continuity') {
903
+ handleKlingFrameContinuityFlow();
904
+ } else {
905
+ handleKlingExtendFlow();
906
+ }
907
+ } else {
908
+ handleReplicateGeneration();
909
+ }
910
+ };
911
+
912
+ const isValid = provider === 'kling'
913
+ ? !!imageFile && formState.script.trim().length > 0
914
+ : formState.script.trim().length > 0;
915
+
916
+ return (
917
+ <motion.div
918
+ initial={{ opacity: 0, x: 20 }}
919
+ animate={{ opacity: 1, x: 0 }}
920
+ exit={{ opacity: 0, x: -20 }}
921
+ className="max-w-6xl mx-auto p-8"
922
+ >
923
+ {/* Header */}
924
+ <div className="flex items-center justify-between mb-8">
925
+ <div>
926
+ <button
927
+ onClick={onBack}
928
+ className="flex items-center gap-2 text-void-400 hover:text-void-200 transition-colors mb-4"
929
+ >
930
+ <ArrowLeftIcon size={20} />
931
+ <span>Back to providers</span>
932
+ </button>
933
+ <h1 className="text-3xl font-display font-bold">
934
+ <span className={provider === 'kling' ? 'gradient-text' : 'gradient-text-electric'}>
935
+ {provider === 'kling' ? 'KIE API' : 'Replicate'}
936
+ </span>
937
+ <span className="text-void-200"> Video Generation</span>
938
+ </h1>
939
+ <p className="text-void-400 mt-2">
940
+ {provider === 'kling'
941
+ ? 'Generate professional UGC videos with AI-powered segmentation'
942
+ : 'Create unique videos with open-source models'
943
+ }
944
+ </p>
945
+ </div>
946
+ </div>
947
+
948
+ {/* Retry Modal */}
949
+ {retryState && (
950
+ <div className="fixed inset-0 z-50 flex items-center justify-center bg-black/80 backdrop-blur-sm p-4">
951
+ <motion.div
952
+ initial={{ opacity: 0, scale: 0.95 }}
953
+ animate={{ opacity: 1, scale: 1 }}
954
+ className="bg-void-900 border border-void-700 rounded-2xl p-6 max-w-2xl w-full shadow-2xl overflow-y-auto max-h-[90vh]"
955
+ >
956
+ <div className="flex items-center gap-3 mb-4 text-red-400">
957
+ <svg className="w-6 h-6" fill="none" stroke="currentColor" viewBox="0 0 24 24">
958
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 9v2m0 4h.01m-6.938 4h13.856c1.54 0 2.502-1.667 1.732-3L13.732 4c-.77-1.333-2.694-1.333-3.464 0L3.34 16c-.77 1.333.192 3 1.732 3z" />
959
+ </svg>
960
+ <h3 className="text-xl font-bold">Generation Failed</h3>
961
+ </div>
962
+
963
+ <p className="text-void-300 mb-6 p-4 bg-void-800 rounded-lg border border-void-700">
964
+ Error at segment {retryState.failedSegmentIndex + 1}: <span className="text-red-300">{retryState.error}</span>
965
+ </p>
966
+
967
+ <div className="space-y-4 mb-8">
968
+ <h4 className="font-semibold text-void-200">Edit Segment {retryState.failedSegmentIndex + 1} to fix the issue:</h4>
969
+
970
+ <div>
971
+ <label className="block text-sm font-medium text-void-400 mb-1">Dialogue</label>
972
+ <textarea
973
+ value={retryDialogue}
974
+ onChange={(e) => setRetryDialogue(e.target.value)}
975
+ className="w-full bg-void-950 border border-void-700 rounded-lg p-3 text-void-100 h-24 focus:border-coral-500 focus:outline-none"
976
+ placeholder="Adjust dialogue..."
977
+ />
978
+ </div>
979
+
980
+ <div>
981
+ <label className="block text-sm font-medium text-void-400 mb-1">Action / Character State</label>
982
+ <textarea
983
+ value={retryAction}
984
+ onChange={(e) => setRetryAction(e.target.value)}
985
+ className="w-full bg-void-950 border border-void-700 rounded-lg p-3 text-void-100 h-24 focus:border-coral-500 focus:outline-none"
986
+ placeholder="Adjust action description..."
987
+ />
988
+ </div>
989
+
990
+ <div>
991
+ <label className="block text-sm font-medium text-void-400 mb-1">Environment</label>
992
+ <textarea
993
+ value={retryEnvironment}
994
+ onChange={(e) => setRetryEnvironment(e.target.value)}
995
+ className="w-full bg-void-950 border border-void-700 rounded-lg p-3 text-void-100 h-24 focus:border-coral-500 focus:outline-none"
996
+ placeholder="Adjust environment description..."
997
+ />
998
+ </div>
999
+ </div>
1000
+
1001
+ <div className="flex justify-end gap-3">
1002
+ <button
1003
+ onClick={handleCancelRetry}
1004
+ className="px-4 py-2 rounded-lg text-void-300 hover:text-white hover:bg-void-800 transition-colors"
1005
+ >
1006
+ Cancel
1007
+ </button>
1008
+ <button
1009
+ onClick={handleRetrySubmit}
1010
+ className="px-6 py-2 bg-gradient-to-r from-coral-500 to-coral-600 text-white font-semibold rounded-lg hover:from-coral-400 hover:to-coral-500 shadow-lg shadow-coral-500/20"
1011
+ >
1012
+ Retry Generation
1013
+ </button>
1014
+ </div>
1015
+ </motion.div>
1016
+ </div>
1017
+ )}
1018
+
1019
+ {/* Draft Restored Notification */}
1020
+ {draftRestored && (
1021
+ <motion.div
1022
+ initial={{ opacity: 0, y: -20 }}
1023
+ animate={{ opacity: 1, y: 0 }}
1024
+ exit={{ opacity: 0, y: -20 }}
1025
+ className="mb-6 p-4 bg-void-800/80 border border-void-600 rounded-xl flex items-center justify-between"
1026
+ >
1027
+ <div className="flex items-center gap-3">
1028
+ <div className="w-2 h-2 rounded-full bg-green-500 animate-pulse" />
1029
+ <div>
1030
+ <p className="text-sm font-medium text-void-200">Draft restored</p>
1031
+ <p className="text-xs text-void-400">Your previous inputs have been loaded</p>
1032
+ </div>
1033
+ </div>
1034
+ <button
1035
+ onClick={() => setDraftRestored(false)}
1036
+ className="text-void-400 hover:text-void-200 transition-colors"
1037
+ >
1038
+ <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
1039
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
1040
+ </svg>
1041
+ </button>
1042
+ </motion.div>
1043
+ )}
1044
+
1045
+ <form onSubmit={handleSubmit}>
1046
+ <div className="grid grid-cols-1 lg:grid-cols-2 gap-8">
1047
+ {/* Left Column - Script & Style */}
1048
+ <div className="space-y-6">
1049
+ {/* Script Input */}
1050
+ <div className="card">
1051
+ <label className="block text-sm font-semibold text-void-200 mb-3">
1052
+ Script <span className="text-coral-400">*</span>
1053
+ </label>
1054
+ <textarea
1055
+ name="script"
1056
+ value={formState.script}
1057
+ onChange={handleChange}
1058
+ rows={10}
1059
+ className="textarea-field font-mono text-sm"
1060
+ placeholder="Enter your complete video script here...
1061
+
1062
+ The AI will automatically analyze and segment your script into optimal video chunks, typically 8 seconds each."
1063
+ required
1064
+ />
1065
+ <div className="flex items-center justify-between mt-3">
1066
+ <p className="text-xs text-void-500">
1067
+ AI will automatically segment your script
1068
+ </p>
1069
+ {wordCount > 0 && (
1070
+ <div className="flex items-center gap-4 text-xs">
1071
+ <span className="text-void-400">{wordCount} words</span>
1072
+ <span className={`font-semibold ${provider === 'kling' ? 'text-coral-400' : 'text-electric-400'}`}>
1073
+ ~{estimatedSegments} segments
1074
+ </span>
1075
+ </div>
1076
+ )}
1077
+ </div>
1078
+ </div>
1079
+
1080
+ {/* Style Input */}
1081
+ <div className="card">
1082
+ <label className="block text-sm font-semibold text-void-200 mb-3">
1083
+ Visual Style
1084
+ </label>
1085
+ <textarea
1086
+ name="style"
1087
+ value={formState.style}
1088
+ onChange={handleChange}
1089
+ rows={3}
1090
+ className="textarea-field"
1091
+ placeholder="e.g., Cinematic, hyper-realistic, natural lighting, modern aesthetic, warm tones..."
1092
+ />
1093
+ </div>
1094
+
1095
+ {/* Generation Mode Selection (Kling only) */}
1096
+ {provider === 'kling' && (
1097
+ <div className="card border-2 border-coral-500/30 bg-coral-500/5">
1098
+ <h4 className="text-sm font-semibold text-void-200 mb-4">
1099
+ Generation Mode
1100
+ </h4>
1101
+ <div className="space-y-3">
1102
+ <label className="flex items-start gap-3 cursor-pointer group">
1103
+ <input
1104
+ type="radio"
1105
+ name="generationMode"
1106
+ value="frame-continuity"
1107
+ checked={generationMode === 'frame-continuity'}
1108
+ onChange={() => setGenerationMode('frame-continuity')}
1109
+ className="mt-1 w-4 h-4 accent-coral-500"
1110
+ />
1111
+ <div>
1112
+ <span className="text-sm font-medium text-void-100 group-hover:text-coral-400 transition-colors">
1113
+ 🎯 Frame Continuity (Recommended)
1114
+ </span>
1115
+ <p className="text-xs text-void-400 mt-0.5">
1116
+ Extract last frame from each video → Use as reference for next segment.
1117
+ <br />
1118
+ <span className="text-coral-400">Best for visual consistency across segments.</span>
1119
+ </p>
1120
+ </div>
1121
+ </label>
1122
+
1123
+ <label className="flex items-start gap-3 cursor-pointer group">
1124
+ <input
1125
+ type="radio"
1126
+ name="generationMode"
1127
+ value="extend"
1128
+ checked={generationMode === 'extend'}
1129
+ onChange={() => setGenerationMode('extend')}
1130
+ className="mt-1 w-4 h-4 accent-coral-500"
1131
+ />
1132
+ <div>
1133
+ <span className="text-sm font-medium text-void-100 group-hover:text-coral-400 transition-colors">
1134
+ ➕ Extend API
1135
+ </span>
1136
+ <p className="text-xs text-void-400 mt-0.5">
1137
+ Use KIE's native extend API for video continuation.
1138
+ <br />
1139
+ <span className="text-void-500">Faster but may have less visual consistency.</span>
1140
+ </p>
1141
+ </div>
1142
+ </label>
1143
+ </div>
1144
+ </div>
1145
+ )}
1146
+
1147
+ {/* Generation Preview */}
1148
+ {estimatedSegments > 0 && (
1149
+ <motion.div
1150
+ initial={{ opacity: 0, scale: 0.95 }}
1151
+ animate={{ opacity: 1, scale: 1 }}
1152
+ className={`card border-2 ${provider === 'kling' ? 'border-coral-500/30 bg-coral-500/5' : 'border-electric-500/30 bg-electric-500/5'}`}
1153
+ >
1154
+ <h4 className={`font-bold text-sm mb-3 ${provider === 'kling' ? 'text-coral-400' : 'text-electric-400'}`}>
1155
+ AI Analysis Preview
1156
+ </h4>
1157
+ <div className="grid grid-cols-2 gap-3 text-sm">
1158
+ <div className="flex justify-between">
1159
+ <span className="text-void-400">Words:</span>
1160
+ <span className="text-void-200 font-medium">{wordCount}</span>
1161
+ </div>
1162
+ <div className="flex justify-between">
1163
+ <span className="text-void-400">Segments:</span>
1164
+ <span className="text-void-200 font-medium">~{estimatedSegments}</span>
1165
+ </div>
1166
+ <div className="flex justify-between">
1167
+ <span className="text-void-400">Duration:</span>
1168
+ <span className="text-void-200 font-medium">~{estimatedSegments * 8}s</span>
1169
+ </div>
1170
+ <div className="flex justify-between">
1171
+ <span className="text-void-400">Mode:</span>
1172
+ <span className="text-void-200 font-medium">
1173
+ {generationMode === 'frame-continuity' ? 'Frame' : 'Extend'}
1174
+ </span>
1175
+ </div>
1176
+ </div>
1177
+ </motion.div>
1178
+ )}
1179
+ </div>
1180
+
1181
+ {/* Right Column - Image & Settings */}
1182
+ <div className="space-y-6">
1183
+ {/* Image Upload - Required for Kling, Optional for Replicate */}
1184
+ <div className="card">
1185
+ <label className="block text-sm font-semibold text-void-200 mb-3">
1186
+ Character Image {provider === 'kling' && <span className="text-coral-400">*</span>}
1187
+ {provider === 'replicate' && <span className="text-void-500 text-xs ml-2">(optional, for visual continuity)</span>}
1188
+ </label>
1189
+ <div
1190
+ onDragOver={handleDragOver}
1191
+ onDragLeave={handleDragLeave}
1192
+ onDrop={handleDrop}
1193
+ className={`dropzone flex flex-col items-center justify-center text-center min-h-[200px] ${isDragging ? 'active' : ''}`}
1194
+ >
1195
+ {imagePreview ? (
1196
+ <div className="relative">
1197
+ <img
1198
+ src={imagePreview}
1199
+ alt="Preview"
1200
+ className="max-h-40 rounded-lg shadow-lg"
1201
+ />
1202
+ <button
1203
+ type="button"
1204
+ onClick={() => { setImageFile(null); setImagePreview(null); }}
1205
+ className={`absolute -top-2 -right-2 w-6 h-6 rounded-full flex items-center justify-center text-white text-xs transition-colors ${
1206
+ provider === 'kling' ? 'bg-coral-500 hover:bg-coral-600' : 'bg-electric-500 hover:bg-electric-600'
1207
+ }`}
1208
+ >
1209
+ ×
1210
+ </button>
1211
+ </div>
1212
+ ) : (
1213
+ <>
1214
+ <ImageIcon className="text-void-500 mb-4" size={48} />
1215
+ <p className="text-void-300 mb-2">Drag and drop your image here</p>
1216
+ <p className="text-void-500 text-sm mb-4">or</p>
1217
+ <label className={`cursor-pointer ${provider === 'kling' ? 'btn-secondary' : 'btn-secondary-electric'}`}>
1218
+ <span>Browse Files</span>
1219
+ <input
1220
+ type="file"
1221
+ accept="image/*"
1222
+ className="hidden"
1223
+ onChange={(e) => e.target.files?.[0] && handleImageUpload(e.target.files[0])}
1224
+ />
1225
+ </label>
1226
+ </>
1227
+ )}
1228
+ </div>
1229
+ <p className="text-xs text-void-500 mt-2">
1230
+ {provider === 'kling'
1231
+ ? 'PNG, JPG up to 10MB. This image will be used as your character reference.'
1232
+ : 'PNG, JPG up to 10MB. Optional: Provides visual continuity across segments.'
1233
+ }
1234
+ </p>
1235
+ </div>
1236
+
1237
+ {/* Settings Grid */}
1238
+ <div className="card">
1239
+ <h4 className="text-sm font-semibold text-void-200 mb-4">Generation Settings</h4>
1240
+ <div className="grid grid-cols-2 gap-4">
1241
+ <div>
1242
+ <label className="block text-xs text-void-400 mb-1.5">Voice Type</label>
1243
+ <select
1244
+ name="voiceType"
1245
+ value={formState.voiceType}
1246
+ onChange={handleChange}
1247
+ className="select-field"
1248
+ >
1249
+ {voiceTypes.map(v => <option key={v} value={v}>{v}</option>)}
1250
+ </select>
1251
+ </div>
1252
+ <div>
1253
+ <label className="block text-xs text-void-400 mb-1.5">Energy Level</label>
1254
+ <select
1255
+ name="energyLevel"
1256
+ value={formState.energyLevel}
1257
+ onChange={handleChange}
1258
+ className="select-field"
1259
+ >
1260
+ {energyLevels.map(e => <option key={e} value={e}>{e}</option>)}
1261
+ </select>
1262
+ </div>
1263
+ <div>
1264
+ <label className="block text-xs text-void-400 mb-1.5">Camera Style</label>
1265
+ <select
1266
+ name="cameraStyle"
1267
+ value={formState.cameraStyle}
1268
+ onChange={handleChange}
1269
+ className="select-field"
1270
+ >
1271
+ {cameraStyles.map(c => <option key={c} value={c}>{c}</option>)}
1272
+ </select>
1273
+ </div>
1274
+ <div>
1275
+ <label className="block text-xs text-void-400 mb-1.5">Narrative</label>
1276
+ <select
1277
+ name="narrativeStyle"
1278
+ value={formState.narrativeStyle}
1279
+ onChange={handleChange}
1280
+ className="select-field"
1281
+ >
1282
+ {narrativeStyles.map(n => <option key={n} value={n}>{n}</option>)}
1283
+ </select>
1284
+ </div>
1285
+ <div>
1286
+ <label className="block text-xs text-void-400 mb-1.5">Aspect Ratio</label>
1287
+ <select
1288
+ name="aspectRatio"
1289
+ value={formState.aspectRatio}
1290
+ onChange={handleChange}
1291
+ className="select-field"
1292
+ >
1293
+ {aspectRatios.map(a => <option key={a} value={a}>{a}</option>)}
1294
+ </select>
1295
+ </div>
1296
+ {provider === 'kling' ? (
1297
+ <div>
1298
+ <label className="block text-xs text-void-400 mb-1.5">Seed Value</label>
1299
+ <input
1300
+ type="number"
1301
+ name="seedValue"
1302
+ value={formState.seedValue}
1303
+ onChange={handleChange}
1304
+ className="input-field"
1305
+ placeholder="12005"
1306
+ />
1307
+ </div>
1308
+ ) : (
1309
+ <div>
1310
+ <label className="block text-xs text-void-400 mb-1.5">Model</label>
1311
+ <select
1312
+ name="model"
1313
+ value={formState.model}
1314
+ onChange={handleChange}
1315
+ className="select-field"
1316
+ >
1317
+ <option value="google/veo-3">Google Veo 3</option>
1318
+ </select>
1319
+ </div>
1320
+ )}
1321
+ </div>
1322
+ <p className="text-xs text-void-500 mt-3">
1323
+ {provider === 'kling'
1324
+ ? 'Seed 12005 = Warm, flattering lighting. Use same seed for consistency.'
1325
+ : 'Google Veo 3 recommended for best quality and consistency.'
1326
+ }
1327
+ </p>
1328
+ </div>
1329
+
1330
+ {/* Submit Button */}
1331
+ <button
1332
+ type="submit"
1333
+ disabled={!isValid || isGenerating}
1334
+ className={`
1335
+ w-full py-4 font-semibold rounded-xl transition-all duration-300
1336
+ flex items-center justify-center gap-3
1337
+ ${provider === 'kling' ? 'btn-primary' : 'btn-electric'}
1338
+ disabled:opacity-50 disabled:cursor-not-allowed disabled:scale-100
1339
+ `}
1340
+ >
1341
+ {isGenerating ? (
1342
+ <>
1343
+ <div className="w-5 h-5 border-2 border-white/30 border-t-white rounded-full animate-spin" />
1344
+ <span>Generating...</span>
1345
+ </>
1346
+ ) : (
1347
+ <>
1348
+ <SparklesIcon size={20} />
1349
+ <span>
1350
+ Generate Video
1351
+ {provider === 'kling' && generationMode === 'frame-continuity' && ' (Frame Continuity)'}
1352
+ {provider === 'kling' && generationMode === 'extend' && ' (Extend API)'}
1353
+ </span>
1354
+ </>
1355
+ )}
1356
+ </button>
1357
+ </div>
1358
+ </div>
1359
+ </form>
1360
+ </motion.div>
1361
+ );
1362
+ };
frontend/src/components/GenerationProgress.tsx ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React, { useState, useEffect, useRef } from 'react';
2
+ import { motion, AnimatePresence } from 'framer-motion';
3
+ import { useGeneration } from '@/context/GenerationContext';
4
+
5
+ // Icons
6
+ const CheckIcon = () => (
7
+ <svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
8
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M5 13l4 4L19 7" />
9
+ </svg>
10
+ );
11
+
12
+ const SpinnerIcon = () => (
13
+ <svg className="w-4 h-4 animate-spin" fill="none" viewBox="0 0 24 24">
14
+ <circle className="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" strokeWidth="4" />
15
+ <path className="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z" />
16
+ </svg>
17
+ );
18
+
19
+ const ClockIcon = () => (
20
+ <svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
21
+ <circle cx="12" cy="12" r="10" strokeWidth={2} />
22
+ <path strokeLinecap="round" strokeWidth={2} d="M12 6v6l4 2" />
23
+ </svg>
24
+ );
25
+
26
+ const VideoIcon = () => (
27
+ <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
28
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M15 10l4.553-2.276A1 1 0 0121 8.618v6.764a1 1 0 01-1.447.894L15 14M5 18h8a2 2 0 002-2V8a2 2 0 00-2-2H5a2 2 0 00-2 2v8a2 2 0 002 2z" />
29
+ </svg>
30
+ );
31
+
32
+ const WaveformIcon = () => (
33
+ <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
34
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9 19V6l12-3v13M9 19c0 1.105-1.343 2-3 2s-3-.895-3-2 1.343-2 3-2 3 .895 3 2zm12-3c0 1.105-1.343 2-3 2s-3-.895-3-2 1.343-2 3-2 3 .895 3 2zM9 10l12-3" />
35
+ </svg>
36
+ );
37
+
38
+ const BrainIcon = () => (
39
+ <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
40
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9.663 17h4.673M12 3v1m6.364 1.636l-.707.707M21 12h-1M4 12H3m3.343-5.657l-.707-.707m2.828 9.9a5 5 0 117.072 0l-.548.547A3.374 3.374 0 0014 18.469V19a2 2 0 11-4 0v-.531c0-.895-.356-1.754-.988-2.386l-.548-.547z" />
41
+ </svg>
42
+ );
43
+
44
+ const DownloadIcon = () => (
45
+ <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
46
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M4 16v1a3 3 0 003 3h10a3 3 0 003-3v-1m-4-4l-4 4m0 0l-4-4m4 4V4" />
47
+ </svg>
48
+ );
49
+
50
+ const ImageIcon = () => (
51
+ <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
52
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M4 16l4.586-4.586a2 2 0 012.828 0L16 16m-2-2l1.586-1.586a2 2 0 012.828 0L20 14m-6-6h.01M6 20h12a2 2 0 002-2V6a2 2 0 00-2-2H6a2 2 0 00-2 2v12a2 2 0 002 2z" />
53
+ </svg>
54
+ );
55
+
56
+ interface ActivityLog {
57
+ id: string;
58
+ message: string;
59
+ timestamp: Date;
60
+ type: 'info' | 'success' | 'warning' | 'processing';
61
+ icon?: 'video' | 'audio' | 'brain' | 'download' | 'image';
62
+ }
63
+
64
+ export const GenerationProgress: React.FC = () => {
65
+ const { state } = useGeneration();
66
+ const { progress, provider, generatedVideos, segments } = state;
67
+
68
+ const [elapsedTime, setElapsedTime] = useState(0);
69
+ const [activityLog, setActivityLog] = useState<ActivityLog[]>([]);
70
+ const [startTime] = useState(() => Date.now());
71
+ const lastMessageRef = useRef<string>('');
72
+ const logContainerRef = useRef<HTMLDivElement>(null);
73
+
74
+ const percentage = progress.total > 0
75
+ ? Math.round((progress.current / progress.total) * 100)
76
+ : 0;
77
+
78
+ const accentColor = provider === 'kling' ? 'coral' : 'electric';
79
+ const accentClass = accentColor === 'coral' ? 'text-coral-400' : 'text-electric-400';
80
+ const accentBg = accentColor === 'coral' ? 'bg-coral-500' : 'bg-electric-500';
81
+ const accentBorder = accentColor === 'coral' ? 'border-coral-500' : 'border-electric-500';
82
+
83
+ // Update elapsed time
84
+ useEffect(() => {
85
+ const interval = setInterval(() => {
86
+ setElapsedTime(Math.floor((Date.now() - startTime) / 1000));
87
+ }, 1000);
88
+ return () => clearInterval(interval);
89
+ }, [startTime]);
90
+
91
+ // Track progress messages and add to activity log
92
+ useEffect(() => {
93
+ if (progress.message && progress.message !== lastMessageRef.current) {
94
+ lastMessageRef.current = progress.message;
95
+
96
+ // Determine log type and icon based on message content
97
+ let type: ActivityLog['type'] = 'info';
98
+ let icon: ActivityLog['icon'] = undefined;
99
+
100
+ const msg = progress.message.toLowerCase();
101
+
102
+ if (msg.includes('complete') || msg.includes('success') || msg.includes('✅')) {
103
+ type = 'success';
104
+ } else if (msg.includes('warning') || msg.includes('⚠️') || msg.includes('fallback')) {
105
+ type = 'warning';
106
+ } else if (msg.includes('generating') || msg.includes('processing') || msg.includes('submitting')) {
107
+ type = 'processing';
108
+ icon = 'video';
109
+ }
110
+
111
+ if (msg.includes('whisper') || msg.includes('audio') || msg.includes('transcri')) {
112
+ icon = 'audio';
113
+ } else if (msg.includes('prompt') || msg.includes('refin') || msg.includes('gpt')) {
114
+ icon = 'brain';
115
+ } else if (msg.includes('download')) {
116
+ icon = 'download';
117
+ } else if (msg.includes('image') || msg.includes('frame') || msg.includes('upload')) {
118
+ icon = 'image';
119
+ } else if (msg.includes('video') || msg.includes('segment')) {
120
+ icon = 'video';
121
+ }
122
+
123
+ const newLog: ActivityLog = {
124
+ id: `${Date.now()}-${Math.random()}`,
125
+ message: progress.message,
126
+ timestamp: new Date(),
127
+ type,
128
+ icon,
129
+ };
130
+
131
+ setActivityLog(prev => [...prev.slice(-20), newLog]); // Keep last 20 entries
132
+ }
133
+ }, [progress.message]);
134
+
135
+ // Auto-scroll activity log
136
+ useEffect(() => {
137
+ if (logContainerRef.current) {
138
+ logContainerRef.current.scrollTop = logContainerRef.current.scrollHeight;
139
+ }
140
+ }, [activityLog]);
141
+
142
+ // Format time
143
+ const formatTime = (seconds: number) => {
144
+ const mins = Math.floor(seconds / 60);
145
+ const secs = seconds % 60;
146
+ return `${mins}:${secs.toString().padStart(2, '0')}`;
147
+ };
148
+
149
+ // Estimate remaining time
150
+ const estimatedTotal = progress.current > 0
151
+ ? Math.round((elapsedTime / progress.current) * progress.total)
152
+ : 0;
153
+ const remainingTime = Math.max(0, estimatedTotal - elapsedTime);
154
+
155
+ // Get icon component
156
+ const getIcon = (iconType?: ActivityLog['icon']) => {
157
+ switch (iconType) {
158
+ case 'video': return <VideoIcon />;
159
+ case 'audio': return <WaveformIcon />;
160
+ case 'brain': return <BrainIcon />;
161
+ case 'download': return <DownloadIcon />;
162
+ case 'image': return <ImageIcon />;
163
+ default: return <SpinnerIcon />;
164
+ }
165
+ };
166
+
167
+ return (
168
+ <motion.div
169
+ initial={{ opacity: 0 }}
170
+ animate={{ opacity: 1 }}
171
+ className="min-h-[60vh] flex flex-col items-center justify-center p-4 md:p-8"
172
+ >
173
+ <div className="max-w-4xl w-full">
174
+ {/* Header with Time */}
175
+ <div className="flex items-center justify-between mb-8">
176
+ <div>
177
+ <h2 className="text-2xl font-bold text-void-100">Generating Videos</h2>
178
+ <p className="text-void-400 text-sm mt-1">
179
+ {provider === 'kling' ? 'Kling AI' : 'Replicate'} • {segments.length} segments
180
+ </p>
181
+ </div>
182
+ <div className="flex items-center gap-6 text-sm">
183
+ <div className="flex items-center gap-2 text-void-400">
184
+ <ClockIcon />
185
+ <span>Elapsed: <span className="text-void-200 font-mono">{formatTime(elapsedTime)}</span></span>
186
+ </div>
187
+ {progress.current > 0 && remainingTime > 0 && (
188
+ <div className="flex items-center gap-2 text-void-400">
189
+ <span>Est. remaining: <span className={`font-mono ${accentClass}`}>{formatTime(remainingTime)}</span></span>
190
+ </div>
191
+ )}
192
+ </div>
193
+ </div>
194
+
195
+ {/* Main Progress Area */}
196
+ <div className="grid grid-cols-1 lg:grid-cols-3 gap-6 mb-8">
197
+
198
+ {/* Left: Circular Progress */}
199
+ <div className="lg:col-span-1 flex flex-col items-center justify-center">
200
+ <div className="relative w-48 h-48">
201
+ {/* Background circle */}
202
+ <svg className="w-full h-full -rotate-90">
203
+ <circle
204
+ cx="96"
205
+ cy="96"
206
+ r="88"
207
+ fill="none"
208
+ stroke="currentColor"
209
+ strokeWidth="8"
210
+ className="text-void-800"
211
+ />
212
+ <motion.circle
213
+ cx="96"
214
+ cy="96"
215
+ r="88"
216
+ fill="none"
217
+ stroke={accentColor === 'coral' ? '#ff6b6b' : '#22b8cf'}
218
+ strokeWidth="8"
219
+ strokeLinecap="round"
220
+ initial={{ strokeDasharray: '0 553' }}
221
+ animate={{ strokeDasharray: `${(percentage / 100) * 553} 553` }}
222
+ transition={{ duration: 0.5, ease: 'easeOut' }}
223
+ />
224
+ </svg>
225
+
226
+ {/* Center content */}
227
+ <div className="absolute inset-0 flex flex-col items-center justify-center">
228
+ <span className={`text-5xl font-bold ${accentClass}`}>
229
+ {percentage}%
230
+ </span>
231
+ <span className="text-void-400 text-sm mt-1">
232
+ {generatedVideos.length} / {progress.total} videos
233
+ </span>
234
+ </div>
235
+ </div>
236
+
237
+ {/* Current Status */}
238
+ <motion.div
239
+ key={progress.message}
240
+ initial={{ opacity: 0, y: 5 }}
241
+ animate={{ opacity: 1, y: 0 }}
242
+ className="mt-6 text-center"
243
+ >
244
+ <p className="text-void-200 font-medium">{progress.message}</p>
245
+ </motion.div>
246
+ </div>
247
+
248
+ {/* Right: Segment Progress & Activity Log */}
249
+ <div className="lg:col-span-2 space-y-6">
250
+
251
+ {/* Segment Cards */}
252
+ <div className="card p-4">
253
+ <h3 className="text-sm font-semibold text-void-400 mb-4">Segment Progress</h3>
254
+ <div className="grid grid-cols-2 sm:grid-cols-3 md:grid-cols-4 gap-3">
255
+ {segments.map((segment, index) => {
256
+ const isCompleted = index < generatedVideos.length;
257
+ const isCurrent = index === generatedVideos.length && index < progress.total;
258
+
259
+ return (
260
+ <motion.div
261
+ key={index}
262
+ initial={{ opacity: 0, scale: 0.9 }}
263
+ animate={{ opacity: 1, scale: 1 }}
264
+ transition={{ delay: index * 0.05 }}
265
+ className={`
266
+ relative p-3 rounded-lg border-2 transition-all
267
+ ${isCompleted
268
+ ? `${accentBorder} bg-void-900`
269
+ : isCurrent
270
+ ? `${accentBorder} bg-void-800 animate-pulse-slow`
271
+ : 'border-void-700 bg-void-900/50'
272
+ }
273
+ `}
274
+ >
275
+ {/* Status indicator */}
276
+ <div className={`
277
+ absolute -top-1.5 -right-1.5 w-5 h-5 rounded-full flex items-center justify-center
278
+ ${isCompleted
279
+ ? accentBg + ' text-white'
280
+ : isCurrent
281
+ ? accentBg + ' text-white'
282
+ : 'bg-void-700 text-void-500'
283
+ }
284
+ `}>
285
+ {isCompleted ? <CheckIcon /> : isCurrent ? <SpinnerIcon /> : <span className="text-xs">{index + 1}</span>}
286
+ </div>
287
+
288
+ <div className="text-sm font-medium text-void-200">
289
+ Segment {index + 1}
290
+ </div>
291
+ <div className="text-xs text-void-500 mt-1 line-clamp-2">
292
+ {segment.action_timeline?.dialogue?.substring(0, 40) || 'Processing...'}...
293
+ </div>
294
+
295
+ {/* Thumbnail preview when completed */}
296
+ {isCompleted && generatedVideos[index]?.thumbnails?.[0] && (
297
+ <div className="mt-2 rounded overflow-hidden aspect-video bg-void-950">
298
+ <img
299
+ src={generatedVideos[index].thumbnails[0]}
300
+ alt={`Segment ${index + 1}`}
301
+ className="w-full h-full object-cover"
302
+ />
303
+ </div>
304
+ )}
305
+ </motion.div>
306
+ );
307
+ })}
308
+ </div>
309
+ </div>
310
+
311
+ {/* Activity Log */}
312
+ <div className="card p-4">
313
+ <h3 className="text-sm font-semibold text-void-400 mb-3 flex items-center gap-2">
314
+ <span className="w-2 h-2 rounded-full bg-green-500 animate-pulse" />
315
+ Live Activity
316
+ </h3>
317
+ <div
318
+ ref={logContainerRef}
319
+ className="h-48 overflow-y-auto space-y-2 scrollbar-thin scrollbar-thumb-void-600 scrollbar-track-void-800"
320
+ >
321
+ <AnimatePresence initial={false}>
322
+ {activityLog.map((log) => (
323
+ <motion.div
324
+ key={log.id}
325
+ initial={{ opacity: 0, x: -20 }}
326
+ animate={{ opacity: 1, x: 0 }}
327
+ exit={{ opacity: 0 }}
328
+ className={`
329
+ flex items-start gap-3 p-2 rounded-lg text-sm
330
+ ${log.type === 'success' ? 'bg-green-500/10 text-green-400' :
331
+ log.type === 'warning' ? 'bg-amber-500/10 text-amber-400' :
332
+ log.type === 'processing' ? `${accentColor === 'coral' ? 'bg-coral-500/10 text-coral-400' : 'bg-electric-500/10 text-electric-400'}` :
333
+ 'bg-void-800/50 text-void-300'
334
+ }
335
+ `}
336
+ >
337
+ <div className={`
338
+ flex-shrink-0 w-6 h-6 rounded flex items-center justify-center
339
+ ${log.type === 'success' ? 'bg-green-500/20' :
340
+ log.type === 'warning' ? 'bg-amber-500/20' :
341
+ log.type === 'processing' ? `${accentColor === 'coral' ? 'bg-coral-500/20' : 'bg-electric-500/20'}` :
342
+ 'bg-void-700'
343
+ }
344
+ `}>
345
+ {log.type === 'success' ? <CheckIcon /> : getIcon(log.icon)}
346
+ </div>
347
+ <div className="flex-1 min-w-0">
348
+ <p className="truncate">{log.message}</p>
349
+ <p className="text-xs opacity-60 mt-0.5">
350
+ {log.timestamp.toLocaleTimeString()}
351
+ </p>
352
+ </div>
353
+ </motion.div>
354
+ ))}
355
+ </AnimatePresence>
356
+
357
+ {activityLog.length === 0 && (
358
+ <div className="text-void-500 text-center py-8">
359
+ Waiting for activity...
360
+ </div>
361
+ )}
362
+ </div>
363
+ </div>
364
+ </div>
365
+ </div>
366
+
367
+ {/* Bottom Progress Bar */}
368
+ <div className="card p-4">
369
+ <div className="flex items-center justify-between mb-2">
370
+ <span className="text-sm text-void-400">Overall Progress</span>
371
+ <span className={`text-sm font-medium ${accentClass}`}>{percentage}%</span>
372
+ </div>
373
+ <div className="w-full bg-void-800 rounded-full h-3 overflow-hidden">
374
+ <motion.div
375
+ className={`h-full ${accentBg} relative`}
376
+ initial={{ width: 0 }}
377
+ animate={{ width: `${percentage}%` }}
378
+ transition={{ duration: 0.5 }}
379
+ >
380
+ {/* Shimmer effect */}
381
+ <div className="absolute inset-0 bg-gradient-to-r from-transparent via-white/20 to-transparent animate-shimmer" />
382
+ </motion.div>
383
+ </div>
384
+
385
+ {/* Step indicators */}
386
+ <div className="flex justify-between mt-4 text-xs text-void-500">
387
+ <div className={generatedVideos.length > 0 ? accentClass : ''}>
388
+ {generatedVideos.length > 0 ? '✓' : '○'} Generating
389
+ </div>
390
+ <div className={generatedVideos.length >= segments.length / 2 ? accentClass : ''}>
391
+ {generatedVideos.length >= segments.length / 2 ? '✓' : '○'} Halfway
392
+ </div>
393
+ <div className={generatedVideos.length === segments.length ? accentClass : ''}>
394
+ {generatedVideos.length === segments.length ? '✓' : '○'} Complete
395
+ </div>
396
+ </div>
397
+ </div>
398
+
399
+ {/* Tips */}
400
+ <motion.div
401
+ initial={{ opacity: 0 }}
402
+ animate={{ opacity: 1 }}
403
+ transition={{ delay: 0.5 }}
404
+ className="mt-6 text-center text-void-500 text-sm"
405
+ >
406
+ 💡 Each video typically takes 1-2 minutes. Stay on this page to track progress.
407
+ </motion.div>
408
+ </div>
409
+ </motion.div>
410
+ );
411
+ };
frontend/src/components/Icons.tsx ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React from 'react';
2
+
3
+ interface IconProps {
4
+ className?: string;
5
+ size?: number;
6
+ }
7
+
8
+ export const VideoIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
9
+ <svg
10
+ className={className}
11
+ width={size}
12
+ height={size}
13
+ viewBox="0 0 24 24"
14
+ fill="none"
15
+ stroke="currentColor"
16
+ strokeWidth="2"
17
+ strokeLinecap="round"
18
+ strokeLinejoin="round"
19
+ >
20
+ <polygon points="23 7 16 12 23 17 23 7" />
21
+ <rect x="1" y="5" width="15" height="14" rx="2" ry="2" />
22
+ </svg>
23
+ );
24
+
25
+ export const SparklesIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
26
+ <svg
27
+ className={className}
28
+ width={size}
29
+ height={size}
30
+ viewBox="0 0 24 24"
31
+ fill="none"
32
+ stroke="currentColor"
33
+ strokeWidth="2"
34
+ strokeLinecap="round"
35
+ strokeLinejoin="round"
36
+ >
37
+ <path d="M12 3l1.912 5.813a2 2 0 001.275 1.275L21 12l-5.813 1.912a2 2 0 00-1.275 1.275L12 21l-1.912-5.813a2 2 0 00-1.275-1.275L3 12l5.813-1.912a2 2 0 001.275-1.275L12 3z" />
38
+ </svg>
39
+ );
40
+
41
+ export const UploadIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
42
+ <svg
43
+ className={className}
44
+ width={size}
45
+ height={size}
46
+ viewBox="0 0 24 24"
47
+ fill="none"
48
+ stroke="currentColor"
49
+ strokeWidth="2"
50
+ strokeLinecap="round"
51
+ strokeLinejoin="round"
52
+ >
53
+ <path d="M21 15v4a2 2 0 01-2 2H5a2 2 0 01-2-2v-4" />
54
+ <polyline points="17 8 12 3 7 8" />
55
+ <line x1="12" y1="3" x2="12" y2="15" />
56
+ </svg>
57
+ );
58
+
59
+ export const CheckIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
60
+ <svg
61
+ className={className}
62
+ width={size}
63
+ height={size}
64
+ viewBox="0 0 24 24"
65
+ fill="none"
66
+ stroke="currentColor"
67
+ strokeWidth="2"
68
+ strokeLinecap="round"
69
+ strokeLinejoin="round"
70
+ >
71
+ <polyline points="20 6 9 17 4 12" />
72
+ </svg>
73
+ );
74
+
75
+ export const XIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
76
+ <svg
77
+ className={className}
78
+ width={size}
79
+ height={size}
80
+ viewBox="0 0 24 24"
81
+ fill="none"
82
+ stroke="currentColor"
83
+ strokeWidth="2"
84
+ strokeLinecap="round"
85
+ strokeLinejoin="round"
86
+ >
87
+ <line x1="18" y1="6" x2="6" y2="18" />
88
+ <line x1="6" y1="6" x2="18" y2="18" />
89
+ </svg>
90
+ );
91
+
92
+ export const ArrowRightIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
93
+ <svg
94
+ className={className}
95
+ width={size}
96
+ height={size}
97
+ viewBox="0 0 24 24"
98
+ fill="none"
99
+ stroke="currentColor"
100
+ strokeWidth="2"
101
+ strokeLinecap="round"
102
+ strokeLinejoin="round"
103
+ >
104
+ <line x1="5" y1="12" x2="19" y2="12" />
105
+ <polyline points="12 5 19 12 12 19" />
106
+ </svg>
107
+ );
108
+
109
+ export const ArrowLeftIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
110
+ <svg
111
+ className={className}
112
+ width={size}
113
+ height={size}
114
+ viewBox="0 0 24 24"
115
+ fill="none"
116
+ stroke="currentColor"
117
+ strokeWidth="2"
118
+ strokeLinecap="round"
119
+ strokeLinejoin="round"
120
+ >
121
+ <line x1="19" y1="12" x2="5" y2="12" />
122
+ <polyline points="12 19 5 12 12 5" />
123
+ </svg>
124
+ );
125
+
126
+ export const ImageIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
127
+ <svg
128
+ className={className}
129
+ width={size}
130
+ height={size}
131
+ viewBox="0 0 24 24"
132
+ fill="none"
133
+ stroke="currentColor"
134
+ strokeWidth="2"
135
+ strokeLinecap="round"
136
+ strokeLinejoin="round"
137
+ >
138
+ <rect x="3" y="3" width="18" height="18" rx="2" ry="2" />
139
+ <circle cx="8.5" cy="8.5" r="1.5" />
140
+ <polyline points="21 15 16 10 5 21" />
141
+ </svg>
142
+ );
143
+
144
+ export const SettingsIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
145
+ <svg
146
+ className={className}
147
+ width={size}
148
+ height={size}
149
+ viewBox="0 0 24 24"
150
+ fill="none"
151
+ stroke="currentColor"
152
+ strokeWidth="2"
153
+ strokeLinecap="round"
154
+ strokeLinejoin="round"
155
+ >
156
+ <circle cx="12" cy="12" r="3" />
157
+ <path d="M19.4 15a1.65 1.65 0 00.33 1.82l.06.06a2 2 0 010 2.83 2 2 0 01-2.83 0l-.06-.06a1.65 1.65 0 00-1.82-.33 1.65 1.65 0 00-1 1.51V21a2 2 0 01-2 2 2 2 0 01-2-2v-.09A1.65 1.65 0 009 19.4a1.65 1.65 0 00-1.82.33l-.06.06a2 2 0 01-2.83 0 2 2 0 010-2.83l.06-.06a1.65 1.65 0 00.33-1.82 1.65 1.65 0 00-1.51-1H3a2 2 0 01-2-2 2 2 0 012-2h.09A1.65 1.65 0 004.6 9a1.65 1.65 0 00-.33-1.82l-.06-.06a2 2 0 010-2.83 2 2 0 012.83 0l.06.06a1.65 1.65 0 001.82.33H9a1.65 1.65 0 001-1.51V3a2 2 0 012-2 2 2 0 012 2v.09a1.65 1.65 0 001 1.51 1.65 1.65 0 001.82-.33l.06-.06a2 2 0 012.83 0 2 2 0 010 2.83l-.06.06a1.65 1.65 0 00-.33 1.82V9a1.65 1.65 0 001.51 1H21a2 2 0 012 2 2 2 0 01-2 2h-.09a1.65 1.65 0 00-1.51 1z" />
158
+ </svg>
159
+ );
160
+
161
+ export const PlayIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
162
+ <svg
163
+ className={className}
164
+ width={size}
165
+ height={size}
166
+ viewBox="0 0 24 24"
167
+ fill="currentColor"
168
+ >
169
+ <polygon points="5 3 19 12 5 21 5 3" />
170
+ </svg>
171
+ );
172
+
173
+ export const RefreshIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
174
+ <svg
175
+ className={className}
176
+ width={size}
177
+ height={size}
178
+ viewBox="0 0 24 24"
179
+ fill="none"
180
+ stroke="currentColor"
181
+ strokeWidth="2"
182
+ strokeLinecap="round"
183
+ strokeLinejoin="round"
184
+ >
185
+ <polyline points="23 4 23 10 17 10" />
186
+ <polyline points="1 20 1 14 7 14" />
187
+ <path d="M3.51 9a9 9 0 0114.85-3.36L23 10M1 14l4.64 4.36A9 9 0 0020.49 15" />
188
+ </svg>
189
+ );
190
+
191
+ export const DownloadIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
192
+ <svg
193
+ className={className}
194
+ width={size}
195
+ height={size}
196
+ viewBox="0 0 24 24"
197
+ fill="none"
198
+ stroke="currentColor"
199
+ strokeWidth="2"
200
+ strokeLinecap="round"
201
+ strokeLinejoin="round"
202
+ >
203
+ <path d="M21 15v4a2 2 0 01-2 2H5a2 2 0 01-2-2v-4" />
204
+ <polyline points="7 10 12 15 17 10" />
205
+ <line x1="12" y1="15" x2="12" y2="3" />
206
+ </svg>
207
+ );
208
+
209
+ export const ZapIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
210
+ <svg
211
+ className={className}
212
+ width={size}
213
+ height={size}
214
+ viewBox="0 0 24 24"
215
+ fill="none"
216
+ stroke="currentColor"
217
+ strokeWidth="2"
218
+ strokeLinecap="round"
219
+ strokeLinejoin="round"
220
+ >
221
+ <polygon points="13 2 3 14 12 14 11 22 21 10 12 10 13 2" />
222
+ </svg>
223
+ );
224
+
225
+ export const BrainIcon: React.FC<IconProps> = ({ className = '', size = 24 }) => (
226
+ <svg
227
+ className={className}
228
+ width={size}
229
+ height={size}
230
+ viewBox="0 0 24 24"
231
+ fill="none"
232
+ stroke="currentColor"
233
+ strokeWidth="2"
234
+ strokeLinecap="round"
235
+ strokeLinejoin="round"
236
+ >
237
+ <path d="M9.5 2A2.5 2.5 0 0112 4.5v15a2.5 2.5 0 01-4.96.44 2.5 2.5 0 01-2.96-3.08 3 3 0 01-.34-5.58 2.5 2.5 0 011.32-4.24 2.5 2.5 0 011.98-3A2.5 2.5 0 019.5 2z" />
238
+ <path d="M14.5 2A2.5 2.5 0 0012 4.5v15a2.5 2.5 0 004.96.44 2.5 2.5 0 002.96-3.08 3 3 0 00.34-5.58 2.5 2.5 0 00-1.32-4.24 2.5 2.5 0 00-1.98-3A2.5 2.5 0 0014.5 2z" />
239
+ </svg>
240
+ );
241
+
242
+ // Logo icon for the app
243
+ export const LogoIcon: React.FC<IconProps> = ({ className = '', size = 40 }) => (
244
+ <svg
245
+ className={className}
246
+ width={size}
247
+ height={size}
248
+ viewBox="0 0 40 40"
249
+ fill="none"
250
+ >
251
+ <defs>
252
+ <linearGradient id="logoGradient" x1="0%" y1="0%" x2="100%" y2="100%">
253
+ <stop offset="0%" stopColor="#ff6b6b" />
254
+ <stop offset="100%" stopColor="#22b8cf" />
255
+ </linearGradient>
256
+ </defs>
257
+ <rect x="2" y="2" width="36" height="36" rx="8" fill="url(#logoGradient)" />
258
+ <polygon points="16 12 16 28 28 20" fill="white" />
259
+ <circle cx="12" cy="12" r="3" fill="white" opacity="0.8" />
260
+ <circle cx="28" cy="28" r="3" fill="white" opacity="0.8" />
261
+ </svg>
262
+ );
263
+
264
+ // Kling logo placeholder
265
+ export const KlingLogo: React.FC<IconProps> = ({ className = '', size = 48 }) => (
266
+ <div className={`${className} flex items-center justify-center`} style={{ width: size, height: size }}>
267
+ <span className="text-3xl font-bold gradient-text">K</span>
268
+ </div>
269
+ );
270
+
271
+ // Replicate logo placeholder
272
+ export const ReplicateLogo: React.FC<IconProps> = ({ className = '', size = 48 }) => (
273
+ <div className={`${className} flex items-center justify-center`} style={{ width: size, height: size }}>
274
+ <span className="text-3xl font-bold gradient-text-electric">R</span>
275
+ </div>
276
+ );
277
+
frontend/src/components/Login.tsx ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState, useEffect } from 'react';
2
+ import { motion } from 'framer-motion';
3
+ import { useAuth } from '@/context/AuthContext';
4
+ import { LogoIcon } from './Icons';
5
+
6
+ export function Login() {
7
+ const { login } = useAuth();
8
+ const [username, setUsername] = useState('');
9
+ const [password, setPassword] = useState('');
10
+ const [error, setError] = useState<string | null>(null);
11
+ const [loading, setLoading] = useState(false);
12
+
13
+ // Debug: Log error state changes
14
+ useEffect(() => {
15
+ if (error) {
16
+ console.log('Error state updated:', error);
17
+ }
18
+ }, [error]);
19
+
20
+ const handleSubmit = async (e: React.FormEvent) => {
21
+ e.preventDefault();
22
+ setError(null);
23
+ setLoading(true);
24
+
25
+ try {
26
+ await login({ username, password });
27
+ } catch (err) {
28
+ // Extract error message from the error
29
+ let errorMessage = 'Login failed. Please check your credentials.';
30
+
31
+ if (err instanceof Error) {
32
+ errorMessage = err.message || errorMessage;
33
+ } else if (typeof err === 'string') {
34
+ errorMessage = err;
35
+ }
36
+
37
+ console.error('Login error:', err);
38
+ console.log('Setting error message:', errorMessage);
39
+ setError(errorMessage);
40
+ } finally {
41
+ setLoading(false);
42
+ }
43
+ };
44
+
45
+ return (
46
+ <div className="min-h-screen flex items-center justify-center bg-mesh-pattern p-6">
47
+ <motion.div
48
+ initial={{ opacity: 0, y: 20 }}
49
+ animate={{ opacity: 1, y: 0 }}
50
+ transition={{ duration: 0.5 }}
51
+ className="w-full max-w-md"
52
+ >
53
+ <div className="glass-dark rounded-2xl p-8 shadow-2xl">
54
+ {/* Logo and Title */}
55
+ <div className="text-center mb-8">
56
+ <div className="flex justify-center mb-4">
57
+ <LogoIcon size={64} />
58
+ </div>
59
+ <h1 className="text-3xl font-display font-bold text-void-100 mb-2">
60
+ Video AdGenesis
61
+ </h1>
62
+ <p className="text-void-400">Studio</p>
63
+ <p className="text-sm text-void-500 mt-4">
64
+ Please sign in to continue
65
+ </p>
66
+ </div>
67
+
68
+ {/* Login Form */}
69
+ <form onSubmit={handleSubmit} className="space-y-6">
70
+ {/* Username Field */}
71
+ <div>
72
+ <label htmlFor="username" className="block text-sm font-medium text-void-300 mb-2">
73
+ Username
74
+ </label>
75
+ <input
76
+ id="username"
77
+ type="text"
78
+ value={username}
79
+ onChange={(e) => {
80
+ setUsername(e.target.value);
81
+ if (error) setError(null);
82
+ }}
83
+ required
84
+ className="w-full px-4 py-3 bg-void-900/50 border border-void-700 rounded-lg
85
+ text-void-100 placeholder-void-500 focus:outline-none focus:ring-2
86
+ focus:ring-coral-500 focus:border-transparent transition-all"
87
+ placeholder="Enter your username"
88
+ autoComplete="username"
89
+ />
90
+ </div>
91
+
92
+ {/* Password Field */}
93
+ <div>
94
+ <label htmlFor="password" className="block text-sm font-medium text-void-300 mb-2">
95
+ Password
96
+ </label>
97
+ <input
98
+ id="password"
99
+ type="password"
100
+ value={password}
101
+ onChange={(e) => {
102
+ setPassword(e.target.value);
103
+ if (error) setError(null);
104
+ }}
105
+ required
106
+ className="w-full px-4 py-3 bg-void-900/50 border border-void-700 rounded-lg
107
+ text-void-100 placeholder-void-500 focus:outline-none focus:ring-2
108
+ focus:ring-coral-500 focus:border-transparent transition-all"
109
+ placeholder="Enter your password"
110
+ autoComplete="current-password"
111
+ />
112
+ </div>
113
+
114
+ {/* Error Message */}
115
+ {error ? (
116
+ <motion.div
117
+ key={`error-${error}`}
118
+ initial={{ opacity: 0, y: -10, scale: 0.95 }}
119
+ animate={{ opacity: 1, y: 0, scale: 1 }}
120
+ exit={{ opacity: 0, scale: 0.95 }}
121
+ transition={{ duration: 0.2 }}
122
+ className="p-4 bg-red-500/40 border-2 border-red-500 rounded-lg text-red-100 text-sm flex items-start gap-3 shadow-lg backdrop-blur-sm"
123
+ role="alert"
124
+ aria-live="assertive"
125
+ style={{ zIndex: 10 }}
126
+ >
127
+ <svg
128
+ className="w-5 h-5 text-red-300 flex-shrink-0 mt-0.5"
129
+ fill="none"
130
+ stroke="currentColor"
131
+ viewBox="0 0 24 24"
132
+ >
133
+ <path
134
+ strokeLinecap="round"
135
+ strokeLinejoin="round"
136
+ strokeWidth={2}
137
+ d="M12 8v4m0 4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z"
138
+ />
139
+ </svg>
140
+ <span className="flex-1 font-semibold">{error}</span>
141
+ </motion.div>
142
+ ) : null}
143
+
144
+ {/* Submit Button */}
145
+ <button
146
+ type="submit"
147
+ disabled={loading}
148
+ className="w-full py-3 px-4 bg-gradient-to-r from-coral-500 to-coral-600
149
+ text-white font-semibold rounded-lg hover:from-coral-600 hover:to-coral-700
150
+ focus:outline-none focus:ring-2 focus:ring-coral-500 focus:ring-offset-2
151
+ focus:ring-offset-void-900 disabled:opacity-50 disabled:cursor-not-allowed
152
+ transition-all transform hover:scale-[1.02] active:scale-[0.98]"
153
+ >
154
+ {loading ? (
155
+ <span className="flex items-center justify-center gap-2">
156
+ <svg className="animate-spin h-5 w-5" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24">
157
+ <circle className="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" strokeWidth="4"></circle>
158
+ <path className="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path>
159
+ </svg>
160
+ Signing in...
161
+ </span>
162
+ ) : (
163
+ 'Sign In'
164
+ )}
165
+ </button>
166
+ </form>
167
+
168
+ {/* Footer */}
169
+ <div className="mt-6 text-center">
170
+ <p className="text-xs text-void-500">
171
+ Restricted access - Authorized users only
172
+ </p>
173
+ </div>
174
+ </div>
175
+ </motion.div>
176
+ </div>
177
+ );
178
+ }
179
+
frontend/src/components/ProviderSelect.tsx ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React from 'react';
2
+ import { motion } from 'framer-motion';
3
+ import { VideoProvider } from '@/types';
4
+ import { ZapIcon, BrainIcon, CheckIcon } from './Icons';
5
+
6
+ interface ProviderSelectProps {
7
+ onSelect: (provider: VideoProvider) => void;
8
+ }
9
+
10
+ const providers = [
11
+ {
12
+ id: 'kling' as VideoProvider,
13
+ name: 'Kie API',
14
+ subtitle: '(cheap use less credits)',
15
+ description: 'High-quality video generation with advanced continuity. Perfect for professional UGC and talking head videos.',
16
+ icon: ZapIcon,
17
+ color: 'coral',
18
+ features: [
19
+ 'Veo 3.1 Fast Model',
20
+ 'Image-to-Video & Text-to-Video',
21
+ 'Video Extension (Seamless)',
22
+ 'GPT-4o Prompt Generation',
23
+ '9:16 Portrait Format',
24
+ 'Voice Type Selection',
25
+ ],
26
+ badge: 'Recommended',
27
+ },
28
+ {
29
+ id: 'replicate' as VideoProvider,
30
+ name: 'Replicate API',
31
+ subtitle: '(more credits, more expensive)',
32
+ description: 'Access to diverse video generation models. Great for experimentation and unique creative styles.',
33
+ icon: BrainIcon,
34
+ color: 'electric',
35
+ features: [
36
+ 'Multiple Model Options',
37
+ 'Flexible Duration',
38
+ 'Creative Styles',
39
+ 'Cost Effective',
40
+ 'API Simplicity',
41
+ 'Community Models',
42
+ ],
43
+ badge: 'Flexible',
44
+ },
45
+ ];
46
+
47
+ export const ProviderSelect: React.FC<ProviderSelectProps> = ({ onSelect }) => {
48
+ return (
49
+ <div className="min-h-[70vh] flex flex-col items-center justify-center p-8">
50
+ {/* Hero Section */}
51
+ <motion.div
52
+ className="text-center mb-16"
53
+ initial={{ opacity: 0, y: -20 }}
54
+ animate={{ opacity: 1, y: 0 }}
55
+ transition={{ duration: 0.6 }}
56
+ >
57
+ <h1 className="text-5xl md:text-6xl font-display font-bold mb-4">
58
+ <span className="gradient-text">Video AdGenesis</span>
59
+ <span className="text-void-200"> Studio</span>
60
+ </h1>
61
+ <p className="text-xl text-void-400 max-w-2xl mx-auto">
62
+ Transform your scripts into stunning videos with AI. Choose your preferred generation engine to get started.
63
+ </p>
64
+ </motion.div>
65
+
66
+ {/* Provider Cards */}
67
+ <div className="grid grid-cols-1 md:grid-cols-2 gap-8 max-w-5xl w-full">
68
+ {providers.map((provider, index) => (
69
+ <motion.div
70
+ key={provider.id}
71
+ initial={{ opacity: 0, y: 30 }}
72
+ animate={{ opacity: 1, y: 0 }}
73
+ transition={{ duration: 0.5, delay: index * 0.15 }}
74
+ >
75
+ <button
76
+ onClick={() => onSelect(provider.id)}
77
+ className={`
78
+ w-full text-left provider-card card-interactive
79
+ ${provider.color === 'coral' ? 'hover:border-coral-500/50 hover:glow-coral' : 'hover:border-electric-500/50 hover:glow-electric'}
80
+ group transition-all duration-500
81
+ `}
82
+ >
83
+ {/* Badge */}
84
+ <div className="flex items-start justify-between mb-6">
85
+ <div className={`
86
+ w-16 h-16 rounded-2xl flex items-center justify-center
87
+ ${provider.color === 'coral'
88
+ ? 'bg-coral-500/10 text-coral-400 group-hover:bg-coral-500/20'
89
+ : 'bg-electric-500/10 text-electric-400 group-hover:bg-electric-500/20'
90
+ }
91
+ transition-colors duration-300
92
+ `}>
93
+ <provider.icon size={32} />
94
+ </div>
95
+ <span className={`
96
+ px-3 py-1 rounded-full text-xs font-semibold
97
+ ${provider.color === 'coral'
98
+ ? 'bg-coral-500/20 text-coral-300'
99
+ : 'bg-electric-500/20 text-electric-300'
100
+ }
101
+ `}>
102
+ {provider.badge}
103
+ </span>
104
+ </div>
105
+
106
+ {/* Title & Description */}
107
+ <h2 className="text-2xl font-display font-bold text-void-100 mb-1">
108
+ {provider.name}
109
+ </h2>
110
+ <p className={`text-sm mb-3 ${provider.color === 'coral' ? 'text-coral-400' : 'text-electric-400'}`}>
111
+ {provider.subtitle}
112
+ </p>
113
+ <p className="text-void-400 text-sm mb-6 leading-relaxed">
114
+ {provider.description}
115
+ </p>
116
+
117
+ {/* Features */}
118
+ <div className="grid grid-cols-2 gap-2">
119
+ {provider.features.map((feature, i) => (
120
+ <div key={i} className="flex items-center gap-2">
121
+ <CheckIcon
122
+ size={14}
123
+ className={provider.color === 'coral' ? 'text-coral-400' : 'text-electric-400'}
124
+ />
125
+ <span className="text-xs text-void-300">{feature}</span>
126
+ </div>
127
+ ))}
128
+ </div>
129
+
130
+ {/* CTA Arrow */}
131
+ <div className={`
132
+ mt-6 flex items-center gap-2 text-sm font-semibold
133
+ ${provider.color === 'coral' ? 'text-coral-400' : 'text-electric-400'}
134
+ group-hover:gap-4 transition-all duration-300
135
+ `}>
136
+ <span>Get Started</span>
137
+ <svg className="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor">
138
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M17 8l4 4m0 0l-4 4m4-4H3" />
139
+ </svg>
140
+ </div>
141
+ </button>
142
+ </motion.div>
143
+ ))}
144
+ </div>
145
+
146
+ {/* Footer Note */}
147
+ <motion.p
148
+ className="mt-12 text-void-500 text-sm text-center"
149
+ initial={{ opacity: 0 }}
150
+ animate={{ opacity: 1 }}
151
+ transition={{ delay: 0.8 }}
152
+ >
153
+ Both providers use your Python backend. API keys are configured server-side.
154
+ </motion.p>
155
+ </div>
156
+ );
157
+ };
158
+
frontend/src/components/index.ts ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ export * from './Icons';
2
+ export * from './ProviderSelect';
3
+ export * from './GenerationForm';
4
+ export * from './GenerationProgress';
5
+ export * from './GenerationComplete';
6
+ export * from './ErrorDisplay';
7
+ export * from './Login';
8
+
frontend/src/context/AuthContext.tsx ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { createContext, useContext, useState, useEffect, ReactNode } from 'react';
2
+ import { login as apiLogin, verifyAuth, logout as apiLogout, getCurrentUser } from '@/utils/api';
3
+ import type { LoginRequest, AuthUser } from '@/types';
4
+
5
+ interface AuthContextValue {
6
+ user: AuthUser | null;
7
+ loading: boolean;
8
+ login: (credentials: LoginRequest) => Promise<void>;
9
+ logout: () => void;
10
+ isAuthenticated: boolean;
11
+ }
12
+
13
+ const AuthContext = createContext<AuthContextValue | undefined>(undefined);
14
+
15
+ export function AuthProvider({ children }: { children: ReactNode }) {
16
+ const [user, setUser] = useState<AuthUser | null>(null);
17
+ const [loading, setLoading] = useState(true);
18
+
19
+ // Check authentication on mount
20
+ useEffect(() => {
21
+ checkAuth();
22
+ }, []);
23
+
24
+ async function checkAuth() {
25
+ try {
26
+ const authUser = await verifyAuth();
27
+ if (authUser.authenticated) {
28
+ setUser(authUser);
29
+ } else {
30
+ setUser(null);
31
+ }
32
+ } catch {
33
+ setUser(null);
34
+ } finally {
35
+ setLoading(false);
36
+ }
37
+ }
38
+
39
+ async function login(credentials: LoginRequest) {
40
+ setLoading(true);
41
+ try {
42
+ await apiLogin(credentials);
43
+ const authUser = await getCurrentUser();
44
+ setUser(authUser);
45
+ } catch (error) {
46
+ setUser(null);
47
+ throw error;
48
+ } finally {
49
+ setLoading(false);
50
+ }
51
+ }
52
+
53
+ function logout() {
54
+ apiLogout();
55
+ setUser(null);
56
+ }
57
+
58
+ return (
59
+ <AuthContext.Provider
60
+ value={{
61
+ user,
62
+ loading,
63
+ login,
64
+ logout,
65
+ isAuthenticated: user?.authenticated ?? false,
66
+ }}
67
+ >
68
+ {children}
69
+ </AuthContext.Provider>
70
+ );
71
+ }
72
+
73
+ export function useAuth() {
74
+ const context = useContext(AuthContext);
75
+ if (context === undefined) {
76
+ throw new Error('useAuth must be used within an AuthProvider');
77
+ }
78
+ return context;
79
+ }
80
+
frontend/src/context/GenerationContext.tsx ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React, { createContext, useContext, useReducer, ReactNode } from 'react';
2
+ import type {
3
+ GenerationState,
4
+ GenerationStep,
5
+ VideoProvider,
6
+ VeoSegment,
7
+ GeneratedVideo
8
+ } from '@/types';
9
+
10
+ // Initial state
11
+ const initialState: GenerationState = {
12
+ step: 'idle',
13
+ provider: null,
14
+ segments: [],
15
+ currentSegmentIndex: 0,
16
+ generatedVideos: [],
17
+ progress: {
18
+ current: 0,
19
+ total: 0,
20
+ message: '',
21
+ },
22
+ error: null,
23
+ taskId: null,
24
+ retryState: null,
25
+ };
26
+
27
+ // Action types
28
+ type GenerationAction =
29
+ | { type: 'SET_PROVIDER'; payload: VideoProvider }
30
+ | { type: 'SET_STEP'; payload: GenerationStep }
31
+ | { type: 'SET_SEGMENTS'; payload: VeoSegment[] }
32
+ | { type: 'SET_CURRENT_SEGMENT'; payload: number }
33
+ | { type: 'ADD_GENERATED_VIDEO'; payload: GeneratedVideo }
34
+ | { type: 'SET_PROGRESS'; payload: { current?: number; total?: number; message?: string } }
35
+ | { type: 'SET_ERROR'; payload: string | null }
36
+ | { type: 'SET_TASK_ID'; payload: string | null }
37
+ | { type: 'SET_RETRY_STATE'; payload: { failedSegmentIndex: number; error: string } | null }
38
+ | { type: 'RESET' };
39
+
40
+ // Reducer
41
+ function generationReducer(state: GenerationState, action: GenerationAction): GenerationState {
42
+ switch (action.type) {
43
+ case 'SET_PROVIDER':
44
+ return { ...state, provider: action.payload };
45
+ case 'SET_STEP':
46
+ return { ...state, step: action.payload };
47
+ case 'SET_SEGMENTS':
48
+ return { ...state, segments: action.payload };
49
+ case 'SET_CURRENT_SEGMENT':
50
+ return { ...state, currentSegmentIndex: action.payload };
51
+ case 'ADD_GENERATED_VIDEO':
52
+ return {
53
+ ...state,
54
+ generatedVideos: [...state.generatedVideos, action.payload]
55
+ };
56
+ case 'SET_PROGRESS':
57
+ return {
58
+ ...state,
59
+ progress: { ...state.progress, ...action.payload }
60
+ };
61
+ case 'SET_ERROR':
62
+ return { ...state, error: action.payload, step: action.payload ? 'error' : state.step };
63
+ case 'SET_TASK_ID':
64
+ return { ...state, taskId: action.payload };
65
+ case 'SET_RETRY_STATE':
66
+ return { ...state, retryState: action.payload };
67
+ case 'RESET':
68
+ return { ...initialState, provider: state.provider };
69
+ default:
70
+ return state;
71
+ }
72
+ }
73
+
74
+ // Context
75
+ interface GenerationContextValue {
76
+ state: GenerationState;
77
+ dispatch: React.Dispatch<GenerationAction>;
78
+
79
+ // Helper actions
80
+ selectProvider: (provider: VideoProvider) => void;
81
+ setStep: (step: GenerationStep) => void;
82
+ startGeneration: (segments: VeoSegment[]) => void;
83
+ advanceSegment: () => void;
84
+ addVideo: (video: GeneratedVideo) => void;
85
+ updateProgress: (message: string, current?: number, total?: number) => void;
86
+ setError: (error: string | null) => void;
87
+ setRetryState: (state: { failedSegmentIndex: number; error: string } | null) => void;
88
+ updateSegments: (segments: VeoSegment[]) => void;
89
+ reset: () => void;
90
+ }
91
+
92
+ const GenerationContext = createContext<GenerationContextValue | null>(null);
93
+
94
+ // Provider component
95
+ export function GenerationProvider({ children }: { children: ReactNode }) {
96
+ const [state, dispatch] = useReducer(generationReducer, initialState);
97
+
98
+ const value: GenerationContextValue = {
99
+ state,
100
+ dispatch,
101
+
102
+ selectProvider: (provider) => {
103
+ dispatch({ type: 'SET_PROVIDER', payload: provider });
104
+ dispatch({ type: 'SET_STEP', payload: 'configuring' });
105
+ },
106
+
107
+ setStep: (step) => {
108
+ dispatch({ type: 'SET_STEP', payload: step });
109
+ },
110
+
111
+ startGeneration: (segments) => {
112
+ dispatch({ type: 'SET_SEGMENTS', payload: segments });
113
+ dispatch({ type: 'SET_CURRENT_SEGMENT', payload: 0 });
114
+ dispatch({ type: 'SET_PROGRESS', payload: { current: 0, total: segments.length, message: 'Starting generation...' } });
115
+ dispatch({ type: 'SET_STEP', payload: 'generating_video' });
116
+ },
117
+
118
+ advanceSegment: () => {
119
+ const nextIndex = state.currentSegmentIndex + 1;
120
+ dispatch({ type: 'SET_CURRENT_SEGMENT', payload: nextIndex });
121
+ dispatch({ type: 'SET_PROGRESS', payload: { current: nextIndex } });
122
+ },
123
+
124
+ addVideo: (video) => {
125
+ dispatch({ type: 'ADD_GENERATED_VIDEO', payload: video });
126
+ },
127
+
128
+ updateProgress: (message, current, total) => {
129
+ dispatch({
130
+ type: 'SET_PROGRESS',
131
+ payload: {
132
+ message,
133
+ ...(current !== undefined && { current }),
134
+ ...(total !== undefined && { total })
135
+ }
136
+ });
137
+ },
138
+
139
+ setError: (error) => {
140
+ dispatch({ type: 'SET_ERROR', payload: error });
141
+ },
142
+
143
+ setRetryState: (retryState) => {
144
+ dispatch({ type: 'SET_RETRY_STATE', payload: retryState });
145
+ },
146
+
147
+ updateSegments: (segments) => {
148
+ dispatch({ type: 'SET_SEGMENTS', payload: segments });
149
+ },
150
+
151
+ reset: () => {
152
+ dispatch({ type: 'RESET' });
153
+ },
154
+ };
155
+
156
+ return (
157
+ <GenerationContext.Provider value={value}>
158
+ {children}
159
+ </GenerationContext.Provider>
160
+ );
161
+ }
162
+
163
+ // Hook
164
+ export function useGeneration() {
165
+ const context = useContext(GenerationContext);
166
+ if (!context) {
167
+ throw new Error('useGeneration must be used within GenerationProvider');
168
+ }
169
+ return context;
170
+ }
171
+
frontend/src/index.css ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @tailwind base;
2
+ @tailwind components;
3
+ @tailwind utilities;
4
+
5
+ /* Custom scrollbar */
6
+ ::-webkit-scrollbar {
7
+ width: 8px;
8
+ height: 8px;
9
+ }
10
+
11
+ ::-webkit-scrollbar-track {
12
+ background: rgba(16, 42, 67, 0.5);
13
+ border-radius: 4px;
14
+ }
15
+
16
+ ::-webkit-scrollbar-thumb {
17
+ background: rgba(99, 125, 152, 0.5);
18
+ border-radius: 4px;
19
+ }
20
+
21
+ ::-webkit-scrollbar-thumb:hover {
22
+ background: rgba(99, 125, 152, 0.8);
23
+ }
24
+
25
+ /* Base styles */
26
+ html {
27
+ scroll-behavior: smooth;
28
+ }
29
+
30
+ body {
31
+ @apply bg-void-950 text-void-100 antialiased;
32
+ font-family: 'Satoshi', system-ui, sans-serif;
33
+ background-image:
34
+ radial-gradient(ellipse 80% 80% at 50% -20%, rgba(255, 107, 107, 0.05), transparent),
35
+ radial-gradient(ellipse 80% 80% at 80% 50%, rgba(34, 184, 207, 0.05), transparent);
36
+ }
37
+
38
+ /* Glass morphism effect */
39
+ .glass {
40
+ @apply bg-void-900/60 backdrop-blur-xl border border-void-700/30;
41
+ }
42
+
43
+ .glass-dark {
44
+ @apply bg-void-950/80 backdrop-blur-xl border border-void-800/50;
45
+ }
46
+
47
+ /* Gradient text */
48
+ .gradient-text {
49
+ @apply bg-gradient-to-r from-coral-400 via-coral-500 to-electric-400 bg-clip-text text-transparent;
50
+ }
51
+
52
+ .gradient-text-electric {
53
+ @apply bg-gradient-to-r from-electric-400 to-electric-600 bg-clip-text text-transparent;
54
+ }
55
+
56
+ /* Button base styles */
57
+ .btn-primary {
58
+ @apply px-6 py-3 bg-gradient-to-r from-coral-500 to-coral-600 text-white font-semibold rounded-xl;
59
+ @apply hover:from-coral-400 hover:to-coral-500 transition-all duration-300;
60
+ @apply shadow-lg shadow-coral-500/25 hover:shadow-coral-500/40;
61
+ @apply active:scale-[0.98];
62
+ }
63
+
64
+ .btn-secondary {
65
+ @apply px-6 py-3 bg-void-800 text-void-100 font-semibold rounded-xl border border-void-600;
66
+ @apply hover:bg-void-700 hover:border-void-500 transition-all duration-300;
67
+ @apply active:scale-[0.98];
68
+ }
69
+
70
+ .btn-secondary-electric {
71
+ @apply px-6 py-3 bg-void-800 text-void-100 font-semibold rounded-xl border border-electric-500/50;
72
+ @apply hover:bg-electric-500/10 hover:border-electric-400 hover:text-electric-300 transition-all duration-300;
73
+ @apply active:scale-[0.98];
74
+ }
75
+
76
+ .btn-electric {
77
+ @apply px-6 py-3 bg-gradient-to-r from-electric-500 to-electric-600 text-void-950 font-semibold rounded-xl;
78
+ @apply hover:from-electric-400 hover:to-electric-500 transition-all duration-300;
79
+ @apply shadow-lg shadow-electric-500/25 hover:shadow-electric-500/40;
80
+ @apply active:scale-[0.98];
81
+ }
82
+
83
+ /* Card styles */
84
+ .card {
85
+ @apply bg-void-900/50 backdrop-blur-lg rounded-2xl border border-void-700/30 p-6;
86
+ }
87
+
88
+ .card-interactive {
89
+ @apply card cursor-pointer transition-all duration-300;
90
+ @apply hover:bg-void-800/50 hover:border-void-600/50 hover:shadow-xl;
91
+ }
92
+
93
+ /* Input styles */
94
+ .input-field {
95
+ @apply w-full bg-void-900/50 border border-void-700 rounded-xl px-4 py-3;
96
+ @apply text-void-100 placeholder-void-500;
97
+ @apply focus:outline-none focus:border-coral-500/50 focus:ring-2 focus:ring-coral-500/20;
98
+ @apply transition-all duration-200;
99
+ }
100
+
101
+ .textarea-field {
102
+ @apply input-field resize-none;
103
+ }
104
+
105
+ /* Provider card animations */
106
+ .provider-card {
107
+ @apply relative overflow-hidden;
108
+ }
109
+
110
+ .provider-card::before {
111
+ content: '';
112
+ @apply absolute inset-0 opacity-0 transition-opacity duration-500;
113
+ background: linear-gradient(135deg, transparent 0%, rgba(255, 107, 107, 0.05) 100%);
114
+ }
115
+
116
+ .provider-card:hover::before {
117
+ @apply opacity-100;
118
+ }
119
+
120
+ /* Progress ring */
121
+ .progress-ring {
122
+ transform: rotate(-90deg);
123
+ }
124
+
125
+ .progress-ring-circle {
126
+ transition: stroke-dashoffset 0.35s;
127
+ transform-origin: 50% 50%;
128
+ }
129
+
130
+ /* Shimmer loading effect */
131
+ .shimmer {
132
+ background: linear-gradient(
133
+ 90deg,
134
+ rgba(255, 255, 255, 0) 0%,
135
+ rgba(255, 255, 255, 0.05) 50%,
136
+ rgba(255, 255, 255, 0) 100%
137
+ );
138
+ background-size: 200% 100%;
139
+ animation: shimmer 2s infinite;
140
+ }
141
+
142
+ /* Floating animation */
143
+ .animate-float {
144
+ animation: float 6s ease-in-out infinite;
145
+ }
146
+
147
+ /* Glow effects */
148
+ .glow-coral {
149
+ box-shadow: 0 0 40px rgba(255, 107, 107, 0.15);
150
+ }
151
+
152
+ .glow-electric {
153
+ box-shadow: 0 0 40px rgba(34, 184, 207, 0.15);
154
+ }
155
+
156
+ /* Tab styles */
157
+ .tab-button {
158
+ @apply px-6 py-3 font-medium text-void-400 rounded-lg transition-all duration-200;
159
+ }
160
+
161
+ .tab-button.active {
162
+ @apply text-void-100 bg-void-800;
163
+ }
164
+
165
+ .tab-button:hover:not(.active) {
166
+ @apply text-void-200 bg-void-900/50;
167
+ }
168
+
169
+ /* Step indicator */
170
+ .step-dot {
171
+ @apply w-3 h-3 rounded-full bg-void-700 transition-all duration-300;
172
+ }
173
+
174
+ .step-dot.active {
175
+ @apply bg-coral-500 scale-125;
176
+ }
177
+
178
+ .step-dot.completed {
179
+ @apply bg-electric-500;
180
+ }
181
+
182
+ /* Select dropdown */
183
+ .select-field {
184
+ @apply input-field cursor-pointer appearance-none;
185
+ background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%239fb3c8' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e");
186
+ background-position: right 0.75rem center;
187
+ background-repeat: no-repeat;
188
+ background-size: 1.5em 1.5em;
189
+ padding-right: 2.5rem;
190
+ }
191
+
192
+ /* Pulse animation for live elements */
193
+ @keyframes pulse-live {
194
+ 0%, 100% {
195
+ opacity: 1;
196
+ }
197
+ 50% {
198
+ opacity: 0.5;
199
+ }
200
+ }
201
+
202
+ .animate-pulse-live {
203
+ animation: pulse-live 2s ease-in-out infinite;
204
+ }
205
+
206
+ /* Slow pulse animation */
207
+ @keyframes pulse-slow {
208
+ 0%, 100% {
209
+ opacity: 1;
210
+ transform: scale(1);
211
+ }
212
+ 50% {
213
+ opacity: 0.8;
214
+ transform: scale(1.02);
215
+ }
216
+ }
217
+
218
+ .animate-pulse-slow {
219
+ animation: pulse-slow 2s ease-in-out infinite;
220
+ }
221
+
222
+ /* Shimmer animation for progress bars */
223
+ @keyframes shimmer {
224
+ 0% {
225
+ transform: translateX(-100%);
226
+ }
227
+ 100% {
228
+ transform: translateX(100%);
229
+ }
230
+ }
231
+
232
+ .animate-shimmer {
233
+ animation: shimmer 2s infinite;
234
+ }
235
+
236
+ /* Scrollbar utilities */
237
+ .scrollbar-thin::-webkit-scrollbar {
238
+ width: 4px;
239
+ }
240
+
241
+ .scrollbar-thumb-void-600::-webkit-scrollbar-thumb {
242
+ background: rgba(99, 125, 152, 0.5);
243
+ border-radius: 4px;
244
+ }
245
+
246
+ .scrollbar-track-void-800::-webkit-scrollbar-track {
247
+ background: rgba(16, 42, 67, 0.5);
248
+ border-radius: 4px;
249
+ }
250
+
251
+ /* Drag and drop zone */
252
+ .dropzone {
253
+ @apply border-2 border-dashed border-void-600 rounded-2xl p-8;
254
+ @apply transition-all duration-300;
255
+ }
256
+
257
+ .dropzone.active {
258
+ @apply border-coral-500 bg-coral-500/5;
259
+ }
260
+
261
+ .dropzone:hover {
262
+ @apply border-void-500 bg-void-900/30;
263
+ }
frontend/src/main.tsx ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ import React from 'react';
2
+ import ReactDOM from 'react-dom/client';
3
+ import App from './App';
4
+ import './index.css';
5
+
6
+ ReactDOM.createRoot(document.getElementById('root')!).render(
7
+ <React.StrictMode>
8
+ <App />
9
+ </React.StrictMode>
10
+ );
frontend/src/types/index.ts ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Authentication types
2
+ export interface LoginRequest {
3
+ username: string;
4
+ password: string;
5
+ }
6
+
7
+ export interface LoginResponse {
8
+ access_token: string;
9
+ token_type: string;
10
+ username: string;
11
+ }
12
+
13
+ export interface AuthUser {
14
+ username: string;
15
+ authenticated: boolean;
16
+ }
17
+
18
+ // API Provider types
19
+ export type VideoProvider = 'kling' | 'replicate';
20
+
21
+ export interface ProviderConfig {
22
+ id: VideoProvider;
23
+ name: string;
24
+ description: string;
25
+ icon: string;
26
+ color: 'coral' | 'electric';
27
+ features: string[];
28
+ models: ModelOption[];
29
+ }
30
+
31
+ export interface ModelOption {
32
+ id: string;
33
+ name: string;
34
+ description: string;
35
+ duration?: string;
36
+ }
37
+
38
+ // Video generation request types
39
+ export interface VideoGenerationRequest {
40
+ provider: VideoProvider;
41
+ prompt: string | VeoSegment;
42
+ imageUrls?: string[];
43
+ model?: string;
44
+ aspectRatio?: string;
45
+ generationType?: string;
46
+ seeds?: number;
47
+ voiceType?: string;
48
+ }
49
+
50
+ export interface KlingGenerateRequest {
51
+ prompt: string | VeoSegment;
52
+ imageUrls?: string[];
53
+ model?: string;
54
+ aspectRatio?: string;
55
+ generationType?: string;
56
+ seeds?: number;
57
+ voiceType?: string;
58
+ }
59
+
60
+ export interface ReplicateGenerateRequest {
61
+ prompt: string;
62
+ imageUrl?: string;
63
+ model?: string;
64
+ duration?: number;
65
+ aspectRatio?: string;
66
+ }
67
+
68
+ // Generation response types
69
+ export interface VideoGenerationResponse {
70
+ taskId: string;
71
+ status: 'processing' | 'succeeded' | 'failed';
72
+ }
73
+
74
+ export interface VideoStatusResponse {
75
+ status: 'processing' | 'succeeded' | 'failed';
76
+ output?: string;
77
+ url?: string;
78
+ error?: string;
79
+ }
80
+
81
+ // Segment types for GPT-4o prompt generation
82
+ export interface ContinuityMarkers {
83
+ start_position: string;
84
+ end_position: string;
85
+ start_expression: string;
86
+ end_expression: string;
87
+ start_gesture: string;
88
+ end_gesture: string;
89
+ location_status: string;
90
+ }
91
+
92
+ export interface SegmentInfo {
93
+ segment_number: number;
94
+ total_segments: number;
95
+ duration: string;
96
+ location: string;
97
+ continuity_markers: ContinuityMarkers;
98
+ }
99
+
100
+ export interface CharacterDescription {
101
+ current_state: string;
102
+ voice_matching: string;
103
+ }
104
+
105
+ export interface SynchronizedActions {
106
+ '0:00-0:02': string;
107
+ '0:02-0:04': string;
108
+ '0:04-0:06': string;
109
+ '0:06-0:08': string;
110
+ }
111
+
112
+ export interface ActionTimeline {
113
+ dialogue: string;
114
+ synchronized_actions: SynchronizedActions;
115
+ micro_expressions: string;
116
+ breathing_rhythm: string;
117
+ location_transition: string;
118
+ continuity_checkpoint: string;
119
+ }
120
+
121
+ export interface SceneContinuity {
122
+ environment: string;
123
+ camera_position: string;
124
+ camera_movement: string;
125
+ lighting_state: string;
126
+ background_elements: string;
127
+ spatial_relationships: string;
128
+ }
129
+
130
+ export interface VeoSegment {
131
+ segment_info: SegmentInfo;
132
+ character_description: CharacterDescription;
133
+ scene_continuity: SceneContinuity;
134
+ action_timeline: ActionTimeline;
135
+ }
136
+
137
+ export interface SegmentsPayload {
138
+ segments: VeoSegment[];
139
+ environment?: string;
140
+ is_dev_mode?: boolean;
141
+ max_segments?: number;
142
+ }
143
+
144
+ // Form inputs
145
+ export interface GenerationInputs {
146
+ script: string;
147
+ style: string;
148
+ voiceType?: string;
149
+ energyLevel?: string;
150
+ cameraStyle?: string;
151
+ narrativeStyle?: string;
152
+ seedValue?: number;
153
+ aspectRatio?: string;
154
+ model?: string;
155
+ }
156
+
157
+ // Generation state
158
+ export type GenerationStep =
159
+ | 'idle'
160
+ | 'configuring'
161
+ | 'generating_prompts'
162
+ | 'generating_video'
163
+ | 'processing'
164
+ | 'selecting_frame'
165
+ | 'continuation_choice'
166
+ | 'completed'
167
+ | 'error';
168
+
169
+ export interface GenerationState {
170
+ step: GenerationStep;
171
+ provider: VideoProvider | null;
172
+ segments: VeoSegment[];
173
+ currentSegmentIndex: number;
174
+ generatedVideos: GeneratedVideo[];
175
+ progress: {
176
+ current: number;
177
+ total: number;
178
+ message: string;
179
+ };
180
+ error: string | null;
181
+ taskId: string | null;
182
+ retryState: {
183
+ failedSegmentIndex: number;
184
+ error: string;
185
+ } | null;
186
+ }
187
+
188
+ export interface GeneratedVideo {
189
+ id: string;
190
+ url: string;
191
+ blobUrl?: string;
192
+ segment?: VeoSegment;
193
+ duration: number;
194
+ thumbnails: string[];
195
+ trimPoint?: number; // Whisper-detected trim point for optimal continuity
196
+ }
197
+
198
+ // Frame extraction
199
+ export interface ExtractedFrame {
200
+ timestamp: number;
201
+ frame: string;
202
+ label: string;
203
+ }
204
+
205
+ // Health check
206
+ export interface HealthStatus {
207
+ status: string;
208
+ environment: string;
209
+ is_dev_mode: boolean;
210
+ max_segments: number | null;
211
+ kie_api_configured: boolean;
212
+ gemini_api_configured: boolean;
213
+ openai_api_configured: boolean;
214
+ }
215
+
frontend/src/utils/api.ts ADDED
@@ -0,0 +1,611 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import type {
2
+ VideoStatusResponse,
3
+ SegmentsPayload,
4
+ HealthStatus,
5
+ ExtractedFrame,
6
+ LoginRequest,
7
+ LoginResponse,
8
+ AuthUser
9
+ } from '@/types';
10
+
11
+ const API_BASE = import.meta.env.VITE_API_BASE_URL || 'http://localhost:4000';
12
+
13
+ // Get auth token from localStorage
14
+ function getAuthToken(): string | null {
15
+ return localStorage.getItem('auth_token');
16
+ }
17
+
18
+ // Set auth token in localStorage
19
+ export function setAuthToken(token: string): void {
20
+ localStorage.setItem('auth_token', token);
21
+ }
22
+
23
+ // Remove auth token from localStorage
24
+ export function removeAuthToken(): void {
25
+ localStorage.removeItem('auth_token');
26
+ }
27
+
28
+ // Generic API request handler
29
+ async function apiRequest<T>(
30
+ path: string,
31
+ options: RequestInit = {}
32
+ ): Promise<T> {
33
+ const url = path.startsWith('http') ? path : `${API_BASE}${path}`;
34
+ const token = getAuthToken();
35
+
36
+ // Normalize headers to a plain object
37
+ const headers: Record<string, string> = {};
38
+
39
+ // Convert Headers object or array to plain object
40
+ if (options.headers) {
41
+ if (options.headers instanceof Headers) {
42
+ options.headers.forEach((value, key) => {
43
+ headers[key] = value;
44
+ });
45
+ } else if (Array.isArray(options.headers)) {
46
+ options.headers.forEach(([key, value]) => {
47
+ headers[key] = value;
48
+ });
49
+ } else {
50
+ Object.assign(headers, options.headers);
51
+ }
52
+ }
53
+
54
+ // Add auth token if available
55
+ if (token) {
56
+ headers['Authorization'] = `Bearer ${token}`;
57
+ }
58
+
59
+ const response = await fetch(url, {
60
+ ...options,
61
+ headers,
62
+ });
63
+
64
+ if (!response.ok) {
65
+ // If unauthorized, clear token
66
+ if (response.status === 401) {
67
+ removeAuthToken();
68
+ }
69
+
70
+ let errorMessage = `Request failed with status ${response.status}`;
71
+
72
+ // Try to extract error message from response
73
+ const contentType = response.headers.get('content-type');
74
+ const isJson = contentType && contentType.includes('application/json');
75
+
76
+ try {
77
+ if (isJson) {
78
+ const errorData = await response.json();
79
+ // Try multiple common error message fields
80
+ errorMessage = errorData.detail ||
81
+ errorData.message ||
82
+ errorData.error ||
83
+ (typeof errorData === 'string' ? errorData : errorMessage);
84
+ } else {
85
+ const text = await response.text();
86
+ if (text && text.trim()) {
87
+ errorMessage = text;
88
+ } else {
89
+ // Fall back to default message based on status code
90
+ if (response.status === 401) {
91
+ errorMessage = 'Incorrect username or password.';
92
+ } else if (response.status === 403) {
93
+ errorMessage = 'Access forbidden.';
94
+ } else if (response.status === 404) {
95
+ errorMessage = 'Resource not found.';
96
+ } else if (response.status >= 500) {
97
+ errorMessage = 'Server error. Please try again later.';
98
+ }
99
+ }
100
+ }
101
+ } catch (parseError) {
102
+ // If parsing fails, use status-based default
103
+ if (response.status === 401) {
104
+ errorMessage = 'Incorrect username or password.';
105
+ } else if (response.status === 403) {
106
+ errorMessage = 'Access forbidden.';
107
+ } else if (response.status === 404) {
108
+ errorMessage = 'Resource not found.';
109
+ } else if (response.status >= 500) {
110
+ errorMessage = 'Server error. Please try again later.';
111
+ }
112
+ }
113
+
114
+ const error = new Error(errorMessage);
115
+ console.error('API Error:', {
116
+ status: response.status,
117
+ statusText: response.statusText,
118
+ message: errorMessage,
119
+ url: url,
120
+ contentType: contentType
121
+ });
122
+ throw error;
123
+ }
124
+
125
+ return response.json();
126
+ }
127
+
128
+ // Health check
129
+ export async function checkHealth(): Promise<HealthStatus> {
130
+ return apiRequest<HealthStatus>('/health');
131
+ }
132
+
133
+ // ==================== AUTHENTICATION ====================
134
+
135
+ export async function login(credentials: LoginRequest): Promise<LoginResponse> {
136
+ const response = await apiRequest<LoginResponse>('/api/auth/login', {
137
+ method: 'POST',
138
+ headers: {
139
+ 'Content-Type': 'application/json',
140
+ },
141
+ body: JSON.stringify(credentials),
142
+ });
143
+
144
+ // Store token
145
+ if (response.access_token) {
146
+ setAuthToken(response.access_token);
147
+ }
148
+
149
+ return response;
150
+ }
151
+
152
+ export async function verifyAuth(): Promise<AuthUser> {
153
+ try {
154
+ const response = await apiRequest<AuthUser>('/api/auth/verify');
155
+ return response;
156
+ } catch {
157
+ removeAuthToken();
158
+ return { username: '', authenticated: false };
159
+ }
160
+ }
161
+
162
+ export async function getCurrentUser(): Promise<AuthUser> {
163
+ try {
164
+ const response = await apiRequest<AuthUser>('/api/auth/me');
165
+ return response;
166
+ } catch {
167
+ removeAuthToken();
168
+ return { username: '', authenticated: false };
169
+ }
170
+ }
171
+
172
+ export function logout(): void {
173
+ removeAuthToken();
174
+ }
175
+
176
+ // ==================== KLING/KIE API ====================
177
+
178
+ export interface KlingGenerateParams {
179
+ prompt: string | object;
180
+ imageUrls?: string[];
181
+ model?: string;
182
+ aspectRatio?: string;
183
+ generationType?: string;
184
+ seeds?: number;
185
+ voiceType?: string;
186
+ }
187
+
188
+ export interface KlingGenerateResponse {
189
+ taskId: string;
190
+ status: string;
191
+ }
192
+
193
+ export async function klingGenerate(params: KlingGenerateParams): Promise<KlingGenerateResponse> {
194
+ return apiRequest<KlingGenerateResponse>('/api/veo/generate', {
195
+ method: 'POST',
196
+ headers: { 'Content-Type': 'application/json' },
197
+ body: JSON.stringify(params),
198
+ });
199
+ }
200
+
201
+ export async function klingExtend(taskId: string, prompt: string | object, seeds?: number, voiceType?: string): Promise<KlingGenerateResponse> {
202
+ return apiRequest<KlingGenerateResponse>('/api/veo/extend', {
203
+ method: 'POST',
204
+ headers: { 'Content-Type': 'application/json' },
205
+ body: JSON.stringify({ taskId, prompt, seeds, voiceType }),
206
+ });
207
+ }
208
+
209
+ export async function klingGetStatus(taskId: string): Promise<VideoStatusResponse> {
210
+ return apiRequest<VideoStatusResponse>(`/api/veo/status/${taskId}`);
211
+ }
212
+
213
+ export function createKlingEventSource(taskId: string): EventSource {
214
+ return new EventSource(`${API_BASE}/api/veo/events/${taskId}`);
215
+ }
216
+
217
+ // ==================== REPLICATE API ====================
218
+
219
+ export interface ReplicateGenerateParams {
220
+ prompt: string;
221
+ imageUrl?: string;
222
+ model?: string;
223
+ duration?: number;
224
+ aspectRatio?: string;
225
+ }
226
+
227
+ export interface ReplicateGenerateResponse {
228
+ id: string;
229
+ status: string;
230
+ }
231
+
232
+ export async function replicateGenerate(params: ReplicateGenerateParams): Promise<ReplicateGenerateResponse> {
233
+ return apiRequest<ReplicateGenerateResponse>('/api/replicate/generate', {
234
+ method: 'POST',
235
+ headers: { 'Content-Type': 'application/json' },
236
+ body: JSON.stringify(params),
237
+ });
238
+ }
239
+
240
+ export async function replicateGetStatus(predictionId: string): Promise<VideoStatusResponse> {
241
+ return apiRequest<VideoStatusResponse>(`/api/replicate/status/${predictionId}`);
242
+ }
243
+
244
+ // Wait for Replicate video completion by polling
245
+ export async function waitForReplicateVideo(
246
+ predictionId: string,
247
+ timeoutMs: number = 600000, // 10 minutes
248
+ pollIntervalMs: number = 15000 // Poll every 15 seconds (video gen takes 2-5 min)
249
+ ): Promise<string> {
250
+ const startTime = Date.now();
251
+
252
+ while (Date.now() - startTime < timeoutMs) {
253
+ const status = await replicateGetStatus(predictionId);
254
+
255
+ if (status.status === 'succeeded' && status.url) {
256
+ return status.url;
257
+ } else if (status.status === 'failed') {
258
+ throw new Error(status.error || 'Replicate video generation failed');
259
+ }
260
+
261
+ // Wait before next poll
262
+ await new Promise(resolve => setTimeout(resolve, pollIntervalMs));
263
+ }
264
+
265
+ throw new Error('Replicate video generation timed out');
266
+ }
267
+
268
+ // ==================== PROMPT GENERATION ====================
269
+
270
+ export async function generatePrompts(formData: FormData): Promise<SegmentsPayload> {
271
+ const response = await fetch(`${API_BASE}/api/generate-prompts`, {
272
+ method: 'POST',
273
+ body: formData,
274
+ });
275
+
276
+ if (!response.ok) {
277
+ let errorMessage = 'Failed to generate prompts';
278
+ try {
279
+ const errorData = await response.json();
280
+ errorMessage = errorData.detail || errorMessage;
281
+ } catch {
282
+ // Ignore
283
+ }
284
+ throw new Error(errorMessage);
285
+ }
286
+
287
+ return response.json();
288
+ }
289
+
290
+ export async function refinePromptContinuity(
291
+ segmentPrompt: object,
292
+ lastFrameBlob: Blob
293
+ ): Promise<{ refined_prompt: object }> {
294
+ const formData = new FormData();
295
+ formData.append('segmentPrompt', JSON.stringify(segmentPrompt));
296
+ formData.append('lastFrame', lastFrameBlob, 'last-frame.jpg');
297
+
298
+ return apiRequest<{ refined_prompt: object }>('/api/refine-prompt-continuity', {
299
+ method: 'POST',
300
+ body: formData,
301
+ });
302
+ }
303
+
304
+ // ==================== IMAGE UPLOAD ====================
305
+
306
+ export async function uploadImage(file: File): Promise<{ url: string; filename: string }> {
307
+ const formData = new FormData();
308
+ formData.append('file', file);
309
+
310
+ return apiRequest<{ url: string; filename: string }>('/api/upload-image', {
311
+ method: 'POST',
312
+ body: formData,
313
+ });
314
+ }
315
+
316
+ // ==================== FRAME EXTRACTION ====================
317
+
318
+ export interface ExtractFramesParams {
319
+ video_url: string;
320
+ script?: string;
321
+ buffer_time?: number;
322
+ num_frames?: number;
323
+ model_size?: string;
324
+ }
325
+
326
+ export interface ExtractFramesResponse {
327
+ frames: ExtractedFrame[];
328
+ }
329
+
330
+ export async function extractFrames(params: ExtractFramesParams): Promise<ExtractFramesResponse> {
331
+ return apiRequest<ExtractFramesResponse>('/api/extract-frames', {
332
+ method: 'POST',
333
+ headers: { 'Content-Type': 'application/json' },
334
+ body: JSON.stringify(params),
335
+ });
336
+ }
337
+
338
+ // ==================== WHISPER ANALYSIS ====================
339
+
340
+ export interface WhisperAnalyzeParams {
341
+ video_url: string;
342
+ dialogue: string;
343
+ buffer_time?: number;
344
+ model_size?: string;
345
+ }
346
+
347
+ export interface WhisperAnalyzeResponse {
348
+ success: boolean;
349
+ last_word_timestamp: number | null;
350
+ trim_point: number | null;
351
+ frame_timestamp: number | null;
352
+ frame_base64: string | null;
353
+ video_duration: number;
354
+ transcribed_text: string | null; // What Whisper actually heard - for prompt refinement
355
+ error: string | null;
356
+ }
357
+
358
+ /**
359
+ * Analyze video with Whisper to find last spoken word and extract frame.
360
+ * This is the optimized flow that combines Whisper analysis and frame extraction.
361
+ */
362
+ export async function whisperAnalyzeAndExtract(params: WhisperAnalyzeParams): Promise<WhisperAnalyzeResponse> {
363
+ return apiRequest<WhisperAnalyzeResponse>('/api/whisper/analyze-and-extract', {
364
+ method: 'POST',
365
+ headers: { 'Content-Type': 'application/json' },
366
+ body: JSON.stringify(params),
367
+ });
368
+ }
369
+
370
+ /**
371
+ * Refine a segment prompt using the frame AND transcription from the previous segment.
372
+ * This ensures perfect visual and audio continuity.
373
+ */
374
+ export async function refinePromptWithContext(
375
+ segmentPrompt: object,
376
+ frameFile: File,
377
+ transcribedDialogue: string,
378
+ expectedDialogue: string
379
+ ): Promise<{ refined_prompt: object; original_prompt: object }> {
380
+ const formData = new FormData();
381
+ formData.append('segmentPrompt', JSON.stringify(segmentPrompt));
382
+ formData.append('lastFrame', frameFile);
383
+ formData.append('transcribedDialogue', transcribedDialogue);
384
+ formData.append('expectedDialogue', expectedDialogue);
385
+
386
+ const response = await fetch(`${API_BASE}/api/refine-prompt-continuity`, {
387
+ method: 'POST',
388
+ body: formData,
389
+ });
390
+
391
+ if (!response.ok) {
392
+ throw new Error(`Failed to refine prompt: ${response.status}`);
393
+ }
394
+
395
+ return response.json();
396
+ }
397
+
398
+ /**
399
+ * Check if Whisper is available on the backend
400
+ */
401
+ export async function checkWhisperStatus(): Promise<{ available: boolean; message: string }> {
402
+ return apiRequest<{ available: boolean; message: string }>('/api/whisper/status');
403
+ }
404
+
405
+ // ==================== VIDEO DOWNLOAD ====================
406
+
407
+ export async function downloadVideo(url: string): Promise<Blob> {
408
+ const response = await fetch(`${API_BASE}/api/veo/download?url=${encodeURIComponent(url)}`);
409
+
410
+ if (!response.ok) {
411
+ throw new Error(`Failed to download video: ${response.status}`);
412
+ }
413
+
414
+ return response.blob();
415
+ }
416
+
417
+ // ==================== UTILITIES ====================
418
+
419
+ export async function getVideoDuration(file: File): Promise<number> {
420
+ return new Promise((resolve, reject) => {
421
+ const video = document.createElement('video');
422
+ video.preload = 'metadata';
423
+ video.src = URL.createObjectURL(file);
424
+ video.onloadedmetadata = () => {
425
+ URL.revokeObjectURL(video.src);
426
+ resolve(video.duration);
427
+ };
428
+ video.onerror = () => {
429
+ URL.revokeObjectURL(video.src);
430
+ reject(new Error('Failed to load video metadata'));
431
+ };
432
+ });
433
+ }
434
+
435
+ export async function generateThumbnails(file: File, count: number = 5): Promise<string[]> {
436
+ return new Promise((resolve, reject) => {
437
+ const video = document.createElement('video');
438
+ video.preload = 'metadata';
439
+ video.src = URL.createObjectURL(file);
440
+ video.muted = true;
441
+
442
+ video.onloadedmetadata = async () => {
443
+ const duration = video.duration;
444
+ const thumbnails: string[] = [];
445
+ const canvas = document.createElement('canvas');
446
+ const ctx = canvas.getContext('2d');
447
+
448
+ if (!ctx) {
449
+ URL.revokeObjectURL(video.src);
450
+ reject(new Error('Could not get canvas context'));
451
+ return;
452
+ }
453
+
454
+ // Use video's actual dimensions for proper aspect ratio
455
+ // Scale down while maintaining aspect ratio (max 400px on longest side)
456
+ const maxSize = 400;
457
+ const videoWidth = video.videoWidth || 1080;
458
+ const videoHeight = video.videoHeight || 1920;
459
+ const scale = Math.min(maxSize / videoWidth, maxSize / videoHeight);
460
+
461
+ canvas.width = Math.round(videoWidth * scale);
462
+ canvas.height = Math.round(videoHeight * scale);
463
+
464
+ for (let i = 0; i < count; i++) {
465
+ const time = (duration / count) * i;
466
+ video.currentTime = time;
467
+ await new Promise<void>((res) => {
468
+ video.onseeked = () => res();
469
+ });
470
+ ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
471
+ thumbnails.push(canvas.toDataURL('image/jpeg', 0.85));
472
+ }
473
+
474
+ URL.revokeObjectURL(video.src);
475
+ resolve(thumbnails);
476
+ };
477
+
478
+ video.onerror = () => {
479
+ URL.revokeObjectURL(video.src);
480
+ reject(new Error('Failed to load video'));
481
+ };
482
+ });
483
+ }
484
+
485
+ // Wait for video completion using SSE
486
+ export function waitForKlingVideo(taskId: string, timeoutMs: number = 300000): Promise<string> {
487
+ return new Promise((resolve, reject) => {
488
+ const eventSource = createKlingEventSource(taskId);
489
+ const startTime = Date.now();
490
+
491
+ const timeout = setTimeout(() => {
492
+ eventSource.close();
493
+ reject(new Error('Video generation timed out'));
494
+ }, timeoutMs);
495
+
496
+ eventSource.onmessage = (event) => {
497
+ try {
498
+ const data = JSON.parse(event.data);
499
+
500
+ if (data.status === 'succeeded' && data.url) {
501
+ clearTimeout(timeout);
502
+ eventSource.close();
503
+ resolve(data.url);
504
+ } else if (data.status === 'failed' || data.code !== undefined && data.code !== 200) {
505
+ clearTimeout(timeout);
506
+ eventSource.close();
507
+ const errorMsg = data.error || data.msg || `Video generation failed (code: ${data.code || 'unknown'})`;
508
+ reject(new Error(errorMsg));
509
+ }
510
+ } catch (err) {
511
+ console.error('Failed to parse SSE data:', err);
512
+ // If we can't parse, don't reject immediately - might be a partial message
513
+ }
514
+ };
515
+
516
+ eventSource.onerror = () => {
517
+ // If connection error and we've been waiting a while, reject
518
+ const elapsed = Date.now() - startTime;
519
+ if (elapsed > 5000) { // Wait at least 5 seconds before rejecting on connection error
520
+ clearTimeout(timeout);
521
+ eventSource.close();
522
+ reject(new Error('SSE connection error - video generation may have failed'));
523
+ }
524
+ };
525
+ });
526
+ }
527
+
528
+ // Generate video with automatic retry (retries once on failure)
529
+ export async function generateVideoWithRetry(
530
+ generateFn: () => Promise<{ taskId: string }>,
531
+ timeoutMs: number = 300000,
532
+ onRetry?: (attempt: number) => void
533
+ ): Promise<string> {
534
+ let lastError: Error | null = null;
535
+
536
+ for (let attempt = 0; attempt < 2; attempt++) {
537
+ try {
538
+ if (attempt > 0) {
539
+ console.log(`🔄 Retrying video generation (attempt ${attempt + 1}/2)...`);
540
+ if (onRetry) {
541
+ onRetry(attempt + 1);
542
+ }
543
+ }
544
+
545
+ const result = await generateFn();
546
+ const videoUrl = await waitForKlingVideo(result.taskId, timeoutMs);
547
+ return videoUrl;
548
+ } catch (error) {
549
+ lastError = error instanceof Error ? error : new Error(String(error));
550
+ console.error(`❌ Video generation attempt ${attempt + 1} failed:`, lastError.message);
551
+
552
+ // If this was the first attempt, retry once
553
+ if (attempt === 0) {
554
+ console.log('⏳ Waiting 2 seconds before retry...');
555
+ await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds before retry
556
+ continue;
557
+ }
558
+
559
+ // If both attempts failed, throw the error
560
+ throw lastError;
561
+ }
562
+ }
563
+
564
+ // This should never be reached, but TypeScript needs it
565
+ throw lastError || new Error('Video generation failed');
566
+ }
567
+
568
+ // ==================== VIDEO MERGE/EXPORT ====================
569
+
570
+ export interface ClipMetadata {
571
+ index: number;
572
+ startTime: number;
573
+ endTime: number;
574
+ type: 'video' | 'image';
575
+ duration?: number;
576
+ }
577
+
578
+ // Merge multiple video files into a single video
579
+ export async function mergeVideos(
580
+ videoBlobs: Blob[],
581
+ clipMetadata: ClipMetadata[]
582
+ ): Promise<Blob> {
583
+ const formData = new FormData();
584
+
585
+ // Add clip metadata as JSON
586
+ formData.append('clips_data', JSON.stringify(clipMetadata));
587
+
588
+ // Add video files
589
+ videoBlobs.forEach((blob, index) => {
590
+ formData.append('files', blob, `video_${index}.mp4`);
591
+ });
592
+
593
+ const response = await fetch(`${API_BASE}/api/export/merge`, {
594
+ method: 'POST',
595
+ body: formData,
596
+ });
597
+
598
+ if (!response.ok) {
599
+ let errorMessage = 'Failed to merge videos';
600
+ try {
601
+ const errorData = await response.json();
602
+ errorMessage = errorData.detail || errorMessage;
603
+ } catch {
604
+ // Ignore JSON parse errors
605
+ }
606
+ throw new Error(errorMessage);
607
+ }
608
+
609
+ return response.blob();
610
+ }
611
+
frontend/src/vite-env.d.ts ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ /// <reference types="vite/client" />
2
+
3
+ interface ImportMetaEnv {
4
+ readonly VITE_API_BASE_URL?: string;
5
+ // Add more env variables as needed
6
+ }
7
+
8
+ interface ImportMeta {
9
+ readonly env: ImportMetaEnv;
10
+ }
frontend/tailwind.config.js ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /** @type {import('tailwindcss').Config} */
2
+ export default {
3
+ content: [
4
+ "./index.html",
5
+ "./src/**/*.{js,ts,jsx,tsx}",
6
+ ],
7
+ theme: {
8
+ extend: {
9
+ colors: {
10
+ // Deep ocean-inspired dark theme
11
+ void: {
12
+ 50: '#f0f4f8',
13
+ 100: '#d9e2ec',
14
+ 200: '#bcccdc',
15
+ 300: '#9fb3c8',
16
+ 400: '#829ab1',
17
+ 500: '#627d98',
18
+ 600: '#486581',
19
+ 700: '#334e68',
20
+ 800: '#243b53',
21
+ 900: '#102a43',
22
+ 950: '#0a1929',
23
+ },
24
+ // Electric coral accent
25
+ coral: {
26
+ 50: '#fff5f5',
27
+ 100: '#ffe3e3',
28
+ 200: '#ffc9c9',
29
+ 300: '#ffa8a8',
30
+ 400: '#ff8787',
31
+ 500: '#ff6b6b',
32
+ 600: '#fa5252',
33
+ 700: '#f03e3e',
34
+ 800: '#e03131',
35
+ 900: '#c92a2a',
36
+ },
37
+ // Cyan electric accent
38
+ electric: {
39
+ 50: '#e3fafc',
40
+ 100: '#c5f6fa',
41
+ 200: '#99e9f2',
42
+ 300: '#66d9e8',
43
+ 400: '#3bc9db',
44
+ 500: '#22b8cf',
45
+ 600: '#15aabf',
46
+ 700: '#1098ad',
47
+ 800: '#0c8599',
48
+ 900: '#0b7285',
49
+ },
50
+ // Golden amber for highlights
51
+ amber: {
52
+ 400: '#fbbf24',
53
+ 500: '#f59e0b',
54
+ 600: '#d97706',
55
+ },
56
+ },
57
+ fontFamily: {
58
+ display: ['Clash Display', 'system-ui', 'sans-serif'],
59
+ body: ['Satoshi', 'system-ui', 'sans-serif'],
60
+ mono: ['JetBrains Mono', 'Menlo', 'monospace'],
61
+ },
62
+ backgroundImage: {
63
+ 'gradient-radial': 'radial-gradient(var(--tw-gradient-stops))',
64
+ 'gradient-conic': 'conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))',
65
+ 'mesh-pattern': `url("data:image/svg+xml,%3Csvg width='60' height='60' viewBox='0 0 60 60' xmlns='http://www.w3.org/2000/svg'%3E%3Cg fill='none' fill-rule='evenodd'%3E%3Cg fill='%23ffffff' fill-opacity='0.03'%3E%3Cpath d='M36 34v-4h-2v4h-4v2h4v4h2v-4h4v-2h-4zm0-30V0h-2v4h-4v2h4v4h2V6h4V4h-4zM6 34v-4H4v4H0v2h4v4h2v-4h4v-2H6zM6 4V0H4v4H0v2h4v4h2V6h4V4H6z'/%3E%3C/g%3E%3C/g%3E%3C/svg%3E")`,
66
+ },
67
+ animation: {
68
+ 'float': 'float 6s ease-in-out infinite',
69
+ 'pulse-glow': 'pulse-glow 2s ease-in-out infinite',
70
+ 'shimmer': 'shimmer 2s linear infinite',
71
+ 'spin-slow': 'spin 3s linear infinite',
72
+ },
73
+ keyframes: {
74
+ float: {
75
+ '0%, 100%': { transform: 'translateY(0)' },
76
+ '50%': { transform: 'translateY(-10px)' },
77
+ },
78
+ 'pulse-glow': {
79
+ '0%, 100%': { opacity: '1', boxShadow: '0 0 20px rgba(255, 107, 107, 0.5)' },
80
+ '50%': { opacity: '0.8', boxShadow: '0 0 40px rgba(255, 107, 107, 0.8)' },
81
+ },
82
+ shimmer: {
83
+ '0%': { backgroundPosition: '-200% 0' },
84
+ '100%': { backgroundPosition: '200% 0' },
85
+ },
86
+ },
87
+ },
88
+ },
89
+ plugins: [],
90
+ }
91
+
frontend/tsconfig.app.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "compilerOptions": {
3
+ "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
4
+ "target": "ES2022",
5
+ "useDefineForClassFields": true,
6
+ "lib": ["ES2022", "DOM", "DOM.Iterable"],
7
+ "module": "ESNext",
8
+ "types": ["vite/client"],
9
+ "skipLibCheck": true,
10
+
11
+ /* Bundler mode */
12
+ "moduleResolution": "bundler",
13
+ "allowImportingTsExtensions": true,
14
+ "verbatimModuleSyntax": true,
15
+ "moduleDetection": "force",
16
+ "noEmit": true,
17
+ "jsx": "react-jsx",
18
+
19
+ /* Linting */
20
+ "strict": true,
21
+ "noUnusedLocals": true,
22
+ "noUnusedParameters": true,
23
+ "erasableSyntaxOnly": true,
24
+ "noFallthroughCasesInSwitch": true,
25
+ "noUncheckedSideEffectImports": true
26
+ },
27
+ "include": ["src"]
28
+ }
frontend/tsconfig.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "compilerOptions": {
3
+ "target": "ES2020",
4
+ "useDefineForClassFields": true,
5
+ "lib": ["ES2020", "DOM", "DOM.Iterable"],
6
+ "module": "ESNext",
7
+ "skipLibCheck": true,
8
+ "moduleResolution": "bundler",
9
+ "allowImportingTsExtensions": true,
10
+ "resolveJsonModule": true,
11
+ "isolatedModules": true,
12
+ "noEmit": true,
13
+ "jsx": "react-jsx",
14
+ "strict": true,
15
+ "noUnusedLocals": true,
16
+ "noUnusedParameters": true,
17
+ "noFallthroughCasesInSwitch": true,
18
+ "baseUrl": ".",
19
+ "paths": {
20
+ "@/*": ["./src/*"]
21
+ }
22
+ },
23
+ "include": ["src"],
24
+ "references": [{ "path": "./tsconfig.node.json" }]
25
+ }
frontend/tsconfig.node.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "compilerOptions": {
3
+ "composite": true,
4
+ "skipLibCheck": true,
5
+ "module": "ESNext",
6
+ "moduleResolution": "bundler",
7
+ "allowSyntheticDefaultImports": true
8
+ },
9
+ "include": ["vite.config.ts"]
10
+ }
frontend/vite.config.ts ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Vite build configuration for the React frontend.
import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';
import path from 'path';

export default defineConfig({
  plugins: [react()],
  resolve: {
    alias: {
      // Lets source files import via "@/..." instead of relative paths into src/
      '@': path.resolve(__dirname, './src'),
    },
  },
  server: {
    // Dev server port; the FastAPI backend runs separately on 4000
    port: 3000,
    proxy: {
      // Forward /api requests to the backend during development so the
      // frontend can use same-origin URLs
      '/api': {
        target: 'http://localhost:4000',
        changeOrigin: true,
      },
    },
  },
});
main.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ FastAPI Backend for React Video Editor
3
+ Handles video generation, image processing, and API integrations
4
+ """
5
+
6
+ from fastapi import FastAPI, HTTPException, Request, Response
7
+ from fastapi.middleware.cors import CORSMiddleware
8
+ from fastapi.responses import StreamingResponse, JSONResponse
9
+ from fastapi.staticfiles import StaticFiles
10
+ from contextlib import asynccontextmanager
11
+ import uvicorn
12
+ import os
13
+ from dotenv import load_dotenv
14
+
15
+ from api.video_generation import router as video_router
16
+ from api.image_service import router as image_router
17
+ from api.frame_extraction import router as frame_router
18
+ from api.prompt_generation import router as prompt_router
19
+ from api.video_export import router as export_router
20
+ from api.replicate_service import router as replicate_router
21
+ from api.whisper_service import router as whisper_router
22
+ from api.auth import router as auth_router
23
+ from utils.storage import cleanup_old_files
24
+
25
# Load environment variables
load_dotenv('.env.local')


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: prepare storage and report configuration on
    startup, purge stale files on shutdown."""
    print("🚀 Starting FastAPI server...")
    print(f"📁 Storage directory: {os.path.join(os.getcwd(), 'storage')}")

    # Ensure the storage tree exists before any request handling begins
    for subdir in ('storage/images', 'storage/videos'):
        os.makedirs(subdir, exist_ok=True)

    # Report which external API integrations are configured
    if os.getenv('KIE_API_KEY'):
        print("✅ KIE_API_KEY configured")
    else:
        print("⚠️ Warning: KIE_API_KEY not set. Video generation will fail.")
        print(" Get your API key at: https://kie.ai/api-key")

    if os.getenv('VITE_GEMINI_API_KEY'):
        print("✅ GEMINI_API_KEY configured")

    if os.getenv('OPENAI_API_KEY'):
        print("✅ OPENAI_API_KEY configured (GPT-4o prompt generation)")

    if os.getenv('REPLICATE_API_TOKEN'):
        print("✅ REPLICATE_API_TOKEN configured")
    else:
        print("⚠️ Warning: REPLICATE_API_TOKEN not set. Replicate video generation will fail.")

    # Announce dev/prod mode (dev limits segment generation elsewhere)
    if os.getenv('ENVIRONMENT', 'dev').lower() in ('dev', 'development'):
        print("🧪 DEV MODE: Segment generation limited to 2 segments")
    else:
        print("🚀 PROD MODE: Generating all segments")

    yield

    # Shutdown: remove aged artifacts from the storage directories
    print("🛑 Shutting down FastAPI server...")
    cleanup_old_files()
75
+
76
# Create FastAPI app
app = FastAPI(
    title="React Video Editor API",
    description="Python backend for video generation and processing",
    version="1.0.0",
    lifespan=lifespan,
)

# CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, specify exact origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Register every API router under the shared /api prefix.
# Order matters only insofar as auth comes first, matching the original wiring.
for _router in (
    auth_router,
    video_router,
    image_router,
    frame_router,
    prompt_router,
    export_router,
    replicate_router,
    whisper_router,
):
    app.include_router(_router, prefix="/api")
102
+
103
# Health check endpoints (must be before catch-all route)
@app.get("/health")
async def health_check():
    """Report service health, dev/prod mode, and which API keys are set."""
    environment = os.getenv('ENVIRONMENT', 'dev').lower()
    dev = environment in ('dev', 'development')
    # One *_configured flag per external integration, in a fixed order
    configured = {
        f"{label}_configured": bool(os.getenv(var))
        for label, var in (
            ("kie_api", 'KIE_API_KEY'),
            ("replicate_api", 'REPLICATE_API_TOKEN'),
            ("gemini_api", 'VITE_GEMINI_API_KEY'),
            ("openai_api", 'OPENAI_API_KEY'),
        )
    }
    return {
        "status": "healthy",
        "environment": environment,
        "is_dev_mode": dev,
        "max_segments": 2 if dev else None,
        **configured,
    }
119
+
120
# Serve static files (frontend) in production
frontend_dist_path = os.path.join(os.getcwd(), "frontend", "dist")
if os.path.exists(frontend_dist_path):
    # Serve the built frontend's hashed asset bundle
    app.mount("/assets", StaticFiles(directory=os.path.join(frontend_dist_path, "assets")), name="assets")

    @app.get("/")
    async def root():
        """Root endpoint - serve frontend in production, API info in dev."""
        index_path = os.path.join(frontend_dist_path, "index.html")
        if os.path.exists(index_path):
            from fastapi.responses import FileResponse
            return FileResponse(index_path)
        # Fallback if frontend not built
        return {
            "status": "healthy",
            "service": "React Video Editor API",
            "version": "1.0.0"
        }

    @app.get("/{full_path:path}")
    async def serve_frontend(full_path: str):
        """SPA catch-all: serve index.html for any non-reserved route.

        Reserved paths (API, docs, health) return 404 from this handler so
        the routes registered earlier remain authoritative for them.
        """
        # BUGFIX: compare the FIRST path segment exactly instead of using
        # startswith(), which wrongly 404'd frontend routes whose names
        # merely share a prefix (e.g. /apiary, /documents, /redocly).
        first_segment = full_path.split("/", 1)[0]
        if first_segment in {"api", "docs", "redoc"} or full_path in {"openapi.json", "health"}:
            raise HTTPException(status_code=404, detail="Not found")

        # Serve index.html for frontend routes; client-side router takes over
        index_path = os.path.join(frontend_dist_path, "index.html")
        if os.path.exists(index_path):
            from fastapi.responses import FileResponse
            return FileResponse(index_path)
        raise HTTPException(status_code=404, detail="Frontend not found")
else:
    # Root endpoint when frontend not built
    @app.get("/")
    async def root():
        """Root endpoint - health check."""
        return {
            "status": "healthy",
            "service": "React Video Editor API",
            "version": "1.0.0"
        }
169
+
170
if __name__ == "__main__":
    # Hugging Face Spaces injects PORT; otherwise honor SERVER_PORT, then 4000.
    port = int(os.getenv('PORT') or os.getenv('SERVER_PORT', 4000))
    uvicorn.run("main:app", host="0.0.0.0", port=port, reload=True, log_level="info")
180
+
models/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Models package
2
+