Upload 14 files
Browse files- README.md +350 -303
- __pycache__/api_utils.cpython-312.pyc +0 -0
- __pycache__/app.cpython-312.pyc +0 -0
- __pycache__/config.cpython-312.pyc +0 -0
- __pycache__/content_processor.cpython-312.pyc +0 -0
- __pycache__/ui.cpython-312.pyc +0 -0
- api_utils.py +71 -0
- config.py +29 -0
- content_processor.py +57 -0
- image_generator.py +179 -0
- main.py +54 -0
- metadata_generator.py +108 -0
- requirements.txt +2 -1
- ui.py +224 -0
README.md
CHANGED
|
@@ -1,303 +1,350 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
#
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
#
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
-
|
| 23 |
-
-
|
| 24 |
-
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
###
|
| 80 |
-
|
| 81 |
-
-
|
| 82 |
-
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
**
|
| 119 |
-
|
| 120 |
-
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
``
|
| 168 |
-
|
| 169 |
-
###
|
| 170 |
-
|
| 171 |
-
```bash
|
| 172 |
-
#
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
#
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
##
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
-
|
| 197 |
-
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
##
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
##
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AI Thumbnail & Metadata Generator
|
| 2 |
+
|
| 3 |
+
A modular Python application that generates YouTube thumbnails and metadata using AI models.
|
| 4 |
+
|
| 5 |
+
## File Structure
|
| 6 |
+
|
| 7 |
+
```
|
| 8 |
+
thumbnail_generator/
|
| 9 |
+
βββ main.py # Main entry point
|
| 10 |
+
βββ config.py # Configuration and constants
|
| 11 |
+
βββ ui.py # Gradio user interface
|
| 12 |
+
βββ api_utils.py # API utilities and token testing
|
| 13 |
+
βββ metadata_generator.py # Text/metadata generation
|
| 14 |
+
βββ image_generator.py # Image/thumbnail generation
|
| 15 |
+
βββ content_processor.py # Main content processing logic
|
| 16 |
+
βββ requirements.txt # Python dependencies
|
| 17 |
+
βββ README.md # This file
|
| 18 |
+
```
|
| 19 |
+
|
| 20 |
+
## Features
|
| 21 |
+
|
| 22 |
+
- π€ AI-powered metadata generation using OpenRouter API
|
| 23 |
+
- π¨ Dual thumbnail generation using Hugging Face FLUX models
|
| 24 |
+
- π― 6 different visual styles (Realistic, Cartoon, Cinematic, etc.)
|
| 25 |
+
- βοΈ Custom text overlay editor
|
| 26 |
+
- π₯ JSON export for metadata
|
| 27 |
+
- π Separate API key management for each service
|
| 28 |
+
|
| 29 |
+
## Setup
|
| 30 |
+
|
| 31 |
+
1. Install dependencies:
|
| 32 |
+
```bash
|
| 33 |
+
pip install -r requirements.txt
|
| 34 |
+
```
|
| 35 |
+
|
| 36 |
+
2. Run the application:
|
| 37 |
+
```bash
|
| 38 |
+
python main.py
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
3. Open your browser to `http://localhost:7860`
|
| 42 |
+
|
| 43 |
+
4. Set your API keys in the UI:
|
| 44 |
+
- OpenRouter API key for text generation
|
| 45 |
+
- Hugging Face API key for image generation
|
| 46 |
+
|
| 47 |
+
## API Keys
|
| 48 |
+
|
| 49 |
+
- **OpenRouter**: Get your key at [https://openrouter.ai/](https://openrouter.ai/)
|
| 50 |
+
- **Hugging Face**: Get your key at [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens)
|
| 51 |
+
|
| 52 |
+
## Usage
|
| 53 |
+
|
| 54 |
+
1. Enter your video topic
|
| 55 |
+
2. Choose thumbnail style and text model
|
| 56 |
+
3. Add custom text overlay (optional)
|
| 57 |
+
4. Generate content and download!
|
| 58 |
+
|
| 59 |
+
## Modules
|
| 60 |
+
|
| 61 |
+
- **config.py**: Contains all configuration constants and global variables
|
| 62 |
+
- **api_utils.py**: Handles API interactions and token validation
|
| 63 |
+
- **metadata_generator.py**: Generates YouTube titles, descriptions, and tags
|
| 64 |
+
- **image_generator.py**: Creates thumbnails with various styles and overlays
|
| 65 |
+
- **content_processor.py**: Orchestrates the entire content generation process
|
| 66 |
+
- **ui.py**: Gradio interface for user interaction
|
| 67 |
+
- **main.py**: Entry point that launches the application
|
| 68 |
+
- **π± Responsive UI**: Clean Gradio interface with side-by-side thumbnail comparison
|
| 69 |
+
- **π₯ JSON Export**: Download complete metadata package for easy integration
|
| 70 |
+
- **β‘ Cloud-Based**: No GPU required - runs entirely on Hugging Face Inference API
|
| 71 |
+
- **π Progress Tracking**: Real-time generation progress indicators
|
| 72 |
+
|
| 73 |
+
## π Live Demo
|
| 74 |
+
|
| 75 |
+
Try the app on Hugging Face Spaces: [Your Space URL Here]
|
| 76 |
+
|
| 77 |
+
## π οΈ Installation & Setup
|
| 78 |
+
|
| 79 |
+
### Prerequisites
|
| 80 |
+
- Python 3.8+
|
| 81 |
+
- Hugging Face account (for API access)
|
| 82 |
+
- Internet connection
|
| 83 |
+
|
| 84 |
+
### Local Installation
|
| 85 |
+
|
| 86 |
+
1. **Clone the repository**
|
| 87 |
+
```bash
|
| 88 |
+
git clone https://github.com/yourusername/ai-thumbnail-generator.git
|
| 89 |
+
cd ai-thumbnail-generator
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
2. **Install dependencies**
|
| 93 |
+
```bash
|
| 94 |
+
pip install -r requirements.txt
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
3. **Set up Hugging Face token (Optional but recommended)**
|
| 98 |
+
```bash
|
| 99 |
+
export HF_TOKEN="your_hugging_face_token_here"
|
| 100 |
+
```
|
| 101 |
+
Or set it as an environment variable in your system.
|
| 102 |
+
|
| 103 |
+
4. **Run the application**
|
| 104 |
+
```bash
|
| 105 |
+
python main.py
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
5. **Open your browser**
|
| 109 |
+
Navigate to `http://localhost:7860` to use the app
|
| 110 |
+
|
| 111 |
+
## π Usage
|
| 112 |
+
|
| 113 |
+
### Basic Workflow
|
| 114 |
+
|
| 115 |
+
1. **Enter a Topic**: Type your video topic (e.g., "AI in Healthcare", "Cooking Tips")
|
| 116 |
+
2. **Choose Settings**:
|
| 117 |
+
- **Style**: Select from 6 visual styles (Realistic, Cartoon, etc.)
|
| 118 |
+
- **Text Model**: Choose between Zephyr-7B (faster) or Mistral-7B (more creative)
|
| 119 |
+
3. **Add Text Overlay** (Optional):
|
| 120 |
+
- Enter custom title text for thumbnails
|
| 121 |
+
- Choose font style (Bold, Elegant, Clean)
|
| 122 |
+
4. **Generate**: Click "Generate Content" and watch the progress
|
| 123 |
+
5. **Review & Edit**: Modify generated metadata if needed
|
| 124 |
+
6. **Download**: Select your preferred thumbnail and download the complete package as JSON
|
| 125 |
+
|
| 126 |
+
### Advanced Features
|
| 127 |
+
|
| 128 |
+
- **Dual Generation**: Get both fast (SD-Turbo) and quality (SD-1.5) thumbnails
|
| 129 |
+
- **Style Prompting**: Each style uses carefully crafted prompts for optimal results
|
| 130 |
+
- **Text Overlay**: Automatically positions text with shadows for visibility
|
| 131 |
+
- **Metadata Export**: Complete YouTube-ready package with title, description, and tags
|
| 132 |
+
|
| 133 |
+
## π― Example Topics
|
| 134 |
+
|
| 135 |
+
### Tech & AI
|
| 136 |
+
- "Future of Artificial Intelligence"
|
| 137 |
+
- "Best Programming Languages 2024"
|
| 138 |
+
- "Cybersecurity for Beginners"
|
| 139 |
+
|
| 140 |
+
### Lifestyle & Health
|
| 141 |
+
- "Morning Routine for Productivity"
|
| 142 |
+
- "Healthy Meal Prep Ideas"
|
| 143 |
+
- "Home Workout Without Equipment"
|
| 144 |
+
|
| 145 |
+
### Business & Finance
|
| 146 |
+
- "Passive Income Strategies"
|
| 147 |
+
- "Social Media Marketing Tips"
|
| 148 |
+
- "Cryptocurrency Explained"
|
| 149 |
+
|
| 150 |
+
### Education & Skills
|
| 151 |
+
- "Learn Python in 30 Days"
|
| 152 |
+
- "Photography Composition Rules"
|
| 153 |
+
- "Public Speaking Confidence"
|
| 154 |
+
|
| 155 |
+
## π§ Configuration
|
| 156 |
+
|
| 157 |
+
### Hugging Face API Setup
|
| 158 |
+
|
| 159 |
+
The app uses Hugging Face Inference API for all AI generation:
|
| 160 |
+
|
| 161 |
+
**Text Models:**
|
| 162 |
+
- `HuggingFaceH4/zephyr-7b-beta` (Default - Fast & Reliable)
|
| 163 |
+
- `mistralai/Mistral-7B-Instruct-v0.2` (Creative & Detailed)
|
| 164 |
+
|
| 165 |
+
**Image Models:**
|
| 166 |
+
- `stabilityai/sd-turbo` (Fast generation - ~3 seconds)
|
| 167 |
+
- `runwayml/stable-diffusion-v1-5` (Quality generation - ~10 seconds)
|
| 168 |
+
|
| 169 |
+
### Environment Variables
|
| 170 |
+
|
| 171 |
+
```bash
|
| 172 |
+
HF_TOKEN=your_token_here # Optional but recommended for rate limits
|
| 173 |
+
```
|
| 174 |
+
|
| 175 |
+
## π Project Structure
|
| 176 |
+
|
| 177 |
+
```
|
| 178 |
+
ai-thumbnail-generator/
|
| 179 |
+
βββ app.py # Main Gradio application
|
| 180 |
+
βββ app.yaml # Hugging Face Spaces config
|
| 181 |
+
βββ requirements.txt # Python dependencies
|
| 182 |
+
βββ README.md # This file
|
| 183 |
+
βββ .gitignore # Git ignore patterns
|
| 184 |
+
```
|
| 185 |
+
|
| 186 |
+
## π Deployment
|
| 187 |
+
|
| 188 |
+
### Hugging Face Spaces (Recommended)
|
| 189 |
+
|
| 190 |
+
1. **Create a new Space** on [Hugging Face Spaces](https://huggingface.co/spaces)
|
| 191 |
+
2. **Choose settings**:
|
| 192 |
+
- SDK: `gradio`
|
| 193 |
+
- Hardware: `CPU basic` (sufficient for API calls)
|
| 194 |
+
3. **Upload files** or connect your GitHub repository
|
| 195 |
+
4. **Set secrets** (if using authenticated API):
|
| 196 |
+
- Go to Settings β Repository secrets
|
| 197 |
+
- Add: `HF_TOKEN` = your_hugging_face_token
|
| 198 |
+
5. **Deploy**: The app will automatically build and deploy
|
| 199 |
+
|
| 200 |
+
### Docker (Optional)
|
| 201 |
+
|
| 202 |
+
```dockerfile
|
| 203 |
+
FROM python:3.9-slim
|
| 204 |
+
|
| 205 |
+
WORKDIR /app
|
| 206 |
+
COPY requirements.txt .
|
| 207 |
+
RUN pip install -r requirements.txt
|
| 208 |
+
|
| 209 |
+
COPY . .
|
| 210 |
+
EXPOSE 7860
|
| 211 |
+
ENV HF_TOKEN=""
|
| 212 |
+
|
| 213 |
+
CMD ["python", "app.py"]
|
| 214 |
+
```
|
| 215 |
+
|
| 216 |
+
### Local Development
|
| 217 |
+
|
| 218 |
+
```bash
|
| 219 |
+
# Development server with auto-reload
|
| 220 |
+
python app.py
|
| 221 |
+
```
|
| 222 |
+
|
| 223 |
+
## π€ Models & Performance
|
| 224 |
+
|
| 225 |
+
### Text Generation
|
| 226 |
+
- **Zephyr-7B**: ~2-3 seconds, excellent for titles and descriptions
|
| 227 |
+
- **Mistral-7B**: ~3-5 seconds, more creative and detailed output
|
| 228 |
+
|
| 229 |
+
### Image Generation
|
| 230 |
+
- **SD-Turbo**: ~3-5 seconds, good quality for rapid iteration
|
| 231 |
+
- **SD-1.5**: ~8-12 seconds, higher quality for final thumbnails
|
| 232 |
+
|
| 233 |
+
### Rate Limits
|
| 234 |
+
- **Free Tier**: ~100 requests/hour per model
|
| 235 |
+
- **Pro Tier**: Higher limits with HF_TOKEN authentication
|
| 236 |
+
|
| 237 |
+
## β οΈ System Requirements
|
| 238 |
+
|
| 239 |
+
### Minimal Requirements
|
| 240 |
+
- **CPU**: Any modern processor
|
| 241 |
+
- **RAM**: 2GB available
|
| 242 |
+
- **Storage**: 1GB free space
|
| 243 |
+
- **Network**: Stable internet connection
|
| 244 |
+
- **Python**: 3.8+
|
| 245 |
+
|
| 246 |
+
### No GPU Required!
|
| 247 |
+
All processing happens on Hugging Face's cloud infrastructure.
|
| 248 |
+
|
| 249 |
+
## π Troubleshooting
|
| 250 |
+
|
| 251 |
+
### Common Issues
|
| 252 |
+
|
| 253 |
+
1. **API Rate Limits**
|
| 254 |
+
- Solution: Set up HF_TOKEN for higher limits
|
| 255 |
+
- Alternative: Wait for rate limit reset
|
| 256 |
+
|
| 257 |
+
2. **Model Loading Delays**
|
| 258 |
+
- Cause: Cold start on Hugging Face servers
|
| 259 |
+
- Solution: Wait 10-20 seconds, models will warm up
|
| 260 |
+
|
| 261 |
+
3. **Image Generation Failures**
|
| 262 |
+
- Check internet connection
|
| 263 |
+
- Verify topic isn't blocked by content filters
|
| 264 |
+
- Try different style options
|
| 265 |
+
|
| 266 |
+
4. **Text Overlay Issues**
|
| 267 |
+
- Ensure text isn't too long (< 50 characters recommended)
|
| 268 |
+
- Try different font styles
|
| 269 |
+
- Check image dimensions
|
| 270 |
+
|
| 271 |
+
### Debug Mode
|
| 272 |
+
|
| 273 |
+
Set environment variable for detailed logging:
|
| 274 |
+
```bash
|
| 275 |
+
export DEBUG=1
|
| 276 |
+
python app.py
|
| 277 |
+
```
|
| 278 |
+
|
| 279 |
+
## π€ Contributing
|
| 280 |
+
|
| 281 |
+
We welcome contributions! Here's how to get started:
|
| 282 |
+
|
| 283 |
+
1. **Fork the Project**
|
| 284 |
+
2. **Create Feature Branch** (`git checkout -b feature/AmazingFeature`)
|
| 285 |
+
3. **Make Changes** and test locally
|
| 286 |
+
4. **Commit Changes** (`git commit -m 'Add some AmazingFeature'`)
|
| 287 |
+
5. **Push to Branch** (`git push origin feature/AmazingFeature`)
|
| 288 |
+
6. **Open Pull Request**
|
| 289 |
+
|
| 290 |
+
### Development Setup
|
| 291 |
+
|
| 292 |
+
```bash
|
| 293 |
+
git clone https://github.com/yourusername/ai-thumbnail-generator.git
|
| 294 |
+
cd ai-thumbnail-generator
|
| 295 |
+
pip install -r requirements.txt
|
| 296 |
+
export HF_TOKEN="your_token"
|
| 297 |
+
python main.py
|
| 298 |
+
```
|
| 299 |
+
|
| 300 |
+
## π API Reference
|
| 301 |
+
|
| 302 |
+
### Main Functions
|
| 303 |
+
|
| 304 |
+
```python
|
| 305 |
+
# Generate metadata
|
| 306 |
+
metadata = generate_metadata(topic, model_choice="zephyr")
|
| 307 |
+
|
| 308 |
+
# Generate thumbnails
|
| 309 |
+
thumb1, thumb2 = generate_thumbnails(topic, style, text_overlay)
|
| 310 |
+
|
| 311 |
+
# Add text overlay
|
| 312 |
+
image_with_text = add_text_overlay(image, title_text, style="bold")
|
| 313 |
+
|
| 314 |
+
# Create download package
|
| 315 |
+
json_data = create_download_data(topic, metadata, thumb1, thumb2, selected)
|
| 316 |
+
```
|
| 317 |
+
|
| 318 |
+
## π License
|
| 319 |
+
|
| 320 |
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
| 321 |
+
|
| 322 |
+
## π Acknowledgments
|
| 323 |
+
|
| 324 |
+
- [Hugging Face](https://huggingface.co/) for the amazing Inference API and model hosting
|
| 325 |
+
- [Gradio](https://gradio.app/) for the intuitive UI framework
|
| 326 |
+
- [Stability AI](https://stability.ai/) for Stable Diffusion models
|
| 327 |
+
- [HuggingFace H4](https://huggingface.co/HuggingFaceH4) for the Zephyr model
|
| 328 |
+
- [Mistral AI](https://mistral.ai/) for the Mistral language model
|
| 329 |
+
|
| 330 |
+
## π Support & Community
|
| 331 |
+
|
| 332 |
+
- **Issues**: [GitHub Issues](https://github.com/yourusername/ai-thumbnail-generator/issues)
|
| 333 |
+
- **Discussions**: [GitHub Discussions](https://github.com/yourusername/ai-thumbnail-generator/discussions)
|
| 334 |
+
- **Twitter**: [@yourusername](https://twitter.com/yourusername)
|
| 335 |
+
- **Email**: your-email@example.com
|
| 336 |
+
|
| 337 |
+
## π Features Roadmap
|
| 338 |
+
|
| 339 |
+
- [ ] **Video preview generation**
|
| 340 |
+
- [ ] **Batch processing for multiple topics**
|
| 341 |
+
- [ ] **Custom style training**
|
| 342 |
+
- [ ] **A/B testing for thumbnails**
|
| 343 |
+
- [ ] **Analytics integration**
|
| 344 |
+
- [ ] **Mobile app version**
|
| 345 |
+
|
| 346 |
+
---
|
| 347 |
+
|
| 348 |
+
β **If you find this project helpful, please give it a star on GitHub!** β
|
| 349 |
+
|
| 350 |
+
**Built with β€οΈ for the creator community**
|
__pycache__/api_utils.cpython-312.pyc
ADDED
|
Binary file (3.96 kB). View file
|
|
|
__pycache__/app.cpython-312.pyc
ADDED
|
Binary file (22.8 kB). View file
|
|
|
__pycache__/config.cpython-312.pyc
ADDED
|
Binary file (1.15 kB). View file
|
|
|
__pycache__/content_processor.cpython-312.pyc
ADDED
|
Binary file (2.27 kB). View file
|
|
|
__pycache__/ui.cpython-312.pyc
ADDED
|
Binary file (10.9 kB). View file
|
|
|
api_utils.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import time
|
| 3 |
+
from config import current_hf_token
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def test_hf_token(token):
    """Validate a Hugging Face API token.

    First calls the ``whoami-v2`` endpoint to confirm the token identifies
    a user, then probes the Inference Providers router to check that the
    token also has inference permissions.

    Args:
        token: Hugging Face API token string; may be None or blank.

    Returns:
        A human-readable status message (emoji-prefixed) describing the
        validation result. Never raises.
    """
    if not token or not token.strip():
        return "❌ Please enter a token first"

    url = "https://huggingface.co/api/whoami-v2"
    headers = {"Authorization": f"Bearer {token.strip()}"}
    try:
        resp = requests.get(url, headers=headers, timeout=10)
        if resp.status_code == 200:
            data = resp.json()
            user_name = data.get('name', 'unknown')

            # Test inference providers access
            test_url = "https://router.huggingface.co/v1/models"
            test_resp = requests.get(test_url, headers=headers, timeout=10)

            if test_resp.status_code == 200:
                return f"✅ Token valid! User: {user_name} - Inference Providers access confirmed!"
            else:
                return f"⚠️ Token valid for user {user_name}, but may lack Inference Providers permissions. Check token settings."
        elif resp.status_code == 401:
            return "❌ Invalid token. Please check and try again."
        else:
            return f"❌ Error: {resp.status_code} {resp.text[:100]}"
    # Narrowed from a blanket `except Exception`: the message says
    # "Connection error", so catch network/HTTP failures (RequestException)
    # plus the JSON-decode ValueError that resp.json() can raise.
    except (requests.RequestException, ValueError) as e:
        return f"❌ Connection error: {e}"
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def query_hf_api(api_url, payload, max_retries=3):
    """Query the Hugging Face Inference API with simple retry logic.

    Retries on transient conditions (503 model loading, 429 rate limit,
    other errors, network exceptions) and gives up immediately on 404
    (model missing) and 401 (bad credentials).

    Args:
        api_url: Full inference endpoint URL.
        payload: JSON-serializable request body.
        max_retries: Maximum number of attempts before giving up.

    Returns:
        The successful ``requests.Response``, or None if all attempts failed.
    """
    # BUGFIX: read the token from the config module at call time.
    # The previous module-level `from config import current_hf_token`
    # snapshotted the value at import, so a token entered later through
    # the UI (which mutates config.current_hf_token) was never used.
    import config
    token = config.current_hf_token
    headers = {"Authorization": f"Bearer {token}"} if token else {}
    print(f"🔗 Calling API: {api_url}")
    print(f"🔑 Using Hugging Face token: {'Yes' if token else 'No (public access)'}")

    for attempt in range(max_retries):
        try:
            response = requests.post(api_url, headers=headers, json=payload, timeout=60)
            print(f"📡 Response status: {response.status_code}")

            if response.status_code == 200:
                print("✅ API call successful!")
                return response
            elif response.status_code == 404:
                print("❌ Model not found (404). Model may not be available.")
                break  # Don't retry 404 errors
            elif response.status_code == 503:
                # Cold start: the hosted model is still loading.
                print(f"⏳ Model loading, waiting... (attempt {attempt + 1})")
                time.sleep(15)
            elif response.status_code == 429:
                print(f"⏱️ Rate limited, waiting... (attempt {attempt + 1})")
                time.sleep(20)
            elif response.status_code == 401:
                print("🔒 Authentication error. Check your token.")
                break  # Don't retry auth errors
            else:
                print(f"❌ API Error {response.status_code}: {response.text[:500]}")
                time.sleep(5)
        # Narrowed from bare `Exception`: only network-level failures are
        # retryable; a programming error should surface, not be retried.
        except requests.RequestException as e:
            print(f"❌ Request failed (attempt {attempt + 1}): {e}")
            time.sleep(5)

    print("💥 All API attempts failed!")
    return None
|
config.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Configuration file for AI Thumbnail & Metadata Generator

# API configuration
# Endpoints for the two external services: OpenRouter (text) and the
# Hugging Face Inference API (images). The image URL is a prefix — the
# model id is appended by the caller (see image_generator.generate_image).
OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
HF_IMAGE_API_URL = "https://api-inference.huggingface.co/models/"

# Model configurations
# Keys are the names exposed to the rest of the app; values are model ids.
TEXT_MODELS = {
    "deepseek-r1-free": "deepseek/deepseek-r1:free"  # Use deepseek model for OpenRouter
}

IMAGE_MODELS = {
    "fast": "black-forest-labs/FLUX.1-schnell",  # Fast FLUX model
    "quality": "black-forest-labs/FLUX.1-dev"  # Quality FLUX model
}

# Style prompts for different thumbnail styles
# These fragments are appended to the topic when building image prompts
# (see image_generator.generate_thumbnails).
STYLE_PROMPTS = {
    "Realistic": "photorealistic, high quality, professional photography, detailed, sharp focus",
    "Cartoon": "cartoon style, animated, colorful, fun, illustrated, digital art, vibrant",
    "Cinematic": "cinematic lighting, dramatic, movie poster style, epic, atmospheric, high contrast",
    "Minimalist": "minimalist design, clean, simple, modern, elegant, white background, typography",
    "Gaming": "gaming style, neon colors, futuristic, glowing effects, action-packed",
    "Tech": "tech style, sleek, modern, blue and white, professional, corporate"
}

# Global variables for API keys
# NOTE(review): these are mutated at runtime (presumably set from the UI —
# confirm against ui.py). Consumers must read them as attributes
# (`config.current_hf_token`), NOT via `from config import current_hf_token`,
# which binds a stale copy at import time.
current_hf_token = ""
current_openrouter_token = ""
|
content_processor.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def create_download_data(topic, metadata, thumbnail1, thumbnail2, selected_thumbnail):
    """Build the downloadable JSON package from the generated metadata text.

    The metadata text is expected to contain lines labelled ``TITLE:``,
    ``DESCRIPTION:`` and ``TAGS:``; missing labels yield empty fields.
    """
    # Table-driven extraction of the labelled fields.
    fields = {"TITLE:": "", "DESCRIPTION:": "", "TAGS:": ""}
    for raw_line in metadata.split('\n'):
        for prefix in fields:
            if raw_line.startswith(prefix):
                fields[prefix] = raw_line.replace(prefix, '').strip()

    tags = fields["TAGS:"]
    package = {
        "topic": topic,
        "generated_at": datetime.now().isoformat(),
        "metadata": {
            "title": fields["TITLE:"],
            "description": fields["DESCRIPTION:"],
            # Tags arrive comma-separated; an empty string means no tags.
            "tags": tags.split(', ') if tags else []
        },
        "selected_thumbnail": selected_thumbnail,
        "thumbnails_generated": 2
    }

    return json.dumps(package, indent=2)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def process_content(topic, style, model_choice, text_overlay, overlay_style):
    """Main function to generate all content.

    Orchestrates the full pipeline for one topic: metadata generation,
    dual-thumbnail generation, and the downloadable JSON package.

    Returns:
        Tuple of (metadata_text, thumbnail1, thumbnail2, download_json).
        On empty topic, returns an error message with None thumbnails
        and an empty download string.
    """
    # Guard clause: a blank/whitespace-only topic short-circuits.
    if not topic.strip():
        return "Please enter a topic!", None, None, ""

    print(f"Processing: {topic}")

    # Generate metadata
    # NOTE(review): generators are imported lazily inside the function —
    # presumably to avoid circular imports between modules; confirm.
    print("Generating metadata...")
    from metadata_generator import generate_metadata
    metadata = generate_metadata(topic, model_choice)

    print("Generating thumbnails...")
    from image_generator import generate_thumbnails
    thumbnail1, thumbnail2 = generate_thumbnails(topic, style, text_overlay, overlay_style)

    print("Complete!")

    # Create download data
    # "thumbnail1" is the default selection baked into the package here.
    download_data = create_download_data(topic, metadata, thumbnail1, thumbnail2, "thumbnail1")

    return metadata, thumbnail1, thumbnail2, download_data
|
image_generator.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import io
|
| 2 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 3 |
+
from config import IMAGE_MODELS, HF_IMAGE_API_URL, STYLE_PROMPTS
|
| 4 |
+
from api_utils import query_hf_api
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def create_placeholder_image(prompt):
    """Create a placeholder image when generation fails.

    Produces a 1280x720 cornflower-blue canvas with a title, the truncated
    topic, and a failure note drawn horizontally centered, so the UI always
    has an image to display.

    Args:
        prompt: The prompt that failed to generate (truncated to 50 chars).

    Returns:
        A PIL.Image.Image — never None; falls back to a plain solid-color
        image if even the text drawing fails.
    """
    try:
        img = Image.new('RGB', (1280, 720), color=(100, 149, 237))
        draw = ImageDraw.Draw(img)

        # Narrowed from bare `except:`: truetype raises OSError when the
        # font file is missing (common on Linux without Arial installed).
        try:
            font = ImageFont.truetype("arial.ttf", 36)
        except OSError:
            try:
                font = ImageFont.load_default()
            except Exception:
                font = None

        def _draw_centered(text, y, color):
            # Horizontally center `text` on the 1280px-wide canvas at `y`.
            bbox = draw.textbbox((0, 0), text, font=font)
            x = (1280 - (bbox[2] - bbox[0])) // 2
            draw.text((x, y), text, fill=color, font=font)

        # Without a font we skip the text entirely (matches prior behavior).
        if font:
            _draw_centered("Placeholder Thumbnail", 200, 'white')
            _draw_centered(f"Topic: {prompt[:50]}...", 300, 'lightgray')
            _draw_centered("AI generation failed - using placeholder", 400, 'yellow')

        return img
    except Exception as e:
        print(f"Error creating placeholder: {e}")
        # Ultimate fallback - solid color
        return Image.new('RGB', (1280, 720), color=(100, 149, 237))
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def add_text_overlay(image, title_text, style="bold"):
    """Add a centered text overlay to an image.

    Word-wraps `title_text` so each line fits within ~80% of the image
    width, then draws up to three lines in the top third of the image
    with a 4-way black offset shadow for readability.

    Args:
        image: Source PIL image; None is passed through as None.
        title_text: Text to overlay.
        style: "bold", "elegant", or anything else for "clean" — selects
            font family and relative size.

    Returns:
        A new PIL.Image.Image with the overlay (the input is not mutated),
        or None if `image` is None.
    """
    if image is None:
        return None

    # Work on a copy so the caller's image is untouched.
    img = image.copy()
    draw = ImageDraw.Draw(img)
    width, height = img.size

    # Font size scales with image width; each branch sets font_size before
    # attempting to load the font, so font_size is always defined below.
    # Narrowed from bare `except:`: truetype raises OSError when the font
    # file is not installed.
    try:
        if style == "bold":
            font_size = max(24, width // 20)
            font = ImageFont.truetype("arial.ttf", font_size)
        elif style == "elegant":
            font_size = max(20, width // 25)
            font = ImageFont.truetype("times.ttf", font_size)
        else:  # clean
            font_size = max(18, width // 30)
            font = ImageFont.truetype("calibri.ttf", font_size)
    except OSError:
        font = ImageFont.load_default()

    # Greedy word-wrap: keep appending words while the rendered line stays
    # under 80% of the image width.
    words = title_text.split()
    lines = []
    current_line = ""
    max_width = width * 0.8

    for word in words:
        test_line = current_line + " " + word if current_line else word
        bbox = draw.textbbox((0, 0), test_line, font=font)
        if bbox[2] - bbox[0] < max_width:
            current_line = test_line
        else:
            if current_line:
                lines.append(current_line)
            current_line = word

    if current_line:
        lines.append(current_line)

    # Position text in the top third of the image.
    y_start = height // 6
    line_height = font_size + 5

    for i, line in enumerate(lines[:3]):  # Max 3 lines
        bbox = draw.textbbox((0, 0), line, font=font)
        text_width = bbox[2] - bbox[0]
        x = (width - text_width) // 2
        y = y_start + (i * line_height)

        # Draw a black shadow/outline at four diagonal offsets so white
        # text stays legible on bright backgrounds.
        for dx, dy in [(-2, -2), (-2, 2), (2, -2), (2, 2)]:
            draw.text((x + dx, y + dy), line, fill='black', font=font)

        # Draw main text on top.
        draw.text((x, y), line, fill='white', font=font)

    return img
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def generate_image(prompt, model_choice="fast"):
    """Generate an image for *prompt* via the Hugging Face Inference API.

    ``model_choice`` selects an entry in IMAGE_MODELS ("fast" or "quality").
    Returns a PIL Image on success; on any failure (HTTP error, undecodable
    payload, unexpected exception) a placeholder image is returned instead
    of raising, so callers never have to handle errors themselves.
    """
    try:
        endpoint = HF_IMAGE_API_URL + IMAGE_MODELS[model_choice]

        print(f"Attempting to generate image with {model_choice}...")
        response = query_hf_api(endpoint, {"inputs": prompt})

        # Anything other than a 200 (including a None response) is a failure.
        if not (response and response.status_code == 200):
            print(f"❌ Image generation failed for {model_choice}")
            return create_placeholder_image(prompt)

        # The API returns raw image bytes; decode them into a PIL Image.
        try:
            result = Image.open(io.BytesIO(response.content))
        except Exception as img_error:
            print(f"❌ Error opening image: {img_error}")
            return create_placeholder_image(prompt)

        print(f"✅ Image generated successfully with {model_choice}")
        return result

    except Exception as e:
        print(f"❌ Error generating image with {model_choice}: {e}")
        return create_placeholder_image(prompt)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def generate_thumbnails(topic, style, text_overlay="", overlay_style="bold"):
    """Produce two candidate YouTube thumbnails for *topic*.

    One image comes from the "fast" model and one from the "quality" model,
    each with a slightly different composition hint. Both are resized to
    YouTube's 1280x720 (16:9) format, and an optional text overlay is burned
    in. Returns a (fast_thumbnail, quality_thumbnail) pair; either element
    may be None if generation failed entirely.
    """
    print(f"Generating thumbnails for: {topic} in {style} style")

    # Unknown styles fall back to the "Realistic" prompt fragment.
    style_prompt = STYLE_PROMPTS.get(style, STYLE_PROMPTS["Realistic"])

    base_prompt = (
        f"YouTube thumbnail, {topic}, {style_prompt}, eye-catching, "
        f"professional, high contrast, vibrant colors, no text"
    )

    # Two composition variants so the candidates don't look identical.
    variants = (
        ("fast", f"{base_prompt}, centered composition", "Generating thumbnail 1 (Fast)..."),
        ("quality", f"{base_prompt}, dynamic angle, creative layout", "Generating thumbnail 2 (Quality)..."),
    )

    target_size = (1280, 720)  # YouTube's recommended 16:9 thumbnail size
    results = []
    for model_key, full_prompt, banner in variants:
        print(banner)
        thumb = generate_image(full_prompt, model_key)
        if thumb:
            thumb = thumb.resize(target_size, Image.Resampling.LANCZOS)
            if text_overlay.strip():
                thumb = add_text_overlay(thumb, text_overlay, overlay_style)
        results.append(thumb)

    return results[0], results[1]
|
main.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
AI Thumbnail & Metadata Generator
|
| 4 |
+
Main entry point for the application
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
from config import current_hf_token, current_openrouter_token
|
| 9 |
+
from ui import create_gradio_ui
|
| 10 |
+
|
| 11 |
+
# Load environment variables from .env file
|
| 12 |
+
try:
|
| 13 |
+
from dotenv import load_dotenv
|
| 14 |
+
load_dotenv()
|
| 15 |
+
except ImportError:
|
| 16 |
+
print("β οΈ python-dotenv not installed. Using system environment variables only.")
|
| 17 |
+
|
| 18 |
+
def main():
    """Launch the AI Thumbnail & Metadata Generator Gradio app.

    Seeds the API tokens from the environment (HF_TOKEN / OPENROUTER_TOKEN)
    when present by assigning them onto the live ``config`` module, so every
    module that reads ``config.<token>`` at call time sees them. Then builds
    the Gradio UI and starts the server on 0.0.0.0:7860.
    """
    print("🚀 Starting AI Thumbnail & Metadata Generator...")
    print("💡 Using OpenRouter API for text generation and Hugging Face for images")
    print("⚠️ Note: Set API keys in the app UI for authenticated access")

    # Single import, hoisted out of the branches (the original imported
    # `config` separately inside each `if`).
    import config

    hf_env_token = os.getenv('HF_TOKEN')
    openrouter_env_token = os.getenv('OPENROUTER_TOKEN')

    if hf_env_token:
        print("✅ Hugging Face token detected from environment")
        config.current_hf_token = hf_env_token

    if openrouter_env_token:
        print("✅ OpenRouter token detected from environment")
        config.current_openrouter_token = openrouter_env_token

    if not hf_env_token and not openrouter_env_token:
        print("⚠️ No API tokens found in environment - use the app UI to set them")

    # Create and launch the Gradio app (blocking call).
    app = create_gradio_ui()
    app.launch(
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )

if __name__ == "__main__":
    main()
|
metadata_generator.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import random
|
| 3 |
+
import re
|
| 4 |
+
from config import TEXT_MODELS, OPENROUTER_API_URL, current_openrouter_token
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def create_smart_fallback_metadata(topic):
    """Build template-based YouTube metadata for *topic*.

    Used when AI generation is unavailable or fails. Picks a random title
    and description template and assembles up to seven tags (slugged topic,
    the topic's individual words, plus three random generic tags). Returns
    a single string in the "TITLE/DESCRIPTION/TAGS" format.
    """
    title_options = [
        f"Ultimate {topic} Guide",
        f"{topic} Secrets Revealed",
        f"Master {topic} in Minutes",
        f"{topic} Pro Tips & Tricks",
        f"Everything About {topic}",
        f"{topic} Made Simple",
        f"The Complete {topic} Tutorial",
    ]

    description_options = [
        f"Learn everything you need to know about {topic} in this comprehensive guide. Perfect for beginners and experts alike!",
        f"Discover the best {topic} techniques and strategies. Transform your skills with these proven methods!",
        f"Master {topic} with this step-by-step tutorial. Get professional results every time!",
        f"Unlock the secrets of {topic}. This detailed guide covers everything from basics to advanced techniques!",
        f"The ultimate {topic} resource you've been looking for. Clear explanations and practical examples included!",
    ]

    # Tags: hyphenated topic slug, then each topic word, then 3 random
    # generic tags; capped at seven entries overall.
    generic_pool = ["tutorial", "guide", "tips", "howto", "learn", "beginner", "expert", "professional"]
    tag_list = [topic.lower().replace(" ", "-")]
    tag_list.extend(topic.lower().split())
    tag_list.extend(random.sample(generic_pool, 3))

    return f"""TITLE: {random.choice(title_options)}
DESCRIPTION: {random.choice(description_options)}
TAGS: {", ".join(tag_list[:7])}"""
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def generate_metadata(topic, model_choice="deepseek-r1-free"):
    """Generate a YouTube title/description/tags block for *topic* via OpenRouter.

    ``model_choice`` keys into TEXT_MODELS. On any failure path (no API key,
    HTTP error, empty response, exception) the function degrades gracefully
    to create_smart_fallback_metadata(), so callers always get a
    "TITLE/DESCRIPTION/TAGS" formatted string.
    """
    try:
        print(f"🤖 Generating metadata with {model_choice} for: {topic}")
        model_name = TEXT_MODELS[model_choice]

        # BUG FIX: the original declared `global current_openrouter_token`,
        # which referred to this module's import-time snapshot of the config
        # value — keys set later (via the UI or main()) were never visible
        # and the function always fell back. Read the live config attribute
        # at call time instead.
        import config
        token = getattr(config, "current_openrouter_token", "")
        if not token:
            print("⚠️ No OpenRouter API key provided, using smart fallback response")
            return create_smart_fallback_metadata(topic)

        # OpenRouter expects an OpenAI-style chat-completions payload.
        messages = [
            {
                "role": "user",
                "content": f"Create a YouTube title, description, and tags for a video about {topic}. Format: TITLE: [title] DESCRIPTION: [description] TAGS: [tags]"
            }
        ]
        payload = {
            "model": model_name,
            "messages": messages,
            "max_tokens": 200,
            "temperature": 0.7
        }
        headers = {
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json"
        }

        print(f"🌐 Calling OpenRouter API for {model_name}")
        response = requests.post(OPENROUTER_API_URL, headers=headers, json=payload, timeout=60)
        print(f"📡 Response status: {response.status_code}")

        if response.status_code == 200:
            result = response.json()
            print(f"📦 Raw API response: {result}")
            if "choices" in result and len(result["choices"]) > 0:
                message = result["choices"][0]["message"]
                generated_text = message.get("content", "")
                if generated_text.strip():
                    print(f"✅ Generated text: {generated_text[:200]}...")
                    return generated_text.strip()

                # Some reasoning models return empty `content` but populate
                # `reasoning`; try to salvage title/description/tags from it.
                reasoning_text = message.get("reasoning", "")
                if reasoning_text.strip():
                    print(f"⚠️ Using reasoning as fallback: {reasoning_text[:200]}...")
                    title_match = re.search(r'title.*?"([^"]+)"', reasoning_text, re.IGNORECASE)
                    description_match = re.search(r'description.*?"([^"]+)"', reasoning_text, re.IGNORECASE)
                    tags_match = re.search(r'tags.*?([\w, ]+)', reasoning_text, re.IGNORECASE)
                    title = title_match.group(1) if title_match else f"{topic}: AI Insights"
                    description = description_match.group(1) if description_match else f"Explore how AI is transforming {topic}. Discover trends, breakthroughs, and real-world examples in this video."
                    # BUG FIX: the original default tags hard-coded "healthcare"
                    # (leftover from a demo topic); keep the defaults topic-neutral.
                    tags = tags_match.group(1) if tags_match else f"ai, {topic.lower().replace(' ', '-')}, technology, innovation"
                    return f"TITLE: {title}\nDESCRIPTION: {description}\nTAGS: {tags}"

                print("❌ No usable content or reasoning in response; using smart fallback.")
                return create_smart_fallback_metadata(topic)
            else:
                print("❌ No choices in response")
        elif response.status_code == 401:
            print("🔐 Authentication error. Invalid API key.")
        elif response.status_code == 403:
            print("🚫 Forbidden. API key may not have required permissions.")
        else:
            print(f"❌ API Error {response.status_code}: {response.text[:500]}")

        print("⚠️ Using smart fallback response...")
        return create_smart_fallback_metadata(topic)

    except Exception as e:
        print(f"❌ Error generating metadata: {e}")
        return create_smart_fallback_metadata(topic)
|
requirements.txt
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
gradio>=4.0.0
|
| 2 |
requests
|
| 3 |
Pillow
|
| 4 |
-
python-dateutil
|
|
|
|
|
|
| 1 |
gradio>=4.0.0
|
| 2 |
requests
|
| 3 |
Pillow
|
| 4 |
+
python-dateutil
|
| 5 |
+
python-dotenv
|
ui.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import json
|
| 3 |
+
from config import STYLE_PROMPTS, current_hf_token, current_openrouter_token
|
| 4 |
+
from api_utils import test_hf_token
|
| 5 |
+
from content_processor import process_content
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def create_gradio_ui():
    """Create and return the Gradio interface.

    BUG FIX: the original Set/Clear-key callbacks declared
    ``global current_hf_token`` / ``current_openrouter_token``, which only
    rebound *this module's* copies of the names imported from config — the
    config module itself (read by the API helpers at call time) never saw
    the keys, so generation always behaved as if no key were set. The
    callbacks now assign directly onto the live ``config`` module.
    """
    import config  # assign tokens on the live module so other modules see them

    with gr.Blocks(title="AI Thumbnail & Metadata Generator", theme=gr.themes.Soft()) as app:
        gr.Markdown("""
        ## 🔑 API Key Management
        **⚠️ Important:**
        - You need a valid OpenRouter API key for text generation (metadata).
        - You need a valid Hugging Face API key for image generation (thumbnails).

        Get your OpenRouter API key at [https://openrouter.ai/](https://openrouter.ai/) (sign up and generate your key)
        Get your Hugging Face API key at [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens)
        """)

        with gr.Row():
            openrouter_token_input = gr.Textbox(label="OpenRouter API Key", placeholder="Paste your OpenRouter API key here", value="", type="password")
            set_openrouter_token_btn = gr.Button("Set OpenRouter Key", variant="primary")
            clear_openrouter_token_btn = gr.Button("Clear OpenRouter Key", variant="secondary")
            test_openrouter_token_btn = gr.Button("Test OpenRouter Key", variant="secondary")
            openrouter_token_status = gr.Textbox(label="OpenRouter Key Status", interactive=False)

        with gr.Row():
            hf_token_input = gr.Textbox(label="Hugging Face API Key", placeholder="Paste your Hugging Face API key here", value="", type="password")
            set_hf_token_btn = gr.Button("Set HF Key", variant="primary")
            clear_hf_token_btn = gr.Button("Clear HF Key", variant="secondary")
            test_hf_token_btn = gr.Button("Test HF Key", variant="secondary")
            hf_token_status = gr.Textbox(label="HF Key Status", interactive=False)

        # --- Key management callbacks: write to config, not module globals ---
        def set_openrouter_token_callback(token):
            config.current_openrouter_token = token.strip()
            return "✅ OpenRouter API key set!"

        def clear_openrouter_token_callback():
            config.current_openrouter_token = ""
            return "🗑️ OpenRouter API key cleared."

        def set_hf_token_callback(token):
            config.current_hf_token = token.strip()
            return "✅ Hugging Face API key set!"

        def clear_hf_token_callback():
            config.current_hf_token = ""
            return "🗑️ Hugging Face API key cleared."

        set_openrouter_token_btn.click(fn=set_openrouter_token_callback, inputs=openrouter_token_input, outputs=openrouter_token_status)
        clear_openrouter_token_btn.click(fn=clear_openrouter_token_callback, inputs=None, outputs=openrouter_token_status)
        # Lightweight client-side sanity check only (no network call).
        test_openrouter_token_btn.click(fn=lambda k: "✅ Key format looks valid!" if k and len(k) > 10 else "❌ Please enter a valid OpenRouter API key.", inputs=openrouter_token_input, outputs=openrouter_token_status)

        set_hf_token_btn.click(fn=set_hf_token_callback, inputs=hf_token_input, outputs=hf_token_status)
        clear_hf_token_btn.click(fn=clear_hf_token_callback, inputs=None, outputs=hf_token_status)
        test_hf_token_btn.click(fn=test_hf_token, inputs=hf_token_input, outputs=hf_token_status)

        gr.Markdown("""
        # 🎨 AI Thumbnail & Metadata Generator

        Generate catchy YouTube titles, descriptions, tags, and stunning thumbnails using AI models!

        **✨ Features:**
        - 🤖 AI-powered metadata generation
        - 🎨 Dual thumbnail generation (Fast & Quality)
        - 🎯 6 different visual styles
        - ✏️ Custom text overlay editor
        - 📥 Download metadata as JSON

        **How to use:**
        1. Enter your video topic
        2. Choose thumbnail style and text model
        3. Add custom text overlay (optional)
        4. Generate content and download!
        """)

        with gr.Row():
            with gr.Column(scale=1):
                # Input section
                gr.Markdown("### 📝 Input Settings")

                topic_input = gr.Textbox(
                    label="Video Topic",
                    placeholder="e.g., AI in Healthcare, Cooking Tips, Travel Photography...",
                    lines=2
                )

                with gr.Row():
                    style_dropdown = gr.Dropdown(
                        choices=list(STYLE_PROMPTS.keys()),
                        value="Realistic",
                        label="Thumbnail Style"
                    )

                    model_dropdown = gr.Dropdown(
                        choices=["deepseek-r1-free"],
                        value="deepseek-r1-free",
                        label="Text Model"
                    )

                gr.Markdown("### ✏️ Text Overlay (Optional)")

                text_overlay_input = gr.Textbox(
                    label="Custom Title Text",
                    placeholder="Leave empty to use generated title...",
                    lines=2
                )

                overlay_style_dropdown = gr.Dropdown(
                    choices=["bold", "elegant", "clean"],
                    value="bold",
                    label="Text Style"
                )

                generate_btn = gr.Button("🚀 Generate Content", variant="primary", size="lg")

                # Metadata section
                gr.Markdown("### 📋 Generated Metadata")
                metadata_output = gr.Textbox(
                    label="YouTube Title, Description & Tags",
                    lines=8,
                    placeholder="Generated metadata will appear here...",
                    info="✏️ Edit this text before using it for your video!"
                )

                # Download section - simplified
                gr.Markdown("### 📥 Export Data")
                export_output = gr.Textbox(
                    label="📋 Copy this JSON data",
                    lines=5,
                    placeholder="JSON export will appear here...",
                    info="Copy this data to save your metadata"
                )

                # Hidden holder for the raw JSON produced by process_content.
                download_data = gr.Textbox(
                    label="Metadata JSON",
                    lines=3,
                    placeholder="JSON data will appear here...",
                    visible=False
                )

            with gr.Column(scale=2):
                # Thumbnails section
                gr.Markdown("### 🖼️ Generated Thumbnails")

                with gr.Row():
                    thumbnail1_output = gr.Image(
                        label="🚀 Fast Generation (FLUX.1-schnell)",
                        type="pil",
                        show_download_button=True
                    )
                    thumbnail2_output = gr.Image(
                        label="🌟 Quality Generation (FLUX.1-dev)",
                        type="pil",
                        show_download_button=True
                    )

                # Thumbnail selection for JSON export
                with gr.Row():
                    select_thumb1_btn = gr.Button("📥 Use Fast Thumbnail", size="sm")
                    select_thumb2_btn = gr.Button("📥 Use Quality Thumbnail", size="sm")

        # Main generation pipeline: metadata + both thumbnails + raw JSON.
        generate_btn.click(
            fn=process_content,
            inputs=[topic_input, style_dropdown, model_dropdown, text_overlay_input, overlay_style_dropdown],
            outputs=[metadata_output, thumbnail1_output, thumbnail2_output, download_data]
        )

        # Thumbnail selection for export: annotate the stored JSON with the
        # chosen thumbnail and surface it in the visible export box.
        def update_export_data(topic, metadata, download_data, selected):
            if download_data:
                try:
                    data = json.loads(download_data)
                    data["selected_thumbnail"] = selected
                    return json.dumps(data, indent=2)
                except Exception as e:
                    print(f"Export error: {e}")
                    return f"Error creating export: {e}"
            return ""

        select_thumb1_btn.click(
            fn=lambda t, m, d: update_export_data(t, m, d, "fast_thumbnail"),
            inputs=[topic_input, metadata_output, download_data],
            outputs=[export_output]
        )

        select_thumb2_btn.click(
            fn=lambda t, m, d: update_export_data(t, m, d, "quality_thumbnail"),
            inputs=[topic_input, metadata_output, download_data],
            outputs=[export_output]
        )

        # Example inputs
        gr.Markdown("""
        ### 💡 Example Topics to Try:

        **Tech & AI:**
        - "Future of Artificial Intelligence"
        - "Best Programming Languages 2024"
        - "Cybersecurity for Beginners"

        **Lifestyle & Health:**
        - "Morning Routine for Productivity"
        - "Healthy Meal Prep Ideas"
        - "Home Workout Without Equipment"

        **Business & Finance:**
        - "Passive Income Strategies"
        - "Social Media Marketing Tips"
        - "Cryptocurrency Explained"

        **Education & Skills:**
        - "Learn Python in 30 Days"
        - "Photography Composition Rules"
        - "Public Speaking Confidence"
        """)

    return app
|