sd commited on
Upload 44 files
Browse files- .gitattributes +6 -35
- .gitignore +41 -0
- API_DOCUMENTATION.md +118 -0
- Dockerfile +64 -0
- README.md +26 -10
- frame.jpg +3 -0
- id-maker/.gemini/settings.json +11 -0
- id-maker/.gitignore +19 -0
- id-maker/DOCUMENTATION.md +148 -0
- id-maker/Dockerfile +56 -0
- id-maker/EL_HELAL_Studio.spec +92 -0
- id-maker/README.md +10 -0
- id-maker/assets/My_Style.cube +3 -0
- id-maker/assets/frame-1.png +3 -0
- id-maker/assets/frame-2.png +3 -0
- id-maker/assets/frame-525d94d5.png +3 -0
- id-maker/assets/logo.png +3 -0
- id-maker/bucket.py +50 -0
- id-maker/config/settings.json +66 -0
- id-maker/context.md +82 -0
- id-maker/core/My_Style.cube +3 -0
- id-maker/core/__init__.py +1 -0
- id-maker/core/color_steal.py +377 -0
- id-maker/core/crop.py +119 -0
- id-maker/core/layout_engine.py +362 -0
- id-maker/core/pipeline.py +112 -0
- id-maker/core/process_images.py +214 -0
- id-maker/core/restoration.py +96 -0
- id-maker/core/retouch.py +158 -0
- id-maker/core/trained_curves.npz +3 -0
- id-maker/core/white_bg.py +61 -0
- id-maker/desktop_launcher.py +59 -0
- id-maker/docker-compose.yml +17 -0
- id-maker/gui/gui.py +415 -0
- id-maker/gui/main.py +31 -0
- id-maker/requirements.txt +20 -0
- id-maker/tools/problems.md +102 -0
- id-maker/tools/scan_fonts.py +31 -0
- id-maker/tools/verify_layout.py +37 -0
- id-maker/web/server.py +486 -0
- id-maker/web/web_storage/index.html +1483 -0
- requirements.txt +32 -0
- result.jpg +3 -0
- start.sh +9 -0
.gitattributes
CHANGED
|
@@ -1,35 +1,6 @@
|
|
| 1 |
-
*.
|
| 2 |
-
*.
|
| 3 |
-
*.
|
| 4 |
-
*.
|
| 5 |
-
*.
|
| 6 |
-
*.
|
| 7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.cube filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitignore
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.py[cod]
|
| 3 |
+
*$py.class
|
| 4 |
+
.env
|
| 5 |
+
.venv
|
| 6 |
+
env/
|
| 7 |
+
venv/
|
| 8 |
+
ENV/
|
| 9 |
+
.DS_Store
|
| 10 |
+
.vscode/
|
| 11 |
+
.idea/
|
| 12 |
+
*.log
|
| 13 |
+
*.egg-info/
|
| 14 |
+
*.egg/
|
| 15 |
+
.eggs/
|
| 16 |
+
build/
|
| 17 |
+
dist/
|
| 18 |
+
|
| 19 |
+
# Fonts that are downloaded by Dockerfile
|
| 20 |
+
*.ttf
|
| 21 |
+
*.TTF
|
| 22 |
+
|
| 23 |
+
# id-maker storage
|
| 24 |
+
id-maker/storage/uploads/*
|
| 25 |
+
id-maker/storage/processed/*
|
| 26 |
+
id-maker/storage/results/*
|
| 27 |
+
!id-maker/storage/uploads/.gitkeep
|
| 28 |
+
!id-maker/storage/processed/.gitkeep
|
| 29 |
+
!id-maker/storage/results/.gitkeep
|
| 30 |
+
|
| 31 |
+
# codeformer storage
|
| 32 |
+
codeformer/static/uploads/*
|
| 33 |
+
codeformer/static/results/*
|
| 34 |
+
!codeformer/static/uploads/.gitkeep
|
| 35 |
+
!codeformer/static/results/.gitkeep
|
| 36 |
+
codeformer/weights/*
|
| 37 |
+
codeformer/inputs/*
|
| 38 |
+
codeformer/output/*
|
| 39 |
+
|
| 40 |
+
# root temp
|
| 41 |
+
tmp/
|
API_DOCUMENTATION.md
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CodeFormer API Documentation
|
| 2 |
+
|
| 3 |
+
This document describes the programmatic interface for the CodeFormer Face Restoration service.
|
| 4 |
+
|
| 5 |
+
## Base URL
|
| 6 |
+
The API is accessible at:
|
| 7 |
+
`https://esmailx50-job.hf.space` (or your specific Hugging Face Space URL)
|
| 8 |
+
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
## 1. Process Images
|
| 12 |
+
Processes one or more images for face restoration and enhancement.
|
| 13 |
+
|
| 14 |
+
- **Endpoint:** `/api/process`
|
| 15 |
+
- **Method:** `POST`
|
| 16 |
+
- **Consumes:** `multipart/form-data` OR `application/json`
|
| 17 |
+
|
| 18 |
+
### Parameters
|
| 19 |
+
| Parameter | Type | Default | Description |
|
| 20 |
+
| :--- | :--- | :--- | :--- |
|
| 21 |
+
| `fidelity` | float | `0.5` | Fidelity weight ($w$). Range [0, 1]. Lower is more "hallucinated" detail, higher is more identity preservation. |
|
| 22 |
+
| `upscale` | int | `2` | Final upscaling factor. Supported: `1`, `2`, `4`. |
|
| 23 |
+
| `background_enhance` | bool | `false` | Enhance the background using Real-ESRGAN. |
|
| 24 |
+
| `face_upsample` | bool | `false` | Upsample restored faces using Real-ESRGAN. |
|
| 25 |
+
| `return_base64` | bool | `false` | If true, includes the processed image as a base64 string in the JSON response. |
|
| 26 |
+
|
| 27 |
+
### Input Formats
|
| 28 |
+
|
| 29 |
+
#### A. Multipart Form Data (`multipart/form-data`)
|
| 30 |
+
Useful for uploading files directly.
|
| 31 |
+
- `image`: One or more image files (as a list).
|
| 32 |
+
- Other parameters as form fields.
|
| 33 |
+
|
| 34 |
+
**Example (curl):**
|
| 35 |
+
```bash
|
| 36 |
+
curl -X POST
|
| 37 |
+
-F "image=@my_photo.jpg"
|
| 38 |
+
-F "fidelity=0.7"
|
| 39 |
+
-F "background_enhance=true"
|
| 40 |
+
https://esmailx50-job.hf.space/api/process
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
#### B. JSON (`application/json`)
|
| 44 |
+
Useful for sending base64-encoded image data.
|
| 45 |
+
- `image_base64`: A single base64 string (with or without data URI prefix).
|
| 46 |
+
- `images_base64`: (Optional) A list of base64 strings for batch processing.
|
| 47 |
+
- Other parameters as JSON keys.
|
| 48 |
+
|
| 49 |
+
**Example (curl):**
|
| 50 |
+
```bash
|
| 51 |
+
curl -X POST
|
| 52 |
+
-H "Content-Type: application/json"
|
| 53 |
+
-d '{
|
| 54 |
+
"image_base64": "data:image/png;base64,iVBORw0KG...",
|
| 55 |
+
"fidelity": 0.5,
|
| 56 |
+
"return_base64": true
|
| 57 |
+
}'
|
| 58 |
+
https://esmailx50-job.hf.space/api/process
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
### Success Response
|
| 62 |
+
```json
|
| 63 |
+
{
|
| 64 |
+
"status": "success",
|
| 65 |
+
"count": 1,
|
| 66 |
+
"results": [
|
| 67 |
+
{
|
| 68 |
+
"original_name": "image.png",
|
| 69 |
+
"filename": "api_result_uuid.png",
|
| 70 |
+
"image_url": "https://.../static/results/api_result_uuid.png",
|
| 71 |
+
"image_base64": "iVBORw0KG..." // Only if return_base64 was true
|
| 72 |
+
}
|
| 73 |
+
]
|
| 74 |
+
}
|
| 75 |
+
```
|
| 76 |
+
|
| 77 |
+
### Error Response
|
| 78 |
+
```json
|
| 79 |
+
{
|
| 80 |
+
"status": "error",
|
| 81 |
+
"message": "Detailed error message here"
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
---
|
| 86 |
+
|
| 87 |
+
## 2. Health Check
|
| 88 |
+
Checks if the service is online and returns the compute device being used.
|
| 89 |
+
|
| 90 |
+
- **Endpoint:** `/api/health`
|
| 91 |
+
- **Method:** `GET`
|
| 92 |
+
|
| 93 |
+
**Success Response:**
|
| 94 |
+
```json
|
| 95 |
+
{
|
| 96 |
+
"status": "online",
|
| 97 |
+
"device": "cuda" // or "cpu"
|
| 98 |
+
}
|
| 99 |
+
```
|
| 100 |
+
|
| 101 |
+
---
|
| 102 |
+
|
| 103 |
+
## CORS & Integration
|
| 104 |
+
Cross-Origin Resource Sharing (CORS) is enabled for all routes. This allows you to call the API directly from browser-based applications (React, Vue, etc.) without hitting "Same-Origin Policy" blocks.
|
| 105 |
+
|
| 106 |
+
**Javascript Example (Fetch):**
|
| 107 |
+
```javascript
|
| 108 |
+
const formData = new FormData();
|
| 109 |
+
formData.append('image', fileInput.files[0]);
|
| 110 |
+
formData.append('fidelity', '0.5');
|
| 111 |
+
|
| 112 |
+
const response = await fetch('https://esmailx50-job.hf.space/api/process', {
|
| 113 |
+
method: 'POST',
|
| 114 |
+
body: formData
|
| 115 |
+
});
|
| 116 |
+
const data = await response.json();
|
| 117 |
+
console.log(data.results[0].image_url);
|
| 118 |
+
```
|
Dockerfile
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM pytorch/pytorch:2.1.0-cuda11.8-cudnn8-devel
|
| 2 |
+
|
| 3 |
+
WORKDIR /app
|
| 4 |
+
|
| 5 |
+
ENV DEBIAN_FRONTEND=noninteractive
|
| 6 |
+
|
| 7 |
+
# Install system dependencies for both apps
|
| 8 |
+
# Includes OpenCV deps, Font rendering, and build tools
|
| 9 |
+
RUN apt-get update && apt-get install -y \
|
| 10 |
+
wget \
|
| 11 |
+
fontconfig \
|
| 12 |
+
libfontconfig1 \
|
| 13 |
+
libgl1 \
|
| 14 |
+
libglib2.0-0 \
|
| 15 |
+
libsm6 \
|
| 16 |
+
libxext6 \
|
| 17 |
+
libxrender1 \
|
| 18 |
+
libasound2 \
|
| 19 |
+
fonts-dejavu-core \
|
| 20 |
+
fonts-liberation \
|
| 21 |
+
fonts-noto-core \
|
| 22 |
+
fonts-noto-extra \
|
| 23 |
+
fonts-noto-color-emoji \
|
| 24 |
+
libraqm0 \
|
| 25 |
+
libfreetype6 \
|
| 26 |
+
libfribidi0 \
|
| 27 |
+
libharfbuzz0b \
|
| 28 |
+
libprotobuf-dev \
|
| 29 |
+
protobuf-compiler \
|
| 30 |
+
git \
|
| 31 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 32 |
+
|
| 33 |
+
# Copy root requirements and install
|
| 34 |
+
COPY requirements.txt .
|
| 35 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 36 |
+
|
| 37 |
+
# Copy all application code
|
| 38 |
+
COPY . .
|
| 39 |
+
|
| 40 |
+
# Download high-quality fonts for id-maker to bypass Git LFS issues
|
| 41 |
+
RUN mkdir -p id-maker/assets && \
|
| 42 |
+
wget -O id-maker/assets/tahomabd.ttf "https://raw.githubusercontent.com/Esmaill1/color-stealer/main/tahomabd.ttf" && \
|
| 43 |
+
wget -O id-maker/assets/TYBAH.TTF "https://raw.githubusercontent.com/Esmaill1/color-stealer/main/TYBAH.TTF" && \
|
| 44 |
+
wget -O id-maker/assets/arialbd.ttf "https://raw.githubusercontent.com/Esmaill1/color-stealer/main/arialbd.ttf"
|
| 45 |
+
|
| 46 |
+
# Set up storage directories and permissions
|
| 47 |
+
WORKDIR /app
|
| 48 |
+
RUN mkdir -p id-maker/storage/uploads id-maker/storage/processed id-maker/storage/results && \
|
| 49 |
+
chmod -R 777 /app
|
| 50 |
+
|
| 51 |
+
# Create the startup script to run both services
|
| 52 |
+
COPY start.sh .
|
| 53 |
+
RUN chmod +x start.sh
|
| 54 |
+
|
| 55 |
+
# Configure Hugging Face user
|
| 56 |
+
RUN useradd -m -u 1000 user
|
| 57 |
+
USER user
|
| 58 |
+
ENV HOME=/home/user \
|
| 59 |
+
PATH=/home/user/.local/bin:$PATH
|
| 60 |
+
|
| 61 |
+
EXPOSE 7860
|
| 62 |
+
|
| 63 |
+
# Start both services
|
| 64 |
+
CMD ["./start.sh"]
|
README.md
CHANGED
|
@@ -1,10 +1,26 @@
|
|
| 1 |
-
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
-
sdk: docker
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: ID Maker with CodeFormer
|
| 3 |
+
emoji: 🪪
|
| 4 |
+
colorFrom: yellow
|
| 5 |
+
colorTo: blue
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
+
pinned: false
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
# ID Maker Studio with Integrated CodeFormer
|
| 12 |
+
|
| 13 |
+
This Space combines **ID Maker Studio** (FastAPI) and **CodeFormer Face Restoration** (Flask) into a single high-performance pipeline for student ID production.
|
| 14 |
+
|
| 15 |
+
## Features
|
| 16 |
+
- **Surgical Retouching**: Removes blemishes while preserving skin texture.
|
| 17 |
+
- **AI Background Removal**: Using BiRefNet (RMBG-2.0).
|
| 18 |
+
- **Face Restoration**: Integrated CodeFormer API.
|
| 19 |
+
- **Arabic Typography**: Full support for RTL script and professional fonts.
|
| 20 |
+
- **Dynamic Layout**: Custom 300 DPI print-ready ID sheets.
|
| 21 |
+
|
| 22 |
+
## How it works
|
| 23 |
+
The `id-maker` web interface (port 7860) communicates with an internal `codeformer` API (port 8001) running in the same container.
|
| 24 |
+
|
| 25 |
+
---
|
| 26 |
+
*Created by Esmail*
|
frame.jpg
ADDED
|
Git LFS Details
|
id-maker/.gemini/settings.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"mcpServers": {
|
| 3 |
+
"context7": {
|
| 4 |
+
"command": "npx -y @upstash/context7-mcp --api-key ",
|
| 5 |
+
"args": [],
|
| 6 |
+
"env": {
|
| 7 |
+
"CONTEXT7_API_KEY": "ctx7sk-cfad07f8-ab56-4227-96ad-d2a79de35059"
|
| 8 |
+
}
|
| 9 |
+
}
|
| 10 |
+
}
|
| 11 |
+
}
|
id-maker/.gitignore
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.py[cod]
|
| 3 |
+
*$py.class
|
| 4 |
+
.env
|
| 5 |
+
.venv
|
| 6 |
+
env/
|
| 7 |
+
venv/
|
| 8 |
+
ENV/
|
| 9 |
+
storage/uploads/*
|
| 10 |
+
storage/processed/*
|
| 11 |
+
storage/results/*
|
| 12 |
+
!storage/uploads/.gitkeep
|
| 13 |
+
!storage/processed/.gitkeep
|
| 14 |
+
!storage/results/.gitkeep
|
| 15 |
+
.DS_Store
|
| 16 |
+
.vscode/
|
| 17 |
+
.idea/
|
| 18 |
+
*.log
|
| 19 |
+
venu/
|
id-maker/DOCUMENTATION.md
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 📜 ID Maker Studio: Technical Master Documentation
|
| 2 |
+
|
| 3 |
+
This document serves as the comprehensive technical map for the **EL HELAL Studio Photo Pipeline**.
|
| 4 |
+
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
## 🏗 High-Level Architecture
|
| 8 |
+
|
| 9 |
+
The system is a modular Python-based suite designed to automate the conversion of raw student portraits into professional, print-ready ID sheets. It bridges the gap between complex AI models and a production studio environment.
|
| 10 |
+
|
| 11 |
+
### 🧩 Component Breakdown
|
| 12 |
+
- **`/core` (The Brain):** Pure logic and AI processing. It is UI-agnostic and handles image math, landmark detection, and layout composition.
|
| 13 |
+
- **`/web` (The Primary Interface):** A modern FastAPI backend coupled with a localized Arabic (RTL) frontend for batch processing.
|
| 14 |
+
- **`/storage` (The Data):** Centralized storage for uploads, processed images, and final results.
|
| 15 |
+
- **`/config` (The Settings):** Stores `settings.json` for global configuration.
|
| 16 |
+
- **`/tools` (The Utilities):** Dev scripts, troubleshooting guides, and verification tools.
|
| 17 |
+
- **`/assets` (The Identity):** Centralized storage for branding assets (logo), typography (Arabic fonts), and color grading LUTs.
|
| 18 |
+
- **`/gui` (Legacy):** A Tkinter desktop wrapper for offline/workstation usage.
|
| 19 |
+
|
| 20 |
+
---
|
| 21 |
+
|
| 22 |
+
## 🚀 The 5-Step AI Pipeline
|
| 23 |
+
|
| 24 |
+
Every photo processed by the studio follows a strictly sequenced pipeline:
|
| 25 |
+
|
| 26 |
+
### 1. Auto-Crop & Face Detection (`crop.py`)
|
| 27 |
+
- **Technology:** OpenCV Haar Cascades.
|
| 28 |
+
- **Logic:** Detects the largest face, centers it, and calculates a 5:7 (4x6cm) aspect ratio crop.
|
| 29 |
+
- **Fallback:** Centers the crop if no face is detected to ensure the pipeline never breaks.
|
| 30 |
+
|
| 31 |
+
### 2. AI Background Removal (`process_images.py`)
|
| 32 |
+
- **Model:** **BiRefNet (RMBG-2.0)**.
|
| 33 |
+
- **Optimization:** Automatically detects and utilizes CUDA/GPU. In CPU environments (like HF Spaces), it uses dynamic quantization for speed.
|
| 34 |
+
- **Resilience:** Includes critical monkeypatches for `transformers 4.50+` to handle tied weights and meta-tensor materialization bugs.
|
| 35 |
+
|
| 36 |
+
### 3. Color Grading Style Transfer (`color_steal.py`)
|
| 37 |
+
- **Mechanism:** Analyzes "Before" and "After" pairs to learn R, G, and B curves.
|
| 38 |
+
- **Smoothing:** Uses **Savitzky-Golay filters** to prevent color banding.
|
| 39 |
+
- **Application:** Applies learned styles via vectorized NumPy operations for near-instant processing.
|
| 40 |
+
|
| 41 |
+
### 4. Surgical Retouching (`retouch.py`)
|
| 42 |
+
- **Landmarking:** Uses **MediaPipe Face Mesh** (468 points) to generate a precise skin mask, excluding eyes, lips, and hair.
|
| 43 |
+
- **Frequency Separation:** Splits the image into **High Frequency** (texture/pores) and **Low Frequency** (tone/color).
|
| 44 |
+
- **Blemish Removal:** Detects anomalies on the High-Freq layer and inpaints them using surrounding texture.
|
| 45 |
+
- **Result:** Pores and skin texture are 100% preserved; only defects are removed.
|
| 46 |
+
|
| 47 |
+
### 5. Layout Composition (`layout_engine.py`)
|
| 48 |
+
- **Rendering:** Composes a 300 DPI canvas for printing.
|
| 49 |
+
- **Localization:** Uses `arabic_reshaper` and `python-bidi` for correct Arabic script rendering.
|
| 50 |
+
- **Dynamic Assets:** Overlays IDs with specific offsets and studio branding (logos).
|
| 51 |
+
- **Customization:** Supports dynamic frame color selection (passed via API) for the large side panel.
|
| 52 |
+
|
| 53 |
+
---
|
| 54 |
+
|
| 55 |
+
## ⚙️ Configuration & Real-Time Tuning
|
| 56 |
+
|
| 57 |
+
The system is controlled by `core/settings.json`.
|
| 58 |
+
- **Hot Reloading:** The layout engine reloads this file on **every request**. You can adjust `id_font_size`, `grid_gap`, or `retouch_sensitivity` and see the changes in the next processed photo without restarting the server.
|
| 59 |
+
|
| 60 |
+
### 💾 Backup & Restoration
|
| 61 |
+
The system supports full state backup via the web interface.
|
| 62 |
+
- **Export:** Creates a ZIP file containing:
|
| 63 |
+
- Global `settings.json`.
|
| 64 |
+
- All custom assets (frames, logos) in `assets/`.
|
| 65 |
+
- Client-side preferences (theme, saved colors).
|
| 66 |
+
- **Import:** Restores the configuration and assets from a ZIP file and refreshes the client state.
|
| 67 |
+
|
| 68 |
+
---
|
| 69 |
+
|
| 70 |
+
## 🐍 Environment & Dependency Management
|
| 71 |
+
|
| 72 |
+
The project requires a carefully managed Python environment to avoid common AI library conflicts.
|
| 73 |
+
|
| 74 |
+
### Known Conflicts & Fixes
|
| 75 |
+
- **TensorFlow vs. Transformers:** Standard installations of `tensorflow` (especially nightly versions) conflict with `transformers` and `numpy 2.x`, causing `AttributeError: module 'numpy' has no attribute 'dtypes'` and Protobuf descriptor errors.
|
| 76 |
+
- **Resolution:** **Uninstall TensorFlow.** The pipeline is 100% PyTorch-based. Removing TensorFlow resolves these import crashes immediately.
|
| 77 |
+
- **Pinned Versions:**
|
| 78 |
+
- `numpy < 2.0.0`: Required for compatibility with `basicsr` and older `torchvision` utilities.
|
| 79 |
+
- `protobuf <= 3.20.3`: Prevents "Double Registration" errors in multi-model environments.
|
| 80 |
+
|
| 81 |
+
### Environment Setup (Conda)
|
| 82 |
+
```bash
|
| 83 |
+
conda create -n idmaker python=3.10
|
| 84 |
+
conda activate idmaker
|
| 85 |
+
pip install -r requirements.txt
|
| 86 |
+
# Ensure no conflicting packages remain
|
| 87 |
+
pip uninstall tensorflow tb-nightly tensorboard
|
| 88 |
+
```
|
| 89 |
+
|
| 90 |
+
---
|
| 91 |
+
|
| 92 |
+
## ☁️ CodeFormer Restoration API
|
| 93 |
+
|
| 94 |
+
The `id-maker` system integrates with an external **CodeFormer** service for high-fidelity face restoration. This is handled via a dedicated REST API.
|
| 95 |
+
|
| 96 |
+
### Endpoint: `/api/restore` (POST)
|
| 97 |
+
The API accepts an image and returns a JSON response containing a URL to the restored result.
|
| 98 |
+
|
| 99 |
+
**Request Parameters (`multipart/form-data`):**
|
| 100 |
+
- `image`: The source image file (JPG/PNG).
|
| 101 |
+
- `fidelity`: (Float, 0.0 - 1.0) Controls the balance between restoration quality (1.0) and fidelity to the original (0.0).
|
| 102 |
+
- `upscale`: (Integer, 1-4) Final output magnification.
|
| 103 |
+
- `background_enhance`: (Boolean string, "true"/"false") Whether to enhance the non-face areas using Real-ESRGAN.
|
| 104 |
+
- `face_upsample`: (Boolean string, "true"/"false") Whether to apply dedicated face upsampling.
|
| 105 |
+
|
| 106 |
+
**Success Response (JSON):**
|
| 107 |
+
```json
|
| 108 |
+
{
|
| 109 |
+
"status": "success",
|
| 110 |
+
"results": [
|
| 111 |
+
{ "image_url": "https://service-url/static/results/result_uuid.png" }
|
| 112 |
+
],
|
| 113 |
+
"message": "Restoration complete"
|
| 114 |
+
}
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
### Configuration
|
| 118 |
+
The target API URL is controlled in `id-maker/config/settings.json` under `api.codeformer_url` or via the `CODEFORMER_API_URL` environment variable.
|
| 119 |
+
|
| 120 |
+
---
|
| 121 |
+
|
| 122 |
+
## 🐳 Deployment & Cloud Readiness
|
| 123 |
+
|
| 124 |
+
The project is optimized for high-availability environments.
|
| 125 |
+
|
| 126 |
+
### Docker Environment
|
| 127 |
+
- **Base:** `python:3.10-slim`.
|
| 128 |
+
- **System Deps:** Requires `libgl1` (OpenCV), `libraqm0` (Font rendering), and `libharfbuzz0b` (Arabic shaping).
|
| 129 |
+
|
| 130 |
+
### Hugging Face Spaces
|
| 131 |
+
- **Transformers Fix:** Patches `PretrainedConfig` to allow custom model loading without attribute errors.
|
| 132 |
+
- **LFS Support:** Binary files (`.ttf`, `.cube`, `.png`) are managed via Git LFS to ensure integrity.
|
| 133 |
+
|
| 134 |
+
---
|
| 135 |
+
|
| 136 |
+
## 🛠 Troubleshooting (Common Pitfalls)
|
| 137 |
+
|
| 138 |
+
| Issue | Root Cause | Solution |
|
| 139 |
+
|-------|------------|----------|
|
| 140 |
+
| **"Tofu" Boxes in Text** | Missing or corrupted fonts. | Ensure `assets/arialbd.ttf` is not a Git LFS pointer (size > 300KB). |
|
| 141 |
+
| **NumPy AttributeError** | Conflict between NumPy 2.x and TensorFlow/Transformers. | Uninstall `tensorflow` and ensure `numpy < 2.0.0` is installed. |
|
| 142 |
+
| **[Errno 10048] Socket Bind** | Port 7860 is already in use by another server process. | Close the previous server instance or set a new `PORT` environment variable. |
|
| 143 |
+
| **Meta-Tensor Error** | Transformers 4.50+ CPU bug. | Handled by `torch.linspace` monkeypatch in `process_images.py`. |
|
| 144 |
+
| **Slow Processing** | CPU bottleneck. | Ensure `torch` is using multiple threads or enable CUDA. |
|
| 145 |
+
|
| 146 |
+
---
|
| 147 |
+
|
| 148 |
+
*Last Updated: February 2026 — EL HELAL Studio Engineering*
|
id-maker/Dockerfile
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use an official Python runtime as a parent image
|
| 2 |
+
# We use the full image to ensure all AI/OpenCV dependencies are compatible
|
| 3 |
+
FROM python:3.10
|
| 4 |
+
|
| 5 |
+
# Set environment variables
|
| 6 |
+
ENV PYTHONDONTWRITEBYTECODE 1
|
| 7 |
+
ENV PYTHONUNBUFFERED 1
|
| 8 |
+
|
| 9 |
+
# Install system dependencies for OpenCV, AI models, and Font Rendering
|
| 10 |
+
RUN apt-get update && apt-get install -y \
|
| 11 |
+
wget \
|
| 12 |
+
fontconfig \
|
| 13 |
+
libfontconfig1 \
|
| 14 |
+
libgl1 \
|
| 15 |
+
libglib2.0-0 \
|
| 16 |
+
libsm6 \
|
| 17 |
+
libxext6 \
|
| 18 |
+
libxrender1 \
|
| 19 |
+
libasound2 \
|
| 20 |
+
fonts-dejavu-core \
|
| 21 |
+
fonts-liberation \
|
| 22 |
+
fonts-noto-core \
|
| 23 |
+
fonts-noto-extra \
|
| 24 |
+
fonts-noto-color-emoji \
|
| 25 |
+
libraqm0 \
|
| 26 |
+
libfreetype6 \
|
| 27 |
+
libfribidi0 \
|
| 28 |
+
libharfbuzz0b \
|
| 29 |
+
libprotobuf-dev \
|
| 30 |
+
protobuf-compiler \
|
| 31 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 32 |
+
|
| 33 |
+
# Set the working directory in the container
|
| 34 |
+
WORKDIR /app
|
| 35 |
+
|
| 36 |
+
# Download high-quality fonts to bypass Git LFS issues
|
| 37 |
+
RUN mkdir -p assets && \
|
| 38 |
+
wget -O assets/tahomabd.ttf "https://raw.githubusercontent.com/Esmaill1/color-stealer/main/tahomabd.ttf" && \
|
| 39 |
+
wget -O assets/TYBAH.TTF "https://raw.githubusercontent.com/Esmaill1/color-stealer/main/TYBAH.TTF" && \
|
| 40 |
+
wget -O assets/arialbd.ttf "https://raw.githubusercontent.com/Esmaill1/color-stealer/main/arialbd.ttf"
|
| 41 |
+
|
| 42 |
+
# Copy the requirements file into the container
|
| 43 |
+
COPY requirements.txt .
|
| 44 |
+
|
| 45 |
+
# Install any needed packages specified in requirements.txt
|
| 46 |
+
# We use --no-cache-dir to keep the image small
|
| 47 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 48 |
+
|
| 49 |
+
# Copy the rest of the application code
|
| 50 |
+
COPY . .
|
| 51 |
+
|
| 52 |
+
# Expose port 7860 for Hugging Face Spaces
|
| 53 |
+
EXPOSE 7860
|
| 54 |
+
|
| 55 |
+
# Run the server
|
| 56 |
+
CMD ["python", "web/server.py"]
|
id-maker/EL_HELAL_Studio.spec
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- mode: python ; coding: utf-8 -*-
|
| 2 |
+
import sys
|
| 3 |
+
import os
|
| 4 |
+
from PyInstaller.utils.hooks import collect_data_files, collect_submodules
|
| 5 |
+
|
| 6 |
+
block_cipher = None
|
| 7 |
+
|
| 8 |
+
# Collect data files from dependencies that might be missed
|
| 9 |
+
datas = [
|
| 10 |
+
('assets', 'assets'),
|
| 11 |
+
('config', 'config'),
|
| 12 |
+
('web/web_storage', 'web/web_storage'),
|
| 13 |
+
('core', 'core'),
|
| 14 |
+
]
|
| 15 |
+
|
| 16 |
+
# Specifically collect hidden imports for heavy libraries
|
| 17 |
+
hiddenimports = [
|
| 18 |
+
'uvicorn.logging',
|
| 19 |
+
'uvicorn.loops',
|
| 20 |
+
'uvicorn.loops.auto',
|
| 21 |
+
'uvicorn.protocols',
|
| 22 |
+
'uvicorn.protocols.http',
|
| 23 |
+
'uvicorn.protocols.http.auto',
|
| 24 |
+
'uvicorn.protocols.websockets',
|
| 25 |
+
'uvicorn.protocols.websockets.auto',
|
| 26 |
+
'uvicorn.lifespan',
|
| 27 |
+
'uvicorn.lifespan.on',
|
| 28 |
+
'fastapi',
|
| 29 |
+
'mediapipe',
|
| 30 |
+
'transformers',
|
| 31 |
+
'torch',
|
| 32 |
+
'webview.platforms.qt', # Or other platforms depending on target
|
| 33 |
+
'webview.platforms.edgechromic',
|
| 34 |
+
'webview.platforms.winforms',
|
| 35 |
+
]
|
| 36 |
+
|
| 37 |
+
# Add submodules for libraries that use dynamic imports
|
| 38 |
+
hiddenimports += collect_submodules('fastapi')
|
| 39 |
+
hiddenimports += collect_submodules('uvicorn')
|
| 40 |
+
hiddenimports += collect_submodules('jinja2')
|
| 41 |
+
|
| 42 |
+
# Collect metadata/data files for heavy libraries
|
| 43 |
+
datas += collect_data_files('mediapipe')
|
| 44 |
+
datas += collect_data_files('transformers')
|
| 45 |
+
|
| 46 |
+
a = Analysis(
|
| 47 |
+
['desktop_launcher.py'],
|
| 48 |
+
pathex=[],
|
| 49 |
+
binaries=[],
|
| 50 |
+
datas=datas,
|
| 51 |
+
hiddenimports=hiddenimports,
|
| 52 |
+
hookspath=[],
|
| 53 |
+
hooksconfig={},
|
| 54 |
+
runtime_hooks=[],
|
| 55 |
+
excludes=[],
|
| 56 |
+
win_no_prefer_redirects=False,
|
| 57 |
+
win_private_assemblies=False,
|
| 58 |
+
cipher=block_cipher,
|
| 59 |
+
noarchive=False,
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
|
| 63 |
+
|
| 64 |
+
exe = EXE(
|
| 65 |
+
pyz,
|
| 66 |
+
a.scripts,
|
| 67 |
+
[],
|
| 68 |
+
exclude_binaries=True,
|
| 69 |
+
name='EL_HELAL_Studio',
|
| 70 |
+
debug=False,
|
| 71 |
+
bootloader_ignore_signals=False,
|
| 72 |
+
strip=False,
|
| 73 |
+
upx=True,
|
| 74 |
+
console=False, # Set to False to hide terminal window on launch
|
| 75 |
+
disable_windowed_traceback=False,
|
| 76 |
+
argv_emulation=False,
|
| 77 |
+
target_arch=None,
|
| 78 |
+
codesign_identity=None,
|
| 79 |
+
entitlements_file=None,
|
| 80 |
+
icon=['assets/logo.png'], # Use your logo as the app icon
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
coll = COLLECT(
|
| 84 |
+
exe,
|
| 85 |
+
a.binaries,
|
| 86 |
+
a.zipfiles,
|
| 87 |
+
a.datas,
|
| 88 |
+
strip=False,
|
| 89 |
+
upx=True,
|
| 90 |
+
upx_exclude=[],
|
| 91 |
+
name='EL_HELAL_Studio',
|
| 92 |
+
)
|
id-maker/README.md
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Id Making
|
| 3 |
+
emoji: 🦀
|
| 4 |
+
colorFrom: indigo
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
id-maker/assets/My_Style.cube
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:309fba1cdcd3865252c3cd87c772263d3726dd7e05cf1da607001f436ccf557f
|
| 3 |
+
size 7340082
|
id-maker/assets/frame-1.png
ADDED
|
Git LFS Details
|
id-maker/assets/frame-2.png
ADDED
|
Git LFS Details
|
id-maker/assets/frame-525d94d5.png
ADDED
|
Git LFS Details
|
id-maker/assets/logo.png
ADDED
|
Git LFS Details
|
id-maker/bucket.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""One-off utility: mirror every object in the Cloudflare R2 'fonts' bucket
into a local directory, preserving the bucket's key hierarchy."""
import os

import boto3
import urllib3

# verify=False is passed to the boto3 client below, so suppress the
# resulting InsecureRequestWarning spam.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# SECURITY NOTE(review): credentials are hardcoded in source. They can now be
# overridden via environment variables, but the fallback literals below should
# be rotated and removed before this file is shared.
#
# Bug fix: ENDPOINT_URL was two endpoints pasted together
# ("...eu.r2.cloudflarestorage.comhttps://...r2.cloudflarestorage.com"),
# which is not a valid URL. Using the plain account endpoint here —
# TODO confirm whether the ".eu" jurisdiction-specific endpoint is intended.
ENDPOINT_URL = 'https://ba1add5d3df1b47fa4893182b8f5761e.r2.cloudflarestorage.com'
BUCKET_NAME = 'fonts'
ACCESS_KEY_ID = os.environ.get('R2_ACCESS_KEY_ID', '915b4668f3b146f10a869c753f0d6a12')
SECRET_ACCESS_KEY = os.environ.get(
    'R2_SECRET_ACCESS_KEY',
    'bcb6246a36b9732a9e1accf3fb7e3004d2cb130f46dd4c393408a3ef4433b869',
)
LOCAL_DOWNLOAD_DIR = 'downloaded_fonts'

print(f"Connecting to: {ENDPOINT_URL}")
print(f"Bucket: {BUCKET_NAME}")

s3 = boto3.client(
    's3',
    endpoint_url=ENDPOINT_URL,
    aws_access_key_id=ACCESS_KEY_ID,
    aws_secret_access_key=SECRET_ACCESS_KEY,
    verify=False,  # NOTE(review): disables TLS cert validation — confirm this is required
)

if not os.path.exists(LOCAL_DOWNLOAD_DIR):
    os.makedirs(LOCAL_DOWNLOAD_DIR)
    print(f"Created: {LOCAL_DOWNLOAD_DIR}")

print("Downloading files...")
paginator = s3.get_paginator('list_objects_v2')
file_count = 0

try:
    for page in paginator.paginate(Bucket=BUCKET_NAME):
        # An empty bucket page simply has no 'Contents' key.
        for obj in page.get('Contents', []):
            key = obj['Key']
            local_path = os.path.join(LOCAL_DOWNLOAD_DIR, key)

            # Recreate the bucket's folder structure locally (race-free).
            local_dir = os.path.dirname(local_path)
            if local_dir:
                os.makedirs(local_dir, exist_ok=True)

            print(f" Downloading: {key}")
            s3.download_file(BUCKET_NAME, key, local_path)
            file_count += 1

    print(f"\n✓ Done! Downloaded {file_count} files to {LOCAL_DOWNLOAD_DIR}")
except Exception as e:
    # Best-effort CLI tool: report the failure instead of a raw traceback.
    print(f"✗ Error: {e}")
|
id-maker/config/settings.json
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"layout": {
|
| 3 |
+
"dpi": 300,
|
| 4 |
+
"output_w_cm": 25.7,
|
| 5 |
+
"output_h_cm": 12.7,
|
| 6 |
+
"grid_rows": 2,
|
| 7 |
+
"grid_cols": 4,
|
| 8 |
+
"grid_gap": 10,
|
| 9 |
+
"grid_margin": 15,
|
| 10 |
+
"photo_bottom_pad_cm": 0.7,
|
| 11 |
+
"brand_border": 50,
|
| 12 |
+
"section_gap": 5,
|
| 13 |
+
"photo_stroke_width": 2,
|
| 14 |
+
"brand_bottom_offset": 110,
|
| 15 |
+
"large_photo_bottom_pad": 100
|
| 16 |
+
},
|
| 17 |
+
"overlays": {
|
| 18 |
+
"logo_size_small": 70,
|
| 19 |
+
"logo_size_large": 95,
|
| 20 |
+
"logo_margin": 8,
|
| 21 |
+
"id_font_size": 63,
|
| 22 |
+
"name_font_size": 42,
|
| 23 |
+
"date_font_size": 19,
|
| 24 |
+
"large_date_font_size": 24,
|
| 25 |
+
"id_lift_offset": 45,
|
| 26 |
+
"id_char_spacing": -8
|
| 27 |
+
},
|
| 28 |
+
"retouch": {
|
| 29 |
+
"enabled": true,
|
| 30 |
+
"sensitivity": 3,
|
| 31 |
+
"tone_smoothing": 0.6
|
| 32 |
+
},
|
| 33 |
+
"api": {
|
| 34 |
+
"codeformer_url": "https://tegaje3com-codeformer.hf.space/api/process"
|
| 35 |
+
},
|
| 36 |
+
"colors": {
|
| 37 |
+
"maroon": [
|
| 38 |
+
139,
|
| 39 |
+
69,
|
| 40 |
+
19
|
| 41 |
+
],
|
| 42 |
+
"dark_red": [
|
| 43 |
+
180,
|
| 44 |
+
0,
|
| 45 |
+
0
|
| 46 |
+
],
|
| 47 |
+
"gold": [
|
| 48 |
+
200,
|
| 49 |
+
150,
|
| 50 |
+
12
|
| 51 |
+
],
|
| 52 |
+
"white": [
|
| 53 |
+
255,
|
| 54 |
+
255,
|
| 55 |
+
255
|
| 56 |
+
],
|
| 57 |
+
"text_dark": [
|
| 58 |
+
60,
|
| 59 |
+
60,
|
| 60 |
+
60
|
| 61 |
+
]
|
| 62 |
+
},
|
| 63 |
+
"restoration": {
|
| 64 |
+
"fidelity": 1
|
| 65 |
+
}
|
| 66 |
+
}
|
id-maker/context.md
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🧠 Project Context: ID Maker Studio (EL HELAL Pipeline)
|
| 2 |
+
|
| 3 |
+
This document provides a high-level overview of the project's intent, architecture, and critical technical state for developers and AI agents.
|
| 4 |
+
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
## 🎯 Project Intent
|
| 8 |
+
**ID Maker Studio** is a professional-grade image processing pipeline designed for **EL HELAL Studio**. Its primary goal is to automate the conversion of raw student portraits into high-resolution (300 DPI), print-ready ID sheets.
|
| 9 |
+
|
| 10 |
+
The system handles everything from blemish removal and color grading to complex Arabic typography and branding.
|
| 11 |
+
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
## 🏗 High-Level Architecture
|
| 15 |
+
|
| 16 |
+
The project is strictly modular, separating core business logic from user interfaces.
|
| 17 |
+
|
| 18 |
+
### 1. The Brain (`/core`)
|
| 19 |
+
- **Pipeline Orchestration:** Sequentially runs 5 critical steps.
|
| 20 |
+
- **AI Logic:**
|
| 21 |
+
- **Crop:** OpenCV-based face detection (5:7 ratio).
|
| 22 |
+
- **Background Removal:** BiRefNet (RMBG-2.0) running on CPU (quantized).
|
| 23 |
+
- **Retouching:** MediaPipe Face Mesh + Frequency Separation (preserves 100% skin texture).
|
| 24 |
+
- **Color Steal:** Custom algorithm to learn and apply professional color curves.
|
| 25 |
+
- **Layout Engine:** Composite engine using Pillow. Handles complex Arabic script via manual shaping and reordering.
|
| 26 |
+
|
| 27 |
+
### 2. The Interfaces
|
| 28 |
+
- **Web Interface (`/web`):** The primary production tool. Built with FastAPI and a localized Arabic (RTL) frontend.
|
| 29 |
+
- **3-Column Layout:** Queue (Right) — Preview (Center) — Options & Settings (Left).
|
| 30 |
+
- **Dark/Light Theme:** Toggleable via header button, persisted in localStorage.
|
| 31 |
+
- **Batch Processing Counter:** Shows `1/5`, `2/5`... overlay with dim background during batch processing.
|
| 32 |
+
- **Per-Image Delete:** Hover delete button on each queue item.
|
| 33 |
+
- **Settings API:** Real-time slider-based tuning of retouch sensitivity, skin smoothing, font sizes — saved to `config/settings.json`.
|
| 34 |
+
- **Keyboard Shortcuts:** Arrow navigation, Delete, Enter (save & next), Ctrl+S (process), Escape.
|
| 35 |
+
- **Before/After Toggle:** Quick comparison between original and processed result.
|
| 36 |
+
- **Zoom Modal:** Scroll-wheel zoom for detailed inspection.
|
| 37 |
+
- **Mobile Drawer:** Responsive drawer for queue on mobile devices.
|
| 38 |
+
- **Frame Color Picker:** Customizes the large side panel color (passed per-request).
|
| 39 |
+
- **High-Res Download:** Single-image save button now fetches the full 300 DPI result.
|
| 40 |
+
- **Backup System:** One-click Export/Import of settings, assets, and preferences.
|
| 41 |
+
- **Desktop GUI (`/gui`):** Legacy Tkinter application for offline machine usage.
|
| 42 |
+
|
| 43 |
+
### 3. Data & Config
|
| 44 |
+
- **`/storage`:** Managed directory for uploads, processing, and results.
|
| 45 |
+
- **`/config`:** Centralized `settings.json` for real-time layout tuning (DPI, margins, font sizes).
|
| 46 |
+
- **`/assets`:** Branding marks and font files.
|
| 47 |
+
|
| 48 |
+
---
|
| 49 |
+
|
| 50 |
+
## 🚀 Deployment & Environment
|
| 51 |
+
|
| 52 |
+
- **Infrastructure:** Optimized for **Docker** and **Hugging Face Spaces**.
|
| 53 |
+
- **Hardware Context:** Targeted for **2 CPUs / 16GB RAM**.
|
| 54 |
+
- RAM allows for large image buffers.
|
| 55 |
+
- CPU is the bottleneck; inference is sequential and quantized.
|
| 56 |
+
- **Binary Management:** Uses a hybrid approach. Smaller assets in Git; critical fonts are **automatically downloaded from GitHub** during Docker build to bypass Git LFS issues.
|
| 57 |
+
|
| 58 |
+
---
|
| 59 |
+
|
| 60 |
+
## ⚠️ Critical Technical "Gotchas"
|
| 61 |
+
|
| 62 |
+
- **Arabic Rendering:** In the container environment (standard LTR), Arabic is rendered by:
|
| 63 |
+
1. Reshaping characters (connecting letters).
|
| 64 |
+
2. **Manually reversing** the string characters to ensure correct visual flow without requiring `libraqm`.
|
| 65 |
+
- **MediaPipe Stability:** Explicitly imports submodules and includes safety checks to skip retouching if binary dependencies fail to initialize.
|
| 66 |
+
- **Transformers Monkeypatch:** Contains deep patches in `process_images.py` to handle `transformers 4.50+` tied-weight and meta-tensor bugs on CPU.
|
| 67 |
+
- **Dynamic Config:** `layout_engine.py` reloads `settings.json` on every request to allow "live" tuning of the output.
|
| 68 |
+
|
| 69 |
+
---
|
| 70 |
+
|
| 71 |
+
## 📂 Quick File Map
|
| 72 |
+
- `web/server.py`: FastAPI entry point. Includes Settings API (`GET/POST /settings`).
|
| 73 |
+
- `web/web_storage/index.html`: 3-column RTL frontend (Queue | Preview | Options).
|
| 74 |
+
- `core/layout_engine.py`: Final sheet composition logic.
|
| 75 |
+
- `core/retouch.py`: Advanced skin processing.
|
| 76 |
+
- `config/settings.json`: Live-reloadable settings (retouch sensitivity, font sizes, etc.).
|
| 77 |
+
- `Dockerfile`: Production environment definition.
|
| 78 |
+
- `tools/problems.md`: Historical log of technical hurdles and fixes.
|
| 79 |
+
|
| 80 |
+
---
|
| 81 |
+
|
| 82 |
+
*Last Updated: February 2026 — Web UI v2 (3-Column Layout, Batch Counter, Theme Toggle)*
|
id-maker/core/My_Style.cube
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:309fba1cdcd3865252c3cd87c772263d3726dd7e05cf1da607001f436ccf557f
|
| 3 |
+
size 7340082
|
id-maker/core/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Core logic package
|
id-maker/core/color_steal.py
ADDED
|
@@ -0,0 +1,377 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import os
|
| 3 |
+
from PIL import Image
|
| 4 |
+
from scipy.interpolate import interp1d
|
| 5 |
+
from scipy.signal import savgol_filter
|
| 6 |
+
|
| 7 |
+
# This file stores the learned color curves so we don't need to re-train every time
|
| 8 |
+
_DIR = os.path.dirname(os.path.abspath(__file__))
|
| 9 |
+
MODEL_CACHE = os.path.join(_DIR, "trained_curves.npz")
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def get_pairs_from_folders(before_folder, after_folder):
    """
    Pair up raw/edited images across two folders by shared filename prefix.

    Files are expected to share the text before the first underscore, e.g.
    '12_d.jpg' in before/ pairs with '12_l.jpg' in after/.

    Returns a list of (before_path, after_path) tuples, sorted by prefix.
    """
    valid_ext = (".jpg", ".jpeg", ".png", ".tif", ".tiff")

    def _index(folder):
        # Map "12" -> "<folder>/12_d.jpg"; later duplicates overwrite earlier ones.
        return {
            name.split("_")[0]: os.path.join(folder, name)
            for name in os.listdir(folder)
            if name.lower().endswith(valid_ext)
        }

    before_files = _index(before_folder)
    after_files = _index(after_folder)

    # Pair every prefix present in both folders, in sorted prefix order.
    common = sorted(set(before_files.keys()) & set(after_files.keys()))
    pairs = [(before_files[p], after_files[p]) for p in common]

    # Surface anything that could not be paired, to help debug naming mistakes.
    unmatched_before = set(before_files.keys()) - set(after_files.keys())
    unmatched_after = set(after_files.keys()) - set(before_files.keys())
    if unmatched_before:
        print(
            f" Warning: No match in after/ for: {[before_files[p] for p in unmatched_before]}"
        )
    if unmatched_after:
        print(
            f" Warning: No match in before/ for: {[after_files[p] for p in unmatched_after]}"
        )

    print(f" Found {len(pairs)} matched pairs")
    return pairs
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def extract_curves_from_pairs(pairs):
    """
    The CORE function. It analyzes pixel differences between raw (before) and edited (after) photos
    to "learn" the color grading style.

    Args:
        pairs: List of (raw_path, edited_path) filename tuples, as produced by
            get_pairs_from_folders().

    Returns:
        luts: A list of 3 arrays [Red_Curve, Green_Curve, Blue_Curve] representing the color mapping,
            each a uint8 array of length 256; or None if any channel had no usable data.
    """
    # We'll store every single pixel value from all images here
    channel_src = [[], [], []]  # R, G, B source pixels (Before)
    channel_tgt = [[], [], []]  # R, G, B target pixels (After)

    for i, (raw_path, edited_path) in enumerate(pairs, 1):
        print(f" Loading pair {i}/{len(pairs)}: {raw_path} -> {edited_path}")

        raw_ref = np.array(Image.open(raw_path).convert("RGB"))
        edited_ref = np.array(Image.open(edited_path).convert("RGB"))

        # Safety check: if edited image was cropped/resized, scale it to match the raw source
        if raw_ref.shape != edited_ref.shape:
            edited_ref = np.array(
                Image.open(edited_path)
                .convert("RGB")
                .resize((raw_ref.shape[1], raw_ref.shape[0]), Image.Resampling.LANCZOS)
            )

        # Flatten image into a long list of pixels and add to our big collection
        for ch in range(3):
            channel_src[ch].append(raw_ref[:, :, ch].flatten())
            channel_tgt[ch].append(edited_ref[:, :, ch].flatten())

    # Combine data from all images into one massive array per channel
    for ch in range(3):
        channel_src[ch] = np.concatenate(channel_src[ch])
        channel_tgt[ch] = np.concatenate(channel_tgt[ch])

    total_pixels = len(channel_src[0])
    print(f" Total training pixels: {total_pixels:,} (from {len(pairs)} pairs)")

    # Build the LUT (Look Up Table) for each channel (R, G, B)
    luts = []
    channel_names = ["Red", "Green", "Blue"]
    x_bins = np.arange(256)  # Input pixel values 0-255

    for ch in range(3):
        src_flat = channel_src[ch]
        tgt_flat = channel_tgt[ch]

        # Calculate average target value for every possible input value (0-255)
        # e.g., "When input Red is 100, what is the average output Red?"
        y_means = []
        y_counts = []
        for val in x_bins:
            mask = src_flat == val
            count = np.sum(mask)
            if count > 0:
                y_means.append(np.mean(tgt_flat[mask]))
                y_counts.append(count)
            else:
                y_means.append(np.nan)  # No data for this specific pixel value
                y_counts.append(0)

        y_means = np.array(y_means)
        y_counts = np.array(y_counts)

        # Report how much of the color range we actually saw
        coverage = np.sum(y_counts > 0)
        print(f" {channel_names[ch]}: {coverage}/256 values covered")

        valid_mask = ~np.isnan(y_means)
        if not valid_mask.any():
            # Abort entirely: a channel with zero samples cannot produce a LUT.
            print(f" ERROR: {channel_names[ch]} channel has no data!")
            return None

        # --- Weighting Logic ---
        # We trust values that appeared frequently in the images more than rare pixel values.
        # NOTE(review): max_count is computed but never used below — dead code?
        max_count = np.max(y_counts[y_counts > 0])

        # Weighted interpolation: if we have low confidence in a value (low count),
        # allow it to be smoothed by its neighbors.
        y_means_weighted = y_means.copy()
        for i in np.where(valid_mask)[0]:
            if y_counts[i] < 100:  # If we saw this pixel value fewer than 100 times...
                # Look at neighbors (a 5-wide window) to sanity check
                neighbors = y_means[max(0, i - 2) : min(256, i + 3)]
                valid_neighbors = neighbors[~np.isnan(neighbors)]
                if len(valid_neighbors) > 0:
                    y_means_weighted[i] = np.mean(valid_neighbors)

        # Fill in missing values using linear interpolation
        # (extrapolate beyond the observed range so all 256 entries are defined)
        interpolator = interp1d(
            x_bins[valid_mask],
            y_means_weighted[valid_mask],
            kind="linear",
            fill_value="extrapolate",
            bounds_error=False,
        )

        full_lut = interpolator(np.arange(256))

        # --- Smoothing ---
        # Apply Savitzky-Golay filter to make the curve smooth and "organic"
        # This prevents color banding in the final image.
        try:
            full_lut = savgol_filter(full_lut, window_length=11, polyorder=2)
        except ValueError:
            # Fallback for small datasets (window must not exceed data length)
            full_lut = savgol_filter(full_lut, window_length=5, polyorder=1)

        # Ensure values stay valid (0-255) and integer type
        full_lut = full_lut.clip(0, 255).astype(np.uint8)
        luts.append(full_lut)

    return luts
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def save_trained_curves(luts, filename=None):
    """Persist trained LUT curves to disk as a .npz archive.

    Args:
        luts: [R, G, B] lookup arrays of length 256.
        filename: Destination path; defaults to the module-level MODEL_CACHE.
    """
    filename = filename or MODEL_CACHE
    np.savez(filename, r=luts[0], g=luts[1], b=luts[2])
    # Bug fix: the message previously printed a literal "(unknown)"
    # placeholder instead of the actual destination path.
    print(f" Curves saved to {filename}")
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def load_trained_curves(filename=None):
    """
    Load previously trained LUT curves from disk, if present.

    Args:
        filename: Path to the .npz archive; defaults to MODEL_CACHE.

    Returns:
        [R, G, B] curve arrays if the file exists, or None otherwise
        (callers then fall back to training from image pairs).
    """
    filename = filename or MODEL_CACHE
    if not os.path.exists(filename):
        return None

    data = np.load(filename)
    # Bug fix: the message previously printed a literal "(unknown)"
    # placeholder instead of the actual source path.
    print(f" Loaded cached curves from {filename} (skipping training)")
    return [data["r"], data["g"], data["b"]]
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def save_cube_file(luts, filename, lut_size=64):
    """
    Export the learned style as a standard Adobe .cube 3D LUT file.

    The resulting file can be loaded in Photoshop, Premiere, DaVinci Resolve,
    OBS, etc.

    Args:
        luts: [R, G, B] uint8 lookup arrays of length 256 (per-channel curves).
        filename: Output .cube path.
        lut_size: Grid resolution per axis (file contains lut_size**3 entries).
    """
    # Bug fix: the message previously printed a literal "(unknown)"
    # placeholder instead of the actual output path.
    print(f" Saving {lut_size}x{lut_size}x{lut_size} .cube file: {filename}")

    with open(filename, "w") as f:
        f.write('TITLE "Multi_Pair_Style_Match"\n')
        f.write(f"LUT_3D_SIZE {lut_size}\n\n")

        # Generate the 3D grid and apply our learned curves to it.
        # .cube ordering: red varies fastest, then green, then blue.
        domain = np.linspace(0, 255, lut_size).astype(int)
        for b_val in domain:
            for g_val in domain:
                for r_val in domain:
                    # Apply R, G, B curves independently, normalized to 0..1
                    new_r = luts[0][r_val] / 255.0
                    new_g = luts[1][g_val] / 255.0
                    new_b = luts[2][b_val] / 255.0
                    f.write(f"{new_r:.6f} {new_g:.6f} {new_b:.6f}\n")

    print(" .cube file saved!")
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def apply_to_folder(luts, target_folder, output_folder):
    """
    Apply the learned curves to every image in 'target_folder'.

    Results are written to 'output_folder' as 'Graded_<original name>',
    keeping the original resolution, EXIF metadata, and (for PNG/TIFF)
    transparency.

    Args:
        luts: [R, G, B] uint8 lookup arrays of length 256.
        target_folder: Directory of images to grade.
        output_folder: Destination directory (created if missing).
    """
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    files = [
        f
        for f in os.listdir(target_folder)
        if f.lower().endswith((".jpg", ".jpeg", ".png", ".tif", ".tiff"))
    ]

    if not files:
        print(f" No images found in {target_folder}")
        return

    print(f" Processing {len(files)} images...")
    for filename in files:
        img_path = os.path.join(target_folder, filename)
        img = Image.open(img_path)

        # Handle Transparency (Alpha Channel):
        # We must separate the alpha channel, grade the RGB channels, then put alpha back.
        has_alpha = img.mode in ("RGBA", "LA", "PA")
        alpha_channel = None

        if has_alpha:
            img_rgba = img.convert("RGBA")
            alpha_channel = np.array(img_rgba)[:, :, 3]  # Save the transparency layer
            img_rgb = img_rgba.convert("RGB")
            print(" (Transparent PNG detected — alpha will be preserved)")
        else:
            img_rgb = img.convert("RGB")

        img_array = np.array(img_rgb)

        # Apply LUT per channel (Vectorized operation = very fast)
        result_array = img_array.copy()
        for ch in range(3):
            # Map every pixel value using our lookup table
            result_array[:, :, ch] = luts[ch][img_array[:, :, ch]]

        # RESTORE Alpha Channel
        if has_alpha and alpha_channel is not None:
            result_rgba = np.dstack((result_array, alpha_channel))
            result_img = Image.fromarray(result_rgba, "RGBA")
        else:
            result_img = Image.fromarray(result_array)

        # Preserve EXIF metadata (camera settings, date, etc.)
        exif_data = img.info.get("exif", None)

        # Bug fix: the output name previously contained a literal "(unknown)"
        # placeholder instead of the original filename.
        name, ext = os.path.splitext(filename)
        save_path = os.path.join(output_folder, f"Graded_{name}{ext}")

        save_kwargs = {}
        if exif_data:
            save_kwargs["exif"] = exif_data

        if ext.lower() in (".jpg", ".jpeg"):
            if has_alpha:  # JPG can't store alpha, must convert to RGB
                result_img = result_img.convert("RGB")
            save_kwargs["quality"] = 100
            save_kwargs["subsampling"] = 0  # Highest color quality (4:4:4)
        elif ext.lower() == ".png":
            save_kwargs["compress_level"] = 1  # Low compression = faster
        elif ext.lower() in (".tif", ".tiff"):
            save_kwargs["compression"] = "tiff_lzw"

        result_img.save(save_path, **save_kwargs)
        # Bug fix: the message previously printed a literal "(unknown)".
        print(f" Done: {filename} ({img_array.shape[1]}x{img_array.shape[0]})")
    print(" All images saved at original resolution & maximum quality.")
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def apply_to_image(luts, img: Image.Image) -> Image.Image:
    """
    Run the learned R/G/B lookup tables over a single PIL image.

    Transparency is preserved: the alpha plane is split off before grading
    and re-attached afterwards.
    """
    alpha = None
    if img.mode in ("RGBA", "LA", "PA"):
        # Grade only the color planes; stash the alpha plane for later.
        rgba = img.convert("RGBA")
        alpha = np.array(rgba)[:, :, 3]
        pixels = np.array(rgba.convert("RGB"))
    else:
        pixels = np.array(img.convert("RGB"))

    # Remap each channel through its lookup table (vectorized fancy indexing).
    graded = pixels.copy()
    for channel in range(3):
        graded[:, :, channel] = luts[channel][pixels[:, :, channel]]

    if alpha is not None:
        # Re-attach the untouched alpha plane.
        return Image.fromarray(np.dstack((graded, alpha)), "RGBA")
    return Image.fromarray(graded)
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
if __name__ == "__main__":
    # ==========================================
    # EXECUTION SECTION
    # ==========================================

    # Training-data folder setup (relative to the working directory)
    BEFORE_FOLDER = "./before"
    AFTER_FOLDER = "./after"

    # 1. Attempt to load existing model first to save time
    luts = load_trained_curves()

    # 2. If no cached model exists, attempt to learn the curves from image pairs
    if luts is None:
        # Check if folders actually exist before crashing
        if os.path.exists(BEFORE_FOLDER) and os.path.exists(AFTER_FOLDER):
            pairs = get_pairs_from_folders(BEFORE_FOLDER, AFTER_FOLDER)

            if not pairs:
                # Error only if we have NO model AND NO training data
                print(
                    "ERROR: No matching pairs found. Make sure before/ and after/ folders"
                )
                print(
                    " have images with matching prefixes (e.g. 12_d.jpg <-> 12_l.jpg)"
                )
            else:
                print("=" * 50)
                print("PHASE 1: Learning Color Grade")
                print("=" * 50)
                luts = extract_curves_from_pairs(pairs)
                if luts:
                    save_trained_curves(luts)  # Cache for next time
        else:
            print("Model not found and training folders are missing. Cannot proceed.")

    # 3. If we successfully loaded or learned the style, apply it
    if luts:
        # Export .cube file (LUT) for external use
        if not os.path.exists("My_Style.cube"):
            print("\n" + "=" * 50)
            print("PHASE 2: Exporting .cube LUT")
            print("=" * 50)
            save_cube_file(luts, "My_Style.cube", lut_size=64)
        else:
            print("\n My_Style.cube already exists, skipping export.")

        # Batch-apply to all photos in the './trans' folder, saving to './colored'
        print("\n" + "=" * 50)
        print("PHASE 3: Batch Processing")
        print("=" * 50)
        apply_to_folder(luts, "./trans", "./colored")
|
id-maker/core/crop.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
import os
from PIL import Image
import sys
import cv2.data
import numpy as np

# Locate the standard frontal face XML classifier provided by OpenCV
cascade_path = os.path.join(
    cv2.data.haarcascades, "haarcascade_frontalface_default.xml"
)
# Module-level detector instance, shared by every crop call in this file.
face_cascade = cv2.CascadeClassifier(cascade_path)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _load_image_exif_safe(image_path):
    """Open an image with PIL, apply its EXIF orientation, and return an
    OpenCV BGR array — or None if loading fails."""
    try:
        from PIL import ImageOps

        # exif_transpose rotates/flips pixels per the EXIF orientation tag,
        # so portrait shots from phones come out upright.
        upright = ImageOps.exif_transpose(Image.open(image_path))
        rgb_pixels = np.array(upright.convert("RGB"))
        return cv2.cvtColor(rgb_pixels, cv2.COLOR_RGB2BGR)
    except Exception as e:
        # Best-effort loader: report and signal failure to the caller.
        print(f"Error loading image safe: {e}")
        return None
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_auto_crop_rect(image_path):
    """
    Detects a face and calculates the 5:7 crop rectangle.
    Returns (x1, y1, x2, y2) in original image coordinates or None.
    """
    image = _load_image_exif_safe(image_path)
    if image is None:
        return None
    h, w, _ = image.shape
    # Haar cascades operate on grayscale input
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

    if len(faces) == 0:
        # Fallback: Center crop (80% of image height) if no face found
        aspect_ratio = 5 / 7
        crop_h = int(h * 0.8)
        crop_w = int(crop_h * aspect_ratio)
        x1 = (w - crop_w) // 2
        y1 = (h - crop_h) // 2
        return (x1, y1, x1 + crop_w, y1 + crop_h)

    # Largest detected face (by box area) is assumed to be the subject
    faces = sorted(faces, key=lambda x: x[2] * x[3], reverse=True)
    (x, y, fw, fh) = faces[0]
    # Face center; NOTE(review): cy is computed but never used below
    cx, cy = x + fw // 2, y + fh // 2
    aspect_ratio = 5 / 7

    # Crop size: 70% of the largest 5:7 region that fits inside the image
    crop_height = int(min(h, w / aspect_ratio) * 0.7)
    crop_width = int(crop_height * aspect_ratio)

    # Estimate the top of the head above the detection box, then leave headroom
    head_top = y - int(fh * 0.35)
    HEAD_SPACE_RATIO = 0.10
    y1 = max(0, head_top - int(crop_height * HEAD_SPACE_RATIO))
    x1 = max(0, cx - crop_width // 2)  # center the crop horizontally on the face

    x2 = min(w, x1 + crop_width)
    y2 = min(h, y1 + crop_height)

    # Adjust to maintain size when the crop ran past the image border
    if x2 - x1 < crop_width:
        x1 = max(0, x2 - crop_width)
    if y2 - y1 < crop_height:
        y1 = max(0, y2 - crop_height)

    return (int(x1), int(y1), int(x1 + crop_width), int(y1 + crop_height))
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def apply_custom_crop(image_path, output_path, rect):
    """
    Applies a specific (x1, y1, x2, y2) crop and resizes to 10x14cm @ 300DPI.
    """
    x1, y1, x2, y2 = rect
    try:
        src = _load_image_exif_safe(image_path)
        if src is None:
            return False

        region = src[y1:y2, x1:x2]
        # Lanczos resampling for better quality on the final resize.
        resized = cv2.resize(region, (1181, 1654), interpolation=cv2.INTER_LANCZOS4)
        out_img = Image.fromarray(cv2.cvtColor(resized, cv2.COLOR_BGR2RGB))

        save_kwargs = {"dpi": (300, 300)}
        if os.path.splitext(output_path)[1].lower() == ".png":
            # Low compression for speed, lossless
            save_kwargs["compress_level"] = 1
        else:
            save_kwargs.update(quality=100, subsampling=0)
        out_img.save(output_path, **save_kwargs)
        return True
    except Exception as e:
        print(f"Error applying custom crop: {e}")
        return False
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def crop_to_4x6_opencv(image_path, output_path):
    """Standard AI auto-crop."""
    rect = get_auto_crop_rect(image_path)
    return apply_custom_crop(image_path, output_path, rect) if rect else False
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def batch_process(input_folder, output_folder):
    """Auto-crop every JPEG/PNG in input_folder into output_folder.

    Output files keep their original names. Per-image failures are absorbed
    inside crop_to_4x6_opencv (it returns False), so one bad file does not
    stop the batch.
    """
    # exist_ok avoids the check-then-create race of the previous version.
    os.makedirs(output_folder, exist_ok=True)
    files = [
        f
        for f in os.listdir(input_folder)
        if f.lower().endswith((".jpg", ".jpeg", ".png"))
    ]
    for filename in files:
        crop_to_4x6_opencv(
            os.path.join(input_folder, filename), os.path.join(output_folder, filename)
        )
|
id-maker/core/layout_engine.py
ADDED
|
@@ -0,0 +1,362 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
EL HELAL Studio – Photo Layout Engine
|
| 3 |
+
Dynamically loads settings from settings.json
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from PIL import Image, ImageDraw, ImageFont, features, ImageOps
|
| 7 |
+
import arabic_reshaper
|
| 8 |
+
from bidi.algorithm import get_display
|
| 9 |
+
import os
|
| 10 |
+
import json
|
| 11 |
+
from datetime import date
|
| 12 |
+
|
| 13 |
+
# ──────────────────────────────────────────────────────────────
|
| 14 |
+
# Paths & Config Loading
|
| 15 |
+
# ──────────────────────────────────────────────────────────────
|
| 16 |
+
# Use the directory of this script as a base
|
| 17 |
+
_CORE_DIR = os.path.dirname(os.path.abspath(__file__))
|
| 18 |
+
# The project root is one level up from 'core'
|
| 19 |
+
_ROOT_DIR = os.path.abspath(os.path.join(_CORE_DIR, ".."))
|
| 20 |
+
|
| 21 |
+
# Search for assets in both 'assets' folder and locally
|
| 22 |
+
def find_asset(filename):
    """Locate an asset file, checking assets/, core/, then the project root.

    Returns the first path that exists; if none do, returns the default
    assets/ path so callers still get a deterministic location.
    """
    default = os.path.join(_ROOT_DIR, "assets", filename)
    candidates = (
        default,                              # 1. assets/ folder in root
        os.path.join(_CORE_DIR, filename),    # 2. locally in core/
        os.path.join(_ROOT_DIR, filename),    # 3. root directly
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return default  # Fallback to default path
|
| 33 |
+
|
| 34 |
+
LOGO_PATH = find_asset("logo.png")
|
| 35 |
+
ARABIC_FONT_PATH = find_asset("TYBAH.TTF")
|
| 36 |
+
SETTINGS_PATH = os.path.join(_ROOT_DIR, "config", "settings.json")
|
| 37 |
+
|
| 38 |
+
def load_settings():
    """Load config/settings.json merged over built-in defaults.

    Top-level dict sections from the user file are merged key-by-key into the
    defaults; any other top-level value replaces the default outright. On any
    read/parse error the defaults are returned unchanged.
    """
    defaults = {
        "layout": {
            "dpi": 300, "output_w_cm": 25.7, "output_h_cm": 12.7,
            "grid_rows": 2, "grid_cols": 4, "grid_gap": 10, "grid_margin": 15,
            "photo_bottom_pad_cm": 0.7, "brand_border": 50, "section_gap": 5,
            "photo_stroke_width": 1, "brand_bottom_offset": 110,
            "large_photo_bottom_pad": 100
        },
        "overlays": {
            "logo_size_small": 77, "logo_size_large": 95, "logo_margin": 8,
            "id_font_size": 50, "name_font_size": 30, "date_font_size": 19,
            "large_date_font_size": 24, "id_lift_offset": 45, "id_char_spacing": -3
        },
        "colors": {
            "maroon": [60, 0, 0], "dark_red": [180, 0, 0], "gold": [200, 150, 12],
            "white": [255, 255, 255], "text_dark": [60, 60, 60]
        },
        "retouch": {
            "enabled": True, "sensitivity": 3.0, "tone_smoothing": 0.6
        }
    }
    if os.path.exists(SETTINGS_PATH):
        try:
            # Explicit encoding: settings may contain non-ASCII text and the
            # platform default encoding is not reliable (e.g. cp1252 on Windows).
            with open(SETTINGS_PATH, "r", encoding="utf-8") as f:
                user_settings = json.load(f)
            # Merge user settings into defaults
            for key, val in user_settings.items():
                if key in defaults and isinstance(val, dict):
                    defaults[key].update(val)
                else:
                    defaults[key] = val
        except Exception as e:
            print(f"Error loading settings.json: {e}")
    return defaults
|
| 73 |
+
|
| 74 |
+
S = load_settings()
|
| 75 |
+
|
| 76 |
+
# Derived Constants
|
| 77 |
+
DPI = S["layout"]["dpi"]
|
| 78 |
+
OUTPUT_WIDTH = round(S["layout"]["output_w_cm"] / 2.54 * DPI)
|
| 79 |
+
OUTPUT_HEIGHT = round(S["layout"]["output_h_cm"] / 2.54 * DPI)
|
| 80 |
+
PHOTO_BOTTOM_PAD = round(S["layout"]["photo_bottom_pad_cm"] / 2.54 * DPI)
|
| 81 |
+
|
| 82 |
+
def c(key): return tuple(S["colors"][key])
|
| 83 |
+
WHITE = c("white")
|
| 84 |
+
MAROON = c("maroon")
|
| 85 |
+
DARK_RED = c("dark_red")
|
| 86 |
+
TEXT_DARK = c("text_dark")
|
| 87 |
+
|
| 88 |
+
# ──────────────────────────────────────────────────────────────
|
| 89 |
+
# Helpers
|
| 90 |
+
# ──────────────────────────────────────────────────────────────
|
| 91 |
+
|
| 92 |
+
def _load_logo() -> Image.Image | None:
    """Return the studio logo as an RGBA image, or None if missing/unreadable."""
    if not os.path.exists(LOGO_PATH):
        print(f"Logo not found at: {LOGO_PATH}")
        return None
    try:
        return Image.open(LOGO_PATH).convert("RGBA")
    except Exception as e:
        print(f"Error loading logo from {LOGO_PATH}: {e}")
        return None
|
| 101 |
+
|
| 102 |
+
def _load_frame(frame_name: str) -> Image.Image | None:
    """Load a decorative frame overlay by asset name; returns RGBA or None."""
    if not frame_name:
        return None
    # Security: basic check to prevent directory traversal
    if ".." in frame_name or "/" in frame_name or "\\" in frame_name:
        print(f"Security: Invalid frame name '{frame_name}'")
        return None

    path = find_asset(frame_name)
    if not os.path.exists(path):
        print(f"Frame not found at: {path}")
        return None
    try:
        return Image.open(path).convert("RGBA")
    except Exception as e:
        print(f"Error loading frame from {path}: {e}")
        return None
|
| 118 |
+
|
| 119 |
+
def _load_font_with_fallback(size: int, is_arabic: bool = False) -> ImageFont.FreeTypeFont:
    """Aggressive font loader with deep system search.

    Tries bundled assets first, then platform font locations (Windows font
    dir, or a scan of Linux font dirs for Noto/DejaVu/FreeSans bold faces).
    Arabic callers get Noto prioritized; English callers get DejaVu.
    Falls back to Pillow's built-in bitmap font if everything fails.
    """
    # 1. Assets (Downloaded via Dockerfile - Guaranteed binary files if links work)
    candidates = [
        os.path.join(_ROOT_DIR, "assets", "arialbd.ttf"),
        os.path.join(_ROOT_DIR, "assets", "tahomabd.ttf"),
        os.path.join(_ROOT_DIR, "assets", "TYBAH.TTF")
    ]

    # 2. Add System Fonts based on priority
    if os.name == "nt":  # Windows
        candidates += ["C:/Windows/Fonts/arialbd.ttf", "C:/Windows/Fonts/tahomabd.ttf"]
    else:  # Linux / Docker - SCAN SYSTEM
        # We look for Noto (Arabic) and DejaVu (English/Fallback)
        search_dirs = ["/usr/share/fonts", "/usr/local/share/fonts"]
        found_system_fonts = []
        for d in search_dirs:
            if os.path.exists(d):
                for root, _, files in os.walk(d):
                    for f in files:
                        if "NotoSansArabic-Bold" in f or "DejaVuSans-Bold" in f or "FreeSansBold" in f:
                            found_system_fonts.append(os.path.join(root, f))

        # Prioritize Noto for Arabic, DejaVu for English
        if is_arabic:
            found_system_fonts.sort(key=lambda x: "Noto" in x, reverse=True)
        else:
            found_system_fonts.sort(key=lambda x: "DejaVu" in x, reverse=True)

        candidates += found_system_fonts

    # 3. Final Search and Load
    for path in candidates:
        if path and os.path.exists(path):
            try:
                f_size = os.path.getsize(path)
                if f_size < 2000:
                    continue  # Skip pointers (e.g. git-lfs stubs) and empty files

                font = ImageFont.truetype(path, size)
                print(f"DEBUG: Using {os.path.basename(path)} for {'ARABIC' if is_arabic else 'ENGLISH'} (Size: {f_size})")
                return font
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed while probing candidate fonts.
                continue

    print("CRITICAL: All font loads failed. Falling back to default.")
    return ImageFont.load_default()
|
| 165 |
+
|
| 166 |
+
def _find_font(size: int) -> ImageFont.FreeTypeFont:
    """Latin-script font at the given pixel size."""
    return _load_font_with_fallback(size, is_arabic=False)
|
| 168 |
+
|
| 169 |
+
def _arabic_font(size: int) -> ImageFont.FreeTypeFont:
    """Arabic-capable font at the given pixel size."""
    return _load_font_with_fallback(size, is_arabic=True)
|
| 171 |
+
|
| 172 |
+
def _reshape_arabic(text: str) -> str:
    """Prepare Arabic text for drawing: shape ligatures, then fix RTL order.

    With Raqm available (usually Linux/Docker) Pillow reorders RTL itself, so
    only reshaping is needed; without it (usually Windows) the bidi algorithm
    must be applied explicitly. On any shaping error the input is returned
    unchanged.
    """
    if not text:
        return ""
    try:
        shaped = arabic_reshaper.reshape(text)
        if features.check("raqm"):
            return shaped
        return get_display(shaped)
    except Exception as e:
        print(f"DEBUG: Arabic Shaping Error: {e}")
        return text
|
| 188 |
+
def _resize_to_fit(img: Image.Image, max_w: int, max_h: int) -> Image.Image:
    """Scale img to fit inside max_w x max_h, preserving aspect ratio."""
    width, height = img.size
    factor = min(max_w / width, max_h / height)
    target = (int(width * factor), int(height * factor))
    return img.resize(target, Image.LANCZOS)
|
| 192 |
+
|
| 193 |
+
def _add_inner_stroke(img: Image.Image, color=(200, 200, 200), width=1) -> Image.Image:
    """Adds a thin inner border to the image."""
    if width <= 0:
        return img
    bordered = img.copy()
    pen = ImageDraw.Draw(bordered)
    w, h = bordered.size
    # One rectangle per pixel of border thickness, insetting each time.
    for inset in range(width):
        pen.rectangle([inset, inset, w - 1 - inset, h - 1 - inset], outline=color)
    return bordered
|
| 202 |
+
|
| 203 |
+
def _paste_logo_with_stroke(target: Image.Image, logo: Image.Image, x: int, y: int, stroke_width: int = 2):
    """Paste logo onto target with a white halo behind it.

    A white silhouette of the logo is stamped at every offset inside a
    circular footprint of radius stroke_width, then the logo goes on top.
    """
    alpha = logo.split()[-1]
    halo = Image.new("RGBA", logo.size, (255, 255, 255, 255))
    offsets = [
        (dx, dy)
        for dx in range(-stroke_width, stroke_width + 1)
        for dy in range(-stroke_width, stroke_width + 1)
        if dx * dx + dy * dy <= stroke_width * stroke_width
    ]
    for dx, dy in offsets:
        target.paste(halo, (x + dx, y + dy), alpha)
    target.paste(logo, (x, y), logo)
|
| 211 |
+
|
| 212 |
+
def _to_arabic_digits(text: str) -> str:
|
| 213 |
+
latin_to_arabic = str.maketrans("0123456789", "٠١٢٣٤٥٦٧٨٩")
|
| 214 |
+
return text.translate(latin_to_arabic)
|
| 215 |
+
|
| 216 |
+
def _draw_text_with_spacing(draw: ImageDraw.ImageDraw, x: int, y: int, text: str, font: ImageFont.FreeTypeFont, fill: tuple, spacing: int = 0):
    """Draw text, optionally with per-character tracking.

    Complex-script shaping is handled by reshaper/bidi before this call, so a
    plain glyph-by-glyph draw is safe. With spacing == 0 the whole string is
    drawn in one call.
    """
    if spacing == 0:
        draw.text((x, y), text, fill=fill, font=font)
        return

    pen_x = x
    for ch in text:
        draw.text((pen_x, y), ch, fill=fill, font=font)
        pen_x += font.getlength(ch) + spacing
|
| 226 |
+
|
| 227 |
+
def _today_str() -> str:
|
| 228 |
+
d = date.today()
|
| 229 |
+
return f"{d.day}.{d.month}.{d.year}"
|
| 230 |
+
|
| 231 |
+
# ──────────────────────────────────────────────────────────────
|
| 232 |
+
# Main API
|
| 233 |
+
# ──────────────────────────────────────────────────────────────
|
| 234 |
+
|
| 235 |
+
def generate_layout(input_image: Image.Image, person_name: str = "", id_number: str = "",
                    add_studio_name: bool = True, add_logo: bool = True, add_date: bool = True,
                    frame_color: tuple = None, frame_name: str = None) -> Image.Image:
    """Compose the full print sheet: a grid of small photos on the left and a
    large branded panel on the right.

    Args:
        input_image: Source portrait (any PIL mode; transparency is flattened to white).
        person_name: Optional name printed under each small photo (Arabic-shaped).
        id_number: Optional ID overlaid on each small photo (Eastern Arabic digits).
        add_studio_name / add_logo / add_date: Toggles for branding overlays.
        frame_color: Optional RGB tuple for the right-hand panel (defaults to MAROON).
        frame_name: Optional decorative frame asset name for the large photo.

    Returns:
        An RGB PIL Image sized OUTPUT_WIDTH x OUTPUT_HEIGHT, with the DPI
        recorded in .info for downstream save().
    """
    # Reload settings to ensure any changes to settings.json are applied immediately
    global S, DPI, OUTPUT_WIDTH, OUTPUT_HEIGHT, PHOTO_BOTTOM_PAD, WHITE, MAROON, DARK_RED, TEXT_DARK
    S = load_settings()
    DPI = S["layout"]["dpi"]
    OUTPUT_WIDTH = round(S["layout"]["output_w_cm"] / 2.54 * DPI)
    OUTPUT_HEIGHT = round(S["layout"]["output_h_cm"] / 2.54 * DPI)
    PHOTO_BOTTOM_PAD = round(S["layout"]["photo_bottom_pad_cm"] / 2.54 * DPI)
    WHITE = c("white")
    MAROON = c("maroon")
    DARK_RED = c("dark_red")
    TEXT_DARK = c("text_dark")

    # Determine frame color
    side_panel_color = frame_color if frame_color else MAROON

    print(f"LAYOUT: Starting generation | Name: '{person_name}' | ID: '{id_number}'")
    print(f"LAYOUT: Options | Logo: {add_logo} | Studio: {add_studio_name} | Date: {add_date}")
    print(f"LAYOUT: Font Sizes | ID: {S['overlays']['id_font_size']} | Name: {S['overlays']['name_font_size']}")

    # Flatten any transparency onto a white background.
    if input_image.mode in ("RGBA", "LA") or (input_image.mode == "P" and "transparency" in input_image.info):
        img = Image.new("RGB", input_image.size, WHITE)
        img.paste(input_image, (0, 0), input_image.convert("RGBA"))
    else:
        img = input_image.convert("RGB")

    logo = _load_logo() if add_logo else None
    frame_img = _load_frame(frame_name) if frame_name else None
    today = _today_str()
    # Caption text under each small photo, depending on the enabled toggles.
    studio_date_text = f"EL HELAL {today}" if add_studio_name and add_date else \
                       "EL HELAL" if add_studio_name else \
                       today if add_date else ""

    f_date = _find_font(S["overlays"]["date_font_size"])
    f_id = _find_font(S["overlays"]["id_font_size"])
    f_name = _arabic_font(S["overlays"]["name_font_size"])
    f_date_l = _find_font(S["overlays"]["large_date_font_size"])
    f_brand = _find_font(52)

    display_name = _reshape_arabic(person_name)
    id_display = _to_arabic_digits(id_number)

    canvas = Image.new("RGB", (OUTPUT_WIDTH, OUTPUT_HEIGHT), WHITE)
    draw = ImageDraw.Draw(canvas)

    # Split the sheet: fixed-width brand panel (9.2 cm) on the right,
    # photo grid filling the remaining width on the left.
    brand_w = round(9.2 / 2.54 * DPI)
    grid_w = OUTPUT_WIDTH - brand_w - S["layout"]["section_gap"]

    avail_w = grid_w - 2*S["layout"]["grid_margin"] - (S["layout"]["grid_cols"]-1)*S["layout"]["grid_gap"]
    cell_w = avail_w // S["layout"]["grid_cols"]
    avail_h = OUTPUT_HEIGHT - 2*S["layout"]["grid_margin"] - (S["layout"]["grid_rows"]-1)*S["layout"]["grid_gap"]
    cell_h = avail_h // S["layout"]["grid_rows"]

    # Each cell reserves PHOTO_BOTTOM_PAD under the photo for caption text.
    photo_h = cell_h - PHOTO_BOTTOM_PAD
    small_raw = _resize_to_fit(img, cell_w, photo_h)
    # Add thin inner stroke using settings
    small = _add_inner_stroke(small_raw, color=(210, 210, 210), width=S["layout"]["photo_stroke_width"])
    sw, sh = small.size

    # Decorate one small photo once, then paste copies into every grid cell.
    small_dec = Image.new("RGBA", (sw, sh), (255, 255, 255, 0))
    small_dec.paste(small, (0, 0))

    if id_display:
        id_draw = ImageDraw.Draw(small_dec)
        sp = S["overlays"]["id_char_spacing"]
        # Total width with per-character spacing. NOTE: the generator variable
        # 'c' shadows the module-level color helper inside this expression only.
        tw = sum(f_id.getlength(c) for c in id_display) + (len(id_display)-1)*sp
        tx, ty = (sw-tw)//2, sh - S["overlays"]["id_font_size"] - S["overlays"]["id_lift_offset"]
        # Poor-man's outline: white copies at 8 offsets, then dark text on top.
        for off in [(-2,-2), (2,-2), (-2,2), (2,2), (0,-2), (0,2), (-2,0), (2,0)]:
            _draw_text_with_spacing(id_draw, tx+off[0], ty+off[1], id_display, f_id, WHITE, sp)
        _draw_text_with_spacing(id_draw, tx, ty, id_display, f_id, TEXT_DARK, sp)

    if logo:
        ls = S["overlays"]["logo_size_small"]
        l_img = _resize_to_fit(logo, ls, ls)
        _paste_logo_with_stroke(small_dec, l_img, S["overlays"]["logo_margin"], sh - l_img.size[1] - S["overlays"]["logo_margin"])

    # Flatten the decorated RGBA photo back onto white.
    small_final = Image.new("RGB", small_dec.size, WHITE)
    small_final.paste(small_dec, (0, 0), small_dec)

    # Paste the decorated photo into every grid cell with its captions.
    for r in range(S["layout"]["grid_rows"]):
        for col in range(S["layout"]["grid_cols"]):
            x = S["layout"]["grid_margin"] + col*(cell_w + S["layout"]["grid_gap"]) + (cell_w - sw)//2
            y = S["layout"]["grid_margin"] + r*(cell_h + S["layout"]["grid_gap"])
            canvas.paste(small_final, (x, y))
            if studio_date_text:
                draw.text((x + 5, y + sh + 1), studio_date_text, fill=DARK_RED, font=f_date)
            if display_name:
                nb = f_name.getbbox(display_name)
                nx = x + (sw - (nb[2]-nb[0]))//2
                # Draw reshaped/bidi text normally
                draw.text((nx, y + sh + 23), display_name, fill=(0,0,0), font=f_name)

    # Right-hand brand panel background.
    bx = grid_w + S["layout"]["section_gap"]
    draw.rectangle([bx, 0, OUTPUT_WIDTH, OUTPUT_HEIGHT], fill=side_panel_color)

    # Large photo centered inside the panel's brand border.
    lav_w = brand_w - 2*S["layout"]["brand_border"]
    lav_h = OUTPUT_HEIGHT - 2*S["layout"]["brand_border"] - S["layout"]["large_photo_bottom_pad"]
    large_raw = _resize_to_fit(img, lav_w, lav_h)
    large = _add_inner_stroke(large_raw, color=(210, 210, 210), width=S["layout"]["photo_stroke_width"])
    lw, lh = large.size
    px = bx + (brand_w - lw)//2
    py = S["layout"]["brand_border"] + (lav_h - lh)//2

    # 6px white mat behind the large photo.
    draw.rectangle([px-6, py-6, px+lw+6, py+lh+6], fill=WHITE)
    canvas.paste(large, (px, py))

    if frame_img:
        # Resize frame to fit the photo + white frame exactly (6px padding on all sides)
        frame_w, frame_h = lw + 12, lh + 12
        f_overlay = frame_img.resize((frame_w, frame_h), Image.LANCZOS)
        canvas.paste(f_overlay, (px - 6, py - 6), f_overlay)

    if logo:
        ls = S["overlays"]["logo_size_large"]
        l_l = _resize_to_fit(logo, ls, ls)
        _paste_logo_with_stroke(canvas, l_l, px + 15, py + lh - l_l.size[1] - 15)

    if add_date:
        # anchor="ms": horizontally centered, baseline at the given y.
        draw.text((px + lw//2, py + lh - 40), studio_date_text, fill=DARK_RED, font=f_date_l, anchor="ms")

    if add_studio_name:
        btb = f_brand.getbbox("EL HELAL Studio")
        draw.text((bx + (brand_w - (btb[2]-btb[0]))//2, OUTPUT_HEIGHT - S["layout"]["brand_bottom_offset"]), "EL HELAL Studio", fill=WHITE, font=f_brand)

    # Record DPI so downstream save() can embed print metadata.
    canvas.info["dpi"] = (DPI, DPI)
    return canvas
|
id-maker/core/pipeline.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
import argparse
|
| 4 |
+
import sys
|
| 5 |
+
import torch
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from PIL import Image
|
| 8 |
+
|
| 9 |
+
# Import functions from existing scripts
|
| 10 |
+
# We might need to handle the monkeypatch for transformers in process_images
|
| 11 |
+
import crop
|
| 12 |
+
import process_images
|
| 13 |
+
import color_steal
|
| 14 |
+
import white_bg
|
| 15 |
+
import restoration
|
| 16 |
+
|
| 17 |
+
def run_pipeline(raw_dir, crop_dir, trans_dir, colored_dir, white_dir, curves_file, restore=False, fidelity=0.5):
    """Run the full processing chain:
    (optional) face restore -> crop -> background removal -> color grade -> white background.

    Args:
        raw_dir: Folder of input images.
        crop_dir / trans_dir / colored_dir / white_dir: Output folders per stage.
        curves_file: Pre-trained color curves (.npz); grading is skipped if missing.
        restore: When True, run CodeFormer face restoration first.
        fidelity: CodeFormer fidelity (0-1; lower means stronger restoration).
    """
    start_total = time.time()

    # Step 0: Face Restoration
    current_raw_dir = raw_dir
    if restore:
        print("\n" + "="*50)
        print("STEP 0: Face Restoration (CodeFormer)")
        print("="*50)
        # Restored copies go into a sibling of crop_dir named "restored".
        restored_dir = os.path.join(os.path.dirname(crop_dir), "restored")
        restoration.batch_restore(raw_dir, restored_dir, fidelity=fidelity)
        current_raw_dir = restored_dir

    # Step 1: Crop
    print("\n" + "="*50)
    print("STEP 1: Cropping and Face Detection")
    print("="*50)
    crop.batch_process(current_raw_dir, crop_dir)

    # Step 2: Background Removal
    print("\n" + "="*50)
    print("STEP 2: Background Removal (AI)")
    print("="*50)

    # Setup model (this is the heavy part)
    model, device = process_images.setup_model()
    transform = process_images.get_transform()

    input_path = Path(crop_dir)
    output_path = Path(trans_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    files = [f for f in input_path.iterdir() if f.suffix.lower() in process_images.ALLOWED_EXTENSIONS]
    if not files:
        print(f"No images found in {crop_dir} for background removal.")
    else:
        for idx, file_path in enumerate(files, 1):
            try:
                print(f"[{idx}/{len(files)}] Removing background: {file_path.name}...", end='', flush=True)
                img = Image.open(file_path)
                from PIL import ImageOps
                # Bake the EXIF orientation into the pixels before inference.
                img = ImageOps.exif_transpose(img)
                img = img.convert('RGB')
                result = process_images.remove_background(model, img, transform)
                out_name = file_path.stem + "_rmbg.png"
                result.save(output_path / out_name, "PNG")
                print(" Done.")
            except Exception as e:
                # One bad file must not abort the whole batch.
                print(f" Failed! {e}")

    # Step 3: Color Grading
    print("\n" + "="*50)
    print("STEP 3: Color Grading")
    print("="*50)
    luts = color_steal.load_trained_curves(curves_file)
    if not luts:
        print(f"Warning: No trained curves found at {curves_file}. Skipping color grading.")
        # If no grading, we might want to copy trans to colored or just skip to step 4 using trans_dir
        # For simplicity, let's assume we need curves or we skip this step and use trans_dir for step 4
        current_input_for_white = trans_dir
    else:
        color_steal.apply_to_folder(luts, trans_dir, colored_dir)
        current_input_for_white = colored_dir

    # Step 4: White Background
    print("\n" + "="*50)
    print("STEP 4: Adding White Background & Finalizing")
    print("="*50)
    white_bg.add_white_background(current_input_for_white, white_dir)

    end_total = time.time()
    print("\n" + "="*50)
    print(f"PIPELINE COMPLETE in {end_total - start_total:.2f} seconds")
    print(f"Final results are in: {os.path.abspath(white_dir)}")
    print("="*50)
|
| 92 |
+
|
| 93 |
+
if __name__ == "__main__":
    # CLI entry point: wire the folder arguments into run_pipeline.
    parser = argparse.ArgumentParser(description="Full Image Processing Pipeline")
    parser.add_argument("--raw", default="raw", help="Folder with raw images")
    parser.add_argument("--crop", default="crop", help="Folder for cropped images")
    parser.add_argument("--trans", default="trans", help="Folder for transparent images")
    parser.add_argument("--colored", default="colored", help="Folder for color-graded images")
    parser.add_argument("--white", default="white", help="Folder for final results")
    parser.add_argument("--curves", default="trained_curves.npz", help="Pre-trained curves file")
    parser.add_argument("--restore", action="store_true", help="Enable face restoration using CodeFormer")
    parser.add_argument("--fidelity", type=float, default=0.5, help="CodeFormer fidelity (0-1, lower is more restoration)")

    args = parser.parse_args()

    # Ensure all directories exist
    for d in [args.raw, args.crop, args.trans, args.colored, args.white]:
        if not os.path.exists(d):
            # exist_ok guards against a race between the check and the creation.
            os.makedirs(d, exist_ok=True)
            print(f"Created directory: {d}")

    run_pipeline(args.raw, args.crop, args.trans, args.colored, args.white, args.curves, restore=args.restore, fidelity=args.fidelity)
|
id-maker/core/process_images.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import argparse
|
| 4 |
+
import time
|
| 5 |
+
import traceback
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from PIL import Image, ImageOps
|
| 8 |
+
import torch
|
| 9 |
+
from torchvision import transforms
|
| 10 |
+
|
| 11 |
+
# ---- Monkeypatch for transformers 4.50+ compatibility with custom Config classes ----
from transformers import configuration_utils
# Keep a reference to the unpatched method so the wrapper can delegate to it.
_original_get_text_config = configuration_utils.PretrainedConfig.get_text_config

def _patched_get_text_config(self, *args, **kwargs):
    # Remote-code Config classes may lack `is_encoder_decoder`, which newer
    # transformers versions read inside get_text_config; default it first.
    if not hasattr(self, 'is_encoder_decoder'):
        self.is_encoder_decoder = False
    return _original_get_text_config(self, *args, **kwargs)

configuration_utils.PretrainedConfig.get_text_config = _patched_get_text_config
# ---- End Monkeypatch ----
|
| 22 |
+
|
| 23 |
+
# ---- Monkeypatch for BiRefNet/RMBG-2.0 meta-tensor bug during initialization ----
_orig_linspace = torch.linspace

def _patched_linspace(*args, **kwargs):
    # If the result lands on the "meta" device (no real storage), re-run the
    # call forced onto CPU so downstream init code gets actual values.
    t = _orig_linspace(*args, **kwargs)
    if t.is_meta:
        return _orig_linspace(*args, **{**kwargs, "device": "cpu"})
    return t

torch.linspace = _patched_linspace
# ---- End Monkeypatch ----
|
| 32 |
+
|
| 33 |
+
# ---- Monkeypatch for BiRefNet tied weights compatibility with transformers 4.50+ ----
def patch_birefnet_tied_weights():
    """Make PreTrainedModel.all_tied_weights_keys always return a dict,
    even when a remote-code model leaves _tied_weights_keys as None."""
    try:
        from transformers import PreTrainedModel

        # Force the property to always return a dict, even if _tied_weights_keys is None
        def _get_all_tied_weights_keys(self):
            return getattr(self, "_tied_weights_keys", {}) or {}

        PreTrainedModel.all_tied_weights_keys = property(_get_all_tied_weights_keys)
        print("Applied robust BiRefNet tied weights patch")

    except Exception as e:
        # Best-effort: an incompatible transformers version only logs a warning.
        print(f"Failed to apply BiRefNet tied weights patch: {e}")

patch_birefnet_tied_weights()
# ---- End Monkeypatch ----
|
| 50 |
+
|
| 51 |
+
from transformers import AutoModelForImageSegmentation, AutoConfig
|
| 52 |
+
import retouch
|
| 53 |
+
|
| 54 |
+
# Try to import devicetorch (from your project dependencies)
|
| 55 |
+
try:
|
| 56 |
+
import devicetorch
|
| 57 |
+
except ImportError:
|
| 58 |
+
print("Error: 'devicetorch' not found. Please run this script from the project root or install requirements.")
|
| 59 |
+
sys.exit(1)
|
| 60 |
+
|
| 61 |
+
# Configure allowed extensions
|
| 62 |
+
ALLOWED_EXTENSIONS = {'.png', '.jpg', '.jpeg', '.gif', '.webp', '.bmp'}
|
| 63 |
+
|
| 64 |
+
def setup_model():
    """Load and configure the RMBG-2.0 model"""
    print("Loading BRIA-RMBG-2.0 model...")

    # 1. Device Selection
    device = devicetorch.get(torch)
    print(f"Device: {device}")

    # Give CPU inference every available core.
    if device == 'cpu':
        torch.set_num_threads(max(1, os.cpu_count() or 1))

    # 2. Load Model (config first, then weights)
    try:
        print("Loading model config...")
        config = AutoConfig.from_pretrained("cocktailpeanut/rm", trust_remote_code=True)

        # Explicitly set low_cpu_mem_usage=False to avoid meta-tensor issues
        net = AutoModelForImageSegmentation.from_pretrained(
            "cocktailpeanut/rm",
            config=config,
            trust_remote_code=True,
            low_cpu_mem_usage=False,
        )
        net = devicetorch.to(torch, net)
        net.eval()
    except Exception as e:
        print(f"Error loading model: {e}")
        traceback.print_exc()
        sys.exit(1)

    # 3. Optional CPU optimization: dynamic int8 quantization of Linear layers.
    if device == 'cpu':
        print("Applying Dynamic Quantization for CPU speedup...")
        try:
            net = torch.quantization.quantize_dynamic(
                net, {torch.nn.Linear}, dtype=torch.qint8
            )
        except Exception:
            # Best effort: silently fall back to the float model.
            pass

    return net, device
|
| 105 |
+
|
| 106 |
+
def get_transform():
    """Get the specific image transformation required by the model"""
    # Fixed preprocessing: 1024x1024 resize, tensor conversion, then
    # ImageNet mean/std normalization.
    steps = [
        transforms.Resize((1024, 1024)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    return transforms.Compose(steps)
|
| 113 |
+
|
| 114 |
+
def remove_background(model, image, transform):
    """Run the segmentation model on *image* and return a copy with an alpha mask."""
    # Remember the input resolution so the mask can be mapped back later.
    original_size = image.size

    # Preprocess: add batch dimension, move to the model's device.
    batch = transform(image).unsqueeze(0)
    batch = devicetorch.to(torch, batch)

    # Inference (no autograd state needed).
    with torch.inference_mode():
        outputs = model(batch)
        # Some model variants return a list of intermediate maps; the last is final.
        raw = outputs[-1] if isinstance(outputs, (list, tuple)) else outputs
        preds = raw.sigmoid().cpu()

    # Turn the prediction into a PIL mask at the original resolution.
    mask = transforms.ToPILImage()(preds[0].squeeze()).resize(original_size)

    # Attach the mask as the alpha channel of a copy of the input.
    cutout = image.copy()
    cutout.putalpha(mask)

    # Cleanup VRAM if needed
    devicetorch.empty_cache(torch)

    return cutout
|
| 144 |
+
|
| 145 |
+
def retouch_face(image, sensitivity=3.0, tone_smoothing=0.6):
    """Wrapper for the surgical retouch logic with detailed logging"""
    t0 = time.time()
    try:
        processed, count = retouch.retouch_image_pil(image, sensitivity, tone_smoothing)
        elapsed_ms = (time.time() - t0) * 1000
        print(f"RETOUCH: Success | Blemishes: {count} | Time: {elapsed_ms:.1f}ms")
        return processed
    except Exception as e:
        # Never let a retouch failure break the pipeline: keep the original.
        print(f"RETOUCH: Failed | Error: {e}")
        return image
|
| 156 |
+
|
| 157 |
+
def main():
    """CLI entry point: batch background removal for a folder of images.

    Flags: --input/-i source folder, --output/-o destination folder.
    Each image is EXIF-orientation-corrected, run through the segmentation
    model, and saved as <stem>_rmbg.png (PNG keeps the alpha channel).
    """
    parser = argparse.ArgumentParser(description="Batch Background Removal Tool")
    parser.add_argument('--input', '-i', required=True, help="Input folder containing images")
    parser.add_argument('--output', '-o', required=True, help="Output folder for processed images")
    args = parser.parse_args()

    input_path = Path(args.input)
    output_path = Path(args.output)

    if not input_path.exists():
        print(f"Error: Input folder '{input_path}' does not exist.")
        sys.exit(1)

    # Create output folder if it doesn't exist
    output_path.mkdir(parents=True, exist_ok=True)

    # Setup
    model, device = setup_model()
    transform = get_transform()

    # Process files (filter by allowed image extensions)
    files = [f for f in input_path.iterdir() if f.suffix.lower() in ALLOWED_EXTENSIONS]
    total = len(files)

    print(f"\nFound {total} images. Starting processing...")
    print("-" * 50)

    start_time = time.time()
    for idx, file_path in enumerate(files, 1):
        try:
            filename = file_path.name
            # FIX: the progress line previously printed a literal placeholder
            # instead of the file currently being processed.
            print(f"[{idx}/{total}] Processing {filename}...", end='', flush=True)

            # Load image and handle orientation (apply EXIF rotation)
            img = Image.open(file_path)
            img = ImageOps.exif_transpose(img)
            img = img.convert('RGB')

            # Process
            result = remove_background(model, img, transform)

            # Save (force PNG for transparency)
            out_name = file_path.stem + "_rmbg.png"
            out_file = output_path / out_name
            result.save(out_file, "PNG")

            print(" Done.")

        except Exception as e:
            # One bad file must not abort the whole batch.
            print(f" Failed! Error: {e}")

    duration = time.time() - start_time
    print("-" * 50)
    print(f"Finished! Processed {total} images in {duration:.2f} seconds.")
    print(f"Output saved to: {output_path.absolute()}")

if __name__ == "__main__":
    main()
|
id-maker/core/restoration.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import requests
|
| 3 |
+
import io
|
| 4 |
+
from PIL import Image
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from layout_engine import load_settings
|
| 7 |
+
|
| 8 |
+
# The URL should be configured in settings.json or as an environment variable
def get_api_url():
    """Resolve the CodeFormer endpoint: env var wins, then settings.json, then local default."""
    cfg = load_settings()
    fallback = cfg.get("api", {}).get("codeformer_url", "http://localhost:7860/api/restore")
    return os.environ.get("CODEFORMER_API_URL", fallback)
|
| 12 |
+
|
| 13 |
+
def restore_image(img, fidelity=0.5, upscale=1, return_pil=True):
    """
    Calls external CodeFormer API to restore the image.
    img: PIL Image or path to image
    fidelity: CodeFormer fidelity weight, forwarded as a string form field
    upscale: upscale factor, forwarded as a string form field
    return_pil: if True return a PIL Image, otherwise a BGR numpy array (cv2)
    On any API/network failure the ORIGINAL image is returned unchanged.
    """
    url = get_api_url()

    # 1. Prepare the image buffer
    img_buffer = io.BytesIO()
    if isinstance(img, (str, Path)):
        # Path input: send the raw file bytes as-is.
        with open(img, 'rb') as f:
            img_data = f.read()
        # Basic mime type guess from the extension (png vs. everything-else=jpeg)
        ext = str(img).lower()
        mime = 'image/png' if ext.endswith('.png') else 'image/jpeg'
        fname = 'input.png' if ext.endswith('.png') else 'input.jpg'
    elif hasattr(img, 'save'): # PIL Image
        # Send as PNG to avoid compression artifacts before restoration
        img.save(img_buffer, format='PNG')
        img_data = img_buffer.getvalue()
        mime = 'image/png'
        fname = 'input.png'
    else:
        # Neither a filesystem path nor a PIL-like object.
        raise ValueError("Unsupported image type for restoration")

    # 2. Call the API
    try:
        print(f"Calling CodeFormer API at {url}...")
        # Multipart upload: the image under 'image', tuning knobs as form fields.
        files = {'image': (fname, img_data, mime)}
        data = {
            'fidelity': str(fidelity),
            'upscale': str(upscale),
            'background_enhance': 'false',
            'face_upsample': 'false'
        }

        # The API returns JSON with an image_url
        response = requests.post(url, files=files, data=data, timeout=120)
        response.raise_for_status()
        result_json = response.json()

        if result_json.get('status') == 'success' and result_json.get('results'):
            restored_url = result_json['results'][0]['image_url']
            print(f"Restoration successful. Downloading result from: {restored_url}")

            # Download the actual image bytes
            img_res = requests.get(restored_url, timeout=60)
            img_res.raise_for_status()
            restored_pil = Image.open(io.BytesIO(img_res.content))
        else:
            # Raised inside the try so the fallback below also covers API-level errors.
            raise ValueError(f"API Error: {result_json.get('message', 'Unknown error')}")

        if return_pil:
            return restored_pil
        else:
            # Lazy imports keep cv2/numpy optional for PIL-only callers.
            import numpy as np
            import cv2
            return cv2.cvtColor(np.array(restored_pil), cv2.COLOR_RGB2BGR)

    except Exception as e:
        print(f"API Restoration Error: {e}")
        # Fallback to original image on failure
        if hasattr(img, 'convert'):
            return img
        else:
            # Path input: rebuild a PIL image from the bytes read earlier.
            return Image.open(io.BytesIO(img_data))
|
| 79 |
+
|
| 80 |
+
def init_restoration_model(device_str=None):
    """No-op for API-based restoration, but kept for compatibility.

    Returns (None, "remote") so callers expecting (model, device) keep working.
    """
    print("Using Remote CodeFormer API (No local model loaded)")
    return (None, "remote")
|
| 84 |
+
|
| 85 |
+
def batch_restore(input_folder, output_folder, fidelity=0.5):
    """CLI compatibility for remote restoration.

    Restores every .jpg/.jpeg/.png in *input_folder* via the CodeFormer API
    and saves the results under the same filenames in *output_folder*.
    """
    # exist_ok avoids a race if the folder appears between check and creation.
    os.makedirs(output_folder, exist_ok=True)

    files = [f for f in os.listdir(input_folder) if f.lower().endswith(('.jpg', '.jpeg', '.png'))]

    for filename in files:
        # FIX: previously printed a literal placeholder instead of the filename.
        print(f"Restoring {filename} via API...")
        img_path = os.path.join(input_folder, filename)
        restored_img = restore_image(img_path, fidelity=fidelity, return_pil=True)
        restored_img.save(os.path.join(output_folder, filename), quality=95)
|
id-maker/core/retouch.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
"""
|
| 3 |
+
Surgical Skin Retouching Script - Refactored for PIL Integration
|
| 4 |
+
"""
|
| 5 |
+
import cv2
|
| 6 |
+
import numpy as np
|
| 7 |
+
from PIL import Image
|
| 8 |
+
|
| 9 |
+
# Robust Mediapipe Loading
|
| 10 |
+
try:
|
| 11 |
+
import mediapipe as mp
|
| 12 |
+
import mediapipe.solutions.face_mesh as mp_face_mesh
|
| 13 |
+
import mediapipe.solutions.drawing_utils as mp_drawing
|
| 14 |
+
except (ImportError, ModuleNotFoundError):
|
| 15 |
+
try:
|
| 16 |
+
import mediapipe as mp
|
| 17 |
+
mp_face_mesh = mp.solutions.face_mesh
|
| 18 |
+
mp_drawing = mp.solutions.drawing_utils
|
| 19 |
+
except (AttributeError, ImportError, ModuleNotFoundError):
|
| 20 |
+
mp_face_mesh = None
|
| 21 |
+
mp_drawing = None
|
| 22 |
+
|
| 23 |
+
# Landmarks constants
|
| 24 |
+
FACE_OVAL = [10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288,
|
| 25 |
+
397, 365, 379, 378, 400, 377, 152, 148, 176, 149, 150, 136,
|
| 26 |
+
172, 58, 132, 93, 234, 127, 162, 21, 54, 103, 67, 109]
|
| 27 |
+
|
| 28 |
+
LEFT_EYE = [362, 382, 381, 380, 374, 373, 390, 249, 263, 466, 388, 387, 386, 385, 384, 398]
|
| 29 |
+
RIGHT_EYE = [33, 7, 163, 144, 145, 153, 154, 155, 133, 173, 157, 158, 159, 160, 161, 246]
|
| 30 |
+
LEFT_EYEBROW = [336, 296, 334, 293, 300, 276, 283, 282, 295, 285]
|
| 31 |
+
RIGHT_EYEBROW = [70, 63, 105, 66, 107, 55, 65, 52, 53, 46]
|
| 32 |
+
LIPS_OUTER = [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291, 409, 270, 269, 267, 0, 37, 39, 40, 185]
|
| 33 |
+
LIPS_INNER = [78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308, 415, 310, 311, 312, 13, 82, 81, 80, 191]
|
| 34 |
+
NOSE_TIP = [1, 2, 98, 327]
|
| 35 |
+
|
| 36 |
+
def get_landmark_points(landmarks, indices, width, height):
    """Convert normalized landmarks at *indices* into integer pixel (x, y) pairs."""
    return np.array([
        (int(landmarks[i].x * width), int(landmarks[i].y * height))
        for i in indices
    ])
|
| 42 |
+
|
| 43 |
+
def create_skin_mask(image, landmarks, width, height, feature_guard=5):
    """Build a binary (0/255) uint8 mask of retouchable skin inside the face oval.

    image: BGR uint8 frame; landmarks: mediapipe normalized landmarks.
    feature_guard: outline thickness (px) drawn around excluded features so
    edits keep a safety margin from eyes/brows/lips/nose.
    """
    mask = np.zeros((height, width), dtype=np.uint8)
    # Start from the convex hull of the face outline landmarks.
    face_pts = get_landmark_points(landmarks, FACE_OVAL, width, height)
    cv2.fillConvexPoly(mask, cv2.convexHull(face_pts), 255)

    # Punch out facial features that must never be retouched.
    exclusions = [LEFT_EYE, RIGHT_EYE, LEFT_EYEBROW, RIGHT_EYEBROW,
                  LIPS_OUTER, LIPS_INNER, NOSE_TIP]
    for feature_indices in exclusions:
        pts = get_landmark_points(landmarks, feature_indices, width, height)
        hull = cv2.convexHull(pts)
        cv2.fillConvexPoly(mask, hull, 0)
        # Thick zero-valued outline widens each excluded zone by feature_guard px.
        cv2.polylines(mask, [hull], True, 0, feature_guard)

    # Keep only pixels whose Cr/Cb values fall in a typical skin-tone range.
    img_ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    lower = np.array([0, 133, 77], dtype=np.uint8)
    upper = np.array([255, 173, 127], dtype=np.uint8)
    color_mask = cv2.inRange(img_ycrcb, lower, upper)
    mask = cv2.bitwise_and(mask, color_mask)

    # Erode to pull the mask slightly away from hard edges.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    mask = cv2.erode(mask, kernel, iterations=1)
    return mask
|
| 65 |
+
|
| 66 |
+
def frequency_separation(image, blur_radius=15):
    """Split *image* into (low_freq, high_freq) layers via Gaussian blur."""
    data = image.astype(np.float64)
    # GaussianBlur requires an odd kernel size; bump even values by one.
    radius = blur_radius + 1 if blur_radius % 2 == 0 else blur_radius
    base = cv2.GaussianBlur(data, (radius, radius), 0)
    detail = data - base
    return base, detail
|
| 72 |
+
|
| 73 |
+
def detect_blemishes(high_freq, skin_mask, sensitivity=3.0, min_size=3, max_size=50):
    """Locate blemish spots in the high-frequency layer, restricted to skin.

    sensitivity: number of standard deviations from the mean DoG response a
    pixel must deviate (darker OR brighter) to count as a blemish.
    min_size/max_size: side lengths bounding accepted contour areas (px^2).
    Returns a dilated 0/255 mask the same shape as skin_mask.
    """
    # Shift the signed detail layer into uint8 range before grayscale conversion.
    hf_gray = cv2.cvtColor(np.clip(high_freq + 128, 0, 255).astype(np.uint8), cv2.COLOR_BGR2GRAY)
    # Difference-of-Gaussians: fine minus coarse highlights small-scale texture.
    dog_fine = cv2.GaussianBlur(hf_gray.astype(np.float64), (3, 3), 1)
    dog_coarse = cv2.GaussianBlur(hf_gray.astype(np.float64), (15, 15), 5)
    dog = dog_fine - dog_coarse

    # Statistics are computed over skin pixels only.
    skin_pixels = dog[skin_mask > 0]
    if len(skin_pixels) == 0: return np.zeros_like(skin_mask)

    mean_val, std_val = np.mean(skin_pixels), np.std(skin_pixels)
    # Two-sided threshold: catches both dark (pits) and bright (shine) outliers.
    threshold, threshold_bright = mean_val - sensitivity * std_val, mean_val + sensitivity * std_val

    blemish_mask = np.zeros_like(skin_mask)
    blemish_mask[(dog < threshold) & (skin_mask > 0)] = 255
    blemish_mask[(dog > threshold_bright) & (skin_mask > 0)] = 255

    # Keep only plausible-sized spots; reject noise and large regions.
    contours, _ = cv2.findContours(blemish_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    filtered_mask = np.zeros_like(blemish_mask)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if min_size * min_size <= area <= max_size * max_size:
            cv2.drawContours(filtered_mask, [cnt], -1, 255, -1)

    # Dilate so the later inpainting covers each blemish with some margin.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    return cv2.dilate(filtered_mask, kernel, iterations=1)
|
| 98 |
+
|
| 99 |
+
def even_skin_tone(low_freq, skin_mask, strength=0.4):
    """Blend the low-frequency layer toward a blurred version inside the skin mask."""
    # Feathered mask so the smoothing fades out at the skin boundary.
    weights = cv2.GaussianBlur(skin_mask.astype(np.float64) / 255.0, (31, 31), 10)
    weights = np.stack([weights] * 3, axis=-1)
    target = cv2.GaussianBlur(low_freq, (31, 31), 10)
    return low_freq + (target - low_freq) * weights * strength
|
| 104 |
+
|
| 105 |
+
def retouch_image_pil(pil_image, sensitivity=3.0, tone_smoothing=0.6):
    """Surgically retouch skin blemishes on a PIL image.

    Returns (retouched PIL image, blemish count). If mediapipe is not
    available or no face is detected, the input is returned unchanged with 0.
    """
    # Handle Transparency (Alpha Channel): stash it, re-attach after retouching.
    has_alpha = pil_image.mode in ("RGBA", "LA", "PA")
    alpha_channel = None
    if has_alpha:
        alpha_channel = pil_image.getchannel('A')

    # PIL to BGR (always process RGB part)
    image = cv2.cvtColor(np.array(pil_image.convert("RGB")), cv2.COLOR_RGB2BGR)
    height, width = image.shape[:2]

    # Face Detection
    if mp_face_mesh is None:
        print("RETOUCH: Skipped (Mediapipe FaceMesh not loaded)")
        return pil_image, 0

    with mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1, refine_landmarks=True) as face_mesh:
        # FaceMesh expects RGB input.
        results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        if not results.multi_face_landmarks:
            return pil_image, 0 # No face found, return original with 0 count

        landmarks = results.multi_face_landmarks[0].landmark

    # Processing: skin mask + frequency separation; blemishes are repaired
    # only in the high-frequency (texture) layer.
    skin_mask = create_skin_mask(image, landmarks, width, height)
    low_freq, high_freq = frequency_separation(image)
    blemish_mask = detect_blemishes(high_freq, skin_mask, sensitivity)

    # cv2.inpaint needs uint8, so shift the signed detail layer by +128.
    hf_uint8 = np.clip(high_freq + 128, 0, 255).astype(np.uint8)
    hf_inpainted = cv2.inpaint(hf_uint8, blemish_mask, inpaintRadius=5, flags=cv2.INPAINT_TELEA)
    high_freq_clean = hf_inpainted.astype(np.float64) - 128.0

    # Optional color smoothing in the low-frequency layer.
    if tone_smoothing > 0:
        low_freq = even_skin_tone(low_freq, skin_mask, tone_smoothing)

    # Recombine the two layers.
    result = np.clip(low_freq + high_freq_clean, 0, 255).astype(np.uint8)

    # Seamless Blend: feathered skin mask mixes retouched and original pixels.
    mask_f = cv2.GaussianBlur(skin_mask.astype(np.float64)/255.0, (21, 21), 7)
    mask_3ch = np.stack([mask_f] * 3, axis=-1)
    final_bgr = (result * mask_3ch + image * (1.0 - mask_3ch)).astype(np.uint8)

    # Count blemishes for logging
    blemish_count = len(cv2.findContours(blemish_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0])

    # BGR to PIL
    final_pil = Image.fromarray(cv2.cvtColor(final_bgr, cv2.COLOR_BGR2RGB))

    # Restore Alpha Channel
    if has_alpha and alpha_channel is not None:
        final_pil.putalpha(alpha_channel)
        return final_pil, blemish_count
    else:
        return final_pil, blemish_count
|
id-maker/core/trained_curves.npz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f9686847cc033368e66bddaf4ebb214cc0442876e2d3451d55209b45a1632b85
|
| 3 |
+
size 1492
|
id-maker/core/white_bg.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import argparse
|
| 3 |
+
from PIL import Image
|
| 4 |
+
|
| 5 |
+
# Constants for 10cm x 14cm at 300 DPI
|
| 6 |
+
# Calculation: (10 / 2.54 * 300) = 1181 px width
|
| 7 |
+
# (14 / 2.54 * 300) = 1654 px height
|
| 8 |
+
TARGET_WIDTH = 1181
|
| 9 |
+
TARGET_HEIGHT = 1654
|
| 10 |
+
|
| 11 |
+
def add_white_background(input_folder, output_folder):
    """Composite transparent PNG/TIFF images onto a white 10x14cm canvas.

    Every matching file in *input_folder* is resized to 1181x1654 px
    (10x14cm at 300 DPI), pasted onto a white background, and saved as a
    JPEG with 300 DPI metadata in *output_folder*.
    """
    if not os.path.exists(output_folder):
        # exist_ok avoids a race with another process creating the folder.
        os.makedirs(output_folder, exist_ok=True)
        print(f"Created output folder: {output_folder}")

    valid_exts = ('.png', '.tiff', '.tif')
    files = [f for f in os.listdir(input_folder) if f.lower().endswith(valid_exts)]

    if not files:
        print(f"No transparent images (PNG/TIFF) found in {input_folder}")
        return

    print(f"Found {len(files)} images. Adding white background...")

    for filename in files:
        img_path = os.path.join(input_folder, filename)
        out_path = os.path.join(output_folder, os.path.splitext(filename)[0] + ".jpg")

        try:
            # FIX: the progress line previously printed a literal placeholder
            # instead of the current filename.
            print(f"Processing: {filename}...")

            # 1. Load Image (Ensure RGBA to handle transparency)
            foreground = Image.open(img_path).convert("RGBA")

            # 2. Create a white canvas of the exact target size (10x14cm)
            final_image = Image.new("RGB", (TARGET_WIDTH, TARGET_HEIGHT), (255, 255, 255))

            # NOTE(review): this is a plain stretch to the target size, so the
            # aspect ratio is NOT preserved for non-5:7 inputs — confirm intended.
            # LANCZOS gives high quality resampling either way.
            fg_resized = foreground.resize((TARGET_WIDTH, TARGET_HEIGHT), Image.Resampling.LANCZOS)

            # 3. Composite Foreground onto White Canvas
            # We use the alpha channel of the resized foreground as the mask
            final_image.paste(fg_resized, (0, 0), fg_resized)

            # 4. Save with 300 DPI metadata
            final_image.save(out_path, quality=95, dpi=(300, 300))
            print(f" -> Saved to {out_path} (10x14cm @ 300DPI)")

        except Exception as e:
            # FIX: error line also lost its filename placeholder.
            print(f" ERROR processing {filename}: {e}")
|
| 53 |
+
|
| 54 |
+
if __name__ == "__main__":
    # Plain positional CLI: input folder, then output folder.
    cli = argparse.ArgumentParser(
        description="Add white background to transparent images and resize to 10x14cm @ 300DPI"
    )
    cli.add_argument("input", help="Input folder containing transparent images (PNG)")
    cli.add_argument("output", help="Output folder")
    options = cli.parse_args()
    add_white_background(options.input, options.output)
|
id-maker/desktop_launcher.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import webview
|
| 3 |
+
import threading
|
| 4 |
+
import uvicorn
|
| 5 |
+
import sys
|
| 6 |
+
import os
|
| 7 |
+
import time
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
# --- PyInstaller Path Logic ---
# Resolve the project root both for a frozen (PyInstaller) build and for
# a plain source checkout.
if getattr(sys, 'frozen', False):
    # If we are running in a bundle, sys._MEIPASS is our root
    ROOT_DIR = Path(sys._MEIPASS)
else:
    # Running in development
    ROOT_DIR = Path(__file__).parent.absolute()

print(f"Engine Root: {ROOT_DIR}")

# Add core and web directories to python path so imports work correctly
sys.path.append(str(ROOT_DIR / "core"))
sys.path.append(str(ROOT_DIR / "web"))

# Now we can import the app from our server file
try:
    from web.server import app
except ImportError:
    # Fallback if structure is different (e.g. "web" is already on sys.path)
    from server import app
|
| 31 |
+
def start_server():
    """Starts the FastAPI server in a background thread."""
    # Using a fixed local port for the desktop app; log_level="error" keeps
    # uvicorn quiet so only real failures reach the desktop console.
    uvicorn.run(app, host="127.0.0.1", port=25025, log_level="error")
|
| 35 |
+
|
| 36 |
+
if __name__ == "__main__":
    print("Starting EL HELAL Studio Desktop Engine...")

    # 1. Start FastAPI in a background thread
    # daemon=True means the server thread dies automatically when the
    # webview window closes and the main thread exits.
    t = threading.Thread(target=start_server, daemon=True)
    t.start()

    # 2. Brief wait for the server to initialize
    # NOTE(review): a fixed 2s sleep assumes the server is up by then — a
    # readiness poll on the port would be more reliable; confirm acceptable.
    time.sleep(2)

    # 3. Create and launch the native desktop window
    # This will use the OS's native web renderer (Edge WebView2 on Win, WebKit on Mac/Linux)
    window = webview.create_window(
        title='EL HELAL Studio — ID Maker',
        url='http://127.0.0.1:25025',
        width=1200,
        height=900,
        resizable=True,
        confirm_close=True,
        background_color='#ffffff'
    )

    print("Window launching. Close the window to exit the application.")
    # webview.start() blocks until the window is closed.
    webview.start()
|
id-maker/docker-compose.yml
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version: '3.8'
|
| 2 |
+
|
| 3 |
+
services:
|
| 4 |
+
studio-web:
|
| 5 |
+
build: .
|
| 6 |
+
ports:
|
| 7 |
+
- "8000:8000"
|
| 8 |
+
volumes:
|
| 9 |
+
- studio_storage:/app/storage
|
| 10 |
+
- ./assets:/app/assets
|
| 11 |
+
- ./core:/app/core
|
| 12 |
+
environment:
|
| 13 |
+
- PORT=8000
|
| 14 |
+
restart: unless-stopped
|
| 15 |
+
|
| 16 |
+
volumes:
|
| 17 |
+
studio_storage:
|
id-maker/gui/gui.py
ADDED
|
@@ -0,0 +1,415 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
EL HELAL Studio – Professional Photo Workflow
|
| 3 |
+
Integrated Pipeline with Memory Optimization and Manual Crop
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import tkinter as tk
|
| 7 |
+
from tkinter import ttk, filedialog, messagebox
|
| 8 |
+
from PIL import Image, ImageTk, ImageOps
|
| 9 |
+
import os
|
| 10 |
+
import threading
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
import time
|
| 13 |
+
import sys
|
| 14 |
+
|
| 15 |
+
# Add core directory to python path
|
| 16 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'core')))
|
| 17 |
+
|
| 18 |
+
# Import our processing tools
|
| 19 |
+
import crop
|
| 20 |
+
import process_images
|
| 21 |
+
import color_steal
|
| 22 |
+
from layout_engine import generate_layout
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class CropDialog(tk.Toplevel):
|
| 26 |
+
"""Interactive window to adjust crop manually."""
|
| 27 |
+
def __init__(self, parent, image_path, current_rect, callback):
|
| 28 |
+
super().__init__(parent)
|
| 29 |
+
self.title("Adjust Crop (5:7 Aspect Ratio)")
|
| 30 |
+
self.image_path = image_path
|
| 31 |
+
self.callback = callback
|
| 32 |
+
|
| 33 |
+
# Load original image for reference
|
| 34 |
+
self.orig_pil = Image.open(image_path)
|
| 35 |
+
self.w, self.h = self.orig_pil.size
|
| 36 |
+
|
| 37 |
+
# Scale for display
|
| 38 |
+
screen_h = self.winfo_screenheight()
|
| 39 |
+
target_h = int(screen_h * 0.8)
|
| 40 |
+
self.display_scale = min(target_h / self.h, 1.0)
|
| 41 |
+
|
| 42 |
+
self.display_w = int(self.w * self.display_scale)
|
| 43 |
+
self.display_h = int(self.h * self.display_scale)
|
| 44 |
+
|
| 45 |
+
self.display_img = self.orig_pil.resize((self.display_w, self.display_h), Image.LANCZOS)
|
| 46 |
+
self.tk_img = ImageTk.PhotoImage(self.display_img)
|
| 47 |
+
|
| 48 |
+
# Rect in original coordinates (x1, y1, x2, y2)
|
| 49 |
+
self.rect = list(current_rect) if current_rect else [0, 0, 100, 140]
|
| 50 |
+
|
| 51 |
+
# UI Layout
|
| 52 |
+
self.canvas = tk.Canvas(self, width=self.display_w, height=self.display_h, bg="black", highlightthickness=0)
|
| 53 |
+
self.canvas.pack(pady=10, padx=10)
|
| 54 |
+
self.canvas.create_image(0, 0, image=self.tk_img, anchor=tk.NW)
|
| 55 |
+
|
| 56 |
+
self.rect_id = self.canvas.create_rectangle(0, 0, 0, 0, outline="yellow", width=3)
|
| 57 |
+
self._update_canvas_rect()
|
| 58 |
+
|
| 59 |
+
ctrl = tk.Frame(self)
|
| 60 |
+
ctrl.pack(fill=tk.X, padx=20, pady=5)
|
| 61 |
+
tk.Label(ctrl, text="Drag the yellow box to move the crop. The size is fixed to 5:7.", font=("Arial", 10, "italic")).pack()
|
| 62 |
+
|
| 63 |
+
btn_frame = tk.Frame(self)
|
| 64 |
+
btn_frame.pack(pady=15)
|
| 65 |
+
tk.Button(btn_frame, text=" Cancel ", command=self.destroy, width=10).pack(side=tk.LEFT, padx=10)
|
| 66 |
+
tk.Button(btn_frame, text=" Apply & Reprocess ", bg="#27ae60", fg="white",
|
| 67 |
+
command=self._apply, width=20, font=("Arial", 10, "bold")).pack(side=tk.LEFT, padx=10)
|
| 68 |
+
|
| 69 |
+
# Mouse Events
|
| 70 |
+
self.canvas.bind("<B1-Motion>", self._on_drag)
|
| 71 |
+
self.canvas.bind("<Button-1>", self._on_click)
|
| 72 |
+
|
| 73 |
+
# Center the window
|
| 74 |
+
self.update_idletasks()
|
| 75 |
+
wx = (self.winfo_screenwidth() - self.winfo_width()) // 2
|
| 76 |
+
wy = (self.winfo_screenheight() - self.winfo_height()) // 2
|
| 77 |
+
self.geometry(f"+{wx}+{wy}")
|
| 78 |
+
|
| 79 |
+
self.grab_set() # Modal
|
| 80 |
+
|
| 81 |
+
def _update_canvas_rect(self):
|
| 82 |
+
x1, y1, x2, y2 = self.rect
|
| 83 |
+
self.canvas.coords(self.rect_id,
|
| 84 |
+
x1 * self.display_scale, y1 * self.display_scale,
|
| 85 |
+
x2 * self.display_scale, y2 * self.display_scale)
|
| 86 |
+
|
| 87 |
+
def _on_click(self, event):
|
| 88 |
+
self.start_x = event.x
|
| 89 |
+
self.start_y = event.y
|
| 90 |
+
|
| 91 |
+
def _on_drag(self, event):
    """Translate the fixed-size crop box by the mouse delta, clamped to the image."""
    # Convert the on-screen pointer delta back into original-image coordinates.
    dx = (event.x - self.start_x) / self.display_scale
    dy = (event.y - self.start_y) / self.display_scale

    box_w = self.rect[2] - self.rect[0]
    box_h = self.rect[3] - self.rect[1]

    # Clamp the candidate top-left corner so the box never leaves the image.
    new_x = max(self.rect[0] + dx, 0)
    new_y = max(self.rect[1] + dy, 0)
    if new_x + box_w > self.w:
        new_x = self.w - box_w
    if new_y + box_h > self.h:
        new_y = self.h - box_h

    self.rect[0] = new_x
    self.rect[1] = new_y
    self.rect[2] = new_x + box_w
    self.rect[3] = new_y + box_h

    # The current pointer position becomes the anchor for the next motion event.
    self.start_x, self.start_y = event.x, event.y
    self._update_canvas_rect()
def _apply(self):
    """Deliver the chosen crop (as an int 4-tuple) to the caller, then close."""
    final_rect = tuple(int(v) for v in self.rect)
    self.callback(final_rect)
    self.destroy()
| 120 |
+
class StudioApp:
    """Main application with memory-efficient batch handling.

    Workflow phases:
        "empty"   -> nothing loaded yet
        "input"   -> photos loaded, per-photo name/ID being entered
        "preview" -> batch processed, results ready to save

    Fixes over the previous revision:
        * ``_warm_up_model`` no longer touches Tk widgets from the worker
          thread — status updates are marshalled via ``root.after``.
        * ``_process_single_image`` always removes its temp file (finally)
          and closes the PIL file handle for the cropped image.
        * ``_save_all`` logs per-file save failures instead of silently
          swallowing them with a bare ``except``.
    """

    def __init__(self, root: tk.Tk):
        self.root = root
        self.root.title("EL HELAL Studio — Professional Workflow")
        self.root.minsize(1000, 900)
        self.root.configure(bg="#f0f0f0")

        # One record per photo: path, name, id, preview thumbnail,
        # processed result image, and (optional) manual crop rectangle.
        self._image_data: list[dict] = []
        self._current_index = 0
        self._phase = "empty"  # "empty" | "input" | "preview"

        self.model = None
        self.transform = None
        self.luts = color_steal.load_trained_curves()
        self.is_model_ready = False

        self._build_ui()
        self.root.bind("<Configure>", self._on_resize)

        # Load the heavy AI model off the UI thread so the window opens fast.
        threading.Thread(target=self._warm_up_model, daemon=True).start()

    def _warm_up_model(self):
        """Load the background-removal model in a worker thread.

        Tkinter widgets are not thread-safe, so every status-label update is
        scheduled back onto the main loop with ``root.after`` instead of
        being performed directly from this thread.
        """
        try:
            self.root.after(0, self._set_status, "Initializing AI Engine...")
            self.model, _ = process_images.setup_model()
            self.transform = process_images.get_transform()
            self.is_model_ready = True
            self.root.after(0, self._set_status, "AI Engine Ready.")
        except Exception as e:
            self.root.after(0, self._set_status, f"Critical Error: AI Model failed to load ({e})")

    def _build_ui(self):
        """Construct the full widget tree: header, inputs, toolbar, canvas, status bar."""
        # Header
        header = tk.Frame(self.root, bg="#1a2634", pady=15)
        header.pack(fill=tk.X)
        tk.Label(header, text="EL HELAL Studio", font=("Arial", 26, "bold"), fg="#e8b923", bg="#1a2634").pack()
        tk.Label(header, text="Memory Optimized Pipeline: Auto-Crop | AI Background | Color Grade | Layout",
                 font=("Arial", 10), fg="white", bg="#1a2634").pack()

        # Input Area (per-photo name and ID fields)
        input_frame = tk.Frame(self.root, bg="#f0f0f0", pady=10)
        input_frame.pack(fill=tk.X, padx=20)
        tk.Label(input_frame, text="Name (الاسم):", font=("Arial", 11, "bold"), bg="#f0f0f0").pack(side=tk.LEFT, padx=(0, 5))
        self.entry_name = tk.Entry(input_frame, font=("Arial", 14), width=30, justify=tk.RIGHT)
        self.entry_name.pack(side=tk.LEFT, padx=(0, 25))
        tk.Label(input_frame, text="ID (الرقم):", font=("Arial", 11, "bold"), bg="#f0f0f0").pack(side=tk.LEFT, padx=(0, 5))
        self.entry_id = tk.Entry(input_frame, font=("Arial", 14), width=20)
        self.entry_id.pack(side=tk.LEFT)

        # Toolbar
        toolbar = tk.Frame(self.root, bg="#f0f0f0", pady=10)
        toolbar.pack(fill=tk.X, padx=20)
        self.btn_open = tk.Button(toolbar, text=" 📂 Select Photos ", command=self._open_files,
                                  bg="#3498db", fg="white", relief=tk.FLAT, padx=15, pady=8, font=("Arial", 10, "bold"))
        self.btn_open.pack(side=tk.LEFT, padx=5)
        self.btn_process = tk.Button(toolbar, text=" ⚡ Start All ", command=self._process_all,
                                     bg="#e67e22", fg="white", relief=tk.FLAT, padx=15, pady=8,
                                     font=("Arial", 10, "bold"), state=tk.DISABLED)
        self.btn_process.pack(side=tk.LEFT, padx=5)
        self.btn_save = tk.Button(toolbar, text=" 💾 Save All ", command=self._save_all,
                                  bg="#27ae60", fg="white", relief=tk.FLAT, padx=15, pady=8,
                                  font=("Arial", 10, "bold"), state=tk.DISABLED)
        self.btn_save.pack(side=tk.LEFT, padx=5)
        self.btn_edit_crop = tk.Button(toolbar, text=" ✂ Edit Crop ", command=self._edit_crop,
                                       bg="#95a5a6", fg="white", relief=tk.FLAT, padx=15, pady=8,
                                       font=("Arial", 10, "bold"), state=tk.DISABLED)
        self.btn_edit_crop.pack(side=tk.LEFT, padx=(30, 0))

        # Navigation (prev / counter / next)
        nav_frame = tk.Frame(toolbar, bg="#f0f0f0")
        nav_frame.pack(side=tk.RIGHT)
        self.btn_prev = tk.Button(nav_frame, text=" ◀ ", command=self._prev_image, state=tk.DISABLED, width=4)
        self.btn_prev.pack(side=tk.LEFT, padx=2)
        self.lbl_counter = tk.Label(nav_frame, text="", font=("Arial", 11, "bold"), bg="#f0f0f0", width=8)
        self.lbl_counter.pack(side=tk.LEFT)
        self.btn_next = tk.Button(nav_frame, text=" ▶ ", command=self._next_image, state=tk.DISABLED, width=4)
        self.btn_next.pack(side=tk.LEFT, padx=2)

        # Progress
        self.progress = ttk.Progressbar(self.root, orient=tk.HORIZONTAL, mode='determinate')
        self.progress.pack(fill=tk.X, padx=20, pady=(0, 10))

        # Canvas (image preview area)
        canvas_frame = tk.Frame(self.root, bg="#ddd", bd=1, relief=tk.SUNKEN)
        canvas_frame.pack(fill=tk.BOTH, expand=True, padx=20, pady=5)
        self.canvas = tk.Canvas(canvas_frame, bg="white", highlightthickness=0)
        self.canvas.pack(fill=tk.BOTH, expand=True)

        self.status = tk.Label(self.root, text="Ready", font=("Arial", 10), bg="#1a2634",
                               fg="white", anchor=tk.W, padx=15, pady=8)
        self.status.pack(fill=tk.X, side=tk.BOTTOM)

    # ── Operations ──

    def _open_files(self):
        """Pick photos and load small thumbnails only (full-res stays on disk)."""
        paths = filedialog.askopenfilenames(
            title="Select Student Photos",
            filetypes=[("Image files", "*.jpg *.jpeg *.png *.bmp *.tiff")]
        )
        if not paths:
            return

        self._image_data = []
        for path in paths:
            # Generate thumbnail for preview to save memory
            try:
                with Image.open(path) as img:
                    img.thumbnail((800, 800), Image.LANCZOS)
                    thumb = img.copy()

                self._image_data.append({
                    "path": path,
                    "name": "",
                    "id": "",
                    "thumb": thumb,   # Store small version only
                    "result": None,
                    "crop_rect": None
                })
            except Exception as e:
                print(f"Error loading {path}: {e}")

        self._current_index = 0
        self._phase = "input"
        self._load_current_fields()
        self._update_nav()
        self._update_ui_state()
        self._show_current()
        self._set_status(f"Loaded {len(self._image_data)} photos. Memory usage optimized.")

    def _process_all(self):
        """Kick off the batch pipeline on a worker thread (UI stays responsive)."""
        self._save_current_fields()
        if not self.is_model_ready:
            messagebox.showwarning("Wait", "AI Model is still loading.")
            return

        self.progress['value'] = 0
        self.progress['maximum'] = len(self._image_data)
        self.btn_open.config(state=tk.DISABLED)
        self.btn_process.config(state=tk.DISABLED)

        threading.Thread(target=self._run_pipeline_batch, daemon=True).start()

    def _run_pipeline_batch(self):
        """Worker-thread loop: process each photo, reporting progress via root.after."""
        total = len(self._image_data)
        for i in range(total):
            self.root.after(0, self._set_status, f"Processing {i+1}/{total}...")
            self._process_single_image(i)
            self.root.after(0, lambda v=i+1: self.progress.configure(value=v))
        self.root.after(0, self._on_batch_done)

    def _process_single_image(self, idx):
        """Run the full pipeline (crop -> BG removal -> grade -> layout) for one photo.

        On failure the photo's ``result`` is set to None; the temp crop file
        is removed in all cases via the ``finally`` block.
        """
        data = self._image_data[idx]
        temp_crop = f"temp_{idx}_{int(time.time())}.jpg"
        try:
            # 1. CROP (Face Detection) — operates on the full-resolution file on disk.
            if not data["crop_rect"]:
                data["crop_rect"] = crop.get_auto_crop_rect(data["path"])

            if not crop.apply_custom_crop(data["path"], temp_crop, data["crop_rect"]):
                raise Exception("Crop failed")

            # Load the crop and release the file handle immediately so the
            # temp file can be deleted on every platform (Windows locks open files).
            with Image.open(temp_crop) as f:
                cropped_img = f.copy()

            # 2. BACKGROUND REMOVAL (AI)
            trans = process_images.remove_background(self.model, cropped_img, self.transform)

            # 3. COLOR GRADING (skipped when no trained curves are available)
            graded = color_steal.apply_to_image(self.luts, trans) if self.luts else trans

            # 4. FINAL LAYOUT
            data["result"] = generate_layout(graded, data["name"], data["id"])

        except Exception as e:
            print(f"Error processing index {idx}: {e}")
            data["result"] = None
        finally:
            # Always clean up the temp file, even when a pipeline step failed.
            if os.path.exists(temp_crop):
                try:
                    os.remove(temp_crop)
                except OSError:
                    pass

    def _on_batch_done(self):
        """Main-thread callback after the batch: switch to preview of the first success."""
        self._phase = "preview"
        self.btn_open.config(state=tk.NORMAL)
        self.btn_process.config(state=tk.NORMAL)
        for i, d in enumerate(self._image_data):
            if d["result"]:
                self._current_index = i
                break
        self._update_ui_state()
        self._load_current_fields()
        self._update_nav()
        self._show_current()
        self._set_status("Processing complete.")

    def _edit_crop(self):
        """Open the modal crop editor for the current photo and reprocess on apply."""
        data = self._image_data[self._current_index]
        if not data["crop_rect"]:
            data["crop_rect"] = crop.get_auto_crop_rect(data["path"])

        def on_apply(new_rect):
            data["crop_rect"] = new_rect
            self._set_status("Reprocessing image...")
            threading.Thread(target=self._reprocess_current, daemon=True).start()

        CropDialog(self.root, data["path"], data["crop_rect"], on_apply)

    def _reprocess_current(self):
        """Worker-thread re-run of the pipeline for the current photo only."""
        self._process_single_image(self._current_index)
        self.root.after(0, self._show_current)
        self.root.after(0, self._set_status, "Reprocess Done.")

    # ── UI Sync ──

    def _update_ui_state(self):
        """Enable/disable toolbar buttons according to loaded data and phase."""
        if not self._image_data:
            self.btn_process.config(state=tk.DISABLED)
            self.btn_save.config(state=tk.DISABLED)
            self.btn_edit_crop.config(state=tk.DISABLED)
            return
        self.btn_process.config(state=tk.NORMAL)
        # Saving only makes sense once the batch has been processed.
        self.btn_save.config(state=tk.NORMAL if self._phase == "preview" else tk.DISABLED)
        self.btn_edit_crop.config(state=tk.NORMAL)

    def _show_current(self):
        """Display the current photo: processed result in preview phase, else thumbnail."""
        if not self._image_data:
            return
        data = self._image_data[self._current_index]

        if self._phase == "preview" and data["result"]:
            img = data["result"]
        else:
            # Use thumbnail for navigation preview to stay memory-efficient
            img = data["thumb"]

        self._show_image_on_canvas(img)

    def _show_image_on_canvas(self, img):
        """Fit-and-center an image on the preview canvas (never upscales)."""
        self.canvas.update_idletasks()
        cw, ch = self.canvas.winfo_width(), self.canvas.winfo_height()
        if cw < 10 or ch < 10:
            return  # Canvas not laid out yet.

        iw, ih = img.size
        scale = min(cw / iw, ch / ih, 1.0)
        preview = img.resize((int(iw * scale), int(ih * scale)), Image.LANCZOS)
        # Keep a reference on self so Tk doesn't garbage-collect the image.
        self.tk_preview = ImageTk.PhotoImage(preview)
        self.canvas.delete("all")
        self.canvas.create_image(cw // 2, ch // 2, image=self.tk_preview, anchor=tk.CENTER)

    def _save_current_fields(self):
        """Persist the entry widgets into the current photo's record."""
        if not self._image_data:
            return
        data = self._image_data[self._current_index]
        data["name"] = self.entry_name.get().strip()
        data["id"] = self.entry_id.get().strip()

    def _load_current_fields(self):
        """Populate the entry widgets from the current photo's record."""
        if not self._image_data:
            return
        data = self._image_data[self._current_index]
        self.entry_name.delete(0, tk.END)
        self.entry_name.insert(0, data["name"])
        self.entry_id.delete(0, tk.END)
        self.entry_id.insert(0, data["id"])

    def _prev_image(self):
        """Step to the previous photo, saving the current entry fields first."""
        if self._current_index > 0:
            self._save_current_fields()
            self._current_index -= 1
            self._load_current_fields()
            self._update_nav()
            self._show_current()

    def _next_image(self):
        """Step to the next photo, saving the current entry fields first."""
        if self._current_index < len(self._image_data) - 1:
            self._save_current_fields()
            self._current_index += 1
            self._load_current_fields()
            self._update_nav()
            self._show_current()

    def _update_nav(self):
        """Sync prev/next button states and the 'i / n' counter label."""
        n = len(self._image_data)
        self.btn_prev.config(state=tk.NORMAL if self._current_index > 0 else tk.DISABLED)
        self.btn_next.config(state=tk.NORMAL if self._current_index < n - 1 else tk.DISABLED)
        self.lbl_counter.config(text=f"{self._current_index+1} / {n}" if n else "")

    def _save_all(self):
        """Save every processed layout as a 300-DPI JPEG into a chosen folder."""
        results = [d for d in self._image_data if d["result"]]
        if not results:
            return
        folder = filedialog.askdirectory(title="Select Save Folder")
        if not folder:
            return
        count = 0
        for d in results:
            try:
                name = Path(d["path"]).stem + "_layout.jpg"
                d["result"].save(os.path.join(folder, name), "JPEG", quality=95, dpi=(300, 300))
                count += 1
            except Exception as e:
                # Log instead of silently swallowing, so failed saves are visible.
                print(f"Error saving {d['path']}: {e}")
        messagebox.showinfo("Success", f"Saved {count} layouts.")

    def _on_resize(self, event):
        """Re-fit the preview whenever the window is resized."""
        if self._image_data:
            self._show_current()

    def _set_status(self, msg):
        """Update the status bar text. Must be called from the Tk main thread."""
        self.status.config(text=msg)
        self.root.update_idletasks()
| 411 |
+
if __name__ == "__main__":
    # Script entry point: build the root window, centre it, and run the app.
    root = tk.Tk()
    width, height = 1100, 950
    pos_x = (root.winfo_screenwidth() - width) // 2
    pos_y = (root.winfo_screenheight() - height) // 2
    root.geometry(f"{width}x{height}+{pos_x}+{pos_y}")
    StudioApp(root)
    root.mainloop()
id-maker/gui/main.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
EL HELAL Studio — Photo Layout Generator
|
| 3 |
+
=========================================
|
| 4 |
+
Run this file to launch the desktop application.
|
| 5 |
+
|
| 6 |
+
Usage:
|
| 7 |
+
python main.py
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import tkinter as tk
|
| 11 |
+
from gui import StudioApp
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def main():
    """Create the Tk root window, centre it on screen, and run the Studio app."""
    root = tk.Tk()

    # Centre the window on the primary screen, nudged slightly upward.
    root.update_idletasks()
    screen_w = root.winfo_screenwidth()
    screen_h = root.winfo_screenheight()
    win_w, win_h = 900, 1000
    pos_x = (screen_w - win_w) // 2
    pos_y = max((screen_h - win_h) // 2 - 30, 0)
    root.geometry(f"{win_w}x{win_h}+{pos_x}+{pos_y}")

    app = StudioApp(root)  # noqa: F841  (keep a reference for the app's lifetime)
    root.mainloop()
+
|
| 30 |
+
if __name__ == "__main__":  # Launch the desktop application when run directly.
    main()
|
id-maker/requirements.txt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Pillow>=10.0.0
|
| 2 |
+
arabic-reshaper
|
| 3 |
+
python-bidi
|
| 4 |
+
opencv-python
|
| 5 |
+
numpy<2.0.0
|
| 6 |
+
protobuf<=3.20.3
|
| 7 |
+
torch
|
| 8 |
+
torchvision
|
| 9 |
+
transformers==4.48.2
|
| 10 |
+
scipy
|
| 11 |
+
mediapipe==0.10.9
|
| 12 |
+
devicetorch
|
| 13 |
+
timm
|
| 14 |
+
kornia
|
| 15 |
+
accelerate
|
| 16 |
+
fastapi
|
| 17 |
+
uvicorn
|
| 18 |
+
python-multipart
|
| 19 |
+
jinja2
|
| 20 |
+
pywebview
|
id-maker/tools/problems.md
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Resolved Technical Issues & Deployment Guide
|
| 2 |
+
|
| 3 |
+
This document tracks critical problems encountered during development and deployment (especially for Docker and Hugging Face Spaces) and their corresponding solutions.
|
| 4 |
+
|
| 5 |
+
## 1. Font Rendering & Layout Consistency
|
| 6 |
+
|
| 7 |
+
### Problem: "Boxes" instead of Arabic Text
|
| 8 |
+
- **Symptom**: Arabic names appeared as empty boxes (tofu) or incorrectly rendered characters.
|
| 9 |
+
- **Cause**: The system was attempting to load Windows-specific font paths (e.g., `C:/Windows/Fonts/...`) which do not exist in Linux/Docker environments.
|
| 10 |
+
- **Solution**:
|
| 11 |
+
- Implemented platform-agnostic font discovery in `core/layout_engine.py`.
|
| 12 |
+
- Added automatic detection of bundled fonts in the `assets/` directory.
|
| 13 |
+
- Prioritized `arialbd.ttf` for Arabic support over legacy fonts.
|
| 14 |
+
|
| 15 |
+
### Problem: Settings Changes Not Reflecting
|
| 16 |
+
- **Symptom**: Changing `id_font_size` in `settings.json` had no effect until the app was restarted.
|
| 17 |
+
- **Cause**: Settings were loaded once at module import time and cached as global constants.
|
| 18 |
+
- **Solution**: Modified `generate_layout` to call `load_settings()` at the start of every execution, ensuring real-time updates from the JSON file.
|
| 19 |
+
|
| 20 |
+
---
|
| 21 |
+
|
| 22 |
+
## 2. Hugging Face Spaces (Docker) Deployment
|
| 23 |
+
|
| 24 |
+
### Problem: Obsolete `libgl1-mesa-glx`
|
| 25 |
+
- **Symptom**: Docker build failed with `E: Package 'libgl1-mesa-glx' has no installation candidate`.
|
| 26 |
+
- **Cause**: The base Debian image (Trixie/Sid) used by HF has obsoleted this package.
|
| 27 |
+
- **Solution**: Updated `Dockerfile` to use `libgl1` which provides the necessary OpenGL libraries for OpenCV.
|
| 28 |
+
|
| 29 |
+
### Problem: Transformers 4.50+ Compatibility (BiRefNet/RMBG-2.0)
|
| 30 |
+
- **Symptom**: `AttributeError: 'BiRefNet' object has no attribute 'all_tied_weights_keys'` or `'NoneType' object has no attribute 'keys'`.
|
| 31 |
+
- **Cause**: The custom model code for BiRefNet/RMBG-2.0 is incompatible with internal changes in recent `transformers` versions regarding tied weight tracking.
|
| 32 |
+
- **Solution**:
|
| 33 |
+
- Applied a robust monkeypatch in `core/process_images.py` to the `PreTrainedModel` class.
|
| 34 |
+
- Forced `all_tied_weights_keys` to always return an empty dictionary `{}` instead of `None`.
|
| 35 |
+
- Pinned `transformers==4.48.2` in `requirements.txt` for a stable environment.
|
| 36 |
+
|
| 37 |
+
### Problem: Meta-Tensor Initialization Bug
|
| 38 |
+
- **Symptom**: Model failed to load on CPU due to "meta tensors" not being correctly materialized.
|
| 39 |
+
- **Cause**: A bug in how `torch.linspace` interacts with transformers' `low_cpu_mem_usage` flag for custom models.
|
| 40 |
+
- **Solution**: Monkeypatched `torch.linspace` in `core/process_images.py` to force materialization on CPU when a meta-tensor is detected.
|
| 41 |
+
|
| 42 |
+
---
|
| 43 |
+
|
| 44 |
+
## 3. Git & LFS Management
|
| 45 |
+
|
| 46 |
+
### Problem: Binary File Rejection
|
| 47 |
+
- **Symptom**: `remote: Your push was rejected because it contains binary files.`
|
| 48 |
+
- **Cause**: Pushing large `.jpg`, `.png`, or `.cube` files directly to Hugging Face without Git LFS, or having them in the Git history from previous commits.
|
| 49 |
+
- **Solution**:
|
| 50 |
+
- Configured `.gitattributes` to track `*.png`, `*.TTF`, `*.npz`, and `*.cube` with LFS.
|
| 51 |
+
- Used `git filter-branch` to purge large binaries (`raw/`, `white/`, and root `.jpg` files) from the entire Git history to reduce repo size and satisfy HF hooks.
|
| 52 |
+
|
| 53 |
+
---
|
| 54 |
+
|
| 55 |
+
## 4. Image Processing Pipeline & Dependencies
|
| 56 |
+
|
| 57 |
+
### Problem: `KeyError: 'setting text direction... not supported without libraqm'`
|
| 58 |
+
- **Symptom**: Application crashes on Windows when attempting to render Arabic text in the layout.
|
| 59 |
+
- **Cause**: Pillow's `direction` and `features` parameters require the `libraqm` library, which is difficult to install on Windows.
|
| 60 |
+
- **Solution**:
|
| 61 |
+
- Added a safety check using `PIL.features.check("raqm")`.
|
| 62 |
+
- Implemented a fallback that relies on `arabic-reshaper` and `python-bidi` for manual shaping/reordering when `raqm` is missing.
|
| 63 |
+
|
| 64 |
+
### Problem: Background Removal failing when Retouching is enabled
|
| 65 |
+
- **Symptom**: Background removal appeared "ignored" or reverted to original background after processing.
|
| 66 |
+
- **Cause**: The `retouch_image_pil` function in `core/retouch.py` was converting the image to RGB for OpenCV processing, stripping the Alpha channel (transparency) created by the BG removal step.
|
| 67 |
+
- **Solution**:
|
| 68 |
+
- Updated `retouch_image_pil` to detect and save the Alpha channel before processing.
|
| 69 |
+
- Modified the logic to restore the Alpha channel to the final retouched PIL image before returning it to the pipeline.
|
| 70 |
+
|
| 71 |
+
### Problem: BiRefNet Model Inference Error
|
| 72 |
+
- **Symptom**: `TypeError` or indexing errors during background removal inference.
|
| 73 |
+
- **Cause**: Inconsistent model output formats (list of tensors vs. a single tensor) depending on the environment or `transformers` version.
|
| 74 |
+
- **Solution**: Updated `remove_background` in `core/process_images.py` to check if output is a list/tuple and handle both cases robustly.
|
| 75 |
+
|
| 76 |
+
### Problem: `AttributeError: module 'mediapipe' has no attribute 'solutions'`
|
| 77 |
+
- **Symptom**: Skin retouching fails in the Docker container with this error.
|
| 78 |
+
- **Cause**: Inconsistent behavior of the `mediapipe` package initialization in some Linux environments.
|
| 79 |
+
- **Solution**:
|
| 80 |
+
- Explicitly imported submodules like `mediapipe.solutions.face_mesh` at the top of the file.
|
| 81 |
+
- Switched from `python:3.10-slim` to the full `python:3.10` image to ensure a complete build environment.
|
| 82 |
+
- Added `libprotobuf-dev` and `protobuf-compiler` to the `Dockerfile`.
|
| 83 |
+
|
| 84 |
+
### Problem: Arabic/English Text appearing as "Boxes" (Tofu)
|
| 85 |
+
- **Symptom**: All text on the print layout appears as empty squares in the Hugging Face Space.
|
| 86 |
+
- **Cause**: The container lacked fonts with proper character support, and binary `.ttf` files were often corrupted as 130-byte Git LFS pointers.
|
| 87 |
+
- **Solution**:
|
| 88 |
+
- **Automated Downloads**: Updated `Dockerfile` to use `wget` to pull real binary fonts directly from GitHub during the build.
|
| 89 |
+
- **Deep Search**: Implemented a recursive font discovery system in `core/layout_engine.py` that scans `/usr/share/fonts`.
|
| 90 |
+
- **System Fallbacks**: Installed `fonts-noto-extra` and `fonts-dejavu-core` as guaranteed system-level backups.
|
| 91 |
+
|
| 92 |
+
### Problem: Arabic Text appearing "Connected but Reversed"
|
| 93 |
+
- **Symptom**: Arabic letters connect correctly but flow from Left-to-Right (e.g., "م ح م د" instead of "محمد").
|
| 94 |
+
- **Cause**: Inconsistent behavior between local Windows (missing `libraqm`) and Docker Linux (has `libraqm`). Using `get_display` in an environment that already supports complex scripts causes a "double-reversal".
|
| 95 |
+
- **Solution**:
|
| 96 |
+
- **Intelligent Detection**: Updated `_reshape_arabic` in `core/layout_engine.py` to check for Raqm support via `PIL.features.check("raqm")`.
|
| 97 |
+
- **Conditional Reordering**: The system now only applies `python-bidi` reordering if Raqm is **absent**. This ensures perfect rendering in both environments without manual code changes.
|
| 98 |
+
|
| 99 |
+
### Problem: Manual Cropping ignored or shifted
|
| 100 |
+
- **Symptom**: After manually adjusting the crop in the web interface, the result still looked like the AI auto-crop or was completely wrong.
|
| 101 |
+
- **Cause**: The backend was using `cv2.imdecode` which ignores EXIF orientation tags. Since the frontend cropper works on correctly oriented thumbnails, the coordinates sent to the backend didn't match the raw image orientation on disk.
|
| 102 |
+
- **Solution**: Updated `core/crop.py` to use a `_load_image_exif_safe` helper that uses PIL to transpose the image before converting it to OpenCV format. This ensures coordinates from the web UI always match the backend image state.
|
id-maker/tools/scan_fonts.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
from PIL import Image, ImageFont, ImageDraw
|
| 5 |
+
|
| 6 |
+
def list_fonts():
    """Print every .ttf/.otf font found in common Linux font dirs and ./assets.

    Used to debug "tofu" (missing-glyph) rendering inside Docker containers.
    A suspiciously small file size (~130 bytes) usually indicates an
    un-pulled Git LFS pointer rather than a real font binary.

    Fix: the f-strings previously contained literal newlines split across
    source lines (a SyntaxError); they now use the escaped "\\n".
    """
    print("--- FONT SCANNER ---")
    paths = [
        "/usr/share/fonts",
        "/usr/share/fonts/truetype",
        "/usr/share/fonts/truetype/noto",
        "/usr/share/fonts/truetype/dejavu",
        "/usr/share/fonts/truetype/liberation",
        "assets"
    ]

    for p in paths:
        if os.path.exists(p):
            print(f"\nDirectory: {p}")
            files = [f for f in os.listdir(p) if f.lower().endswith(('.ttf', '.otf'))]
            for f in sorted(files):
                full_path = os.path.join(p, f)
                size = os.path.getsize(full_path)
                print(f"  - {f} ({size} bytes)")
        else:
            print(f"\nDirectory NOT FOUND: {p}")
| 30 |
+
if __name__ == "__main__":  # Allow running the font scanner directly.
    list_fonts()
|
id-maker/tools/verify_layout.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
from PIL import Image
|
| 5 |
+
|
| 6 |
+
# Add root directory to python path
|
| 7 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 8 |
+
|
| 9 |
+
from core.layout_engine import generate_layout
|
| 10 |
+
|
| 11 |
+
def test_layout():
    """Smoke-test the layout engine with a dummy grey portrait and Arabic text.

    Fix: the settings introspection via ``generate_layout.__globals__`` is
    fragile (it depends on a module-global named ``S`` with a specific
    schema); it is now guarded so a renamed/missing setting can no longer
    raise and mask an otherwise successful layout run.
    """
    print("Starting layout verification...")

    # Create a dummy input image (portrait aspect, flat grey background).
    dummy_input = Image.new("RGB", (600, 800), (200, 200, 200))

    try:
        # Generate the layout with dummy data
        print("Generating layout...")
        result = generate_layout(
            dummy_input,
            person_name="محمد أحمد اسماعيل ",
            id_number="1234567"
        )

        # Save the result
        output_path = "layout_verification_result.jpg"
        result.save(output_path, quality=95)

        print(f"Success! Layout generated and saved to: {output_path}")

        # NOTE(review): reaching into the engine's module globals assumes a
        # global dict named 'S' with an 'overlays' section — guard it so the
        # diagnostic print can never fail the verification itself.
        try:
            id_size = generate_layout.__globals__['S']['overlays']['id_font_size']
            print(f"Current Settings Used: ID Font Size = {id_size}")
        except (KeyError, TypeError):
            print("Current Settings Used: <unavailable>")

    except Exception as e:
        print(f"ERROR: {e}")
|
| 36 |
+
if __name__ == "__main__":  # Run the verification when executed as a script.
    test_layout()
|
id-maker/web/server.py
ADDED
|
@@ -0,0 +1,486 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
EL HELAL Studio — Web Backend (FastAPI)
|
| 3 |
+
Integrated with Auto-Cleanup and Custom Cropping
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from fastapi import FastAPI, UploadFile, File, Form, BackgroundTasks
|
| 7 |
+
from fastapi.responses import JSONResponse, FileResponse, StreamingResponse
|
| 8 |
+
from fastapi.staticfiles import StaticFiles
|
| 9 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 10 |
+
from contextlib import asynccontextmanager
|
| 11 |
+
import uvicorn
|
| 12 |
+
import shutil
|
| 13 |
+
import os
|
| 14 |
+
import json
|
| 15 |
+
import uuid
|
| 16 |
+
import zipfile
|
| 17 |
+
import io
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
from PIL import Image
|
| 20 |
+
import threading
|
| 21 |
+
import sys
|
| 22 |
+
import asyncio
|
| 23 |
+
import time
|
| 24 |
+
|
| 25 |
+
# Add core directory to python path
|
| 26 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'core')))
|
| 27 |
+
|
| 28 |
+
# Import existing tools
|
| 29 |
+
import crop
|
| 30 |
+
import process_images
|
| 31 |
+
import color_steal
|
| 32 |
+
import retouch
|
| 33 |
+
import restoration
|
| 34 |
+
from layout_engine import generate_layout, load_settings
|
| 35 |
+
|
| 36 |
+
# Setup Directories
|
| 37 |
+
# ── Paths & storage layout ──
# web_storage/ holds the static frontend; storage/ holds transient user files.
WEB_DIR = Path(os.path.dirname(__file__)) / "web_storage"
ROOT_DIR = Path(os.path.dirname(__file__)).parent
STORAGE_DIR = ROOT_DIR / "storage"
ASSETS_DIR = ROOT_DIR / "assets"

UPLOAD_DIR = STORAGE_DIR / "uploads"
PROCESSED_DIR = STORAGE_DIR / "processed"
RESULT_DIR = STORAGE_DIR / "results"

# Ensure every working directory exists before the app starts serving.
for _work_dir in (UPLOAD_DIR, PROCESSED_DIR, RESULT_DIR):
    _work_dir.mkdir(parents=True, exist_ok=True)

# ── Global model state ──
# Populated by warm_up_ai() in a background thread; "ready" flips to True
# only once the segmentation model and transform have loaded successfully.
models = {
    "model": None,
    "transform": None,
    "restoration": None,
    "luts": color_steal.load_trained_curves(),
    "ready": False,
}
|
| 57 |
+
|
| 58 |
+
def warm_up_ai():
    """Load the heavy AI models off the request path.

    Intended to run in a daemon thread at startup.  On success it flips
    ``models["ready"]`` to True; on failure it logs and leaves the server
    running with ai_ready == False (the /process endpoint returns 503).
    """
    print("AI Model: Loading in background...")
    try:
        loaded_model, _ = process_images.setup_model()
        models["model"] = loaded_model
        models["transform"] = process_images.get_transform()
        # Face restoration runs through a remote API, so there is no
        # local restoration model to initialize here.
        print("Restoration Model: API Mode Active")
        models["ready"] = True
        print("AI Model: READY")
    except Exception as exc:
        print(f"AI Model: FAILED to load - {exc}")
|
| 69 |
+
|
| 70 |
+
async def cleanup_task():
    """Background task: delete stored files older than 24 hours.

    Runs forever, scanning the upload/processed/result folders once per
    hour.  Each deletion is individually guarded: in the original code a
    single OSError (e.g. a file removed concurrently by /clear-all between
    glob() and unlink(), or a permission error) would raise out of the
    coroutine and silently kill the cleanup task for the rest of the
    process lifetime.
    """
    MAX_AGE_SECONDS = 86400  # 24 hours
    SCAN_INTERVAL = 3600     # run every hour
    while True:
        print("Cleanup: Checking for old files...")
        now = time.time()
        count = 0
        for folder in [UPLOAD_DIR, PROCESSED_DIR, RESULT_DIR]:
            for path in folder.glob("*"):
                try:
                    if path.is_file() and (now - path.stat().st_mtime) > MAX_AGE_SECONDS:
                        path.unlink()
                        count += 1
                except OSError as exc:
                    # File vanished or is locked — skip it, keep the task alive.
                    print(f"Cleanup: could not remove {path}: {exc}")
        if count > 0:
            print(f"Cleanup: Removed {count} old files.")
        await asyncio.sleep(SCAN_INTERVAL)
|
| 83 |
+
|
| 84 |
+
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: launch background work at startup.

    Model loading is blocking, so it runs in a daemon thread; the cleanup
    loop is a coroutine scheduled on the event loop itself.
    """
    threading.Thread(target=warm_up_ai, daemon=True).start()
    asyncio.create_task(cleanup_task())
    yield
    # No shutdown work required.
|
| 92 |
+
|
| 93 |
+
# Application instance; lifespan handles model warm-up + periodic cleanup.
app = FastAPI(title="EL HELAL Studio API", lifespan=lifespan)

# Permissive CORS — the UI may be opened from any origin.
# NOTE(review): wide-open origins are acceptable for a LAN/kiosk tool; confirm
# before exposing this service publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
|
| 101 |
+
|
| 102 |
+
# ── API Endpoints ──
|
| 103 |
+
|
| 104 |
+
@app.get("/")
async def read_index():
    """Serve the single-page frontend."""
    index_file = WEB_DIR / "index.html"
    return FileResponse(index_file)
|
| 107 |
+
|
| 108 |
+
@app.get("/status")
async def get_status():
    """Report whether the background model warm-up has completed."""
    ai_ready = models["ready"]
    return {"ai_ready": ai_ready}
|
| 111 |
+
|
| 112 |
+
@app.post("/upload")
async def upload_image(file: UploadFile = File(...)):
    """Accept an image upload, store it, and return a thumbnail + dimensions.

    The original bytes are stored untouched under a fresh UUID.  A small
    JPEG thumbnail (max 200px) is generated for the UI grid.  Width/height
    are reported *after* EXIF rotation so the web cropper sees the image
    the same way the processing pipeline will.

    Fixes over the original:
    - ``file.filename`` may be None/empty depending on the client; that
      crashed ``Path(...)`` — fall back to ".jpg".
    - Palette ("P") images with transparency lost their alpha channel on
      ``convert("RGB")``; normalize to RGBA first so they flatten onto
      white like RGBA/LA inputs.
    """
    from PIL import ImageOps

    file_id = str(uuid.uuid4())
    ext = Path(file.filename).suffix if file.filename else ".jpg"
    file_path = UPLOAD_DIR / f"{file_id}{ext}"

    with file_path.open("wb") as buffer:
        shutil.copyfileobj(file.file, buffer)

    with Image.open(file_path) as img:
        # Handle EXIF orientation (rotation) so the reported dimensions
        # match what the pipeline will actually process.
        img = ImageOps.exif_transpose(img)

        # Original dimensions after transposition, for the web cropper.
        width, height = img.size

        # Fast, small thumbnail for the UI (200x200 is plenty for the 72px grid).
        img.thumbnail((200, 200), Image.BILINEAR)
        thumb_path = UPLOAD_DIR / f"{file_id}_thumb.jpg"
        if img.mode == "P":
            # Palette images may carry transparency; promote so it is
            # flattened below instead of being dropped.
            img = img.convert("RGBA")
        if img.mode in ("RGBA", "LA"):
            # Flatten transparency onto white before saving as JPEG.
            bg = Image.new("RGB", img.size, (255, 255, 255))
            bg.paste(img, mask=img.split()[-1])
            bg.save(thumb_path, "JPEG", quality=60)
        else:
            img.convert("RGB").save(thumb_path, "JPEG", quality=60)

    return {
        "id": file_id,
        "filename": file.filename,
        "thumb_url": f"/static/uploads/{file_id}_thumb.jpg",
        "width": width,
        "height": height,
    }
|
| 145 |
+
|
| 146 |
+
@app.post("/process/{file_id}")
async def process_image(
    file_id: str,
    name: str = Form(""),
    id_number: str = Form(""),
    # Steps toggles
    do_restore: bool = Form(False),
    fidelity: float = Form(0.5),
    do_rmbg: bool = Form(True),
    do_color: bool = Form(True),
    do_retouch: bool = Form(True),
    do_crop: bool = Form(True),
    # Branding toggles
    add_studio_name: bool = Form(True),
    add_logo: bool = Form(True),
    add_date: bool = Form(True),
    # Layout customization
    frame_color: str = Form(None),
    frame_name: str = Form(None),
    # Optional manual crop coordinates
    x1: int = Form(None),
    y1: int = Form(None),
    x2: int = Form(None),
    y2: int = Form(None)
):
    """Run the full ID-photo pipeline on a previously uploaded image.

    Steps (individually toggleable): face restoration, crop (manual rect
    or auto), background removal, color grading, retouch, then final
    print-layout generation.  Returns URLs for the full-quality layout
    and a lightweight preview.

    Fixes over the original:
    - Manual crop required only x1/y1 to be present, so a request missing
      x2/y2 passed None into the crop rectangle and crashed; all four
      coordinates are now required before the manual path is taken.
    - The bare ``except:`` around the frame-color parse (which also
      swallowed SystemExit/KeyboardInterrupt) is narrowed to ValueError.
    - The duplicated "save restored image, crop, reopen, delete temp"
      sequence is factored into a local helper.
    """
    if not models["ready"]:
        return JSONResponse(status_code=503, content={"error": "AI Model not ready"})

    files = list(UPLOAD_DIR.glob(f"{file_id}.*"))
    if not files:
        return JSONResponse(status_code=404, content={"error": "File not found"})
    orig_path = files[0]

    # Parse frame color "#rrggbb" -> (r, g, b); malformed values are ignored.
    layout_color = None
    if frame_color and frame_color.startswith("#"):
        try:
            c = frame_color.lstrip("#")
            layout_color = tuple(int(c[i:i + 2], 16) for i in (0, 2, 4))
        except ValueError:
            layout_color = None

    try:
        # 0. FACE RESTORATION
        if do_restore:
            print(f"Pipeline: Restoring face for {file_id} (Fidelity: {fidelity})...")
            source_img = restoration.restore_image(str(orig_path), fidelity=fidelity, return_pil=True)
        else:
            source_img = Image.open(orig_path)
            from PIL import ImageOps
            source_img = ImageOps.exif_transpose(source_img)

        # Use PNG for the intermediate crop to prevent generation loss.
        temp_crop = PROCESSED_DIR / f"{file_id}_processed_crop.png"

        def _crop_with(crop_fn):
            """Apply crop_fn(src_path, dst_path) and return the cropped image.

            When restoration ran, the source exists only in memory, so it
            is written to a lossless temp PNG first and removed afterwards.
            """
            if do_restore:
                restored_temp = PROCESSED_DIR / f"{file_id}_restored.png"
                source_img.save(restored_temp, "PNG")
                try:
                    crop_fn(str(restored_temp), str(temp_crop))
                finally:
                    if restored_temp.exists():
                        restored_temp.unlink()
            else:
                crop_fn(str(orig_path), str(temp_crop))
            return Image.open(temp_crop)

        # 1. CROP (manual, auto, or skip)
        manual_rect = (x1, y1, x2, y2)
        if all(v is not None for v in manual_rect):
            print(f"Pipeline: Applying manual crop for {file_id} | Rect: ({x1}, {y1}, {x2}, {y2})")
            cropped_img = _crop_with(lambda src, dst: crop.apply_custom_crop(src, dst, manual_rect))
        elif do_crop:
            print(f"Pipeline: Applying auto crop for {file_id}...")
            cropped_img = _crop_with(crop.crop_to_4x6_opencv)
        else:
            print(f"Pipeline: Skipping crop for {file_id}")
            cropped_img = source_img

        # 2. BACKGROUND REMOVAL
        if do_rmbg:
            print(f"Pipeline: Removing background for {file_id}...")
            processed_img = process_images.remove_background(models["model"], cropped_img, models["transform"])
            print(f"Pipeline: BG Removal Done. Image Mode: {processed_img.mode}")
        else:
            print(f"Pipeline: Skipping background removal for {file_id}")
            processed_img = cropped_img

        # 3. COLOR GRADING
        if do_color and models["luts"]:
            print(f"Pipeline: Applying color grading for {file_id}...")
            graded_img = color_steal.apply_to_image(models["luts"], processed_img)
            print(f"Pipeline: Color Grading Done. Image Mode: {graded_img.mode}")
        else:
            print(f"Pipeline: Skipping color grading for {file_id}")
            graded_img = processed_img

        # 4. RETOUCH — requires BOTH the UI checkbox and the global setting.
        current_settings = load_settings()
        if do_retouch and current_settings.get("retouch", {}).get("enabled", False):
            retouch_cfg = current_settings["retouch"]
            print(f"Pipeline: Retouching face for {file_id} (Sensitivity: {retouch_cfg.get('sensitivity', 3.0)})")
            final_processed, count = retouch.retouch_image_pil(
                graded_img,
                sensitivity=retouch_cfg.get("sensitivity", 3.0),
                tone_smoothing=retouch_cfg.get("tone_smoothing", 0.6)
            )
            print(f"Pipeline: Retouch Done. Blemishes: {count}. Image Mode: {final_processed.mode}")
        else:
            print(f"Pipeline: Retouching skipped for {file_id}")
            final_processed = graded_img

        # 5. FINAL PRINT LAYOUT
        print(f"Pipeline: Generating final layout for {file_id}...")
        final_layout = generate_layout(
            final_processed, name, id_number,
            add_studio_name=add_studio_name,
            add_logo=add_logo,
            add_date=add_date,
            frame_color=layout_color,
            frame_name=frame_name
        )

        result_path = RESULT_DIR / f"{file_id}_layout.jpg"
        # High-quality JPEG: 100% quality, no chroma subsampling, 300 DPI.
        final_layout.save(result_path, "JPEG", quality=100, subsampling=0, dpi=(300, 300))

        # 6. Lightweight web preview (max 900px wide) for the UI.
        preview_path = RESULT_DIR / f"{file_id}_preview.jpg"
        pw, ph = final_layout.size
        if pw > 900:
            p_scale = 900 / pw
            preview_img = final_layout.resize((int(pw * p_scale), int(ph * p_scale)), Image.BILINEAR)
            preview_img.save(preview_path, "JPEG", quality=70)
        else:
            final_layout.save(preview_path, "JPEG", quality=70)

        if temp_crop.exists():
            temp_crop.unlink()

        return {
            "id": file_id,
            "result_url": f"/static/results/{file_id}_layout.jpg",
            "preview_url": f"/static/results/{file_id}_preview.jpg"
        }
    except Exception as e:
        import traceback
        traceback.print_exc()
        return JSONResponse(status_code=500, content={"error": str(e)})
|
| 304 |
+
|
| 305 |
+
@app.post("/clear-all")
async def clear_all():
    """Manually delete every uploaded, intermediate, and result file."""
    removed = 0
    try:
        for folder in (UPLOAD_DIR, PROCESSED_DIR, RESULT_DIR):
            for entry in folder.glob("*"):
                # Keep .gitkeep placeholders so empty dirs survive in git.
                if entry.is_file() and not entry.name.endswith(".gitkeep"):
                    entry.unlink()
                    removed += 1
        return {"status": "success", "removed_count": removed}
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": f"Failed to clear storage: {str(e)}"})
|
| 318 |
+
|
| 319 |
+
@app.get("/frames")
async def list_frames():
    """List available frame overlays (assets named frame-*.png)."""
    if not ASSETS_DIR.exists():
        return {"frames": []}
    frames = [
        {"filename": f.name, "url": f"/assets/{f.name}"}
        for f in ASSETS_DIR.glob("frame-*.png")
    ]
    return {"frames": frames}
|
| 330 |
+
|
| 331 |
+
@app.post("/frames")
async def upload_frame(file: UploadFile = File(...)):
    """Upload a new custom frame overlay into the assets directory.

    The stored name is a generated ``frame-<hex>`` id plus the original
    extension (defaults to .png), so it is discoverable by /frames and
    deletable by DELETE /frames/{filename}.
    """
    if not ASSETS_DIR.exists():
        ASSETS_DIR.mkdir(parents=True)

    # Validation: only raster types the layout engine can composite.
    if file.content_type not in ["image/png", "image/jpeg", "image/webp"]:
        return JSONResponse(status_code=400, content={"error": "Invalid file type. Use PNG/JPG."})

    ext = Path(file.filename).suffix
    frame_id = f"frame-{uuid.uuid4().hex[:8]}"
    filename = f"{frame_id}{ext}" if ext else f"{frame_id}.png"
    file_path = ASSETS_DIR / filename

    try:
        with file_path.open("wb") as buffer:
            shutil.copyfileobj(file.file, buffer)
        return {
            "status": "success",
            "frame": {
                "filename": filename,
                # Reconstructed: the URL must embed the stored filename
                # (matches the /frames listing format); the source had a
                # garbled placeholder here.
                "url": f"/assets/{filename}"
            }
        }
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})
|
| 357 |
+
|
| 358 |
+
@app.delete("/frames/{filename}")
async def delete_frame(filename: str):
    """Delete a frame file.

    Route reconstructed: the path template must be ``{filename}`` so
    FastAPI binds the path segment to the function argument (the source
    had a garbled placeholder in the decorator).
    """
    # Security check: prevent directory traversal and ensure it's a frame file.
    if ".." in filename or "/" in filename or "\\" in filename:
        return JSONResponse(status_code=400, content={"error": "Invalid filename"})

    if not filename.startswith("frame-"):
        return JSONResponse(status_code=400, content={"error": "Can only delete frame files"})

    file_path = ASSETS_DIR / filename

    if not file_path.exists():
        return JSONResponse(status_code=404, content={"error": "Frame not found"})

    try:
        file_path.unlink()
        return {"status": "success"}
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})
|
| 378 |
+
|
| 379 |
+
# ── Backup & Restore API ──
|
| 380 |
+
|
| 381 |
+
@app.post("/backup/export")
async def export_backup(client_data: dict):
    """Export settings, assets, and client data as an in-memory ZIP download.

    ``client_data`` is whatever JSON the frontend posts (its local client
    database); it is embedded verbatim as client_data.json.
    """
    mem_zip = io.BytesIO()

    with zipfile.ZipFile(mem_zip, mode="w", compression=zipfile.ZIP_DEFLATED) as zf:
        # 1. Config
        if SETTINGS_PATH.exists():
            zf.write(SETTINGS_PATH, arcname="settings.json")

        # 2. Assets (frames, logos)
        if ASSETS_DIR.exists():
            for f in ASSETS_DIR.glob("*"):
                if f.is_file():
                    zf.write(f, arcname=f"assets/{f.name}")

        # 3. Client data
        zf.writestr("client_data.json", json.dumps(client_data, indent=2))

    mem_zip.seek(0)
    filename = f"studio_backup_{int(time.time())}.zip"
    # Reconstructed: Content-Disposition must embed the generated filename
    # (the source had a garbled placeholder in the f-string).
    return StreamingResponse(
        mem_zip,
        media_type="application/zip",
        headers={"Content-Disposition": f"attachment; filename={filename}"}
    )
|
| 407 |
+
|
| 408 |
+
@app.post("/backup/import")
async def import_backup(file: UploadFile = File(...)):
    """Restore settings, assets, and client data from a backup ZIP.

    Asset entries are flattened to their basenames before writing, which
    neutralizes any directory-traversal paths inside the archive.
    """
    if not file.filename.endswith(".zip"):
        return JSONResponse(status_code=400, content={"error": "Must be a .zip file"})

    try:
        payload = await file.read()
        with zipfile.ZipFile(io.BytesIO(payload)) as archive:
            entries = archive.namelist()

            # 1. Restore config
            if "settings.json" in entries:
                # Ensure the config directory exists before writing.
                SETTINGS_PATH.parent.mkdir(parents=True, exist_ok=True)
                with open(SETTINGS_PATH, "wb") as out:
                    out.write(archive.read("settings.json"))

            # 2. Restore assets
            if not ASSETS_DIR.exists():
                ASSETS_DIR.mkdir(parents=True, exist_ok=True)
            for entry in entries:
                if entry.startswith("assets/") and not entry.endswith("/"):
                    # Safe extraction: drop any path components.
                    safe_name = os.path.basename(entry)
                    if safe_name:
                        with open(ASSETS_DIR / safe_name, "wb") as out:
                            out.write(archive.read(entry))

            # 3. Hand client data back to the frontend
            client_data = {}
            if "client_data.json" in entries:
                client_data = json.loads(archive.read("client_data.json"))

        return {"status": "success", "client_data": client_data}

    except Exception as e:
        return JSONResponse(status_code=500, content={"error": f"Import failed: {str(e)}"})
|
| 443 |
+
|
| 444 |
+
# ── Settings API ──
SETTINGS_PATH = ROOT_DIR / "config" / "settings.json"

@app.get("/settings")
async def get_settings():
    """Return the current settings.json contents (empty dict if absent)."""
    try:
        if not SETTINGS_PATH.exists():
            return {}
        with open(SETTINGS_PATH, "r") as f:
            return json.load(f)
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})
|
| 457 |
+
|
| 458 |
+
@app.post("/settings")
async def update_settings(data: dict):
    """Merge incoming settings into settings.json (one-level-deep merge).

    Top-level keys whose old and new values are both dicts are merged
    key-by-key; everything else is replaced wholesale.
    """
    try:
        current = {}
        if SETTINGS_PATH.exists():
            with open(SETTINGS_PATH, "r") as f:
                current = json.load(f)

        for key, incoming in data.items():
            existing = current.get(key)
            if isinstance(incoming, dict) and isinstance(existing, dict):
                existing.update(incoming)
            else:
                current[key] = incoming

        SETTINGS_PATH.parent.mkdir(parents=True, exist_ok=True)
        with open(SETTINGS_PATH, "w") as f:
            json.dump(current, f, indent=4, ensure_ascii=False)
        return {"status": "success"}
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})
|
| 478 |
+
|
| 479 |
+
# Static mounts: processed/user files under /static, frame & logo assets
# under /assets (only mounted when the directory exists).
app.mount("/static", StaticFiles(directory=str(STORAGE_DIR)), name="static")
if ASSETS_DIR.exists():
    app.mount("/assets", StaticFiles(directory=str(ASSETS_DIR)), name="assets")

if __name__ == "__main__":
    # Hugging Face Spaces serves on 7860 unless PORT overrides it.
    serve_port = int(os.environ.get("PORT", 7860))
    uvicorn.run(app, host="0.0.0.0", port=serve_port)
|
id-maker/web/web_storage/index.html
ADDED
|
@@ -0,0 +1,1483 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="ar" dir="rtl" data-theme="dark">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>ستوديو الهلال — نظام معالجة الصور</title>
|
| 7 |
+
<script src="https://cdn.tailwindcss.com"></script>
|
| 8 |
+
<script>
|
| 9 |
+
tailwind.config = {
|
| 10 |
+
darkMode: ['selector', '[data-theme="dark"]'],
|
| 11 |
+
}
|
| 12 |
+
</script>
|
| 13 |
+
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css" rel="stylesheet">
|
| 14 |
+
<!-- Cropper.js -->
|
| 15 |
+
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/cropperjs/1.5.13/cropper.min.css">
|
| 16 |
+
<script src="https://cdnjs.cloudflare.com/ajax/libs/cropperjs/1.5.13/cropper.min.js"></script>
|
| 17 |
+
<!-- JSZip for Download All -->
|
| 18 |
+
<script src="https://cdnjs.cloudflare.com/ajax/libs/jszip/3.10.1/jszip.min.js"></script>
|
| 19 |
+
<style>
|
| 20 |
+
@import url('https://fonts.googleapis.com/css2?family=Cairo:wght@300;400;600;700&display=swap');
|
| 21 |
+
|
| 22 |
+
/* ── Theme Variables ── */
|
| 23 |
+
:root {
|
| 24 |
+
--bg-body: #f1f5f9;
|
| 25 |
+
--bg-card: #ffffff;
|
| 26 |
+
--bg-card-alt: #f8fafc;
|
| 27 |
+
--bg-input: #f1f5f9;
|
| 28 |
+
--border-card: #e2e8f0;
|
| 29 |
+
--border-input: #cbd5e1;
|
| 30 |
+
--text-primary: #0f172a;
|
| 31 |
+
--text-secondary: #475569;
|
| 32 |
+
--text-muted: #94a3b8;
|
| 33 |
+
--scrollbar-track: #f1f5f9;
|
| 34 |
+
--scrollbar-thumb: #cbd5e1;
|
| 35 |
+
--overlay-bg: rgba(255,255,255,0.92);
|
| 36 |
+
}
|
| 37 |
+
[data-theme="dark"] {
|
| 38 |
+
--bg-body: #0f172a;
|
| 39 |
+
--bg-card: #1e293b;
|
| 40 |
+
--bg-card-alt: #0f172a;
|
| 41 |
+
--bg-input: #0f172a;
|
| 42 |
+
--border-card: #334155;
|
| 43 |
+
--border-input: #334155;
|
| 44 |
+
--text-primary: #f8fafc;
|
| 45 |
+
--text-secondary: #94a3b8;
|
| 46 |
+
--text-muted: #475569;
|
| 47 |
+
--scrollbar-track: #0f172a;
|
| 48 |
+
--scrollbar-thumb: #334155;
|
| 49 |
+
--overlay-bg: rgba(15,23,42,0.92);
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
body {
|
| 53 |
+
font-family: 'Cairo', sans-serif;
|
| 54 |
+
background-color: var(--bg-body);
|
| 55 |
+
color: var(--text-primary);
|
| 56 |
+
transition: background-color 0.3s, color 0.3s;
|
| 57 |
+
}
|
| 58 |
+
.studio-card {
|
| 59 |
+
background: var(--bg-card);
|
| 60 |
+
border: 1px solid var(--border-card);
|
| 61 |
+
transition: background-color 0.3s, border-color 0.3s;
|
| 62 |
+
}
|
| 63 |
+
.studio-input {
|
| 64 |
+
background: var(--bg-input);
|
| 65 |
+
border-color: var(--border-input);
|
| 66 |
+
color: var(--text-primary);
|
| 67 |
+
}
|
| 68 |
+
.text-theme-primary { color: var(--text-primary); }
|
| 69 |
+
.text-theme-secondary { color: var(--text-secondary); }
|
| 70 |
+
.text-theme-muted { color: var(--text-muted); }
|
| 71 |
+
|
| 72 |
+
.gradient-text { background: linear-gradient(to left, #fbbf24, #f59e0b); -webkit-background-clip: text; -webkit-text-fill-color: transparent; }
|
| 73 |
+
.cropper-view-box, .cropper-face { border-radius: 4px; outline: 2px solid #fbbf24; }
|
| 74 |
+
::-webkit-scrollbar { width: 6px; }
|
| 75 |
+
::-webkit-scrollbar-track { background: var(--scrollbar-track); }
|
| 76 |
+
::-webkit-scrollbar-thumb { background: var(--scrollbar-thumb); border-radius: 10px; }
|
| 77 |
+
|
| 78 |
+
/* Indicators & Glow */
|
| 79 |
+
.badge { font-size: 8px; padding: 1px 4px; border-radius: 3px; font-weight: 800; text-transform: uppercase; }
|
| 80 |
+
.badge-rmbg { background: #10b981; color: #064e3b; }
|
| 81 |
+
.badge-retouch { background: #3b82f6; color: #1e3a8a; }
|
| 82 |
+
.badge-crop { background: #f59e0b; color: #78350f; }
|
| 83 |
+
.badge-color { background: #a855f7; color: #4c1d95; }
|
| 84 |
+
.badge-quality { font-size: 7px; padding: 1px 3px; border-radius: 2px; font-weight: 700; }
|
| 85 |
+
.badge-hd { background: #10b981; color: white; }
|
| 86 |
+
.badge-lowres { background: #ef4444; color: white; }
|
| 87 |
+
.badge-mid { background: #f59e0b; color: white; }
|
| 88 |
+
|
| 89 |
+
.drop-glow { border-color: #fbbf24 !important; box-shadow: 0 0 20px rgba(251, 191, 36, 0.3); }
|
| 90 |
+
|
| 91 |
+
/* Skeleton Animation */
|
| 92 |
+
@keyframes shimmer {
|
| 93 |
+
0% { background-position: -468px 0 }
|
| 94 |
+
100% { background-position: 468px 0 }
|
| 95 |
+
}
|
| 96 |
+
.skeleton {
|
| 97 |
+
background: var(--bg-card);
|
| 98 |
+
background-image: linear-gradient(to right, var(--bg-card) 0%, var(--border-card) 20%, var(--bg-card) 40%, var(--bg-card) 100%);
|
| 99 |
+
background-repeat: no-repeat;
|
| 100 |
+
background-size: 800px 104px;
|
| 101 |
+
animation: shimmer 1.5s infinite linear;
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
.preview-transition { transition: opacity 0.3s ease-in-out, transform 0.3s ease-in-out; }
|
| 105 |
+
.processing-spin { animation: spin 2s linear infinite; }
|
| 106 |
+
@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
|
| 107 |
+
|
| 108 |
+
/* ── Toast Notifications ── */
|
| 109 |
+
#toast-container {
|
| 110 |
+
position: fixed; bottom: 24px; left: 50%; transform: translateX(-50%);
|
| 111 |
+
z-index: 9999; display: flex; flex-direction: column-reverse; gap: 8px; align-items: center;
|
| 112 |
+
pointer-events: none;
|
| 113 |
+
}
|
| 114 |
+
.toast {
|
| 115 |
+
pointer-events: auto;
|
| 116 |
+
padding: 12px 24px; border-radius: 12px; font-size: 14px; font-weight: 600;
|
| 117 |
+
display: flex; align-items: center; gap: 10px;
|
| 118 |
+
box-shadow: 0 8px 30px rgba(0,0,0,0.2);
|
| 119 |
+
animation: toastIn 0.35s ease-out;
|
| 120 |
+
max-width: 420px;
|
| 121 |
+
}
|
| 122 |
+
.toast-success { background: #065f46; color: #d1fae5; }
|
| 123 |
+
.toast-error { background: #7f1d1d; color: #fecaca; }
|
| 124 |
+
.toast-info { background: #1e3a5f; color: #bfdbfe; }
|
| 125 |
+
.toast-warning { background: #78350f; color: #fef3c7; }
|
| 126 |
+
@keyframes toastIn { from { opacity: 0; transform: translateY(20px); } to { opacity: 1; transform: translateY(0); } }
|
| 127 |
+
@keyframes toastOut { from { opacity: 1; transform: translateY(0); } to { opacity: 0; transform: translateY(20px); } }
|
| 128 |
+
|
| 129 |
+
/* ── Upload Progress ── */
|
| 130 |
+
.upload-progress-bar {
|
| 131 |
+
height: 4px; border-radius: 2px; background: var(--border-card); overflow: hidden; margin-top: 8px;
|
| 132 |
+
}
|
| 133 |
+
.upload-progress-fill {
|
| 134 |
+
height: 100%; background: linear-gradient(90deg, #fbbf24, #f59e0b);
|
| 135 |
+
border-radius: 2px; transition: width 0.15s linear; width: 0%;
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
/* ── Zoom Modal ── */
|
| 139 |
+
#zoom-modal {
|
| 140 |
+
position: fixed; inset: 0; z-index: 100; display: none;
|
| 141 |
+
background: rgba(0,0,0,0.9); cursor: zoom-out;
|
| 142 |
+
align-items: center; justify-content: center;
|
| 143 |
+
}
|
| 144 |
+
#zoom-modal.active { display: flex; }
|
| 145 |
+
#zoom-modal img {
|
| 146 |
+
max-width: 95vw; max-height: 95vh; object-fit: contain;
|
| 147 |
+
transform-origin: center; transition: transform 0.2s;
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
/* ── Settings Panel ── */
|
| 151 |
+
.settings-panel { max-height: 0; overflow: hidden; transition: max-height 0.35s ease-out, opacity 0.25s; opacity: 0; }
|
| 152 |
+
.settings-panel.open { max-height: 500px; opacity: 1; }
|
| 153 |
+
|
| 154 |
+
/* ── Mobile Drawer ── */
|
| 155 |
+
.mobile-drawer-overlay {
|
| 156 |
+
position: fixed; inset: 0; background: rgba(0,0,0,0.5); z-index: 80; display: none;
|
| 157 |
+
}
|
| 158 |
+
.mobile-drawer-overlay.active { display: block; }
|
| 159 |
+
.mobile-drawer {
|
| 160 |
+
position: fixed; top: 0; bottom: 0; width: 85%; max-width: 380px;
|
| 161 |
+
z-index: 81; transition: transform 0.3s ease;
|
| 162 |
+
overflow-y: auto; padding: 20px;
|
| 163 |
+
background: var(--bg-card);
|
| 164 |
+
}
|
| 165 |
+
/* RTL: drawer slides from left */
|
| 166 |
+
[dir="rtl"] .mobile-drawer { left: 0; right: auto; transform: translateX(-100%); }
|
| 167 |
+
.mobile-drawer.active { transform: translateX(0) !important; }
|
| 168 |
+
|
| 169 |
+
/* ── Before/After Toggle ── */
|
| 170 |
+
.ba-toggle {
|
| 171 |
+
position: absolute; top: 12px; left: 12px; z-index: 10;
|
| 172 |
+
backdrop-filter: blur(8px);
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
/* ── Empty State Animation ── */
|
| 176 |
+
@keyframes float { 0%,100% { transform: translateY(0); } 50% { transform: translateY(-10px); } }
|
| 177 |
+
.float-anim { animation: float 3s ease-in-out infinite; }
|
| 178 |
+
@keyframes pointDown { 0%,100% { transform: translateY(0); } 50% { transform: translateY(8px); } }
|
| 179 |
+
.point-anim { animation: pointDown 1.5s ease-in-out infinite; }
|
| 180 |
+
|
| 181 |
+
/* ── Keyboard Shortcuts Modal ── */
|
| 182 |
+
#shortcuts-modal {
|
| 183 |
+
position: fixed; inset: 0; z-index: 90; display: none;
|
| 184 |
+
background: rgba(0,0,0,0.6); align-items: center; justify-content: center;
|
| 185 |
+
backdrop-filter: blur(4px);
|
| 186 |
+
}
|
| 187 |
+
#shortcuts-modal.active { display: flex; }
|
| 188 |
+
|
| 189 |
+
/* Horizontal queue slider */
|
| 190 |
+
.queue-slider {
|
| 191 |
+
display: flex; flex-direction: row; gap: 10px;
|
| 192 |
+
flex-wrap: wrap;
|
| 193 |
+
max-height: 340px;
|
| 194 |
+
overflow-y: auto; overflow-x: hidden;
|
| 195 |
+
padding: 8px 4px; scroll-behavior: smooth;
|
| 196 |
+
scrollbar-width: thin;
|
| 197 |
+
}
|
| 198 |
+
.queue-slider::-webkit-scrollbar { width: 5px; }
|
| 199 |
+
.queue-slider::-webkit-scrollbar-track { background: var(--scrollbar-track); border-radius: 10px; }
|
| 200 |
+
.queue-slider::-webkit-scrollbar-thumb { background: var(--scrollbar-thumb); border-radius: 10px; }
|
| 201 |
+
.queue-slider::-webkit-scrollbar-thumb:hover { background: #fbbf24; }
|
| 202 |
+
|
| 203 |
+
.queue-slide {
|
| 204 |
+
flex: 0 0 auto; width: 72px; cursor: pointer;
|
| 205 |
+
position: relative; border-radius: 10px;
|
| 206 |
+
transition: transform 0.15s, box-shadow 0.15s;
|
| 207 |
+
}
|
| 208 |
+
.queue-slide:hover { transform: translateY(-2px); }
|
| 209 |
+
.queue-slide.active {
|
| 210 |
+
box-shadow: 0 0 0 2px #fbbf24;
|
| 211 |
+
transform: translateY(-2px);
|
| 212 |
+
}
|
| 213 |
+
.queue-slide img {
|
| 214 |
+
width: 72px; height: 72px; object-fit: cover;
|
| 215 |
+
border-radius: 10px; display: block;
|
| 216 |
+
border: 1px solid var(--border-card);
|
| 217 |
+
}
|
| 218 |
+
.queue-slide .slide-status {
|
| 219 |
+
position: absolute; top: -4px; right: -4px;
|
| 220 |
+
width: 16px; height: 16px; border-radius: 50%;
|
| 221 |
+
display: flex; align-items: center; justify-content: center;
|
| 222 |
+
font-size: 9px; z-index: 5;
|
| 223 |
+
background: var(--bg-card); box-shadow: 0 1px 3px rgba(0,0,0,0.3);
|
| 224 |
+
}
|
| 225 |
+
.queue-slide .slide-delete {
|
| 226 |
+
position: absolute; top: -4px; left: -4px;
|
| 227 |
+
width: 18px; height: 18px; border-radius: 50%;
|
| 228 |
+
background: #ef4444; color: white;
|
| 229 |
+
display: flex; align-items: center; justify-content: center;
|
| 230 |
+
font-size: 8px; z-index: 5;
|
| 231 |
+
opacity: 0; transition: opacity 0.15s;
|
| 232 |
+
border: none; cursor: pointer;
|
| 233 |
+
}
|
| 234 |
+
.queue-slide:hover .slide-delete { opacity: 1; }
|
| 235 |
+
.queue-slide .slide-name {
|
| 236 |
+
font-size: 9px; text-align: center; margin-top: 4px;
|
| 237 |
+
overflow: hidden; text-overflow: ellipsis; white-space: nowrap;
|
| 238 |
+
color: var(--text-secondary); font-weight: 600;
|
| 239 |
+
max-width: 72px;
|
| 240 |
+
}
|
| 241 |
+
</style>
|
| 242 |
+
</head>
|
| 243 |
+
<body class="min-h-screen p-4 md:p-10">
|
| 244 |
+
|
| 245 |
+
<!-- Toast Container -->
|
| 246 |
+
<div id="toast-container"></div>
|
| 247 |
+
|
| 248 |
+
<!-- Zoom Modal (#14) -->
|
| 249 |
+
<div id="zoom-modal" onclick="closeZoom()">
|
| 250 |
+
<img id="zoom-img" src="" alt="Zoomed preview">
|
| 251 |
+
</div>
|
| 252 |
+
|
| 253 |
+
<!-- Keyboard Shortcuts Modal -->
|
| 254 |
+
<div id="shortcuts-modal" onclick="this.classList.remove('active')">
|
| 255 |
+
<div class="studio-card rounded-2xl p-6 max-w-sm w-full mx-4 shadow-2xl" onclick="event.stopPropagation()">
|
| 256 |
+
<h3 class="font-bold text-lg mb-4 text-right gradient-text">اختصارات لوحة المفاتيح</h3>
|
| 257 |
+
<div class="space-y-3 text-sm" dir="ltr">
|
| 258 |
+
<div class="flex justify-between"><kbd class="bg-slate-700 text-white px-2 py-1 rounded text-xs">← →</kbd><span class="text-theme-secondary">Navigate images</span></div>
|
| 259 |
+
<div class="flex justify-between"><kbd class="bg-slate-700 text-white px-2 py-1 rounded text-xs">Delete</kbd><span class="text-theme-secondary">Delete selected</span></div>
|
| 260 |
+
<div class="flex justify-between"><kbd class="bg-slate-700 text-white px-2 py-1 rounded text-xs">Enter</kbd><span class="text-theme-secondary">Save & Next</span></div>
|
| 261 |
+
<div class="flex justify-between"><kbd class="bg-slate-700 text-white px-2 py-1 rounded text-xs">Ctrl+S</kbd><span class="text-theme-secondary">Process current</span></div>
|
| 262 |
+
<div class="flex justify-between"><kbd class="bg-slate-700 text-white px-2 py-1 rounded text-xs">Escape</kbd><span class="text-theme-secondary">Close modals</span></div>
|
| 263 |
+
</div>
|
| 264 |
+
<button onclick="document.getElementById('shortcuts-modal').classList.remove('active')" class="mt-5 w-full py-2 bg-yellow-500 text-slate-900 rounded-lg font-bold">حسناً</button>
|
| 265 |
+
</div>
|
| 266 |
+
</div>
|
| 267 |
+
|
| 268 |
+
<!-- Mobile Drawer Overlay -->
|
| 269 |
+
<div class="mobile-drawer-overlay lg:hidden" id="drawer-overlay" onclick="toggleDrawer(false)"></div>
|
| 270 |
+
<div class="mobile-drawer lg:hidden studio-card" id="mobile-drawer">
|
| 271 |
+
<div class="flex justify-between items-center mb-4">
|
| 272 |
+
<h3 class="font-bold text-sm text-theme-secondary">قائمة المعالجة</h3>
|
| 273 |
+
<button onclick="toggleDrawer(false)" class="w-8 h-8 rounded-full bg-slate-700 flex items-center justify-center"><i class="fa-solid fa-xmark text-white text-sm"></i></button>
|
| 274 |
+
</div>
|
| 275 |
+
<div id="mobile-image-list" class="flex flex-col gap-3 overflow-y-auto"></div>
|
| 276 |
+
</div>
|
| 277 |
+
|
| 278 |
+
<!-- Header -->
|
| 279 |
+
<header class="max-w-[1400px] mx-auto mb-8 flex flex-col md:flex-row justify-between items-center gap-4">
|
| 280 |
+
<div>
|
| 281 |
+
<h1 class="text-4xl font-bold gradient-text text-right">ستوديو الهلال</h1>
|
| 282 |
+
<p class="text-theme-secondary text-right">سير عمل متكامل</p>
|
| 283 |
+
</div>
|
| 284 |
+
<div class="flex flex-wrap justify-center md:justify-end gap-2">
|
| 285 |
+
<a href="https://esmailx50-job.hf.space" target="_blank" class="flex items-center gap-2 px-3 py-2 rounded-full bg-indigo-900/30 border border-indigo-500/50 text-sm text-indigo-300 hover:bg-indigo-500 hover:text-white transition-all font-bold">
|
| 286 |
+
<i class="fa-solid fa-wand-magic-sparkles"></i>
|
| 287 |
+
<span class="hidden sm:inline">أداة تحسين الجودة</span>
|
| 288 |
+
</a>
|
| 289 |
+
<a href="https://esmailx50-rmbg2.hf.space" target="_blank" class="flex items-center gap-2 px-3 py-2 rounded-full bg-rose-900/30 border border-rose-500/50 text-sm text-rose-300 hover:bg-rose-500 hover:text-white transition-all font-bold">
|
| 290 |
+
<i class="fa-solid fa-eraser"></i>
|
| 291 |
+
<span class="hidden sm:inline">أداة إزالة الخلفية</span>
|
| 292 |
+
</a>
|
| 293 |
+
|
| 294 |
+
<!-- Theme Toggle (#11) -->
|
| 295 |
+
<button id="theme-toggle" onclick="toggleTheme()" title="تبديل السمة" class="w-10 h-10 rounded-full bg-slate-800 border border-slate-700 flex items-center justify-center hover:bg-slate-700 transition-all">
|
| 296 |
+
<i class="fa-solid fa-sun text-yellow-400 text-sm" id="theme-icon"></i>
|
| 297 |
+
</button>
|
| 298 |
+
|
| 299 |
+
<!-- Keyboard Shortcuts Hint -->
|
| 300 |
+
<button onclick="document.getElementById('shortcuts-modal').classList.add('active')" title="اختصارات لوحة المفاتيح" class="w-10 h-10 rounded-full bg-slate-800 border border-slate-700 flex items-center justify-center hover:bg-slate-700 transition-all">
|
| 301 |
+
<i class="fa-regular fa-keyboard text-slate-400 text-sm"></i>
|
| 302 |
+
</button>
|
| 303 |
+
|
| 304 |
+
<div id="ai-status" class="flex items-center gap-2 px-4 py-2 rounded-full bg-slate-800 border border-slate-700 text-sm text-slate-400">
|
| 305 |
+
<div class="w-3 h-3 rounded-full bg-yellow-500 animate-pulse"></div>
|
| 306 |
+
</div>
|
| 307 |
+
</div>
|
| 308 |
+
</header>
|
| 309 |
+
|
| 310 |
+
<main class="max-w-[1400px] mx-auto grid grid-cols-1 lg:grid-cols-12 gap-6 lg:gap-6">
|
| 311 |
+
|
| 312 |
+
<!-- Mobile Queue Toggle Button (#10) -->
|
| 313 |
+
<div class="lg:hidden flex gap-3">
|
| 314 |
+
<button onclick="toggleDrawer(true)" class="flex-1 studio-card rounded-xl p-3 flex items-center justify-center gap-3 active:scale-95 transition-transform">
|
| 315 |
+
<i class="fa-solid fa-list text-yellow-500"></i>
|
| 316 |
+
<span class="font-bold text-sm text-theme-secondary">قائمة الصور</span>
|
| 317 |
+
<span id="mobile-queue-count" class="text-xs bg-yellow-500 text-slate-900 px-2 py-0.5 rounded-full font-bold">0</span>
|
| 318 |
+
</button>
|
| 319 |
+
</div>
|
| 320 |
+
|
| 321 |
+
<!-- ── COLUMN 1: QUEUE (RIGHT SIDE) ── -->
|
| 322 |
+
<div class="hidden lg:flex lg:col-span-3 flex-col gap-4 order-1">
|
| 323 |
+
<div class="studio-card rounded-2xl p-5 text-center border-dashed border-2 hover:border-yellow-500 transition-all cursor-pointer group" style="border-color: var(--border-card);" id="drop-zone">
|
| 324 |
+
<input type="file" id="file-input" multiple class="hidden" accept="image/*">
|
| 325 |
+
<div class="float-anim">
|
| 326 |
+
<i class="fa-solid fa-cloud-arrow-up text-4xl mb-2 text-slate-400 group-hover:text-yellow-500 transition-colors"></i>
|
| 327 |
+
</div>
|
| 328 |
+
<h3 class="font-semibold text-sm text-theme-primary">ارفع صورة</h3>
|
| 329 |
+
<p class="text-theme-muted text-[10px] mt-1">أو اسحب وأفلت هنا</p>
|
| 330 |
+
<div id="upload-progress-wrapper" class="hidden">
|
| 331 |
+
<div class="upload-progress-bar">
|
| 332 |
+
<div id="upload-progress-fill" class="upload-progress-fill"></div>
|
| 333 |
+
</div>
|
| 334 |
+
<p id="upload-progress-text" class="text-xs text-theme-muted mt-1">جاري الرفع...</p>
|
| 335 |
+
</div>
|
| 336 |
+
</div>
|
| 337 |
+
|
| 338 |
+
<div class="studio-card rounded-2xl p-4 flex flex-col">
|
| 339 |
+
<div class="flex justify-between items-center mb-2 px-1">
|
| 340 |
+
<h3 class="font-bold uppercase text-[10px] tracking-widest text-theme-muted">قائمة المعالجة</h3>
|
| 341 |
+
<span id="queue-count" class="text-[10px] bg-slate-700 text-white px-2 py-0.5 rounded">٠ صور</span>
|
| 342 |
+
</div>
|
| 343 |
+
<div id="image-list" class="queue-slider">
|
| 344 |
+
<div class="text-center w-full py-6" id="empty-queue-placeholder">
|
| 345 |
+
<p class="text-theme-muted text-xs font-medium">القائمة فارغة</p>
|
| 346 |
+
</div>
|
| 347 |
+
</div>
|
| 348 |
+
</div>
|
| 349 |
+
</div>
|
| 350 |
+
|
| 351 |
+
<!-- Mobile Drop Zone (shown only on mobile when drawer is closed) -->
|
| 352 |
+
<div class="lg:hidden">
|
| 353 |
+
<div class="studio-card rounded-2xl p-4 text-center border-dashed border-2 hover:border-yellow-500 transition-all cursor-pointer group" style="border-color: var(--border-card);" id="drop-zone-mobile">
|
| 354 |
+
<div class="float-anim inline-block">
|
| 355 |
+
<i class="fa-solid fa-cloud-arrow-up text-3xl text-slate-400 group-hover:text-yellow-500 transition-colors"></i>
|
| 356 |
+
</div>
|
| 357 |
+
<p class="text-sm text-theme-muted mt-2">اضغط هنا لرفع الصور</p>
|
| 358 |
+
<div id="upload-progress-wrapper-mobile" class="hidden">
|
| 359 |
+
<div class="upload-progress-bar">
|
| 360 |
+
<div id="upload-progress-fill-mobile" class="upload-progress-fill"></div>
|
| 361 |
+
</div>
|
| 362 |
+
<p id="upload-progress-text-mobile" class="text-xs text-theme-muted mt-1">جاري الرفع...</p>
|
| 363 |
+
</div>
|
| 364 |
+
</div>
|
| 365 |
+
</div>
|
| 366 |
+
|
| 367 |
+
<!-- ── COLUMN 2: CENTER (inputs + preview) ── -->
|
| 368 |
+
<div class="lg:col-span-6 flex flex-col gap-6 order-2">
|
| 369 |
+
|
| 370 |
+
<!-- Data Input Card -->
|
| 371 |
+
<div class="studio-card rounded-2xl p-4 shadow-lg">
|
| 372 |
+
<div class="flex items-center justify-between mb-4 flex-wrap gap-2">
|
| 373 |
+
<div class="flex items-center gap-3">
|
| 374 |
+
<h3 class="font-bold uppercase text-xs tracking-widest text-theme-muted">بيانات الطالب</h3>
|
| 375 |
+
<span id="current-filename" class="text-xs font-bold text-theme-secondary truncate max-w-[140px]" style="border-right: 1px solid var(--border-card); padding-right: 12px;"></span>
|
| 376 |
+
</div>
|
| 377 |
+
<!-- Moved navigation here -->
|
| 378 |
+
<div class="flex items-center gap-2" dir="ltr">
|
| 379 |
+
<button onclick="navigate(-1)" class="w-8 h-8 rounded-full bg-slate-800 hover:bg-slate-700 border border-slate-700 flex items-center justify-center transition-all">
|
| 380 |
+
<i class="fa-solid fa-chevron-left text-slate-400 text-xs"></i>
|
| 381 |
+
</button>
|
| 382 |
+
<span id="nav-counter" class="text-xs font-mono font-bold text-yellow-500 mx-1">0/0</span>
|
| 383 |
+
<button onclick="navigate(1)" class="w-8 h-8 rounded-full bg-slate-800 hover:bg-slate-700 border border-slate-700 flex items-center justify-center transition-all">
|
| 384 |
+
<i class="fa-solid fa-chevron-right text-slate-400 text-xs"></i>
|
| 385 |
+
</button>
|
| 386 |
+
</div>
|
| 387 |
+
</div>
|
| 388 |
+
|
| 389 |
+
<div class="grid grid-cols-1 md:grid-cols-2 gap-4">
|
| 390 |
+
<div>
|
| 391 |
+
<label class="block text-xs font-bold text-theme-muted uppercase mb-2 tracking-widest text-right">اسم الطالب</label>
|
| 392 |
+
<input type="text" id="student-name" dir="rtl" class="w-full studio-input border rounded-lg px-4 py-2.5 focus:ring-2 focus:ring-yellow-500 outline-none transition-all text-right" placeholder="الاسم هنا...">
|
| 393 |
+
</div>
|
| 394 |
+
<div>
|
| 395 |
+
<label class="block text-xs font-bold text-theme-muted uppercase mb-2 tracking-widest text-right">الرقم القومي</label>
|
| 396 |
+
<input type="text" id="student-id" class="w-full studio-input border rounded-lg px-4 py-2.5 focus:ring-2 focus:ring-yellow-500 outline-none transition-all text-right" placeholder="٣٠٠٠...">
|
| 397 |
+
</div>
|
| 398 |
+
</div>
|
| 399 |
+
</div>
|
| 400 |
+
|
| 401 |
+
<!-- Editor/Preview Area -->
|
| 402 |
+
<div class="studio-card rounded-2xl p-2 flex-grow relative min-h-[450px] flex items-center justify-center overflow-hidden" id="main-area">
|
| 403 |
+
|
| 404 |
+
<!-- Before/After Toggle (#3) -->
|
| 405 |
+
<div class="ba-toggle hidden" id="ba-toggle-wrapper">
|
| 406 |
+
<button onclick="toggleBeforeAfter()" id="ba-toggle-btn" class="px-3 py-1.5 rounded-lg text-xs font-bold flex items-center gap-2 transition-all" style="background: rgba(0,0,0,0.6); color: white; border: 1px solid rgba(255,255,255,0.15);">
|
| 407 |
+
<i class="fa-solid fa-eye"></i>
|
| 408 |
+
<span id="ba-toggle-label">الأصلية</span>
|
| 409 |
+
</button>
|
| 410 |
+
</div>
|
| 411 |
+
|
| 412 |
+
<!-- Zoom Hint (#14) -->
|
| 413 |
+
<button onclick="openZoom()" id="zoom-hint" class="hidden absolute bottom-3 left-3 z-10 px-3 py-1.5 rounded-lg text-xs font-bold flex items-center gap-2 transition-all hover:scale-105" style="background: rgba(0,0,0,0.6); color: white; border: 1px solid rgba(255,255,255,0.15);">
|
| 414 |
+
<i class="fa-solid fa-magnifying-glass-plus"></i>
|
| 415 |
+
تكبير
|
| 416 |
+
</button>
|
| 417 |
+
|
| 418 |
+
<!-- Cropper Mode -->
|
| 419 |
+
<div id="cropper-container" class="hidden w-full h-full p-4 flex flex-col gap-4">
|
| 420 |
+
<div class="flex-1 overflow-hidden rounded-lg bg-black">
|
| 421 |
+
<img id="cropper-img" src="">
|
| 422 |
+
</div>
|
| 423 |
+
<div class="flex justify-end gap-4">
|
| 424 |
+
<button onclick="cancelCrop()" class="px-6 py-2 bg-slate-700 rounded-lg font-bold text-white">إلغاء</button>
|
| 425 |
+
<button onclick="applyCrop()" class="px-6 py-2 bg-yellow-500 text-slate-900 rounded-lg font-bold">حفظ وإعادة المعالجة</button>
|
| 426 |
+
</div>
|
| 427 |
+
</div>
|
| 428 |
+
|
| 429 |
+
<!-- Preview Mode -->
|
| 430 |
+
<div id="preview-container" class="w-full h-full flex items-center justify-center">
|
| 431 |
+
<div id="preview-skeleton" class="hidden w-[80%] h-[80%] skeleton rounded-lg shadow-2xl opacity-50"></div>
|
| 432 |
+
<img id="main-preview" src="" class="max-w-full max-h-[500px] hidden rounded shadow-2xl preview-transition opacity-0 cursor-zoom-in" onclick="openZoom()">
|
| 433 |
+
<!-- Improved Empty State (#13) -->
|
| 434 |
+
<div id="preview-placeholder" class="text-center px-4" style="color: var(--text-muted);">
|
| 435 |
+
<div class="float-anim inline-block mb-4">
|
| 436 |
+
<svg width="100" height="100" viewBox="0 0 100 100" fill="none" class="mx-auto opacity-25">
|
| 437 |
+
<rect x="12" y="18" width="76" height="64" rx="10" stroke="currentColor" stroke-width="2.5"/>
|
| 438 |
+
<circle cx="35" cy="40" r="8" stroke="currentColor" stroke-width="2.5"/>
|
| 439 |
+
<path d="M12 70 L35 50 L52 62 L65 45 L88 65" stroke="currentColor" stroke-width="2.5" stroke-linecap="round" stroke-linejoin="round"/>
|
| 440 |
+
<path d="M62 22 L62 14 M62 14 L56 20 M62 14 L68 20" stroke="currentColor" stroke-width="2" stroke-linecap="round"/>
|
| 441 |
+
</svg>
|
| 442 |
+
</div>
|
| 443 |
+
<p class="font-semibold text-sm">اختر صورة من القائمة</p>
|
| 444 |
+
</div>
|
| 445 |
+
</div>
|
| 446 |
+
|
| 447 |
+
<!-- Batch Processing Overlay -->
|
| 448 |
+
<div id="batch-overlay" class="absolute inset-0 hidden items-center justify-center flex-col gap-5 z-50 rounded-2xl" style="background: rgba(0,0,0,0.7); backdrop-filter: blur(6px);">
|
| 449 |
+
<div class="w-14 h-14 border-4 border-yellow-500 border-t-transparent rounded-full animate-spin"></div>
|
| 450 |
+
<p id="batch-counter" class="text-5xl font-extrabold text-yellow-400 font-mono tracking-wider" dir="ltr">0/0</p>
|
| 451 |
+
<p class="text-sm text-slate-300 font-semibold">جاري المعالجة...</p>
|
| 452 |
+
</div>
|
| 453 |
+
</div>
|
| 454 |
+
</div>
|
| 455 |
+
|
| 456 |
+
<!-- ── COLUMN 3: OPTIONS (LEFT SIDE) ── -->
|
| 457 |
+
<div class="lg:col-span-3 flex flex-col gap-6 order-3">
|
| 458 |
+
<div class="studio-card rounded-2xl p-5 shadow-xl sticky top-6">
|
| 459 |
+
<label class="block text-[10px] font-bold text-theme-muted uppercase mb-4 tracking-widest text-right">الخيارات والإعدادات</label>
|
| 460 |
+
|
| 461 |
+
<!-- Processing Steps -->
|
| 462 |
+
<div class="space-y-3 mb-6" dir="rtl">
|
| 463 |
+
<label class="flex items-center gap-3 cursor-pointer group p-2 rounded-lg hover:bg-slate-500/5 transition-colors">
|
| 464 |
+
<input type="checkbox" id="step-restore" class="w-5 h-5 rounded border-slate-700 bg-slate-800 text-cyan-500 focus:ring-cyan-500">
|
| 465 |
+
<span class="text-sm font-medium text-theme-secondary group-hover:text-cyan-500 transition-colors">ترميم الوجه (AI)</span>
|
| 466 |
+
</label>
|
| 467 |
+
<label class="flex items-center gap-3 cursor-pointer group p-2 rounded-lg hover:bg-slate-500/5 transition-colors">
|
| 468 |
+
<input type="checkbox" id="step-retouch" checked class="w-5 h-5 rounded border-slate-700 bg-slate-800 text-yellow-500 focus:ring-yellow-500">
|
| 469 |
+
<span class="text-sm font-medium text-theme-secondary group-hover:text-yellow-500 transition-colors">تجميل البشرة</span>
|
| 470 |
+
</label>
|
| 471 |
+
<label class="flex items-center gap-3 cursor-pointer group p-2 rounded-lg hover:bg-slate-500/5 transition-colors">
|
| 472 |
+
<input type="checkbox" id="step-color" checked class="w-5 h-5 rounded border-slate-700 bg-slate-800 text-yellow-500 focus:ring-yellow-500">
|
| 473 |
+
<span class="text-sm font-medium text-theme-secondary group-hover:text-yellow-500 transition-colors">تحسين الألوان</span>
|
| 474 |
+
</label>
|
| 475 |
+
<label class="flex items-center gap-3 cursor-pointer group p-2 rounded-lg hover:bg-slate-500/5 transition-colors">
|
| 476 |
+
<input type="checkbox" id="step-rmbg" checked class="w-5 h-5 rounded border-slate-700 bg-slate-800 text-yellow-500 focus:ring-yellow-500">
|
| 477 |
+
<span class="text-sm font-medium text-theme-secondary group-hover:text-yellow-500 transition-colors">إزالة الخلفية</span>
|
| 478 |
+
</label>
|
| 479 |
+
<label class="flex items-center gap-3 cursor-pointer group p-2 rounded-lg hover:bg-slate-500/5 transition-colors">
|
| 480 |
+
<input type="checkbox" id="step-crop" checked class="w-5 h-5 rounded border-slate-700 bg-slate-800 text-yellow-500 focus:ring-yellow-500">
|
| 481 |
+
<span class="text-sm font-medium text-theme-secondary group-hover:text-yellow-500 transition-colors">قص تلقائي</span>
|
| 482 |
+
</label>
|
| 483 |
+
|
| 484 |
+
<div class="border-t my-3" style="border-color: var(--border-card);"></div>
|
| 485 |
+
|
| 486 |
+
<label class="flex items-center gap-3 cursor-pointer group p-2 rounded-lg hover:bg-slate-500/5 transition-colors">
|
| 487 |
+
<input type="checkbox" id="add-studio" checked class="w-5 h-5 rounded border-slate-700 bg-slate-800 text-emerald-500 focus:ring-emerald-500">
|
| 488 |
+
<span class="text-sm font-medium text-theme-secondary group-hover:text-emerald-500 transition-colors">اسم الاستوديو</span>
|
| 489 |
+
</label>
|
| 490 |
+
<label class="flex items-center gap-3 cursor-pointer group p-2 rounded-lg hover:bg-slate-500/5 transition-colors">
|
| 491 |
+
<input type="checkbox" id="add-logo" checked class="w-5 h-5 rounded border-slate-700 bg-slate-800 text-emerald-500 focus:ring-emerald-500">
|
| 492 |
+
<span class="text-sm font-medium text-theme-secondary group-hover:text-emerald-500 transition-colors">شعار الاستوديو</span>
|
| 493 |
+
</label>
|
| 494 |
+
<label class="flex items-center gap-3 cursor-pointer group p-2 rounded-lg hover:bg-slate-500/5 transition-colors">
|
| 495 |
+
<input type="checkbox" id="add-date" checked class="w-5 h-5 rounded border-slate-700 bg-slate-800 text-emerald-500 focus:ring-emerald-500">
|
| 496 |
+
<span class="text-sm font-medium text-theme-secondary group-hover:text-emerald-500 transition-colors">إضافة التاريخ</span>
|
| 497 |
+
</label>
|
| 498 |
+
|
| 499 |
+
<div class="border-t my-3" style="border-color: var(--border-card);"></div>
|
| 500 |
+
|
| 501 |
+
<!-- Frame Selection -->
|
| 502 |
+
<div class="p-2 rounded-lg hover:bg-slate-500/5 transition-colors">
|
| 503 |
+
<div class="flex items-center justify-between mb-2">
|
| 504 |
+
<span class="text-sm font-medium text-theme-secondary">الإطار الزخرفي</span>
|
| 505 |
+
<div class="flex gap-1">
|
| 506 |
+
<button onclick="deleteFrame()" class="text-xs bg-rose-900/30 border border-rose-500/50 hover:bg-rose-500 text-rose-300 hover:text-white w-6 h-6 flex items-center justify-center rounded transition-all" title="حذف الإطار المحدد">
|
| 507 |
+
<i class="fa-solid fa-xmark"></i>
|
| 508 |
+
</button>
|
| 509 |
+
<button onclick="document.getElementById('frame-upload').click()" class="text-xs bg-slate-700 hover:bg-slate-600 text-white px-2 py-1 rounded transition-colors">
|
| 510 |
+
<i class="fa-solid fa-plus"></i> رفع
|
| 511 |
+
</button>
|
| 512 |
+
</div>
|
| 513 |
+
<input type="file" id="frame-upload" class="hidden" accept="image/*" onchange="uploadFrame(this.files[0])">
|
| 514 |
+
</div>
|
| 515 |
+
<select id="frame-select" class="w-full bg-slate-800 border border-slate-700 text-slate-300 text-xs rounded p-2 focus:ring-2 focus:ring-pink-500 outline-none">
|
| 516 |
+
<option value="">(بدون إطار)</option>
|
| 517 |
+
</select>
|
| 518 |
+
<div id="frame-preview-box" class="mt-2 w-full h-16 bg-slate-800/50 rounded border border-slate-700/50 flex items-center justify-center overflow-hidden hidden">
|
| 519 |
+
<img id="frame-preview-img" src="" class="h-full object-contain opacity-80">
|
| 520 |
+
</div>
|
| 521 |
+
</div>
|
| 522 |
+
|
| 523 |
+
<!-- Frame Color Picker -->
|
| 524 |
+
<div class="p-2 rounded-lg hover:bg-slate-500/5 transition-colors">
|
| 525 |
+
<div class="flex items-center justify-between mb-2">
|
| 526 |
+
<span class="text-sm font-medium text-theme-secondary">لون الإطار</span>
|
| 527 |
+
<div class="flex items-center gap-2">
|
| 528 |
+
<button onclick="saveCurrentColor()" class="text-xs bg-slate-700 hover:bg-slate-600 text-white px-2 py-1 rounded transition-colors" title="حفظ اللون الحالي">
|
| 529 |
+
<i class="fa-solid fa-floppy-disk"></i>
|
| 530 |
+
</button>
|
| 531 |
+
<input type="color" id="frame-color" value="#3c0000" class="w-8 h-8 rounded cursor-pointer bg-transparent border-0 p-0">
|
| 532 |
+
</div>
|
| 533 |
+
</div>
|
| 534 |
+
<div id="saved-colors-container" class="flex flex-wrap gap-2 min-h-[24px]">
|
| 535 |
+
<!-- Saved colors will appear here -->
|
| 536 |
+
</div>
|
| 537 |
+
<p class="text-[9px] text-theme-muted mt-1 text-center">اضغط لاختيار اللون، زر يمين للحذف</p>
|
| 538 |
+
</div>
|
| 539 |
+
</div>
|
| 540 |
+
|
| 541 |
+
<!-- Settings Panel (#5) — collapsible advanced-settings panel in the side column -->
|
| 542 |
+
<div class="mb-6">
|
| 543 |
+
<button onclick="toggleSettings()" class="w-full justify-between flex items-center gap-2 text-xs text-theme-muted hover:text-yellow-500 transition-colors font-bold p-2 bg-slate-500/5 rounded-lg">
|
| 544 |
+
<div class="flex items-center gap-2">
|
| 545 |
+
<i class="fa-solid fa-sliders text-xs"></i>
|
| 546 |
+
<span>إعدادات متقدمة</span>
|
| 547 |
+
</div>
|
| 548 |
+
<i class="fa-solid fa-chevron-down text-[10px] transition-transform" id="settings-chevron"></i>
|
| 549 |
+
</button>
|
| 550 |
+
<div id="settings-panel" class="settings-panel mt-2">
|
| 551 |
+
<div class="p-3 rounded-lg border space-y-4 text-right" style="background: var(--bg-card-alt); border-color: var(--border-card);">
|
| 552 |
+
<!-- Restoration Fidelity -->
|
| 553 |
+
<div>
|
| 554 |
+
<div class="flex justify-between items-center mb-1">
|
| 555 |
+
<label class="text-[10px] font-bold text-theme-muted">قوة الترميم (Fidelity)</label>
|
| 556 |
+
<span id="fidelity-val" class="text-[10px] font-mono text-cyan-500">0.5</span>
|
| 557 |
+
</div>
|
| 558 |
+
<input type="range" id="setting-fidelity" min="0" max="1" step="0.1" value="0.5" class="w-full accent-cyan-500 h-1.5" oninput="document.getElementById('fidelity-val').textContent=this.value">
|
| 559 |
+
<p class="text-[8px] text-theme-muted mt-1">الأقل = ترميم أقوى، الأعلى = حفاظ على ملامح الأصل</p>
|
| 560 |
+
</div>
|
| 561 |
+
<!-- Retouch Sensitivity -->
|
| 562 |
+
<div>
|
| 563 |
+
<div class="flex justify-between items-center mb-1">
|
| 564 |
+
<label class="text-[10px] font-bold text-theme-muted">حساسية التجميل</label>
|
| 565 |
+
<span id="sensitivity-val" class="text-[10px] font-mono text-yellow-500">3.0</span>
|
| 566 |
+
</div>
|
| 567 |
+
<input type="range" id="setting-sensitivity" min="1" max="5" step="0.5" value="3.0" class="w-full accent-yellow-500 h-1.5" oninput="document.getElementById('sensitivity-val').textContent=this.value">
|
| 568 |
+
</div>
|
| 569 |
+
<!-- Tone Smoothing -->
|
| 570 |
+
<div>
|
| 571 |
+
<div class="flex justify-between items-center mb-1">
|
| 572 |
+
<label class="text-[10px] font-bold text-theme-muted">نعومة البشرة</label>
|
| 573 |
+
<span id="smoothing-val" class="text-[10px] font-mono text-yellow-500">0.6</span>
|
| 574 |
+
</div>
|
| 575 |
+
<input type="range" id="setting-smoothing" min="0" max="1" step="0.1" value="0.6" class="w-full accent-yellow-500 h-1.5" oninput="document.getElementById('smoothing-val').textContent=parseFloat(this.value).toFixed(1)">
|
| 576 |
+
</div>
|
| 577 |
+
<!-- ID Font Size -->
|
| 578 |
+
<div>
|
| 579 |
+
<div class="flex justify-between items-center mb-1">
|
| 580 |
+
<label class="text-[10px] font-bold text-theme-muted">حجم خط الرقم</label>
|
| 581 |
+
<span id="fontsize-val" class="text-[10px] font-mono text-yellow-500">63</span>
|
| 582 |
+
</div>
|
| 583 |
+
<input type="range" id="setting-fontsize" min="30" max="120" step="1" value="63" class="w-full accent-yellow-500 h-1.5" oninput="document.getElementById('fontsize-val').textContent=this.value">
|
| 584 |
+
</div>
|
| 585 |
+
<!-- Name Font Size -->
|
| 586 |
+
<div>
|
| 587 |
+
<div class="flex justify-between items-center mb-1">
|
| 588 |
+
<label class="text-[10px] font-bold text-theme-muted">حجم خط الاسم</label>
|
| 589 |
+
<span id="namefontsize-val" class="text-[10px] font-mono text-yellow-500">43</span>
|
| 590 |
+
</div>
|
| 591 |
+
<input type="range" id="setting-namefontsize" min="20" max="80" step="1" value="43" class="w-full accent-yellow-500 h-1.5" oninput="document.getElementById('namefontsize-val').textContent=this.value">
|
| 592 |
+
</div>
|
| 593 |
+
<button onclick="saveSettings()" class="w-full py-2 bg-yellow-500 hover:bg-yellow-600 text-slate-900 rounded-lg font-bold text-xs transition-all flex items-center justify-center gap-2">
|
| 594 |
+
<i class="fa-solid fa-floppy-disk"></i> حفظ الإعدادات
|
| 595 |
+
</button>
|
| 596 |
+
|
| 597 |
+
<!-- Backup Controls -->
|
| 598 |
+
<div class="flex gap-2 pt-2 border-t border-slate-600/30">
|
| 599 |
+
<button onclick="exportSettings()" class="flex-1 py-2 bg-slate-700 hover:bg-slate-600 text-white rounded-lg font-bold text-[10px] transition-all flex items-center justify-center gap-2">
|
| 600 |
+
<i class="fa-solid fa-file-export"></i> تصدير Backup
|
| 601 |
+
</button>
|
| 602 |
+
<button onclick="document.getElementById('import-file').click()" class="flex-1 py-2 bg-slate-700 hover:bg-slate-600 text-white rounded-lg font-bold text-[10px] transition-all flex items-center justify-center gap-2">
|
| 603 |
+
<i class="fa-solid fa-file-import"></i> استيراد
|
| 604 |
+
</button>
|
| 605 |
+
<input type="file" id="import-file" class="hidden" accept=".zip" onchange="importSettings(this.files[0])">
|
| 606 |
+
</div>
|
| 607 |
+
</div>
|
| 608 |
+
</div>
|
| 609 |
+
</div>
|
| 610 |
+
|
| 611 |
+
<!-- Actions List -->
|
| 612 |
+
<div class="flex flex-col gap-2">
|
| 613 |
+
<button id="process-all-btn" disabled class="w-full bg-yellow-500 hover:bg-yellow-600 disabled:opacity-50 disabled:cursor-not-allowed text-slate-900 font-bold py-3 rounded-lg transition-all flex items-center justify-center gap-2 text-sm shadow-lg shadow-yellow-500/20">
|
| 614 |
+
<i class="fa-solid fa-play"></i> بدء معالجة الكل
|
| 615 |
+
</button>
|
| 616 |
+
|
| 617 |
+
<div class="grid grid-cols-2 gap-2 mt-2">
|
| 618 |
+
<!-- Re-process Single (#7) -->
|
| 619 |
+
<button id="reprocess-btn" disabled onclick="reprocessCurrent()" title="إعادة معالجة هذه الصورة" class="bg-orange-600 hover:bg-orange-700 disabled:opacity-50 disabled:cursor-not-allowed text-white font-bold py-2 rounded-lg transition-all flex items-center justify-center gap-2 text-xs">
|
| 620 |
+
<i class="fa-solid fa-rotate"></i> إعادة
|
| 621 |
+
</button>
|
| 622 |
+
|
| 623 |
+
<button id="edit-crop-btn" disabled class="bg-slate-700 hover:bg-slate-600 text-white font-bold py-2 rounded-lg transition-all flex items-center justify-center gap-2 text-xs">
|
| 624 |
+
<i class="fa-solid fa-crop-simple"></i> قص
|
| 625 |
+
</button>
|
| 626 |
+
</div>
|
| 627 |
+
|
| 628 |
+
<div class="grid grid-cols-2 gap-2 mt-1">
|
| 629 |
+
<button id="save-btn" disabled title="تحميل" class="bg-emerald-600 hover:bg-emerald-700 disabled:opacity-50 text-white font-bold py-2 rounded-lg transition-all">
|
| 630 |
+
<i class="fa-solid fa-download"></i>
|
| 631 |
+
</button>
|
| 632 |
+
<button id="download-all-btn" disabled title="ضغط الكل" class="bg-blue-600 hover:bg-blue-700 disabled:opacity-50 text-white font-bold py-2 rounded-lg transition-all flex items-center justify-center gap-1">
|
| 633 |
+
<i class="fa-solid fa-file-zipper"></i>
|
| 634 |
+
</button>
|
| 635 |
+
</div>
|
| 636 |
+
|
| 637 |
+
<button id="clear-all-btn" title="حذف الكل" class="mt-4 w-full bg-transparent border border-rose-600/30 hover:bg-rose-600 hover:text-white text-rose-500 font-bold py-2 rounded-lg transition-all text-xs">
|
| 638 |
+
<i class="fa-solid fa-trash-can ml-1"></i> حذف جميع الصور
|
| 639 |
+
</button>
|
| 640 |
+
</div>
|
| 641 |
+
|
| 642 |
+
</div>
|
| 643 |
+
</div>
|
| 644 |
+
|
| 645 |
+
</main>
|
| 646 |
+
|
| 647 |
+
<script>
|
| 648 |
+
// ══════════════════════════════════════════════════
|
| 649 |
+
// STATE
|
| 650 |
+
// ══════════════════════════════════════════════════
|
| 651 |
+
let imageData = [];          // upload queue: one record per image (urls, fields, status)
let currentIndex = -1;       // index of the image shown in the main preview (-1 = none)
let cropper = null;          // live cropper instance while the crop editor is open
let isAIReady = false;       // flips true once /status reports the engine has loaded
let showingOriginal = false; // before/after toggle: true while the original is displayed
|
| 656 |
+
|
| 657 |
+
// ══════════════════════════════════════════════════
|
| 658 |
+
// UI ELEMENTS
|
| 659 |
+
// ══════════════════════════════════════════════════
|
| 660 |
+
// DOM references, resolved once at startup.
// Upload / queue
const fileInput = document.getElementById('file-input');
const imageList = document.getElementById('image-list');
const aiStatus = document.getElementById('ai-status');
// Batch action buttons
const processAllBtn = document.getElementById('process-all-btn');
const downloadAllBtn = document.getElementById('download-all-btn');
const clearAllBtn = document.getElementById('clear-all-btn');
// Per-image action buttons
const editBtn = document.getElementById('edit-crop-btn');
const saveBtn = document.getElementById('save-btn');
const reprocessBtn = document.getElementById('reprocess-btn');
// Preview area
const mainPreview = document.getElementById('main-preview');
const previewPlaceholder = document.getElementById('preview-placeholder');
const previewContainer = document.getElementById('preview-container');
const cropperContainer = document.getElementById('cropper-container');
const cropperImg = document.getElementById('cropper-img');
const baToggleWrapper = document.getElementById('ba-toggle-wrapper');
const zoomHint = document.getElementById('zoom-hint');
|
| 676 |
+
|
| 677 |
+
// ══════════════════════════════════════════════════
|
| 678 |
+
// THEME TOGGLE (#11)
|
| 679 |
+
// ══════════════════════════════════════════════════
|
| 680 |
+
/** Apply the persisted theme (default: dark) and sync the header toggle icon. */
function initTheme() {
    const theme = localStorage.getItem('studio-theme') || 'dark';
    document.documentElement.setAttribute('data-theme', theme);
    updateThemeIcon(theme);
}
|
| 685 |
+
/** Flip dark/light, persist the choice to localStorage, and refresh the icon. */
function toggleTheme() {
    const root = document.documentElement;
    const next = root.getAttribute('data-theme') === 'dark' ? 'light' : 'dark';
    root.setAttribute('data-theme', next);
    localStorage.setItem('studio-theme', next);
    updateThemeIcon(next);
}
|
| 692 |
+
/** Show a sun icon while in dark mode (click → light) and a moon while in light. */
function updateThemeIcon(theme) {
    const icon = document.getElementById('theme-icon');
    icon.className = theme === 'dark'
        ? 'fa-solid fa-sun text-yellow-400 text-sm'
        : 'fa-solid fa-moon text-slate-600 text-sm';
}
initTheme();
|
| 701 |
+
|
| 702 |
+
// ══════════════════════════════════════════════════
|
| 703 |
+
// TOAST NOTIFICATIONS
|
| 704 |
+
// ══════════════════════════════════════════════════
|
| 705 |
+
/**
 * Display a transient toast notification in the corner container.
 * @param {string} message  text to show (rendered as HTML)
 * @param {string} [type]   one of success | error | info | warning
 * @param {number} [duration] milliseconds before the toast animates out
 */
function showToast(message, type = 'info', duration = 4000) {
    const icons = { success: 'fa-circle-check', error: 'fa-circle-exclamation', info: 'fa-circle-info', warning: 'fa-triangle-exclamation' };
    const toast = document.createElement('div');
    toast.className = `toast toast-${type}`;
    toast.innerHTML = `<i class="fa-solid ${icons[type] || icons.info}"></i><span>${message}</span>`;
    document.getElementById('toast-container').appendChild(toast);
    // Play the exit animation, then drop the node once it finishes (300ms).
    setTimeout(() => {
        toast.style.animation = 'toastOut 0.3s ease-in forwards';
        setTimeout(() => toast.remove(), 300);
    }, duration);
}
|
| 717 |
+
|
| 718 |
+
// ══════════════════════════════════════════════════
|
| 719 |
+
// SETTINGS PANEL (#5)
|
| 720 |
+
// ══════════════════════════════════════════════════
|
| 721 |
+
/** Collapse/expand the settings panel and rotate its chevron to match. */
function toggleSettings() {
    const panel = document.getElementById('settings-panel');
    const isOpen = panel.classList.toggle('open');
    document.getElementById('settings-chevron').style.transform = isOpen ? 'rotate(180deg)' : '';
}
|
| 727 |
+
|
| 728 |
+
/**
 * Fetch persisted pipeline settings from /settings and mirror each known key
 * into its slider input and value label. Sections or keys missing from the
 * response are left at their current UI values.
 */
async function loadSettingsFromServer() {
    // Write `value` into slider #inputId and label #labelId (label optionally formatted).
    const apply = (inputId, labelId, value, fmt) => {
        document.getElementById(inputId).value = value;
        document.getElementById(labelId).textContent = fmt ? fmt(value) : value;
    };
    const oneDecimal = (v) => parseFloat(v).toFixed(1);
    try {
        const res = await fetch('/settings');
        const data = await res.json();
        if (data.restoration && data.restoration.fidelity !== undefined) {
            apply('setting-fidelity', 'fidelity-val', data.restoration.fidelity, oneDecimal);
        }
        if (data.retouch) {
            if (data.retouch.sensitivity !== undefined) {
                apply('setting-sensitivity', 'sensitivity-val', data.retouch.sensitivity);
            }
            if (data.retouch.tone_smoothing !== undefined) {
                apply('setting-smoothing', 'smoothing-val', data.retouch.tone_smoothing, oneDecimal);
            }
        }
        if (data.overlays) {
            if (data.overlays.id_font_size !== undefined) {
                apply('setting-fontsize', 'fontsize-val', data.overlays.id_font_size);
            }
            if (data.overlays.name_font_size !== undefined) {
                apply('setting-namefontsize', 'namefontsize-val', data.overlays.name_font_size);
            }
        }
    } catch (e) { console.warn('Could not load settings', e); }
}
|
| 758 |
+
|
| 759 |
+
/** Read the current slider values, POST them to /settings, and toast the outcome. */
async function saveSettings() {
    const floatOf = (id) => parseFloat(document.getElementById(id).value);
    const intOf = (id) => parseInt(document.getElementById(id).value);
    const payload = {
        restoration: { fidelity: floatOf('setting-fidelity') },
        retouch: {
            sensitivity: floatOf('setting-sensitivity'),
            tone_smoothing: floatOf('setting-smoothing')
        },
        overlays: {
            id_font_size: intOf('setting-fontsize'),
            name_font_size: intOf('setting-namefontsize')
        }
    };
    try {
        const res = await fetch('/settings', {
            method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(payload)
        });
        const result = await res.json();
        if (result.status === 'success') {
            showToast('تم حفظ الإعدادات بنجاح', 'success');
        } else {
            showToast('فشل حفظ الإعدادات', 'error');
        }
    } catch (e) {
        showToast('خطأ في الاتصال بالسيرفر', 'error');
    }
}
|
| 787 |
+
|
| 788 |
+
/**
 * Download a full backup zip from /backup/export. The request body carries
 * browser-local state (theme, saved colors) so it is captured in the backup
 * alongside the server-side settings and frames.
 */
async function exportSettings() {
    // Gather client-side data
    const clientData = {
        theme: localStorage.getItem('studio-theme'),
        savedColors: JSON.parse(localStorage.getItem('studio-saved-colors') || '[]')
    };

    try {
        showToast("جاري تحضير ملف النسخة الاحتياطية...", "info");
        const res = await fetch('/backup/export', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(clientData)
        });

        if (res.ok) {
            const blob = await res.blob();
            const url = window.URL.createObjectURL(blob);
            const a = document.createElement('a');
            a.href = url;
            a.download = `studio_backup_${new Date().toISOString().slice(0,10)}.zip`;
            document.body.appendChild(a);
            a.click();
            a.remove();
            // Fix: release the blob URL after the click — the original never
            // revoked it, leaking the whole zip in memory on every export.
            window.URL.revokeObjectURL(url);
            showToast("تم تنزيل النسخة الاحتياطية", "success");
        } else {
            showToast("فشل التصدير", "error");
        }
    } catch (e) {
        showToast("خطأ في الاتصال", "error");
    }
}
|
| 820 |
+
|
| 821 |
+
/**
 * Restore a backup zip via /backup/import: re-apply client-side state
 * (theme, saved colors) from the response, then reload the page so every
 * widget reflects the restored configuration.
 * @param {File} file - zip chosen in the hidden #import-file input
 */
async function importSettings(file) {
    if (!file) return;

    // Fix: confirm BEFORE doing any work (the original built the FormData first).
    if (!confirm("سيتم استبدال جميع الإعدادات الحالية والإطارات بالنسخة المستوردة. هل أنت متأكد؟")) {
        document.getElementById('import-file').value = ""; // Reset
        return;
    }

    const fd = new FormData();
    fd.append('file', file);

    try {
        showToast("جاري استعادة النسخة الاحتياطية...", "info");
        const res = await fetch('/backup/import', { method: 'POST', body: fd });
        if (res.ok) {
            const data = await res.json();

            // Restore client data
            if (data.client_data) {
                if (data.client_data.theme) localStorage.setItem('studio-theme', data.client_data.theme);
                if (data.client_data.savedColors) localStorage.setItem('studio-saved-colors', JSON.stringify(data.client_data.savedColors));
            }

            showToast("تم الاستعادة بنجاح! سيتم تحديث الصفحة...", "success");
            setTimeout(() => window.location.reload(), 1500);
        } else {
            // Fix: a non-JSON error body used to throw here and be misreported
            // as a connection error; parse defensively instead.
            const err = await res.json().catch(() => ({}));
            showToast(err.error || "فشل الاستيراد", "error");
        }
    } catch (e) {
        showToast("خطأ في الاتصال", "error");
    }
    document.getElementById('import-file').value = ""; // Reset
}

loadSettingsFromServer();
|
| 856 |
+
|
| 857 |
+
// ══════════════════════════════════════════════════
|
| 858 |
+
// MOBILE DRAWER (#10)
|
| 859 |
+
// ══════════════════════════════════════════════════
|
| 860 |
+
/** Open (true) or close (false) the mobile queue drawer and its backdrop. */
function toggleDrawer(open) {
    ['drawer-overlay', 'mobile-drawer'].forEach(id =>
        document.getElementById(id).classList.toggle('active', open));
}
// The mobile drop zone simply proxies clicks to the shared hidden file input.
const dropZoneMobile = document.getElementById('drop-zone-mobile');
if (dropZoneMobile) dropZoneMobile.onclick = () => fileInput.click();
|
| 868 |
+
|
| 869 |
+
// Frame picker elements
const frameSelect = document.getElementById('frame-select');
const framePreviewBox = document.getElementById('frame-preview-box');
const framePreviewImg = document.getElementById('frame-preview-img');

/**
 * Repopulate the frame <select> from /frames, preserving the current
 * selection when that frame still exists, then refresh the preview thumbnail.
 */
async function loadFrames() {
    try {
        const res = await fetch('/frames');
        const data = await res.json();
        const previous = frameSelect.value;
        frameSelect.innerHTML = '<option value="">(بدون إطار)</option>';
        for (const f of data.frames) {
            const opt = document.createElement('option');
            opt.value = f.filename;
            opt.textContent = f.filename;
            frameSelect.appendChild(opt);
        }
        frameSelect.value = previous; // no-op fallback if the frame was removed
        updateFramePreview();
    } catch (e) { console.error("Failed to load frames", e); }
}
|
| 890 |
+
|
| 891 |
+
frameSelect.onchange = updateFramePreview;

/** Show the selected frame's thumbnail, or hide the preview box when none is chosen. */
function updateFramePreview() {
    const selected = frameSelect.value;
    framePreviewBox.classList.toggle('hidden', !selected);
    if (selected) framePreviewImg.src = `/assets/${selected}`;
}
|
| 902 |
+
|
| 903 |
+
/**
 * Upload a new frame image to the server, refresh the list, and auto-select it.
 * @param {File} file - image picked from the frame upload input
 */
async function uploadFrame(file) {
    if (!file) return;
    const fd = new FormData();
    fd.append('file', file);
    try {
        showToast("جاري رفع الإطار...", "info");
        const res = await fetch('/frames', { method: 'POST', body: fd });
        if (!res.ok) {
            showToast("فشل الرفع", "error");
            return;
        }
        showToast("تم رفع الإطار بنجاح", "success");
        await loadFrames();
        const data = await res.json();
        if (data.frame) {
            frameSelect.value = data.frame.filename; // auto-select the new frame
            updateFramePreview();
        }
    } catch (e) {
        showToast("خطأ في الاتصال", "error");
    }
}
|
| 925 |
+
|
| 926 |
+
/** Delete the currently selected frame on the server, after confirmation. */
async function deleteFrame() {
    const name = frameSelect.value;
    if (!name) return showToast("لم يتم تحديد إطار للحذف", "warning");
    if (!confirm(`هل أنت متأكد من حذف الإطار "${name}"؟`)) return;

    try {
        const res = await fetch(`/frames/${name}`, { method: 'DELETE' });
        if (res.ok) {
            showToast("تم الحذف بنجاح", "success");
            await loadFrames();
            // loadFrames keeps the previous selection when possible; if the
            // deleted frame was selected, fall back to "no frame".
            if (frameSelect.value === name) frameSelect.value = "";
            updateFramePreview();
        } else {
            const err = await res.json();
            showToast(err.error || "فشل الحذف", "error");
        }
    } catch (e) {
        showToast("خطأ في الاتصال", "error");
    }
}
|
| 947 |
+
|
| 948 |
+
// ══════════════════════════════════════════════════
|
| 949 |
+
// SAVED COLORS LOGIC
|
| 950 |
+
// ══════════════════════════════════════════════════
|
| 951 |
+
/**
 * Rebuild the saved-color swatch row: a fixed default maroon first, then every
 * color persisted in localStorage. Left-click applies a color to the picker;
 * right-click on a saved swatch offers deletion.
 */
function loadSavedColors() {
    const container = document.getElementById('saved-colors-container');
    container.innerHTML = '';

    // Default maroon swatch (always present, not deletable).
    const defaultColor = "#3c0000";
    const defDiv = document.createElement('div');
    defDiv.className = "w-6 h-6 rounded-full cursor-pointer border border-slate-500 hover:scale-110 transition-transform bg-[#3c0000]";
    defDiv.title = "Default Maroon";
    defDiv.onclick = () => { document.getElementById('frame-color').value = defaultColor; };
    container.appendChild(defDiv);

    const saved = JSON.parse(localStorage.getItem('studio-saved-colors') || '[]');
    for (const color of saved) {
        const swatch = document.createElement('div');
        swatch.className = "w-6 h-6 rounded-full cursor-pointer border border-slate-500 hover:scale-110 transition-transform relative";
        swatch.style.backgroundColor = color;
        swatch.onclick = () => { document.getElementById('frame-color').value = color; };
        swatch.oncontextmenu = (e) => {
            e.preventDefault();
            if (confirm("حذف هذا اللون؟")) deleteColor(color);
        };
        container.appendChild(swatch);
    }
}
|
| 976 |
+
|
| 977 |
+
/** Persist the picker's current color unless it is the default or already saved. */
function saveCurrentColor() {
    const color = document.getElementById('frame-color').value;
    const saved = JSON.parse(localStorage.getItem('studio-saved-colors') || '[]');
    if (color === "#3c0000") {
        showToast("هذا هو اللون الافتراضي", "info");
    } else if (saved.includes(color)) {
        showToast("اللون محفوظ مسبقاً", "warning");
    } else {
        saved.push(color);
        localStorage.setItem('studio-saved-colors', JSON.stringify(saved));
        loadSavedColors();
        showToast("تم حفظ اللون", "success");
    }
}
|
| 991 |
+
|
| 992 |
+
/** Remove one color from the saved palette and re-render the swatches. */
function deleteColor(colorToDelete) {
    const remaining = JSON.parse(localStorage.getItem('studio-saved-colors') || '[]')
        .filter(c => c !== colorToDelete);
    localStorage.setItem('studio-saved-colors', JSON.stringify(remaining));
    loadSavedColors();
}

loadSavedColors();
loadFrames(); // Initial load
|
| 1001 |
+
|
| 1002 |
+
// ══════════════════════════════════════════════════
|
| 1003 |
+
// AI STATUS CHECK
|
| 1004 |
+
// ══════════════════════════════════════════════════
|
| 1005 |
+
/**
 * Poll /status until the AI engine reports ready, then flip the header badge
 * green and stop polling (the visible code never sets readiness back to false).
 * Fetch errors are swallowed on purpose: the server may still be starting up.
 */
async function checkStatus() {
    if (isAIReady) return; // nothing left to check
    try {
        const res = await fetch('/status');
        const data = await res.json();
        if (data.ai_ready) {
            isAIReady = true;
            aiStatus.innerHTML = '<div class="w-3 h-3 rounded-full bg-emerald-500"></div>';
            aiStatus.className = "flex items-center gap-2 px-4 py-2 rounded-full bg-emerald-900/20 border border-emerald-800/50 text-sm text-emerald-400 font-bold";
            // Fix: the original kept hitting /status every 2s for the life of
            // the page even after the engine was ready.
            clearInterval(statusPoller);
        }
    } catch (e) {}
}
const statusPoller = setInterval(checkStatus, 2000);
|
| 1017 |
+
|
| 1018 |
+
// ══════════════════════════════════════════════════
|
| 1019 |
+
// UPLOAD & QUEUE
|
| 1020 |
+
// ══════════════════════════════════════════════════
|
| 1021 |
+
// Desktop drop zone: click-to-browse, plus drag & drop with a glow highlight.
const dropZone = document.getElementById('drop-zone');
dropZone.onclick = () => fileInput.click();

// Factory for drag handlers: prevent default navigation and set the glow state.
const dragGlow = (on) => (e) => {
    e.preventDefault();
    dropZone.classList.toggle('drop-glow', on);
};
['dragenter', 'dragover'].forEach(name => dropZone.addEventListener(name, dragGlow(true), false));
['dragleave', 'drop'].forEach(name => dropZone.addEventListener(name, dragGlow(false), false));
dropZone.addEventListener('drop', (e) => { handleFiles(e.dataTransfer.files); }, false);
fileInput.onchange = (e) => handleFiles(e.target.files);
|
| 1032 |
+
|
| 1033 |
+
/**
 * Upload one file to /upload via XMLHttpRequest (fetch has no upload-progress
 * events), mirroring progress into both desktop and mobile progress bars.
 * @param {File} file
 * @returns {Promise<Object>} parsed JSON response on success
 */
function uploadFileWithProgress(file) {
    return new Promise((resolve, reject) => {
        // Desktop + mobile progress widgets (either set may be absent from the DOM).
        const wrappers = [document.getElementById('upload-progress-wrapper'), document.getElementById('upload-progress-wrapper-mobile')];
        const fills = [document.getElementById('upload-progress-fill'), document.getElementById('upload-progress-fill-mobile')];
        const texts = [document.getElementById('upload-progress-text'), document.getElementById('upload-progress-text-mobile')];

        const formData = new FormData();
        formData.append('file', file);

        const xhr = new XMLHttpRequest();
        wrappers.forEach(w => { if (w) w.classList.remove('hidden'); });
        xhr.upload.onprogress = (e) => {
            if (!e.lengthComputable) return;
            const pct = Math.round((e.loaded / e.total) * 100);
            fills.forEach(f => { if (f) f.style.width = pct + '%'; });
            texts.forEach(t => { if (t) t.textContent = `جاري الرفع... ${pct}%`; });
        };
        xhr.onload = () => {
            wrappers.forEach(w => { if (w) w.classList.add('hidden'); });
            fills.forEach(f => { if (f) f.style.width = '0%'; });
            if (xhr.status >= 200 && xhr.status < 300) resolve(JSON.parse(xhr.responseText));
            else reject(new Error(`Upload failed: ${xhr.status}`));
        };
        xhr.onerror = () => {
            wrappers.forEach(w => { if (w) w.classList.add('hidden'); });
            reject(new Error('Network error'));
        };
        xhr.open('POST', '/upload');
        xhr.send(formData);
    });
}
|
| 1064 |
+
|
| 1065 |
+
/**
 * Upload each selected/dropped file sequentially, append it to the queue with
 * blank metadata, and auto-select the first image added to an empty queue.
 * @param {FileList} files
 */
async function handleFiles(files) {
    for (const file of files) {
        try {
            const uploaded = await uploadFileWithProgress(file);
            imageData.push({
                ...uploaded,
                name: "", id_num: "",
                result_url: null, preview_url: null,
                version: 1, custom_crop: null, steps: null,
                status: 'waiting'
            });
            renderQueue();
            if (currentIndex === -1) selectImage(imageData.length - 1);
        } catch (e) {
            showToast(`فشل رفع ${file.name}`, 'error');
        }
    }
    fileInput.value = ""; // allow re-selecting the same file later
}
|
| 1078 |
+
|
| 1079 |
+
/**
 * Classify source-image quality from its pixel dimensions.
 * @param {number} width
 * @param {number} height
 * @returns {string} badge HTML: HD, OK, or a low-resolution warning
 */
function getQualityBadge(width, height) {
    const megapixels = (width * height) / 1000000;
    const shortSide = Math.min(width, height);
    if (shortSide >= 1500 || megapixels >= 3) return '<span class="badge-quality badge-hd">HD</span>';
    if (shortSide >= 800 || megapixels >= 1) return '<span class="badge-quality badge-mid">OK</span>';
    return '<span class="badge-quality badge-lowres" title="جودة منخفضة">⚠ LOW</span>';
}
|
| 1086 |
+
|
| 1087 |
+
// ══════════════════════════════════════════════════
|
| 1088 |
+
// QUEUE RENDER - WITH DELETE BUTTON
|
| 1089 |
+
// ══════════════════════════════════════════════════
|
| 1090 |
+
/**
 * Re-render the thumbnail queue (desktop + mobile lists), the counters, and
 * the enabled state of the batch action buttons from the current imageData.
 */
function renderQueue() {
    const count = imageData.length;
    document.getElementById('queue-count').innerText = `${count} صور`;
    document.getElementById('nav-counter').innerText = count > 0 ? `${currentIndex + 1}/${count}` : "0/0";
    const mobileCount = document.getElementById('mobile-queue-count');
    if (mobileCount) mobileCount.innerText = count;

    if (count === 0) {
        const emptyHTML = `
            <div class="text-center py-12">
                <div class="float-anim inline-block mb-4">
                    <i class="fa-solid fa-layer-group text-3xl opacity-30"></i>
                </div>
                <p class="text-theme-muted text-sm font-medium">القائمة فارغة</p>
            </div>`;
        imageList.innerHTML = emptyHTML;
        const mobileList = document.getElementById('mobile-image-list');
        if (mobileList) mobileList.innerHTML = `<div class="text-center py-10 text-theme-muted italic text-sm">القائمة فارغة</div>`;
        processAllBtn.disabled = true;
        downloadAllBtn.disabled = true;
        editBtn.disabled = true;
        saveBtn.disabled = true;
        reprocessBtn.disabled = true;
        return;
    }

    // Fix: escape filenames before interpolating into innerHTML — an uploaded
    // file named e.g. `<img onerror=...>.jpg` would otherwise inject markup.
    const escapeHtml = (s) => String(s).replace(/[&<>"']/g, (ch) => (
        { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;' }[ch]));

    const statusIcons = {
        waiting: '<i class="fa-regular fa-clock text-slate-500"></i>',
        processing: '<i class="fa-solid fa-gear processing-spin text-yellow-500"></i>',
        done: '<i class="fa-solid fa-circle-check text-emerald-500"></i>',
        error: '<i class="fa-solid fa-circle-exclamation text-rose-500"></i>'
    };

    const html = imageData.map((img, idx) => `
        <div onclick="selectImage(${idx})" class="queue-slide ${currentIndex === idx ? 'active' : ''}">
            <img src="${img.preview_url ? img.preview_url + '?v=' + img.version : img.thumb_url}" alt="">
            <div class="slide-status">${statusIcons[img.status] || ''}</div>
            <button onclick="deleteImage(event, ${idx})" class="slide-delete" title="حذف"><i class="fa-solid fa-xmark"></i></button>
            <div class="slide-name">${escapeHtml(img.filename)}</div>
        </div>
    `).join('');

    imageList.innerHTML = html;
    const mobileList = document.getElementById('mobile-image-list');
    if (mobileList) mobileList.innerHTML = html;

    // Keep the active thumbnail scrolled into view.
    const activeSlide = imageList.querySelector('.queue-slide.active');
    if (activeSlide) activeSlide.scrollIntoView({ behavior: 'smooth', block: 'nearest', inline: 'center' });

    processAllBtn.disabled = count === 0;
    downloadAllBtn.disabled = imageData.filter(d => d.result_url).length === 0;
}
|
| 1144 |
+
|
| 1145 |
+
// ══════════════════════════════════════════════════
|
| 1146 |
+
// SELECTION & NAVIGATION
|
| 1147 |
+
// ══════════════════════════════════════════════════
|
| 1148 |
+
/** Step the selection by `direction` (+1 / -1); out-of-range targets are ignored. */
function navigate(direction) {
    if (imageData.length === 0) return;
    const target = currentIndex + direction;
    if (target >= 0 && target < imageData.length) selectImage(target);
}
|
| 1153 |
+
|
| 1154 |
+
/**
 * Make image `idx` the active one: persist the previous image's form fields,
 * populate the form for the new image, and preload its best available preview.
 * @param {number} idx - index into imageData
 */
function selectImage(idx) {
    saveCurrentFields();
    currentIndex = idx;
    showingOriginal = false;
    const data = imageData[idx];

    // Per-image form fields and title.
    document.getElementById('student-name').value = data.name;
    document.getElementById('student-id').value = data.id_num;
    document.getElementById('current-filename').innerText = data.filename;

    // Before/after toggle and reprocess only make sense once a result exists.
    baToggleWrapper.classList.toggle('hidden', !data.result_url);
    document.getElementById('ba-toggle-label').textContent = 'الأصلية';
    zoomHint.classList.toggle('hidden', !data.result_url && !data.thumb_url);
    reprocessBtn.disabled = !data.result_url;

    // Best available image: preview > full result > original thumbnail.
    // `?v=` busts the browser cache after each reprocess.
    const previewSkeleton = document.getElementById('preview-skeleton');
    const url = data.preview_url ? data.preview_url + '?v=' + data.version :
                data.result_url ? data.result_url + '?v=' + data.version :
                data.thumb_url;

    // Skeleton shows only while the image is still unprocessed.
    previewSkeleton.classList.toggle('hidden', !!data.result_url);
    previewPlaceholder.classList.add('hidden');

    // Preload off-screen, then swap in to avoid flashing a half-loaded image.
    const loader = new Image();
    loader.onload = () => {
        mainPreview.src = loader.src;
        mainPreview.classList.remove('hidden', 'opacity-0');
        mainPreview.classList.add('opacity-100');
        previewSkeleton.classList.add('hidden');
    };
    loader.onerror = () => {
        console.error('Failed to load preview:', url);
        previewSkeleton.classList.add('hidden');
    };
    loader.src = url;

    editBtn.disabled = false;
    saveBtn.disabled = !data.result_url;
    renderQueue();
    toggleDrawer(false);
}
|
| 1201 |
+
|
| 1202 |
+
/** Copy the name/ID form fields back into the currently selected image's record. */
function saveCurrentFields() {
    if (currentIndex === -1) return;
    const entry = imageData[currentIndex];
    entry.name = document.getElementById('student-name').value;
    entry.id_num = document.getElementById('student-id').value;
}
|
| 1207 |
+
|
| 1208 |
+
/**
 * Flip the main preview between the original upload and the processed result,
 * with a short fade between the two. No-op until the image has a result.
 */
function toggleBeforeAfter() {
    if (currentIndex === -1) return;
    const data = imageData[currentIndex];
    if (!data.result_url) return;
    showingOriginal = !showingOriginal;
    const label = document.getElementById('ba-toggle-label');
    // Fix: dropped the original's unused `const t = new Date().getTime();`.
    mainPreview.classList.add('opacity-0');
    setTimeout(() => {
        if (showingOriginal) {
            mainPreview.src = data.thumb_url;
            label.textContent = 'النتيجة';
        } else {
            mainPreview.src = (data.preview_url || data.result_url) + '?v=' + data.version;
            label.textContent = 'الأصلية';
        }
        mainPreview.onload = () => {
            mainPreview.classList.remove('opacity-0');
            mainPreview.classList.add('opacity-100');
        };
    }, 150); // matches the opacity transition duration
}
|
| 1230 |
+
|
| 1231 |
+
// ══════════════════════════════════════════════════
|
| 1232 |
+
// ZOOM
|
| 1233 |
+
// ══════════════════════════════════════════════════
|
| 1234 |
+
/** Open the fullscreen zoom modal on the current preview; the wheel scales 0.5x–5x. */
function openZoom() {
    if (!mainPreview.src) return;
    const modal = document.getElementById('zoom-modal');
    const zoomImg = document.getElementById('zoom-img');
    zoomImg.src = mainPreview.src;
    zoomImg.style.transform = 'scale(1)';
    modal.classList.add('active');
    modal.onwheel = (e) => {
        e.preventDefault();
        // The current scale is parsed back out of the inline transform string.
        const scale = parseFloat(zoomImg.style.transform.replace('scale(', '').replace(')', '')) || 1;
        const step = e.deltaY > 0 ? -0.15 : 0.15;
        zoomImg.style.transform = `scale(${Math.max(0.5, Math.min(5, scale + step))})`;
    };
}

/** Hide the zoom modal. */
function closeZoom() { document.getElementById('zoom-modal').classList.remove('active'); }
|
| 1250 |
+
|
| 1251 |
+
// ══════════════════════════════════════════════════
|
| 1252 |
+
// PROCESSING
|
| 1253 |
+
// ══════════════════════════════════════════════════
|
| 1254 |
+
// Batch "process all": runs the pipeline sequentially over every image that
// has no result yet, behind a blocking overlay with a done/total counter.
processAllBtn.onclick = async () => {
    if (!isAIReady) return showToast("انتظر تحميل المحرك...", 'warning');
    saveCurrentFields();
    processAllBtn.disabled = true;
    fileInput.disabled = true;

    const total = imageData.filter(d => !d.result_url).length;
    if (total === 0) { processAllBtn.disabled = false; fileInput.disabled = false; return; }

    const overlay = document.getElementById('batch-overlay');
    const counter = document.getElementById('batch-counter');
    overlay.classList.replace('hidden', 'flex');
    counter.textContent = `0/${total}`;

    let done = 0;
    for (let i = 0; i < imageData.length; i++) {
        if (imageData[i].result_url) continue; // already processed
        await runPipelineForIndex(i);
        done++;
        counter.textContent = `${done}/${total}`;
    }

    overlay.classList.replace('flex', 'hidden');
    processAllBtn.disabled = false;
    fileInput.disabled = false;

    // Refresh the preview so the selected image reflects its new result.
    if (currentIndex !== -1) selectImage(currentIndex);
    showToast(`تمت معالجة ${done} صورة`, 'success');
};
|
| 1287 |
+
|
| 1288 |
+
/**
 * Run the server-side processing pipeline for image `idx` with the options
 * currently checked in the sidebar, then record result URLs and status on
 * the queue entry.
 * @param {number} idx - index into imageData
 * @param {boolean} [isCustom] - also send the stored manual crop rectangle
 */
async function runPipelineForIndex(idx, isCustom = false) {
    const data = imageData[idx];
    data.status = 'processing';
    renderQueue();

    const checked = (id) => document.getElementById(id).checked;
    const formData = new FormData();
    formData.append('name', data.name);
    formData.append('id_number', data.id_num);
    formData.append('do_restore', checked('step-restore'));
    formData.append('fidelity', document.getElementById('setting-fidelity').value);
    formData.append('do_rmbg', checked('step-rmbg'));
    formData.append('do_color', checked('step-color'));
    formData.append('do_retouch', checked('step-retouch'));
    formData.append('do_crop', checked('step-crop'));
    formData.append('add_studio_name', checked('add-studio'));
    formData.append('add_logo', checked('add-logo'));
    formData.append('add_date', checked('add-date'));

    const frameVal = document.getElementById('frame-select').value;
    if (frameVal) formData.append('frame_name', frameVal);

    // New Frame Color
    const frameColor = document.getElementById('frame-color');
    if (frameColor) formData.append('frame_color', frameColor.value);

    // Manual crop rectangle from the crop editor, when requested.
    if (isCustom && data.custom_crop) {
        ['x1', 'y1', 'x2', 'y2'].forEach(k => formData.append(k, data.custom_crop[k]));
    }

    try {
        const res = await fetch(`/process/${data.id}`, { method: 'POST', body: formData });
        const result = await res.json();
        if (result.error) throw new Error(result.error);
        data.result_url = result.result_url;
        data.preview_url = result.preview_url;
        data.version++; // cache-bust subsequent loads of this image's URLs
        data.status = 'done';
        // Remember which steps produced this result.
        data.steps = {
            restore: checked('step-restore'),
            rmbg: checked('step-rmbg'),
            color: checked('step-color'),
            retouch: checked('step-retouch'),
            crop: checked('step-crop') || !!data.custom_crop
        };
    } catch (e) {
        console.error("Processing", e);
        data.status = 'error';
        showToast(`خطأ في ${data.filename}`, 'error');
    }
    renderQueue();
}
|
| 1340 |
+
|
| 1341 |
+
async function reprocessCurrent() {
    // Re-run the pipeline for the selected image, discarding its old result.
    if (currentIndex === -1 || !isAIReady) return;
    saveCurrentFields();
    const entry = imageData[currentIndex];
    Object.assign(entry, { result_url: null, status: 'waiting', steps: null });
    renderQueue();
    // Re-apply the manual crop if one was previously set for this image.
    const hasCustomCrop = !!entry.custom_crop;
    await runPipelineForIndex(currentIndex, hasCustomCrop);
    selectImage(currentIndex);
    showToast('تمت إعادة المعالجة', 'success');
}
|
| 1353 |
+
|
| 1354 |
+
// ══════════════════════════════════════════════════
|
| 1355 |
+
// CROP & DOWNLOAD
|
| 1356 |
+
// ══════════════════════════════════════════════════
|
| 1357 |
+
editBtn.onclick = () => {
    // Open the manual-crop editor over the currently selected image.
    // Guard: with no selection imageData[-1] is undefined and the original
    // handler threw on `.thumb_url`.
    if (currentIndex === -1) return;
    const data = imageData[currentIndex];
    cropperImg.src = data.thumb_url;
    previewContainer.classList.add('hidden');
    baToggleWrapper.classList.add('hidden');
    zoomHint.classList.add('hidden');
    cropperContainer.classList.remove('hidden');
    // Drop any stale Cropper instance before creating a fresh one.
    if (cropper) cropper.destroy();
    // 5:7 portrait aspect; viewMode 1 confines the crop box to the image.
    cropper = new Cropper(cropperImg, { aspectRatio: 5/7, viewMode: 1, autoCropArea: 0.8 });
};
|
| 1367 |
+
function cancelCrop() {
    // Leave crop mode and restore the normal preview chrome.
    cropperContainer.classList.add('hidden');
    previewContainer.classList.remove('hidden');
    // The before/after toggle only makes sense once a result exists.
    const hasResult = currentIndex >= 0 && imageData[currentIndex].result_url;
    if (hasResult) baToggleWrapper.classList.remove('hidden');
    zoomHint.classList.remove('hidden');
}
|
| 1373 |
+
async function applyCrop() {
    // Convert the Cropper.js selection (display pixels) into original-image
    // pixel coordinates, store it on the entry, then reprocess with it.
    const box = cropper.getData(true); // rounded crop rect, display px
    const imgData = imageData[currentIndex];
    const displayImg = cropper.getImageData();
    // Scale factors from the on-screen image up to the full-resolution original.
    const sx = imgData.width / displayImg.naturalWidth;
    const sy = imgData.height / displayImg.naturalHeight;
    imgData.custom_crop = {
        x1: Math.max(0, Math.round(box.x * sx)),
        y1: Math.max(0, Math.round(box.y * sy)),
        x2: Math.min(imgData.width, Math.round((box.x + box.width) * sx)),
        y2: Math.min(imgData.height, Math.round((box.y + box.height) * sy))
    };
    cancelCrop();
    imgData.result_url = null;
    imgData.status = 'waiting';
    await runPipelineForIndex(currentIndex, true);
    selectImage(currentIndex);
}
|
| 1391 |
+
|
| 1392 |
+
downloadAllBtn.onclick = async () => {
    // Bundle every processed image's layout JPEG into one ZIP download.
    const processed = imageData.filter(d => d.result_url);
    if (processed.length === 0) return;
    const overlay = document.getElementById('batch-overlay');
    const counter = document.getElementById('batch-counter');
    overlay.classList.replace('hidden', 'flex');
    counter.textContent = 'ZIP...';
    try {
        const zip = new JSZip();
        const folder = zip.folder("studio_layouts");
        for (let data of processed) {
            const res = await fetch(data.result_url);
            const blob = await res.blob();
            folder.file(`${PathStem(data.filename)}_layout.jpg`, blob);
        }
        const content = await zip.generateAsync({ type: "blob" });
        const link = document.createElement('a');
        link.href = URL.createObjectURL(content);
        link.download = `Studio_Batch_${new Date().getTime()}.zip`;
        link.click();
        // Release the blob URL once the download has been handed to the
        // browser; the original leaked one object URL per batch download.
        URL.revokeObjectURL(link.href);
        showToast(`تم تحميل ${processed.length} صورة`, 'success');
    } finally {
        // Always dismiss the overlay, even if a fetch/zip step throws —
        // the original left the modal blocking the UI forever on error.
        overlay.classList.replace('flex', 'hidden');
    }
};
|
| 1414 |
+
|
| 1415 |
+
clearAllBtn.onclick = async () => {
    // Delete all images (server-side and client-side) after confirmation.
    if (!confirm("هل تريد حذف جميع الصور؟")) return;
    try {
        // Response body is unused; the original bound it to an unused `res`.
        await fetch('/clear-all', { method: 'POST' });
        imageData = [];
        currentIndex = -1;
        fileInput.value = "";
        mainPreview.classList.add('hidden');
        previewPlaceholder.classList.remove('hidden');
        baToggleWrapper.classList.add('hidden');
        zoomHint.classList.add('hidden');
        document.getElementById('student-name').value = "";
        document.getElementById('student-id').value = "";
        document.getElementById('current-filename').innerText = "";
        renderQueue();
        showToast("تم الحذف", 'success');
    } catch (e) {
        // The original swallowed errors silently (`catch (e) {}`), so a
        // failed server call looked like a successful no-op. Surface it.
        console.error("Clear all", e);
        showToast("فشل الحذف", 'error');
    }
};
|
| 1433 |
+
|
| 1434 |
+
function deleteImage(e, idx) {
    // Remove one image from the client-side queue after confirmation.
    // NOTE(review): only the browser state is touched here — presumably the
    // server copy is cleaned up elsewhere (e.g. /clear-all); confirm.
    e.stopPropagation();
    if (!confirm("حذف الصورة؟")) return;
    imageData.splice(idx, 1);
    if (!imageData.length) {
        // Queue emptied: reset the preview pane to its placeholder state.
        currentIndex = -1;
        mainPreview.classList.add('hidden');
        previewPlaceholder.classList.remove('hidden');
        baToggleWrapper.classList.add('hidden');
        zoomHint.classList.add('hidden');
        document.getElementById('student-name').value = "";
    } else if (currentIndex === idx) {
        // Deleted the selected image: select its predecessor (or the first).
        currentIndex = Math.max(0, currentIndex - 1);
        selectImage(currentIndex);
    } else if (currentIndex > idx) {
        // Items shifted left past the selection; keep pointing at the same image.
        currentIndex--;
    }
    renderQueue();
}
|
| 1455 |
+
|
| 1456 |
+
// Header-button / keyboard entry point: delete the currently selected image.
// Passes a stub event since deleteImage expects one from the queue buttons.
function deleteSelected() {
    if (currentIndex === -1) return;
    deleteImage({ stopPropagation: () => {} }, currentIndex);
}
|
| 1460 |
+
|
| 1461 |
+
saveBtn.onclick = () => {
    // Download the selected image's processed layout as a JPEG.
    const entry = imageData[currentIndex];
    if (!entry || !entry.result_url) return;
    const anchor = document.createElement('a');
    anchor.href = entry.result_url;
    anchor.download = `${PathStem(entry.filename)}_layout.jpg`;
    anchor.click();
};
|
| 1469 |
+
|
| 1470 |
+
function PathStem(filename) { return filename.substring(0, filename.lastIndexOf('.')) || filename; }
|
| 1471 |
+
|
| 1472 |
+
window.addEventListener('keydown', (e) => {
    // Global keyboard shortcuts; ignored while typing in form fields.
    if (e.target.tagName === 'INPUT' || e.target.tagName === 'TEXTAREA') return;
    if (e.key === 'Escape') { closeZoom(); document.getElementById('shortcuts-modal').classList.remove('active'); toggleDrawer(false); return; }
    if (e.key === 'ArrowRight') navigate(1);
    else if (e.key === 'ArrowLeft') navigate(-1);
    else if (e.key === 'Delete') deleteSelected();
    else if (e.key === 'Enter') { saveCurrentFields(); if (currentIndex < imageData.length - 1) navigate(1); }
    else if (e.key === 's' && (e.ctrlKey || e.metaKey)) {
        // Always suppress the browser's own Save dialog.
        e.preventDefault();
        // Guard: the original called runPipelineForIndex(-1) with no image
        // selected, which throws on imageData[-1].
        if (currentIndex !== -1) { saveCurrentFields(); runPipelineForIndex(currentIndex); }
    }
});
|
| 1481 |
+
</script>
|
| 1482 |
+
</body>
|
| 1483 |
+
</html>
|
requirements.txt
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core ML
|
| 2 |
+
torch
|
| 3 |
+
torchvision
|
| 4 |
+
torchaudio
|
| 5 |
+
transformers==4.48.2
|
| 6 |
+
accelerate
|
| 7 |
+
timm
|
| 8 |
+
kornia
|
| 9 |
+
mediapipe==0.10.9
|
| 10 |
+
devicetorch
|
| 11 |
+
|
| 12 |
+
# Web Frameworks
|
| 13 |
+
fastapi
|
| 14 |
+
uvicorn
|
| 15 |
+
flask
|
| 16 |
+
python-multipart
|
| 17 |
+
jinja2
|
| 18 |
+
MarkupSafe
|
| 19 |
+
requests
|
| 20 |
+
|
| 21 |
+
# Image Processing
|
| 22 |
+
Pillow>=10.0.0
|
| 23 |
+
arabic-reshaper
|
| 24 |
+
python-bidi
|
| 25 |
+
opencv-python
|
| 26 |
+
numpy<2.0.0
|
| 27 |
+
protobuf<=3.20.3
|
| 28 |
+
scipy
|
| 29 |
+
scikit-image
|
| 30 |
+
|
| 31 |
+
# GUI (optional for the web UI; included to match id-maker/requirements.txt)
|
| 32 |
+
pywebview
|
result.jpg
ADDED
|
Git LFS Details
|
start.sh
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Launch the ID Maker web UI on Hugging Face's default port (7860).
export PORT=7860
# CODEFORMER_API_URL is intentionally NOT exported here, so the app falls
# back to the value in id-maker/config/settings.json
# (which points to the external HF Space).

echo "Starting ID Maker Web UI on port $PORT..."
cd /app/id-maker && python web/server.py
|