basic
Browse files- .env.example +2 -0
- .gitignore +4 -0
- .huggingface.yaml +10 -0
- README.md +42 -7
- app.py +75 -0
- requirements.txt +4 -0
.env.example
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
REPLICATE_API_TOKEN=your_replicate_api_key_here
|
| 2 |
+
HF_TOKEN=your_huggingface_token_here
|
.gitignore
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
.env
|
| 4 |
+
outs/
|
.huggingface.yaml
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
sdk: gradio
|
| 2 |
+
sdk_version: 5.0.0
|
| 3 |
+
|
| 4 |
+
build:
|
| 5 |
+
gpu: false
|
| 6 |
+
python_version: "3.10"
|
| 7 |
+
|
| 8 |
+
runtime:
|
| 9 |
+
model:
|
| 10 |
+
gpus: 0
|
README.md
CHANGED
|
@@ -1,14 +1,49 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 5.31.0
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
| 11 |
-
short_description:
|
| 12 |
---
|
| 13 |
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Image Color Palette Extractor
|
| 3 |
+
emoji: 🎨
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: red
|
| 6 |
sdk: gradio
|
|
|
|
| 7 |
app_file: app.py
|
| 8 |
pinned: false
|
| 9 |
license: mit
|
| 10 |
+
short_description: Extract dominant colors from images using advanced clustering
|
| 11 |
---
|
| 12 |
|
| 13 |
+
## Image Color Palette Extractor
|
| 14 |
+
|
| 15 |
+
This application extracts dominant colors from images using an advanced color clustering algorithm. It uses LAB color space for better color perception and provides a visual palette of the most prominent colors in the image.
|
| 16 |
+
|
| 17 |
+
## Features
|
| 18 |
+
|
| 19 |
+
- Upload an image or select a preset image.
|
| 20 |
+
- Convert colors to LAB space for perceptually accurate clustering.
|
| 21 |
+
- Cluster pixels with KMeans and rank colors by frequency.
|
| 22 |
+
- Frequency-weighted pixel sampling keeps large images fast.
|
| 23 |
+
- Choose between 2 and 10 palette colors with a slider.
|
| 24 |
+
|
| 25 |
+
## USAGE
|
| 26 |
+
|
| 27 |
+
1. Upload your image or select a preset
|
| 28 |
+
2. Choose the number of colors with the slider
|
| 29 |
+
3. The extracted color palette will appear on the right
|
| 30 |
+
|
| 31 |
+
## Deployment Notes
|
| 32 |
+
|
| 33 |
+
This application is lightweight and CPU-only: color clustering runs with scikit-learn and OpenCV, so no model downloads or GPUs are required, which keeps deployment on platforms like Hugging Face Spaces fast.
|
| 34 |
+
|
| 35 |
+
### Environment Variables
|
| 36 |
+
|
| 37 |
+
- `REPLICATE_API_TOKEN`: Your Replicate API token for accessing the flux-fill-dev model
|
| 38 |
+
|
| 39 |
+
### Docker Deployment
|
| 40 |
+
|
| 41 |
+
```bash
|
| 42 |
+
docker build -t smile-enhancer .
|
| 43 |
+
docker run -p 7860:7860 smile-enhancer
|
| 44 |
+
docker run -p 7860:7860 --env REPLICATE_API_TOKEN=your_token_here smile-enhancer
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
### Hugging Face Deployment
|
| 48 |
+
|
| 49 |
+
The application is configured to work with Hugging Face Spaces using the Docker SDK. Make sure to set the `REPLICATE_API_TOKEN` secret in your Space settings.
|
app.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import numpy as np
|
| 3 |
+
import cv2
|
| 4 |
+
from sklearn.cluster import KMeans
|
| 5 |
+
from collections import Counter
|
| 6 |
+
|
| 7 |
+
def extract_palette(image, num_colors=5):
    """Return the ``num_colors`` dominant colors of ``image`` as RGB rows.

    Parameters
    ----------
    image : np.ndarray
        H x W x 3 uint8 RGB image. An alpha channel, if present, is dropped.
    num_colors : int
        Number of cluster centers / palette entries to extract.

    Returns
    -------
    np.ndarray
        ``(num_colors, 3)`` uint8 array of RGB colors, most frequent first.
    """
    # Drop an alpha channel so cvtColor/reshape see a plain 3-channel image
    # (Gradio can hand back RGBA for transparent PNGs; the original code
    # crashed on those).
    if image.ndim == 3 and image.shape[2] == 4:
        image = image[:, :, :3]

    # Cluster in LAB space: Euclidean distance there tracks perceived color
    # difference far better than in RGB.
    lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
    pixels = lab.reshape(-1, 3)

    # For large images, sample 20k pixels weighted by color frequency so the
    # sample keeps the original color distribution.
    max_samples = 20000
    if len(pixels) > max_samples:
        color_counts = Counter(map(tuple, pixels))
        unique_colors = np.array(list(color_counts.keys()))
        weights = np.array(list(color_counts.values()), dtype=np.float64)
        weights /= weights.sum()
        # Bug fix: the original drew from the unseeded global np.random state,
        # making palettes non-reproducible despite KMeans' random_state=42.
        rng = np.random.default_rng(42)
        indices = rng.choice(len(unique_colors), max_samples, p=weights, replace=True)
        pixels = unique_colors[indices]

    kmeans = KMeans(n_clusters=num_colors, n_init='auto', random_state=42)
    kmeans.fit(pixels)

    # Convert the LAB cluster centers back to RGB (cvtColor needs a 3-D
    # uint8 array, hence the temporary 1 x k x 3 reshape).
    centers = kmeans.cluster_centers_.astype(np.uint8)
    centers = cv2.cvtColor(centers.reshape(1, -1, 3), cv2.COLOR_LAB2RGB)
    centers = centers.reshape(-1, 3)

    # Order palette entries by cluster size: most frequent color first.
    counts = np.bincount(kmeans.labels_)
    return centers[np.argsort(-counts)]
|
| 44 |
+
|
| 45 |
+
def visualize_palette(image, num_colors):
    """Render the dominant-color palette of ``image`` and return its file path.

    Parameters
    ----------
    image : np.ndarray
        H x W x 3 uint8 RGB image from the Gradio input.
    num_colors : int
        Number of palette cells to draw.

    Returns
    -------
    str
        Path of the written palette PNG.

    Raises
    ------
    IOError
        If the palette image cannot be written to disk.
    """
    import os

    palette = extract_palette(image, num_colors)

    # One 100x100 cell per color, laid out left to right, most frequent first.
    cell_width = 100
    cell_height = 100
    palette_image = np.zeros((cell_height, cell_width * len(palette), 3), dtype=np.uint8)
    for i, color in enumerate(palette):
        start_x = i * cell_width
        palette_image[:, start_x:start_x + cell_width] = color

    output_path = "outs/palette.png"
    # Bug fix: the output directory was never created, so on a fresh checkout
    # cv2.imwrite silently returned False and the UI showed no result.
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    # cv2 expects BGR channel order when writing to disk.
    if not cv2.imwrite(output_path, cv2.cvtColor(palette_image, cv2.COLOR_RGB2BGR)):
        raise IOError(f"Failed to write palette image to {output_path}")

    return output_path
|
| 64 |
+
|
| 65 |
+
# Gradio UI wiring: an image upload plus a slider selecting how many dominant
# colors to extract; the output widget shows the rendered palette image.
_image_input = gr.Image(type="numpy")
_color_count = gr.Slider(2, 10, step=1, value=5, label="Number of Colors")

interface = gr.Interface(
    fn=visualize_palette,
    inputs=[_image_input, _color_count],
    outputs="image",
    title="Color Palette Extractor",
    description="Upload an image and extract a fixed number of dominant colors.",
)

# Start the web server.
interface.launch()
|
requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio>=4.19.2
|
| 2 |
+
numpy>=1.24.0
|
| 3 |
+
opencv-python-headless>=4.8.0
|
| 4 |
+
scikit-learn>=1.3.0
|