Spaces:
Sleeping
Sleeping
File size: 5,470 Bytes
9217d38 5dc59e4 9217d38 5dc59e4 9217d38 5dc59e4 0045776 41fdb7f 5dc59e4 0045776 540fae5 0045776 5161bf3 0045776 5161bf3 0045776 5161bf3 0045776 9217d38 0045776 9217d38 540fae5 9217d38 540fae5 9217d38 540fae5 9217d38 7e0eb53 5dc59e4 9217d38 5dc59e4 10b2015 5dc59e4 9217d38 540fae5 5161bf3 9217d38 5dc59e4 9217d38 5dc59e4 41fdb7f 5dc59e4 9217d38 5dc59e4 0045776 5dc59e4 540fae5 5dc59e4 7e0eb53 5dc59e4 10b2015 5dc59e4 7e0eb53 10b2015 5dc59e4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 |
import os
import uuid

import cv2
import gradio as gr
import numpy as np
import pyheif
import torch
from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline
from PIL import Image
# --- OUTPUT DIRECTORY ---
# Filtered images are written here so Gradio can offer them for download.
OUTPUT_DIR = "outputs"
os.makedirs(OUTPUT_DIR, exist_ok=True)
# --- DEVICE SETUP ---
device = "cpu" # Force CPU
# --- LOAD STABLE DIFFUSION PIPELINE ---
MODEL_NAME = "Lykon/anything-cartoon" # original model
PUBLIC_MODEL = "runwayml/stable-diffusion-v1-5" # fallback
# Read from the environment so a private-repo token never lands in code.
HF_TOKEN = os.getenv("HF_TOKEN", None) # optional token for private repo
# Load an *img2img* pipeline: the "AI Cartoon" filter calls
# pipe(prompt=..., image=...), and the plain text-to-image
# StableDiffusionPipeline does not accept an `image` argument (the call
# raised TypeError before).  Img2Img loads the same model repos/weights,
# so both MODEL_NAME and PUBLIC_MODEL work unchanged.
try:
    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float32,  # fp32: no fp16 kernels on CPU
        use_auth_token=HF_TOKEN
    )
except Exception as e:
    # Best-effort fallback: the primary repo may be private, renamed or
    # unreachable — fall back to a public checkpoint rather than crash.
    print(f"Failed to load '{MODEL_NAME}': {e}")
    print(f"Falling back to public model '{PUBLIC_MODEL}'")
    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
        PUBLIC_MODEL,
        torch_dtype=torch.float32
    )
pipe.to(device)
# --- HELPER FUNCTIONS ---
def load_image(file):
    """Open an uploaded image, with HEIC/HEIF support via pyheif.

    Accepts either a filesystem path (str) or a file-like object that has
    a ``.name`` attribute (as Gradio uploads do).  Returns a PIL.Image.
    """
    # Determine the extension from the object's name or the path string.
    name = file.name if hasattr(file, "name") else file
    ext = os.path.splitext(name)[1].lower()
    if ext in (".heic", ".heif"):
        # pyheif needs the raw container bytes.  Rewind stream inputs
        # first so a previously-consumed file object decodes from the
        # start (the old code read without seeking and could get b"").
        if hasattr(file, "read"):
            if hasattr(file, "seek"):
                file.seek(0)
            data = file.read()
        else:
            with open(file, "rb") as f:
                data = f.read()
        heif_file = pyheif.read_heif(data)
        return Image.frombytes(
            heif_file.mode,
            heif_file.size,
            heif_file.data,
            "raw",
            heif_file.mode,
            heif_file.stride,
        )
    # Everything else PIL understands directly (PNG/JPEG/...).  Rewind
    # stream inputs so Image.open sees the header; paths need no seek.
    # (The original had two identical branches here — merged.)
    if hasattr(file, "seek"):
        file.seek(0)
    return Image.open(file)
def apply_filters(file, mode):
    """Apply the selected filter to an uploaded image and save the result.

    Parameters
    ----------
    file : str or file-like
        Uploaded image (PNG/JPG/HEIC — see ``load_image``).
    mode : str
        One of the filter names offered by the Radio input.

    Returns
    -------
    tuple[numpy.ndarray, str]
        The filtered RGB image and the path of the PNG written to
        ``OUTPUT_DIR``.
    """
    # Force 3-channel RGB: PIL may yield RGBA/L/P images (alpha PNGs,
    # grayscale, palettes), which would break COLOR_RGB2BGR below.
    image = load_image(file).convert("RGB")
    img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    if mode == "Gray":
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        processed = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
    elif mode == "Scratch":
        # Scratch/noise removal: non-local-means denoising on grayscale.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        denoised = cv2.fastNlMeansDenoising(gray, h=10)
        processed = cv2.cvtColor(denoised, cv2.COLOR_GRAY2RGB)
    elif mode == "Pencil Sketch":
        # Color-dodge sketch: gray divided by the blurred inverse.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        inv = 255 - gray
        blur = cv2.GaussianBlur(inv, (21, 21), 0)
        sketch = cv2.divide(gray, 255 - blur, scale=256.0)
        processed = cv2.cvtColor(sketch, cv2.COLOR_GRAY2RGB)
    elif mode == "Cartoon":
        # Classic OpenCV cartooning: a smoothed color layer AND'ed with
        # adaptive-threshold edges.
        num_down = 2          # pyramid down/up steps (speeds up bilateral)
        num_bilateral = 7     # repeated bilateral passes for flat colors
        img_color = img.copy()
        for _ in range(num_down):
            img_color = cv2.pyrDown(img_color)
        for _ in range(num_bilateral):
            img_color = cv2.bilateralFilter(img_color, d=9, sigmaColor=75, sigmaSpace=75)
        for _ in range(num_down):
            img_color = cv2.pyrUp(img_color)
        # pyrDown/pyrUp rounds odd dimensions — resize back if needed.
        if img_color.shape != img.shape:
            img_color = cv2.resize(img_color, (img.shape[1], img.shape[0]))
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_blur = cv2.medianBlur(img_gray, 7)
        edges = cv2.adaptiveThreshold(img_blur, 255,
                                      cv2.ADAPTIVE_THRESH_MEAN_C,
                                      cv2.THRESH_BINARY, 9, 2)
        edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
        cartoon = cv2.bitwise_and(img_color, edges)
        processed = cv2.cvtColor(cartoon, cv2.COLOR_BGR2RGB)
    elif mode == "AI Cartoon":
        # Stable Diffusion stylization (CPU-only — slow).
        # NOTE(review): passing image= requires an img2img pipeline at
        # module level; a plain text2img StableDiffusionPipeline rejects
        # the `image` keyword — confirm which class `pipe` is.
        pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        prompt = "cartoon, anime style, vibrant colors, detailed, smooth"
        cartoon_img = pipe(prompt=prompt, image=pil_img, num_inference_steps=30).images[0]
        processed = np.array(cartoon_img)
    elif mode == "Sepia":
        # Classic sepia weights.  `img` is BGR, so the standard RGB sepia
        # matrix must be flipped along BOTH axes: rows reorder the output
        # channels to B,G,R and columns reorder the input channels.  The
        # previous matrix flipped only the rows, swapping the red/blue
        # input weights and tinting the result blue instead of brown.
        sepia_filter = np.array([[0.131, 0.534, 0.272],
                                 [0.168, 0.686, 0.349],
                                 [0.189, 0.769, 0.393]])
        sepia = cv2.transform(img, sepia_filter)
        sepia = np.clip(sepia, 0, 255).astype(np.uint8)
        processed = cv2.cvtColor(sepia, cv2.COLOR_BGR2RGB)
    elif mode == "Edge Detection":
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 100, 200)
        processed = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    elif mode == "HSV":
        # NOTE(review): BGR2HSV followed by HSV2RGB round-trips back to
        # the original colors, so this currently matches the "RGB" mode.
        # If a false-color HSV visualization was intended, use
        # `processed = hsv` directly — confirm intent before changing.
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        processed = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    else:
        # "RGB" and any unrecognized mode: return the image unfiltered.
        processed = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Unique filename so concurrent users never overwrite each other.
    output_filename = f"{uuid.uuid4().hex}_{mode.replace(' ', '_')}.png"
    output_path = os.path.join(OUTPUT_DIR, output_filename)
    Image.fromarray(processed).save(output_path)
    return processed, output_path
# --- GRADIO INTERFACE ---
# --- GRADIO INTERFACE ---
# Filter names shown in the UI; must match the branches in apply_filters.
FILTER_CHOICES = [
    "Gray", "Scratch", "Pencil Sketch", "Cartoon", "AI Cartoon",
    "Sepia", "Edge Detection", "RGB", "HSV",
]

# type="filepath" hands apply_filters a path string rather than a stream.
upload_input = gr.File(label="Upload Image (PNG, JPG, HEIC)", type="filepath")
mode_input = gr.Radio(choices=FILTER_CHOICES, value="Gray", label="Filter Mode")
preview_output = gr.Image(type="numpy", label="Filtered Image")
download_output = gr.File(label="⬇️ Download Image")

demo = gr.Interface(
    fn=apply_filters,
    inputs=[upload_input, mode_input],
    outputs=[preview_output, download_output],
    title="🎨 Image Filter Lab",
    description="Apply filters including AI Cartoon. Supports iPhone HEIC images.",
    allow_flagging="never",
    theme="soft",
)

if __name__ == "__main__":
    demo.launch()
|