Update app.py
Browse files
app.py
CHANGED
|
@@ -1,75 +1,41 @@
|
|
| 1 |
-
import gradio as gr
|
| 2 |
-
from PIL import Image
|
| 3 |
import torch
|
| 4 |
from torchvision import transforms
|
| 5 |
-
|
| 6 |
-
import
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
# ---------------------
|
| 11 |
-
class ToonifyLite(nn.Module):
    """Small encoder-decoder CNN for toon-style image translation.

    Takes and returns (N, 3, H, W) tensors; H and W must be divisible
    by 8 for the output resolution to match the input.
    """

    def __init__(self):
        super().__init__()

        # Encoder: three stride-2 convolutions shrink H/W by 8x overall.
        enc_layers = []
        for cin, cout, stride in ((3, 32, 1), (32, 64, 2), (64, 128, 2), (128, 256, 2)):
            enc_layers += [nn.Conv2d(cin, cout, 3, stride, 1), nn.ReLU()]
        self.encoder = nn.Sequential(*enc_layers)

        # Decoder: nearest-neighbour 2x upsampling after each conv/ReLU pair
        # restores the input resolution; a final conv maps back to RGB.
        dec_layers = []
        for cin, cout in ((256, 128), (128, 64), (64, 32)):
            dec_layers += [
                nn.Conv2d(cin, cout, 3, 1, 1),
                nn.ReLU(),
                nn.Upsample(scale_factor=2, mode="nearest"),
            ]
        dec_layers.append(nn.Conv2d(32, 3, 3, 1, 1))
        self.decoder = nn.Sequential(*dec_layers)

    def forward(self, x):
        """Encode then decode x; output is shaped like the input."""
        return self.decoder(self.encoder(x))
|
| 36 |
|
| 37 |
-
#
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
device = torch.device("cpu")
|
| 41 |
-
model = ToonifyLite().to(device)
|
| 42 |
model.eval()
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
# تابع تبدیل
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
transforms.Resize((
|
| 50 |
-
transforms.ToTensor()
|
| 51 |
])
|
| 52 |
-
|
| 53 |
|
| 54 |
with torch.no_grad():
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
# نرمال سازی خروجی
|
| 58 |
-
output = output.squeeze(0).cpu()
|
| 59 |
-
output = (output - output.min()) / (output.max() - output.min() + 1e-5)
|
| 60 |
-
output = output.clamp(0, 1)
|
| 61 |
|
| 62 |
-
|
|
|
|
|
|
|
|
|
|
| 63 |
|
| 64 |
-
#
|
| 65 |
-
# رابط کاربری Gradio
|
| 66 |
-
# ---------------------
|
| 67 |
iface = gr.Interface(
|
| 68 |
-
fn=
|
| 69 |
-
inputs=gr.Image(type="pil"
|
| 70 |
-
outputs=gr.Image(type="pil"
|
| 71 |
-
title="Toonify
|
| 72 |
-
description="
|
| 73 |
)
|
| 74 |
|
| 75 |
-
iface.launch(
|
|
|
|
|
|
|
|
|
|
| 1 |
import torch
|
| 2 |
from torchvision import transforms
|
| 3 |
+
from PIL import Image
|
| 4 |
+
import requests
|
| 5 |
+
from io import BytesIO
|
| 6 |
+
import gradio as gr
|
| 7 |
+
from config import MODEL_URL, DEVICE, IMAGE_SIZE
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
+
# Load the pretrained toonify model (original comment: "load the model
# directly from HF").
# NOTE(review): model_path / MODEL_URL is assigned but never used -- the
# model is actually fetched via torch.hub from the 'justinpinkney/toonify'
# GitHub repo, not from MODEL_URL. Confirm which source is intended.
model_path = MODEL_URL
model = torch.hub.load('justinpinkney/toonify', 'toonify')
# Switch to inference behaviour (e.g. dropout/batch-norm), then move the
# model to the device configured in config.py.
model.eval()
model.to(DEVICE)
|
| 14 |
+
|
| 15 |
+
# تابع تبدیل عکس به Toonify
|
| 16 |
+
def convert_to_toon(image: Image.Image) -> Image.Image:
    """Run the toonify model on a PIL image and return the stylized PIL image.

    The input is resized to IMAGE_SIZE x IMAGE_SIZE, so the output always
    has that size regardless of the input resolution.
    """
    # Resize and convert to a (1, C, H, W) float tensor on the target device.
    preprocess = transforms.Compose([
        transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
        transforms.ToTensor(),
    ])
    img_tensor = preprocess(image).unsqueeze(0).to(DEVICE)

    with torch.no_grad():
        output_tensor = model(img_tensor)

    # Drop only the batch dimension -- a bare squeeze() would also remove a
    # singleton channel dimension -- and clamp to [0, 1]: ToPILImage scales
    # by 255 and casts to uint8, so out-of-range values would wrap around.
    output_image = output_tensor.squeeze(0).cpu().clamp(0.0, 1.0)
    return transforms.ToPILImage()(output_image)
|
| 31 |
|
| 32 |
+
# Gradio UI: one image in, one image out, handled by convert_to_toon.
toon_input = gr.Image(type="pil")
toon_output = gr.Image(type="pil")

iface = gr.Interface(
    convert_to_toon,
    toon_input,
    toon_output,
    title="Toonify Face Converter",
    description="تبدیل عکس شما به کارتون مدرن با حفظ جزئیات چهره، بدون نیاز به پرامپت!",
)

iface.launch()
|