Update main.py
Browse files
main.py
CHANGED
|
@@ -2,28 +2,36 @@ from __future__ import annotations
|
|
| 2 |
from fastapi import FastAPI, File, UploadFile
|
| 3 |
from fastapi.responses import FileResponse
|
| 4 |
from fastapi.staticfiles import StaticFiles
|
|
|
|
|
|
|
| 5 |
import shutil
|
| 6 |
-
import
|
| 7 |
import numpy as np
|
| 8 |
-
|
| 9 |
-
from
|
|
|
|
|
|
|
| 10 |
|
| 11 |
app = FastAPI()
|
| 12 |
-
model =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
@app.post("/upload/")
|
| 15 |
-
async def process_image(file: UploadFile = File(...)):
|
|
|
|
|
|
|
|
|
|
| 16 |
# Save the uploaded image locally
|
| 17 |
with open("uploaded_image.jpg", "wb") as buffer:
|
| 18 |
shutil.copyfileobj(file.file, buffer)
|
| 19 |
|
| 20 |
-
# Load the model (assuming 'cartoon1' is always used)
|
| 21 |
-
exstyle, load_info = model.load_model('cartoon1')
|
| 22 |
-
|
| 23 |
# Process the uploaded image
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
processed_image, message = model.image_toonify(aligned_face, exstyle, style_degree=0.5, style_type='cartoon1')
|
| 27 |
|
| 28 |
# Save the processed image
|
| 29 |
with open("result_image.jpg", "wb") as result_buffer:
|
|
@@ -32,6 +40,7 @@ async def process_image(file: UploadFile = File(...)):
|
|
| 32 |
# Return the processed image
|
| 33 |
return FileResponse("result_image.jpg", media_type="image/jpeg", headers={"Content-Disposition": "attachment; filename=result_image.jpg"})
|
| 34 |
|
|
|
|
| 35 |
app.mount("/", StaticFiles(directory="AB", html=True), name="static")
|
| 36 |
|
| 37 |
@app.get("/")
|
|
|
|
import shutil

import cv2
import dlib
import numpy as np
import torch  # needed: torch.cuda.is_available() is called at startup
import torch.nn.functional as F
from fastapi import FastAPI, File, Form, UploadFile
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from torchvision import transforms

from vtoonify_model import Model  # Project-local wrapper around VToonify inference
| 14 |
|
| 15 |
app = FastAPI()
|
| 16 |
+
model = None
|
| 17 |
+
|
@app.on_event("startup")
async def load_model() -> None:
    """Load the VToonify model once when the application starts.

    Stores the instance in the module-level ``model`` so request
    handlers can reach it.  Uses CUDA when available, else CPU.

    NOTE(review): ``@app.on_event`` is deprecated in recent FastAPI
    in favour of lifespan handlers — consider migrating.
    """
    global model
    # Requires `import torch` at module level: `import torch.nn.functional
    # as F` binds only the name `F`, not `torch`, so the original raised
    # NameError here.  Fixed in the import block.
    model = Model(device='cuda' if torch.cuda.is_available() else 'cpu')
|
| 22 |
|
| 23 |
@app.post("/upload/")
|
| 24 |
+
async def process_image(file: UploadFile = File(...), top: int = Form(...), bottom: int = Form(...), left: int = Form(...), right: int = Form(...)):
|
| 25 |
+
if model is None:
|
| 26 |
+
return {"error": "Model not loaded."}
|
| 27 |
+
|
| 28 |
# Save the uploaded image locally
|
| 29 |
with open("uploaded_image.jpg", "wb") as buffer:
|
| 30 |
shutil.copyfileobj(file.file, buffer)
|
| 31 |
|
|
|
|
|
|
|
|
|
|
| 32 |
# Process the uploaded image
|
| 33 |
+
aligned_face, instyle, message = model.detect_and_align_image("uploaded_image.jpg", top, bottom, left, right)
|
| 34 |
+
processed_image, message = model.image_toonify(aligned_face, instyle, model.exstyle, style_degree=0.5, style_type='cartoon1')
|
|
|
|
| 35 |
|
| 36 |
# Save the processed image
|
| 37 |
with open("result_image.jpg", "wb") as result_buffer:
|
|
|
|
| 40 |
# Return the processed image
|
| 41 |
return FileResponse("result_image.jpg", media_type="image/jpeg", headers={"Content-Disposition": "attachment; filename=result_image.jpg"})
|
| 42 |
|
| 43 |
+
|
| 44 |
app.mount("/", StaticFiles(directory="AB", html=True), name="static")
|
| 45 |
|
| 46 |
@app.get("/")
|