| from flask import Flask, request, send_file, jsonify
|
| from werkzeug.utils import secure_filename
|
| import os
|
| from PIL import Image, ImageDraw, ImageFont
|
| import numpy as np
|
| from datetime import datetime
|
| from PIL import Image
|
| import numpy as np
|
| import os
|
| import torch
|
| from datetime import datetime
|
| import time
|
| import collections
|
| from utils import init_weight_dtype, resize_and_crop, resize_and_padding
|
| from model.pipeline import CatVTONPipeline
|
| from model.cloth_masker import AutoMasker, vis_mask
|
| from diffusers.image_processor import VaeImageProcessor
|
| from huggingface_hub import snapshot_download
|
|
|
|
|
def image_grid(imgs, rows, cols):
    """Paste equally-sized PIL images into a single grid image.

    Args:
        imgs: sequence of PIL images, all the same size, in row-major order.
        rows: number of grid rows.
        cols: number of grid columns.

    Returns:
        A new "RGB" PIL image of size (cols * w, rows * h), where (w, h)
        is the size of the first image.

    Raises:
        ValueError: if ``len(imgs) != rows * cols``.
    """
    # Validate with a real exception: `assert` is stripped under `python -O`.
    if len(imgs) != rows * cols:
        raise ValueError(
            f"Expected {rows * cols} images for a {rows}x{cols} grid, got {len(imgs)}"
        )

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    # Fill left-to-right, top-to-bottom.
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
|
|
|
|
|
def inference(
    person_image,
    mask_image,
    cloth_image,
    cloth_type,
    image_size=(1024, 768),
    num_inference_steps=50,
    guidance_scale=2.5,
    seed=42,
    show_type="result only"
):
    """Run one CatVTON try-on pass and return the composed output image.

    Relies on the module-level globals ``pipeline``, ``automasker``,
    ``mask_processor`` and ``tmp_folder`` set up at import time.

    Args:
        person_image: PIL image of the person.
        mask_image: optional PIL mask. A mask whose pixels are all one value
            carries no region information and is treated as "no mask",
            triggering the automatic masker.
        cloth_image: PIL image of the garment.
        cloth_type: garment category for the auto-masker (e.g. "upper").
        image_size: (height, width) the inputs are resized to.
        num_inference_steps: number of diffusion denoising steps.
        guidance_scale: classifier-free guidance strength.
        seed: RNG seed; -1 disables the fixed generator (non-deterministic).
        show_type: "result only" returns just the result; "input & result"
            adds a person+cloth column; any other value adds a
            person+mask+cloth column.

    Returns:
        A PIL image: the bare try-on result, or a side-by-side composite
        depending on ``show_type``.
    """
    start_time = time.time()
    height, width = image_size

    # A mask with a single unique value is empty/uniform — discard it and
    # fall back to the automatic masker below.
    if len(np.unique(np.array(mask_image))) == 1:
        mask_image = None
    else:
        # Binarize: any nonzero pixel becomes foreground (255).
        mask_image = np.array(mask_image)
        mask_image[mask_image > 0] = 255
        mask_image = Image.fromarray(mask_image)

    # Results are grouped by date: <tmp_folder>/YYYYMMDD/HHMMSS.png.
    date_str = datetime.now().strftime("%Y%m%d%H%M%S")
    result_save_path = os.path.join(tmp_folder, date_str[:8], date_str[8:] + ".png")
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(os.path.join(tmp_folder, date_str[:8]), exist_ok=True)

    # Fixed seed -> reproducible sampling; seed == -1 leaves generator None.
    generator = None
    if seed != -1:
        generator = torch.Generator(device='cuda').manual_seed(seed)

    person_image = resize_and_crop(person_image, (width, height))
    cloth_image = resize_and_padding(cloth_image, (width, height))

    if mask_image is not None:
        mask_image = resize_and_crop(mask_image, (width, height))
    else:
        mask_image = automasker(
            person_image,
            cloth_type
        )['mask']
    # Soften mask edges so the inpainted region blends smoothly.
    mask_image = mask_processor.blur(mask_image, blur_factor=9)

    result_image = pipeline(
        image=person_image,
        condition_image=cloth_image,
        mask=mask_image,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=generator
    )[0]

    print("FPS: ", 1.0 / (time.time() - start_time))

    # Persist a debug grid: person | masked person | cloth | result.
    masked_person = vis_mask(person_image, mask_image)
    save_result_image = image_grid([person_image, masked_person, cloth_image, result_image], 1, 4)
    save_result_image.save(result_save_path)

    if show_type == "result only":
        return result_image

    # Compose the inputs as a thin column to the left of the result.
    width, height = person_image.size
    if show_type == "input & result":
        condition_width = width // 2
        conditions = image_grid([person_image, cloth_image], 2, 1)
    else:
        condition_width = width // 3
        conditions = image_grid([person_image, masked_person, cloth_image], 3, 1)
    conditions = conditions.resize((condition_width, height), Image.NEAREST)
    new_result_image = Image.new("RGB", (width + condition_width + 5, height))
    new_result_image.paste(conditions, (0, 0))
    new_result_image.paste(result_image, (condition_width + 5, 0))
    return new_result_image
|
|
|
|
|
# ---------------------------------------------------------------------------
# Model / runtime configuration (module-level globals read by `inference`).
# Models are loaded eagerly at import time, before the Flask app starts.
# ---------------------------------------------------------------------------

# Backbone Stable Diffusion inpainting checkpoint (Hugging Face repo id).
base_model_path='booksforcharlie/stable-diffusion-inpainting'
# Allow TensorFloat-32 matmuls (faster on Ampere+ GPUs, slight precision loss).
allow_tf32=True
# Weight dtype selector, resolved by `init_weight_dtype` below.
mixed_precision='bf16'
# CatVTON checkpoint repo: attention weights plus DensePose / SCHP masker ckpts.
resume_path='zhengchong/CatVTON'
# Root folder where `inference` writes per-request debug grids.
tmp_folder = "./tmp"

# Download (or reuse cached) CatVTON snapshot; returns the local path.
repo_path = snapshot_download(repo_id=resume_path)

# Default request parameters used by the /inference route.
cloth_type = "upper"          # garment category passed to the auto-masker
image_size = (1024, 768)      # (height, width) — unpacked as such in `inference`
num_inference_steps = 50
guidance_scale = 2.5
seed = 42
show_type = "all"

# Automatic garment-region masker, backed by the downloaded DensePose/SCHP
# checkpoints; used when no usable mask is supplied with a request.
automasker = AutoMasker(
    densepose_ckpt=os.path.join(repo_path, "DensePose"),
    schp_ckpt=os.path.join(repo_path, "SCHP"),
    device='cuda',
)

# Try-on diffusion pipeline; "mix" selects the mixed attention-checkpoint
# variant shipped in the CatVTON repo.
pipeline = CatVTONPipeline(base_ckpt=base_model_path,
                           attn_ckpt=repo_path,
                           attn_ckpt_version="mix",
                           weight_dtype=init_weight_dtype(mixed_precision),
                           use_tf32=allow_tf32,
                           device='cuda')

# Mask post-processor: binarizes/grayscales and blurs masks on the VAE's
# 8x downsampling grid before they reach the pipeline.
mask_processor = VaeImageProcessor(vae_scale_factor=8, do_normalize=False, do_binarize=True, do_convert_grayscale=True)
|
|
|
|
|
|
|
|
|
|
|
app = Flask(__name__)

# Folders for incoming uploads and rendered results; created eagerly so the
# request handler can write into them without re-checking.
UPLOAD_FOLDER = 'uploads'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
RESULT_FOLDER = 'results'
os.makedirs(RESULT_FOLDER, exist_ok=True)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['RESULT_FOLDER'] = RESULT_FOLDER
|
|
|
|
|
|
|
@app.route('/inference', methods=['POST'])
def vton_inference():
    """POST /inference — run virtual try-on on uploaded images.

    Expects multipart form files under keys 'person' and 'cloth', plus an
    optional 'mask'. Saves the uploads, runs `inference`, and returns the
    rendered try-on image as JPEG. Errors come back as JSON with 400
    (missing inputs) or 500 (processing failure).
    """
    uploaded_files = {key: None for key in request.files}
    for key in request.files:
        file = request.files[key]
        try:
            # Some clients send placeholder filenames to mean "no file".
            reject_filename = ['none', 'null']
            # BUG FIX: the original used `or`, so placeholder names like
            # "none" always passed the check; both conditions must hold.
            if file and file.filename != '' and file.filename.lower() not in reject_filename:
                # secure_filename prevents path traversal through the
                # client-controlled filename (e.g. "../../etc/passwd").
                save_path = os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(file.filename))
                image = Image.open(file.stream)
                image.save(save_path)
                uploaded_files[key] = {
                    "filename": file.filename,
                    # The mask is consumed as grayscale; everything else as RGB.
                    "value": image.convert("RGB") if key != "mask" else image.convert("L")
                }
        except Exception as e:
            return jsonify({"error": f"Error processing file for key {key}: {str(e)}"}), 500

    # BUG FIX: the original used `and`, rejecting only when BOTH images were
    # missing; try-on requires a person image AND a cloth image. `.get` also
    # avoids a KeyError when a key was never part of the request. Missing
    # input is a client error, hence 400 rather than 500.
    if not uploaded_files.get('person') or not uploaded_files.get('cloth'):
        return jsonify({"message": "You must upload person and cloth image to virtual try-on"}), 400

    # BUG FIX: pass the PIL images to `inference`, not the bookkeeping dicts
    # built above. The mask is optional; `inference` treats None as "no mask".
    mask_entry = uploaded_files.get('mask')
    vton_img = inference(uploaded_files['person']['value'],
                         mask_entry['value'] if mask_entry else None,
                         uploaded_files['cloth']['value'],
                         cloth_type,
                         image_size,
                         num_inference_steps,
                         guidance_scale,
                         seed,
                         show_type)

    # Timestamped result name; saved to disk so send_file can stream it.
    result_filename = datetime.now().strftime("%Y%m%d-%H%M%S")
    result_path = os.path.join(app.config['RESULT_FOLDER'], f"{result_filename}.jpg")
    vton_img.save(result_path)
    return send_file(result_path, mimetype='image/jpeg')
|
|
|
|
|
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger;
    # combined with host='0.0.0.0' this exposes arbitrary code execution to
    # every network interface. Disable debug (or front with a production
    # WSGI server such as gunicorn) for any non-local deployment.
    app.run(debug=True, host='0.0.0.0')