Spaces:
Sleeping
Sleeping
chips committed on
Commit ·
3300d90
1
Parent(s): 1a719bd
First implementation of base image generator
Browse files- app.py +10 -3
- base_generator.py +41 -0
app.py
CHANGED
|
@@ -4,6 +4,7 @@ from fastapi.responses import JSONResponse
|
|
| 4 |
from io import BytesIO
|
| 5 |
import fal_client
|
| 6 |
import os
|
|
|
|
| 7 |
|
| 8 |
|
| 9 |
app = FastAPI()
|
|
@@ -14,21 +15,27 @@ def greet_json():
|
|
| 14 |
|
| 15 |
|
| 16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
# Function related to virtual outfit try on
|
| 18 |
|
| 19 |
#figure out how to deal with front vs back images of garments
|
| 20 |
|
| 21 |
|
| 22 |
def combine_garment_images(Upper_garment: UploadFile = File(...), Lower_garment: UploadFile = File(...)):
|
| 23 |
-
|
| 24 |
-
return(
|
| 25 |
|
| 26 |
|
| 27 |
# Endpoints related to virtual outfit try on
|
| 28 |
# HOW DO WE DISCERN FRONT VS BACK IMAGES OF GARMENTS?
|
| 29 |
|
| 30 |
@app.post("/styleTalent")
|
| 31 |
-
async def style_talent(talent_image: UploadFile = File(...), garment_image: UploadFile = File(...)):
|
| 32 |
#Save the uploaded talent image temporarily
|
| 33 |
talent_image_bytes = await talent_image.read()
|
| 34 |
file_path = f"/tmp/{talent_image.filename}"
|
|
|
|
| 4 |
from io import BytesIO
|
| 5 |
import fal_client
|
| 6 |
import os
|
| 7 |
+
import base_generator
|
| 8 |
|
| 9 |
|
| 10 |
app = FastAPI()
|
|
|
|
| 15 |
|
| 16 |
|
| 17 |
|
| 18 |
+
#endpoints related to base image generation
|
| 19 |
+
|
| 20 |
+
@app.post("/makeBaseImage")
|
| 21 |
+
async def make_base_image(image: UploadFile = File(...)):
|
| 22 |
+
return("something")
|
| 23 |
+
|
| 24 |
# Function related to virtual outfit try on
|
| 25 |
|
| 26 |
#figure out how to deal with front vs back images of garments
|
| 27 |
|
| 28 |
|
| 29 |
def combine_garment_images(Upper_garment: UploadFile = File(...), Lower_garment: UploadFile = File(...)):
    """Combine upper and lower garment images into one base image (WIP).

    NOTE(review): ``base_generator.create_image()`` is called with no
    arguments, but its definition requires four positional parameters
    (character_lora, character_keyword, outfit_desc, pose_id) — this call
    raises TypeError as written. Also, create_image only prints the run and
    returns None, so ``result`` would be None even on success. Both garment
    parameters are currently unused — confirm intended wiring with the author.
    """
    result = base_generator.create_image()
    return(result)
|
| 32 |
|
| 33 |
|
| 34 |
# Endpoints related to virtual outfit try on
|
| 35 |
# HOW DO WE DISCERN FRONT VS BACK IMAGES OF GARMENTS?
|
| 36 |
|
| 37 |
@app.post("/styleTalent")
|
| 38 |
+
async def style_talent( talent_image: UploadFile = File(...), garment_image: UploadFile = File(...)):
|
| 39 |
#Save the uploaded talent image temporarily
|
| 40 |
talent_image_bytes = await talent_image.read()
|
| 41 |
file_path = f"/tmp/{talent_image.filename}"
|
base_generator.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import random
|
| 3 |
+
import requests
|
| 4 |
+
|
| 5 |
+
#Todo
|
| 6 |
+
# Something to select the pose from the list
|
| 7 |
+
# a way to support loras directly
|
| 8 |
+
# A way to get a description of the outfit
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def run_workflow(body):
    """Submit a workflow run to the comfy.icu API and return the parsed JSON.

    Args:
        body: dict containing at least 'workflow_id'; the whole dict is
            posted as the JSON payload (the API also expects 'prompt' and
            'files' keys for this workflow).

    Returns:
        The decoded JSON response from the API.

    Requires the COMFYICU_API_KEY environment variable to be set.
    """
    url = f"https://comfy.icu/api/v1/workflows/{body['workflow_id']}/runs"
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": "Bearer " + os.environ["COMFYICU_API_KEY"],
    }
    # Fix: without a timeout, requests.post can hang indefinitely if the
    # remote API stalls; 60s is generous for a submit call.
    response = requests.post(url, headers=headers, json=body, timeout=60)
    return response.json()
| 23 |
+
|
| 24 |
+
def create_image(
    character_lora="yOA2a06KWEMR-ewq3j8io_pytorch_lora_weights.safetensors",
    character_keyword="crxter_scandi",
    outfit_desc="blue strap top and pink skirt ",
    pose_id=0,
):
    """Generate a base fashion-model image via a comfy.icu Flux workflow.

    Args:
        character_lora: filename of the character LoRA weights to load.
        character_keyword: trigger word for the character LoRA, inserted
            into the text prompt.
        outfit_desc: text description of the outfit for the prompt.
        pose_id: currently UNUSED — the pose image is hard-coded below
            (matches the file-top TODO about pose selection).

    Returns:
        The JSON response from run_workflow describing the submitted run.

    Backward-compatible changes: defaults (mirroring the demo values in
    __main__) are added so the zero-argument call in app.py no longer raises
    TypeError, and the run result is now returned instead of being discarded
    (previously callers always received None).
    """
    # Fresh random seed so each invocation samples a different image.
    seed = random.randint(0, 1000000)
    prompt = {"56": {"_meta": {"title": "CLIP Text Encode (Prompt)"}, "inputs": {"clip": ["365", 1], "text": ""}, "class_type": "CLIPTextEncode"}, "159": {"_meta": {"title": "Load VAE"}, "inputs": {"vae_name": "flux1-ae.safetensors"}, "class_type": "VAELoader"}, "175": {"_meta": {"title": "Apply ControlNet"}, "inputs": {"vae": ["159", 0], "image": ["369", 0], "negative": ["56", 0], "positive": ["199", 0], "strength": 0.7000000000000001, "control_net": ["260", 0], "end_percent": 0.5, "start_percent": 0}, "class_type": "ControlNetApplyAdvanced"}, "199": {"_meta": {"title": "CLIP Text Encode (Prompt)"}, "inputs": {"clip": ["365", 1], "text": f"Fashion model {character_keyword} wearing {outfit_desc}. posing in front of white background"}, "class_type": "CLIPTextEncode"}, "260": {"_meta": {"title": "Load ControlNet Model"}, "inputs": {"control_net_name": "flux.1-dev-controlnet-union.safetensors"}, "class_type": "ControlNetLoader"}, "263": {"_meta": {"title": "Save Image"}, "inputs": {"images": ["311", 0], "filename_prefix": "ControlNet"}, "class_type": "SaveImage"}, "307": {"_meta": {"title": "FluxGuidance"}, "inputs": {"guidance": 3.5, "conditioning": ["175", 0]}, "class_type": "FluxGuidance"}, "308": {"_meta": {"title": "KSampler"}, "inputs": {"cfg": 1, "seed": seed, "model": ["365", 0], "steps": 20, "denoise": 1, "negative": ["335", 0], "positive": ["307", 0], "scheduler": "simple", "latent_image": ["344", 0], "sampler_name": "euler"}, "class_type": "KSampler"}, "310": {"_meta": {"title": "DualCLIPLoader"}, "inputs": {"type": "flux", "device": "default", "clip_name1": "t5xxl_fp8_e4m3fn.safetensors", "clip_name2": "clip_l.safetensors"}, "class_type": "DualCLIPLoader"}, "311": {"_meta": {"title": "VAE Decode"}, "inputs": {"vae": ["159", 0], "samples": ["308", 0]}, "class_type": "VAEDecode"}, "335": {"_meta": {"title": "FluxGuidance"}, "inputs": {"guidance": 3.5, "conditioning": ["175", 1]}, "class_type": "FluxGuidance"}, "344": {"_meta": {"title": "Empty Latent Image"}, "inputs": {"width": 544, "height": 960, "batch_size": 1}, "class_type": "EmptyLatentImage"}, "363": {"_meta": {"title": "Load Diffusion Model"}, "inputs": {"unet_name": "flux1-dev-fp8-e4m3fn.safetensors", "weight_dtype": "fp8_e4m3fn"}, "class_type": "UNETLoader"}, "365": {"_meta": {"title": "Load LoRA"}, "inputs": {"clip": ["310", 0], "model": ["363", 0], "lora_name": character_lora, "strength_clip": 0.99, "strength_model": 0.84}, "class_type": "LoraLoader"}, "369": {"_meta": {"title": "Load Image"}, "inputs": {"image": "Pose_Female_Front_full_standing_02.webp_00001_.png", "upload": "image"}, "class_type": "LoadImage"}}
    # Remote pose image plus the LoRA weight files the workflow may load.
    files = {"/input/Pose_Female_Front_full_standing_02.webp_00001_.png": "https://comfy.icu/api/v1/view/workflows/SqG44yXRdRzxGQmfWwlSt/input/Pose_Female_Front_full_standing_02.webp_00001_.png", "/models/loras/7Jd1cwsai241yWWSPDW_k_pytorch_lora_weights.safetensors": "https://v3.fal.media/files/lion/7Jd1cwsai241yWWSPDW_k_pytorch_lora_weights.safetensors", "/models/loras/N5sJtK8XVftjPlIj3idOB_pytorch_lora_weights.safetensors": "https://v3.fal.media/files/monkey/N5sJtK8XVftjPlIj3idOB_pytorch_lora_weights.safetensors", "/models/loras/xVhN3ierb8IFqGRNOQpBT_pytorch_lora_weights.safetensors": "https://v3.fal.media/files/tiger/xVhN3ierb8IFqGRNOQpBT_pytorch_lora_weights.safetensors", "/models/loras/yOA2a06KWEMR-ewq3j8io_pytorch_lora_weights.safetensors": "https://v3.fal.media/files/rabbit/yOA2a06KWEMR-ewq3j8io_pytorch_lora_weights.safetensors"}

    workflow_id = "SqG44yXRdRzxGQmfWwlSt"

    run = run_workflow({"workflow_id": workflow_id, "prompt": prompt, "files": files})
    print(run)
    # Fix: return the run descriptor so callers (e.g. app.py) get a result.
    return run
|
| 34 |
+
|
| 35 |
+
if __name__ == "__main__":
|
| 36 |
+
character_lora = "yOA2a06KWEMR-ewq3j8io_pytorch_lora_weights.safetensors"
|
| 37 |
+
character_keyword = "crxter_scandi"
|
| 38 |
+
outfit_desc = "blue strap top and pink skirt "
|
| 39 |
+
pose_id = 0
|
| 40 |
+
|
| 41 |
+
create_image(character_lora, character_keyword, outfit_desc, pose_id)
|