iamnow committed on
Commit
bb101d8
·
verified ·
1 Parent(s): e6c0ad0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +51 -17
app.py CHANGED
@@ -1,20 +1,54 @@
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
  from gradio_client import Client, handle_file
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
- client = Client("life4cut/ff-v1")
10
- result = client.predict(
11
- dict={"background":handle_file('https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png'),"layers":[],"composite":None},
12
- garm_img=handle_file('https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png'),
13
- garment_des="Hello!!",
14
- is_checked=True,
15
- is_checked_crop=False,
16
- denoise_steps=30,
17
- seed=42,
18
- api_name="/tryon"
19
- )
20
- print(result)
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
  import gradio as gr
 
 
 
 
 
3
  from gradio_client import Client, handle_file
4
+ from PIL import Image
5
+
6
+ def predict(imgs, garm_img):
7
+ print(imgs, garm_img)
8
+ client = Client("life4cut/ff-v1", hf_token=HF_TOKEN_FF)
9
+ result = client.predict(
10
+ #dict={"background":handle_file('https://s3.ap-northeast-2.amazonaws.com/life4cut.app/images/temp/111.png'),"layers":[],"composite":None},
11
+ #garm_img=handle_file('https://s3.ap-northeast-2.amazonaws.com/life4cut.app/images/temp/00_style5.png'),
12
+ dict={"background":handle_file(imgs),"layers":[],"composite":None},
13
+ garm_img=handle_file(garm_img),
14
+ garment_des="Hello!!",
15
+ is_checked=True,
16
+ is_checked_crop=False,
17
+ denoise_steps=30,
18
+ seed=42,
19
+ api_name="/tryon"
20
+ )
21
+ #print(result)
22
+ return result[0], result[1]
23
+ #print(result[1])
24
+
25
+ # View the image
26
+ #Image.open(result[0])
27
+ #Image.open(result)
28
+ #example_path = os.path.join(os.path.dirname(__file__), 'example')
29
+ #garm_list = os.listdir(os.path.join(example_path,"cloth"))
30
+ #garm_list_path = [os.path.join(example_path,"cloth",garm) for garm in garm_list]
31
 
32
# Build the "fashion filter" demo: a person image + garment image go in,
# the try-on result and the masked intermediate come out.
image_blocks = gr.Blocks().queue()

with image_blocks as demo:
    gr.Markdown("## fashion filter")

    with gr.Row():
        # Column 1: the human photo and the two processing toggles.
        with gr.Column():
            imgs = gr.Image(
                sources='upload',
                type="filepath",
                label='Human. Mask with pen or use auto-masking',
            )
            with gr.Row():
                is_checked = gr.Checkbox(
                    label="Yes",
                    info="Use auto-generated mask (Takes 5 seconds)",
                    value=True,
                )
            with gr.Row():
                is_checked_crop = gr.Checkbox(
                    label="Yes",
                    info="Use auto-crop & resizing",
                    value=False,
                )

        # Column 2: the garment to try on.
        with gr.Column():
            garm_img = gr.Image(label="Garment", sources='upload', type="filepath")

        # Columns 3-4: the two outputs returned by predict().
        with gr.Column():
            masked_img = gr.Image(
                label="Masked image output",
                elem_id="masked-img",
                show_share_button=False,
            )
        with gr.Column():
            image_out = gr.Image(
                label="Output",
                elem_id="output-img",
                show_share_button=False,
            )

        # Column 5: the trigger button.
        with gr.Column():
            try_button = gr.Button(value="predict")

    # NOTE(review): nesting reconstructed from the flattened diff — the
    # checkboxes are assumed to live in the human-image column, matching the
    # upstream try-on demo layout; confirm against the rendered app.
    try_button.click(fn=predict, inputs=[imgs, garm_img], outputs=[image_out, masked_img])

image_blocks.launch()