JarvisLabs committed on
Commit
b283998
·
verified ·
1 Parent(s): bda352a

Upload 140 files

Browse files
.gitattributes CHANGED
@@ -87,3 +87,4 @@ Test_images/example_outputs/Re_light_output5.png filter=lfs diff=lfs merge=lfs -
87
  Test_images/IP_Material_2.jpg filter=lfs diff=lfs merge=lfs -text
88
  Test_images/lora_support_examples/Galverse.png filter=lfs diff=lfs merge=lfs -text
89
  Test_images/Skirt_1.png filter=lfs diff=lfs merge=lfs -text
 
 
87
  Test_images/IP_Material_2.jpg filter=lfs diff=lfs merge=lfs -text
88
  Test_images/lora_support_examples/Galverse.png filter=lfs diff=lfs merge=lfs -text
89
  Test_images/Skirt_1.png filter=lfs diff=lfs merge=lfs -text
90
+ HowTo/Gen.png filter=lfs diff=lfs merge=lfs -text
HowTo/Gen.png ADDED

Git LFS Details

  • SHA256: cb632860e32c3e676f575506ea6e601c55f037987ba1ba4b33980642ad60f188
  • Pointer size: 132 Bytes
  • Size of remote file: 1.14 MB
app.py CHANGED
@@ -23,6 +23,10 @@ os.makedirs(base_dir, exist_ok=True)
23
 
24
  # Function to convert and save the image to a dated folder, and update the gallery
25
  def update_gallery_local(img):
 
 
 
 
26
  print(type(img), len(gallery_list))
27
 
28
  try:
 
23
 
24
  # Function to convert and save the image to a dated folder, and update the gallery
25
  def update_gallery_local(img):
26
+
27
+ if img is None:
28
+ return gallery_list
29
+
30
  print(type(img), len(gallery_list))
31
 
32
  try:
gen_tab.py CHANGED
@@ -50,6 +50,7 @@ def create_gen_tab():
50
  "4. Options in extra control net. Extraでコントロールネットのオプションを選択します。\n"
51
  "5. Click 'Generate' to create the image. Generateをクリックして画像を生成します。\n"
52
  "6. click button to move image to other tab.ボタンをクリックして画像を他のタブに移動します。 \n")
 
53
  #Gen generatpor options
54
  with gr.Row():
55
  with gr.Column():
 
50
  "4. Options in extra control net. Extraでコントロールネットのオプションを選択します。\n"
51
  "5. Click 'Generate' to create the image. Generateをクリックして画像を生成します。\n"
52
  "6. click button to move image to other tab.ボタンをクリックして画像を他のタブに移動します。 \n")
53
+ gr.Image(value="HowTo/Gen.png",label=None,interactive=False)
54
  #Gen generatpor options
55
  with gr.Row():
56
  with gr.Column():
ipadapter_tab.py CHANGED
@@ -1,93 +1,90 @@
1
- from src.fal_api import fal_ipadapter_api
2
- import gradio as gr
3
-
4
-
5
- def create_ipadaptor_tab():
6
-
7
- with gr.TabItem("Cloth IP Adapter",id="cloth_ip_adapter"):
8
-
9
- with gr.Accordion("HowTo",open=False):
10
- gr.Markdown("""
11
- # Cloth IP Adapter 服のIPアタォーター \n
12
- ・1 Select base image ベース画像を選択します。 \n
13
- ・2 Prompt area you want the IP to be applied IPを適用するためのプロンプトエリアを選択します。 \n
14
- ・3 Select IP image IP画像を選択します。 \n
15
- ・4 Click Generate 生成をクリックします。 \n
16
-
17
- """)
18
- gr.Image(value="HowTo/Flow_IP_ClothAdapter.jpg",label=None,interactive=False)
19
-
20
-
21
- with gr.Row():
22
- with gr.Column():
23
- api_inp = gr.Image(label="Base Image")
24
-
25
- with gr.Accordion("Example base images", open=False):
26
- human_examples = gr.Examples(
27
- examples=[
28
- ["Test_images/Woman_1.png"],
29
- ["Test_images/man_1.png"],
30
- ["Test_images/example_outputs/input_4.png"],
31
- ["Test_images/Woman_2.png"],
32
- ["Test_images/Woman_3.png"],
33
- ["Test_images/man_2.png"],
34
- ["Test_images/Woman_4.png"],
35
- ["Test_images/Woman_5.png"],
36
- ["Test_images/anime_woman_1.png"],
37
- ["Test_images/anime_woman_2.png"],
38
- ["Test_images/anime_woman_3.png"],
39
- ["Test_images/Jump.png"],
40
- ["Test_images/Walk_1.png"],
41
- ["Test_images/Walk_2.png"],
42
- ["Test_images/Walk_3.png"],
43
- ["Test_images/Walk_4.png"]
44
- ],
45
- inputs=[api_inp],
46
- examples_per_page=14
47
- )
48
-
49
- with gr.Column():
50
- ip_image = gr.Image(label="IP Adapter Image")
51
-
52
- with gr.Accordion("Example IP images ", open=False):
53
- ip_examples = gr.Examples(
54
- examples=[
55
- ["Test_images/style_1.jpg"],
56
- ["Test_images/style_2.png"],
57
- ["Test_images/Style_3.png"],
58
- ["Test_images/Style_4.png"],
59
- ["Test_images/pattern_1.png"],
60
- ["Test_images/pattern_2.jpg"],
61
- ["Test_images/pattern_3.jpg"],
62
- ["Test_images/pattern_4.jpg"],
63
- ["Test_images/Jacket_1.png"],
64
- ["Test_images/Suit_4.png"],
65
- ["Test_images/dress_5.png"],
66
- ["Test_images/Shirt_1.png"],
67
- ["https://replicate.delivery/pbxt/Kl23gJODaW7EuxrDzBG9dcgqRdMaYSWmBQ9UexnwPiL7AnIr/3.jpg"],
68
- ["https://replicate.delivery/pbxt/Kl2WefehduxwWcQc5OrrBH6AkojQ6OqyQSKBvBLrroSpEBim/f2f0488a-180e-4d7e-9907-f26f92ac5f16.jpg"],
69
- ["https://replicate.delivery/pbxt/Kl2VlUibviSP8Kq5ULLJmMOWorog1YFu0zTreqhqX97c62ku/572a1fc9-a114-4d5b-8c7c-85aa5648c7b4.jpg"],
70
- ["https://replicate.delivery/pbxt/Kl2VCw1UVIJsYw9r8iqSYUMm65ePJhfYOLNolOE8CwxfRjX2/28481ff0-0829-42af-a658-fb96be2abb3d.jpg"],
71
- ],
72
- inputs=[ip_image],
73
- examples_per_page=14
74
- )
75
- with gr.Row():
76
- ap_prompt = gr.Textbox(label="clothes prompt",value="clothes",info="Clothe are you want to use 希望の服のエリア (日本語可能)")
77
- with gr.Row():
78
- ip_btn = gr.Button("Generate",elem_id="gen_btn")
79
- with gr.Row():
80
- api_out = gr.Image(label="Output",type="filepath",elem_id="output_image")
81
- with gr.Row(): #Move gen out row
82
-
83
- move_to_cnmk =gr.Button("Move to Control net mockup ")
84
- move_to_relight= gr.Button("Move to Relight")
85
-
86
-
87
- with gr.Row():
88
- gr.Examples(examples=[
89
- ["Test_images/example_outputs/input_4.png","Test_images/Jacket_1.png","Jacket","Test_images/example_outputs/output_clothIP_1.png"],
90
- ["Test_images/Woman_2.png","Test_images/pattern_4.jpg","Dress","Test_images/example_outputs/output_clothIP_2.png"],
91
- ],inputs=[api_inp,ip_image,ap_prompt,api_out])
92
- ip_btn.click(fal_ipadapter_api,inputs=[api_inp,ip_image,ap_prompt],outputs=api_out)
93
  return api_inp,api_out,move_to_cnmk,move_to_relight
 
1
+ from src.fal_api import fal_ipadapter_api
2
+ import gradio as gr
3
+
4
+
5
+ def create_ipadaptor_tab():
6
+
7
+ with gr.TabItem("Cloth IP Adapter",id="cloth_ip_adapter"):
8
+
9
+ with gr.Accordion("HowTo",open=False):
10
+ gr.Markdown("""
11
+ # Cloth IP Adapter 服のIPアタォーター \n
12
+ ・1 Select base image ベース画像を選択します。 \n
13
+ ・2 Prompt area you want the IP to be applied IPを適用するためのプロンプトエリアを選択します。 \n
14
+ ・3 Select IP image IP画像を選択します。 \n
15
+ ・4 Click Generate 生成をクリックします。 \n
16
+
17
+ """)
18
+ gr.Image(value="HowTo/Flow_IP_ClothAdapter.jpg",label=None,interactive=False)
19
+
20
+
21
+ with gr.Row():
22
+ with gr.Column():
23
+ api_inp = gr.Image(label="Base Image")
24
+
25
+ with gr.Accordion("Example base images", open=True):
26
+ human_examples = gr.Examples(
27
+ examples=[
28
+ ["Test_images/example_outputs/input_4.png"],
29
+ ["Test_images/man_1.png"],
30
+ ["Test_images/Woman_2.png"],
31
+ ["Test_images/Woman_3.png"],
32
+ ["Test_images/man_2.png"],
33
+ ["Test_images/Woman_4.png"],
34
+ ["Test_images/Woman_5.png"],
35
+ ["Test_images/anime_woman_1.png"],
36
+ ["Test_images/anime_woman_2.png"],
37
+ ["Test_images/anime_woman_3.png"],
38
+ ["Test_images/Walk_1.png"],
39
+ ["Test_images/Walk_2.png"],
40
+ ["Test_images/Walk_3.png"],
41
+ ["Test_images/Walk_4.png"]
42
+ ],
43
+ inputs=[api_inp],
44
+ examples_per_page=14
45
+ )
46
+
47
+ with gr.Column():
48
+ ip_image = gr.Image(label="IP Adapter Image")
49
+
50
+ with gr.Accordion("Example IP images ", open=True):
51
+ ip_examples = gr.Examples(
52
+ examples=[
53
+ ["Test_images/Jacket_1.png"],
54
+ ["Test_images/Suit_4.png"],
55
+ ["Test_images/dress_5.png"],
56
+ ["Test_images/Shirt_1.png"],
57
+
58
+ ["Test_images/Style_4.png"],
59
+ ["Test_images/pattern_1.png"],
60
+ ["Test_images/pattern_2.jpg"],
61
+ ["Test_images/pattern_3.jpg"],
62
+ ["Test_images/pattern_4.jpg"],
63
+
64
+ ["https://replicate.delivery/pbxt/Kl23gJODaW7EuxrDzBG9dcgqRdMaYSWmBQ9UexnwPiL7AnIr/3.jpg"],
65
+ ["https://replicate.delivery/pbxt/Kl2WefehduxwWcQc5OrrBH6AkojQ6OqyQSKBvBLrroSpEBim/f2f0488a-180e-4d7e-9907-f26f92ac5f16.jpg"],
66
+ ["https://replicate.delivery/pbxt/Kl2VlUibviSP8Kq5ULLJmMOWorog1YFu0zTreqhqX97c62ku/572a1fc9-a114-4d5b-8c7c-85aa5648c7b4.jpg"],
67
+
68
+ ],
69
+ inputs=[ip_image],
70
+ examples_per_page=14
71
+ )
72
+ with gr.Row():
73
+ ap_prompt = gr.Textbox(label="clothes prompt",value="clothes",info="Clothe are you want to use 希望の服のエリア (日本語可能)")
74
+ with gr.Row():
75
+ ip_btn = gr.Button("Generate",elem_id="gen_btn")
76
+ with gr.Row():
77
+ api_out = gr.Image(label="Output",type="filepath",elem_id="output_image")
78
+ with gr.Row(): #Move gen out row
79
+
80
+ move_to_cnmk =gr.Button("Move to Control net mockup ")
81
+ move_to_relight= gr.Button("Move to Relight")
82
+
83
+
84
+ with gr.Row():
85
+ gr.Examples(examples=[
86
+ ["Test_images/example_outputs/input_4.png","Test_images/Jacket_1.png","Jacket","Test_images/example_outputs/output_clothIP_1.png"],
87
+ ["Test_images/Woman_2.png","Test_images/pattern_4.jpg","Dress","Test_images/example_outputs/output_clothIP_2.png"],
88
+ ],inputs=[api_inp,ip_image,ap_prompt,api_out])
89
+ ip_btn.click(fal_ipadapter_api,inputs=[api_inp,ip_image,ap_prompt],outputs=api_out)
 
 
 
90
  return api_inp,api_out,move_to_cnmk,move_to_relight
src/__pycache__/deepl.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
src/__pycache__/fal_api.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
src/__pycache__/helpers.cpython-310.pyc ADDED
Binary file (1.05 kB). View file
 
src/__pycache__/rep_api.cpython-310.pyc ADDED
Binary file (8.98 kB). View file
 
src/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.34 kB). View file
 
src/rep_api.py CHANGED
@@ -12,7 +12,12 @@ def generate_image_control_net(prompt,lora_model,api_path,aspect_ratio,lora_scal
12
  use_control_net,control_net_type,control_net_img,control_net_strength,
13
  num_outputs=1,guidance_scale=3.5,seed=None,
14
  ):
15
- print(prompt,lora_model,api_path,aspect_ratio,use_control_net)
 
 
 
 
 
16
  inputs = {
17
  "prompt": detect_and_translate(prompt),
18
  "output_format": "png",
@@ -51,7 +56,7 @@ def generate_image_control_net(prompt,lora_model,api_path,aspect_ratio,lora_scal
51
  api_path,
52
  input=inputs
53
  )
54
- print(output)
55
  return output[0]
56
 
57
 
@@ -61,8 +66,10 @@ def generate_image_replicate(prompt,lora_model,api_path,aspect_ratio,model,lora_
61
  num_outputs=1,guidance_scale=3.5,seed=None,
62
 
63
  ):
64
- print(prompt,lora_model,api_path,aspect_ratio)
65
-
 
 
66
  #if model=="dev":
67
  num_inference_steps=30
68
  if model=="schnell":
@@ -87,7 +94,7 @@ def generate_image_replicate(prompt,lora_model,api_path,aspect_ratio,model,lora_
87
  api_path,
88
  input=inputs
89
  )
90
- print(output)
91
  return output[0]
92
  def replicate_bgcontrolnet(img,prompt,background_prompt, sampler_name= "DPM++ SDE Karras",
93
  negative_prompt="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, mutated hands and fingers:1.4), (deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation"
@@ -108,7 +115,7 @@ def replicate_bgcontrolnet(img,prompt,background_prompt, sampler_name= "DPM++ SD
108
  return output["image"]
109
 
110
  def replicate_caption_api(image,model,context_text):
111
- print(model,context_text)
112
  base64_image = image_to_base64(image)
113
  if model=="blip":
114
  output = replicate.run(
@@ -121,7 +128,7 @@ def replicate_caption_api(image,model,context_text):
121
  "use_nucleus_sampling": False
122
  }
123
  )
124
- print(output)
125
 
126
  elif model=="llava-16":
127
  output = replicate.run(
@@ -135,7 +142,7 @@ def replicate_caption_api(image,model,context_text):
135
  "temperature": 0.2
136
  }
137
  )
138
- print(output)
139
  output = "".join(output)
140
 
141
  elif model=="img2prompt":
@@ -145,7 +152,7 @@ def replicate_caption_api(image,model,context_text):
145
  "image":base64_image
146
  }
147
  )
148
- print(output)
149
  return output
150
 
151
  def update_replicate_api_key(api_key):
@@ -168,7 +175,7 @@ def virtual_try_on(crop, seed, steps, category, garm_img, human_img, garment_d
168
  "garment_des": garment_des
169
  }
170
  )
171
- print(output)
172
  return output
173
 
174
 
@@ -180,7 +187,7 @@ def process_images(files,model,context_text,token_string):
180
  images = []
181
  textbox =""
182
  for file in files:
183
- print(file)
184
  image = Image.open(file)
185
  if model=="None":
186
  caption="[Insert cap here]"
@@ -217,7 +224,7 @@ def traning_function(zip_path,training_model,training_destination,seed,token_str
217
  BB_defult="https://f005.backblazeb2.com/file/"
218
  if BB_defult not in zip_path:
219
  zip_path=BB_uploadfile(zip_path,os.path.basename(zip_path),BB_bucket_name)
220
- print(zip_path)
221
  training_logs = f"Using zip traning file at: {zip_path}\n"
222
  yield training_logs, None
223
  input={
@@ -230,11 +237,11 @@ def traning_function(zip_path,training_model,training_destination,seed,token_str
230
  "seed": seed,
231
  "input_images": zip_path
232
  }
233
- print(training_destination)
234
  username,model_name=training_destination.split("/")
235
  assert replicate_create_model(username,model_name,visibility="private",hardware="gpu-a40-large"),"Error in creating model on replicate, check API key and username is correct "
236
 
237
- print(input)
238
  try:
239
  training = replicate.trainings.create(
240
  destination=training_destination,
@@ -293,7 +300,7 @@ def sam_segment(image,prompt,negative_prompt,adjustment_factor=-15):
293
  out_items={}
294
  for item in output:
295
  # https://replicate.com/schananas/grounded_sam/api#output-schema
296
- print(item)
297
  out_items[os.path.basename(item).split(".")[0]]=item
298
  return out_items
299
 
@@ -311,7 +318,7 @@ def replicate_zest(img,material_img="https://replicate.delivery/pbxt/Kl23gJODaW7
311
  "material_image":material_img
312
  }
313
  )
314
- print(output)
315
  return output
316
 
317
 
@@ -334,7 +341,7 @@ def replicate_iclight_BG(img,prompt,bg_img,light_source="Use Background Image",
334
  ):
335
  assert light_source in light_source_options, "Please select a correct ligh source option"
336
  width, height = img.size
337
- print(width,height)
338
  img=image_to_base64(img)
339
  #if light_source=="Use Background Image":
340
  if bg_img is None:
 
12
  use_control_net,control_net_type,control_net_img,control_net_strength,
13
  num_outputs=1,guidance_scale=3.5,seed=None,
14
  ):
15
+ #print(prompt,lora_model,api_path,aspect_ratio,use_control_net)
16
+ #print(prompt,len(prompt),type(prompt),prompt is None)
17
+ if len(prompt)==0:
18
+ prompt=os.environ["default_promt"]
19
+ #print(prompt,lora_model,api_path,aspect_ratio)
20
+
21
  inputs = {
22
  "prompt": detect_and_translate(prompt),
23
  "output_format": "png",
 
56
  api_path,
57
  input=inputs
58
  )
59
+ #print(output)
60
  return output[0]
61
 
62
 
 
66
  num_outputs=1,guidance_scale=3.5,seed=None,
67
 
68
  ):
69
+ #print(prompt,len(prompt),type(prompt),prompt is None)
70
+ if len(prompt)==0:
71
+ prompt=os.environ["default_promt"]
72
+ #print(prompt,lora_model,api_path,aspect_ratio)
73
  #if model=="dev":
74
  num_inference_steps=30
75
  if model=="schnell":
 
94
  api_path,
95
  input=inputs
96
  )
97
+ #print(output)
98
  return output[0]
99
  def replicate_bgcontrolnet(img,prompt,background_prompt, sampler_name= "DPM++ SDE Karras",
100
  negative_prompt="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, mutated hands and fingers:1.4), (deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation"
 
115
  return output["image"]
116
 
117
  def replicate_caption_api(image,model,context_text):
118
+ #print(model,context_text)
119
  base64_image = image_to_base64(image)
120
  if model=="blip":
121
  output = replicate.run(
 
128
  "use_nucleus_sampling": False
129
  }
130
  )
131
+ #print(output)
132
 
133
  elif model=="llava-16":
134
  output = replicate.run(
 
142
  "temperature": 0.2
143
  }
144
  )
145
+ #print(output)
146
  output = "".join(output)
147
 
148
  elif model=="img2prompt":
 
152
  "image":base64_image
153
  }
154
  )
155
+ #print(output)
156
  return output
157
 
158
  def update_replicate_api_key(api_key):
 
175
  "garment_des": garment_des
176
  }
177
  )
178
+ #print(output)
179
  return output
180
 
181
 
 
187
  images = []
188
  textbox =""
189
  for file in files:
190
+ #print(file)
191
  image = Image.open(file)
192
  if model=="None":
193
  caption="[Insert cap here]"
 
224
  BB_defult="https://f005.backblazeb2.com/file/"
225
  if BB_defult not in zip_path:
226
  zip_path=BB_uploadfile(zip_path,os.path.basename(zip_path),BB_bucket_name)
227
+ #print(zip_path)
228
  training_logs = f"Using zip traning file at: {zip_path}\n"
229
  yield training_logs, None
230
  input={
 
237
  "seed": seed,
238
  "input_images": zip_path
239
  }
240
+ #print(training_destination)
241
  username,model_name=training_destination.split("/")
242
  assert replicate_create_model(username,model_name,visibility="private",hardware="gpu-a40-large"),"Error in creating model on replicate, check API key and username is correct "
243
 
244
+ #print(input)
245
  try:
246
  training = replicate.trainings.create(
247
  destination=training_destination,
 
300
  out_items={}
301
  for item in output:
302
  # https://replicate.com/schananas/grounded_sam/api#output-schema
303
+ #print(item)
304
  out_items[os.path.basename(item).split(".")[0]]=item
305
  return out_items
306
 
 
318
  "material_image":material_img
319
  }
320
  )
321
+ #print(output)
322
  return output
323
 
324
 
 
341
  ):
342
  assert light_source in light_source_options, "Please select a correct ligh source option"
343
  width, height = img.size
344
+ #print(width,height)
345
  img=image_to_base64(img)
346
  #if light_source=="Use Background Image":
347
  if bg_img is None:
virtualtryon_tab.py CHANGED
@@ -21,30 +21,32 @@ def create_virtualtryon_tab():
21
  human_img = gr.Image(label="Human Image")
22
  crop = gr.Checkbox(label="Crop", value=True)
23
  seed = gr.Number(label="Seed", value=42)
 
24
  steps = gr.Number(label="Steps", value=30)
25
  category = gr.Dropdown(["upper_body", "lower_body", "dresses"], label="Category", value="upper_body")
26
  garment_des = gr.Textbox(label="Garment Description")
27
- with gr.Accordion("Example People", open=False):
28
- human_examples = gr.Examples(
29
- examples=[
30
- ["Test_images/Woman_1.png"],
31
- ["Test_images/prompt_support_examples/Man_1.png"],
32
- ["Test_images/Woman_2.png"],
33
- ["Test_images/prompt_support_examples/Man_2.png"],
34
- ["Test_images/Woman_3.png"],
35
- ["Test_images/man_1.png"],
36
- ["Test_images/Woman_4.png"],
37
- ["Test_images/Woman_5.png"],
38
- ["Test_images/anime_woman_1.png"],
39
- ["Test_images/anime_woman_2.png"],
40
- ["Test_images/anime_woman_3.png"],
41
- ["Test_images/Jump.png"],
42
-
43
-
44
- ],
45
- inputs=[human_img],
46
- examples_per_page=24
47
- )
 
48
  with gr.Column():
49
  garm_img = gr.Image(label="Garment Image")
50
  with gr.Accordion("Example Clothes", open=True):
 
21
  human_img = gr.Image(label="Human Image")
22
  crop = gr.Checkbox(label="Crop", value=True)
23
  seed = gr.Number(label="Seed", value=42)
24
+ with gr.Column():
25
  steps = gr.Number(label="Steps", value=30)
26
  category = gr.Dropdown(["upper_body", "lower_body", "dresses"], label="Category", value="upper_body")
27
  garment_des = gr.Textbox(label="Garment Description")
28
+ with gr.Row():
29
+ with gr.Accordion("Example People", open=False):
30
+ human_examples = gr.Examples(
31
+ examples=[
32
+ ["Test_images/Woman_1.png"],
33
+ ["Test_images/prompt_support_examples/Man_1.png"],
34
+ ["Test_images/Woman_2.png"],
35
+ ["Test_images/prompt_support_examples/Man_2.png"],
36
+ ["Test_images/Woman_3.png"],
37
+ ["Test_images/man_1.png"],
38
+ ["Test_images/Woman_4.png"],
39
+ ["Test_images/Woman_5.png"],
40
+ ["Test_images/anime_woman_1.png"],
41
+ ["Test_images/anime_woman_2.png"],
42
+ ["Test_images/anime_woman_3.png"],
43
+ ["Test_images/Jump.png"],
44
+
45
+
46
+ ],
47
+ inputs=[human_img],
48
+ examples_per_page=24
49
+ )
50
  with gr.Column():
51
  garm_img = gr.Image(label="Garment Image")
52
  with gr.Accordion("Example Clothes", open=True):