Update app.py
Browse files
app.py
CHANGED
|
@@ -12,13 +12,16 @@ def create_key(seed=0):
|
|
| 12 |
return jax.random.PRNGKey(seed)
|
| 13 |
|
| 14 |
def addp5sketch(url):
    """Embed a p5.js sketch page in the Gradio UI.

    Args:
        url: Full URL of the p5.js sketch to embed.

    Returns:
        A gr.HTML component containing the iframe markup.
    """
    # FIX: quote the src attribute and close the tag with </iframe> —
    # the original unquoted `src =` and '/frame>' tail were malformed HTML.
    # (The original line was truncated in this diff view; the 300px height
    # is taken from the later revision of the same function.)
    iframe = f'<iframe src="{url}" style="border:none;height:300px;width:100%"></iframe>'
    return gr.HTML(iframe)
|
| 17 |
|
| 18 |
def wandb_report(url):
    """Embed a Weights & Biases report page in the Gradio UI.

    Args:
        url: Full URL of the W&B report/run page to embed.

    Returns:
        A gr.HTML component containing the iframe markup.
    """
    # FIX: quote the src attribute and close the tag with </iframe> —
    # the original unquoted `src =` and '/frame>' tail were malformed HTML.
    iframe = f'<iframe src="{url}" style="border:none;height:1024px;width:100%"></iframe>'
    return gr.HTML(iframe)
|
| 21 |
|
|
|
|
|
|
|
|
|
|
| 22 |
# W&B training-report page (rendered via wandb_report).
report_url = 'https://wandb.ai/john-fozard/dog-cat-pose/runs/kmwcvae5'
# p5.js keypoint-sketch tool (rendered via addp5sketch).
sketch_url = 'https://editor.p5js.org/kfahn/full/Ntzq9HWhx'
|
| 24 |
|
|
@@ -67,29 +70,26 @@ def infer(prompts, negative_prompts, image):
|
|
| 67 |
return output
|
| 68 |
|
| 69 |
# Build the demo UI: prompt inputs on the left, keypoint sketch tool and
# generated image on the right, wired to `infer` via the Run button.
with gr.Blocks(theme='kfahn/AnimalPose') as demo:
    gr.Markdown(
    """
    # Animal Pose Control Net
    ## This is a demo of Animal Pose ControlNet, which is a model trained on runwayml/stable-diffusion-v1-5 with new type of conditioning.
    [Dataset](https://huggingface.co/datasets/JFoz/dog-poses-controlnet-dataset)
    [Diffusers model](https://huggingface.co/JFoz/dog-pose)
    [Github](https://github.com/fi4cr/animalpose)
    [Training Report](https://wandb.ai/john-fozard/AP10K-pose/runs/wn89ezaw)
    """)
    with gr.Row():
        with gr.Column():
            # NOTE(review): the placeholder string was truncated in this diff
            # view; completed from the later revision of this file.
            prompts = gr.Textbox(label="Prompt", placeholder="yellow dog standing on a lawn, best quality, highres")
            negative_prompts = gr.Textbox(label="Negative Prompt", value="lowres, bad muzzle, bad anatomy, missing ears, missing paws")
            conditioning_image = gr.Image(label="Conditioning Image")
            run_btn = gr.Button("Run")
        with gr.Column():
            # Embedded p5.js sketch used to draw the conditioning keypoints.
            keypoint_tool = addp5sketch(sketch_url)
            output = gr.Image(
                label="Result",
            )

    # Wire the Run button to the inference function.
    run_btn.click(fn=infer,
                  inputs=[prompts, negative_prompts, conditioning_image],
                  outputs=output)
|
|
|
|
| 12 |
return jax.random.PRNGKey(seed)
|
| 13 |
|
| 14 |
def addp5sketch(url):
    """Embed a p5.js sketch page in the Gradio UI.

    Args:
        url: Full URL of the p5.js sketch to embed.

    Returns:
        A gr.HTML component containing the iframe markup.
    """
    # FIX: quote the src attribute and close the tag with </iframe> —
    # the original unquoted `src =` and '/frame>' tail were malformed HTML.
    iframe = f'<iframe src="{url}" style="border:none;height:300px;width:100%"></iframe>'
    return gr.HTML(iframe)
|
| 17 |
|
| 18 |
def wandb_report(url):
    """Embed a Weights & Biases report page in the Gradio UI.

    Args:
        url: Full URL of the W&B report/run page to embed.

    Returns:
        A gr.HTML component containing the iframe markup.
    """
    # FIX: quote the src attribute and close the tag with </iframe> —
    # the original unquoted `src =` and '/frame>' tail were malformed HTML.
    iframe = f'<iframe src="{url}" style="border:none;height:1024px;width:100%"></iframe>'
    return gr.HTML(iframe)
|
| 21 |
|
| 22 |
+
# App heading and blurb shown at the top of the UI.
title = 'Animal Pose Control Net'
description = 'This is a demo of Animal Pose ControlNet, which is a model trained on runwayml/stable-diffusion-v1-5 with new type of conditioning.'

# W&B training-report page (rendered via wandb_report).
report_url = 'https://wandb.ai/john-fozard/dog-cat-pose/runs/kmwcvae5'
# p5.js keypoint-sketch tool (rendered via addp5sketch).
sketch_url = 'https://editor.p5js.org/kfahn/full/Ntzq9HWhx'
|
| 27 |
|
|
|
|
| 70 |
return output
|
| 71 |
|
| 72 |
# Build the demo UI: prompt inputs on the left, keypoint sketch tool,
# generated image, and reference links on the right, wired to `infer`.
with gr.Blocks(theme='kfahn/AnimalPose') as demo:
    # Render the title/description as Markdown. gr.Blocks (unlike
    # gr.Interface) has no title/description parameters, and they are
    # not valid Button.click() arguments either — see the fix below.
    gr.Markdown(f"# {title}\n{description}")
    with gr.Row():
        with gr.Column():
            prompts = gr.Textbox(label="Prompt", placeholder="yellow dog standing on a lawn, best quality, highres")
            negative_prompts = gr.Textbox(label="Negative Prompt", value="lowres, bad muzzle, bad anatomy, missing ears, missing paws")
            conditioning_image = gr.Image(label="Conditioning Image")
            run_btn = gr.Button("Run")
        with gr.Column():
            # Embedded p5.js sketch used to draw the conditioning keypoints.
            keypoint_tool = addp5sketch(sketch_url)
            output = gr.Image(
                label="Result",
            )
            gr.Markdown(
            """
            [Dataset](https://huggingface.co/datasets/JFoz/dog-poses-controlnet-dataset)
            [Diffusers model](https://huggingface.co/JFoz/dog-pose)
            [Github](https://github.com/fi4cr/animalpose)
            [Training Report](https://wandb.ai/john-fozard/dog-cat-pose/runs/kmwcvae5)
            """)

    # BUG FIX: Button.click() accepts no `title`/`description` keyword
    # arguments (those belong to gr.Interface); passing them raises a
    # TypeError when the app is constructed. They are shown via the
    # Markdown header above instead.
    run_btn.click(fn=infer,
                  inputs=[prompts, negative_prompts, conditioning_image],
                  outputs=output)
|