Spaces:
Build error
Build error
aswin-raghavan committed on
Commit ·
fc374dd
1
Parent(s): d3c7bca
redo load random image
Browse files
app.py
CHANGED
|
@@ -5,6 +5,8 @@ import numpy as np
|
|
| 5 |
from PIL import Image
|
| 6 |
from transformers import CLIPProcessor, CLIPModel
|
| 7 |
import pandas as pd
|
|
|
|
|
|
|
| 8 |
|
| 9 |
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
|
| 10 |
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
|
@@ -40,20 +42,25 @@ def update_table_up(img, df, state):
|
|
| 40 |
def update_table_down(img, df, state):
|
| 41 |
return update_table(img, df, state, 0)
|
| 42 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
|
| 44 |
with gr.Blocks(title="End-User Personalization") as demo:
|
| 45 |
-
|
| 46 |
# start_button = gr.Button(label="Start")
|
| 47 |
image_display = gr.Image()
|
|
|
|
| 48 |
# text_display = gr.Text()
|
| 49 |
with gr.Row():
|
| 50 |
upvote = gr.Button("👍")
|
| 51 |
downvote = gr.Button("👎")
|
| 52 |
annotated_samples = gr.Dataframe(headers=['image_name', 'label', 'image_embed'], row_count=(1, 'dynamic'),
|
| 53 |
col_count=(3, 'fixed'), label='Annotations', wrap=True)
|
| 54 |
-
upvote.click(update_table_up, inputs=[image_display, annotated_samples,
|
| 55 |
-
downvote.click(update_table_down, inputs=[image_display, annotated_samples,
|
| 56 |
-
examples = gr.Examples(examples=[["dog.jpg"], ["colombia.jpg"], ["germany.jpg"]], inputs=[image_display])
|
| 57 |
|
| 58 |
# iface = gr.Interface(shot,
|
| 59 |
# ["image", "text"],
|
|
@@ -63,5 +70,5 @@ with gr.Blocks(title="End-User Personalization") as demo:
|
|
| 63 |
# ["colombia.jpg", "germany,belgium,colombia"]],
|
| 64 |
# description="Add a picture and a list of labels separated by commas",
|
| 65 |
# title="CLIP feature extractor")
|
| 66 |
-
demo.load(
|
| 67 |
demo.launch(show_error=True, debug=True)
|
|
|
|
| 5 |
from PIL import Image
|
| 6 |
from transformers import CLIPProcessor, CLIPModel
|
| 7 |
import pandas as pd
|
| 8 |
+
from glob import glob
|
| 9 |
+
from random import choice
|
| 10 |
|
| 11 |
# Load the CLIP model and its processor once at module import time so the
# Gradio event handlers can reuse them without re-downloading weights.
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
|
|
|
| 42 |
def update_table_down(img, df, state):
    """Record a downvote for *img*.

    Thin wrapper that delegates to ``update_table`` with the label
    fixed to 0 (the downvote value).
    """
    downvote_label = 0
    return update_table(img, df, state, downvote_label)
|
| 44 |
|
| 45 |
+
def get_random_image(state):
    """Pick a random image path from the session's image pool.

    Parameters
    ----------
    state : list[str]
        Gradio state holding the candidate image file paths.

    Returns
    -------
    tuple[str, str]
        The chosen path twice: once for the ``gr.Image`` display and
        once for the ``gr.Textbox`` showing the file name.

    Raises
    ------
    IndexError
        If *state* is empty (no images were found by the glob).
    """
    # Bug fix: random.choice expects a sequence, not an int —
    # choice(len(state)) raised TypeError on every load.
    img = choice(state)
    return img, img
|
| 48 |
+
|
| 49 |
|
| 50 |
# Gradio UI: displays an image from the pool and lets the user label it
# with a thumbs-up/down; annotations accumulate in a dataframe.
with gr.Blocks(title="End-User Personalization") as demo:
    # Per-session pool of candidate image paths.
    # NOTE(review): '/images/**/*.jpg' is an absolute path — confirm the
    # deployment mounts images there. Also, glob('**') only recurses when
    # recursive=True is passed — as written this matches one level; TODO confirm intent.
    images = gr.State(glob('/images/**/*.jpg'))
    # start_button = gr.Button(label="Start")
    image_display = gr.Image()
    # Shows the file name of the currently displayed image (filled by demo.load).
    image_fname = gr.Textbox()
    # text_display = gr.Text()
    with gr.Row():
        upvote = gr.Button("👍")
        downvote = gr.Button("👎")
    # Accumulated annotations: one row per labeled image.
    annotated_samples = gr.Dataframe(headers=['image_name', 'label', 'image_embed'], row_count=(1, 'dynamic'),
                                     col_count=(3, 'fixed'), label='Annotations', wrap=True)
    # Voting appends a labeled row (via update_table_up / update_table_down)
    # and writes back the image, dataframe, and remaining pool.
    upvote.click(update_table_up, inputs=[image_display, annotated_samples, images], outputs=[image_display, annotated_samples, images])
    downvote.click(update_table_down, inputs=[image_display, annotated_samples, images], outputs=[image_display,annotated_samples, images])
    # examples = gr.Examples(examples=[["dog.jpg"], ["colombia.jpg"], ["germany.jpg"]], inputs=[image_display])
|
| 64 |
|
| 65 |
# iface = gr.Interface(shot,
|
| 66 |
# ["image", "text"],
|
|
|
|
| 70 |
# ["colombia.jpg", "germany,belgium,colombia"]],
|
| 71 |
# description="Add a picture and a list of labels separated by commas",
|
| 72 |
# title="CLIP feature extractor")
|
| 73 |
+
# Seed the UI on page load with a random image and its file name.
# NOTE(review): some gradio versions require the load() event listener to be
# registered inside the `with gr.Blocks()` context — confirm against the
# pinned gradio version.
demo.load(get_random_image, inputs=[images], outputs=[image_display, image_fname])
demo.launch(show_error=True, debug=True)
|