Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -34,28 +34,12 @@ def generate_caption(image):
|
|
| 34 |
caption = texts[best_match]
|
| 35 |
return f"Best match: {caption} (Confidence: {probs[0][best_match].item():.2f})"
|
| 36 |
|
| 37 |
-
note = gr.Markdown("""
|
| 38 |
-
**Note:** This model detects the best match from the following options:
|
| 39 |
-
- a photo of a cat
|
| 40 |
-
- a photo of a dog
|
| 41 |
-
- a photo of a man
|
| 42 |
-
- a photo of a woman
|
| 43 |
-
- a photo of a laptop
|
| 44 |
-
- a photo of a smartphone
|
| 45 |
-
- a photo of a city
|
| 46 |
-
- a photo of a landscape
|
| 47 |
-
- a photo of food
|
| 48 |
-
- a photo of a car
|
| 49 |
-
""")
|
| 50 |
-
|
| 51 |
-
note.render()
|
| 52 |
-
|
| 53 |
iface = gr.Interface(
|
| 54 |
fn=generate_caption,
|
| 55 |
inputs=gr.Image(type="pil"),
|
| 56 |
outputs=gr.Textbox(label="Generated Caption"),
|
| 57 |
title="Image Captioning with CLIP",
|
| 58 |
-
description="Upload an image and get a dynamically generated caption using CLIP",
|
| 59 |
)
|
| 60 |
|
| 61 |
iface.launch()
|
|
|
|
| 34 |
caption = texts[best_match]
|
| 35 |
return f"Best match: {caption} (Confidence: {probs[0][best_match].item():.2f})"
|
| 36 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
# Build the Gradio UI: one image in -> one generated caption out.
# NOTE(review): `generate_caption` and `gr` (gradio) are defined/imported
# earlier in app.py, outside this visible chunk.
iface = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(type="pil"),
    outputs=gr.Textbox(label="Generated Caption"),
    title="Image Captioning with CLIP",
    # The fixed CLIP prompt set is listed here so users know which
    # categories the model can actually match against.
    description="Upload an image and get a dynamically generated caption using CLIP.\n\n**Detectable categories:**\n- a photo of a cat\n- a photo of a dog\n- a photo of a man\n- a photo of a woman\n- a photo of a laptop\n- a photo of a smartphone\n- a photo of a city\n- a photo of a landscape\n- a photo of food\n- a photo of a car",
)

# Start the Gradio server (blocking call; launches the web app).
iface.launch()
|