Spaces:
Runtime error
Commit · 937eb55
0 Parent(s)
Duplicate from A-Celsius/Caption-Generator
Co-authored-by: Akshay Chauhan <A-Celsius@users.noreply.huggingface.co>
- .gitattributes +34 -0
- README.md +13 -0
- app.py +48 -0
- requirements.txt +2 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Caption Generator
+emoji: 🦀
+colorFrom: purple
+colorTo: yellow
+sdk: gradio
+sdk_version: 3.28.2
+app_file: app.py
+pinned: false
+duplicated_from: A-Celsius/Caption-Generator
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,48 @@
+from PIL import Image
+from transformers import BlipProcessor, BlipForConditionalGeneration
+import torch
+import gradio as gr
+
+model_name = "Salesforce/blip-image-captioning-base"
+
+caption_processor = BlipProcessor.from_pretrained(model_name)
+model = BlipForConditionalGeneration.from_pretrained(model_name)
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
+
+def generate_captions(image, num_captions=5, size=(512, 512)):
+    image = image.resize(size)
+    if image.mode != 'RGB':
+        image = image.convert('RGB')
+    pixel_values = caption_processor(image, return_tensors='pt').to(device)
+
+    caption_ids = model.generate(
+        **pixel_values,
+        max_length=30,
+        num_beams=5,
+        num_return_sequences=num_captions,
+        temperature=1.0
+    )
+
+    captions = [
+        caption_processor.decode(ids, skip_special_tokens=True)
+        for ids in caption_ids
+    ]
+
+    return captions
+
+from gradio.components import Image, Textbox, Slider
+
+interface = gr.Interface(
+    fn=generate_captions,
+    inputs=[
+        Image(type="pil", label="Input Image"),
+        Slider(minimum=1, maximum=5, step=1, label="Number of Captions")
+    ],
+    outputs=Textbox(type="text", label="Captions"),
+    title="Image Caption Generator",
+    description="AI tool that creates captions based on the image provided by the user.",
+)
+
+interface.launch()
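The Space status above reads "Runtime error". Two plausible culprits in the committed app.py are that generate_captions returns a list while the single Textbox output expects a string, and that the Gradio Slider may pass its value as a float, which model.generate's num_return_sequences does not accept. The following is only a hedged sketch of how the function could be adjusted, reusing the caption_processor, model, and device defined above; the int cast and the join are assumptions, not part of the committed code:

```python
def generate_captions(image, num_captions=5, size=(512, 512)):
    # Normalize the input: fixed size, 3-channel RGB for BLIP.
    image = image.resize(size)
    if image.mode != 'RGB':
        image = image.convert('RGB')
    inputs = caption_processor(image, return_tensors='pt').to(device)

    # Assumption: cast the Slider value, which may arrive as a float.
    num_captions = int(num_captions)
    caption_ids = model.generate(
        **inputs,
        max_length=30,
        num_beams=5,
        num_return_sequences=num_captions,
    )

    captions = [
        caption_processor.decode(ids, skip_special_tokens=True)
        for ids in caption_ids
    ]
    # Assumption: join into one string so a single Textbox can display it.
    return "\n".join(captions)
```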
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+torch
+transformers
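To exercise the captioning code outside the Gradio UI, a minimal local sketch is shown below. It assumes the two packages above plus Pillow are installed; the filename example.jpg is a hypothetical placeholder for any local image.

```python
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

model_name = "Salesforce/blip-image-captioning-base"
processor = BlipProcessor.from_pretrained(model_name)
model = BlipForConditionalGeneration.from_pretrained(model_name)

# example.jpg is a placeholder; point this at any local image.
image = Image.open("example.jpg").convert("RGB")
inputs = processor(image, return_tensors="pt")

# Beam search with multiple return sequences, mirroring the app's settings.
ids = model.generate(**inputs, max_length=30, num_beams=5, num_return_sequences=3)
for i, caption in enumerate(processor.batch_decode(ids, skip_special_tokens=True), 1):
    print(f"{i}. {caption}")
```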