Spaces:
Runtime error
Runtime error
Tevfik istanbullu
committed on
Upload folder using huggingface_hub
Browse files- .gitattributes +2 -35
- 1.jpg +0 -0
- 2.jpg +0 -0
- 3.jpg +0 -0
- 4.jpg +0 -0
- Image Caption Generator.py +56 -0
- README.md +2 -8
- requirements.txt +5 -0
.gitattributes
CHANGED
|
@@ -1,35 +1,2 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
+
# Auto detect text files and perform LF normalization
|
| 2 |
+
* text=auto
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1.jpg
ADDED
|
2.jpg
ADDED
|
3.jpg
ADDED
|
4.jpg
ADDED
|
Image Caption Generator.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Gradio app that captions uploaded images with the BLIP model.

Loads the Salesforce BLIP image-captioning model once at import time and
serves a simple Gradio interface around it. When an ``HF_TOKEN`` secret is
configured, flagged submissions are saved to a Hugging Face dataset;
otherwise flagging is disabled instead of crashing at startup.
"""
import os

import gradio as gr
import numpy as np
from PIL import Image
from transformers import AutoProcessor, BlipForConditionalGeneration

# Load the pretrained processor and model once at startup (downloads the
# weights on first run; served from the local cache afterwards).
processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")


def caption_image(input_image: np.ndarray):
    """Generate a short caption for an image.

    Parameters
    ----------
    input_image : np.ndarray
        Image as an (H, W, C) array, as supplied by ``gr.Image()``.
        May be ``None`` when the user submits without uploading an image.

    Returns
    -------
    str
        The generated caption, or a prompt to upload an image when
        ``input_image`` is ``None``.
    """
    if input_image is None:
        # Gradio passes None when no image was provided; respond gracefully
        # instead of crashing inside Image.fromarray.
        return "Please upload an image first."

    # Convert the numpy array to a PIL image; BLIP expects 3-channel RGB.
    raw_image = Image.fromarray(input_image).convert('RGB')

    # Preprocess the image into model-ready tensors.
    inputs = processor(raw_image, return_tensors="pt")

    # Generate caption token ids, capped at 50 tokens.
    out = model.generate(**inputs, max_length=50)

    # Decode the generated tokens back into plain text.
    caption = processor.decode(out[0], skip_special_tokens=True)

    return caption


# Save flagged data to a Hugging Face dataset only when a token is present.
# Constructing HuggingFaceDatasetSaver with a missing (None) token raises at
# import time — the likely cause of the Space's "Runtime error". Without a
# token we disable flagging instead.
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN:
    flagging_kwargs = {
        "allow_flagging": "auto",
        "flagging_callback": gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-images-data"),
    }
else:
    flagging_kwargs = {"allow_flagging": "never"}

# Example images shipped alongside the app.
examples = [
    ["1.jpg"],
    ["2.jpg"],
    ["3.jpg"],
    ["4.jpg"],
]

# Create the Gradio interface around the captioning function.
iface = gr.Interface(
    fn=caption_image,
    inputs=gr.Image(),
    outputs=gr.Textbox(label="Generated Caption", lines=2),
    title="🔍 Image Caption Generator 🖼️",
    description="Generate stunning captions for your images with our AI-powered model! 🌟\n\n🚫📚 Note: Please avoid entering any sensitive or personal information, as inputs may be reviewed or used for training purposes.",
    examples=examples,
    **flagging_kwargs,
)

if __name__ == "__main__":
    iface.launch()
README.md
CHANGED
|
@@ -1,12 +1,6 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
|
| 4 |
-
colorFrom: yellow
|
| 5 |
-
colorTo: gray
|
| 6 |
sdk: gradio
|
| 7 |
sdk_version: 4.31.5
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Image_Caption_Generator
|
| 3 |
+
app_file: Image Caption Generator.py
|
|
|
|
|
|
|
| 4 |
sdk: gradio
|
| 5 |
sdk_version: 4.31.5
|
|
|
|
|
|
|
| 6 |
---
|
|
|
|
|
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio==4.31.5
|
| 2 |
+
numpy==1.26.4
|
| 3 |
+
Pillow==10.3.0
|
| 4 |
+
transformers==4.41.1
|
| 5 |
+
torch==2.3.0
|