Test DF again

app.py CHANGED
@@ -1,20 +1,14 @@
 # Import gradio - app framework
 import gradio as gr
 
-# Two image datasources are available.
-# Minor adjustments (add/remove # to deactivate/activate) to switch between datasources.
-# AA comments refer to images in the DataFrame / from Coco database
-# BB comments refer to images stored in local Gradio app folder
-
-# Import os and random to support random selection of image (from folder)
-import os
-import random
 
 # Import pandas datasets, transformers, torch
 import pandas as pd
-
+#import torch
+#import tensorflow as tf
 from datasets import load_dataset
 
+
 from transformers import (
     BlipProcessor,
     BlipForConditionalGeneration,
@@ -25,70 +19,77 @@ from transformers import (
 )
 
 
+
+
 from PIL import Image
 import torch
 
-
+
 # Get merve/coco dataset
 from datasets import load_dataset
 
+
 #Load dataset (detection-datasets/coco)
 dataset = load_dataset("henryscheible/coco_val2014_tiny", split="validation")
 
+
 # Reduce dataset to 20 rows, i.e., get sample
 samples = dataset.select(range(20))
 
+
 #Convert to dataframe
 df = pd.DataFrame(samples)
 
-# BB: Direct to Photos folder
-IMAGE_FOLDER = "Photos"
 
-
-
-
-
-]
+## print ("Print to show the 20 images available.")
+## print ("The app will then select an image for further exploration.")
+## print(df.head(20))
+
 
 #Load the image captioning model (Salesforce/blip-image-captioning-large)
 processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
 model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
 
+
 #Load transformer for translating captions from English to Spanish
 model_name = "Helsinki-NLP/opus-mt-en-es"
 trans_tokenizer = MarianTokenizer.from_pretrained(model_name)
 trans_model = MarianMTModel.from_pretrained(model_name)
 
 
-#Configure captioning function
 
+
+
+
+#Configure captioning function
 def caption_random_image():
 
-    # AA: pick random row - from DF
-    ##sample = df.sample(1).iloc[0]
 
-    #
-
+    # pick random row
+    sample = df.sample(1).iloc[0]
 
-    # BB: Load into PIL - image from folder - image from folder
-    image = Image.open(img_path).convert("RGB")
 
+    # 'image' field contains an actual PIL image
+    image = sample["image"]
 
-    # AA: Image - for DF
-    ##image = sample["image"]
 
     # Unconditional image captioning
     inputs = processor(image, return_tensors="pt")
 
+
     out = model.generate(**inputs)
     caption_eng = processor.decode(out[0], skip_special_tokens=True)
 
+
     # Translate caption from English to Spanish
     trans_inputs = trans_tokenizer.encode(caption_eng, return_tensors="pt")
     trans_out = trans_model.generate(trans_inputs)
     caption_es = trans_tokenizer.decode(trans_out[0], skip_special_tokens=True)
 
-
+
+    return image, caption_eng
+
+
 
 
 demo = gr.Interface(
@@ -96,17 +97,15 @@ demo = gr.Interface(
     inputs=None,
     outputs=[
         gr.Image(type="pil", label="Random Image"),
-        gr.Textbox(label="Caption (English)")
-        gr.Textbox(label="Caption (Spanish)")
+        gr.Textbox(label="Caption (English)")
     ],
     title="Image Captioning (with English to Spanish translation)",
-    description="Selects a random image
+    description="Selects a random COCO image from 20 samples; generates a BLIP caption; then translates the (English) caption to Spanish."
 )
 
 
 
-demo.launch()
-
 
 
 
+demo.launch()
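The deleted "BB" branch was broken before this commit: its `image = Image.open(img_path).convert("RGB")` line referenced an `img_path` that was never assigned anywhere in the file. A minimal sketch of what that branch presumably intended, reconstructed from the removed comments and the removed `os`/`random` imports (the `img_path` definition below is a guess, not the author's code):

    import os
    import random
    from PIL import Image

    IMAGE_FOLDER = "Photos"

    # Pick a random file from the local Photos folder and load it as RGB.
    # img_path was never defined in the committed code; this is one plausible fix.
    img_path = os.path.join(IMAGE_FOLDER, random.choice(os.listdir(IMAGE_FOLDER)))
    image = Image.open(img_path).convert("RGB")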
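The translation step calls `MarianTokenizer.from_pretrained` and `MarianMTModel.from_pretrained`, so both classes must be imported; the collapsed middle of the `from transformers import (...)` block (file lines 15-18, hidden by the diff) may already name them. If it does not, the fix is two lines in the existing import, sketched here:

    from transformers import (
        BlipProcessor,
        BlipForConditionalGeneration,
        MarianTokenizer,   # used for trans_tokenizer
        MarianMTModel,     # used for trans_model
    )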
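The commit message is "Test DF again", and the commented-out prints it adds suggest the open question is what `pd.DataFrame(samples)` actually holds. A quick check along the same lines, assuming (as the new in-code comment asserts) that the `image` column decodes to PIL images:

    print(df.head(20))                 # show the 20 sampled rows
    first_image = df.loc[0, "image"]
    print(type(first_image))           # a PIL image type if decoding worked
    # PIL images expose .convert(); a dict here would mean the column was not decoded
    assert hasattr(first_image, "convert")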
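One mismatch survives the commit: `caption_es` is still computed but never returned, and the Interface now lists only an English textbox even though the title and description both promise a Spanish translation. A sketch of one way to reconcile them, assuming the Interface's `fn` argument (hidden in the collapsed context) is `caption_random_image`:

    def caption_random_image():
        # pick random row
        sample = df.sample(1).iloc[0]
        image = sample["image"]

        # Unconditional image captioning
        inputs = processor(image, return_tensors="pt")
        out = model.generate(**inputs)
        caption_eng = processor.decode(out[0], skip_special_tokens=True)

        # Translate caption from English to Spanish
        trans_inputs = trans_tokenizer.encode(caption_eng, return_tensors="pt")
        trans_out = trans_model.generate(trans_inputs)
        caption_es = trans_tokenizer.decode(trans_out[0], skip_special_tokens=True)

        # Return all three values so they match the three output components below.
        return image, caption_eng, caption_es

    demo = gr.Interface(
        fn=caption_random_image,  # assumed; the fn= line sits in collapsed context
        inputs=None,
        outputs=[
            gr.Image(type="pil", label="Random Image"),
            gr.Textbox(label="Caption (English)"),   # note the commas between items
            gr.Textbox(label="Caption (Spanish)"),
        ],
        title="Image Captioning (with English to Spanish translation)",
        description="Selects a random COCO image from 20 samples; generates a BLIP caption; then translates the (English) caption to Spanish.",
    )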