# app.py
# import gradio as gr
# from transformers import BlipProcessor, BlipForConditionalGeneration
# from gtts import gTTS
# import io
# from PIL import Image
# # -------------------------------
# # Load BLIP image-captioning model (large checkpoint)
# # -------------------------------
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
# model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
# # -------------------------------
# # Generate caption function
# # -------------------------------
# # def generate_caption_tts(image):
# #     caption = generate_caption(model, processor, image)
# #     audio_file = text_to_audio_file(caption)
# #     return caption, audio_file  # return file path, not BytesIO
# # -------------------------------
# # Convert text to speech using pyttsx3 (offline TTS)
# # -------------------------------
# import tempfile
# import pyttsx3
# def text_to_audio_file(text):
#     # Create a temporary file
#     tmp_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
#     tmp_path = tmp_file.name
#     tmp_file.close()
#     engine = pyttsx3.init()
#     engine.save_to_file(text, tmp_path)
#     engine.runAndWait()
#     return tmp_path
# def generate_caption_from_image(model, processor, image):
#     # image: PIL.Image
#     inputs = processor(images=image, return_tensors="pt")
#     out = model.generate(**inputs)
#     caption = processor.decode(out[0], skip_special_tokens=True)
#     return caption
# # -------------------------------
# # Gradio interface: Caption + Audio
# # -------------------------------
# def generate_caption_tts(image):
#     caption = generate_caption_from_image(model, processor, image)  # uses global model/processor
#     # audio_file = text_to_audio_file(caption)
#     return caption
# interface = gr.Interface(
#     fn=generate_caption_tts,
#     inputs=gr.Image(type="numpy"),
#     outputs=[gr.Textbox(label="Generated Caption")],
#     title="Image Captioning for Visually Impaired",
#     description="Upload an image, get a caption and audio description."
# )
# interface.launch()
# # demo.launch(share=True)
import gradio as gr
from transformers import (
    BlipProcessor,
    BlipForConditionalGeneration,
    BlipForQuestionAnswering,
    pipeline
)
from PIL import Image
import torch
from gtts import gTTS
import tempfile
# ----------------------
# Device setup
# ----------------------
device = "cuda" if torch.cuda.is_available() else "cpu"
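# Note: the BLIP weights are moved onto this device below; input tensors are moved the
# same way (via .to(device)) inside the inference functions so weights and inputs match.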
# ----------------------
# Load Models Once
# ----------------------
print("πŸ”„ Loading models...")
# Captioning
caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)
# VQA
vqa_processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
vqa_model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(device)
# Translation
translation_models = {
    "Hindi": pipeline("translation", model="Helsinki-NLP/opus-mt-en-hi"),
    "French": pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr"),
    "Spanish": pipeline("translation", model="Helsinki-NLP/opus-mt-en-es"),
}
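# Adding another target language only needs another entry here, assuming a matching
# Helsinki-NLP/opus-mt-en-<code> checkpoint exists on the Hub (e.g. opus-mt-en-de for German).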
# Safety Moderation Pipeline
moderation_model = pipeline("text-classification", model="unitary/toxic-bert")
print("βœ… All models loaded!")
# ----------------------
# Safety Filter Function
# ----------------------
def is_caption_safe(caption):
    result = moderation_model(caption)[0]
    label = result["label"]
    score = result["score"]
    # unitary/toxic-bert returns a toxicity label (e.g. "toxic") with a confidence score;
    # only block when the top label is "toxic" with high confidence
    if label.lower() == "toxic" and score > 0.7:
        return False
    return True
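# The 0.7 threshold above is a judgment call: lowering it makes the filter stricter,
# raising it reduces false positives on borderline captions.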
# ----------------------
# Caption + Translate + Speak
# ----------------------
def generate_caption_translate_speak(image, target_lang):
    # Step 1: Caption
    inputs = caption_processor(images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        out = caption_model.generate(**inputs, max_new_tokens=50)
    english_caption = caption_processor.decode(out[0], skip_special_tokens=True)
    # Step 1.5: Safety Check
    if not is_caption_safe(english_caption):
        return "⚠️ Warning: Unsafe or inappropriate content detected!", "", None
    # Step 2: Translate
    if target_lang in translation_models:
        translated = translation_models[target_lang](english_caption)[0]['translation_text']
    else:
        translated = "Translation not available"
    # Step 3: Generate Speech (English caption for now)
    tts = gTTS(english_caption, lang="en")
    tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    tts.save(tmp_file.name)
    return english_caption, translated, tmp_file.name
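# Sketch (not wired in): the "for now" above could be lifted by speaking the translated
# caption instead, assuming gTTS supports the target language code. The mapping below is
# an assumption for illustration:
# lang_codes = {"Hindi": "hi", "French": "fr", "Spanish": "es"}
# tts = gTTS(translated, lang=lang_codes.get(target_lang, "en"))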
# ----------------------
# VQA
# ----------------------
def vqa_answer(image, question):
    inputs = vqa_processor(image, question, return_tensors="pt").to(device)
    with torch.no_grad():
        out = vqa_model.generate(**inputs, max_new_tokens=50)
    answer = vqa_processor.decode(out[0], skip_special_tokens=True)
    # Run safety filter on answers too
    if not is_caption_safe(answer):
        return "⚠️ Warning: Unsafe or inappropriate content detected!"
    return answer
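# Example usage outside the UI (a sketch; assumes a local file named "photo.jpg" exists):
# print(vqa_answer(Image.open("photo.jpg"), "How many people are in the picture?"))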
# ----------------------
# Gradio UI
# ----------------------
with gr.Blocks(title="BLIP Vision App") as demo:
    gr.Markdown("## 🖼️ BLIP: Image Captioning + Translation + Speech + VQA (with Safety Filter)")
    with gr.Tab("Caption + Translate + Speak"):
        with gr.Row():
            img_in = gr.Image(type="pil", label="Upload Image")
            lang_in = gr.Dropdown(["Hindi", "French", "Spanish"], label="Translate To", value="Hindi")
        eng_out = gr.Textbox(label="English Caption")
        trans_out = gr.Textbox(label="Translated Caption")
        audio_out = gr.Audio(label="Spoken Caption", type="filepath")
        btn1 = gr.Button("Generate Caption, Translate & Speak")
        btn1.click(generate_caption_translate_speak, inputs=[img_in, lang_in], outputs=[eng_out, trans_out, audio_out])
    with gr.Tab("Visual Question Answering (VQA)"):
        with gr.Row():
            img_vqa = gr.Image(type="pil", label="Upload Image")
            q_in = gr.Textbox(label="Ask a Question about the Image")
        ans_out = gr.Textbox(label="Answer")
        btn2 = gr.Button("Ask")
        btn2.click(vqa_answer, inputs=[img_vqa, q_in], outputs=ans_out)

demo.launch()