|
|
|
|
|
from transformers import pipeline |
|
|
import gradio as gr |
|
|
from PIL import Image, ImageDraw |
|
|
import scipy.io.wavfile as wavfile |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# DETR (ResNet-50 backbone) object detector: for an input image it returns a
# list of dicts, each carrying 'score', 'label' and 'box' (xmin/ymin/xmax/ymax),
# which the functions below consume.
object_detector = pipeline("object-detection", model="facebook/detr-resnet-50")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# VITS text-to-speech model: returns a dict with 'audio' (the waveform array)
# and 'sampling_rate', used by generate_audio() to write a WAV file.
narrator = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_audio(text):
    """Narrate *text* with the TTS model and save the result as a WAV file.

    Args:
        text (str): The sentence to be spoken.

    Returns:
        str: Path of the WAV file that was written.
    """
    # Run the text-to-speech pipeline; it yields a dict holding the raw
    # waveform under 'audio' and the sample rate under 'sampling_rate'.
    speech = narrator(text)

    # Persist the waveform to disk so Gradio can serve it back to the user.
    output_path = "finetuned_output.wav"
    wavfile.write(output_path,
                  rate=speech["sampling_rate"],
                  data=speech["audio"][0])

    return output_path
|
|
|
|
|
def read_objects(detection_objects):
    """Build a natural-language sentence describing the detected objects.

    Args:
        detection_objects (list): Detection dicts as returned by the
            object-detection pipeline; each must have a 'label' key.

    Returns:
        str: A sentence such as "This picture contains 2 cats and 1 dog."
             or "This picture contains no objects." when nothing was found.
    """
    # Tally how many times each label was detected (insertion order is kept,
    # so the sentence lists objects in first-seen order).
    object_counts = {}
    for detection in detection_objects:
        label = detection['label']
        object_counts[label] = object_counts.get(label, 0) + 1

    # Edge case: without this guard an empty detection list produced the
    # broken sentence "This picture contains."
    if not object_counts:
        return "This picture contains no objects."

    response = "This picture contains"
    labels = list(object_counts)
    for i, label in enumerate(labels):
        count = object_counts[label]
        response += f" {count} {label}"
        if count > 1:
            # Naive pluralization (append "s"); adequate for COCO-style labels.
            response += "s"
        # Separate items with commas, joining the final pair with "and".
        if i < len(labels) - 2:
            response += ","
        elif i == len(labels) - 2:
            response += " and"

    return response + "."
|
|
|
|
|
def draw_bounding_boxes(image, object_detections):
    """
    Draws bounding boxes around detected objects on a PIL image.

    Args:
        image (PIL.Image): The input image (annotated in place).
        object_detections (list): A list of dictionaries, where each dictionary
            represents a detected object and has the following keys:
            - 'score': the confidence score of the detection
            - 'label': the label of the detected object
            - 'box': a dictionary with keys 'xmin', 'ymin', 'xmax', 'ymax'
              representing the bounding box coordinates.

    Returns:
        PIL.Image: The input image with bounding boxes drawn around the
        detected objects.
    """
    draw = ImageDraw.Draw(image)
    for detection in object_detections:
        box = detection['box']
        label = detection['label']
        score = detection['score']

        # Outline the detected object in red.
        draw.rectangle((box['xmin'], box['ymin'], box['xmax'], box['ymax']),
                       outline=(255, 0, 0), width=2)

        # Caption just above the box. Clamp the y coordinate to 0: the
        # original `ymin - 20` went negative for boxes touching the top
        # edge, drawing the label off-canvas where it was clipped away.
        text = f"{label} ({score:.2f})"
        draw.text((box['xmin'], max(box['ymin'] - 20, 0)), text, fill=(255, 0, 0))

    return image
|
|
|
|
|
def detect_object(image):
    """Detect objects in *image*, annotate it, and narrate what was found.

    This is the Gradio callback: it chains the detector, the box drawer,
    the sentence builder, and the TTS step.

    Args:
        image (PIL.Image): The user-supplied input image.

    Returns:
        tuple: (annotated PIL.Image, path to the narration WAV file).
    """
    detections = object_detector(image)
    annotated_image = draw_bounding_boxes(image, detections)
    description = read_objects(detections)
    audio_path = generate_audio(description)
    return annotated_image, audio_path
|
|
|
|
|
# Shut down any Gradio servers left running from previous executions.
gr.close_all()

# Wire the detector into a simple web UI: one image in, the annotated image
# plus the spoken description out.
demo = gr.Interface(fn=detect_object,
                    inputs=[gr.Image(label="Select Image",type="pil")],
                    outputs=[gr.Image(label="Processed Image", type="pil"), gr.Audio(label="Generated Audio")],
                    title="@IT AI Enthusiast (https://www.youtube.com/@itaienthusiast/) - Project 7: Object Detector with Audio",
                    description="THIS APPLICATION WILL BE USED TO HIGHLIGHT OBJECTS AND GIVES AUDIO DESCRIPTION FOR THE PROVIDED INPUT IMAGE.")
demo.launch()