deekshitha9876 commited on
Commit
41c4492
·
verified ·
1 Parent(s): eb8ddf5

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +127 -0
app.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from collections import Counter

import gradio as gr
import scipy.io.wavfile as wavfile
from PIL import Image, ImageDraw, ImageFont
from transformers import pipeline
8
+
9
+
10
+
11
+
12
# Text-to-speech pipeline used to narrate the detection summary.
# NOTE(review): both pipelines download model weights on first use — network
# access is required the first time the app starts.
narrator = pipeline("text-to-speech",
                    model="kakao-enterprise/vits-ljs")

# Object-detection pipeline (DETR, ResNet-50 backbone, per the model id).
object_detector = pipeline("object-detection",
                           model="facebook/detr-resnet-50")
17
+
18
+
19
def generate_audio(text):
    """Synthesize *text* to speech and persist it as a WAV file.

    :param text: Sentence to narrate.
    :return: Path of the written audio file ("output.wav").
    """
    speech = narrator(text)
    # speech["audio"] is indexed at [0] — presumably the first (only)
    # waveform in a batch; confirm against the TTS pipeline's output shape.
    sample_rate = speech["sampling_rate"]
    waveform = speech["audio"][0]
    wavfile.write("output.wav", rate=sample_rate, data=waveform)
    return "output.wav"
28
+
29
+
30
def read_objects(detection_objects):
    """Build a natural-language summary of detected objects.

    :param detection_objects: List of detection dicts, each carrying a
        'label' key (the detector's class name for one detected instance).
    :return: A sentence such as "This picture contains 1 cat, 1 dog and
        2 birds." — counts are naively pluralized by appending "s".
    """
    # Counter preserves first-seen order (insertion-ordered dict), matching
    # the order the detector reported the labels in.
    object_counts = Counter(detection['label'] for detection in detection_objects)

    # Robustness: without this guard an empty detection list produced the
    # malformed sentence "This picture contains."
    if not object_counts:
        return "This picture contains no recognizable objects."

    response = "This picture contains"
    labels = list(object_counts)
    for i, label in enumerate(labels):
        response += f" {object_counts[label]} {label}"
        if object_counts[label] > 1:
            response += "s"
        # Oxford-comma-free list: commas between items, " and" before the last.
        if i < len(labels) - 2:
            response += ","
        elif i == len(labels) - 2:
            response += " and"

    response += "."
    return response
57
+
58
+
59
+
60
def draw_bounding_boxes(image, detections, font_path=None, font_size=20):
    """
    Draws bounding boxes on the given image based on the detections.

    :param image: PIL.Image object (left untouched; a copy is annotated).
    :param detections: List of detection results, where each result is a
        dictionary containing 'score', 'label', and 'box' keys. 'box' itself
        is a dictionary with 'xmin', 'ymin', 'xmax', 'ymax'.
    :param font_path: Path to the TrueType font file to use for text.
    :param font_size: Size of the font to use for text (only honored when
        font_path is given — see note below).
    :return: PIL.Image object with bounding boxes drawn.
    """
    draw_image = image.copy()
    draw = ImageDraw.Draw(draw_image)

    if font_path:
        font = ImageFont.truetype(font_path, font_size)
    else:
        # NOTE(review): load_default() ignores font_size; newer Pillow
        # supports load_default(size=...) — confirm the installed version
        # before relying on it.
        font = ImageFont.load_default()

    for detection in detections:
        box = detection['box']
        xmin = box['xmin']
        ymin = box['ymin']
        xmax = box['xmax']
        ymax = box['ymax']

        draw.rectangle([(xmin, ymin), (xmax, ymax)], outline="red", width=3)

        label = detection['label']
        score = detection['score']
        text = f"{label} {score:.2f}"

        # textbbox accepts font= in all cases, so the original duplicated
        # if/else branch (font passed vs. omitted) collapses to one call —
        # omitting font just meant "measure with the default font" anyway.
        text_size = draw.textbbox((xmin, ymin), text, font=font)

        # Filled backdrop behind the label so white text stays readable.
        draw.rectangle([(text_size[0], text_size[1]), (text_size[2], text_size[3])], fill="red")
        draw.text((xmin, ymin), text, fill="white", font=font)

    return draw_image
109
+
110
+
111
def detect_object(image):
    """Detect objects in *image*, annotate it, and narrate the findings.

    :param image: PIL.Image supplied by the Gradio input component.
    :return: Tuple of (annotated PIL.Image, path to generated WAV file).
    """
    detections = object_detector(image)
    annotated_image = draw_bounding_boxes(image, detections)
    summary = read_objects(detections)
    audio_path = generate_audio(summary)
    return annotated_image, audio_path
118
+
119
+
120
# Gradio UI wiring: one image in, annotated image + narration audio out.
# Launching at import time is intentional — this file is the app entry point.
demo = gr.Interface(fn=detect_object,
                    inputs=[gr.Image(label="Select Image", type="pil")],
                    outputs=[gr.Image(label="Processed Image", type="pil"), gr.Audio(label="Generated Audio")],
                    title="@GenAILearniverse Project 7: Object Detector with Audio",
                    description="THIS APPLICATION WILL BE USED TO HIGHLIGHT OBJECTS AND GIVES AUDIO DESCRIPTION FOR THE PROVIDED INPUT IMAGE.")
demo.launch()
126
+
127
+ # print(output)