prograk commited on
Commit
dba6b94
·
verified ·
1 Parent(s): e763e16

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +132 -0
app.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
import scipy.io.wavfile as wavfile

# Use a pipeline as a high-level helper
from transformers import pipeline

# Local snapshot paths, kept for offline development/testing:
# model_path = "../Models/models--facebook--detr-resnet-50/snapshots/1d5f47bd3bdd2c4bbfa585418ffe6da5028b4c0b"
# model_path2 = "../Models/models--kakao-enterprise--vits-ljs/snapshots/3bcb8321394f671bd948ebf0d086d694dda95464"

# DETR object-detection model; downloaded from the Hugging Face Hub on first run.
# object_detector = pipeline("object-detection", model=model_path)
object_detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# VITS text-to-speech model used to narrate the detection summary.
# narrator = pipeline("text-to-speech", model=model_path2)
narrator = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")
15
+
16
# Define the function to generate audio from text
def generate_audio(text, output_path="output.wav"):
    """Synthesize speech for *text* with the TTS pipeline and save it as WAV.

    :param text: The text to narrate.
    :param output_path: Where to write the WAV file. Defaults to
        ``"output.wav"`` so existing callers keep their behavior; the
        parameter makes the destination configurable instead of hard-coded.
    :return: The path of the written WAV file.
    """
    # Generate the narrated audio for the text
    narrated_text = narrator(text)

    # Save the audio to a WAV file.
    # NOTE(review): indexing audio[0] assumes the pipeline returns the
    # waveform with a leading batch-like axis — confirm against the
    # text-to-speech pipeline output format.
    wavfile.write(output_path, rate=narrated_text["sampling_rate"],
                  data=narrated_text["audio"][0])

    # Return the path to the saved audio file
    return output_path
27
+
28
def read_objects(detection_objects):
    """Build a natural-language summary of object-detection results.

    :param detection_objects: List of detection dicts, each containing at
        least a ``'label'`` key (as produced by the object-detection pipeline).
    :return: A sentence such as
        ``"This picture contains 1 dog, 2 cats and 1 bird."``
    """
    # Count the occurrences of each label, preserving first-seen order
    # (dict insertion order), which fixes the order of the sentence.
    object_counts = {}
    for detection in detection_objects:
        label = detection['label']
        object_counts[label] = object_counts.get(label, 0) + 1

    # Robustness fix: the original emitted the malformed sentence
    # "This picture contains." when nothing was detected.
    if not object_counts:
        return "This picture contains no objects."

    # Generate the response string, joining items with "," and a final "and".
    response = "This picture contains"
    labels = list(object_counts.keys())
    for i, label in enumerate(labels):
        count = object_counts[label]
        response += f" {count} {label}"
        if count > 1:
            response += "s"  # naive pluralization: just append "s"
        if i < len(labels) - 2:
            response += ","
        elif i == len(labels) - 2:
            response += " and"

    response += "."

    return response
55
+
56
def draw_bounding_boxes(image, detections, font_path=None, font_size=20):
    """
    Return a copy of *image* with a red rectangle and a "label score"
    caption drawn for every detection.

    :param image: PIL.Image object to annotate; the original is not modified.
    :param detections: List of detection results, each a dictionary with
                       'score', 'label', and 'box' keys; 'box' itself holds
                       'xmin', 'ymin', 'xmax', 'ymax'.
    :param font_path: Optional path to a TrueType font file for the captions.
    :param font_size: Point size used when font_path is given.
    :return: Annotated PIL.Image copy.
    """
    # Work on a copy so the caller's image stays untouched.
    annotated = image.copy()
    draw = ImageDraw.Draw(annotated)

    # A TTF path allows scalable text; the default bitmap font has a fixed
    # size (workaround: supply a TTF file via font_path when size matters).
    if font_path:
        font = ImageFont.truetype(font_path, font_size)
    else:
        font = ImageFont.load_default()

    for det in detections:
        box = det['box']
        top_left = (box['xmin'], box['ymin'])
        bottom_right = (box['xmax'], box['ymax'])

        # Bounding box around the detected object.
        draw.rectangle([top_left, bottom_right], outline="red", width=3)

        # Caption: label plus confidence, e.g. "dog 0.98".
        caption = f"{det['label']} {det['score']:.2f}"

        # Measure the caption so a filled background can be drawn behind it
        # for readability against the image.
        if font_path:
            bbox = draw.textbbox(top_left, caption, font=font)
        else:
            bbox = draw.textbbox(top_left, caption)

        draw.rectangle([(bbox[0], bbox[1]), (bbox[2], bbox[3])], fill="red")
        draw.text(top_left, caption, fill="white", font=font)

    return annotated
105
+
106
+ # raw_image = Image.open("../Files/dog.png")
107
+ #
108
+ # output = object_detector(raw_image)
109
+ #
110
+ # process_image = draw_bounding_boxes(raw_image, output)
111
+ #
112
+ # process_image.show()
113
+
114
+ # print(output)
115
+
116
def detect_object(image):
    """Run the full pipeline on one image: detect, annotate, narrate.

    :param image: PIL.Image supplied by the Gradio input component.
    :return: Tuple of (annotated PIL.Image, path to the generated WAV file).
    """
    detections = object_detector(image)
    annotated_image = draw_bounding_boxes(image, detections)
    description = read_objects(detections)
    audio_path = generate_audio(description)
    return annotated_image, audio_path
123
+
124
# Shut down any Gradio servers left over from a previous run.
gr.close_all()

# Wire the pipeline into a simple web UI: one image in, annotated image
# plus narrated audio out.
demo = gr.Interface(fn=detect_object,
                    inputs=[gr.Image(label="Select Image",type="pil")],
                    outputs=[gr.Image(label="Processed Image", type="pil"), gr.Audio(label="Generated Audio")],
                    title="@GenAILearniverse Project 7: Object Detector with Audio",
                    description="THIS APPLICATION WILL BE USED TO HIGHLIGHT OBJECTS AND GIVES AUDIO DESCRIPTION FOR THE PROVIDED INPUT IMAGE.")

demo.launch()