jovian committed on
Commit
ed2b47e
·
1 Parent(s): 07d60ca
Files changed (7) hide show
  1. .gitignore +1 -0
  2. .gradio/certificate.pem +31 -0
  3. app.py +589 -0
  4. backup.py +438 -0
  5. model/best.pt +3 -0
  6. model/company_model.pt +3 -0
  7. requirements.txt +10 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ venv/
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
app.py ADDED
@@ -0,0 +1,589 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import cv2
4
+ from sahi.predict import get_sliced_prediction
5
+ from sahi import AutoDetectionModel
6
+ from PIL import Image
7
+ import plotly.graph_objects as go
8
+ import torch
9
+ import spaces
10
+
11
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
12
+
13
+
14
class Detection:
    """SAHI-sliced YOLOv8 defect detector with drawing and charting helpers.

    Handles four defect categories (Nicks, Dents, Scratches, Pittings):
    runs sliced inference, draws boxes with OpenCV, and builds plotly
    figures for the Gradio UI.
    """

    def __init__(self):
        # Set the model path and confidence threshold
        yolov8_model_path = "./model/best.pt"  # Update to your model path

        # Initialize the AutoDetectionModel.
        # NOTE(review): the module-level `device` variable is computed but
        # never used here — confirm whether the model should load on it.
        self.model = AutoDetectionModel.from_pretrained(
            model_type='yolov8',
            model_path=yolov8_model_path,
            confidence_threshold=0.3,
            device='cpu'  # Change to 'cuda:0' if you are using a GPU
        )

    def detect_from_image(self, image):
        """Run sliced (SAHI) prediction and return COCO-style annotations."""
        results = get_sliced_prediction(
            image=image,
            detection_model=self.model,
            slice_height=256,
            slice_width=256,
            overlap_height_ratio=0.2,
            overlap_width_ratio=0.2,
            postprocess_type='NMS',
            postprocess_match_metric='IOU',
            postprocess_match_threshold=0.1,
            postprocess_class_agnostic=True,
        )

        # Retrieve COCO annotations
        return results.to_coco_annotations()

    def draw_annotations(self, image, annotations):
        """Draw bounding boxes and labels on `image` in place; return it.

        NOTE(review): the colors below are commented as BGR, but the image
        handed in by the app comes from PIL (RGB) — confirm channel order.
        """
        category_styles = {
            'Nicks': {'color': (255, 60, 60), 'thickness': 2},       # Nicks (Red)
            'Dents': {'color': (255, 148, 156), 'thickness': 2},     # Dents (Light Red)
            'Scratches': {'color': (255, 116, 28), 'thickness': 2},  # Scratches (Orange)
            'Pittings': {'color': (255, 180, 28), 'thickness': 2}    # Pittings (Yellow)
        }

        for annotation in annotations:
            bbox = annotation['bbox']  # COCO format: [x, y, width, height]
            category_name = annotation['category_name']
            score = annotation.get('score', 0)  # confidence; optional, default 0

            # Get color and thickness; default to red for unknown categories.
            style = category_styles.get(category_name, {'color': (255, 0, 0), 'thickness': 2})

            # Draw rectangle
            cv2.rectangle(image,
                          (int(bbox[0]), int(bbox[1])),
                          (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])),
                          style['color'],
                          style['thickness'])

            # Category label with confidence, drawn above the rectangle.
            text = f"{category_name}: {score:.2f}"
            cv2.putText(image,
                        text,
                        (int(bbox[0]), int(bbox[1] - 10)),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        style['color'],
                        2)

        return image

    def generate_individual_graphs(self, annotations):
        """Build one size-distribution figure per defect category.

        Returns the figures as a 4-tuple: (Nicks, Dents, Scratches, Pittings).
        """
        # Collect bbox areas (width * height) per category.
        category_areas = {'Nicks': [], 'Dents': [], 'Scratches': [], 'Pittings': []}
        for annotation in annotations:
            category_name = annotation['category_name']
            area = annotation['bbox'][2] * annotation['bbox'][3]
            if category_name in category_areas:
                category_areas[category_name].append(area)

        individual_graphs = {}
        for category in ['Nicks', 'Dents', 'Scratches', 'Pittings']:
            areas = category_areas[category]
            fig = go.Figure()
            if areas:
                # BUGFIX: go.Histogram computes its bins in the browser, so
                # `histogram_data.y` is always None on the Python side; the
                # previous code therefore built the swapped-axes bar chart
                # from None/raw data. Bin with numpy instead and plot
                # horizontal bars (frequency on x, size on y) as intended.
                counts, edges = np.histogram(areas, bins=50)
                centers = (edges[:-1] + edges[1:]) / 2  # one bar per bin
                fig.add_trace(go.Bar(
                    x=counts,
                    y=centers,
                    orientation='h',
                    name=category,
                    marker_color=self.get_color(category),
                    opacity=1,
                ))
            else:
                fig.add_trace(go.Bar(x=[], y=[], name=category))  # empty graph

            fig.update_layout(
                title=f'Size Distribution of {category}',
                xaxis_title='Frequency',
                yaxis_title='Size',
                showlegend=True
            )
            individual_graphs[category] = fig

        return (individual_graphs['Nicks'], individual_graphs['Dents'],
                individual_graphs['Scratches'], individual_graphs['Pittings'])

    def generate_frequency_graph(self, annotations):
        """Build a grouped bar chart counting annotations per defect category."""
        category_counts = {'Nicks': 0, 'Dents': 0, 'Scratches': 0, 'Pittings': 0}

        # Count occurrences of each defect category
        for annotation in annotations:
            category_name = annotation['category_name']
            if category_name in category_counts:
                category_counts[category_name] += 1

        freq_chart = go.Figure()
        category_colors = {
            'Nicks': 'rgba(255, 60, 60, 0.7)',       # Red
            'Dents': 'rgba(255, 148, 156, 0.7)',     # Light Red
            'Scratches': 'rgba(255, 116, 28, 0.7)',  # Orange
            'Pittings': 'rgba(255, 180, 28, 0.7)'    # Yellow
        }

        for category, count in category_counts.items():
            freq_chart.add_trace(go.Bar(
                x=[category],
                y=[count],
                name=category,
                marker_color=category_colors.get(category, 'blue')
            ))

        freq_chart.update_layout(
            title='Frequency of Defects',
            xaxis_title='Defect Category',
            yaxis_title='Count',
            barmode='group'
        )

        return freq_chart

    def get_color(self, category_name):
        """Return the plotly rgba color string for a defect category."""
        category_styles = {
            'Nicks': 'rgba(255, 60, 60, 0.7)',       # Red
            'Dents': 'rgba(255, 148, 156, 0.7)',     # Light Red
            'Scratches': 'rgba(255, 116, 28, 0.7)',  # Orange
            'Pittings': 'rgba(255, 180, 28, 0.7)'    # Yellow
        }
        # BUGFIX: the fallback used to be the tuple (255, 0, 0), which is not
        # a valid plotly marker color; return an rgba string instead.
        return category_styles.get(category_name, 'rgba(255, 0, 0, 0.7)')
203
+
204
+
205
+
206
# Module-level singleton: constructing Detection loads the YOLOv8 weights
# once at import time; all Gradio handlers below share this instance.
detection = Detection()
207
+
208
def upload_image(image):
    """Echo the uploaded image back unchanged so Gradio can display it."""
    return image
211
+
212
@spaces.GPU
def apply_detection(image):
    """Run object detection on the uploaded image.

    Returns a (PIL.Image, annotations) pair; the annotation list is kept in
    a gr.State so the graph buttons can reuse it.

    NOTE(review): the model was loaded with device='cpu' in Detection.__init__,
    so the @spaces.GPU reservation may go unused here — confirm.
    """
    # Convert image from PIL to NumPy array
    img = np.array(image)

    # Perform detection and get COCO annotations
    annotations = detection.detect_from_image(img)

    # Draw the annotations on the image using OpenCV (mutates img in place)
    annotated_image = detection.draw_annotations(img, annotations)

    # Convert back to PIL format for Gradio output
    return Image.fromarray(annotated_image), annotations
226
+
227
def generate_graphs_btn(annotations):
    """Return the four size-distribution figures (Nicks, Dents, Scratches, Pittings).

    BUGFIX: the previous version also computed the frequency graph and
    silently discarded the result; that wasted work is removed — the
    frequency graph has its own dedicated button/handler.
    """
    return detection.generate_individual_graphs(annotations)
233
+
234
+
235
+
236
+
237
+
238
+ # Function to handle login authentication
239
def login_auth(username, password):
    """Placeholder authentication: succeed when username equals password.

    Raises gr.Error on mismatch; returns True on success (stored in the
    login_successful gr.State by the caller).

    SECURITY(review): this scheme lets ANY user in whose password equals
    their username — replace with a real credential store before production.
    """
    import hmac  # local import: constant-time string comparison

    # compare_digest avoids a timing side channel on the comparison.
    if not hmac.compare_digest(username or "", password or ""):
        raise gr.Error("Username or Password is wrong")
    return True
243
+
244
+
245
+
246
+ # Function to create individual bar charts for each defect type
247
# Function to create individual bar charts for each defect type
def generate_confidence_bar_chart(annotations):
    """Build one confidence-score distribution chart per defect category.

    Returns a list of four plotly figures in the fixed order
    [Nicks, Dents, Scratches, Pittings] (dict insertion order below).
    """
    # Categorize confidence scores into three ranges per defect type.
    confidence_bins = {'<25%': 0, '25%-75%': 0, '>75%': 0}
    defect_bins = {
        "Nicks": confidence_bins.copy(),
        "Dents": confidence_bins.copy(),
        "Scratches": confidence_bins.copy(),
        "Pittings": confidence_bins.copy(),
    }

    # Populate bins based on annotations.
    for annotation in annotations:
        defect = annotation["category_name"]
        # BUGFIX: an unknown category used to raise KeyError; skip it instead.
        if defect not in defect_bins:
            continue
        # BUGFIX: 'score' may be absent (draw_annotations already treats it
        # as optional) — default to 0 rather than raising KeyError.
        score = annotation.get("score", 0) * 100  # convert to percentage
        if score < 25:
            defect_bins[defect]['<25%'] += 1
        elif score <= 75:
            defect_bins[defect]['25%-75%'] += 1
        else:
            defect_bins[defect]['>75%'] += 1

    # Colors per defect (rgba strings understood by plotly).
    category_styles = {
        'Nicks': 'rgba(255, 60, 60, 0.7)',       # Red
        'Dents': 'rgba(255, 148, 156, 0.7)',     # Light Red
        'Scratches': 'rgba(255, 116, 28, 0.7)',  # Orange
        'Pittings': 'rgba(255, 180, 28, 0.7)'    # Yellow
    }

    # Generate one chart per defect type.
    charts = []
    for defect, bins in defect_bins.items():
        fig = go.Figure()
        fig.add_trace(go.Bar(
            name=defect,
            x=list(bins.keys()),    # confidence ranges
            y=list(bins.values()),  # counts
            text=[f"{v} defects" for v in bins.values()],  # hover text
            hoverinfo="text",
            marker_color=category_styles.get(defect, 'rgba(255, 0, 0, 0.7)')
        ))

        fig.update_layout(
            title=f"{defect} Confidence Score Distribution",
            xaxis_title="Confidence Range",
            yaxis_title="Defect Count",
            template="plotly_white"
        )
        charts.append(fig)

    return charts
299
+
300
+
301
+
302
+
303
+
304
+
305
+ # Gradio interface components
306
# Gradio interface components.
# BUGFIXES in the embedded CSS below:
#   * `calc(100vh-80px)` -> `calc(100vh - 80px)`: CSS calc() requires
#     whitespace around '+'/'-', otherwise the declaration is ignored.
#   * removed a stray extra `}` after the `.prose h1..h5` rule that
#     invalidated the remainder of the stylesheet.
with gr.Blocks() as demo:

    # State variable to track login status
    login_successful = gr.State(value=False)

    # Landing header (hidden until login succeeds).
    with gr.Row(visible=False) as header_row:
        gr.HTML("""
<style>
@import url('https://fonts.googleapis.com/css2?family=Ubuntu:wght@300;400;500;700&family=Montserrat:wght@700&family=Open+Sans&family=Poppins:wght@300;400;500;600;700;800&display=swap');

*{
    margin: 0;
    padding: 0;
    box-sizing: border-box;
    font-family: 'Ubuntu',sans-serif;
}

a{
    text-decoration: none;
    color: #000;
}

body{
    background-color: #fff;
}

.gradio-container-5-4-0 .prose * {
    color: #083484;
}

.gradio-container-5-4-0 .prose :first-child {
    margin-top: 85px
}

header{
    padding: 0 80px;
    height: calc(100vh - 80px);
    display: flex;
    align-items: center;
    justify-content: space-between;
}

header .left h1 {
    font-size: 80px;
    display: flex;
    justify-content: center;
    margin-top: 100px;
}

header .left span{
    font-size: 80px;
    color: #083484;
    display: flex;
    justify-content: center;
}

header .left .second-line{
    font-size: 80px;
    color: #083484;
    display: flex;
    justify-content: center;
    font-weight: 400;
}

header .left p{
    margin-top: 35px;
    font-stretch: ultra-condensed;
    color: #777;
    display: flex;
    justify-content: center;
    text-align: center;
    margin-bottom: 10px;
}

header .left a{
    display: flex;
    align-items: center;
    background: #083484;
    width: 150px;
    padding: 8px;
    border-radius: 60px;
}

header .left a i{
    background-color: #fff;
    font-size: 24px;
    border-radius: 50%;
    padding: 8px;
}

header .left a span{
    color: #fff;
    margin-left: 22px;
}

.place {
    padding:30px;
    text-align: center;
    overflow: auto;
    margin-top: 500px;
}

.sub-header {
    font-size: 4em;
    text-align: center;
    color: #083484;
    font-family: 'Montserrat',sans-serif;
}

.gradio-container-5-4-0 .prose h1, .gradio-container-5-4-0 .prose h2, .gradio-container-5-4-0 .prose h3, .gradio-container-5-4-0 .prose h4, .gradio-container-5-4-0 .prose h5 {
    margin: var(--spacing-xxl) 0 var(--spacing-lg);
    font-weight: var(--prose-header-text-weight);
    line-height: 1.3;
    color: #083484;
    text-align: center;
}
</style>

<header>
    <div class="left">
        <h1><span>OIS</span><br></h1>
        <span class="second-line">AI Detection Model</span>
        <p>
            The OIS AI Detection Model enhances manufacturing by using the powerful YOLOv11 algorithm on
            a Raspberry Pi for real-time, on-device defect detection. It automates quality control,
            reduces human error, and minimizes downtime. With a user-friendly web interface,
            the model enables offline swift defect identification, seamless integration into
            production, and improving both efficiency and product quality.
        </p>
    </div>

</header>

<section class="place">

    <p class="sub-header">OFFLINE DETECTION</p>

</section>
""")

    # Upload / result columns (hidden until login succeeds).
    with gr.Row(visible=False) as input_row:
        with gr.Column():
            gr.Markdown("### Input (Supported Image: bmp,jpg,png,jpeg,gif)")
            upload_image_component = gr.Image(type="pil", label="Select Image")

        with gr.Column():
            gr.Markdown("### Output")
            output_image_component = gr.Image(type="pil", label="Annotated Image")
            apply_detection_btn = gr.Button("Apply Detection", variant='primary')
            output_annotations = gr.State()  # annotations reused by the graph buttons
            apply_detection_btn.click(apply_detection,
                                      inputs=upload_image_component,
                                      outputs=[output_image_component, output_annotations])

    # Size-distribution plots, one per defect category.
    with gr.Row(visible=False) as area_graph_row:
        nicks_graph_component = gr.Plot(label="Nicks Size Distribution")
        dents_graph_component = gr.Plot(label="Dents Size Distribution")
        scratches_graph_component = gr.Plot(label="Scratches Size Distribution")
        pittings_graph_component = gr.Plot(label="Pittings Size Distribution")

    # Button to generate the size-distribution graphs.
    with gr.Row(visible=False) as area_btn_row:
        graph_btn = gr.Button("Generate Size Distribution Graphs", variant='primary')
        graph_btn.click(generate_graphs_btn, inputs=output_annotations, outputs=[
            nicks_graph_component, dents_graph_component,
            scratches_graph_component, pittings_graph_component
        ])

    # Row for the frequency graph.
    with gr.Row(visible=False) as frequency_graph_row:
        frequency_graph_component = gr.Plot(label="Defect Frequency Distribution")

    # Button for the frequency graph.
    with gr.Row(visible=False) as frequency_btn_row:
        freq_graph_btn = gr.Button("Generate Frequency Graph", variant='primary')
        freq_graph_btn.click(detection.generate_frequency_graph,
                             inputs=output_annotations,
                             outputs=frequency_graph_component)

    # Confidence-distribution plots, one per defect category.
    with gr.Row(visible=False) as confidence_bar_chart_row:
        nicks_confidence_bar_chart = gr.Plot(label="Nicks Confidence Score Distribution")
        dents_confidence_bar_chart = gr.Plot(label="Dents Confidence Score Distribution")
        scratches_confidence_bar_chart = gr.Plot(label="Scratches Confidence Score Distribution")
        pittings_confidence_bar_chart = gr.Plot(label="Pittings Confidence Score Distribution")

    with gr.Row(visible=False) as confidence_btn_row:
        confidence_chart_btn = gr.Button("Generate Confidence Chart", variant="primary")
        confidence_chart_btn.click(
            generate_confidence_bar_chart,
            inputs=output_annotations,  # pass the stored annotations
            outputs=[nicks_confidence_bar_chart, dents_confidence_bar_chart,
                     scratches_confidence_bar_chart, pittings_confidence_bar_chart]
        )

    # Login row, the only section visible initially.
    with gr.Row(visible=True) as login_row:
        with gr.Column():
            gr.Markdown(value="<div style='text-align: center;'><h2>Login Page</h2></div>")
            with gr.Row():
                with gr.Column(scale=2):
                    gr.Markdown("")
                with gr.Column(scale=1, variant='panel'):
                    username_tbox = gr.Textbox(label="User Name", interactive=True)
                    password_tbox = gr.Textbox(label="Password", interactive=True, type='password')
                    submit_btn = gr.Button(value='Submit', variant='primary', size='sm')

                    # On success login_auth stores True in login_successful;
                    # the chained .then() then toggles section visibility.
                    # (When login_auth raises gr.Error the chain is skipped.)
                    submit_btn.click(
                        login_auth,
                        inputs=[username_tbox, password_tbox],
                        outputs=login_successful
                    ).then(
                        lambda login_state: (
                            gr.update(visible=login_state),     # Show header_row
                            gr.update(visible=login_state),     # Show input_row
                            gr.update(visible=login_state),     # Show area_graph_row
                            gr.update(visible=login_state),     # Show area_btn_row
                            gr.update(visible=login_state),     # Show frequency_graph_row
                            gr.update(visible=login_state),     # Show frequency_btn_row
                            gr.update(visible=login_state),     # Show confidence chart
                            gr.update(visible=login_state),     # Show confidence btn
                            gr.update(visible=not login_state)  # Hide login row
                        ),
                        inputs=login_successful,
                        outputs=[header_row,
                                 input_row,
                                 area_graph_row,
                                 area_btn_row,
                                 frequency_graph_row,
                                 frequency_btn_row,
                                 confidence_bar_chart_row,
                                 confidence_btn_row,
                                 login_row]
                    )

                with gr.Column(scale=2):
                    gr.Markdown("")


# Launch the Gradio interface
demo.launch(share=True, show_api=False)
578
+
579
+
580
+
581
+
582
+
583
+
584
+
585
+
586
+
587
+
588
+
589
+
backup.py ADDED
@@ -0,0 +1,438 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import cv2
4
+ from sahi.predict import get_sliced_prediction
5
+ from sahi import AutoDetectionModel
6
+ from PIL import Image
7
+ import plotly.graph_objects as go
8
+ import torch
9
+ import spaces
10
+
11
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
12
+
13
+
14
class Detection:
    """SAHI-sliced YOLOv8 defect detector with drawing and charting helpers.

    Handles four defect categories (Nicks, Dents, Scratches, Pittings):
    runs sliced inference, draws boxes with OpenCV, and builds plotly
    figures for the Gradio UI.
    """

    def __init__(self):
        # Set the model path and confidence threshold
        yolov8_model_path = "./model/best.pt"  # Update to your model path

        # Initialize the AutoDetectionModel.
        # NOTE(review): the module-level `device` variable is computed but
        # never used here — confirm whether the model should load on it.
        self.model = AutoDetectionModel.from_pretrained(
            model_type='yolov8',
            model_path=yolov8_model_path,
            confidence_threshold=0.3,
            device='cpu'  # Change to 'cuda:0' if you are using a GPU
        )

    def detect_from_image(self, image):
        """Run sliced (SAHI) prediction and return COCO-style annotations."""
        results = get_sliced_prediction(
            image=image,
            detection_model=self.model,
            slice_height=256,
            slice_width=256,
            overlap_height_ratio=0.2,
            overlap_width_ratio=0.2,
            postprocess_type='NMS',
            postprocess_match_metric='IOU',
            postprocess_match_threshold=0.1,
            postprocess_class_agnostic=True,
        )

        # Retrieve COCO annotations
        return results.to_coco_annotations()

    def draw_annotations(self, image, annotations):
        """Draw bounding boxes and labels on `image` in place; return it.

        NOTE(review): the colors below are commented as BGR, but the image
        handed in by the app comes from PIL (RGB) — confirm channel order.
        """
        category_styles = {
            'Nicks': {'color': (255, 60, 60), 'thickness': 2},       # Nicks (Red)
            'Dents': {'color': (255, 148, 156), 'thickness': 2},     # Dents (Light Red)
            'Scratches': {'color': (255, 116, 28), 'thickness': 2},  # Scratches (Orange)
            'Pittings': {'color': (255, 180, 28), 'thickness': 2}    # Pittings (Yellow)
        }

        for annotation in annotations:
            bbox = annotation['bbox']  # COCO format: [x, y, width, height]
            category_name = annotation['category_name']
            score = annotation.get('score', 0)  # confidence; optional, default 0

            # Get color and thickness; default to red for unknown categories.
            style = category_styles.get(category_name, {'color': (255, 0, 0), 'thickness': 2})

            # Draw rectangle
            cv2.rectangle(image,
                          (int(bbox[0]), int(bbox[1])),
                          (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])),
                          style['color'],
                          style['thickness'])

            # Category label with confidence, drawn above the rectangle.
            text = f"{category_name}: {score:.2f}"
            cv2.putText(image,
                        text,
                        (int(bbox[0]), int(bbox[1] - 10)),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        style['color'],
                        2)

        return image

    def generate_individual_graphs(self, annotations):
        """Build one area-distribution figure per defect category.

        Returns the figures as a 4-tuple: (Nicks, Dents, Scratches, Pittings).
        """
        # Collect bbox areas (width * height) per category.
        category_areas = {'Nicks': [], 'Dents': [], 'Scratches': [], 'Pittings': []}
        for annotation in annotations:
            category_name = annotation['category_name']
            area = annotation['bbox'][2] * annotation['bbox'][3]
            if category_name in category_areas:
                category_areas[category_name].append(area)

        individual_graphs = {}
        for category in ['Nicks', 'Dents', 'Scratches', 'Pittings']:
            areas = category_areas[category]
            fig = go.Figure()
            if areas:
                # BUGFIX: go.Histogram computes its bins in the browser, so
                # `histogram_data.y` is always None on the Python side; the
                # previous code therefore built the swapped-axes bar chart
                # from None/raw data. Bin with numpy instead and plot
                # horizontal bars (frequency on x, area on y) as intended.
                counts, edges = np.histogram(areas, bins=10)
                centers = (edges[:-1] + edges[1:]) / 2  # one bar per bin
                fig.add_trace(go.Bar(
                    x=counts,
                    y=centers,
                    orientation='h',
                    name=category,
                    marker_color=self.get_color(category),
                    opacity=1,
                ))
            else:
                fig.add_trace(go.Bar(x=[], y=[], name=category))  # empty graph

            fig.update_layout(
                title=f'Area Distribution of {category}',
                xaxis_title='Frequency',
                yaxis_title='Area',
                showlegend=True
            )
            individual_graphs[category] = fig

        return (individual_graphs['Nicks'], individual_graphs['Dents'],
                individual_graphs['Scratches'], individual_graphs['Pittings'])

    def generate_frequency_graph(self, annotations):
        """Build a grouped bar chart counting annotations per defect category."""
        category_counts = {'Nicks': 0, 'Dents': 0, 'Scratches': 0, 'Pittings': 0}

        # Count occurrences of each defect category
        for annotation in annotations:
            category_name = annotation['category_name']
            if category_name in category_counts:
                category_counts[category_name] += 1

        freq_chart = go.Figure()
        category_colors = {
            'Nicks': 'rgba(255, 60, 60, 0.7)',       # Red
            'Dents': 'rgba(255, 148, 156, 0.7)',     # Light Red
            'Scratches': 'rgba(255, 116, 28, 0.7)',  # Orange
            'Pittings': 'rgba(255, 180, 28, 0.7)'    # Yellow
        }

        for category, count in category_counts.items():
            freq_chart.add_trace(go.Bar(
                x=[category],
                y=[count],
                name=category,
                marker_color=category_colors.get(category, 'blue')
            ))

        freq_chart.update_layout(
            title='Frequency of Defects',
            xaxis_title='Defect Category',
            yaxis_title='Count',
            barmode='group'
        )

        return freq_chart

    def get_color(self, category_name):
        """Return the plotly rgba color string for a defect category."""
        category_styles = {
            'Nicks': 'rgba(255, 60, 60, 0.7)',       # Red
            'Dents': 'rgba(255, 148, 156, 0.7)',     # Light Red
            'Scratches': 'rgba(255, 116, 28, 0.7)',  # Orange
            'Pittings': 'rgba(255, 180, 28, 0.7)'    # Yellow
        }
        # BUGFIX: the fallback used to be the tuple (255, 0, 0), which is not
        # a valid plotly marker color; return an rgba string instead.
        return category_styles.get(category_name, 'rgba(255, 0, 0, 0.7)')
203
+
204
+
205
+
206
# Module-level singleton: constructing Detection loads the YOLOv8 weights
# once at import time; all Gradio handlers below share this instance.
detection = Detection()
207
+
208
def upload_image(image):
    """Identity handler: hand the uploaded image straight back to Gradio."""
    return image
211
+
212
@spaces.GPU
def apply_detection(image):
    """Run object detection on the uploaded image.

    Returns a (PIL.Image, annotations) pair so the caller can display the
    annotated image and keep the annotations for later charting.

    NOTE(review): the model was loaded with device='cpu' in Detection.__init__,
    so the @spaces.GPU reservation may go unused here — confirm.
    """
    # Convert image from PIL to NumPy array
    img = np.array(image)

    # Perform detection and get COCO annotations
    annotations = detection.detect_from_image(img)

    # Draw the annotations on the image using OpenCV (mutates img in place)
    annotated_image = detection.draw_annotations(img, annotations)

    # Convert back to PIL format for Gradio output
    return Image.fromarray(annotated_image), annotations
226
+
227
def generate_graphs_btn(annotations):
    """Return the four area-distribution figures (Nicks, Dents, Scratches, Pittings).

    BUGFIX: the previous version also computed the frequency graph and
    silently discarded the result; that wasted work is removed — the
    frequency graph has its own dedicated handler.
    """
    return detection.generate_individual_graphs(annotations)
233
+
234
# Page-level CSS injected via gr.Blocks(css=...).
# BUGFIX: `calc(100vh-80px)` is invalid CSS — calc() requires whitespace
# around the '-' operator — so the height rule was silently dropped.
css = """

@import url('https://fonts.googleapis.com/css2?family=Ubuntu:wght@300;400;500;700&family=Montserrat:wght@700&family=Open+Sans&family=Poppins:wght@300;400;500;600;700;800&display=swap');

*{
    margin: 0;
    padding: 0;
    box-sizing: border-box;
    font-family: 'Ubuntu',sans-serif;
}

a{
    text-decoration: none;
    color: #000;
}

body{
    background-color: #fff;
}

header{
    padding: 0 80px;
    height: calc(100vh - 80px);
    display: flex;
    align-items: center;
    justify-content: space-between;
}

header .left h1 {
    font-size: 80px;
    display: flex;
    justify-content: center;
    margin-top: 17rem;
}

header .left span{
    font-size: 80px;
    color: #083484;
    display: flex;
    justify-content: center;
}

header .left .second-line{
    font-size: 80px;
    color: #083484;
    display: flex;
    justify-content: center;
    font-weight: 400;
}

header .left p{
    margin-top: 35px;
    font-stretch: ultra-condensed;
    color: #777;
    display: flex;
    justify-content: center;
    text-align: center;
    margin-bottom: 10px;
}

header .left a{
    display: flex;
    align-items: center;
    background: #083484;
    width: 150px;
    padding: 8px;
    border-radius: 60px;
}

header .left a i{
    background-color: #fff;
    font-size: 24px;
    border-radius: 50%;
    padding: 8px;
}

header .left a span{
    color: #fff;
    margin-left: 22px;
}

.container {
    padding:30px;
    text-align: center;
    overflow: auto;
    margin-top: 500px;
}

.sub-header {
    font-size: 4em;
    text-align: center;
    color: #083484;
    font-family: 'Montserrat',sans-serif;
}

"""
338
+
339
+
340
+
341
# JavaScript run on page load by gr.Blocks(js=...): forces the Gradio light
# theme by setting the `__theme` query parameter to "light" and reloading the
# page whenever it is not already set — prevents the browser's dark-mode
# preference from overriding the app's styling.
js_func = """
function refresh() {
    const url = new URL(window.location);

    if (url.searchParams.get('__theme') !== 'light') {
        url.searchParams.set('__theme', 'light');
        window.location.href = url.href;
    }
}

"""
352
+
353
+
354
+
355
# Gradio interface components
# Top-level UI definition.  Builds the static landing header (raw HTML styled
# by `css` above), the image upload/detection pair, and the defect-graph
# controls, then launches the app at import time (conventional for a
# Hugging Face Space app.py).
with gr.Blocks(css = css,js=js_func) as demo:

    # Static landing section: hero header plus the "OFFLINE DETECTION"
    # sub-header; purely presentational, no Gradio events attached.
    gr.HTML("""

    <header>
        <div class="left">
            <h1><span>OIS</span><br></h1>
            <span class="second-line">AI Detection Model</span>
            <p>
                The OIS AI Detection Model enhances manufacturing by using the powerful YOLOv11 algorithm on
                a Raspberry Pi for real-time, on-device defect detection. It automates quality control,
                reduces human error, and minimizes downtime. With a user-friendly web interface,
                the model enables offline swift defect identification, seamless integration into
                production, and improving both efficiency and product quality.
            </p>
        </div>

    </header>

    <section class="container">

        <p class="sub-header">OFFLINE DETECTION</p>

    </section>

    """)


    with gr.Row():
        # Image Upload and Display in two columns
        with gr.Column():
            gr.Markdown("### Input")
            upload_image_component = gr.Image(type="pil", label="Select Image")

        with gr.Column():
            gr.Markdown("### Output")
            output_image_component = gr.Image(type="pil", label="Annotated Image")
            apply_detection_btn = gr.Button("Apply Detection")
            # gr.State holds the COCO annotations returned by apply_detection
            # between events, so the graph buttons below can reuse them
            # without re-running detection.
            output_annotations = gr.State()  # Store annotations
            apply_detection_btn.click(apply_detection, inputs=upload_image_component, outputs=[output_image_component, output_annotations])


    # Row for the graphs
    with gr.Row():
        # Individual graphs for each defect category
        nicks_graph_component = gr.Plot(label="Nicks Area Distribution")
        dents_graph_component = gr.Plot(label="Dents Area Distribution")
        scratches_graph_component = gr.Plot(label="Scratches Area Distribution")
        pittings_graph_component = gr.Plot(label="Pittings Area Distribution")

    # Button to generate graphs
    with gr.Row():
        graph_btn = gr.Button("Generate Area Distribution Graphs")
        # NOTE(review): generate_graphs_btn returns a single value wired to
        # four Plot outputs — assumes it yields a 4-sequence in (nicks,
        # dents, scratches, pittings) order; TODO confirm in the detection
        # module.
        graph_btn.click(generate_graphs_btn, inputs=output_annotations, outputs=[
            nicks_graph_component, dents_graph_component,
            scratches_graph_component, pittings_graph_component
        ])

    # Row for frequency graph
    with gr.Row():
        frequency_graph_component = gr.Plot(label="Defect Frequency Distribution")  # Frequency Graph

    # Row for frequency graph btn
    with gr.Row():
        freq_graph_btn = gr.Button("Generate Frequency Graph")
        # Wires the detection helper directly — no wrapper needed since it
        # maps one input (annotations) to one output (the plot).
        freq_graph_btn.click(detection.generate_frequency_graph,
                             inputs=output_annotations,
                             outputs=frequency_graph_component)

# Launch the Gradio interface
# share=True requests a public gradio.live tunnel in addition to the local
# server.
demo.launch(share=True)
427
+
428
+
429
+
430
+
431
+
432
+
433
+
434
+
435
+
436
+
437
+
438
+
model/best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67424dbaf2d9c3f07f356a59c37187ef1a7b9f59ebabf77c5cb7f9cb9507f107
3
+ size 38138560
model/company_model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e2510291d55581f170275335dccbab9c2b91d85db4602bc399e15a0f7a24662
3
+ size 5461843
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ --extra-index-url https://download.pytorch.org/whl/cu124
2
+ torch
3
+ torchvision
4
+ opencv-python
5
+ gradio==5.4.0
6
+ sahi==0.11.18
7
+ pillow
8
+ plotly==5.24.1
9
+ ultralytics==8.3.24
10
+