claytonsds committed on
Commit
76f601a
·
verified ·
1 Parent(s): 0175916

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +144 -0
app.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import numpy as np
4
+ import cv2
5
+ import json
6
+ from VisionGauge.models import VisionGauge
7
+
8
+ model = VisionGauge()
9
+
10
def VisionGauge_Inference(imagem):
    """Run VisionGauge inference on one RGB frame.

    Parameters
    ----------
    imagem : numpy.ndarray | None
        RGB image (H, W, 3) delivered by the Gradio image component;
        ``None`` when no image was provided.

    Returns
    -------
    tuple
        ``(annotated_rgb, resultado_json)`` — the annotated RGB frame (or
        ``None`` when no image was received) and a JSON string of the form
        ``{"values": {"<id>": {"coords": {...}, "h_p": <float>}}}``.
    """
    if imagem is None:
        return None, "No image received."

    frame_rgb = imagem.copy()

    # Resize to a fixed width, preserving the aspect ratio.
    target_width = 640
    h, w = frame_rgb.shape[:2]
    scale = target_width / w
    new_h = int(h * scale)
    frame_rgb = cv2.resize(frame_rgb, (target_width, new_h))

    # HWC image -> NCHW float tensor (batch of 1) for the model.
    img_tensor = (
        torch.from_numpy(frame_rgb)
        .permute(2, 0, 1)
        .float()
        .unsqueeze(0)
    )

    # Model inference; index [0] selects the single batch element.
    boxes, preds = model.predict(img_tensor)
    boxes = boxes[0]
    preds = preds[0]

    # annotate_frame draws with OpenCV, which works in BGR — convert,
    # annotate, then convert back for Gradio's RGB display.
    frame_bgr = cv2.cvtColor(frame_rgb, cv2.COLOR_RGB2BGR)
    annotated_bgr = model.annotate_frame(
        frame_bgr,
        boxes,
        preds.squeeze(-1),
        frame_color="#551bb3",
        font_color="#ffffff",
        fontsize=10,
        frame_thickness=4,
    )
    annotated_rgb = cv2.cvtColor(annotated_bgr, cv2.COLOR_BGR2RGB)

    # Collect per-detection results, keyed by the detection index.
    resultados = {}

    for i in range(boxes.shape[0]):
        x1, y1, x2, y2 = boxes[i].int().tolist()

        # Skip all-zero boxes (padding / invalid detections).
        if x1 == y1 == x2 == y2 == 0:
            continue

        pred = preds[i].item()

        resultados[str(i)] = {
            "coords": {
                "x1": x1,
                "y1": y1,
                "x2": x2,
                "y2": y2
            },
            "h_p": round(pred, 2)
        }

    # FIX: the original special-cased an empty dict with a second dumps call,
    # but json.dumps({"values": {}}) is exactly what this produces anyway —
    # one call covers both the empty and non-empty cases.
    resultado_json = json.dumps({"values": resultados}, indent=2)

    return annotated_rgb, resultado_json
78
+
79
+
80
def update_mode(mode):
    """Toggle component visibility for the selected input mode.

    Returns a 3-tuple of ``gr.update`` objects for, in order, the upload
    image, the webcam image, and the run button: Image mode shows the
    upload input and the button; any other mode shows only the webcam.
    """
    image_mode = mode == "Image"
    return (
        gr.update(visible=image_mode),
        gr.update(visible=not image_mode),
        gr.update(visible=image_mode),
    )
93
+
94
+
95
# Gradio UI: a radio toggles between an upload-based image input and a live
# webcam stream; both feed the same inference function. Component creation
# order here determines the rendered layout.
with gr.Blocks() as demo:

    gr.Markdown("# VisionGauge Demo")

    # Selector driving which input component (and the run button) is visible.
    mode_selector = gr.Radio(
        ["Image", "Live Capture"],
        value="Image",
        label="Select Input Mode"
    )

    # Upload input — visible by default (matches the radio's initial value).
    input_img = gr.Image(
        sources=["upload"],
        type="numpy",
        visible=True, webcam_options=gr.WebcamOptions(mirror=False)
    )

    # Webcam input — hidden until "Live Capture" is selected; streaming=True
    # makes it emit frames continuously via the .stream() event below.
    webcam_img = gr.Image(
        sources=["webcam"],
        type="numpy",
        streaming=True,
        visible=False, webcam_options=gr.WebcamOptions(mirror=False),
    )

    # Shared outputs for both modes: annotated image + JSON predictions.
    output_img = gr.Image(label="Result")
    output_txt = gr.Textbox(label="Predictions", show_label=True, buttons=["copy"])

    # Manual trigger, only relevant (and visible) in Image mode.
    btn = gr.Button("Run model", visible=True)

    # Update interface when changing mode — outputs must match the 3-tuple
    # returned by update_mode: (input_img, webcam_img, btn).
    mode_selector.change(
        update_mode,
        inputs=mode_selector,
        outputs=[input_img, webcam_img, btn]
    )

    # IMAGE mode (button)
    btn.click(
        VisionGauge_Inference,
        inputs=input_img,
        outputs=[output_img, output_txt]
    )

    # LIVE mode (automatic stream) — each webcam frame is run through the
    # same inference function as the button path.
    webcam_img.stream(
        VisionGauge_Inference,
        inputs=webcam_img,
        outputs=[output_img, output_txt],
    )

# NOTE(review): share=True requests a public tunnel link; on Hugging Face
# Spaces this flag is typically unnecessary — confirm the deployment target.
demo.launch(share=True)