Spaces:
Sleeping
Sleeping
Shreyansh Khaitan committed on
initial
Browse files- app.py +18 -22
- requirements.txt +2 -1
app.py
CHANGED
|
@@ -3,17 +3,21 @@ from inference_sdk import InferenceHTTPClient
|
|
| 3 |
from PIL import Image, ImageDraw, ImageEnhance
|
| 4 |
import matplotlib.pyplot as plt
|
| 5 |
import os
|
|
|
|
|
|
|
| 6 |
from collections import defaultdict
|
|
|
|
| 7 |
|
|
|
|
| 8 |
API_KEY = os.getenv("ROBOFLOW_API_KEY")
|
| 9 |
-
|
| 10 |
-
# Initialize the Roboflow client
|
| 11 |
CLIENT = InferenceHTTPClient(
|
| 12 |
api_url="https://detect.roboflow.com",
|
| 13 |
api_key=API_KEY
|
| 14 |
)
|
| 15 |
|
| 16 |
-
|
|
|
|
|
|
|
| 17 |
# Define colors for each component type
|
| 18 |
COLORS = {
|
| 19 |
"BYPASS DAMPER": (255, 0, 255), # Magenta
|
|
@@ -35,7 +39,7 @@ def enhance_image(image):
|
|
| 35 |
image = ImageEnhance.Contrast(image).enhance(1.2)
|
| 36 |
return image.convert('RGB') # Convert back to RGB
|
| 37 |
|
| 38 |
-
def detect_components(image_pil, confidence_threshold, grid_size
|
| 39 |
"""
|
| 40 |
Detects components in an image using the AI model with segmentation.
|
| 41 |
|
|
@@ -43,10 +47,9 @@ def detect_components(image_pil, confidence_threshold, grid_size, model_id):
|
|
| 43 |
image_pil: PIL Image object
|
| 44 |
confidence_threshold: Minimum confidence level for detections
|
| 45 |
grid_size: Tuple (rows, cols) for grid segmentation
|
| 46 |
-
model_id: Roboflow model ID
|
| 47 |
|
| 48 |
Returns:
|
| 49 |
-
Tuple of (final image with detections, component counts)
|
| 50 |
"""
|
| 51 |
width, height = image_pil.size
|
| 52 |
grid_rows, grid_cols = grid_size
|
|
@@ -86,7 +89,7 @@ def detect_components(image_pil, confidence_threshold, grid_size, model_id):
|
|
| 86 |
|
| 87 |
try:
|
| 88 |
# Run inference
|
| 89 |
-
result = CLIENT.infer(segment_path, model_id=
|
| 90 |
filtered_predictions = [pred for pred in result["predictions"]
|
| 91 |
if pred["confidence"] >= confidence_threshold]
|
| 92 |
|
|
@@ -143,11 +146,11 @@ def detect_components(image_pil, confidence_threshold, grid_size, model_id):
|
|
| 143 |
)
|
| 144 |
|
| 145 |
# Create a table of counts
|
| 146 |
-
count_table = "\n".join([f"{label}: {count}" for label, count in final_counts.items()])
|
| 147 |
|
| 148 |
return final_image, count_table, pass_results
|
| 149 |
|
| 150 |
-
def process_image(input_image, confidence_threshold, grid_rows, grid_cols
|
| 151 |
"""
|
| 152 |
Process the input image and return results for Gradio interface.
|
| 153 |
|
|
@@ -155,14 +158,10 @@ def process_image(input_image, confidence_threshold, grid_rows, grid_cols, model
|
|
| 155 |
input_image: Input image (numpy array from Gradio)
|
| 156 |
confidence_threshold: Confidence threshold slider value
|
| 157 |
grid_rows, grid_cols: Grid size
|
| 158 |
-
model_id: Model ID input
|
| 159 |
|
| 160 |
Returns:
|
| 161 |
Tuple of (final image, component counts, pass images)
|
| 162 |
"""
|
| 163 |
-
if model_id.strip() == "":
|
| 164 |
-
return None, "Error: Please provide a valid Model ID", None, None, None
|
| 165 |
-
|
| 166 |
if API_KEY is None:
|
| 167 |
return None, "Error: ROBOFLOW_API_KEY environment variable is not set", None, None, None
|
| 168 |
|
|
@@ -174,8 +173,7 @@ def process_image(input_image, confidence_threshold, grid_rows, grid_cols, model
|
|
| 174 |
final_image, count_table, pass_results = detect_components(
|
| 175 |
input_pil,
|
| 176 |
confidence_threshold,
|
| 177 |
-
(grid_rows, grid_cols)
|
| 178 |
-
model_id
|
| 179 |
)
|
| 180 |
|
| 181 |
# Convert individual pass images for display
|
|
@@ -200,7 +198,6 @@ with gr.Blocks(title="HVAC Component Detection") as demo:
|
|
| 200 |
with gr.Row():
|
| 201 |
with gr.Column():
|
| 202 |
input_image = gr.Image(label="Input Image", type="numpy")
|
| 203 |
-
model_id = gr.Textbox(label="Roboflow Model ID", placeholder="Enter your model ID")
|
| 204 |
confidence = gr.Slider(minimum=0.1, maximum=1.0, value=0.3, step=0.05,
|
| 205 |
label="Confidence Threshold")
|
| 206 |
|
|
@@ -221,17 +218,16 @@ with gr.Blocks(title="HVAC Component Detection") as demo:
|
|
| 221 |
|
| 222 |
submit_btn.click(
|
| 223 |
fn=process_image,
|
| 224 |
-
inputs=[input_image, confidence, grid_rows, grid_cols
|
| 225 |
outputs=[output_image, component_counts, pass1_output, pass2_output, pass3_output]
|
| 226 |
)
|
| 227 |
|
| 228 |
gr.Markdown("""
|
| 229 |
## Instructions
|
| 230 |
-
1.
|
| 231 |
-
2.
|
| 232 |
-
3.
|
| 233 |
-
4.
|
| 234 |
-
5. View the results from all three detection passes
|
| 235 |
|
| 236 |
Note: This application requires a ROBOFLOW_API_KEY environment variable.
|
| 237 |
""")
|
|
|
|
| 3 |
from PIL import Image, ImageDraw, ImageEnhance
|
| 4 |
import matplotlib.pyplot as plt
|
| 5 |
import os
|
| 6 |
+
import tempfile
|
| 7 |
+
import io
|
| 8 |
from collections import defaultdict
|
| 9 |
+
import numpy as np
|
| 10 |
|
| 11 |
+
# Initialize the Roboflow client with API key from environment variable
|
| 12 |
API_KEY = os.getenv("ROBOFLOW_API_KEY")
|
|
|
|
|
|
|
| 13 |
CLIENT = InferenceHTTPClient(
|
| 14 |
api_url="https://detect.roboflow.com",
|
| 15 |
api_key=API_KEY
|
| 16 |
)
|
| 17 |
|
| 18 |
+
# Model ID is predefined
|
| 19 |
+
MODEL_ID = "hvac-trial/1" # Replace with your actual model ID if different
|
| 20 |
+
|
| 21 |
# Define colors for each component type
|
| 22 |
COLORS = {
|
| 23 |
"BYPASS DAMPER": (255, 0, 255), # Magenta
|
|
|
|
| 39 |
image = ImageEnhance.Contrast(image).enhance(1.2)
|
| 40 |
return image.convert('RGB') # Convert back to RGB
|
| 41 |
|
| 42 |
+
def detect_components(image_pil, confidence_threshold, grid_size):
|
| 43 |
"""
|
| 44 |
Detects components in an image using the AI model with segmentation.
|
| 45 |
|
|
|
|
| 47 |
image_pil: PIL Image object
|
| 48 |
confidence_threshold: Minimum confidence level for detections
|
| 49 |
grid_size: Tuple (rows, cols) for grid segmentation
|
|
|
|
| 50 |
|
| 51 |
Returns:
|
| 52 |
+
Tuple of (final image with detections, component counts, pass results)
|
| 53 |
"""
|
| 54 |
width, height = image_pil.size
|
| 55 |
grid_rows, grid_cols = grid_size
|
|
|
|
| 89 |
|
| 90 |
try:
|
| 91 |
# Run inference
|
| 92 |
+
result = CLIENT.infer(segment_path, model_id=MODEL_ID)
|
| 93 |
filtered_predictions = [pred for pred in result["predictions"]
|
| 94 |
if pred["confidence"] >= confidence_threshold]
|
| 95 |
|
|
|
|
| 146 |
)
|
| 147 |
|
| 148 |
# Create a table of counts
|
| 149 |
+
count_table = "\n".join([f"{label}: {count}" for label, count in sorted(final_counts.items())])
|
| 150 |
|
| 151 |
return final_image, count_table, pass_results
|
| 152 |
|
| 153 |
+
def process_image(input_image, confidence_threshold, grid_rows, grid_cols):
|
| 154 |
"""
|
| 155 |
Process the input image and return results for Gradio interface.
|
| 156 |
|
|
|
|
| 158 |
input_image: Input image (numpy array from Gradio)
|
| 159 |
confidence_threshold: Confidence threshold slider value
|
| 160 |
grid_rows, grid_cols: Grid size
|
|
|
|
| 161 |
|
| 162 |
Returns:
|
| 163 |
Tuple of (final image, component counts, pass images)
|
| 164 |
"""
|
|
|
|
|
|
|
|
|
|
| 165 |
if API_KEY is None:
|
| 166 |
return None, "Error: ROBOFLOW_API_KEY environment variable is not set", None, None, None
|
| 167 |
|
|
|
|
| 173 |
final_image, count_table, pass_results = detect_components(
|
| 174 |
input_pil,
|
| 175 |
confidence_threshold,
|
| 176 |
+
(grid_rows, grid_cols)
|
|
|
|
| 177 |
)
|
| 178 |
|
| 179 |
# Convert individual pass images for display
|
|
|
|
| 198 |
with gr.Row():
|
| 199 |
with gr.Column():
|
| 200 |
input_image = gr.Image(label="Input Image", type="numpy")
|
|
|
|
| 201 |
confidence = gr.Slider(minimum=0.1, maximum=1.0, value=0.3, step=0.05,
|
| 202 |
label="Confidence Threshold")
|
| 203 |
|
|
|
|
| 218 |
|
| 219 |
submit_btn.click(
|
| 220 |
fn=process_image,
|
| 221 |
+
inputs=[input_image, confidence, grid_rows, grid_cols],
|
| 222 |
outputs=[output_image, component_counts, pass1_output, pass2_output, pass3_output]
|
| 223 |
)
|
| 224 |
|
| 225 |
gr.Markdown("""
|
| 226 |
## Instructions
|
| 227 |
+
1. Upload an image containing HVAC components
|
| 228 |
+
2. Adjust the confidence threshold and grid size if needed
|
| 229 |
+
3. Click "Detect Components" to start the detection process
|
| 230 |
+
4. View the results from all three detection passes
|
|
|
|
| 231 |
|
| 232 |
Note: This application requires a ROBOFLOW_API_KEY environment variable.
|
| 233 |
""")
|
requirements.txt
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
inference-sdk
|
| 2 |
pillow
|
| 3 |
matplotlib
|
| 4 |
-
gradio
|
|
|
|
|
|
| 1 |
inference-sdk
|
| 2 |
pillow
|
| 3 |
matplotlib
|
| 4 |
+
gradio
|
| 5 |
+
numpy
|