Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
CHANGED
|
@@ -1,76 +1,37 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
import
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
max_fps=5,
|
| 39 |
-
on_prediction=on_prediction
|
| 40 |
-
)
|
| 41 |
-
|
| 42 |
-
pipeline.start()
|
| 43 |
-
pipeline.join()
|
| 44 |
-
|
| 45 |
-
os.unlink(temp_video_path)
|
| 46 |
-
|
| 47 |
-
if not annotated_frames:
|
| 48 |
-
return None, None, "No faults detected or output frames returned."
|
| 49 |
-
|
| 50 |
-
# Save annotated video
|
| 51 |
-
height, width, _ = annotated_frames[0].shape
|
| 52 |
-
output_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
|
| 53 |
-
out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), 5, (width, height))
|
| 54 |
-
for frame in annotated_frames:
|
| 55 |
-
out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
|
| 56 |
-
out.release()
|
| 57 |
-
|
| 58 |
-
video_bytes = open(output_path, "rb").read()
|
| 59 |
-
os.unlink(output_path)
|
| 60 |
-
|
| 61 |
-
# Prepare dataframe as CSV text for Gradio table
|
| 62 |
-
df = pd.DataFrame(prediction_log)
|
| 63 |
-
|
| 64 |
-
return video_bytes, df, "Inference completed successfully."
|
| 65 |
-
|
| 66 |
-
with gr.Blocks() as demo:
|
| 67 |
-
gr.Markdown("# Solar Panel Fault Detection (Roboflow + Gradio)")
|
| 68 |
-
video_input = gr.Video(label="Upload Thermal Video", source="upload", type="file")
|
| 69 |
-
output_video = gr.Video(label="Annotated Output Video")
|
| 70 |
-
output_table = gr.Dataframe(headers=["Class", "Confidence", "Box"], interactive=False)
|
| 71 |
-
status = gr.Textbox(label="Status", interactive=False)
|
| 72 |
-
|
| 73 |
-
run_btn = gr.Button("Run Inference")
|
| 74 |
-
run_btn.click(fn=run_inference, inputs=video_input, outputs=[output_video, output_table, status])
|
| 75 |
-
|
| 76 |
-
demo.launch()
|
|
|
|
| 1 |
import gradio as gr
from PIL import Image, ImageDraw
from transformers import pipeline
|
| 4 |
+
|
| 5 |
+
# Load the model from Hugging Face Hub
|
| 6 |
+
model = pipeline("object-detection", model="NaveenKumar5/Solar_panel_fault_detection")
|
| 7 |
+
|
| 8 |
+
def detect_faults(image):
|
| 9 |
+
results = model(image)
|
| 10 |
+
annotated_image = image.copy()
|
| 11 |
+
|
| 12 |
+
for result in results:
|
| 13 |
+
box = result["box"]
|
| 14 |
+
label = result["label"]
|
| 15 |
+
score = result["score"]
|
| 16 |
+
|
| 17 |
+
annotated_image = annotated_image.convert("RGB")
|
| 18 |
+
draw = ImageDraw.Draw(annotated_image)
|
| 19 |
+
draw.rectangle(
|
| 20 |
+
[box["xmin"], box["ymin"], box["xmax"], box["ymax"]],
|
| 21 |
+
outline="red",
|
| 22 |
+
width=3
|
| 23 |
+
)
|
| 24 |
+
draw.text((box["xmin"], box["ymin"] - 10), f"{label} ({score:.2f})", fill="red")
|
| 25 |
+
|
| 26 |
+
return annotated_image
|
| 27 |
+
|
| 28 |
+
demo = gr.Interface(
|
| 29 |
+
fn=detect_faults,
|
| 30 |
+
inputs=gr.Image(type="pil"),
|
| 31 |
+
outputs=gr.Image(type="pil"),
|
| 32 |
+
title="Solar Panel Fault Detection",
|
| 33 |
+
description="Upload an image of a solar panel to detect faults using a custom-trained model."
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
if __name__ == "__main__":
|
| 37 |
+
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|