NaveenKumar5 commited on
Commit
817fe63
·
verified ·
1 Parent(s): caaac79

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -75
app.py CHANGED
@@ -1,76 +1,37 @@
1
  import gradio as gr
2
- import cv2
3
- import numpy as np
4
- import tempfile
5
- import os
6
- import pandas as pd
7
- from inference import InferencePipeline
8
-
9
def run_inference(video_file):
    """Run the Roboflow workflow on an uploaded video.

    Returns a 3-tuple ``(video_bytes, dataframe, status_message)`` where
    ``video_bytes`` is the re-encoded annotated mp4 (or ``None``),
    ``dataframe`` is one row per detection (or ``None``), and
    ``status_message`` describes the outcome for the UI.
    """
    if video_file is None:
        return None, None, "Please upload a video file."

    # Persist the uploaded stream to disk so the pipeline can read it by path.
    temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    with open(temp_video_path, "wb") as f:
        f.write(video_file.read())

    annotated_frames = []
    prediction_log = []

    def on_prediction(result, frame):
        # Collect the workflow's annotated output frame, when present.
        if result.get("output_image") is not None:
            annotated_frames.append(result["output_image"].numpy_image)

        # Flatten each detection into a row for the results table.
        if result.get("predictions"):
            for pred in result["predictions"].get("predictions", []):
                prediction_log.append({
                    "Class": pred["class"],
                    "Confidence": round(pred["confidence"], 2),
                    "Box": f'({pred["x"]}, {pred["y"]}, {pred["width"]}, {pred["height"]})',
                })

    try:
        # SECURITY: read the API key from the environment instead of committing
        # it to source. The inline fallback preserves the original behavior but
        # the key should be rotated and the fallback removed.
        pipeline = InferencePipeline.init_with_workflow(
            api_key=os.environ.get("ROBOFLOW_API_KEY", "dxkgGGHSZ3DI8XzVn29U"),
            workspace_name="naveen-kumar-hnmil",
            workflow_id="detect-count-and-visualize-8",
            video_reference=temp_video_path,
            max_fps=5,
            on_prediction=on_prediction,
        )
        pipeline.start()
        pipeline.join()
    finally:
        # Fix: the temp upload was previously leaked if the pipeline raised.
        os.unlink(temp_video_path)

    if not annotated_frames:
        return None, None, "No faults detected or output frames returned."

    # Re-encode the collected RGB frames as an mp4 (5 fps matches max_fps).
    height, width, _ = annotated_frames[0].shape
    output_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), 5, (width, height))
    try:
        for frame in annotated_frames:
            # Frames arrive as RGB; OpenCV writers expect BGR.
            out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    finally:
        out.release()

    # Fix: close the output file handle (was `open(...).read()` with no close).
    with open(output_path, "rb") as fh:
        video_bytes = fh.read()
    os.unlink(output_path)

    df = pd.DataFrame(prediction_log)
    return video_bytes, df, "Inference completed successfully."
65
-
66
# Gradio UI: upload a thermal video, run the Roboflow workflow, and display
# the annotated video alongside a table of detections and a status line.
with gr.Blocks() as demo:
    gr.Markdown("# Solar Panel Fault Detection (Roboflow + Gradio)")

    uploaded_video = gr.Video(label="Upload Thermal Video", source="upload", type="file")
    annotated_video = gr.Video(label="Annotated Output Video")
    detections_table = gr.Dataframe(
        headers=["Class", "Confidence", "Box"],
        interactive=False,
    )
    status_box = gr.Textbox(label="Status", interactive=False)

    trigger = gr.Button("Run Inference")
    trigger.click(
        fn=run_inference,
        inputs=uploaded_video,
        outputs=[annotated_video, detections_table, status_box],
    )

demo.launch()
 
1
import gradio as gr
from transformers import pipeline
# Fix: ImageDraw is used by detect_faults but was never imported, which would
# raise NameError at runtime on the first detection.
from PIL import Image, ImageDraw

# Load the object-detection model once at startup from the Hugging Face Hub.
model = pipeline("object-detection", model="NaveenKumar5/Solar_panel_fault_detection")
7
+
8
def detect_faults(image):
    """Detect solar-panel faults in a PIL image.

    Runs the module-level object-detection pipeline and returns a new RGB
    copy of ``image`` with a red bounding box and a ``label (score)``
    caption drawn for every detection. The input image is not modified.
    """
    # Local import makes this function self-contained; ImageDraw was not
    # imported at module level in the original file.
    from PIL import ImageDraw

    results = model(image)

    # Fix: convert once, up front. The original called .convert("RGB") and
    # re-created the Draw object inside the loop; convert() returns a fresh
    # copy each iteration, so every box drawn in earlier iterations was
    # silently discarded and only the last detection was visible.
    annotated_image = image.copy().convert("RGB")
    draw = ImageDraw.Draw(annotated_image)

    for detection in results:
        box = detection["box"]
        label = detection["label"]
        score = detection["score"]

        draw.rectangle(
            [box["xmin"], box["ymin"], box["xmax"], box["ymax"]],
            outline="red",
            width=3,
        )
        # Caption just above the box's top-left corner.
        draw.text((box["xmin"], box["ymin"] - 10), f"{label} ({score:.2f})", fill="red")

    return annotated_image
27
+
28
# UI configuration pulled into named constants for easy editing.
APP_TITLE = "Solar Panel Fault Detection"
APP_DESCRIPTION = (
    "Upload an image of a solar panel to detect faults using a custom-trained model."
)

# Single-image Gradio app wired to detect_faults: PIL image in, PIL image out.
demo = gr.Interface(
    fn=detect_faults,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title=APP_TITLE,
    description=APP_DESCRIPTION,
)

if __name__ == "__main__":
    demo.launch()