NaveenKumar5 committed on
Commit
5d8b64f
·
verified ·
1 Parent(s): e53fa42

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -45
app.py CHANGED
@@ -1,48 +1,76 @@
1
  import gradio as gr
2
- from inference_sdk import InferenceHTTPClient
3
- from PIL import Image
4
- import requests
5
-
6
# Roboflow hosted-inference client used by predict().
# NOTE(review): the API key is hardcoded in source — it should be loaded
# from an environment variable / secret store before publishing.
client = InferenceHTTPClient(
    api_url="https://detect.roboflow.com",
    api_key="dxkgGGHSZ3DI8XzVn29U",
)
11
-
12
def predict(image: Image.Image):
    """Run the Roboflow detection workflow on *image*.

    Args:
        image: the uploaded PIL image from the Gradio input.

    Returns:
        A PIL.Image with the workflow's annotations on success, or an
        error string describing the failure (the string will surface as
        an error in the image output, but is useful in logs).
    """
    # Local imports: not present at the top of this version of the file.
    import os
    import tempfile

    try:
        # Use a unique temp file instead of a fixed "temp.jpg" so that
        # concurrent requests cannot overwrite each other's upload.
        tmp = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False)
        tmp_path = tmp.name
        tmp.close()
        try:
            image.save(tmp_path)
            # The inference SDK accepts a local file path per image input
            # (passing an open file object is not a documented input form).
            result = client.run_workflow(
                workspace_name="naveen-kumar-hnmil",
                workflow_id="detect-count-and-visualize-5",
                images={"image": tmp_path},
                use_cache=True
            )
        finally:
            os.unlink(tmp_path)  # always remove the temp upload

        # Pull the annotated-image URL out of the workflow response.
        # NOTE(review): assumes the response is a dict with a
        # "visualizations" mapping — confirm against the workflow output.
        annotated_url = result.get("visualizations", {}).get("image")
        if not annotated_url:
            return "Error: No visualization returned from Roboflow."

        response = requests.get(annotated_url, stream=True)
        if response.status_code != 200:
            return f"Error fetching image from: {annotated_url}"

        return Image.open(response.raw)

    except Exception as e:
        # Surface the failure to the UI instead of crashing the app.
        return f"Exception: {str(e)}"
37
-
38
# Gradio front-end: a single annotated image in exchange for one upload.
image_input = gr.Image(type="pil")

demo = gr.Interface(
    fn=predict,
    inputs=image_input,
    outputs="image",
    title="Solar Panel Fault Detection",
    description="Upload an image and get predictions from your Roboflow workflow."
)

demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ import cv2
3
+ import numpy as np
4
+ import tempfile
5
+ import os
6
+ import pandas as pd
7
+ from inference import InferencePipeline
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
def run_inference(video_file):
    """Run the Roboflow workflow over an uploaded video.

    Args:
        video_file: the value produced by ``gr.Video`` — a file path string
            (newer Gradio) or a file-like object (older Gradio with
            ``type="file"``), or None when nothing was uploaded.

    Returns:
        Tuple ``(annotated_video_path, predictions_df, status_message)``.
        The first two entries are None when no output could be produced.
    """
    if video_file is None:
        return None, None, "Please upload a video file."

    # Normalize the Gradio value to a local file path; the original code
    # called .read() unconditionally, which crashes on a str path.
    if isinstance(video_file, str):
        temp_video_path = video_file
        owns_temp = False  # Gradio owns this file; don't delete it
    else:
        temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
        owns_temp = True
        with open(temp_video_path, "wb") as f:
            f.write(video_file.read())

    annotated_frames = []
    prediction_log = []

    def on_prediction(result, frame):
        # Collect the workflow's annotated frame, if one was produced.
        if result.get("output_image") is not None:
            annotated_frames.append(result["output_image"].numpy_image)

        # Log every detection: class label, confidence, bounding box.
        if result.get("predictions"):
            for pred in result["predictions"].get("predictions", []):
                prediction_log.append({
                    "Class": pred["class"],
                    "Confidence": round(pred["confidence"], 2),
                    "Box": f'({pred["x"]}, {pred["y"]}, {pred["width"]}, {pred["height"]})'
                })

    try:
        # NOTE(review): API key is hardcoded — move it to an env var/secret.
        pipeline = InferencePipeline.init_with_workflow(
            api_key="dxkgGGHSZ3DI8XzVn29U",
            workspace_name="naveen-kumar-hnmil",
            workflow_id="detect-count-and-visualize-8",
            video_reference=temp_video_path,
            max_fps=5,
            on_prediction=on_prediction
        )
        pipeline.start()
        pipeline.join()
    finally:
        # Clean up our copy of the upload even if the pipeline raises.
        if owns_temp:
            os.unlink(temp_video_path)

    if not annotated_frames:
        return None, None, "No faults detected or output frames returned."

    # Re-encode the annotated frames at the same rate the pipeline sampled
    # (max_fps=5 above).
    height, width, _ = annotated_frames[0].shape
    output_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), 5, (width, height))
    try:
        for frame_np in annotated_frames:
            # Workflow frames are RGB; OpenCV's writer expects BGR.
            out.write(cv2.cvtColor(frame_np, cv2.COLOR_RGB2BGR))
    finally:
        out.release()

    # Fix column order even when no detections were logged, so the Gradio
    # table headers always match.
    df = pd.DataFrame(prediction_log, columns=["Class", "Confidence", "Box"])

    # Return the file path rather than raw bytes: gr.Video expects a
    # playable path, and the original code deleted the file before Gradio
    # could serve it.
    return output_path, df, "Inference completed successfully."
65
+
66
# Gradio front-end: a video upload in; annotated video, detection table,
# and a status line out.
with gr.Blocks() as demo:
    gr.Markdown("# Solar Panel Fault Detection (Roboflow + Gradio)")

    # NOTE(review): source=/type= are Gradio 3.x arguments; they were
    # removed in Gradio 4 — confirm the pinned gradio version.
    video_input = gr.Video(label="Upload Thermal Video", source="upload", type="file")
    output_video = gr.Video(label="Annotated Output Video")
    output_table = gr.Dataframe(headers=["Class", "Confidence", "Box"], interactive=False)
    status = gr.Textbox(label="Status", interactive=False)

    run_button = gr.Button("Run Inference")
    run_button.click(
        fn=run_inference,
        inputs=video_input,
        outputs=[output_video, output_table, status],
    )

demo.launch()