NaveenKumar5 commited on
Commit
eac2d14
·
verified ·
1 Parent(s): f1c829c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -7
app.py CHANGED
@@ -1,13 +1,77 @@
 
1
  import gradio as gr
2
- from inference import predict
 
 
3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  iface = gr.Interface(
5
- fn=predict,
6
- inputs=gr.Image(type="pil"),
7
- outputs="image",
8
- title="Solar Panel Fault Detection",
9
- description="Upload a thermal image of a solar panel to detect faults using YOLOv5 trained via Roboflow."
 
 
 
10
  )
11
 
12
  if __name__ == "__main__":
13
- iface.launch()
 
1
+ # app.py (for Gradio)
2
  import gradio as gr
3
+ import cv2
4
+ import numpy as np
5
+ from Yolov8n_train import run_inference_on_video # Import your function
6
 
7
# Gradio callback: run the detection pipeline on an uploaded video.
def process_uploaded_video(video_file):
    """Run the YOLOv8 inference pipeline on an uploaded video file.

    Parameters:
        video_file: Path of the uploaded video. With ``gr.File(type="filepath")``
            Gradio passes a plain filepath string; older Gradio versions passed a
            tempfile-like object exposing ``.name`` — both forms are accepted.

    Returns:
        tuple: ``(image_path, status_message)`` where ``image_path`` is the path
        of the first processed frame saved as a JPEG, or ``(None, error_message)``
        when no video was supplied or no frames were produced.
    """
    if video_file is None:
        return None, "Please upload a video file."

    # BUG FIX: gr.File(type="filepath") yields a *string* path, so the old
    # `video_file.name` raised AttributeError. Accept either a str path or a
    # tempfile-like object for compatibility across Gradio versions.
    video_path = getattr(video_file, "name", video_file)

    # Run the inference pipeline; it is expected to return a list of numpy
    # images (processed frames). NOTE(review): assumed from the import — confirm
    # against Yolov8n_train.run_inference_on_video.
    processed_frames = run_inference_on_video(video_path)

    # Single guard replaces the old duplicated check (the previous
    # `if processed_frames ... else` branch was unreachable).
    if not processed_frames:
        return None, "No frames processed. Check pipeline configuration or video input."

    # For simplicity, save and display only the first processed frame.
    # To show a full output video instead, re-encode `processed_frames`
    # into a video file and return that path.
    output_image_path = "output_frame.jpg"
    cv2.imwrite(output_image_path, processed_frames[0])
    return output_image_path, "Video processed. Displaying first output frame."
64
# Gradio Interface setup: wire the video-processing callback to the UI widgets.
_video_input = gr.File(type="filepath", label="Upload Video File")  # user-uploaded video
_frame_output = gr.Image(label="Processed Output (First Frame)", type="filepath")  # annotated frame
_status_output = gr.Textbox(label="Status")  # human-readable progress / error messages

iface = gr.Interface(
    fn=process_uploaded_video,
    inputs=_video_input,
    outputs=[_frame_output, _status_output],
    title="Yolov8n Video Detection and Counting",
    description="Upload a video file to run the detection and counting workflow."
)
75
 
76
# Script entry point: start the Gradio web server when run directly.
if __name__ == "__main__":
    iface.launch()