Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -62,4 +62,185 @@ def resize_frame(frame, width=None):
|
|
| 62 |
# Function to process a batch of frames
|
| 63 |
async def detect_faults_batch(frames, processor, model, device):
|
| 64 |
try:
|
| 65 |
-
frames = [resize_frame(frame,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
# Function to process a batch of frames
async def detect_faults_batch(frames, processor, model, device):
    """Run object detection on a batch of RGB frames and classify panel faults.

    Parameters:
        frames: list of RGB frames (H x W x 3 ndarrays) — assumed RGB since the
            caller converts with COLOR_BGR2RGB before batching.
        processor: Hugging Face image processor providing preprocessing and
            post_process_object_detection.
        model: detection model already moved to `device`.
        device: torch device the inputs should be sent to.

    Returns:
        (annotated_frames, all_faults): annotated copies of the (resized)
        frames and a parallel list of per-frame fault dicts.
        Returns ([], []) if anything raises.
    """
    try:
        # Resize first so annotations and target sizes agree with the output video.
        frames = [resize_frame(frame, resize_width) for frame in frames]
        inputs = processor(images=frames, return_tensors="pt").to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        target_sizes = torch.tensor([frame.shape[:2] for frame in frames]).to(device)
        results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)

        annotated_frames = []
        all_faults = []

        for frame, result in zip(frames, results):
            faults = {"Thermal Fault": False, "Dust Fault": False, "Power Generation Fault": False}
            annotated_frame = frame.copy()
            h, w = frame.shape[:2]

            for score, label, box in zip(result["scores"], result["labels"], result["boxes"]):
                box = [int(i) for i in box.tolist()]
                # Clamp the box to the frame: a box partly outside the image
                # would otherwise yield an empty slice and np.mean -> NaN.
                x1, y1 = max(0, box[0]), max(0, box[1])
                x2, y2 = min(w, box[2]), min(h, box[3])
                roi = frame[y1:y2, x1:x2]
                if roi.size == 0:
                    continue  # degenerate detection, nothing to classify
                mean_intensity = np.mean(roi)

                # Heuristic thresholds: bright regions read as hot spots,
                # dark regions as dust/soiling.
                if mean_intensity > 200:
                    faults["Thermal Fault"] = True
                    cv2.rectangle(annotated_frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
                    cv2.putText(annotated_frame, "Thermal Fault", (x1, max(0, y1 - 10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
                elif mean_intensity < 100:
                    faults["Dust Fault"] = True
                    cv2.rectangle(annotated_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    cv2.putText(annotated_frame, "Dust Fault", (x1, max(0, y1 - 10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # Either physical fault implies degraded power generation.
            if faults["Thermal Fault"] or faults["Dust Fault"]:
                faults["Power Generation Fault"] = True

            annotated_frames.append(annotated_frame)
            all_faults.append(faults)

        # Free GPU memory between batches to keep long videos from OOMing.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        return annotated_frames, all_faults
    except Exception as e:
        st.error(f"Error during fault detection: {str(e)}")
        return [], []
|
| 108 |
+
|
| 109 |
+
# Function to process video
async def process_video(video_path, frame_skip, batch_size):
    """Read a video, run batched fault detection, and write an annotated copy.

    Frames whose index is not a multiple of `frame_skip` are resized and
    passed through unprocessed; the rest are detected in batches of
    `batch_size` via detect_faults_batch.

    Parameters:
        video_path (str): path to the input video file.
        frame_skip (int): analyze only every `frame_skip`-th frame.
        batch_size (int): number of frames per detection batch.

    Returns:
        (output_path, video_faults): path to the annotated .mp4 and a dict of
        fault flags aggregated over the whole video, or (None, None) on error.
    """
    try:
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            st.error("Error: Could not open video file.")
            return None, None

        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        # Output dimensions follow the optional global resize_width setting.
        out_width = resize_width if resize_width else frame_width
        out_height = int(out_width * frame_height / frame_width) if resize_width else frame_height

        output_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        out = cv2.VideoWriter(output_path, fourcc, fps, (out_width, out_height))

        video_faults = {"Thermal Fault": False, "Dust Fault": False, "Power Generation Fault": False}
        frame_count = 0
        frames_batch = []
        processed_frames = 0

        async def _flush_batch(batch):
            # Detect faults on `batch`, OR each frame's flags into the
            # video-level totals, and write annotated frames back as BGR.
            annotated_frames, batch_faults = await detect_faults_batch(batch, processor, model, device)
            for annotated_frame, faults in zip(annotated_frames, batch_faults):
                for fault in video_faults:
                    video_faults[fault] |= faults[fault]
                out.write(cv2.cvtColor(annotated_frame, cv2.COLOR_RGB2BGR))

        with st.spinner("Analyzing video..."):
            progress = st.progress(0)
            # NOTE(review): the original created an unused ThreadPoolExecutor
            # here and never shut it down; removed as dead code.

            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break

                if frame_count % frame_skip != 0:
                    # Skipped frame: copy through (resized) without detection.
                    out.write(resize_frame(frame, resize_width))
                    frame_count += 1
                    processed_frames += 1
                    progress.progress(min(processed_frames / total_frames, 1.0))
                    continue

                # Model expects RGB; OpenCV reads BGR.
                frames_batch.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

                if len(frames_batch) >= batch_size:
                    await _flush_batch(frames_batch)
                    frames_batch = []
                    processed_frames += batch_size
                    progress.progress(min(processed_frames / total_frames, 1.0))

                frame_count += 1

            # Flush any remaining partial batch at end of stream.
            if frames_batch:
                await _flush_batch(frames_batch)
                processed_frames += len(frames_batch)
                progress.progress(min(processed_frames / total_frames, 1.0))

        cap.release()
        out.release()
        return output_path, video_faults

    except Exception as e:
        st.error(f"Error processing video: {str(e)}")
        return None, None
    finally:
        # Release handles even if an exception escaped above; double
        # release of `out` is harmless for cv2.VideoWriter.
        if 'cap' in locals() and cap.isOpened():
            cap.release()
        if 'out' in locals():
            out.release()
|
| 191 |
+
|
| 192 |
+
# File uploader
uploaded_file = st.file_uploader("Upload a thermal video", type=["mp4"])

if uploaded_file is not None:
    try:
        # Persist the upload to disk so OpenCV can open it by path.
        src_video = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
        src_video.write(uploaded_file.read())
        src_video.close()

        st.video(src_video.name, format="video/mp4")

        # Streamlit's ScriptRunner thread has no event loop of its own,
        # so spin up a fresh one to drive the async pipeline.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            output_path, video_faults = loop.run_until_complete(
                process_video(src_video.name, frame_skip, batch_size)
            )
        finally:
            loop.close()

        if output_path and video_faults:
            st.subheader("Fault Detection Results")
            st.video(output_path, format="video/mp4")

            st.write("**Detected Faults in Video:**")
            for fault, detected in video_faults.items():
                if detected:
                    status, color = "Detected", "red"
                else:
                    status, color = "Not Detected", "green"
                st.markdown(f"- **{fault}**: <span style='color:{color}'>{status}</span>", unsafe_allow_html=True)

            if any(video_faults.values()):
                st.subheader("Recommendations")
                # One advice line per fault type, shown only when flagged.
                advice_lines = (
                    ("Thermal Fault", "- **Thermal Fault**: Inspect for damaged components or overheating issues."),
                    ("Dust Fault", "- **Dust Fault**: Schedule cleaning to remove dust accumulation."),
                    ("Power Generation Fault", "- **Power Generation Fault**: Investigate efficiency issues due to detected faults."),
                )
                for fault_name, advice in advice_lines:
                    if video_faults[fault_name]:
                        st.write(advice)
            else:
                st.write("No faults detected. The solar panel appears to be functioning normally.")

            if os.path.exists(output_path):
                os.unlink(output_path)

        if os.path.exists(src_video.name):
            os.unlink(src_video.name)

    except Exception as e:
        st.error(f"Error handling uploaded file: {str(e)}")
    finally:
        # Clean up the source temp file even on error paths.
        if 'src_video' in locals() and os.path.exists(src_video.name):
            os.unlink(src_video.name)

# Footer
st.markdown("---")
st.write("Built with Streamlit, Hugging Face Transformers, and OpenCV for Solar Panel Fault Detection PoC")
|