", None
frame_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
if face_mesh is not None and mp is not None:
result = face_mesh.process(frame_rgb)
if not result.multi_face_landmarks:
return "
Your hemoglobin is a bit low โ this could mean mild anemia.
"
if test_values["Iron"] < 60 or test_values["Ferritin"] < 30:
summary += "
Low iron storage detected โ consider an iron profile test.
"
if test_values["Bilirubin"] > 1.2:
summary += "
Elevated bilirubin โ possible jaundice. Recommend LFT.
"
if test_values["HbA1c"] > 5.7:
summary += "
High HbA1c โ prediabetes indication. Recommend glucose check.
"
if spo2 < 95:
summary += "
Low SpOโ โ suggest retesting with a pulse oximeter.
"
summary += "
๐ก Tip: This is an AI-based estimate. Please follow up with a lab.
"
html_output += summary
html_output += "
"
html_output += "
๐ Book a Lab Test
Prefer confirmation? Find certified labs near you.
"
html_output += "
"
# Return the single final frame (blurred for display only)
return html_output, blur_for_display(frame_rgb)
# ==============================
# Video path (assemble once; show only final blurred keyframe)
# ==============================
def analyze_video(video_path: Optional[str]) -> Tuple[str, Optional[np.ndarray]]:
    """Analyze a recorded face video and build an HTML report.

    Reads the video at *video_path*, keeps the first decodable frame as the
    key frame, counts total frames, and returns ``(html_report, blurred_rgb)``.

    Returns a warning message and ``None`` for the frame when the path is
    missing/unreadable or the clip is too short (< 30 frames, ~1 s @ 30 fps).
    """
    if not video_path or not os.path.exists(video_path):
        return "⚠️ Face video missing or unreadable.", None

    cap = cv2.VideoCapture(video_path)
    frame_sample = None  # first decodable frame, kept as the key frame
    frames = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            if frame_sample is None:
                frame_sample = frame.copy()
            frames += 1
    finally:
        # FIX: release the capture handle even if a read raises.
        cap.release()

    if frame_sample is None or frames < 30:
        return "⚠️ Video too short or unreadable — please record ~20–30 seconds.", None

    # FIX: OpenCV decodes BGR; the original returned an undefined `frame_rgb`.
    frame_rgb = cv2.cvtColor(frame_sample, cv2.COLOR_BGR2RGB)

    # NOTE(review): the original body referenced `test_values`, `spo2` and
    # `summary`, none of which are defined in this function (they belong to
    # the image-analysis path), so the per-metric summary is omitted here.
    # TODO: wire in video-derived estimates (e.g. heart rate) when available.
    html_output = "💡 Tip: This is an AI-based estimate. Please follow up with a lab.\n"
    html_output += "🔍 Book a Lab Test\nPrefer confirmation? Find certified labs near you.\n"

    # Return the single final report and a blurred key frame (display only).
    return html_output, blur_for_display(frame_rgb)
# ==============================
# WebRTC frame collector (no progress timer to avoid constant re-renders)
# ==============================
class HRCollectorVideoProcessor:
    """Buffers raw BGR frames from a WebRTC stream while recording is active."""

    def __init__(self):
        # Ring buffer sized for roughly 90 seconds at 30 fps.
        self.frames = deque(maxlen=30 * 90)
        self.recording = False

    def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
        """Capture the incoming frame (when recording) and echo it back unchanged."""
        img = frame.to_ndarray(format="bgr24")
        if self.recording:
            self.frames.append(img)
        return av.VideoFrame.from_ndarray(img, format="bgr24")

    def start(self):
        """Begin a fresh recording session, discarding any previous frames."""
        self.recording = True
        self.frames.clear()

    def stop_and_dump_to_file(self) -> Optional[str]:
        """Stop recording; write the buffer to a temp .mp4 and return its path.

        Returns ``None`` when fewer than ~1 second of frames was captured.
        """
        self.recording = False
        if len(self.frames) < 30:  # ~1s @ 30fps
            return None
        height, width = self.frames[0].shape[:2]
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as handle:
            out_path = handle.name
        writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), 30.0, (width, height))
        for buffered in self.frames:
            writer.write(buffered)
        writer.release()
        return out_path
# ==============================
# Streamlit UI
# ==============================
st.markdown("""
# ๐ง Face-Based Lab Test AI Report
Use **Image** (Hb) or **Video** (HR). Only a single **blurred preview** is shown **after** analysis to avoid flicker.
""")

# Session-state slots that must survive form submits / reruns:
#  - captured_image_bgr: decoded BGR image chosen for analysis
#  - captured_video_path: temp-file path of the recorded/uploaded video
#  - img_bytes_buffer: raw camera bytes, kept in case "Save Image" is skipped
for _slot in ("captured_image_bgr", "captured_video_path", "img_bytes_buffer"):
    if _slot not in st.session_state:
        st.session_state[_slot] = None

mode = st.radio("Choose Input Mode", ["Image", "Video"], horizontal=True)
col_left, col_right = st.columns([1.2, 1], gap="large")
with col_left:
    if mode == "Image":
        st.subheader("๐ธ Face / Eye Image")
        # Wrap inputs in a form so widget changes don't re-run the app
        with st.form("img_form", clear_on_submit=False):
            img_source = st.radio("Source", ["Camera", "Upload"], horizontal=True, key="img_source")
            uploaded_img = None
            if img_source == "Camera":
                # Auto-buffer captured bytes so the image survives reruns even
                # if the user forgets to press "Save Image".
                cam_img = st.camera_input("Capture image", key="camera_img")
                if cam_img is not None:
                    st.session_state.img_bytes_buffer = cam_img.getvalue()
            else:
                file_up = st.file_uploader("Upload JPG/PNG", type=["jpg", "jpeg", "png"])
                if file_up is not None:
                    uploaded_img = file_up.read()
            submitted_img = st.form_submit_button("Save Image")
            if submitted_img:
                # Prefer explicit upload; else fall back to auto-buffered camera bytes
                raw_bytes = uploaded_img if uploaded_img else st.session_state.img_bytes_buffer
                if raw_bytes:
                    arr = np.frombuffer(raw_bytes, dtype=np.uint8)
                    img_bgr = cv2.imdecode(arr, cv2.IMREAD_COLOR)
                    if img_bgr is not None:
                        st.session_state.captured_image_bgr = img_bgr
                        st.success("Image saved. Now click **Analyze Image** below.")
                    else:
                        st.warning("Could not decode the image. Please recapture/upload.")
                else:
                    st.warning("No image provided.")
        analyze_image = st.button("๐ Analyze Image", type="primary", use_container_width=True)
    else:
        st.subheader("๐ฝ Face Video")
        with st.form("vid_form", clear_on_submit=False):
            vid_source = st.radio("Source", ["Camera (Live)", "Upload"], horizontal=True, key="vid_source")
            temp_video_path = None  # local to this rerun only
            if vid_source == "Upload":
                up_vid = st.file_uploader("Upload MP4/AVI/MOV", type=["mp4", "avi", "mov"])
                if up_vid is not None:
                    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmpf:
                        tmpf.write(up_vid.read())
                        temp_video_path = tmpf.name
            else:
                st.write("Start โ record ~20โ30s โ Stop & Use")
                # Create the frame collector once and reuse it across reruns.
                if "webrtc_ctx" not in st.session_state:
                    st.session_state.webrtc_ctx = None
                    st.session_state.hr_processor = HRCollectorVideoProcessor()
                ctx = webrtc_streamer(
                    key="hr-webrtc",
                    mode=WebRtcMode.SENDRECV,
                    rtc_configuration=RTC_CONFIGURATION,
                    media_stream_constraints={"video": True, "audio": False},
                    video_processor_factory=lambda: st.session_state.hr_processor,
                )
                c1, c2 = st.columns(2)
                with c1:
                    start_clicked = st.form_submit_button("Start Recording")
                with c2:
                    stop_clicked = st.form_submit_button("Stop & Use")
                if start_clicked and ctx.state.playing:
                    st.session_state.hr_processor.start()
                    st.info("Recording started...")
                if stop_clicked:
                    dump_path = st.session_state.hr_processor.stop_and_dump_to_file()
                    if dump_path and os.path.exists(dump_path):
                        temp_video_path = dump_path
                        # FIX: persist immediately. Only one form button is True
                        # per submit, so a later "Save Video" click runs in a
                        # fresh rerun where temp_video_path is None again —
                        # relying on "Save Video" alone silently lost the clip.
                        st.session_state.captured_video_path = dump_path
                        st.success("Video captured. Now click **Analyze Video** below.")
                    else:
                        st.warning("Captured video too short. Please record ~20โ30 seconds.")
            # Store selected/captured video after form submit (upload path).
            submitted_vid = st.form_submit_button("Save Video")
            if submitted_vid:
                if temp_video_path:
                    st.session_state.captured_video_path = temp_video_path
                elif st.session_state.captured_video_path:
                    # Already persisted by the "Stop & Use" fix above.
                    st.success("Video already saved. Click **Analyze Video** below.")
                elif vid_source == "Upload":
                    st.warning("No video uploaded.")
                else:
                    st.warning("No video captured yet.")
        analyze_video_btn = st.button("๐ Analyze Video", type="primary", use_container_width=True)
with col_right:
    st.subheader("๐ Blurred Preview (shown only after analysis)")
    preview_placeholder = st.empty()

st.markdown("---")
# Single HTML report placeholder
report_placeholder = st.empty()


def _render_result(report_html, blurred_frame, caption):
    """Show the blurred preview (when available) and the HTML report."""
    if blurred_frame is not None:
        preview_placeholder.image(blurred_frame, caption=caption, use_container_width=True)
    with report_placeholder:
        components.html(report_html, height=1200, scrolling=True)


# Run analysis only when the explicit Analyze buttons are pressed.
if mode == "Image" and "analyze_image" in locals() and analyze_image:
    # If the user skipped "Save Image", fall back to the auto-buffered camera bytes.
    if st.session_state.captured_image_bgr is None and st.session_state.img_bytes_buffer is not None:
        decoded = cv2.imdecode(
            np.frombuffer(st.session_state.img_bytes_buffer, dtype=np.uint8),
            cv2.IMREAD_COLOR,
        )
        if decoded is not None:
            st.session_state.captured_image_bgr = decoded
    html, frame_rgb_blurred = analyze_face(st.session_state.captured_image_bgr)
    _render_result(html, frame_rgb_blurred, "Blurred Image")
elif mode == "Video" and "analyze_video_btn" in locals() and analyze_video_btn:
    html, frame_rgb_blurred = analyze_video(st.session_state.captured_video_path)
    _render_result(html, frame_rgb_blurred, "Blurred Key Frame")