import os
import sys
import cv2
import numpy as np
import gradio as gr
import plotly.graph_objects as go
from ultralytics import YOLO
import google.generativeai as genai
from PIL import Image
from gtts import gTTS
import tempfile
import datetime
import requests
import shutil

# --- ReportLab Imports (PDF) ---
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm, mm
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont

# ============================================
# 1) Configuration & Setup
# ============================================

# 🔑 API KEY
# [FIX] Read the secret by its environment-variable NAME. The original code
# passed a literal API key string to os.getenv(), which (a) leaked the key in
# source control and (b) always returned None because no env var has that
# name. The leaked key should be revoked; store a fresh one in the
# GOOGLE_API_KEY secret (e.g. HF Spaces: Settings > Secrets).
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
if GOOGLE_API_KEY:
    genai.configure(api_key=GOOGLE_API_KEY)

# 📂 Model Path
MODEL_PATH = "otu_multiclass_yolo11s_v2.pt"

LOGO_KMUTNB_URL = "https://www.mou.kmutnb.ac.th/logo_kmutnb.png"
LOGO_RAMA_URL = "https://www.rama.mahidol.ac.th/nursing/sites/default/files/public/Rama_Logo.png"
INTRO_SOUND_URL = "https://cdn.pixabay.com/download/audio/2022/03/24/audio_c8c8a73467.mp3?filename=cinematic-atmosphere-score-2-22136.mp3"

# YOLO class index -> human-readable diagnosis label.
CLASS_NAMES = {
    0: "Chocolate cyst",
    1: "Serous cystadenoma",
    2: "Teratoma",
    3: "Theca cell tumor",
    4: "Simple cyst",
    5: "Normal ovary",
    6: "Mucinous cystadenoma",
    7: "High grade serous",
}

# ---------------------------------------------------------
# 🛠️ AUTO-DOWNLOAD FONTS
# ---------------------------------------------------------
def force_download_font(url, filename):
    """Download *url* to *filename* if it is not already on disk.

    Returns True when the file exists afterwards, False when the download
    failed. Best-effort: the app still runs (with fallback fonts) on failure.
    """
    if not os.path.exists(filename):
        print(f"📥 Downloading {filename}...")
        try:
            # [FIX] timeout so a dead mirror cannot hang app start-up;
            # raise_for_status so an HTML error page is not saved as a font.
            r = requests.get(url, allow_redirects=True, timeout=30)
            r.raise_for_status()
            with open(filename, 'wb') as f:
                f.write(r.content)
        except Exception as e:
            print(f"❌ Error downloading {filename}: {e}")
            return False
    return True


font_urls = [
    ("https://github.com/nutjunkie/thaifonts_sipa/raw/master/sipa_fonts/THSarabunNew/THSarabunNew.ttf",
     "THSarabunNew.ttf"),
    ("https://github.com/nutjunkie/thaifonts_sipa/raw/master/sipa_fonts/THSarabunNew/THSarabunNew%20Bold.ttf",
     "THSarabunNew-Bold.ttf"),
]

for url, fname in font_urls:
    force_download_font(url, fname)

# Register the Thai fonts with ReportLab so the PDF report can render Thai
# text; fall back silently to built-in fonts when registration fails.
try:
    if os.path.exists("THSarabunNew.ttf"):
        pdfmetrics.registerFont(TTFont('THSarabun', 'THSarabunNew.ttf'))
    if os.path.exists("THSarabunNew-Bold.ttf"):
        pdfmetrics.registerFont(TTFont('THSarabun-Bold', 'THSarabunNew-Bold.ttf'))
except Exception as e:
    print(f"⚠️ Font Registration Error: {e}")


# ============================================
# 2) Helper Functions
# ============================================
def text_to_speech(text):
    """Synthesize Thai speech for *text* via gTTS.

    Returns the path of a temporary .mp3 file, or None when synthesis fails
    (e.g. no network) — callers treat None as "no audio".
    """
    try:
        tts = gTTS(text, lang='th')
        f = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
        tts.save(f.name)
        return f.name
    except Exception as e:
        # [FIX] was a bare `except:` which also swallowed KeyboardInterrupt.
        print(f"TTS Error: {e}")
        return None


def generate_led_html(score, diagnosis):
    """Build the "LED panel" HTML showing the primary diagnosis and score.

    Red for high-grade findings, green for normal, amber otherwise.
    NOTE(review): the original inline markup was lost in extraction; this is a
    functional reconstruction matching the displayed fields
    (Primary Diagnosis / diagnosis / score%).
    """
    color = "#ff4444" if "High grade" in diagnosis else "#00C851" if "Normal" in diagnosis else "#ffbb33"
    return f"""
    <div style="background:#111; border-radius:15px; padding:20px; text-align:center;
                border:2px solid {color}; box-shadow:0 0 15px {color};">
        <div style="color:#aaa; font-size:14px; letter-spacing:2px;">Primary Diagnosis</div>
        <div style="color:{color}; font-size:26px; font-weight:bold; margin:8px 0;">{diagnosis}</div>
        <div style="color:{color}; font-size:40px; font-family:monospace;
                    text-shadow:0 0 10px {color};">{score}%</div>
    </div>
    """


def create_medical_report(pt_name, pt_id, diagnosis, conf):
    """Render a one-page PDF report and return its file path (None on error).

    Uses the registered Thai font when available so Thai names render
    correctly; otherwise falls back to Helvetica-Bold.
    """
    try:
        # [FIX] tempfile.mktemp() is deprecated and race-prone; reserve the
        # file atomically instead.
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
        filename = tmp.name
        tmp.close()

        c = canvas.Canvas(filename, pagesize=A4)
        # Use registered font if available, else standard
        font_name = ('THSarabun-Bold'
                     if 'THSarabun-Bold' in pdfmetrics.getRegisteredFontNames()
                     else 'Helvetica-Bold')
        c.setFont(font_name, 24)
        c.drawString(2 * cm, 27 * cm, "Medical Image Analysis Report")
        c.setFont(font_name, 16)
        c.drawString(2 * cm, 25 * cm, f"Patient Name: {pt_name}")
        c.drawString(2 * cm, 24 * cm, f"Patient ID: {pt_id}")
        c.drawString(2 * cm, 22 * cm, f"Diagnosis Result: {diagnosis}")
        c.drawString(2 * cm, 21 * cm, f"Confidence Score: {conf}%")
        c.drawString(2 * cm, 20 * cm,
                     f"Date: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M')}")
        c.save()
        return filename
    except Exception as e:
        print(f"PDF Error: {e}")
        return None


# ============================================
# [FIXED] Chat Function
# ============================================
def chat_fn(message, history, crop_img, info_text, diagnosis):
    """Answer a user question with Gemini, grounded in the current diagnosis.

    *history* is a list of {"role", "content"} dicts (Gradio "messages"
    format). Returns (updated_history, "") — the empty string clears the
    textbox. *crop_img* is accepted for interface compatibility but unused.
    """
    # Accept None from a fresh Chatbot component.
    if history is None:
        history = []

    # 1. Bail out early when the API key secret was never configured.
    if not GOOGLE_API_KEY:
        response = "❌ ไม่พบ API KEY: กรุณาไปที่ Settings > Secrets แล้วตั้งค่า 'GOOGLE_API_KEY' จากนั้นกด Restart Space"
        # Append messages as dicts (new Huggingface/Gradio standard).
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": response})
        return history, ""

    try:
        # 2. Build the context prompt (Thai, runtime string — keep verbatim).
        context_prompt = f"""
บทบาท: คุณคือผู้ช่วยทางการแพทย์อัจฉริยะ (AI Medical Assistant)

ข้อมูลบริบททางการแพทย์ของผู้ป่วยรายนี้:
- ผลการวินิจฉัยหลัก (Diagnosis): {diagnosis if diagnosis else "ยังไม่มีการวินิจฉัย"}
- ข้อมูลเพิ่มเติม: {info_text if info_text else "ไม่มี"}

คำถามจากผู้ใช้: {message}

คำแนะนำในการตอบ:
- ตอบเป็นภาษาไทย ให้กระชับ เข้าใจง่าย
- ถ้าเกี่ยวกับเรื่องซีสต์หรือเนื้องอก ให้ข้อมูลตามหลักการแพทย์
- *สำคัญ*: ต้องลงท้ายเสมอว่า "ควรปรึกษาแพทย์ผู้เชี่ยวชาญเพื่อการวินิจฉัยที่แม่นยำที่สุด"
"""
        # 3. Call Gemini.
        model = genai.GenerativeModel('gemini-1.5-flash')
        response = model.generate_content(context_prompt)
        bot_reply = response.text
    except Exception as e:
        bot_reply = f"เกิดข้อผิดพลาด (System Error): {str(e)}"
        print(f"DEBUG ERROR: {e}")

    # [FIX] Append the turn to history in dict form.
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": bot_reply})

    # Return history plus "" to clear the input textbox.
    return history, ""


# ============================================
# 3) Main Inference Logic
# ============================================
def analyze_image(image, history_list):
    """Run the full pipeline on one ultrasound frame.

    Steps: CLAHE contrast enhancement -> YOLO detect/segment -> draw boxes
    and mask overlay -> build a 3D density plot -> LED HTML + Thai TTS.

    Returns a 15-tuple matching the Gradio outputs list:
    (detection image, segmentation overlay, lesion crop, plotly figure,
     text log, LED html, audio path, crop state, diagnosis state,
     confidence state, gallery, gallery state, original, detection,
     segmentation).
    """
    if history_list is None:
        history_list = []
    if image is None:
        return [None] * 15

    if not os.path.exists(MODEL_PATH):
        error_msg = f"⚠️ Model file not found at {MODEL_PATH}. Please upload the .pt file."
        return (image, image, image, go.Figure(), error_msg, "", None, image,
                "Error", 0, history_list, history_list, image, image, image)

    history_list.append(image)

    # CLAHE on the L channel (LAB space) boosts ultrasound contrast without
    # distorting color.
    lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)
    enhanced_img = cv2.merge((cl, a, b))
    enhanced_img = cv2.cvtColor(enhanced_img, cv2.COLOR_LAB2RGB)

    try:
        model = YOLO(MODEL_PATH)
        results = model.predict(
            enhanced_img,
            imgsz=640,
            conf=0.25,
            iou=0.45,
            augment=True,
            verbose=False,
        )[0]
    except Exception as e:
        return (image, image, image, go.Figure(), f"Inference Error: {e}", "",
                None, image, "Error", 0, history_list, history_list,
                image, image, image)

    orig = image.copy()
    seg_overlay = image.copy()
    crop_img = np.zeros_like(image)
    info_log = "Analysis Results:\n" + "-" * 20 + "\n"
    max_conf = 0
    primary_diag = "Normal / Not Found"
    fig = go.Figure()

    if results.boxes and len(results.boxes) > 0:
        boxes = results.boxes.data.cpu().numpy()
        for i, box in enumerate(boxes):
            x1, y1, x2, y2, conf, cls_id = box
            cls_name = CLASS_NAMES.get(int(cls_id), "Unknown")
            # Track the highest-confidence detection as the primary diagnosis.
            if conf > max_conf:
                max_conf = conf
                primary_diag = cls_name
                crop_img = image[int(y1):int(y2), int(x1):int(x2)]
            # Box color: red = high grade, green = normal, orange = other.
            color = (0, 165, 255)
            if "High grade" in cls_name:
                color = (255, 0, 0)
            if "Normal" in cls_name:
                color = (0, 255, 0)
            cv2.rectangle(orig, (int(x1), int(y1)), (int(x2), int(y2)), color, 3)
            label = f"{cls_name} {conf*100:.1f}%"
            cv2.putText(orig, label, (int(x1), int(y1) - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
            info_log += f"Found #{i+1}: {cls_name} ({conf*100:.1f}%)\n"

        if results.masks:
            # Union all instance masks, overlay in translucent green with a
            # white contour, then sample a distance-transform "density" cloud
            # for the 3D plot.
            mask_combined = np.zeros(image.shape[:2], dtype=np.float32)
            for m_raw in results.masks.data.cpu().numpy():
                m_resized = cv2.resize(m_raw, (image.shape[1], image.shape[0]))
                mask_combined = np.maximum(mask_combined, m_resized)
            mask_bool = mask_combined > 0.5
            mask_uint8 = (mask_bool * 255).astype(np.uint8)

            colored_mask = np.zeros_like(seg_overlay)
            colored_mask[mask_bool] = (0, 255, 0)
            seg_overlay = cv2.addWeighted(seg_overlay, 1.0, colored_mask, 0.4, 0)
            contours, _ = cv2.findContours(mask_uint8, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
            cv2.drawContours(seg_overlay, contours, -1, (255, 255, 255), 2)

            dist_map = cv2.distanceTransform(mask_uint8, cv2.DIST_L2, 5)
            y_idx, x_idx = np.where(mask_bool)
            if len(x_idx) > 0:
                # Subsample to at most ~1000 points to keep the plot light.
                step = max(1, len(x_idx) // 1000)
                fig.add_trace(go.Scatter3d(
                    x=x_idx[::step],
                    y=image.shape[0] - y_idx[::step],
                    z=dist_map[y_idx, x_idx][::step],
                    mode='markers',
                    marker=dict(size=2,
                                color=dist_map[y_idx, x_idx][::step],
                                colorscale='Hot', opacity=0.8),
                ))
    else:
        info_log = "ไม่พบความผิดปกติในภาพนี้ (No Lesion Detected)"
        crop_img = image

    fig.update_layout(
        scene=dict(xaxis_title='Width', yaxis_title='Height', zaxis_title='Density'),
        margin=dict(l=0, r=0, b=0, t=0),
        height=300,
    )
    score_percent = int(max_conf * 100)
    led_html = generate_led_html(score_percent, primary_diag)
    audio_path = text_to_speech(
        f"วิเคราะห์เสร็จสิ้น ตรวจพบ {primary_diag} ความมั่นใจ {score_percent} เปอร์เซ็นต์")

    return (orig, seg_overlay, crop_img, fig, info_log, led_html, audio_path,
            crop_img, primary_diag, score_percent, history_list, history_list,
            image, orig, seg_overlay)


# ============================================
# 4) Gradio UI
# ============================================
css = """
@import url('https://fonts.googleapis.com/css2?family=Montserrat:wght@400;700;900&display=swap');
.logo-container { display: flex; justify-content: flex-end; align-items: center; gap: 20px; }
#intro-overlay {
    position: fixed; top: 0; left: 0; width: 100vw; height: 100vh;
    background-color: #000; z-index: 99999;
    display: flex; flex-direction: column; justify-content: center; align-items: center;
    animation: fadeOutOverlay 1s ease-in-out 4.5s forwards;
    pointer-events: none;
}
.intro-content {
    display: flex; gap: 40px; align-items: center;
    animation: zoomInLogos 3s cubic-bezier(0.25, 0.46, 0.45, 0.94) forwards;
}
.intro-logo { height: 120px; width: auto; filter: drop-shadow(0 0 10px rgba(255,255,255,0.3)); }
.intro-text-container {
    margin-top: 40px; text-align: center; opacity: 0;
    animation: textSlideUp 1.5s ease-out 1.2s forwards;
}
.intro-title {
    color: #ffffff; font-family: 'Montserrat', sans-serif;
    font-size: 2.5rem; font-weight: 900; text-transform: uppercase;
    letter-spacing: 2px; text-shadow: 0 0 20px rgba(255, 255, 255, 0.6);
    line-height: 1.2; margin-bottom: 10px;
}
.intro-subtitle {
    color: #b3b3b3; font-family: 'Montserrat', sans-serif;
    font-size: 1.2rem; font-weight: 400; letter-spacing: 4px;
}
@keyframes zoomInLogos {
    0% { transform: scale(0.8); opacity: 0; }
    50% { transform: scale(1.05); opacity: 1; }
    100% { transform: scale(1.0); opacity: 1; }
}
@keyframes textSlideUp {
    0% { transform: translateY(30px); opacity: 0; }
    100% { transform: translateY(0); opacity: 1; }
}
@keyframes fadeOutOverlay { to { opacity: 0; visibility: hidden; z-index: -1; } }
/* Floating Chatbot CSS */
#floating_container {
    position: fixed; bottom: 25px; left: 25px; z-index: 9999;
    display: flex; flex-direction: column; align-items: flex-start;
}
#chat_window {
    width: 380px; height: 550px; background: white; border-radius: 20px;
    box-shadow: 0 15px 50px rgba(0,0,0,0.25); margin-bottom: 15px;
    display: none; flex-direction: column; border: 1px solid #eee; overflow: hidden;
}
.show-chat #chat_window { display: flex !important; }
#chat_btn {
    width: 80px; height: 80px; background: white; border-radius: 50%;
    cursor: pointer; display: flex; justify-content: center; align-items: center;
    box-shadow: 0 8px 30px rgba(0,0,0,0.2); transition: 0.3s; border: 2px solid #0072ff;
}
#chat_btn:hover { transform: scale(1.1); }
#chat_btn img { width: 65px; height: 65px; object-fit: contain; border-radius: 50%; }
"""

with gr.Blocks(theme=gr.themes.Soft(), css=css, title="Ovarian Tumor AI") as demo:
    # --- Intro Overlay ---
    # NOTE(review): the original overlay markup was lost in extraction; this
    # reconstruction targets the surviving CSS (ids/classes above) and the
    # visible text/asset URLs.
    gr.HTML(f"""
    <div id="intro-overlay">
        <div class="intro-content">
            <img class="intro-logo" src="{LOGO_KMUTNB_URL}" alt="KMUTNB">
            <img class="intro-logo" src="{LOGO_RAMA_URL}" alt="Ramathibodi">
        </div>
        <div class="intro-text-container">
            <div class="intro-title">Deep Learning for<br>Ovarian Tumor Detection<br>in Ultrasound Images</div>
            <div class="intro-subtitle">AI MEDICAL DIAGNOSIS SYSTEM</div>
        </div>
        <audio autoplay src="{INTRO_SOUND_URL}"></audio>
    </div>
    """)

    # --- Header ---
    with gr.Row(variant="panel"):
        with gr.Column(scale=3):
            gr.Markdown("# 🏥 Ovarian Tumor Diagnosis System")
            gr.Markdown("AI System for Ovarian Tumor Detection & Diagnosis")
            gr.Markdown("จัดทำโดย นายภานรินทร์ เปียกบุตร & นางสาวภาพิมล ไพจิตโรจนา")
        with gr.Column(scale=2):
            with gr.Row(elem_classes="logo-container"):
                gr.Image(LOGO_KMUTNB_URL, show_label=False, container=False, height=65)
                gr.Image(LOGO_RAMA_URL, show_label=False, container=False, height=65)

    # State Variables
    state_crop = gr.State(None)
    state_info = gr.State("")
    state_diag = gr.State("")
    state_conf = gr.State(0)
    state_gallery = gr.State([])
    state_img_orig = gr.State(None)
    state_img_det = gr.State(None)
    state_img_seg = gr.State(None)
    state_fig = gr.State(None)

    # --- Main UI ---
    with gr.Tabs():
        with gr.Tab("1. Detection Analysis"):
            with gr.Row():
                with gr.Column(scale=2):
                    img_in = gr.Image(label="Upload Ultrasound Image", type="numpy", height=400)
                    btn_analyze = gr.Button("🔍 Analyze Image", variant="primary")
                with gr.Column(scale=1):
                    html_led = gr.HTML()
                    aud = gr.Audio(label="Voice Assistant", autoplay=True)
                    txt_log = gr.Textbox(label="Detailed Findings", lines=8)
            with gr.Row():
                img_det = gr.Image(label="AI Detection", interactive=False)
                img_seg = gr.Image(label="Segmentation", interactive=False)
                img_crop = gr.Image(label="Focused Lesion", interactive=False)

        with gr.Tab("2. Medical Report"):
            with gr.Row():
                inp_pt_name = gr.Textbox(label="Patient Name")
                inp_pt_id = gr.Textbox(label="Patient ID (HN)")
            btn_pdf = gr.Button("🖨️ Generate PDF Report", variant="primary")
            out_pdf = gr.File(label="Download Report")

        with gr.Tab("3. Gallery History"):
            gallery_ui = gr.Gallery(columns=4, height=600)

    # --- [FIXED] Floating Chatbot ---
    with gr.Column(elem_id="floating_container"):
        with gr.Column(elem_id="chat_window"):
            gr.HTML("""
            <div style="background: linear-gradient(90deg, #0072ff, #00c6ff);
                        color: white; padding: 14px; font-weight: bold;">
                💬 ปรึกษาน้องดูแล
            </div>
            """)
            # [FIX] type="messages" tells Gradio the history uses the new
            # {"role", "content"} dict format produced by chat_fn.
            chatbot = gr.Chatbot(height=400, show_label=False,
                                 avatar_images=(None, LOGO_RAMA_URL),
                                 type="messages")
            msg = gr.Textbox(placeholder="พิมพ์คำถามที่นี่...", show_label=False)
            btn_send = gr.Button("ส่งข้อความ", variant="primary")
        # Round toggle button; clicking flips the .show-chat class that the
        # CSS uses to reveal #chat_window.
        gr.HTML(f"""
        <div id="chat_btn"
             onclick="document.getElementById('floating_container').classList.toggle('show-chat')">
            <img src="{LOGO_RAMA_URL}" alt="chat">
        </div>
        """)

    # --- Interactions ---
    btn_analyze.click(
        analyze_image,
        [img_in, state_gallery],
        [img_det, img_seg, img_crop, state_fig, txt_log, html_led, aud,
         state_crop, state_diag, state_conf, gallery_ui, state_gallery,
         state_img_orig, state_img_det, state_img_seg]
    )

    def pdf_wrapper(name, pid, diag, conf):
        # Only generate a report after an analysis has produced a diagnosis.
        if not diag:
            return None
        return create_medical_report(name, pid, diag, conf)

    btn_pdf.click(pdf_wrapper, [inp_pt_name, inp_pt_id, state_diag, state_conf], out_pdf)

    # [FIX] Second output (msg) clears the textbox after sending.
    btn_send.click(chat_fn, [msg, chatbot, state_crop, state_info, state_diag], [chatbot, msg])
    msg.submit(chat_fn, [msg, chatbot, state_crop, state_info, state_diag], [chatbot, msg])

if __name__ == "__main__":
    demo.launch()