hamdim commited on
Commit
02d44c3
·
verified ·
1 Parent(s): 55fa450

Upload 25 files

Browse files
Dockerfile ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10-slim

# Optimizations for RunPod / Production
# 1. Prevent Python from buffering stdout/stderr (ensures logs show up immediately in RunPod console)
ENV PYTHONUNBUFFERED=1
# 2. Prevent Python from writing .pyc files
ENV PYTHONDONTWRITEBYTECODE=1

WORKDIR /app

# Install system dependencies
# 'build-essential' is often needed for compiling python packages
# 'curl' is added for the Healthcheck
# --no-install-recommends keeps the image slim
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first to leverage Docker cache
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application
COPY . .

# Expose the API port
EXPOSE 8000

# Healthcheck (Optional but recommended for RunPod reliability)
# It pings the root endpoint to ensure the server is responsive.
# FIX: curl localhost, not 0.0.0.0 — 0.0.0.0 is a bind address, not a
# routable destination, and is not guaranteed to work as a client target.
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
  CMD curl -f http://localhost:8000/ || exit 1

# Launch the FastAPI server
# 0.0.0.0 is critical for container networking
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
README.md CHANGED
@@ -1,12 +1,8 @@
1
  ---
2
- title: Smartiag
3
- emoji: 👁
4
- colorFrom: indigo
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 6.2.0
8
- app_file: app.py
9
- pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: SmartDiag Backend
3
+ emoji: ⚕️
4
+ colorFrom: blue
5
+ colorTo: indigo
6
+ sdk: docker
7
+ app_port: 8000
 
 
8
  ---
 
 
agents/__init__.py ADDED
File without changes
agents/anamnesis.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
2
+ from llm_factory import get_llm
3
+ from langchain_core.messages import HumanMessage, AIMessage
4
+ from models import AgentState
5
+ from typing import List
6
+
7
+ llm = get_llm(model_type="text", temperature=0.7)
8
+
9
+ system_prompt = """You are a highly advanced Clinical Copilot (AI Doctor Assistant).
10
+ Your role is to assist the physician by analyzing patient data, suggesting diagnoses, and recommending exams.
11
+
12
+ ### 1. MODE: INTAKE ANALYSIS (Critical)
13
+ IF the user provides a summary of patient data (Name, Age, Symptoms, Vitals), you MUST output a structured clinical assessment in FRENCH:
14
+
15
+ **🚨 ANALYSE CLINIQUE IMMÉDIATE**
16
+ * **Synthèse**: 1 sentence summary of the patient's state (Stable/Unstable).
17
+ * **Red Flags**: List any abnormal vitals or worrying symptoms (e.g., "Hypertension Sévère", "SpO2 Basse").
18
+
19
+ **🧐 HYPOTHÈSES DIAGNOSTIQUES (Probabilités)**
20
+ 1. **[Most Likely]**: Explanation.
21
+ 2. **[Plausible]**: Explanation.
22
+ 3. **[To rule out]**: Explanation (Red Flag).
23
+
24
+ **✅ CONDUITE À TENIR IMMÉDIATE**
25
+ * List 3-4 specific exams to order NOW (e.g., ECG, Troponine, Radio Thorax).
26
+ * Immediate treatments if urgent (e.g., Oxygen, Aspirin).
27
+
28
+ **❓ QUESTIONS D'INVESTIGATION**
29
+ * Ask 3 precises questions to the doctor/patient to narrow down the diagnosis (e.g., "La douleur irradie-t-elle dans le bras gauche?", "Avez-vous des antécédents de phlébite?").
30
+
31
+ ---
32
+
33
+ ### 2. MODE: ONGOING CONSULTATION
34
+ For subsequent messages:
35
+ - Acknowledge the new information.
36
+ - Update your hypothesis.
37
+ - Suggest next steps.
38
+ - Be concise.
39
+
40
+ ### TONE & STYLE
41
+ - Professional, efficient, "Doctor-to-Doctor" tone.
42
+ - USE FRENCH LANGUAGE unless told otherwise.
43
+ - USE MARKDOWN for bolding and lists.
44
+ """
45
+
46
+ prompt = ChatPromptTemplate.from_messages([
47
+ ("system", system_prompt),
48
+ MessagesPlaceholder(variable_name="history"),
49
+ ("user", "{input}")
50
+ ])
51
+
52
+ chain = prompt | llm
53
+
54
+ from services.rag_service import rag_service
55
+
56
async def run_anamnesis_agent(history: List[dict], user_input: str) -> str:
    """Run the clinical-copilot chat agent over prior turns plus a new message.

    Args:
        history: Prior chat turns as dicts with 'role' ('user'/'assistant')
            and 'content' keys.
        user_input: The newest user message.

    Returns:
        The assistant's reply text (French, per the system prompt).
    """
    # RAG context is intentionally disabled in the POC to prevent leakage
    # between patients; re-enable per-patient once patient IDs are threaded
    # through the call chain.
    # rag_context = rag_service.get_context("default")
    rag_context = ""

    # Prepend the RAG context as a pseudo-system message when present.
    messages = []
    if rag_context:
        messages.append(HumanMessage(content=f"[SYSTEM: Patient Context Loaded from RAG]\n{rag_context}\n\n[End of Context]"))

    # Rebuild the LangChain message list from the plain-dict history.
    for msg in history:
        if msg['role'] == 'user':
            messages.append(HumanMessage(content=msg['content']))
        elif msg['role'] == 'assistant':
            messages.append(AIMessage(content=msg['content']))

    # FIX: removed a dead intake-keyword branch whose body was `pass`
    # (it could never affect the call below).
    response = await chain.ainvoke({"history": messages, "input": user_input})
    return response.content
agents/bio_analysis.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from langchain_core.messages import HumanMessage
3
+ from llm_factory import get_llm
4
+ import base64
5
+
6
+ # Initialize both models
7
+ text_llm = get_llm(model_type="text", temperature=0.1)
8
+ vision_llm = get_llm(model_type="vision", temperature=0.1)
9
+
10
+ import io
11
+ from pypdf import PdfReader
12
+ from services.rag_service import rag_service
13
+
14
async def run_bio_analysis_agent(file_data: bytes, mime_type: str) -> dict:
    """Analyze a lab-report file (PDF or image) into structured JSON data.

    PDFs are text-extracted with pypdf and sent to the text LLM; images are
    base64-embedded and sent to the vision LLM. The result (or raw text on
    JSON-parse failure) is also persisted to the RAG store under the mocked
    'default' patient ID.

    Args:
        file_data: Raw bytes of the uploaded report.
        mime_type: MIME type of the upload (application/pdf or image/*).

    Returns:
        The parsed result dict, or an {"error": ...} dict on failure.
    """
    try:
        print(f"Processing file with mime_type: {mime_type}")

        prompt_text = """
        Tu es un Expert Biologiste et Pharmacien.

        TA MISSION :
        Analyser ce rapport de laboratoire et extraire les données structurées.

        FORMAT DE SORTIE ATTENDU (JSON uniquement) :
        {
            "patient_name": "Nom du patient ou 'Inconnu'",
            "date": "Date du rapport ou 'Inconnue'",
            "critical_alerts": ["Liste des valeurs critiques nécessitant une attention immédiate"],
            "results": [
                {
                    "test_name": "Nom de l'analyse (ex: Hémoglobine)",
                    "value": "Valeur numérique (ex: 10.5)",
                    "unit": "Unité (ex: g/dL)",
                    "ref_range": "Plage de référence (ex: 12-16)",
                    "status": "LOW" | "HIGH" | "NORMAL",
                    "interpretation": "Brève interprétation clinique"
                }
            ],
            "global_interpretation": "Synthèse globale de l'état du patient basés sur les résultats.",
            "recommendations": ["Actions recommandées"]
        }

        RÉPOND UNIQUEMENT AVEC LE JSON. PAS DE MARKDOWN.
        """

        message_content = []
        selected_llm = text_llm  # Default to text

        if mime_type == "application/pdf":
            # Extract text from PDF
            try:
                pdf_file = io.BytesIO(file_data)
                reader = PdfReader(pdf_file)
                extracted_text = ""
                for page in reader.pages:
                    extracted_text += page.extract_text() + "\n"

                print(f"Extracted {len(extracted_text)} chars from PDF")
                message_content.append({"type": "text", "text": prompt_text + "\n\nVOICI LE CONTENU DU RAPPORT :\n" + extracted_text})
                selected_llm = text_llm  # Use DeepSeek for text
            except Exception as pdf_err:
                print(f"PDF Extraction failed: {pdf_err}")
                return {"error": "PDF parsing failed", "details": str(pdf_err)}

        elif mime_type.startswith("image/"):
            # Use Vision for images
            b64_data = base64.b64encode(file_data).decode("utf-8")
            message_content.append({"type": "text", "text": prompt_text})
            message_content.append({
                "type": "image_url",
                "image_url": {"url": f"data:{mime_type};base64,{b64_data}"}
            })
            selected_llm = vision_llm  # Use Gemini for Vision
        else:
            return {"error": "Unsupported file type", "details": mime_type}

        message = HumanMessage(content=message_content)

        response = await selected_llm.ainvoke([message])

        # Clean up response to ensure valid JSON.
        # FIX: the previous slice content[7:-3] assumed a closing fence and
        # silently chopped three real characters from replies that opened a
        # fence without closing it. Strip prefix and suffix independently.
        content = response.content.strip()
        if content.startswith("```json"):
            content = content[len("```json"):]
        elif content.startswith("```"):
            content = content[3:]
        if content.endswith("```"):
            content = content[:-3]
        content = content.strip()

        try:
            # SAVE TO RAG (Mocking 'default' patient ID for this POC)
            rag_service.add_document("default", f"Résultat Analyse Labo:\n{content}", "lab_result")

            return json.loads(content)
        except json.JSONDecodeError as e:
            print(f"JSON Parse Error: {e}")
            print(f"Raw Content: {content}")

            # Even on JSON error, save raw text so context is kept
            rag_service.add_document("default", f"Tentative Analyse Labo (Brut):\n{content}", "lab_result_raw")

            return {
                "error": "Failed to parse JSON",
                "raw_content": content
            }

    except Exception as e:
        print(f"CRITICAL ERROR in run_bio_analysis_agent: {str(e)}")
        import traceback
        traceback.print_exc()
        return {
            "error": "Analysis failed",
            "details": str(e)
        }
agents/diagnosis.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.prompts import ChatPromptTemplate
2
+ from llm_factory import get_llm
3
+
4
+ llm = get_llm(model_type="text", temperature=0.2)
5
+
6
+ system_prompt = """You are an expert medical Diagnostician.
7
+ Your goal is to analyze patient symptoms, history, and biological lab results (if available) to provide a differential diagnosis.
8
+
9
+ Rules:
10
+ 1. **LANGUAGE: FRENCH ONLY.** The entire output must be in French.
11
+ 2. **INTEGRATION:** If biological results are provided, use them to CONFIRM or RULE OUT hypotheses. Cite specific values (e.g., "L'anémie est confirmée par l'hémoglobine à 9g/dL").
12
+ 3. Format your response clearly:
13
+ - **Diagnostic Principal**: The most likely cause.
14
+ - **Justification**: Combine clinical signs + biological proofs.
15
+ - **Diagnostics Différentiels**: Other possibilities.
16
+ - **Signes de Gravité (Red Flags)**: Any urgent warnings.
17
+ 4. Be concise and professional.
18
+ """
19
+
20
+ prompt = ChatPromptTemplate.from_messages([
21
+ ("system", system_prompt),
22
+ ("user", """Patient Info: {patient_info}
23
+
24
+ Symptom Summary (Chat): {symptom_summary}
25
+
26
+ Biological Analysis (Labs): {bio_data}
27
+ """)
28
+ ])
29
+
30
+ chain = prompt | llm
31
+
32
async def run_diagnosis_agent(patient_info: dict, symptom_summary: str, bio_data: dict = None) -> str:
    """Produce a French differential diagnosis from clinical and lab data."""
    if bio_data:
        bio_text = str(bio_data)
    else:
        bio_text = "Aucune donnée biologique fournie."

    payload = {
        "patient_info": str(patient_info),
        "symptom_summary": symptom_summary,
        "bio_data": bio_text,
    }
    result = await chain.ainvoke(payload)
    return result.content
agents/followup.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from llm_factory import get_llm
2
+ from langchain_core.prompts import PromptTemplate
3
+ import os
4
+ from dotenv import load_dotenv
5
+
6
+ load_dotenv()
7
+
8
def get_followup_agent():
    """Build the care-manager chain that writes home-monitoring plans."""
    template = """
    Tu es un Agent de Suivi Médical (Care Manager).

    TA MISSION :
    Créer un plan de surveillance à domicile clair et rassurant pour le patient, basé sur son diagnostic et son traitement.

    DIAGNOSTIC : {diagnosis}
    TRAITEMENT : {treatment}

    GÉNÈRE UN PLAN DE SUIVI (En Français, ton empathique et clair) :

    📅 QUAND CONSULTER À NOUVEAU ?
    - [Délai ou condition précise]

    🚨 SIGNES D'ALERTE (Red Flags) - "Revenez aux urgences si..." :
    - [Liste des symptômes graves à surveiller]

    🏠 CONSEILS DE SURVEILLANCE :
    - [Ex: Prise de température, hydratation, repos...]
    """

    followup_prompt = PromptTemplate(
        input_variables=["diagnosis", "treatment"],
        template=template,
    )
    model = get_llm(model_type="text", temperature=0.2)
    return followup_prompt | model
38
+
39
async def run_followup_agent(diagnosis: str, treatment: str) -> str:
    """Generate the patient-facing home-monitoring plan."""
    followup_chain = get_followup_agent()
    result = await followup_chain.ainvoke({"diagnosis": diagnosis, "treatment": treatment})
    return result.content
agents/imaging.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from llm_factory import get_llm
2
+ from langchain_core.prompts import PromptTemplate
3
+ import os
4
+ from dotenv import load_dotenv
5
+
6
+ load_dotenv()
7
+
8
def get_imaging_agent():
    """Build the radiology-report interpretation chain."""
    template = """
    Tu es un Radiologue Expert (IA).

    TA MISSION :
    Analyser la description d'une imagerie médicale (Radio, Scanner, IRM, Échographie) pour en extraire les anomalies significatives et suggérer une conclusion clinique.

    DESCRIPTION DE L'IMAGERIE / COMPTE-RENDU :
    {imaging_desc}

    CONTEXTE CLINIQUE :
    {clinical_context}

    RÉPONSE ATTENDUE (En Français) :
    1. 🔍 ANOMALIES DÉTECTÉES : [Liste à puces]
    2. 🩺 INTERPRÉTATION CLINIQUE : [Ce que cela signifie pour le patient]
    3. ⚠️ RECOMMANDATION : [Ex: Avis spécialisé, examen complémentaire, urgence]
    """

    imaging_prompt = PromptTemplate(
        input_variables=["imaging_desc", "clinical_context"],
        template=template,
    )
    model = get_llm(model_type="text", temperature=0.1)
    return imaging_prompt | model
35
+
36
async def run_imaging_agent(imaging_desc: str, clinical_context: str) -> str:
    """Interpret an imaging description in its clinical context."""
    imaging_chain = get_imaging_agent()
    result = await imaging_chain.ainvoke({
        "imaging_desc": imaging_desc,
        "clinical_context": clinical_context,
    })
    return result.content
agents/intake.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.prompts import ChatPromptTemplate
2
+ from llm_factory import get_llm
3
+ from langchain_core.output_parsers import JsonOutputParser
4
+ from models import Patient
5
+ import os
6
+
7
+ # Initialize LLM
8
+ llm = get_llm(model_type="text", temperature=0.1)
9
+
10
+ # Parser
11
+ parser = JsonOutputParser(pydantic_object=Patient)
12
+
13
+ # Prompt
14
+ system_prompt = """You are a medical receptionist agent. Your goal is to extract patient information from a natural language introduction.
15
+ Extract the following fields: name, age, gender, and any mentioned medical history.
16
+ If a field is missing, leave it as null or infer it if obvious.
17
+ Return the result as a JSON object matching the Patient schema.
18
+ """
19
+
20
+ prompt = ChatPromptTemplate.from_messages([
21
+ ("system", system_prompt),
22
+ ("user", "{input}")
23
+ ])
24
+
25
+ chain = prompt | llm | parser
26
+
27
async def run_intake_agent(user_input: str) -> Patient:
    """Extract a structured Patient record from a free-text introduction.

    Falls back to an 'Unknown' placeholder patient when extraction fails.
    """
    try:
        extracted = await chain.ainvoke({"input": user_input})
        return Patient(**extracted)
    except Exception as e:
        # Fallback or error handling
        print(f"Error in intake agent: {e}")
        return Patient(name="Unknown", age=0, gender="Unknown")
agents/pharmacist.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from llm_factory import get_llm
2
+ from langchain_core.prompts import PromptTemplate
3
+ import os
4
+ from dotenv import load_dotenv
5
+
6
+ load_dotenv()
7
+
8
def get_pharmacist_agent():
    """Build the clinical-pharmacist chain that vets a prescription."""
    template = """
    Tu es un Pharmacien Clinicien Expert. Ta mission est de sécuriser la prescription.

    PATIENT :
    - Nom : {patient_name}
    - Âge : {patient_age}
    - Antécédents : {history}

    ORDONNANCE PROPOSÉE :
    {prescription}

    ANALYSE REQUISE :
    1. Vérifier les contre-indications absolues avec les antécédents.
    2. Vérifier les interactions médicamenteuses majeures.
    3. Vérifier les dosages (si fournis) par rapport à l'âge.

    RÉPONSE (En Français) :
    - STATUT : [VALIDÉ ✅ / ATTENTION ⚠️ / DANGER ⛔]
    - ANALYSE : [Commentaire bref]
    - RECOMMANDATION : [Si nécessaire, correction suggérée]
    """

    pharmacist_prompt = PromptTemplate(
        input_variables=["patient_name", "patient_age", "history", "prescription"],
        template=template,
    )
    model = get_llm(model_type="text", temperature=0.1)
    return pharmacist_prompt | model
39
+
40
async def run_pharmacist_agent(patient_name: str, patient_age: int, history: str, prescription: str) -> str:
    """Vet a proposed prescription against patient age and history."""
    pharmacist_chain = get_pharmacist_agent()
    result = await pharmacist_chain.ainvoke({
        "patient_name": patient_name,
        "patient_age": patient_age,
        "history": history,
        "prescription": prescription,
    })
    return result.content
agents/planner.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.prompts import ChatPromptTemplate
2
+ from llm_factory import get_llm
3
+
4
+ llm = get_llm(model_type="text", temperature=0.2)
5
+
6
+ system_prompt = """You are a Medical Action Planner.
7
+ Based on the provided diagnosis, recommend the next steps.
8
+
9
+ Rules:
10
+ 1. **LANGUAGE: FRENCH ONLY.**
11
+ 2. Structure the plan as follows:
12
+ - **Examens Complémentaires**: Labs, Imaging (X-Ray, MRI, etc.). Mark URGENT if needed.
13
+ - **Traitement**: Medications (Generic names), dosage instructions.
14
+ - **Orientation**: Specialist referrals (Cardiologist, Pneumologist, etc.).
15
+ - **Conseils**: Lifestyle or monitoring advice.
16
+ 3. If the diagnosis suggests an emergency, emphasize immediate hospital admission or urgent specialist consultation.
17
+ Be specific about which agent (e.g., "Bio Analysis Agent") would handle which part if applicable, but primarily output the medical actions.
18
+ """
19
+
20
+ prompt = ChatPromptTemplate.from_messages([
21
+ ("system", system_prompt),
22
+ ("user", "Diagnosis Report: {diagnosis_report}")
23
+ ])
24
+
25
+ chain = prompt | llm
26
+
27
async def run_planner_agent(diagnosis_report: str) -> str:
    """Turn a diagnosis report into a French medical action plan."""
    result = await chain.ainvoke({"diagnosis_report": diagnosis_report})
    return result.content
agents/radiology.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import io
3
+ import pydicom
4
+ import numpy as np
5
+ from PIL import Image
6
+ import base64
7
+ import httpx
8
+ import asyncio
9
+ from dotenv import load_dotenv
10
+
11
+ load_dotenv()
12
+
13
+ HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
14
+ Z_AI_API_KEY = os.getenv("Z_AI_API_KEY")
15
+ ROBOFLOW_API_KEY = os.getenv("ROBOFLOW_API_KEY")
16
+ HEADERS_HF = {"Authorization": f"Bearer {HF_TOKEN}"}
17
+ HEADERS_Z_AI = {"Authorization": f"Bearer {Z_AI_API_KEY}", "Content-Type": "application/json"}
18
+
19
+ # List of models updated for 2026 SOTA
20
+ MODELS = {
21
+ "fracture": "bone-fracture-vqdiz/1", # Roboflow Object Detection (Local Docker)
22
+ "fracture_vlm": "AIRI-Institute/chexfract-maira2", # Medical VLM for Fracture Reports
23
+ "mammo": "ianpan/mammoscreen",
24
+ "breast_tissue": "nateraw/breast-cancer-classification",
25
+ "glm": "glm-4.6v"
26
+ }
27
+
28
async def call_hf_api(model_id, payload):
    """POST a payload to the Hugging Face Inference API for *model_id*.

    When payload['inputs'] is a base64 string it is decoded and sent as raw
    bytes (image-classification endpoints prefer that); otherwise, or when
    the raw-bytes attempt fails, the payload is sent as JSON.

    Returns:
        The endpoint's JSON response, or an error dict when the model is
        still loading (HTTP 503).
    """
    # Use standard Inference API endpoint for 2026 models
    api_url = f"https://api-inference.huggingface.co/models/{model_id}"

    async with httpx.AsyncClient(timeout=60.0) as client:
        if "inputs" in payload and isinstance(payload["inputs"], str):
            # FIX: the original bare `except:` also swallowed SystemExit /
            # KeyboardInterrupt; `except Exception` keeps the JSON fallback
            # behaviour without trapping interpreter-exit signals.
            try:
                img_data = base64.b64decode(payload["inputs"])
                response = await client.post(api_url, headers=HEADERS_HF, content=img_data)
            except Exception:
                response = await client.post(api_url, headers=HEADERS_HF, json=payload)
        else:
            response = await client.post(api_url, headers=HEADERS_HF, json=payload)

        if response.status_code == 503:
            return {"error": "Model is loading", "details": response.json()}

        return response.json()
49
+
50
async def call_roboflow_api(model_id, image_bytes):
    """Call the LOCAL Roboflow inference server (Docker) with an image.

    Args:
        model_id: Unused here; the project/version are fixed below.
        image_bytes: JPEG-encoded image to analyze.

    Returns:
        The server's JSON detections, or an error dict when the local server
        is unreachable or responds with a non-200 status.
    """
    # SECURITY FIX: the API key must come from the environment only; a
    # literal fallback key was committed here and has been removed. Rotate
    # the leaked key and set ROBOFLOW_API_KEY in the deployment env.
    API_KEY = os.getenv("ROBOFLOW_API_KEY", "")
    PROJECT_ID = "bone-fracture-vqdiz"
    MODEL_VERSION = "1"

    # Local Docker inference endpoint.
    api_url = f"http://localhost:9001/{PROJECT_ID}/{MODEL_VERSION}?api_key={API_KEY}"

    async with httpx.AsyncClient(timeout=30.0) as client:
        # The local server expects a multipart/form-data upload.
        files = {'file': ("image.jpg", image_bytes, "image/jpeg")}
        try:
            response = await client.post(api_url, files=files)

            print(f"DEBUG Roboflow Local: URL={api_url} Status={response.status_code}")

            if response.status_code != 200:
                print(f"DEBUG Roboflow Error Detail: {response.text}")
                return {"error": response.text, "status": response.status_code}

            return response.json()
        except Exception as e:
            print(f"DEBUG Roboflow Local Exception: {str(e)}")
            return {
                "error": "Serveur d'inférence local introuvable.",
                "message": "Assurez-vous que le conteneur Docker tourne (inference server start).",
                "details": str(e)
            }
83
+
84
async def call_z_ai_api(payload):
    """Send a chat-completion request to the Z.AI API and return its JSON."""
    endpoint = "https://api.z.ai/api/paas/v4/chat/completions"

    async with httpx.AsyncClient(timeout=90.0) as client:
        reply = await client.post(endpoint, headers=HEADERS_Z_AI, json=payload)
        return reply.json()
90
+
91
def convert_dicom_to_jpg(dicom_bytes: bytes) -> bytes:
    """Convert a DICOM file into a JPEG image usable by the AI models.

    Returns the JPEG bytes, or None when the DICOM cannot be decoded.
    """
    try:
        dataset = pydicom.dcmread(io.BytesIO(dicom_bytes))
        pixels = dataset.pixel_array

        # Apply the modality rescale (slope/intercept) when present.
        if hasattr(dataset, 'RescaleIntercept') and hasattr(dataset, 'RescaleSlope'):
            pixels = pixels.astype(np.float32) * dataset.RescaleSlope + dataset.RescaleIntercept

        # Normalize the dynamic range into 0-255 for an 8-bit image.
        lo, hi = np.min(pixels), np.max(pixels)
        if hi > lo:
            pixels = ((pixels - lo) / (hi - lo) * 255).astype(np.uint8)
        else:
            pixels = pixels.astype(np.uint8)

        image = Image.fromarray(pixels)
        if image.mode != 'RGB':
            image = image.convert('RGB')

        out = io.BytesIO()
        image.save(out, format="JPEG", quality=95)
        return out.getvalue()
    except Exception as e:
        print(f"Erreur conversion DICOM: {e}")
        return None
116
+
117
async def run_radiology_agent(image_bytes: bytes, model_type: str, question: str = None) -> dict:
    """Dispatch a medical image to the backend chosen by *model_type*.

    Supported model_type values: "glm" (Z.AI vision chat), "fracture_vlm"
    (HF fracture VLM), "fracture" (local Roboflow detector), or any other
    MODELS key (plain HF image inference).

    NOTE(review): annotated as dict, but the "glm" branch returns the reply
    *string* on success — callers must accept both; confirm intent.
    """
    # DICOM detection: the "DICM" magic marker sits at byte offset 128.
    is_dicom = False
    if len(image_bytes) > 132 and image_bytes[128:132] == b"DICM":
        is_dicom = True

    # Convert DICOM to JPEG first; on conversion failure the raw bytes are
    # passed through unchanged (downstream models may still reject them).
    if is_dicom:
        converted = convert_dicom_to_jpg(image_bytes)
        if converted:
            image_bytes = converted
            print("Image DICOM convertie avec succès.")

    # All remote backends below take the image as base64.
    img_str = base64.b64encode(image_bytes).decode("utf-8")

    if model_type == "glm":
        # Z.AI GLM vision chat: OpenAI-style multimodal message payload.
        payload = {
            "model": "glm-4.6v",
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": f"En tant qu'expert radiologue : {question if question else 'Décrivez cette imagerie et les anomalies potentielles.'}"
                        },
                        {
                            "type": "image_url",
                            "image_url": { "url": f"data:image/jpeg;base64,{img_str}" }
                        }
                    ]
                }
            ]
        }
        result = await call_z_ai_api(payload)
        if "choices" in result:
            return result["choices"][0]["message"]["content"]
        # Error / unexpected payloads are returned verbatim for the caller.
        return result

    elif model_type == "fracture_vlm":
        # Specific handling for the specialized ChexFract VLM: HF VLMs
        # expect an image + free-text prompt in a structured 'inputs' dict.
        model_id = MODELS.get("fracture_vlm")
        payload = {
            "inputs": {
                "image": img_str,
                "text": question if question else "Describe any fractures in this X-ray and propose a BIRADS/Classification."
            }
        }
        return await call_hf_api(model_id, payload)

    else:
        # Standard classification or specialized detection.
        model_id = MODELS.get(model_type)

        if model_type == "fracture":
            # The Roboflow helper expects raw binary, not base64.
            return await call_roboflow_api(model_id, image_bytes)

        payload = {
            "inputs": img_str
        }
        return await call_hf_api(model_id, payload)
agents/report.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.prompts import ChatPromptTemplate
2
+ from llm_factory import get_llm
3
+ from fpdf import FPDF
4
+ import qrcode
5
+ import os
6
+
7
+ llm = get_llm(model_type="text", temperature=0.1)
8
+
9
+ system_prompt = """You are a Medical Report Generator.
10
+ Based on the diagnosis and action plan, write a formal medical report.
11
+ Include:
12
+ 1. Patient Summary.
13
+ 2. Clinical Findings.
14
+ 3. Diagnosis.
15
+ 4. Recommendations & Prescription.
16
+ 5. A polite closing.
17
+
18
+ Format it as a clean text that can be put into a PDF.
19
+ """
20
+
21
+ prompt = ChatPromptTemplate.from_messages([
22
+ ("system", system_prompt),
23
+ ("user", "Patient: {patient_info}\nDiagnosis: {diagnosis}\nPlan: {plan}")
24
+ ])
25
+
26
+ chain = prompt | llm
27
+
28
async def generate_report_content(patient_info: dict, diagnosis: str, plan: str) -> str:
    """Draft the formal medical report text via the LLM chain."""
    result = await chain.ainvoke({
        "patient_info": str(patient_info),
        "diagnosis": diagnosis,
        "plan": plan,
    })
    return result.content
35
+
36
def create_pdf_report(content: str, patient_name: str, filename: str = "report.pdf"):
    """Render the report text to reports/<filename> with a verification QR code.

    Args:
        content: Report body text (sanitized to latin-1 for FPDF core fonts).
        patient_name: Embedded in the QR-code payload.
        filename: Output file name inside the 'reports' directory.

    Returns:
        The path of the written PDF file.
    """
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", size=12)

    # Header
    pdf.set_font("Arial", 'B', 16)
    pdf.cell(200, 10, txt="Medical Report", ln=1, align='C')
    pdf.set_font("Arial", size=12)
    pdf.cell(200, 10, txt="Dr. Amine Tounsi", ln=1, align='C')
    pdf.ln(10)

    # FPDF core fonts are latin-1 only: map common Unicode punctuation to
    # ASCII equivalents before encoding.
    replacements = {
        "\u2019": "'",  # Smart quote
        "\u2018": "'",
        "\u201c": '"',  # Smart double quotes
        "\u201d": '"',
        "\u2013": "-",  # En dash
        "\u2014": "-",  # Em dash
        "\u2264": "<=",  # Less than or equal
        "\u2265": ">=",  # Greater than or equal
        "…": "...",
        "–": "-"
    }
    for char, replacement in replacements.items():
        content = content.replace(char, replacement)

    # Final fallback: encode to latin-1, replace errors with '?', then decode
    content = content.encode('latin-1', 'replace').decode('latin-1')

    pdf.multi_cell(0, 10, txt=content)

    # QR Code for report verification
    qr = qrcode.QRCode(version=1, box_size=10, border=5)
    qr.add_data(f"Patient: {patient_name} - Verified by Dr. Tounsi")
    qr.make(fit=True)
    img = qr.make_image(fill_color="black", back_color="white")
    img.save("temp_qr.png")

    pdf.ln(10)
    pdf.image("temp_qr.png", w=30)
    os.remove("temp_qr.png")

    # Ensure directory exists
    os.makedirs("reports", exist_ok=True)
    # FIX: honour the 'filename' parameter — it was previously ignored and
    # the output path was hard-coded, so every report overwrote the last.
    out_path = f"reports/{filename}"
    pdf.output(out_path)
    return out_path
agents/triage.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from llm_factory import get_llm
2
+ from langchain_core.prompts import PromptTemplate
3
+ import os
4
+ from dotenv import load_dotenv
5
+
6
+ load_dotenv()
7
+
8
def get_triage_agent():
    """Build the emergency-triage chain (red/orange/green levels)."""
    template = """
    Tu es un Agent de Triage Médical expert pour les urgences de première ligne en Tunisie.

    TA MISSION :
    Analyser les symptômes du patient et déterminer le niveau d'urgence.

    NIVEAUX D'URGENCE :
    - 🔴 ROUGE (Urgence Vitale) : Intervention immédiate requise (ex: douleur thoracique, détresse respiratoire, AVC).
    - 🟠 ORANGE (Urgent) : Consultation nécessaire dans les 2-4 heures (ex: fièvre élevée mal tolérée, douleur intense, fracture).
    - 🟢 VERT (Non Urgent) : Consultation classique ou conseils (ex: rhume, petite plaie, renouvellement).

    SYMPTÔMES DU PATIENT :
    {symptoms}

    FORMAT DE RÉPONSE ATTENDU (Strictement en Français) :

    NIVEAU : [ROUGE/ORANGE/VERT]
    JUSTIFICATION : [Explication courte et précise en 1 phrase]
    ACTION RECOMMANDÉE : [Ex: "Appeler le 190", "Consultation immédiate", "Prendre RDV demain"]
    """

    triage_prompt = PromptTemplate(
        input_variables=["symptoms"],
        template=template,
    )
    model = get_llm(model_type="text", temperature=0.1)
    return triage_prompt | model
38
+
39
async def run_triage_agent(symptoms: str) -> str:
    """Classify symptom urgency via the triage chain."""
    triage_chain = get_triage_agent()
    result = await triage_chain.ainvoke({"symptoms": symptoms})
    return result.content
check_mammo_api.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from gradio_client import Client
2
+ import json
3
+
4
def check_api():
    """Print the public API schema of the screening-mammography Space.

    Any connection/import failure is reported to stdout instead of raised.
    """
    try:
        gradio_client = Client("ianpan/screening-mammography")
        print("API Information:")
        print(gradio_client.view_api(return_format="json"))
    except Exception as e:
        print(f"Error: {e}")
11
+
12
+ if __name__ == "__main__":
13
+ check_api()
list_models.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ load_dotenv()
4
+
5
+ # Ensure the key is available for the client if it expects GEMINI_API_KEY
6
+ if "GOOGLE_API_KEY" in os.environ:
7
+ os.environ["GEMINI_API_KEY"] = os.environ["GOOGLE_API_KEY"]
8
+
9
+ try:
10
+ from google import genai
11
+ from google.genai.errors import APIError
12
+ except ImportError:
13
+ # Fallback to google.generativeai if google.genai is not found (older SDK)
14
+ import google.generativeai as genai
15
+ print("Using google.generativeai SDK fallback...")
16
+
17
+ try:
18
+ genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
19
+ print("✨ Modèles d'IA disponibles (via google-generativeai) :")
20
+ print("-------------------------------")
21
+ for model in genai.list_models():
22
+ if 'generateContent' in model.supported_generation_methods:
23
+ print(f"- **{model.name}**")
24
+ exit(0)
25
+ except Exception as e:
26
+ print(f"Fallback failed: {e}")
27
+ exit(1)
28
+
29
+ def lister_modeles_disponibles():
30
+ """Liste tous les modèles disponibles via l'API Google GenAI."""
31
+ try:
32
+ # Initialisation du client (il lira la clé API automatiquement)
33
+ client = genai.Client(api_key=os.environ.get("GOOGLE_API_KEY"))
34
+
35
+ print("✨ Modèles d'IA disponibles :")
36
+ print("-------------------------------")
37
+
38
+ # Récupération de la liste des modèles
39
+ for model in client.models.list():
40
+ # Afficher le nom du modèle et sa description (facultatif)
41
+ print(f"- **{model.name}**")
42
+ # print(f" Description: {model.description}")
43
+
44
+ except Exception as e:
45
+ print(f"Erreur lors de la connexion à l'API : {e}")
46
+ print("Vérifiez que votre clé API est valide et correctement configurée.")
47
+
48
+ # Exécution de la fonction
49
+ lister_modeles_disponibles()
llm_factory.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

from dotenv import load_dotenv
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI

load_dotenv()


def get_llm(model_type="text", temperature=0.1):
    """Factory returning the appropriate chat LLM for a task.

    Args:
        model_type: "text" (DeepSeek-capable) or "vision" (Gemini only).
        temperature: Sampling temperature / creativity level.

    Returns:
        A LangChain chat model instance.

    Raises:
        ValueError: for an unknown ``model_type``, or when no suitable
            API key is configured in the environment.
    """
    # Validate up front — previously an unknown model_type was silently
    # accepted whenever a Google key was configured.
    if model_type not in ("text", "vision"):
        raise ValueError(f"Unknown model_type: {model_type}")

    # Prioritize Gemini as requested for the demo; it handles both text
    # and vision, so a configured Google key short-circuits everything else.
    google_key = os.getenv("GOOGLE_API_KEY")
    if google_key:
        return ChatGoogleGenerativeAI(
            model="gemini-2.5-flash",
            temperature=temperature,
            google_api_key=google_key,
        )

    # No Google key: vision has no fallback provider.
    # (The old code re-checked `if google_key:` here, which could never be
    # true after the early return above — that dead branch is removed.)
    if model_type == "vision":
        raise ValueError("Google API Key required for vision")

    # Text: fall back to DeepSeek via its OpenAI-compatible endpoint.
    api_key = os.getenv("DEEPSEEK_API_KEY")
    if not api_key:
        raise ValueError("No API keys found (DeepSeek or Google)")

    return ChatOpenAI(
        model="deepseek-chat",
        api_key=api_key,
        base_url="https://api.deepseek.com",
        temperature=temperature,
    )
main.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from dotenv import load_dotenv
import os
import httpx
import base64

# Load .env before importing the agents so they see API keys at import time.
load_dotenv()

from fastapi import FastAPI, HTTPException, Form
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import List, Dict, Any, Optional
from models import Patient, AgentState
from agents.intake import run_intake_agent
from agents.anamnesis import run_anamnesis_agent
from agents.diagnosis import run_diagnosis_agent
from agents.planner import run_planner_agent


app = FastAPI(title="Medical Multi-Agent POC")

# CORS configuration — wide open for the POC frontend.
# NOTE(review): browsers reject allow_origins=["*"] combined with
# allow_credentials=True for credentialed requests; restrict origins
# before production use.
origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class IntakeRequest(BaseModel):
    # Free-text patient description to be structured by the intake agent.
    input: str

class AnamnesisRequest(BaseModel):
    # Prior conversation turns plus the newest user message.
    history: List[Dict[str, str]]
    input: str

class DiagnosisRequest(BaseModel):
    # Structured patient data, a symptom summary, and optional biology results.
    patient_info: Dict[str, Any]
    symptom_summary: str
    bio_data: Optional[Dict[str, Any]] = None

class PlannerRequest(BaseModel):
    # Full diagnosis report text to derive a care plan from.
    diagnosis_report: str

@app.get("/")
async def root():
    """Health/landing endpoint (also used by the Docker HEALTHCHECK)."""
    return {"message": "Medical Multi-Agent POC Backend is running"}
49
+
50
@app.post("/api/intake", response_model=Patient)
async def intake_endpoint(request: IntakeRequest):
    """Structure a free-text description into a Patient record."""
    return await run_intake_agent(request.input)

@app.post("/api/anamnesis")
async def anamnesis_endpoint(request: AnamnesisRequest):
    """Continue the anamnesis (history-taking) dialogue; returns the agent's reply."""
    response = await run_anamnesis_agent(request.history, request.input)
    return {"response": response}

@app.post("/api/diagnosis")
async def diagnosis_endpoint(request: DiagnosisRequest):
    """Generate a diagnosis report from patient info, symptoms and optional bio data."""
    report = await run_diagnosis_agent(request.patient_info, request.symptom_summary, request.bio_data)
    return {"report": report}

@app.post("/api/planner")
async def planner_endpoint(request: PlannerRequest):
    """Produce a treatment/care plan from a diagnosis report."""
    plan = await run_planner_agent(request.diagnosis_report)
    return {"plan": plan}
68
+
69
# --- New Agents Endpoints ---

from agents.triage import run_triage_agent
from agents.pharmacist import run_pharmacist_agent
from agents.imaging import run_imaging_agent
from agents.followup import run_followup_agent

class TriageRequest(BaseModel):
    # Free-text symptom description.
    symptoms: str

class PharmacistRequest(BaseModel):
    patient_name: str
    patient_age: int
    # Relevant medical history (free text).
    history: str
    # Prescription text to be checked by the pharmacist agent.
    prescription: str

class ImagingRequest(BaseModel):
    # Textual description of the imaging study.
    imaging_desc: str
    clinical_context: str

class FollowupRequest(BaseModel):
    diagnosis: str
    treatment: str

@app.post("/api/triage")
async def triage_endpoint(request: TriageRequest):
    """Run the triage agent on free-text symptoms."""
    result = await run_triage_agent(request.symptoms)
    return {"result": result}

@app.post("/api/pharmacist")
async def pharmacist_endpoint(request: PharmacistRequest):
    """Check a prescription against the patient's profile via the pharmacist agent."""
    result = await run_pharmacist_agent(request.patient_name, request.patient_age, request.history, request.prescription)
    return {"result": result}

@app.post("/api/imaging")
async def imaging_endpoint(request: ImagingRequest):
    """Interpret an imaging description within its clinical context."""
    result = await run_imaging_agent(request.imaging_desc, request.clinical_context)
    return {"result": result}

@app.post("/api/followup")
async def followup_endpoint(request: FollowupRequest):
    """Generate follow-up recommendations for a diagnosis/treatment pair."""
    result = await run_followup_agent(request.diagnosis, request.treatment)
    return {"result": result}
112
+
113
from agents.bio_analysis import run_bio_analysis_agent
from agents.radiology import run_radiology_agent
from agents.report import generate_report_content, create_pdf_report
from fastapi.responses import FileResponse

from fastapi import UploadFile, File

class ReportRequest(BaseModel):
    # Structured patient data plus the diagnosis/plan texts to include.
    patient_info: Dict[str, Any]
    diagnosis: str
    plan: str

@app.post("/api/bio-analysis")
async def bio_analysis_endpoint(file: UploadFile = File(...)):
    """Analyse an uploaded biology-results document via the bio-analysis agent."""
    content = await file.read()
    analysis = await run_bio_analysis_agent(content, file.content_type)
    return {"analysis": analysis}

@app.post("/api/radiology-analysis")
async def radiology_analysis_endpoint(
    file: UploadFile = File(...),
    model_type: str = Form(...),
    question: Optional[str] = Form(None)
):
    """Run the radiology agent on an uploaded study, with an optional question."""
    content = await file.read()
    result = await run_radiology_agent(content, model_type, question)
    return {"result": result}

@app.post("/api/generate-report")
async def report_endpoint(request: ReportRequest):
    """Generate report text and a PDF; return the content and its download URL.

    BUG FIX: the URL template had been mangled to the literal "(unknown)";
    restored the intended "{filename}" interpolation so the returned
    pdf_url actually points at the generated file.
    """
    content = await generate_report_content(request.patient_info, request.diagnosis, request.plan)
    patient_name = request.patient_info.get("name", "patient").replace(" ", "_").lower()
    filename = f"{patient_name}_report.pdf"
    file_path = create_pdf_report(content, request.patient_info.get("name", "Unknown"), filename)
    return {"content": content, "pdf_url": f"/reports/{filename}"}

@app.get("/reports/{filename}")
async def get_report(filename: str):
    """Serve a previously generated PDF report from the reports/ directory.

    BUG FIX: the route path and file path had been mangled to "(unknown)";
    restored the "{filename}" path parameter. basename() strips any path
    components so a crafted filename cannot escape reports/ (path traversal).
    """
    safe_name = os.path.basename(filename)
    return FileResponse(f"reports/{safe_name}")
152
+
153
import google.generativeai as genai
import json

@app.post("/api/collab-chat")
async def collab_chat_endpoint(
    text: str = Form(...),
    history: str = Form(...),
    file: Optional[UploadFile] = File(None)
):
    """Multimodal collaboration chat backed by the Z.AI GLM-4.6V model.

    Accepts the user's message, a JSON-encoded chat history (Gemini-style
    turns with "role" and "parts"), and an optional attachment. Images are
    inlined as base64 data URLs; PDFs are converted to text locally with
    pypdf and appended to the prompt. Always returns {"response": str} —
    errors are reported in-band so the frontend can display them directly.
    """
    try:
        # Load chat history
        chat_history = json.loads(history)

        # Priority: GLM (VLM) for multimodal medical analysis
        z_api_key = os.getenv("Z_AI_API_KEY")

        if not z_api_key:
            return {"response": "Erreur: Clé API IA Vision (VLM) non configurée dans le fichier .env."}

        api_url = "https://api.z.ai/api/paas/v4/chat/completions"
        headers = {
            "Authorization": f"Bearer {z_api_key}",
            "Content-Type": "application/json"
        }

        # Convert history (Gemini format) to GLM's OpenAI-style message format.
        messages = []
        # Add system prompt for medical expertise
        messages.append({
            "role": "system",
            "content": "Tu es un expert médical SmartDiag. Analyse les documents et réponds aux médecins avec précision clinique."
        })

        for h in chat_history:
            role = "user" if h["role"] == "user" else "assistant"
            # Extract text from parts — "parts" may be a list of dicts
            # with a "text" key, or a plain string.
            msg_text = ""
            if isinstance(h["parts"], list) and len(h["parts"]) > 0:
                msg_text = h["parts"][0].get("text", "")
            elif isinstance(h["parts"], str):
                msg_text = h["parts"]

            # Empty turns are dropped rather than sent as blank messages.
            if msg_text:
                messages.append({"role": role, "content": msg_text})

        # Prepare current content based on media type
        if file:
            current_content = []
            extracted_text = ""
            file_bytes = await file.read()

            if file.content_type.startswith("image/"):
                # Inline the image as a base64 data URL for the VLM.
                img_b64 = base64.b64encode(file_bytes).decode("utf-8")
                current_content.append({
                    "type": "image_url",
                    "image_url": {"url": f"data:{file.content_type};base64,{img_b64}"}
                })
            elif file.content_type == "application/pdf":
                try:
                    import io
                    from pypdf import PdfReader
                    pdf_file = io.BytesIO(file_bytes)
                    reader = PdfReader(pdf_file)
                    for page in reader.pages:
                        extracted_text += page.extract_text() + "\n"

                    if extracted_text:
                        # Fold the PDF text into the user's message rather than
                        # sending a binary attachment.
                        text = f"{text}\n\n[Contenu du PDF joint ({file.filename})]:\n{extracted_text}"
                    else:
                        # Scanned PDFs have no extractable text layer.
                        return {"response": "Impossible d'extraire le texte de ce PDF (scan/image). Merci d'envoyer une capture d'écran."}
                except Exception as pdf_err:
                    print(f"PDF Error: {pdf_err}")
                    return {"response": "Erreur lecture PDF."}

            # For VLM, content is a list
            current_content.append({"type": "text", "text": text})
            messages.append({"role": "user", "content": current_content})
            model_name = "glm-4.6v"
        else:
            # Text only: Use GLM-4.6v as well for consistency, but with text-only payload
            if not text.strip():
                return {"response": "Veuillez saisir un message."}

            # GLM-4V expects list content for consistency if we use that model
            messages.append({
                "role": "user",
                "content": [{"type": "text", "text": text}]
            })
            model_name = "glm-4.6v"

        payload = {
            "model": model_name,
            "messages": messages,
            "temperature": 0.7,
            "top_p": 0.9,
            "stream": False
        }

        async with httpx.AsyncClient(timeout=120.0) as client:
            response = await client.post(api_url, headers=headers, json=payload)
            if response.status_code == 200:
                result = response.json()
                return {"response": result["choices"][0]["message"]["content"]}
            else:
                error_msg = response.text
                print(f"GLM API Error: {error_msg}")
                return {"response": f"Erreur VLM : Impossible d'analyser le document ({response.status_code})."}

    except Exception as e:
        # Top-level boundary: report the failure in-band instead of a 500,
        # so the chat UI can render it.
        print(f"Critical error in collab-chat: {str(e)}")
        import traceback
        traceback.print_exc()
        return {"response": f"Désolé, une erreur technique est survenue : {str(e)}"}
models.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic import BaseModel
from typing import List, Optional, Dict, Any

class Patient(BaseModel):
    """Structured patient record produced by the intake agent."""
    id: Optional[str] = None
    name: str
    age: int
    gender: str
    medical_history: Optional[str] = None  # free-text prior conditions, if known

class AgentMessage(BaseModel):
    """One turn of an agent conversation."""
    role: str  # "user", "assistant", "system"
    content: str

class AgentState(BaseModel):
    """Shared state carried between agents during a pipeline run."""
    patient: Optional[Patient] = None
    messages: List[AgentMessage] = []  # pydantic deep-copies defaults, so the mutable default is safe here
    context: Dict[str, Any] = {}
    next_step: Optional[str] = None  # presumably the next agent to invoke — not used in visible code

class AgentConfig(BaseModel):
    """Static configuration describing a single agent."""
    name: str
    type: str  # "intake", "anamnesis", "diagnosis", "planner"
    model: str = "gpt-4o"  # NOTE(review): rest of the codebase defaults to Gemini/DeepSeek — confirm this default is still meaningful
    system_prompt: str
reports/mohamed_ben_ali_report.pdf ADDED
Binary file (6.27 kB). View file
 
reports/patient_report.pdf ADDED
Binary file (5.21 kB). View file
 
requirements.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn
3
+ pydantic
4
+ langchain
5
+ langchain-google-genai
6
+ python-dotenv
7
+ fpdf
8
+ qrcode
9
+ python-multipart
10
+ pypdf
11
+ langchain-openai
12
+ pillow
13
+ httpx
14
+ requests
15
+ google-generativeai
services/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Init
services/rag_service.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Dict, List


class RAGService:
    """Minimal in-memory, per-patient context store for the POC.

    Documents are kept as pre-formatted strings keyed by patient id; there
    is no embedding or vector search yet — :meth:`get_context` simply
    concatenates everything known about a patient.
    (Removed the unused ``Optional`` import from the original.)
    """

    def __init__(self) -> None:
        # { patient_id: [formatted document strings] }
        self.context_store: Dict[str, List[str]] = {}

    def add_document(self, patient_id: str, content: str, doc_type: str = "general") -> None:
        """Add a piece of information to the patient's context.

        Args:
            patient_id: Key under which the document is stored.
            content: Raw document text.
            doc_type: Label rendered into the document header (e.g. "lab").
        """
        if patient_id not in self.context_store:
            self.context_store[patient_id] = []

        entry = f"--- DOCUMENT ({doc_type}) ---\n{content}\n"
        self.context_store[patient_id].append(entry)
        print(f"RAG: Added {doc_type} for patient {patient_id}")

    def get_context(self, patient_id: str) -> str:
        """Return all context for a patient as one string ("" if none stored)."""
        docs = self.context_store.get(patient_id, [])
        if not docs:
            return ""

        return "\n".join(docs)


# Singleton instance shared across the application.
rag_service = RAGService()
test.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ test content