# NOTE: the original file began here with non-Python scrape residue from a
# web file viewer (a byte-size header, git commit hashes, and gutter line
# numbers 1-81). That residue was not source code and prevented the module
# from parsing; it has been replaced with this comment.
import os
import gradio as gr
from llama_index.llms.huggingface import HuggingFaceLLM
from parse_tabular import symptom_index
import json

# --- System prompt ---
# Instruction text steering the assistant toward either one clarifying
# question per turn or a final structured JSON answer.
# NOTE(review): SYSTEM_PROMPT is never referenced anywhere in this file —
# presumably it was meant to be passed to the LLM / query engine; confirm
# and wire it in, or remove it.
SYSTEM_PROMPT = """
You are a medical assistant helping a user narrow down to the most likely ICD-10 code.
At each turn, EITHER ask one focused clarifying question (e.g. "Is your cough dry or productive?")
or, if you have enough info, output a final JSON with fields:
{"diagnoses":[…], "confidences":[…]}.
"""

def process_speech(new_transcript, history):
    """Append one transcript/response exchange to the chat history.

    Queries the symptom index with the raw transcript text and appends a
    ``(transcript, json_response)`` tuple to *history*. Any exception is
    converted into a structured JSON error payload instead of propagating,
    so streaming MCP clients keep receiving well-formed output.

    Args:
        new_transcript: Latest transcribed text chunk; falsy values
            (``None``, ``""``) are ignored.
        history: List of ``(user, assistant)`` message tuples. May be
            ``None`` on the first streaming event; treated as empty.

    Returns:
        The updated history list.
    """
    # Gradio streams None before the first exchange; normalize to a list so
    # the append below cannot raise AttributeError.
    history = history or []

    # Skip empty/partial events with no new transcript text.
    if not new_transcript:
        return history

    try:
        # Query the symptom index for relevant ICD-10 codes.
        response = symptom_index.as_query_engine().query(new_transcript)

        # Shape the answer into the structured JSON contract clients expect.
        formatted_response = {
            "diagnoses": [str(response).split(":")[0]],  # Extract ICD code
            "confidences": [0.8],  # Add confidence scoring
            "follow_up": "Is the cough productive or dry?"  # Add interactive questioning
        }
        history.append((new_transcript, json.dumps(formatted_response, indent=2)))

    except Exception as e:
        # Degrade gracefully: surface the error as structured JSON so MCP
        # clients receive a parseable payload instead of a crash.
        error_response = {
            "error": str(e),
            "status": "error"
        }
        history.append((new_transcript, json.dumps(error_response, indent=2)))

    return history

# Build Gradio interface: a chatbot fed by a streaming microphone input.
demo = gr.Blocks()
with demo:
    gr.Markdown("# Symptom to ICD-10 Code Lookup (Audio Input)")
    # Conversation pane; process_speech appends (user, assistant) tuples to it.
    chatbot = gr.Chatbot(label="Conversation")
    # NOTE(review): gr.Audio does not accept type="text" (valid values are
    # "numpy"/"filepath"), and newer Gradio versions use sources=["microphone"]
    # rather than source=. process_speech also expects transcribed TEXT, not
    # raw audio, so a speech-to-text step appears to be missing — verify
    # against the installed Gradio version and intended pipeline.
    audio = gr.Audio(source="microphone", type="text", streaming=True)

    # Add MCP-specific metadata
    # NOTE(review): assigning demo.config directly may clobber the Blocks
    # instance's internal config attribute — confirm this is the supported
    # way to attach MCP metadata in the Gradio version in use.
    demo.config = {
        "mcp": {
            "title": "Medical Symptom to ICD-10 Code Assistant",
            "description": "Convert spoken medical symptoms to ICD-10 codes",
            "version": "1.0.0",
            "capabilities": {
                "speech_input": True,
                "streaming": True
            }
        }
    }

    # Re-run process_speech on every streamed audio chunk, feeding the
    # current chat history back in and rendering the updated history.
    audio.stream(
        process_speech,
        inputs=[audio, chatbot],
        outputs=chatbot,
        show_progress="hidden"
    )

if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from containers/remote hosts.
    demo.launch(
        server_name="0.0.0.0", 
        server_port=7860, 
        # NOTE(review): mcp_server=True requires a Gradio build with MCP support
        # (gradio[mcp]); mcp_polling_interval is not a documented launch()
        # parameter — verify both against the installed Gradio version.
        mcp_server=True,
        mcp_polling_interval=1000  # 1 second polling interval
    )