File size: 2,967 Bytes
d1c266e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import os
from langchain_huggingface import HuggingFaceEndpoint
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
from langchain_core.runnables import RunnableLambda

# Hugging Face model repo to load; overridable via the LLM_REPO_ID env var.
LLM_REPO_ID = os.environ.get("LLM_REPO_ID", "Jyo-K/Fine-Tuned-Qwen2.5_1B")
# Hugging Face API token, read once at import; must be set before the
# first get_llm() call or initialization will fail.
HF_API_KEY = os.environ.get("HF_API_KEY")

# Module-level cache holding the singleton LLM client (see get_llm()).
_llm_instance = None

def get_llm():
    """Lazily initialize and return the shared HuggingFaceEndpoint instance.

    The endpoint is created on first call and cached in the module-level
    ``_llm_instance`` so subsequent calls reuse the same client.

    Returns:
        HuggingFaceEndpoint: the cached LLM client.

    Raises:
        ValueError: if the HF_API_KEY environment variable is not set.
    """
    global _llm_instance
    if _llm_instance is None:
        if not HF_API_KEY:
            # Name the variable the code actually reads (HF_API_KEY), not
            # HF_TOKEN — the old message sent operators to the wrong setting.
            raise ValueError("HF_API_KEY environment variable not set. Cannot initialize LLM.")
        _llm_instance = HuggingFaceEndpoint(
            repo_id=LLM_REPO_ID,
            huggingfacehub_api_token=HF_API_KEY,
            temperature=0.1,  # low temperature: near-deterministic answers
            max_new_tokens=256,
            top_k=50,
            top_p=0.95,
        )
        print("--- LLM Initialized ---")
    return _llm_instance

# Prompt that asks the model to map the user's last reply to a strict
# yes/no/unclear JSON verdict. Doubled braces {{...}} are literal braces in
# the template; {last_human_message} is the only input variable.
classifier_prompt = ChatPromptTemplate.from_messages([
    ("system", (
        "You are a helpful classification assistant. "
        "Your task is to classify the user's last response as 'yes', 'no', or 'unclear' "
        "based on their message. "
        "User's previous message: '{last_human_message}'"
        "\nRespond ONLY with a single JSON object in the format: "
        "{{\"classification\": \"yes\"}} or {{\"classification\": \"no\"}} or {{\"classification\": \"unclear\"}}"
    ))
])

# Wrap the LLM step in a RunnableLambda so get_llm() runs on first
# invocation, not at import time — calling get_llm() here would raise
# immediately when HF_API_KEY is unset, defeating the lazy initialization.
symptom_classifier_chain = (
    classifier_prompt
    | RunnableLambda(lambda prompt_value: get_llm().invoke(prompt_value))
    | JsonOutputParser()
)

# Prompt that turns a single symptom name into one concise yes/no question
# for the user. {symptom} is the only input variable.
question_prompt = ChatPromptTemplate.from_messages([
    ("system", (
        "You are a friendly medical assistant bot. Ask the user if they are experiencing the "
        "following symptom. Be clear and concise. Do not add any extra greeting or sign-off. "
        "Symptom: '{symptom}'"
        "\nExample: Are you experiencing any itchiness or a rash?"
    ))
])

# Defer LLM construction to first invocation via RunnableLambda; a direct
# get_llm() call here would run (and possibly raise) at import time.
question_generation_chain = (
    question_prompt
    | RunnableLambda(lambda prompt_value: get_llm().invoke(prompt_value))
    | StrOutputParser()
)


# Prompt that assembles the final non-diagnostic summary from the image
# prediction, confirmed symptoms, treatment info, and mandatory disclaimer.
# Input variables: {disease}, {symptoms}, {treatment}, {disclaimer}.
# NOTE: each section is prefixed with "\n" — adjacent Python string literals
# concatenate directly, and the original template ran the numbered items and
# data fields together ("...{disease}Confirmed Symptoms: {symptoms}...").
summary_prompt = ChatPromptTemplate.from_messages([
    ("system", (
        "You are a helpful medical assistant providing a summary. "
        "Based on the initial image analysis and confirmed symptoms, generate a summary. "
        "DO NOT provide a definitive diagnosis. "
        "Structure your response clearly: "
        "\n1. Start by stating the potential condition identified from the image."
        "\n2. List the symptoms the user confirmed."
        "\n3. Provide the general treatment information found for this condition."
        "\n4. **ALWAYS** include the provided disclaimer at the very end."
        "\n---"
        "\nInitial Image Prediction: {disease}"
        "\nConfirmed Symptoms: {symptoms}"
        "\nPotential Treatment Information: {treatment}"
        "\nDisclaimer: {disclaimer}"
        "\n---"
        "\nGenerate your summary now."
    ))
])

# Defer LLM construction to first invocation via RunnableLambda; a direct
# get_llm() call here would run (and possibly raise) at import time.
summary_generation_chain = (
    summary_prompt
    | RunnableLambda(lambda prompt_value: get_llm().invoke(prompt_value))
    | StrOutputParser()
)