File size: 3,317 Bytes
d4b3047
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
# chain_recommendations.py
import json
from typing import Dict
from langchain import PromptTemplate, LLMChain
from models import chat_model

#
# UPDATED PROMPT:
# We ask the LLM to return:
#   1) A JSON array (on its own line) listing the recommended packages
#   2) Followed by detailed headings explaining WHY each recommended package is chosen
#      and possibly why other packages are excluded.
#
# NOTE(review): downstream parsing presumably extracts the leading JSON array —
# the template therefore pins the exact package names allowed in that array;
# keep the "valid names" list in sync with the numbered package list above it.
#
improved_recommend_prompt_template = PromptTemplate(
    # Single input: a JSON-serialized mapping of problem area -> severity percentage
    # (see generate_recommendations, which passes json.dumps(problems)).
    input_variables=["problems"],
    template=(
        "You are a wellness recommendation assistant. You receive problem severity percentages:\n"
        "{problems}\n\n"
        "We have these potential packages:\n"
        "1. Fitness & Mobility | Tagline: 'Enhance Mobility. Boost Fitness.'\n"
        "2. No More Insomnia | Deep Rest | Tagline: 'Reclaim Your Sleep. Restore Your Mind.'\n"
        "3. Focus Flow | Clarity Boost | Tagline: 'Stay Focused. Stay Productive.'\n"
        "4. Boost Energy | Tagline: 'Fuel Your Day. Boost Your Energy.'\n"
        "5. Chronic Care | Chronic Support | Tagline: 'Ongoing Support for Chronic Wellness.'\n"
        "6. Mental Wellness | Calm Mind | Tagline: 'Find Peace of Mind, Every Day.'\n\n"
        "Carefully analyze these percentages, considering:\n"
        "- If one area is extremely high (above 70), prioritize that area.\n"
        "- If multiple areas are high (above 60), recommend multiple specialized packages.\n"
        "- If all areas are moderate (30 to 70), suggest a balanced approach.\n"
        "- If all areas are low, a general or minimal package might suffice.\n"
        "- Consider borderline and preventative measures.\n\n"
        "## IMPORTANT:\n"
        "1) First, output the recommended packages in a strict JSON array format, for example:\n"
        "   ```json\n"
        "   [\n"
        "       \"Mental Wellness\",\n"
        "       \"Fitness & Mobility\"\n"
        "   ]\n"
        "   ```\n"
        "   Each item must be exactly one of these valid names:\n"
        "   \"Fitness & Mobility\", \"No More Insomnia\", \"Focus Flow\", \"Boost Energy\", \"Chronic Care\", \"Mental Wellness\".\n\n"
        "2) After that JSON array, provide explanation/analysis under headings:\n"
        "   **High Stress/Anxiety:** ... (if relevant)\n"
        "   **Moderate Fitness & Mobility:** ... (if relevant)\n"
        "   **Gut Health:** ... (if relevant)\n"
        "   **No More Insomnia:** ... (if relevant)\n"
        "   **Focus Flow:** ...\n"
        "   **Justification for Exclusion:** ...\n"
        "   etc.\n\n"
        "Make sure your headings are prefixed with double asterisks, e.g. `**High Stress/Anxiety:**`.\n\n"
        "Return everything as a single string. The important part:\n"
        " - The JSON array is fully valid JSON.\n"
        " - After that, the text analysis uses headings.\n\n"
    )
)

recommend_chain = LLMChain(llm=chat_model, prompt=improved_recommend_prompt_template)

def generate_recommendations(problems: Dict[str, float]) -> str:
    """
    Run the recommendation LLMChain for the given problem severities.

    Args:
        problems: Mapping of problem-area name to severity percentage.

    Returns:
        A single string consisting of:
          1) A JSON array of recommended package names on its own line
          2) Heading-formatted explanations for recommended/excluded packages,
        with leading/trailing whitespace stripped.
    """
    # Serialize the mapping so the prompt's {problems} slot receives valid JSON.
    payload = json.dumps(problems)
    raw_output = recommend_chain.run(problems=payload)
    return raw_output.strip()