Spaces:
Runtime error
Runtime error
# chain_recommendations.py
import json
from typing import Dict

# NOTE(review): the former top-level `from langchain import PromptTemplate, LLMChain`
# re-exports were removed in langchain >= 0.2 and raise ImportError at runtime
# (matching the "Runtime error" shown on the pasted page); import from the
# dedicated submodules instead — same names, same behavior.
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

from models import chat_model
#
# UPDATED PROMPT:
# The LLM is instructed to return, in one string:
#   1) a strict JSON array (on its own line) naming the recommended packages,
#   2) followed by **heading**-prefixed analysis explaining why each package
#      was chosen and, where relevant, why others were excluded.
#
# The template text lives in a module-level constant so the PromptTemplate
# wiring below stays short and the text is easy to review in one place.
_RECOMMEND_TEMPLATE = (
    "You are a wellness recommendation assistant. You receive problem severity percentages:\n"
    "{problems}\n\n"
    "We have these potential packages:\n"
    "1. Fitness & Mobility | Tagline: 'Enhance Mobility. Boost Fitness.'\n"
    "2. No More Insomnia | Deep Rest | Tagline: 'Reclaim Your Sleep. Restore Your Mind.'\n"
    "3. Focus Flow | Clarity Boost | Tagline: 'Stay Focused. Stay Productive.'\n"
    "4. Boost Energy | Tagline: 'Fuel Your Day. Boost Your Energy.'\n"
    "5. Chronic Care | Chronic Support | Tagline: 'Ongoing Support for Chronic Wellness.'\n"
    "6. Mental Wellness | Calm Mind | Tagline: 'Find Peace of Mind, Every Day.'\n\n"
    "Carefully analyze these percentages, considering:\n"
    "- If one area is extremely high (above 70), prioritize that area.\n"
    "- If multiple areas are high (above 60), recommend multiple specialized packages.\n"
    "- If all areas are moderate (30 to 70), suggest a balanced approach.\n"
    "- If all areas are low, a general or minimal package might suffice.\n"
    "- Consider borderline and preventative measures.\n\n"
    "## IMPORTANT:\n"
    "1) First, output the recommended packages in a strict JSON array format, for example:\n"
    "   ```json\n"
    "   [\n"
    "     \"Mental Wellness\",\n"
    "     \"Fitness & Mobility\"\n"
    "   ]\n"
    "   ```\n"
    "   Each item must be exactly one of these valid names:\n"
    "   \"Fitness & Mobility\", \"No More Insomnia\", \"Focus Flow\", \"Boost Energy\", \"Chronic Care\", \"Mental Wellness\".\n\n"
    "2) After that JSON array, provide explanation/analysis under headings:\n"
    "   **High Stress/Anxiety:** ... (if relevant)\n"
    "   **Moderate Fitness & Mobility:** ... (if relevant)\n"
    "   **Gut Health:** ... (if relevant)\n"
    "   **No More Insomnia:** ... (if relevant)\n"
    "   **Focus Flow:** ...\n"
    "   **Justification for Exclusion:** ...\n"
    "   etc.\n\n"
    "Make sure your headings are prefixed with double asterisks, e.g. `**High Stress/Anxiety:**`.\n\n"
    "Return everything as a single string. The important part:\n"
    "  - The JSON array is fully valid JSON.\n"
    "  - After that, the text analysis uses headings.\n\n"
)

# Only `{problems}` is a template placeholder; every other brace-free line is
# emitted verbatim to the model.
improved_recommend_prompt_template = PromptTemplate(
    input_variables=["problems"],
    template=_RECOMMEND_TEMPLATE,
)
| recommend_chain = LLMChain(llm=chat_model, prompt=improved_recommend_prompt_template) | |
def generate_recommendations(problems: Dict[str, float]) -> str:
    """Run the recommendation chain for the given problem severities.

    Args:
        problems: Mapping of problem-area name to severity percentage.

    Returns:
        The LLM's raw text response, stripped of surrounding whitespace.
        Per the prompt contract it should contain a JSON array of package
        names on its own line, followed by heading-prefixed explanations.
    """
    # Serialize the severity mapping so it slots into the {problems}
    # placeholder of the prompt as literal JSON text.
    serialized_problems = json.dumps(problems)
    # NOTE(review): `.run` is deprecated in newer langchain releases
    # (use `.invoke`) — verify against the pinned version before changing.
    raw_output = recommend_chain.run(problems=serialized_problems)
    return raw_output.strip()