Spaces:
Sleeping
Sleeping
Update agents.py
Browse files
agents.py
CHANGED
|
@@ -1,81 +1,131 @@
|
|
| 1 |
-
import json
import os
from collections import Counter
from datetime import datetime, timezone

import matplotlib.pyplot as plt
from fpdf import FPDF
|
| 6 |
-
|
| 7 |
-
# --- Logging ---
|
| 8 |
-
def log_chat_interaction(user_msg, ai_reply, mood):
    """Append one chat exchange to chat_log.json.

    The log file is a JSON array of records; each record carries the user
    message, the AI reply, the user's mood, and a UTC ISO-8601 timestamp.
    """
    file = "chat_log.json"
    logs = []
    if os.path.exists(file):
        try:
            with open(file) as f:
                logs = json.load(f)
        except (json.JSONDecodeError, OSError):
            # A corrupt or unreadable log must not lose the new entry:
            # start a fresh list instead of crashing.
            logs = []
    logs.append({
        # Timezone-aware UTC timestamp (datetime.utcnow() is deprecated
        # and produces naive datetimes).
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "mood": mood,
        "user": user_msg,
        "ai": ai_reply,
    })
    with open(file, "w") as f:
        json.dump(logs, f, indent=2)
|
| 20 |
-
|
| 21 |
-
def log_entry(entry_obj):
    """Append one journal record to journal_log.json.

    The caller's dict is copied into the record unchanged and stamped with
    a UTC ISO-8601 timestamp.
    """
    file = "journal_log.json"
    logs = []
    if os.path.exists(file):
        try:
            with open(file) as f:
                logs = json.load(f)
        except (json.JSONDecodeError, OSError):
            # A corrupt or unreadable log must not lose the new entry.
            logs = []
    logs.append({
        **entry_obj,
        # Timezone-aware UTC timestamp (datetime.utcnow() is deprecated).
        "timestamp": datetime.now(timezone.utc).isoformat(),
    })
    with open(file, "w") as f:
        json.dump(logs, f, indent=2)
|
| 31 |
-
|
| 32 |
-
# --- Export ---
|
| 33 |
-
def export_to_pdf(entry, mood, mode):
    """Render the journal entry as a one-page PDF and return its file path."""
    document = FPDF()
    document.add_page()
    document.set_font("Arial", size=12)
    body = f"Journaling Mode: {mode}\nMood: {mood}\nEntry:\n{entry}"
    document.multi_cell(0, 10, body)
    out_path = "journal_export.pdf"
    document.output(out_path)
    return out_path
|
| 41 |
-
|
| 42 |
-
def export_to_md(entry, mood, mode):
    """Write the journal entry as a Markdown snippet and return its file path."""
    out_path = "journal_export.md"
    content = f"### Journal Entry\n**Mode**: {mode}\n**Mood**: {mood}\n\n{entry}"
    with open(out_path, "w") as f:
        f.write(content)
    return out_path
|
| 47 |
-
|
| 48 |
-
# --- Counselor Analytics ---
|
| 49 |
-
def get_weekly_summary():
    """Summarize the last 7 journal entries.

    Returns a dict with the entry count, a Counter of moods, and the 7 most
    frequent words across the entries — or {"error": ...} when no usable
    log exists.
    """
    if not os.path.exists("journal_log.json"):
        return {"error": "No logs yet."}
    try:
        with open("journal_log.json") as f:
            logs = json.load(f)[-7:]
    except (json.JSONDecodeError, OSError):
        # An unreadable or corrupt log is treated like an empty one.
        return {"error": "No logs yet."}
    return {
        "total_entries": len(logs),
        # .get() keeps a single malformed record from crashing the summary.
        "mood_trend": Counter(x.get("mood") for x in logs),
        "frequent_words": Counter(" ".join(x.get("entry", "") for x in logs).split()).most_common(7),
    }
|
| 59 |
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
with open("journal_log.json") as f:
|
| 74 |
logs = json.load(f)
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
}
|
| 81 |
-
return "\n".join([f"{k}: {v}" for k, v in insights.items()])
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import requests
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
+
# Set your API key securely: read it from the environment instead of
# hard-coding it. If GROQ_API_KEY is unset this is None and API calls
# will fail with an authorization error.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
# Model identifiers passed to the Groq chat-completions API: a Llama-3 8B
# chat model for the heavier agents, and a small model for short replies.
# NOTE(review): "slm-1b" does not look like a standard Groq model id — confirm
# it is accepted by the API before relying on run_first_aider.
LLAMA_MODEL = "llama3-8b-8192"
SLM_MODEL = "slm-1b"
|
| 9 |
+
|
| 10 |
+
# ---------------------------
|
| 11 |
+
# Groq API Call Wrapper
|
| 12 |
+
# ---------------------------
|
| 13 |
+
def call_groq(prompt, model=LLAMA_MODEL):
    """Send a single-turn chat-completion request to the Groq API.

    Returns the assistant's reply text on success, or a bracketed error
    string on HTTP, network, or parsing failure — callers display the
    returned string either way, so this function never raises.
    """
    try:
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers={"Authorization": f"Bearer {GROQ_API_KEY}"},
            json={
                "model": model,
                "messages": [{"role": "user", "content": prompt}],
                "temperature": 0.7
            },
            # Without a timeout a stalled connection hangs the app forever.
            timeout=30,
        )
    except requests.RequestException as e:
        # Keep the error-string contract: DNS/connection/timeout failures
        # previously escaped as exceptions despite the error-return design.
        return f"[Groq Request Error]: {e}"

    if response.status_code != 200:
        return f"[Groq API Error {response.status_code}]: {response.text}"

    try:
        data = response.json()
        return data.get("choices", [{}])[0].get("message", {}).get("content", "[No valid response]")
    except Exception as e:
        return f"[Groq Parsing Error]: {str(e)}"
|
| 31 |
+
|
| 32 |
+
# ---------------------------
|
| 33 |
+
# Empathetic First-Aider Chat Agent
|
| 34 |
+
# ---------------------------
|
| 35 |
+
def run_first_aider(message, mood):
    """Generate a brief, supportive, advice-free reply to a user message.

    Builds a system-style prompt embedding the user's mood and message,
    then queries the small model (SLM_MODEL). Returns the model's reply
    string, or a bracketed error string produced by call_groq.
    """
    prompt = f"""
You're a warm and respectful AI listener. Respond to this user's message kindly and briefly, in 1-2 sentences.
- Never give advice.
- Be supportive, clear, emotionally kind, and safe.
- Maintain boundaries.

User mood: {mood}
Message: "{message}"
"""
    # Short empathetic text only, so the lighter model is used here.
    return call_groq(prompt, model=SLM_MODEL)
|
| 46 |
+
|
| 47 |
+
# ---------------------------
|
| 48 |
+
# Context-Aware Introspect Agent
|
| 49 |
+
# ---------------------------
|
| 50 |
+
def get_user_context():
    """Build a text snapshot of recent user activity for prompting.

    Combines the last 3 chat exchanges from chat_log.json with the last 2
    journal records from journal_log.json. Returns the stripped text, or
    None when there is no usable history.
    """
    def _read_log(path):
        # Best-effort read: a missing, corrupt, unreadable, or non-list
        # log is treated as "no history" rather than crashing the agent.
        if not os.path.exists(path):
            return []
        try:
            with open(path) as f:
                data = json.load(f)
        except (json.JSONDecodeError, OSError):
            return []
        return data if isinstance(data, list) else []

    context = ""

    chats = _read_log("chat_log.json")[-3:]  # last 3 chat messages
    if chats:  # only emit the header when there is actual content
        context += "\nRecent Conversations:\n"
        for c in chats:
            context += f"User: {c.get('user', '')}\nAI: {c.get('ai', '')}\n"

    entries = _read_log("journal_log.json")[-2:]  # last 2 journal records
    if entries:
        context += "\nJournal Entries:\n"
        for j in entries:
            context += f"Mood: {j.get('mood', '')}\nEntry: {j.get('entry', '')}\nAI Response: {j.get('response', '')}\n"

    return context.strip() if context.strip() else None
|
| 68 |
+
|
| 69 |
+
def run_introspect(message, mood):
    """Help the user reflect on emotional patterns using their history.

    Pulls recent chat/journal context via get_user_context(); if none
    exists yet, returns a fixed invitation string instead of calling the
    model. Otherwise returns the LLAMA_MODEL reply (or a bracketed error
    string from call_groq).
    """
    context = get_user_context()
    if not context:
        # No history yet — nothing to introspect on.
        return "Let's talk a bit or journal first so I can help you reflect better."

    prompt = f"""
You're a calm, thoughtful AI helping a user gently reflect on their emotional patterns.

Context from past chats and journals:
{context}

User's new message: "{message}"
Mood: {mood}

Instructions:
- Highlight any potential recurring emotional themes (gently).
- Suggest a new way to think about the situation (without naming therapy techniques).
- End with a kind affirmation or journaling suggestion.

Be kind, short, helpful, and never mention psychology, CBT, or NLP.
"""
    return call_groq(prompt, model=LLAMA_MODEL)
|
| 91 |
+
|
| 92 |
+
# ---------------------------
|
| 93 |
+
# Journaling Agent (Context-Aware, CBT/NLP Masked)
|
| 94 |
+
# ---------------------------
|
| 95 |
+
def get_mood_context():
    """Return the moods of the last three journal entries, oldest first.

    Returns an empty list when the log is missing, unreadable, corrupt,
    or its records lack a "mood" key.
    """
    if not os.path.exists("journal_log.json"):
        return []
    try:
        with open("journal_log.json") as f:
            logs = json.load(f)
    except (json.JSONDecodeError, OSError):
        # Corrupt or unreadable log: treat as no mood history.
        return []
    return [x["mood"] for x in logs[-3:] if isinstance(x, dict) and "mood" in x]
|
| 101 |
+
|
| 102 |
+
def run_journaling_pipeline(mood, entry, mode):
    """Run one journaling turn: build a reflective prompt and query the model.

    Parameters:
        mood:  the user's self-reported mood for this entry.
        entry: the journal text the user wrote.
        mode:  the journaling mode label embedded in the prompt.

    Returns a dict with keys "entry", "mood", "mode", and "response"
    (the LLAMA_MODEL reply, or a bracketed error string from call_groq).
    """
    recent_moods = get_mood_context()
    # Fold a short mood history into the prompt when one exists, and pick
    # a reflection question that matches whether there is any history.
    mood_summary = f"Recent moods: {', '.join(recent_moods)}." if recent_moods else "No mood history."
    reflective_prompt = "What patterns have you noticed in how you've been feeling lately?" if recent_moods else "What stood out to you emotionally today?"

    prompt = f"""
You're a reflective journaling assistant. Help the user explore their thoughts kindly and safely.

User wrote:
"{entry}"
Journaling Mode: {mode}
Mood: {mood}
{mood_summary}
Prompt: {reflective_prompt}

Instructions:
- Reflect what the user might be feeling or thinking.
- Suggest gentle rephrasing or ways to understand the situation better.
- End with a kind affirmation or open-ended journaling question.
- Do not mention therapy, CBT, NLP, psychology, or diagnosis.

Be gentle and warm.
"""
    response = call_groq(prompt, model=LLAMA_MODEL)
    return {
        "entry": entry,
        "mood": mood,
        "mode": mode,
        "response": response
    }
|
|
|