Spaces:
Paused
Paused
Delete frontend_pam.py
Browse files- frontend_pam.py +0 -108
frontend_pam.py
DELETED
|
@@ -1,108 +0,0 @@
|
|
| 1 |
-
# filename: frontend_pam.py (UPDATED FOR HF INFERENCE API)
|
| 2 |
-
|
| 3 |
-
import os
|
| 4 |
-
import json
|
| 5 |
-
import random
|
| 6 |
-
import requests
|
| 7 |
-
from datetime import datetime
|
| 8 |
-
from typing import Dict, Any, Optional
|
| 9 |
-
|
| 10 |
-
# --- Constants for Data Paths ---
# Every data file lives in a "data/" directory next to this module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, "data")

APPOINTMENTS_FILE = os.path.join(DATA_DIR, "appointments.json")
RESOURCES_FILE = os.path.join(DATA_DIR, "resources.json")
FOLLOW_UP_FILE = os.path.join(DATA_DIR, "follow_up.json")
PERMISSIONS_FILE = os.path.join(DATA_DIR, "permissions.json")

# --- HuggingFace Inference API Setup ---
# The token comes from the environment; if unset, the header is still
# syntactically valid ("Bearer None") but requests will be unauthorized.
HF_API_TOKEN = os.getenv("HF_READ_TOKEN")
HF_HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}

# Task name -> hosted Inference API model endpoint.
_HF_API_BASE = "https://api-inference.huggingface.co/models"
HF_ENDPOINTS = {
    "intent": f"{_HF_API_BASE}/typeform/distilbert-base-uncased-mnli",
    "sentiment": f"{_HF_API_BASE}/distilbert-base-uncased-finetuned-sst-2-english",
    "summarizer": f"{_HF_API_BASE}/google/flan-t5-large",
    "chat": f"{_HF_API_BASE}/tiiuae/falcon-7b-instruct",
}
# --- Load JSON Helper ---
def load_json(filepath: str) -> Dict[str, Any]:
    """Load a JSON file, returning {} (with a console warning) on failure.

    Args:
        filepath: Path to a UTF-8 encoded JSON document.

    Returns:
        The parsed JSON object, or an empty dict when the file is missing
        or contains invalid JSON — the agent degrades gracefully either way.
    """
    try:
        # Explicit encoding: don't depend on the platform's default locale
        # to decode what should be UTF-8 data files.
        with open(filepath, "r", encoding="utf-8") as f:
            return json.load(f)
    except FileNotFoundError:
        print(f"CRITICAL: Data file not found at: {filepath}")
        return {}
    except json.JSONDecodeError as e:
        print(f"CRITICAL: Failed to decode JSON from {filepath}: {e}")
        return {}
# --- Inference API Call Helper ---
def hf_infer(task: str, payload: Any) -> Any:
    """POST *payload* to the HF Inference API endpoint registered for *task*.

    Args:
        task: One of the keys in HF_ENDPOINTS ("intent", "sentiment",
            "summarizer", "chat").
        payload: JSON-serializable request body, e.g. {"inputs": text}.

    Returns:
        The decoded JSON response on success (typically a list of result
        dicts), or a dict with an "error" key describing the failure
        (unknown task, network error, or non-200 status).
    """
    url = HF_ENDPOINTS.get(task)
    if not url:
        return {"error": f"Invalid task: {task}"}
    try:
        # A timeout keeps a stalled inference endpoint from hanging the
        # agent forever; network failures become error dicts, matching the
        # function's existing error contract instead of raising.
        response = requests.post(url, headers=HF_HEADERS, json=payload, timeout=30)
    except requests.RequestException as e:
        return {"error": "HF API request failed", "details": str(e)}
    if response.status_code != 200:
        return {"error": f"HF API Error ({response.status_code})", "details": response.text}
    return response.json()
# --- Agent Initialization ---
def load_frontend_agent() -> 'FrontendPAM':
    """Load every JSON data table from disk and build a FrontendPAM agent."""
    print("Initializing Frontend PAM using HF Inference API...")
    sources = {
        "APPOINTMENTS": APPOINTMENTS_FILE,
        "RESOURCES": RESOURCES_FILE,
        "FOLLOW_UP": FOLLOW_UP_FILE,
        "PERMISSIONS": PERMISSIONS_FILE,
    }
    return FrontendPAM({name: load_json(path) for name, path in sources.items()})
# --- PAM Personality ---
# Persona text prepended to every chat-model prompt.
PAM_TONE = (
    "I am PAM — the cool older sister... [Full description abridged]"
)
# Terms of endearment; one is sampled at random per reply.
GREETINGS = ["babe", "love", "friend", "girl", "boo", "sweetheart"]
# --- Agent Class ---
class FrontendPAM:
    """Conversational frontend agent ("PAM") backed by HF Inference API calls.

    Holds the JSON-backed data tables (appointments, permissions, resources,
    follow-ups) and turns one user utterance into a reply dict.
    """

    def __init__(self, data: Dict[str, Dict]):
        """Store the data tables; any missing table defaults to an empty dict."""
        self.APPOINTMENTS = data.get("APPOINTMENTS", {})
        self.PERMISSIONS = data.get("PERMISSIONS", {})
        self.RESOURCES = data.get("RESOURCES", {})
        self.FOLLOW_UP = data.get("FOLLOW_UP", {})
        # Single-user prototype: every session maps to this fixed id.
        self.user_id = "user_001"

    def respond(self, user_text: str, backend_brief: Optional[str] = None) -> Dict[str, Any]:
        """Produce PAM's reply to *user_text*.

        Args:
            user_text: Raw user utterance; must start with "hey pam"
                (case-insensitive) to be processed at all.
            backend_brief: Optional summary from a backend agent, echoed
                in the "summary" field of free-chat replies.

        Returns:
            A dict with at least a "reply" key; the free-chat branch also
            includes "intent", "sentiment", and "summary".
        """
        if not user_text.lower().startswith("hey pam"):
            return {"reply": "Oops, you didn’t say 'Hey Pam'. I only respond to respectful greetings — you know how I am. 💅"}

        # Rewrite self-references so downstream models see second person.
        text = user_text.replace("PAM", "you").replace("pam", "you")

        # hf_infer returns an error *dict* (not a list) on any API failure,
        # so guard before indexing — blind `[0]` raised KeyError/TypeError
        # on every failed call in the original code.
        intent_out = hf_infer("intent", {"inputs": text})
        detected_intent = (
            intent_out[0].get("label", "unknown")
            if isinstance(intent_out, list) and intent_out
            else "unknown"
        )
        sentiment_out = hf_infer("sentiment", {"inputs": text})
        sentiment_result = (
            sentiment_out[0]
            if isinstance(sentiment_out, list) and sentiment_out
            else {"label": "unknown", "error": sentiment_out}
        )
        backend_summary = backend_brief or "No backend input provided."
        greeting = random.choice(GREETINGS)

        # Hoisted: the original recomputed text.lower() in every check.
        lowered = text.lower()

        # Hard permission gate: any disallowed term short-circuits the reply.
        for term, allowed in self.PERMISSIONS.items():
            if term in lowered and not allowed:
                return {"reply": f"That’s something I’m not allowed to help with directly, {greeting}. But I can connect you to a safe resource or provider if you’d like."}

        if "appointment" in lowered:
            appt = self.APPOINTMENTS.get(self.user_id)
            if appt:
                return {"reply": f"Hey {greeting}, you've got a {appt['type']} scheduled..."}
            else:
                return {"reply": f"Hey {greeting}, I don’t have any appointments saved..."}

        elif any(keyword in lowered for keyword in ["cramp", "discharge", "bleed", "smell", "spotting", "fatigue", "mood", "missed"]):
            return {"reply": f"Okay {greeting}, I pulled a few resources I think will help..."}

        else:
            # Free chat: prompt the chat model in PAM's persona and strip
            # everything up to the final "PAM:" marker from its output.
            prompt = f"{PAM_TONE}\n... User said: {text}\nPAM:"
            chat_output = hf_infer("chat", {"inputs": prompt})
            if isinstance(chat_output, list) and chat_output:
                raw = chat_output[0].get("generated_text", "Sorry love, I didn’t catch that. Try again?")
            else:
                # API failure — fall back to the apology line.
                raw = "Sorry love, I didn’t catch that. Try again?"
            reply = raw.split("PAM:")[-1].strip()
            return {
                "intent": detected_intent,
                "sentiment": sentiment_result,
                "summary": backend_summary,
                "reply": reply,
            }