pythonprincess committed on
Commit
9423397
·
verified ·
1 Parent(s): 6cf881c

Upload 6 files

Browse files
Files changed (6) hide show
  1. README.md +2 -11
  2. api_service.py +92 -0
  3. app.py +6 -0
  4. backend_pam.py +204 -0
  5. frontend_pam.py +108 -0
  6. requirements.txt +15 -0
README.md CHANGED
@@ -1,11 +1,2 @@
1
- ---
2
- title: PAM UmiNur
3
- emoji: 🌍
4
- colorFrom: yellow
5
- colorTo: purple
6
- sdk: docker
7
- pinned: false
8
- short_description: PAM V1
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ # PAM-UmiNur
2
+ Private AI assistant powered by Hugging Face Inference API. Integrates frontend + backend secure logic for UmiNur health platform.
 
 
 
 
 
 
 
 
 
api_service.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# filename: api_service.py

from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Dict, Any, Optional

# Import the core logic and model loading functions
from backend_pam import load_agent as load_backend_agent, PAM
from frontend_pam import load_frontend_agent, FrontendPAM

# --- Global Agent Variables ---
# Populated once by the startup event handler; they stay None until then
# (and remain None if initialization fails), so each endpoint checks for
# None and answers 503 instead of crashing.
backend_agent: Optional[PAM] = None
frontend_agent: Optional[FrontendPAM] = None

# --- Data Model for API Request ---
class UserInput(BaseModel):
    # Single free-text field shared by both the technical and chat endpoints.
    user_input: str

# --- FastAPI Initialization ---
# docs_url/redoc_url are disabled so the interactive API docs are not
# publicly exposed.
app = FastAPI(
    title="PAM Unified Inference Service (Technical & Chat)",
    version="2.0.0",
    docs_url=None,
    redoc_url=None
)

# --- CORS Setup ---
# Only the production web/API origins may call this service from a browser.
origins = [
    "https://www.uminur.app",
    "https://api.uminur.app",
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["POST", "GET"],
    allow_headers=["*"],
)
41
+
42
# --- Startup Event ---
@app.on_event("startup")
async def startup_event():
    """Build both PAM agents exactly once when the server boots.

    Failures are logged rather than raised so the process still comes up;
    the endpoints report 503 until the agents exist.
    """
    # NOTE(review): @app.on_event is deprecated in current FastAPI in favor
    # of lifespan handlers — kept as-is to avoid touching app construction.
    global backend_agent, frontend_agent
    print("FastAPI: Starting up PAM agents...")
    backend_agent = load_backend_agent()
    frontend_agent = load_frontend_agent()
    if not (backend_agent and frontend_agent):
        print("FastAPI: ERROR — One or both agents failed to initialize.")
53
+
54
# --- Health Check ---
@app.get("/health", status_code=200, tags=["Status"])
def health_check():
    """Readiness probe: 200 with per-agent flags when both agents are
    loaded, otherwise a 503 whose detail names which side is missing."""
    backend_ok = backend_agent is not None
    frontend_ok = frontend_agent is not None
    if not (backend_ok and frontend_ok):
        raise HTTPException(
            status_code=503,
            detail=f"Service Unavailable: Backend Ready: {backend_ok}, Frontend Ready: {frontend_ok}"
        )
    return {"status": "ok", "backend_ready": True, "frontend_ready": True}
67
+
68
# --- Technical Endpoint ---
@app.post("/ai/technical/", tags=["Technical"])
async def technical_endpoint(input_data: UserInput) -> Dict[str, Any]:
    """Route a technical command string to the backend PAM agent.

    Returns the agent's structured dict unchanged; 503 if the agent never
    initialized, 500 (with a server-side log) on any processing error.
    """
    if backend_agent is None:
        raise HTTPException(status_code=503, detail="Backend PAM is not initialized.")
    try:
        return backend_agent.process_input(input_data.user_input)
    except Exception as exc:
        print(f"Error during technical inference: {exc}")
        raise HTTPException(status_code=500, detail="Internal Server Error (technical)")
80
+
81
# --- Chat Endpoint ---
@app.post("/ai/chat/", tags=["Chat"])
async def chat_endpoint(input_data: UserInput) -> Dict[str, Any]:
    """Forward a user message to the client-facing (frontend) PAM agent.

    Mirrors the technical endpoint: 503 when the agent is missing, 500
    (with a server-side log) when the agent itself raises.
    """
    if frontend_agent is None:
        raise HTTPException(status_code=503, detail="Frontend PAM is not initialized.")
    try:
        return frontend_agent.respond(input_data.user_input)
    except Exception as exc:
        print(f"Error during chat inference: {exc}")
        raise HTTPException(status_code=500, detail="Internal Server Error (chat)")
app.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
# app.py
# Thin launcher: serves the FastAPI app defined in api_service.py.
import uvicorn
from api_service import app

if __name__ == "__main__":
    # Binds all interfaces on port 7860 — presumably to match the Hugging
    # Face Spaces docker convention; confirm against the Space config.
    uvicorn.run(app, host="0.0.0.0", port=7860)
backend_pam.py ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# filename: backend_pam.py (UPDATED FOR INFERENCE API)

# BUG FIX: the original line was `from transformers import pipeline, HuggingFaceHub`.
# `HuggingFaceHub` is not part of the transformers package (it comes from
# LangChain), so that import raised ImportError and broke this whole module.
# Only `pipeline` is needed; model repo ids are passed to it directly.
from transformers import pipeline
from datetime import datetime
from typing import Dict, Any, Optional
import json
import os

# --- Constants for Data Paths ---
# All static data files live in a "data/" directory next to this module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, "data")
LOGS_FILE = os.path.join(DATA_DIR, "logs.json")
COMPLIANCE_FILE = os.path.join(DATA_DIR, "compliance.json")

# --- Global Storage for Loaded Components ---
# Module-level cache so load_agent() only builds the pipelines once.
LOADED_MODELS = None
LOADED_DATA = None
18
+
19
# --- Data Loading Helper ---
def load_json(filepath: str) -> Dict[str, Any]:
    """Load a JSON document from *filepath*.

    Prints a CRITICAL note and returns an empty dict when the file is
    missing or contains invalid JSON, so callers never need to guard reads.
    """
    try:
        with open(filepath, 'r') as fh:
            return json.load(fh)
    except FileNotFoundError:
        print(f"CRITICAL: Data file not found at: {filepath}")
    except json.JSONDecodeError as exc:
        print(f"CRITICAL: Failed to decode JSON from {filepath}: {exc}")
    return {}
30
+
31
# --- Agent Initialization ---
def load_agent() -> 'PAM':
    """Build (or reuse) the backend PAM agent.

    Loads three transformers pipelines plus the static log/compliance JSON
    files. Pipelines are cached in module globals so repeated calls reuse
    them; if model loading fails, a PAM with models=None is still returned
    and its methods degrade to error responses.
    """
    global LOADED_MODELS, LOADED_DATA

    if LOADED_MODELS is not None:
        print("PAM agent already loaded. Skipping reinit.")
        return PAM(LOADED_MODELS, LOADED_DATA)

    print("Loading PAM technical assistant models from Hugging Face Inference API...")

    HUGGING_FACE_TOKEN = os.getenv("HF_READ_TOKEN")
    if not HUGGING_FACE_TOKEN:
        print("FATAL: HF_READ_TOKEN not set in environment. PAM will fail to load.")

    try:
        # BUG FIX: the original wrapped each repo id in `HuggingFaceHub(...)`,
        # which is not a transformers class and is not a valid `model`
        # argument for `pipeline`. `pipeline` accepts the repo-id string
        # directly, with `token=` for authenticated downloads.
        models = {
            "phi_ner": pipeline(
                "ner",
                model="Jean-Baptiste/roberta-large-ner-english",
                token=HUGGING_FACE_TOKEN,
                aggregation_strategy="simple",
            ),
            "log_ner": pipeline(
                "ner",
                model="dslim/bert-base-NER",
                token=HUGGING_FACE_TOKEN,
                aggregation_strategy="simple",
            ),
            "summarizer": pipeline(
                "summarization",
                model="google/flan-t5-large",
                token=HUGGING_FACE_TOKEN,
            ),
        }
        print("✅ All PAM models loaded via Hugging Face Inference API.")
        LOADED_MODELS = models
    except Exception as e:
        # Leave LOADED_MODELS as None so a later call can retry the load.
        print(f"FATAL: Could not load inference models. {e}")
        LOADED_MODELS = None

    data = {
        "LOGS": load_json(LOGS_FILE),
        "COMPLIANCE": load_json(COMPLIANCE_FILE)
    }

    if not data["LOGS"] or not data["COMPLIANCE"]:
        print("❌ WARNING: Log or compliance data failed to load.")
    else:
        print("✅ Log & compliance data loaded.")

    LOADED_DATA = data
    return PAM(LOADED_MODELS, LOADED_DATA)
80
+
81
# --- Helper: classify severity ---
def classify_severity(entry: str) -> str:
    """Bucket a raw log line into CRITICAL / WARNING / INFO by keyword.

    Matching is case-insensitive substring search; CRITICAL markers win
    over WARNING markers, anything else is INFO.
    """
    lowered = entry.lower()
    critical_markers = ("unauthorized", "failed", "attack", "port scanning", "unavailable")
    warning_markers = ("warning", "unexpected", "outside working hours")
    if any(marker in lowered for marker in critical_markers):
        return "CRITICAL"
    if any(marker in lowered for marker in warning_markers):
        return "WARNING"
    return "INFO"
90
+
91
# --- PAM Role ---
# Persona string echoed back in every structured response under "role".
PAM_ROLE = (
    "I am PAM, your technical assistant and infrastructure watchdog. "
    "I summarize logs, detect risks, and support developers with clarity. "
    "I flag anomalies, monitor compliance, and hand off client-facing issues when needed. "
    "I never act on my own, always permission first, protocol always."
)

# --- Backend PAM Class ---
class PAM:
    """Technical assistant over three NLP pipelines plus static log and
    compliance data.

    Every public method returns a JSON-serializable dict that always carries
    a "message" (or "response") key and a "role" key, so the API layer can
    pass results straight through to clients.
    """

    def __init__(self, models: Optional[Dict[str, Any]], data: Dict[str, Dict]):
        # models may be None when pipeline loading failed at startup; all
        # model-backed methods then degrade to an error response.
        loaded = models or {}
        self.phi_detector = loaded.get("phi_ner")
        self.log_parser = loaded.get("log_ner")
        self.summarizer = loaded.get("summarizer")
        self.LOGS = data.get("LOGS", {})
        self.COMPLIANCE = data.get("COMPLIANCE", {})

    def _check_activation(self, text: str) -> Optional[str]:
        """Return an error string when models are unavailable, else None."""
        if not self.phi_detector:
            return "Fatal Error: PAM models failed to load on startup. 🛠️"
        return None

    def detect_phi(self, text: str) -> Dict[str, Any]:
        """Scan *text* for person/location/org/date entities (PHI proxies)."""
        error = self._check_activation(text)
        if error:
            return {"message": error, "role": PAM_ROLE}

        entities = self.phi_detector(text)
        # Aggregated-NER output exposes "entity_group"; only these groups
        # are treated as potential PHI.
        phi = [e for e in entities if e["entity_group"] in ("PER", "LOC", "ORG", "DATE")]
        return {
            "message": "🔒 Scanning for PHI...",
            "role": PAM_ROLE,
            "has_phi": len(phi) > 0,
            "entities": phi
        }

    def parse_log(self, log_text: str) -> Dict[str, Any]:
        """Run NER over a raw log line and return the extracted entities."""
        error = self._check_activation(log_text)
        if error:
            return {"message": error, "role": PAM_ROLE}

        parsed = self.log_parser(log_text)
        return {
            "message": "🕵🏽‍♀️ Parsing log entry...",
            "role": PAM_ROLE,
            "log_entities": parsed
        }

    def summarize(self, raw_text: str) -> Dict[str, Any]:
        """Summarize up to the first 1024 characters of *raw_text*."""
        error = self._check_activation(raw_text)
        if error:
            return {"message": error, "role": PAM_ROLE}

        # Truncation keeps within the summarizer's input limit; do_sample=False
        # makes repeated calls deterministic.
        result = self.summarizer(raw_text[:1024], max_length=150, min_length=30, do_sample=False)
        return {
            "message": "📊 Summary generated:",
            "role": PAM_ROLE,
            "summary": result[0]["summary_text"]
        }

    def get_latest_logs(self) -> Dict[str, Any]:
        """Format all stored log entries with severity; collect the subset
        that should be handed off to the client-facing agent."""
        if "latest_logs" not in self.LOGS:
            return {"message": "No logs available. Check JSON file location and contents.", "role": PAM_ROLE}

        full_logset = []
        client_handoffs = []
        for item in self.LOGS["latest_logs"]:
            entry = item.get("entry", "")
            stamp = item.get("timestamp", "Unknown time")
            formatted = f"[{stamp}] ({classify_severity(entry)}) -> {entry}"
            full_logset.append(formatted)

            # Client-facing issues (frontend problems, unavailable providers)
            # are flagged for the frontend agent to communicate.
            lowered = entry.lower()
            if "frontend" in lowered or "provider unavailable" in lowered:
                client_handoffs.append(formatted)

        return {
            "message": "📡 Infrastructure Log Review:",
            "role": PAM_ROLE,
            "logs": full_logset,
            "handoff_to_frontend": client_handoffs
        }

    def check_compliance(self) -> Dict[str, Any]:
        """Render each compliance flag as 'Title Case Name: ✅/❌'."""
        report = [
            f"{item.replace('_', ' ').title()}: {'✅' if status else '❌'}"
            for item, status in self.COMPLIANCE.items()
        ]
        return {
            "message": "🛡️ Compliance Status Overview:",
            "role": PAM_ROLE,
            "compliance_report": report
        }

    def process_input(self, user_input: str) -> Dict[str, Any]:
        """Dispatch a free-text command to the matching capability.

        Commands are matched case-insensitively; the payload after
        'detect phi in' / 'parse log' keeps the user's original casing.
        """
        # BUG FIX: offsets were previously computed on the lower().strip()
        # copy but used to slice the raw input, so leading whitespace shifted
        # the extracted payload. Slicing the stripped copy keeps offsets
        # aligned (lower() preserves length for these ASCII commands).
        stripped = user_input.strip()
        u_input = stripped.lower()

        if "check compliance" in u_input:
            return self.check_compliance()
        if "get logs" in u_input or "latest logs" in u_input:
            return self.get_latest_logs()
        if "detect phi in" in u_input:
            text_to_scan = stripped[u_input.find("detect phi in") + len("detect phi in"):].strip()
            return self.detect_phi(text_to_scan)
        if "parse log" in u_input:
            log_to_parse = stripped[u_input.find("parse log") + len("parse log"):].strip()
            return self.parse_log(log_to_parse)
        if "summarize" in u_input or "explain" in u_input:
            return self.summarize(user_input)

        return {
            "response": f"Hello! I am PAM. I can process your request: '{user_input}'. Try commands like 'check compliance', 'get logs', 'detect phi in [text]', or 'parse log [log text]'.",
            "role": PAM_ROLE
        }
frontend_pam.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# filename: frontend_pam.py (UPDATED FOR HF INFERENCE API)

import os
import json
import random
import requests
from datetime import datetime
from typing import Dict, Any, Optional

# --- Constants for Data Paths ---
# All data files live in a "data/" directory next to this module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, "data")

APPOINTMENTS_FILE = os.path.join(DATA_DIR, "appointments.json")
RESOURCES_FILE = os.path.join(DATA_DIR, "resources.json")
FOLLOW_UP_FILE = os.path.join(DATA_DIR, "follow_up.json")
PERMISSIONS_FILE = os.path.join(DATA_DIR, "permissions.json")

# --- HuggingFace Inference API Setup ---
# NOTE(review): if HF_READ_TOKEN is unset, HF_API_TOKEN is None and the
# header literally becomes "Bearer None" — requests will still be sent but
# rejected by the API; confirm the env var is set in deployment.
HF_API_TOKEN = os.getenv("HF_READ_TOKEN")
HF_HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}
# One hosted-inference endpoint per logical task used by FrontendPAM.
HF_ENDPOINTS = {
    "intent": "https://api-inference.huggingface.co/models/typeform/distilbert-base-uncased-mnli",
    "sentiment": "https://api-inference.huggingface.co/models/distilbert-base-uncased-finetuned-sst-2-english",
    "summarizer": "https://api-inference.huggingface.co/models/google/flan-t5-large",
    "chat": "https://api-inference.huggingface.co/models/tiiuae/falcon-7b-instruct"
}
28
+
29
# --- Load JSON Helper ---
def load_json(filepath: str) -> Dict[str, Any]:
    """Read a JSON file, falling back to {} (with a console note) when it
    is missing or malformed, so data lookups never raise."""
    try:
        with open(filepath, 'r') as handle:
            parsed = json.load(handle)
    except FileNotFoundError:
        print(f"CRITICAL: Data file not found at: {filepath}")
        return {}
    except json.JSONDecodeError as e:
        print(f"CRITICAL: Failed to decode JSON from {filepath}: {e}")
        return {}
    return parsed
40
+
41
# --- Inference API Call Helper ---
def hf_infer(task: str, payload: Any) -> Any:
    """POST *payload* to the HF Inference API endpoint registered for *task*.

    Returns the decoded JSON response on success, or a dict with an "error"
    key on any failure (unknown task, HTTP error status, network error).
    """
    url = HF_ENDPOINTS.get(task)
    if not url:
        return {"error": f"Invalid task: {task}"}
    try:
        # FIX: requests has no default timeout — without one, a stalled HF
        # endpoint would hang the calling web request indefinitely.
        response = requests.post(url, headers=HF_HEADERS, json=payload, timeout=30)
    except requests.RequestException as e:
        # Network/connection failures now return the same error-dict shape
        # instead of propagating an unhandled exception.
        return {"error": "HF API request failed", "details": str(e)}
    if response.status_code != 200:
        return {"error": f"HF API Error ({response.status_code})", "details": response.text}
    return response.json()
50
+
51
# --- Agent Initialization ---
def load_frontend_agent() -> 'FrontendPAM':
    """Load every frontend data file and wrap the results in a FrontendPAM."""
    print("Initializing Frontend PAM using HF Inference API...")
    sources = {
        "APPOINTMENTS": APPOINTMENTS_FILE,
        "RESOURCES": RESOURCES_FILE,
        "FOLLOW_UP": FOLLOW_UP_FILE,
        "PERMISSIONS": PERMISSIONS_FILE,
    }
    # load_json returns {} for missing/invalid files, so the agent always
    # gets a complete mapping.
    data = {key: load_json(path) for key, path in sources.items()}
    return FrontendPAM(data)
61
+
62
# --- PAM Personality ---
PAM_TONE = ("I am PAM — the cool older sister... [Full description abridged]")
GREETINGS = ["babe", "love", "friend", "girl", "boo", "sweetheart"]

# --- Agent Class ---
class FrontendPAM:
    """Client-facing conversational agent.

    Routes a user message through permission checks, appointment lookups,
    and symptom keywords; anything unmatched falls through to the hosted
    chat model via hf_infer.
    """

    def __init__(self, data: Dict[str, Dict]):
        self.APPOINTMENTS = data.get("APPOINTMENTS", {})
        self.PERMISSIONS = data.get("PERMISSIONS", {})
        self.RESOURCES = data.get("RESOURCES", {})
        self.FOLLOW_UP = data.get("FOLLOW_UP", {})
        # Hard-coded single-user id — presumably a placeholder until real
        # authentication is wired in; TODO confirm.
        self.user_id = "user_001"

    def respond(self, user_text: str, backend_brief: Optional[str] = None) -> Dict[str, Any]:
        """Produce a reply dict for *user_text*.

        Early-return branches yield {"reply": ...} only; the chat fallback
        also includes intent, sentiment, and the backend summary.
        """
        if not user_text.lower().startswith("hey pam"):
            return {"reply": "Oops, you didn’t say 'Hey Pam'. I only respond to respectful greetings — you know how I am. 💅"}

        text = user_text.replace("PAM", "you").replace("pam", "you")

        # BUG FIX: hf_infer returns an error *dict* on any failure; the
        # original indexed [0] unconditionally, which raised on errors.
        # Fall back to neutral values instead of crashing the request.
        intent_raw = hf_infer("intent", {"inputs": text})
        if isinstance(intent_raw, list) and intent_raw:
            detected_intent = intent_raw[0].get("label", "unknown")
        else:
            detected_intent = "unknown"

        sentiment_raw = hf_infer("sentiment", {"inputs": text})
        if isinstance(sentiment_raw, list) and sentiment_raw:
            sentiment_result = sentiment_raw[0]
        else:
            sentiment_result = {"label": "unknown", "score": 0.0}

        backend_summary = backend_brief or "No backend input provided."
        greeting = random.choice(GREETINGS)

        # Permission gate: any disallowed topic mentioned in the message
        # short-circuits to a referral reply.
        for term, allowed in self.PERMISSIONS.items():
            if term in text.lower() and not allowed:
                return {"reply": f"That’s something I’m not allowed to help with directly, {greeting}. But I can connect you to a safe resource or provider if you’d like."}

        if "appointment" in text.lower():
            appt = self.APPOINTMENTS.get(self.user_id)
            if appt:
                return {"reply": f"Hey {greeting}, you've got a {appt['type']} scheduled..."}
            return {"reply": f"Hey {greeting}, I don’t have any appointments saved..."}

        if any(keyword in text.lower() for keyword in ["cramp", "discharge", "bleed", "smell", "spotting", "fatigue", "mood", "missed"]):
            return {"reply": f"Okay {greeting}, I pulled a few resources I think will help..."}

        # Fallback: free-form chat via the hosted model.
        prompt = f"{PAM_TONE}\n... User said: {text}\nPAM:"
        chat_output = hf_infer("chat", {"inputs": prompt})
        # BUG FIX: same error-dict guard as above for the chat call.
        if isinstance(chat_output, list) and chat_output:
            raw_reply = chat_output[0].get("generated_text", "Sorry love, I didn’t catch that. Try again?")
        else:
            raw_reply = "Sorry love, I didn’t catch that. Try again?"
        reply = raw_reply.split("PAM:")[-1].strip()
        return {
            "intent": detected_intent,
            "sentiment": sentiment_result,
            "summary": backend_summary,
            "reply": reply
        }
requirements.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # filename: requirements.txt
2
+
3
+ # Web Server / API Framework
4
+ fastapi>=0.104.0
5
+ uvicorn[standard]>=0.23.2
6
+ gunicorn>=21.2.0
7
+ pydantic>=2.4.2
8
+ python-multipart>=0.0.6
9
+ starlette>=0.35.1
10
+
11
+ # AI/ML/Data Science Libraries (Used in backend_pam.py and frontend_pam.py)
+ transformers>=4.34.0
+ torch>=2.1.0
+ requests>=2.31.0
+ boto3>=1.28.69
+ # Add any other libraries required by your agents (e.g., specific AWS SDKs, scikit-learn, etc.)