JerameeUC committed on
Commit
8bbe87c
·
1 Parent(s): d6ddab6

1st Commit

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .env.sample +8 -0
  2. Makefile +11 -0
  3. README.md +42 -1
  4. agenticcore/__init__.py +1 -0
  5. agenticcore/chatbot/__init__.py +1 -0
  6. agenticcore/chatbot/services.py +103 -0
  7. agenticcore/cli.py +187 -0
  8. agenticcore/providers_unified.py +274 -0
  9. anon_bot/handler.py +3 -0
  10. anon_bot/rules.py +1 -0
  11. app/app.py +139 -0
  12. app/assets/html/agenticcore_frontend.html +200 -0
  13. app/assets/html/chat.html +56 -0
  14. app/assets/html/chat_console.html +77 -0
  15. app/assets/html/chat_minimal.html +89 -0
  16. app/routes.py +1 -0
  17. core/config.py +4 -0
  18. core/logging.py +1 -0
  19. core/types.py +1 -0
  20. docs/DEV_DOC.md +97 -0
  21. docs/architecture.md +2 -0
  22. docs/design.md +2 -0
  23. docs/flowchart.png +0 -0
  24. docs/results.md +2 -0
  25. examples/example.py +9 -0
  26. guardrails/pii_redaction.py +3 -0
  27. guardrails/safety.py +1 -0
  28. integrations/azure/bot_framework.py +2 -0
  29. integrations/botframework/app.py +138 -0
  30. integrations/botframework/bot.py +86 -0
  31. integrations/botframework/bots/echo_bot.py +57 -0
  32. integrations/email/ticket_stub.py +2 -0
  33. integrations/web/fastapi/web_agentic.py +22 -0
  34. logged_in_bot/handler.py +20 -0
  35. logged_in_bot/sentiment_azure.py +1 -0
  36. logged_in_bot/tools.py +1 -0
  37. memory/rag/indexer.py +1 -0
  38. memory/rag/retriever.py +1 -0
  39. memory/sessions.py +1 -0
  40. memory/store.py +3 -0
  41. nlu/pipeline.py +3 -0
  42. nlu/prompts.py +1 -0
  43. nlu/router.py +1 -0
  44. notebooks/ChatbotIntegration.ipynb +559 -0
  45. notebooks/SimpleTraditionalChatbot.ipynb +522 -0
  46. pyproject.toml +10 -0
  47. requirements.txt +15 -0
  48. samples/service.py +58 -0
  49. scripts/check_compliance.py +3 -0
  50. scripts/run_local.sh +5 -0
.env.sample ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # .env.sample
2
+ # Feature toggles
3
+ AZURE_ENABLED=false
4
+ SENTIMENT_ENABLED=false
5
+ DB_URL=memory://
6
+ # Azure (optional)
7
+ AZURE_TEXT_ANALYTICS_ENDPOINT=
8
+ AZURE_TEXT_ANALYTICS_KEY=
Makefile ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
# Developer convenience targets. Recipe lines are tab-indented as make requires;
# the previous paste had lost the leading tabs.
.PHONY: dev test run seed check

# Install runtime/dev dependencies.
dev:
	pip install -r requirements.txt

# Run the quiet test suite.
test:
	pytest -q

# Launch the Gradio UI on 0.0.0.0:7860.
run:
	export PYTHONPATH=. && python -c "from storefront_chatbot.app.app import build; build().launch(server_name='0.0.0.0', server_port=7860)"

# Seed demo data.
seed:
	python storefront_chatbot/scripts/seed_data.py

# Run the compliance checker.
check:
	python storefront_chatbot/scripts/check_compliance.py
README.md CHANGED
@@ -1,2 +1,43 @@
 
1
  # Agentic-Chat-bot-
2
- Agentic Chat-bot with RAG, Memory, and Privacy Considerations
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!-- README.md -->
2
  # Agentic-Chat-bot-
3
+ Agentic Chat-bot with RAG, Memory, and Privacy Considerations.
4
+
5
+ # Storefront Chatbot
6
+
7
+ This repo follows a modular layout with a Gradio UI, NLU pipeline, anonymous and logged-in flows,
8
+ guardrails, and optional Azure sentiment.
9
+
10
+ ## Quickstart
11
+ ```bash
12
+ make dev
13
+ make run
14
+ # open http://localhost:7860
15
+ ```
16
+
17
+ ## Agentic Integration
18
+ - Core bot: `agenticcore/chatbot/services.py`
19
+ - Providers: `agenticcore/providers_unified.py`
20
+ - CLI: `python -m agenticcore.cli agentic "hello"` (loads .env)
21
+ - FastAPI demo: `uvicorn integrations.web.fastapi.web_agentic:app --reload`
22
+
23
+ ## Added Samples & Tests
24
+ - chat.html → `app/assets/html/chat.html`
25
+ - echo_bot.py → `integrations/botframework/bots/echo_bot.py`
26
+ - ChatbotIntegration.ipynb → `notebooks/ChatbotIntegration.ipynb`
27
+ - SimpleTraditionalChatbot.ipynb → `notebooks/SimpleTraditionalChatbot.ipynb`
28
+ - smoke_test.py → `tests/smoke_test.py`
29
+ - test_routes.py → `tests/test_routes.py`
30
+ - quick_sanity.py → `tools/quick_sanity.py`
31
+ - example.py → `examples/example.py`
32
+ - service.py → `samples/service.py`
33
+ - DEV_DOC.md → `docs/DEV_DOC.md`
34
+
35
+ Run `pytest -q` for tests; open HTML in `app/assets/html/` to try local UIs.
36
+
37
+
38
+ ---
39
+ This is the **unified** storefront-chatbot bundle.
40
+ Duplicates from earlier skeletons were removed; priority order was:
41
+ 1) storefront_chatbot_final_bundle
42
+ 2) storefront_chatbot_merged_with_agentic
43
+ 3) storefront_chatbot_skeleton
agenticcore/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # package
agenticcore/chatbot/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # package
agenticcore/chatbot/services.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# /agenticcore/chatbot/services.py
from __future__ import annotations

import json
import os
from dataclasses import dataclass
from typing import Dict

# Delegate sentiment to the unified provider layer.
# If providers_unified.py moves under agenticcore/chatbot/, change this to:
#   from agenticcore.chatbot.providers_unified import analyze_sentiment
# NOTE: the original also re-imported the same name via a relative import
# (`from ..providers_unified import analyze_sentiment`); both bound the
# identical function and the relative form breaks running this file directly
# as a script, so only the absolute import is kept.
from agenticcore.providers_unified import analyze_sentiment
15
+
16
+ def _trim(s: str, max_len: int = 2000) -> str:
17
+ s = (s or "").strip()
18
+ return s if len(s) <= max_len else s[: max_len - 1] + "…"
19
+
20
+
@dataclass(frozen=True)
class SentimentResult:
    """Immutable (label, confidence) pair produced by sentiment analysis."""

    # One of: "positive" | "neutral" | "negative" | "mixed" | "unknown"
    label: str
    # Confidence in [0.0, 1.0]
    confidence: float
25
+
26
+
class ChatBot:
    """
    Minimal chatbot that uses provider-agnostic sentiment via providers_unified.
    Public API:
      - reply(text: str) -> Dict[str, object]
      - capabilities() -> Dict[str, object]
    """

    def __init__(self, system_prompt: str = "You are a concise helper.") -> None:
        # System prompt is trimmed defensively; 800 chars is plenty for a preamble.
        self._system_prompt = _trim(system_prompt, 800)
        # Which provider is intended/active (diagnostics only).
        self._mode = os.getenv("AI_PROVIDER") or "auto"

    def capabilities(self) -> Dict[str, object]:
        """List what this bot can do."""
        return {
            "system": "chatbot",
            # "auto" or a pinned provider (hf/azure/openai/cohere/deepai/offline)
            "mode": self._mode,
            "features": ["text-input", "sentiment-analysis", "help"],
            "commands": {"help": "Describe capabilities and usage."},
        }

    def reply(self, text: str) -> Dict[str, object]:
        """Produce a reply and sentiment for one user message."""
        user = _trim(text)
        if not user:
            return self._make_response(
                "I didn't catch that. Please provide some text.",
                SentimentResult("unknown", 0.0),
            )
        if user.lower() in {"help", "/help"}:
            return {"reply": self._format_help(), "capabilities": self.capabilities()}
        raw = analyze_sentiment(user)  # -> {"provider", "label", "score", ...}
        result = SentimentResult(
            label=str(raw.get("label", "neutral")),
            confidence=float(raw.get("score", 0.5)),
        )
        return self._make_response(self._compose(result), result)

    # ---- internals ----

    def _format_help(self) -> str:
        feats = ", ".join(self.capabilities()["features"])
        return f"I can analyze sentiment and respond concisely. Features: {feats}. Send any text or type 'help'."

    @staticmethod
    def _make_response(reply: str, s: SentimentResult) -> Dict[str, object]:
        return {"reply": reply, "sentiment": s.label, "confidence": round(float(s.confidence), 2)}

    @staticmethod
    def _compose(s: SentimentResult) -> str:
        # Fixed reply per known label; anything else is treated as undetermined.
        responses = {
            "positive": "Thanks for sharing. I detected a positive sentiment.",
            "negative": "I hear your concern. I detected a negative sentiment.",
            "neutral": "Noted. The sentiment appears neutral.",
            "mixed": "Your message has mixed signals. Can you clarify?",
        }
        return responses.get(s.label, "I could not determine the sentiment. Please rephrase.")
87
+
88
+
# Optional: local REPL for quick manual testing
def _interactive_loop() -> None:
    """Read lines from stdin and print the bot's JSON reply until exit/quit/EOF."""
    bot = ChatBot()
    try:
        while True:
            msg = input("> ").strip()
            if msg.lower() in {"exit", "quit"}:
                return
            print(json.dumps(bot.reply(msg), ensure_ascii=False))
    except (EOFError, KeyboardInterrupt):
        # Ctrl-D / Ctrl-C ends the session quietly.
        pass


if __name__ == "__main__":
    _interactive_loop()
agenticcore/cli.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /agenticcore/cli.py
2
+ """
3
+ agenticcore.cli
4
+ Console entrypoints:
5
+ - agentic: send a message to ChatBot and print reply JSON
6
+ - repo-tree: print a filtered tree view (uses tree.txt if present)
7
+ - repo-flatten: flatten code listing to stdout (uses FLATTENED_CODE.txt if present)
8
+ """
9
+ import argparse, json, sys, traceback
10
+ from pathlib import Path
11
+ from dotenv import load_dotenv
12
+ import os
13
+
14
+ # Load .env variables into os.environ (project root .env by default)
15
+ load_dotenv()
16
+
17
+
def cmd_agentic(argv=None):
    """Send a message to ChatBot and print the reply as JSON.

    Special messages:
      - "status": probe provider selection, HF auth (whoami) and inference.
      - "help":   print the bot's capabilities.
    Anything else is forwarded to ChatBot.reply().
    (Fixes: removed a duplicated comment line; the long 'status' branch is
    decomposed into focused helpers below.)
    """
    # Lazy import so other commands don't require ChatBot to be importable.
    from agenticcore.chatbot.services import ChatBot
    # analyze_sentiment is only needed by 'status' to reveal the chosen provider.
    try:
        from agenticcore.providers_unified import analyze_sentiment
    except Exception:
        analyze_sentiment = None  # still fine; we'll show mode only

    p = argparse.ArgumentParser(prog="agentic", description="Chat with AgenticCore ChatBot")
    p.add_argument("message", nargs="*", help="Message to send")
    p.add_argument("--debug", action="store_true", help="Print debug info")
    args = p.parse_args(argv)
    msg = " ".join(args.message).strip() or "hello"

    if args.debug:
        print(f"DEBUG argv={sys.argv}", flush=True)
        print(f"DEBUG raw message='{msg}'", flush=True)

    bot = ChatBot()

    # Special commands for testing / assignments
    if msg.lower() == "status":
        out = _status_report(bot, analyze_sentiment, debug=args.debug)
    elif msg.lower() == "help":
        out = {"capabilities": bot.capabilities()}
    else:
        try:
            out = bot.reply(msg)
        except Exception as e:
            if args.debug:
                traceback.print_exc()
            out = {"error": str(e), "message": msg}

    if args.debug:
        print(f"DEBUG out={out}", flush=True)

    print(json.dumps(out, indent=2), flush=True)


def _status_report(bot, analyze_sentiment, *, debug=False):
    """Build the 'status' diagnostic dict (provider probe + HF auth/inference)."""
    import requests  # local import to avoid hard dep for other commands

    # Try a lightweight provider probe via analyze_sentiment.
    provider = None
    if analyze_sentiment is not None:
        try:
            probe = analyze_sentiment("status ping")
            provider = (probe or {}).get("provider")
        except Exception:
            if debug:
                traceback.print_exc()

    tok = os.getenv("HF_API_KEY", "")
    auth_ok, who, err = _hf_whoami(requests, tok)

    # Extract fine-grained scopes for visibility.
    fg = (((who or {}).get("auth") or {}).get("accessToken") or {}).get("fineGrained") or {}
    scoped = fg.get("scoped") or []
    global_scopes = fg.get("global") or []

    infer_ok, infer_err = _hf_inference_ping(requests, tok)

    # Mask + length to verify what .env provided.
    mask = (tok[:3] + "..." + tok[-4:]) if tok else None
    return {
        "provider": provider or "unknown",
        "mode": getattr(bot, "_mode", "auto"),
        "auth_ok": auth_ok,
        "whoami": who,
        "token_scopes": {
            "global": global_scopes,
            "scoped": scoped,
        },
        "inference_ok": infer_ok,
        "inference_error": infer_err,
        "env": {
            "HF_API_KEY_len": len(tok) if tok else 0,
            "HF_API_KEY_mask": mask,
            "HF_MODEL_SENTIMENT": os.getenv("HF_MODEL_SENTIMENT"),
            "HTTP_TIMEOUT": os.getenv("HTTP_TIMEOUT"),
        },
        "capabilities": bot.capabilities(),
        "error": err,
    }


def _hf_whoami(requests, tok):
    """Hugging Face whoami auth probe. Returns (auth_ok, whoami_json, error)."""
    who = None
    auth_ok = False
    err = None
    try:
        if tok:
            r = requests.get(
                "https://huggingface.co/api/whoami-v2",
                headers={"Authorization": f"Bearer {tok}"},
                timeout=15,
            )
            auth_ok = (r.status_code == 200)
            who = r.json() if auth_ok else None
            if not auth_ok:
                err = r.text  # e.g., {"error":"Invalid credentials in Authorization header"}
        else:
            err = "HF_API_KEY not set (load .env or export it)"
    except Exception as e:
        err = str(e)
    return auth_ok, who, err


def _hf_inference_ping(requests, tok):
    """Tiny inference ping (proves 'Make calls to Inference Providers').

    Returns (inference_ok, inference_error).
    """
    infer_ok, infer_err = False, None
    try:
        if tok:
            model = os.getenv(
                "HF_MODEL_SENTIMENT",
                "distilbert-base-uncased-finetuned-sst-2-english",
            )
            r = requests.post(
                f"https://api-inference.huggingface.co/models/{model}",
                headers={"Authorization": f"Bearer {tok}", "x-wait-for-model": "true"},
                json={"inputs": "ping"},
                timeout=int(os.getenv("HTTP_TIMEOUT", "60")),
            )
            infer_ok = (r.status_code == 200)
            if not infer_ok:
                infer_err = f"HTTP {r.status_code}: {r.text}"
    except Exception as e:
        infer_err = str(e)
    return infer_ok, infer_err
139
+
140
+
def cmd_repo_tree(argv=None):
    """Print the repo tree from a precomputed file (default: tree.txt)."""
    parser = argparse.ArgumentParser(prog="repo-tree", description="Print repo tree (from tree.txt if available)")
    parser.add_argument("--path", default="tree.txt", help="Path to precomputed tree file")
    opts = parser.parse_args(argv)
    tree_file = Path(opts.path)
    if not tree_file.exists():
        print("(no tree.txt found)", flush=True)
        return
    print(tree_file.read_text(encoding="utf-8"), flush=True)
150
+
151
+
def cmd_repo_flatten(argv=None):
    """Print a pre-flattened code listing (default: FLATTENED_CODE.txt)."""
    parser = argparse.ArgumentParser(prog="repo-flatten", description="Print flattened code listing")
    parser.add_argument("--path", default="FLATTENED_CODE.txt", help="Path to pre-flattened code file")
    opts = parser.parse_args(argv)
    flat_file = Path(opts.path)
    if not flat_file.exists():
        print("(no FLATTENED_CODE.txt found)", flush=True)
        return
    print(flat_file.read_text(encoding="utf-8"), flush=True)
161
+
162
+
163
+ def _dispatch():
164
+ # Allow: python -m agenticcore.cli <subcommand> [args...]
165
+ if len(sys.argv) <= 1:
166
+ print("Usage: python -m agenticcore.cli <agentic|repo-tree|repo-flatten> [args]", file=sys.stderr)
167
+ sys.exit(2)
168
+ cmd, argv = sys.argv[1], sys.argv[2:]
169
+ try:
170
+ if cmd == "agentic":
171
+ cmd_agentic(argv)
172
+ elif cmd == "repo-tree":
173
+ cmd_repo_tree(argv)
174
+ elif cmd == "repo-flatten":
175
+ cmd_repo_flatten(argv)
176
+ else:
177
+ print(f"Unknown subcommand: {cmd}", file=sys.stderr)
178
+ sys.exit(2)
179
+ except SystemExit:
180
+ raise
181
+ except Exception:
182
+ traceback.print_exc()
183
+ sys.exit(1)
184
+
185
+
186
+ if __name__ == "__main__":
187
+ _dispatch()
agenticcore/providers_unified.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /agenticcore/providers_unified.py
2
+ """
3
+ providers_unified.py
4
+ Unified, switchable providers for sentiment + (optional) text generation.
5
+ Selection order unless AI_PROVIDER is set:
6
+ HF -> AZURE -> OPENAI -> COHERE -> DEEPAI -> OFFLINE
7
+ Env vars:
8
+ HF_API_KEY
9
+ MICROSOFT_AI_SERVICE_ENDPOINT, MICROSOFT_AI_API_KEY
10
+ OPENAI_API_KEY, OPENAI_MODEL=gpt-3.5-turbo
11
+ COHERE_API_KEY, COHERE_MODEL=command
12
+ DEEPAI_API_KEY
13
+ AI_PROVIDER = hf|azure|openai|cohere|deepai|offline
14
+ HTTP_TIMEOUT = 20
15
+ """
from __future__ import annotations

import json
import os
from typing import Any, Dict, Optional

import requests

# Shared HTTP timeout in seconds for provider calls; override via HTTP_TIMEOUT.
TIMEOUT = float(os.getenv("HTTP_TIMEOUT", "20"))
22
+
23
+ def _env(name: str, default: Optional[str] = None) -> Optional[str]:
24
+ v = os.getenv(name)
25
+ return v if (v is not None and str(v).strip() != "") else default
26
+
27
+ def _pick_provider() -> str:
28
+ forced = _env("AI_PROVIDER")
29
+ if forced in {"hf", "azure", "openai", "cohere", "deepai", "offline"}:
30
+ return forced
31
+ if _env("HF_API_KEY"): return "hf"
32
+ if _env("MICROSOFT_AI_API_KEY") and _env("MICROSOFT_AI_SERVICE_ENDPOINT"): return "azure"
33
+ if _env("OPENAI_API_KEY"): return "openai"
34
+ if _env("COHERE_API_KEY"): return "cohere"
35
+ if _env("DEEPAI_API_KEY"): return "deepai"
36
+ return "offline"
37
+
38
+ # ---------------------------
39
+ # Sentiment
40
+ # ---------------------------
41
+
def analyze_sentiment(text: str) -> Dict[str, Any]:
    """Classify *text* sentiment with the selected provider.

    Always returns a dict with "provider", "label" and "score"; provider
    failures are captured and reported as a neutral result with "error".
    """
    provider = _pick_provider()
    try:
        if provider == "hf":
            return _sentiment_hf(text)
        if provider == "azure":
            return _sentiment_azure(text)
        if provider == "openai":
            return _sentiment_openai_prompt(text)
        if provider == "cohere":
            return _sentiment_cohere_prompt(text)
        if provider == "deepai":
            return _sentiment_deepai(text)
        return _sentiment_offline(text)
    except Exception as e:
        return {"provider": provider, "label": "neutral", "score": 0.5, "error": str(e)}
53
+
54
+ def _sentiment_offline(text: str) -> Dict[str, Any]:
55
+ t = (text or "").lower()
56
+ pos = any(w in t for w in ["love","great","good","awesome","fantastic","thank","excellent","amazing"])
57
+ neg = any(w in t for w in ["hate","bad","terrible","awful","worst","angry","horrible"])
58
+ label = "positive" if pos and not neg else "negative" if neg and not pos else "neutral"
59
+ score = 0.9 if label != "neutral" else 0.5
60
+ return {"provider": "offline", "label": label, "score": score}
61
+
def _sentiment_hf(text: str) -> Dict[str, Any]:
    """
    Hugging Face Inference API for sentiment.
    Uses canonical repo id and handles 404/401 and various payload shapes.
    """
    key = _env("HF_API_KEY")
    if not key:
        return _sentiment_offline(text)

    # Canonical repo id to avoid 404.
    model = _env("HF_MODEL_SENTIMENT", "distilbert/distilbert-base-uncased-finetuned-sst-2-english")
    timeout = int(_env("HTTP_TIMEOUT", "30"))

    response = requests.post(
        f"https://api-inference.huggingface.co/models/{model}",
        headers={
            "Authorization": f"Bearer {key}",
            "x-wait-for-model": "true",
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
        json={"inputs": text},
        timeout=timeout,
    )

    def _neutral(reason: str) -> Dict[str, Any]:
        # Uniform degraded result carrying the failure reason.
        return {"provider": "hf", "label": "neutral", "score": 0.5, "error": reason}

    if response.status_code != 200:
        return _neutral(f"HTTP {response.status_code}: {response.text[:500]}")

    try:
        payload = response.json()
    except Exception as exc:
        return _neutral(str(exc))

    if isinstance(payload, dict) and "error" in payload:
        return _neutral(payload["error"])

    # Normalize: the API may return [[{...}]] or [{...}].
    if isinstance(payload, list) and payload and isinstance(payload[0], list):
        candidates = payload[0]
    elif isinstance(payload, list):
        candidates = payload
    else:
        candidates = []
    if not (isinstance(candidates, list) and candidates):
        return _neutral(f"Unexpected payload: {payload}")

    top = max(candidates, key=lambda x: x.get("score", 0.0) if isinstance(x, dict) else 0.0)
    raw = str(top.get("label", "")).upper()
    score = float(top.get("score", 0.5))

    mapping = {
        "LABEL_0": "negative", "LABEL_1": "neutral", "LABEL_2": "positive",
        "NEGATIVE": "negative", "NEUTRAL": "neutral", "POSITIVE": "positive",
    }
    label = mapping.get(raw, (raw.lower() or "neutral"))

    # Low-confidence positive/negative calls are demoted to neutral.
    neutral_floor = float(os.getenv("SENTIMENT_NEUTRAL_THRESHOLD", "0.65"))
    if label in {"positive", "negative"} and score < neutral_floor:
        label = "neutral"

    return {"provider": "hf", "label": label, "score": score}
120
+
def _sentiment_azure(text: str) -> Dict[str, Any]:
    """Azure Text Analytics sentiment; falls back to offline if SDK/keys absent."""
    try:
        from azure.core.credentials import AzureKeyCredential  # type: ignore
        from azure.ai.textanalytics import TextAnalyticsClient  # type: ignore
    except Exception:
        return _sentiment_offline(text)
    endpoint = _env("MICROSOFT_AI_SERVICE_ENDPOINT")
    key = _env("MICROSOFT_AI_API_KEY")
    if not (endpoint and key):
        return _sentiment_offline(text)
    client = TextAnalyticsClient(endpoint=endpoint.strip(), credential=AzureKeyCredential(key.strip()))
    doc = client.analyze_sentiment(documents=[text], show_opinion_mining=False)[0]
    confidences = {
        "positive": float(getattr(doc.confidence_scores, "positive", 0.0) or 0.0),
        "neutral": float(getattr(doc.confidence_scores, "neutral", 0.0) or 0.0),
        "negative": float(getattr(doc.confidence_scores, "negative", 0.0) or 0.0),
    }
    best = max(confidences, key=confidences.get)
    return {"provider": "azure", "label": best, "score": confidences[best]}


def _sentiment_openai_prompt(text: str) -> Dict[str, Any]:
    """Prompt an OpenAI chat model to classify sentiment; parse its JSON reply."""
    key = _env("OPENAI_API_KEY")
    model = _env("OPENAI_MODEL", "gpt-3.5-turbo")
    if not key:
        return _sentiment_offline(text)
    prompt = f"Classify the sentiment of this text as positive, negative, or neutral. Reply JSON with keys label and score (0..1). Text: {text!r}"
    resp = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"},
        json={"model": model, "messages": [{"role": "user", "content": prompt}], "temperature": 0},
        timeout=TIMEOUT,
    )
    resp.raise_for_status()
    content = resp.json()["choices"][0]["message"]["content"]
    try:
        parsed = json.loads(content)
        return {
            "provider": "openai",
            "label": str(parsed.get("label", "neutral")).lower(),
            "score": float(parsed.get("score", 0.5)),
        }
    except Exception:
        # Model didn't return JSON: fall back to keyword spotting in the reply.
        lowered = content.lower()
        guess = "positive" if "positive" in lowered else "negative" if "negative" in lowered else "neutral"
        return {"provider": "openai", "label": guess, "score": 0.5}


def _sentiment_cohere_prompt(text: str) -> Dict[str, Any]:
    """Prompt a Cohere generation model to classify sentiment; parse its JSON reply."""
    key = _env("COHERE_API_KEY")
    model = _env("COHERE_MODEL", "command")
    if not key:
        return _sentiment_offline(text)
    prompt = f"Classify the sentiment (positive, negative, neutral) and return JSON with keys label and score (0..1). Text: {text!r}"
    resp = requests.post(
        "https://api.cohere.ai/v1/generate",
        headers={
            "Authorization": f"Bearer {key}",
            "Content-Type": "application/json",
            "Cohere-Version": "2022-12-06",
        },
        json={"model": model, "prompt": prompt, "max_tokens": 30, "temperature": 0},
        timeout=TIMEOUT,
    )
    resp.raise_for_status()
    generated = (resp.json().get("generations") or [{}])[0].get("text", "")
    try:
        parsed = json.loads(generated)
        return {
            "provider": "cohere",
            "label": str(parsed.get("label", "neutral")).lower(),
            "score": float(parsed.get("score", 0.5)),
        }
    except Exception:
        # Non-JSON generation: fall back to keyword spotting.
        lowered = generated.lower()
        guess = "positive" if "positive" in lowered else "negative" if "negative" in lowered else "neutral"
        return {"provider": "cohere", "label": guess, "score": 0.5}


def _sentiment_deepai(text: str) -> Dict[str, Any]:
    """DeepAI sentiment endpoint; maps its first output token to a label."""
    key = _env("DEEPAI_API_KEY")
    if not key:
        return _sentiment_offline(text)
    resp = requests.post(
        "https://api.deepai.org/api/sentiment-analysis",
        headers={"api-key": key},
        data={"text": text},
        timeout=TIMEOUT,
    )
    resp.raise_for_status()
    label = (resp.json().get("output") or ["neutral"])[0].lower()
    return {"provider": "deepai", "label": label, "score": 0.5 if label == "neutral" else 0.9}
199
+
# ---------------------------
# Text generation (optional)
# ---------------------------

def generate_text(prompt: str, max_tokens: int = 128) -> Dict[str, Any]:
    """Generate text with the selected provider.

    Provider errors are folded into the payload as "(error) ..." text rather
    than raised.
    """
    provider = _pick_provider()
    try:
        if provider == "hf":
            return _gen_hf(prompt, max_tokens)
        if provider == "openai":
            return _gen_openai(prompt, max_tokens)
        if provider == "cohere":
            return _gen_cohere(prompt, max_tokens)
        if provider == "deepai":
            return _gen_deepai(prompt, max_tokens)
        return {"provider": "offline", "text": f"(offline) {prompt[:160]}"}
    except Exception as e:
        return {"provider": provider, "text": f"(error) {str(e)}"}


def _offline_echo(prompt: str) -> Dict[str, Any]:
    """Shared no-credentials fallback payload."""
    return {"provider": "offline", "text": f"(offline) {prompt[:160]}"}


def _gen_hf(prompt: str, max_tokens: int) -> Dict[str, Any]:
    """Hugging Face Inference API text generation."""
    key = _env("HF_API_KEY")
    if not key:
        return _offline_echo(prompt)
    model = _env("HF_MODEL_GENERATION", "tiiuae/falcon-7b-instruct")
    resp = requests.post(
        f"https://api-inference.huggingface.co/models/{model}",
        headers={"Authorization": f"Bearer {key}"},
        json={"inputs": prompt, "parameters": {"max_new_tokens": max_tokens}},
        timeout=TIMEOUT,
    )
    resp.raise_for_status()
    payload = resp.json()
    if isinstance(payload, list) and payload and "generated_text" in payload[0]:
        return {"provider": "hf", "text": payload[0]["generated_text"]}
    # Unexpected shape: surface it verbatim for debugging.
    return {"provider": "hf", "text": str(payload)}


def _gen_openai(prompt: str, max_tokens: int) -> Dict[str, Any]:
    """OpenAI chat-completions text generation."""
    key = _env("OPENAI_API_KEY")
    model = _env("OPENAI_MODEL", "gpt-3.5-turbo")
    if not key:
        return _offline_echo(prompt)
    resp = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"},
        json={"model": model, "messages": [{"role": "user", "content": prompt}], "max_tokens": max_tokens},
        timeout=TIMEOUT,
    )
    resp.raise_for_status()
    return {"provider": "openai", "text": resp.json()["choices"][0]["message"]["content"]}


def _gen_cohere(prompt: str, max_tokens: int) -> Dict[str, Any]:
    """Cohere generate endpoint."""
    key = _env("COHERE_API_KEY")
    model = _env("COHERE_MODEL", "command")
    if not key:
        return _offline_echo(prompt)
    resp = requests.post(
        "https://api.cohere.ai/v1/generate",
        headers={
            "Authorization": f"Bearer {key}",
            "Content-Type": "application/json",
            "Cohere-Version": "2022-12-06",
        },
        json={"model": model, "prompt": prompt, "max_tokens": max_tokens},
        timeout=TIMEOUT,
    )
    resp.raise_for_status()
    return {"provider": "cohere", "text": resp.json().get("generations", [{}])[0].get("text", "")}


def _gen_deepai(prompt: str, max_tokens: int) -> Dict[str, Any]:
    """DeepAI text-generator endpoint (max_tokens is not used by this API)."""
    key = _env("DEEPAI_API_KEY")
    if not key:
        return _offline_echo(prompt)
    resp = requests.post(
        "https://api.deepai.org/api/text-generator",
        headers={"api-key": key},
        data={"text": prompt},
        timeout=TIMEOUT,
    )
    resp.raise_for_status()
    return {"provider": "deepai", "text": resp.json().get("output", "")}
anon_bot/handler.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
# /anon_bot/handler.py

def handle_turn(m, h, u):
    """Return chat history *h* with the pair [m, 'hi'] appended.

    m: user message text.
    h: prior history as a list of [user, bot] pairs, or None for a new chat.
    u: user/session handle (currently unused; kept for interface compatibility).

    A new list is always returned; *h* is never mutated.
    """
    turns = list(h or [])
    turns.append([m, "hi"])
    return turns
anon_bot/rules.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /anon_bot/rules.py
app/app.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /app/app.py
2
+ #!/usr/bin/env python3
3
+ # app.py — aiohttp + Bot Framework Echo bot
4
+
5
+ import os
6
+ import sys
7
+ import json
8
+ from logic import handle_text
9
+ from aiohttp import web
10
+ from botbuilder.core import BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext
11
+ from botbuilder.schema import Activity
12
+ import aiohttp_cors
13
+ from pathlib import Path
14
+
15
+
# -------------------------------------------------------------------
# Your bot implementation
# -------------------------------------------------------------------
# A packaged bot can live at packages/bots/echo_bot.py:
#   from bots.echo_bot import EchoBot
# Minimal inline fallback so the server can be tested without that package.
class EchoBot:
    """Echo bot: supports 'help', 'capabilities', 'reverse: <msg>', 'echo <msg>'."""

    async def on_turn(self, turn_context: TurnContext):
        if turn_context.activity.type != "message":
            # Non-message activities are acknowledged, not processed.
            await turn_context.send_activity(f"[{turn_context.activity.type}] event received.")
            return

        text = (turn_context.activity.text or "").strip()
        if not text:
            await turn_context.send_activity("Input was empty. Type 'help' for usage.")
            return

        lower = text.lower()
        if lower == "help":
            await turn_context.send_activity("Try: echo <msg> | reverse: <msg> | capabilities")
        elif lower == "capabilities":
            await turn_context.send_activity("- echo\n- reverse\n- help\n- capabilities")
        elif lower.startswith("reverse:"):
            payload = text.split(":", 1)[1].strip()
            await turn_context.send_activity(payload[::-1])
        elif lower.startswith("echo "):
            await turn_context.send_activity(text[5:])
        else:
            await turn_context.send_activity("Unsupported command. Type 'help' for examples.")
44
+
# -------------------------------------------------------------------
# Adapter / bot setup
# -------------------------------------------------------------------
# Empty-string credentials are normalized to None (anonymous/local mode).
APP_ID = os.environ.get("MicrosoftAppId") or None
APP_PASSWORD = os.environ.get("MicrosoftAppPassword") or None

adapter_settings = BotFrameworkAdapterSettings(APP_ID, APP_PASSWORD)
adapter = BotFrameworkAdapter(adapter_settings)


async def on_error(context: TurnContext, error: Exception):
    """Global turn-error handler: log to stderr and best-effort apologize."""
    print(f"[on_turn_error] {error}", file=sys.stderr, flush=True)
    try:
        await context.send_activity("Oops. Something went wrong!")
    except Exception as send_err:
        # The channel itself may be broken; log rather than raise again.
        print(f"[on_turn_error][send_activity_failed] {send_err}", file=sys.stderr, flush=True)


adapter.on_turn_error = on_error
bot = EchoBot()
63
+
# -------------------------------------------------------------------
# HTTP handlers
# -------------------------------------------------------------------
async def messages(req: web.Request) -> web.Response:
    """Bot Framework endpoint: deserialize the activity and run it through the adapter."""
    # Content-Type can include charset; do a contains check.
    ctype = (req.headers.get("Content-Type") or "").lower()
    if "application/json" not in ctype:
        return web.Response(status=415, text="Unsupported Media Type: expected application/json")

    try:
        body = await req.json()
    except json.JSONDecodeError:
        return web.Response(status=400, text="Invalid JSON body")

    activity = Activity().deserialize(body)
    auth_header = req.headers.get("Authorization")

    invoke_response = await adapter.process_activity(activity, auth_header, bot.on_turn)
    if invoke_response:
        # For invoke activities, adapter returns explicit status/body.
        return web.json_response(data=invoke_response.body, status=invoke_response.status)
    # Acknowledge standard message activities.
    return web.Response(status=202, text="Accepted")


async def home(_req: web.Request) -> web.Response:
    """Human-friendly landing page."""
    return web.Response(
        text="Bot is running. POST Bot Framework activities to /api/messages.",
        content_type="text/plain",
    )


async def messages_get(_req: web.Request) -> web.Response:
    """GET on the bot endpoint is not supported; explain instead of 404ing."""
    return web.Response(
        text="This endpoint only accepts POST (Bot Framework activities).",
        content_type="text/plain",
        status=405,
    )


async def healthz(_req: web.Request) -> web.Response:
    """Liveness probe."""
    return web.json_response({"status": "ok"})


async def plain_chat(req: web.Request) -> web.Response:
    """Framework-free chat endpoint: {"text": ...} in, {"reply": ...} out."""
    try:
        payload = await req.json()
    except Exception:
        return web.json_response({"error": "Invalid JSON"}, status=400)
    user_text = payload.get("text", "")
    return web.json_response({"reply": handle_text(user_text)})
112
+
# -------------------------------------------------------------------
# App factory and entrypoint
# -------------------------------------------------------------------
# NOTE: a redundant `from pathlib import Path` stood here; Path is already
# imported at the top of this module, so the duplicate was removed.

def create_app() -> web.Application:
    """Build the aiohttp application and register all routes."""
    app = web.Application()
    app.router.add_get("/", home)
    app.router.add_get("/healthz", healthz)
    app.router.add_get("/api/messages", messages_get)
    app.router.add_post("/api/messages", messages)
    app.router.add_post("/plain-chat", plain_chat)

    # Serve ./static alongside the bot if present (optional assets).
    static_dir = Path(__file__).parent / "static"
    if static_dir.exists():
        app.router.add_static("/static/", path=static_dir, show_index=True)
    else:
        print(f"[warn] static directory not found: {static_dir}", flush=True)

    return app


app = create_app()

if __name__ == "__main__":
    host = os.environ.get("HOST", "127.0.0.1")  # use 0.0.0.0 in containers
    port = int(os.environ.get("PORT", 3978))
    web.run_app(app, host=host, port=port)
app/assets/html/agenticcore_frontend.html ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!doctype html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="utf-8" />
5
+ <meta name="viewport" content="width=device-width, initial-scale=1" />
6
+ <title>AgenticCore Chatbot Frontend</title>
7
+ <style>
8
+ :root {
9
+ --bg: #0b0d12;
10
+ --panel: #0f172a;
11
+ --panel-2: #111827;
12
+ --text: #e5e7eb;
13
+ --muted: #9ca3af;
14
+ --accent: #60a5fa;
15
+ --border: #1f2940;
16
+ --danger: #ef4444;
17
+ --success: #22c55e;
18
+ }
19
+ * { box-sizing: border-box; }
20
+ body { margin: 0; font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif; background: var(--bg); color: var(--text); }
21
+ .wrap { max-width: 920px; margin: 32px auto; padding: 0 16px; }
22
+ header { display: flex; align-items: center; justify-content: space-between; margin-bottom: 16px; gap: 16px; }
23
+ header h1 { font-size: 18px; margin: 0; letter-spacing: .3px; }
24
+ header .badge { font-size: 12px; opacity: .85; padding: 4px 8px; border:1px solid var(--border); border-radius: 999px; background: rgba(255,255,255,0.03); }
25
+ .card { background: var(--panel); border: 1px solid var(--border); border-radius: 16px; padding: 16px; }
26
+ .row { display: flex; gap: 10px; align-items: center; }
27
+ .stack { display: grid; gap: 12px; }
28
+ label { font-size: 12px; color: var(--muted); }
29
+ input[type=text] { flex: 1; padding: 12px 14px; border-radius: 12px; border: 1px solid var(--border); background: var(--panel-2); color: var(--text); outline: none; }
30
+ input[type=text]::placeholder { color: #6b7280; }
31
+ button { padding: 10px 14px; border-radius: 12px; border: 1px solid var(--border); background: #1f2937; color: var(--text); cursor: pointer; transition: transform .02s ease, background .2s; }
32
+ button:hover { background: #273449; }
33
+ button:active { transform: translateY(1px); }
34
+ .btn-primary { background: #1f2937; border-color: #31405a; }
35
+ .btn-ghost { background: transparent; border-color: var(--border); }
36
+ .grid { display: grid; gap: 12px; }
37
+ .grid-2 { grid-template-columns: 1fr 1fr; }
38
+ .log { margin-top: 16px; display: grid; gap: 10px; }
39
+ .bubble { max-width: 80%; padding: 12px 14px; border-radius: 14px; line-height: 1.35; }
40
+ .user { background: #1e293b; border:1px solid #2b3b55; margin-left: auto; border-bottom-right-radius: 4px; }
41
+ .bot { background: #0d1b2a; border:1px solid #223049; margin-right: auto; border-bottom-left-radius: 4px; }
42
+ .meta { font-size: 12px; color: var(--muted); margin-top: 4px; }
43
+ pre { margin: 0; white-space: pre-wrap; word-break: break-word; }
44
+ .status { display:flex; align-items:center; gap:8px; font-size: 12px; color: var(--muted); }
45
+ .dot { width:8px; height:8px; border-radius:999px; background: #64748b; display:inline-block; }
46
+ .dot.ok { background: var(--success); }
47
+ .dot.bad { background: var(--danger); }
48
+ footer { margin: 24px 0; text-align:center; color: var(--muted); font-size: 12px; }
49
+ .small { font-size: 12px; }
50
+ @media (max-width: 700px) { .grid-2 { grid-template-columns: 1fr; } }
51
+ </style>
52
+ </head>
53
+ <body>
54
+ <div class="wrap">
55
+ <header>
56
+ <h1>AgenticCore Chatbot Frontend</h1>
57
+ <div class="badge">Frontend → FastAPI → providers_unified</div>
58
+ </header>
59
+
60
+ <section class="card stack">
61
+ <div class="grid grid-2">
62
+ <div class="stack">
63
+ <label for="backend">Backend URL</label>
64
+ <div class="row">
65
+ <input id="backend" type="text" placeholder="http://127.0.0.1:8000" />
66
+ <button id="save" class="btn-ghost">Save</button>
67
+ </div>
68
+ <div class="status" id="status"><span class="dot"></span><span>Not checked</span></div>
69
+ </div>
70
+ <div class="stack">
71
+ <label for="message">Message</label>
72
+ <div class="row">
73
+ <input id="message" type="text" placeholder="Type a message…" />
74
+ <button id="send" class="btn-primary">Send</button>
75
+ </div>
76
+ <div class="row">
77
+ <button id="cap" class="btn-ghost small">Capabilities</button>
78
+ <button id="health" class="btn-ghost small">Health</button>
79
+ <button id="clear" class="btn-ghost small">Clear</button>
80
+ </div>
81
+ </div>
82
+ </div>
83
+ <div class="log" id="log"></div>
84
+ </section>
85
+
86
+ <footer>
87
+ Use with your FastAPI backend at <code>/chatbot/message</code>. Configure CORS if you serve this file from a different origin.
88
+ </footer>
89
+ </div>
90
+
91
+ <script>
92
+ const $ = (sel) => document.querySelector(sel);
93
+ const backendInput = $('#backend');
94
+ const sendBtn = $('#send');
95
+ const saveBtn = $('#save');
96
+ const msgInput = $('#message');
97
+ const capBtn = $('#cap');
98
+ const healthBtn = $('#health');
99
+ const clearBtn = $('#clear');
100
+ const log = $('#log');
101
+ const status = $('#status');
102
+ const dot = status.querySelector('.dot');
103
+ const statusText = status.querySelector('span:last-child');
104
+
105
+ function getBackendUrl() {
106
+ return localStorage.getItem('BACKEND_URL') || 'http://127.0.0.1:8000';
107
+ }
108
+ function setBackendUrl(v) {
109
+ localStorage.setItem('BACKEND_URL', v);
110
+ }
111
+ function cardUser(text) {
112
+ const div = document.createElement('div');
113
+ div.className = 'bubble user';
114
+ div.textContent = text;
115
+ log.appendChild(div);
116
+ log.scrollTop = log.scrollHeight;
117
+ }
118
+ function cardBot(obj) {
119
+ const wrap = document.createElement('div');
120
+ wrap.className = 'bubble bot';
121
+ const pre = document.createElement('pre');
122
+ pre.textContent = typeof obj === 'string' ? obj : JSON.stringify(obj, null, 2);
123
+ wrap.appendChild(pre);
124
+ log.appendChild(wrap);
125
+ log.scrollTop = log.scrollHeight;
126
+ }
127
+ function setStatus(ok, text) {
128
+ dot.classList.toggle('ok', !!ok);
129
+ dot.classList.toggle('bad', ok === false);
130
+ statusText.textContent = text || (ok ? 'OK' : 'Error');
131
+ }
132
+ async function api(path, init) {
133
+ const base = backendInput.value.trim().replace(/\/$/, '');
134
+ const url = base + path;
135
+ const resp = await fetch(url, init);
136
+ if (!resp.ok) {
137
+ let t = await resp.text().catch(() => '');
138
+ throw new Error(`HTTP ${resp.status} ${resp.statusText} — ${t}`);
139
+ }
140
+ const contentType = resp.headers.get('content-type') || '';
141
+ if (contentType.includes('application/json')) return resp.json();
142
+ return resp.text();
143
+ }
144
+
145
+ async function checkHealth() {
146
+ try {
147
+ const h = await api('/health', { method: 'GET' });
148
+ setStatus(true, 'Healthy');
149
+ cardBot({ health: h });
150
+ } catch (e) {
151
+ setStatus(false, String(e.message || e));
152
+ cardBot({ error: String(e.message || e) });
153
+ }
154
+ }
155
+
156
+ async function sendMessage() {
157
+ const text = msgInput.value.trim();
158
+ if (!text) return;
159
+ cardUser(text);
160
+ msgInput.value = '';
161
+ try {
162
+ const data = await api('/chatbot/message', {
163
+ method: 'POST',
164
+ headers: { 'Content-Type': 'application/json' },
165
+ body: JSON.stringify({ message: text })
166
+ });
167
+ cardBot(data);
168
+ } catch (e) {
169
+ cardBot({ error: String(e.message || e) });
170
+ }
171
+ }
172
+
173
+ async function showCapabilities() {
174
+ try {
175
+ // Prefer API if available; if 404, fall back to library-like prompt.
176
+ const data = await api('/chatbot/message', {
177
+ method: 'POST',
178
+ headers: { 'Content-Type': 'application/json' },
179
+ body: JSON.stringify({ message: 'help' })
180
+ });
181
+ cardBot(data);
182
+ } catch (e) {
183
+ cardBot({ capabilities: ['text-input','sentiment-analysis','help'], note: 'API help failed, showing defaults', error: String(e.message || e) });
184
+ }
185
+ }
186
+
187
+ // Wire up
188
+ backendInput.value = getBackendUrl();
189
+ saveBtn.onclick = () => { setBackendUrl(backendInput.value.trim()); setStatus(null, 'Saved'); };
190
+ sendBtn.onclick = sendMessage;
191
+ msgInput.addEventListener('keydown', (ev) => { if (ev.key === 'Enter') sendMessage(); });
192
+ capBtn.onclick = showCapabilities;
193
+ healthBtn.onclick = checkHealth;
194
+ clearBtn.onclick = () => { log.innerHTML = ''; setStatus(null, 'Idle'); };
195
+
196
+ // Initial health ping
197
+ checkHealth();
198
+ </script>
199
+ </body>
200
+ </html>
app/assets/html/chat.html ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!doctype html>
2
+ <html><head><meta charset="utf-8"/><title>Simple Chat</title>
3
+ <meta name="viewport" content="width=device-width,initial-scale=1"/>
4
+ <style>
5
+ :root { --bg:#f6f7f9; --card:#fff; --me:#dff1ff; --bot:#ffffff; --text:#23262b; --muted:#8a9099; }
6
+ body { margin:0; font-family:system-ui,-apple-system,Segoe UI,Roboto,Arial,sans-serif; background:var(--bg); color:var(--text); }
7
+ .app { max-width:840px; margin:24px auto; padding:0 16px; }
8
+ .card { background:var(--card); border:1px solid #e3e6ea; border-radius:14px; box-shadow:0 1px 2px rgba(0,0,0,.04); overflow:hidden; }
9
+ .header { padding:14px 16px; border-bottom:1px solid #e9edf2; font-weight:600; }
10
+ .chat { height:480px; overflow:auto; padding:16px; display:flex; flex-direction:column; gap:12px; }
11
+ .row { display:flex; }
12
+ .row.me { justify-content:flex-end; }
13
+ .bubble { max-width:70%; padding:10px 12px; border-radius:12px; line-height:1.35; white-space:pre-wrap; }
14
+ .me .bubble { background:var(--me); border:1px solid #c3e5ff; }
15
+ .bot .bubble { background:var(--bot); border:1px solid #e5e8ec; }
16
+ .footer { display:flex; gap:8px; padding:12px; border-top:1px solid #e9edf2; }
17
+ input[type=text] { flex:1; padding:10px 12px; border-radius:10px; border:1px solid #d5dbe3; font-size:15px; }
18
+ button { padding:10px 14px; border-radius:10px; border:1px solid #2b6cb0; background:#2b6cb0; color:#fff; font-weight:600; cursor:pointer; }
19
+ button:disabled { opacity:.6; cursor:not-allowed; }
20
+ .hint { color:var(--muted); font-size:12px; padding:0 16px 12px; }
21
+ </style></head>
22
+ <body>
23
+ <div class="app"><div class="card">
24
+ <div class="header">Traditional Chatbot (Local)</div>
25
+ <div id="chat" class="chat"></div>
26
+ <div class="hint">Try: <code>reverse: hello world</code>, <code>help</code>, <code>capabilities</code></div>
27
+ <div class="footer">
28
+ <input id="msg" type="text" placeholder="Type a message..." autofocus />
29
+ <button id="send">Send</button>
30
+ </div>
31
+ </div></div>
32
+ <script>
33
+ const API = "http://127.0.0.1:3978/plain-chat";
34
+ const chat = document.getElementById("chat");
35
+ const input = document.getElementById("msg");
36
+ const sendBtn = document.getElementById("send");
37
+ function addBubble(text, who) {
38
+ const row = document.createElement("div"); row.className = "row " + who;
39
+ const wrap = document.createElement("div"); wrap.className = who === "me" ? "me" : "bot";
40
+ const b = document.createElement("div"); b.className = "bubble"; b.textContent = text;
41
+ wrap.appendChild(b); row.appendChild(wrap); chat.appendChild(row); chat.scrollTop = chat.scrollHeight;
42
+ }
43
+ async function send() {
44
+ const text = input.value.trim(); if (!text) return; input.value = ""; addBubble(text, "me"); sendBtn.disabled = true;
45
+ try {
46
+ const res = await fetch(API, { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ text }) });
47
+ if (!res.ok) throw new Error("HTTP " + res.status);
48
+ const data = await res.json(); addBubble(data.reply ?? "(no reply)", "bot");
49
+ } catch (err) { addBubble("Error: " + err.message, "bot"); }
50
+ finally { sendBtn.disabled = false; input.focus(); }
51
+ }
52
+ sendBtn.addEventListener("click", send);
53
+ input.addEventListener("keydown", (e)=>{ if (e.key === "Enter") send(); });
54
+ addBubble("Connected to local bot at /plain-chat", "bot");
55
+ </script>
56
+ </body></html>
app/assets/html/chat_console.html ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!doctype html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="utf-8" />
5
+ <title>Console Chat Tester</title>
6
+ <meta name="viewport" content="width=device-width, initial-scale=1" />
7
+ <style>
8
+ body{ font-family: ui-sans-serif, system-ui, Arial; margin:20px; }
9
+ .row{ display:flex; gap:8px; align-items:center; margin:6px 0; }
10
+ input[type=text]{ flex:1; padding:8px; }
11
+ button{ padding:8px 10px; }
12
+ pre{ background:#0b1020; color:#d6e7ff; padding:10px; height:320px; overflow:auto; }
13
+ .chip{ display:inline-block; padding:3px 8px; background:#eef; border-radius:12px; margin-left:8px; }
14
+ </style>
15
+ </head>
16
+ <body>
17
+ <h2>AgenticCore Console</h2>
18
+
19
+ <div class="row">
20
+ <label>Backend</label>
21
+ <input id="base" type="text" value="http://127.0.0.1:8000" />
22
+ <button id="btnHealth">Health</button>
23
+ <button id="btnRoutes">Routes</button>
24
+ </div>
25
+
26
+ <div class="row">
27
+ <input id="msg" type="text" placeholder="Say something…" />
28
+ <button id="btnSend">POST /chatbot/message</button>
29
+ </div>
30
+
31
+ <div>
32
+ <span>Mode:</span>
33
+ <span id="mode" class="chip">API</span>
34
+ </div>
35
+
36
+ <pre id="out"></pre>
37
+
38
+ <script>
39
+ const $ = id => document.getElementById(id);
40
+ const out = $("out");
41
+ function print(o){ out.textContent += (typeof o==="string" ? o : JSON.stringify(o,null,2)) + "\n"; out.scrollTop = out.scrollHeight; }
42
+ function join(b, p){ return b.replace(/\/+$/,"") + p; }
43
+
44
+ async function health(){
45
+ try{
46
+ const r = await fetch(join($("base").value, "/health"));
47
+ print(await r.json());
48
+ }catch(e){ print("health error: " + e); }
49
+ }
50
+ async function routes(){
51
+ try{
52
+ const r = await fetch(join($("base").value, "/openapi.json"));
53
+ const j = await r.json();
54
+ print({ routes: Object.keys(j.paths) });
55
+ }catch(e){ print("routes error: " + e); }
56
+ }
57
+ async function send(){
58
+ const text = $("msg").value.trim();
59
+ if(!text){ print("enter a message first"); return; }
60
+ try{
61
+ const r = await fetch(join($("base").value, "/chatbot/message"), {
62
+ method:"POST",
63
+ headers:{ "Content-Type":"application/json" },
64
+ body: JSON.stringify({ message: text })
65
+ });
66
+ print(await r.json());
67
+ }catch(e){ print("send error: " + e); }
68
+ }
69
+ $("btnHealth").onclick = health;
70
+ $("btnRoutes").onclick = routes;
71
+ $("btnSend").onclick = send;
72
+
73
+ // boot
74
+ health();
75
+ </script>
76
+ </body>
77
+ </html>
app/assets/html/chat_minimal.html ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!doctype html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="utf-8" />
5
+ <title>Minimal Chat Tester</title>
6
+ <meta name="viewport" content="width=device-width, initial-scale=1" />
7
+ <style>
8
+ body { font-family: system-ui, Arial, sans-serif; margin: 24px; }
9
+ .row { display:flex; gap:8px; align-items:center; margin-bottom:8px; }
10
+ input[type=text]{ width:420px; padding:8px; }
11
+ textarea{ width:100%; height:240px; padding:8px; }
12
+ button{ padding:8px 12px; }
13
+ .ok{ color:#1a7f37; }
14
+ .warn{ color:#b54708; }
15
+ .err{ color:#b42318; }
16
+ </style>
17
+ </head>
18
+ <body>
19
+ <h2>Minimal Chat Tester → FastAPI /chatbot/message</h2>
20
+
21
+ <div class="row">
22
+ <label>Backend URL:</label>
23
+ <input id="base" type="text" value="http://127.0.0.1:8000" />
24
+ <button id="btnHealth">Health</button>
25
+ <button id="btnCaps">Capabilities</button>
26
+ </div>
27
+
28
+ <div class="row">
29
+ <input id="msg" type="text" placeholder="Type a message…" />
30
+ <button id="btnSend">Send</button>
31
+ </div>
32
+
33
+ <p id="status"></p>
34
+ <textarea id="log" readonly></textarea>
35
+
36
+ <script>
37
+ const $ = id => document.getElementById(id);
38
+ const log = (o, cls="") => {
39
+ const line = (typeof o === "string") ? o : JSON.stringify(o, null, 2);
40
+ $("log").value += line + "\n";
41
+ $("log").scrollTop = $("log").scrollHeight;
42
+ if(cls) { $("status").className = cls; $("status").textContent = line; }
43
+ };
44
+
45
+ function urlJoin(base, path) {
46
+ return base.replace(/\/+$/,"") + path;
47
+ }
48
+
49
+ async function health() {
50
+ try {
51
+ const r = await fetch(urlJoin($("base").value, "/health"));
52
+ const j = await r.json();
53
+ log(j, "ok");
54
+ } catch (e) { log("Health error: " + e, "err"); }
55
+ }
56
+
57
+ async function caps() {
58
+ try {
59
+ // Prefer library-like caps endpoint if you expose one; otherwise call /openapi.json for visibility
60
+ const r = await fetch(urlJoin($("base").value, "/openapi.json"));
61
+ const j = await r.json();
62
+ log({paths: Object.keys(j.paths).slice(0,20)}, "ok");
63
+ } catch (e) { log("Caps error: " + e, "err"); }
64
+ }
65
+
66
+ async function sendMsg() {
67
+ const text = $("msg").value.trim();
68
+ if(!text) { log("Please type a message.", "warn"); return; }
69
+ try {
70
+ const r = await fetch(urlJoin($("base").value, "/chatbot/message"), {
71
+ method:"POST",
72
+ headers:{ "Content-Type":"application/json" },
73
+ body: JSON.stringify({ message: text })
74
+ });
75
+ if(!r.ok) throw new Error(`${r.status} ${r.statusText}`);
76
+ const j = await r.json();
77
+ log(j, "ok");
78
+ } catch (e) { log("Send error: " + e, "err"); }
79
+ }
80
+
81
+ $("btnHealth").onclick = health;
82
+ $("btnCaps").onclick = caps;
83
+ $("btnSend").onclick = sendMsg;
84
+
85
+ // Warmup
86
+ health();
87
+ </script>
88
+ </body>
89
+ </html>
app/routes.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /app/routes.py
core/config.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # /core/config.py
2
+
3
+ class Settings: pass
4
+ settings = Settings()
core/logging.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /core/logging.py
core/types.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /core/types.py
docs/DEV_DOC.md ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## 3. Functional Requirements
2
+
3
+ This section describes the functional requirements for connecting a chatbot to an AI-as-a-Service (AIaaS) platform. It defines the expected system behavior, outlines constraints, and sets measurable acceptance criteria. Requirements are grouped into system context, core functions, supporting functions, and non-functional aspects.
4
+
5
+ ---
6
+
7
+ ### 3.1 System Context
8
+
9
+ The chatbot acts as the client application. It receives user input, processes it, and communicates with an external AIaaS endpoint (e.g., Azure AI Language Service). The AI service provides natural language processing (NLP) features such as sentiment analysis. The chatbot then interprets the service output and responds back to the user.
10
+
11
+ Key components include:
12
+ - **User Interface (UI):** Chat interface for entering text.
13
+ - **Chatbot Core:** Handles request routing and conversation logic.
14
+ - **AI Service Connector:** Manages authentication and API calls to the AI service.
15
+ - **AIaaS Platform:** External cloud service providing NLP functions.
16
+
17
+ ---
18
+
19
+ ### 3.2 Functional Requirements
20
+
21
+ #### FR-1: User Input Handling
22
+ - The chatbot shall accept text input from users.
23
+ - The chatbot shall sanitize input to remove unsafe characters.
24
+ - The chatbot shall log all interactions for debugging and testing.
25
+
26
+ #### FR-2: API Connection
27
+ - The system shall authenticate with the AI service using API keys stored securely in environment variables.
28
+ - The chatbot shall send user text to the AIaaS endpoint in the required format.
29
+ - The chatbot shall handle and parse responses from the AIaaS.
30
+
31
+ #### FR-3: Sentiment Analysis Integration
32
+ - The chatbot shall use the AIaaS to determine the sentiment (e.g., positive, neutral, negative) of user input.
33
+ - The chatbot shall present sentiment results as part of its response or use them to adjust tone.
34
+
35
+ #### FR-4: Error and Exception Handling
36
+ - The system shall detect failed API calls and return a fallback message to the user.
37
+ - The chatbot shall notify the user if the AI service is unavailable.
38
+ - The chatbot shall log errors with timestamp and cause.
39
+
40
+ #### FR-5: Reporting and Documentation
41
+ - The chatbot shall provide a list of supported commands or features when prompted.
42
+ - The chatbot shall record system status and output for inclusion in the project report.
43
+ - The development process shall be documented with screenshots and configuration notes.
44
+
45
+ ---
46
+
47
+ ### 3.3 Non-Functional Requirements
48
+
49
+ #### NFR-1: Security
50
+ - API keys shall not be hard-coded in source files.
51
+ - Sensitive data shall be retrieved from environment variables or secure vaults.
52
+
53
+ #### NFR-2: Performance
54
+ - The chatbot shall return responses within 2 seconds under normal network conditions.
55
+ - The system shall process at least 20 concurrent user sessions without performance degradation.
56
+
57
+ #### NFR-3: Reliability
58
+ - The chatbot shall achieve at least 95% uptime during testing.
59
+ - The chatbot shall gracefully degrade to local responses if the AI service is unavailable.
60
+
61
+ #### NFR-4: Usability
62
+ - The chatbot shall provide clear, user-friendly error messages.
63
+ - The chatbot shall handle malformed input without crashing.
64
+
65
+ ---
66
+
67
+ ### 3.4 Acceptance Criteria
68
+
69
+ 1. **Input Handling**
70
+ - Given valid text input, the chatbot processes it without errors.
71
+ - Given invalid or malformed input, the chatbot responds with a clarification request.
72
+
73
+ 2. **API Connection**
74
+ - Given a valid API key and endpoint, the chatbot connects and retrieves sentiment analysis.
75
+ - Given an invalid API key, the chatbot logs an error and informs the user.
76
+
77
+ 3. **Sentiment Analysis**
78
+ - Given a positive statement, the chatbot labels it correctly with at least 90% accuracy.
79
+ - Given a negative statement, the chatbot labels it correctly with at least 90% accuracy.
80
+
81
+ 4. **Error Handling**
82
+ - When the AI service is unavailable, the chatbot informs the user and continues functioning with local responses.
83
+ - All failures are recorded in a log file.
84
+
85
+ 5. **Usability**
86
+ - The chatbot returns responses in less than 2 seconds for 95% of requests.
87
+ - The chatbot displays a list of features when the user requests “help.”
88
+
89
+ ---
90
+
91
+ ### Glossary
92
+
93
+ - **AIaaS (AI-as-a-Service):** Cloud-based artificial intelligence services accessible via APIs.
94
+ - **API (Application Programming Interface):** A set of rules for software applications to communicate with each other.
95
+ - **NLP (Natural Language Processing):** A field of AI focused on enabling computers to understand human language.
96
+ - **Sentiment Analysis:** An NLP technique that determines the emotional tone behind a text.
97
+
docs/architecture.md ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # /docs/slides/architecture.md
2
+ # Architecture — short explainer tied to the flowchart.
docs/design.md ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # /docs/slides/design.md
2
+ # Design notes — API notes, security, tradeoffs.
docs/flowchart.png ADDED
docs/results.md ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # /docs/slides/results.md
2
+ # Results — challenges, metrics, screenshots.
examples/example.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # /examples/example.py
2
+ """Simple CLI example that sends a message to the ChatBot and prints the JSON reply."""
3
+ import json
4
+ from agenticcore.chatbot.services import ChatBot
5
+
6
+ if __name__ == "__main__":
7
+ bot = ChatBot()
8
+ result = bot.reply("hello world")
9
+ print(json.dumps(result, indent=2))
guardrails/pii_redaction.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # /guardrails/pii_redaction.py
2
+
3
+ def redact(t): return t
guardrails/safety.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /guardrails/safety.py
integrations/azure/bot_framework.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # /integrations/azure/bot_framework.py
2
+ # Azure Bot Framework (placeholder)
integrations/botframework/app.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /integrations/botframework/app.py — aiohttp + Bot Framework Echo bot
2
+ #!/usr/bin/env python3
3
+
4
+ import os
5
+ import sys
6
+ import json
7
+ from logic import handle_text
8
+ from aiohttp import web
9
+ from botbuilder.core import BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext
10
+ from botbuilder.schema import Activity
11
+ import aiohttp_cors
12
+ from pathlib import Path
13
+
14
+
15
+ # -------------------------------------------------------------------
16
+ # Your bot implementation
17
+ # -------------------------------------------------------------------
18
+ # Make sure this exists at packages/bots/echo_bot.py
19
+ # from bots.echo_bot import EchoBot
20
+ # Minimal inline fallback if you want to test quickly:
21
class EchoBot:
    """Inline fallback bot: echoes, reverses, and lists its commands."""

    async def on_turn(self, turn_context: TurnContext):
        activity = turn_context.activity

        # Non-message activities are just acknowledged with their type.
        if activity.type != "message":
            await turn_context.send_activity(f"[{activity.type}] event received.")
            return

        text = (activity.text or "").strip()
        if not text:
            await turn_context.send_activity("Input was empty. Type 'help' for usage.")
            return

        lower = text.lower()
        if lower == "help":
            await turn_context.send_activity("Try: echo <msg> | reverse: <msg> | capabilities")
        elif lower == "capabilities":
            await turn_context.send_activity("- echo\n- reverse\n- help\n- capabilities")
        elif lower.startswith("reverse:"):
            payload = text.split(":", 1)[1].strip()
            await turn_context.send_activity(payload[::-1])
        elif lower.startswith("echo "):
            await turn_context.send_activity(text[5:])
        else:
            await turn_context.send_activity("Unsupported command. Type 'help' for examples.")
43
+
44
# -------------------------------------------------------------------
# Adapter / bot setup
# -------------------------------------------------------------------
# Standard Bot Framework credential env vars; empty string collapses to
# None, which means local/emulator mode with auth disabled.
APP_ID = os.environ.get("MicrosoftAppId") or None
APP_PASSWORD = os.environ.get("MicrosoftAppPassword") or None

adapter_settings = BotFrameworkAdapterSettings(APP_ID, APP_PASSWORD)
adapter = BotFrameworkAdapter(adapter_settings)

async def on_error(context: TurnContext, error: Exception):
    """Global turn-error hook: log the failure, then try to tell the user."""
    print(f"[on_turn_error] {error}", file=sys.stderr, flush=True)
    try:
        await context.send_activity("Oops. Something went wrong!")
    except Exception as send_err:
        # Sending the apology can itself fail (e.g. dead channel) — log only.
        print(f"[on_turn_error][send_activity_failed] {send_err}", file=sys.stderr, flush=True)

adapter.on_turn_error = on_error
bot = EchoBot()
62
+
63
+ # -------------------------------------------------------------------
64
+ # HTTP handlers
65
+ # -------------------------------------------------------------------
66
async def messages(req: web.Request) -> web.Response:
    """Bot Framework activities endpoint: validate, deserialize, dispatch."""
    # Content-Type may carry a charset suffix, so substring-match it.
    content_type = (req.headers.get("Content-Type") or "").lower()
    if "application/json" not in content_type:
        return web.Response(status=415, text="Unsupported Media Type: expected application/json")

    try:
        body = await req.json()
    except json.JSONDecodeError:
        return web.Response(status=400, text="Invalid JSON body")

    activity = Activity().deserialize(body)
    invoke_response = await adapter.process_activity(
        activity, req.headers.get("Authorization"), bot.on_turn
    )

    # Invoke activities come back with an explicit status/body from the adapter.
    if invoke_response:
        return web.json_response(data=invoke_response.body, status=invoke_response.status)

    # Ordinary message activities are simply acknowledged.
    return web.Response(status=202, text="Accepted")
86
+
87
async def home(_req: web.Request) -> web.Response:
    """Landing page confirming the bot is up and where to POST activities."""
    return web.Response(
        content_type="text/plain",
        text="Bot is running. POST Bot Framework activities to /api/messages.",
    )
92
+
93
async def messages_get(_req: web.Request) -> web.Response:
    """405 helper so browsers hitting /api/messages get a useful hint."""
    return web.Response(
        status=405,
        content_type="text/plain",
        text="This endpoint only accepts POST (Bot Framework activities).",
    )
99
+
100
async def healthz(_req: web.Request) -> web.Response:
    """Liveness probe endpoint; always answers {"status": "ok"}."""
    payload = {"status": "ok"}
    return web.json_response(payload)
102
+
103
async def plain_chat(req: web.Request) -> web.Response:
    """Framework-free chat endpoint used by the static HTML testers."""
    try:
        payload = await req.json()
    except Exception:
        return web.json_response({"error": "Invalid JSON"}, status=400)
    # Hand the raw text to the shared handler and wrap its reply.
    user_text = payload.get("text", "")
    return web.json_response({"reply": handle_text(user_text)})
111
+
112
# -------------------------------------------------------------------
# App factory and entrypoint
# -------------------------------------------------------------------
# (The duplicate mid-file `from pathlib import Path` was removed: `Path`
# is already imported at the top of this module.)

def create_app() -> web.Application:
    """Assemble the aiohttp application: routes plus optional static assets."""
    app = web.Application()
    app.router.add_get("/", home)
    app.router.add_get("/healthz", healthz)
    app.router.add_get("/api/messages", messages_get)
    app.router.add_post("/api/messages", messages)
    app.router.add_post("/plain-chat", plain_chat)

    static_dir = Path(__file__).parent / "static"
    if static_dir.exists():
        app.router.add_static("/static/", path=static_dir, show_index=True)
    else:
        # Non-fatal: the API works without the demo assets.
        print(f"[warn] static directory not found: {static_dir}", flush=True)

    return app

app = create_app()

if __name__ == "__main__":
    host = os.environ.get("HOST", "127.0.0.1")  # use 0.0.0.0 in containers
    port = int(os.environ.get("PORT", 3978))
    web.run_app(app, host=host, port=port)
integrations/botframework/bot.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /integrations/botframework/bot.py
2
+ """
3
+ Simple MBF bot:
4
+ - 'help' / 'capabilities' shows features
5
+ - 'reverse <text>' returns reversed text
6
+ - otherwise delegates to AgenticCore ChatBot (sentiment) if available
7
+ """
8
+
9
+ from typing import List, Optional, Dict, Any
10
+ from botbuilder.core import ActivityHandler, TurnContext
11
+ from botbuilder.schema import ChannelAccount, ActivityTypes
12
+
13
+ from skills import normalize, reverse_text, capabilities, is_empty
14
+
15
# Try to import AgenticCore; if unavailable, provide a tiny fallback.
try:
    from agenticcore.chatbot.services import ChatBot  # real provider-backed bot
except Exception:
    class ChatBot:
        """Offline/dev shim that mimics the real ChatBot's reply contract."""

        def reply(self, message: str) -> Dict[str, Any]:
            # Fixed neutral payload so downstream formatting still works.
            return {
                "reply": "Noted. (local fallback reply)",
                "sentiment": "neutral",
                "confidence": 0.5,
            }
26
+
27
+ def _format_sentiment(res: Dict[str, Any]) -> str:
28
+ """Compose a user-facing string from ChatBot reply payload."""
29
+ reply = (res.get("reply") or "").strip()
30
+ label: Optional[str] = res.get("sentiment")
31
+ conf = res.get("confidence")
32
+ if label is not None and conf is not None:
33
+ return f"{reply} (sentiment: {label}, confidence: {float(conf):.2f})"
34
+ return reply or "I'm not sure what to say."
35
+
36
def _help_text() -> str:
    """Single source of truth for the help/capability text."""
    feature_lines = "\n".join(f"- {cap}" for cap in capabilities())
    return (
        "I can reverse text and provide concise replies with sentiment.\n"
        "Commands:\n"
        "- help | capabilities\n"
        "- reverse <text>\n"
        "General text will be handled by the ChatBot service.\n\n"
        f"My capabilities:\n{feature_lines}"
    )
47
+
48
class SimpleBot(ActivityHandler):
    """Minimal ActivityHandler: local commands first, ChatBot service as fallback."""

    def __init__(self, chatbot: Optional[ChatBot] = None):
        # Allow injection for tests; default to the provider-backed bot.
        self._chatbot = chatbot or ChatBot()

    async def on_members_added_activity(
        self, members_added: List[ChannelAccount], turn_context: TurnContext
    ):
        bot_id = turn_context.activity.recipient.id
        for member in members_added:
            # Greet only real users, never the bot's own account.
            if member.id != bot_id:
                await turn_context.send_activity("Hello! Type 'help' to see what I can do.")

    async def on_message_activity(self, turn_context: TurnContext):
        # Ignore anything that is not a message activity.
        if turn_context.activity.type != ActivityTypes.message:
            return

        raw = (turn_context.activity.text or "").strip()
        if is_empty(raw):
            await turn_context.send_activity("Please enter a message (try 'help').")
            return

        command = normalize(raw)

        if command in {"help", "capabilities"}:
            await turn_context.send_activity(_help_text())
        elif command.startswith("reverse "):
            payload = raw.split(" ", 1)[1] if " " in raw else ""
            await turn_context.send_activity(reverse_text(payload))
        else:
            # Provider-agnostic sentiment/reply; plain echo as a last resort.
            try:
                result = self._chatbot.reply(raw)
                await turn_context.send_activity(_format_sentiment(result))
            except Exception:
                await turn_context.send_activity(f"You said: {raw}")
integrations/botframework/bots/echo_bot.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # bots/echo_bot.py
2
+ from typing import List
+
+ from botbuilder.core import ActivityHandler, TurnContext
+ from botbuilder.schema import ChannelAccount
4
+
5
def simple_sentiment(text: str):
    """
    Tiny, no-cost sentiment heuristic so the bot can be demoed without
    extra services (swap later for HF/OpenAI/Azure easily).

    Matches whole words only, so e.g. "goodbye" no longer counts as
    containing "good" and "badge" no longer counts as "bad" (the previous
    substring check produced such false positives).

    Args:
        text: Raw user text; None/empty is treated as neutral.

    Returns:
        A (label, confidence) pair where label is "positive" | "negative"
        | "neutral" and confidence is a fixed heuristic score
        (0.9 for a clear hit, 0.5 otherwise).
    """
    import re

    # Tokenize into lowercase alphabetic words for whole-word matching.
    words = set(re.findall(r"[a-z]+", (text or "").lower()))
    positive = {"love", "great", "good", "awesome", "fantastic", "excellent", "amazing"}
    negative = {"hate", "bad", "terrible", "awful", "worst", "horrible", "angry"}
    pos = bool(words & positive)
    neg = bool(words & negative)
    if pos and not neg:
        return "positive", 0.9
    if neg and not pos:
        return "negative", 0.9
    return "neutral", 0.5
16
+
17
# Capability descriptions surfaced by the 'help' / 'capabilities' commands.
CAPS = [
    "Echo what you say (baseline).",
    "Show my capabilities with 'help' or 'capabilities'.",
    "Handle malformed/empty input politely.",
    "Classify simple sentiment (positive/negative/neutral).",
]
23
+
24
class EchoBot(ActivityHandler):
    """Echo bot: greets new members, lists capabilities, echoes text + sentiment."""

    async def on_members_added_activity(
        # Fixed annotation: the original `[ChannelAccount]` was a list literal,
        # not a type; `List[ChannelAccount]` is the intended annotation.
        self, members_added: List[ChannelAccount], turn_context: TurnContext
    ):
        """Greet every newly added member except the bot itself."""
        for member in members_added:
            if member.id != turn_context.activity.recipient.id:
                await turn_context.send_activity(
                    "Hi! I’m your sample bot.\n"
                    "- Try typing: **help**\n"
                    "- Or any sentence and I’ll echo it + sentiment."
                )

    async def on_message_activity(self, turn_context: TurnContext):
        """Route one message: empty -> prompt, help -> capabilities, else echo + sentiment."""
        text = (turn_context.activity.text or "").strip()

        # Handle empty/malformed input politely.
        if not text:
            await turn_context.send_activity(
                "I didn’t catch anything. Please type a message (or 'help')."
            )
            return

        # Capabilities listing.
        if text.lower() in {"help", "capabilities", "what can you do"}:
            caps = "\n".join(f"• {c}" for c in CAPS)
            await turn_context.send_activity(
                "Here’s what I can do:\n" + caps
            )
            return

        # Normal message → echo + sentiment.
        label, score = simple_sentiment(text)
        reply = f"You said: **{text}**\nSentiment: **{label}** (conf {score:.2f})"
        await turn_context.send_activity(reply)
integrations/email/ticket_stub.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # /integrations/email/ticket_stub.py
2
+ # Email ticket stub (placeholder)
integrations/web/fastapi/web_agentic.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# /integrations/web/fastapi/web_agentic.py
from fastapi import FastAPI, Query
from fastapi.responses import HTMLResponse
from agenticcore.chatbot.services import ChatBot

app = FastAPI(title="AgenticCore Web UI")


# 1. Simple HTML form at /
@app.get("/", response_class=HTMLResponse)
def index():
    """Serve a minimal HTML form that submits a message to /agentic via GET."""
    page = """
    <form action="/agentic" method="get">
      <input type="text" name="msg" placeholder="Type a message" style="width:300px">
      <input type="submit" value="Send">
    </form>
    """
    return page


# 2. Agentic endpoint
@app.get("/agentic")
def run_agentic(msg: str = Query(..., description="Message to send to ChatBot")):
    """Forward *msg* to a ChatBot instance and return its structured reply."""
    return ChatBot().reply(msg)
logged_in_bot/handler.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# /logged_in_bot/handler.py

from agenticcore.chatbot.services import ChatBot

# Module-level ChatBot shared across all turns.
_bot = ChatBot()

def handle_turn(message, history, user):
    """Process one chat turn and return history extended with [message, reply].

    The input history list is not mutated; a new list is returned. On any
    ChatBot failure a fallback reply naming the exception type is used.
    """
    turns = history or []
    try:
        result = _bot.reply(message)
        reply = result.get("reply") or "Noted."
        sentiment = result.get("sentiment")
        confidence = result.get("confidence")
        # Append the sentiment annotation only when both fields are present.
        if sentiment is not None and confidence is not None:
            reply = f"{reply} (sentiment: {sentiment}, confidence: {float(confidence):.2f})"
    except Exception as e:
        reply = f"Sorry—error in ChatBot: {type(e).__name__}. Using fallback."
    return turns + [[message, reply]]
20
+
logged_in_bot/sentiment_azure.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /logged_in_bot/sentiment_azure.py
logged_in_bot/tools.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /logged_in_bot/tools.py
memory/rag/indexer.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /memory/rag/indexer.py
memory/rag/retriever.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /memory/rag/retriever.py
memory/sessions.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /memory/sessions.py
memory/store.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # /memory/store.py
2
+
3
# Process-wide in-memory store (placeholder); contents are lost on restart.
DB={}
nlu/pipeline.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # /nlu/pipeline.py
2
+
3
def analyze(t):
    """Stub NLU analysis: every input maps to the 'general' intent."""
    return {'intent': 'general'}
nlu/prompts.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /nlu/prompts.py
nlu/router.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # /nlu/router.py
notebooks/ChatbotIntegration.ipynb ADDED
@@ -0,0 +1,559 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "3c5453da-9714-4410-af12-2727730020bc",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "{'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n"
14
+ ]
15
+ }
16
+ ],
17
+ "source": [
18
+ "from agenticcore.chatbot.services import ChatBot\n",
19
+ "bot = ChatBot()\n",
20
+ "print(bot.reply(\"Testing from notebook\"))\n"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "markdown",
25
+ "id": "6d467914-f9b5-43bb-b66e-fc7f1db12b21",
26
+ "metadata": {},
27
+ "source": [
28
+ "# 2) Config: choose backend URL and provider (HF/Azure/etc.)"
29
+ ]
30
+ },
31
+ {
32
+ "cell_type": "code",
33
+ "execution_count": 2,
34
+ "id": "240d2787-aacd-49ec-bfe9-709108e49df0",
35
+ "metadata": {},
36
+ "outputs": [
37
+ {
38
+ "data": {
39
+ "text/plain": [
40
+ "'http://127.0.0.1:8000'"
41
+ ]
42
+ },
43
+ "execution_count": 2,
44
+ "metadata": {},
45
+ "output_type": "execute_result"
46
+ }
47
+ ],
48
+ "source": [
49
+ "import os\n",
50
+ "\n",
51
+ "# Point to your FastAPI server (change if needed)\n",
52
+ "import os\n",
53
+ "\n",
54
+ "# Default backend URL (can be overridden later via the widget)\n",
55
+ "BACKEND_URL = os.environ.get(\"BACKEND_URL\", \"http://127.0.0.1:8000\")\n",
56
+ "\n",
57
+ "# Provider hint (optional; providers_unified auto-detects if keys exist)\n",
58
+ "# Examples:\n",
59
+ "# os.environ[\"AI_PROVIDER\"] = \"hf\"\n",
60
+ "# os.environ[\"HF_API_KEY\"] = \"hf_XXXXXXXX...\" # if using Hugging Face\n",
61
+ "# os.environ[\"MICROSOFT_AI_SERVICE_ENDPOINT\"] = \"https://<name>.cognitiveservices.azure.com/\"\n",
62
+ "# os.environ[\"MICROSOFT_AI_API_KEY\"] = \"<your-azure-key>\"\n",
63
+ "\n",
64
+ "BACKEND_URL\n"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "markdown",
69
+ "id": "bde64f5a-dd29-414e-9116-498ee972e759",
70
+ "metadata": {},
71
+ "source": [
72
+ "# 3) Helper functions (API + Library paths)"
73
+ ]
74
+ },
75
+ {
76
+ "cell_type": "code",
77
+ "execution_count": 3,
78
+ "id": "8d50f328-567b-454f-8090-87c045674338",
79
+ "metadata": {},
80
+ "outputs": [],
81
+ "source": [
82
+ "import os\n",
83
+ "import json\n",
84
+ "import requests\n",
85
+ "from typing import Dict, Any\n",
86
+ "\n",
87
+ "# Default backend URL\n",
88
+ "BACKEND_URL = os.environ.get(\"BACKEND_URL\", \"http://127.0.0.1:8000\")\n",
89
+ "\n",
90
+ "def send_via_api(message: str, url: str = BACKEND_URL) -> Dict[str, Any]:\n",
91
+ " \"\"\"POST to FastAPI /chatbot/message. Returns dict with reply/sentiment/confidence.\"\"\"\n",
92
+ " u = url.rstrip(\"/\") + \"/chatbot/message\"\n",
93
+ " r = requests.post(u, json={\"message\": message}, timeout=20)\n",
94
+ " r.raise_for_status()\n",
95
+ " return r.json()\n",
96
+ "\n",
97
+ "def send_via_library(message: str) -> Dict[str, Any]:\n",
98
+ " \"\"\"Call ChatBot() directly inside this kernel.\"\"\"\n",
99
+ " from agenticcore.chatbot.services import ChatBot\n",
100
+ " return ChatBot().reply(message)\n",
101
+ "\n",
102
+ "def health(url: str = BACKEND_URL) -> Dict[str, Any]:\n",
103
+ " r = requests.get(url.rstrip(\"/\") + \"/health\", timeout=10)\n",
104
+ " r.raise_for_status()\n",
105
+ " return r.json()\n"
106
+ ]
107
+ },
108
+ {
109
+ "cell_type": "markdown",
110
+ "id": "f247c509-abd4-44de-9b49-20402d54a296",
111
+ "metadata": {},
112
+ "source": [
113
+ "# 4) Minimal UI (ipywidgets)"
114
+ ]
115
+ },
116
+ {
117
+ "cell_type": "code",
118
+ "execution_count": 4,
119
+ "id": "3f1258d0-5616-4c5a-b19e-e4ffa040e9cb",
120
+ "metadata": {},
121
+ "outputs": [
122
+ {
123
+ "data": {
124
+ "application/vnd.jupyter.widget-view+json": {
125
+ "model_id": "d5e531363757461cbb9225d4afdd5ea9",
126
+ "version_major": 2,
127
+ "version_minor": 0
128
+ },
129
+ "text/plain": [
130
+ "HBox(children=(ToggleButtons(description='Route:', options=(('API', 'api'), ('Library', 'lib')), value='api'),…"
131
+ ]
132
+ },
133
+ "metadata": {},
134
+ "output_type": "display_data"
135
+ },
136
+ {
137
+ "data": {
138
+ "application/vnd.jupyter.widget-view+json": {
139
+ "model_id": "b7e695b92e094e3094099e4f54c5852d",
140
+ "version_major": 2,
141
+ "version_minor": 0
142
+ },
143
+ "text/plain": [
144
+ "HBox(children=(Text(value='', description='You:', layout=Layout(width='60%'), placeholder='Type a message…'), …"
145
+ ]
146
+ },
147
+ "metadata": {},
148
+ "output_type": "display_data"
149
+ },
150
+ {
151
+ "data": {
152
+ "application/vnd.jupyter.widget-view+json": {
153
+ "model_id": "4d1b73dbb842416bb085ba3237e6c69c",
154
+ "version_major": 2,
155
+ "version_minor": 0
156
+ },
157
+ "text/plain": [
158
+ "Output()"
159
+ ]
160
+ },
161
+ "metadata": {},
162
+ "output_type": "display_data"
163
+ },
164
+ {
165
+ "data": {
166
+ "text/html": [
167
+ "\n",
168
+ "<div style=\"margin-top:8px;opacity:.8\">\n",
169
+ " Tip: API path requires your FastAPI server running at /chatbot/message.\n",
170
+ " Switch to <b>Library</b> mode for offline tests.\n",
171
+ "</div>\n"
172
+ ],
173
+ "text/plain": [
174
+ "<IPython.core.display.HTML object>"
175
+ ]
176
+ },
177
+ "metadata": {},
178
+ "output_type": "display_data"
179
+ }
180
+ ],
181
+ "source": [
182
+ "import ipywidgets as W\n",
183
+ "from IPython.display import display, HTML, clear_output\n",
184
+ "\n",
185
+ "mode = W.ToggleButtons(\n",
186
+ " options=[(\"API\", \"api\"), (\"Library\", \"lib\")],\n",
187
+ " value=\"api\",\n",
188
+ " description=\"Route:\",\n",
189
+ ")\n",
190
+ "backend = W.Text(value=BACKEND_URL, placeholder=\"http://127.0.0.1:8000\", description=\"Backend:\", layout=W.Layout(width=\"60%\"))\n",
191
+ "save_btn = W.Button(description=\"Save\", button_style=\"info\")\n",
192
+ "msg = W.Text(placeholder=\"Type a message…\", description=\"You:\", layout=W.Layout(width=\"60%\"))\n",
193
+ "send_btn = W.Button(description=\"Send\", button_style=\"primary\")\n",
194
+ "cap_btn = W.Button(description=\"Capabilities\", tooltip=\"Show ChatBot capabilities\")\n",
195
+ "out = W.Output()\n",
196
+ "\n",
197
+ "def on_save(_):\n",
198
+ " os.environ[\"BACKEND_URL\"] = backend.value.strip()\n",
199
+ " with out:\n",
200
+ " print(f\"[config] BACKEND_URL = {os.environ['BACKEND_URL']}\")\n",
201
+ "\n",
202
+ "def on_send(_):\n",
203
+ " text = msg.value.strip()\n",
204
+ " if not text:\n",
205
+ " with out:\n",
206
+ " print(\"[warn] Please enter some text.\")\n",
207
+ " return\n",
208
+ " try:\n",
209
+ " if mode.value == \"api\":\n",
210
+ " data = send_via_api(text, backend.value.strip())\n",
211
+ " else:\n",
212
+ " data = send_via_library(text)\n",
213
+ " with out:\n",
214
+ " print(json.dumps(data, indent=2, ensure_ascii=False))\n",
215
+ " except Exception as e:\n",
216
+ " with out:\n",
217
+ " print(f\"[error] {e}\")\n",
218
+ "\n",
219
+ "def on_caps(_):\n",
220
+ " try:\n",
221
+ " # Prefer library capabilities; keeps working even if API is down\n",
222
+ " from agenticcore.chatbot.services import ChatBot\n",
223
+ " data = ChatBot().capabilities()\n",
224
+ " with out:\n",
225
+ " print(json.dumps({\"capabilities\": data}, indent=2))\n",
226
+ " except Exception as e:\n",
227
+ " with out:\n",
228
+ " print(f\"[error capabilities] {e}\")\n",
229
+ "\n",
230
+ "save_btn.on_click(on_save)\n",
231
+ "send_btn.on_click(on_send)\n",
232
+ "cap_btn.on_click(on_caps)\n",
233
+ "\n",
234
+ "display(W.HBox([mode, backend, save_btn]))\n",
235
+ "display(W.HBox([msg, send_btn, cap_btn]))\n",
236
+ "display(out)\n",
237
+ "\n",
238
+ "# Optional visual hint\n",
239
+ "display(HTML(\"\"\"\n",
240
+ "<div style=\"margin-top:8px;opacity:.8\">\n",
241
+ " Tip: API path requires your FastAPI server running at /chatbot/message.\n",
242
+ " Switch to <b>Library</b> mode for offline tests.\n",
243
+ "</div>\n",
244
+ "\"\"\"))\n"
245
+ ]
246
+ },
247
+ {
248
+ "cell_type": "markdown",
249
+ "id": "7aaf5b2a-2a30-42a5-ae77-d851c62feccb",
250
+ "metadata": {},
251
+ "source": [
252
+ "# 5) Batch test cell (multi-prompt, tabular)"
253
+ ]
254
+ },
255
+ {
256
+ "cell_type": "code",
257
+ "execution_count": 5,
258
+ "id": "f1d5da55-08ff-4433-aea9-a3ccde34de5c",
259
+ "metadata": {},
260
+ "outputs": [
261
+ {
262
+ "data": {
263
+ "text/html": [
264
+ "<div>\n",
265
+ "<style scoped>\n",
266
+ " .dataframe tbody tr th:only-of-type {\n",
267
+ " vertical-align: middle;\n",
268
+ " }\n",
269
+ "\n",
270
+ " .dataframe tbody tr th {\n",
271
+ " vertical-align: top;\n",
272
+ " }\n",
273
+ "\n",
274
+ " .dataframe thead th {\n",
275
+ " text-align: right;\n",
276
+ " }\n",
277
+ "</style>\n",
278
+ "<table border=\"1\" class=\"dataframe\">\n",
279
+ " <thead>\n",
280
+ " <tr style=\"text-align: right;\">\n",
281
+ " <th></th>\n",
282
+ " <th>message</th>\n",
283
+ " <th>reply</th>\n",
284
+ " <th>sentiment</th>\n",
285
+ " <th>confidence</th>\n",
286
+ " </tr>\n",
287
+ " </thead>\n",
288
+ " <tbody>\n",
289
+ " <tr>\n",
290
+ " <th>0</th>\n",
291
+ " <td>I absolutely love this project!</td>\n",
292
+ " <td>(error) 404 Client Error: Not Found for url: h...</td>\n",
293
+ " <td>None</td>\n",
294
+ " <td>None</td>\n",
295
+ " </tr>\n",
296
+ " <tr>\n",
297
+ " <th>1</th>\n",
298
+ " <td>This is awful and broken.</td>\n",
299
+ " <td>(error) 404 Client Error: Not Found for url: h...</td>\n",
300
+ " <td>None</td>\n",
301
+ " <td>None</td>\n",
302
+ " </tr>\n",
303
+ " <tr>\n",
304
+ " <th>2</th>\n",
305
+ " <td>Can you list your capabilities?</td>\n",
306
+ " <td>(error) 404 Client Error: Not Found for url: h...</td>\n",
307
+ " <td>None</td>\n",
308
+ " <td>None</td>\n",
309
+ " </tr>\n",
310
+ " <tr>\n",
311
+ " <th>3</th>\n",
312
+ " <td></td>\n",
313
+ " <td>(error) 404 Client Error: Not Found for url: h...</td>\n",
314
+ " <td>None</td>\n",
315
+ " <td>None</td>\n",
316
+ " </tr>\n",
317
+ " </tbody>\n",
318
+ "</table>\n",
319
+ "</div>"
320
+ ],
321
+ "text/plain": [
322
+ " message \\\n",
323
+ "0 I absolutely love this project! \n",
324
+ "1 This is awful and broken. \n",
325
+ "2 Can you list your capabilities? \n",
326
+ "3 \n",
327
+ "\n",
328
+ " reply sentiment confidence \n",
329
+ "0 (error) 404 Client Error: Not Found for url: h... None None \n",
330
+ "1 (error) 404 Client Error: Not Found for url: h... None None \n",
331
+ "2 (error) 404 Client Error: Not Found for url: h... None None \n",
332
+ "3 (error) 404 Client Error: Not Found for url: h... None None "
333
+ ]
334
+ },
335
+ "execution_count": 5,
336
+ "metadata": {},
337
+ "output_type": "execute_result"
338
+ }
339
+ ],
340
+ "source": [
341
+ "import pandas as pd\n",
342
+ "\n",
343
+ "tests = [\n",
344
+ " \"I absolutely love this project!\",\n",
345
+ " \"This is awful and broken.\",\n",
346
+ " \"Can you list your capabilities?\",\n",
347
+ " \"\", # malformed/empty\n",
348
+ "]\n",
349
+ "\n",
350
+ "rows = []\n",
351
+ "for t in tests:\n",
352
+ " try:\n",
353
+ " data = send_via_api(t, backend.value.strip()) if mode.value == \"api\" else send_via_library(t)\n",
354
+ " rows.append({\"message\": t, **data})\n",
355
+ " except Exception as e:\n",
356
+ " rows.append({\"message\": t, \"reply\": f\"(error) {e}\", \"sentiment\": None, \"confidence\": None})\n",
357
+ "\n",
358
+ "df = pd.DataFrame(rows)\n",
359
+ "df\n"
360
+ ]
361
+ },
362
+ {
363
+ "cell_type": "markdown",
364
+ "id": "bc6096a4-ea15-4908-9edf-e80d2c89c4a6",
365
+ "metadata": {},
366
+ "source": [
367
+ "# 6) Health check + quick assertions"
368
+ ]
369
+ },
370
+ {
371
+ "cell_type": "code",
372
+ "execution_count": 11,
373
+ "id": "a3b9632b-a7eb-4c4b-94ed-add3bd5a88c6",
374
+ "metadata": {},
375
+ "outputs": [
376
+ {
377
+ "name": "stdout",
378
+ "output_type": "stream",
379
+ "text": [
380
+ "Health: {'ok': True, 'version': '0.3.0', 'time': 1757798428}\n",
381
+ "Library OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n",
382
+ "API OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5, 'thread': None}\n"
383
+ ]
384
+ }
385
+ ],
386
+ "source": [
387
+ "try:\n",
388
+ " print(\"Health:\", health(backend.value.strip()))\n",
389
+ "except Exception as e:\n",
390
+ " print(\"Health check failed:\", e)\n",
391
+ "\n",
392
+ "# Simple acceptance checks\n",
393
+ "sample = send_via_library(\"hello\")\n",
394
+ "assert all(k in sample for k in (\"reply\", \"sentiment\", \"confidence\"))\n",
395
+ "print(\"Library OK:\", sample)\n",
396
+ "\n",
397
+ "sample_api = send_via_api(\"hello from api\", backend.value.strip())\n",
398
+ "assert all(k in sample_api for k in (\"reply\", \"sentiment\", \"confidence\"))\n",
399
+ "print(\"API OK:\", sample_api)\n"
400
+ ]
401
+ },
402
+ {
403
+ "cell_type": "code",
404
+ "execution_count": null,
405
+ "id": "21b5e668-3b60-4d3d-bef2-fae238ebf91a",
406
+ "metadata": {},
407
+ "outputs": [],
408
+ "source": []
409
+ },
410
+ {
411
+ "cell_type": "code",
412
+ "execution_count": 12,
413
+ "id": "1fd68863-30cd-4790-a77d-e637acfb9fd0",
414
+ "metadata": {},
415
+ "outputs": [
416
+ {
417
+ "name": "stdout",
418
+ "output_type": "stream",
419
+ "text": [
420
+ "[\n",
421
+ " \"/health\",\n",
422
+ " \"/status\",\n",
423
+ " \"/chatbot/message\",\n",
424
+ " \"/ui\"\n",
425
+ "]\n"
426
+ ]
427
+ }
428
+ ],
429
+ "source": [
430
+ "import requests, os, json\n",
431
+ "BACKEND_URL = os.environ.get(\"BACKEND_URL\", \"http://127.0.0.1:8000\")\n",
432
+ "routes = requests.get(BACKEND_URL.rstrip(\"/\") + \"/openapi.json\", timeout=10).json()[\"paths\"]\n",
433
+ "print(json.dumps(list(routes.keys())[:20], indent=2))\n"
434
+ ]
435
+ },
436
+ {
437
+ "cell_type": "code",
438
+ "execution_count": 13,
439
+ "id": "0595d521-f8da-46ed-aafa-bc84fd519c08",
440
+ "metadata": {},
441
+ "outputs": [
442
+ {
443
+ "data": {
444
+ "text/plain": [
445
+ "{'reply': 'Noted. The sentiment appears neutral.',\n",
446
+ " 'sentiment': 'neutral',\n",
447
+ " 'confidence': 0.5,\n",
448
+ " 'thread': None}"
449
+ ]
450
+ },
451
+ "execution_count": 13,
452
+ "metadata": {},
453
+ "output_type": "execute_result"
454
+ }
455
+ ],
456
+ "source": [
457
+ "send_via_api(\"hello from api\", BACKEND_URL.strip())\n"
458
+ ]
459
+ },
460
+ {
461
+ "cell_type": "code",
462
+ "execution_count": 14,
463
+ "id": "7e44f300-e314-466b-9a9e-b2817f6b3aaa",
464
+ "metadata": {},
465
+ "outputs": [
466
+ {
467
+ "name": "stdout",
468
+ "output_type": "stream",
469
+ "text": [
470
+ "Health: {'ok': True, 'version': '0.3.0', 'time': 1757798440}\n",
471
+ "Library OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n",
472
+ "API OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5, 'thread': None}\n"
473
+ ]
474
+ }
475
+ ],
476
+ "source": [
477
+ "print(\"Health:\", health(BACKEND_URL))\n",
478
+ "sample = send_via_library(\"hello\")\n",
479
+ "print(\"Library OK:\", sample)\n",
480
+ "\n",
481
+ "sample_api = send_via_api(\"hello from api\", BACKEND_URL)\n",
482
+ "print(\"API OK:\", sample_api)\n"
483
+ ]
484
+ },
485
+ {
486
+ "cell_type": "code",
487
+ "execution_count": 16,
488
+ "id": "190a5e9b-a0b6-47a9-8201-51788715ac12",
489
+ "metadata": {},
490
+ "outputs": [
491
+ {
492
+ "ename": "SyntaxError",
493
+ "evalue": "invalid syntax (3247471142.py, line 2)",
494
+ "output_type": "error",
495
+ "traceback": [
496
+ "\u001b[1;36m Cell \u001b[1;32mIn[16], line 2\u001b[1;36m\u001b[0m\n\u001b[1;33m uvicorn backend.app.main:app --reload --port 8077 --app-dir .\u001b[0m\n\u001b[1;37m ^\u001b[0m\n\u001b[1;31mSyntaxError\u001b[0m\u001b[1;31m:\u001b[0m invalid syntax\n"
497
+ ]
498
+ }
499
+ ],
500
+ "source": [
501
+ "# Pick a clean port to avoid collisions (e.g., 8077)\n",
502
+ "uvicorn backend.app.main:app --reload --port 8077 --app-dir .\n"
503
+ ]
504
+ },
505
+ {
506
+ "cell_type": "code",
507
+ "execution_count": null,
508
+ "id": "efef3da8-8f93-483c-a623-ea8e48c604c8",
509
+ "metadata": {},
510
+ "outputs": [],
511
+ "source": []
512
+ },
513
+ {
514
+ "cell_type": "code",
515
+ "execution_count": null,
516
+ "id": "99187776-e8c6-4c4c-80d6-6d87c299c96b",
517
+ "metadata": {},
518
+ "outputs": [],
519
+ "source": []
520
+ },
521
+ {
522
+ "cell_type": "code",
523
+ "execution_count": null,
524
+ "id": "c32a0f8c-a533-4bd0-abd3-88b5fd993305",
525
+ "metadata": {},
526
+ "outputs": [],
527
+ "source": []
528
+ },
529
+ {
530
+ "cell_type": "code",
531
+ "execution_count": null,
532
+ "id": "2e8a5292-7458-474f-b599-6b2192c23b37",
533
+ "metadata": {},
534
+ "outputs": [],
535
+ "source": []
536
+ }
537
+ ],
538
+ "metadata": {
539
+ "kernelspec": {
540
+ "display_name": "Python (stock_ai)",
541
+ "language": "python",
542
+ "name": "stock_ai"
543
+ },
544
+ "language_info": {
545
+ "codemirror_mode": {
546
+ "name": "ipython",
547
+ "version": 3
548
+ },
549
+ "file_extension": ".py",
550
+ "mimetype": "text/x-python",
551
+ "name": "python",
552
+ "nbconvert_exporter": "python",
553
+ "pygments_lexer": "ipython3",
554
+ "version": "3.9.20"
555
+ }
556
+ },
557
+ "nbformat": 4,
558
+ "nbformat_minor": 5
559
+ }
notebooks/SimpleTraditionalChatbot.ipynb ADDED
@@ -0,0 +1,522 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "0c68164d-7eea-473c-9722-8bc92564fa6f",
6
+ "metadata": {},
7
+ "source": [
8
+ "# **Jupyter notebook front-end (drop-in cells)**\n"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "markdown",
13
+ "id": "298d485d-5391-47f5-9844-b5f5945324fd",
14
+ "metadata": {},
15
+ "source": [
16
+ "**Smoke tests (copy/paste)**\n",
17
+ "\n",
18
+ "**Run these whenever something feels off.**\n",
19
+ "\n",
20
+ "**A. Confirm the router is mounted**"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "code",
25
+ "execution_count": 20,
26
+ "id": "bd2a2e6d-8b6e-4630-9aeb-d6c0cbd1b78d",
27
+ "metadata": {
28
+ "jupyter": {
29
+ "source_hidden": true
30
+ }
31
+ },
32
+ "outputs": [
33
+ {
34
+ "name": "stdout",
35
+ "output_type": "stream",
36
+ "text": [
37
+ "[ui] serving SPA from: C:\\Users\\User\\PortaeOS-skeleton\\packages\\shell\\dist\n"
38
+ ]
39
+ },
40
+ {
41
+ "data": {
42
+ "text/plain": [
43
+ "[('/openapi.json', {'GET', 'HEAD'}),\n",
44
+ " ('/docs', {'GET', 'HEAD'}),\n",
45
+ " ('/docs/oauth2-redirect', {'GET', 'HEAD'}),\n",
46
+ " ('/redoc', {'GET', 'HEAD'}),\n",
47
+ " ('/health', {'GET'}),\n",
48
+ " ('/status', {'GET'}),\n",
49
+ " ('/services', {'GET'}),\n",
50
+ " ('/services/{name}/start', {'POST'}),\n",
51
+ " ('/services/{name}/stop', {'POST'}),\n",
52
+ " ('/services/{name}/restart', {'POST'}),\n",
53
+ " ('/logs/{service}/tail', {'GET'}),\n",
54
+ " ('/logs/bundle.zip', {'GET'}),\n",
55
+ " ('/favorites', {'GET'}),\n",
56
+ " ('/macros/open', {'POST'}),\n",
57
+ " ('/license/validate', {'POST'}),\n",
58
+ " ('/agents/run', {'POST'}),\n",
59
+ " ('/terminals', None),\n",
60
+ " ('/storefront/postman-collection', {'GET'}),\n",
61
+ " ('/office/status', {'GET'}),\n",
62
+ " ('/office/run-macro', {'POST'}),\n",
63
+ " ('/ui', None),\n",
64
+ " ('/', {'GET'}),\n",
65
+ " ('/ui/{_:path}', {'GET'}),\n",
66
+ " ('/ai/ping', {'GET'}),\n",
67
+ " ('/ai/health', {'GET'}),\n",
68
+ " ('/ai/agents/dispatch', {'POST'}),\n",
69
+ " ('/ai/ingest', {'POST'}),\n",
70
+ " ('/ai/search', {'GET'}),\n",
71
+ " ('/ai/chat', {'POST'}),\n",
72
+ " ('/recode/health', {'GET'}),\n",
73
+ " ('/terminals/{tid}', None),\n",
74
+ " ('/terminals/spawn/{tid}', {'GET'}),\n",
75
+ " ('/terminals/kill/{tid}', {'GET'}),\n",
76
+ " ('/terminals/{tid}/resize', {'POST'})]"
77
+ ]
78
+ },
79
+ "execution_count": 20,
80
+ "metadata": {},
81
+ "output_type": "execute_result"
82
+ }
83
+ ],
84
+ "source": [
85
+ "import os\n",
86
+ "os.chdir(r\"C:\\Users\\User\\PortaeOS-skeleton\\packages\\agenticcore\") # <-- adjust to your repo root\n",
87
+ "\n",
88
+ "# Python one-liner in the same env where the server runs\n",
89
+ "import sys; sys.path.insert(0,'.')\n",
90
+ "import backend.app.main as m\n",
91
+ "[(getattr(r,'path',None), getattr(r,'methods',None)) for r in m.app.routes]\n",
92
+ "\n",
93
+ "# Expect to see ('/chatbot/message', {'POST'}) in the list\n"
94
+ ]
95
+ },
96
+ {
97
+ "cell_type": "markdown",
98
+ "id": "2705c51f-c966-4183-a2c6-a54534e035ae",
99
+ "metadata": {},
100
+ "source": [
101
+ "(You already did a version of this and saw /chatbot/message appear — perfect.)\n",
102
+ "\n",
103
+ "**B. Health → Chat via API**"
104
+ ]
105
+ },
106
+ {
107
+ "cell_type": "code",
108
+ "execution_count": 6,
109
+ "id": "0e622f18-d769-4003-a915-41511c5240e1",
110
+ "metadata": {},
111
+ "outputs": [
112
+ {
113
+ "name": "stdout",
114
+ "output_type": "stream",
115
+ "text": [
116
+ "Health: {'ok': True, 'version': '0.3.0', 'time': 1757812216}\n",
117
+ "Reply: 200 {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5, 'thread': None}\n"
118
+ ]
119
+ }
120
+ ],
121
+ "source": [
122
+ "import requests, json, os\n",
123
+ "BASE = os.environ.get(\"BACKEND_URL\",\"http://127.0.0.1:8000\").rstrip(\"/\")\n",
124
+ "print(\"Health:\", requests.get(BASE+\"/health\").json())\n",
125
+ "r = requests.post(BASE+\"/chatbot/message\", json={\"message\":\"hello via api\"})\n",
126
+ "print(\"Reply:\", r.status_code, r.json())\n"
127
+ ]
128
+ },
129
+ {
130
+ "cell_type": "markdown",
131
+ "id": "02aeef6d-dd70-476d-83a2-48e7732eba78",
132
+ "metadata": {},
133
+ "source": [
134
+ "**C. Library path (no server required)**"
135
+ ]
136
+ },
137
+ {
138
+ "cell_type": "code",
139
+ "execution_count": 7,
140
+ "id": "11a88e5d-9ae6-4d44-a1b4-3c89716dba28",
141
+ "metadata": {},
142
+ "outputs": [
143
+ {
144
+ "name": "stdout",
145
+ "output_type": "stream",
146
+ "text": [
147
+ "{'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n"
148
+ ]
149
+ }
150
+ ],
151
+ "source": [
152
+ "from agenticcore.chatbot.services import ChatBot\n",
153
+ "print(ChatBot().reply(\"hello via library\"))\n"
154
+ ]
155
+ },
156
+ {
157
+ "cell_type": "markdown",
158
+ "id": "89fd0fdf-c799-4e4d-8c63-c430f9d8f3b3",
159
+ "metadata": {},
160
+ "source": [
161
+ "# **Minimal notebook “front-end” cells (drop into top of your .ipynb)**\n",
162
+ "\n",
163
+ "These mirror your working UI and give you pass/fail signals inside the notebook."
164
+ ]
165
+ },
166
+ {
167
+ "cell_type": "code",
168
+ "execution_count": 8,
169
+ "id": "65acc3b5-ec39-4b53-9c1e-be6eee9928ac",
170
+ "metadata": {},
171
+ "outputs": [
172
+ {
173
+ "name": "stdout",
174
+ "output_type": "stream",
175
+ "text": [
176
+ "BACKEND_URL = http://127.0.0.1:8000\n",
177
+ "Health: {'ok': True, 'version': '0.3.0', 'time': 1757812221}\n"
178
+ ]
179
+ }
180
+ ],
181
+ "source": [
182
+ "# Cell 1: config + helpers\n",
183
+ "import os, json, requests\n",
184
+ "BACKEND_URL = os.environ.get(\"BACKEND_URL\", \"http://127.0.0.1:8000\").rstrip(\"/\")\n",
185
+ "\n",
186
+ "def health(url: str = BACKEND_URL): \n",
187
+ " r = requests.get(url + \"/health\", timeout=10); r.raise_for_status(); return r.json()\n",
188
+ "\n",
189
+ "def send_via_api(message: str, url: str = BACKEND_URL):\n",
190
+ " r = requests.post(url + \"/chatbot/message\", json={\"message\": message}, timeout=20)\n",
191
+ " r.raise_for_status(); return r.json()\n",
192
+ "\n",
193
+ "def send_via_library(message: str):\n",
194
+ " from agenticcore.chatbot.services import ChatBot\n",
195
+ " return ChatBot().reply(message)\n",
196
+ "\n",
197
+ "print(\"BACKEND_URL =\", BACKEND_URL)\n",
198
+ "print(\"Health:\", health())\n"
199
+ ]
200
+ },
201
+ {
202
+ "cell_type": "code",
203
+ "execution_count": 9,
204
+ "id": "b00f82ca-faea-43b3-b35a-03fdc0d1c2b3",
205
+ "metadata": {},
206
+ "outputs": [
207
+ {
208
+ "name": "stdout",
209
+ "output_type": "stream",
210
+ "text": [
211
+ "Library OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n",
212
+ "API OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5, 'thread': None}\n"
213
+ ]
214
+ }
215
+ ],
216
+ "source": [
217
+ "# Cell 2: quick acceptance checks\n",
218
+ "lib = send_via_library(\"hello\")\n",
219
+ "assert all(k in lib for k in (\"reply\",\"sentiment\",\"confidence\"))\n",
220
+ "print(\"Library OK:\", lib)\n",
221
+ "\n",
222
+ "api = send_via_api(\"hello from api\")\n",
223
+ "assert all(k in api for k in (\"reply\",\"sentiment\",\"confidence\"))\n",
224
+ "print(\"API OK:\", api)\n"
225
+ ]
226
+ },
227
+ {
228
+ "cell_type": "markdown",
229
+ "id": "7b4dcea8-0a06-4734-8780-f4180b4ceec8",
230
+ "metadata": {},
231
+ "source": [
232
+ "**Cell 1 — Config & helpers**"
233
+ ]
234
+ },
235
+ {
236
+ "cell_type": "code",
237
+ "execution_count": 10,
238
+ "id": "93d23e33-c19f-423f-9e89-b98295637075",
239
+ "metadata": {},
240
+ "outputs": [
241
+ {
242
+ "name": "stdout",
243
+ "output_type": "stream",
244
+ "text": [
245
+ "BACKEND_URL = http://127.0.0.1:8000\n"
246
+ ]
247
+ }
248
+ ],
249
+ "source": [
250
+ "# Notebook Config\n",
251
+ "import os, json, requests\n",
252
+ "from typing import Dict, Any\n",
253
+ "\n",
254
+ "BACKEND_URL = os.environ.get(\"BACKEND_URL\", \"http://127.0.0.1:8000\").rstrip(\"/\")\n",
255
+ "\n",
256
+ "def health(url: str = BACKEND_URL) -> Dict[str, Any]:\n",
257
+ " \"\"\"GET /health to verify server is up.\"\"\"\n",
258
+ " r = requests.get(url + \"/health\", timeout=10)\n",
259
+ " r.raise_for_status()\n",
260
+ " return r.json()\n",
261
+ "\n",
262
+ "def send_via_api(message: str, url: str = BACKEND_URL) -> Dict[str, Any]:\n",
263
+ " \"\"\"POST to FastAPI /chatbot/message. Returns reply/sentiment/confidence.\"\"\"\n",
264
+ " r = requests.post(url + \"/chatbot/message\", json={\"message\": message}, timeout=20)\n",
265
+ " r.raise_for_status()\n",
266
+ " return r.json()\n",
267
+ "\n",
268
+ "def send_via_library(message: str) -> Dict[str, Any]:\n",
269
+ " \"\"\"Call ChatBot() directly (no server needed).\"\"\"\n",
270
+ " from agenticcore.chatbot.services import ChatBot\n",
271
+ " return ChatBot().reply(message)\n",
272
+ "\n",
273
+ "print(\"BACKEND_URL =\", BACKEND_URL)\n"
274
+ ]
275
+ },
276
+ {
277
+ "cell_type": "markdown",
278
+ "id": "3239843e-75b6-4bb7-8e6c-e20eb160e0a6",
279
+ "metadata": {},
280
+ "source": [
281
+ "**Cell 2 — Widget UI (switch API / Library)**"
282
+ ]
283
+ },
284
+ {
285
+ "cell_type": "code",
286
+ "execution_count": 11,
287
+ "id": "f688b856-cb39-45ae-b5d3-ff20564893c0",
288
+ "metadata": {},
289
+ "outputs": [
290
+ {
291
+ "data": {
292
+ "application/vnd.jupyter.widget-view+json": {
293
+ "model_id": "160ab5a3d8a54eccbaf75eec203babae",
294
+ "version_major": 2,
295
+ "version_minor": 0
296
+ },
297
+ "text/plain": [
298
+ "HBox(children=(ToggleButtons(description='Route:', options=(('API', 'api'), ('Library', 'lib')), value='api'),…"
299
+ ]
300
+ },
301
+ "metadata": {},
302
+ "output_type": "display_data"
303
+ },
304
+ {
305
+ "data": {
306
+ "application/vnd.jupyter.widget-view+json": {
307
+ "model_id": "70a92ad1d523445781c212d302c44556",
308
+ "version_major": 2,
309
+ "version_minor": 0
310
+ },
311
+ "text/plain": [
312
+ "HBox(children=(Text(value='', description='You:', layout=Layout(width='60%'), placeholder='Type a message…'), …"
313
+ ]
314
+ },
315
+ "metadata": {},
316
+ "output_type": "display_data"
317
+ },
318
+ {
319
+ "data": {
320
+ "application/vnd.jupyter.widget-view+json": {
321
+ "model_id": "bc59056493eb4380a6559c0d88f373cb",
322
+ "version_major": 2,
323
+ "version_minor": 0
324
+ },
325
+ "text/plain": [
326
+ "Output()"
327
+ ]
328
+ },
329
+ "metadata": {},
330
+ "output_type": "display_data"
331
+ },
332
+ {
333
+ "data": {
334
+ "text/html": [
335
+ "<div style=\"margin-top:8px;opacity:.8\">Tip: ensure FastAPI exposes <code>/chatbot/message</code>. Switch to Library for offline tests.</div>"
336
+ ],
337
+ "text/plain": [
338
+ "<IPython.core.display.HTML object>"
339
+ ]
340
+ },
341
+ "metadata": {},
342
+ "output_type": "display_data"
343
+ }
344
+ ],
345
+ "source": [
346
+ "import ipywidgets as W\n",
347
+ "from IPython.display import display, HTML\n",
348
+ "\n",
349
+ "mode = W.ToggleButtons(options=[(\"API\", \"api\"), (\"Library\", \"lib\")], value=\"api\", description=\"Route:\")\n",
350
+ "backend = W.Text(value=BACKEND_URL, description=\"Backend:\", layout=W.Layout(width=\"60%\"))\n",
351
+ "save_btn = W.Button(description=\"Save\", button_style=\"info\")\n",
352
+ "msg = W.Text(placeholder=\"Type a message…\", description=\"You:\", layout=W.Layout(width=\"60%\"))\n",
353
+ "send_btn = W.Button(description=\"Send\", button_style=\"primary\")\n",
354
+ "cap_btn = W.Button(description=\"Capabilities\")\n",
355
+ "out = W.Output()\n",
356
+ "\n",
357
+ "def on_save(_):\n",
358
+ " os.environ[\"BACKEND_URL\"] = backend.value.strip().rstrip(\"/\")\n",
359
+ " with out: print(\"[config] BACKEND_URL =\", os.environ[\"BACKEND_URL\"])\n",
360
+ "\n",
361
+ "def on_send(_):\n",
362
+ " text = msg.value.strip()\n",
363
+ " if not text:\n",
364
+ " with out: print(\"[warn] Please enter some text.\")\n",
365
+ " return\n",
366
+ " try:\n",
367
+ " data = send_via_api(text, backend.value.strip()) if mode.value == \"api\" else send_via_library(text)\n",
368
+ " with out: print(json.dumps(data, indent=2, ensure_ascii=False))\n",
369
+ " except Exception as e:\n",
370
+ " with out: print(f\"[error] {e}\")\n",
371
+ "\n",
372
+ "def on_caps(_):\n",
373
+ " try:\n",
374
+ " from agenticcore.chatbot.services import ChatBot\n",
375
+ " with out: print(json.dumps({\"capabilities\": ChatBot().capabilities()}, indent=2))\n",
376
+ " except Exception as e:\n",
377
+ " with out: print(f\"[error capabilities] {e}\")\n",
378
+ "\n",
379
+ "save_btn.on_click(on_save); send_btn.on_click(on_send); cap_btn.on_click(on_caps)\n",
380
+ "\n",
381
+ "display(W.HBox([mode, backend, save_btn]))\n",
382
+ "display(W.HBox([msg, send_btn, cap_btn]))\n",
383
+ "display(out)\n",
384
+ "display(HTML('<div style=\"margin-top:8px;opacity:.8\">Tip: ensure FastAPI exposes <code>/chatbot/message</code>. Switch to Library for offline tests.</div>'))\n"
385
+ ]
386
+ },
387
+ {
388
+ "cell_type": "markdown",
389
+ "id": "85999cc1-26e2-497d-95c1-ffb8c9210240",
390
+ "metadata": {},
391
+ "source": [
392
+ "**Cell 3 — Smoke checks (acceptance)**"
393
+ ]
394
+ },
395
+ {
396
+ "cell_type": "code",
397
+ "execution_count": 12,
398
+ "id": "3fc28db0-5aa6-4813-9663-f19d6937e39b",
399
+ "metadata": {},
400
+ "outputs": [
401
+ {
402
+ "name": "stdout",
403
+ "output_type": "stream",
404
+ "text": [
405
+ "Health: {'ok': True, 'version': '0.3.0', 'time': 1757812228}\n",
406
+ "Library OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n",
407
+ "API OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5, 'thread': None}\n"
408
+ ]
409
+ }
410
+ ],
411
+ "source": [
412
+ "# Backend health (if running)\n",
413
+ "try:\n",
414
+ " print(\"Health:\", health(backend.value.strip()))\n",
415
+ "except Exception as e:\n",
416
+ " print(\"Health check failed:\", e)\n",
417
+ "\n",
418
+ "# Library path always available\n",
419
+ "sample = send_via_library(\"hello\")\n",
420
+ "assert all(k in sample for k in (\"reply\", \"sentiment\", \"confidence\"))\n",
421
+ "print(\"Library OK:\", sample)\n",
422
+ "\n",
423
+ "# API path (requires uvicorn backend running)\n",
424
+ "try:\n",
425
+ " sample_api = send_via_api(\"hello from api\", backend.value.strip())\n",
426
+ " assert all(k in sample_api for k in (\"reply\", \"sentiment\", \"confidence\"))\n",
427
+ " print(\"API OK:\", sample_api)\n",
428
+ "except Exception as e:\n",
429
+ " print(\"API test failed (start uvicorn?):\", e)\n"
430
+ ]
431
+ },
432
+ {
433
+ "cell_type": "markdown",
434
+ "id": "caeb5108-f0fb-4d5e-ad2a-56011904e397",
435
+ "metadata": {},
436
+ "source": [
437
+ "**Cell 4 — Minimal report cell (optional screenshots prompt)**"
438
+ ]
439
+ },
440
+ {
441
+ "cell_type": "code",
442
+ "execution_count": 13,
443
+ "id": "8a3d85fc-6c2d-4c99-a286-4e9af2085d64",
444
+ "metadata": {},
445
+ "outputs": [
446
+ {
447
+ "data": {
448
+ "text/markdown": [
449
+ "\n",
450
+ "### What to capture for the report\n",
451
+ "- Screenshot of **/health** and a successful **/chatbot/message** call.\n",
452
+ "- Notebook output using **API** mode and **Library** mode.\n",
453
+ "- Short note: environment variables used (e.g., `MICROSOFT_AI_*`, `AI_PROVIDER`, `HF_API_KEY`).\n",
454
+ "- Brief discussion of any errors and fixes (e.g., route mounting, ports).\n"
455
+ ],
456
+ "text/plain": [
457
+ "<IPython.core.display.Markdown object>"
458
+ ]
459
+ },
460
+ "execution_count": 13,
461
+ "metadata": {},
462
+ "output_type": "execute_result"
463
+ }
464
+ ],
465
+ "source": [
466
+ "from IPython.display import Markdown\n",
467
+ "Markdown(\"\"\"\n",
468
+ "### What to capture for the report\n",
469
+ "- Screenshot of **/health** and a successful **/chatbot/message** call.\n",
470
+ "- Notebook output using **API** mode and **Library** mode.\n",
471
+ "- Short note: environment variables used (e.g., `MICROSOFT_AI_*`, `AI_PROVIDER`, `HF_API_KEY`).\n",
472
+ "- Brief discussion of any errors and fixes (e.g., route mounting, ports).\n",
473
+ "\"\"\")\n"
474
+ ]
475
+ },
476
+ {
477
+ "cell_type": "code",
478
+ "execution_count": null,
479
+ "id": "0fcc4e15-82c2-49e8-86c5-86b80a5b691a",
480
+ "metadata": {},
481
+ "outputs": [],
482
+ "source": []
483
+ },
484
+ {
485
+ "cell_type": "code",
486
+ "execution_count": null,
487
+ "id": "c3e950a2-af8a-4a69-802b-7b2f81b9245c",
488
+ "metadata": {},
489
+ "outputs": [],
490
+ "source": []
491
+ },
492
+ {
493
+ "cell_type": "code",
494
+ "execution_count": null,
495
+ "id": "ccb42ea3-2d61-4451-9ee8-f8e0755ff89c",
496
+ "metadata": {},
497
+ "outputs": [],
498
+ "source": []
499
+ }
500
+ ],
501
+ "metadata": {
502
+ "kernelspec": {
503
+ "display_name": "Python (stock_ai)",
504
+ "language": "python",
505
+ "name": "stock_ai"
506
+ },
507
+ "language_info": {
508
+ "codemirror_mode": {
509
+ "name": "ipython",
510
+ "version": 3
511
+ },
512
+ "file_extension": ".py",
513
+ "mimetype": "text/x-python",
514
+ "name": "python",
515
+ "nbconvert_exporter": "python",
516
+ "pygments_lexer": "ipython3",
517
+ "version": "3.9.20"
518
+ }
519
+ },
520
+ "nbformat": 4,
521
+ "nbformat_minor": 5
522
+ }
pyproject.toml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # pyproject.toml
2
+ [tool.black]
3
+ line-length = 100
4
+ target-version = ["py310"]
5
+
6
+ [tool.isort]
7
+ profile = "black"
8
+
9
+ [tool.pytest.ini_options]
10
+ addopts = "-q"
requirements.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio>=4.0
2
+ transformers>=4.41.0
3
+ torch>=2.2.0
4
+ scikit-learn>=1.3.0
5
+ pandas>=2.1.0
6
+ numpy>=1.26.0
7
+ pytest>=7.4.0
8
+ # Optional Azure
9
+ azure-ai-textanalytics>=5.3.0
10
+ python-dotenv>=1.0
11
+ fastapi>=0.115.0
12
+ uvicorn[standard]>=0.30.0
13
+ # Optional for Bot Framework sample:
14
+ # aiohttp>=3.9
15
+ # botbuilder-core>=4.14
samples/service.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# /samples/service.py
"""Sample façade showing how to wrap the unified provider layer in a ChatBot class."""
import os
from typing import Any, Dict

# Use the unified provider layer (HF, Azure, OpenAI, Cohere, DeepAI, or offline).
# NOTE(review): the repo tree shows agenticcore/providers_unified.py at the root —
# confirm the 'packages.agenticcore.agenticcore' prefix matches the deployed layout.
from packages.agenticcore.agenticcore.providers_unified import analyze_sentiment


class ChatBot:
    """
    Thin façade over provider-agnostic functions.

    - Provider selection is automatic unless AI_PROVIDER is set
      (hf|azure|openai|cohere|deepai|offline).
    - Reply shape: {"reply": str, "sentiment": str, "confidence": float}
    """

    def __init__(self) -> None:
        # Optional: pin a provider via env; otherwise providers_unified auto-detects.
        self.provider: str = os.getenv("AI_PROVIDER") or "auto"

    def reply(self, message: str) -> Dict[str, Any]:
        """Analyze *message* and return the documented reply dict.

        Empty/None input yields an "unknown" sentiment prompt; 'help' (or
        '/help') returns the capability description.
        """
        msg = (message or "").strip()
        if not msg:
            return {"reply": "Please enter some text.", "sentiment": "unknown", "confidence": 0.0}

        if msg.lower() in {"help", "/help"}:
            return {
                "reply": self._help_text(),
                # Keep the documented reply shape even for the help command so
                # callers that assert on reply/sentiment/confidence don't break.
                "sentiment": "neutral",
                "confidence": 1.0,
                "capabilities": {
                    "system": "chatbot",
                    "mode": self.provider,
                    "features": ["text-input", "sentiment-analysis", "help"],
                    "commands": {"help": "Describe capabilities and usage."},
                },
            }

        s = analyze_sentiment(msg)  # -> {"provider","label","score",...}
        label = str(s.get("label", "neutral"))
        score = float(s.get("score", 0.5))

        # Keep the same phrasing used elsewhere so surfaces are consistent.
        return {"reply": self._compose(label), "sentiment": label, "confidence": round(score, 2)}

    @staticmethod
    def _compose(label: str) -> str:
        """Map a sentiment label to the shared canned response."""
        responses = {
            "positive": "Thanks for sharing. I detected a positive sentiment.",
            "negative": "I hear your concern. I detected a negative sentiment.",
            "neutral": "Noted. The sentiment appears neutral.",
            "mixed": "Your message has mixed signals. Can you clarify?",
        }
        return responses.get(label, "I could not determine the sentiment. Please rephrase.")

    @staticmethod
    def _help_text() -> str:
        """One-line usage blurb returned by the 'help' command."""
        return "I analyze sentiment and respond concisely. Send any text or type 'help'."
scripts/check_compliance.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # /scripts/check_compliance.py
2
+
3
+ # Fails if disallowed deps appear (placeholder)
scripts/run_local.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
#!/usr/bin/env bash
# /scripts/run_local.sh
# Launch the Gradio storefront chatbot locally on port 7860.
# NOTE: the shebang must be the first line of the file or the kernel ignores it
# and the script runs under the caller's default shell (set -o pipefail is bash-only).
set -euo pipefail
export PYTHONPATH=.
python -c "from storefront_chatbot.app.app import build; build().launch(server_name='0.0.0.0', server_port=7860)"