diff --git a/.env.sample b/.env.sample new file mode 100644 index 0000000000000000000000000000000000000000..1e7bc63420b19e37135f21bb8d59e2f1c3ce1e15 --- /dev/null +++ b/.env.sample @@ -0,0 +1,8 @@ +# .env.sample +# Feature toggles +AZURE_ENABLED=false +SENTIMENT_ENABLED=false +DB_URL=memory:// +# Azure (optional) +AZURE_TEXT_ANALYTICS_ENDPOINT= +AZURE_TEXT_ANALYTICS_KEY= diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..5f5636501fb59339863a6be7d0d116d21cb0bda8 --- /dev/null +++ b/Makefile @@ -0,0 +1,11 @@ +.PHONY: dev test run seed check +dev: + pip install -r requirements.txt +test: + pytest -q +run: + export PYTHONPATH=. && python -c "from storefront_chatbot.app.app import build; build().launch(server_name='0.0.0.0', server_port=7860)" +seed: + python storefront_chatbot/scripts/seed_data.py +check: + python storefront_chatbot/scripts/check_compliance.py diff --git a/README.md b/README.md index 8f50efc18b92576e78a288a2df3f5325628e6c22..4a04db3764e2b0d3b2493529d00e411cede37b0e 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,43 @@ + # Agentic-Chat-bot- -Agentic Chat-bot with RAG, Memory, and Privacy Considerations +Agentic Chat-bot with RAG, Memory, and Privacy Considerations. + +# Storefront Chatbot + +This repo follows a modular layout with a Gradio UI, NLU pipeline, anonymous and logged-in flows, +guardrails, and optional Azure sentiment. 
+ +## Quickstart +```bash +make dev +make run +# open http://localhost:7860 +``` + +## Agentic Integration +- Core bot: `agenticcore/chatbot/services.py` +- Providers: `agenticcore/providers_unified.py` +- CLI: `python -m agenticcore.cli agentic "hello"` (loads .env) +- FastAPI demo: `uvicorn integrations.web.fastapi.web_agentic:app --reload` + +## Added Samples & Tests +- chat.html → `app/assets/html/chat.html` +- echo_bot.py → `integrations/botframework/bots/echo_bot.py` +- ChatbotIntegration.ipynb → `notebooks/ChatbotIntegration.ipynb` +- SimpleTraditionalChatbot.ipynb → `notebooks/SimpleTraditionalChatbot.ipynb` +- smoke_test.py → `tests/smoke_test.py` +- test_routes.py → `tests/test_routes.py` +- quick_sanity.py → `tools/quick_sanity.py` +- example.py → `examples/example.py` +- service.py → `samples/service.py` +- DEV_DOC.md → `docs/DEV_DOC.md` + +Run `pytest -q` for tests; open HTML in `app/assets/html/` to try local UIs. + + +--- +This is the **unified** storefront-chatbot bundle. 
+Duplicates from earlier skeletons were removed; priority order was: +1) storefront_chatbot_final_bundle +2) storefront_chatbot_merged_with_agentic +3) storefront_chatbot_skeleton diff --git a/agenticcore/__init__.py b/agenticcore/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5bb534f795ae0f566ba9f57c31944d8c6f45284c --- /dev/null +++ b/agenticcore/__init__.py @@ -0,0 +1 @@ +# package diff --git a/agenticcore/chatbot/__init__.py b/agenticcore/chatbot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5bb534f795ae0f566ba9f57c31944d8c6f45284c --- /dev/null +++ b/agenticcore/chatbot/__init__.py @@ -0,0 +1 @@ +# package diff --git a/agenticcore/chatbot/services.py b/agenticcore/chatbot/services.py new file mode 100644 index 0000000000000000000000000000000000000000..560ba85b5925c4ab7d139117f92f5a8e9e93a1ff --- /dev/null +++ b/agenticcore/chatbot/services.py @@ -0,0 +1,103 @@ +# /agenticcore/chatbot/services.py +from __future__ import annotations + +import json +import os +from dataclasses import dataclass +from typing import Dict + +# Delegate sentiment to the unified provider layer +# If you put providers_unified.py under agenticcore/chatbot/, change the import to: +# from agenticcore.chatbot.providers_unified import analyze_sentiment +from agenticcore.providers_unified import analyze_sentiment +from ..providers_unified import analyze_sentiment + + +def _trim(s: str, max_len: int = 2000) -> str: + s = (s or "").strip() + return s if len(s) <= max_len else s[: max_len - 1] + "…" + + +@dataclass(frozen=True) +class SentimentResult: + label: str # "positive" | "neutral" | "negative" | "mixed" | "unknown" + confidence: float # 0.0 .. 1.0 + + +class ChatBot: + """ + Minimal chatbot that uses provider-agnostic sentiment via providers_unified. 
+ Public API: + - reply(text: str) -> Dict[str, object] + - capabilities() -> Dict[str, object] + """ + + def __init__(self, system_prompt: str = "You are a concise helper.") -> None: + self._system_prompt = _trim(system_prompt, 800) + # Expose which provider is intended/active (for diagnostics) + self._mode = os.getenv("AI_PROVIDER") or "auto" + + def capabilities(self) -> Dict[str, object]: + """List what this bot can do.""" + return { + "system": "chatbot", + "mode": self._mode, # "auto" or a pinned provider (hf/azure/openai/cohere/deepai/offline) + "features": ["text-input", "sentiment-analysis", "help"], + "commands": {"help": "Describe capabilities and usage."}, + } + + def reply(self, text: str) -> Dict[str, object]: + """Produce a reply and sentiment for one user message.""" + user = _trim(text) + if not user: + return self._make_response( + "I didn't catch that. Please provide some text.", + SentimentResult("unknown", 0.0), + ) + + if user.lower() in {"help", "/help"}: + return {"reply": self._format_help(), "capabilities": self.capabilities()} + + s = analyze_sentiment(user) # -> {"provider", "label", "score", ...} + sr = SentimentResult(label=str(s.get("label", "neutral")), confidence=float(s.get("score", 0.5))) + return self._make_response(self._compose(sr), sr) + + # ---- internals ---- + + def _format_help(self) -> str: + caps = self.capabilities() + feats = ", ".join(caps["features"]) + return f"I can analyze sentiment and respond concisely. Features: {feats}. Send any text or type 'help'." + + @staticmethod + def _make_response(reply: str, s: SentimentResult) -> Dict[str, object]: + return {"reply": reply, "sentiment": s.label, "confidence": round(float(s.confidence), 2)} + + @staticmethod + def _compose(s: SentimentResult) -> str: + if s.label == "positive": + return "Thanks for sharing. I detected a positive sentiment." + if s.label == "negative": + return "I hear your concern. I detected a negative sentiment." 
+ if s.label == "neutral": + return "Noted. The sentiment appears neutral." + if s.label == "mixed": + return "Your message has mixed signals. Can you clarify?" + return "I could not determine the sentiment. Please rephrase." + + +# Optional: local REPL for quick manual testing +def _interactive_loop() -> None: + bot = ChatBot() + try: + while True: + msg = input("> ").strip() + if msg.lower() in {"exit", "quit"}: + break + print(json.dumps(bot.reply(msg), ensure_ascii=False)) + except (EOFError, KeyboardInterrupt): + pass + + +if __name__ == "__main__": + _interactive_loop() diff --git a/agenticcore/cli.py b/agenticcore/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..d12f6d052ebcac15ba121b6583449deb4926ee23 --- /dev/null +++ b/agenticcore/cli.py @@ -0,0 +1,187 @@ +# /agenticcore/cli.py +""" +agenticcore.cli +Console entrypoints: + - agentic: send a message to ChatBot and print reply JSON + - repo-tree: print a filtered tree view (uses tree.txt if present) + - repo-flatten: flatten code listing to stdout (uses FLATTENED_CODE.txt if present) +""" +import argparse, json, sys, traceback +from pathlib import Path +from dotenv import load_dotenv +import os + +# Load .env variables into os.environ (project root .env by default) +load_dotenv() + + +def cmd_agentic(argv=None): + # Lazy import so other commands don't require ChatBot to be importable + from agenticcore.chatbot.services import ChatBot + # We call analyze_sentiment only for 'status' to reveal the actual chosen provider + try: + from agenticcore.providers_unified import analyze_sentiment + except Exception: + analyze_sentiment = None # still fine; we'll show mode only + + p = argparse.ArgumentParser(prog="agentic", description="Chat with AgenticCore ChatBot") + p.add_argument("message", nargs="*", help="Message to send") + p.add_argument("--debug", action="store_true", help="Print debug info") + args = p.parse_args(argv) + msg = " ".join(args.message).strip() or "hello" + + if 
# /agenticcore/cli.py
"""
agenticcore.cli
Console entrypoints:
  - agentic: send a message to ChatBot and print reply JSON
  - repo-tree: print a filtered tree view (uses tree.txt if present)
  - repo-flatten: flatten code listing to stdout (uses FLATTENED_CODE.txt if present)
"""
import argparse
import json
import os
import sys
import traceback
from pathlib import Path

from dotenv import load_dotenv

# Load .env variables into os.environ (project root .env by default)
load_dotenv()


def cmd_agentic(argv=None):
    """Send a message to ChatBot and print the reply as indented JSON.

    Special messages: "status" prints a provider/auth diagnostic report;
    "help" prints the bot's capabilities.
    """
    # Lazy import so other commands don't require ChatBot to be importable
    from agenticcore.chatbot.services import ChatBot
    # We call analyze_sentiment only for 'status' to reveal the actual chosen provider
    try:
        from agenticcore.providers_unified import analyze_sentiment
    except Exception:
        analyze_sentiment = None  # still fine; we'll show mode only

    p = argparse.ArgumentParser(prog="agentic", description="Chat with AgenticCore ChatBot")
    p.add_argument("message", nargs="*", help="Message to send")
    p.add_argument("--debug", action="store_true", help="Print debug info")
    args = p.parse_args(argv)
    msg = " ".join(args.message).strip() or "hello"

    if args.debug:
        print(f"DEBUG argv={sys.argv}", flush=True)
        print(f"DEBUG raw message='{msg}'", flush=True)

    bot = ChatBot()

    # Special commands for testing / assignments
    # (a duplicated copy of this comment was removed)
    if msg.lower() == "status":
        import requests  # local import to avoid hard dep for other commands

        # Try a lightweight provider probe via analyze_sentiment
        provider = None
        if analyze_sentiment is not None:
            try:
                probe = analyze_sentiment("status ping")
                provider = (probe or {}).get("provider")
            except Exception:
                if args.debug:
                    traceback.print_exc()

        # Hugging Face whoami auth probe
        tok = os.getenv("HF_API_KEY", "")
        who = None
        auth_ok = False
        err = None
        try:
            if tok:
                r = requests.get(
                    "https://huggingface.co/api/whoami-v2",
                    headers={"Authorization": f"Bearer {tok}"},
                    timeout=15,
                )
                auth_ok = (r.status_code == 200)
                who = r.json() if auth_ok else None
                if not auth_ok:
                    err = r.text  # e.g., {"error":"Invalid credentials in Authorization header"}
            else:
                err = "HF_API_KEY not set (load .env or export it)"
        except Exception as e:
            err = str(e)

        # Extract fine-grained scopes for visibility
        fg = (((who or {}).get("auth") or {}).get("accessToken") or {}).get("fineGrained") or {}
        scoped = fg.get("scoped") or []
        global_scopes = fg.get("global") or []

        # ---- tiny inference ping (proves 'Make calls to Inference Providers') ----
        infer_ok, infer_err = False, None
        try:
            if tok:
                model = os.getenv(
                    "HF_MODEL_SENTIMENT",
                    "distilbert-base-uncased-finetuned-sst-2-english"
                )
                r2 = requests.post(
                    f"https://api-inference.huggingface.co/models/{model}",
                    headers={"Authorization": f"Bearer {tok}", "x-wait-for-model": "true"},
                    json={"inputs": "ping"},
                    timeout=int(os.getenv("HTTP_TIMEOUT", "60")),
                )
                infer_ok = (r2.status_code == 200)
                if not infer_ok:
                    infer_err = f"HTTP {r2.status_code}: {r2.text}"
        except Exception as e:
            infer_err = str(e)
        # -------------------------------------------------------------------------

        # Mask + length to verify what .env provided
        mask = (tok[:3] + "..." + tok[-4:]) if tok else None
        out = {
            "provider": provider or "unknown",
            "mode": getattr(bot, "_mode", "auto"),
            "auth_ok": auth_ok,
            "whoami": who,
            "token_scopes": {
                "global": global_scopes,
                "scoped": scoped,
            },
            "inference_ok": infer_ok,
            "inference_error": infer_err,
            "env": {
                "HF_API_KEY_len": len(tok) if tok else 0,
                "HF_API_KEY_mask": mask,
                "HF_MODEL_SENTIMENT": os.getenv("HF_MODEL_SENTIMENT"),
                "HTTP_TIMEOUT": os.getenv("HTTP_TIMEOUT"),
            },
            "capabilities": bot.capabilities(),
            "error": err,
        }

    elif msg.lower() == "help":
        out = {"capabilities": bot.capabilities()}

    else:
        try:
            out = bot.reply(msg)
        except Exception as e:
            if args.debug:
                traceback.print_exc()
            out = {"error": str(e), "message": msg}

    if args.debug:
        print(f"DEBUG out={out}", flush=True)

    print(json.dumps(out, indent=2), flush=True)


def cmd_repo_tree(argv=None):
    """Print the repo tree from a precomputed tree.txt (or a placeholder)."""
    p = argparse.ArgumentParser(prog="repo-tree", description="Print repo tree (from tree.txt if available)")
    p.add_argument("--path", default="tree.txt", help="Path to precomputed tree file")
    args = p.parse_args(argv)
    path = Path(args.path)
    if path.exists():
        print(path.read_text(encoding="utf-8"), flush=True)
    else:
        print("(no tree.txt found)", flush=True)


def cmd_repo_flatten(argv=None):
    """Print a pre-flattened code listing (or a placeholder if missing)."""
    p = argparse.ArgumentParser(prog="repo-flatten", description="Print flattened code listing")
    p.add_argument("--path", default="FLATTENED_CODE.txt", help="Path to pre-flattened code file")
    args = p.parse_args(argv)
    path = Path(args.path)
    if path.exists():
        print(path.read_text(encoding="utf-8"), flush=True)
    else:
        print("(no FLATTENED_CODE.txt found)", flush=True)


def _dispatch():
    """Route sys.argv to a subcommand; exit 2 on usage errors, 1 on crashes."""
    # Allow: python -m agenticcore.cli [args...]
    if len(sys.argv) <= 1:
        print("Usage: python -m agenticcore.cli [args]", file=sys.stderr)
        sys.exit(2)
    cmd, argv = sys.argv[1], sys.argv[2:]
    try:
        if cmd == "agentic":
            cmd_agentic(argv)
        elif cmd == "repo-tree":
            cmd_repo_tree(argv)
        elif cmd == "repo-flatten":
            cmd_repo_flatten(argv)
        else:
            print(f"Unknown subcommand: {cmd}", file=sys.stderr)
            sys.exit(2)
    except SystemExit:
        raise
    except Exception:
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    _dispatch()
"deepai" + return "offline" + +# --------------------------- +# Sentiment +# --------------------------- + +def analyze_sentiment(text: str) -> Dict[str, Any]: + provider = _pick_provider() + try: + if provider == "hf": return _sentiment_hf(text) + if provider == "azure": return _sentiment_azure(text) + if provider == "openai": return _sentiment_openai_prompt(text) + if provider == "cohere": return _sentiment_cohere_prompt(text) + if provider == "deepai": return _sentiment_deepai(text) + return _sentiment_offline(text) + except Exception as e: + return {"provider": provider, "label": "neutral", "score": 0.5, "error": str(e)} + +def _sentiment_offline(text: str) -> Dict[str, Any]: + t = (text or "").lower() + pos = any(w in t for w in ["love","great","good","awesome","fantastic","thank","excellent","amazing"]) + neg = any(w in t for w in ["hate","bad","terrible","awful","worst","angry","horrible"]) + label = "positive" if pos and not neg else "negative" if neg and not pos else "neutral" + score = 0.9 if label != "neutral" else 0.5 + return {"provider": "offline", "label": label, "score": score} + +def _sentiment_hf(text: str) -> Dict[str, Any]: + """ + Hugging Face Inference API for sentiment. + Uses canonical repo id and handles 404/401 and various payload shapes. 
+ """ + key = _env("HF_API_KEY") + if not key: + return _sentiment_offline(text) + + # canonical repo id to avoid 404 + model = _env("HF_MODEL_SENTIMENT", "distilbert/distilbert-base-uncased-finetuned-sst-2-english") + timeout = int(_env("HTTP_TIMEOUT", "30")) + + headers = { + "Authorization": f"Bearer {key}", + "x-wait-for-model": "true", + "Accept": "application/json", + "Content-Type": "application/json", + } + + r = requests.post( + f"https://api-inference.huggingface.co/models/{model}", + headers=headers, + json={"inputs": text}, + timeout=timeout, + ) + + if r.status_code != 200: + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": f"HTTP {r.status_code}: {r.text[:500]}"} + + try: + data = r.json() + except Exception as e: + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": str(e)} + + if isinstance(data, dict) and "error" in data: + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": data["error"]} + + # normalize list shape + arr = data[0] if isinstance(data, list) and data and isinstance(data[0], list) else (data if isinstance(data, list) else []) + if not (isinstance(arr, list) and arr): + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": f"Unexpected payload: {data}"} + + top = max(arr, key=lambda x: x.get("score", 0.0) if isinstance(x, dict) else 0.0) + raw = str(top.get("label", "")).upper() + score = float(top.get("score", 0.5)) + + mapping = { + "LABEL_0": "negative", "LABEL_1": "neutral", "LABEL_2": "positive", + "NEGATIVE": "negative", "NEUTRAL": "neutral", "POSITIVE": "positive", + } + label = mapping.get(raw, (raw.lower() or "neutral")) + + neutral_floor = float(os.getenv("SENTIMENT_NEUTRAL_THRESHOLD", "0.65")) + if label in {"positive", "negative"} and score < neutral_floor: + label = "neutral" + + return {"provider": "hf", "label": label, "score": score} + +def _sentiment_azure(text: str) -> Dict[str, Any]: + try: + from azure.core.credentials import AzureKeyCredential # 
type: ignore + from azure.ai.textanalytics import TextAnalyticsClient # type: ignore + except Exception: + return _sentiment_offline(text) + endpoint = _env("MICROSOFT_AI_SERVICE_ENDPOINT") + key = _env("MICROSOFT_AI_API_KEY") + if not (endpoint and key): return _sentiment_offline(text) + client = TextAnalyticsClient(endpoint=endpoint.strip(), credential=AzureKeyCredential(key.strip())) + resp = client.analyze_sentiment(documents=[text], show_opinion_mining=False)[0] + scores = { + "positive": float(getattr(resp.confidence_scores, "positive", 0.0) or 0.0), + "neutral": float(getattr(resp.confidence_scores, "neutral", 0.0) or 0.0), + "negative": float(getattr(resp.confidence_scores, "negative", 0.0) or 0.0), + } + label = max(scores, key=scores.get) + return {"provider": "azure", "label": label, "score": scores[label]} + +def _sentiment_openai_prompt(text: str) -> Dict[str, Any]: + key = _env("OPENAI_API_KEY") + model = _env("OPENAI_MODEL", "gpt-3.5-turbo") + if not key: return _sentiment_offline(text) + url = "https://api.openai.com/v1/chat/completions" + prompt = f"Classify the sentiment of this text as positive, negative, or neutral. Reply JSON with keys label and score (0..1). 
Text: {text!r}" + r = requests.post( + url, + headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"}, + json={"model": model, "messages": [{"role": "user", "content": prompt}], "temperature": 0}, + timeout=TIMEOUT, + ) + r.raise_for_status() + content = r.json()["choices"][0]["message"]["content"] + try: + obj = json.loads(content) + label = str(obj.get("label", "neutral")).lower() + score = float(obj.get("score", 0.5)) + return {"provider": "openai", "label": label, "score": score} + except Exception: + l = "positive" if "positive" in content.lower() else "negative" if "negative" in content.lower() else "neutral" + return {"provider": "openai", "label": l, "score": 0.5} + +def _sentiment_cohere_prompt(text: str) -> Dict[str, Any]: + key = _env("COHERE_API_KEY") + model = _env("COHERE_MODEL", "command") + if not key: return _sentiment_offline(text) + url = "https://api.cohere.ai/v1/generate" + prompt = f"Classify the sentiment (positive, negative, neutral) and return JSON with keys label and score (0..1). 
Text: {text!r}" + r = requests.post( + url, + headers={ + "Authorization": f"Bearer {key}", + "Content-Type": "application/json", + "Cohere-Version": "2022-12-06", + }, + json={"model": model, "prompt": prompt, "max_tokens": 30, "temperature": 0}, + timeout=TIMEOUT, + ) + r.raise_for_status() + gen = (r.json().get("generations") or [{}])[0].get("text", "") + try: + obj = json.loads(gen) + label = str(obj.get("label", "neutral")).lower() + score = float(obj.get("score", 0.5)) + return {"provider": "cohere", "label": label, "score": score} + except Exception: + l = "positive" if "positive" in gen.lower() else "negative" if "negative" in gen.lower() else "neutral" + return {"provider": "cohere", "label": l, "score": 0.5} + +def _sentiment_deepai(text: str) -> Dict[str, Any]: + key = _env("DEEPAI_API_KEY") + if not key: return _sentiment_offline(text) + url = "https://api.deepai.org/api/sentiment-analysis" + r = requests.post(url, headers={"api-key": key}, data={"text": text}, timeout=TIMEOUT) + r.raise_for_status() + data = r.json() + label = (data.get("output") or ["neutral"])[0].lower() + return {"provider": "deepai", "label": label, "score": 0.5 if label == "neutral" else 0.9} + +# --------------------------- +# Text generation (optional) +# --------------------------- + +def generate_text(prompt: str, max_tokens: int = 128) -> Dict[str, Any]: + provider = _pick_provider() + try: + if provider == "hf": return _gen_hf(prompt, max_tokens) + if provider == "openai": return _gen_openai(prompt, max_tokens) + if provider == "cohere": return _gen_cohere(prompt, max_tokens) + if provider == "deepai": return _gen_deepai(prompt, max_tokens) + return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + except Exception as e: + return {"provider": provider, "text": f"(error) {str(e)}"} + +def _gen_hf(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("HF_API_KEY") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + model = 
_env("HF_MODEL_GENERATION", "tiiuae/falcon-7b-instruct") + r = requests.post( + f"https://api-inference.huggingface.co/models/{model}", + headers={"Authorization": f"Bearer {key}"}, + json={"inputs": prompt, "parameters": {"max_new_tokens": max_tokens}}, + timeout=TIMEOUT, + ) + r.raise_for_status() + data = r.json() + if isinstance(data, list) and data and "generated_text" in data[0]: + return {"provider": "hf", "text": data[0]["generated_text"]} + return {"provider": "hf", "text": str(data)} + +def _gen_openai(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("OPENAI_API_KEY") + model = _env("OPENAI_MODEL", "gpt-3.5-turbo") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + url = "https://api.openai.com/v1/chat/completions" + r = requests.post( + url, + headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"}, + json={"model": model, "messages": [{"role": "user", "content": prompt}], "max_tokens": max_tokens}, + timeout=TIMEOUT, + ) + r.raise_for_status() + data = r.json() + text = data["choices"][0]["message"]["content"] + return {"provider": "openai", "text": text} + +def _gen_cohere(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("COHERE_API_KEY") + model = _env("COHERE_MODEL", "command") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + url = "https://api.cohere.ai/v1/generate" + r = requests.post( + url, + headers={ + "Authorization": f"Bearer {key}", + "Content-Type": "application/json", + "Cohere-Version": "2022-12-06", + }, + json={"model": model, "prompt": prompt, "max_tokens": max_tokens}, + timeout=TIMEOUT, + ) + r.raise_for_status() + data = r.json() + text = data.get("generations", [{}])[0].get("text", "") + return {"provider": "cohere", "text": text} + +def _gen_deepai(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("DEEPAI_API_KEY") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + url = 
"https://api.deepai.org/api/text-generator" + r = requests.post(url, headers={"api-key": key}, data={"text": prompt}, timeout=TIMEOUT) + r.raise_for_status() + data = r.json() + return {"provider": "deepai", "text": data.get("output", "")} diff --git a/anon_bot/handler.py b/anon_bot/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..2bbd6adf03f0e95b6a162221eabe64aed8aca694 --- /dev/null +++ b/anon_bot/handler.py @@ -0,0 +1,3 @@ +# /anon_bot/handler.py + +def handle_turn(m,h,u): return (h or [])+[[m,'hi']] diff --git a/anon_bot/rules.py b/anon_bot/rules.py new file mode 100644 index 0000000000000000000000000000000000000000..721684fc54ff375c6518f9669cbe6091ac9fd0a0 --- /dev/null +++ b/anon_bot/rules.py @@ -0,0 +1 @@ +# /anon_bot/rules.py diff --git a/app/app.py b/app/app.py new file mode 100644 index 0000000000000000000000000000000000000000..e53d11773b9734f041c6123d70acef2497050172 --- /dev/null +++ b/app/app.py @@ -0,0 +1,139 @@ +# /app/app.py +#!/usr/bin/env python3 +# app.py — aiohttp + Bot Framework Echo bot + +import os +import sys +import json +from logic import handle_text +from aiohttp import web +from botbuilder.core import BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext +from botbuilder.schema import Activity +import aiohttp_cors +from pathlib import Path + + +# ------------------------------------------------------------------- +# Your bot implementation +# ------------------------------------------------------------------- +# Make sure this exists at packages/bots/echo_bot.py +# from bots.echo_bot import EchoBot +# Minimal inline fallback if you want to test quickly: +class EchoBot: + async def on_turn(self, turn_context: TurnContext): + if turn_context.activity.type == "message": + text = (turn_context.activity.text or "").strip() + if not text: + await turn_context.send_activity("Input was empty. 
# /app/app.py
#!/usr/bin/env python3
# app.py — aiohttp + Bot Framework Echo bot

import os
import sys
import json
from pathlib import Path

from logic import handle_text
from aiohttp import web
from botbuilder.core import BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext
from botbuilder.schema import Activity
import aiohttp_cors  # NOTE(review): imported but CORS is never configured — wire up or drop


# -------------------------------------------------------------------
# Your bot implementation
# -------------------------------------------------------------------
# Make sure this exists at packages/bots/echo_bot.py
# from bots.echo_bot import EchoBot
# Minimal inline fallback if you want to test quickly:
class EchoBot:
    """Tiny command bot: echo, reverse, help, capabilities."""

    async def on_turn(self, turn_context: TurnContext):
        """Handle one activity; non-message activity types are acknowledged only."""
        if turn_context.activity.type == "message":
            text = (turn_context.activity.text or "").strip()
            if not text:
                await turn_context.send_activity("Input was empty. Type 'help' for usage.")
                return

            lower = text.lower()
            if lower == "help":
                await turn_context.send_activity("Try: echo | reverse: | capabilities")
            elif lower == "capabilities":
                await turn_context.send_activity("- echo\n- reverse\n- help\n- capabilities")
            elif lower.startswith("reverse:"):
                # Everything after the first ':' is reversed verbatim
                payload = text.split(":", 1)[1].strip()
                await turn_context.send_activity(payload[::-1])
            elif lower.startswith("echo "):
                await turn_context.send_activity(text[5:])
            else:
                await turn_context.send_activity("Unsupported command. Type 'help' for examples.")
        else:
            await turn_context.send_activity(f"[{turn_context.activity.type}] event received.")

# -------------------------------------------------------------------
# Adapter / bot setup
# -------------------------------------------------------------------
APP_ID = os.environ.get("MicrosoftAppId") or None
APP_PASSWORD = os.environ.get("MicrosoftAppPassword") or None

adapter_settings = BotFrameworkAdapterSettings(APP_ID, APP_PASSWORD)
adapter = BotFrameworkAdapter(adapter_settings)

async def on_error(context: TurnContext, error: Exception):
    """Global turn-error handler: log and try to notify the user."""
    print(f"[on_turn_error] {error}", file=sys.stderr, flush=True)
    try:
        await context.send_activity("Oops. Something went wrong!")
    except Exception as send_err:
        print(f"[on_turn_error][send_activity_failed] {send_err}", file=sys.stderr, flush=True)

adapter.on_turn_error = on_error
bot = EchoBot()

# -------------------------------------------------------------------
# HTTP handlers
# -------------------------------------------------------------------
async def messages(req: web.Request) -> web.Response:
    """Bot Framework endpoint: deserialize the activity and run the bot turn."""
    # Content-Type can include charset; do a contains check
    ctype = (req.headers.get("Content-Type") or "").lower()
    if "application/json" not in ctype:
        return web.Response(status=415, text="Unsupported Media Type: expected application/json")

    try:
        body = await req.json()
    except json.JSONDecodeError:
        return web.Response(status=400, text="Invalid JSON body")

    activity = Activity().deserialize(body)
    auth_header = req.headers.get("Authorization")

    invoke_response = await adapter.process_activity(activity, auth_header, bot.on_turn)
    if invoke_response:
        # For invoke activities, adapter returns explicit status/body
        return web.json_response(data=invoke_response.body, status=invoke_response.status)
    # Acknowledge standard message activities
    return web.Response(status=202, text="Accepted")

async def home(_req: web.Request) -> web.Response:
    """Landing page with a hint about the bot endpoint."""
    return web.Response(
        text="Bot is running. POST Bot Framework activities to /api/messages.",
        content_type="text/plain"
    )

async def messages_get(_req: web.Request) -> web.Response:
    """405 helper so a browser GET on /api/messages gets a clear message."""
    return web.Response(
        text="This endpoint only accepts POST (Bot Framework activities).",
        content_type="text/plain",
        status=405
    )

async def healthz(_req: web.Request) -> web.Response:
    """Liveness probe."""
    return web.json_response({"status": "ok"})

async def plain_chat(req: web.Request) -> web.Response:
    """Framework-free JSON chat endpoint: {"text": ...} -> {"reply": ...}."""
    try:
        payload = await req.json()
    except Exception:
        return web.json_response({"error": "Invalid JSON"}, status=400)
    user_text = payload.get("text", "")
    reply = handle_text(user_text)
    return web.json_response({"reply": reply})

# -------------------------------------------------------------------
# App factory and entrypoint
# -------------------------------------------------------------------
# (redundant duplicate "from pathlib import Path" removed — already imported above)

def create_app() -> web.Application:
    """Build the aiohttp application and register routes/static files."""
    app = web.Application()
    app.router.add_get("/", home)
    app.router.add_get("/healthz", healthz)
    app.router.add_get("/api/messages", messages_get)
    app.router.add_post("/api/messages", messages)
    app.router.add_post("/plain-chat", plain_chat)

    static_dir = Path(__file__).parent / "static"
    if static_dir.exists():
        app.router.add_static("/static/", path=static_dir, show_index=True)
    else:
        print(f"[warn] static directory not found: {static_dir}", flush=True)

    return app

app = create_app()

if __name__ == "__main__":
    host = os.environ.get("HOST", "127.0.0.1")  # use 0.0.0.0 in containers
    port = int(os.environ.get("PORT", 3978))
    web.run_app(app, host=host, port=port)
+
+

AgenticCore Chatbot Frontend

+
Frontend → FastAPI → providers_unified
+
+ +
+
+
+ +
+ + +
+
Not checked
+
+
+ +
+ + +
+
+ + + +
+
+
+
+
+ +
+ Use with your FastAPI backend at /chatbot/message. Configure CORS if you serve this file from a different origin. +
+
+ + + + diff --git a/app/assets/html/chat.html b/app/assets/html/chat.html new file mode 100644 index 0000000000000000000000000000000000000000..baf2e85b4cb8a466d832afc06d151e5e203a0639 --- /dev/null +++ b/app/assets/html/chat.html @@ -0,0 +1,56 @@ + +Simple Chat + + + +
+
Traditional Chatbot (Local)
+
+
Try: reverse: hello world, help, capabilities
+ +
+ + diff --git a/app/assets/html/chat_console.html b/app/assets/html/chat_console.html new file mode 100644 index 0000000000000000000000000000000000000000..c2cf2ca34021674756a9e61906fe6b8f75948724 --- /dev/null +++ b/app/assets/html/chat_console.html @@ -0,0 +1,77 @@ + + + + + Console Chat Tester + + + + +

AgenticCore Console

+ +
+ + + + +
+ +
+ + +
+ +
+ Mode: + API +
+ +

+
+
+
+
diff --git a/app/assets/html/chat_minimal.html b/app/assets/html/chat_minimal.html
new file mode 100644
index 0000000000000000000000000000000000000000..0b3fb325ece7bf9972b31d977e4aeee0e167c025
--- /dev/null
+++ b/app/assets/html/chat_minimal.html
@@ -0,0 +1,89 @@
+
+
+
+  
+  Minimal Chat Tester
+  
+  
+
+
+

Minimal Chat Tester → FastAPI /chatbot/message

+ +
+ + + + +
+ +
+ + +
+ +

+ + + + + diff --git a/app/routes.py b/app/routes.py new file mode 100644 index 0000000000000000000000000000000000000000..79c6f0b04a854f929db292c6e85ef45594b2e19f --- /dev/null +++ b/app/routes.py @@ -0,0 +1 @@ +# /app/routes.py diff --git a/core/config.py b/core/config.py new file mode 100644 index 0000000000000000000000000000000000000000..24b4666db4b0da691e280e811b62b3c0fed2cc2b --- /dev/null +++ b/core/config.py @@ -0,0 +1,4 @@ +# /core/config.py + +class Settings: pass +settings = Settings() diff --git a/core/logging.py b/core/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..d3fb82d1e6d5c56a08f514c608549f4a0e47a932 --- /dev/null +++ b/core/logging.py @@ -0,0 +1 @@ +# /core/logging.py diff --git a/core/types.py b/core/types.py new file mode 100644 index 0000000000000000000000000000000000000000..4955f9e987f5b41469115cf71dfffe69b37c6924 --- /dev/null +++ b/core/types.py @@ -0,0 +1 @@ +# /core/types.py diff --git a/docs/DEV_DOC.md b/docs/DEV_DOC.md new file mode 100644 index 0000000000000000000000000000000000000000..53da2cfb23e1040908fb879f5462a20925d3a09e --- /dev/null +++ b/docs/DEV_DOC.md @@ -0,0 +1,97 @@ +## 3. Functional Requirements + +This section describes the functional requirements for connecting a chatbot to an AI-as-a-Service (AIaaS) platform. It defines the expected system behavior, outlines constraints, and sets measurable acceptance criteria. Requirements are grouped into system context, core functions, supporting functions, and non-functional aspects. + +--- + +### 3.1 System Context + +The chatbot acts as the client application. It receives user input, processes it, and communicates with an external AIaaS endpoint (e.g., Azure AI Language Service). The AI service provides natural language processing (NLP) features such as sentiment analysis. The chatbot then interprets the service output and responds back to the user. + +Key components include: +- **User Interface (UI):** Chat interface for entering text. 
+- **Chatbot Core:** Handles request routing and conversation logic. +- **AI Service Connector:** Manages authentication and API calls to the AI service. +- **AIaaS Platform:** External cloud service providing NLP functions. + +--- + +### 3.2 Functional Requirements + +#### FR-1: User Input Handling +- The chatbot shall accept text input from users. +- The chatbot shall sanitize input to remove unsafe characters. +- The chatbot shall log all interactions for debugging and testing. + +#### FR-2: API Connection +- The system shall authenticate with the AI service using API keys stored securely in environment variables. +- The chatbot shall send user text to the AIaaS endpoint in the required format. +- The chatbot shall handle and parse responses from the AIaaS. + +#### FR-3: Sentiment Analysis Integration +- The chatbot shall use the AIaaS to determine the sentiment (e.g., positive, neutral, negative) of user input. +- The chatbot shall present sentiment results as part of its response or use them to adjust tone. + +#### FR-4: Error and Exception Handling +- The system shall detect failed API calls and return a fallback message to the user. +- The chatbot shall notify the user if the AI service is unavailable. +- The chatbot shall log errors with timestamp and cause. + +#### FR-5: Reporting and Documentation +- The chatbot shall provide a list of supported commands or features when prompted. +- The chatbot shall record system status and output for inclusion in the project report. +- The development process shall be documented with screenshots and configuration notes. + +--- + +### 3.3 Non-Functional Requirements + +#### NFR-1: Security +- API keys shall not be hard-coded in source files. +- Sensitive data shall be retrieved from environment variables or secure vaults. + +#### NFR-2: Performance +- The chatbot shall return responses within 2 seconds under normal network conditions. 
+- The system shall process at least 20 concurrent user sessions without performance degradation. + +#### NFR-3: Reliability +- The chatbot shall achieve at least 95% uptime during testing. +- The chatbot shall gracefully degrade to local responses if the AI service is unavailable. + +#### NFR-4: Usability +- The chatbot shall provide clear, user-friendly error messages. +- The chatbot shall handle malformed input without crashing. + +--- + +### 3.4 Acceptance Criteria + +1. **Input Handling** + - Given valid text input, the chatbot processes it without errors. + - Given invalid or malformed input, the chatbot responds with a clarification request. + +2. **API Connection** + - Given a valid API key and endpoint, the chatbot connects and retrieves sentiment analysis. + - Given an invalid API key, the chatbot logs an error and informs the user. + +3. **Sentiment Analysis** + - Given a positive statement, the chatbot labels it correctly with at least 90% accuracy. + - Given a negative statement, the chatbot labels it correctly with at least 90% accuracy. + +4. **Error Handling** + - When the AI service is unavailable, the chatbot informs the user and continues functioning with local responses. + - All failures are recorded in a log file. + +5. **Usability** + - The chatbot returns responses in less than 2 seconds for 95% of requests. + - The chatbot displays a list of features when the user requests “help.” + +--- + +### Glossary + +- **AIaaS (AI-as-a-Service):** Cloud-based artificial intelligence services accessible via APIs. +- **API (Application Programming Interface):** A set of rules for software applications to communicate with each other. +- **NLP (Natural Language Processing):** A field of AI focused on enabling computers to understand human language. +- **Sentiment Analysis:** An NLP technique that determines the emotional tone behind a text. 
+ diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 0000000000000000000000000000000000000000..95ed0bab27e0139370e40521faad58ecad87b7cc --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,2 @@ +# /docs/slides/architecture.md +# Architecture\n\nShort explainer tied to the flowchart.\n \ No newline at end of file diff --git a/docs/design.md b/docs/design.md new file mode 100644 index 0000000000000000000000000000000000000000..1db89fc8a56905fa0cc2cbd047c5cedbb0ad2660 --- /dev/null +++ b/docs/design.md @@ -0,0 +1,2 @@ +# /docs/slides/design.md +# Design notes\n\nAPI notes, security, tradeoffs.\n diff --git a/docs/flowchart.png b/docs/flowchart.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/results.md b/docs/results.md new file mode 100644 index 0000000000000000000000000000000000000000..b4c19ed6cf5eeae6f0bbcfe64bcd1bc33d50d3a4 --- /dev/null +++ b/docs/results.md @@ -0,0 +1,2 @@ +# /docs/slides/design.md +# Results\n\nChallenges, metrics, screenshots.\n \ No newline at end of file diff --git a/examples/example.py b/examples/example.py new file mode 100644 index 0000000000000000000000000000000000000000..4cfe692269b6a1e166dfa186965f91814740b2d6 --- /dev/null +++ b/examples/example.py @@ -0,0 +1,9 @@ +# /example/example.py +"""Simple CLI example that sends a message to the ChatBot and prints the JSON reply.""" +import json +from agenticcore.chatbot.services import ChatBot + +if __name__ == "__main__": + bot = ChatBot() + result = bot.reply("hello world") + print(json.dumps(result, indent=2)) diff --git a/guardrails/pii_redaction.py b/guardrails/pii_redaction.py new file mode 100644 index 0000000000000000000000000000000000000000..cf30b9ccef9b8851e94cd16634e62fc47a796165 --- /dev/null +++ b/guardrails/pii_redaction.py @@ -0,0 +1,3 @@ +# /guardrails/pii_redaction.py + +def redact(t): return t diff --git a/guardrails/safety.py b/guardrails/safety.py new file mode 
100644 index 0000000000000000000000000000000000000000..91aa093544340afa45df0645c6cf192b69f389bd --- /dev/null +++ b/guardrails/safety.py @@ -0,0 +1 @@ +# /guardrails/safety.py \ No newline at end of file diff --git a/integrations/azure/bot_framework.py b/integrations/azure/bot_framework.py new file mode 100644 index 0000000000000000000000000000000000000000..31cf1634454861a79d82867038ff26f640f945ef --- /dev/null +++ b/integrations/azure/bot_framework.py @@ -0,0 +1,2 @@ +# /intergrations/azure/bot_framework.py +# Azure Bot Framework (placeholder) diff --git a/integrations/botframework/app.py b/integrations/botframework/app.py new file mode 100644 index 0000000000000000000000000000000000000000..912981afe32eeff6c8cf4c69bb5da6a6d5429f29 --- /dev/null +++ b/integrations/botframework/app.py @@ -0,0 +1,138 @@ +# /intergrations/botframework/app.py — aiohttp + Bot Framework Echo bot +#!/usr/bin/env python3 + +import os +import sys +import json +from logic import handle_text +from aiohttp import web +from botbuilder.core import BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext +from botbuilder.schema import Activity +import aiohttp_cors +from pathlib import Path + + +# ------------------------------------------------------------------- +# Your bot implementation +# ------------------------------------------------------------------- +# Make sure this exists at packages/bots/echo_bot.py +# from bots.echo_bot import EchoBot +# Minimal inline fallback if you want to test quickly: +class EchoBot: + async def on_turn(self, turn_context: TurnContext): + if turn_context.activity.type == "message": + text = (turn_context.activity.text or "").strip() + if not text: + await turn_context.send_activity("Input was empty. 
Type 'help' for usage.") + return + + lower = text.lower() + if lower == "help": + await turn_context.send_activity("Try: echo | reverse: | capabilities") + elif lower == "capabilities": + await turn_context.send_activity("- echo\n- reverse\n- help\n- capabilities") + elif lower.startswith("reverse:"): + payload = text.split(":", 1)[1].strip() + await turn_context.send_activity(payload[::-1]) + elif lower.startswith("echo "): + await turn_context.send_activity(text[5:]) + else: + await turn_context.send_activity("Unsupported command. Type 'help' for examples.") + else: + await turn_context.send_activity(f"[{turn_context.activity.type}] event received.") + +# ------------------------------------------------------------------- +# Adapter / bot setup +# ------------------------------------------------------------------- +APP_ID = os.environ.get("MicrosoftAppId") or None +APP_PASSWORD = os.environ.get("MicrosoftAppPassword") or None + +adapter_settings = BotFrameworkAdapterSettings(APP_ID, APP_PASSWORD) +adapter = BotFrameworkAdapter(adapter_settings) + +async def on_error(context: TurnContext, error: Exception): + print(f"[on_turn_error] {error}", file=sys.stderr, flush=True) + try: + await context.send_activity("Oops. 
Something went wrong!") + except Exception as send_err: + print(f"[on_turn_error][send_activity_failed] {send_err}", file=sys.stderr, flush=True) + +adapter.on_turn_error = on_error +bot = EchoBot() + +# ------------------------------------------------------------------- +# HTTP handlers +# ------------------------------------------------------------------- +async def messages(req: web.Request) -> web.Response: + # Content-Type can include charset; do a contains check + ctype = (req.headers.get("Content-Type") or "").lower() + if "application/json" not in ctype: + return web.Response(status=415, text="Unsupported Media Type: expected application/json") + + try: + body = await req.json() + except json.JSONDecodeError: + return web.Response(status=400, text="Invalid JSON body") + + activity = Activity().deserialize(body) + auth_header = req.headers.get("Authorization") + + invoke_response = await adapter.process_activity(activity, auth_header, bot.on_turn) + if invoke_response: + # For invoke activities, adapter returns explicit status/body + return web.json_response(data=invoke_response.body, status=invoke_response.status) + # Acknowledge standard message activities + return web.Response(status=202, text="Accepted") + +async def home(_req: web.Request) -> web.Response: + return web.Response( + text="Bot is running. 
POST Bot Framework activities to /api/messages.", + content_type="text/plain" + ) + +async def messages_get(_req: web.Request) -> web.Response: + return web.Response( + text="This endpoint only accepts POST (Bot Framework activities).", + content_type="text/plain", + status=405 + ) + +async def healthz(_req: web.Request) -> web.Response: + return web.json_response({"status": "ok"}) + +async def plain_chat(req: web.Request) -> web.Response: + try: + payload = await req.json() + except Exception: + return web.json_response({"error": "Invalid JSON"}, status=400) + user_text = payload.get("text", "") + reply = handle_text(user_text) + return web.json_response({"reply": reply}) + +# ------------------------------------------------------------------- +# App factory and entrypoint +# ------------------------------------------------------------------- +from pathlib import Path + +def create_app() -> web.Application: + app = web.Application() + app.router.add_get("/", home) + app.router.add_get("/healthz", healthz) + app.router.add_get("/api/messages", messages_get) + app.router.add_post("/api/messages", messages) + app.router.add_post("/plain-chat", plain_chat) + + static_dir = Path(__file__).parent / "static" + if static_dir.exists(): + app.router.add_static("/static/", path=static_dir, show_index=True) + else: + print(f"[warn] static directory not found: {static_dir}", flush=True) + + return app + +app = create_app() + +if __name__ == "__main__": + host = os.environ.get("HOST", "127.0.0.1") # use 0.0.0.0 in containers + port = int(os.environ.get("PORT", 3978)) + web.run_app(app, host=host, port=port) diff --git a/integrations/botframework/bot.py b/integrations/botframework/bot.py new file mode 100644 index 0000000000000000000000000000000000000000..8a66aa5ff0bef79c6ac99e0d314f6d306bce088b --- /dev/null +++ b/integrations/botframework/bot.py @@ -0,0 +1,86 @@ +# /intergrations/botframework/bot.py +""" +Simple MBF bot: +- 'help' / 'capabilities' shows features +- 'reverse ' 
returns reversed text +- otherwise delegates to AgenticCore ChatBot (sentiment) if available +""" + +from typing import List, Optional, Dict, Any +from botbuilder.core import ActivityHandler, TurnContext +from botbuilder.schema import ChannelAccount, ActivityTypes + +from skills import normalize, reverse_text, capabilities, is_empty + +# Try to import AgenticCore; if unavailable, provide a tiny fallback. +try: + from agenticcore.chatbot.services import ChatBot # real provider-backed bot +except Exception: + class ChatBot: # fallback shim for offline/dev + def reply(self, message: str) -> Dict[str, Any]: + return { + "reply": "Noted. (local fallback reply)", + "sentiment": "neutral", + "confidence": 0.5, + } + +def _format_sentiment(res: Dict[str, Any]) -> str: + """Compose a user-facing string from ChatBot reply payload.""" + reply = (res.get("reply") or "").strip() + label: Optional[str] = res.get("sentiment") + conf = res.get("confidence") + if label is not None and conf is not None: + return f"{reply} (sentiment: {label}, confidence: {float(conf):.2f})" + return reply or "I'm not sure what to say." + +def _help_text() -> str: + """Single source of truth for the help/capability text.""" + feats = "\n".join(f"- {c}" for c in capabilities()) + return ( + "I can reverse text and provide concise replies with sentiment.\n" + "Commands:\n" + "- help | capabilities\n" + "- reverse \n" + "General text will be handled by the ChatBot service.\n\n" + f"My capabilities:\n{feats}" + ) + +class SimpleBot(ActivityHandler): + """Minimal ActivityHandler with local commands + ChatBot fallback.""" + + def __init__(self, chatbot: Optional[ChatBot] = None): + self._chatbot = chatbot or ChatBot() + + async def on_members_added_activity( + self, members_added: List[ChannelAccount], turn_context: TurnContext + ): + for member in members_added: + if member.id != turn_context.activity.recipient.id: + await turn_context.send_activity("Hello! 
Type 'help' to see what I can do.") + + async def on_message_activity(self, turn_context: TurnContext): + if turn_context.activity.type != ActivityTypes.message: + return + + text = (turn_context.activity.text or "").strip() + if is_empty(text): + await turn_context.send_activity("Please enter a message (try 'help').") + return + + cmd = normalize(text) + + if cmd in {"help", "capabilities"}: + await turn_context.send_activity(_help_text()) + return + + if cmd.startswith("reverse "): + original = text.split(" ", 1)[1] if " " in text else "" + await turn_context.send_activity(reverse_text(original)) + return + + # ChatBot fallback (provider-agnostic sentiment/reply) + try: + result = self._chatbot.reply(text) + await turn_context.send_activity(_format_sentiment(result)) + except Exception: + await turn_context.send_activity(f"You said: {text}") diff --git a/integrations/botframework/bots/echo_bot.py b/integrations/botframework/bots/echo_bot.py new file mode 100644 index 0000000000000000000000000000000000000000..86f8ab582567115d9e040b2fa1aa0beff44008b3 --- /dev/null +++ b/integrations/botframework/bots/echo_bot.py @@ -0,0 +1,57 @@ +# bots/echo_bot.py +from botbuilder.core import ActivityHandler, TurnContext +from botbuilder.schema import ChannelAccount + +def simple_sentiment(text: str): + """ + Tiny, no-cost heuristic so you can demo behavior without extra services. + You can swap this later for HF/OpenAI/Azure easily. 
+ """ + t = (text or "").lower() + pos = any(w in t for w in ["love","great","good","awesome","fantastic","excellent","amazing"]) + neg = any(w in t for w in ["hate","bad","terrible","awful","worst","horrible","angry"]) + if pos and not neg: return "positive", 0.9 + if neg and not pos: return "negative", 0.9 + return "neutral", 0.5 + +CAPS = [ + "Echo what you say (baseline).", + "Show my capabilities with 'help' or 'capabilities'.", + "Handle malformed/empty input politely.", + "Classify simple sentiment (positive/negative/neutral).", +] + +class EchoBot(ActivityHandler): + async def on_members_added_activity( + self, members_added: [ChannelAccount], turn_context: TurnContext + ): + for member in members_added: + if member.id != turn_context.activity.recipient.id: + await turn_context.send_activity( + "Hi! I’m your sample bot.\n" + "- Try typing: **help**\n" + "- Or any sentence and I’ll echo it + sentiment." + ) + + async def on_message_activity(self, turn_context: TurnContext): + text = (turn_context.activity.text or "").strip() + + # Handle empty/malformed + if not text: + await turn_context.send_activity( + "I didn’t catch anything. Please type a message (or 'help')." 
+ ) + return + + # Capabilities + if text.lower() in {"help","capabilities","what can you do"}: + caps = "\n".join(f"• {c}" for c in CAPS) + await turn_context.send_activity( + "Here’s what I can do:\n" + caps + ) + return + + # Normal message → echo + sentiment + label, score = simple_sentiment(text) + reply = f"You said: **{text}**\nSentiment: **{label}** (conf {score:.2f})" + await turn_context.send_activity(reply) diff --git a/integrations/email/ticket_stub.py b/integrations/email/ticket_stub.py new file mode 100644 index 0000000000000000000000000000000000000000..90a5408df05b835b8ca61af39e226cf41b8de174 --- /dev/null +++ b/integrations/email/ticket_stub.py @@ -0,0 +1,2 @@ +# /intergrations/email/ticket_stub.py +# Email ticket stub (placeholder) diff --git a/integrations/web/fastapi/web_agentic.py b/integrations/web/fastapi/web_agentic.py new file mode 100644 index 0000000000000000000000000000000000000000..51bd880287a0172021172898c10ac0cf7eb0c378 --- /dev/null +++ b/integrations/web/fastapi/web_agentic.py @@ -0,0 +1,22 @@ +# /integrations/web/fastapi/web_agentic.py +from fastapi import FastAPI, Query +from fastapi.responses import HTMLResponse +from agenticcore.chatbot.services import ChatBot + +app = FastAPI(title="AgenticCore Web UI") + +# 1. Simple HTML form at / +@app.get("/", response_class=HTMLResponse) +def index(): + return """ +
+ + +
+ """ + +# 2. Agentic endpoint +@app.get("/agentic") +def run_agentic(msg: str = Query(..., description="Message to send to ChatBot")): + bot = ChatBot() + return bot.reply(msg) diff --git a/logged_in_bot/handler.py b/logged_in_bot/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..6185ff6f173031f3432029a26c388abad92ea9b1 --- /dev/null +++ b/logged_in_bot/handler.py @@ -0,0 +1,20 @@ +# /logged_in_bot/handler.py + +from agenticcore.chatbot.services import ChatBot + +_bot = ChatBot() + +def handle_turn(message, history, user): + history = history or [] + try: + res = _bot.reply(message) + reply = res.get("reply") or "Noted." + label = res.get("sentiment") + conf = res.get("confidence") + if label is not None and conf is not None: + reply = f"{reply} (sentiment: {label}, confidence: {float(conf):.2f})" + except Exception as e: + reply = f"Sorry—error in ChatBot: {type(e).__name__}. Using fallback." + history = history + [[message, reply]] + return history + diff --git a/logged_in_bot/sentiment_azure.py b/logged_in_bot/sentiment_azure.py new file mode 100644 index 0000000000000000000000000000000000000000..91e0f9c9cd2cb34d5f7b84c8a6f585d98e51f92d --- /dev/null +++ b/logged_in_bot/sentiment_azure.py @@ -0,0 +1 @@ +# /logged_in_bot/sentiment_azure.py diff --git a/logged_in_bot/tools.py b/logged_in_bot/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..040a9688661baa35436923991736f7e466a7c20f --- /dev/null +++ b/logged_in_bot/tools.py @@ -0,0 +1 @@ +# /logged_in_bot/tools.py diff --git a/memory/rag/indexer.py b/memory/rag/indexer.py new file mode 100644 index 0000000000000000000000000000000000000000..90e24c92b5c4ee9779075906499567c7f1370fa4 --- /dev/null +++ b/memory/rag/indexer.py @@ -0,0 +1 @@ +# /memory/rag/data/indexer.py diff --git a/memory/rag/retriever.py b/memory/rag/retriever.py new file mode 100644 index 0000000000000000000000000000000000000000..526bb82fcc1300c1b487879f859f2512bddd8283 --- /dev/null +++ 
b/memory/rag/retriever.py @@ -0,0 +1 @@ +# /memory/rag/data/retriever.py diff --git a/memory/sessions.py b/memory/sessions.py new file mode 100644 index 0000000000000000000000000000000000000000..9ba9074f0a8ee8d7edf86433f502529ddd32da8f --- /dev/null +++ b/memory/sessions.py @@ -0,0 +1 @@ +# /memory/sessions.py diff --git a/memory/store.py b/memory/store.py new file mode 100644 index 0000000000000000000000000000000000000000..8feae109f45ff2153c3d72cb3c7aeae3a2b6e273 --- /dev/null +++ b/memory/store.py @@ -0,0 +1,3 @@ +# /memory/sessions.py + +DB={} diff --git a/nlu/pipeline.py b/nlu/pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..504c96aabbf254b2afc87e4a6b23ebc82e2397ca --- /dev/null +++ b/nlu/pipeline.py @@ -0,0 +1,3 @@ +# /nlu/pipeline.py + +def analyze(t): return {'intent':'general'} diff --git a/nlu/prompts.py b/nlu/prompts.py new file mode 100644 index 0000000000000000000000000000000000000000..fcdc1ed6fabe2d397b2c4c1bb8c01d18d1f874a6 --- /dev/null +++ b/nlu/prompts.py @@ -0,0 +1 @@ +# /nlu/prompts.py diff --git a/nlu/router.py b/nlu/router.py new file mode 100644 index 0000000000000000000000000000000000000000..8f55b74780e380562912e8fe2102b9d265afaaaf --- /dev/null +++ b/nlu/router.py @@ -0,0 +1 @@ +# /nlu/router.py diff --git a/notebooks/ChatbotIntegration.ipynb b/notebooks/ChatbotIntegration.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..16f2e6f41f9cd48dd83e44b6ed800118dc24686f --- /dev/null +++ b/notebooks/ChatbotIntegration.ipynb @@ -0,0 +1,559 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "3c5453da-9714-4410-af12-2727730020bc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'reply': 'Noted. 
The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n" + ] + } + ], + "source": [ + "from agenticcore.chatbot.services import ChatBot\n", + "bot = ChatBot()\n", + "print(bot.reply(\"Testing from notebook\"))\n" + ] + }, + { + "cell_type": "markdown", + "id": "6d467914-f9b5-43bb-b66e-fc7f1db12b21", + "metadata": {}, + "source": [ + "# 2) Config: choose backend URL and provider (HF/Azure/etc.)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "240d2787-aacd-49ec-bfe9-709108e49df0", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'http://127.0.0.1:8000'" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import os\n", + "\n", + "# Point to your FastAPI server (change if needed)\n", + "import os\n", + "\n", + "# Default backend URL (can be overridden later via the widget)\n", + "BACKEND_URL = os.environ.get(\"BACKEND_URL\", \"http://127.0.0.1:8000\")\n", + "\n", + "# Provider hint (optional; providers_unified auto-detects if keys exist)\n", + "# Examples:\n", + "# os.environ[\"AI_PROVIDER\"] = \"hf\"\n", + "# os.environ[\"HF_API_KEY\"] = \"hf_XXXXXXXX...\" # if using Hugging Face\n", + "# os.environ[\"MICROSOFT_AI_SERVICE_ENDPOINT\"] = \"https://.cognitiveservices.azure.com/\"\n", + "# os.environ[\"MICROSOFT_AI_API_KEY\"] = \"\"\n", + "\n", + "BACKEND_URL\n" + ] + }, + { + "cell_type": "markdown", + "id": "bde64f5a-dd29-414e-9116-498ee972e759", + "metadata": {}, + "source": [ + "# 3) Helper functions (API + Library paths)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "8d50f328-567b-454f-8090-87c045674338", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import json\n", + "import requests\n", + "from typing import Dict, Any\n", + "\n", + "# Default backend URL\n", + "BACKEND_URL = os.environ.get(\"BACKEND_URL\", \"http://127.0.0.1:8000\")\n", + "\n", + "def send_via_api(message: str, url: str = 
BACKEND_URL) -> Dict[str, Any]:\n", + " \"\"\"POST to FastAPI /chatbot/message. Returns dict with reply/sentiment/confidence.\"\"\"\n", + " u = url.rstrip(\"/\") + \"/chatbot/message\"\n", + " r = requests.post(u, json={\"message\": message}, timeout=20)\n", + " r.raise_for_status()\n", + " return r.json()\n", + "\n", + "def send_via_library(message: str) -> Dict[str, Any]:\n", + " \"\"\"Call ChatBot() directly inside this kernel.\"\"\"\n", + " from agenticcore.chatbot.services import ChatBot\n", + " return ChatBot().reply(message)\n", + "\n", + "def health(url: str = BACKEND_URL) -> Dict[str, Any]:\n", + " r = requests.get(url.rstrip(\"/\") + \"/health\", timeout=10)\n", + " r.raise_for_status()\n", + " return r.json()\n" + ] + }, + { + "cell_type": "markdown", + "id": "f247c509-abd4-44de-9b49-20402d54a296", + "metadata": {}, + "source": [ + "# 4) Minimal UI (ipywidgets)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "3f1258d0-5616-4c5a-b19e-e4ffa040e9cb", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "d5e531363757461cbb9225d4afdd5ea9", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(ToggleButtons(description='Route:', options=(('API', 'api'), ('Library', 'lib')), value='api'),…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b7e695b92e094e3094099e4f54c5852d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(Text(value='', description='You:', layout=Layout(width='60%'), placeholder='Type a message…'), …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4d1b73dbb842416bb085ba3237e6c69c", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" 
+ }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " Tip: API path requires your FastAPI server running at /chatbot/message.\n", + " Switch to Library mode for offline tests.\n", + "
\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import ipywidgets as W\n", + "from IPython.display import display, HTML, clear_output\n", + "\n", + "mode = W.ToggleButtons(\n", + " options=[(\"API\", \"api\"), (\"Library\", \"lib\")],\n", + " value=\"api\",\n", + " description=\"Route:\",\n", + ")\n", + "backend = W.Text(value=BACKEND_URL, placeholder=\"http://127.0.0.1:8000\", description=\"Backend:\", layout=W.Layout(width=\"60%\"))\n", + "save_btn = W.Button(description=\"Save\", button_style=\"info\")\n", + "msg = W.Text(placeholder=\"Type a message…\", description=\"You:\", layout=W.Layout(width=\"60%\"))\n", + "send_btn = W.Button(description=\"Send\", button_style=\"primary\")\n", + "cap_btn = W.Button(description=\"Capabilities\", tooltip=\"Show ChatBot capabilities\")\n", + "out = W.Output()\n", + "\n", + "def on_save(_):\n", + " os.environ[\"BACKEND_URL\"] = backend.value.strip()\n", + " with out:\n", + " print(f\"[config] BACKEND_URL = {os.environ['BACKEND_URL']}\")\n", + "\n", + "def on_send(_):\n", + " text = msg.value.strip()\n", + " if not text:\n", + " with out:\n", + " print(\"[warn] Please enter some text.\")\n", + " return\n", + " try:\n", + " if mode.value == \"api\":\n", + " data = send_via_api(text, backend.value.strip())\n", + " else:\n", + " data = send_via_library(text)\n", + " with out:\n", + " print(json.dumps(data, indent=2, ensure_ascii=False))\n", + " except Exception as e:\n", + " with out:\n", + " print(f\"[error] {e}\")\n", + "\n", + "def on_caps(_):\n", + " try:\n", + " # Prefer library capabilities; keeps working even if API is down\n", + " from agenticcore.chatbot.services import ChatBot\n", + " data = ChatBot().capabilities()\n", + " with out:\n", + " print(json.dumps({\"capabilities\": data}, indent=2))\n", + " except Exception as e:\n", + " with out:\n", + " print(f\"[error capabilities] {e}\")\n", + "\n", + "save_btn.on_click(on_save)\n", + 
"send_btn.on_click(on_send)\n", + "cap_btn.on_click(on_caps)\n", + "\n", + "display(W.HBox([mode, backend, save_btn]))\n", + "display(W.HBox([msg, send_btn, cap_btn]))\n", + "display(out)\n", + "\n", + "# Optional visual hint\n", + "display(HTML(\"\"\"\n", + "
\n", + " Tip: API path requires your FastAPI server running at /chatbot/message.\n", + " Switch to Library mode for offline tests.\n", + "
\n", + "\"\"\"))\n" + ] + }, + { + "cell_type": "markdown", + "id": "7aaf5b2a-2a30-42a5-ae77-d851c62feccb", + "metadata": {}, + "source": [ + "# 5) Batch test cell (multi-prompt, tabular)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "f1d5da55-08ff-4433-aea9-a3ccde34de5c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
messagereplysentimentconfidence
0I absolutely love this project!(error) 404 Client Error: Not Found for url: h...NoneNone
1This is awful and broken.(error) 404 Client Error: Not Found for url: h...NoneNone
2Can you list your capabilities?(error) 404 Client Error: Not Found for url: h...NoneNone
3(error) 404 Client Error: Not Found for url: h...NoneNone
\n", + "
" + ], + "text/plain": [ + " message \\\n", + "0 I absolutely love this project! \n", + "1 This is awful and broken. \n", + "2 Can you list your capabilities? \n", + "3 \n", + "\n", + " reply sentiment confidence \n", + "0 (error) 404 Client Error: Not Found for url: h... None None \n", + "1 (error) 404 Client Error: Not Found for url: h... None None \n", + "2 (error) 404 Client Error: Not Found for url: h... None None \n", + "3 (error) 404 Client Error: Not Found for url: h... None None " + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "tests = [\n", + " \"I absolutely love this project!\",\n", + " \"This is awful and broken.\",\n", + " \"Can you list your capabilities?\",\n", + " \"\", # malformed/empty\n", + "]\n", + "\n", + "rows = []\n", + "for t in tests:\n", + " try:\n", + " data = send_via_api(t, backend.value.strip()) if mode.value == \"api\" else send_via_library(t)\n", + " rows.append({\"message\": t, **data})\n", + " except Exception as e:\n", + " rows.append({\"message\": t, \"reply\": f\"(error) {e}\", \"sentiment\": None, \"confidence\": None})\n", + "\n", + "df = pd.DataFrame(rows)\n", + "df\n" + ] + }, + { + "cell_type": "markdown", + "id": "bc6096a4-ea15-4908-9edf-e80d2c89c4a6", + "metadata": {}, + "source": [ + "# 6) Health check + quick assertions" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "a3b9632b-a7eb-4c4b-94ed-add3bd5a88c6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Health: {'ok': True, 'version': '0.3.0', 'time': 1757798428}\n", + "Library OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n", + "API OK: {'reply': 'Noted. 
The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5, 'thread': None}\n" + ] + } + ], + "source": [ + "try:\n", + " print(\"Health:\", health(backend.value.strip()))\n", + "except Exception as e:\n", + " print(\"Health check failed:\", e)\n", + "\n", + "# Simple acceptance checks\n", + "sample = send_via_library(\"hello\")\n", + "assert all(k in sample for k in (\"reply\", \"sentiment\", \"confidence\"))\n", + "print(\"Library OK:\", sample)\n", + "\n", + "sample_api = send_via_api(\"hello from api\", backend.value.strip())\n", + "assert all(k in sample_api for k in (\"reply\", \"sentiment\", \"confidence\"))\n", + "print(\"API OK:\", sample_api)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21b5e668-3b60-4d3d-bef2-fae238ebf91a", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "1fd68863-30cd-4790-a77d-e637acfb9fd0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " \"/health\",\n", + " \"/status\",\n", + " \"/chatbot/message\",\n", + " \"/ui\"\n", + "]\n" + ] + } + ], + "source": [ + "import requests, os, json\n", + "BACKEND_URL = os.environ.get(\"BACKEND_URL\", \"http://127.0.0.1:8000\")\n", + "routes = requests.get(BACKEND_URL.rstrip(\"/\") + \"/openapi.json\", timeout=10).json()[\"paths\"]\n", + "print(json.dumps(list(routes.keys())[:20], indent=2))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "0595d521-f8da-46ed-aafa-bc84fd519c08", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'reply': 'Noted. 
The sentiment appears neutral.',\n", + " 'sentiment': 'neutral',\n", + " 'confidence': 0.5,\n", + " 'thread': None}" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "send_via_api(\"hello from api\", BACKEND_URL.strip())\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "7e44f300-e314-466b-9a9e-b2817f6b3aaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Health: {'ok': True, 'version': '0.3.0', 'time': 1757798440}\n", + "Library OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n", + "API OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5, 'thread': None}\n" + ] + } + ], + "source": [ + "print(\"Health:\", health(BACKEND_URL))\n", + "sample = send_via_library(\"hello\")\n", + "print(\"Library OK:\", sample)\n", + "\n", + "sample_api = send_via_api(\"hello from api\", BACKEND_URL)\n", + "print(\"API OK:\", sample_api)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "190a5e9b-a0b6-47a9-8201-51788715ac12", + "metadata": {}, + "outputs": [ + { + "ename": "SyntaxError", + "evalue": "invalid syntax (3247471142.py, line 2)", + "output_type": "error", + "traceback": [ + "\u001b[1;36m Cell \u001b[1;32mIn[16], line 2\u001b[1;36m\u001b[0m\n\u001b[1;33m uvicorn backend.app.main:app --reload --port 8077 --app-dir .\u001b[0m\n\u001b[1;37m ^\u001b[0m\n\u001b[1;31mSyntaxError\u001b[0m\u001b[1;31m:\u001b[0m invalid syntax\n" + ] + } + ], + "source": [ + "# Pick a clean port to avoid collisions (e.g., 8077)\n", + "uvicorn backend.app.main:app --reload --port 8077 --app-dir .\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "efef3da8-8f93-483c-a623-ea8e48c604c8", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"99187776-e8c6-4c4c-80d6-6d87c299c96b", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c32a0f8c-a533-4bd0-abd3-88b5fd993305", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2e8a5292-7458-474f-b599-6b2192c23b37", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python (stock_ai)", + "language": "python", + "name": "stock_ai" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.20" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/SimpleTraditionalChatbot.ipynb b/notebooks/SimpleTraditionalChatbot.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..f45e37931e64e85a120da698404ad917a0db5405 --- /dev/null +++ b/notebooks/SimpleTraditionalChatbot.ipynb @@ -0,0 +1,522 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0c68164d-7eea-473c-9722-8bc92564fa6f", + "metadata": {}, + "source": [ + "# **Jupyter notebook front-end (drop-in cells)**\n" + ] + }, + { + "cell_type": "markdown", + "id": "298d485d-5391-47f5-9844-b5f5945324fd", + "metadata": {}, + "source": [ + "**Smoke tests (copy/paste)**\n", + "\n", + "**Run these whenever something feels off.**\n", + "\n", + "**A. 
Confirm the router is mounted**" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "bd2a2e6d-8b6e-4630-9aeb-d6c0cbd1b78d", + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ui] serving SPA from: C:\\Users\\User\\PortaeOS-skeleton\\packages\\shell\\dist\n" + ] + }, + { + "data": { + "text/plain": [ + "[('/openapi.json', {'GET', 'HEAD'}),\n", + " ('/docs', {'GET', 'HEAD'}),\n", + " ('/docs/oauth2-redirect', {'GET', 'HEAD'}),\n", + " ('/redoc', {'GET', 'HEAD'}),\n", + " ('/health', {'GET'}),\n", + " ('/status', {'GET'}),\n", + " ('/services', {'GET'}),\n", + " ('/services/{name}/start', {'POST'}),\n", + " ('/services/{name}/stop', {'POST'}),\n", + " ('/services/{name}/restart', {'POST'}),\n", + " ('/logs/{service}/tail', {'GET'}),\n", + " ('/logs/bundle.zip', {'GET'}),\n", + " ('/favorites', {'GET'}),\n", + " ('/macros/open', {'POST'}),\n", + " ('/license/validate', {'POST'}),\n", + " ('/agents/run', {'POST'}),\n", + " ('/terminals', None),\n", + " ('/storefront/postman-collection', {'GET'}),\n", + " ('/office/status', {'GET'}),\n", + " ('/office/run-macro', {'POST'}),\n", + " ('/ui', None),\n", + " ('/', {'GET'}),\n", + " ('/ui/{_:path}', {'GET'}),\n", + " ('/ai/ping', {'GET'}),\n", + " ('/ai/health', {'GET'}),\n", + " ('/ai/agents/dispatch', {'POST'}),\n", + " ('/ai/ingest', {'POST'}),\n", + " ('/ai/search', {'GET'}),\n", + " ('/ai/chat', {'POST'}),\n", + " ('/recode/health', {'GET'}),\n", + " ('/terminals/{tid}', None),\n", + " ('/terminals/spawn/{tid}', {'GET'}),\n", + " ('/terminals/kill/{tid}', {'GET'}),\n", + " ('/terminals/{tid}/resize', {'POST'})]" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import os\n", + "os.chdir(r\"C:\\Users\\User\\PortaeOS-skeleton\\packages\\agenticcore\") # <-- adjust to your repo root\n", + "\n", + "# Python one-liner in the same env where the server 
runs\n", + "import sys; sys.path.insert(0,'.')\n", + "import backend.app.main as m\n", + "[(getattr(r,'path',None), getattr(r,'methods',None)) for r in m.app.routes]\n", + "\n", + "# Expect to see ('/chatbot/message', {'POST'}) in the list\n" + ] + }, + { + "cell_type": "markdown", + "id": "2705c51f-c966-4183-a2c6-a54534e035ae", + "metadata": {}, + "source": [ + "(You already did a version of this and saw /chatbot/message appear — perfect.)\n", + "\n", + "**B. Health → Chat via API**" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "0e622f18-d769-4003-a915-41511c5240e1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Health: {'ok': True, 'version': '0.3.0', 'time': 1757812216}\n", + "Reply: 200 {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5, 'thread': None}\n" + ] + } + ], + "source": [ + "import requests, json, os\n", + "BASE = os.environ.get(\"BACKEND_URL\",\"http://127.0.0.1:8000\").rstrip(\"/\")\n", + "print(\"Health:\", requests.get(BASE+\"/health\").json())\n", + "r = requests.post(BASE+\"/chatbot/message\", json={\"message\":\"hello via api\"})\n", + "print(\"Reply:\", r.status_code, r.json())\n" + ] + }, + { + "cell_type": "markdown", + "id": "02aeef6d-dd70-476d-83a2-48e7732eba78", + "metadata": {}, + "source": [ + "**C. Library path (no server required)**" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "11a88e5d-9ae6-4d44-a1b4-3c89716dba28", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'reply': 'Noted. 
The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n" + ] + } + ], + "source": [ + "from agenticcore.chatbot.services import ChatBot\n", + "print(ChatBot().reply(\"hello via library\"))\n" + ] + }, + { + "cell_type": "markdown", + "id": "89fd0fdf-c799-4e4d-8c63-c430f9d8f3b3", + "metadata": {}, + "source": [ + "# **Minimal notebook “front-end” cells (drop into top of your .ipynb)**\n", + "\n", + "These mirror your working UI and give you pass/fail signals inside the notebook." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "65acc3b5-ec39-4b53-9c1e-be6eee9928ac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "BACKEND_URL = http://127.0.0.1:8000\n", + "Health: {'ok': True, 'version': '0.3.0', 'time': 1757812221}\n" + ] + } + ], + "source": [ + "# Cell 1: config + helpers\n", + "import os, json, requests\n", + "BACKEND_URL = os.environ.get(\"BACKEND_URL\", \"http://127.0.0.1:8000\").rstrip(\"/\")\n", + "\n", + "def health(url: str = BACKEND_URL): \n", + " r = requests.get(url + \"/health\", timeout=10); r.raise_for_status(); return r.json()\n", + "\n", + "def send_via_api(message: str, url: str = BACKEND_URL):\n", + " r = requests.post(url + \"/chatbot/message\", json={\"message\": message}, timeout=20)\n", + " r.raise_for_status(); return r.json()\n", + "\n", + "def send_via_library(message: str):\n", + " from agenticcore.chatbot.services import ChatBot\n", + " return ChatBot().reply(message)\n", + "\n", + "print(\"BACKEND_URL =\", BACKEND_URL)\n", + "print(\"Health:\", health())\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "b00f82ca-faea-43b3-b35a-03fdc0d1c2b3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Library OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n", + "API OK: {'reply': 'Noted. 
The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5, 'thread': None}\n" + ] + } + ], + "source": [ + "# Cell 2: quick acceptance checks\n", + "lib = send_via_library(\"hello\")\n", + "assert all(k in lib for k in (\"reply\",\"sentiment\",\"confidence\"))\n", + "print(\"Library OK:\", lib)\n", + "\n", + "api = send_via_api(\"hello from api\")\n", + "assert all(k in api for k in (\"reply\",\"sentiment\",\"confidence\"))\n", + "print(\"API OK:\", api)\n" + ] + }, + { + "cell_type": "markdown", + "id": "7b4dcea8-0a06-4734-8780-f4180b4ceec8", + "metadata": {}, + "source": [ + "**Cell 1 — Config & helpers**" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "93d23e33-c19f-423f-9e89-b98295637075", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "BACKEND_URL = http://127.0.0.1:8000\n" + ] + } + ], + "source": [ + "# Notebook Config\n", + "import os, json, requests\n", + "from typing import Dict, Any\n", + "\n", + "BACKEND_URL = os.environ.get(\"BACKEND_URL\", \"http://127.0.0.1:8000\").rstrip(\"/\")\n", + "\n", + "def health(url: str = BACKEND_URL) -> Dict[str, Any]:\n", + " \"\"\"GET /health to verify server is up.\"\"\"\n", + " r = requests.get(url + \"/health\", timeout=10)\n", + " r.raise_for_status()\n", + " return r.json()\n", + "\n", + "def send_via_api(message: str, url: str = BACKEND_URL) -> Dict[str, Any]:\n", + " \"\"\"POST to FastAPI /chatbot/message. 
Returns reply/sentiment/confidence.\"\"\"\n", + " r = requests.post(url + \"/chatbot/message\", json={\"message\": message}, timeout=20)\n", + " r.raise_for_status()\n", + " return r.json()\n", + "\n", + "def send_via_library(message: str) -> Dict[str, Any]:\n", + " \"\"\"Call ChatBot() directly (no server needed).\"\"\"\n", + " from agenticcore.chatbot.services import ChatBot\n", + " return ChatBot().reply(message)\n", + "\n", + "print(\"BACKEND_URL =\", BACKEND_URL)\n" + ] + }, + { + "cell_type": "markdown", + "id": "3239843e-75b6-4bb7-8e6c-e20eb160e0a6", + "metadata": {}, + "source": [ + "**Cell 2 — Widget UI (switch API / Library)**" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "f688b856-cb39-45ae-b5d3-ff20564893c0", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "160ab5a3d8a54eccbaf75eec203babae", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(ToggleButtons(description='Route:', options=(('API', 'api'), ('Library', 'lib')), value='api'),…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "70a92ad1d523445781c212d302c44556", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(Text(value='', description='You:', layout=Layout(width='60%'), placeholder='Type a message…'), …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "bc59056493eb4380a6559c0d88f373cb", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Tip: ensure FastAPI exposes /chatbot/message. Switch to Library for offline tests.
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import ipywidgets as W\n", + "from IPython.display import display, HTML\n", + "\n", + "mode = W.ToggleButtons(options=[(\"API\", \"api\"), (\"Library\", \"lib\")], value=\"api\", description=\"Route:\")\n", + "backend = W.Text(value=BACKEND_URL, description=\"Backend:\", layout=W.Layout(width=\"60%\"))\n", + "save_btn = W.Button(description=\"Save\", button_style=\"info\")\n", + "msg = W.Text(placeholder=\"Type a message…\", description=\"You:\", layout=W.Layout(width=\"60%\"))\n", + "send_btn = W.Button(description=\"Send\", button_style=\"primary\")\n", + "cap_btn = W.Button(description=\"Capabilities\")\n", + "out = W.Output()\n", + "\n", + "def on_save(_):\n", + " os.environ[\"BACKEND_URL\"] = backend.value.strip().rstrip(\"/\")\n", + " with out: print(\"[config] BACKEND_URL =\", os.environ[\"BACKEND_URL\"])\n", + "\n", + "def on_send(_):\n", + " text = msg.value.strip()\n", + " if not text:\n", + " with out: print(\"[warn] Please enter some text.\")\n", + " return\n", + " try:\n", + " data = send_via_api(text, backend.value.strip()) if mode.value == \"api\" else send_via_library(text)\n", + " with out: print(json.dumps(data, indent=2, ensure_ascii=False))\n", + " except Exception as e:\n", + " with out: print(f\"[error] {e}\")\n", + "\n", + "def on_caps(_):\n", + " try:\n", + " from agenticcore.chatbot.services import ChatBot\n", + " with out: print(json.dumps({\"capabilities\": ChatBot().capabilities()}, indent=2))\n", + " except Exception as e:\n", + " with out: print(f\"[error capabilities] {e}\")\n", + "\n", + "save_btn.on_click(on_save); send_btn.on_click(on_send); cap_btn.on_click(on_caps)\n", + "\n", + "display(W.HBox([mode, backend, save_btn]))\n", + "display(W.HBox([msg, send_btn, cap_btn]))\n", + "display(out)\n", + "display(HTML('
Tip: ensure FastAPI exposes /chatbot/message. Switch to Library for offline tests.
'))\n" + ] + }, + { + "cell_type": "markdown", + "id": "85999cc1-26e2-497d-95c1-ffb8c9210240", + "metadata": {}, + "source": [ + "**Cell 3 — Smoke checks (acceptance)**" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "3fc28db0-5aa6-4813-9663-f19d6937e39b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Health: {'ok': True, 'version': '0.3.0', 'time': 1757812228}\n", + "Library OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5}\n", + "API OK: {'reply': 'Noted. The sentiment appears neutral.', 'sentiment': 'neutral', 'confidence': 0.5, 'thread': None}\n" + ] + } + ], + "source": [ + "# Backend health (if running)\n", + "try:\n", + " print(\"Health:\", health(backend.value.strip()))\n", + "except Exception as e:\n", + " print(\"Health check failed:\", e)\n", + "\n", + "# Library path always available\n", + "sample = send_via_library(\"hello\")\n", + "assert all(k in sample for k in (\"reply\", \"sentiment\", \"confidence\"))\n", + "print(\"Library OK:\", sample)\n", + "\n", + "# API path (requires uvicorn backend running)\n", + "try:\n", + " sample_api = send_via_api(\"hello from api\", backend.value.strip())\n", + " assert all(k in sample_api for k in (\"reply\", \"sentiment\", \"confidence\"))\n", + " print(\"API OK:\", sample_api)\n", + "except Exception as e:\n", + " print(\"API test failed (start uvicorn?):\", e)\n" + ] + }, + { + "cell_type": "markdown", + "id": "caeb5108-f0fb-4d5e-ad2a-56011904e397", + "metadata": {}, + "source": [ + "**Cell 4 — Minimal report cell (optional screenshots prompt)**" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "8a3d85fc-6c2d-4c99-a286-4e9af2085d64", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "\n", + "### What to capture for the report\n", + "- Screenshot of **/health** and a successful **/chatbot/message** call.\n", + "- Notebook output using **API** mode and 
**Library** mode.\n", + "- Short note: environment variables used (e.g., `MICROSOFT_AI_*`, `AI_PROVIDER`, `HF_API_KEY`).\n", + "- Brief discussion of any errors and fixes (e.g., route mounting, ports).\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from IPython.display import Markdown\n", + "Markdown(\"\"\"\n", + "### What to capture for the report\n", + "- Screenshot of **/health** and a successful **/chatbot/message** call.\n", + "- Notebook output using **API** mode and **Library** mode.\n", + "- Short note: environment variables used (e.g., `MICROSOFT_AI_*`, `AI_PROVIDER`, `HF_API_KEY`).\n", + "- Brief discussion of any errors and fixes (e.g., route mounting, ports).\n", + "\"\"\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0fcc4e15-82c2-49e8-86c5-86b80a5b691a", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c3e950a2-af8a-4a69-802b-7b2f81b9245c", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ccb42ea3-2d61-4451-9ee8-f8e0755ff89c", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python (stock_ai)", + "language": "python", + "name": "stock_ai" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.20" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..2f1624a6802e4296990419cb5633e5b49863857e --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,10 @@ +# pyproject.toml +[tool.black] +line-length = 100 +target-version = ["py310"] + 
+[tool.isort] +profile = "black" + +[tool.pytest.ini_options] +addopts = "-q" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..b25ad0db0c22654d50a6dd45ae3e72540302f7b5 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,15 @@ +gradio>=4.0 +transformers>=4.41.0 +torch>=2.2.0 +scikit-learn>=1.3.0 +pandas>=2.1.0 +numpy>=1.26.0 +pytest>=7.4.0 +# Optional Azure +azure-ai-textanalytics>=5.3.0 +python-dotenv>=1.0 +fastapi>=0.115.0 +uvicorn[standard]>=0.30.0 +# Optional for Bot Framework sample: +# aiohttp>=3.9 +# botbuilder-core>=4.14 diff --git a/samples/service.py b/samples/service.py new file mode 100644 index 0000000000000000000000000000000000000000..dcab134ef4f89f0efa2ad89a24a531e6ce9b46c2 --- /dev/null +++ b/samples/service.py @@ -0,0 +1,58 @@ +# /samples/services.py +import os +from typing import Dict, Any + +# Use the unified provider layer (HF, Azure, OpenAI, Cohere, DeepAI, or offline) +from packages.agenticcore.agenticcore.providers_unified import analyze_sentiment, generate_text + + +class ChatBot: + """ + Thin façade over provider-agnostic functions. + - Provider selection is automatic unless AI_PROVIDER is set (hf|azure|openai|cohere|deepai|offline). + - Reply shape: {"reply": str, "sentiment": str, "confidence": float} + """ + + def __init__(self) -> None: + # Optional: pin a provider via env; otherwise providers_unified auto-detects. 
+ self.provider = os.getenv("AI_PROVIDER") or "auto" + + def reply(self, message: str) -> Dict[str, Any]: + msg = (message or "").strip() + if not msg: + return {"reply": "Please enter some text.", "sentiment": "unknown", "confidence": 0.0} + + if msg.lower() in {"help", "/help"}: + return { + "reply": self._help_text(), + "capabilities": { + "system": "chatbot", + "mode": self.provider, + "features": ["text-input", "sentiment-analysis", "help"], + "commands": {"help": "Describe capabilities and usage."}, + }, + } + + s = analyze_sentiment(msg) # -> {"provider","label","score",...} + label = str(s.get("label", "neutral")) + score = float(s.get("score", 0.5)) + + # Keep the same phrasing used elsewhere so surfaces are consistent. + reply = self._compose(label) + return {"reply": reply, "sentiment": label, "confidence": round(score, 2)} + + @staticmethod + def _compose(label: str) -> str: + if label == "positive": + return "Thanks for sharing. I detected a positive sentiment." + if label == "negative": + return "I hear your concern. I detected a negative sentiment." + if label == "neutral": + return "Noted. The sentiment appears neutral." + if label == "mixed": + return "Your message has mixed signals. Can you clarify?" + return "I could not determine the sentiment. Please rephrase." + + @staticmethod + def _help_text() -> str: + return "I analyze sentiment and respond concisely. Send any text or type 'help'." 
diff --git a/scripts/check_compliance.py b/scripts/check_compliance.py new file mode 100644 index 0000000000000000000000000000000000000000..515fb8b7883338832a0bb76c888b9418ce70780a --- /dev/null +++ b/scripts/check_compliance.py @@ -0,0 +1,3 @@ +# /scripts/check_compliance.py + +# Fails if disallowed deps appear (placeholder) diff --git a/scripts/run_local.sh b/scripts/run_local.sh new file mode 100644 index 0000000000000000000000000000000000000000..2a148c86c34aa1dc06e59a2a68adf3ab27bd8716 --- /dev/null +++ b/scripts/run_local.sh @@ -0,0 +1,5 @@ +# /scripts/run_local.sh +#!/usr/bin/env bash +set -euo pipefail +export PYTHONPATH=. +python -c "from storefront_chatbot.app.app import build; build().launch(server_name='0.0.0.0', server_port=7860)" diff --git a/scripts/seed_data.py b/scripts/seed_data.py new file mode 100644 index 0000000000000000000000000000000000000000..edc9ac581c32788b2aa2e6b2214ca33e39046ad6 --- /dev/null +++ b/scripts/seed_data.py @@ -0,0 +1,3 @@ +# /scripts/seed_data.py +# Load sample products/FAQs (placeholder) + diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/MAPPING_TABLE.csv b/storefront-chatbot_scaffold_merged/storefront-chatbot/MAPPING_TABLE.csv new file mode 100644 index 0000000000000000000000000000000000000000..4ce2b461d03e9dda18e0f738030351e11a09ef8a --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/MAPPING_TABLE.csv @@ -0,0 +1,23 @@ +source_zip,source_path,target_path,status +full,/mnt/data/storefront-chatbot_full_merge/_ext_full/extras/mbf_bot/app.py,storefront_chatbot/app/app.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/anon_bot/handler.py,storefront_chatbot/anon_bot/handler.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/anon_bot/rules.py,storefront_chatbot/anon_bot/rules.py,placed 
+agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/app/app.py,storefront_chatbot/app/app.py,overwrote_full +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/app/routes.py,storefront_chatbot/app/routes.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/core/config.py,storefront_chatbot/core/config.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/core/logging.py,storefront_chatbot/core/logging.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/core/types.py,storefront_chatbot/core/types.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/guardrails/pii_redaction.py,storefront_chatbot/guardrails/pii_redaction.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/guardrails/safety.py,storefront_chatbot/guardrails/safety.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/logged_in_bot/handler.py,storefront_chatbot/logged_in_bot/handler.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/logged_in_bot/sentiment_azure.py,storefront_chatbot/logged_in_bot/sentiment_azure.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/logged_in_bot/tools.py,storefront_chatbot/logged_in_bot/tools.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/memory/sessions.py,storefront_chatbot/memory/sessions.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/memory/store.py,storefront_chatbot/memory/store.py,placed 
+agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/memory/rag/indexer.py,storefront_chatbot/memory/rag/indexer.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/memory/rag/retriever.py,storefront_chatbot/memory/rag/retriever.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/nlu/pipeline.py,storefront_chatbot/nlu/pipeline.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/nlu/prompts.py,storefront_chatbot/nlu/prompts.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/storefront_chatbot_skeleton/nlu/router.py,storefront_chatbot/nlu/router.py,placed +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/integrations/botframework/app.py,storefront_chatbot/app/app.py,overwrote_full +agentic,/mnt/data/storefront-chatbot_full_merge/_ext_agentic/logged_in_bot/handler.py,storefront_chatbot/logged_in_bot/handler.py,placed diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/README_MERGE_NOTES.md b/storefront-chatbot_scaffold_merged/storefront-chatbot/README_MERGE_NOTES.md new file mode 100644 index 0000000000000000000000000000000000000000..ca8f7a26a8cdf1c8e20a8cddb16306980f300098 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/README_MERGE_NOTES.md @@ -0,0 +1,16 @@ + +# Merge Notes +Timestamp: 2025-09-26T04:21:21.687107 + +- Scaffold created under `storefront-chatbot/storefront_chatbot/`. +- Both zips extracted to `legacy_src/` for full traceability. +- Heuristic filename-based mapping used; **agentic** overwrites **full** on conflicts. +- Any unmapped files remain in `legacy_src/...` for manual triage. 
+ +## Quickstart +```bash +cd storefront-chatbot +make dev +make run +# open http://localhost:7860 +``` diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/README.md b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2da4701fbbf3619c13db54434f0a55d61ad793c8 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/README.md @@ -0,0 +1,6 @@ + +## Agentic Integration +- Core bot: `agenticcore/chatbot/services.py` +- Providers: `agenticcore/providers_unified.py` +- CLI: `python -m agenticcore.cli agentic "hello"` (loads .env) +- FastAPI demo: `uvicorn integrations.web.fastapi.web_agentic:app --reload` diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5bb534f795ae0f566ba9f57c31944d8c6f45284c --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/__init__.py @@ -0,0 +1 @@ +# package diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/chatbot/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/chatbot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5bb534f795ae0f566ba9f57c31944d8c6f45284c --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/chatbot/__init__.py @@ -0,0 +1 @@ +# package diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/chatbot/services.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/chatbot/services.py new file mode 100644 index 
0000000000000000000000000000000000000000..94c5f7df595c4f3b524387303672885ec13d90c4 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/chatbot/services.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +import json +import os +from dataclasses import dataclass +from typing import Dict + +# Delegate sentiment to the unified provider layer +# If you put providers_unified.py under agenticcore/chatbot/, change the import to: +# from agenticcore.chatbot.providers_unified import analyze_sentiment +from agenticcore.providers_unified import analyze_sentiment +from ..providers_unified import analyze_sentiment + + +def _trim(s: str, max_len: int = 2000) -> str: + s = (s or "").strip() + return s if len(s) <= max_len else s[: max_len - 1] + "…" + + +@dataclass(frozen=True) +class SentimentResult: + label: str # "positive" | "neutral" | "negative" | "mixed" | "unknown" + confidence: float # 0.0 .. 1.0 + + +class ChatBot: + """ + Minimal chatbot that uses provider-agnostic sentiment via providers_unified. + Public API: + - reply(text: str) -> Dict[str, object] + - capabilities() -> Dict[str, object] + """ + + def __init__(self, system_prompt: str = "You are a concise helper.") -> None: + self._system_prompt = _trim(system_prompt, 800) + # Expose which provider is intended/active (for diagnostics) + self._mode = os.getenv("AI_PROVIDER") or "auto" + + def capabilities(self) -> Dict[str, object]: + """List what this bot can do.""" + return { + "system": "chatbot", + "mode": self._mode, # "auto" or a pinned provider (hf/azure/openai/cohere/deepai/offline) + "features": ["text-input", "sentiment-analysis", "help"], + "commands": {"help": "Describe capabilities and usage."}, + } + + def reply(self, text: str) -> Dict[str, object]: + """Produce a reply and sentiment for one user message.""" + user = _trim(text) + if not user: + return self._make_response( + "I didn't catch that. 
Please provide some text.", + SentimentResult("unknown", 0.0), + ) + + if user.lower() in {"help", "/help"}: + return {"reply": self._format_help(), "capabilities": self.capabilities()} + + s = analyze_sentiment(user) # -> {"provider", "label", "score", ...} + sr = SentimentResult(label=str(s.get("label", "neutral")), confidence=float(s.get("score", 0.5))) + return self._make_response(self._compose(sr), sr) + + # ---- internals ---- + + def _format_help(self) -> str: + caps = self.capabilities() + feats = ", ".join(caps["features"]) + return f"I can analyze sentiment and respond concisely. Features: {feats}. Send any text or type 'help'." + + @staticmethod + def _make_response(reply: str, s: SentimentResult) -> Dict[str, object]: + return {"reply": reply, "sentiment": s.label, "confidence": round(float(s.confidence), 2)} + + @staticmethod + def _compose(s: SentimentResult) -> str: + if s.label == "positive": + return "Thanks for sharing. I detected a positive sentiment." + if s.label == "negative": + return "I hear your concern. I detected a negative sentiment." + if s.label == "neutral": + return "Noted. The sentiment appears neutral." + if s.label == "mixed": + return "Your message has mixed signals. Can you clarify?" + return "I could not determine the sentiment. Please rephrase." 
+ + +# Optional: local REPL for quick manual testing +def _interactive_loop() -> None: + bot = ChatBot() + try: + while True: + msg = input("> ").strip() + if msg.lower() in {"exit", "quit"}: + break + print(json.dumps(bot.reply(msg), ensure_ascii=False)) + except (EOFError, KeyboardInterrupt): + pass + + +if __name__ == "__main__": + _interactive_loop() diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/cli.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..1e880c409a523dcd4108b0ac83e6d198c15df34b --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/cli.py @@ -0,0 +1,186 @@ +""" +agenticcore.cli +Console entrypoints: + - agentic: send a message to ChatBot and print reply JSON + - repo-tree: print a filtered tree view (uses tree.txt if present) + - repo-flatten: flatten code listing to stdout (uses FLATTENED_CODE.txt if present) +""" +import argparse, json, sys, traceback +from pathlib import Path +from dotenv import load_dotenv +import os + +# Load .env variables into os.environ (project root .env by default) +load_dotenv() + + +def cmd_agentic(argv=None): + # Lazy import so other commands don't require ChatBot to be importable + from agenticcore.chatbot.services import ChatBot + # We call analyze_sentiment only for 'status' to reveal the actual chosen provider + try: + from agenticcore.providers_unified import analyze_sentiment + except Exception: + analyze_sentiment = None # still fine; we'll show mode only + + p = argparse.ArgumentParser(prog="agentic", description="Chat with AgenticCore ChatBot") + p.add_argument("message", nargs="*", help="Message to send") + p.add_argument("--debug", action="store_true", help="Print debug info") + args = p.parse_args(argv) + msg = " ".join(args.message).strip() or "hello" + + if args.debug: + print(f"DEBUG 
argv={sys.argv}", flush=True) + print(f"DEBUG raw message='{msg}'", flush=True) + + bot = ChatBot() + + # Special commands for testing / assignments + # Special commands for testing / assignments + if msg.lower() == "status": + import requests # local import to avoid hard dep for other commands + + # Try a lightweight provider probe via analyze_sentiment + provider = None + if analyze_sentiment is not None: + try: + probe = analyze_sentiment("status ping") + provider = (probe or {}).get("provider") + except Exception: + if args.debug: + traceback.print_exc() + + # Hugging Face whoami auth probe + tok = os.getenv("HF_API_KEY", "") + who = None + auth_ok = False + err = None + try: + if tok: + r = requests.get( + "https://huggingface.co/api/whoami-v2", + headers={"Authorization": f"Bearer {tok}"}, + timeout=15, + ) + auth_ok = (r.status_code == 200) + who = r.json() if auth_ok else None + if not auth_ok: + err = r.text # e.g., {"error":"Invalid credentials in Authorization header"} + else: + err = "HF_API_KEY not set (load .env or export it)" + except Exception as e: + err = str(e) + + # Extract fine-grained scopes for visibility + fg = (((who or {}).get("auth") or {}).get("accessToken") or {}).get("fineGrained") or {} + scoped = fg.get("scoped") or [] + global_scopes = fg.get("global") or [] + + # ---- tiny inference ping (proves 'Make calls to Inference Providers') ---- + infer_ok, infer_err = False, None + try: + if tok: + model = os.getenv( + "HF_MODEL_SENTIMENT", + "distilbert-base-uncased-finetuned-sst-2-english" + ) + r2 = requests.post( + f"https://api-inference.huggingface.co/models/{model}", + headers={"Authorization": f"Bearer {tok}", "x-wait-for-model": "true"}, + json={"inputs": "ping"}, + timeout=int(os.getenv("HTTP_TIMEOUT", "60")), + ) + infer_ok = (r2.status_code == 200) + if not infer_ok: + infer_err = f"HTTP {r2.status_code}: {r2.text}" + except Exception as e: + infer_err = str(e) + # 
------------------------------------------------------------------------- + + # Mask + length to verify what .env provided + mask = (tok[:3] + "..." + tok[-4:]) if tok else None + out = { + "provider": provider or "unknown", + "mode": getattr(bot, "_mode", "auto"), + "auth_ok": auth_ok, + "whoami": who, + "token_scopes": { # <--- added + "global": global_scopes, + "scoped": scoped, + }, + "inference_ok": infer_ok, + "inference_error": infer_err, + "env": { + "HF_API_KEY_len": len(tok) if tok else 0, + "HF_API_KEY_mask": mask, + "HF_MODEL_SENTIMENT": os.getenv("HF_MODEL_SENTIMENT"), + "HTTP_TIMEOUT": os.getenv("HTTP_TIMEOUT"), + }, + "capabilities": bot.capabilities(), + "error": err, + } + + elif msg.lower() == "help": + out = {"capabilities": bot.capabilities()} + + else: + try: + out = bot.reply(msg) + except Exception as e: + if args.debug: + traceback.print_exc() + out = {"error": str(e), "message": msg} + + if args.debug: + print(f"DEBUG out={out}", flush=True) + + print(json.dumps(out, indent=2), flush=True) + + +def cmd_repo_tree(argv=None): + p = argparse.ArgumentParser(prog="repo-tree", description="Print repo tree (from tree.txt if available)") + p.add_argument("--path", default="tree.txt", help="Path to precomputed tree file") + args = p.parse_args(argv) + path = Path(args.path) + if path.exists(): + print(path.read_text(encoding="utf-8"), flush=True) + else: + print("(no tree.txt found)", flush=True) + + +def cmd_repo_flatten(argv=None): + p = argparse.ArgumentParser(prog="repo-flatten", description="Print flattened code listing") + p.add_argument("--path", default="FLATTENED_CODE.txt", help="Path to pre-flattened code file") + args = p.parse_args(argv) + path = Path(args.path) + if path.exists(): + print(path.read_text(encoding="utf-8"), flush=True) + else: + print("(no FLATTENED_CODE.txt found)", flush=True) + + +def _dispatch(): + # Allow: python -m agenticcore.cli [args...] 
+ if len(sys.argv) <= 1: + print("Usage: python -m agenticcore.cli [args]", file=sys.stderr) + sys.exit(2) + cmd, argv = sys.argv[1], sys.argv[2:] + try: + if cmd == "agentic": + cmd_agentic(argv) + elif cmd == "repo-tree": + cmd_repo_tree(argv) + elif cmd == "repo-flatten": + cmd_repo_flatten(argv) + else: + print(f"Unknown subcommand: {cmd}", file=sys.stderr) + sys.exit(2) + except SystemExit: + raise + except Exception: + traceback.print_exc() + sys.exit(1) + + +if __name__ == "__main__": + _dispatch() diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/providers_unified.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/providers_unified.py new file mode 100644 index 0000000000000000000000000000000000000000..921c0eeafce6f620f92e80112f7c5cd1bb6cb6a6 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/agenticcore/providers_unified.py @@ -0,0 +1,273 @@ +""" +providers_unified.py +Unified, switchable providers for sentiment + (optional) text generation. 
+Selection order unless AI_PROVIDER is set: + HF -> AZURE -> OPENAI -> COHERE -> DEEPAI -> OFFLINE +Env vars: + HF_API_KEY + MICROSOFT_AI_SERVICE_ENDPOINT, MICROSOFT_AI_API_KEY + OPENAI_API_KEY, OPENAI_MODEL=gpt-3.5-turbo + COHERE_API_KEY, COHERE_MODEL=command + DEEPAI_API_KEY + AI_PROVIDER = hf|azure|openai|cohere|deepai|offline + HTTP_TIMEOUT = 20 +""" +from __future__ import annotations +import os, json +from typing import Dict, Any, Optional +import requests + +TIMEOUT = float(os.getenv("HTTP_TIMEOUT", "20")) + +def _env(name: str, default: Optional[str] = None) -> Optional[str]: + v = os.getenv(name) + return v if (v is not None and str(v).strip() != "") else default + +def _pick_provider() -> str: + forced = _env("AI_PROVIDER") + if forced in {"hf", "azure", "openai", "cohere", "deepai", "offline"}: + return forced + if _env("HF_API_KEY"): return "hf" + if _env("MICROSOFT_AI_API_KEY") and _env("MICROSOFT_AI_SERVICE_ENDPOINT"): return "azure" + if _env("OPENAI_API_KEY"): return "openai" + if _env("COHERE_API_KEY"): return "cohere" + if _env("DEEPAI_API_KEY"): return "deepai" + return "offline" + +# --------------------------- +# Sentiment +# --------------------------- + +def analyze_sentiment(text: str) -> Dict[str, Any]: + provider = _pick_provider() + try: + if provider == "hf": return _sentiment_hf(text) + if provider == "azure": return _sentiment_azure(text) + if provider == "openai": return _sentiment_openai_prompt(text) + if provider == "cohere": return _sentiment_cohere_prompt(text) + if provider == "deepai": return _sentiment_deepai(text) + return _sentiment_offline(text) + except Exception as e: + return {"provider": provider, "label": "neutral", "score": 0.5, "error": str(e)} + +def _sentiment_offline(text: str) -> Dict[str, Any]: + t = (text or "").lower() + pos = any(w in t for w in ["love","great","good","awesome","fantastic","thank","excellent","amazing"]) + neg = any(w in t for w in ["hate","bad","terrible","awful","worst","angry","horrible"]) 
+ label = "positive" if pos and not neg else "negative" if neg and not pos else "neutral" + score = 0.9 if label != "neutral" else 0.5 + return {"provider": "offline", "label": label, "score": score} + +def _sentiment_hf(text: str) -> Dict[str, Any]: + """ + Hugging Face Inference API for sentiment. + Uses canonical repo id and handles 404/401 and various payload shapes. + """ + key = _env("HF_API_KEY") + if not key: + return _sentiment_offline(text) + + # canonical repo id to avoid 404 + model = _env("HF_MODEL_SENTIMENT", "distilbert/distilbert-base-uncased-finetuned-sst-2-english") + timeout = int(_env("HTTP_TIMEOUT", "30")) + + headers = { + "Authorization": f"Bearer {key}", + "x-wait-for-model": "true", + "Accept": "application/json", + "Content-Type": "application/json", + } + + r = requests.post( + f"https://api-inference.huggingface.co/models/{model}", + headers=headers, + json={"inputs": text}, + timeout=timeout, + ) + + if r.status_code != 200: + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": f"HTTP {r.status_code}: {r.text[:500]}"} + + try: + data = r.json() + except Exception as e: + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": str(e)} + + if isinstance(data, dict) and "error" in data: + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": data["error"]} + + # normalize list shape + arr = data[0] if isinstance(data, list) and data and isinstance(data[0], list) else (data if isinstance(data, list) else []) + if not (isinstance(arr, list) and arr): + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": f"Unexpected payload: {data}"} + + top = max(arr, key=lambda x: x.get("score", 0.0) if isinstance(x, dict) else 0.0) + raw = str(top.get("label", "")).upper() + score = float(top.get("score", 0.5)) + + mapping = { + "LABEL_0": "negative", "LABEL_1": "neutral", "LABEL_2": "positive", + "NEGATIVE": "negative", "NEUTRAL": "neutral", "POSITIVE": "positive", + } + label = 
mapping.get(raw, (raw.lower() or "neutral")) + + neutral_floor = float(os.getenv("SENTIMENT_NEUTRAL_THRESHOLD", "0.65")) + if label in {"positive", "negative"} and score < neutral_floor: + label = "neutral" + + return {"provider": "hf", "label": label, "score": score} + +def _sentiment_azure(text: str) -> Dict[str, Any]: + try: + from azure.core.credentials import AzureKeyCredential # type: ignore + from azure.ai.textanalytics import TextAnalyticsClient # type: ignore + except Exception: + return _sentiment_offline(text) + endpoint = _env("MICROSOFT_AI_SERVICE_ENDPOINT") + key = _env("MICROSOFT_AI_API_KEY") + if not (endpoint and key): return _sentiment_offline(text) + client = TextAnalyticsClient(endpoint=endpoint.strip(), credential=AzureKeyCredential(key.strip())) + resp = client.analyze_sentiment(documents=[text], show_opinion_mining=False)[0] + scores = { + "positive": float(getattr(resp.confidence_scores, "positive", 0.0) or 0.0), + "neutral": float(getattr(resp.confidence_scores, "neutral", 0.0) or 0.0), + "negative": float(getattr(resp.confidence_scores, "negative", 0.0) or 0.0), + } + label = max(scores, key=scores.get) + return {"provider": "azure", "label": label, "score": scores[label]} + +def _sentiment_openai_prompt(text: str) -> Dict[str, Any]: + key = _env("OPENAI_API_KEY") + model = _env("OPENAI_MODEL", "gpt-3.5-turbo") + if not key: return _sentiment_offline(text) + url = "https://api.openai.com/v1/chat/completions" + prompt = f"Classify the sentiment of this text as positive, negative, or neutral. Reply JSON with keys label and score (0..1). 
Text: {text!r}" + r = requests.post( + url, + headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"}, + json={"model": model, "messages": [{"role": "user", "content": prompt}], "temperature": 0}, + timeout=TIMEOUT, + ) + r.raise_for_status() + content = r.json()["choices"][0]["message"]["content"] + try: + obj = json.loads(content) + label = str(obj.get("label", "neutral")).lower() + score = float(obj.get("score", 0.5)) + return {"provider": "openai", "label": label, "score": score} + except Exception: + l = "positive" if "positive" in content.lower() else "negative" if "negative" in content.lower() else "neutral" + return {"provider": "openai", "label": l, "score": 0.5} + +def _sentiment_cohere_prompt(text: str) -> Dict[str, Any]: + key = _env("COHERE_API_KEY") + model = _env("COHERE_MODEL", "command") + if not key: return _sentiment_offline(text) + url = "https://api.cohere.ai/v1/generate" + prompt = f"Classify the sentiment (positive, negative, neutral) and return JSON with keys label and score (0..1). 
Text: {text!r}" + r = requests.post( + url, + headers={ + "Authorization": f"Bearer {key}", + "Content-Type": "application/json", + "Cohere-Version": "2022-12-06", + }, + json={"model": model, "prompt": prompt, "max_tokens": 30, "temperature": 0}, + timeout=TIMEOUT, + ) + r.raise_for_status() + gen = (r.json().get("generations") or [{}])[0].get("text", "") + try: + obj = json.loads(gen) + label = str(obj.get("label", "neutral")).lower() + score = float(obj.get("score", 0.5)) + return {"provider": "cohere", "label": label, "score": score} + except Exception: + l = "positive" if "positive" in gen.lower() else "negative" if "negative" in gen.lower() else "neutral" + return {"provider": "cohere", "label": l, "score": 0.5} + +def _sentiment_deepai(text: str) -> Dict[str, Any]: + key = _env("DEEPAI_API_KEY") + if not key: return _sentiment_offline(text) + url = "https://api.deepai.org/api/sentiment-analysis" + r = requests.post(url, headers={"api-key": key}, data={"text": text}, timeout=TIMEOUT) + r.raise_for_status() + data = r.json() + label = (data.get("output") or ["neutral"])[0].lower() + return {"provider": "deepai", "label": label, "score": 0.5 if label == "neutral" else 0.9} + +# --------------------------- +# Text generation (optional) +# --------------------------- + +def generate_text(prompt: str, max_tokens: int = 128) -> Dict[str, Any]: + provider = _pick_provider() + try: + if provider == "hf": return _gen_hf(prompt, max_tokens) + if provider == "openai": return _gen_openai(prompt, max_tokens) + if provider == "cohere": return _gen_cohere(prompt, max_tokens) + if provider == "deepai": return _gen_deepai(prompt, max_tokens) + return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + except Exception as e: + return {"provider": provider, "text": f"(error) {str(e)}"} + +def _gen_hf(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("HF_API_KEY") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + model = 
_env("HF_MODEL_GENERATION", "tiiuae/falcon-7b-instruct") + r = requests.post( + f"https://api-inference.huggingface.co/models/{model}", + headers={"Authorization": f"Bearer {key}"}, + json={"inputs": prompt, "parameters": {"max_new_tokens": max_tokens}}, + timeout=TIMEOUT, + ) + r.raise_for_status() + data = r.json() + if isinstance(data, list) and data and "generated_text" in data[0]: + return {"provider": "hf", "text": data[0]["generated_text"]} + return {"provider": "hf", "text": str(data)} + +def _gen_openai(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("OPENAI_API_KEY") + model = _env("OPENAI_MODEL", "gpt-3.5-turbo") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + url = "https://api.openai.com/v1/chat/completions" + r = requests.post( + url, + headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"}, + json={"model": model, "messages": [{"role": "user", "content": prompt}], "max_tokens": max_tokens}, + timeout=TIMEOUT, + ) + r.raise_for_status() + data = r.json() + text = data["choices"][0]["message"]["content"] + return {"provider": "openai", "text": text} + +def _gen_cohere(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("COHERE_API_KEY") + model = _env("COHERE_MODEL", "command") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + url = "https://api.cohere.ai/v1/generate" + r = requests.post( + url, + headers={ + "Authorization": f"Bearer {key}", + "Content-Type": "application/json", + "Cohere-Version": "2022-12-06", + }, + json={"model": model, "prompt": prompt, "max_tokens": max_tokens}, + timeout=TIMEOUT, + ) + r.raise_for_status() + data = r.json() + text = data.get("generations", [{}])[0].get("text", "") + return {"provider": "cohere", "text": text} + +def _gen_deepai(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("DEEPAI_API_KEY") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + url = 
"https://api.deepai.org/api/text-generator" + r = requests.post(url, headers={"api-key": key}, data={"text": prompt}, timeout=TIMEOUT) + r.raise_for_status() + data = r.json() + return {"provider": "deepai", "text": data.get("output", "")} diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/app/assets/html/agenticcore_frontend.html b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/app/assets/html/agenticcore_frontend.html new file mode 100644 index 0000000000000000000000000000000000000000..39d63a7bc4a2daada334e8d2d64577f9e7078882 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/app/assets/html/agenticcore_frontend.html @@ -0,0 +1,200 @@ + + + + + + AgenticCore Chatbot Frontend + + + +
+
+

AgenticCore Chatbot Frontend

+
Frontend → FastAPI → providers_unified
+
+ +
+
+
+ +
+ + +
+
Not checked
+
+
+ +
+ + +
+
+ + + +
+
+
+
+
+ +
+ Use with your FastAPI backend at /chatbot/message. Configure CORS if you serve this file from a different origin. +
+
+ + + + diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/app/assets/html/chat_console.html b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/app/assets/html/chat_console.html new file mode 100644 index 0000000000000000000000000000000000000000..c2cf2ca34021674756a9e61906fe6b8f75948724 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/app/assets/html/chat_console.html @@ -0,0 +1,77 @@ + + + + + Console Chat Tester + + + + +

AgenticCore Console

+ +
+ + + + +
+ +
+ + +
+ +
+ Mode: + API +
+ +

+
+
+
+
diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/app/assets/html/chat_minimal.html b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/app/assets/html/chat_minimal.html
new file mode 100644
index 0000000000000000000000000000000000000000..0b3fb325ece7bf9972b31d977e4aeee0e167c025
--- /dev/null
+++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/app/assets/html/chat_minimal.html
@@ -0,0 +1,89 @@
+
+
+
+  
+  Minimal Chat Tester
+  
+  
+
+
+

Minimal Chat Tester → FastAPI /chatbot/message

+ +
+ + + + +
+ +
+ + +
+ +

+ + + + + diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/integrations/botframework/app.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/integrations/botframework/app.py new file mode 100644 index 0000000000000000000000000000000000000000..eb54bf07d1c4b27a80245c48c356f390985d0758 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/integrations/botframework/app.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +# app.py — aiohttp + Bot Framework Echo bot + +import os +import sys +import json +from logic import handle_text +from aiohttp import web +from botbuilder.core import BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext +from botbuilder.schema import Activity +import aiohttp_cors +from pathlib import Path + + +# ------------------------------------------------------------------- +# Your bot implementation +# ------------------------------------------------------------------- +# Make sure this exists at packages/bots/echo_bot.py +# from bots.echo_bot import EchoBot +# Minimal inline fallback if you want to test quickly: +class EchoBot: + async def on_turn(self, turn_context: TurnContext): + if turn_context.activity.type == "message": + text = (turn_context.activity.text or "").strip() + if not text: + await turn_context.send_activity("Input was empty. Type 'help' for usage.") + return + + lower = text.lower() + if lower == "help": + await turn_context.send_activity("Try: echo | reverse: | capabilities") + elif lower == "capabilities": + await turn_context.send_activity("- echo\n- reverse\n- help\n- capabilities") + elif lower.startswith("reverse:"): + payload = text.split(":", 1)[1].strip() + await turn_context.send_activity(payload[::-1]) + elif lower.startswith("echo "): + await turn_context.send_activity(text[5:]) + else: + await turn_context.send_activity("Unsupported command. 
Type 'help' for examples.") + else: + await turn_context.send_activity(f"[{turn_context.activity.type}] event received.") + +# ------------------------------------------------------------------- +# Adapter / bot setup +# ------------------------------------------------------------------- +APP_ID = os.environ.get("MicrosoftAppId") or None +APP_PASSWORD = os.environ.get("MicrosoftAppPassword") or None + +adapter_settings = BotFrameworkAdapterSettings(APP_ID, APP_PASSWORD) +adapter = BotFrameworkAdapter(adapter_settings) + +async def on_error(context: TurnContext, error: Exception): + print(f"[on_turn_error] {error}", file=sys.stderr, flush=True) + try: + await context.send_activity("Oops. Something went wrong!") + except Exception as send_err: + print(f"[on_turn_error][send_activity_failed] {send_err}", file=sys.stderr, flush=True) + +adapter.on_turn_error = on_error +bot = EchoBot() + +# ------------------------------------------------------------------- +# HTTP handlers +# ------------------------------------------------------------------- +async def messages(req: web.Request) -> web.Response: + # Content-Type can include charset; do a contains check + ctype = (req.headers.get("Content-Type") or "").lower() + if "application/json" not in ctype: + return web.Response(status=415, text="Unsupported Media Type: expected application/json") + + try: + body = await req.json() + except json.JSONDecodeError: + return web.Response(status=400, text="Invalid JSON body") + + activity = Activity().deserialize(body) + auth_header = req.headers.get("Authorization") + + invoke_response = await adapter.process_activity(activity, auth_header, bot.on_turn) + if invoke_response: + # For invoke activities, adapter returns explicit status/body + return web.json_response(data=invoke_response.body, status=invoke_response.status) + # Acknowledge standard message activities + return web.Response(status=202, text="Accepted") + +async def home(_req: web.Request) -> web.Response: + return 
web.Response( + text="Bot is running. POST Bot Framework activities to /api/messages.", + content_type="text/plain" + ) + +async def messages_get(_req: web.Request) -> web.Response: + return web.Response( + text="This endpoint only accepts POST (Bot Framework activities).", + content_type="text/plain", + status=405 + ) + +async def healthz(_req: web.Request) -> web.Response: + return web.json_response({"status": "ok"}) + +async def plain_chat(req: web.Request) -> web.Response: + try: + payload = await req.json() + except Exception: + return web.json_response({"error": "Invalid JSON"}, status=400) + user_text = payload.get("text", "") + reply = handle_text(user_text) + return web.json_response({"reply": reply}) + +# ------------------------------------------------------------------- +# App factory and entrypoint +# ------------------------------------------------------------------- +from pathlib import Path + +def create_app() -> web.Application: + app = web.Application() + app.router.add_get("/", home) + app.router.add_get("/healthz", healthz) + app.router.add_get("/api/messages", messages_get) + app.router.add_post("/api/messages", messages) + app.router.add_post("/plain-chat", plain_chat) + + static_dir = Path(__file__).parent / "static" + if static_dir.exists(): + app.router.add_static("/static/", path=static_dir, show_index=True) + else: + print(f"[warn] static directory not found: {static_dir}", flush=True) + + return app + +app = create_app() + +if __name__ == "__main__": + host = os.environ.get("HOST", "127.0.0.1") # use 0.0.0.0 in containers + port = int(os.environ.get("PORT", 3978)) + web.run_app(app, host=host, port=port) diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/integrations/botframework/bot.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/integrations/botframework/bot.py new file mode 100644 index 0000000000000000000000000000000000000000..4cb889920c8265d0630fa203ec85773533fa5a79 
--- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/integrations/botframework/bot.py @@ -0,0 +1,86 @@ +# bot.py +""" +Simple MBF bot: +- 'help' / 'capabilities' shows features +- 'reverse ' returns reversed text +- otherwise delegates to AgenticCore ChatBot (sentiment) if available +""" + +from typing import List, Optional, Dict, Any +from botbuilder.core import ActivityHandler, TurnContext +from botbuilder.schema import ChannelAccount, ActivityTypes + +from skills import normalize, reverse_text, capabilities, is_empty + +# Try to import AgenticCore; if unavailable, provide a tiny fallback. +try: + from agenticcore.chatbot.services import ChatBot # real provider-backed bot +except Exception: + class ChatBot: # fallback shim for offline/dev + def reply(self, message: str) -> Dict[str, Any]: + return { + "reply": "Noted. (local fallback reply)", + "sentiment": "neutral", + "confidence": 0.5, + } + +def _format_sentiment(res: Dict[str, Any]) -> str: + """Compose a user-facing string from ChatBot reply payload.""" + reply = (res.get("reply") or "").strip() + label: Optional[str] = res.get("sentiment") + conf = res.get("confidence") + if label is not None and conf is not None: + return f"{reply} (sentiment: {label}, confidence: {float(conf):.2f})" + return reply or "I'm not sure what to say." 
+ +def _help_text() -> str: + """Single source of truth for the help/capability text.""" + feats = "\n".join(f"- {c}" for c in capabilities()) + return ( + "I can reverse text and provide concise replies with sentiment.\n" + "Commands:\n" + "- help | capabilities\n" + "- reverse \n" + "General text will be handled by the ChatBot service.\n\n" + f"My capabilities:\n{feats}" + ) + +class SimpleBot(ActivityHandler): + """Minimal ActivityHandler with local commands + ChatBot fallback.""" + + def __init__(self, chatbot: Optional[ChatBot] = None): + self._chatbot = chatbot or ChatBot() + + async def on_members_added_activity( + self, members_added: List[ChannelAccount], turn_context: TurnContext + ): + for member in members_added: + if member.id != turn_context.activity.recipient.id: + await turn_context.send_activity("Hello! Type 'help' to see what I can do.") + + async def on_message_activity(self, turn_context: TurnContext): + if turn_context.activity.type != ActivityTypes.message: + return + + text = (turn_context.activity.text or "").strip() + if is_empty(text): + await turn_context.send_activity("Please enter a message (try 'help').") + return + + cmd = normalize(text) + + if cmd in {"help", "capabilities"}: + await turn_context.send_activity(_help_text()) + return + + if cmd.startswith("reverse "): + original = text.split(" ", 1)[1] if " " in text else "" + await turn_context.send_activity(reverse_text(original)) + return + + # ChatBot fallback (provider-agnostic sentiment/reply) + try: + result = self._chatbot.reply(text) + await turn_context.send_activity(_format_sentiment(result)) + except Exception: + await turn_context.send_activity(f"You said: {text}") diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/integrations/web/fastapi/web_agentic.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/integrations/web/fastapi/web_agentic.py new file mode 100644 index 
0000000000000000000000000000000000000000..5807d4bcf0f9eb328cc4f57e64f56331108110d6 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/integrations/web/fastapi/web_agentic.py @@ -0,0 +1,22 @@ +# agenticcore/web_agentic.py +from fastapi import FastAPI, Query +from fastapi.responses import HTMLResponse +from agenticcore.chatbot.services import ChatBot + +app = FastAPI(title="AgenticCore Web UI") + +# 1. Simple HTML form at / +@app.get("/", response_class=HTMLResponse) +def index(): + return """ +
+ + +
+ """ + +# 2. Agentic endpoint +@app.get("/agentic") +def run_agentic(msg: str = Query(..., description="Message to send to ChatBot")): + bot = ChatBot() + return bot.reply(msg) diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/logged_in_bot/handler.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/logged_in_bot/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..42488cd43c647dfd7f1ff6189fb4ad008d21f18f --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/logged_in_bot/handler.py @@ -0,0 +1,17 @@ +from agenticcore.chatbot.services import ChatBot + +_bot = ChatBot() + +def handle_turn(message, history, user): + history = history or [] + try: + res = _bot.reply(message) + reply = res.get("reply") or "Noted." + label = res.get("sentiment") + conf = res.get("confidence") + if label is not None and conf is not None: + reply = f"{reply} (sentiment: {label}, confidence: {float(conf):.2f})" + except Exception as e: + reply = f"Sorry—error in ChatBot: {type(e).__name__}. Using fallback." 
+
+    history = history + [[message, reply]]
+    return history
diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/requirements.txt b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eee348aaada32fe4ad42fb5406eab15c2666269b
--- /dev/null
+++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/requirements.txt
@@ -0,0 +1,8 @@
+
+python-dotenv>=1.0
+requests>=2.31
+fastapi>=0.115.0
+uvicorn[standard]>=0.30.0
+# Optional for Bot Framework sample:
+# aiohttp>=3.9
+# botbuilder-core>=4.14
diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/.env.sample b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/.env.sample
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/Makefile b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/README.md b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9bea57d1cb9e65551241d34e41fbab607b355945
--- /dev/null
+++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/README.md
@@ -0,0 +1 @@
+# Storefront Chatbot (skeleton)
diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/anon_bot/handler.py 
def handle_turn(m, h, u):
    """Skeleton anonymous-bot handler: append a canned 'hi' reply.

    Returns a new history list; the caller's history *h* is not mutated.
    *u* (the user) is accepted for interface parity but unused here.
    """
    turns = list(h) if h else []
    turns.append([m, 'hi'])
    return turns
class Settings:
    """Placeholder configuration container for the legacy skeleton.

    Intentionally empty; real settings (env-driven) live in the full app.
    """


# Shared module-level settings instance used across the skeleton.
settings = Settings()
import re

# Conservative PII patterns: email addresses and phone-number-like digit runs.
_EMAIL_RE = re.compile(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}")
_PHONE_RE = re.compile(r"(?<!\d)(?:\+?\d[\d\s().-]{7,}\d)(?!\d)")


def redact(t):
    """Redact common PII (emails, phone-like numbers) from text *t*.

    The original skeleton stub returned *t* unchanged; this implements the
    guardrail it stood for. Falsy inputs (None, "") pass through unchanged,
    preserving the stub's behavior for empty input.
    """
    if not t:
        return t
    out = _EMAIL_RE.sub("[REDACTED_EMAIL]", t)
    out = _PHONE_RE.sub("[REDACTED_PHONE]", out)
    return out
def handle_turn(m, h, u):
    """Skeleton logged-in-bot handler: append a canned 'hi-logged' reply.

    Returns a new history list; the caller's history *h* is not mutated.
    *u* (the user) is accepted for interface parity but unused here.
    """
    turns = list(h) if h else []
    turns.append([m, 'hi-logged'])
    return turns
def analyze(t):
    """Stub NLU pipeline: classify every utterance *t* as the 'general' intent.

    Real intent routing is expected to replace this in the full app.
    """
    return {'intent': 'general'}
b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/nlu/router.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/pyproject.toml b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/requirements.txt b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..b880164032e207e96b8ce3960b58ade69971be01 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/requirements.txt @@ -0,0 +1,2 @@ +gradio==4.44.0 +pytest==8.3.3 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/scripts/check_compliance.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/scripts/check_compliance.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/scripts/run_local.sh b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/scripts/run_local.sh new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/scripts/seed_data.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/scripts/seed_data.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/tests/test_anon_bot.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/tests/test_anon_bot.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/tests/test_guardrails.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/tests/test_guardrails.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/tests/test_logged_in_bot.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/tests/test_logged_in_bot.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/tests/test_memory.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/tests/test_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/tests/test_nlu.py 
b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/agentic/storefront_chatbot_skeleton/tests/test_nlu.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/.env.example b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/.env.example new file mode 100644 index 0000000000000000000000000000000000000000..0d25b8c0e9639978114885d694226b23f7c8e6ce --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/.env.example @@ -0,0 +1,9 @@ +# Optional provider pinning & keys +# AI_PROVIDER=hf|azure|openai|cohere|deepai|offline +# HF_API_KEY= +# MICROSOFT_AI_SERVICE_ENDPOINT= +# MICROSOFT_AI_API_KEY= +# OPENAI_API_KEY= +# COHERE_API_KEY= +# DEEPAI_API_KEY= +# HTTP_TIMEOUT=20 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/README.md b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ddff20a1d564d1f006d072544e0fb286e92f7f8a --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/README.md @@ -0,0 +1,13 @@ +# Storefront Chatbot (Unified) + +This merges your skeleton with AgenticCore + a simple FastAPI backend and frontends. 
+ +## Run +1) python -m venv .venv && source .venv/bin/activate +2) pip install -r requirements.txt +3) uvicorn backend.app.main:app --reload --port 8000 +4) Open http://127.0.0.1:8000/ui or open frontend/chat_minimal.html + +## Test +- pytest -q +- python tools/smoke_test.py diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/chatbot/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/chatbot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/chatbot/services.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/chatbot/services.py new file mode 100644 index 0000000000000000000000000000000000000000..94c5f7df595c4f3b524387303672885ec13d90c4 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/chatbot/services.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +import json +import os +from dataclasses import dataclass +from typing import Dict + +# Delegate sentiment to the unified provider layer +# If you put providers_unified.py under agenticcore/chatbot/, change the import to: +# from agenticcore.chatbot.providers_unified import analyze_sentiment +from agenticcore.providers_unified import analyze_sentiment +from ..providers_unified import analyze_sentiment + + +def _trim(s: str, max_len: int = 2000) -> str: + s = (s or "").strip() + return s if len(s) <= max_len else s[: max_len - 1] + "…" + + +@dataclass(frozen=True) 
+class SentimentResult: + label: str # "positive" | "neutral" | "negative" | "mixed" | "unknown" + confidence: float # 0.0 .. 1.0 + + +class ChatBot: + """ + Minimal chatbot that uses provider-agnostic sentiment via providers_unified. + Public API: + - reply(text: str) -> Dict[str, object] + - capabilities() -> Dict[str, object] + """ + + def __init__(self, system_prompt: str = "You are a concise helper.") -> None: + self._system_prompt = _trim(system_prompt, 800) + # Expose which provider is intended/active (for diagnostics) + self._mode = os.getenv("AI_PROVIDER") or "auto" + + def capabilities(self) -> Dict[str, object]: + """List what this bot can do.""" + return { + "system": "chatbot", + "mode": self._mode, # "auto" or a pinned provider (hf/azure/openai/cohere/deepai/offline) + "features": ["text-input", "sentiment-analysis", "help"], + "commands": {"help": "Describe capabilities and usage."}, + } + + def reply(self, text: str) -> Dict[str, object]: + """Produce a reply and sentiment for one user message.""" + user = _trim(text) + if not user: + return self._make_response( + "I didn't catch that. Please provide some text.", + SentimentResult("unknown", 0.0), + ) + + if user.lower() in {"help", "/help"}: + return {"reply": self._format_help(), "capabilities": self.capabilities()} + + s = analyze_sentiment(user) # -> {"provider", "label", "score", ...} + sr = SentimentResult(label=str(s.get("label", "neutral")), confidence=float(s.get("score", 0.5))) + return self._make_response(self._compose(sr), sr) + + # ---- internals ---- + + def _format_help(self) -> str: + caps = self.capabilities() + feats = ", ".join(caps["features"]) + return f"I can analyze sentiment and respond concisely. Features: {feats}. Send any text or type 'help'." 
+ + @staticmethod + def _make_response(reply: str, s: SentimentResult) -> Dict[str, object]: + return {"reply": reply, "sentiment": s.label, "confidence": round(float(s.confidence), 2)} + + @staticmethod + def _compose(s: SentimentResult) -> str: + if s.label == "positive": + return "Thanks for sharing. I detected a positive sentiment." + if s.label == "negative": + return "I hear your concern. I detected a negative sentiment." + if s.label == "neutral": + return "Noted. The sentiment appears neutral." + if s.label == "mixed": + return "Your message has mixed signals. Can you clarify?" + return "I could not determine the sentiment. Please rephrase." + + +# Optional: local REPL for quick manual testing +def _interactive_loop() -> None: + bot = ChatBot() + try: + while True: + msg = input("> ").strip() + if msg.lower() in {"exit", "quit"}: + break + print(json.dumps(bot.reply(msg), ensure_ascii=False)) + except (EOFError, KeyboardInterrupt): + pass + + +if __name__ == "__main__": + _interactive_loop() diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/providers_unified.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/providers_unified.py new file mode 100644 index 0000000000000000000000000000000000000000..921c0eeafce6f620f92e80112f7c5cd1bb6cb6a6 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/providers_unified.py @@ -0,0 +1,273 @@ +""" +providers_unified.py +Unified, switchable providers for sentiment + (optional) text generation. 
+Selection order unless AI_PROVIDER is set: + HF -> AZURE -> OPENAI -> COHERE -> DEEPAI -> OFFLINE +Env vars: + HF_API_KEY + MICROSOFT_AI_SERVICE_ENDPOINT, MICROSOFT_AI_API_KEY + OPENAI_API_KEY, OPENAI_MODEL=gpt-3.5-turbo + COHERE_API_KEY, COHERE_MODEL=command + DEEPAI_API_KEY + AI_PROVIDER = hf|azure|openai|cohere|deepai|offline + HTTP_TIMEOUT = 20 +""" +from __future__ import annotations +import os, json +from typing import Dict, Any, Optional +import requests + +TIMEOUT = float(os.getenv("HTTP_TIMEOUT", "20")) + +def _env(name: str, default: Optional[str] = None) -> Optional[str]: + v = os.getenv(name) + return v if (v is not None and str(v).strip() != "") else default + +def _pick_provider() -> str: + forced = _env("AI_PROVIDER") + if forced in {"hf", "azure", "openai", "cohere", "deepai", "offline"}: + return forced + if _env("HF_API_KEY"): return "hf" + if _env("MICROSOFT_AI_API_KEY") and _env("MICROSOFT_AI_SERVICE_ENDPOINT"): return "azure" + if _env("OPENAI_API_KEY"): return "openai" + if _env("COHERE_API_KEY"): return "cohere" + if _env("DEEPAI_API_KEY"): return "deepai" + return "offline" + +# --------------------------- +# Sentiment +# --------------------------- + +def analyze_sentiment(text: str) -> Dict[str, Any]: + provider = _pick_provider() + try: + if provider == "hf": return _sentiment_hf(text) + if provider == "azure": return _sentiment_azure(text) + if provider == "openai": return _sentiment_openai_prompt(text) + if provider == "cohere": return _sentiment_cohere_prompt(text) + if provider == "deepai": return _sentiment_deepai(text) + return _sentiment_offline(text) + except Exception as e: + return {"provider": provider, "label": "neutral", "score": 0.5, "error": str(e)} + +def _sentiment_offline(text: str) -> Dict[str, Any]: + t = (text or "").lower() + pos = any(w in t for w in ["love","great","good","awesome","fantastic","thank","excellent","amazing"]) + neg = any(w in t for w in ["hate","bad","terrible","awful","worst","angry","horrible"]) 
+ label = "positive" if pos and not neg else "negative" if neg and not pos else "neutral" + score = 0.9 if label != "neutral" else 0.5 + return {"provider": "offline", "label": label, "score": score} + +def _sentiment_hf(text: str) -> Dict[str, Any]: + """ + Hugging Face Inference API for sentiment. + Uses canonical repo id and handles 404/401 and various payload shapes. + """ + key = _env("HF_API_KEY") + if not key: + return _sentiment_offline(text) + + # canonical repo id to avoid 404 + model = _env("HF_MODEL_SENTIMENT", "distilbert/distilbert-base-uncased-finetuned-sst-2-english") + timeout = int(_env("HTTP_TIMEOUT", "30")) + + headers = { + "Authorization": f"Bearer {key}", + "x-wait-for-model": "true", + "Accept": "application/json", + "Content-Type": "application/json", + } + + r = requests.post( + f"https://api-inference.huggingface.co/models/{model}", + headers=headers, + json={"inputs": text}, + timeout=timeout, + ) + + if r.status_code != 200: + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": f"HTTP {r.status_code}: {r.text[:500]}"} + + try: + data = r.json() + except Exception as e: + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": str(e)} + + if isinstance(data, dict) and "error" in data: + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": data["error"]} + + # normalize list shape + arr = data[0] if isinstance(data, list) and data and isinstance(data[0], list) else (data if isinstance(data, list) else []) + if not (isinstance(arr, list) and arr): + return {"provider": "hf", "label": "neutral", "score": 0.5, "error": f"Unexpected payload: {data}"} + + top = max(arr, key=lambda x: x.get("score", 0.0) if isinstance(x, dict) else 0.0) + raw = str(top.get("label", "")).upper() + score = float(top.get("score", 0.5)) + + mapping = { + "LABEL_0": "negative", "LABEL_1": "neutral", "LABEL_2": "positive", + "NEGATIVE": "negative", "NEUTRAL": "neutral", "POSITIVE": "positive", + } + label = 
mapping.get(raw, (raw.lower() or "neutral")) + + neutral_floor = float(os.getenv("SENTIMENT_NEUTRAL_THRESHOLD", "0.65")) + if label in {"positive", "negative"} and score < neutral_floor: + label = "neutral" + + return {"provider": "hf", "label": label, "score": score} + +def _sentiment_azure(text: str) -> Dict[str, Any]: + try: + from azure.core.credentials import AzureKeyCredential # type: ignore + from azure.ai.textanalytics import TextAnalyticsClient # type: ignore + except Exception: + return _sentiment_offline(text) + endpoint = _env("MICROSOFT_AI_SERVICE_ENDPOINT") + key = _env("MICROSOFT_AI_API_KEY") + if not (endpoint and key): return _sentiment_offline(text) + client = TextAnalyticsClient(endpoint=endpoint.strip(), credential=AzureKeyCredential(key.strip())) + resp = client.analyze_sentiment(documents=[text], show_opinion_mining=False)[0] + scores = { + "positive": float(getattr(resp.confidence_scores, "positive", 0.0) or 0.0), + "neutral": float(getattr(resp.confidence_scores, "neutral", 0.0) or 0.0), + "negative": float(getattr(resp.confidence_scores, "negative", 0.0) or 0.0), + } + label = max(scores, key=scores.get) + return {"provider": "azure", "label": label, "score": scores[label]} + +def _sentiment_openai_prompt(text: str) -> Dict[str, Any]: + key = _env("OPENAI_API_KEY") + model = _env("OPENAI_MODEL", "gpt-3.5-turbo") + if not key: return _sentiment_offline(text) + url = "https://api.openai.com/v1/chat/completions" + prompt = f"Classify the sentiment of this text as positive, negative, or neutral. Reply JSON with keys label and score (0..1). 
Text: {text!r}" + r = requests.post( + url, + headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"}, + json={"model": model, "messages": [{"role": "user", "content": prompt}], "temperature": 0}, + timeout=TIMEOUT, + ) + r.raise_for_status() + content = r.json()["choices"][0]["message"]["content"] + try: + obj = json.loads(content) + label = str(obj.get("label", "neutral")).lower() + score = float(obj.get("score", 0.5)) + return {"provider": "openai", "label": label, "score": score} + except Exception: + l = "positive" if "positive" in content.lower() else "negative" if "negative" in content.lower() else "neutral" + return {"provider": "openai", "label": l, "score": 0.5} + +def _sentiment_cohere_prompt(text: str) -> Dict[str, Any]: + key = _env("COHERE_API_KEY") + model = _env("COHERE_MODEL", "command") + if not key: return _sentiment_offline(text) + url = "https://api.cohere.ai/v1/generate" + prompt = f"Classify the sentiment (positive, negative, neutral) and return JSON with keys label and score (0..1). 
Text: {text!r}" + r = requests.post( + url, + headers={ + "Authorization": f"Bearer {key}", + "Content-Type": "application/json", + "Cohere-Version": "2022-12-06", + }, + json={"model": model, "prompt": prompt, "max_tokens": 30, "temperature": 0}, + timeout=TIMEOUT, + ) + r.raise_for_status() + gen = (r.json().get("generations") or [{}])[0].get("text", "") + try: + obj = json.loads(gen) + label = str(obj.get("label", "neutral")).lower() + score = float(obj.get("score", 0.5)) + return {"provider": "cohere", "label": label, "score": score} + except Exception: + l = "positive" if "positive" in gen.lower() else "negative" if "negative" in gen.lower() else "neutral" + return {"provider": "cohere", "label": l, "score": 0.5} + +def _sentiment_deepai(text: str) -> Dict[str, Any]: + key = _env("DEEPAI_API_KEY") + if not key: return _sentiment_offline(text) + url = "https://api.deepai.org/api/sentiment-analysis" + r = requests.post(url, headers={"api-key": key}, data={"text": text}, timeout=TIMEOUT) + r.raise_for_status() + data = r.json() + label = (data.get("output") or ["neutral"])[0].lower() + return {"provider": "deepai", "label": label, "score": 0.5 if label == "neutral" else 0.9} + +# --------------------------- +# Text generation (optional) +# --------------------------- + +def generate_text(prompt: str, max_tokens: int = 128) -> Dict[str, Any]: + provider = _pick_provider() + try: + if provider == "hf": return _gen_hf(prompt, max_tokens) + if provider == "openai": return _gen_openai(prompt, max_tokens) + if provider == "cohere": return _gen_cohere(prompt, max_tokens) + if provider == "deepai": return _gen_deepai(prompt, max_tokens) + return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + except Exception as e: + return {"provider": provider, "text": f"(error) {str(e)}"} + +def _gen_hf(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("HF_API_KEY") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + model = 
_env("HF_MODEL_GENERATION", "tiiuae/falcon-7b-instruct") + r = requests.post( + f"https://api-inference.huggingface.co/models/{model}", + headers={"Authorization": f"Bearer {key}"}, + json={"inputs": prompt, "parameters": {"max_new_tokens": max_tokens}}, + timeout=TIMEOUT, + ) + r.raise_for_status() + data = r.json() + if isinstance(data, list) and data and "generated_text" in data[0]: + return {"provider": "hf", "text": data[0]["generated_text"]} + return {"provider": "hf", "text": str(data)} + +def _gen_openai(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("OPENAI_API_KEY") + model = _env("OPENAI_MODEL", "gpt-3.5-turbo") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + url = "https://api.openai.com/v1/chat/completions" + r = requests.post( + url, + headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"}, + json={"model": model, "messages": [{"role": "user", "content": prompt}], "max_tokens": max_tokens}, + timeout=TIMEOUT, + ) + r.raise_for_status() + data = r.json() + text = data["choices"][0]["message"]["content"] + return {"provider": "openai", "text": text} + +def _gen_cohere(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("COHERE_API_KEY") + model = _env("COHERE_MODEL", "command") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + url = "https://api.cohere.ai/v1/generate" + r = requests.post( + url, + headers={ + "Authorization": f"Bearer {key}", + "Content-Type": "application/json", + "Cohere-Version": "2022-12-06", + }, + json={"model": model, "prompt": prompt, "max_tokens": max_tokens}, + timeout=TIMEOUT, + ) + r.raise_for_status() + data = r.json() + text = data.get("generations", [{}])[0].get("text", "") + return {"provider": "cohere", "text": text} + +def _gen_deepai(prompt: str, max_tokens: int) -> Dict[str, Any]: + key = _env("DEEPAI_API_KEY") + if not key: return {"provider": "offline", "text": f"(offline) {prompt[:160]}"} + url = 
"https://api.deepai.org/api/text-generator" + r = requests.post(url, headers={"api-key": key}, data={"text": prompt}, timeout=TIMEOUT) + r.raise_for_status() + data = r.json() + return {"provider": "deepai", "text": data.get("output", "")} diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/web_agentic.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/web_agentic.py new file mode 100644 index 0000000000000000000000000000000000000000..5807d4bcf0f9eb328cc4f57e64f56331108110d6 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/agenticcore/web_agentic.py @@ -0,0 +1,22 @@ +# agenticcore/web_agentic.py +from fastapi import FastAPI, Query +from fastapi.responses import HTMLResponse +from agenticcore.chatbot.services import ChatBot + +app = FastAPI(title="AgenticCore Web UI") + +# 1. Simple HTML form at / +@app.get("/", response_class=HTMLResponse) +def index(): + return """ +
+ + +
+ """ + +# 2. Agentic endpoint +@app.get("/agentic") +def run_agentic(msg: str = Query(..., description="Message to send to ChatBot")): + bot = ChatBot() + return bot.reply(msg) diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/app/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/app/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/app/main.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/app/main.py new file mode 100644 index 0000000000000000000000000000000000000000..7d46b0c0f2fe84c40354dff57e0f8fd127bf15b5 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/app/main.py @@ -0,0 +1,69 @@ +# backend/app/main.py +from __future__ import annotations + +import os +import time +import traceback +from pathlib import Path + +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import HTMLResponse +from fastapi.staticfiles import StaticFiles + + +def create_app() -> FastAPI: + app = FastAPI(title="AgenticCore Backend") + + # --- CORS: allow local dev origins + file:// (appears as 'null') --- + app.add_middleware( + CORSMiddleware, + allow_origins=[ + "http://127.0.0.1:3000", "http://localhost:3000", + "http://127.0.0.1:5173", "http://localhost:5173", + "http://127.0.0.1:8080", "http://localhost:8080", + "http://127.0.0.1:8000", "http://localhost:8000", + "null", # file:// pages in some browsers + ], + 
allow_credentials=False, + allow_methods=["*"], + allow_headers=["*"], + ) + + @app.get("/health") + def health(): + return {"ok": True, "version": "0.3.0", "time": int(time.time())} + + @app.get("/status") + def status(): + provider = os.getenv("AI_PROVIDER") or "auto" + return {"provider": provider} + + # --- Routers --- + try: + from backend.app.routers.chatbot import router as chatbot_router + app.include_router(chatbot_router) # exposes POST /chatbot/message + print("✅ Chatbot router mounted at /chatbot") + except Exception as e: + print("❌ Failed to mount chatbot router:", e) + traceback.print_exc() + + # --- Static frontends served by FastAPI (same-origin -> no CORS) --- + FRONTEND_DIR = Path(__file__).resolve().parents[2] / "frontend" + app.mount("/ui2", StaticFiles(directory=str(FRONTEND_DIR)), name="frontend") + + # Keep your existing single-file UI at /ui (optional) + FRONTEND_FILE = FRONTEND_DIR / "agenticcore_frontend.html" + + @app.get("/ui", response_class=HTMLResponse) + def ui(): + try: + return FRONTEND_FILE.read_text(encoding="utf-8") + except Exception: + return HTMLResponse("

UI not found

", status_code=404) + + return app + + +# Uvicorn entrypoint +app = create_app() diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/app/routers/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/app/routers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/app/routers/chatbot.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/app/routers/chatbot.py new file mode 100644 index 0000000000000000000000000000000000000000..b273d89e02b43d070207661223ab613044e91caa --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/backend/app/routers/chatbot.py @@ -0,0 +1,27 @@ +from fastapi import APIRouter +from pydantic import BaseModel +from typing import Optional, Dict, Any + +router = APIRouter(prefix="/chatbot", tags=["chatbot"]) + +class ChatIn(BaseModel): + message: str + thread: Optional[str] = None + +class ChatOut(BaseModel): + reply: str + sentiment: Optional[str] = None + confidence: Optional[float] = None + thread: Optional[str] = None + +@router.post("/message", response_model=ChatOut) +def post_message(body: ChatIn) -> Dict[str, Any]: + from agenticcore.chatbot.services import ChatBot + bot = ChatBot() + res = bot.reply(body.message) + return { + "reply": res.get("reply", ""), + "sentiment": res.get("sentiment"), + "confidence": res.get("confidence"), + "thread": body.thread or res.get("thread"), + } diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/app.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/app.py new file mode 100644 index 0000000000000000000000000000000000000000..eb54bf07d1c4b27a80245c48c356f390985d0758 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/app.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +# app.py — aiohttp + Bot Framework Echo bot + +import os +import sys +import json +from logic import handle_text +from aiohttp import web +from botbuilder.core import BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext +from botbuilder.schema import Activity +import aiohttp_cors +from pathlib import Path + + +# ------------------------------------------------------------------- +# Your bot implementation +# ------------------------------------------------------------------- +# Make sure this exists at packages/bots/echo_bot.py +# from bots.echo_bot import EchoBot +# Minimal inline fallback if you want to test quickly: +class EchoBot: + async def on_turn(self, turn_context: TurnContext): + if turn_context.activity.type == "message": + text = (turn_context.activity.text or "").strip() + if not text: + await turn_context.send_activity("Input was empty. 
Type 'help' for usage.") + return + + lower = text.lower() + if lower == "help": + await turn_context.send_activity("Try: echo | reverse: | capabilities") + elif lower == "capabilities": + await turn_context.send_activity("- echo\n- reverse\n- help\n- capabilities") + elif lower.startswith("reverse:"): + payload = text.split(":", 1)[1].strip() + await turn_context.send_activity(payload[::-1]) + elif lower.startswith("echo "): + await turn_context.send_activity(text[5:]) + else: + await turn_context.send_activity("Unsupported command. Type 'help' for examples.") + else: + await turn_context.send_activity(f"[{turn_context.activity.type}] event received.") + +# ------------------------------------------------------------------- +# Adapter / bot setup +# ------------------------------------------------------------------- +APP_ID = os.environ.get("MicrosoftAppId") or None +APP_PASSWORD = os.environ.get("MicrosoftAppPassword") or None + +adapter_settings = BotFrameworkAdapterSettings(APP_ID, APP_PASSWORD) +adapter = BotFrameworkAdapter(adapter_settings) + +async def on_error(context: TurnContext, error: Exception): + print(f"[on_turn_error] {error}", file=sys.stderr, flush=True) + try: + await context.send_activity("Oops. 
Something went wrong!") + except Exception as send_err: + print(f"[on_turn_error][send_activity_failed] {send_err}", file=sys.stderr, flush=True) + +adapter.on_turn_error = on_error +bot = EchoBot() + +# ------------------------------------------------------------------- +# HTTP handlers +# ------------------------------------------------------------------- +async def messages(req: web.Request) -> web.Response: + # Content-Type can include charset; do a contains check + ctype = (req.headers.get("Content-Type") or "").lower() + if "application/json" not in ctype: + return web.Response(status=415, text="Unsupported Media Type: expected application/json") + + try: + body = await req.json() + except json.JSONDecodeError: + return web.Response(status=400, text="Invalid JSON body") + + activity = Activity().deserialize(body) + auth_header = req.headers.get("Authorization") + + invoke_response = await adapter.process_activity(activity, auth_header, bot.on_turn) + if invoke_response: + # For invoke activities, adapter returns explicit status/body + return web.json_response(data=invoke_response.body, status=invoke_response.status) + # Acknowledge standard message activities + return web.Response(status=202, text="Accepted") + +async def home(_req: web.Request) -> web.Response: + return web.Response( + text="Bot is running. 
POST Bot Framework activities to /api/messages.", + content_type="text/plain" + ) + +async def messages_get(_req: web.Request) -> web.Response: + return web.Response( + text="This endpoint only accepts POST (Bot Framework activities).", + content_type="text/plain", + status=405 + ) + +async def healthz(_req: web.Request) -> web.Response: + return web.json_response({"status": "ok"}) + +async def plain_chat(req: web.Request) -> web.Response: + try: + payload = await req.json() + except Exception: + return web.json_response({"error": "Invalid JSON"}, status=400) + user_text = payload.get("text", "") + reply = handle_text(user_text) + return web.json_response({"reply": reply}) + +# ------------------------------------------------------------------- +# App factory and entrypoint +# ------------------------------------------------------------------- +from pathlib import Path + +def create_app() -> web.Application: + app = web.Application() + app.router.add_get("/", home) + app.router.add_get("/healthz", healthz) + app.router.add_get("/api/messages", messages_get) + app.router.add_post("/api/messages", messages) + app.router.add_post("/plain-chat", plain_chat) + + static_dir = Path(__file__).parent / "static" + if static_dir.exists(): + app.router.add_static("/static/", path=static_dir, show_index=True) + else: + print(f"[warn] static directory not found: {static_dir}", flush=True) + + return app + +app = create_app() + +if __name__ == "__main__": + host = os.environ.get("HOST", "127.0.0.1") # use 0.0.0.0 in containers + port = int(os.environ.get("PORT", 3978)) + web.run_app(app, host=host, port=port) diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/bot.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/bot.py new file mode 100644 index 0000000000000000000000000000000000000000..4cb889920c8265d0630fa203ec85773533fa5a79 --- /dev/null +++ 
b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/bot.py @@ -0,0 +1,86 @@ +# bot.py +""" +Simple MBF bot: +- 'help' / 'capabilities' shows features +- 'reverse ' returns reversed text +- otherwise delegates to AgenticCore ChatBot (sentiment) if available +""" + +from typing import List, Optional, Dict, Any +from botbuilder.core import ActivityHandler, TurnContext +from botbuilder.schema import ChannelAccount, ActivityTypes + +from skills import normalize, reverse_text, capabilities, is_empty + +# Try to import AgenticCore; if unavailable, provide a tiny fallback. +try: + from agenticcore.chatbot.services import ChatBot # real provider-backed bot +except Exception: + class ChatBot: # fallback shim for offline/dev + def reply(self, message: str) -> Dict[str, Any]: + return { + "reply": "Noted. (local fallback reply)", + "sentiment": "neutral", + "confidence": 0.5, + } + +def _format_sentiment(res: Dict[str, Any]) -> str: + """Compose a user-facing string from ChatBot reply payload.""" + reply = (res.get("reply") or "").strip() + label: Optional[str] = res.get("sentiment") + conf = res.get("confidence") + if label is not None and conf is not None: + return f"{reply} (sentiment: {label}, confidence: {float(conf):.2f})" + return reply or "I'm not sure what to say." 
+ +def _help_text() -> str: + """Single source of truth for the help/capability text.""" + feats = "\n".join(f"- {c}" for c in capabilities()) + return ( + "I can reverse text and provide concise replies with sentiment.\n" + "Commands:\n" + "- help | capabilities\n" + "- reverse \n" + "General text will be handled by the ChatBot service.\n\n" + f"My capabilities:\n{feats}" + ) + +class SimpleBot(ActivityHandler): + """Minimal ActivityHandler with local commands + ChatBot fallback.""" + + def __init__(self, chatbot: Optional[ChatBot] = None): + self._chatbot = chatbot or ChatBot() + + async def on_members_added_activity( + self, members_added: List[ChannelAccount], turn_context: TurnContext + ): + for member in members_added: + if member.id != turn_context.activity.recipient.id: + await turn_context.send_activity("Hello! Type 'help' to see what I can do.") + + async def on_message_activity(self, turn_context: TurnContext): + if turn_context.activity.type != ActivityTypes.message: + return + + text = (turn_context.activity.text or "").strip() + if is_empty(text): + await turn_context.send_activity("Please enter a message (try 'help').") + return + + cmd = normalize(text) + + if cmd in {"help", "capabilities"}: + await turn_context.send_activity(_help_text()) + return + + if cmd.startswith("reverse "): + original = text.split(" ", 1)[1] if " " in text else "" + await turn_context.send_activity(reverse_text(original)) + return + + # ChatBot fallback (provider-agnostic sentiment/reply) + try: + result = self._chatbot.reply(text) + await turn_context.send_activity(_format_sentiment(result)) + except Exception: + await turn_context.send_activity(f"You said: {text}") diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/skills.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/skills.py new file mode 100644 index 
0000000000000000000000000000000000000000..1db3a468576e084e193425997add38721414c83d --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/extras/mbf_bot/skills.py @@ -0,0 +1,28 @@ +# skills.py +""" +Small, dependency-free helpers used by the MBF SimpleBot. +""" + +from typing import List + +_CAPS: List[str] = [ + "echo-reverse", # reverse + "help", # help / capabilities + "chatbot-sentiment", # delegate to ChatBot() if available +] + +def normalize(text: str) -> str: + """Normalize user text for lightweight command routing.""" + return (text or "").strip().lower() + +def reverse_text(text: str) -> str: + """Return the input string reversed.""" + return (text or "")[::-1] + +def capabilities() -> List[str]: + """Return a stable list of bot capabilities.""" + return list(_CAPS) + +def is_empty(text: str) -> bool: + """True if message is blank after trimming.""" + return len((text or "").strip()) == 0 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/agenticcore_frontend.html b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/agenticcore_frontend.html new file mode 100644 index 0000000000000000000000000000000000000000..39d63a7bc4a2daada334e8d2d64577f9e7078882 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/agenticcore_frontend.html @@ -0,0 +1,200 @@ + + + + + + AgenticCore Chatbot Frontend + + + +
+
+

AgenticCore Chatbot Frontend

+
Frontend → FastAPI → providers_unified
+
+ +
+
+
+ +
+ + +
+
Not checked
+
+
+ +
+ + +
+
+ + + +
+
+
+
+
+ +
+ Use with your FastAPI backend at /chatbot/message. Configure CORS if you serve this file from a different origin. +
+
+ + + + diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/chat_console.html b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/chat_console.html new file mode 100644 index 0000000000000000000000000000000000000000..c2cf2ca34021674756a9e61906fe6b8f75948724 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/chat_console.html @@ -0,0 +1,77 @@ + + + + + Console Chat Tester + + + + +

AgenticCore Console

+ +
+ + + + +
+ +
+ + +
+ +
+ Mode: + API +
+ +

+
+
+
+
diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/chat_minimal.html b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/chat_minimal.html
new file mode 100644
index 0000000000000000000000000000000000000000..0b3fb325ece7bf9972b31d977e4aeee0e167c025
--- /dev/null
+++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/frontend/chat_minimal.html
@@ -0,0 +1,89 @@
+
+
+
+  
+  Minimal Chat Tester
+  
+  
+
+
+

Minimal Chat Tester → FastAPI /chatbot/message

+ +
+ + + + +
+ +
+ + +
+ +

+ + + + + diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/requirements.txt b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..c802989abda1ce968ffd62e45003f85b7f81ac7d --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/requirements.txt @@ -0,0 +1,14 @@ +# Web +fastapi>=0.112 +uvicorn[standard]>=0.30 +pydantic>=2.7 + +# HTTP +requests>=2.32 + +# Optional Bot Framework demo +aiohttp>=3.9 +botbuilder-core>=4.15 + +# Dev +pytest>=8.0 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tests/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tests/test_routes.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tests/test_routes.py new file mode 100644 index 0000000000000000000000000000000000000000..420d026b978c230d39929cacf65c9d5882ba9f23 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tests/test_routes.py @@ -0,0 +1,6 @@ +def test_routes_mount(): + from backend.app.main import create_app + app = create_app() + paths = [getattr(r, "path", "") for r in app.routes] + assert "/chatbot/message" in paths + assert "/health" in paths diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tools/__init__.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tools/quick_sanity.py 
b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tools/quick_sanity.py new file mode 100644 index 0000000000000000000000000000000000000000..aa8c5bb34683c430a654783cc533c66ea61ab967 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tools/quick_sanity.py @@ -0,0 +1,13 @@ +# tools/quick_sanity.py +""" +Tiny sanity test for MBF helpers. Run from repo root or set PYTHONPATH. +""" +import sys, os +# Add repo root so 'mbf_bot' is importable if running directly +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) + +from mbf_bot.skills import reverse_text, capabilities, normalize + +print("caps:", capabilities()) +print("reverse:", reverse_text("hello")) +print("cmd:", normalize(" Help ")) diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tools/smoke_test.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tools/smoke_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5337e397dda5896cd4b38fbfe7f3235d45192a66 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/legacy_src/full/tools/smoke_test.py @@ -0,0 +1,11 @@ +import os, json, requests +from agenticcore.chatbot.services import ChatBot + +def p(title, data): print(f"\n== {title} ==\n{json.dumps(data, indent=2)}") + +bot = ChatBot() +p("Lib/Direct", bot.reply("I really love this")) + +url = os.getenv("BACKEND_URL", "http://127.0.0.1:8000") +r = requests.get(f"{url}/health"); p("API/Health", r.json()) +r = requests.post(f"{url}/chatbot/message", json={"message":"api path test"}); p("API/Chat", r.json()) diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/.env.sample b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/.env.sample new file mode 100644 index 0000000000000000000000000000000000000000..cc4d3812f29b8c9d0ebd71cb1d62d46b9d1f41c3 --- /dev/null +++ 
b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/.env.sample @@ -0,0 +1,7 @@ +# Feature toggles +AZURE_ENABLED=false +SENTIMENT_ENABLED=false +DB_URL=memory:// +# Azure (optional) +AZURE_TEXT_ANALYTICS_ENDPOINT= +AZURE_TEXT_ANALYTICS_KEY= diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/Makefile b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..5f5636501fb59339863a6be7d0d116d21cb0bda8 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/Makefile @@ -0,0 +1,11 @@ +.PHONY: dev test run seed check +dev: + pip install -r requirements.txt +test: + pytest -q +run: + export PYTHONPATH=. && python -c "from storefront_chatbot.app.app import build; build().launch(server_name='0.0.0.0', server_port=7860)" +seed: + python storefront_chatbot/scripts/seed_data.py +check: + python storefront_chatbot/scripts/check_compliance.py diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/README.md b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ff8da746f63798ed65a18326962f9139347b2e0e --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/README.md @@ -0,0 +1,11 @@ +# Storefront Chatbot + +This repo follows a modular layout with a Gradio UI, NLU pipeline, anonymous and logged-in flows, +guardrails, and optional Azure sentiment. 
+ +## Quickstart +```bash +make dev +make run +# open http://localhost:7860 +``` diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/anon_bot/handler.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/anon_bot/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..04907a388c5f485a55f941252f09fe9af3504faa --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/anon_bot/handler.py @@ -0,0 +1 @@ +def handle_turn(m,h,u): return (h or [])+[[m,'hi']] diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/anon_bot/rules.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/anon_bot/rules.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/app/app.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/app/app.py new file mode 100644 index 0000000000000000000000000000000000000000..eb54bf07d1c4b27a80245c48c356f390985d0758 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/app/app.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +# app.py — aiohttp + Bot Framework Echo bot + +import os +import sys +import json +from logic import handle_text +from aiohttp import web +from botbuilder.core import BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext +from botbuilder.schema import Activity +import aiohttp_cors +from pathlib import Path + + +# ------------------------------------------------------------------- +# Your bot implementation +# ------------------------------------------------------------------- +# Make sure this exists at packages/bots/echo_bot.py +# from bots.echo_bot import EchoBot +# Minimal inline fallback if you want to test quickly: +class EchoBot: + async def on_turn(self, 
turn_context: TurnContext): + if turn_context.activity.type == "message": + text = (turn_context.activity.text or "").strip() + if not text: + await turn_context.send_activity("Input was empty. Type 'help' for usage.") + return + + lower = text.lower() + if lower == "help": + await turn_context.send_activity("Try: echo | reverse: | capabilities") + elif lower == "capabilities": + await turn_context.send_activity("- echo\n- reverse\n- help\n- capabilities") + elif lower.startswith("reverse:"): + payload = text.split(":", 1)[1].strip() + await turn_context.send_activity(payload[::-1]) + elif lower.startswith("echo "): + await turn_context.send_activity(text[5:]) + else: + await turn_context.send_activity("Unsupported command. Type 'help' for examples.") + else: + await turn_context.send_activity(f"[{turn_context.activity.type}] event received.") + +# ------------------------------------------------------------------- +# Adapter / bot setup +# ------------------------------------------------------------------- +APP_ID = os.environ.get("MicrosoftAppId") or None +APP_PASSWORD = os.environ.get("MicrosoftAppPassword") or None + +adapter_settings = BotFrameworkAdapterSettings(APP_ID, APP_PASSWORD) +adapter = BotFrameworkAdapter(adapter_settings) + +async def on_error(context: TurnContext, error: Exception): + print(f"[on_turn_error] {error}", file=sys.stderr, flush=True) + try: + await context.send_activity("Oops. 
Something went wrong!") + except Exception as send_err: + print(f"[on_turn_error][send_activity_failed] {send_err}", file=sys.stderr, flush=True) + +adapter.on_turn_error = on_error +bot = EchoBot() + +# ------------------------------------------------------------------- +# HTTP handlers +# ------------------------------------------------------------------- +async def messages(req: web.Request) -> web.Response: + # Content-Type can include charset; do a contains check + ctype = (req.headers.get("Content-Type") or "").lower() + if "application/json" not in ctype: + return web.Response(status=415, text="Unsupported Media Type: expected application/json") + + try: + body = await req.json() + except json.JSONDecodeError: + return web.Response(status=400, text="Invalid JSON body") + + activity = Activity().deserialize(body) + auth_header = req.headers.get("Authorization") + + invoke_response = await adapter.process_activity(activity, auth_header, bot.on_turn) + if invoke_response: + # For invoke activities, adapter returns explicit status/body + return web.json_response(data=invoke_response.body, status=invoke_response.status) + # Acknowledge standard message activities + return web.Response(status=202, text="Accepted") + +async def home(_req: web.Request) -> web.Response: + return web.Response( + text="Bot is running. 
POST Bot Framework activities to /api/messages.", + content_type="text/plain" + ) + +async def messages_get(_req: web.Request) -> web.Response: + return web.Response( + text="This endpoint only accepts POST (Bot Framework activities).", + content_type="text/plain", + status=405 + ) + +async def healthz(_req: web.Request) -> web.Response: + return web.json_response({"status": "ok"}) + +async def plain_chat(req: web.Request) -> web.Response: + try: + payload = await req.json() + except Exception: + return web.json_response({"error": "Invalid JSON"}, status=400) + user_text = payload.get("text", "") + reply = handle_text(user_text) + return web.json_response({"reply": reply}) + +# ------------------------------------------------------------------- +# App factory and entrypoint +# ------------------------------------------------------------------- +from pathlib import Path + +def create_app() -> web.Application: + app = web.Application() + app.router.add_get("/", home) + app.router.add_get("/healthz", healthz) + app.router.add_get("/api/messages", messages_get) + app.router.add_post("/api/messages", messages) + app.router.add_post("/plain-chat", plain_chat) + + static_dir = Path(__file__).parent / "static" + if static_dir.exists(): + app.router.add_static("/static/", path=static_dir, show_index=True) + else: + print(f"[warn] static directory not found: {static_dir}", flush=True) + + return app + +app = create_app() + +if __name__ == "__main__": + host = os.environ.get("HOST", "127.0.0.1") # use 0.0.0.0 in containers + port = int(os.environ.get("PORT", 3978)) + web.run_app(app, host=host, port=port) diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/app/routes.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/app/routes.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/core/config.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/core/config.py new file mode 100644 index 0000000000000000000000000000000000000000..77f363b6df321c56f03a2a76166a1dd71ea68958 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/core/config.py @@ -0,0 +1,2 @@ +class Settings: pass +settings = Settings() diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/core/logging.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/core/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/core/types.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/core/types.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/docs/architecture.md b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/docs/architecture.md new file mode 100644 index 0000000000000000000000000000000000000000..931999fd4baec8ab8f5d524ad3ddd123e278ebb4 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/docs/architecture.md @@ -0,0 +1 @@ +# Architecture\n\nShort explainer tied to the flowchart.\n \ No newline at end of file diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/docs/design.md b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/docs/design.md new file mode 100644 index 0000000000000000000000000000000000000000..257007bc4a973df95b8c0853985ad077a3070ddb --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/docs/design.md 
def redact(t):
    """Return *t* unchanged.

    PII-redaction placeholder: the real implementation is expected to
    strip or mask personally identifiable information; for now this is
    a deliberate no-op pass-through.
    """
    return t
from agenticcore.chatbot.services import ChatBot

# One shared bot instance serves every turn in this process.
_bot = ChatBot()


def handle_turn(message, history, user):
    """Process one logged-in chat turn and return the updated history.

    Parameters
    ----------
    message : str
        The user's latest utterance.
    history : list[[str, str]] | None
        Prior turns as [user_message, bot_reply] pairs; None is treated
        as an empty history. The caller's list is not mutated.
    user : object
        Logged-in user context (currently unused by this handler).

    Returns
    -------
    list[[str, str]]
        A new history list with this turn appended.
    """
    turns = list(history or [])
    try:
        result = _bot.reply(message)
        reply = result.get("reply") or "Noted."
        sentiment = result.get("sentiment")
        confidence = result.get("confidence")
        # Only decorate the reply when the bot supplied both signals.
        if sentiment is not None and confidence is not None:
            reply = f"{reply} (sentiment: {sentiment}, confidence: {float(confidence):.2f})"
    except Exception as exc:
        # Degrade gracefully: surface the failure class, keep the chat alive.
        reply = f"Sorry—error in ChatBot: {type(exc).__name__}. Using fallback."
    turns.append([message, reply])
    return turns
def analyze(t):
    """Run NLU over utterance *t* and return the analysis dict.

    Placeholder pipeline: every input is classified as the catch-all
    'general' intent until real intent/entity models are wired in.
    """
    return {"intent": "general"}
0000000000000000000000000000000000000000..caf3e0a6b2da6d44b32e077675eae58dfacbf6c4 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/requirements.txt @@ -0,0 +1,9 @@ +gradio>=4.0 +transformers>=4.41.0 +torch>=2.2.0 +scikit-learn>=1.3.0 +pandas>=2.1.0 +numpy>=1.26.0 +pytest>=7.4.0 +# Optional Azure +azure-ai-textanalytics>=5.3.0 diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/scripts/check_compliance.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/scripts/check_compliance.py new file mode 100644 index 0000000000000000000000000000000000000000..783aad1e3e269c5e1b3f64b395dc980a60b6e76c --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/scripts/check_compliance.py @@ -0,0 +1 @@ +# Fails if disallowed deps appear (placeholder) diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/scripts/run_local.sh b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/scripts/run_local.sh new file mode 100644 index 0000000000000000000000000000000000000000..1574d429e034e078de6319b6c0c7e4f7cd513a8c --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/scripts/run_local.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +set -euo pipefail +export PYTHONPATH=. 
+python -c "from storefront_chatbot.app.app import build; build().launch(server_name='0.0.0.0', server_port=7860)" diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/scripts/seed_data.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/scripts/seed_data.py new file mode 100644 index 0000000000000000000000000000000000000000..7182ae7271ee5dda3ed7e67799b408766c77c377 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/scripts/seed_data.py @@ -0,0 +1 @@ +# Load sample products/FAQs (placeholder) diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/tests/test_anon_bot.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/tests/test_anon_bot.py new file mode 100644 index 0000000000000000000000000000000000000000..9f9192473f115338b5490683f0ac7169338d4f96 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/tests/test_anon_bot.py @@ -0,0 +1 @@ +def test_anon_stub(): assert True diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/tests/test_guardrails.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/tests/test_guardrails.py new file mode 100644 index 0000000000000000000000000000000000000000..846d874cdede0615ea40741d3283c5107d509e81 --- /dev/null +++ b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/tests/test_guardrails.py @@ -0,0 +1 @@ +def test_guardrails_stub(): assert True diff --git a/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/tests/test_logged_in_bot.py b/storefront-chatbot_scaffold_merged/storefront-chatbot/storefront_chatbot/tests/test_logged_in_bot.py new file mode 100644 index 0000000000000000000000000000000000000000..7c5df721836277c95479ae5780a569bfe795a78e --- /dev/null +++ 
# /test/smoke_test.py
"""Manual smoke check for the ChatBot library path and the HTTP API path.

Run directly: ``python tests/smoke_test.py``. The API section needs a
backend listening at BACKEND_URL (default http://127.0.0.1:8000).

Fix over the original: all work now lives under a ``__main__`` guard.
Previously the bot construction and network requests executed at module
import time, so bare pytest collection of the tests/ directory ran them
and failed whenever the backend was down. Timeouts were also added to
the HTTP calls so a hung server cannot block the script forever.
"""
import json
import os

import requests

from agenticcore.chatbot.services import ChatBot


def p(title, data):
    """Pretty-print *data* as indented JSON under a section header."""
    print(f"\n== {title} ==\n{json.dumps(data, indent=2)}")


def main():
    # Library/direct path — no server required.
    bot = ChatBot()
    p("Lib/Direct", bot.reply("I really love this"))

    # API path — requires a running backend.
    url = os.getenv("BACKEND_URL", "http://127.0.0.1:8000")
    r = requests.get(f"{url}/health", timeout=10)
    p("API/Health", r.json())
    r = requests.post(f"{url}/chatbot/message", json={"message": "api path test"}, timeout=10)
    p("API/Chat", r.json())


if __name__ == "__main__":
    main()
# /test/test_routes.py
def test_routes_mount():
    """The app factory must mount the chat and health endpoints."""
    from backend.app.main import create_app

    app = create_app()
    mounted = {getattr(route, "path", "") for route in app.routes}
    assert "/chatbot/message" in mounted
    assert "/health" in mounted
Run from repo root or set PYTHONPATH. +""" +import sys, os +# Add repo root so 'mbf_bot' is importable if running directly +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) + +from mbf_bot.skills import reverse_text, capabilities, normalize + +print("caps:", capabilities()) +print("reverse:", reverse_text("hello")) +print("cmd:", normalize(" Help "))