agenticcore.cli

Console entrypoints:
 - agentic: send a message to ChatBot and print reply JSON
 - repo-tree: print a filtered tree view (uses tree.txt if present)
 - repo-flatten: flatten code listing to stdout (uses FLATTENED_CODE.txt if present)
# /agenticcore/cli.py
"""
agenticcore.cli
Console entrypoints:
 - agentic: send a message to ChatBot and print reply JSON
 - repo-tree: print a filtered tree view (uses tree.txt if present)
 - repo-flatten: flatten code listing to stdout (uses FLATTENED_CODE.txt if present)
"""
import argparse
import json
import os
import sys
import traceback
from pathlib import Path

from dotenv import load_dotenv

# Load .env variables into os.environ (project root .env by default)
load_dotenv()


def cmd_agentic(argv=None):
    """Send a message to ChatBot and print the reply as JSON.

    Special messages:
      - "status": probe provider choice, HF auth, token scopes and inference,
        then print a diagnostic report
      - "help":   print the bot's capabilities
    Anything else is forwarded to ``ChatBot.reply``.
    """
    # Lazy import so other commands don't require ChatBot to be importable
    from agenticcore.chatbot.services import ChatBot
    # We call analyze_sentiment only for 'status' to reveal the actual chosen provider
    try:
        from agenticcore.providers_unified import analyze_sentiment
    except Exception:
        analyze_sentiment = None  # still fine; we'll show mode only

    p = argparse.ArgumentParser(prog="agentic", description="Chat with AgenticCore ChatBot")
    p.add_argument("message", nargs="*", help="Message to send")
    p.add_argument("--debug", action="store_true", help="Print debug info")
    args = p.parse_args(argv)
    msg = " ".join(args.message).strip() or "hello"

    if args.debug:
        print(f"DEBUG argv={sys.argv}", flush=True)
        print(f"DEBUG raw message='{msg}'", flush=True)

    bot = ChatBot()

    # Special commands for testing / assignments
    if msg.lower() == "status":
        import requests  # local import to avoid hard dep for other commands

        # Try a lightweight provider probe via analyze_sentiment
        provider = None
        if analyze_sentiment is not None:
            try:
                probe = analyze_sentiment("status ping")
                provider = (probe or {}).get("provider")
            except Exception:
                if args.debug:
                    traceback.print_exc()

        # Hugging Face whoami auth probe
        tok = os.getenv("HF_API_KEY", "")
        who = None
        auth_ok = False
        err = None
        try:
            if tok:
                r = requests.get(
                    "https://huggingface.co/api/whoami-v2",
                    headers={"Authorization": f"Bearer {tok}"},
                    timeout=15,
                )
                auth_ok = (r.status_code == 200)
                who = r.json() if auth_ok else None
                if not auth_ok:
                    err = r.text  # e.g., {"error":"Invalid credentials in Authorization header"}
            else:
                err = "HF_API_KEY not set (load .env or export it)"
        except Exception as e:
            err = str(e)

        # Extract fine-grained scopes for visibility
        fg = (((who or {}).get("auth") or {}).get("accessToken") or {}).get("fineGrained") or {}
        scoped = fg.get("scoped") or []
        global_scopes = fg.get("global") or []

        # ---- tiny inference ping (proves 'Make calls to Inference Providers') ----
        infer_ok, infer_err = False, None
        try:
            if tok:
                model = os.getenv(
                    "HF_MODEL_SENTIMENT",
                    "distilbert-base-uncased-finetuned-sst-2-english"
                )
                r2 = requests.post(
                    f"https://api-inference.huggingface.co/models/{model}",
                    headers={"Authorization": f"Bearer {tok}", "x-wait-for-model": "true"},
                    json={"inputs": "ping"},
                    timeout=int(os.getenv("HTTP_TIMEOUT", "60")),
                )
                infer_ok = (r2.status_code == 200)
                if not infer_ok:
                    infer_err = f"HTTP {r2.status_code}: {r2.text}"
        except Exception as e:
            infer_err = str(e)
        # -------------------------------------------------------------------------

        # Mask + length to verify what .env provided
        mask = (tok[:3] + "..." + tok[-4:]) if tok else None
        out = {
            "provider": provider or "unknown",
            "mode": getattr(bot, "_mode", "auto"),
            "auth_ok": auth_ok,
            "whoami": who,
            "token_scopes": {
                "global": global_scopes,
                "scoped": scoped,
            },
            "inference_ok": infer_ok,
            "inference_error": infer_err,
            "env": {
                "HF_API_KEY_len": len(tok) if tok else 0,
                "HF_API_KEY_mask": mask,
                "HF_MODEL_SENTIMENT": os.getenv("HF_MODEL_SENTIMENT"),
                "HTTP_TIMEOUT": os.getenv("HTTP_TIMEOUT"),
            },
            "capabilities": bot.capabilities(),
            "error": err,
        }

    elif msg.lower() == "help":
        out = {"capabilities": bot.capabilities()}

    else:
        try:
            out = bot.reply(msg)
        except Exception as e:
            if args.debug:
                traceback.print_exc()
            out = {"error": str(e), "message": msg}

    if args.debug:
        print(f"DEBUG out={out}", flush=True)

    print(json.dumps(out, indent=2), flush=True)


def cmd_repo_tree(argv=None):
    """Print a repo tree view, reading a precomputed tree file if it exists."""
    p = argparse.ArgumentParser(prog="repo-tree", description="Print repo tree (from tree.txt if available)")
    p.add_argument("--path", default="tree.txt", help="Path to precomputed tree file")
    args = p.parse_args(argv)
    path = Path(args.path)
    if path.exists():
        print(path.read_text(encoding="utf-8"), flush=True)
    else:
        print("(no tree.txt found)", flush=True)


def cmd_repo_flatten(argv=None):
    """Print a flattened code listing, reading a pre-flattened file if it exists."""
    p = argparse.ArgumentParser(prog="repo-flatten", description="Print flattened code listing")
    p.add_argument("--path", default="FLATTENED_CODE.txt", help="Path to pre-flattened code file")
    args = p.parse_args(argv)
    path = Path(args.path)
    if path.exists():
        print(path.read_text(encoding="utf-8"), flush=True)
    else:
        print("(no FLATTENED_CODE.txt found)", flush=True)


def _dispatch():
    """Route ``python -m agenticcore.cli <subcommand> [args...]`` to a handler."""
    if len(sys.argv) <= 1:
        print("Usage: python -m agenticcore.cli <agentic|repo-tree|repo-flatten> [args]", file=sys.stderr)
        sys.exit(2)
    cmd, argv = sys.argv[1], sys.argv[2:]
    try:
        if cmd == "agentic":
            cmd_agentic(argv)
        elif cmd == "repo-tree":
            cmd_repo_tree(argv)
        elif cmd == "repo-flatten":
            cmd_repo_flatten(argv)
        else:
            print(f"Unknown subcommand: {cmd}", file=sys.stderr)
            sys.exit(2)
    except SystemExit:
        raise
    except Exception:
        # Top-level boundary: show the traceback, exit non-zero
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    _dispatch()
def cmd_agentic(argv=None):
    """Chat entrypoint: forward *argv* words to ChatBot and print its JSON reply.

    Recognizes two special messages — "status" (provider/auth/inference probe)
    and "help" (capability listing); everything else goes to ``ChatBot.reply``.
    """
    # Deferred import keeps the other console commands usable even when the
    # ChatBot stack cannot be imported.
    from agenticcore.chatbot.services import ChatBot
    # Only the 'status' path needs analyze_sentiment (to expose the provider).
    try:
        from agenticcore.providers_unified import analyze_sentiment
    except Exception:
        analyze_sentiment = None  # acceptable: status will just omit the provider

    parser = argparse.ArgumentParser(prog="agentic", description="Chat with AgenticCore ChatBot")
    parser.add_argument("message", nargs="*", help="Message to send")
    parser.add_argument("--debug", action="store_true", help="Print debug info")
    opts = parser.parse_args(argv)
    text = " ".join(opts.message).strip() or "hello"

    if opts.debug:
        print(f"DEBUG argv={sys.argv}", flush=True)
        print(f"DEBUG raw message='{text}'", flush=True)

    bot = ChatBot()
    lowered = text.lower()

    if lowered == "status":
        import requests  # imported here so non-status commands avoid the dependency

        # Provider probe: one sentiment call reveals which provider answered.
        provider = None
        if analyze_sentiment is not None:
            try:
                probe = analyze_sentiment("status ping")
                provider = (probe or {}).get("provider")
            except Exception:
                if opts.debug:
                    traceback.print_exc()

        # Auth probe against the Hugging Face whoami endpoint.
        token = os.getenv("HF_API_KEY", "")
        whoami = None
        auth_ok = False
        err = None
        try:
            if token:
                resp = requests.get(
                    "https://huggingface.co/api/whoami-v2",
                    headers={"Authorization": f"Bearer {token}"},
                    timeout=15,
                )
                auth_ok = resp.status_code == 200
                whoami = resp.json() if auth_ok else None
                if not auth_ok:
                    err = resp.text  # e.g., {"error":"Invalid credentials in Authorization header"}
            else:
                err = "HF_API_KEY not set (load .env or export it)"
        except Exception as exc:
            err = str(exc)

        # Surface the token's fine-grained scopes, when present.
        fine_grained = (((whoami or {}).get("auth") or {}).get("accessToken") or {}).get("fineGrained") or {}
        scoped = fine_grained.get("scoped") or []
        global_scopes = fine_grained.get("global") or []

        # Minimal inference call (proves 'Make calls to Inference Providers').
        infer_ok = False
        infer_err = None
        try:
            if token:
                model = os.getenv(
                    "HF_MODEL_SENTIMENT",
                    "distilbert-base-uncased-finetuned-sst-2-english"
                )
                ping = requests.post(
                    f"https://api-inference.huggingface.co/models/{model}",
                    headers={"Authorization": f"Bearer {token}", "x-wait-for-model": "true"},
                    json={"inputs": "ping"},
                    timeout=int(os.getenv("HTTP_TIMEOUT", "60")),
                )
                infer_ok = ping.status_code == 200
                if not infer_ok:
                    infer_err = f"HTTP {ping.status_code}: {ping.text}"
        except Exception as exc:
            infer_err = str(exc)

        # Masked token echo so the operator can confirm what .env supplied.
        masked = (token[:3] + "..." + token[-4:]) if token else None
        out = {
            "provider": provider or "unknown",
            "mode": getattr(bot, "_mode", "auto"),
            "auth_ok": auth_ok,
            "whoami": whoami,
            "token_scopes": {
                "global": global_scopes,
                "scoped": scoped,
            },
            "inference_ok": infer_ok,
            "inference_error": infer_err,
            "env": {
                "HF_API_KEY_len": len(token) if token else 0,
                "HF_API_KEY_mask": masked,
                "HF_MODEL_SENTIMENT": os.getenv("HF_MODEL_SENTIMENT"),
                "HTTP_TIMEOUT": os.getenv("HTTP_TIMEOUT"),
            },
            "capabilities": bot.capabilities(),
            "error": err,
        }
    elif lowered == "help":
        out = {"capabilities": bot.capabilities()}
    else:
        try:
            out = bot.reply(text)
        except Exception as exc:
            if opts.debug:
                traceback.print_exc()
            out = {"error": str(exc), "message": text}

    if opts.debug:
        print(f"DEBUG out={out}", flush=True)

    print(json.dumps(out, indent=2), flush=True)
def cmd_repo_tree(argv=None):
    """Print the repo tree view, sourced from a precomputed file when available."""
    parser = argparse.ArgumentParser(prog="repo-tree", description="Print repo tree (from tree.txt if available)")
    parser.add_argument("--path", default="tree.txt", help="Path to precomputed tree file")
    opts = parser.parse_args(argv)
    tree_file = Path(opts.path)
    # Guard clause: nothing precomputed -> emit the placeholder and stop.
    if not tree_file.exists():
        print("(no tree.txt found)", flush=True)
        return
    print(tree_file.read_text(encoding="utf-8"), flush=True)
def cmd_repo_flatten(argv=None):
    """Print the flattened code listing, sourced from a precomputed file when available."""
    parser = argparse.ArgumentParser(prog="repo-flatten", description="Print flattened code listing")
    parser.add_argument("--path", default="FLATTENED_CODE.txt", help="Path to pre-flattened code file")
    opts = parser.parse_args(argv)
    listing = Path(opts.path)
    # Guard clause: nothing precomputed -> emit the placeholder and stop.
    if not listing.exists():
        print("(no FLATTENED_CODE.txt found)", flush=True)
        return
    print(listing.read_text(encoding="utf-8"), flush=True)