# Uploaded by PeacebinfLow — knowledge/mindseye_knowledge.py (commit dd39fa5, verified)
import random
import re

from typing import List, Dict
# Local knowledge base for the sandbox chatbot. Each fact is a dict with:
#   "id"       -- stable identifier for the fact
#   "title"    -- human-readable heading
#   "keywords" -- lowercase phrases matched against normalized user messages
#   "answer"   -- markdown-flavored reply returned verbatim to the user
FACTS: List[Dict] = [
    {
        "id": "mindseye_overview",
        "title": "What is MindsEye?",
        "keywords": [
            "what is mindseye",
            "what is minds eye",
            "mindseye os",
            "minds eye os",
            "mindseye overview",
            "os overview",
            "system overview",
        ],
        "answer": (
            "MindsEye is a multi-surface AI operating system being built under "
            "SAGEWORKS AI. It treats data as time-labeled binary flows across "
            "devices, browsers, and cloud services.\n\n"
            "Instead of random scripts everywhere, you get:\n"
            "- A shared sense of **time** for all events\n"
            "- Ledgers that remember what agents did and why\n"
            "- Layers like LAW-N / LAW-T / Workspace / SQL / Cloud Fabric that "
            "cooperate instead of fighting each other."
        ),
    },
    {
        "id": "workspace_layer",
        "title": "Workspace Automation Layer",
        "keywords": [
            "workspace",
            "gmail",
            "docs",
            "sheets",
            "drive",
            "workspace automation",
            "google workspace",
            "apps script",
        ],
        "answer": (
            "The Workspace layer is the front-of-house for MindsEye.\n\n"
            "Gmail, Docs, Sheets, and Drive events land here first. They get:\n"
            "- Labeled with time and context\n"
            "- Routed into flows\n"
            "- Optionally logged into ledgers\n\n"
            "Itโ€™s the part that makes MindsEye feel like an assistant living "
            "inside your existing tools, not just another random bot."
        ),
    },
    {
        "id": "ledger_layer",
        "title": "MindsEye Ledger / Time-Labeled Truth",
        "keywords": [
            "ledger",
            "google ledger",
            "time labeled",
            "time-labeled",
            "tlb",
            "binary ledger",
        ],
        "answer": (
            "The ledger layer is where MindsEye becomes ruthless about time.\n\n"
            "Every event, prompt, and response becomes a **time-labeled row**. "
            "Agents can look back and see:\n"
            "- What happened\n"
            "- When it happened\n"
            "- Which node or agent acted\n\n"
            "That lets MindsEye reason over history instead of treating each prompt "
            "like amnesia mode."
        ),
    },
    {
        "id": "law_n_layer",
        "title": "LAW-N โ€” Network-Native Layer",
        "keywords": [
            "law-n",
            "law n",
            "lawn",
            "network layer",
            "network-native",
            "api graph",
            "service graph",
        ],
        "answer": (
            "LAW-N is the network-native layer of MindsEye.\n\n"
            "Instead of hard-coding API calls everywhere, LAW-N treats services, "
            "agents, and tools as a **graph of time-aware nodes**. Each node knows:\n"
            "- What data it consumes\n"
            "- What it outputs\n"
            "- How it fits into the wider flow."
        ),
    },
    {
        "id": "roadmap",
        "title": "MindsEye Roadmap",
        "keywords": [
            "roadmap",
            "what next",
            "next steps",
            "plan",
            "how to scale",
        ],
        "answer": (
            "High-level roadmap:\n\n"
            "1. Sandbox โ€” this Space: local chatbot + flow studio.\n"
            "2. Glue โ€” bridge sandbox into ledgers + Workspace flows.\n"
            "3. Network โ€” orchestrate services via LAW-N.\n"
            "4. Full Stack โ€” OS experience across web, mobile, and cloud."
        ),
    },
]
# Replies used when no fact in FACTS matches the user's message at all;
# one is picked at random by find_best_fact().
GENERIC_FALLBACKS = [
    "I donโ€™t have that topic wired into the sandbox brain yet. Try asking about MindsEye, the ledger, LAW-N, the roadmap, or SAGEWORKS.",
    "This version of the bot is intentionally small. If this topic matters, it probably deserves its own node or ledger later.",
]
# Lowercase salutations that mark a message as small talk rather than a
# knowledge question. Multi-word entries are matched as whole phrases.
GREETING_TRIGGERS = [
    "hi", "hello", "hey", "yo", "morning", "afternoon", "evening", "sup", "what's up", "whats up"
]


def normalize(text: str) -> str:
    """Lowercase *text* and strip surrounding whitespace for matching."""
    return text.strip().lower()


def is_greeting(message: str) -> bool:
    """Return True when *message* contains a greeting trigger as a whole word
    or phrase.

    Fix: the previous implementation used a bare substring check, so messages
    like "history" matched the trigger "hi" and "beyond" matched "yo". Using
    word boundaries keeps real greetings matching while eliminating those
    false positives.
    """
    msg = normalize(message)
    return any(
        re.search(rf"\b{re.escape(trigger)}\b", msg) is not None
        for trigger in GREETING_TRIGGERS
    )
def greeting_reply() -> str:
    """Return one of the canned small-talk replies, chosen at random."""
    replies = (
        "Yo ๐Ÿ‘‹ This is the MindsEye sandbox brain. No APIs, just local logic.",
        "Hey ๐Ÿ‘‹ Youโ€™re talking to the MindsEye sandbox bot โ€” the tiny version before the 21-repo beast shows up.",
        "Hi ๐Ÿ‘‹ This is the clean-slate MindsEye chatbot. Ask about MindsEye, the ledger, LAW-N, or the roadmap.",
    )
    return random.choice(replies)
# Filler words excluded from soft scoring. Without this, keywords such as
# "what is mindseye" made generic messages like "what is it" false-match a
# fact via the tokens "what" / "is" alone.
_SOFT_MATCH_STOPWORDS = frozenset({"what", "is", "the", "a", "an", "to", "of", "how"})


def find_best_fact(user_message: str) -> str:
    """Answer *user_message* from the local FACTS knowledge base.

    Resolution order:
    1. Greetings get a canned greeting reply.
    2. An exact keyword phrase appearing in the message wins immediately.
    3. Otherwise facts are soft-scored by distinct keyword-token overlap
       (ignoring filler words) and the best-scoring fact's answer is returned.
    4. With no overlap at all, a random generic fallback is returned.
    """
    msg = normalize(user_message)
    if is_greeting(msg):
        return greeting_reply()

    # Exact keyword match: any full keyword phrase found inside the message.
    for fact in FACTS:
        for kw in fact["keywords"]:
            if kw in msg:
                return fact["answer"]

    # Soft scoring: count DISTINCT keyword tokens present in the message.
    # Using a set fixes the old double-counting of tokens repeated across
    # several keywords, and the stopword filter keeps filler words from
    # selecting an unrelated fact.
    best_fact = None
    best_score = 0
    tokens = set(msg.split())
    for fact in FACTS:
        kw_tokens = {
            token
            for kw in fact["keywords"]
            for token in kw.split()
            if token not in _SOFT_MATCH_STOPWORDS
        }
        score = len(kw_tokens & tokens)
        if score > best_score:
            best_score = score
            best_fact = fact

    if best_fact is not None:
        return best_fact["answer"]
    return random.choice(GENERIC_FALLBACKS)