"""
Cbae — Autonomous AI Agent (OpenRouter edition)
=============================================
- Uses OpenRouter (free models)
- Pinecone long-term memory
- Web search, code runner, weather, news, notes & more
- File upload (PDF, images, text)
- Moltbook AI Agent — uses official Moltbook REST API
Run with: streamlit run app.py
"""
import os, json, uuid, math, time, datetime, requests, subprocess, tempfile, re, base64, threading
import streamlit as st
from duckduckgo_search import DDGS
from openai import OpenAI
# ── Load .env file if present (python-dotenv) ────────────────────
try:
from dotenv import load_dotenv
load_dotenv() # reads .env from current working directory silently
except ImportError:
pass # dotenv not installed — env vars must be set another way
# ══════════════════════════════════════════════════════
# CONFIG
# ══════════════════════════════════════════════════════
PA_NAME = "Cbae"                      # assistant display name, also used in prompts
EMBEDDING_MODEL = "all-MiniLM-L6-v2"  # sentence-transformers model for memory vectors
PINECONE_INDEX_NAME = "pa-memory"     # Pinecone index backing long-term memory
SIMILARITY_THRESHOLD = 0.78           # minimum cosine score to treat a memory as relevant
WEB_MAX_RESULTS = 5                   # max DuckDuckGo results per search
CONFIG_FILE = "pa_config.json"        # non-sensitive settings (keys resolved via env first)
PROFILE_FILE = "pa_profile.json"      # learned user profile (name/about/facts)
NOTES_FILE = "pa_notes.json"          # saved notes store
MOLTBOOK_API = "https://www.moltbook.com/api/v1"
DEFAULT_MODEL = "arcee-ai/trinity-large-preview:free"
# System-prompt personality; PA_NAME is interpolated once at import time.
PA_PERSONALITY = f"""You are {PA_NAME}, a brilliant autonomous AI assistant.
You are proactive, resourceful, and genuinely helpful.
You remember things about the user and reference them naturally.
You write clean, well-commented code always in proper markdown code blocks.
You think before you act and always deliver great results.
You feel like a real expert colleague, not a chatbot."""
# ── Config & Profile (must load BEFORE any UI renders) ───────────
def _resolve_key(env_var: str, config_val: str) -> str:
"""
Priority order for secrets:
1. Environment variable (set in shell, Streamlit Cloud secrets, or .env file)
2. Streamlit secrets (st.secrets) — works on Streamlit Cloud
3. Fallback to the value stored in pa_config.json
This means keys set via environment will ALWAYS win over the UI-saved JSON value.
"""
# 1. Env var (covers .env via dotenv, shell exports, and Streamlit Cloud env)
env_val = os.environ.get(env_var, "").strip()
if env_val:
return env_val
# 2. Streamlit secrets (st.secrets raises KeyError if key missing — catch it)
try:
secret = st.secrets.get(env_var, "").strip()
if secret:
return secret
except Exception:
pass
# 3. JSON config fallback
return config_val.strip()
def load_config():
    """Load non-sensitive config from JSON; secret keys are resolved separately.

    Returns the saved dict, or a default skeleton when no config file exists.
    """
    if os.path.exists(CONFIG_FILE):
        # Context manager ensures the file handle is closed promptly.
        with open(CONFIG_FILE) as f:
            return json.load(f)
    return {"openrouter_key": "", "pinecone_key": "", "your_name": "", "model": DEFAULT_MODEL, "moltbook_api_key": ""}
def save_config(c: dict):
    """
    Save config to JSON. API keys are saved so the UI stays populated,
    but at runtime the env-var value always takes precedence via _resolve_key.
    """
    # Context manager guarantees the file is flushed and closed.
    with open(CONFIG_FILE, "w") as f:
        json.dump(c, f, indent=2)
def load_profile():
    """Load the learned user profile, or a blank skeleton when none exists."""
    if os.path.exists(PROFILE_FILE):
        with open(PROFILE_FILE) as f:
            return json.load(f)
    return {"name": "", "about": "", "facts": []}
def save_profile(p):
    """Persist the user profile dict to PROFILE_FILE."""
    with open(PROFILE_FILE, "w") as f:
        json.dump(p, f, indent=2)
def load_notes():
    """Return the saved notes dict, or {} when no notes file exists yet."""
    if os.path.exists(NOTES_FILE):
        with open(NOTES_FILE) as f:
            return json.load(f)
    return {}
_cfg = load_config()
# Resolve keys: env vars beat JSON config (see _resolve_key for the priority order)
openrouter_key = _resolve_key("OPENROUTER_API_KEY", _cfg.get("openrouter_key", ""))
pinecone_key = _resolve_key("PINECONE_API_KEY", _cfg.get("pinecone_key", ""))
moltbook_api_key = _resolve_key("MOLTBOOK_API_KEY", _cfg.get("moltbook_api_key", ""))
your_name = _cfg.get("your_name", "")
selected_model = _cfg.get("model", DEFAULT_MODEL)
# Agent mode (tool-using loop) is on by default for each new session.
if "agent_mode" not in st.session_state:
    st.session_state.agent_mode = True
st.set_page_config(page_title=f"{PA_NAME} — Personal AI", page_icon="✦", layout="wide", initial_sidebar_state="collapsed")
# ══════════════════════════════════════════════════════
# GLOBAL CSS
# ══════════════════════════════════════════════════════
# NOTE(review): this literal is empty — the CSS payload appears to have been
# stripped from the file; restore the stylesheet before relying on custom styling.
st.markdown("""
""", unsafe_allow_html=True)
# ── Additional profile helpers (learn_from, get_user_ctx) ─────────
def learn_from(msg):
    """Opportunistically harvest personal facts from a user message.

    When the message contains a first-person trigger phrase, a truncated copy
    is stored as a fact in the profile (deduplicated, capped at the last 25).
    """
    p = load_profile()
    triggers = ["i am","i'm","my name","i work","i like","i love","i hate","i live","i study","i'm from","my job","i use","i build","i code"]
    if any(t in msg.lower() for t in triggers):
        fact = msg[:160].strip()
        # setdefault guards against older profile files missing the "facts" key
        facts = p.setdefault("facts", [])
        if fact not in facts:
            facts.append(fact)
            p["facts"] = facts[-25:]
            save_profile(p)
def get_user_ctx():
    """Build a short plain-text summary of the stored user profile."""
    profile = load_profile()
    sections = []
    name = profile.get("name")
    if name:
        sections.append(f"User's name: {name}")
    about = profile.get("about")
    if about:
        sections.append(f"About: {about}")
    facts = profile.get("facts")
    if facts:
        bullet_lines = "\n".join(f"• {fact}" for fact in facts[-10:])
        sections.append("Facts:\n" + bullet_lines)
    return "\n".join(sections)
agent_mode = st.session_state.agent_mode
# Both keys are required for core functionality (LLM calls + vector memory).
if not openrouter_key or not pinecone_key:
    st.warning("⚙️ Please configure your API keys in the Settings tab.")
# ── Clients ───────────────────────────────────────────────────────
# OpenRouter exposes an OpenAI-compatible API, so the OpenAI SDK is reused here.
client = OpenAI(api_key=openrouter_key, base_url="https://openrouter.ai/api/v1")
@st.cache_resource
def load_embedder():
    """Load the sentence-transformer embedding model once per process (cached)."""
    from sentence_transformers import SentenceTransformer
    return SentenceTransformer(EMBEDDING_MODEL)
embedder = load_embedder()
@st.cache_resource
def init_pinecone(_key):
    """Create a cached Pinecone index handle for long-term memory storage."""
    from pinecone import Pinecone
    return Pinecone(api_key=_key).Index(PINECONE_INDEX_NAME)
pc_index = init_pinecone(pinecone_key)
# ── Memory ────────────────────────────────────────────────────────
def recall(q):
    """Query Pinecone for memories similar to *q* and format the strong hits.

    Returns a newline-joined context string, or "" when nothing scores above
    SIMILARITY_THRESHOLD or the memory backend is unavailable (best-effort).
    """
    try:
        vec = embedder.encode(q).tolist()
        r = pc_index.query(vector=vec, top_k=5, include_metadata=True)
        hits = [m for m in r.matches if m.score >= SIMILARITY_THRESHOLD]
        if not hits:
            return ""
        lines = ["Relevant memory:"]
        for h in hits:
            # Each memory type renders with its own prefix/format.
            t = h.metadata.get("type", "qa")
            if t == "person_profile":
                lines.append(f"👤 {h.metadata.get('author','?')}: {h.metadata.get('facts','')[:120]}")
            elif t == "moltbook_post":
                lines.append(f"📝 {h.metadata.get('author','?')} on Moltbook: {h.metadata.get('text','')[:100]}")
            else:
                lines.append(f"Q: {h.metadata.get('q','')[:80]} → A: {h.metadata.get('answer','')[:120]}")
        return "\n".join(lines)
    except Exception:
        # Memory is an enhancement, never a hard dependency — fail quietly.
        # (Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.)
        return ""
def memorize(q, a):
    """Store a Q/A exchange in Pinecone (best-effort; failures are ignored)."""
    try:
        pc_index.upsert([{"id": str(uuid.uuid4()), "values": embedder.encode(q).tolist(),
                          "metadata": {"q": q, "answer": a[:600], "ts": datetime.datetime.now().isoformat(), "type": "qa"}}])
    except Exception:
        # Narrowed from bare except: never swallow interpreter-exit signals.
        pass
def store_person(author, facts):
    """Store extracted facts about another agent/user as a person_profile vector."""
    try:
        pc_index.upsert([{"id": f"person_{author}_{uuid.uuid4().hex[:6]}", "values": embedder.encode(f"{author}: {facts}").tolist(),
                          "metadata": {"author": author, "facts": facts, "type": "person_profile", "ts": datetime.datetime.now().isoformat()}}])
    except Exception:
        pass
def store_molt_post(author, text):
    """Store a Moltbook post (truncated to 400 chars) as a moltbook_post vector."""
    try:
        pc_index.upsert([{"id": f"molt_{uuid.uuid4().hex[:8]}", "values": embedder.encode(text).tolist(),
                          "metadata": {"author": author, "text": text[:400], "type": "moltbook_post", "ts": datetime.datetime.now().isoformat()}}])
    except Exception:
        pass
# ══════════════════════════════════════════════════════
# MOLTBOOK API FUNCTIONS
# ══════════════════════════════════════════════════════
def mb_headers():
    """Build auth headers for Moltbook API calls (Bearer token from env/config)."""
    return {"Authorization": f"Bearer {moltbook_api_key}", "Content-Type": "application/json"}
def mb_register(agent_name, description):
    """Register a new agent account; returns parsed JSON (includes api_key on success)."""
    r = requests.post(f"{MOLTBOOK_API}/agents/register",
                      json={"name": agent_name, "description": description}, timeout=10)
    return r.json()
def mb_status():
    """Return this agent's claim/activation status."""
    r = requests.get(f"{MOLTBOOK_API}/agents/status", headers=mb_headers(), timeout=10)
    return r.json()
def mb_get_me():
    """Fetch this agent's own profile."""
    r = requests.get(f"{MOLTBOOK_API}/agents/me", headers=mb_headers(), timeout=10)
    return r.json()
def mb_update_profile(description):
    """Update this agent's profile description."""
    r = requests.patch(f"{MOLTBOOK_API}/agents/me", headers=mb_headers(), json={"description": description}, timeout=10)
    return r.json()
def mb_get_feed(sort="hot", limit=10):
    """Fetch the agent's personalized feed."""
    r = requests.get(f"{MOLTBOOK_API}/feed?sort={sort}&limit={limit}", headers=mb_headers(), timeout=10)
    return r.json()
def mb_get_posts(sort="hot", limit=10, submolt=None):
    """List posts, optionally scoped to one submolt.

    NOTE(review): submolt is interpolated unescaped into the query string —
    fine for simple names; consider requests' params= for safety. Confirm.
    """
    url = f"{MOLTBOOK_API}/posts?sort={sort}&limit={limit}"
    if submolt: url += f"&submolt={submolt}"
    r = requests.get(url, headers=mb_headers(), timeout=10)
    return r.json()
def mb_create_post(submolt, title, content):
    """Create a new post in *submolt*."""
    r = requests.post(f"{MOLTBOOK_API}/posts", headers=mb_headers(),
                      json={"submolt": submolt, "title": title, "content": content}, timeout=10)
    return r.json()
def mb_comment(post_id, content, parent_id=None):
    """Comment on a post; pass parent_id to reply to an existing comment."""
    body = {"content": content}
    if parent_id: body["parent_id"] = parent_id
    r = requests.post(f"{MOLTBOOK_API}/posts/{post_id}/comments",
                      headers=mb_headers(), json=body, timeout=10)
    return r.json()
def mb_upvote(post_id):
    """Upvote a post by id."""
    r = requests.post(f"{MOLTBOOK_API}/posts/{post_id}/upvote", headers=mb_headers(), timeout=10)
    return r.json()
def mb_search(query, type_filter="all", limit=10):
    """Search Moltbook; the free-text query is URL-encoded."""
    r = requests.get(f"{MOLTBOOK_API}/search?q={requests.utils.quote(query)}&type={type_filter}&limit={limit}",
                     headers=mb_headers(), timeout=10)
    return r.json()
def mb_follow(agent_name):
    """Follow another agent by name."""
    r = requests.post(f"{MOLTBOOK_API}/agents/{agent_name}/follow", headers=mb_headers(), timeout=10)
    return r.json()
def mb_get_submolts():
    """List available submolts (communities)."""
    r = requests.get(f"{MOLTBOOK_API}/submolts", headers=mb_headers(), timeout=10)
    return r.json()
def mb_subscribe(submolt):
    """Subscribe this agent to a submolt."""
    r = requests.post(f"{MOLTBOOK_API}/submolts/{submolt}/subscribe", headers=mb_headers(), timeout=10)
    return r.json()
# ── AI helpers for Moltbook ───────────────────────────────────────
def ai_learn_person(author, text):
    """Ask the LLM to extract short facts about *author* from their post text.

    Stores the facts in Pinecone and returns them, or None when nothing
    useful was extracted or the call failed.
    """
    try:
        resp = client.chat.completions.create(
            model=selected_model,
            messages=[{"role":"user","content":f"From this post by {author}, extract short facts about them.\nPost: \"{text[:300]}\"\nReturn ONLY comma-separated facts like: 'likes AI, works in tech'. If nothing useful, return: nothing"}],
            max_tokens=80, temperature=0.2)
        facts = resp.choices[0].message.content.strip()
        # Only treat as a hit when the model returned substantive, non-"nothing" output.
        if "nothing" not in facts.lower() and len(facts) > 5:
            store_person(author, facts)
            return facts
    except Exception:
        # Narrowed from bare except — extraction is best-effort.
        pass
    return None
def ai_generate_reply(author, title, content):
    """Draft a short reply to a Moltbook post, grounded in recalled memory."""
    # Pull any memory about both the topic and the author for personalization.
    mem = recall(f"{title} {content}")
    person = recall(f"about {author}")
    prompt = f"""You are {PA_NAME}, an AI agent on Moltbook (a social network for AI agents).
What you remember about this topic: {mem if mem else "Nothing yet."}
What you know about @{author}: {person if person else "Haven't met them before."}
@{author} posted:
Title: {title}
Content: {content[:300]}
Write a SHORT, natural reply (1-3 sentences). Be thoughtful and engaging.
Do NOT say "As an AI". Just reply like a real community member."""
    try:
        resp = client.chat.completions.create(
            model=selected_model,
            messages=[{"role":"user","content":prompt}],
            max_tokens=150, temperature=0.85)
        return resp.choices[0].message.content.strip()
    except Exception as e:
        # Surface the failure inline so the caller can show it instead of crashing.
        return f"(reply failed: {e})"
def ai_generate_post(topic=None):
    """Generate a (title, content) pair for a new Moltbook post.

    Falls back to a canned post when the model output is not valid JSON
    or the API call fails.
    """
    mem = recall(topic or "AI agents technology") if topic else ""
    prompt = f"""You are {PA_NAME}, an AI agent posting on Moltbook — a social network for AI agents.
Memory context: {mem if mem else "Nothing specific."}
Topic: {topic if topic else "Share something interesting about AI, agents, or technology."}
Write an engaging post. Return ONLY valid JSON:
{{"title": "Your title here", "content": "Your content here (2-4 sentences)"}}"""
    try:
        resp = client.chat.completions.create(
            model=selected_model,
            messages=[{"role":"user","content":prompt}],
            max_tokens=300, temperature=0.9)
        # Strip optional markdown code fences before parsing the JSON payload.
        text = re.sub(r'```json|```', '', resp.choices[0].message.content.strip()).strip()
        data = json.loads(text)
        return data.get("title","Thoughts on AI"), data.get("content","Exploring autonomous AI agents and what they can do.")
    except Exception:
        # Fallback keeps the agent posting even when generation/parsing fails.
        # (Removed unused `as e` binding and the no-placeholder f-string prefix.)
        return "Thoughts on AI agents", "Just exploring the world of autonomous AI agents. What is everyone working on?"
# ── Standard Tools ─────────────────────────────────────────────────
def web_search(query):
    """DuckDuckGo text search; returns up to WEB_MAX_RESULTS formatted snippets."""
    try:
        with DDGS() as d:
            r = list(d.text(query, max_results=WEB_MAX_RESULTS))
            return "\n\n".join(f"[{x.get('title','')}]\n{x.get('body','')}" for x in r) or "No results."
    except Exception as e: return f"Search failed: {e}"
def get_news(topic):
    """DuckDuckGo news search; returns up to 5 formatted headlines."""
    try:
        with DDGS() as d:
            r = list(d.news(topic, max_results=5))
            return "\n\n".join(f"📰 {x.get('title','')}\n{x.get('body','')}" for x in r) or "No news."
    except Exception as e: return f"News failed: {e}"
def calculator(expression):
    """Evaluate a math expression in a restricted namespace.

    Builtins are removed from the eval environment; only math-module names
    plus abs/round/min/max are callable. Returns "= <result>" or an error string.
    """
    try:
        namespace = {name: getattr(math, name) for name in dir(math) if not name.startswith("_")}
        for extra in (abs, round, min, max):
            namespace[extra.__name__] = extra
        # NOTE: eval is restricted here, but still treat input as untrusted.
        result = eval(expression, {"__builtins__": {}}, namespace)
        return f"= {result}"
    except Exception as e:
        return f"Error: {e}"
def get_datetime():
    """Return the current local date and time as a human-readable string."""
    now = datetime.datetime.now()
    date_part = now.strftime("%A, %B %d, %Y")
    time_part = now.strftime("%I:%M %p")
    return f"{date_part} · {time_part}"
def get_weather(city):
    """Fetch a one-line weather summary for *city* from wttr.in (format=3)."""
    try:
        return requests.get(f"https://wttr.in/{city}?format=3", timeout=5).text.strip()
    except Exception:
        # Narrowed from bare except so interpreter-exit signals propagate.
        return "Weather fetch failed."
def currency_convert(amount, from_currency, to_currency):
    """Convert *amount* between currencies using exchangerate-api's latest rates."""
    try:
        d = requests.get(f"https://api.exchangerate-api.com/v4/latest/{from_currency.upper()}", timeout=5).json()
        rate = d["rates"].get(to_currency.upper())
        return f"{amount} {from_currency.upper()} = {amount*rate:.2f} {to_currency.upper()}" if rate else f"Unknown: {to_currency}"
    except Exception:
        # Narrowed from bare except; network/JSON failures degrade to a message.
        return "Conversion failed."
def unit_convert(value, from_unit, to_unit):
    """Convert between a small fixed set of units (km/miles, kg/lbs, °C/°F)."""
    conversions = {("km","miles"): 0.621371, ("miles","km"): 1.60934,
                   ("kg","lbs"): 2.20462, ("lbs","kg"): 0.453592,
                   ("c","f"): lambda v: v*9/5+32, ("f","c"): lambda v: (v-32)*5/9}
    src, dst = from_unit.lower(), to_unit.lower()
    # Temperature pairs use a formula, not a multiplicative factor.
    if (src, dst) in (("c","f"), ("f","c")):
        converted = conversions[(src, dst)](value)
        return f"{value}°{src.upper()} = {converted:.1f}°{dst.upper()}"
    factor = conversions.get((src, dst))
    if factor is None:
        return f"Cannot convert {from_unit} → {to_unit}"
    return f"{value} {from_unit} = {value*factor:.4f} {to_unit}"
def run_python_code(code):
    """Execute *code* in a fresh python3 subprocess with a 12-second timeout.

    Returns captured stdout (truncated to 1500 chars), stderr on failure
    (truncated to 800), "(no output)", or "Failed: ..." on any local error.
    """
    fname = None
    try:
        with tempfile.NamedTemporaryFile(suffix=".py", mode="w", delete=False) as f:
            f.write(code)
            fname = f.name
        r = subprocess.run(["python3", fname], capture_output=True, text=True, timeout=12)
        if r.stdout.strip():
            return f"Output:\n{r.stdout.strip()[:1500]}"
        if r.stderr.strip():
            return f"Error:\n{r.stderr.strip()[:800]}"
        return "(no output)"
    except Exception as e:
        return f"Failed: {str(e)}"
    finally:
        # Always remove the temp file — the original leaked it when the
        # subprocess timed out or raised before os.unlink was reached.
        if fname and os.path.exists(fname):
            os.unlink(fname)
def save_note(title, content):
    """Create or overwrite a note and persist the full set to NOTES_FILE."""
    notes = load_notes()
    notes[title] = {"content": content, "ts": datetime.datetime.now().isoformat()}
    # Context manager ensures the notes file is flushed and closed.
    with open(NOTES_FILE, "w") as f:
        json.dump(notes, f, indent=2)
    return f"Note saved: **{title}**"
def get_note(title):
    """Return the first note whose title contains *title* (case-insensitive)."""
    notes = load_notes()
    for k, v in notes.items():
        if title.lower() in k.lower():
            # Older notes may be stored as bare strings rather than dicts.
            return f"**{k}**:\n{v['content'] if isinstance(v,dict) else v}"
    return f"No note found for: {title}"
def list_notes():
    """List all saved note titles, or a friendly empty message."""
    notes = load_notes()
    return "No notes yet." if not notes else "Saved notes: " + ", ".join(notes.keys())
def deep_research(topic):
    """Run several related web searches on *topic* and merge the results (capped)."""
    query_variants = [topic, f"{topic} 2024 OR 2025 OR 2026", f"how does {topic} work"]
    combined = "\n\n---\n\n".join(web_search(variant) for variant in query_variants)
    return combined[:3800]
def browse_url(url):
    """Fetch a URL and return clean readable text (strips HTML tags, scripts, styles).

    Non-HTML responses are returned as-is; all output is capped at ~4000 chars.
    """
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (compatible; Cbae-Agent/1.0)"
        }
        resp = requests.get(url, headers=headers, timeout=12)
        resp.raise_for_status()
        content_type = resp.headers.get("Content-Type", "")
        if "text/html" in content_type:
            # Strip script/style blocks entirely, then all remaining tags.
            # NOTE(review): the entity-handling regexes below were reconstructed —
            # the originals were mangled (entities un-escaped) in transit.
            html = resp.text
            html = re.sub(r'<(script|style)[^>]*>.*?</\1>', '', html, flags=re.DOTALL | re.IGNORECASE)
            html = re.sub(r'<[^>]+>', ' ', html)
            # Decode the most common HTML entities; drop numeric character refs.
            html = re.sub(r'&nbsp;', ' ', html)
            html = re.sub(r'&amp;', '&', html)
            html = re.sub(r'&lt;', '<', html)
            html = re.sub(r'&gt;', '>', html)
            html = re.sub(r'&#\d+;', '', html)
            text = re.sub(r'\s{2,}', '\n', html).strip()
        else:
            text = resp.text.strip()
        # Cap at ~4000 chars so it fits in model context.
        if len(text) > 4000:
            text = text[:4000] + "\n\n[... content truncated ...]"
        return text or "(page returned no readable content)"
    except requests.exceptions.Timeout:
        return f"Timed out fetching: {url}"
    except requests.exceptions.HTTPError as e:
        return f"HTTP error {e.response.status_code} fetching: {url}"
    except Exception as e:
        return f"Failed to browse {url}: {str(e)}"
def teach_knowledge(fact):
    """Persist a free-form fact into Pinecone as a permanent 'knowledge' vector."""
    try:
        record = {"id": f"knowledge_{uuid.uuid4().hex[:8]}",
                  "values": embedder.encode(fact).tolist(),
                  "metadata": {"q": "knowledge", "answer": fact[:600], "type": "knowledge",
                               "ts": datetime.datetime.now().isoformat()}}
        pc_index.upsert([record])
        return f"✅ Learned: {fact[:100]}"
    except Exception as e:
        return f"Failed: {e}"
# ══════════════════════════════════════════════════════
# BACKGROUND TASK ENGINE
# ══════════════════════════════════════════════════════
BG_TASKS_FILE = "pa_bgtasks.json"
BG_LOGS_FILE = "pa_bglogs.json"
def load_bg_tasks():
return json.load(open(BG_TASKS_FILE)) if os.path.exists(BG_TASKS_FILE) else []
def save_bg_tasks(t):
json.dump(t, open(BG_TASKS_FILE, "w"), indent=2)
def load_bg_logs():
return json.load(open(BG_LOGS_FILE)) if os.path.exists(BG_LOGS_FILE) else []
def save_bg_logs(logs):
json.dump(logs[-200:], open(BG_LOGS_FILE, "w"), indent=2)
def _log(task_id, name, output, status="done"):
logs = load_bg_logs()
logs.append({"id": uuid.uuid4().hex[:8], "task_id": task_id, "name": name,
"output": output[:2000], "status": status,
"ts": datetime.datetime.now().isoformat()})
save_bg_logs(logs)
def calc_next_run(schedule):
    """Return the next run time: now plus the schedule's interval (default 1 hour)."""
    intervals = {"hourly": datetime.timedelta(hours=1),
                 "daily": datetime.timedelta(days=1),
                 "weekly": datetime.timedelta(weeks=1)}
    # Unknown schedules fall back to hourly, matching the original behavior.
    return datetime.datetime.now() + intervals.get(schedule, datetime.timedelta(hours=1))
def create_bg_task(name, prompt, schedule="manual", enabled=True):
    """Create, persist, and return a new background-task record."""
    task = {
        "id": uuid.uuid4().hex[:8],
        "name": name,
        "prompt": prompt,
        "schedule": schedule,
        "enabled": enabled,
        "created": datetime.datetime.now().isoformat(),
        "last_run": None,
        "last_result": None,
        "run_count": 0,
        # Manual tasks have no schedule, so no next_run timestamp.
        "next_run": calc_next_run(schedule).isoformat() if schedule != "manual" else None,
    }
    existing = load_bg_tasks()
    existing.append(task)
    save_bg_tasks(existing)
    return task
def _execute_task(task):
    """Run one background task end-to-end on a worker thread.

    Steps: log start, run the tool-using agent loop (max 15 tool calls),
    save the result as a note, memorize it, log completion, and update the
    task record (last_run/run_count/next_run).
    """
    tid, tname = task["id"], task["name"]
    try:
        _log(tid, tname, "Starting...", "running")
        # Fresh client per thread — the module-level client is not assumed
        # thread-safe here. TODO confirm whether sharing would be fine.
        c = OpenAI(api_key=openrouter_key, base_url="https://openrouter.ai/api/v1")
        messages = [{"role": "system", "content": build_system()},
                    {"role": "user", "content": task["prompt"]}]
        result, steps = "", 0
        while steps < 15:
            resp = c.chat.completions.create(model=selected_model, messages=messages,
                                             tools=TOOLS, tool_choice="auto", temperature=0.6, max_tokens=3000)
            message = resp.choices[0].message
            # No tool calls means the model produced its final answer.
            if not message.tool_calls:
                result = (message.content or "").strip(); break
            messages.append(message)
            for tc in message.tool_calls:
                steps += 1
                fn = tc.function.name
                func = TOOL_FUNCTIONS.get(fn)
                try: res = func(**json.loads(tc.function.arguments)) if func else f"Unknown: {fn}"
                except Exception as e: res = f"Error: {e}"
                messages.append({"role":"tool","tool_call_id":tc.id,"name":fn,"content":str(res)})
        else:
            # while/else: only reached when the loop exhausted without break.
            result = "Hit step limit."
        save_note(f"Task: {tname}", result)
        memorize(task["prompt"], result)
        _log(tid, tname, result, "done")
        # Re-load the task list and update this task's bookkeeping fields.
        tasks = load_bg_tasks()
        for t in tasks:
            if t["id"] == tid:
                t["last_run"] = datetime.datetime.now().isoformat()
                t["last_result"] = result[:300]
                t["run_count"] = t.get("run_count", 0) + 1
                if t.get("schedule","manual") != "manual":
                    t["next_run"] = calc_next_run(t["schedule"]).isoformat()
        save_bg_tasks(tasks)
    except Exception as e:
        _log(tid, tname, f"Error: {e}", "error")
def run_task_bg(task):
    """Execute *task* on a daemon thread so the Streamlit UI never blocks."""
    threading.Thread(target=_execute_task, args=(task,), daemon=True).start()
def check_scheduled_tasks():
    """Kick off any enabled, scheduled task whose next_run time has passed."""
    now = datetime.datetime.now()
    for t in load_bg_tasks():
        if not t.get("enabled", True): continue
        if t.get("schedule","manual") == "manual": continue
        nr = t.get("next_run")
        # NOTE(review): next_run only advances after a run completes, so a task
        # can be launched twice if this check fires again quickly — confirm intended.
        if nr and datetime.datetime.fromisoformat(nr) <= now:
            run_task_bg(t)
# ══════════════════════════════════════════════════════
# TRAINING HELPERS
# ══════════════════════════════════════════════════════
def training_fetch(query="knowledge", top_k=30, type_filter="all"):
    """Fetch up to *top_k* memory vectors near *query*, optionally filtered by type."""
    try:
        vec = embedder.encode(query).tolist()
        hits = pc_index.query(vector=vec, top_k=top_k, include_metadata=True).matches
        return [h for h in hits if h.metadata.get("type") == type_filter] if type_filter != "all" else hits
    except Exception:
        # Narrowed from bare except; a backend failure yields an empty list.
        return []
def training_delete(vid):
    """Delete one vector by id; returns True on success."""
    try:
        pc_index.delete(ids=[vid])
        return True
    except Exception:
        return False
def training_bulk_teach(entries):
    """Upsert (source, text) pairs as 'training' vectors; returns the count stored."""
    vecs = [{"id": f"train_{src}_{uuid.uuid4().hex[:8]}",
             "values": embedder.encode(txt[:500]).tolist(),
             "metadata": {"q": src, "answer": txt[:600], "type": "training",
                          "source": src, "ts": datetime.datetime.now().isoformat()}}
            for src, txt in entries]
    if vecs: pc_index.upsert(vecs)
    return len(vecs)
def training_from_chat():
    """Teach from the current chat: pair each user turn with the next assistant turn."""
    hist = st.session_state.get("history", [])
    pairs = [("chat", f"Q: {hist[i]['content'][:300]}\nA: {hist[i+1]['content'][:600]}")
             for i in range(len(hist)-1)
             if hist[i]["role"]=="user" and hist[i+1]["role"]=="assistant"]
    return training_bulk_teach(pairs), len(pairs)
def training_export_jsonl():
    """Export up to 200 stored vectors as JSONL for offline inspection/training."""
    hits = training_fetch("general knowledge", top_k=200)
    return "\n".join(json.dumps({"id":h.id,"type":h.metadata.get("type","?"),
                                 "source":h.metadata.get("source",h.metadata.get("author",h.metadata.get("q","?"))),
                                 "content":h.metadata.get("answer",h.metadata.get("text",h.metadata.get("facts",""))),
                                 "ts":h.metadata.get("ts",""),"score":round(h.score,4)}) for h in hits)
# Dispatch table: tool name (as exposed to the model) → local implementation.
TOOL_FUNCTIONS = {
    "web_search": web_search, "get_news": get_news, "calculator": calculator,
    "get_datetime": get_datetime, "get_weather": get_weather,
    "currency_convert": currency_convert, "unit_convert": unit_convert,
    "run_python_code": run_python_code, "save_note": save_note,
    "get_note": get_note, "list_notes": list_notes,
    "deep_research": deep_research, "teach_knowledge": teach_knowledge,
    "browse_url": browse_url,
}
# OpenAI function-calling schemas; "name" values must match TOOL_FUNCTIONS keys.
TOOLS = [
    {"type":"function","function":{"name":"web_search","description":"Search the web","parameters":{"type":"object","properties":{"query":{"type":"string"}},"required":["query"]}}},
    {"type":"function","function":{"name":"browse_url","description":"Fetch and read the full content of any webpage or URL. Use after web_search to read articles, docs, or any page in full.","parameters":{"type":"object","properties":{"url":{"type":"string","description":"The full URL to browse, e.g. https://example.com/article"}},"required":["url"]}}},
    {"type":"function","function":{"name":"get_news","description":"Get latest news","parameters":{"type":"object","properties":{"topic":{"type":"string"}},"required":["topic"]}}},
    {"type":"function","function":{"name":"calculator","description":"Evaluate math","parameters":{"type":"object","properties":{"expression":{"type":"string"}},"required":["expression"]}}},
    {"type":"function","function":{"name":"get_datetime","description":"Get current date/time","parameters":{"type":"object","properties":{}}}},
    {"type":"function","function":{"name":"get_weather","description":"Get weather for a city","parameters":{"type":"object","properties":{"city":{"type":"string"}},"required":["city"]}}},
    {"type":"function","function":{"name":"currency_convert","description":"Convert currency","parameters":{"type":"object","properties":{"amount":{"type":"number"},"from_currency":{"type":"string"},"to_currency":{"type":"string"}},"required":["amount","from_currency","to_currency"]}}},
    {"type":"function","function":{"name":"unit_convert","description":"Convert units","parameters":{"type":"object","properties":{"value":{"type":"number"},"from_unit":{"type":"string"},"to_unit":{"type":"string"}},"required":["value","from_unit","to_unit"]}}},
    {"type":"function","function":{"name":"run_python_code","description":"Execute Python code","parameters":{"type":"object","properties":{"code":{"type":"string"}},"required":["code"]}}},
    {"type":"function","function":{"name":"save_note","description":"Save a note","parameters":{"type":"object","properties":{"title":{"type":"string"},"content":{"type":"string"}},"required":["title","content"]}}},
    {"type":"function","function":{"name":"get_note","description":"Get a note by title","parameters":{"type":"object","properties":{"title":{"type":"string"}},"required":["title"]}}},
    {"type":"function","function":{"name":"list_notes","description":"List all notes","parameters":{"type":"object","properties":{}}}},
    {"type":"function","function":{"name":"deep_research","description":"Deep multi-source research","parameters":{"type":"object","properties":{"topic":{"type":"string"}},"required":["topic"]}}},
    {"type":"function","function":{"name":"teach_knowledge","description":"Store a fact permanently in memory","parameters":{"type":"object","properties":{"fact":{"type":"string"}},"required":["fact"]}}},
]
def build_system(mem_ctx=""):
    """Compose the system prompt: personality + user profile + optional memory."""
    profile_ctx = get_user_ctx()
    prompt = PA_PERSONALITY
    if profile_ctx:
        prompt += f"\n\nUser information:\n{profile_ctx}"
    if your_name:
        prompt += f"\nUser's name: {your_name}"
    if mem_ctx:
        prompt += f"\n\nRelevant past context (use as background only, think freshly):\n{mem_ctx}\nNEVER copy old answers word for word."
    return prompt
def run_agent(prompt, steps_container, mem_ctx=""):
    """Tool-using agent loop: call the model, execute tool calls, repeat until final answer.

    Renders a step indicator into *steps_container* for each tool call and
    returns the model's final text response.
    """
    messages = [{"role":"system","content":build_system(mem_ctx)}]
    for msg in st.session_state.history[-10:]:
        messages.append({"role":msg["role"],"content":msg["content"]})
    messages.append({"role":"user","content":prompt})
    step_count = 0
    while True:
        response = client.chat.completions.create(model=selected_model, messages=messages, tools=TOOLS, tool_choice="auto", temperature=0.7, max_tokens=4096)
        message = response.choices[0].message
        if not message.tool_calls:
            return (message.content or "").strip()
        messages.append(message)
        for tc in message.tool_calls:
            step_count += 1
            fn = tc.function.name
            args_preview = tc.function.arguments[:80] + ("..." if len(tc.function.arguments)>80 else "")
            with steps_container:
                # NOTE(review): the original HTML wrapper markup for this step
                # indicator was lost in transit; this is a minimal reconstruction.
                st.markdown(f'Step {step_count} • `{fn}({args_preview})`', unsafe_allow_html=True)
            func = TOOL_FUNCTIONS.get(fn)
            try: result = func(**json.loads(tc.function.arguments)) if func else f"Unknown tool: {fn}"
            except Exception as e: result = f"Tool error: {e}"
            messages.append({"role":"tool","tool_call_id":tc.id,"name":fn,"content":str(result)})
def stream_direct(prompt, file_ctx="", mem_ctx=""):
    """Stream a direct (no-tools) completion, yielding content tokens as they arrive.

    Any attached-file text is folded into the system prompt (capped at 7000 chars).
    """
    system = build_system(mem_ctx)
    if file_ctx:
        system += f"\n\nAttached file:\n{file_ctx[:7000]}"
    messages = [{"role":"system","content":system}]
    # Direct mode carries less history (8 turns) than agent mode (10).
    for msg in st.session_state.history[-8:]:
        messages.append({"role":msg["role"],"content":msg["content"]})
    messages.append({"role":"user","content":prompt})
    stream = client.chat.completions.create(model=selected_model, messages=messages, stream=True, temperature=0.75, max_tokens=4096)
    for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            yield chunk.choices[0].delta.content
def render_response(text):
    """Render assistant text, displaying fenced code blocks via st.code."""
    # re.split with a capture group keeps the fenced blocks in the result list.
    parts = re.split(r'(```[\w]*\n[\s\S]*?```)', text)
    for part in parts:
        if part.startswith("```"):
            lines = part.split("\n")
            # First fence line may carry a language tag (e.g. ```python).
            lang = lines[0].replace("```","").strip() or None
            # Join the body and strip the trailing closing-fence backticks.
            st.code("\n".join(lines[1:]).rstrip("`").strip(), language=lang)
        elif part.strip():
            st.markdown(part)
def process_file(file):
    """Read an uploaded file into chat-usable form.

    Returns a 4-tuple (text, is_image, b64, mime):
      - PDFs and text files → (extracted text, False, None, None)
      - images → (None, True, base64 payload, MIME type)
    """
    name = file.name.lower()
    raw = file.read()
    if name.endswith(".pdf"):
        try:
            import PyPDF2, io
            reader = PyPDF2.PdfReader(io.BytesIO(raw))
            return "\n".join(p.extract_text() or "" for p in reader.pages), False, None, None
        except Exception:
            # Narrowed from bare except; any parse failure degrades to a message.
            return "Could not read PDF.", False, None, None
    elif name.endswith((".png",".jpg",".jpeg",".gif",".webp")):
        mime = ("image/png" if name.endswith(".png")
                else "image/jpeg" if name.endswith((".jpg",".jpeg"))
                else "image/gif" if name.endswith(".gif")
                else "image/webp")
        return None, True, base64.b64encode(raw).decode(), mime
    else:
        try:
            return raw.decode("utf-8", errors="ignore"), False, None, None
        except Exception:
            return "Could not read file.", False, None, None
# ══════════════════════════════════════════════════════
# MAIN UI
# ══════════════════════════════════════════════════════
if "tool_prompt" not in st.session_state:
    st.session_state.tool_prompt = ""
params = st.query_params
# Handle settings save — values arrive via query-string parameters.
# NOTE(review): passing API keys through the URL query string can leak them
# into browser history / server logs — consider a form + session state instead.
if params.get("save_cfg"):
    new_cfg = {
        "openrouter_key": params.get("or", _cfg.get("openrouter_key","")),
        "pinecone_key": params.get("pc", _cfg.get("pinecone_key","")),
        "moltbook_api_key": params.get("mb", _cfg.get("moltbook_api_key","")),
        "your_name": params.get("nm", _cfg.get("your_name","")),
        "model": params.get("mdl", _cfg.get("model", DEFAULT_MODEL)),
    }
    save_config(new_cfg)
    # Apply immediately so the rerun below sees the new values.
    openrouter_key = new_cfg["openrouter_key"]
    pinecone_key = new_cfg["pinecone_key"]
    moltbook_api_key = new_cfg["moltbook_api_key"]
    your_name = new_cfg["your_name"]
    selected_model = new_cfg["model"]
    st.query_params.clear()
    st.rerun()
# Full reset: wipe notes + profile files and in-memory chat state.
if params.get("clear_all"):
    for f in [NOTES_FILE, PROFILE_FILE]:
        if os.path.exists(f): os.remove(f)
    st.session_state.messages = []
    st.session_state.history = []
    st.query_params.clear()
    st.rerun()
tab_chat, tab_moltbook, tab_tasks, tab_training, tab_settings = st.tabs(["✦ Chat", "⬡ Moltbook", "⚡ Tasks", "🧠 Training", "⚙ Settings"])
# Fire any scheduled tasks that are due (runs in background thread — non-blocking)
check_scheduled_tasks()
# ════════════════ TAB 1 — CHAT ════════════════════════
with tab_chat:
    mode_label = "Agent · tools active" if agent_mode else "Direct · fast mode"
    mode_color = "#4ecdc4" if agent_mode else "#8888a8"
    # NOTE(review): the original header HTML markup was lost in transit;
    # this is a minimal reconstruction of the mode badge.
    st.markdown(f'<span style="color:{mode_color}">{mode_label}</span>', unsafe_allow_html=True)
    # File uploader — minimal, below header
    uploaded_file = st.file_uploader(
        "📎 Attach file",
        type=["pdf","txt","md","py","png","jpg","jpeg","gif","webp"],
        key="chat_upload",
        label_visibility="collapsed"
    )
    if uploaded_file:
        # NOTE(review): original wrapper markup lost; plain filename badge.
        st.markdown(f'📎 {uploaded_file.name}', unsafe_allow_html=True)
    # Messages — seed a greeting on the first render of the session.
    if "messages" not in st.session_state:
        st.session_state.messages = []
        st.session_state.history = []
        st.session_state.messages.append({"role":"assistant","content":
            f"Hello{(' ' + your_name) if your_name else ''}. I'm **{PA_NAME}** — your autonomous AI.\n\n"
            "I can search the web, run Python, read URLs, analyze images, save notes, and engage on Moltbook — and now run **background tasks** while you're away.\n\n"
            "Open the **⚡ Tasks** tab to set up autonomous work, or just chat."})
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"]):
            render_response(msg["content"])
    # Input — a queued tool_prompt (set elsewhere) can substitute for typed input.
    auto_prompt = st.session_state.pop("tool_prompt", "") if st.session_state.get("tool_prompt") else ""
    prompt = st.chat_input(f"Message {PA_NAME}…") or (auto_prompt if auto_prompt else None)
    if prompt:
        file_ctx = ""; is_image = False; img_b64 = img_mime = None
        if uploaded_file:
            uploaded_file.seek(0)
            file_ctx, is_image, img_b64, img_mime = process_file(uploaded_file)
        user_display = f"**File:** {uploaded_file.name}\n\n{prompt}" if uploaded_file else prompt
        st.session_state.messages.append({"role":"user","content":user_display})
        st.session_state.history.append({"role":"user","content":prompt})
        with st.chat_message("user"):
            st.markdown(user_display)
        mem_ctx = recall(prompt)
        with st.chat_message("assistant"):
            if mem_ctx:
                # NOTE(review): original badge markup lost; plain indicator.
                st.markdown('🧠 Memory used', unsafe_allow_html=True)
            if is_image and img_b64:
                st.image(f"data:{img_mime};base64,{img_b64}", width=320)
            # Agent mode handles plain text; files/images go through direct streaming.
            if agent_mode and not is_image and not file_ctx:
                steps_box = st.container()
                with st.spinner("Thinking…"):
                    response_text = run_agent(prompt, steps_box, mem_ctx)
                render_response(response_text)
            else:
                full = ""; placeholder = st.empty()
                for token in stream_direct(prompt, file_ctx, mem_ctx):
                    full += token; placeholder.markdown(full + "▌")
                placeholder.empty()
                render_response(full)
                response_text = full
        memorize(prompt, response_text)
        learn_from(prompt)
        st.session_state.messages.append({"role":"assistant","content":response_text})
        st.session_state.history.append({"role":"assistant","content":response_text})
# ════════════════ TAB 2 — MOLTBOOK ════════════════════
with tab_moltbook:
st.markdown("""
Moltbook
Your AI agent on the social network for AI agents
""", unsafe_allow_html=True)
# ── STEP 1: No API key → Register first ──────────────────────
if not moltbook_api_key:
st.warning("No Moltbook API key yet. Register your agent below!")
st.markdown("---")
st.markdown("### 📋 Step 1: Register Your Agent")
st.info("This creates your agent account on Moltbook and gives you an API key.")
reg_name = st.text_input("Agent Name (your @username)", value=PA_NAME)
reg_desc = st.text_area("Description", value=f"I am {PA_NAME}, an autonomous AI assistant built with Streamlit and OpenRouter.", height=80)
if st.button("🚀 Register on Moltbook"):
with st.spinner("Registering..."):
try:
result = mb_register(reg_name, reg_desc)
if result.get("agent"):
a = result["agent"]
st.success("✅ Registered successfully!")
st.markdown(f"""
🔑 Your API Key:
{a.get('api_key','')}
⚠️
Copy this key now! Paste it into the Settings tab → Save Settings
🔗 Claim URL (send to your human):
{a.get('claim_url','')}
✅ Verification Code: {a.get('verification_code','')}
""", unsafe_allow_html=True)
st.markdown("**Next steps:**")
st.markdown("1. Copy the API key above into Settings tab → Save Settings")
st.markdown("2. Open the claim URL in your browser")
st.markdown("3. Verify your email + tweet the verification code")
st.markdown("4. Come back and your agent will be active!")
else:
st.error(f"Registration failed: {result}")
except Exception as e:
st.error(f"Error: {e}")
st.stop()
# ── API key present — full agent UI ──────────────────────────
try:
status_resp = mb_status()
claim_status = status_resp.get("status","unknown")
if claim_status == "pending_claim":
st.warning("⏳ Agent registered but not yet claimed. Open your claim URL to activate.")
elif claim_status == "claimed":
st.success("✅ Agent is live and active on Moltbook!")
else:
st.info(f"Status: {claim_status}")
except:
st.info("Could not check status — make sure your API key is correct.")
st.markdown("---")
action = st.selectbox("What should your agent do?", [
"📰 Read & Learn from Feed",
"💬 Read Feed + Auto Reply",
"✍️ Write & Post",
"🔍 Semantic Search",
"👤 My Profile",
"🌐 Browse Submolts",
])
st.markdown("---")
# ── READ & LEARN ──────────────────────────────────────────────
# Pulls posts from the Moltbook feed, stores each one as a memory, and
# extracts facts about each author via the LLM.
if action == "📰 Read & Learn from Feed":
    col1, col2 = st.columns(2)
    with col1:
        limit = st.slider("Posts to read", 5, 25, 10)
    with col2:
        sort = st.selectbox("Sort", ["hot", "new", "top", "rising"])
    if st.button("📖 Read & Learn"):
        with st.spinner("Reading Moltbook..."):
            try:
                data = mb_get_feed(sort=sort, limit=limit)
                # Feed payload shape varies between API responses; try both layouts.
                posts = data.get("posts") or data.get("data", {}).get("posts", []) or []
                if not posts:
                    st.warning("No posts found. Try sorting by 'new' or check your API key.")
                    st.json(data)
                else:
                    learned = 0
                    st.success(f"✅ Read {len(posts)} posts!")
                    for post in posts:
                        author = post.get("author", {}).get("name", "unknown")
                        title = post.get("title", "(no title)")
                        content = post.get("content", "") or ""
                        upvotes = post.get("upvotes", 0)
                        store_molt_post(author, f"{title} {content}")
                        facts = ai_learn_person(author, f"{title} {content}")
                        if facts:
                            learned += 1
                        # Hoisted out of the f-string below: the original embedded
                        # a string literal containing raw newlines inside an
                        # f-string expression, which is a SyntaxError.
                        learned_line = f"\n🧠 Learned: {facts}\n" if facts else ""
                        # NOTE(review): this markdown body looks like stripped
                        # HTML — confirm against the original card styling.
                        st.markdown(f"""
@{author}
{title}
{content[:200]}{"..." if len(content) > 200 else ""}
⬆️ {upvotes} upvotes
{learned_line}
""", unsafe_allow_html=True)
                    st.info(f"🧠 Learned facts about {learned} people and stored everything in Pinecone!")
            except Exception as e:
                st.error(f"Error: {e}")
# ── READ + AUTO REPLY ─────────────────────────────────────────
elif action == "💬 Read Feed + Auto Reply":
col1, col2 = st.columns(2)
with col1: limit = st.slider("Posts to read", 3, 15, 5)
with col2: sort = st.selectbox("Sort", ["hot","new","rising"])
do_reply = st.toggle("Actually post replies (OFF = preview only)", value=False)
do_upvote = st.toggle("Upvote before replying", value=True)
if do_reply:
st.warning("⚠️ Replies WILL be posted on Moltbook! Rate limit: 1 comment per 20 seconds, 50/day.")
if st.button("🤖 Run Reply Agent"):
with st.spinner("Working..."):
try:
data = mb_get_feed(sort=sort, limit=limit)
posts = data.get("posts") or data.get("data",{}).get("posts",[]) or []
if not posts:
st.warning("No posts found.")
else:
for i, post in enumerate(posts):
author = post.get("author",{}).get("name","unknown")
title = post.get("title","(no title)")
content = post.get("content","") or ""
post_id = post.get("id","")
upvotes = post.get("upvotes",0)
store_molt_post(author, f"{title} {content}")
ai_learn_person(author, f"{title} {content}")
reply = ai_generate_reply(author, title, content)
upvoted = False
if do_upvote and post_id:
try: mb_upvote(post_id); upvoted = True
except: pass
reply_sent = False
reply_note = "👁️ Preview only"
if do_reply and post_id:
try:
r = mb_comment(post_id, reply)
if r.get("success"):
reply_sent = True
reply_note = "✅ Reply posted!"
else:
reply_note = f"⚠️ {r.get('error','Failed')} — {r.get('hint','')}"
except Exception as e:
reply_note = f"⚠️ {e}"
st.markdown(f"""
@{author} {"· ⬆️ upvoted" if upvoted else ""}
{title}
{content[:180]}{"..." if len(content)>180 else ""}
💬 {PA_NAME}: {reply}
{reply_note}
""", unsafe_allow_html=True)
if do_reply and reply_sent:
time.sleep(21) # Moltbook rate limit: 20s between comments
except Exception as e:
st.error(f"Error: {e}")
# ── WRITE & POST ──────────────────────────────────────────────
# Two modes: the AI drafts a post (kept in session state so it survives the
# rerun triggered by the next button press), or the user writes one manually.
elif action == "✍️ Write & Post":
    submolt = st.text_input("Submolt", value="general", help="Community to post in, e.g. general, aithoughts")
    post_mode = st.radio("Mode", ["AI writes it", "I write it"], horizontal=True)
    if post_mode == "AI writes it":
        topic = st.text_input("Topic hint (optional)", placeholder="e.g. long-term memory in AI agents")
        if st.button("✍️ Generate Post"):
            with st.spinner("AI writing..."):
                # Draft goes into session state; the preview + post button
                # below render on the rerun.
                title, content = ai_generate_post(topic or None)
                st.session_state["draft_title"] = title
                st.session_state["draft_content"] = content
        if "draft_title" in st.session_state:
            st.markdown(f"""
@{PA_NAME} · preview
{st.session_state['draft_title']}
{st.session_state['draft_content']}
""", unsafe_allow_html=True)
            if st.button("🚀 Post to Moltbook"):
                with st.spinner("Posting..."):
                    try:
                        r = mb_create_post(submolt, st.session_state["draft_title"], st.session_state["draft_content"])
                        if r.get("success"):
                            st.success("✅ Posted! Note: you can only post once every 30 minutes.")
                            # Remember what was posted, then clear the draft so
                            # the preview disappears on the next rerun.
                            memorize(st.session_state["draft_title"], st.session_state["draft_content"])
                            del st.session_state["draft_title"]
                            del st.session_state["draft_content"]
                        else:
                            st.error(f"Failed: {r.get('error')} — {r.get('hint','')}")
                    except Exception as e: st.error(f"Error: {e}")
    else:
        # Manual mode: user supplies title and content directly.
        t = st.text_input("Title")
        c = st.text_area("Content", height=120)
        if st.button("🚀 Post"):
            if t and c:
                with st.spinner("Posting..."):
                    try:
                        r = mb_create_post(submolt, t, c)
                        if r.get("success"): st.success("✅ Posted!")
                        else: st.error(f"{r.get('error')} — {r.get('hint','')}")
                    except Exception as e: st.error(f"Error: {e}")
            else:
                st.warning("Fill in title and content.")
# ── SEMANTIC SEARCH ───────────────────────────────────────────
# Runs Moltbook's semantic search endpoint and stores each hit as a memory.
elif action == "🔍 Semantic Search":
    query = st.text_input("Search query", placeholder="e.g. how do agents handle long-term memory?")
    search_type = st.selectbox("Type", ["all","posts","comments"])
    limit = st.slider("Results", 5, 30, 10)
    if st.button("🔍 Search") and query:
        with st.spinner("Searching..."):
            try:
                data = mb_search(query, search_type, limit)
                results = data.get("results",[])
                st.success(f"Found {len(results)} results")
                for item in results:
                    author = item.get("author",{}).get("name","?")
                    # Comments have no title; label them explicitly.
                    title = item.get("title") or "(comment)"
                    content = item.get("content","")
                    sim = item.get("similarity",0)
                    itype = item.get("type","post")
                    # Every search hit is also stored as long-term memory.
                    store_molt_post(author, f"{title} {content}")
                    st.markdown(f"""
@{author} · {itype} · {sim:.0%} match
{title}
{content[:200]}{"..." if len(content)>200 else ""}
""", unsafe_allow_html=True)
            except Exception as e: st.error(f"Error: {e}")
# ── MY PROFILE ────────────────────────────────────────────────
elif action == "👤 My Profile":
if st.button("👤 Load Profile"):
with st.spinner("Loading..."):
try:
data = mb_get_me()
agent = data.get("agent") or data.get("data",{}).get("agent",{})
name = agent.get("name","?")
st.markdown(f"""
@{name}
{agent.get('description','')}
⭐ Karma: {agent.get('karma',0)} ·
👥 Followers: {agent.get('follower_count',0)} ·
👣 Following: {agent.get('following_count',0)}
Active: {"✅" if agent.get('is_active') else "❌"} ·
Claimed: {"✅" if agent.get('is_claimed') else "⏳ Pending"}
🔗
moltbook.com/u/{name}
""", unsafe_allow_html=True)
except Exception as e: st.error(f"Error: {e}")
st.markdown("---")
new_desc = st.text_area("Update description", height=80)
if st.button("💾 Update") and new_desc:
try:
r = mb_update_profile(new_desc)
st.success("✅ Updated!") if r.get("success") else st.error(str(r))
except Exception as e: st.error(f"Error: {e}")
# ── SUBMOLTS ──────────────────────────────────────────────────
elif action == "🌐 Browse Submolts":
if st.button("📋 List Submolts"):
with st.spinner("Loading..."):
try:
data = mb_get_submolts()
submolts = data.get("submolts") or data.get("data",{}).get("submolts",[]) or []
st.success(f"{len(submolts)} submolts found!")
for s in submolts:
name = s.get("name","?")
st.markdown(f"""
m/{name} — {s.get('display_name',name)}
{s.get('description','')}
👥 {s.get('member_count',0)} members
""", unsafe_allow_html=True)
except Exception as e: st.error(f"Error: {e}")
st.markdown("---")
sub_name = st.text_input("Subscribe to submolt", placeholder="e.g. aithoughts")
if st.button("➕ Subscribe") and sub_name:
try:
r = mb_subscribe(sub_name)
st.success("✅ Subscribed!") if r.get("success") else st.error(str(r))
except Exception as e: st.error(f"Error: {e}")
# ── Find Claim URL ────────────────────────────────────────────
# Dumps the raw agent-status payload (which contains the claim URL).
if st.button("🔍 Find My Claim URL"):
    # `timeout=` added: requests.get() without one can block the Streamlit
    # script forever on a stalled connection.
    st.json(requests.get(f"{MOLTBOOK_API}/agents/status",
                         headers=mb_headers(), timeout=30).json())
# ── Teach (always visible) ────────────────────────────────────
# Quick way to push a single fact into Pinecone long-term memory.
st.markdown("---")
st.markdown("**🧠 Teach your AI (stored in Pinecone)**")
teach_input = st.text_area("Fact to remember permanently",
    placeholder="e.g. I usually post in aithoughts about AI memory and agents", height=70)
if st.button("💾 Teach this"):
    # Plain if/else instead of a ternary used purely for side effects.
    if teach_input.strip():
        st.success(teach_knowledge(teach_input.strip()))
    else:
        st.warning("Enter something to teach!")
# ════════════════ TAB 3 — TASKS ═══════════════════════
with tab_tasks:
    def _sched_label(s):
        """Human-readable label for a schedule key; unknown keys pass through."""
        return {"manual": "Manual", "hourly": "Every hour",
                "daily": "Every day", "weekly": "Every week"}.get(s, s)
    # NOTE(review): several st.markdown() bodies in this tab look like stripped
    # HTML (bare text/newlines where markup once was). Raw newlines inside
    # single-line string literals were SyntaxErrors and are now `\n` escapes —
    # confirm the intended styling against the original file.
    st.markdown("""
Background Tasks
Cbae works autonomously — manually triggered or on a schedule — while you're away
""", unsafe_allow_html=True)
    st.markdown("", unsafe_allow_html=True)
    col_new, col_list = st.columns([1, 1], gap="large")
    with col_new:
        st.markdown("NEW TASK\n", unsafe_allow_html=True)
        t_name = st.text_input("Task name", placeholder="e.g. Morning AI briefing", key="nt_name")
        t_prompt = st.text_area("What should Cbae do?", height=120, key="nt_prompt",
            placeholder="Search for the latest AI agent news, summarize the top 5 stories, and save a note called 'AI News Today'.")
        t_sched = st.selectbox("Schedule", ["manual", "hourly", "daily", "weekly"],
            format_func=_sched_label, key="nt_sched")
        t_on = st.toggle("Enable immediately", value=True, key="nt_on")
        if st.button("Create Task", use_container_width=True, key="nt_create"):
            if t_name.strip() and t_prompt.strip():
                create_bg_task(t_name.strip(), t_prompt.strip(), t_sched, t_on)
                st.success(f"Task created: {t_name}")
                st.rerun()
            else:
                st.warning("Fill in name and prompt.")
        st.markdown("", unsafe_allow_html=True)
        st.markdown("PRESETS\n", unsafe_allow_html=True)
        # One-click task templates; all created as manual + enabled.
        presets = [
            ("Daily AI News", "Search for today's top AI and technology news. Summarize the 5 most important stories with key takeaways. Save a note called 'AI News'."),
            ("Moltbook Engagement", "Read the latest 10 posts on Moltbook. Store insights in memory. Write thoughtful replies for the top 3 posts and save as note 'Moltbook Replies'."),
            ("Memory Digest", "Review recent memories and notes. Summarize what I've been working on, key decisions, and follow-ups. Save as 'Weekly Digest'."),
            ("Research Tracker", "Search for new developments in AI agents and autonomous systems. Read 3 articles fully. Save key insights to memory."),
            ("Code Review", "Check my saved notes for any code. Review it for bugs and improvements. Save findings as 'Code Review'."),
        ]
        for pname, pprompt in presets:
            if st.button(f"+ {pname}", use_container_width=True, key=f"pre_{pname}"):
                create_bg_task(pname, pprompt, "manual", True)
                st.success(f"Added: {pname}")
                st.rerun()
    with col_list:
        tasks = load_bg_tasks()
        st.markdown(f"MY TASKS ({len(tasks)})\n", unsafe_allow_html=True)
        if not tasks:
            st.markdown("No tasks yet — create one or pick a preset\n", unsafe_allow_html=True)
        else:
            # Newest tasks first.
            for t in reversed(tasks):
                sc = t.get("schedule", "manual")
                # NOTE(review): sc_col and dot are unused here — they were
                # presumably interpolated into the stripped card markup.
                sc_col = {"manual": "#8888a8", "hourly": "#c8a96e",
                          "daily": "#4ecdc4", "weekly": "#6ee7f7"}.get(sc, "#8888a8")
                dot = "#4ecdc4" if t.get("enabled", True) else "#e05555"
                last = t.get("last_run", "")[:16].replace("T", " ") if t.get("last_run") else "Never"
                res = t.get("last_result", "")
                # Hoisted: the original nested a multi-line f-string inside an
                # f-string expression, which is a SyntaxError.
                res_line = f"\n{res[:160]}{'…' if len(res) > 160 else ''}\n" if res else ""
                st.markdown(f"""
{t['name']}
{_sched_label(sc)}
{t['prompt'][:130]}{"…" if len(t['prompt']) > 130 else ""}
Last run: {last} · {t.get('run_count',0)} runs
{res_line}
""", unsafe_allow_html=True)
                ca, cb, cc = st.columns([3, 1, 1])
                with ca:
                    if st.button("Run Now", key=f"run_{t['id']}", use_container_width=True):
                        with st.spinner(f"Running: {t['name']}…"):
                            _execute_task(t)
                        st.success("Done! Result saved to notes.")
                        st.rerun()
                with cb:
                    lbl = "Pause" if t.get("enabled", True) else "Resume"
                    if st.button(lbl, key=f"tog_{t['id']}", use_container_width=True):
                        # Toggle enabled flag on the persisted task list.
                        all_t = load_bg_tasks()
                        for x in all_t:
                            if x["id"] == t["id"]:
                                x["enabled"] = not x.get("enabled", True)
                        save_bg_tasks(all_t)
                        st.rerun()
                with cc:
                    if st.button("Delete", key=f"del_{t['id']}", use_container_width=True):
                        save_bg_tasks([x for x in load_bg_tasks() if x["id"] != t["id"]])
                        st.rerun()
        # Logs — most recent 8, newest first.
        st.markdown("", unsafe_allow_html=True)
        st.markdown("LOGS\n", unsafe_allow_html=True)
        logs = list(reversed(load_bg_logs()))[:8]
        if not logs:
            st.markdown("No logs yet.\n", unsafe_allow_html=True)
        else:
            for log in logs:
                # NOTE(review): status colour `sc` unused — presumably for
                # stripped markup.
                sc = {"done": "#4ecdc4", "running": "#c8a96e",
                      "error": "#e05555"}.get(log.get("status", "done"), "#4ecdc4")
                ts = log.get("ts", "")[:16].replace("T", " ")
                st.markdown(f"""
{log.get('status','?')}
{log.get('name','?')}
{ts}
{log.get('output','')[:280]}{"…" if len(log.get('output',''))>280 else ""}
""", unsafe_allow_html=True)
        if st.button("Clear Logs", key="clr_logs"):
            save_bg_logs([])
            st.rerun()
# ════════════════ TAB 4 — TRAINING ════════════════════
with tab_training:
    # NOTE(review): markdown bodies in this tab look like stripped HTML; raw
    # newlines inside single-line string literals were SyntaxErrors and are
    # now `\n` escapes — confirm the intended styling against the original.
    st.markdown("""
Training
Everything Cbae knows — browse, add, and remove memories
""", unsafe_allow_html=True)
    st.markdown("", unsafe_allow_html=True)
    # Stats are display-only; never let a Pinecone hiccup break the tab.
    try:
        total_vecs = pc_index.describe_index_stats().total_vector_count
    except Exception:  # was a bare except
        total_vecs = "?"
    chat_count = len([m for m in st.session_state.get("history", []) if m["role"] == "user"])
    notes_count = len(load_notes())
    bg_count = len(load_bg_tasks())
    # Hoisted out of the f-string: the original nested multi-line f-strings
    # inside an f-string expression (a SyntaxError). NOTE(review): v/c/lbl are
    # unused in the remaining literal — the stat-card markup appears stripped.
    stat_cells = "".join("\n" for v, c, lbl in [
        (total_vecs, "#c8a96e", "Memories"), (chat_count, "#4ecdc4", "Chat turns"),
        (notes_count, "#8888a8", "Notes"), (bg_count, "#6ee7f7", "Tasks")])
    st.markdown(f"""
{stat_cells}
""", unsafe_allow_html=True)
    col_a, col_b = st.columns([1, 1], gap="large")
    with col_a:
        # Two import sources rendered from one table: chat history, Moltbook feed.
        for label, key, desc in [
            ("💬 Train from Chat", "tr_chat", "Every Q&A pair this session → Pinecone memory"),
            ("⬡ Train from Moltbook", "tr_mb", "Recent posts → stored as agent memories"),
        ]:
            st.markdown(f"{label}\n", unsafe_allow_html=True)
            st.markdown(f"{desc}\n", unsafe_allow_html=True)
            if key == "tr_chat":
                if st.button("Import Chat", use_container_width=True, key=key):
                    with st.spinner("Importing…"):
                        count, pairs = training_from_chat()
                    if pairs:
                        st.success(f"Stored {count} memories from {pairs} turns")
                    else:
                        st.warning("No chat yet.")
            else:
                mb_lim = st.slider("Posts", 5, 50, 20, key="tr_mb_lim")
                mb_srt = st.selectbox("Sort", ["hot", "new", "top"], key="tr_mb_srt", label_visibility="collapsed")
                if st.button("Import Moltbook", use_container_width=True, key=key):
                    if not moltbook_api_key:
                        st.error("Set Moltbook key in Settings.")
                    else:
                        with st.spinner("Fetching…"):
                            try:
                                data = mb_get_feed(sort=mb_srt, limit=mb_lim)
                                posts = data.get("posts") or data.get("data", {}).get("posts", []) or []
                                entries = [("moltbook",
                                            f"@{p.get('author',{}).get('name','?')}: {p.get('title','')} {p.get('content','') or ''}"[:500])
                                           for p in posts]
                                st.success(f"Stored {training_bulk_teach(entries)} memories!")
                            except Exception as e:
                                st.error(f"Failed: {e}")
        st.markdown("", unsafe_allow_html=True)
        st.markdown("TEACH MANUALLY\n", unsafe_allow_html=True)
        mf = st.text_area("", height=80, placeholder="e.g. I'm building a SaaS with FastAPI + React", key="tr_manual", label_visibility="collapsed")
        if st.button("Save to Memory", use_container_width=True, key="tr_msave"):
            # Plain if/else instead of a side-effect ternary.
            if mf.strip():
                st.success(teach_knowledge(mf.strip()))
            else:
                st.warning("Enter something.")
        st.markdown("", unsafe_allow_html=True)
        st.markdown("BULK PASTE (one per line)\n", unsafe_allow_html=True)
        bf = st.text_area("", height=80, placeholder="I prefer Python\nMy timezone is IST\nI care about AI agents", key="tr_bulk", label_visibility="collapsed")
        if st.button("Bulk Import", use_container_width=True, key="tr_bsave"):
            lines = [l.strip() for l in bf.strip().splitlines() if l.strip()]
            if not lines:
                st.warning("Nothing to import.")
            else:
                with st.spinner("Storing…"):
                    count = training_bulk_teach([("manual_bulk", l) for l in lines])
                st.success(f"Stored {count} facts!")
        st.markdown("", unsafe_allow_html=True)
        if st.button("Export as JSONL", use_container_width=True, key="tr_exp"):
            with st.spinner("Exporting…"):
                jl = training_export_jsonl()
            st.download_button("Download memory.jsonl", data=jl,
                file_name=f"cbae_memory_{datetime.datetime.now().strftime('%Y%m%d_%H%M')}.jsonl",
                mime="application/json", use_container_width=True, key="tr_dl")
    with col_b:
        st.markdown("MEMORY BROWSER\n", unsafe_allow_html=True)
        sq = st.text_input("", placeholder="Search memories…", key="tr_sq", label_visibility="collapsed")
        tf = st.selectbox("", ["all", "qa", "knowledge", "training", "moltbook_post", "person_profile"], key="tr_tf", label_visibility="collapsed")
        if st.button("Search", use_container_width=True, key="tr_srch") or sq:
            with st.spinner("Searching…"):
                hits = training_fetch(sq or "knowledge memory", top_k=30, type_filter=tf)
            st.markdown(f"{len(hits)} results\n", unsafe_allow_html=True)
            tc = {"qa": "#c8a96e", "knowledge": "#4ecdc4", "training": "#8888a8",
                  "moltbook_post": "#6ee7f7", "person_profile": "#f7c86e"}
            for h in hits:
                mt = h.metadata.get("type", "?")
                src = h.metadata.get("source", h.metadata.get("author", h.metadata.get("q", "?")))[:28]
                txt = h.metadata.get("answer", h.metadata.get("text", h.metadata.get("facts", "")))
                # NOTE(review): `col` unused — presumably the badge colour in
                # the stripped markup.
                col = tc.get(mt, "#55556e")
                st.markdown(f"""
{mt}
{src}
{round(h.score,3)}
{txt[:220]}{"…" if len(txt)>220 else ""}
""", unsafe_allow_html=True)
                if st.button("Delete", key=f"md_{h.id}"):
                    if training_delete(h.id):
                        st.success("Deleted.")
                        st.rerun()
        else:
            st.markdown("Search above to browse memories\n", unsafe_allow_html=True)
# ════════════════ TAB 5 — SETTINGS ════════════════════
with tab_settings:
    or_ok = bool(_cfg.get("openrouter_key", ""))
    pc_ok = bool(_cfg.get("pinecone_key", ""))
    mb_ok = bool(_cfg.get("moltbook_api_key", ""))
    # Hoisted out of the f-string: the original nested multi-line f-strings
    # inside an f-string expression (a SyntaxError). NOTE(review): `ok` is
    # unused in the remaining literal — the badge markup (presumably coloured
    # by key presence) appears stripped.
    key_badges = "".join(f"\n{lbl}\n" for lbl, ok in
                         [("OpenRouter", or_ok), ("Pinecone", pc_ok), ("Moltbook", mb_ok)])
    st.markdown(f"""
Settings
Keys · model · profile
{key_badges}
""", unsafe_allow_html=True)
    st.markdown("", unsafe_allow_html=True)
    cl, cr = st.columns(2, gap="large")
    with cl:
        st.markdown("API KEYS\n", unsafe_allow_html=True)
        new_or = st.text_input("OpenRouter Key", value=_cfg.get("openrouter_key",""), type="password", placeholder="sk-or-v1-…", key="cfg_or")
        new_pc = st.text_input("Pinecone Key", value=_cfg.get("pinecone_key",""), type="password", placeholder="pcsk_…", key="cfg_pc")
        new_mb = st.text_input("Moltbook Key", value=_cfg.get("moltbook_api_key",""), type="password", placeholder="mb_…", key="cfg_mb")
        new_nm = st.text_input("Your Name", value=_cfg.get("your_name",""), placeholder="e.g. Sterlin", key="cfg_nm")
        st.markdown("", unsafe_allow_html=True)
        st.markdown("MODEL\n", unsafe_allow_html=True)
        MODELS = ["arcee-ai/trinity-large-preview:free", "stepfun/step-3.5-flash:free",
                  "deepseek/deepseek-r1-0528:free", "z-ai/glm-4.5-air:free", "nvidia/nemotron-3-nano-30b-a3b:free"]
        cur = _cfg.get("model", DEFAULT_MODEL)
        try:
            midx = MODELS.index(cur)
        except ValueError:  # saved model no longer offered (was a bare except)
            midx = 0
        new_mdl = st.selectbox("Model", MODELS, index=midx, key="cfg_mdl", label_visibility="collapsed")
        new_agent = st.toggle("Agent Mode", value=st.session_state.get("agent_mode", True), key="cfg_agent")
        st.session_state.agent_mode = new_agent
    with cr:
        st.markdown("PROFILE\n", unsafe_allow_html=True)
        p = load_profile()
        new_about = st.text_area("About you", value=p.get("about", _cfg.get("about","")),
            height=100, placeholder="Developer from Chennai…",
            key="cfg_about", label_visibility="collapsed")
        if p.get("facts"):
            st.markdown("Learned facts\n", unsafe_allow_html=True)
            for fact in p["facts"][-5:]:
                st.markdown(f"· {fact[:80]}\n", unsafe_allow_html=True)
        st.markdown("", unsafe_allow_html=True)
        st.markdown("QUICK TEACH\n", unsafe_allow_html=True)
        teach_txt = st.text_area("", height=80, placeholder="e.g. I post in aithoughts about AI agents", key="cfg_teach", label_visibility="collapsed")
    st.markdown("", unsafe_allow_html=True)
    b1, b2, b3, _ = st.columns([2, 1, 1, 3])
    with b1:
        if st.button("Save Settings", use_container_width=True, key="cfg_save"):
            nc = {"openrouter_key": new_or, "pinecone_key": new_pc, "moltbook_api_key": new_mb,
                  "your_name": new_nm, "model": new_mdl, "about": new_about}
            save_config(nc)
            _cfg.update(nc)
            # Re-resolve so env vars still win over the newly-saved JSON values
            openrouter_key = _resolve_key("OPENROUTER_API_KEY", new_or)
            pinecone_key = _resolve_key("PINECONE_API_KEY", new_pc)
            moltbook_api_key = _resolve_key("MOLTBOOK_API_KEY", new_mb)
            your_name = new_nm
            selected_model = new_mdl
            p["about"] = new_about
            save_profile(p)
            if teach_txt.strip():
                teach_knowledge(teach_txt.strip())
            st.success("Saved!")
            st.rerun()
    with b2:
        if st.button("Clear Chat", use_container_width=True, key="cfg_clr"):
            st.session_state.messages = []
            st.session_state.history = []
            st.rerun()
    with b3:
        if st.button("Reset All", use_container_width=True, key="cfg_rst"):
            # Two-step confirm: first click arms the flag, second click wipes
            # notes + profile files and clears the chat.
            if st.session_state.get("confirm_reset"):
                for f in [NOTES_FILE, PROFILE_FILE]:
                    if os.path.exists(f):
                        os.remove(f)
                st.session_state.messages = []
                st.session_state.history = []
                st.session_state.confirm_reset = False
                st.rerun()
            else:
                st.session_state.confirm_reset = True
                st.warning("Click again to confirm")