# Aventra OS — lightweight Gradio app.
# (Recovered from a Hugging Face Spaces page dump; the original upload had
# crashed at runtime — see notes in visualize_memory below.)
# === Lightweight Setup ===
import json
import os

# Redirect Hugging Face caches to ephemeral /tmp so repeated runs don't bloat
# persistent storage. These variables must be set BEFORE any module that
# imports `transformers` is loaded — the library reads them at import time —
# so they come before the local imports below. TRANSFORMERS_CACHE is the
# legacy name kept for older transformers versions; HF_HOME covers newer ones.
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
os.environ["HF_HOME"] = "/tmp/hf_home"

import gradio as gr
import numpy as np

from agent_core import AgentCore
from context_graph import ContextGraph
from graph_reasoner import GraphReasoner
from graph_view import visualize_reasoned_graph
from memory_store import MemoryStore

# Memory stored in /tmp so it resets safely on container restart.
MEMORY_PATH = "/tmp/memory.json"

# === Initialize Core Systems ===
agent = AgentCore()                      # text embedding
reasoner = GraphReasoner()               # reasoning over user input
graph = ContextGraph()                   # context graph (not used below yet)
memory = MemoryStore(path=MEMORY_PATH)   # persistent (text, vector) store
# === Core Chat Function ===
def process_input(user_input, history=None):
    """Aventra OS conversation loop: embed, memorize, reason, reply.

    Args:
        user_input: Raw text typed by the user.
        history: Chat history as a list of (user, bot) tuples. A fresh list
            is created when None — the original ``history=[]`` default was a
            shared mutable default, leaking history across independent calls.

    Returns:
        ``(history, history)``: the same updated list twice, matching the two
        Gradio outputs wired to this callback.
    """
    if history is None:
        history = []
    # 1. Embed the input so it can be stored and compared in vector space.
    vector = agent.embed_text(user_input)
    # 2. Persist the (text, vector) pair.
    memory.add_memory(user_input, vector)
    # 3. Reason over the raw text and format the reply.
    reasoning = reasoner.reason(user_input)
    response = f"[Aventra Response]: {reasoning}"
    # 4. Append the exchange to the chat history.
    history.append((user_input, response))
    return history, history
# === Visualize Memory ===
def visualize_memory():
    """Render the memory graph as inline HTML for the ``gr.HTML`` output.

    Returns the HTML string (or a short status message) directly. The
    original returned ``gr.HTML.update(...)``, which was removed in Gradio 4
    and raises at runtime; returning the raw value is accepted by both
    Gradio 3.x and 4.x.
    """
    memories = memory.memories
    if not memories:
        return "<b>No memory data yet — start chatting first.</b>"

    # Pairwise cosine similarity. Each unordered pair is computed once and
    # emitted in both directions, preserving the symmetric edge list the
    # original nested i != j loop produced (at half the cost).
    relationships = []
    for i, (text1, vec1) in enumerate(memories):
        norm1 = np.linalg.norm(vec1)
        for text2, vec2 in memories[i + 1:]:
            denom = norm1 * np.linalg.norm(vec2)
            if denom == 0:
                # Zero-norm vector: cosine similarity is undefined; skip
                # instead of dividing by zero.
                continue
            similarity = float(np.dot(vec1, vec2) / denom)
            if similarity > 0.35:
                relationships.append((text1, text2, similarity))
                relationships.append((text2, text1, similarity))

    # Generate the visualization file and inline its contents.
    output_html = visualize_reasoned_graph(memories, relationships)
    print(f"✅ Visualization created at: {output_html}")
    if os.path.exists(output_html):
        with open(output_html, "r", encoding="utf-8") as f:
            return f.read()
    return "⚠️ Visualization failed — file not found."
# === Clear Memory ===
def clear_memory():
    """Drop every stored memory, persist the empty state, report success."""
    memory.memories = []
    memory.save()
    return "🧹 Memory cleared successfully."
# === Build Gradio Interface ===
with gr.Blocks(title="Aventra OS - Lightweight Mode") as demo:
    gr.Markdown("## 🧠 **Aventra OS — Memory Engine (Light Mode)**")

    chatbot = gr.Chatbot(label="Aventra Chat")
    msg = gr.Textbox(label="Type a message")
    send_btn = gr.Button("Send 🚀")
    visualize_btn = gr.Button("Visualize Memory 🕸️")
    clear_btn = gr.Button("Clear Memory 🧹")
    graph_html = gr.HTML(label="Memory Graph")

    # process_input returns (history, history), so the chatbot component is
    # wired twice to consume both values.
    send_btn.click(process_input, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
    visualize_btn.click(visualize_memory, outputs=[graph_html])
    # NOTE(review): clear_memory returns a status string but no output
    # component is wired here, so the message is discarded — confirm whether
    # a status textbox should display it.
    clear_btn.click(clear_memory, outputs=[])

# Bind on all interfaces at port 7860 — the Hugging Face Spaces default.
demo.launch(server_name="0.0.0.0", server_port=7860)