import gradio as gr
import os, json, numpy as np
from agent_core import AgentCore
from graph_reasoner import GraphReasoner
from context_graph import ContextGraph
from graph_view import visualize_reasoned_graph
from memory_store import MemoryStore

# === Lightweight Setup ===
# Redirects model cache to ephemeral folder (won’t bloat storage)
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
os.environ["HF_HOME"] = "/tmp/hf_home"

# Memory is persisted to /tmp so it resets cleanly when the container restarts
MEMORY_PATH = "/tmp/memory.json"

# === Initialize Core Systems ===
agent = AgentCore()
reasoner = GraphReasoner()
graph = ContextGraph()
memory = MemoryStore(path=MEMORY_PATH)
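# Note: `graph` (ContextGraph) is instantiated alongside the other subsystems
# but is not referenced directly in this file.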

# === Core Chat Function ===
def process_input(user_input, history=None):
    """Aventra OS conversation loop with memory and reasoning."""
    # Avoid Python's mutable-default-argument pitfall: a shared default list
    # would accumulate history across calls.
    history = history or []

    # 1️⃣ Embed user input
    vector = agent.embed_text(user_input)

    # 2️⃣ Persist the new memory (text + its embedding)
    memory.add_memory(user_input, vector)

    # 3️⃣ Reason on new input
    reasoning = reasoner.reason(user_input)
    response = f"[Aventra Response]: {reasoning}"

    # 4️⃣ Append to chat history
    history.append((user_input, response))
    return history

# === Visualize Memory ===
def visualize_memory():
    """Inline visualization for memory graph."""
    memories = memory.memories
    if not memories:
        return "<b>No memory data yet — start chatting first.</b>"

    relationships = []
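    # Cosine similarity between memory embeddings: sim(a, b) = a·b / (|a||b|).
    # Pairs above the 0.35 threshold (a heuristic cutoff) become graph edges.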
    for i, (text1, vec1) in enumerate(memories):
        for j, (text2, vec2) in enumerate(memories):
            if j > i:  # visit each pair once to avoid duplicate edges
                denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
                similarity = np.dot(vec1, vec2) / denom if denom else 0.0
                if similarity > 0.35:
                    relationships.append((text1, text2, float(similarity)))

    # Generate the visualization; visualize_reasoned_graph writes an HTML file
    # and returns its path.
    output_html = visualize_reasoned_graph(memories, relationships)

    if os.path.exists(output_html):
        print(f"✅ Visualization created at: {output_html}")
        with open(output_html, "r") as f:
            html_content = f.read()
        return html_content
    else:
        return "⚠️ Visualization failed — file not found."

# === Clear Memory ===
def clear_memory():
    memory.memories = []
    memory.save()
    return "🧹 Memory cleared successfully."

# === Build Gradio Interface ===
with gr.Blocks(title="Aventra OS - Lightweight Mode") as demo:
    gr.Markdown("## 🧠 **Aventra OS — Memory Engine (Light Mode)**")

    chatbot = gr.Chatbot(label="Aventra Chat")
    msg = gr.Textbox(label="Type a message")
    send_btn = gr.Button("Send 🚀")
    visualize_btn = gr.Button("Visualize Memory 🕸️")
    clear_btn = gr.Button("Clear Memory 🧹")
    graph_html = gr.HTML(label="Memory Graph")

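    # The Chatbot component doubles as conversation state: its current value is
    # passed in as `history` and re-rendered from the returned list.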
    send_btn.click(process_input, inputs=[msg, chatbot], outputs=[chatbot])
    visualize_btn.click(visualize_memory, outputs=[graph_html])
    # Route the confirmation message into the graph panel so it is visible
    clear_btn.click(clear_memory, outputs=[graph_html])

demo.launch(server_name="0.0.0.0", server_port=7860)