Tpayne101 committed on
Commit
e9b7faf
·
verified ·
1 Parent(s): c482b64

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -41
app.py CHANGED
@@ -1,70 +1,81 @@
1
- import os
2
  import gradio as gr
3
- from sentence_transformers import SentenceTransformer
4
- from memory_store import MemoryStore
5
  from graph_reasoner import GraphReasoner
 
6
  from graph_view import visualize_reasoned_graph
 
7
 
8
- # --- Initialize Environment ---
9
  os.environ["TRANSFORMERS_CACHE"] = "/home/user/.cache"
10
- model = SentenceTransformer("all-MiniLM-L6-v2")
11
- memory = MemoryStore()
12
- reasoner = GraphReasoner()
13
 
14
- # --- Core Logic ---
15
- def process_input(user_input, history):
16
- if not user_input.strip():
17
- return history + [("User input empty.", "Please type something meaningful.")]
18
-
19
- # Encode input
20
- vector = model.encode(user_input)
21
 
22
- # Store memory
23
- save_status = memory.add_memory(user_input, vector)
 
 
 
 
 
 
24
 
25
- # Retrieve related memories
26
- related = memory.retrieve_relevant(vector)
27
- recall_text = "\n".join([f"• {r[0]} (score: {round(r[1], 4)})" for r in related])
28
 
29
- # Build response
30
- response = f"{save_status}\n\nHere’s what I recall that’s most relevant:\n{recall_text if related else 'No related context yet.'}"
31
  history.append((user_input, response))
32
- return history
33
 
 
34
  def visualize_memory():
35
- # Get all stored memories
36
  memories = memory.memories
37
-
38
- # Build relationships between memories (using simple semantic matching)
39
  relationships = []
 
40
  for i, (text1, vec1) in enumerate(memories):
41
  for j, (text2, vec2) in enumerate(memories):
42
  if i != j:
43
- # Cosine similarity threshold
44
  similarity = np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
45
- if similarity > 0.35: # threshold for linking related memories
46
  relationships.append((text1, text2, float(similarity)))
47
 
48
  # Generate visualization
49
  output_html = visualize_reasoned_graph(memories, relationships)
50
-
51
- # Print confirmation and show output path
52
  print(f"✅ Visualization created at: {output_html}")
 
 
53
  if os.path.exists(output_html):
54
- return f"✅ Memory graph generated successfully!\n\nYou can open it here: {output_html}"
 
 
55
  else:
56
- return "⚠️ Visualization failed — file not found."
57
- # --- UI Layout ---
58
- with gr.Blocks(title="Aventra OS Memory Engine") as demo:
59
- gr.Markdown("## 🧠 Aventra Memory Interface")
60
- chatbot = gr.Chatbot(label="Aventra Conversation", height=400)
61
- user_input = gr.Textbox(label="Message", placeholder="Type something like 'My name is Tirrek'...")
 
 
 
 
 
 
 
62
  send_btn = gr.Button("Send")
63
- graph_btn = gr.Button("Visualize Memory")
64
- clear_btn = gr.Button("Clear Chat")
 
65
 
66
- send_btn.click(process_input, inputs=[user_input, chatbot], outputs=[chatbot])
67
- graph_btn.click(visualize_memory, inputs=None, outputs=None)
68
- clear_btn.click(lambda: None, None, chatbot, queue=False)
69
 
70
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
1
import json  # NOTE(review): appears unused in this file — confirm before removing
import os

import gradio as gr
import numpy as np

from agent_core import AgentCore
from context_graph import ContextGraph
from graph_reasoner import GraphReasoner
from graph_view import visualize_reasoned_graph
from memory_store import MemoryStore

# === Setup ===
# Redirect the HF transformers cache to a writable path inside the Space.
os.environ["TRANSFORMERS_CACHE"] = "/home/user/.cache"
MEMORY_PATH = "/home/user/app/memory.json"

# === Initialize Core Systems ===
agent = AgentCore()                      # embeds text into vectors
reasoner = GraphReasoner()               # produces a reasoning string per input
graph = ContextGraph()                   # NOTE(review): never used below — confirm
memory = MemoryStore(path=MEMORY_PATH)   # persistent (text, vector) store
 
 
18
 
19
# === Core Chat Function ===
def process_input(user_input, history=None):
    """Main conversational and memory loop.

    Embeds the user's message, persists it to the memory store, runs the
    graph reasoner over it, and appends the (user, response) turn to the
    chat history.

    Args:
        user_input: Raw text typed by the user.
        history: Gradio chat history as a list of (user, bot) tuples.
            Defaults to a fresh list per call.

    Returns:
        (history, history) — duplicated because the caller wires two
        outputs to the same Chatbot component.
    """
    # Fix: the original used `history=[]`, a mutable default shared across
    # calls, which silently leaks turns between independent sessions.
    if history is None:
        history = []

    # Robustness: don't embed/store empty or whitespace-only input
    # (the pre-rewrite version had this guard; restored for consistency).
    if not user_input or not user_input.strip():
        history.append((user_input, "Please type something meaningful."))
        return history, history

    # Step 1: Embed input
    vector = agent.embed_text(user_input)

    # Step 2: Save to memory
    memory.add_memory(user_input, vector)

    # Step 3: Reasoning
    reasoning = reasoner.reason(user_input)
    response = f"[Aventra Response]: {reasoning}"

    # Step 4: Update history
    history.append((user_input, response))
    return history, history
35
 
36
# === Visualize Memory Function ===
def visualize_memory():
    """Render the memory graph and return it for inline display.

    Links every ordered pair of stored memories whose cosine similarity
    exceeds 0.35, renders the graph to an HTML file via
    visualize_reasoned_graph, and returns its contents as a Gradio update
    for the HTML component.
    """
    memories = memory.memories  # list of (text, vector) pairs

    # Perf: compute each vector's norm once instead of inside the
    # O(n^2) pair loop below.
    norms = [np.linalg.norm(vec) for _, vec in memories]

    relationships = []
    for i, (text1, vec1) in enumerate(memories):
        for j, (text2, vec2) in enumerate(memories):
            if i == j:
                continue
            denom = norms[i] * norms[j]
            if denom == 0:
                # Fix: a zero vector would have raised ZeroDivisionError /
                # produced NaN; skip such pairs instead.
                continue
            similarity = np.dot(vec1, vec2) / denom
            if similarity > 0.35:  # threshold for linking related memories
                relationships.append((text1, text2, float(similarity)))

    # Generate visualization
    output_html = visualize_reasoned_graph(memories, relationships)
    print(f"✅ Visualization created at: {output_html}")

    # Return graph inline. Fix: `gr.HTML.update` was removed in Gradio 4;
    # the generic `gr.update(value=...)` works on both Gradio 3.x and 4.x.
    if os.path.exists(output_html):
        with open(output_html, "r", encoding="utf-8") as f:
            html_content = f.read()
        return gr.update(value=html_content)
    return gr.update(value="⚠️ Visualization failed — file not found.")
60
+
61
# === Clear Memory ===
def clear_memory():
    """Empty the memory store, persist the cleared state, return a status."""
    status = "🧹 Memory cleared successfully."
    memory.memories = []
    memory.save()
    return status
66
+
67
# === Build Gradio Interface ===
with gr.Blocks(title="Aventra OS") as demo:
    gr.Markdown("## 🧠 **Aventra OS — Context Graph Memory Engine**")
    chatbot = gr.Chatbot(label="Aventra Chat")
    msg = gr.Textbox(label="Type a message")
    send_btn = gr.Button("Send")
    visualize_btn = gr.Button("Visualize Memory 🕸️")
    clear_btn = gr.Button("Clear Memory 🧹")
    graph_html = gr.HTML(label="Memory Graph")

    # process_input returns (history, history); both outputs feed the chatbot.
    send_btn.click(process_input, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
    # Fix: pressing Enter in the textbox previously did nothing — wire
    # Textbox.submit to the same handler as the Send button.
    msg.submit(process_input, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
    visualize_btn.click(visualize_memory, outputs=[graph_html])
    # NOTE(review): clear_memory returns a status string but no output
    # component is wired here, so the message is dropped — confirm intended.
    clear_btn.click(clear_memory, outputs=[])

demo.launch(server_name="0.0.0.0", server_port=7860)