Tpayne101 committed on
Commit
bc09f90
·
verified ·
1 Parent(s): 0b284c5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -40
app.py CHANGED
@@ -1,41 +1,43 @@
1
- # app.py
2
- import os
3
  import gradio as gr
4
- from agent_core import AgentCore
5
-
6
# Keep the Transformers model cache inside the Space's writable quota area.
# setdefault: an externally configured cache path still wins.
os.environ.setdefault("TRANSFORMERS_CACHE", "/home/user/.cache")

# Single module-level agent instance, shared by every chat request.
agent = AgentCore()

# Markdown welcome text rendered above the chat widget.
INTRO = (
    "👋 **Welcome to Aventra OS (Phase 4)**\n\n"
    "- I remember important facts you tell me (with weighting + decay).\n"
    "- Ask questions and I’ll retrieve the most relevant memories.\n"
    "- Try:\n"
    " • *“My name is Tirrek. I run MotionBoys.”*\n"
    " *“My favorite sport is basketball.”*\n"
    " • *“What’s my brand?”* or *“What do you know about me?”*"
)
20
-
21
def chat_fn(message, history):
    """Forward the latest user message to the agent and return its reply.

    `history` (list of (user, assistant) pairs) is required by the
    ChatInterface callback contract but intentionally unused here.
    """
    del history  # only the current message matters to the agent
    return agent.handle(message)
25
-
26
# Assemble the Gradio UI: page heading, intro markdown, and the chat widget.
with gr.Blocks(title="Aventra OS — Memory Core") as demo:
    gr.Markdown("# 🧠 Aventra OS — Memory Core (Phase 4)")
    gr.Markdown(INTRO)

    # ChatInterface wires chat_fn to the chatbot/textbox pair; retry and
    # undo buttons are disabled, leaving only Send and Clear.
    chat = gr.ChatInterface(
        fn=chat_fn,
        chatbot=gr.Chatbot(height=420),
        textbox=gr.Textbox(placeholder="Tell me a fact or ask a question…", lines=2, autofocus=True),
        submit_btn="Send",
        retry_btn=None,
        undo_btn=None,
        clear_btn="Clear",
    )

# Launch only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from sentence_transformers import SentenceTransformer
3
+ import numpy as np
4
+ from memory_store import MemoryStore
5
+ from graph_view import visualize_graph
6
+
7
# Initialize the shared embedding model and memory store once at import
# time so every request reuses the same instances.
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
memory = MemoryStore()
10
+
11
def process_input(user_input, chat_history):
    """Handle one chat turn: store the message as a memory, recall related ones.

    Args:
        user_input: Raw text from the textbox.
        chat_history: List of [user, assistant] pairs (Gradio Chatbot format).

    Returns:
        The updated chat history. Always a value derived from `chat_history`
        without mutating it in place, so Gradio state is never aliased.
    """
    user_input = user_input.strip()
    if not user_input:
        # Ignore empty submissions instead of embedding/storing "".
        return chat_history

    # Special commands that render the memory graph instead of chatting.
    if user_input.lower() in ("visualize memory", "show graph", "see memory map"):
        memories = getattr(memory, "memories", [])
        if not memories:
            return chat_history + [[user_input, "No memories yet to visualize."]]
        visualize_graph(memories)
        return chat_history + [[user_input, "🧠 Memory graph generated. Check the output window."]]

    # Embed the message and persist it as a new memory.
    vector = model.encode(user_input)
    save_status = memory.add_memory(user_input, vector)

    # Retrieve the most semantically similar stored memories and format them.
    related = memory.retrieve_relevant(vector)
    recall = "\n".join(f"• {txt} (score: {round(score, 4)})" for txt, score in related)

    response = f"{save_status}\nHere’s what I recall most related:\n{recall}"
    # Return a fresh list (original code mutated chat_history in place here,
    # inconsistently with the visualize branch above).
    return chat_history + [[user_input, response]]
33
+
34
# Build the Gradio UI: a chatbot, an input textbox, and a clear button.
with gr.Blocks() as demo:
    gr.Markdown("# 🧠 Aventra Memory Interface")
    chatbot = gr.Chatbot(height=500)
    msg = gr.Textbox(label="Type your message...")
    clear = gr.Button("Clear Chat")

    # Submitting routes the message through the memory pipeline; the
    # chatbot widget is both input (current history) and output (updated).
    msg.submit(process_input, [msg, chatbot], [chatbot])
    # Clear resets the chatbot to an empty history.
    clear.click(lambda: [], None, chatbot)

# Guard the launch (as the previous revision did) so importing this module
# — e.g. from tests or tooling — does not start the server.
if __name__ == "__main__":
    demo.launch()