broadfield-dev committed on
Commit
863794e
·
verified ·
1 Parent(s): 8ed4eec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -90
app.py CHANGED
@@ -1,91 +1,91 @@
1
- import base64
2
- from flask import Flask, render_template, request, jsonify, Response, stream_with_context
3
- from visumem_core import SectorMemory
4
- import ai_engine
5
- import json
6
-
7
app = Flask(__name__)

# Single persistent memory sector holding conversation history; shared by
# every route below. NOTE(review): module-level mutable state — presumably
# assumes a single worker process; confirm before scaling out.
mem_history = SectorMemory("history")
10
-
11
-
12
@app.route('/')
def index():
    """Serve the single-page front-end."""
    page = render_template('index.html')
    return page
15
-
16
@app.route('/visualize/<mem_type>')
def visualize(mem_type):
    """Render the memory sector as an inline PNG plus usage stats.

    NOTE(review): ``mem_type`` is currently ignored — only the history
    sector exists, so every request visualizes ``mem_history``.
    """
    sector = mem_history

    # Snapshot the sector as a PNG and inline it as a base64 data URI.
    encoded = base64.b64encode(sector.to_image_bytes()).decode('utf-8')

    # RGB-core stats: ``.count`` tracks active items and ``.cursor_green``
    # tracks bytes of text written (the old ``.slot_cursor`` /
    # ``.heap_cursor`` attributes are gone).
    return jsonify({
        "image": f"data:image/png;base64,{encoded}",
        "stats": f"{sector.count} Active Entries",
        "usage": f"{sector.cursor_green} Bytes Text"
    })
35
-
36
@app.route('/inspect/<mem_type>')
def inspect(mem_type):
    """Return the raw text content stored in the memory heap as JSON."""
    return jsonify({"content": mem_history.dump_heap_content()})
40
-
41
@app.route('/chat', methods=['POST'])
def chat():
    """Stream a chat completion grounded in stored memory.

    Expects a JSON body ``{"message": str}``. Returns a plain-text
    streamed response; the full exchange is written back to memory once
    streaming finishes.
    """
    # get_json(silent=True) returns None instead of raising when the body
    # is missing, not JSON, or has the wrong content type — so malformed
    # requests get our clean 400 rather than an unhandled error page.
    payload = request.get_json(silent=True) or {}
    user_msg = payload.get('message')
    if not user_msg:
        return jsonify({"error": "Empty"}), 400

    def generate():
        # 1. VECTORIZE USER INPUT
        q_vec = ai_engine.get_embedding(user_msg)

        # B. Relevant Past History — keep only sufficiently similar hits.
        hist_hits = mem_history.search(q_vec, top_k=2)
        long_term_txt = "\n".join(
            f"[Memory]: {h['text']}" for h in hist_hits if h['score'] > 0.4
        )

        # C. Recent Conversation
        recent_msgs = mem_history.get_recent_entries(n=4)
        recent_txt = "\n".join(recent_msgs)

        # 3. BUILD PROMPT
        system_prompt = f"""You are a helpful AI Assistant with an evolving rule system and knowledge base.

### RELEVANT MEMORIES (Context from past):
{long_term_txt}

### CURRENT CONVERSATION:
{recent_txt}
"""

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_msg}
        ]

        # 4. STREAM GENERATION — accumulate while yielding so the whole
        # reply can be logged afterwards.
        full_response = ""
        for chunk in ai_engine.chat_stream(messages):
            full_response += chunk
            yield chunk

        # 5. BACKGROUND: WRITE & REFLECT — persist the exchange with the
        # query embedding so future searches can find it.
        log_entry = f"User: {user_msg}\nAI: {full_response}"
        mem_history.write_entry(log_entry, q_vec)

    return Response(stream_with_context(generate()), mimetype='text/plain')
84
-
85
@app.route('/wipe', methods=['POST'])
def wipe():
    """Erase every stored memory entry and report success."""
    mem_history.wipe()
    return jsonify({"success": True})
89
-
90
if __name__ == '__main__':
    # Development entry point. NOTE(review): debug=True enables the
    # interactive debugger and auto-reload — do not ship this setting
    # to production.
    app.run(debug=True, port=5000)
 
1
+ import base64
2
+ from flask import Flask, render_template, request, jsonify, Response, stream_with_context
3
+ from vismem import SectorMemory
4
+ import ai_engine
5
+ import json
6
+
7
app = Flask(__name__)

# Single persistent memory sector holding conversation history; shared by
# every route below. NOTE(review): module-level mutable state — presumably
# assumes a single worker process; confirm before scaling out.
mem_history = SectorMemory("history")
10
+
11
+
12
@app.route('/')
def index():
    """Serve the single-page front-end."""
    page = render_template('index.html')
    return page
15
+
16
@app.route('/visualize/<mem_type>')
def visualize(mem_type):
    """Render the memory sector as an inline PNG plus usage stats.

    NOTE(review): ``mem_type`` is currently ignored — only the history
    sector exists, so every request visualizes ``mem_history``.
    """
    sector = mem_history

    # Snapshot the sector as a PNG and inline it as a base64 data URI.
    encoded = base64.b64encode(sector.to_image_bytes()).decode('utf-8')

    # RGB-core stats: ``.count`` tracks active items and ``.cursor_green``
    # tracks bytes of text written (the old ``.slot_cursor`` /
    # ``.heap_cursor`` attributes are gone).
    return jsonify({
        "image": f"data:image/png;base64,{encoded}",
        "stats": f"{sector.count} Active Entries",
        "usage": f"{sector.cursor_green} Bytes Text"
    })
35
+
36
@app.route('/inspect/<mem_type>')
def inspect(mem_type):
    """Return the raw text content stored in the memory heap as JSON."""
    return jsonify({"content": mem_history.dump_heap_content()})
40
+
41
@app.route('/chat', methods=['POST'])
def chat():
    """Stream a chat completion grounded in stored memory.

    Expects a JSON body ``{"message": str}``. Returns a plain-text
    streamed response; the full exchange is written back to memory once
    streaming finishes.
    """
    # get_json(silent=True) returns None instead of raising when the body
    # is missing, not JSON, or has the wrong content type — so malformed
    # requests get our clean 400 rather than an unhandled error page.
    payload = request.get_json(silent=True) or {}
    user_msg = payload.get('message')
    if not user_msg:
        return jsonify({"error": "Empty"}), 400

    def generate():
        # 1. VECTORIZE USER INPUT
        q_vec = ai_engine.get_embedding(user_msg)

        # B. Relevant Past History — keep only sufficiently similar hits.
        hist_hits = mem_history.search(q_vec, top_k=2)
        long_term_txt = "\n".join(
            f"[Memory]: {h['text']}" for h in hist_hits if h['score'] > 0.4
        )

        # C. Recent Conversation
        recent_msgs = mem_history.get_recent_entries(n=4)
        recent_txt = "\n".join(recent_msgs)

        # 3. BUILD PROMPT
        system_prompt = f"""You are a helpful AI Assistant with an evolving rule system and knowledge base.

### RELEVANT MEMORIES (Context from past):
{long_term_txt}

### CURRENT CONVERSATION:
{recent_txt}
"""

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_msg}
        ]

        # 4. STREAM GENERATION — accumulate while yielding so the whole
        # reply can be logged afterwards.
        full_response = ""
        for chunk in ai_engine.chat_stream(messages):
            full_response += chunk
            yield chunk

        # 5. BACKGROUND: WRITE & REFLECT — persist the exchange with the
        # query embedding so future searches can find it.
        log_entry = f"User: {user_msg}\nAI: {full_response}"
        mem_history.write_entry(log_entry, q_vec)

    return Response(stream_with_context(generate()), mimetype='text/plain')
84
+
85
@app.route('/wipe', methods=['POST'])
def wipe():
    """Erase every stored memory entry and report success."""
    mem_history.wipe()
    return jsonify({"success": True})
89
+
90
if __name__ == '__main__':
    # Development entry point. NOTE(review): debug=True enables the
    # interactive debugger and auto-reload — do not ship this setting
    # to production.
    app.run(debug=True, port=5000)