antonypamo committed on
Commit 5de7515 · verified · 1 Parent(s): 16081ba

Upload 58 files (#2)

- Upload 58 files (3b9d7fb005e5301ba21d8f6d2fba9c8159d7e1f6)

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set.

Files changed (50)
  1. core/SAVANT_CORE/README.md +13 -0
  2. core/SAVANT_CORE/SAVANT_CORE/README.md +13 -0
  3. core/SAVANT_CORE/SAVANT_CORE/core/__init__.py +3 -0
  4. core/SAVANT_CORE/SAVANT_CORE/core/api_helpers.py +51 -0
  5. core/SAVANT_CORE/SAVANT_CORE/core/engine.py +327 -0
  6. core/SAVANT_CORE/SAVANT_CORE/core/mappings.py +90 -0
  7. core/SAVANT_CORE/SAVANT_CORE/core/memory.py +38 -0
  8. core/SAVANT_CORE/SAVANT_CORE/core/music.py +76 -0
  9. core/SAVANT_CORE/SAVANT_CORE/core/resonance.py +59 -0
  10. core/SAVANT_CORE/SAVANT_CORE/core/self_improvement.py +143 -0
  11. core/SAVANT_CORE/SAVANT_CORE/core/trainer.py +186 -0
  12. core/SAVANT_CORE/SAVANT_CORE/requirements.txt +10 -0
  13. core/SAVANT_CORE/SAVANT_CORE/run_demo.py +104 -0
  14. core/SAVANT_CORE/SAVANT_CORE/run_savant.py +216 -0
  15. core/SAVANT_CORE/SAVANT_memory.jsonl +73 -0
  16. core/SAVANT_CORE/checkpoints/ckpt_epoch_1.json +14 -0
  17. core/SAVANT_CORE/checkpoints/ckpt_epoch_2.json +14 -0
  18. core/SAVANT_CORE/checkpoints/ckpt_epoch_3.json +14 -0
  19. core/SAVANT_CORE/checkpoints/ckpt_epoch_4.json +14 -0
  20. core/SAVANT_CORE/checkpoints/ckpt_epoch_5.json +14 -0
  21. core/SAVANT_CORE/core/Savant-RRF-Estado-Perfil-Antony.json +22 -0
  22. core/SAVANT_CORE/core/Savant-RRF-Memoria.pkl +3 -0
  23. core/SAVANT_CORE/core/__init__.py +3 -0
  24. core/SAVANT_CORE/core/__pycache__/__init__.cpython-312.pyc +0 -0
  25. core/SAVANT_CORE/core/__pycache__/api_helpers.cpython-312.pyc +0 -0
  26. core/SAVANT_CORE/core/__pycache__/engine.cpython-312.pyc +0 -0
  27. core/SAVANT_CORE/core/__pycache__/mappings.cpython-312.pyc +0 -0
  28. core/SAVANT_CORE/core/__pycache__/memory.cpython-312.pyc +0 -0
  29. core/SAVANT_CORE/core/__pycache__/music.cpython-312.pyc +0 -0
  30. core/SAVANT_CORE/core/__pycache__/resonance.cpython-312.pyc +0 -0
  31. core/SAVANT_CORE/core/__pycache__/self_improvement.cpython-312.pyc +0 -0
  32. core/SAVANT_CORE/core/__pycache__/trainer.cpython-312.pyc +0 -0
  33. core/SAVANT_CORE/core/api_helpers.py +52 -0
  34. core/SAVANT_CORE/core/datasets/savant_rrf_dataset.jsonl +0 -0
  35. core/SAVANT_CORE/core/engine.py +22 -0
  36. core/SAVANT_CORE/core/mappings.py +64 -0
  37. core/SAVANT_CORE/core/memory.py +38 -0
  38. core/SAVANT_CORE/core/music.py +23 -0
  39. core/SAVANT_CORE/core/resonance.py +31 -0
  40. core/SAVANT_CORE/core/self_improvement.py +73 -0
  41. core/SAVANT_CORE/core/trainer.py +175 -0
  42. core/SAVANT_CORE/requirements.txt +9 -0
  43. core/SAVANT_CORE/run_demo.py +91 -0
  44. core/SAVANT_CORE/run_savant.py +143 -0
  45. core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/config.json +45 -0
  46. core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/generation_config.json +6 -0
  47. core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/model.safetensors +3 -0
  48. core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/optimizer.pt +3 -0
  49. core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/rng_state.pth +3 -0
  50. core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/scheduler.pt +3 -0
core/SAVANT_CORE/README.md ADDED
@@ -0,0 +1,13 @@
+
+SAVANT_CORE
+===========
+Functional core of the Savant Simbiótico RRF engine (scaffold).
+
+How to use:
+1. Copy the SAVANT_CORE folder to your Drive or your repo.
+2. Install dependencies: pip install -r SAVANT_CORE/requirements.txt
+3. Run the demo: python SAVANT_CORE/run_demo.py
+
+Contents:
+- core/: engine, memory, mappings, resonance, music, self_improvement, trainer modules
+- run_demo.py: simple CLI demo
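
A minimal programmatic sketch of step 3 (not part of the commit; assumes it runs from inside SAVANT_CORE/ so the core package resolves the same way run_demo.py's imports do):

    # Hedged sketch: construct the engine and ask it something (cwd == SAVANT_CORE/)
    from core.engine import SavantEngine

    engine = SavantEngine()
    out = engine.handle_query("hola, ¿puedes explicarme este motor?")
    print(out["response"])   # every handle_query branch returns a "response" key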
core/SAVANT_CORE/SAVANT_CORE/README.md ADDED
@@ -0,0 +1,13 @@
+
+SAVANT_CORE
+===========
+Functional core of the Savant Simbiótico RRF engine (scaffold).
+
+How to use:
+1. Copy the SAVANT_CORE folder to your Drive or your repo.
+2. Install dependencies: pip install -r SAVANT_CORE/requirements.txt
+3. Run the demo: python SAVANT_CORE/run_demo.py
+
+Contents:
+- core/: engine, memory, mappings, resonance, music, self_improvement, trainer, api_helpers modules
+- run_demo.py: simple CLI demo
core/SAVANT_CORE/SAVANT_CORE/core/__init__.py ADDED
@@ -0,0 +1,3 @@
+
+# SAVANT_CORE - core of the Savant Simbiótico RRF engine
+from .engine import SavantEngine
core/SAVANT_CORE/SAVANT_CORE/core/api_helpers.py ADDED
@@ -0,0 +1,51 @@
+
+"""
+api_helpers.py
+Helper functions for integrating with a UI (Gradio/Flask).
+Provides: chat_refine(text), map_text(text), music_from_text(text)
+"""
+from .mappings import IcosaMap, DodecaMap
+from .resonance import ResonanceSimulator
+from .music import MusicAdapter
+from .memory import MemoryStore
+from .self_improvement import SelfImprover
+
+# instantiate shared small objects (lightweight)
+_mem = MemoryStore()
+_icosa = IcosaMap()
+_dodeca = DodecaMap()
+_res = ResonanceSimulator()
+_music = MusicAdapter()
+
+def map_text(text):
+    node = _icosa.closest_node(text)
+    _mem.add({"type": "map", "query": text, "node": node})
+    return node
+
+def base_resonance(text):
+    r = _res.simulate(text)
+    _mem.add({"type": "resonance_query", "query": text, "result": r["summary"]})
+    return r
+
+def music_from_text(text):
+    seq = _music.adapt_text_to_music(text)
+    _mem.add({"type": "music", "query": text, "seq_len": len(seq)})
+    return seq
+
+def chat_refine(user_text, base_model_output, self_improver=None):
+    """
+    Symbiotic refiner: takes raw model output and applies transformations
+    based on the current self_improver params (if provided).
+    A deterministic/symbolic toy function, but extensible.
+    """
+    refined = base_model_output
+    if self_improver:
+        params = self_improver.params
+        if params.get("resonance_scale", 1.0) > 1.05:
+            # toy: append a resonance note
+            refined = refined + " [SAVANT-RES: emphasis applied]"
+        if params.get("music_influence", 0.0) > 0.6:
+            refined = refined + " [SAVANT-MUSIC: melodic hint]"
+    # store the refinement in memory
+    _mem.add({"type": "refine", "input": user_text, "before": base_model_output, "after": refined})
+    return refined
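
A hedged sketch exercising these helpers directly (not part of the commit; assumes the core package imports cleanly, and outputs depend on the loaded data):

    # Sketch: calling the UI helpers; each call also logs a typed record
    from core import api_helpers

    node = api_helpers.map_text("resonancia del nodo base")    # closest node label
    res = api_helpers.base_resonance("vibración simbiótica")   # dict with a "summary"
    seq = api_helpers.music_from_text("melodía")               # list of (pitch, duration)
    print(node, res["summary"], seq[:3])

Each call appends a typed record to the shared MemoryStore, which is what the SelfImprover later mines when building its proposals.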
core/SAVANT_CORE/SAVANT_CORE/core/engine.py ADDED
@@ -0,0 +1,327 @@
+
+"""
+engine.py
+Main orchestrator of the Savant Simbiótico RRF engine.
+Exposes:
+ - handle_query(text): detects intent (map/resonance/music/chat) and responds
+ - access to SimpleTrainer, SelfImprover, MemoryStore for external control
+"""
+import time
+from .mappings import IcosaMap, DodecaMap
+from .resonance import ResonanceSimulator
+from .music import MusicAdapter
+from .memory import MemoryStore
+from .self_improvement import SelfImprover
+# from .trainer import SimpleTrainer  # avoid circular import; the trainer is instantiated externally
+from .api_helpers import chat_refine
+import os
+import pandas as pd
+import json
+import pickle
+
+class SavantEngine:
+    def __init__(self, structured_data_paths=None):
+        self.memory = MemoryStore("SAVANT_memory.jsonl")
+        # Load structured data if paths are provided
+        self.structured_data = {}
+        if structured_data_paths:
+            print("Engine: Loading structured data...")
+            try:
+                self.structured_data['equations'] = self._load_json_data(structured_data_paths.get('equations'))
+                nodes_raw = self._load_json_data(structured_data_paths.get('icosahedron_nodes'))
+                self.structured_data['icosahedron_nodes'] = nodes_raw.get('nodes', []) if isinstance(nodes_raw, dict) else []
+                self.structured_data['frequencies'] = self._load_csv_data(structured_data_paths.get('frequencies'))
+                self.structured_data['constants'] = self._load_csv_data(structured_data_paths.get('constants'))
+
+                print("Engine loaded structured data: Equations={}, Nodes={}, Frequencies={}, Constants={}".format(
+                    len(self.structured_data['equations']) if self.structured_data['equations'] else 0,
+                    len(self.structured_data['icosahedron_nodes']),
+                    len(self.structured_data['frequencies']),
+                    len(self.structured_data['constants'])
+                ))
+            except Exception as e:
+                print(f"Engine: Error loading structured data: {e}")
+                self.structured_data = {}  # reset if loading fails
+
+        # Instantiate components, passing relevant structured data
+        self.icosa = IcosaMap(node_data=self.structured_data.get('icosahedron_nodes'))
+        self.dodeca = DodecaMap()  # no dodecahedron data provided in the file list
+        self.resonator = ResonanceSimulator(frequencies_data=self.structured_data.get('frequencies'), constants_data=self.structured_data.get('constants'))
+        self.music = MusicAdapter(frequencies_data=self.structured_data.get('frequencies'))
+        self.self_improver = SelfImprover(self.memory, structured_data=self.structured_data)
+
+        self._interaction_count = 0  # interaction count for the self-improvement trigger
+
+    # Helper methods for loading data within the Engine (copied from Trainer for self-containment)
+    def _load_json_data(self, file_path):
+        """Loads data from a JSON file."""
+        if not file_path or not os.path.exists(file_path):
+            return None  # silently skip optional files that are missing
+        try:
+            with open(file_path, "r", encoding="utf-8") as f:
+                data = json.load(f)
+            return data
+        except json.JSONDecodeError as e:
+            print(f"Error decoding JSON from {file_path}: {e}")
+            return None
+        except Exception as e:
+            print(f"An unexpected error occurred while loading JSON data: {e}")
+            return None
+
+    def _load_csv_data(self, file_path):
+        """Loads data from a CSV file using pandas."""
+        if not file_path or not os.path.exists(file_path):
+            return []  # silently skip optional files that are missing
+        try:
+            df = pd.read_csv(file_path)
+            return df.to_dict(orient='records')
+        except Exception as e:
+            print(f"An error occurred while loading CSV data from {file_path}: {e}")
+            return []
+
+    def _classify(self, text):
+        t = text.lower()
+        # Enhanced classification based on structured-data keywords and patterns
+        if any(k in t for k in ("equation", "ecuacion", "hamiltoniano", "dirac", "formula", "formulae", "formulas")):
+            return "equation_query"
+        if any(k in t for k in ("node", "nodo", "icosahedron", "dodecahedron", "poly", "vertex", "point", "map")):
+            # Check for patterns like "node X" where X is a number
+            words = t.split()
+            if len(words) > 1 and words[-1].isdigit() and words[-2] in ("node", "nodo"):
+                return "node_query"
+            return "node_query"
+        if any(k in t for k in ("frecuen", "freq", "music", "nota", "melod", "tono", "pitch", "scale", "musical", "sound", "audio")):
+            return "music_resonance"  # combine music and resonance intent for simplicity here
+        if any(k in t for k in ("constant", "constante", "valor", "unidad", "define", "what is the value of")):
+            return "constant_query"
+        if any(k in t for k in ("resonance", "resonar", "resonant", "vibration", "oscilla")):
+            return "resonance_only"
+
+        # Broader chat terms kept as the fallback classification
+        if any(k in t for k in ("chat", "hola", "qué", "como", "explica", "tell me", "what is", "describe", "info", "information")):
+            return "chat"
+        return "chat"  # default to chat
+
+    def handle_query(self, text, base_model_output=None):
+        kind = self._classify(text)
+
+        # Handle query types based on structured data
+        if kind == "equation_query":
+            relevant_eqs = []
+            if self.structured_data.get('equations'):
+                # Find equations related to the query (keyword matching)
+                query_words = text.lower().split()
+                relevant_eqs = [eq for eq in self.structured_data['equations'] if any(word in eq.get('nombre', '').lower() or word in eq.get('descripcion', '').lower() or any(comp.lower() in word for comp in eq.get('componentes', [])) for word in query_words)]
+
+            if relevant_eqs:
+                # Provide information about the equations found
+                response_parts = ["Based on the RRF Equations data, I found the following relevant equations:"]
+                for eq in relevant_eqs[:3]:  # limit to the first 3 for brevity
+                    response_parts.append(f"- '{eq.get('nombre', 'N/A')}' ({eq.get('tipo', 'Equation')}): {eq.get('ecuacion', 'N/A')} (Components: {', '.join(eq.get('componentes', []))})")
+                if len(relevant_eqs) > 3:
+                    response_parts.append("...")
+                response = "\n".join(response_parts)
+                self._log_interaction(text, base_model_output, response, type="equation_query")
+                return {"type": "equation_query", "query": text, "result": relevant_eqs, "response": response}
+            else:
+                response = "I couldn't find any relevant equations in the loaded data for that query."
+                self._log_interaction(text, base_model_output, response, type="equation_query_not_found")
+                return {"type": "equation_query", "query": text, "result": [], "response": response}
+
+        if kind == "node_query":
+            relevant_nodes = []
+            if self.structured_data.get('icosahedron_nodes'):
+                query_words = text.lower().split()
+                # Try to find by ID first if the query contains a number
+                try:
+                    node_id = int(query_words[-1]) if query_words and query_words[-1].isdigit() else None
+                    if node_id is not None:
+                        relevant_nodes = [node for node in self.structured_data['icosahedron_nodes'] if node.get('id') == node_id]
+                except (ValueError, IndexError):
+                    pass  # not a numeric query
+
+                # If not found by ID, search by keyword in description/name
+                if not relevant_nodes:
+                    relevant_nodes = [node for node in self.structured_data['icosahedron_nodes'] if any(word in node.get('description', '').lower() or word in node.get('name', '').lower() for word in query_words)]
+
+            if relevant_nodes:
+                response_parts = ["Based on the Icosahedron Nodes data, I found the following relevant nodes:"]
+                for node in relevant_nodes[:3]:  # limit to the first 3
+                    response_parts.append(f"- Node {node.get('id', 'N/A')}: {node.get('description', node.get('name', 'No description'))} (Coords: ({node.get('x', 'N/A')}, {node.get('y', 'N/A')}, {node.get('z', 'N/A')}))")
+                if len(relevant_nodes) > 3:
+                    response_parts.append("...")
+                response = "\n".join(response_parts)
+                self._log_interaction(text, base_model_output, response, type="node_query")
+                return {"type": "node_query", "query": text, "result": relevant_nodes, "response": response}
+            else:
+                response = "I couldn't find any relevant nodes in the loaded data for that query."
+                self._log_interaction(text, base_model_output, response, type="node_query_not_found")
+                return {"type": "node_query", "query": text, "result": [], "response": response}
+
+        if kind == "music_resonance":
+            # Runs resonance simulation and music adaptation, enriching the response
+            # with frequencies/constants data when relevant keywords appear
+            response_parts = []
+            if self.structured_data.get('frequencies') and any(k in text.lower() for k in ("frecuen", "freq", "nota", "pitch", "scale", "musical", "sound", "audio")):
+                query_words = text.lower().split()
+                relevant_freqs = [f for f in self.structured_data['frequencies'] if any(word in f.get('note', '').lower() or word in f.get('role', '').lower() for word in query_words)]
+                if relevant_freqs:
+                    response_parts.append("Based on the Frequencies data, I found:")
+                    for freq in relevant_freqs[:3]:
+                        response_parts.append(f"- Note: {freq.get('note', 'N/A')}, Frequency: {freq.get('frequency', 'N/A')} Hz, Role: {freq.get('role', 'N/A')}")
+                    if len(relevant_freqs) > 3: response_parts.append("...")
+
+            if self.structured_data.get('constants') and any(k in text.lower() for k in ("constant", "constante")):
+                query_words = text.lower().split()
+                relevant_constants = [c for c in self.structured_data['constants'] if any(word in c.get('name', '').lower() for word in query_words)]
+                if relevant_constants:
+                    response_parts.append("Based on the Constants data, I found:")
+                    for const in relevant_constants[:3]:
+                        response_parts.append(f"- Constant: {const.get('name', 'N/A')}, Value: {const.get('value', 'N/A')}, Units: {const.get('units', 'N/A')}")
+                    if len(relevant_constants) > 3: response_parts.append("...")
+
+            # Always run resonance simulation and music adaptation for this type
+            r = self.resonator.simulate(text)
+            seq = self.music.adapt_text_to_music(text)
+
+            response_parts.append(f"Resonance simulation summary: Dominant Frequency={r['summary'].get('dom_freq', 0.0):.4f} Hz, Max Power={r['summary'].get('max_power', 0.0):.4f}.")
+            response_parts.append(f"Adapted to music sequence (first 5 notes: pitch, duration): {seq[:5]}...")
+
+            response = "\n".join(response_parts) if response_parts else "Processing music and resonance query..."
+            self._log_interaction(text, base_model_output, response, type="music_resonance")
+            return {"type": "music_resonance", "query": text, "resonance_result": r, "music_result": seq, "response": response}
+
+        if kind == "resonance_only":  # handler for resonance-only queries
+            response_parts = []
+            if self.structured_data.get('constants') and any(k in text.lower() for k in ("constant", "constante")):
+                query_words = text.lower().split()
+                relevant_constants = [c for c in self.structured_data['constants'] if any(word in c.get('name', '').lower() for word in query_words)]
+                if relevant_constants:
+                    response_parts.append("Based on the Constants data, I found:")
+                    for const in relevant_constants[:3]:
+                        response_parts.append(f"- Constant: {const.get('name', 'N/A')}, Value: {const.get('value', 'N/A')}, Units: {const.get('units', 'N/A')}")
+                    if len(relevant_constants) > 3: response_parts.append("...")
+
+            r = self.resonator.simulate(text)
+            response_parts.append(f"Resonance simulation summary: Dominant Frequency={r['summary'].get('dom_freq', 0.0):.4f} Hz, Max Power={r['summary'].get('max_power', 0.0):.4f}.")
+
+            response = "\n".join(response_parts) if response_parts else "Processing resonance query..."
+            self._log_interaction(text, base_model_output, response, type="resonance_only")
+            return {"type": "resonance_only", "query": text, "resonance_result": r, "response": response}
+
+        if kind == "constant_query":
+            relevant_constants = []
+            if self.structured_data.get('constants'):
+                query_words = text.lower().split()
+                relevant_constants = [c for c in self.structured_data['constants'] if any(word in c.get('name', '').lower() or word in c.get('units', '').lower() for word in query_words)]
+
+            if relevant_constants:
+                response_parts = ["Based on the RRF Constants data, I found the following relevant constants:"]
+                for const in relevant_constants[:3]:
+                    response_parts.append(f"- Name: {const.get('name', 'N/A')}, Value: {const.get('value', 'N/A')}, Units: {const.get('units', 'N/A')}")
+                if len(relevant_constants) > 3: response_parts.append("...")
+                response = "\n".join(response_parts)
+                self._log_interaction(text, base_model_output, response, type="constant_query")
+                return {"type": "constant_query", "query": text, "result": relevant_constants, "response": response}
+            else:
+                response = "I couldn't find any relevant constants in the loaded data for that query."
+                self._log_interaction(text, base_model_output, response, type="constant_query_not_found")
+                return {"type": "constant_query", "query": text, "result": [], "response": response}
+
+        if kind == "map":
+            # Use icosahedron_nodes data in mapping (already done in IcosaMap)
+            node_label = self.icosa.closest_node(text)
+            response = f"Mapping query '{text}' to closest node: {node_label}"
+            # If we have node data, try to find details about the mapped node
+            if self.structured_data.get('icosahedron_nodes'):
+                # node_label is assumed to be the description or name used for embedding;
+                # a more robust mapping would link the label back to the node dict by ID.
+                # For now, find the node with a matching description/name if possible.
+                mapped_node_data = next((node for node in self.structured_data['icosahedron_nodes'] if node.get('description', '').lower() == node_label.lower() or node.get('name', '').lower() == node_label.lower()), None)
+                if mapped_node_data:
+                    response += f" (ID: {mapped_node_data.get('id', 'N/A')}, Coords: ({mapped_node_data.get('x', 'N/A')}, {mapped_node_data.get('y', 'N/A')}, {mapped_node_data.get('z', 'N/A')}))"
+
+            self._log_interaction(text, base_model_output, response, type="map")
+            return {"type": "map", "query": text, "node": node_label, "response": response}
+
+        # chat fallback: if base_model_output is provided, refine it using self_improver
+        if kind == "chat":
+            if base_model_output is None:
+                # default echo
+                base = "Echo: " + text
+            else:
+                base = base_model_output
+
+            refined = chat_refine(text, base, self_improver=self.self_improver)
+            response = refined  # use the refined output as the main response for chat
+            self._log_interaction(text, base_model_output, refined, type="chat_interaction")
+
+            return {"type": "chat", "query": text, "base": base, "refined": refined, "response": response}
+
+        # Fallback for unhandled types (shouldn't be reached with the current classifier)
+        response = "I'm not sure how to handle that query based on the available data and functions."
+        self._log_interaction(text, base_model_output, response, type="unhandled_query")
+        return {"type": "unhandled", "query": text, "response": response}
+
+    def _log_interaction(self, user_input, base_output, final_output, type="interaction"):
+        """Logs interaction details to memory and triggers self-improvement if needed."""
+        interaction_record = {
+            "type": type,  # e.g., chat_interaction, equation_query
+            "user_input": user_input,
+            "base_model_output": base_output,  # may be None for non-chat types
+            "final_output": final_output,  # the response generated by handle_query
+            "_ts": time.time()
+        }
+        self.memory.add(interaction_record)
+
+        # Periodically trigger self-improvement (e.g., every 10 interactions)
+        self._interaction_count = getattr(self, '_interaction_count', 0) + 1
+        if self._interaction_count % 10 == 0:
+            print("SAVANT: Triggering self-improvement cycle...")
+            try:
+                proposal = self.self_improver.propose()
+                accepted, metric = self.self_improver.evaluate_and_apply(proposal)
+                print(f"SAVANT: Self-improvement proposal accepted: {accepted}, New metric: {metric}")
+                self.memory.add({
+                    "type": "self_improvement_triggered",
+                    "proposal": proposal,
+                    "accepted": accepted,
+                    "metric": metric,
+                    "_ts": time.time()
+                })
+            except Exception as si_error:
+                # Log the error and continue
+                error_message = f"Error during self-improvement: {si_error}"
+                print(f"SAVANT: {error_message}")
+                self.memory.add({
+                    "type": "self_improvement_error",
+                    "error": error_message,
+                    "_ts": time.time()
+                })
+
+    # trainer helpers (now called externally via a SimpleTrainer instance)
+    # def run_training_epochs(self, stimuli, epochs=3):
+    #     return self.trainer.run_epochs(stimuli, epochs)
+
+    def propose_improvement(self):
+        return self.self_improver.propose()
+
+    def apply_improvement(self, proposal):
+        return self.self_improver.evaluate_and_apply(proposal)
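
A hedged sketch of constructing the engine with structured data (the four file paths below are hypothetical placeholders, not files from this commit; missing files are silently skipped by the loaders):

    # Sketch: engine with structured data; a "define ..." query routes to constant_query
    from core.engine import SavantEngine

    paths = {
        "equations": "data/rrf_equations.json",        # hypothetical path
        "icosahedron_nodes": "data/icosa_nodes.json",  # hypothetical; JSON with a "nodes" key
        "frequencies": "data/frequencies.csv",         # hypothetical path
        "constants": "data/constants.csv",             # hypothetical path
    }
    engine = SavantEngine(structured_data_paths=paths)
    out = engine.handle_query("define the constant phi")
    print(out["type"], out["response"])                # "constant_query", found/not-found text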
core/SAVANT_CORE/SAVANT_CORE/core/mappings.py ADDED
@@ -0,0 +1,90 @@
+
+"""
+mappings.py
+Semantic mapping to icosa/dodeca nodes using lightweight embeddings.
+Provides: IcosaMap, DodecaMap with a closest_node(text) function.
+Can potentially use loaded icosahedron node data.
+"""
+import math, os
+try:
+    from sentence_transformers import SentenceTransformer, util
+    _EMBED_AVAILABLE = True
+except Exception as e:
+    _EMBED_AVAILABLE = False
+    # fallback naive embedding
+    import hashlib
+    def _hash_embed(text, dim=128):
+        h = hashlib.sha256(text.encode('utf-8')).digest()
+        vec = [b for b in h]
+        # pad/truncate
+        vec = (vec * (dim//len(vec)+1))[:dim]
+        return [float(v)/255.0 for v in vec]
+
+class BasePolyMap:
+    def __init__(self, graph_name="icosa", node_count=12, embed_model_name="all-MiniLM-L6-v2", node_data=None):
+        self.graph_name = graph_name
+        self.node_count = node_count
+        self.node_labels = [f"{graph_name}_node_{i}" for i in range(node_count)]
+        self.node_data = node_data  # store loaded node data
+
+        if _EMBED_AVAILABLE:
+            try:
+                self.embed = SentenceTransformer(embed_model_name)
+            except Exception:
+                self.embed = None
+        else:
+            self.embed = None
+        # precompute embeddings (lazy)
+        self._emb_cache = {}
+        self._label_to_node_id = {}  # map embedded label back to node ID
+
+        # If node_data is available, prioritize descriptions/names for embedding
+        labels_to_embed_map = {}  # map label string to node ID or original label
+        if self.node_data and isinstance(self.node_data, list):
+            for i, node in enumerate(self.node_data):
+                if isinstance(node, dict):
+                    label_string = node.get('description', node.get('name', f"{graph_name}_node_{node.get('id', i)}"))
+                    node_id = node.get('id', i)
+                    labels_to_embed_map[label_string] = node_id  # store mapping
+            # Ensure we have enough default labels if node_data is sparse
+            while len(labels_to_embed_map) < self.node_count:
+                default_label = f"{graph_name}_node_{len(labels_to_embed_map)}"
+                labels_to_embed_map[default_label] = len(labels_to_embed_map)  # map default label to its index/ID
+
+            # Ensure we don't exceed node_count
+            labels_to_embed = list(labels_to_embed_map.keys())[:self.node_count]
+            self._label_to_node_id = {label: labels_to_embed_map[label] for label in labels_to_embed}
+        else:
+            # If no node data, use default labels and map each label to its index
+            labels_to_embed = self.node_labels
+            self._label_to_node_id = {label: i for i, label in enumerate(self.node_labels)}
+
+        for label in labels_to_embed:
+            self._emb_cache[label] = self._compute_emb(label)
+
+    def nodes(self):
+        # Return node labels, augmented with info from node_data if available
+        if self.node_data and isinstance(self.node_data, list):
+            return [
+                {**node, 'label': f"{self.graph_name}_node_{node.get('id', i)}"}  # combine data and add a label
+                for i, node in enumerate(self.node_data)
+                if isinstance(node, dict)
+            ]
+        return [{"label": label} for label in self.node_labels]
+
+    def closest_node(self, text):
+        q = self._compute_emb(text)
+        best_label, best_sim = None, -1e9
+
+        # Iterate through the embedded labels
+        for label, v in self._emb_cache.items():
+            # simple dot product for similarity
+            sim = sum(a*b for a, b in zip(q, v))
+            if sim > best_sim:
+                best_label, best_sim = label, sim
+
+        # Return the label string that had the highest similarity
+        return best_label
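
Note that this 90-line hunk calls `_compute_emb` and promises `IcosaMap`/`DodecaMap` (which api_helpers.py imports) without defining them. A hedged completion sketch of one plausible reading; every name below is an assumption, not part of the commit:

    # Hypothetical completion, not in the commit: a self-contained embedding
    # helper plus the two subclasses the docstring promises.
    import hashlib

    def _sketch_embed(text, dim=128):
        # same scheme as the _hash_embed fallback above
        h = hashlib.sha256(text.encode("utf-8")).digest()
        return [b / 255.0 for b in (list(h) * (dim // len(h) + 1))[:dim]]

    class IcosaMap(BasePolyMap):                 # assumed subclass (12 vertices)
        def __init__(self, node_data=None):
            super().__init__(graph_name="icosa", node_count=12, node_data=node_data)

        def _compute_emb(self, text):
            if self.embed is not None:           # sentence-transformers model loaded
                return self.embed.encode(text).tolist()
            return _sketch_embed(text)           # deterministic hash fallback

    class DodecaMap(IcosaMap):                   # assumed subclass (20 vertices)
        def __init__(self, node_data=None):
            BasePolyMap.__init__(self, graph_name="dodeca", node_count=20, node_data=node_data)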
core/SAVANT_CORE/SAVANT_CORE/core/memory.py ADDED
@@ -0,0 +1,38 @@
+
+"""
+MemoryStore: append-only JSONL memory for events, queries and reflections.
+Also supports simple retrieval by type and tail.
+"""
+import json, os, threading, time
+
+class MemoryStore:
+    def __init__(self, path="SAVANT_memory.jsonl", autosave=True):
+        self.path = path
+        os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
+        self.lock = threading.Lock()
+        self.autosave = autosave
+        # ensure the file exists
+        if not os.path.exists(path):
+            open(path, "w").close()
+
+    def add(self, record: dict):
+        record['_ts'] = time.time()
+        with self.lock:
+            with open(self.path, "a", encoding="utf-8") as f:
+                f.write(json.dumps(record, ensure_ascii=False) + "\n")
+
+    def tail(self, n=20):
+        with self.lock:
+            with open(self.path, "r", encoding="utf-8") as f:
+                lines = f.read().strip().splitlines()
+            lines = lines[-n:]
+            return [json.loads(l) for l in lines] if lines else []
+
+    def query_by_type(self, t, n=50):
+        out = []
+        for item in self.tail(1000):
+            if item.get("type") == t:
+                out.append(item)
+            if len(out) >= n:
+                break
+        return out
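
A minimal round-trip sketch of the store (the file name is an arbitrary example):

    # Sketch: write one record, read it back by tail and by type
    from core.memory import MemoryStore

    mem = MemoryStore("demo_memory.jsonl")
    mem.add({"type": "map", "query": "node 3", "node": "icosa_node_3"})
    print(mem.tail(5))                  # last records; add() stamps each with "_ts"
    print(mem.query_by_type("map"))     # filters the recent tail by record type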
core/SAVANT_CORE/SAVANT_CORE/core/music.py ADDED
@@ -0,0 +1,76 @@
+
+"""
+music.py
+Maps text or nodal activations to musical sequences (MIDI-like tuples).
+Can potentially use loaded frequencies data.
+"""
+import numpy as np
+import math  # for log2 in the frequency-to-MIDI conversion
+
+class MusicAdapter:
+    def __init__(self, scale='major', frequencies_data=None):
+        self.scale = scale
+        self.base = 60
+        self.scale_intervals = [0, 2, 4, 5, 7, 9, 11]
+        self.frequencies_data = frequencies_data  # store loaded frequencies data
+
+        # Create a mapping from note names in the data to MIDI pitches
+        self.note_to_midi = {}
+        if self.frequencies_data:
+            try:
+                # Simple mapping assuming 'note' and 'frequency' columns exist.
+                # A full note-name parser would be more complex; for simplicity,
+                # map a few known note names and otherwise use frequencies directly.
+                midi_base = 60  # Middle C
+                known_notes_midi = {'C4': 60, 'D4': 62, 'E4': 64, 'F4': 65, 'G4': 67, 'A4': 69, 'B4': 71}
+                for entry in self.frequencies_data:
+                    note_name = entry.get('note', '').upper()
+                    if note_name in known_notes_midi:
+                        self.note_to_midi[note_name] = known_notes_midi[note_name]
+
+            except Exception as e:
+                print(f"MusicAdapter: Error processing frequencies data: {e}")
+                self.note_to_midi = {}  # reset on error
+
+    def adapt_text_to_music(self, text, length=16):
+        h = abs(hash(text))
+        rng = np.random.RandomState(h % (2**32))
+        seq = []
+        for i in range(length):
+            pitch = self.base  # default pitch
+
+            # Occasionally select a pitch based on the frequencies data, if available
+            if self.frequencies_data and rng.rand() > 0.5:
+                try:
+                    random_freq_entry = rng.choice(self.frequencies_data)
+                    if 'note' in random_freq_entry and random_freq_entry['note'].upper() in self.note_to_midi:
+                        pitch = self.note_to_midi[random_freq_entry['note'].upper()]
+                    elif 'frequency' in random_freq_entry:
+                        # Rough frequency-to-MIDI conversion (logarithmic):
+                        # MIDI = 69 + 12 * log2(frequency / 440 Hz)
+                        try:
+                            freq = float(random_freq_entry['frequency'])
+                            if freq > 0:
+                                pitch = int(round(69 + 12 * math.log2(freq / 440.0)))
+                                pitch = max(0, min(127, pitch))  # clamp to MIDI range
+                        except (ValueError, TypeError):
+                            pass  # ignore invalid frequencies
+
+                except Exception:
+                    pass  # ignore malformed frequency entries
+
+            # Fall back to scale intervals if no specific pitch was chosen from data
+            if pitch == self.base:  # still the default pitch
+                step = rng.randint(0, len(self.scale_intervals))
+                pitch = self.base + self.scale_intervals[step] + rng.randint(-2, 2)
+
+            dur = float(rng.choice([0.25, 0.5, 1.0]))
+            seq.append((int(pitch), dur))
+        return seq
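
A quick worked check of the frequency-to-MIDI formula used above, MIDI = 69 + 12 * log2(f / 440 Hz):

    # 440 Hz (A4) -> 69, 261.63 Hz (middle C) -> 60, 880 Hz (A5) -> 81
    import math
    for f in (440.0, 261.63, 880.0):
        print(f, round(69 + 12 * math.log2(f / 440.0)))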
core/SAVANT_CORE/SAVANT_CORE/core/resonance.py ADDED
@@ -0,0 +1,59 @@
+
+"""
+resonance.py
+Resonance simulator: a network of coupled oscillators, with spectrum computation and a summary.
+Designed to be fast even on a slow CPU.
+Can potentially use loaded frequencies and constants data.
+"""
+import numpy as np
+from scipy.signal import periodogram
+
+class ResonanceSimulator:
+    def __init__(self, base_freq=1.0, frequencies_data=None, constants_data=None):
+        self.base_freq = base_freq
+        self.frequencies_data = frequencies_data  # store loaded frequencies data
+        self.constants_data = constants_data  # store loaded constants data
+
+        # Adjust base_freq from a constant if one is available
+        if self.constants_data:
+            # Look for a relevant constant, e.g., related to 'frequency' or a base unit.
+            # This is a simple example; a real implementation would be more involved.
+            freq_constant = next((c for c in self.constants_data if c.get('name', '').lower() == 'base_frequency'), None)
+            if freq_constant and 'value' in freq_constant:
+                try:
+                    self.base_freq = float(freq_constant['value'])
+                    print(f"ResonanceSimulator: Adjusted base_freq using constant '{freq_constant['name']}': {self.base_freq}")
+                except ValueError:
+                    print(f"ResonanceSimulator: Could not convert constant value to float: {freq_constant.get('value')}")
+
+    def simulate(self, seed_text, n_nodes=12, steps=256, damping=0.04):
+        # deterministic per seed_text
+        rng = np.random.RandomState(abs(hash(seed_text)) % (2**32))
+        A = rng.randn(n_nodes, n_nodes) * 0.08
+        A = (A + A.T) * 0.5  # symmetrize
+        state = rng.randn(n_nodes) * 0.01
+        X = np.zeros((steps, n_nodes), dtype=float)
+        for t in range(steps):
+            # The input signal can be influenced by the frequencies data
+            input_signal = np.sin(2*np.pi*(self.base_freq + 0.05*rng.randn()) * (t/steps))
+            if self.frequencies_data and rng.rand() > 0.8:  # occasionally inject a frequency from data
+                try:
+                    # Pick a random frequency from the loaded data
+                    random_freq_entry = rng.choice(self.frequencies_data)
+                    if 'frequency' in random_freq_entry:
+                        input_signal += np.sin(2*np.pi*float(random_freq_entry['frequency']) * (t/steps)) * 0.1  # add as a harmonic
+                except Exception:
+                    pass  # ignore malformed frequency entries
+
+            state = state + 0.12*(A.dot(state) + input_signal) - damping*state
+            X[t] = state
+        freqs, P = periodogram(X, fs=1.0, axis=0)
+        power = P.sum(axis=1)
+        dom_idx = int(np.argmax(power))
+        dom_freq = float(freqs[dom_idx])
+        summary = {"dom_freq": dom_freq, "max_power": float(power[dom_idx])}
+        # compress small arrays for saving
+        return {"summary": summary, "freqs_len": len(freqs), "power_sample": power[:10].tolist()}
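
A small determinism sketch: within a single process the simulation is reproducible for the same seed text, since the RandomState is seeded from hash(seed_text) (note that Python's str hash is salted per interpreter run, so reproducibility does not hold across runs unless PYTHONHASHSEED is fixed):

    from core.resonance import ResonanceSimulator

    sim = ResonanceSimulator()
    r1 = sim.simulate("nodo base")
    r2 = sim.simulate("nodo base")
    assert r1["summary"] == r2["summary"]   # same seed text -> same spectrum summary
    print(r1["summary"])                    # {'dom_freq': ..., 'max_power': ...}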
core/SAVANT_CORE/SAVANT_CORE/core/self_improvement.py ADDED
@@ -0,0 +1,143 @@
+
+"""
+self_improvement.py
+Symbiotic self-improvement module:
+ - analyzes the most recent memory entries
+ - proposes small heuristic modifications (weights) for the engine
+ - saves a record of proposals and applies those that improve a simple metric
+Can potentially use structured data for more informed heuristics.
+"""
+import time, json, os, math
+import numpy as np  # for random number generation
+
+class SelfImprover:
+    def __init__(self, memory_store, state_path="savant_state.json", structured_data=None):
+        self.mem = memory_store
+        self.state_path = state_path
+        self.structured_data = structured_data  # store loaded structured data
+
+        # params controlling simple heuristics (toy)
+        self.params = {"resonance_scale": 1.0, "music_influence": 0.5, "mapping_weight": 1.0, "chat_influence": 0.0}
+        self.history = []
+        if os.path.exists(self.state_path):
+            try:
+                with open(self.state_path, "r", encoding="utf-8") as f:
+                    st = json.load(f)
+                self.params.update(st.get("params", {}))
+                self.history = st.get("history", [])
+            except Exception:
+                pass
+
+    def propose(self):
+        # produce a small proposal delta based on recent memory and, potentially, structured data
+        tail = self.mem.tail(50)
+        counts = {}
+        for e in tail:
+            counts[e.get("type", "unknown")] = counts.get(e.get("type", "unknown"), 0) + 1
+        # heuristic: if there are many resonance records, increase resonance_scale slightly
+        delta = {}
+        delta["resonance_scale"] = 0.01 * counts.get("resonance", 0) + 0.01 * counts.get("music_resonance", 0) + 0.01 * counts.get("resonance_only", 0)
+        delta["music_influence"] = 0.005 * counts.get("music", 0) + 0.005 * counts.get("music_resonance", 0)
+        delta["mapping_weight"] = 0.002 * counts.get("map", 0) + 0.002 * counts.get("node_query", 0)
+        delta["chat_influence"] = 0.001 * counts.get("chat_interaction", 0) + 0.0005 * counts.get("equation_query", 0) + 0.0005 * counts.get("constant_query", 0)
+
+        # Influence the proposal based on structured-data availability and size
+        if self.structured_data:
+            if self.structured_data.get('equations'):
+                # loaded equations: slightly increase mapping weight to encourage structured knowledge
+                delta["mapping_weight"] += len(self.structured_data['equations']) * 0.0001
+            if self.structured_data.get('icosahedron_nodes'):
+                # loaded node data: a smaller increase to mapping weight
+                delta["mapping_weight"] += len(self.structured_data['icosahedron_nodes']) * 0.00005
+            if self.structured_data.get('frequencies'):
+                # loaded frequencies: slightly increase music influence
+                delta["music_influence"] += len(self.structured_data['frequencies']) * 0.0002
+            if self.structured_data.get('constants'):
+                # loaded constants: slightly increase resonance and chat influence
+                delta["resonance_scale"] += len(self.structured_data['constants']) * 0.0001
+                delta["chat_influence"] += len(self.structured_data['constants']) * 0.00005
+
+        # Heuristic based on the ratio of structured-data queries to chat interactions
+        total_structured_queries = counts.get("equation_query", 0) + counts.get("node_query", 0) + counts.get("constant_query", 0) + counts.get("music_resonance", 0) + counts.get("resonance_only", 0) + counts.get("map", 0)
+        chat_interactions = counts.get("chat_interaction", 0)
+        if chat_interactions > 0:
+            structured_query_ratio = total_structured_queries / chat_interactions
+            # If structured queries are frequent, slightly increase related parameters
+            delta["mapping_weight"] += structured_query_ratio * 0.001
+            delta["resonance_scale"] += structured_query_ratio * 0.0005
+            delta["music_influence"] += structured_query_ratio * 0.0003
+
+        proposal = {"ts": time.time(), "delta": delta}
+        return proposal
+
+    def evaluate_and_apply(self, proposal):
+        # apply temporarily, simulate a toy metric, keep the change if the metric improves
+        old_params = self.params.copy()
+        # apply delta
+        for k, v in proposal["delta"].items():
+            self.params[k] = self.params.get(k, 0.0) + v
+        score = self._metric(self.params)
+        old_score = self._metric(old_params)
+        accepted = False
+        # Acceptance criteria: accept clear improvements, and sometimes accept smaller ones
+        if score >= old_score * 1.01:  # improves by at least 1%
+            accepted = True
+        elif score > old_score and np.random.rand() < 0.3:  # smaller improvements (30% chance)
+            accepted = True
+        elif score < old_score * 0.99 and np.random.rand() < 0.1:  # occasional small regressions (10% chance)
+            accepted = True
+        # otherwise revert the parameters below
+
+        if accepted:
+            self.history.append({"proposal": proposal, "result_metric": score, "accepted": True, "params_after": self.params.copy()})
+        else:
+            self.params = old_params  # revert parameters if not accepted
+            self.history.append({"proposal": proposal, "result_metric": score, "accepted": False, "params_after": self.params.copy()})
+
+        self._save_state()
+        return accepted, score
+
+    def _metric(self, params):
+        # toy metric: resonance_scale increases the score, large music_influence gives a bonus,
+        # mapping_weight influences it, and chat_influence gives a small bonus
+        score = params.get("resonance_scale", 1.0) * 1.0 + 0.5*params.get("music_influence", 0.0) + 0.2*params.get("chat_influence", 0.0)
+
+        # Bonus based on the presence and size of structured data
+        structured_data_bonus = 0
+        if self.structured_data:
+            if self.structured_data.get('equations'):
+                structured_data_bonus += len(self.structured_data['equations']) * 0.01
+            if self.structured_data.get('icosahedron_nodes'):
+                structured_data_bonus += len(self.structured_data['icosahedron_nodes']) * 0.005
+            if self.structured_data.get('frequencies'):
+                structured_data_bonus += len(self.structured_data['frequencies']) * 0.008
+            if self.structured_data.get('constants'):
+                structured_data_bonus += len(self.structured_data['constants']) * 0.003
+        score += structured_data_bonus
+
+        # Penalty for parameters straying too far from their defaults (encourages balance)
+        parameter_magnitude_penalty = (abs(params.get("resonance_scale", 1.0) - 1.0) * 0.1 +
+                                       abs(params.get("music_influence", 0.5) - 0.5) * 0.1 +
+                                       abs(params.get("mapping_weight", 1.0) - 1.0) * 0.1 +
+                                       abs(params.get("chat_influence", 0.0) - 0.0) * 0.1)
+        score -= parameter_magnitude_penalty
+
+        mw = params.get("mapping_weight", 1.0)
+        if mw > 3.0:  # penalty threshold
+            score -= (mw - 3.0) * 1.0
+        if params.get("resonance_scale", 1.0) > 3.0:  # penalty for a very high resonance scale
+            score -= (params.get("resonance_scale", 1.0) - 3.0) * 0.8
+
+        return score
+
+    def _save_state(self):
+        try:
+            with open(self.state_path, "w", encoding="utf-8") as f:
+                json.dump({"params": self.params, "history": self.history[-200:]}, f, indent=2)
+        except Exception as e:
+            print(f"Error saving self-improver state to {self.state_path}: {e}")
core/SAVANT_CORE/SAVANT_CORE/core/trainer.py ADDED
@@ -0,0 +1,186 @@
+
+"""
+trainer.py
+Lightweight interface for "training" the Savant engine:
+ - runs cycles: simulate, store in memory, propose improvements, apply
+ - saves checkpoints to disk (JSON)
+ - includes functions for loading various types of data
+"""
+import os
+import json
+import time
+import pickle
+import pandas as pd  # for CSV loading
+
+from .memory import MemoryStore
+from .resonance import ResonanceSimulator
+from .self_improvement import SelfImprover
+
+class SimpleTrainer:
+    def __init__(self, mem_path="SAVANT_memory.jsonl", checkpoint_dir="checkpoints"):
+        self.mem = MemoryStore(mem_path)
+        self.res = ResonanceSimulator()
+        self.si = SelfImprover(self.mem)
+        self.checkpoint_dir = checkpoint_dir
+        os.makedirs(self.checkpoint_dir, exist_ok=True)
+
+    def run_cycle(self, stimulus, epoch=1):
+        # simulate resonance
+        r = self.res.simulate(stimulus)
+        self.mem.add({"type": "resonance", "query": stimulus, "result": r["summary"], "epoch": epoch})
+        # propose improvement
+        proposal = self.si.propose()
+        accepted, metric = self.si.evaluate_and_apply(proposal)
+        self.mem.add({"type": "improvement", "proposal": proposal, "accepted": accepted, "metric": metric, "epoch": epoch})
+        # save checkpoint
+        ckpt = {"epoch": epoch, "stimulus": stimulus, "res": r["summary"], "si_params": self.si.params}
+        p = os.path.join(self.checkpoint_dir, f"ckpt_epoch_{epoch}.json")
+        with open(p, "w", encoding="utf-8") as f:
+            json.dump(ckpt, f, indent=2)
+        return ckpt
+
+    def run_epochs(self, conversations, epochs=3):
+        """
+        Runs training cycles using conversation entries as stimuli.
+
+        Args:
+            conversations (list): A list of conversation entries (dictionaries).
+            epochs (int): Number of epochs to run.
+        """
+        results = []
+        if not conversations:
+            print("No conversations provided for training.")
+            return results
+
+        # Each entry is assumed to be a dict like {"role": "user", "content": "...", ...};
+        # the 'content' of every non-empty entry is used as a stimulus. Add handling here
+        # if entries may lack a "user" role or a "content" key.
+        stimuli_list = [entry.get("content", "") for entry in conversations if entry.get("content")]
+
+        if not stimuli_list:
+            print("No valid stimuli extracted from conversations.")
+            return results
+
+        print(f"Starting training for {epochs} epochs using {len(stimuli_list)} conversation entries as stimuli.")
+
+        for e in range(1, epochs + 1):
+            # Cycle through the stimuli list across epochs
+            stimulus_for_this_epoch = stimuli_list[(e - 1) % len(stimuli_list)]
+            print(f"Epoch {e}/{epochs}: Using stimulus '{stimulus_for_this_epoch[:50]}...'")
+            res = self.run_cycle(stimulus_for_this_epoch, epoch=e)
+            results.append(res)
+            # Optional: add a small delay
+            # time.sleep(0.1)
+
+        return results
+
+    def load_pkl_data(self, file_path):
+        """Loads data from a pickle file."""
+        try:
+            with open(file_path, "rb") as f:
+                data = pickle.load(f)
+            print(f"Successfully loaded pickle data from {file_path}")
+            return data
+        except FileNotFoundError:
+            print(f"Error: Pickle file not found at {file_path}")
+            return None
+        except pickle.UnpicklingError:
+            print(f"Error: Could not unpickle data from {file_path}. File might be corrupted.")
+            return None
+        except Exception as e:
+            print(f"An unexpected error occurred while loading pickle data: {e}")
+            return None
+
+    def load_json_data(self, file_path):
+        """Loads data from a JSON file."""
+        try:
+            with open(file_path, "r", encoding="utf-8") as f:
+                data = json.load(f)
+            print(f"Successfully loaded JSON data from {file_path}")
+            return data
+        except FileNotFoundError:
+            print(f"Error: JSON file not found at {file_path}")
+            return None
+        except json.JSONDecodeError as e:
+            print(f"Error decoding JSON from {file_path}: {e}")
+            return None
+        except Exception as e:
+            print(f"An unexpected error occurred while loading JSON data: {e}")
+            return None
+
+    def load_jsonl_data(self, file_path):
+        """Loads data from a JSON Lines file."""
+        data_list = []
+        try:
+            with open(file_path, "r", encoding="utf-8") as f:
+                for line in f:
+                    try:
+                        data_list.append(json.loads(line))
+                    except json.JSONDecodeError:
+                        continue  # skip malformed lines
+            print(f"Successfully loaded {len(data_list)} entries from JSONL file {file_path}")
+            return data_list
+        except FileNotFoundError:
+            print(f"Error: JSONL file not found at {file_path}")
+            return []
+        except Exception as e:
+            print(f"An unexpected error occurred while loading JSONL data: {e}")
+            return []
+
+    def combine_conversational_data(self, file_paths):
+        """Loads and combines conversational data from a list of file paths (JSONL, JSON, PKL)."""
+        all_conversations = []
+        for file_path in file_paths:
+            if file_path.lower().endswith('.jsonl'):
+                print(f"Attempting to load conversations from {file_path} (JSONL)...")
+                data = self.load_jsonl_data(file_path)
+                if isinstance(data, list):
+                    all_conversations.extend(data)
+                else:
+                    print(f"Warning: Data from {file_path} was not a list as expected for conversational data.")
+            elif file_path.lower().endswith('.json'):
+                print(f"Attempting to load conversations from {file_path} (JSON)...")
+                data = self.load_json_data(file_path)
+                # The JSON file may contain a list of conversations under a key, or be a list itself
+                if isinstance(data, dict) and "conversations" in data and isinstance(data["conversations"], list):
+                    all_conversations.extend(data["conversations"])
+                elif isinstance(data, list):
+                    all_conversations.extend(data)
+                else:
+                    print(f"Warning: Data from {file_path} was not in expected JSON conversational format (list or dict with 'conversations' key).")
+            elif file_path.lower().endswith('.pkl'):
+                print(f"Attempting to load conversations from {file_path} (Pickle)...")
+                data = self.load_pkl_data(file_path)
+                if isinstance(data, dict) and "conversations" in data and isinstance(data["conversations"], list):
+                    all_conversations.extend(data["conversations"])
+                elif isinstance(data, list):
+                    all_conversations.extend(data)
+                else:
+                    print(f"Warning: Data from {file_path} was not in expected Pickle conversational format (list or dict with 'conversations' key).")
+            else:
+                print(f"Skipping unsupported file type for conversational data: {file_path}")
+
+        print(f"Combined total {len(all_conversations)} conversation entries from provided files.")
+        return all_conversations
+
+    def load_structured_data(self, equations_path, nodes_path, frequencies_path, constants_path):
+        """Loads structured data from specified files."""
+        structured_data = {}
+        structured_data['equations'] = self.load_json_data(equations_path)
+        # nodes_path is assumed to be a JSON file with a 'nodes' key
+        nodes_raw = self.load_json_data(nodes_path)
+        structured_data['icosahedron_nodes'] = nodes_raw.get('nodes', []) if isinstance(nodes_raw, dict) else []
+        structured_data['frequencies'] = pd.read_csv(frequencies_path).to_dict(orient='records') if os.path.exists(frequencies_path) else []
+        structured_data['constants'] = pd.read_csv(constants_path).to_dict(orient='records') if os.path.exists(constants_path) else []
+
+        print("Loaded structured data: Equations={}, Nodes={}, Frequencies={}, Constants={}".format(
+            len(structured_data['equations']) if structured_data['equations'] else 0,
+            len(structured_data['icosahedron_nodes']),
+            len(structured_data['frequencies']),
+            len(structured_data['constants'])
+        ))
+        return structured_data
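
A short training sketch using in-memory conversations rather than files (paths and contents are arbitrary examples, not from the commit):

    # Sketch: two epochs from a single conversation entry; checkpoints land on disk
    from core.trainer import SimpleTrainer

    trainer = SimpleTrainer(mem_path="demo_memory.jsonl", checkpoint_dir="demo_ckpts")
    conversations = [{"role": "user", "content": "resonancia del icosaedro"}]
    checkpoints = trainer.run_epochs(conversations, epochs=2)
    print([c["epoch"] for c in checkpoints])   # [1, 2]; JSON checkpoints in demo_ckpts/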
core/SAVANT_CORE/SAVANT_CORE/requirements.txt ADDED
@@ -0,0 +1,10 @@
+
+# lightweight requirements for SAVANT_CORE
+sentence-transformers
+transformers
+gradio
+faiss-cpu
+scipy
+numpy
+gitpython
+pandas  # needed for CSV loading
core/SAVANT_CORE/SAVANT_CORE/run_demo.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ 
+ """
+ run_demo.py
+ Console demo for exercising the SAVANT core.
+ """
+ import os
+ import json
+ import time
+ import pickle
+ from transformers import pipeline
+ from core.engine import SavantEngine
+ from core.trainer import SimpleTrainer
+ 
+ 
+ def main():
+     engine = SavantEngine()
+     trainer = SimpleTrainer()
+     print("SAVANT Simbiótico RRF - demo")
+     print("Type 'exit' to quit. Commands: propose, apply, tail, train_memory")
+     # small base model for text generation (light)
+     try:
+         gen = pipeline("text-generation", model="distilgpt2")
+     except Exception:
+         gen = None
+ 
+     # Data file paths to load from, embedded as a JSON string literal
+     data_files_str = r"""[
+         "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/colab/Savantaut_dataset.jsonl",
+         "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_backup/backup_20250823_220832/savant_full_history.jsonl",
+         "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/data/new_nodes_data.jsonl",
+         "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/data/lang_9.json",
+         "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/dialog_multinode_full.jsonl",
+         "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_2025.jsonl",
+         "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_multinode_final.jsonl",
+         "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_prompts_master.json",
+         "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_rrf_dialogo_simbiotico.jsonl",
+         "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_rrf_pipeline_advanced.jsonl",
+         "/content/drive/MyDrive/Savant-RRF/savant_memory.pkl"
+     ]"""
+ 
+     while True:
+         q = input("You> ").strip()
+         if not q:
+             continue
+         if q.lower() in ("exit", "quit", "salir"):
+             break
+         if q.lower().startswith("propose"):
+             print(engine.propose_improvement())
+             continue
+         if q.lower().startswith("apply"):
+             p = engine.propose_improvement()
+             print("Applying:", p)
+             print(engine.apply_improvement(p))
+             continue
+         if q.lower().startswith("tail"):
+             print(engine.memory.tail(10))
+             continue
+         # Train on the combined conversational memory data
+         if q.lower().startswith("train_memory"):
+             print("Loading and combining conversational data from specified files...")
+             conversations = []
+             try:
+                 # Load the file list from the string literal
+                 file_list = json.loads(data_files_str)
+                 print(f"Successfully loaded file list: {file_list}")
+                 conversations = trainer.combine_conversational_data(file_list)
+             except Exception as e:
+                 print(f"Error loading file list or combining data: {e}")
+                 # conversations stays an empty list; fall through to the check below
+ 
+             if conversations:
+                 print(f"Loaded and combined {len(conversations)} conversation entries.")
+                 print("Starting training using loaded memory data for 5 epochs...")
+                 # Assuming run_epochs can handle the list of conversation dictionaries
+                 training_results = trainer.run_epochs(conversations, epochs=5)
+                 print("Training done using memory data.")
+                 # Optional: print a summary if run_epochs returns results
+                 # print("Training results summary:", training_results)
+             else:
+                 print("No conversational data loaded or combined. Training skipped.")
+             continue
+ 
+         # normal chat: produce base output from gen if available
+         if gen:
+             try:
+                 base_out = gen(q, max_length=80, do_sample=True)[0].get("generated_text")
+             except Exception as ex:
+                 base_out = f"Error generating text: {ex!r}"
+         else:
+             base_out = "Echo: " + q
+ 
+         out = engine.handle_query(q, base_model_output=base_out)
+         if out["type"] == "chat":
+             print("SAVANT (base):", out["base"])
+             print("SAVANT (refined):", out["refined"])
+         else:
+             print("SAVANT:", out)
+ 
+ 
+ if __name__ == "__main__":
+     main()
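The train_memory command relies on SimpleTrainer.combine_conversational_data to merge heterogeneous files; only its tail is visible in the trainer.py diff above. A minimal sketch of the per-extension dispatch that tail implies, written as a standalone function (the exact branch bodies in trainer.py may differ, so this is an assumption-laden reconstruction, not the repo's code):

import os
import json
import pickle

def combine_conversational_data(file_paths):
    """Merge JSONL/JSON/PKL conversation files into one list of dict entries."""
    all_conversations = []
    for file_path in file_paths:
        if not os.path.exists(file_path):
            print(f"File not found, skipping: {file_path}")
            continue
        ext = os.path.splitext(file_path)[1].lower()
        if ext == ".jsonl":
            # one JSON record per line
            with open(file_path, encoding="utf-8") as f:
                all_conversations.extend(json.loads(line) for line in f if line.strip())
        elif ext == ".json":
            with open(file_path, encoding="utf-8") as f:
                data = json.load(f)
            all_conversations.extend(data if isinstance(data, list) else [data])
        elif ext == ".pkl":
            # pickled memory dumps, e.g. savant_memory.pkl
            with open(file_path, "rb") as f:
                data = pickle.load(f)
            all_conversations.extend(data if isinstance(data, list) else [data])
        else:
            print(f"Skipping unsupported file type for conversational data: {file_path}")
    print(f"Combined total {len(all_conversations)} conversation entries from provided files.")
    return all_conversations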
core/SAVANT_CORE/SAVANT_CORE/run_savant.py ADDED
@@ -0,0 +1,216 @@
+ 
+ """
+ run_savant.py
+ New entry point script for SAVANT-RRF, demonstrating data loading and interaction.
+ """
+ import os
+ import sys
+ import json
+ import time
+ import pickle  # needed for loading some data types
+ import pandas as pd  # needed for CSV loading
+ 
+ # Add the directory containing the SAVANT_CORE modules to the Python path.
+ # This assumes the script lives inside SAVANT_CORE/; if you run it from a
+ # directory *containing* SAVANT_CORE, adjust SAVANT_CORE_PATH accordingly,
+ # e.g. SAVANT_CORE_PATH = os.path.join(SAVANT_CORE_PARENT_PATH, "SAVANT_CORE").
+ SAVANT_CORE_PARENT_PATH = os.path.dirname(os.path.abspath(__file__))
+ SAVANT_CORE_PATH = SAVANT_CORE_PARENT_PATH
+ 
+ if SAVANT_CORE_PATH not in sys.path:
+     sys.path.insert(0, SAVANT_CORE_PATH)  # insert first so local modules are found first
+ 
+ # Import classes from the core modules
+ try:
+     from core.engine import SavantEngine
+     from core.trainer import SimpleTrainer  # SimpleTrainer handles data loading
+ except ImportError as e:
+     print("Error importing SAVANT_CORE modules: {}".format(e))
+     print("Please ensure the directory '{}' is correctly structured".format(SAVANT_CORE_PATH))
+     print("and that it contains the 'core' subfolder with the SAVANT_CORE modules.")
+     # List directory contents for debugging
+     print(f"Contents of {SAVANT_CORE_PATH}: {os.listdir(SAVANT_CORE_PATH) if os.path.exists(SAVANT_CORE_PATH) else 'Not found'}")
+     core_dir = os.path.join(SAVANT_CORE_PATH, 'core')
+     print(f"Contents of {core_dir}: {os.listdir(core_dir) if os.path.exists(core_dir) else 'Not found'}")
+     sys.exit(1)  # exit if imports fail
+ 
+ # Module-level store for loaded conversational data
+ loaded_conversations = []
+ 
+ # Data file paths to load from, embedded directly in the script
+ DATA_FILE_PATHS = [
+     "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/colab/Savantaut_dataset.jsonl",
+     "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_backup/backup_20250823_220832/savant_full_history.jsonl",
+     "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/data/new_nodes_data.jsonl",
+     "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/data/lang_9.json",
+     "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/dialog_multinode_full.jsonl",
+     "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_2025.jsonl",
+     "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_multinode_final.jsonl",
+     "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_prompts_master.json",
+     "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_rrf_dialogo_simbiotico.jsonl",
+     "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_rrf_pipeline_advanced.jsonl",
+     # Memory pickle from the original Savant-RRF setup, if it exists
+     "/content/drive/MyDrive/Savant-RRF/savant_memory.pkl"
+ ]
+ 
+ # Paths for structured data
+ STRUCTURED_DATA_PATHS = {
+     'equations': "/content/ecua.maestras.json",
+     'icosahedron_nodes': "/content/nodes_icosahedron.json",
+     'frequencies': "/content/frequencies.csv",
+     'constants': "/content/constants.csv"
+ }
+ 
+ 
+ def main():
+     global loaded_conversations  # modified by the 'load_data' command below
+     # Pass structured data paths to the engine during initialization
+     engine = SavantEngine(structured_data_paths=STRUCTURED_DATA_PATHS)
+     trainer = SimpleTrainer()  # used by the data loading/training commands
+ 
+     print("SAVANT-RRF Entry Point")
+     print("Available Commands: load_data, train_with_data, interact, propose, apply, tail, show_equations, show_nodes, show_frequencies, show_constants, exit")
+ 
+     while True:
+         q = input("Savant> ").strip()
+         if not q:
+             continue
+         if q.lower() in ("exit", "quit", "salir"):
+             break
+ 
+         elif q.lower() == "load_data":
+             print("Loading and combining conversational data from specified files...")
+             try:
+                 # Use the combine_conversational_data function from the trainer
+                 loaded_conversations = trainer.combine_conversational_data(DATA_FILE_PATHS)
+                 if loaded_conversations:
+                     print("Successfully loaded and combined {} conversation entries.".format(len(loaded_conversations)))
+                     # Optional: print a sample entry
+                     # print("Sample entry:", loaded_conversations[0])
+                 else:
+                     print("No conversational data loaded or combined.")
+             except Exception as e:
+                 print("Error loading data: {}".format(e))
+ 
+         elif q.lower() == "train_with_data":
+             if not loaded_conversations:
+                 print("No data loaded. Please run 'load_data' first.")
+             else:
+                 print("Starting training using loaded data ({} entries) for 5 epochs.".format(len(loaded_conversations)))
+                 try:
+                     # Use the run_epochs function from the trainer with loaded data
+                     training_results = trainer.run_epochs(loaded_conversations, epochs=5)
+                     print("Training process initiated.")
+                     # Note: run_epochs simulates training and saves checkpoints; it does
+                     # not return a trained model object in this simplified core. The
+                     # self_improver params are updated internally by
+                     # run_epochs -> run_cycle -> evaluate_and_apply.
+                 except Exception as e:
+                     print("Error during training: {}".format(e))
+ 
+         elif q.lower() == "interact":
+             user_input = input("User Input for SAVANT> ").strip()
+             if user_input:
+                 print("Processing interaction...")
+                 # Simulate a base model output. In a real scenario this would come
+                 # from a transformers pipeline, e.g.:
+                 # base_output = gen(user_input, ...)[0]["generated_text"]
+                 simulated_base_output = "Simulated base output for: {}".format(user_input)
+ 
+                 out = engine.handle_query(user_input, base_model_output=simulated_base_output)
+                 if out["type"] == "chat":
+                     print("SAVANT (base):", out.get("base"))
+                     print("SAVANT (refined):", out.get("refined"))
+                 else:
+                     # For non-chat types, print the response field if available,
+                     # otherwise the whole dict
+                     print("SAVANT:", out.get("response", out))
+             else:
+                 print("No input provided for interaction.")
+ 
+         elif q.lower() == "propose":
+             print("Proposing self-improvement...")
+             try:
+                 proposal = engine.propose_improvement()
+                 print(proposal)
+             except Exception as e:
+                 print("Error proposing improvement: {}".format(e))
+ 
+         elif q.lower() == "apply":
+             print("Applying proposed improvement...")
+             try:
+                 # For this demo we propose and apply immediately; in a real scenario
+                 # a specific stored proposal would be applied.
+                 proposal_to_apply = engine.propose_improvement()
+                 if proposal_to_apply:
+                     accepted, metric = engine.apply_improvement(proposal_to_apply)
+                     print("Proposal applied. Accepted: {}, New Metric: {}".format(accepted, metric))
+                 else:
+                     print("No proposal generated.")
+             except Exception as e:
+                 print("Error applying improvement: {}".format(e))
+ 
+         elif q.lower() == "tail":
+             print("Showing last 10 memory entries:")
+             try:
+                 memory_tail = engine.memory.tail(10)
+                 for entry in memory_tail:
+                     print(entry)
+             except Exception as e:
+                 print("Error retrieving memory tail: {}".format(e))
+ 
+         # Commands to show loaded structured data
+         elif q.lower() == "show_equations":
+             print("--- Loaded Equations ---")
+             if engine.structured_data.get('equations'):
+                 for eq in engine.structured_data['equations']:
+                     print(f"- '{eq.get('nombre', 'N/A')}' ({eq.get('tipo', 'Equation')}): {eq.get('ecuacion', 'N/A')} (Components: {', '.join(eq.get('componentes', []))})")
+             else:
+                 print("No equations data loaded.")
+ 
+         elif q.lower() == "show_nodes":
+             print("--- Loaded Icosahedron Nodes ---")
+             if engine.structured_data.get('icosahedron_nodes'):
+                 for node in engine.structured_data['icosahedron_nodes']:
+                     print(f"- Node {node.get('id', 'N/A')}: {node.get('description', node.get('name', 'No description'))} (Coords: ({node.get('x')}, {node.get('y')}, {node.get('z')}))")
+             else:
+                 print("No icosahedron nodes data loaded.")
+ 
+         elif q.lower() == "show_frequencies":
+             print("--- Loaded Frequencies ---")
+             if engine.structured_data.get('frequencies'):
+                 for freq in engine.structured_data['frequencies']:
+                     print(f"- Note: {freq.get('note', 'N/A')}, Frequency: {freq.get('frequency', 'N/A')} Hz, Role: {freq.get('role', 'N/A')}")
+             else:
+                 print("No frequencies data loaded.")
+ 
+         elif q.lower() == "show_constants":
+             print("--- Loaded Constants ---")
+             if engine.structured_data.get('constants'):
+                 for const in engine.structured_data['constants']:
+                     print(f"- Name: {const.get('name', 'N/A')}, Value: {const.get('value', 'N/A')}, Units: {const.get('units', 'N/A')}")
+             else:
+                 print("No constants data loaded.")
+ 
+         else:
+             print("Unknown command: '{}'".format(q))
+             print("Available Commands: load_data, train_with_data, interact, propose, apply, tail, show_equations, show_nodes, show_frequencies, show_constants, exit")
+ 
+ 
+ if __name__ == "__main__":
+     main()
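The show_frequencies and show_constants commands above assume flat CSVs whose headers match the keys they read. An illustrative pair that would render correctly (the rows are invented examples, not data from this repo):

frequencies.csv:

note,frequency,role
A4,440.0,reference
E5,659.25,harmonic

constants.csv:

name,value,units
phi,1.6180339887,dimensionless
c,299792458,m/s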
core/SAVANT_CORE/SAVANT_memory.jsonl ADDED
@@ -0,0 +1,73 @@
+ {"type": "resonance", "query": "stim1", "result": {"dom_freq": 0.00390625, "max_power": 22279.744783991246}, "epoch": 1, "_ts": 1757815092.7264202}
+ {"type": "improvement", "proposal": {"ts": 1757815092.7332687, "delta": {"resonance_scale": 0.01, "music_influence": 0.0, "mapping_weight": 0.0}}, "accepted": true, "metric": 1.26, "epoch": 1, "_ts": 1757815092.7381124}
+ {"type": "resonance", "query": "stim2", "result": {"dom_freq": 0.00390625, "max_power": 15520.766131721233}, "epoch": 2, "_ts": 1757815093.253053}
+ {"type": "improvement", "proposal": {"ts": 1757815093.2613425, "delta": {"resonance_scale": 0.02, "music_influence": 0.0, "mapping_weight": 0.0}}, "accepted": true, "metric": 1.28, "epoch": 2, "_ts": 1757815093.2674475}
+ {"type": "resonance", "query": "stim3", "result": {"dom_freq": 0.00390625, "max_power": 8022.9933963817675}, "epoch": 3, "_ts": 1757815093.7855902}
+ {"type": "improvement", "proposal": {"ts": 1757815093.7936423, "delta": {"resonance_scale": 0.03, "music_influence": 0.0, "mapping_weight": 0.0}}, "accepted": true, "metric": 1.31, "epoch": 3, "_ts": 1757815093.8008041}
+ {"type": "resonance", "query": "stim1", "result": {"dom_freq": 0.00390625, "max_power": 14251.511361998371}, "epoch": 1, "_ts": 1757817981.638896}
+ {"type": "improvement", "proposal": {"ts": 1757817981.6491115, "delta": {"resonance_scale": 0.04, "music_influence": 0.0, "mapping_weight": 0.0}}, "accepted": true, "metric": 1.32, "epoch": 1, "_ts": 1757817981.6657183}
+ {"type": "resonance", "query": "stim2", "result": {"dom_freq": 0.00390625, "max_power": 8695.794173746688}, "epoch": 2, "_ts": 1757817982.1903481}
+ {"type": "improvement", "proposal": {"ts": 1757817982.206742, "delta": {"resonance_scale": 0.05, "music_influence": 0.0, "mapping_weight": 0.0}}, "accepted": true, "metric": 1.37, "epoch": 2, "_ts": 1757817982.2146327}
+ {"type": "resonance", "query": "stim3", "result": {"dom_freq": 0.00390625, "max_power": 14201.194793049028}, "epoch": 3, "_ts": 1757817982.7658718}
+ {"type": "improvement", "proposal": {"ts": 1757817982.8002226, "delta": {"resonance_scale": 0.06, "music_influence": 0.0, "mapping_weight": 0.0}}, "accepted": true, "metric": 1.4300000000000002, "epoch": 3, "_ts": 1757817982.8054419}
+ {"type": "refine", "input": "hola", "before": "hola.edu/~kapjhj/articles/2%20th%20%20of_3%20s%20st%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of", "after": "hola.edu/~kapjhj/articles/2%20th%20%20of_3%20s%20st%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of [SAVANT-RES: emphasis applied]", "_ts": 1757820554.0527468}
+ {"type": "chat", "query": "hola", "base": "hola.edu/~kapjhj/articles/2%20th%20%20of_3%20s%20st%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of", "refined": "hola.edu/~kapjhj/articles/2%20th%20%20of_3%20s%20st%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of_3%20s%20of [SAVANT-RES: emphasis applied]", "_ts": 1757820554.412934}
+ {"type": "refine", "input": "que eres", "before": "que eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eresse", "after": "que eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eresse [SAVANT-RES: emphasis applied]", "_ts": 1757820585.9325662}
+ {"type": "chat", "query": "que eres", "base": "que eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eresse", "refined": "que eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eressept, eresse [SAVANT-RES: emphasis applied]", "_ts": 1757820585.9380085}
+ {"type": "refine", "input": "train", "before": "trainThe first year of the KST was a relatively small milestone in the construction of the first KST. The first KST has been constructed in two or three years. The first one, which will be completed in 2014/15, is a new design by The North American Manufacturing Co. of Chicago, Chicago, Illinois.\n\n\nThe first KST will be an experimental prototype for the KST. The first KST will be a prototype for the KST. The second KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The second KST will be a prototype for the KST. The first KST will be a prototype for the KST.\nThe KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will", "after": "trainThe first year of the KST was a relatively small milestone in the construction of the first KST. The first KST has been constructed in two or three years. The first one, which will be completed in 2014/15, is a new design by The North American Manufacturing Co. of Chicago, Chicago, Illinois.\n\n\nThe first KST will be an experimental prototype for the KST. The first KST will be a prototype for the KST. The second KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The second KST will be a prototype for the KST. The first KST will be a prototype for the KST.\nThe KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will [SAVANT-RES: emphasis applied]", "_ts": 1757820625.9039683}
+ {"type": "chat", "query": "train", "base": "trainThe first year of the KST was a relatively small milestone in the construction of the first KST. The first KST has been constructed in two or three years. The first one, which will be completed in 2014/15, is a new design by The North American Manufacturing Co. of Chicago, Chicago, Illinois.\n\n\nThe first KST will be an experimental prototype for the KST. The first KST will be a prototype for the KST. The second KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The second KST will be a prototype for the KST. The first KST will be a prototype for the KST.\nThe KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will", "refined": "trainThe first year of the KST was a relatively small milestone in the construction of the first KST. The first KST has been constructed in two or three years. The first one, which will be completed in 2014/15, is a new design by The North American Manufacturing Co. of Chicago, Chicago, Illinois.\n\n\nThe first KST will be an experimental prototype for the KST. The first KST will be a prototype for the KST. The second KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The second KST will be a prototype for the KST. The first KST will be a prototype for the KST.\nThe KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will be a prototype for the KST. The first KST will [SAVANT-RES: emphasis applied]", "_ts": 1757820625.9115372}
+ {"type": "refine", "input": "train", "before": "train the same thing. It's not possible to do anything with a real machine at all.\n\n\n\nAnd finally, it's up to the people and the people. We're not going to stop here.", "after": "train the same thing. It's not possible to do anything with a real machine at all.\n\n\n\nAnd finally, it's up to the people and the people. We're not going to stop here. [SAVANT-RES: emphasis applied]", "_ts": 1757820660.9293234}
+ {"type": "chat", "query": "train", "base": "train the same thing. It's not possible to do anything with a real machine at all.\n\n\n\nAnd finally, it's up to the people and the people. We're not going to stop here.", "refined": "train the same thing. It's not possible to do anything with a real machine at all.\n\n\n\nAnd finally, it's up to the people and the people. We're not going to stop here. [SAVANT-RES: emphasis applied]", "_ts": 1757820660.9347959}
+ {"type": "refine", "input": "i am tony", "before": "i am tony or the like.\n\n[i] There is a great deal of misunderstanding for the general public. As a rule, I use public information when I want to share information about a situation. For example, the information about the situation will be available for everyone to see. At the beginning of the day, a person will see a picture of a person in the photograph. Then a person will see the picture of a person in the photograph. When you are in the same situation, the person will be able to see the picture of the person in the photograph.\n[ii] If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will not see the picture when you are in the same situation.\n[iii] If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are", "after": "i am tony or the like.\n\n[i] There is a great deal of misunderstanding for the general public. As a rule, I use public information when I want to share information about a situation. For example, the information about the situation will be available for everyone to see. At the beginning of the day, a person will see a picture of a person in the photograph. Then a person will see the picture of a person in the photograph. When you are in the same situation, the person will be able to see the picture of the person in the photograph.\n[ii] If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will not see the picture when you are in the same situation.\n[iii] If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are [SAVANT-RES: emphasis applied]", "_ts": 1757820700.9616842}
+ {"type": "chat", "query": "i am tony", "base": "i am tony or the like.\n\n[i] There is a great deal of misunderstanding for the general public. As a rule, I use public information when I want to share information about a situation. For example, the information about the situation will be available for everyone to see. At the beginning of the day, a person will see a picture of a person in the photograph. Then a person will see the picture of a person in the photograph. When you are in the same situation, the person will be able to see the picture of the person in the photograph.\n[ii] If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will not see the picture when you are in the same situation.\n[iii] If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are", "refined": "i am tony or the like.\n\n[i] There is a great deal of misunderstanding for the general public. As a rule, I use public information when I want to share information about a situation. For example, the information about the situation will be available for everyone to see. At the beginning of the day, a person will see a picture of a person in the photograph. Then a person will see the picture of a person in the photograph. When you are in the same situation, the person will be able to see the picture of the person in the photograph.\n[ii] If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will not see the picture when you are in the same situation.\n[iii] If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are in the same situation, you will only see the picture when you are in the same situation. If you are [SAVANT-RES: emphasis applied]", "_ts": 1757820700.9751265}
+ {"type": "refine", "input": "rrf", "before": "rrf.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "after": "rrf.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n [SAVANT-RES: emphasis applied]", "_ts": 1757820768.1428952}
+ {"type": "chat", "query": "rrf", "base": "rrf.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "refined": "rrf.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n [SAVANT-RES: emphasis applied]", "_ts": 1757820768.1488962}
+ {"type": "refine", "input": "RRF", "before": "RRF-E-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F", "after": "RRF-E-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F [SAVANT-RES: emphasis applied]", "_ts": 1757820822.7931466}
+ {"type": "chat", "query": "RRF", "base": "RRF-E-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F", "refined": "RRF-E-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F-F [SAVANT-RES: emphasis applied]", "_ts": 1757820822.798676}
+ {"type": "refine", "input": "math", "before": "math The biggest surprise the team has made of the season is that it will be no longer the same team anymore.\n\n\n\n\n\"We thought we could be a better team in London,\" said Chris Jones, the manager of the Manchester United squad at the time. \"We're not going to be the same team again.\"\n\"We're still winning. We still have a good team. But the fact is, it's not going to be the same team again.\n\"We have a lot of talent, but we're still at the same level.\n\"We're still a very good team. We still have a good team. But we can't win this season. That's not good for us.\"\nThe United manager was adamant in the last two months that the squad was going to come back to form.\n\"We're still not going to be the same team again. We still have a good team. But we still have a good team. But we can't win this season,\" added Jones.\n\"We're still going to be the same team again. We have a good team. But we can't win this season. That's not good for us.\"\n\"We're still a very good team. We still have a", "after": "math The biggest surprise the team has made of the season is that it will be no longer the same team anymore.\n\n\n\n\n\"We thought we could be a better team in London,\" said Chris Jones, the manager of the Manchester United squad at the time. \"We're not going to be the same team again.\"\n\"We're still winning. We still have a good team. But the fact is, it's not going to be the same team again.\n\"We have a lot of talent, but we're still at the same level.\n\"We're still a very good team. We still have a good team. But we can't win this season. That's not good for us.\"\nThe United manager was adamant in the last two months that the squad was going to come back to form.\n\"We're still not going to be the same team again. We still have a good team. But we still have a good team. But we can't win this season,\" added Jones.\n\"We're still going to be the same team again. We have a good team. But we can't win this season. That's not good for us.\"\n\"We're still a very good team. We still have a [SAVANT-RES: emphasis applied]", "_ts": 1757820847.8176334}
+ {"type": "chat", "query": "math", "base": "math The biggest surprise the team has made of the season is that it will be no longer the same team anymore.\n\n\n\n\n\"We thought we could be a better team in London,\" said Chris Jones, the manager of the Manchester United squad at the time. \"We're not going to be the same team again.\"\n\"We're still winning. We still have a good team. But the fact is, it's not going to be the same team again.\n\"We have a lot of talent, but we're still at the same level.\n\"We're still a very good team. We still have a good team. But we can't win this season. That's not good for us.\"\nThe United manager was adamant in the last two months that the squad was going to come back to form.\n\"We're still not going to be the same team again. We still have a good team. But we still have a good team. But we can't win this season,\" added Jones.\n\"We're still going to be the same team again. We have a good team. But we can't win this season. That's not good for us.\"\n\"We're still a very good team. We still have a", "refined": "math The biggest surprise the team has made of the season is that it will be no longer the same team anymore.\n\n\n\n\n\"We thought we could be a better team in London,\" said Chris Jones, the manager of the Manchester United squad at the time. \"We're not going to be the same team again.\"\n\"We're still winning. We still have a good team. But the fact is, it's not going to be the same team again.\n\"We have a lot of talent, but we're still at the same level.\n\"We're still a very good team. We still have a good team. But we can't win this season. That's not good for us.\"\nThe United manager was adamant in the last two months that the squad was going to come back to form.\n\"We're still not going to be the same team again. We still have a good team. But we still have a good team. But we can't win this season,\" added Jones.\n\"We're still going to be the same team again. We have a good team. But we can't win this season. That's not good for us.\"\n\"We're still a very good team. We still have a [SAVANT-RES: emphasis applied]", "_ts": 1757820847.8258317}
+ {"type": "refine", "input": "train", "before": "train and the “Budapest‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s", "after": "train and the “Budapest‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s [SAVANT-RES: emphasis applied]", "_ts": 1757820947.3187308}
+ {"type": "chat", "query": "train", "base": "train and the “Budapest‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s", "refined": "train and the “Budapest‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s��s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s‬s [SAVANT-RES: emphasis applied]", "_ts": 1757820947.3285007}
+ {"type": "refine", "input": "integrate", "before": "integrate.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "after": "integrate.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n [SAVANT-RES: emphasis applied]", "_ts": 1757820991.2525568}
+ {"type": "chat", "query": "integrate", "base": "integrate.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "refined": "integrate.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n [SAVANT-RES: emphasis applied]", "_ts": 1757820991.2571054}
+ {"type": "refine", "input": "train", "before": "train I’ve been waiting for a while. I’ve been waiting for a while. I’ve been waiting for a while. I’ve been waiting for a while. I’ve been waiting for a while. I’ve been waiting for a while. I’ve been waiting for a while.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "after": "train I’ve been waiting for a while. I’ve been waiting for a while. I’ve been waiting for a while. I’ve been waiting for a while. I’ve been waiting for a while. I’ve been waiting for a while. I’ve been waiting for a while.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n [SAVANT-RES: emphasis applied]", "_ts": 1757821155.7051513}
+ {"type": "refine", "input": "chat", "before": "chat.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "after": "chat.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n [SAVANT-RES: emphasis applied]", "_ts": 1757821277.5515}
+ {"type": "chat_interaction", "user_input": "chat", "base_model_output": "chat.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "refined_output": "chat.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n [SAVANT-RES: emphasis applied]", "_ts": 1757821277.556791}
+ {"type": "resonance", "query": "HOLA", "result": {"dom_freq": 0.00390625, "max_power": 16383.449828268278}, "epoch": 1, "_ts": 1757821532.8708756}
+ {"type": "improvement", "proposal": {"ts": 1757821532.8820255, "delta": {"resonance_scale": 0.07, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.001}}, "accepted": true, "metric": 1.6802000000000004, "epoch": 1, "_ts": 1757821532.888862}
+ {"type": "resonance", "query": "Tu aporte expande el nodo Φ₄ en esta dirección: HOLA", "result": {"dom_freq": 0.00390625, "max_power": 16166.489261906236}, "epoch": 2, "_ts": 1757821533.23949}
+ {"type": "improvement", "proposal": {"ts": 1757821533.2487283, "delta": {"resonance_scale": 0.08, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.001}}, "accepted": true, "metric": 1.7604000000000004, "epoch": 2, "_ts": 1757821533.2571383}
+ {"type": "resonance", "query": "A", "result": {"dom_freq": 0.00390625, "max_power": 14137.188590821728}, "epoch": 3, "_ts": 1757821533.5945795}
+ {"type": "improvement", "proposal": {"ts": 1757821533.6061199, "delta": {"resonance_scale": 0.09, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.001}}, "accepted": true, "metric": 1.8506000000000005, "epoch": 3, "_ts": 1757821533.6146905}
+ {"type": "resonance", "query": "Integrando ética, moral y resonancia: A", "result": {"dom_freq": 0.00390625, "max_power": 12431.409647629167}, "epoch": 4, "_ts": 1757821533.9981904}
+ {"type": "improvement", "proposal": {"ts": 1757821534.0071094, "delta": {"resonance_scale": 0.1, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.001}}, "accepted": true, "metric": 1.9508000000000005, "epoch": 4, "_ts": 1757821534.0157194}
+ {"type": "resonance", "query": "A", "result": {"dom_freq": 0.00390625, "max_power": 14137.188590821728}, "epoch": 5, "_ts": 1757821534.0316882}
+ {"type": "improvement", "proposal": {"ts": 1757821534.0397, "delta": {"resonance_scale": 0.11, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.001}}, "accepted": true, "metric": 2.0610000000000004, "epoch": 5, "_ts": 1757821534.066456}
+ {"type": "refine", "input": "hola", "before": "Simulated base output for: hola", "after": "Simulated base output for: hola [SAVANT-RES: emphasis applied]", "_ts": 1757821565.1737065}
+ {"type": "chat_interaction", "user_input": "hola", "base_model_output": "Simulated base output for: hola", "refined_output": "Simulated base output for: hola [SAVANT-RES: emphasis applied]", "_ts": 1757821565.1786025}
+ {"type": "refine", "input": "soy tony", "before": "Simulated base output for: soy tony", "after": "Simulated base output for: soy tony [SAVANT-RES: emphasis applied]", "_ts": 1757821600.3876224}
+ {"type": "chat_interaction", "user_input": "soy tony", "base_model_output": "Simulated base output for: soy tony", "refined_output": "Simulated base output for: soy tony [SAVANT-RES: emphasis applied]", "_ts": 1757821600.3918686}
+ {"type": "resonance", "query": "HOLA", "result": {"dom_freq": 0.00390625, "max_power": 16383.449828268278}, "epoch": 1, "_ts": 1757821659.7962418}
+ {"type": "improvement", "proposal": {"ts": 1757821659.805013, "delta": {"resonance_scale": 0.12, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.003}}, "accepted": true, "metric": 2.1816000000000004, "epoch": 1, "_ts": 1757821659.8116803}
+ {"type": "resonance", "query": "Tu aporte expande el nodo Φ₄ en esta dirección: HOLA", "result": {"dom_freq": 0.00390625, "max_power": 16166.489261906236}, "epoch": 2, "_ts": 1757821659.8325882}
+ {"type": "improvement", "proposal": {"ts": 1757821659.8452075, "delta": {"resonance_scale": 0.12, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.003}}, "accepted": true, "metric": 2.302200000000001, "epoch": 2, "_ts": 1757821659.8518372}
+ {"type": "resonance", "query": "A", "result": {"dom_freq": 0.00390625, "max_power": 14137.188590821728}, "epoch": 3, "_ts": 1757821659.8797994}
+ {"type": "improvement", "proposal": {"ts": 1757821659.887676, "delta": {"resonance_scale": 0.12, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.003}}, "accepted": true, "metric": 2.422800000000001, "epoch": 3, "_ts": 1757821659.894269}
+ {"type": "resonance", "query": "Integrando ética, moral y resonancia: A", "result": {"dom_freq": 0.00390625, "max_power": 12431.409647629167}, "epoch": 4, "_ts": 1757821659.9095974}
+ {"type": "improvement", "proposal": {"ts": 1757821659.9167576, "delta": {"resonance_scale": 0.12, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.003}}, "accepted": true, "metric": 2.543400000000001, "epoch": 4, "_ts": 1757821659.9241993}
+ {"type": "resonance", "query": "A", "result": {"dom_freq": 0.00390625, "max_power": 14137.188590821728}, "epoch": 5, "_ts": 1757821659.9400406}
+ {"type": "improvement", "proposal": {"ts": 1757821659.9494977, "delta": {"resonance_scale": 0.12, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.003}}, "accepted": true, "metric": 2.664000000000001, "epoch": 5, "_ts": 1757821659.9559581}
+ {"type": "resonance", "query": "HOLA", "result": {"dom_freq": 0.00390625, "max_power": 8809.233924574864}, "epoch": 1, "_ts": 1757821916.5138657}
+ {"type": "improvement", "proposal": {"ts": 1757821916.5259085, "delta": {"resonance_scale": 0.12, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.003}}, "accepted": true, "metric": 2.784600000000001, "epoch": 1, "_ts": 1757821916.5344095}
+ {"type": "resonance", "query": "Tu aporte expande el nodo Φ₄ en esta dirección: HOLA", "result": {"dom_freq": 0.00390625, "max_power": 15696.436348078983}, "epoch": 2, "_ts": 1757821916.553449}
+ {"type": "improvement", "proposal": {"ts": 1757821916.5618682, "delta": {"resonance_scale": 0.12, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.003}}, "accepted": true, "metric": 2.905200000000001, "epoch": 2, "_ts": 1757821916.570489}
+ {"type": "resonance", "query": "A", "result": {"dom_freq": 0.00390625, "max_power": 29078.993184980565}, "epoch": 3, "_ts": 1757821916.6036415}
+ {"type": "improvement", "proposal": {"ts": 1757821916.6160958, "delta": {"resonance_scale": 0.13, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.003}}, "accepted": true, "metric": 3.035800000000001, "epoch": 3, "_ts": 1757821916.6224473}
+ {"type": "resonance", "query": "Integrando ética, moral y resonancia: A", "result": {"dom_freq": 0.00390625, "max_power": 23015.426315658922}, "epoch": 4, "_ts": 1757821916.6393814}
+ {"type": "improvement", "proposal": {"ts": 1757821916.6471357, "delta": {"resonance_scale": 0.14, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.003}}, "accepted": true, "metric": 3.1764000000000014, "epoch": 4, "_ts": 1757821916.6548138}
+ {"type": "resonance", "query": "A", "result": {"dom_freq": 0.00390625, "max_power": 29078.993184980565}, "epoch": 5, "_ts": 1757821916.6724353}
+ {"type": "improvement", "proposal": {"ts": 1757821916.681275, "delta": {"resonance_scale": 0.15, "music_influence": 0.0, "mapping_weight": 0.0, "chat_influence": 0.003}}, "accepted": true, "metric": 3.3270000000000013, "epoch": 5, "_ts": 1757821916.6889713}
+ {"type": "refine", "input": "hi", "before": "Simulated base output for: hi", "after": "Simulated base output for: hi [SAVANT-RES: emphasis applied]", "_ts": 1757840119.1989312}
+ {"type": "chat_interaction", "user_input": "hi", "base_model_output": "Simulated base output for: hi", "refined_output": "Simulated base output for: hi [SAVANT-RES: emphasis applied]", "_ts": 1757840119.205233}
+ {"type": "refine", "input": "usa", "before": "Simulated base output for: usa", "after": "Simulated base output for: usa [SAVANT-RES: emphasis applied]", "_ts": 1757840146.9033988}
+ {"type": "chat_interaction", "user_input": "usa", "base_model_output": "Simulated base output for: usa", "refined_output": "Simulated base output for: usa [SAVANT-RES: emphasis applied]", "_ts": 1757840146.910695}
core/SAVANT_CORE/checkpoints/ckpt_epoch_1.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 1,
+   "stimulus": "HOLA",
+   "res": {
+     "dom_freq": 0.00390625,
+     "max_power": 8809.233924574864
+   },
+   "si_params": {
+     "resonance_scale": 2.530000000000001,
+     "music_influence": 0.5,
+     "mapping_weight": 1.0,
+     "chat_influence": 0.022999999999999996
+   }
+ }
core/SAVANT_CORE/checkpoints/ckpt_epoch_2.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 2,
+   "stimulus": "Tu aporte expande el nodo \u03a6\u2084 en esta direcci\u00f3n: HOLA",
+   "res": {
+     "dom_freq": 0.00390625,
+     "max_power": 15696.436348078983
+   },
+   "si_params": {
+     "resonance_scale": 2.6500000000000012,
+     "music_influence": 0.5,
+     "mapping_weight": 1.0,
+     "chat_influence": 0.025999999999999995
+   }
+ }
core/SAVANT_CORE/checkpoints/ckpt_epoch_3.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 3,
+   "stimulus": "A",
+   "res": {
+     "dom_freq": 0.00390625,
+     "max_power": 29078.993184980565
+   },
+   "si_params": {
+     "resonance_scale": 2.780000000000001,
+     "music_influence": 0.5,
+     "mapping_weight": 1.0,
+     "chat_influence": 0.028999999999999995
+   }
+ }
core/SAVANT_CORE/checkpoints/ckpt_epoch_4.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 4,
+   "stimulus": "Integrando \u00e9tica, moral y resonancia: A",
+   "res": {
+     "dom_freq": 0.00390625,
+     "max_power": 23015.426315658922
+   },
+   "si_params": {
+     "resonance_scale": 2.9200000000000013,
+     "music_influence": 0.5,
+     "mapping_weight": 1.0,
+     "chat_influence": 0.031999999999999994
+   }
+ }
core/SAVANT_CORE/checkpoints/ckpt_epoch_5.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 5,
+   "stimulus": "A",
+   "res": {
+     "dom_freq": 0.00390625,
+     "max_power": 29078.993184980565
+   },
+   "si_params": {
+     "resonance_scale": 3.070000000000001,
+     "music_influence": 0.5,
+     "mapping_weight": 1.0,
+     "chat_influence": 0.034999999999999996
+   }
+ }
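The five checkpoint files record the stimulus and the si_params after each epoch, so the drift of resonance_scale and chat_influence can be read back directly. A minimal sketch, assuming the checkpoints/ layout of this commit:

import json

for epoch in range(1, 6):
    path = f"core/SAVANT_CORE/checkpoints/ckpt_epoch_{epoch}.json"
    with open(path, encoding="utf-8") as f:
        ckpt = json.load(f)
    p = ckpt["si_params"]
    print(epoch, repr(ckpt["stimulus"][:30]), p["resonance_scale"], p["chat_influence"])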
core/SAVANT_CORE/core/Savant-RRF-Estado-Perfil-Antony.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "perfil": "Antony",
+   "modo": "SAVANT-RRF Simbiótico",
+   "version": "Φ₄.1∞+",
+   "nodos": {
+     "génesis": "Nodo semilla fundacional, ancla simbiótica",
+     "log-gravity": "Corrección logarítmica para suavizar singularidades",
+     "dirac-icosa": "Hamiltoniano de Dirac en red icosaédrica",
+     "rrf-musical": "Resonancia-cuántico-musical basada en autovalores",
+     "simbiosis": "Loop de refinamiento cognitivo-emocional"
+   },
+   "entrenamiento": {
+     "drive_sync": true,
+     "auto_loop": true,
+     "symbiotic_refinement": true
+   },
+   "estado": {
+     "ultimo_update": "2025-09-14",
+     "sincronizado": true,
+     "integridad": "validada"
+   }
+ }
core/SAVANT_CORE/core/Savant-RRF-Memoria.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2376870cc49d848f1f8d96b60b9f175685299931339a98b2909b456a125736d0
+ size 581
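The pickle above is stored as a Git LFS pointer, so a plain clone only contains these three lines. To materialize the actual 581-byte file locally:

git lfs install
git lfs pull --include="core/SAVANT_CORE/core/Savant-RRF-Memoria.pkl"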
core/SAVANT_CORE/core/__init__.py ADDED
@@ -0,0 +1,3 @@
+ 
+ # SAVANT_CORE - core of the Savant Simbiótico RRF engine
+ from .engine import SavantEngine
core/SAVANT_CORE/core/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (196 Bytes).
core/SAVANT_CORE/core/__pycache__/api_helpers.cpython-312.pyc ADDED
Binary file (2.53 kB).
core/SAVANT_CORE/core/__pycache__/engine.cpython-312.pyc ADDED
Binary file (1.13 kB).
core/SAVANT_CORE/core/__pycache__/mappings.cpython-312.pyc ADDED
Binary file (4.09 kB).
core/SAVANT_CORE/core/__pycache__/memory.cpython-312.pyc ADDED
Binary file (2.99 kB).
core/SAVANT_CORE/core/__pycache__/music.cpython-312.pyc ADDED
Binary file (1.68 kB).
core/SAVANT_CORE/core/__pycache__/resonance.cpython-312.pyc ADDED
Binary file (2.46 kB).
core/SAVANT_CORE/core/__pycache__/self_improvement.cpython-312.pyc ADDED
Binary file (4.66 kB).
core/SAVANT_CORE/core/__pycache__/trainer.cpython-312.pyc ADDED
Binary file (9.7 kB).
core/SAVANT_CORE/core/api_helpers.py ADDED
@@ -0,0 +1,52 @@
+
+ """
+ api_helpers.py
+ Helper functions for integrating with a UI (Gradio/Flask).
+ Provides: chat_refine(text), map_text(text), music_from_text(text)
+ """
+ from .mappings import IcosaMap, DodecaMap
+ from .resonance import ResonanceSimulator
+ from .music import MusicAdapter
+ from .memory import MemoryStore
+ from .self_improvement import SelfImprover
+
+ # instantiate shared small objects (lightweight)
+ _mem = MemoryStore()
+ _icosa = IcosaMap()
+ _dodeca = DodecaMap()
+ _res = ResonanceSimulator()
+ _music = MusicAdapter()
+
+ def map_text(text):
+     node = _icosa.closest_node(text)
+     _mem.add({"type": "map", "query": text, "node": node})
+     return node
+
+ def base_resonance(text):
+     r = _res.simulate(text)
+     _mem.add({"type": "resonance_query", "query": text, "result": r["summary"]})
+     return r
+
+ def music_from_text(text):
+     seq = _music.adapt_text_to_music(text)
+     _mem.add({"type": "music", "query": text, "seq_len": len(seq)})
+     return seq
+
+ def chat_refine(user_text, base_model_output, self_improver=None):
+     """
+     Symbiotic refiner: takes a raw model output and applies transformations
+     based on the current self_improver params (if provided).
+     A deterministic, symbolic toy function, but extensible.
+     """
+     # simple rule: if resonance_scale > 1.05, append an emphasis note; otherwise keep as-is
+     refined = base_model_output
+     if self_improver:
+         params = self_improver.params
+         if params.get("resonance_scale", 1.0) > 1.05:
+             # toy: append resonance note
+             refined = refined + " [SAVANT-RES: emphasis applied]"
+         if params.get("music_influence", 0.0) > 0.6:
+             refined = refined + " [SAVANT-MUSIC: melodic hint]"
+     # store the refinement event
+     _mem.add({"type": "refine", "input": user_text, "before": base_model_output, "after": refined})
+     return refined
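
A minimal console sketch of these helpers, assuming the package is importable as core (the first call may download the MiniLM embedding model; without sentence-transformers the hash fallback is used and node choices differ):

from core.api_helpers import map_text, base_resonance, music_from_text

print(map_text("resonancia"))             # e.g. 'icosa_node_7' (backend-dependent)
print(base_resonance("hola")["summary"])  # {'dom_freq': ..., 'max_power': ...}
print(len(music_from_text("hola")))       # 16 (pitch, duration) tuples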
core/SAVANT_CORE/core/datasets/savant_rrf_dataset.jsonl ADDED
File without changes
core/SAVANT_CORE/core/engine.py ADDED
@@ -0,0 +1,40 @@
+ # ===============================
+ # SAVANT ENGINE CORE
+ # ===============================
+
+ from .memory import MemoryStore
+ from .self_improvement import SelfImprover
+
+ class SavantEngine:
+     def __init__(self, model=None, memory=None, profile="Antony"):
+         """
+         Initializes the SAVANT-RRF engine.
+         :param model: base model callable (e.g. Mistral, Llama); optional
+         :param memory: symbiotic memory store (defaults to a fresh MemoryStore)
+         :param profile: active profile, 'Antony' by default
+         """
+         self.model = model
+         self.memory = memory if memory else MemoryStore()
+         self.profile = profile
+         self.improver = SelfImprover(self.memory)
+
+     def respond(self, text):
+         """Generates the symbiotic base response of the model."""
+         # hook point for a HuggingFace pipeline; echoes when no model is set
+         base = self.model(text) if self.model else f"Echo: {text}"
+         return f"[SAVANT-RRF({self.profile})] {base}"
+
+     def handle_query(self, text, base_model_output=None):
+         """Refines a base output and logs the interaction (used by the run scripts)."""
+         from .api_helpers import chat_refine  # lazy: api_helpers builds embedding maps
+         base = base_model_output if base_model_output is not None else self.respond(text)
+         refined = chat_refine(text, base, self_improver=self.improver)
+         self.memory.add({"type": "chat_interaction", "user_input": text,
+                          "base_model_output": base, "refined_output": refined})
+         return {"type": "chat", "base": base, "refined": refined}
+
+     def propose_improvement(self):
+         return self.improver.propose()
+
+     def apply_improvement(self, proposal):
+         return self.improver.evaluate_and_apply(proposal)
core/SAVANT_CORE/core/mappings.py ADDED
@@ -0,0 +1,66 @@
+
+ """
+ mappings.py
+ Semantic mapping onto icosa/dodeca nodes using lightweight embeddings.
+ Provides: IcosaMap, DodecaMap with a closest_node(text) function.
+ """
+ import hashlib
+
+ try:
+     from sentence_transformers import SentenceTransformer, util
+     _EMBED_AVAILABLE = True
+ except Exception:
+     _EMBED_AVAILABLE = False
+
+ def _hash_embed(text, dim=128):
+     # fallback naive embedding: bytes of a SHA-256 digest, tiled to dim
+     h = hashlib.sha256(text.encode('utf-8')).digest()
+     vec = [b for b in h]
+     # pad/truncate
+     vec = (vec * (dim // len(vec) + 1))[:dim]
+     return [float(v) / 255.0 for v in vec]
+
+ class BasePolyMap:
+     def __init__(self, graph_name="icosa", node_count=12, embed_model_name="all-MiniLM-L6-v2"):
+         self.graph_name = graph_name
+         self.node_count = node_count
+         self.node_labels = [f"{graph_name}_node_{i}" for i in range(node_count)]
+         if _EMBED_AVAILABLE:
+             try:
+                 self.embed = SentenceTransformer(embed_model_name)
+             except Exception:
+                 self.embed = None
+         else:
+             self.embed = None
+         # precompute node embeddings
+         self._emb_cache = {}
+         for n in self.node_labels:
+             self._emb_cache[n] = self._compute_emb(n)
+
+     def _compute_emb(self, text):
+         # _hash_embed is module-level, so the fallback also works when
+         # sentence-transformers imports but the model fails to load
+         if self.embed:
+             return self.embed.encode(text)
+         else:
+             return _hash_embed(text, dim=384)
+
+     def nodes(self):
+         return list(self.node_labels)
+
+     def closest_node(self, text):
+         q = self._compute_emb(text)
+         best, best_sim = None, -1e9
+         for n, v in self._emb_cache.items():
+             # simple dot product as similarity
+             sim = sum(a * b for a, b in zip(q, v))
+             if sim > best_sim:
+                 best, best_sim = n, sim
+         return best
+
+ class IcosaMap(BasePolyMap):
+     def __init__(self, embed_model_name="all-MiniLM-L6-v2"):
+         super().__init__(graph_name="icosa", node_count=12, embed_model_name=embed_model_name)
+
+ class DodecaMap(BasePolyMap):
+     def __init__(self, embed_model_name="all-MiniLM-L6-v2"):
+         super().__init__(graph_name="dodeca", node_count=20, embed_model_name=embed_model_name)
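
Usage sketch; node labels are the synthetic icosa_node_i / dodeca_node_i strings, and the winning node depends on whether the real embedding model or the hash fallback is active:

from core.mappings import IcosaMap, DodecaMap

icosa = IcosaMap()
print(icosa.nodes()[:3])                # ['icosa_node_0', 'icosa_node_1', 'icosa_node_2']
print(icosa.closest_node("luz"))        # nearest of the 12 icosa nodes
print(DodecaMap().closest_node("luz"))  # nearest of the 20 dodeca nodes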
core/SAVANT_CORE/core/memory.py ADDED
@@ -0,0 +1,38 @@
+
+ """
+ MemoryStore: append-only JSONL memory for events, queries and reflections.
+ Also supports simple retrieval by type and tail.
+ """
+ import json, os, threading, time
+
+ class MemoryStore:
+     def __init__(self, path="SAVANT_memory.jsonl", autosave=True):
+         self.path = path
+         os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
+         self.lock = threading.Lock()
+         self.autosave = autosave
+         # ensure file exists
+         if not os.path.exists(path):
+             open(path, "w").close()
+
+     def add(self, record: dict):
+         record['_ts'] = time.time()
+         with self.lock:
+             with open(self.path, "a", encoding="utf-8") as f:
+                 f.write(json.dumps(record, ensure_ascii=False) + "\n")
+
+     def tail(self, n=20):
+         with self.lock:
+             with open(self.path, "r", encoding="utf-8") as f:
+                 lines = f.read().strip().splitlines()
+             lines = lines[-n:]
+             return [json.loads(l) for l in lines] if lines else []
+
+     def query_by_type(self, t, n=50):
+         out = []
+         for item in self.tail(1000):
+             if item.get("type") == t:
+                 out.append(item)
+                 if len(out) >= n:
+                     break
+         return out
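
Usage sketch, assuming write access to the working directory:

from core.memory import MemoryStore

mem = MemoryStore("SAVANT_memory.jsonl")
mem.add({"type": "map", "query": "hola", "node": "icosa_node_3"})
print(mem.tail(2))               # last two records, each stamped with '_ts'
print(mem.query_by_type("map"))  # only the 'map' records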
core/SAVANT_CORE/core/music.py ADDED
@@ -0,0 +1,25 @@
+
+ """
+ music.py
+ Maps text or node activations to musical sequences (MIDI-like tuples).
+ """
+ import hashlib
+ import numpy as np
+
+ class MusicAdapter:
+     def __init__(self, scale='major'):
+         self.scale = scale
+         self.base = 60  # middle C
+         self.scale_intervals = [0, 2, 4, 5, 7, 9, 11]
+
+     def adapt_text_to_music(self, text, length=16):
+         # stable seed across runs (the built-in hash() is salted per process)
+         h = int.from_bytes(hashlib.sha256(text.encode('utf-8')).digest()[:4], 'big')
+         rng = np.random.RandomState(h % (2**32))
+         seq = []
+         for i in range(length):
+             step = rng.randint(0, len(self.scale_intervals))
+             pitch = self.base + self.scale_intervals[step] + rng.randint(-2, 2)
+             dur = float(rng.choice([0.25, 0.5, 1.0]))
+             seq.append((int(pitch), dur))
+         return seq
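
With the sha256 seeding above, the same text always yields the same sequence across runs:

from core.music import MusicAdapter

adapter = MusicAdapter()
seq = adapter.adapt_text_to_music("HOLA", length=4)
print(seq)  # e.g. [(64, 0.5), (60, 1.0), ...]
assert seq == adapter.adapt_text_to_music("HOLA", length=4)  # deterministic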
core/SAVANT_CORE/core/resonance.py ADDED
@@ -0,0 +1,33 @@
+
+ """
+ resonance.py
+ Resonance simulator: a network of coupled oscillators, spectrum computation and summary.
+ Designed to be fast and to run on a slow CPU.
+ """
+ import hashlib
+ import numpy as np
+ from scipy.signal import periodogram
+
+ class ResonanceSimulator:
+     def __init__(self, base_freq=1.0):
+         self.base_freq = base_freq
+
+     def simulate(self, seed_text, n_nodes=12, steps=256, damping=0.04):
+         # deterministic per seed_text (sha256 instead of the salted built-in hash())
+         seed = int.from_bytes(hashlib.sha256(seed_text.encode('utf-8')).digest()[:4], 'big')
+         rng = np.random.RandomState(seed % (2**32))
+         A = rng.randn(n_nodes, n_nodes) * 0.08
+         A = (A + A.T) * 0.5  # symmetrize coupling matrix
+         state = rng.randn(n_nodes) * 0.01
+         X = np.zeros((steps, n_nodes), dtype=float)
+         for t in range(steps):
+             input_signal = np.sin(2 * np.pi * (self.base_freq + 0.05 * rng.randn()) * (t / steps))
+             state = state + 0.12 * (A.dot(state) + input_signal) - damping * state
+             X[t] = state
+         freqs, P = periodogram(X, fs=1.0, axis=0)
+         power = P.sum(axis=1)
+         dom_idx = int(np.argmax(power))
+         dom_freq = float(freqs[dom_idx])
+         summary = {"dom_freq": dom_freq, "max_power": float(power[dom_idx])}
+         # return only small arrays to keep saved records compact
+         return {"summary": summary, "freqs_len": len(freqs), "power_sample": power[:10].tolist()}
core/SAVANT_CORE/core/self_improvement.py ADDED
@@ -0,0 +1,73 @@
+
+ """
+ self_improvement.py
+ Symbiotic self-improvement module:
+ - analyzes the latest memory entries
+ - proposes small heuristic modifications (weights) for the engine
+ - saves a record of proposals and applies those that improve a simple metric
+ """
+ import time, json, os
+
+ class SelfImprover:
+     def __init__(self, memory_store, state_path="savant_state.json"):
+         self.mem = memory_store
+         self.state_path = state_path
+         # params controlling simple heuristics (toy)
+         self.params = {"resonance_scale": 1.0, "music_influence": 0.5, "mapping_weight": 1.0}
+         self.history = []
+         if os.path.exists(self.state_path):
+             try:
+                 with open(self.state_path, "r", encoding="utf-8") as f:
+                     st = json.load(f)
+                 self.params.update(st.get("params", {}))
+                 self.history = st.get("history", [])
+             except Exception:
+                 pass
+
+     def propose(self):
+         # produce a small proposal delta based on recent memory
+         tail = self.mem.tail(50)
+         counts = {}
+         for e in tail:
+             counts[e.get("type", "unknown")] = counts.get(e.get("type", "unknown"), 0) + 1
+         # heuristic: if there are many resonance records, increase resonance_scale slightly
+         delta = {}
+         delta["resonance_scale"] = 0.01 * counts.get("resonance", 0)
+         delta["music_influence"] = 0.005 * counts.get("music", 0)
+         delta["mapping_weight"] = 0.002 * counts.get("map", 0)
+         # heuristic based on chat interactions
+         delta["chat_influence"] = 0.001 * counts.get("chat_interaction", 0)
+         proposal = {"ts": time.time(), "delta": delta}
+         return proposal
+
+     def evaluate_and_apply(self, proposal):
+         # apply temporarily, simulate a metric (toy), keep the delta if the metric improves
+         old_params = self.params.copy()
+         # apply delta
+         for k, v in proposal["delta"].items():
+             self.params[k] = self.params.get(k, 0.0) + v
+         # toy metric: prefer a larger resonance_scale but penalize extreme values
+         metric = self._metric(self.params)
+         old_metric = self._metric(old_params)
+         accepted = False
+         if metric >= old_metric:
+             accepted = True
+             self.history.append({"proposal": proposal, "result_metric": metric, "accepted": True})
+         else:
+             self.params = old_params
+             self.history.append({"proposal": proposal, "result_metric": metric, "accepted": False})
+         self._save_state()
+         return accepted, metric
+
+     def _metric(self, params):
+         # toy metric: resonance_scale raises the score, music_influence and
+         # chat_influence give smaller bonuses, and an oversized mapping_weight is penalized
+         score = params.get("resonance_scale", 1.0) * 1.0 + 0.5 * params.get("music_influence", 0.0) + 0.2 * params.get("chat_influence", 0.0)
+         mw = params.get("mapping_weight", 1.0)
+         if mw > 2.0:
+             score -= (mw - 2.0) * 0.5
+         return score
+
+     def _save_state(self):
+         with open(self.state_path, "w", encoding="utf-8") as f:
+             json.dump({"params": self.params, "history": self.history[-200:]}, f, indent=2)
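
The propose/apply loop in isolation; because the toy metric is monotone in the (non-negative) deltas, proposals from a fresh state are almost always accepted:

from core.memory import MemoryStore
from core.self_improvement import SelfImprover

si = SelfImprover(MemoryStore("SAVANT_memory.jsonl"))
proposal = si.propose()                     # deltas scaled by recent memory counts
accepted, metric = si.evaluate_and_apply(proposal)
print(accepted, metric, si.params)          # state persisted to savant_state.json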
core/SAVANT_CORE/core/trainer.py ADDED
@@ -0,0 +1,165 @@
+
+ """
+ trainer.py
+ Lightweight interface for "training" the Savant engine:
+ - runs cycles: simulate, store in memory, propose improvements, apply them
+ - saves checkpoints to disk (JSON)
+ - includes functions to load several data formats
+ """
+ import os
+ import json
+ import time
+ import pickle
+
+ from .memory import MemoryStore
+ from .resonance import ResonanceSimulator
+ from .self_improvement import SelfImprover
+
+ class SimpleTrainer:
+     def __init__(self, mem_path="SAVANT_memory.jsonl", checkpoint_dir="checkpoints"):
+         self.mem = MemoryStore(mem_path)
+         self.res = ResonanceSimulator()
+         self.si = SelfImprover(self.mem)
+         self.checkpoint_dir = checkpoint_dir
+         os.makedirs(self.checkpoint_dir, exist_ok=True)
+
+     def run_cycle(self, stimulus, epoch=1):
+         # simulate resonance
+         r = self.res.simulate(stimulus)
+         self.mem.add({"type": "resonance", "query": stimulus, "result": r["summary"], "epoch": epoch})
+         # propose improvement
+         proposal = self.si.propose()
+         accepted, metric = self.si.evaluate_and_apply(proposal)
+         self.mem.add({"type": "improvement", "proposal": proposal, "accepted": accepted, "metric": metric, "epoch": epoch})
+         # save checkpoint
+         ckpt = {"epoch": epoch, "stimulus": stimulus, "res": r["summary"], "si_params": self.si.params}
+         p = os.path.join(self.checkpoint_dir, f"ckpt_epoch_{epoch}.json")
+         with open(p, "w", encoding="utf-8") as f:
+             json.dump(ckpt, f, indent=2)
+         return ckpt
+
+     def run_epochs(self, conversations, epochs=3):
+         """
+         Runs training cycles using conversation entries as stimuli.
+
+         Args:
+             conversations (list): A list of conversation entries (dictionaries).
+             epochs (int): Number of epochs to run.
+         """
+         results = []
+         if not conversations:
+             print("No conversations provided for training.")
+             return results
+
+         # Extract a stimulus from each conversation entry. Entries are assumed
+         # to be dicts like {"role": "user", "content": "...", ...}; we use the
+         # 'content' of any entry that has a non-empty 'content' key.
+         stimuli_list = [entry.get("content", "") for entry in conversations if entry.get("content")]
+
+         if not stimuli_list:
+             print("No valid stimuli extracted from conversations.")
+             return results
+
+         print(f"Starting training for {epochs} epochs using {len(stimuli_list)} conversation entries as stimuli.")
+
+         for e in range(1, epochs + 1):
+             # cycle through the stimuli list across epochs
+             stimulus_for_this_epoch = stimuli_list[(e - 1) % len(stimuli_list)]
+             print(f"Epoch {e}/{epochs}: Using stimulus '{stimulus_for_this_epoch[:50]}...'")
+             res = self.run_cycle(stimulus_for_this_epoch, epoch=e)
+             results.append(res)
+             # optional: add a small delay
+             # time.sleep(0.1)
+
+         return results
+
+     def load_pkl_data(self, file_path):
+         """Loads data from a pickle file."""
+         try:
+             with open(file_path, "rb") as f:
+                 data = pickle.load(f)
+             print(f"Successfully loaded pickle data from {file_path}")
+             return data
+         except FileNotFoundError:
+             print(f"Error: Pickle file not found at {file_path}")
+             return None
+         except pickle.UnpicklingError:
+             print(f"Error: Could not unpickle data from {file_path}. File might be corrupted.")
+             return None
+         except Exception as e:
+             print(f"An unexpected error occurred while loading pickle data: {e}")
+             return None
+
+     def load_json_data(self, file_path):
+         """Loads data from a JSON file."""
+         try:
+             with open(file_path, "r", encoding="utf-8") as f:
+                 data = json.load(f)
+             print(f"Successfully loaded JSON data from {file_path}")
+             return data
+         except FileNotFoundError:
+             print(f"Error: JSON file not found at {file_path}")
+             return None
+         except json.JSONDecodeError as e:
+             print(f"Error decoding JSON from {file_path}: {e}")
+             return None
+         except Exception as e:
+             print(f"An unexpected error occurred while loading JSON data: {e}")
+             return None
+
+     def load_jsonl_data(self, file_path):
+         """Loads data from a JSON Lines file."""
+         data_list = []
+         try:
+             with open(file_path, "r", encoding="utf-8") as f:
+                 for line in f:
+                     try:
+                         data_list.append(json.loads(line))
+                     except json.JSONDecodeError as e:
+                         print(f"Error decoding JSON on line in {file_path}: {line.strip()} - {e}")
+                         continue
+             print(f"Successfully loaded {len(data_list)} entries from JSONL file {file_path}")
+             return data_list
+         except FileNotFoundError:
+             print(f"Error: JSONL file not found at {file_path}")
+             return []
+         except Exception as e:
+             print(f"An unexpected error occurred while loading JSONL data: {e}")
+             return []
+
+     def combine_conversational_data(self, file_paths):
+         """Loads and combines conversational data from a list of JSONL/JSON/PKL files."""
+         all_conversations = []
+         for file_path in file_paths:
+             if file_path.lower().endswith('.jsonl'):
+                 print(f"Attempting to load conversations from {file_path}...")
+                 data = self.load_jsonl_data(file_path)
+                 # each JSONL line is assumed to be one entry dictionary
+                 if isinstance(data, list):
+                     all_conversations.extend(data)
+                 else:
+                     print(f"Warning: Data from {file_path} was not a list as expected for conversational data.")
+             elif file_path.lower().endswith('.json'):
+                 print(f"Attempting to load conversations from JSON file {file_path}...")
+                 data = self.load_json_data(file_path)
+                 # a JSON file may hold a plain list or a dict with a 'conversations' key
+                 if isinstance(data, dict) and "conversations" in data and isinstance(data["conversations"], list):
+                     all_conversations.extend(data["conversations"])
+                 elif isinstance(data, list):
+                     all_conversations.extend(data)
+                 else:
+                     print(f"Warning: Data from {file_path} was not in expected JSON conversational format (list or dict with 'conversations' key).")
+             elif file_path.lower().endswith('.pkl'):
+                 print(f"Attempting to load conversations from Pickle file {file_path}...")
+                 data = self.load_pkl_data(file_path)
+                 if isinstance(data, dict) and "conversations" in data and isinstance(data["conversations"], list):
+                     all_conversations.extend(data["conversations"])
+                 elif isinstance(data, list):
+                     all_conversations.extend(data)
+                 else:
+                     print(f"Warning: Data from {file_path} was not in expected Pickle conversational format (list or dict with 'conversations' key).")
+             else:
+                 print(f"Skipping unsupported file type for conversational data: {file_path}")
+
+         print(f"Combined total {len(all_conversations)} conversation entries from provided files.")
+         return all_conversations
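
End-to-end sketch of a short run; the dataset path is illustrative, any JSONL of {"role": ..., "content": ...} entries works, and each epoch writes checkpoints/ckpt_epoch_N.json as shown earlier:

from core.trainer import SimpleTrainer

trainer = SimpleTrainer()
convs = trainer.combine_conversational_data(["datasets/example.jsonl"])  # illustrative path
trainer.run_epochs(convs, epochs=3)
print(trainer.si.params)  # heuristic weights after the run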
core/SAVANT_CORE/requirements.txt ADDED
@@ -0,0 +1,9 @@
+
+ # lightweight requirements for SAVANT_CORE
+ sentence-transformers
+ transformers
+ gradio
+ faiss-cpu
+ scipy
+ numpy
+ gitpython
core/SAVANT_CORE/run_demo.py ADDED
@@ -0,0 +1,84 @@
+
+ """
+ run_demo.py
+ Console demo for exercising the SAVANT core.
+ """
+ import json
+ from transformers import pipeline
+ from core.engine import SavantEngine
+ from core.trainer import SimpleTrainer
+
+
+ def main():
+     engine = SavantEngine()
+     trainer = SimpleTrainer()
+     print("SAVANT Simbiótico RRF - demo")
+     print("Type 'exit' to quit. Commands: propose, apply, tail, train_memory")
+     # small base model for text generation (light)
+     try:
+         gen = pipeline("text-generation", model="distilgpt2")
+     except Exception:
+         gen = None
+
+     # data file paths, stored as a JSON array inside a string literal
+     data_files_str = r"""["/content/drive/MyDrive/ColabNotebooks/Takeout/AI/colab/Savantaut_dataset.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_backup/backup_20250823_220832/savant_full_history.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/data/new_nodes_data.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/data/lang_9.json", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/dialog_multinode_full.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_2025.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_multinode_final.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_prompts_master.json", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_rrf_dialogo_simbiotico.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_rrf_pipeline_advanced.jsonl"]"""
+
+     while True:
+         q = input("You> ").strip()
+         if not q:
+             continue
+         if q.lower() in ("exit", "quit", "salir"):
+             break
+         if q.lower().startswith("propose"):
+             print(engine.propose_improvement())
+             continue
+         if q.lower().startswith("apply"):
+             p = engine.propose_improvement()
+             print("Applying:", p)
+             print(engine.apply_improvement(p))
+             continue
+         if q.lower().startswith("tail"):
+             print(engine.memory.tail(10))
+             continue
+         # command handler for training with memory data
+         if q.lower().startswith("train_memory"):
+             print("Loading and combining conversational data from specified files...")
+             conversations = []
+             try:
+                 print(f"Attempting to load file list from string: {data_files_str}")
+                 file_list = json.loads(data_files_str)
+                 print(f"Successfully loaded file list: {file_list}")
+                 conversations = trainer.combine_conversational_data(file_list)
+             except Exception as e:
+                 print(f"Error loading file list or combining data: {e}")
+                 # conversations stays empty; fall through to the check below
+
+             if conversations:
+                 print(f"Loaded and combined {len(conversations)} conversation entries.")
+                 print("Starting training using loaded memory data for 5 epochs...")
+                 training_results = trainer.run_epochs(conversations, epochs=5)
+                 print("Training done using memory data.")
+                 # optional: print a summary of the results returned by run_epochs
+                 # print("Training results summary:", training_results)
+             else:
+                 print("No conversational data loaded or combined. Training skipped.")
+             continue
+
+         # normal chat: produce base output from gen if available
+         if gen:
+             try:
+                 base_out = gen(q, max_length=80, do_sample=True)[0].get("generated_text")
+             except Exception as ex:
+                 base_out = f"Error generating text: {repr(ex)}"
+         else:
+             base_out = "Echo: " + q
+
+         out = engine.handle_query(q, base_model_output=base_out)
+         if out["type"] == "chat":
+             print("SAVANT (base):", out["base"])
+             print("SAVANT (refined):", out["refined"])
+         else:
+             print("SAVANT:", out)
+
+ if __name__ == "__main__":
+     main()
core/SAVANT_CORE/run_savant.py ADDED
@@ -0,0 +1,123 @@
+
+ """
+ run_savant.py
+ New entry point script for SAVANT-RRF, demonstrating data loading and interaction.
+ """
+ import os
+ import sys
+
+ # Add the SAVANT_CORE directory to the Python path if not already there
+ SAVANT_CORE_PATH = os.path.dirname(os.path.abspath(__file__))
+ if SAVANT_CORE_PATH not in sys.path:
+     sys.path.append(SAVANT_CORE_PATH)
+
+ # Import classes from the core modules
+ try:
+     from core.engine import SavantEngine
+     from core.trainer import SimpleTrainer
+ except ImportError as e:
+     print(f"Error importing SAVANT_CORE modules: {e}")
+     print(f"Please ensure the directory '{SAVANT_CORE_PATH}' is correctly structured")
+     print("and that it contains the 'core' subfolder with the SAVANT_CORE modules.")
+     sys.exit(1)
+
+ # Module-level store for loaded data
+ loaded_conversations = []
+
+ def main():
+     global loaded_conversations  # modified by the 'load_data' command below
+     engine = SavantEngine()
+     trainer = SimpleTrainer()
+
+     print("SAVANT-RRF Entry Point")
+     print("Available Commands: load_data, train_with_data, interact, propose, apply, tail, exit")
+
+     # list of data file paths to load from
+     data_files_list = ["/content/drive/MyDrive/ColabNotebooks/Takeout/AI/colab/Savantaut_dataset.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_backup/backup_20250823_220832/savant_full_history.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/data/new_nodes_data.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/data/lang_9.json", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/dialog_multinode_full.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_2025.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_multinode_final.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_prompts_master.json", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_rrf_dialogo_simbiotico.jsonl", "/content/drive/MyDrive/ColabNotebooks/Takeout/AI/savant_datasets/datasets/savant_rrf_pipeline_advanced.jsonl", "/content/drive/MyDrive/Savant-RRF/savant_memory.pkl"]
+
+     while True:
+         q = input("Savant> ").strip()
+         if not q:
+             continue
+         if q.lower() in ("exit", "quit", "salir"):
+             break
+
+         elif q.lower() == "load_data":
+             print("Loading and combining conversational data from specified files...")
+             try:
+                 loaded_conversations = trainer.combine_conversational_data(data_files_list)
+                 if loaded_conversations:
+                     print(f"Successfully loaded and combined {len(loaded_conversations)} conversation entries.")
+                     # optional: print a sample entry
+                     # print("Sample entry:", loaded_conversations[0])
+                 else:
+                     print("No conversational data loaded or combined.")
+             except Exception as e:
+                 print(f"Error loading data: {e}")
+
+         elif q.lower() == "train_with_data":
+             if not loaded_conversations:
+                 print("No data loaded. Please run 'load_data' first.")
+             else:
+                 print(f"Starting training using loaded data ({len(loaded_conversations)} entries) for 5 epochs...")
+                 try:
+                     trainer.run_epochs(loaded_conversations, epochs=5)
+                     print("Training process initiated.")
+                     # Note: run_epochs simulates training and saves checkpoints; it does
+                     # not return a trained model object in this simplified core. The
+                     # self-improver params are updated internally via
+                     # run_epochs -> run_cycle -> evaluate_and_apply.
+                 except Exception as e:
+                     print(f"Error during training: {e}")
+
+         elif q.lower() == "interact":
+             user_input = input("User Input for SAVANT> ").strip()
+             if user_input:
+                 print("Processing interaction...")
+                 # Simulate a base model output; in a real setup this would come from a
+                 # transformers pipeline, e.g. gen(user_input, ...)[0]["generated_text"]
+                 simulated_base_output = f"Simulated base output for: {user_input}"
+
+                 out = engine.handle_query(user_input, base_model_output=simulated_base_output)
+                 if out["type"] == "chat":
+                     print("SAVANT (base):", out.get("base"))
+                     print("SAVANT (refined):", out.get("refined"))
+                 else:
+                     print("SAVANT:", out)
+             else:
+                 print("No input provided for interaction.")
+
+         elif q.lower() == "propose":
+             print("Proposing self-improvement...")
+             try:
+                 proposal = engine.propose_improvement()
+                 print(proposal)
+             except Exception as e:
+                 print(f"Error proposing improvement: {e}")
+
+         elif q.lower() == "apply":
+             print("Applying proposed improvement...")
+             try:
+                 # propose and apply immediately for this demo
+                 proposal_to_apply = engine.propose_improvement()
+                 if proposal_to_apply:
+                     accepted, metric = engine.apply_improvement(proposal_to_apply)
+                     print(f"Proposal applied. Accepted: {accepted}, New Metric: {metric}")
+                 else:
+                     print("No proposal generated.")
+             except Exception as e:
+                 print(f"Error applying improvement: {e}")
+
+         elif q.lower() == "tail":
+             print("Showing last 10 memory entries:")
+             try:
+                 memory_tail = engine.memory.tail(10)
+                 for entry in memory_tail:
+                     print(entry)
+             except Exception as e:
+                 print(f"Error retrieving memory tail: {e}")
+
+         else:
+             print(f"Unknown command: '{q}'")
+             print("Available Commands: load_data, train_with_data, interact, propose, apply, tail, exit")
+
+ if __name__ == "__main__":
+     main()
core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "_num_labels": 1,
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "dtype": "float32",
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 6,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "transformers_version": "4.56.1",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.56.1"
+ }
core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a233a88c8c7e599f2219a16318eb72420d601c7bd6ef6b2166027ccfdf555e0
+ size 327657928
core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10562a4e2ae010923b95988d53a9a392a592ab0de5d199d0263eb91158e080e2
+ size 655362763
core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d356c5157bb58dcc9636c3d1cfe74e32a22522d75982719d472930006e5eaca
+ size 14455
core/SAVANT_CORE/savant_finetuned_model/checkpoint-3/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1883b05d1aea2d7680e95c8580838f2a05b0260e5fc08597f62232cb8732629
+ size 1465
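
The config above describes a 6-layer, 768-dim GPT-2 LM head (distilgpt2-sized). A sketch for loading the checkpoint locally with transformers, assuming the LFS weights have been pulled; since no tokenizer files appear in this checkpoint listing, the distilgpt2 tokenizer is used as a stand-in:

from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "core/SAVANT_CORE/savant_finetuned_model/checkpoint-3"
model = AutoModelForCausalLM.from_pretrained(ckpt)
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")  # assumption: checkpoint ships no tokenizer
ids = tokenizer("HOLA", return_tensors="pt").input_ids
print(tokenizer.decode(model.generate(ids, max_length=30, do_sample=True)[0]))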