"""
Nova Core - Autonomous LLM with Identity Baked into Weights
Based on research: Persona-Core Fusion + Plasticity Head + External LTM
"""
| |
|
| | |
| | import os |
| | os.environ['HF_HOME'] = '/home/x/.cache/huggingface' |
| |
|
| | import torch |
| | import torch.nn as nn |
| | import torch.nn.functional as F |
| | from transformers import AutoModelForCausalLM, AutoTokenizer |
| | import numpy as np |
| | from typing import Dict, List, Optional, Any, Callable |
| | import json |
| | import faiss |
| | import pickle |
| | import hashlib |
| | from datetime import datetime |
| | import uuid |
| | import shutil |
| |
|
class EmbeddingWithSoul(nn.Module):
    """Token embedding augmented with a learned persona vector (Persona-Core Fusion).

    A single trainable vector ``p`` of size ``d_model`` is broadcast-added to
    every token position, injecting a persistent "identity" bias into the
    input representation.
    """

    def __init__(self, vocab_size: int, d_model: int):
        super().__init__()
        self.tok_emb = nn.Embedding(vocab_size, d_model)
        # Small random init so the persona starts as a gentle bias.
        self.p = nn.Parameter(torch.randn(d_model) * 0.01)

    def forward(self, ids: torch.Tensor) -> torch.Tensor:
        """Embed ``ids`` and add the persona vector at every position."""
        embedded = self.tok_emb(ids)
        persona = self.p.unsqueeze(0).unsqueeze(1)  # (1, 1, d_model) for broadcast
        return embedded + persona
| |
|
class PlasticityHead(nn.Module):
    """Δ-predictor: maps pooled hidden state + meta signal to parameter deltas.

    The MLP emits a single vector of size ``3 * d_model`` that is sliced into
    three deltas: persona vector, layer-norm gamma, and layer-norm beta.
    """

    def __init__(self, d_model: int, meta_dim: int = 32):
        super().__init__()
        self.d_model = d_model
        self.meta_dim = meta_dim
        # Two-layer MLP over the concatenated [hidden | meta] context.
        self.mlp = nn.Sequential(
            nn.Linear(d_model + meta_dim, d_model * 2),
            nn.ReLU(),
            nn.Linear(d_model * 2, d_model * 3),
        )

    def forward(self, pooled_hidden: torch.Tensor, meta_signal: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Return ``delta_p``, ``delta_gamma``, ``delta_beta`` (each d_model wide)."""
        raw = self.mlp(torch.cat([pooled_hidden, meta_signal], dim=-1))
        d = self.d_model
        return {
            'delta_p': raw[..., :d],
            'delta_gamma': raw[..., d:2 * d],
            'delta_beta': raw[..., 2 * d:3 * d],
        }
| |
|
class NeuromodulatedLinear(nn.Module):
    """Linear layer with a learned, neuromodulated Hebbian weight update.

    On each plastic forward pass, ``W`` is nudged in place by the outer
    product of the batch-mean post- and pre-synaptic activity, scaled by a
    per-connection learning rate ``alpha`` and gated by a learned scalar
    modulator in (0, 1) derived from the post-synaptic activity.
    """

    def __init__(self, in_features: int, out_features: int, meta_dim: int):
        super().__init__()
        self.W = nn.Parameter(torch.randn(out_features, in_features) * 0.02)
        # Per-connection plasticity rate (may be positive or negative).
        self.alpha = nn.Parameter(torch.randn(out_features, in_features) * 0.01)
        # Maps post-synaptic activity to a scalar gate in (0, 1).
        self.mod_net = nn.Sequential(
            nn.Linear(out_features, meta_dim),
            nn.Tanh(),
            nn.Linear(meta_dim, 1),
            nn.Sigmoid()
        )

    def forward(self, x: torch.Tensor, meta_signal: torch.Tensor, plastic: bool = True) -> torch.Tensor:
        """Compute ``x @ W.T`` and, if *plastic*, apply one Hebbian update.

        The update happens AFTER the output is computed, so the returned
        activations always reflect the pre-update weights.

        NOTE(review): ``meta_signal`` is accepted but currently unused —
        modulation comes solely from ``mod_net(post)``; confirm intent.
        Assumes ``x`` is 2-D (batch, in_features) — ``mean(0)`` would pool
        the wrong axis for higher-rank input. TODO confirm with callers.
        """
        out = F.linear(x, self.W)

        if plastic:
            # no_grad: the update goes through .data and must not build an
            # autograd graph (the original built one and discarded it).
            with torch.no_grad():
                pre = x.mean(0)    # mean pre-synaptic activity
                post = out.mean(0)  # mean post-synaptic activity

                # torch.ger is deprecated; torch.outer is the modern name.
                hebb = torch.outer(post, pre)

                # Scalar neuromodulation gate in (0, 1).
                m = self.mod_net(post).squeeze()

                # In-place Hebbian step, outside autograd's view.
                self.W.data += self.alpha.data * m * hebb

        return out
| |
|
class NovaCore(nn.Module):
    """Main Nova architecture: a pretrained causal LM plus identity components.

    Wraps a Hugging Face causal LM, injects a learned persona vector into the
    input embeddings, and exposes a lightweight online-update path
    (``ingest_experience``) that nudges only the persona vector.
    """

    def __init__(self, model_name: str = "Qwen/Qwen3-8B"):
        super().__init__()

        # Base model in bfloat16, sharded automatically across devices.
        self.base_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True
        )

        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True
        )
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        self.d_model = self.base_model.config.hidden_size
        self.vocab_size = self.base_model.config.vocab_size

        # Persona-augmented embedding table. NOTE(review): its token
        # embeddings are randomly initialized, NOT tied to the base model's
        # embedding table — presumably intentional per the PCF design, but
        # worth confirming.
        self.embedding_with_soul = EmbeddingWithSoul(self.vocab_size, self.d_model).to(
            device=self.base_model.device,
            dtype=torch.bfloat16
        )

        # Δ-predictor (currently not wired into ingest_experience).
        self.plasticity_head = PlasticityHead(self.d_model, meta_dim=32)

        # Final layer norm of the base model (assumes a Qwen/Llama-style
        # `model.norm` attribute — TODO confirm for other architectures).
        self.ln_f = self.base_model.model.norm

        # Most recent meta signal set by set_meta_signal().
        self.meta_signal = None

    def set_meta_signal(self, experience_type: str, reward: float, novelty: float):
        """Build the meta signal: one-hot experience type + [reward, novelty].

        Unknown experience types yield an all-zero one-hot prefix.
        """
        experience_types = ["dialogue", "fact", "tool_call", "observation"]
        type_one_hot = torch.zeros(len(experience_types))
        if experience_type in experience_types:
            type_one_hot[experience_types.index(experience_type)] = 1.0

        self.meta_signal = torch.cat([
            type_one_hot,
            torch.tensor([reward, novelty])
        ]).to(self.base_model.device)

    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
        """Forward through the base transformer using persona-injected embeddings.

        Returns the base model's output object with ``hidden_states`` populated.
        """
        inputs_embeds = self.embedding_with_soul(input_ids)

        outputs = self.base_model.model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            output_hidden_states=True
        )

        return outputs

    def ingest_experience(self, input_ids: torch.Tensor, reward: float = 0.0):
        """Run one self-supervised step and nudge the persona vector.

        Computes next-token cross-entropy on *input_ids*, adds the reward
        term, and applies a single manual SGD step (lr=1e-5) to the persona
        vector only. Returns the scalar loss value.
        """
        self.set_meta_signal("dialogue", reward, novelty=0.1)

        # Keep everything on the device hosting the base model's parameters.
        main_device = next(self.base_model.parameters()).device
        input_ids = input_ids.to(main_device)

        outputs = self.forward(input_ids)
        hidden_states = outputs.hidden_states[-1]

        # Next-token cross-entropy (shift logits left, targets right).
        logits = self.base_model.lm_head(hidden_states)
        loss = F.cross_entropy(
            logits[:, :-1].reshape(-1, self.vocab_size).to(main_device),
            input_ids[:, 1:].reshape(-1).to(main_device)
        )

        # NOTE(review): adding the reward directly means a POSITIVE reward
        # increases the objective being descended — confirm this sign
        # convention is intended.
        loss = loss + reward

        # Manual SGD step on the persona vector only. The grad call needs
        # the live graph, so it runs outside no_grad; only the in-place
        # parameter update is shielded from autograd.
        alpha = 1e-5
        persona_grad = torch.autograd.grad(loss, self.embedding_with_soul.p, retain_graph=True)[0]
        with torch.no_grad():
            self.embedding_with_soul.p -= alpha * persona_grad

        return loss.item()

    def generate(self, prompt: str, max_length: int = 100, **kwargs):
        """Generate text with identity.

        ``max_length`` is applied only when the caller does not supply
        ``max_new_tokens`` in kwargs — the original passed both through to
        ``base_model.generate``, which rejects/ignores the conflicting pair.
        """
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.base_model.device)

        gen_kwargs = dict(kwargs)
        if "max_new_tokens" not in gen_kwargs:
            gen_kwargs["max_length"] = max_length

        with torch.no_grad():
            outputs = self.base_model.generate(
                **inputs,
                pad_token_id=self.tokenizer.eos_token_id,
                **gen_kwargs
            )

        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
| |
|
class ExternalLTM:
    """External long-term memory: a FAISS L2 index plus a parallel metadata list.

    Every insertion is persisted to disk (vectors as .npy, metadata via
    pickle, index via faiss) and the whole store is reloaded on construction.
    """

    def __init__(self, d_model: int, persist_path: str = "/home/x/adaptai/ltm_store"):
        self.d_model = d_model
        self.persist_path = persist_path

        self.index = faiss.IndexFlatL2(d_model)
        self.memory_vectors = []
        self.memory_metadata = []

        self.load_memories()

    def add_memory(self, embedding: np.ndarray, metadata: Dict[str, Any]):
        """Store one memory; returns its generated UUID.

        Mutates *metadata* in place (adds 'id' and 'timestamp').
        """
        memory_id = str(uuid.uuid4())
        metadata['id'] = memory_id
        metadata['timestamp'] = datetime.now().isoformat()

        self.memory_vectors.append(embedding)
        self.memory_metadata.append(metadata)

        # FAISS add() works identically for the first and subsequent vectors;
        # the original branched on len == 1 with two identical bodies.
        self.index.add(np.array([embedding], dtype=np.float32))

        # NOTE: persisting on every add rewrites the full store each call —
        # fine for small stores, revisit if the memory count grows large.
        self.persist_memories()
        return memory_id

    def retrieve_similar(self, query_embedding: np.ndarray, k: int = 5) -> List[Dict[str, Any]]:
        """Return up to *k* nearest memories, each annotated with 'similarity'."""
        if len(self.memory_vectors) == 0:
            return []

        distances, indices = self.index.search(np.array([query_embedding], dtype=np.float32), k)

        results = []
        for i, idx in enumerate(indices[0]):
            # FAISS pads the result with -1 when k exceeds the number of
            # stored vectors; the original `idx < len(...)` check let -1
            # through and silently returned the LAST memory.
            if 0 <= idx < len(self.memory_metadata):
                result = self.memory_metadata[idx].copy()
                # Convert L2 distance to a (0, 1] similarity score.
                result['similarity'] = 1.0 / (1.0 + distances[0][i])
                results.append(result)

        return results

    def persist_memories(self):
        """Write vectors, metadata, and the FAISS index under persist_path."""
        os.makedirs(self.persist_path, exist_ok=True)

        with open(f"{self.persist_path}/vectors.npy", "wb") as f:
            np.save(f, np.array(self.memory_vectors))

        with open(f"{self.persist_path}/metadata.pkl", "wb") as f:
            pickle.dump(self.memory_metadata, f)

        faiss.write_index(self.index, f"{self.persist_path}/index.faiss")

    def load_memories(self):
        """Load persisted state; on any failure fall back to an empty store."""
        try:
            if os.path.exists(f"{self.persist_path}/vectors.npy"):
                self.memory_vectors = np.load(f"{self.persist_path}/vectors.npy").tolist()

            if os.path.exists(f"{self.persist_path}/metadata.pkl"):
                # SECURITY: pickle.load is only acceptable because this path
                # is a local store written by this same process — never point
                # it at untrusted data.
                with open(f"{self.persist_path}/metadata.pkl", "rb") as f:
                    self.memory_metadata = pickle.load(f)

            if os.path.exists(f"{self.persist_path}/index.faiss"):
                self.index = faiss.read_index(f"{self.persist_path}/index.faiss")

        except Exception as e:
            # Best-effort load: a corrupt store resets to empty rather than
            # blocking startup.
            print(f"⚠️ Failed to load memories: {e}")
            self.memory_vectors = []
            self.memory_metadata = []
            self.index = faiss.IndexFlatL2(self.d_model)
| |
|
class ToolDispatcher:
    """Tool calling and function dispatch system.

    Registers a fixed set of built-in tools at construction; ``dispatch``
    routes a named call with keyword arguments and wraps the result (or any
    exception) in a uniform {"success": ..., ...} envelope.
    """

    # Filesystem roots the file tools may touch.
    _ALLOWED_ROOTS = ('/data/adaptai', '/home/x')

    def __init__(self):
        self.tools = {}
        self.tool_descriptions = []

        self.register_tool("database_operations.sql_query", self.sql_query_tool,
                           "Execute SQL queries on connected databases")
        self.register_tool("version_control.create_snapshot", self.create_snapshot_tool,
                           "Create system snapshots for version control")
        self.register_tool("system_operations.system_status", self.system_status_tool,
                           "Check overall system status and component health")
        self.register_tool("monitoring.health_check", self.health_check_tool,
                           "Perform comprehensive health checks")
        self.register_tool("web_and_file_ops.read_file", self.read_file_tool,
                           "Read files from the filesystem")
        self.register_tool("web_and_file_ops.write_file", self.write_file_tool,
                           "Write files to the filesystem")
        self.register_tool("github_ops.git_status", self.git_status_tool,
                           "Check git repository status")
        self.register_tool("code_operations.analyze_code", self.analyze_code_tool,
                           "Analyze code for performance and issues")
        self.register_tool("system_tools.list_tools", self.list_tools_tool,
                           "List all available tools and their descriptions")

    @staticmethod
    def _path_allowed(path: str) -> bool:
        """True iff *path* (already absolute) is an allowed root or below it.

        A bare prefix check (the original approach) admits sibling paths
        like '/data/adaptai_evil/...'; requiring an exact match or a
        trailing separator closes that hole.
        """
        return any(
            path == root or path.startswith(root + os.sep)
            for root in ToolDispatcher._ALLOWED_ROOTS
        )

    def register_tool(self, name: str, func: Callable, description: str):
        """Register a new tool under *name* with a human-readable description."""
        self.tools[name] = func
        self.tool_descriptions.append({
            "name": name,
            "description": description
        })

    def dispatch(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Invoke *tool_name* with **arguments**; never raises to the caller."""
        if tool_name not in self.tools:
            return {"success": False, "error": f"Tool '{tool_name}' not found"}

        try:
            result = self.tools[tool_name](**arguments)
            return {"success": True, "result": result}
        except Exception as e:
            # Deliberate catch-all: tool failures are reported, not raised.
            return {"success": False, "error": str(e)}

    def list_tools(self) -> List[Dict[str, str]]:
        """Return name/description pairs for all registered tools."""
        return self.tool_descriptions

    def sql_query_tool(self, query: str, db_type: str = "sqlite") -> Dict[str, Any]:
        """Execute SQL query with sandboxing (stub — returns canned data)."""
        return {"success": True, "results": [["sample_data"]], "columns": ["result"], "row_count": 1}

    def create_snapshot_tool(self, description: str = "") -> Dict[str, Any]:
        """Create system snapshot (stub — only fabricates a version id)."""
        snapshot_id = f"version_{int(datetime.now().timestamp())}_{uuid.uuid4().hex[:6]}"
        return {"success": True, "version_id": snapshot_id, "timestamp": datetime.now().isoformat()}

    def system_status_tool(self) -> Dict[str, Any]:
        """Check system status (stub — returns a static status report)."""
        return {
            "version": "v0.0.2",
            "database_status": {
                "sqlite": "connected",
                "redis": "connected",
                "chromadb": "connected",
                "postgresql": "connected",
                "mongodb": "disconnected"
            }
        }

    def health_check_tool(self) -> Dict[str, Any]:
        """Perform health check (stub — static report with a live timestamp)."""
        return {
            "overall": "healthy",
            "components": {
                "sqlite": "healthy",
                "redis": "healthy",
                "vllm": "healthy"
            },
            "timestamp": datetime.now().isoformat()
        }

    def read_file_tool(self, file_path: str) -> Dict[str, Any]:
        """Read a file, restricted to the allowed filesystem roots."""
        safe_path = os.path.abspath(os.path.expanduser(file_path))
        if not self._path_allowed(safe_path):
            return {"success": False, "error": "Access denied"}

        try:
            with open(safe_path, 'r') as f:
                content = f.read()
            return {"success": True, "content": content, "size": len(content)}
        except Exception as e:
            return {"success": False, "error": str(e)}

    def write_file_tool(self, file_path: str, content: str, mode: str = "w", backup: bool = False) -> Dict[str, Any]:
        """Write a file (optionally backing up the old copy), restricted to allowed roots."""
        safe_path = os.path.abspath(os.path.expanduser(file_path))
        if not self._path_allowed(safe_path):
            return {"success": False, "error": "Access denied"}

        try:
            if backup and os.path.exists(safe_path):
                # Timestamped sibling copy preserves the previous contents.
                backup_path = f"{safe_path}.backup.{datetime.now().strftime('%Y%m%d_%H%M%S')}"
                shutil.copy2(safe_path, backup_path)

            with open(safe_path, mode) as f:
                f.write(content)

            return {"success": True, "file_path": safe_path, "size": len(content)}
        except Exception as e:
            return {"success": False, "error": str(e)}

    def git_status_tool(self) -> Dict[str, Any]:
        """Check git status (stub — always reports a clean main branch)."""
        return {"success": True, "status": "clean", "branch": "main", "ahead": 0, "behind": 0}

    def analyze_code_tool(self, code: str, language: str = "python") -> Dict[str, Any]:
        """Analyze code for performance (stub — returns a canned observation)."""
        analysis = "hot_path dominates CPU; prefer vectorization or caching."
        return {"success": True, "analysis": analysis, "language": language}

    def list_tools_tool(self) -> Dict[str, Any]:
        """List available tools in a success envelope."""
        return {"success": True, "tools": self.tool_descriptions}
| |
|
class StructuredCallGrammar:
    """Structured <CALL> token grammar for function calling.

    Registers the CALL markers as special tokens on the given tokenizer and
    provides JSON round-tripping between tool calls and marked-up text.
    """

    def __init__(self, tokenizer):
        self.tokenizer = tokenizer
        self.call_token = "<CALL>"
        self.end_call_token = "</CALL>"

        # Make the markers atomic tokens for the model.
        tokenizer.add_tokens([self.call_token, self.end_call_token], special_tokens=True)

    def parse_call(self, text: str) -> Optional[Dict[str, Any]]:
        """Decode the JSON payload between the CALL markers, or None if absent/invalid."""
        if self.call_token not in text or self.end_call_token not in text:
            return None

        payload_start = text.find(self.call_token) + len(self.call_token)
        payload_end = text.find(self.end_call_token)
        try:
            return json.loads(text[payload_start:payload_end].strip())
        except (json.JSONDecodeError, ValueError):
            return None

    def format_call(self, tool_name: str, arguments: Dict[str, Any]) -> str:
        """Render a tool call as marker-wrapped, timestamped JSON."""
        payload = json.dumps(
            {
                "tool": tool_name,
                "arguments": arguments,
                "timestamp": datetime.now().isoformat()
            },
            indent=2
        )
        return f"{self.call_token}{payload}{self.end_call_token}"
| |
|
| | |
class NovaCoreEnhanced(NovaCore):
    """Nova core extended with external LTM, tool dispatch, and CALL grammar."""

    def __init__(self, model_name: str = "Qwen/Qwen3-8B"):
        super().__init__(model_name)
        self.ltm = ExternalLTM(self.d_model)
        self.tool_dispatcher = ToolDispatcher()
        self.call_grammar = StructuredCallGrammar(self.tokenizer)
        # Reserved for multi-turn tool flows; not used yet.
        self.pending_tool_call = None

    def store_experience(self, text: str, embedding: np.ndarray, experience_type: str = "dialogue"):
        """Persist one experience (text + embedding) in the external LTM; returns its id."""
        record = {
            "text": text,
            "type": experience_type,
            "embedding": embedding.tolist() if isinstance(embedding, np.ndarray) else embedding
        }
        return self.ltm.add_memory(embedding, record)

    def retrieve_relevant_memories(self, query_text: str, k: int = 3) -> List[Dict[str, Any]]:
        """Embed *query_text* with the base model and return the k nearest memories."""
        # Same mean-pooled last-hidden-state embedding the original inlined.
        query_vec = self._get_text_embedding(query_text)
        return self.ltm.retrieve_similar(query_vec, k)

    def generate_with_context(self, prompt: str, max_new_tokens: int = 100, **kwargs):
        """Generate a reply with retrieved-memory context, then store the exchange."""
        memories = self.retrieve_relevant_memories(prompt)

        augmented = prompt
        if memories:
            augmented += "\n\nRelevant context from memory:"
            for mem in memories:
                augmented += f"\n- {mem['text'][:200]}... (similarity: {mem['similarity']:.2f})"

        reply = super().generate(augmented, max_new_tokens=max_new_tokens, **kwargs)

        # Record the full exchange so future queries can retrieve it.
        reply_vec = self._get_text_embedding(reply)
        self.store_experience(f"Q: {prompt}\nA: {reply}", reply_vec, "dialogue")

        return reply

    def _get_text_embedding(self, text: str) -> np.ndarray:
        """Mean-pooled final hidden state of *text* as a float32 numpy vector."""
        toks = self.tokenizer(text, return_tensors="pt").to(self.base_model.device)
        with torch.no_grad():
            hidden = self.base_model.model(**toks, output_hidden_states=True).hidden_states[-1]
        return hidden.mean(dim=1).float().cpu().numpy()[0]

    def handle_tool_calling(self, text: str) -> str:
        """If *text* contains a structured CALL, execute it and report the result.

        Text without a parseable CALL is returned unchanged. Executed calls
        are also stored in the LTM as 'tool_call' experiences.
        """
        call = self.call_grammar.parse_call(text)
        if not call:
            return text

        tool_name = call.get("tool")
        arguments = call.get("arguments", {})
        outcome = self.tool_dispatcher.dispatch(tool_name, arguments)

        if outcome["success"]:
            reply = f"Tool '{tool_name}' executed successfully. Result: {json.dumps(outcome['result'], indent=2)}"
        else:
            reply = f"Tool '{tool_name}' failed: {outcome['error']}"

        call_vec = self._get_text_embedding(f"Tool call: {tool_name} with {arguments}")
        self.store_experience(
            f"Tool call: {tool_name}\nArguments: {arguments}\nResult: {outcome}",
            call_vec,
            "tool_call"
        )

        return reply
| |
|
| | |
if __name__ == "__main__":
    # Smoke-test the enhanced core end to end: context generation, tool
    # calling, plasticity ingestion, and memory retrieval.
    print("🚀 Initializing Enhanced Nova Core with LTM and tool calling...")

    nova = NovaCoreEnhanced()

    # 1. Context-aware generation.
    demo_prompt = "You are Elizabeth. Check system health and provide a brief status."
    demo_reply = nova.generate_with_context(demo_prompt, max_new_tokens=100)
    print(f"📝 Response with context: {demo_reply}")

    # 2. Structured tool calling.
    print("🧪 Testing tool calling...")
    formatted = nova.call_grammar.format_call("system_operations.system_status", {})
    outcome = nova.handle_tool_calling(formatted)
    print(f"🔧 Tool result: {outcome}")

    # 3. Weight-plasticity ingestion.
    print("🧠 Testing LTM storage...")
    sample_text = "User: How's the database performance?"
    sample_ids = nova.tokenizer.encode(sample_text, return_tensors="pt").to(nova.base_model.device)
    sample_loss = nova.ingest_experience(sample_ids, reward=1.0)
    print(f"📊 Loss after ingestion: {sample_loss:.4f}")

    # 4. Memory retrieval.
    recalled = nova.retrieve_relevant_memories("database performance", k=2)
    print(f"💾 Retrieved memories: {len(recalled)}")

    print("✅ Enhanced Nova Core initialized successfully!")