|
|
import re
|
|
|
import json
|
|
|
import time
|
|
|
import torch
|
|
|
import logging
|
|
|
from threading import Lock
|
|
|
from config import app_config, load_config
|
|
|
from model_manager import safe_get_config_value
|
|
|
from typing import Dict, List, Optional, Union, Any, Tuple
|
|
|
|
|
|
from typing import TYPE_CHECKING
|
|
|
if TYPE_CHECKING:
|
|
|
from model_manager import ModelManager
|
|
|
|
|
|
|
|
|
from service_registry import registry, MODEL, TOKENIZER, MODEL_MANAGER, COMMUNICATOR
|
|
|
|
|
|
|
|
|
from utils.sentence_transformer_utils import get_sentence_transformer
|
|
|
from utils.output_formatter import OutputFormatter
|
|
|
from sklearn.metrics.pairwise import cosine_similarity
|
|
|
|
|
|
from base_interfaces.common_types import *
|
|
|
from base_interfaces.communicator_interface import AbstractCommunicator
|
|
|
|
|
|
from utils.smartHybridAttention import get_hybrid_attention_config
|
|
|
|
|
|
|
|
|
# Configure the module logger before the optional SNN imports below, which emit a
# warning when snntorch/STDP support is missing.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

try:
|
|
|
from snntorch._neurons.lapicque import LIF
|
|
|
from snntorch import spikegen
|
|
|
from snntorch._neurons import Synaptic
|
|
|
from communicator_STDP import Communicator_STDP
|
|
|
SNNTORCH_AVAILABLE = True
|
|
|
except ImportError:
|
|
|
SNNTORCH_AVAILABLE = False
|
|
|
logger.warning("SNN/STDP functionality not available - some features will be disabled")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
import psutil
|
|
|
PSUTIL_AVAILABLE = True
|
|
|
except ImportError:
|
|
|
logger.warning("psutil not available - cannot monitor system resources")
|
|
|
PSUTIL_AVAILABLE = False
|
|
|
|
|
|
class DummyProcess:
|
|
|
def __init__(self, pid=None):
|
|
|
self.pid = pid or 1
|
|
|
|
|
|
def memory_info(self):
|
|
|
class MemInfo:
|
|
|
def __init__(self):
|
|
|
self.rss = 1000000
|
|
|
self.vms = 1000000
|
|
|
return MemInfo()
|
|
|
def memory_percent(self):
|
|
|
return 1.0
|
|
|
|
|
|
class DummyPsutil:
|
|
|
@staticmethod
|
|
|
def Process(pid=None):
|
|
|
return DummyProcess(pid)
|
|
|
# Fall back to the stub only when the real psutil is unavailable.
if not PSUTIL_AVAILABLE:
    psutil = DummyPsutil()
|
|
|
|
|
|
|
|
|
class Communicator(AbstractCommunicator):
|
|
|
def __init__(self, models: Dict[str, torch.nn.Module] = None, model_manager=None):
|
|
|
"""Initialize the Communicator with a model manager and necessary components."""
|
|
|
self.lock = Lock()
|
|
|
self.config = load_config()
|
|
|
self.similarity_threshold = app_config.SIMILARITY_THRESHOLD
|
|
|
self.top_k = app_config.TOP_K
|
|
|
self.conversation_history = []
|
|
|
self.shared_layers = [
|
|
|
'encoder.layer.0',
|
|
|
'encoder.layer.1',
|
|
|
'embeddings'
|
|
|
]
|
|
|
|
|
|
|
|
|
self._init_model_manager(model_manager)
|
|
|
|
|
|
|
|
|
self.output_formatter = OutputFormatter()
|
|
|
self.embedding_model = get_sentence_transformer("Wildnerve-tlm01-0.05Bx12")
|
|
|
|
|
|
|
|
|
self._init_models_and_embeddings()
|
|
|
|
|
|
|
|
|
self._init_snn_components()
|
|
|
|
|
|
|
|
|
self.attention_config = get_hybrid_attention_config()
|
|
|
|
|
|
|
|
|
if hasattr(app_config, 'TRANSFORMER_CONFIG') and hasattr(app_config.TRANSFORMER_CONFIG, 'ATTENTION_MECHANISM'):
|
|
|
attn_mech = app_config.TRANSFORMER_CONFIG.ATTENTION_MECHANISM
|
|
|
if isinstance(attn_mech, dict):
|
|
|
for key, value in attn_mech.items():
|
|
|
if key in self.attention_config:
|
|
|
self.attention_config[key] = value
|
|
|
|
|
|
|
|
|
self.tokenizer = self._init_tokenizer()
|
|
|
|
|
|
logger.info("Communicator initialized successfully")
|
|
|
|
|
|
def _init_tokenizer(self):
|
|
|
"""Initialize the tokenizer with proper error handling"""
|
|
|
try:
|
|
|
if registry.has(TOKENIZER):
|
|
|
return registry.get(TOKENIZER)
|
|
|
from transformers import AutoTokenizer
|
|
|
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
|
|
|
logger.info("Tokenizer initialized in communicator")
|
|
|
registry.register(TOKENIZER, tokenizer)
|
|
|
return tokenizer
|
|
|
except Exception as e:
|
|
|
logger.error(f"Tokenizer initialization failed: {e}")
|
|
|
return None
|
|
|
|
|
|
def _init_model_manager(self, model_manager):
|
|
|
"""Helper method to initialize model manager"""
|
|
|
if model_manager is None:
|
|
|
|
|
|
from model_manager import ModelManager
|
|
|
|
|
|
try:
|
|
|
max_active_models = getattr(app_config, 'MAX_ACTIVE_MODELS', 5)
|
|
|
self.model_manager = ModelManager(max_active_models=max_active_models)
|
|
|
logger.info(f"Created ModelManager with max_active_models={max_active_models}")
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error creating ModelManager: {e}")
|
|
|
self.model_manager = None
|
|
|
else:
|
|
|
self.model_manager = model_manager
|
|
|
|
|
|
def _init_models_and_embeddings(self):
|
|
|
"""Initialize models and compute embeddings for specializations"""
|
|
|
|
|
|
        # Reuse the embedding model loaded in __init__ rather than loading it a second time.
        if getattr(self, 'embedding_model', None) is None:
            self.embedding_model = get_sentence_transformer("Wildnerve-tlm01-0.05Bx12")
|
|
|
self.models = self.model_manager.get_available_models() if self.model_manager else {}
|
|
|
if not self.models:
|
|
|
logger.warning("No models available in model manager")
|
|
|
|
|
|
|
|
|
self.specialization_embeddings = {}
|
|
|
|
|
|
if self.model_manager:
|
|
|
|
|
|
specializations = []
|
|
|
if hasattr(self.model_manager, 'models'):
|
|
|
specializations = list(self.model_manager.models.keys())
|
|
|
elif hasattr(self.model_manager, 'get_available_models'):
|
|
|
specializations = list(self.model_manager.get_available_models().keys())
|
|
|
|
|
|
for spec in specializations:
|
|
|
self.specialization_embeddings[spec] = self.embedding_model.encode(spec, convert_to_numpy=True)
|
|
|
|
|
|
|
|
|
self.weight_sharing_groups = self.create_weight_sharing_groups(self.similarity_threshold)
|
|
|
logger.info("Computed weight sharing groups: %s", self.weight_sharing_groups)
|
|
|
|
|
|
def _init_snn_components(self):
|
|
|
"""Initialize SNN/STDP components if enabled"""
|
|
|
|
|
|
use_snn = self._get_config_value('STDP_CONFIG', 'USE_SNN', False)
|
|
|
|
|
|
if use_snn:
|
|
|
|
|
|
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
|
|
|
|
|
|
|
|
alpha = self._get_config_value('STDP_CONFIG', 'ALPHA', 0.1)
|
|
|
beta = self._get_config_value('STDP_CONFIG', 'BETA', 0.2)
|
|
|
spike_threshold = self._get_config_value('STDP_CONFIG', 'SpikeThreshold', 0.5)
|
|
|
|
|
|
|
|
|
self.synapse_weights = Synaptic(alpha=alpha, beta=beta)
|
|
|
self.spike_threshold = spike_threshold
|
|
|
self.spike_generator = spikegen.rate
|
|
|
self.beta = beta
|
|
|
self.snn_layer = LIF(beta=self.beta)
|
|
|
self.snn_comm = Communicator_STDP(self.models, device=self.device)
|
|
|
self.mem = torch.zeros(1, 1)
|
|
|
self.spk = torch.zeros(1, 1)
|
|
|
logger.info("SNN/STDP components initialized successfully")
|
|
|
else:
|
|
|
self.device = None
|
|
|
self.snn_comm = None
|
|
|
logger.info("SNN/STDP components not enabled")
|
|
|
|
|
|
def _get_config_value(self, config_name, attribute, default=None):
|
|
|
"""Safely retrieve configuration values handling both dict and object access"""
|
|
|
if not hasattr(app_config, config_name):
|
|
|
return default
|
|
|
|
|
|
config_obj = getattr(app_config, config_name)
|
|
|
|
|
|
if isinstance(config_obj, dict):
|
|
|
return config_obj.get(attribute, default)
|
|
|
else:
|
|
|
return getattr(config_obj, attribute, default)
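
    # Example (hypothetical config objects; both forms resolve identically):
    #
    #     comm._get_config_value('STDP_CONFIG', 'BETA', 0.2)
    #     # dict-style:      app_config.STDP_CONFIG = {"BETA": 0.15}   -> 0.15
    #     # attribute-style: app_config.STDP_CONFIG.BETA = 0.15        -> 0.15
    #     # missing key or attribute                                   -> 0.2 (default)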
|
|
|
|
|
|
def create_weight_sharing_groups(self, similarity_threshold: float) -> Dict[str, set]:
|
|
|
"""Computes cosine similarities among specialization embeddings and groups
|
|
|
specializations that exceed the similarity threshold to enable weight sharing. Returns:
|
|
|
Dictionary of groups: {specialization: [other_specializations_exceeding_threshold]}"""
|
|
|
groups = {}
|
|
|
for spec1, emb1 in self.specialization_embeddings.items():
|
|
|
for spec2, emb2 in self.specialization_embeddings.items():
|
|
|
if spec1 != spec2:
|
|
|
|
|
|
similarity = cosine_similarity(
|
|
|
emb1.reshape(1, -1),
|
|
|
emb2.reshape(1, -1)
|
|
|
)[0][0]
|
|
|
|
|
|
if similarity > similarity_threshold:
|
|
|
if spec1 not in groups:
|
|
|
groups[spec1] = set()
|
|
|
groups[spec1].add(spec2)
|
|
|
return groups
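
    # Illustrative result (scores are hypothetical): with specializations "math", "physics"
    # and "poetry" and a threshold of 0.6, the groups might look like
    #
    #     {"math": {"physics"}, "physics": {"math"}}
    #
    # i.e. the relation is symmetric, and "poetry" is omitted because no pair exceeded 0.6.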
|
|
|
|
|
|
def share_weights(self):
|
|
|
"""Share weights between models based on their computed similarity groups."""
|
|
|
with self.lock:
|
|
|
for primary_spec, related_specs in self.weight_sharing_groups.items():
|
|
|
primary_model = self.model_manager.get_model(primary_spec)
|
|
|
if not primary_model:
|
|
|
continue
|
|
|
|
|
|
|
|
|
for related_spec in related_specs:
|
|
|
related_model = self.model_manager.get_model(related_spec)
|
|
|
if not related_model:
|
|
|
continue
|
|
|
|
|
|
|
|
|
for p_layer, r_layer in zip(primary_model.parameters(), related_model.parameters()):
|
|
|
r_layer.data.copy_(p_layer.data)
|
|
|
logger.info("Completed weight sharing across model groups")
|
|
|
|
|
|
def process_with_snn(self, input_tensor: torch.Tensor) -> torch.Tensor:
|
|
|
"""Process input through SNN components if enabled."""
|
|
|
|
|
|
if not hasattr(self, 'snn_comm') or self.snn_comm is None:
|
|
|
return input_tensor
|
|
|
|
|
|
|
|
|
self.reset_snn_state()
|
|
|
|
|
|
|
|
|
if input_tensor.dim() == 1:
|
|
|
input_tensor = input_tensor.unsqueeze(0)
|
|
|
|
|
|
try:
|
|
|
|
|
|
if hasattr(self, 'snn_comm') and self.snn_comm is not None:
|
|
|
|
|
|
return self.snn_comm.process_input(input_tensor)
|
|
|
else:
|
|
|
|
|
|
spikes = self.spike_generator(input_tensor, num_steps=1)
|
|
|
|
|
|
|
|
|
syn_out_result = self.synapse_weights(spikes)
|
|
|
syn_out = syn_out_result[0] if isinstance(syn_out_result, tuple) else syn_out_result
|
|
|
|
|
|
|
|
|
batch_size = syn_out.shape[0]
|
|
|
if self.mem.shape[0] != batch_size:
|
|
|
self.mem = torch.zeros(batch_size, syn_out.shape[1], device=syn_out.device)
|
|
|
|
|
|
|
|
|
mem_next = self.beta * self.mem + syn_out
|
|
|
spk_next = (mem_next > self.spike_threshold).float()
|
|
|
self.mem = mem_next * (1 - spk_next)
|
|
|
self.spk = spk_next
|
|
|
|
|
|
return self.spk
|
|
|
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error in SNN processing: {e}", exc_info=True)
|
|
|
return input_tensor
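
    # The manual spiking path above follows a simple leaky integrate-and-fire recurrence
    # (a sketch of the update, not snntorch's exact internals):
    #
    #     mem[t+1] = beta * mem[t] + syn[t]            # leaky integration of synaptic current
    #     spk[t+1] = 1 if mem[t+1] > threshold else 0  # emit a spike above threshold
    #     mem[t+1] = mem[t+1] * (1 - spk[t+1])         # reset membrane where a spike fired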
|
|
|
def reset_snn_state(self):
|
|
|
"""Reset the SNN neuron states"""
|
|
|
if hasattr(self, 'mem'):
|
|
|
self.mem = torch.zeros_like(self.mem)
|
|
|
if hasattr(self, 'spk'):
|
|
|
self.spk = torch.zeros_like(self.spk)
|
|
|
|
|
|
def route_input(self, input_text: str, query: Optional[str] = None) -> List[tuple]:
|
|
|
"""Route input to most relevant specializations, returning top-k matches. Returns:
|
|
|
List of (specialization, similarity_score) tuples"""
|
|
|
with self.lock:
|
|
|
text_to_analyze = query if query else input_text
|
|
|
|
|
|
if not self.specialization_embeddings:
|
|
|
logger.warning("No specialization embeddings available for routing")
|
|
|
return [("default", 1.0)]
|
|
|
|
|
|
try:
|
|
|
|
|
|
text_embedding = self.embedding_model.encode(text_to_analyze, convert_to_numpy=True)
|
|
|
|
|
|
use_snn = self._get_config_value('STDP_CONFIG', 'USE_SNN', False)
|
|
|
|
|
|
if use_snn:
|
|
|
text_embedding = torch.from_numpy(text_embedding).float()
|
|
|
text_embedding = self.process_with_snn(text_embedding)
|
|
|
text_embedding = text_embedding.detach().numpy()
|
|
|
|
|
|
|
|
|
text_embedding = text_embedding.reshape(1, -1)
|
|
|
similarities = {}
|
|
|
|
|
|
for spec, spec_embedding in self.specialization_embeddings.items():
|
|
|
spec_embedding = spec_embedding.reshape(1, -1)
|
|
|
similarity = cosine_similarity(text_embedding, spec_embedding)[0][0]
|
|
|
similarities[spec] = float(similarity)
|
|
|
|
|
|
|
|
|
sorted_specs = sorted(similarities.items(), key=lambda x: x[1], reverse=True)
|
|
|
top_k_specs = sorted_specs[:self.top_k]
|
|
|
|
|
|
logger.debug("Routing similarities: %s", similarities)
|
|
|
logger.info("Selected top %d specializations: %s", self.top_k, top_k_specs)
|
|
|
|
|
|
|
|
|
                # Heuristic: prefer sliding-window attention for long prompts. The 512-token
                # default below is an assumed fallback for configs that omit WINDOW_SIZE.
                prompt_length = len(input_text.split())
                use_sliding_window = prompt_length > self.attention_config.get('WINDOW_SIZE', 512) // 2

                if use_sliding_window:
                    logger.info(f"Using sliding window attention for long input (length: {prompt_length})")

                return top_k_specs if top_k_specs else [("default", 1.0)]
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error in route_input: {str(e)}")
|
|
|
return [("default", 1.0)]
|
|
|
|
|
|
def process_input(self, input_text: str, context: Optional[Dict] = None) -> Dict[str, Any]:
|
|
|
"""Process user input through the appropriate model(s) and generate response. Returns:
|
|
|
Dictionary containing response and metadata"""
|
|
|
start_time = time.time()
|
|
|
logger.info(f"Processing input: {input_text[:50]}...")
|
|
|
try:
|
|
|
|
|
|
self.conversation_history.append({"role": "user", "content": input_text})
|
|
|
|
|
|
|
|
|
specializations = self.route_input(input_text)
|
|
|
primary_spec, confidence = specializations[0] if specializations else ("default", 0.0)
|
|
|
|
|
|
|
|
|
model = None
|
|
|
if hasattr(self.model_manager, 'get_model'):
|
|
|
model = self.model_manager.get_model(primary_spec)
|
|
|
elif primary_spec in self.models:
|
|
|
model = self.models[primary_spec]
|
|
|
|
|
|
if not model:
|
|
|
logger.warning(f"No model found for {primary_spec}, using default")
|
|
|
|
|
|
if hasattr(self.model_manager, 'get_available_models'):
|
|
|
models = self.model_manager.get_available_models()
|
|
|
if models:
|
|
|
model = next(iter(models.values()), None)
|
|
|
elif self.models:
|
|
|
model = next(iter(self.models.values()), None)
|
|
|
if not model:
|
|
|
return {
|
|
|
"response": "No models available to process your request.",
|
|
|
"specialization": "none",
|
|
|
"processing_time": time.time() - start_time
|
|
|
}
|
|
|
|
|
|
|
|
|
use_snn = self._get_config_value('STDP_CONFIG', 'USE_SNN', False)
|
|
|
|
|
|
|
|
|
model_inputs = self.prepare_model_input(input_text, model)
|
|
|
|
|
|
|
|
|
response = self.process_request(input_text, model)
|
|
|
|
|
|
|
|
|
stdp_response = None
|
|
|
if use_snn and hasattr(self, 'snn_comm') and self.snn_comm:
|
|
|
try:
|
|
|
|
|
|
stdp_response = self.snn_comm.process_request(input_text, model)
|
|
|
logger.info("STDP processing completed successfully")
|
|
|
except Exception as e:
|
|
|
logger.error(f"STDP processing failed: {e}")
|
|
|
|
|
|
|
|
|
formatted_response = None
|
|
|
if response:
|
|
|
formatted_response = self.output_formatter.format_response(response, primary_spec)
|
|
|
elif stdp_response:
|
|
|
formatted_response = self.output_formatter.format_response(stdp_response, primary_spec)
|
|
|
response = stdp_response
|
|
|
else:
|
|
|
formatted_response = "I'm having trouble generating a response."
|
|
|
|
|
|
|
|
|
self.conversation_history.append({"role": "assistant", "content": formatted_response})
|
|
|
|
|
|
|
|
|
if len(specializations) > 1:
|
|
|
self.share_weights()
|
|
|
|
|
|
processing_time = time.time() - start_time
|
|
|
|
|
|
result = {
|
|
|
"response": formatted_response,
|
|
|
"specialization": primary_spec,
|
|
|
"similarity_score": confidence,
|
|
|
"processing_time": processing_time,
|
|
|
"alternative_specializations": [s[0] for s in specializations[1:]] if len(specializations) > 1 else []
|
|
|
}
|
|
|
|
|
|
if stdp_response:
|
|
|
result["stdp_processed"] = True
|
|
|
result["parallel_response"] = stdp_response
|
|
|
return result
|
|
|
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error processing input: {str(e)}", exc_info=True)
|
|
|
return {
|
|
|
"response": f"An error occurred while processing your request: {str(e)}",
|
|
|
"error": str(e),
|
|
|
"processing_time": time.time() - start_time
|
|
|
}
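
    # End-to-end usage sketch (hypothetical; assumes a configured ModelManager with at least
    # one registered model):
    #
    #     comm = Communicator(model_manager=ModelManager(max_active_models=3))
    #     result = comm.process_input("Summarise spike-timing-dependent plasticity")
    #     # result contains: response, specialization, similarity_score, processing_time,
    #     # alternative_specializations, and stdp_processed/parallel_response when SNN is on.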
|
|
|
|
|
|
def prepare_model_input(self, text: str, model) -> Dict:
|
|
|
"""Prepare input text for model processing. Returns: Dictionary of model inputs"""
|
|
|
device = next(model.parameters()).device
|
|
|
try:
|
|
|
|
|
|
tokenizer = getattr(model, 'tokenizer', None)
|
|
|
|
|
|
if tokenizer:
|
|
|
|
|
|
inputs = tokenizer(
|
|
|
text,
|
|
|
return_tensors="pt",
|
|
|
padding=True,
|
|
|
truncation=True,
|
|
|
max_length=safe_get_config_value(app_config, "MAX_SEQ_LENGTH", 512)
|
|
|
)
|
|
|
|
|
|
input_ids = inputs["input_ids"].to(device)
|
|
|
|
|
|
return {
|
|
|
"input_ids": input_ids,
|
|
|
"max_length": app_config.MAX_SEQ_LENGTH,
|
|
|
"device": device,
|
|
|
"temperature": getattr(self, 'generation_config', {}).get('temperature', 0.7)
|
|
|
}
|
|
|
else:
|
|
|
|
|
|
logger.warning("Model has no tokenizer attribute, using basic input")
|
|
|
return {
|
|
|
"input_text": text,
|
|
|
"max_length": app_config.MAX_SEQ_LENGTH
|
|
|
}
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error preparing model input: {str(e)}")
|
|
|
|
|
|
return {"input_text": text}
|
|
|
|
|
|
def clear_conversation_history(self):
|
|
|
"""Clear the conversation history"""
|
|
|
self.conversation_history = []
|
|
|
|
|
|
def get_conversation_history(self) -> List[Dict]:
|
|
|
"""Get the current conversation history"""
|
|
|
return self.conversation_history.copy()
|
|
|
|
|
|
def process_request(self, prompt: str, model: Any) -> str:
|
|
|
"""Process a user request through the selected model"""
|
|
|
try:
|
|
|
logger.info(f"Processing request with model")
|
|
|
|
|
|
|
|
|
if not self.tokenizer:
|
|
|
self.tokenizer = self._init_tokenizer()
|
|
|
|
|
|
|
|
|
inputs = self.tokenizer(
|
|
|
prompt,
|
|
|
return_tensors="pt",
|
|
|
truncation=True,
|
|
|
max_length=128
|
|
|
)
|
|
|
|
|
|
with torch.no_grad():
|
|
|
try:
|
|
|
|
|
|
if hasattr(model, 'generate_with_decoding'):
|
|
|
|
|
|
return model.generate_with_decoding(
|
|
|
inputs["input_ids"],
|
|
|
max_length=256,
|
|
|
temperature=0.7
|
|
|
)
|
|
|
elif hasattr(model, 'generate'):
|
|
|
|
|
|
import inspect
|
|
|
sig_params = inspect.signature(model.generate).parameters
|
|
|
generate_kwargs = {'input_ids': inputs["input_ids"]}
|
|
|
|
|
|
|
|
|
if 'max_length' in sig_params:
|
|
|
generate_kwargs['max_length'] = 256
|
|
|
|
|
|
if 'temperature' in sig_params:
|
|
|
generate_kwargs['temperature'] = 0.7
|
|
|
|
|
|
|
|
|
outputs = model.generate(**generate_kwargs)
|
|
|
|
|
|
|
|
|
response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
|
|
|
|
|
|
return response.strip()
|
|
|
except Exception as e:
|
|
|
logger.warning(f"Error in model.generate: {e}")
|
|
|
|
|
|
|
|
|
if "shape" in str(e):
|
|
|
|
|
|
shape_match = re.search(r'shape \'\[(.*?)\]\'', str(e))
|
|
|
if shape_match:
|
|
|
|
|
|
logger.info("Detected shape error, trying alternative model inference methods")
|
|
|
|
|
|
|
|
|
alternative_response = self._get_response_from_alternative_model(prompt)
|
|
|
if alternative_response:
|
|
|
return alternative_response
|
|
|
|
|
|
|
|
|
topic, subtopics = self._analyze_prompt_for_topics(prompt)
|
|
|
logger.info(f"Detected topic: {topic}, subtopics: {subtopics}")
|
|
|
|
|
|
return self._get_topic_response(topic, prompt, subtopics)
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
|
outputs = model(inputs["input_ids"])
|
|
|
|
|
|
|
|
|
if isinstance(outputs, dict) and "logits" in outputs:
|
|
|
logits = outputs["logits"]
|
|
|
|
|
|
response = self._generate_response_from_logits(logits, prompt)
|
|
|
if response:
|
|
|
return response
|
|
|
elif isinstance(outputs, torch.Tensor) and outputs.dim() >= 2:
|
|
|
|
|
|
response = self._generate_response_from_tensor(outputs, prompt)
|
|
|
if response:
|
|
|
return response
|
|
|
except Exception as fw_error:
|
|
|
logger.error(f"Forward pass error: {fw_error}")
|
|
|
|
|
|
|
|
|
return self._get_fallback_response(prompt)
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error in process_request: {e}")
|
|
|
return "I encountered an error processing your request. Could you try asking your question differently?"
|
|
|
|
|
|
def _get_response_from_alternative_model(self, prompt: str) -> Optional[str]:
|
|
|
"""Try to get a response using a different model from the model manager"""
|
|
|
try:
|
|
|
if not self.model_manager:
|
|
|
return None
|
|
|
|
|
|
specializations = self.route_input(prompt)
|
|
|
|
|
|
for spec, _ in specializations[1:]:
|
|
|
alt_model = self.model_manager.get_model(spec)
|
|
|
if alt_model:
|
|
|
logger.info(f"Trying alternative model for specialization: {spec}")
|
|
|
try:
|
|
|
|
|
|
if hasattr(alt_model, 'tokenizer'):
|
|
|
tokenizer = alt_model.tokenizer
|
|
|
else:
|
|
|
tokenizer = self.tokenizer
|
|
|
inputs = tokenizer(
|
|
|
prompt,
|
|
|
return_tensors="pt",
|
|
|
truncation=True,
|
|
|
max_length=128
|
|
|
)
|
|
|
|
|
|
if hasattr(alt_model, 'generate_with_decoding'):
|
|
|
response = alt_model.generate_with_decoding(
|
|
|
inputs["input_ids"],
|
|
|
max_length=256,
|
|
|
temperature=0.7
|
|
|
)
|
|
|
if response and isinstance(response, str) and len(response) > 10:
|
|
|
return response
|
|
|
except Exception as alt_error:
|
|
|
logger.warning(f"Alternative model {spec} also failed: {alt_error}")
|
|
|
continue
|
|
|
return None
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error getting response from alternative model: {e}")
|
|
|
return None
|
|
|
    def _analyze_prompt_for_topics(self, prompt: str) -> Tuple[str, List[str]]:
|
|
|
"""Analyze prompt to dynamically determine the topic and subtopics"""
|
|
|
|
|
|
primary_topic = "general"
|
|
|
subtopics = []
|
|
|
try:
|
|
|
|
|
|
if hasattr(self, 'embedding_model'):
|
|
|
|
|
|
candidate_topics = [
|
|
|
"programming", "math", "science", "history", "art",
|
|
|
"literature", "music", "politics", "economics", "philosophy",
|
|
|
"technology", "health", "sports", "entertainment", "education",
|
|
|
"business", "psychology", "sociology", "linguistics", "physics",
|
|
|
"chemistry", "biology", "medicine", "engineering", "computer science",
|
|
|
"artificial intelligence", "data science", "web development", "finance",
|
|
|
"law", "ethics", "religion", "geography", "astronomy", "environment"
|
|
|
]
|
|
|
|
|
|
prompt_embedding = self.embedding_model.encode(prompt, convert_to_numpy=True)
|
|
|
|
|
|
|
|
|
topic_embeddings = {
|
|
|
topic: self.embedding_model.encode(f"This text is about {topic}.", convert_to_numpy=True)
|
|
|
for topic in candidate_topics
|
|
|
}
|
|
|
|
|
|
similarities = {
|
|
|
topic: float(cosine_similarity(
|
|
|
prompt_embedding.reshape(1, -1),
|
|
|
emb.reshape(1, -1)
|
|
|
)[0][0])
|
|
|
for topic, emb in topic_embeddings.items()
|
|
|
}
|
|
|
|
|
|
sorted_topics = sorted(similarities.items(), key=lambda x: x[1], reverse=True)
|
|
|
|
|
|
if sorted_topics:
|
|
|
primary_topic = sorted_topics[0][0]
|
|
|
|
|
|
threshold = sorted_topics[0][1] * 0.8
|
|
|
subtopics = [topic for topic, score in sorted_topics[1:6] if score > threshold]
|
|
|
|
|
|
|
|
|
if primary_topic == "general" or not subtopics:
|
|
|
|
|
|
topic_keywords = {
|
|
|
"programming": ["code", "programming", "python", "java", "javascript", "function", "algorithm", "developer", "software"],
|
|
|
"math": ["math", "mathematics", "algebra", "calculus", "equation", "geometry", "statistics", "theorem"],
|
|
|
"science": ["science", "physics", "chemistry", "biology", "scientific", "experiment", "theory"],
|
|
|
"history": ["history", "historical", "ancient", "century", "civilization", "war", "empire"],
|
|
|
"technology": ["technology", "tech", "computer", "digital", "internet", "device", "hardware", "software"],
|
|
|
"ai": ["ai", "artificial intelligence", "machine learning", "neural network", "deep learning", "nlp", "algorithm"],
|
|
|
"health": ["health", "medical", "medicine", "disease", "treatment", "doctor", "patient", "healthcare"],
|
|
|
"business": ["business", "company", "market", "industry", "finance", "economic", "management", "strategy"],
|
|
|
"general": []
|
|
|
}
|
|
|
|
|
|
words = re.findall(r'\b[a-zA-Z]{3,}\b', prompt.lower())
|
|
|
|
|
|
|
|
|
topic_scores = {topic: 0 for topic in topic_keywords.keys()}
|
|
|
for word in words:
|
|
|
for topic, keywords in topic_keywords.items():
|
|
|
if word in keywords or any(keyword in word for keyword in keywords):
|
|
|
topic_scores[topic] += 1
|
|
|
|
|
|
|
|
|
sorted_topics = sorted(topic_scores.items(), key=lambda x: x[1], reverse=True)
|
|
|
if sorted_topics[0][1] > 0:
|
|
|
primary_topic = sorted_topics[0][0]
|
|
|
|
|
|
                    subtopics = [topic for topic, score in sorted_topics[1:4] if score > 0]
|
|
|
|
|
|
|
|
|
if not subtopics:
|
|
|
|
|
|
related_topics = {
|
|
|
"programming": ["software development", "algorithms", "data structures"],
|
|
|
"math": ["algebra", "geometry", "statistics"],
|
|
|
"science": ["physics", "chemistry", "biology"],
|
|
|
"history": ["ancient history", "modern history", "world wars"],
|
|
|
"technology": ["computers", "internet", "gadgets"],
|
|
|
"ai": ["machine learning", "neural networks", "natural language processing"],
|
|
|
"health": ["medicine", "wellness", "nutrition"],
|
|
|
"business": ["economics", "finance", "management"]
|
|
|
}
|
|
|
subtopics = related_topics.get(primary_topic, ["information", "knowledge", "details"])
|
|
|
|
|
|
return primary_topic, subtopics
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error analyzing prompt for topics: {e}")
|
|
|
return "general", ["information"]
|
|
|
def _generate_response_from_logits(self, logits: torch.Tensor, prompt: str) -> Optional[str]:
|
|
|
"""Generate a coherent response from model output logits"""
|
|
|
try:
|
|
|
|
|
|
if logits.dim() >= 2:
|
|
|
|
|
|
last_logits = logits[:, -1, :] if logits.dim() > 2 else logits
|
|
|
|
|
|
|
|
|
top_k = min(5, last_logits.size(-1))
|
|
|
top_values, top_indices = torch.topk(last_logits, top_k, dim=-1)
|
|
|
|
|
|
|
|
|
if hasattr(self, 'tokenizer') and self.tokenizer is not None:
|
|
|
top_tokens = [self.tokenizer.decode([idx.item()]) for idx in top_indices[0]]
|
|
|
|
|
|
|
|
|
topic_tokens = [token for token in top_tokens if len(token) > 1 and not token.startswith('[')]
|
|
|
if topic_tokens:
|
|
|
|
|
|
topic = self._extract_topic_from_prompt(prompt)
|
|
|
context = ", ".join(topic_tokens[:3])
|
|
|
return f"Based on my understanding of {topic}, the key concepts include {context}. Would you like more specific information about any of these aspects?"
|
|
|
return None
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error generating response from logits: {e}")
|
|
|
return None
|
|
|
def _generate_response_from_tensor(self, tensor: torch.Tensor, prompt: str) -> Optional[str]:
|
|
|
"""Generate a response from a tensor output"""
|
|
|
try:
|
|
|
|
|
|
if tensor.dim() >= 2:
|
|
|
|
|
|
if tensor.dim() == 3:
|
|
|
features = tensor[0, -1, :]
|
|
|
else:
|
|
|
features = tensor[0, :]
|
|
|
|
|
|
|
|
|
|
|
|
topic = self._extract_topic_from_prompt(prompt)
|
|
|
|
|
|
|
|
|
if features.numel() < 10:
|
|
|
values = [f"{val:.2f}" for val in features[:5].tolist()]
|
|
|
value_str = ", ".join(values)
|
|
|
return f"I analyzed your question about {topic}. My analysis indicates values of {value_str}, which suggests this topic involves multiple factors."
|
|
|
else:
|
|
|
|
|
|
shape_str = "x".join(str(dim) for dim in tensor.size())
|
|
|
return f"I analyzed your question about {topic}. This is a complex topic with many dimensions (tensor shape: {shape_str}). Could you specify which aspect you'd like me to focus on?"
|
|
|
return None
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error generating response from tensor: {e}")
|
|
|
return None
|
|
|
def _extract_topic_from_prompt(self, prompt: str) -> str:
|
|
|
"""Extract a topic phrase from the prompt"""
|
|
|
|
|
|
words = prompt.strip().split()
|
|
|
|
|
|
if not words:
|
|
|
return "this topic"
|
|
|
|
|
|
|
|
|
if words[0].lower() in ['what', 'how', 'why', 'when', 'where', 'who', 'which']:
|
|
|
|
|
|
|
|
|
if len(words) > 1:
|
|
|
if words[1].lower() in ['is', 'are', 'was', 'were', 'will', 'did', 'does', 'do']:
|
|
|
if len(words) > 2:
|
|
|
return ' '.join(words[2:min(5, len(words))])
|
|
|
return words[1]
|
|
|
return ' '.join(words[1:min(4, len(words))])
|
|
|
|
|
|
return ' '.join(words[:min(3, len(words))])
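
    # Examples of the heuristic (punctuation is kept as-is):
    #
    #     "What is backpropagation used for?"  -> "backpropagation used for?"
    #     "Explain the French Revolution"      -> "Explain the French"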
|
|
|
|
|
|
def _extract_subject(self, text: str) -> str:
|
|
|
"""Extract the primary subject from a text prompt
|
|
|
This method uses basic NLP techniques to identify the main
|
|
|
subject or topic of a text, which can be used for routing to specialized models."""
|
|
|
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
text = text.lower()
|
|
|
|
|
|
|
|
|
subject_keywords = {
|
|
|
"programming": ["code", "program", "programming", "function", "algorithm", "software", "developer"],
|
|
|
"mathematics": ["math", "equation", "calculation", "formula", "number", "geometry"],
|
|
|
"science": ["science", "physics", "chemistry", "biology", "scientific"],
|
|
|
"history": ["history", "historical", "past", "ancient", "century"]
|
|
|
}
|
|
|
|
|
|
subject_scores = {}
|
|
|
for subject, keywords in subject_keywords.items():
|
|
|
score = sum(1 for keyword in keywords if keyword in text)
|
|
|
if score > 0:
|
|
|
subject_scores[subject] = score
|
|
|
|
|
|
|
|
|
if subject_scores:
|
|
|
return max(subject_scores.items(), key=lambda x: x[1])[0]
|
|
|
return ""
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error extracting subject: {e}")
|
|
|
return ""
|
|
|
|
|
|
|
|
|
def add_to_conversation_history(self, role: str, content: str, metadata: Optional[Dict] = None):
|
|
|
"""Add an entry to conversation history with optional metadata"""
|
|
|
entry = {
|
|
|
"role": role,
|
|
|
"content": content,
|
|
|
"timestamp": time.time()
|
|
|
}
|
|
|
if metadata:
|
|
|
entry["metadata"] = metadata
|
|
|
self.conversation_history.append(entry)
|
|
|
|
|
|
max_history = getattr(app_config, "MAX_CONVERSATION_HISTORY", 10)
|
|
|
if len(self.conversation_history) > max_history:
|
|
|
self.conversation_history = self.conversation_history[-max_history:]
|
|
|
|
|
|
def get_conversation_context(self, window_size: int = 3) -> str:
|
|
|
"""Get recent conversation context formatted as a single string"""
|
|
|
if not self.conversation_history:
|
|
|
return ""
|
|
|
|
|
|
|
|
|
recent_history = self.conversation_history[-window_size*2:]
|
|
|
|
|
|
|
|
|
context_parts = []
|
|
|
for entry in recent_history:
|
|
|
role_prefix = "User: " if entry["role"] == "user" else "Assistant: "
|
|
|
context_parts.append(f"{role_prefix}{entry['content']}")
|
|
|
|
|
|
return "\n".join(context_parts)
|
|
|
|
|
|
def process_with_context(self, input_text: str, context: Optional[Dict] = None) -> Dict[str, Any]:
|
|
|
"""Process input with conversation context for better continuity"""
|
|
|
|
|
|
conversation_context = self.get_conversation_context(window_size=3)
|
|
|
|
|
|
|
|
|
contextualized_prompt = input_text
|
|
|
if conversation_context:
|
|
|
|
|
|
|
|
|
|
|
|
max_seq_length = getattr(app_config, 'MAX_SEQ_LENGTH', 512)
|
|
|
if isinstance(max_seq_length, dict):
|
|
|
max_seq_length = 512
|
|
|
logger.warning(f"MAX_SEQ_LENGTH is a dictionary, using default: {max_seq_length}")
|
|
|
elif not isinstance(max_seq_length, (int, float)):
|
|
|
max_seq_length = 512
|
|
|
logger.warning(f"MAX_SEQ_LENGTH is not a number, using default: {max_seq_length}")
|
|
|
else:
|
|
|
max_seq_length = int(max_seq_length)
|
|
|
|
|
|
            # Keep the prepended context within roughly half of the sequence budget
            # (a coarse word-level cap; exact token truncation happens later in the tokenizer).
            max_context_length = max_seq_length // 2
            context_words = conversation_context.split()
            if len(context_words) > max_context_length:
                conversation_context = " ".join(context_words[-max_context_length:])

            contextualized_prompt = f"Previous conversation:\n{conversation_context}\n\nCurrent question: {input_text}"
|
|
|
|
|
|
|
|
|
result = self.process_input(contextualized_prompt, context)
|
|
|
|
|
|
|
|
|
if isinstance(result, dict):
|
|
|
result["original_query"] = input_text
|
|
|
return result
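
    # The contextualised prompt handed to process_input() looks like (sketch):
    #
    #     Previous conversation:
    #     User: ...
    #     Assistant: ...
    #
    #     Current question: <input_text>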
|
|
|
|
|
|
def _get_fallback_response(self, prompt: str) -> str:
|
|
|
"""Get a fallback response when primary model processing fails"""
|
|
|
try:
|
|
|
|
|
|
topic, subtopics = self._analyze_prompt_for_topics(prompt)
|
|
|
|
|
|
if hasattr(self, 'model_manager') and self.model_manager:
|
|
|
|
|
|
|
|
|
if hasattr(self.model_manager, 'get_alternative_model_for_prompt'):
|
|
|
alt_model = self.model_manager.get_alternative_model_for_prompt(prompt)
|
|
|
if alt_model:
|
|
|
logger.info(f"Using alternative model for fallback response")
|
|
|
try:
|
|
|
inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=128)
|
|
|
if hasattr(alt_model, 'generate_with_decoding'):
|
|
|
response = alt_model.generate_with_decoding(
|
|
|
inputs["input_ids"],
|
|
|
max_length=256,
|
|
|
temperature=0.7
|
|
|
)
|
|
|
if response and isinstance(response, str) and len(response) > 10:
|
|
|
return response
|
|
|
except Exception as alt_error:
|
|
|
logger.warning(f"Alternative model also failed: {alt_error}")
|
|
|
|
|
|
|
|
|
try:
|
|
|
available_models = self.model_manager.get_available_models()
|
|
|
for spec_name, model in available_models.items():
|
|
|
if spec_name != topic:
|
|
|
logger.info(f"Trying model from specialization: {spec_name}")
|
|
|
inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=128)
|
|
|
if hasattr(model, 'generate_with_decoding'):
|
|
|
response = model.generate_with_decoding(
|
|
|
inputs["input_ids"],
|
|
|
max_length=256,
|
|
|
temperature=0.9
|
|
|
)
|
|
|
if response and isinstance(response, str) and len(response) > 10:
|
|
|
return response
|
|
|
except Exception as e:
|
|
|
logger.warning(f"Failed to use alternative models: {e}")
|
|
|
|
|
|
|
|
|
return self._build_dynamic_response(topic, prompt, subtopics)
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error getting fallback response: {e}")
|
|
|
|
|
|
return "I'm having trouble understanding that request. Could you rephrase it or try asking something else?"
|
|
|
|
|
|
def _build_dynamic_response(self, topic: str, prompt: str, subtopics: List[str] = None) -> str:
|
|
|
"""Build a dynamic response based on topic analysis without hardcoded templates"""
|
|
|
try:
|
|
|
|
|
|
subject = self._extract_subject(prompt)
|
|
|
|
|
|
subtopics = subtopics or []
|
|
|
|
|
|
topic_str = subject if subject else topic
|
|
|
|
|
|
|
|
|
meta_prompt = f"""
|
|
|
Topic: {topic_str}
|
|
|
Related areas: {', '.join(subtopics[:3]) if subtopics else 'various fields'}
|
|
|
Request: {prompt}
|
|
|
|
|
|
Create a brief response that acknowledges the topic but asks for clarification.
|
|
|
Do not provide specific information about the topic, just acknowledge understanding and ask for more details."""
|
|
|
|
|
|
try:
|
|
|
if hasattr(self, 'model_manager') and self.model_manager:
|
|
|
|
|
|
models = self.model_manager.get_available_models()
|
|
|
if models:
|
|
|
model = next(iter(models.values()))
|
|
|
inputs = self.tokenizer(meta_prompt, return_tensors="pt", truncation=True, max_length=256)
|
|
|
meta_response = model.generate_with_decoding(
|
|
|
inputs["input_ids"],
|
|
|
max_length=256,
|
|
|
temperature=0.7
|
|
|
)
|
|
|
if meta_response and len(meta_response) > 20:
|
|
|
return meta_response
|
|
|
except Exception as e:
|
|
|
logger.warning(f"Meta-generation failed: {e}")
|
|
|
|
|
|
|
|
|
subtopic_str = ", ".join(subtopics[:3]) if subtopics else "related areas"
|
|
|
|
|
|
return f"""I understand you're asking about {topic_str}. This relates to {subtopic_str}.
|
|
|
To provide a helpful response, I'd need more specific details about what aspect you're interested in learning about.
|
|
|
Could you please clarify what specific information you're looking for?"""
|
|
|
except Exception as e:
|
|
|
logger.error(f"Error building dynamic response: {e}")
|
|
|
return "I need more information to help you with that topic. Could you provide more details about what you'd like to know?"
|
|
|
def _get_topic_response(self, topic: str, prompt: str, subtopics: List[str] = None) -> str:
|
|
|
"""Get a response for a specific topic using model-driven approach"""
|
|
|
return self._build_dynamic_response(topic, prompt, subtopics)
|
|
|
|
|
|
    def process_input_minimal(self, prompt, **kwargs):
        """Minimal-mode inference path: run the attached ``self.model`` directly when one is
        available, otherwise fall back to a knowledge-based reply."""
|
|
|
|
|
|
        # ``model``/``tokenizer`` are only present when a minimal-mode initializer attached them.
        if getattr(self, 'model', None) is not None and not getattr(self.model, '_is_minimal', False) and self.tokenizer:
|
|
|
try:
|
|
|
logger.info("Attempting model inference with actual model")
|
|
|
inputs = self.tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
|
|
|
|
|
|
|
|
|
max_inference_time = 30
|
|
|
start_time = time.time()
|
|
|
|
|
|
if hasattr(self.model, "generate_with_decoding"):
|
|
|
response = self.model.generate_with_decoding(inputs.input_ids)
|
|
|
elif hasattr(self.model, "generate"):
|
|
|
output_ids = self.model.generate(inputs.input_ids)
|
|
|
response = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
|
|
|
else:
|
|
|
|
|
|
outputs = self.model(inputs.input_ids)
|
|
|
response = self.tokenizer.decode(torch.argmax(outputs, dim=-1)[0], skip_special_tokens=True)
|
|
|
|
|
|
elapsed_time = time.time() - start_time
|
|
|
|
|
|
if elapsed_time > max_inference_time:
|
|
|
logger.warning(f"Model inference took too long: {elapsed_time:.2f} seconds")
|
|
|
|
|
|
if response and len(response) > 10:
|
|
|
logger.info("Generated model response successfully")
|
|
|
return {"response": response, "minimal_mode": False}
|
|
|
except Exception as e:
|
|
|
logger.warning(f"Model inference failed: {e}")
|
|
|
        elif getattr(self, 'model', None) is not None and getattr(self.model, '_is_minimal', False):
|
|
|
logger.warning("Using minimal model - full model unavailable")
|
|
|
|
|
|
|
|
|
logger.debug(f"Minimal communicator processing: {prompt[:30]}...")
|
|
|
        response = self._get_knowledge_response(prompt) if hasattr(self, '_get_knowledge_response') else None
|
|
|
if response:
|
|
|
return {"response": response, "minimal_mode": True}
|
|
|
|
|
|
return {"response": f"I'm operating in minimal mode. Your query was about {prompt.split()[0] if prompt.split() else 'this topic'}...",
|
|
|
"minimal_mode": True}
|
|
|
|
|
|
|
|
|
def create_communicator(model_manager=None):
    """Build a Communicator, register it in the service registry, and return it."""
    comm = Communicator(model_manager=model_manager)
|
|
|
registry.register(COMMUNICATOR, comm)
|
|
|
return comm
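
# Typical bootstrap (hypothetical; ModelManager construction arguments are illustrative):
#
#     from model_manager import ModelManager
#     comm = create_communicator(ModelManager(max_active_models=5))
#     assert registry.get(COMMUNICATOR) is comm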
|
|
|
|
|
|
from adapter_layer import WildnerveModelAdapter


class AdapterCommunicator:
    """Thin communicator that forwards generation requests to the Wildnerve adapter layer.

    Defined under its own name so it can coexist with the full ``Communicator`` class above.
    """

    def __init__(self):
        self.adapter = WildnerveModelAdapter()

    def process_request(self, prompt: str, **kwargs):
        return self.adapter.generate(prompt, **kwargs)


# Register the adapter-backed communicator as the default COMMUNICATOR service on import.
comm = AdapterCommunicator()
registry.register(COMMUNICATOR, comm, overwrite=True)