async def retrieve_context(self, query: str, max_tokens: Optional[int] = None) -> List[Dict[str, Any]]:
    """Intelligently retrieve context relevant to the query."""
    if max_tokens is None:
        max_tokens = self.max_context_tokens

    start_time = datetime.now()

    query_embedding = self.embedding_manager.get_embedding(query)

    retrieved_memories = []

    # Working memory: the most recent turns, always considered.
    retrieved_memories.extend(self._retrieve_from_working_memory())

    # Semantic retrieval from the recent and long-term layers.
    recent_memories = await self._retrieve_semantic_memories(query_embedding, 'recent')
    retrieved_memories.extend(recent_memories)

    long_term_memories = await self._retrieve_semantic_memories(query_embedding, 'long_term')
    retrieved_memories.extend(long_term_memories)

    # Core memories are matched against the raw query text, not the embedding.
    core_memories = self._retrieve_core_memories(query)
    retrieved_memories.extend(core_memories)

    # Deduplicate, then rank by relevance, recency, and importance.
    unique_memories = self._deduplicate_memories(retrieved_memories)
    prioritized_memories = self._prioritize_memories(unique_memories, query_embedding)

    # Greedily fill the token budget with the highest-priority memories.
    final_context = []
    total_tokens = 0

    for memory in prioritized_memories:
        memory_tokens = memory['node'].tokens if 'node' in memory else 50

        if total_tokens + memory_tokens <= max_tokens:
            final_context.append(memory)
            total_tokens += memory_tokens
        else:
            break

    self.stats['retrieved_memories'] += len(final_context)

    retrieval_time = (datetime.now() - start_time).total_seconds()
    logger.info(f"Retrieved {len(final_context)} memories in {retrieval_time:.2f}s")

    return final_context
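
# `_prioritize_memories` is called above but not defined in this excerpt. A
# minimal sketch of a plausible implementation, assuming each memory dict
# carries the 'relevance', 'recency', and 'importance' keys that
# _retrieve_semantic_memories (below) populates; the weights are illustrative,
# not taken from the source:
def _prioritize_memories_sketch(self, memories: List[Dict[str, Any]],
                                query_embedding: np.ndarray) -> List[Dict[str, Any]]:
    # query_embedding could be used for finer re-scoring; unused in this sketch.
    def score(memory: Dict[str, Any]) -> float:
        # Weighted blend of semantic relevance, layer recency, and importance.
        return (0.5 * memory.get('relevance', 0.0)
                + 0.3 * memory.get('recency', 0.0)
                + 0.2 * memory.get('importance', 0.0))

    return sorted(memories, key=score, reverse=True)
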
async def get_context_for_api(self, query: Optional[str] = None) -> List[Dict[str, Any]]:
    """Build the context payload to send to the API."""
    if query:
        retrieved = await self.retrieve_context(query)

        api_messages = []

        # Lead with a system message describing the user profile.
        api_messages.append({
            'role': 'system',
            'content': f"User profile: {self._format_user_profile()}"
        })

        for memory in retrieved:
            node = memory['node']
            api_messages.append({
                'role': node.role,
                'content': node.content
            })

        return api_messages
    else:
        # No query given: fall back to the last six turns of working memory.
        api_messages = []

        for node in list(self.memory_layers['working'])[-6:]:
            api_messages.append({
                'role': node.role,
                'content': node.content
            })

        return api_messages
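
# `_deduplicate_memories`, used in retrieve_context above, is also not shown
# in this excerpt. A minimal sketch, assuming node ids are unique and stable:
def _deduplicate_memories_sketch(self, memories: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    seen_ids = set()
    unique = []
    for memory in memories:
        node = memory.get('node')
        # Fall back to object identity for entries that carry no node.
        key = node.id if node is not None else id(memory)
        if key not in seen_ids:
            seen_ids.add(key)
            unique.append(memory)
    return unique
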
async def _process_user_request(update: Update, context: ContextTypes.DEFAULT_TYPE):
    chat_id = update.effective_chat.id
    user_message = update.message.text
    user_id = update.effective_user.id

    start_time = time.time()

    try:
        await context.bot.send_chat_action(chat_id=chat_id, action="typing")

        if HAS_SMART_CONTEXT:
            smart_context = _get_or_create_smart_context(user_id)

            # Ingest the user message into the memory layers first, so it can
            # participate in retrieval on later turns.
            await smart_context.process_message("user", user_message)

            # get_context_for_api performs retrieval internally, so no separate
            # retrieve_context call is needed here.
            messages = await smart_context.get_context_for_api(user_message)

            logger.info(f"Smart context: {len(messages)} messages retrieved for user {user_id}")
        else:
            # Fallback: flat per-user context without semantic retrieval.
            user_context = data_manager.get_context_for_api(user_id)
            data_manager.add_to_user_context(user_id, "user", user_message)
            messages = user_context.copy()
            messages.append({"role": "user", "content": user_message})

        response = await client.chat.completions.create(
            model="mlabonne/gemma-3-27b-it-abliterated:featherless-ai",
            messages=messages,
            temperature=1.0,
            top_p=0.95,
            stream=False,
        )

        end_time = time.time()
        response_time = end_time - start_time
        data_manager.update_response_stats(response_time)

        ai_response = response.choices[0].message.content

        # Record the assistant turn in whichever context store is active.
        if HAS_SMART_CONTEXT:
            await smart_context.process_message("assistant", ai_response)
        else:
            data_manager.add_to_user_context(user_id, "assistant", ai_response)

        await update.message.reply_text(ai_response)
        data_manager.update_user_stats(user_id, update.effective_user)

    except httpx.TimeoutException:
        logger.warning(f"Request timed out for user {user_id}.")
        # "The AI server took too long to respond. Please try again."
        await update.message.reply_text("⏱️ ارتباط با سرور هوش مصنوعی طولانی شد. لطفاً دوباره تلاش کنید.")
    except Exception as e:
        logger.error(f"Error while processing message for user {user_id}: {e}")
        # "Something went wrong while processing your request. Please try again."
        await update.message.reply_text("❌ متاسفانه در پردازش درخواست شما مشکلی پیش آمد. لطفاً دوباره تلاش کنید.")
async def _retrieve_semantic_memories(self, query_embedding: np.ndarray,
                                      layer: str) -> List[Dict[str, Any]]:
    """Retrieve semantically similar memories from one memory layer."""
    memories = []

    if layer not in self.memory_layers:
        return memories

    layer_memories = self.memory_layers[layer]

    for item in layer_memories:
        # A layer may hold bare nodes or dicts wrapping a node.
        if hasattr(item, 'embeddings'):
            node = item
        elif isinstance(item, dict):
            node = item.get('node')
        else:
            node = None

        if node and node.embeddings is not None:
            similarity = self.embedding_manager.cosine_similarity(
                query_embedding, node.embeddings
            )

            if similarity > self.semantic_similarity_threshold:
                # Working memory counts as fully recent; older layers less so.
                recency_weight = 1.0 if layer == 'working' else 0.7

                memories.append({
                    'node': node,
                    'source': layer,
                    'relevance': similarity,
                    'recency': recency_weight,
                    'importance': node.importance_score
                })

    return memories
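
# `EmbeddingManager.cosine_similarity`, used above, is not shown in this
# excerpt. The standard formulation it presumably matches:
def cosine_similarity_sketch(a: np.ndarray, b: np.ndarray) -> float:
    denom = float(np.linalg.norm(a) * np.linalg.norm(b))
    # Guard against zero vectors, which would otherwise divide by zero.
    return float(np.dot(a, b) / denom) if denom > 0.0 else 0.0
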
async def process_message(self, role: str, content: str) -> Dict[str, Any]:
    """Fully process a new message."""
    start_time = datetime.now()

    # Analyze the message for type, importance, emotion, topics, and intent.
    analysis = self.analyzer.analyze_message(content, role)

    message_id = self._generate_message_id(content)

    # Start embedding generation in the background while the node is built.
    embedding_task = asyncio.create_task(
        self._get_embedding_async(content)
    )

    node = MessageNode(
        id=message_id,
        content=content,
        role=role,
        timestamp=datetime.now(),
        message_type=analysis['type'],
        importance_score=analysis['importance'],
        emotion_score=analysis['emotion'],
        tokens=data_manager.count_tokens(content),
        embeddings=None,
        metadata={
            'analysis': analysis,
            'topics': analysis['topics'],
            'intent': analysis['intent'],
            'complexity': analysis['complexity']
        }
    )

    try:
        node.embeddings = await asyncio.wait_for(embedding_task, timeout=2.0)
    except asyncio.TimeoutError:
        logger.warning(f"Embedding generation timeout for message {message_id}")
        # Fall back to a synchronous call; note this blocks the event loop.
        node.embeddings = self.embedding_manager.get_embedding(content)

    # Store the node and wire it into the memory graph off the event loop.
    await asyncio.to_thread(self._add_to_memory_layers, node, analysis)
    await asyncio.to_thread(self.memory_graph.add_node, node)

    await asyncio.to_thread(self._create_memory_connections, node)

    if role == 'user':
        await asyncio.to_thread(self._update_user_profile, content, analysis)

    await asyncio.to_thread(self._optimize_memory)

    # Update the running average of importance incrementally.
    self.stats['total_messages'] += 1
    self.stats['average_importance'] = (
        self.stats['average_importance'] * (self.stats['total_messages'] - 1) +
        analysis['importance']
    ) / self.stats['total_messages']

    await asyncio.to_thread(self._save_data)

    processing_time = (datetime.now() - start_time).total_seconds()
    logger.info(f"Processed message {message_id} in {processing_time:.2f}s, importance: {analysis['importance']:.2f}")

    return {
        'node_id': message_id,
        'analysis': analysis,
        'processing_time': processing_time
    }
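
import hashlib  # assumed available; used only by the sketch below

# `_generate_message_id` is called above but not shown in this excerpt. A
# hypothetical sketch: a short content hash plus a millisecond timestamp keeps
# ids unique even when the same content is sent twice.
def _generate_message_id_sketch(self, content: str) -> str:
    digest = hashlib.sha256(content.encode('utf-8')).hexdigest()[:12]
    return f"msg_{digest}_{int(datetime.now().timestamp() * 1000)}"
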
async def _get_embedding_async(self, text: str) -> np.ndarray:
    """Fetch an embedding without blocking the event loop."""
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        self.embedding_manager.get_embedding,
        text
    )
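
# Note: elsewhere in this file the same off-loop execution is written as
# `await asyncio.to_thread(fn, arg)`, which wraps run_in_executor on the
# default executor; the two spellings are equivalent here.
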
@admin_only
async def admin_smart_context_stats(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Show smart-context statistics for a given user."""
    if not HAS_SMART_CONTEXT:
        # "The smart context system is not enabled."
        await update.message.reply_text("⚠️ سیستم context هوشمند فعال نیست.")
        return

    if not context.args:
        # "Please provide a user id. Example: /smart_stats 123456789"
        await update.message.reply_text("⚠️ لطفاً آیدی کاربر را وارد کنید.\nمثال: `/smart_stats 123456789`")
        return

    try:
        user_id = int(context.args[0])
    except ValueError:
        # Guard added: int() would otherwise raise on non-numeric input.
        # "The user id must be a number."
        await update.message.reply_text("⚠️ آیدی کاربر باید عدد باشد.")
        return

    if user_id not in smart_context_managers:
        smart_context_managers[user_id] = IntelligentContextManager(user_id)

    smart_context = smart_context_managers[user_id]
    summary = await asyncio.to_thread(smart_context.get_summary)
    # The excerpt ends here; presumably the summary is then sent back to the
    # admin, e.g. via update.message.reply_text(...).
async def retrieve_context(self, query: str, max_tokens: Optional[int] = None) -> List[Dict[str, Any]]:
    """Intelligently retrieve context relevant to the query (fault-tolerant, parallel variant)."""
    try:
        if max_tokens is None:
            max_tokens = self.max_context_tokens

        start_time = datetime.now()

        # Compute the query embedding with a timeout, falling back to a
        # synchronous call if the background task takes too long.
        try:
            embedding_task = asyncio.create_task(
                self._get_embedding_async(query)
            )
            query_embedding = await asyncio.wait_for(embedding_task, timeout=3.0)
        except asyncio.TimeoutError:
            logger.warning(f"Embedding timeout for query: {query[:50]}")
            query_embedding = self.embedding_manager.get_embedding(query)

        # Query all memory layers concurrently.
        tasks = []

        tasks.append(asyncio.create_task(
            asyncio.to_thread(self._retrieve_from_working_memory)
        ))

        tasks.append(self._retrieve_semantic_memories(query_embedding, 'recent'))
        tasks.append(self._retrieve_semantic_memories(query_embedding, 'long_term'))

        tasks.append(asyncio.create_task(
            asyncio.to_thread(self._retrieve_core_memories, query)
        ))

        results = await asyncio.gather(*tasks, return_exceptions=True)

        # A failed layer is logged and skipped rather than failing the request.
        retrieved_memories = []
        for result in results:
            if isinstance(result, Exception):
                logger.error(f"Error retrieving memory: {result}")
                continue
            retrieved_memories.extend(result)

        # The excerpt omits the remainder of the pipeline; completed here to
        # match the earlier version of this method: deduplicate, prioritize,
        # and fill the token budget.
        unique_memories = self._deduplicate_memories(retrieved_memories)
        prioritized_memories = self._prioritize_memories(unique_memories, query_embedding)

        final_context = []
        total_tokens = 0
        for memory in prioritized_memories:
            memory_tokens = memory['node'].tokens if 'node' in memory else 50
            if total_tokens + memory_tokens <= max_tokens:
                final_context.append(memory)
                total_tokens += memory_tokens
            else:
                break

        self.stats['retrieved_memories'] += len(final_context)
        retrieval_time = (datetime.now() - start_time).total_seconds()
        logger.info(f"Retrieved {len(final_context)} memories in {retrieval_time:.2f}s")
        return final_context

    except Exception as e:
        logger.error(f"Error in retrieve_context: {e}")
        # Last-resort fallback: serve only the working-memory turns.
        return self._retrieve_from_working_memory()
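
# A usage sketch (hypothetical ids and strings) showing the intended call
# pattern: ingest a turn, then retrieve a token-budgeted context for a query.
async def _example_retrieval_flow() -> None:
    manager = IntelligentContextManager(user_id=123456789)
    await manager.process_message("user", "What did we decide about the API?")
    memories = await manager.retrieve_context("API decision", max_tokens=1024)
    logger.info(f"Example flow retrieved {len(memories)} memories")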