"""RAG answering pipeline: LLM/embedding/reranker factories, source rendering,
node deduplication, query normalization and the main `answer_question` entry.

NOTE(review): this file was recovered from a markup-mangled copy. The HTML tag
markup inside the string literals (presumably <div>/<a> wrappers with inline
styles) was stripped by the extraction; only the visible text content survives.
Restore the original tags from version control before shipping.
"""
import hashlib
import logging
import sys
import time

from llama_index.llms.google_genai import GoogleGenAI
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from sentence_transformers import CrossEncoder

from config import AVAILABLE_MODELS, DEFAULT_MODEL, GOOGLE_API_KEY, PROMPT_SIMPLE_POISK
from index_retriever import rerank_nodes
from my_logging import log_message


def get_llm_model(model_name):
    """Build an LLM client for *model_name* from AVAILABLE_MODELS.

    Falls back to the DEFAULT_MODEL config when the name is unknown, and to a
    hard-coded Gemini client when construction fails for any reason, so the
    caller always receives a usable LLM object.
    """
    try:
        model_config = AVAILABLE_MODELS.get(model_name)
        if not model_config:
            log_message(f"Модель {model_name} не найдена, использую модель по умолчанию")
            model_config = AVAILABLE_MODELS[DEFAULT_MODEL]
        if not model_config.get("api_key"):
            raise Exception(f"API ключ не найден для модели {model_name}")
        if model_config["provider"] == "google":
            return GoogleGenAI(
                model=model_config["model_name"],
                api_key=model_config["api_key"],
            )
        elif model_config["provider"] == "openai":
            return OpenAI(
                model=model_config["model_name"],
                api_key=model_config["api_key"],
            )
        else:
            raise Exception(f"Неподдерживаемый провайдер: {model_config['provider']}")
    except Exception as e:
        # Boundary fallback: never propagate — return a known-good default model.
        log_message(f"Ошибка создания модели {model_name}: {str(e)}")
        return GoogleGenAI(model="gemini-2.0-flash", api_key=GOOGLE_API_KEY)


def get_embedding_model(model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"):
    """Return a HuggingFace embedding model (multilingual MiniLM by default)."""
    return HuggingFaceEmbedding(model_name=model_name)


def get_reranker_model(model_name='cross-encoder/ms-marco-MiniLM-L-12-v2'):
    """Return a cross-encoder reranker model."""
    return CrossEncoder(model_name)


def generate_sources_html(nodes, chunks_df=None):
    """Render retrieved *nodes* as a grouped "Источники" (sources) HTML block.

    Nodes are grouped by document + table/image/section so each source is
    listed once. If *chunks_df* (a pandas DataFrame with 'document_id' and
    'file_link' columns) is given, a document link line is appended for text
    sources.

    NOTE(review): the surrounding HTML tags were lost in extraction; the text
    content below is preserved, but the markup must be restored from VCS.
    """
    html = "\n"
    html += "\nИсточники:\n"

    # Group nodes: one entry per table, per image, or per text section.
    sources_by_doc = {}
    for node in nodes:
        metadata = node.metadata if hasattr(node, 'metadata') else {}
        doc_type = metadata.get('type', 'text')
        doc_id = metadata.get('document_id', 'unknown')

        if doc_type in ('table', 'table_row'):
            table_num = metadata.get('table_number', 'unknown')
            key = f"{doc_id}_table_{table_num}"
        elif doc_type == 'image':
            image_num = metadata.get('image_number', 'unknown')
            key = f"{doc_id}_image_{image_num}"
        else:
            section_path = metadata.get('section_path', '')
            section_id = metadata.get('section_id', '')
            section_key = section_path if section_path else section_id
            key = f"{doc_id}_text_{section_key}"

        if key not in sources_by_doc:
            sources_by_doc[key] = {
                'doc_id': doc_id,
                'doc_type': doc_type,
                'metadata': metadata,
                'sections': set(),
            }

        # Text sources accumulate the set of section references ("пункт ...").
        if doc_type not in ['table', 'table_row', 'image']:
            section_path = metadata.get('section_path', '')
            section_id = metadata.get('section_id', '')
            if section_path:
                sources_by_doc[key]['sections'].add(f"пункт {section_path}")
            elif section_id and section_id != 'unknown':
                sources_by_doc[key]['sections'].add(f"пункт {section_id}")

    for source_info in sources_by_doc.values():
        metadata = source_info['metadata']
        doc_type = source_info['doc_type']
        doc_id = source_info['doc_id']

        html += "\n"
        if doc_type == 'text':
            html += f"\n📄 {doc_id}\n"
        elif doc_type in ('table', 'table_row'):
            table_num = metadata.get('table_number', 'unknown')
            table_title = metadata.get('table_title', '')
            if table_num and table_num != 'unknown':
                # Prefix the table number with "№" unless it already has one.
                if not str(table_num).startswith('№'):
                    table_num = f"№{table_num}"
                html += f"\n📊 Таблица {table_num} - {doc_id}\n"
                if table_title and table_title != 'unknown':
                    html += f"\n{table_title}\n"
            else:
                html += f"\n📊 Таблица - {doc_id}\n"
        elif doc_type == 'image':
            image_num = metadata.get('image_number', 'unknown')
            image_title = metadata.get('image_title', '')
            if image_num and image_num != 'unknown':
                if not str(image_num).startswith('№'):
                    image_num = f"№{image_num}"
                html += f"\n🖼️ Изображение {image_num} - {doc_id}\n"
                if image_title and image_title != 'unknown':
                    html += f"\n{image_title}\n"

        # Optional document link for text sources, resolved via chunks_df.
        if chunks_df is not None and 'file_link' in chunks_df.columns and doc_type == 'text':
            doc_rows = chunks_df[chunks_df['document_id'] == doc_id]
            if not doc_rows.empty:
                file_link = doc_rows.iloc[0]['file_link']
                # NOTE(review): file_link was interpolated into an <a href="...">
                # in the original markup — re-add it when restoring the tags.
                html += "🔗 Ссылка на документ\n"
        html += "\n"
    html += "\n"
    return html


def deduplicate_nodes(nodes):
    """Deduplicate retrieved nodes based on content and metadata.

    Builds a type-specific identifier per node (table chunks are keyed by
    table identifier plus row range / chunk id / content hash; images by image
    number; text by section + chunk id) and keeps the first node seen for each
    identifier, preserving the original order.
    """
    seen = set()
    unique_nodes = []
    for node in nodes:
        doc_id = node.metadata.get('document_id', '')
        node_type = node.metadata.get('type', 'text')

        if node_type in ('table', 'table_row'):
            table_num = node.metadata.get('table_number', '')
            table_identifier = node.metadata.get('table_identifier', table_num)
            # Use row range to distinguish table chunks.
            row_start = node.metadata.get('row_start', '')
            row_end = node.metadata.get('row_end', '')
            is_complete = node.metadata.get('is_complete_table', False)
            if is_complete:
                identifier = f"{doc_id}|table|{table_identifier}|complete"
            elif row_start != '' and row_end != '':
                identifier = f"{doc_id}|table|{table_identifier}|rows_{row_start}_{row_end}"
            else:
                # Fallback: use chunk_id if available.
                chunk_id = node.metadata.get('chunk_id', '')
                if chunk_id != '':
                    identifier = f"{doc_id}|table|{table_identifier}|chunk_{chunk_id}"
                else:
                    # Last resort: hash first 100 chars of content.
                    content_hash = hashlib.md5(node.text[:100].encode()).hexdigest()[:8]
                    identifier = f"{doc_id}|table|{table_identifier}|{content_hash}"
        elif node_type == 'image':
            img_num = node.metadata.get('image_number', '')
            identifier = f"{doc_id}|image|{img_num}"
        else:  # text
            section_id = node.metadata.get('section_id', '')
            chunk_id = node.metadata.get('chunk_id', 0)
            # For text, section_id + chunk_id should be unique.
            identifier = f"{doc_id}|text|{section_id}|{chunk_id}"

        if identifier not in seen:
            seen.add(identifier)
            unique_nodes.append(node)
    return unique_nodes


def normalize_query(query):
    """Normalize *query* to match the stored format.

    Replaces Cyrillic connection-type prefixes (С-/У-/Т-) with their Latin
    look-alikes, then strips all hyphens (so "С-25" becomes "C25").
    """
    query = query.replace('С-', 'C-').replace('с-', 'c-')
    query = query.replace('У-', 'U-').replace('у-', 'u-')
    query = query.replace('Т-', 'T-').replace('т-', 't-')
    query = query.replace('-', '')
    return query


def answer_question(question, query_engine, reranker, current_model, chunks_df=None):
    """Answer *question* through *query_engine* and return a display triple.

    Returns (answer_html, sources_html, chunks_html); on any failure, returns
    an error message and two empty strings. Retrieval uses the normalized
    query, while reranking and generation use the original question.
    """
    if query_engine is None:
        return "\nСистема не инициализирована\n", "", ""
    try:
        start_time = time.time()

        # NORMALIZE QUERY: convert Cyrillic connection prefixes to Latin.
        normalized_question = normalize_query(question)
        log_message(f"Original query: {question}")
        if normalized_question != question:
            log_message(f"Normalized query: {normalized_question}")

        # Use normalized query for retrieval.
        retrieved_nodes = query_engine.retriever.retrieve(normalized_question)
        log_message(f"user query: {question}")
        log_message(f"RETRIEVED: {len(retrieved_nodes)} nodes")

        unique_retrieved = deduplicate_nodes(retrieved_nodes)
        log_message(f"UNIQUE NODES: {len(unique_retrieved)} nodes")

        # Diagnostics: tally connection types among retrieved table chunks.
        conn_types_retrieved = {}
        for node in unique_retrieved:
            if node.metadata.get('type') == 'table':
                conn_type = node.metadata.get('connection_type', '')
                if conn_type:
                    conn_types_retrieved[conn_type] = conn_types_retrieved.get(conn_type, 0) + 1
        if conn_types_retrieved:
            log_message("CONNECTION TYPES IN RETRIEVED:")
            for ct, cnt in sorted(conn_types_retrieved.items()):
                log_message(f"  {ct}: {cnt} chunks")

        # Spot-check whether the target type was retrieved (keep original Cyrillic).
        if 'С-25' in question:
            if 'С-25' in conn_types_retrieved:
                log_message(f"✓ С-25 RETRIEVED: {conn_types_retrieved['С-25']} chunks")
            else:
                log_message("✗ С-25 NOT RETRIEVED despite being in query!")

        # Sample of retrieved tables for the log.
        log_message("SAMPLE OF RETRIEVED TABLES:")
        for i, node in enumerate(unique_retrieved[:10]):
            if node.metadata.get('type') == 'table':
                table_num = node.metadata.get('table_number', 'N/A')
                conn_type = node.metadata.get('connection_type', 'N/A')
                doc_id = node.metadata.get('document_id', 'N/A')
                log_message(f"  [{i+1}] {doc_id} - Table {table_num} - Type: {conn_type}")

        # Rerank the deduplicated nodes; these drive the sources/chunks displays.
        reranked_nodes = rerank_nodes(question, unique_retrieved, reranker, top_k=20)

        # Direct query without formatting (the engine performs its own retrieval).
        response = query_engine.query(question)

        end_time = time.time()
        processing_time = end_time - start_time
        log_message(f"Обработка завершена за {processing_time:.2f}с")

        sources_html = generate_sources_html(reranked_nodes, chunks_df)

        answer_with_time = f"""
Ответ (Модель: {current_model}):

{response.response}
Время обработки: {processing_time:.2f} секунд
"""

        chunk_info = []
        for node in reranked_nodes:
            metadata = node.metadata if hasattr(node, 'metadata') else {}
            chunk_info.append({
                'document_id': metadata.get('document_id', 'unknown'),
                'section_id': metadata.get('section_id', 'unknown'),
                'section_path': metadata.get('section_path', ''),
                'section_text': metadata.get('section_text', ''),
                'type': metadata.get('type', 'text'),
                'table_number': metadata.get('table_number', ''),
                'image_number': metadata.get('image_number', ''),
                'chunk_size': len(node.text),
                'chunk_text': node.text,
            })

        # Imported lazily to avoid a circular import with app.py.
        from app import create_chunks_display_html
        chunks_html = create_chunks_display_html(chunk_info)

        return answer_with_time, sources_html, chunks_html
    except Exception as e:
        # Top-level boundary: log and return an error triple instead of raising.
        log_message(f"Ошибка: {str(e)}")
        error_msg = f"\nОшибка: {str(e)}\n"
        return error_msg, "", ""