Commit b91dfb0
1 Parent(s): 4c7b0a2

New function should_keep_table_whole: keep tables whole for certain files

Files changed:
- app.py +8 -5
- documents_prep.py +205 -7
app.py CHANGED

@@ -80,11 +80,14 @@ def create_interface():
     ask_btn = gr.Button("Найти ответ", variant="primary")
 
     gr.Examples(
-        (five removed lines; their content is not shown in this diff view)
+        examples=[
+            "О чем этот рисунок: ГОСТ Р 50.04.07-2022 Приложение Л. Л.1.5 Рисунок Л.2",
+            "Л.9 Формула в ГОСТ Р 50.04.07 - 2022 что и о чем там?",
+            "Какой стандарт устанавливает порядок признания протоколов испытаний продукции в области использования атомной энергии?",
+            "Кто несет ответственность за организацию и проведение признания протоколов испытаний продукции?",
+            "В каких случаях могут быть признаны протоколы испытаний, проведенные лабораториями?",
+            "В какой таблице можно найти информацию о методы исследований при аттестационных испытаниях технологии термической обработки заготовок из легированных сталей? Какой документ и какой раздел?"
+        ],
         inputs=question
     )
 
documents_prep.py CHANGED

@@ -5,6 +5,7 @@ from huggingface_hub import hf_hub_download, list_repo_files
 from llama_index.core import Document
 from llama_index.core.text_splitter import SentenceSplitter
 from my_logging import log_message
+import re
 
 # Configuration
 CHUNK_SIZE = 1500
@@ -37,14 +38,32 @@ def chunk_text_documents(documents):
 
     return chunked
 
+def should_keep_table_whole(doc_id):
+    """Check if document should be kept as single chunk"""
+    special_patterns = [
+        r'НП\s*068-05',
+        r'НП-068-05',
+        r'59023',
+        r'ГОСТ\s*Р?\s*59023'
+    ]
+
+    for pattern in special_patterns:
+        if re.search(pattern, doc_id, re.IGNORECASE):
+            return True
+    return False
 
-def chunk_table_by_rows(table_data, doc_id, rows_per_chunk=20, max_chars=2000):
+def chunk_table_by_rows(table_data, doc_id, rows_per_chunk=3, max_chars=2000):
     headers = table_data.get('headers', [])
     rows = table_data.get('data', [])
     table_num = str(table_data.get('table_number', 'unknown')).strip()
     table_title = table_data.get('table_title', '')
     section = table_data.get('section', '')
 
+    # CHECK FOR SPECIAL FILES - NO CHUNKING
+    if should_keep_table_whole(doc_id):
+        log_message(f" 📊 FULL TABLE (special file): {doc_id} - {table_num}")
+        return create_full_table_chunk(table_data, doc_id)
+
     # Section-aware identifier (keep your existing logic)
     import re
     if 'приложени' in section.lower():
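For reference, a quick sanity check of the new predicate. This is only a sketch: the document IDs below are hypothetical, and it assumes the module's dependencies are installed so `documents_prep` imports cleanly. Matching uses re.search with re.IGNORECASE, so any substring hit on one of the four patterns keeps the table whole.

```python
from documents_prep import should_keep_table_whole

# Hypothetical document IDs, for illustration only
assert should_keep_table_whole("НП-068-05")                 # exact hyphenated pattern
assert should_keep_table_whole("нп 068-05")                 # re.IGNORECASE covers lowercase
assert should_keep_table_whole("ГОСТ Р 59023-2020")         # bare '59023' substring is enough
assert not should_keep_table_whole("ГОСТ Р 50.04.07-2022")  # no pattern matches
```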
@@ -57,9 +76,6 @@ def chunk_table_by_rows(table_data, doc_id, rows_per_chunk=20, max_chars=2000):
     else:
         table_identifier = table_num
 
-    if not rows:
-        return []
-
     log_message(f" 📊 Processing: {doc_id} - {table_identifier} ({len(rows)} rows)")
 
     # Build base header (compact version)
@@ -149,6 +165,74 @@ def chunk_table_by_rows(table_data, doc_id, rows_per_chunk=20, max_chars=2000):
     return chunks
 
 
+def create_full_table_chunk(table_data, doc_id):
+    """Create a single chunk for entire table (no splitting)"""
+    headers = table_data.get('headers', [])
+    rows = table_data.get('data', [])
+    table_num = str(table_data.get('table_number', 'unknown')).strip()
+    table_title = table_data.get('table_title', '')
+    section = table_data.get('section', '')
+
+    # Section-aware identifier
+    import re
+    if 'приложени' in section.lower():
+        appendix_match = re.search(r'приложени[еия]\s*(\d+|[а-яА-Я])', section.lower())
+        if appendix_match:
+            appendix_num = appendix_match.group(1).upper()
+            table_identifier = f"{table_num} Приложение {appendix_num}"
+        else:
+            table_identifier = table_num
+    else:
+        table_identifier = table_num
+
+    # Build full content
+    content = f"ДОКУМЕНТ: {doc_id} | ТАБЛИЦА: {table_identifier}\n"
+    if table_title:
+        content += f"НАЗВАНИЕ: {table_title}\n"
+    content += f"РАЗДЕЛ: {section}\n"
+    content += f"{'='*60}\n"
+
+    if headers:
+        header_str = ' | '.join(str(h) for h in headers)
+        content += f"ЗАГОЛОВКИ: {header_str}\n\n"
+
+    content += "ДАННЫЕ (ПОЛНАЯ ТАБЛИЦА):\n"
+
+    for i, row in enumerate(rows, 1):
+        row_text = format_single_row(row, i)
+        if row_text:
+            content += row_text
+
+    content += f"\n[Полная таблица: {len(rows)} строк]\n"
+
+    # Embed metadata in text
+    content += f"\n\n--- МЕТАДАННЫЕ ---\n"
+    content += f"Документ: {doc_id}\n"
+    content += f"Таблица: {table_identifier}\n"
+    content += f"Название таблицы: {table_title}\n"
+    content += f"Раздел: {section}\n"
+    content += f"Всего строк: {len(rows)}\n"
+
+    metadata = {
+        'type': 'table',
+        'document_id': doc_id,
+        'table_number': table_num,
+        'table_identifier': table_identifier,
+        'table_title': table_title,
+        'section': section,
+        'chunk_id': 0,
+        'row_start': 0,
+        'row_end': len(rows),
+        'total_rows': len(rows),
+        'chunk_size': len(content),
+        'is_complete_table': True,
+        'chunking_strategy': 'full_table',
+        'rows_in_chunk': len(rows)
+    }
+
+    return [Document(text=content, metadata=metadata)]
+
+
 def _create_chunk(base_header, batch, table_identifier, doc_id,
                   table_num, table_title, section, total_rows,
                   chunk_num, is_complete):
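A standalone sketch of the section-aware identifier logic that create_full_table_chunk applies (same regex as in the diff). The helper name and the inputs below are hypothetical, introduced only to show the behavior in isolation.

```python
import re

# Hypothetical helper mirroring the identifier logic from the diff
def table_identifier_for(table_num, section):
    # Appendix sections get "Приложение <letter/number>" appended to the table number
    if 'приложени' in section.lower():
        m = re.search(r'приложени[еия]\s*(\d+|[а-яА-Я])', section.lower())
        if m:
            return f"{table_num} Приложение {m.group(1).upper()}"
    return table_num

print(table_identifier_for('Л.2', 'Приложение Л'))  # -> 'Л.2 Приложение Л'
print(table_identifier_for('3', 'Раздел 5'))        # -> '3'
```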
@@ -289,6 +373,14 @@ def load_table_documents(repo_id, hf_token, table_dir):
     table_files = [f for f in files if f.startswith(table_dir) and f.endswith('.json')]
 
     all_chunks = []
+    stats = {
+        'full_tables': 0,
+        'split_tables': 0,
+        'total_chunks': 0,
+        'full_table_sizes': [],
+        'split_chunk_sizes': []
+    }
+
     for file_path in table_files:
         try:
             local_path = hf_hub_download(
@@ -306,17 +398,123 @@ def load_table_documents(repo_id, hf_token, table_dir):
             for sheet in data.get('sheets', []):
                 sheet_doc_id = sheet.get('document_id', sheet.get('document', file_doc_id))
 
-                # USE NEW ADAPTIVE CHUNKING
                 chunks = chunk_table_by_rows(sheet, sheet_doc_id, max_chars=3072)
+
+                # Track statistics
+                if chunks:
+                    is_full = chunks[0].metadata.get('is_complete_table', False)
+                    chunk_size = chunks[0].metadata.get('chunk_size', 0)
+
+                    if is_full:
+                        stats['full_tables'] += 1
+                        stats['full_table_sizes'].append(chunk_size)
+                        log_message(f" 📄 {sheet_doc_id}: FULL TABLE ({chunk_size} chars)")
+                    else:
+                        stats['split_tables'] += 1
+                        for c in chunks:
+                            stats['split_chunk_sizes'].append(c.metadata.get('chunk_size', 0))
+                        log_message(f" 📄 {sheet_doc_id}: {len(chunks)} chunks (split)")
+
+                    stats['total_chunks'] += len(chunks)
+
                 all_chunks.extend(chunks)
-
+
         except Exception as e:
             log_message(f"Error loading {file_path}: {e}")
 
-
+    # Print final statistics
+    log_message(f"\n{'='*60}")
+    log_message(f"TABLE LOADING STATISTICS:")
+    log_message(f"  Total chunks created: {stats['total_chunks']}")
+    log_message(f"  Full tables (no split): {stats['full_tables']}")
+    log_message(f"  Split tables: {stats['split_tables']}")
+
+    if stats['full_table_sizes']:
+        avg_full = sum(stats['full_table_sizes']) / len(stats['full_table_sizes'])
+        log_message(f"  Full table avg size: {avg_full:.0f} chars")
+        log_message(f"  Full table size range: {min(stats['full_table_sizes'])} - {max(stats['full_table_sizes'])} chars")
+
+    if stats['split_chunk_sizes']:
+        avg_split = sum(stats['split_chunk_sizes']) / len(stats['split_chunk_sizes'])
+        log_message(f"  Split chunk avg size: {avg_split:.0f} chars")
+        log_message(f"  Split chunk size range: {min(stats['split_chunk_sizes'])} - {max(stats['split_chunk_sizes'])} chars")
+
+    log_message(f"{'='*60}\n")
+
     return all_chunks
 
 
+def create_whole_table_chunk(table_data, doc_id):
+    """Create a single chunk for the entire table (no splitting)"""
+    headers = table_data.get('headers', [])
+    rows = table_data.get('data', [])
+    table_num = str(table_data.get('table_number', 'unknown')).strip()
+    table_title = table_data.get('table_title', '')
+    section = table_data.get('section', '')
+
+    # Section-aware identifier
+    import re
+    if 'приложени' in section.lower():
+        appendix_match = re.search(r'приложени[еия]\s*(\d+|[а-яА-Я])', section.lower())
+        if appendix_match:
+            appendix_num = appendix_match.group(1).upper()
+            table_identifier = f"{table_num} Приложение {appendix_num}"
+        else:
+            table_identifier = table_num
+    else:
+        table_identifier = table_num
+
+    if not rows:
+        return []
+
+    log_message(f" 📊 Creating WHOLE table: {doc_id} - {table_identifier} ({len(rows)} rows)")
+
+    # Build complete table content
+    content = f"ДОКУМЕНТ: {doc_id} | ТАБЛИЦА: {table_identifier}\n"
+    if table_title:
+        content += f"НАЗВАНИЕ: {table_title}\n"
+    content += f"{'='*60}\n"
+
+    if headers:
+        header_str = ' | '.join(str(h) for h in headers)
+        content += f"ЗАГОЛОВКИ: {header_str}\n\n"
+
+    content += "ДАННЫЕ:\n"
+
+    # Add ALL rows
+    for i, row in enumerate(rows, 1):
+        row_text = format_single_row(row, i)
+        if row_text:
+            content += row_text
+
+    # Add metadata section
+    content += f"\n\n--- МЕТАДАННЫЕ ---\n"
+    content += f"Документ: {doc_id}\n"
+    content += f"Таблица: {table_identifier}\n"
+    content += f"Название таблицы: {table_title}\n"
+    content += f"Раздел: {section}\n"
+    content += f"Полная таблица: {len(rows)} строк\n"
+
+    metadata = {
+        'type': 'table',
+        'document_id': doc_id,
+        'table_number': table_num,
+        'table_identifier': table_identifier,
+        'table_title': table_title,
+        'section': section,
+        'chunk_id': 0,
+        'row_start': 0,
+        'row_end': len(rows),
+        'total_rows': len(rows),
+        'chunk_size': len(content),
+        'is_complete_table': True,
+        'rows_in_chunk': len(rows)
+    }
+
+    log_message(f"  Created 1 chunk with {len(rows)} rows ({len(content)} chars)")
+
+    return [Document(text=content, metadata=metadata)]
+
 def load_json_documents(repo_id, hf_token, json_dir):
     import zipfile
    import tempfile
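A minimal simulation of how the new statistics tracking aggregates across sheets. StubChunk and the chunk sizes below are hypothetical stand-ins for llama_index Documents, mirroring only the metadata keys the loader reads.

```python
class StubChunk:
    """Hypothetical stand-in for a llama_index Document with a metadata dict."""
    def __init__(self, metadata):
        self.metadata = metadata

stats = {'full_tables': 0, 'split_tables': 0, 'total_chunks': 0,
         'full_table_sizes': [], 'split_chunk_sizes': []}

batches = [
    [StubChunk({'is_complete_table': True, 'chunk_size': 5200})],            # special file, kept whole
    [StubChunk({'chunk_size': 1900}), StubChunk({'chunk_size': 1700})],      # ordinary table, split
]

for chunks in batches:
    if chunks:
        # Same branch structure as the loader: first chunk decides full vs split
        if chunks[0].metadata.get('is_complete_table', False):
            stats['full_tables'] += 1
            stats['full_table_sizes'].append(chunks[0].metadata.get('chunk_size', 0))
        else:
            stats['split_tables'] += 1
            for c in chunks:
                stats['split_chunk_sizes'].append(c.metadata.get('chunk_size', 0))
        stats['total_chunks'] += len(chunks)

print(stats['full_tables'], stats['split_tables'], stats['total_chunks'])  # 1 1 3
```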