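"""Document loaders for a llama_index RAG pipeline.

Downloads section JSON, table and image metadata, and pre-chunked CSV data
from a Hugging Face dataset repo and converts everything into llama_index
Documents with type-tagged metadata, ready for indexing."""
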
import json
import os
import zipfile

import pandas as pd
from huggingface_hub import hf_hub_download, list_repo_files
from llama_index.core import Document
from llama_index.core.text_splitter import SentenceSplitter

from config import CHUNK_SIZE, CHUNK_OVERLAP
from my_logging import log_message

def load_json_documents(repo_id, hf_token, json_files_dir, download_dir):
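    """Download ZIP archives of section JSON from the dataset repo and turn
    every section into a llama_index Document.

    Returns (documents, chunk_info); both lists are empty on failure."""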
    log_message(f"Загрузка JSON документов из {json_files_dir}")
    
    documents = []
    chunk_info = []
    
    try:
        # Match the repo_type used by hf_hub_download below; the default is "model"
        files = list_repo_files(repo_id, token=hf_token, repo_type="dataset")
        zip_files = [f for f in files if f.startswith(json_files_dir) and f.endswith('.zip')]
        
        log_message(f"Найдено {len(zip_files)} ZIP файлов")
        
        for zip_file in zip_files:
            zip_path = hf_hub_download(
                repo_id=repo_id,
                filename=zip_file,
                token=hf_token,
                repo_type="dataset",
                local_dir=download_dir
            )
            
            log_message(f"Обрабатываю архив: {zip_file}")
            
            with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                json_files = [f for f in zip_ref.namelist() 
                             if f.endswith('.json') and not f.startswith('__MACOSX')]
                
                log_message(f"Найдено {len(json_files)} JSON файлов в архиве")
                
                for json_file in json_files:
                    try:
                        with zip_ref.open(json_file) as f:
                            json_data = json.load(f)
                        
                        doc_id = json_data.get('document_id', os.path.basename(json_file))
                        sections = json_data.get('sections', [])
                        
                        log_message(f"Обработка документа {doc_id}: {len(sections)} разделов")
                        
                        for section in sections:
                            doc, info = process_text_section(section, doc_id)
                            if doc:
                                documents.append(doc)
                                chunk_info.append(info)
                    
                    except Exception as e:
                        log_message(f"Ошибка при обработке {json_file}: {str(e)}")
        
        log_message(f"Загружено {len(documents)} текстовых документов")
        return documents, chunk_info
    
    except Exception as e:
        log_message(f"Ошибка загрузки JSON: {str(e)}")
        return [], []

def process_text_section(section, doc_id):
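    """Build a Document and a matching chunk-info dict from one section
    record; returns (None, None) when the section has no text."""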
    section_id = section.get('section_id', 'unknown')
    section_path = section.get('section_path', '')
    section_text = section.get('section_text', '')
    section_content = section.get('section_content', '')
    parent_section = section.get('parent_section', '')
    parent_title = section.get('parent_title', '')
    level = section.get('level', 'section')
    
    full_text = f"{section_text}\n{section_content}".strip()
    
    if not full_text:
        return None, None
    
    metadata = {
        'document_id': doc_id,
        'section_id': section_id,
        'section_path': section_path,
        'section_text': section_text,
        'parent_section': parent_section,
        'parent_title': parent_title,
        'level': level,
        'type': 'text',
        'chunk_text': full_text
    }
    
    doc = Document(
        text=full_text,
        metadata=metadata
    )
    
    # The chunk-info record mirrors the Document metadata field for field
    chunk_info = dict(metadata)
    
    return doc, chunk_info

def load_table_data(repo_id, hf_token, table_data_dir):
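    """Download table JSON files from the dataset repo and convert each one
    into one or more table Documents."""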
    log_message(f"Загрузка табличных данных из {table_data_dir}")
    
    documents = []
    
    try:
        files = list_repo_files(repo_id, token=hf_token, repo_type="dataset")
        json_files = [f for f in files if f.startswith(table_data_dir) and f.endswith('.json')]
        
        log_message(f"Найдено {len(json_files)} табличных JSON файлов")
        
        for json_file in json_files:
            try:
                file_path = hf_hub_download(
                    repo_id=repo_id,
                    filename=json_file,
                    token=hf_token,
                    repo_type="dataset"
                )
                
                with open(file_path, 'r', encoding='utf-8') as f:
                    table_data = json.load(f)
                
                doc = create_table_document(table_data)
                # A large table comes back as a list of chunked Documents
                if isinstance(doc, list):
                    documents.extend(doc)
                elif doc:
                    documents.append(doc)
            
            except Exception as e:
                log_message(f"Error processing table {json_file}: {str(e)}")
        
        log_message(f"Загружено {len(documents)} табличных документов")
        return documents
    
    except Exception as e:
        log_message(f"Ошибка загрузки таблиц: {str(e)}")
        return []

def create_table_document(table_data):
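    """Convert one table record into a Document.

    Tables whose serialized form exceeds roughly 2000 estimated tokens are
    handed to create_chunked_table_document and may come back as a list;
    empty tables return None."""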
    doc_id = table_data.get('document_id', 'unknown')
    table_number = table_data.get('table_number', 'unknown')
    table_title = table_data.get('table_title', '')
    section = table_data.get('section', '')
    headers = table_data.get('headers', [])
    data = table_data.get('data', [])
    
    if not data:
        return None
    
    token_count = estimate_tokens(str(table_data))
    
    if token_count < 2000:
        text = format_table_as_text(table_number, table_title, section, headers, data)
        
        metadata = {
            'document_id': doc_id,
            'table_number': table_number,
            'table_title': table_title,
            'section': section,
            'type': 'table',
            'headers': str(headers),
            'row_count': len(data)
        }
        
        return Document(text=text, metadata=metadata)
    else:
        return create_chunked_table_document(
            doc_id, table_number, table_title, section, headers, data
        )

def create_chunked_table_document(doc_id, table_number, table_title, section, headers, data, rows_per_chunk=30):
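    """Split a large table into Documents of rows_per_chunk rows each.

    Returns a single Document when only one chunk results, else a list."""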
    chunks = []
    
    for i in range(0, len(data), rows_per_chunk):
        chunk_rows = data[i:i+rows_per_chunk]
        
        text = format_table_as_text(
            table_number, 
            table_title, 
            section, 
            headers, 
            chunk_rows,
            chunk_info=f"строки {i+1}-{i+len(chunk_rows)}"
        )
        
        metadata = {
            'document_id': doc_id,
            'table_number': table_number,
            'table_title': table_title,
            'section': section,
            'type': 'table',
            'headers': str(headers),
            'chunk_index': i // rows_per_chunk,
            'row_start': i,
            'row_end': i + len(chunk_rows),
            'row_count': len(chunk_rows)
        }
        
        chunks.append(Document(text=text, metadata=metadata))
    
    return chunks[0] if len(chunks) == 1 else chunks

def format_table_as_text(table_number, table_title, section, headers, data, chunk_info=""):
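    """Render a table (or a slice of one) as plain text: caption lines
    followed by pipe-separated data rows."""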
    text_parts = []
    
    text_parts.append(f"Таблица {table_number}")
    if table_title:
        text_parts.append(f"Название: {table_title}")
    if section:
        text_parts.append(f"Раздел: {section}")
    if chunk_info:
        text_parts.append(f"({chunk_info})")
    
    text_parts.append(f"\nЗаголовки: {', '.join(headers)}")
    text_parts.append("\nДанные:")
    
    for row in data[:100]:
        row_text = " | ".join([str(cell) for cell in row])
        text_parts.append(row_text)
    
    return "\n".join(text_parts)

def load_image_data(repo_id, hf_token, image_data_dir):
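    """Download image-description JSON files from the dataset repo and
    convert each one into a Document."""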
    log_message(f"Загрузка данных изображений из {image_data_dir}")
    
    documents = []
    
    try:
        files = list_repo_files(repo_id, token=hf_token, repo_type="dataset")
        json_files = [f for f in files if f.startswith(image_data_dir) and f.endswith('.json')]
        
        log_message(f"Найдено {len(json_files)} JSON файлов изображений")
        
        for json_file in json_files:
            try:
                file_path = hf_hub_download(
                    repo_id=repo_id,
                    filename=json_file,
                    token=hf_token,
                    repo_type="dataset"
                )
                
                with open(file_path, 'r', encoding='utf-8') as f:
                    image_data = json.load(f)
                
                doc = create_image_document(image_data)
                if doc:
                    documents.append(doc)
            
            except Exception as e:
                log_message(f"Ошибка при обработке изображения {json_file}: {str(e)}")
        
        log_message(f"Загружено {len(documents)} документов изображений")
        return documents
    
    except Exception as e:
        log_message(f"Ошибка загрузки изображений: {str(e)}")
        return []

def create_image_document(image_data):
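    """Build a searchable text Document from an image record (number,
    title, section, description)."""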
    doc_id = image_data.get('document_id', 'unknown')
    image_number = image_data.get('image_number', 'unknown')
    image_title = image_data.get('image_title', '')
    image_description = image_data.get('image_description', '')
    section = image_data.get('section', '')
    
    text_parts = []
    text_parts.append(f"Рисунок {image_number}")
    if image_title:
        text_parts.append(f"Название: {image_title}")
    if section:
        text_parts.append(f"Раздел: {section}")
    if image_description:
        text_parts.append(f"Описание: {image_description}")
    
    text = "\n".join(text_parts)
    
    metadata = {
        'document_id': doc_id,
        'image_number': image_number,
        'image_title': image_title,
        'section': section,
        'type': 'image'
    }
    
    return Document(text=text, metadata=metadata)

def load_csv_chunks(repo_id, hf_token, chunks_filename, download_dir):
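    """Download a pre-chunked CSV from the dataset repo and wrap every row
    that has chunk_text into a Document.

    Returns (documents, dataframe), or ([], None) on failure."""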
    log_message(f"Загрузка CSV чанков из {chunks_filename}")
    
    try:
        csv_path = hf_hub_download(
            repo_id=repo_id,
            filename=chunks_filename,
            token=hf_token,
            repo_type="dataset",
            local_dir=download_dir
        )
        
        df = pd.read_csv(csv_path)
        log_message(f"Загружено {len(df)} строк из CSV")
        
        documents = []
        for _, row in df.iterrows():
            metadata = {
                'document_id': row.get('document_id', 'unknown'),
                'section_id': row.get('section_id', 'unknown'),
                'section_path': row.get('section_path', ''),
                'type': 'text'
            }
            
            text = row.get('chunk_text', '')
            # pd.read_csv yields NaN (a float) for empty cells, and NaN is
            # truthy, so require a non-empty string explicitly
            if isinstance(text, str) and text.strip():
                doc = Document(text=text, metadata=metadata)
                documents.append(doc)
        
        log_message(f"Создано {len(documents)} документов из CSV")
        return documents, df
    
    except Exception as e:
        log_message(f"Ошибка загрузки CSV: {str(e)}")
        return [], None

def process_documents_with_chunking(documents):
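    """Pass table and image Documents through unchanged; sentence-split
    text Documents whose estimated token count exceeds CHUNK_SIZE.

    Returns (chunked_documents, chunk_info)."""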
    log_message(f"Чанкинг {len(documents)} документов")
    
    text_splitter = SentenceSplitter(
        chunk_size=CHUNK_SIZE,
        chunk_overlap=CHUNK_OVERLAP,
        separator=" ",
        backup_separators=["\n", ".", "!", "?"]
    )
    
    chunked_documents = []
    chunk_info = []
    
    for doc in documents:
        # A chunked table arrives as a list of Documents; flatten it before
        # touching .metadata, which a plain list does not have
        if isinstance(doc, list):
            chunked_documents.extend(doc)
            for d in doc:
                chunk_info.append(create_chunk_info(d))
            continue
        
        doc_type = doc.metadata.get('type', 'text')
        
        if doc_type == 'table':
            chunked_documents.append(doc)
            chunk_info.append(create_chunk_info(doc))
        
        elif doc_type == 'image':
            chunked_documents.append(doc)
            chunk_info.append(create_chunk_info(doc))
        
        else:
            token_count = estimate_tokens(doc.text)
            
            if token_count <= CHUNK_SIZE:
                chunked_documents.append(doc)
                chunk_info.append(create_chunk_info(doc))
            else:
                nodes = text_splitter.get_nodes_from_documents([doc])
                
                for node in nodes:
                    new_doc = Document(
                        text=node.text,
                        metadata=doc.metadata
                    )
                    chunked_documents.append(new_doc)
                    chunk_info.append(create_chunk_info(new_doc))
    
    log_message(f"Получено {len(chunked_documents)} чанков после обработки")
    return chunked_documents, chunk_info

def create_chunk_info(doc):
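    """Flatten a Document into a bookkeeping dict, keeping only the
    metadata fields relevant to its type (text, table, or image)."""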
    metadata = doc.metadata
    
    info = {
        'document_id': metadata.get('document_id', 'unknown'),
        'type': metadata.get('type', 'text'),
        'chunk_text': doc.text[:500]
    }
    
    if metadata.get('type') == 'table':
        info['table_number'] = metadata.get('table_number', 'unknown')
        info['table_title'] = metadata.get('table_title', '')
        info['section'] = metadata.get('section', '')
    
    elif metadata.get('type') == 'image':
        info['image_number'] = metadata.get('image_number', 'unknown')
        info['image_title'] = metadata.get('image_title', '')
        info['section'] = metadata.get('section', '')
    
    else:
        info['section_id'] = metadata.get('section_id', 'unknown')
        info['section_path'] = metadata.get('section_path', '')
        info['section_text'] = metadata.get('section_text', '')
        info['parent_section'] = metadata.get('parent_section', '')
        info['parent_title'] = metadata.get('parent_title', '')
        info['level'] = metadata.get('level', 'section')
    
    return info

def estimate_tokens(text):
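    # Very rough heuristic: ~1.3 tokens per whitespace-separated word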
    return len(text.split()) * 1.3
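
# Example wiring (a sketch only; "user/dataset", the directory names, and the
# HF_TOKEN environment variable are hypothetical placeholders, not values
# defined by this module or its config):
#
#     repo_id, token = "user/dataset", os.environ.get("HF_TOKEN")
#     docs, info = load_json_documents(repo_id, token, "json_docs", "./downloads")
#     docs += load_table_data(repo_id, token, "tables")
#     docs += load_image_data(repo_id, token, "images")
#     chunks, chunk_info = process_documents_with_chunking(docs)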