# scripts/BaseSFTPtData.py
# =============================================================================
# COPYRIGHT © 2025 Konstantin Vladimirovich Grabko. ALL RIGHTS RESERVED.
# CMS Manhattan JiRack Technology — PATENT PENDING
#
# This code is proprietary.
# Personal and non-commercial research use is allowed.
# Any commercial use, derivative works for profit, or distribution
# requires a paid license and 5% royalty.
#
# Unauthorized commercial use is strictly prohibited.
# Contact: grabko@cmsmanhattan.com
# =============================================================================
import torch
import os
from transformers import AutoTokenizer
from tqdm import tqdm
def stream_docs(file_path, delimiter="<|end_of_text|>"):
    """Stream delimiter-separated documents from a large text file without loading it whole."""
    buffer = ""
    with open(file_path, 'r', encoding='utf-8') as f:
        while True:
            chunk = f.read(1024 * 1024)  # read 1MB at a time
            if not chunk:
                if buffer.strip():
                    yield buffer
                break
            buffer += chunk
            while delimiter in buffer:
                doc, buffer = buffer.split(delimiter, 1)
                if doc.strip():
                    yield doc
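
# The input file is assumed to be plain text with documents separated by the
# Llama end-of-text marker (inferred from the delimiter default above), e.g.:
#   first document ...<|end_of_text|>second document ...<|end_of_text|>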
def tokenize_with_overlap(
    input_file="jirack_base_dataset.txt",
    # model_id="meta-llama/Llama-3.1-8B-Instruct",
    model_id=".",
    chunk_size=2000,
    max_length=8192,
    overlap_size=512,
    output_prefix="jirack_overlap_data"
):
print(f"📥 Загрузка токенизатора: {model_id}")
tokenizer = AutoTokenizer.from_pretrained(model_id)
# КРИТИЧЕСКИЙ ФИКС: Проверяем pad_token_id
if tokenizer.pad_token_id is None:
if tokenizer.eos_token_id is not None:
tokenizer.pad_token_id = tokenizer.eos_token_id
else:
tokenizer.pad_token_id = 128004 # Дефолт для Llama 3
pad_id = tokenizer.pad_token_id
print(f"🛠 Используемый Pad Token ID: {pad_id}")
stride = max_length - overlap_size
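    # Worked example with the defaults: stride = 8192 - 512 = 7680, so a
    # 20,000-token document is windowed at offsets 0, 7680 and 15360 ->
    # [0:8192], [7680:15872], [15360:20000]; consecutive windows share
    # exactly overlap_size = 512 tokens, and the short tail window is
    # padded up to max_length further down.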
    input_ids_buffer = []
    labels_buffer = []
    chunk_idx = 0

    def save_chunk(ids, labels, idx):
        # All rows are padded to max_length, so stacking yields (n_rows, max_length) tensors
        if not ids:
            return
        filename = f"{output_prefix}_{idx}.pt"
        torch.save({
            "input_ids": torch.stack(ids).to(torch.int64),
            "labels": torch.stack(labels).to(torch.int64)
        }, filename)
        print(f"\n💾 Saved chunk {idx}: {filename} ({len(ids)} rows)")
print(f"🔄 Нарезка 36GB файла. Окно: {max_length}, Нахлест: {overlap_size}")
for doc in tqdm(stream_docs(input_file), desc="Processing"):
try:
text = doc.strip()
if not text: continue
full_text = f"<|begin_of_text|>{text}<|end_of_text|>"
full_ids = tokenizer.encode(full_text, add_special_tokens=False)
if not full_ids: continue
# Нарезаем на окна
windows = []
if len(full_ids) <= max_length:
windows.append(full_ids)
else:
for i in range(0, len(full_ids), stride):
w = full_ids[i : i + max_length]
if len(w) > 10:
windows.append(w)
for w in windows:
ids = list(w)
lbs = list(w)
if len(ids) < max_length:
pad_len = max_length - len(ids)
# Используем проверенный pad_id
ids += [pad_id] * pad_len
lbs += [-100] * pad_len
input_ids_buffer.append(torch.tensor(ids, dtype=torch.int64))
labels_buffer.append(torch.tensor(lbs, dtype=torch.int64))
if len(input_ids_buffer) >= chunk_size:
save_chunk(input_ids_buffer, labels_buffer, chunk_idx)
chunk_idx += 1
input_ids_buffer, labels_buffer = [], []
except Exception as e:
# Теперь мы будем видеть реальную ошибку, если она осталась
print(f"\n⚠️ Ошибка: {e}")
continue
    if input_ids_buffer:
        save_chunk(input_ids_buffer, labels_buffer, chunk_idx)
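
# -----------------------------------------------------------------------------
# Minimal loading sketch: how one of the saved .pt chunks could be fed to a
# DataLoader for SFT. This is an illustration, not part of the original
# pipeline; the chunk filename and batch size are assumptions.
# -----------------------------------------------------------------------------
def example_load_chunk(path="jirack_overlap_data_0.pt", batch_size=2):
    from torch.utils.data import TensorDataset, DataLoader
    data = torch.load(path)
    # Tensors are (n_rows, max_length); -100 label positions are ignored by the loss
    dataset = TensorDataset(data["input_ids"], data["labels"])
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)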
if __name__ == "__main__":
    tokenize_with_overlap()