42hgyn26hz-cpu committed on
Commit · 8ff2929
1 Parent(s): 410be7b
upload
Browse files:
- app.py +3 -4
- main.py +608 -104
- offsec_model/emergency_save/config.json +36 -0
- offsec_model/emergency_save/generation_config.json +15 -0
- offsec_model/emergency_save/model.safetensors +3 -0
- offsec_model/emergency_save/tokenizer.json +0 -0
- offsec_model/emergency_save/tokenizer_config.json +12 -0
- offsec_model/emergency_save/training_args.bin +3 -0
- offsec_model/final_model/config.json +36 -0
- offsec_model/final_model/generation_config.json +15 -0
- offsec_model/final_model/model.safetensors +3 -0
- offsec_model/final_model/tokenizer.json +0 -0
- offsec_model/final_model/tokenizer_config.json +12 -0
- offsec_model/final_model/training_args.bin +3 -0
- offsec_model/trainer_state.json +42 -0
app.py
CHANGED
@@ -289,7 +289,6 @@
 # ─── 5. Training Arguments ───────────────────────────────────────────────────
 training_args = TrainingArguments(
     output_dir=OUTPUT_DIR,
-    overwrite_output_dir=True,
 
     # Training hyperparameters
     num_train_epochs=EPOCHS,
@@ -311,7 +310,7 @@
     save_total_limit=2,  # Keep fewer checkpoints
 
     # Evaluation
-    evaluation_strategy="steps",
+    eval_strategy="steps",
     eval_steps=EVAL_STEPS,
     load_best_model_at_end=True,
     metric_for_best_model="eval_loss",
@@ -340,13 +339,13 @@
         train_dataset=tokenized_dataset["train"],
         eval_dataset=tokenized_dataset["test"] if len(tokenized_dataset["test"]) > 0 else tokenized_dataset["train"],
         data_collator=data_collator,
-        tokenizer=tokenizer,
+        processing_class=tokenizer,
         callbacks=[EarlyStoppingCallback(early_stopping_patience=2)]
     )
     print("✓ Trainer initialized successfully")
 except Exception as e:
     print(f"Trainer initialization failed: {e}")
-
+
 
 # ─── 7. Train ────────────────────────────────────────────────────────────────
 print("\n" + "="*50)
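
Both app.py changes track kwarg renames in recent Transformers releases: TrainingArguments' evaluation_strategy became eval_strategy, and the Trainer's tokenizer= argument was superseded by processing_class=. A minimal self-contained sketch of the renamed calls (the tiny public checkpoint and toy data are assumptions for illustration, not part of this repo):

from datasets import Dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")   # tiny test model
model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
data = Dataset.from_list([{"input_ids": tok("hello world")["input_ids"]}] * 8)

args = TrainingArguments(
    output_dir="./out",
    eval_strategy="steps",    # new spelling of the old evaluation_strategy kwarg
    eval_steps=100,
)
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=data,
    eval_dataset=data,
    processing_class=tok,     # replaces the deprecated tokenizer= kwarg
)
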
main.py
CHANGED
@@ -1,129 +1,633 @@
[The 129-line main.py removed by this commit survives the page render only in fragments: the shared torch/transformers imports, MODEL_NAME = "zxc4wewewe/blackthinking", LOGGING_STEPS = 50, print(f"Columns: {dataset['train'].column_names}"), and the "# ─── 2. Format & tokenize" block that loaded AutoTokenizer.from_pretrained(MODEL_NAME) and fell back to eos_token as the GPT-2 pad token. The 633-line replacement follows in full.]
import os
import torch
import gc
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partial
import psutil
import multiprocessing as mp
from datasets import load_dataset, Dataset, DatasetDict
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
    EarlyStoppingCallback,
    GPT2TokenizerFast
)
import shutil
from typing import Dict, Any, List
import warnings
warnings.filterwarnings("ignore")


# ─── Configuration ───────────────────────────────────────────────────────────
MODEL_NAME = "zxc4wewewe/blackthinking"
OUTPUT_DIR = "./offsec_model"
MAX_LENGTH = 512
BATCH_SIZE = 2  # Reduced for stability
GRADIENT_ACCUMULATION = 4
EPOCHS = 1  # Reduced for testing
LEARNING_RATE = 2e-5
SAVE_STEPS = 100
EVAL_STEPS = 100
LOGGING_STEPS = 50

# Optimize for performance
NUM_WORKERS = min(4, mp.cpu_count())  # Conservative setting
BATCH_SIZE_TOKENIZATION = 100

# ─── 1. Robust Tokenizer Loading ─────────────────────────────────────────────
def load_tokenizer_robust(model_name):
    """Load tokenizer with multiple fallback strategies"""
    print(f"🔄 Attempting to load tokenizer for: {model_name}")

    # Strategy 1: Try the model's tokenizer with trust_remote_code
    try:
        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            use_fast=True,
            trust_remote_code=True
        )
        if hasattr(tokenizer, 'get_vocab') or hasattr(tokenizer, 'vocab'):
            print("✅ Successfully loaded model tokenizer")
            return tokenizer
        else:
            print("⚠️ Model tokenizer loaded but missing vocab methods")
    except Exception as e:
        print(f"⚠️ Primary tokenizer load failed: {str(e)[:100]}...")

    # Strategy 2: Try without trust_remote_code
    try:
        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            use_fast=True,
            trust_remote_code=False
        )
        print("✅ Successfully loaded tokenizer (no remote code)")
        return tokenizer
    except Exception as e:
        print(f"⚠️ Secondary tokenizer load failed: {str(e)[:100]}...")

    # Strategy 3: Create a minimal tokenizer workaround
    print("🔄 Creating minimal tokenizer workaround...")
    try:
        # Use GPT-2 tokenizer as base
        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

        # Add special tokens that the model might expect
        special_tokens = {
            "pad_token": "<|pad|>",
            "eos_token": "<|endoftext|>",  # Standard GPT-2 eos
            "bos_token": "<|startoftext|>",  # Custom bos
        }

        # Only add tokens that don't already exist
        existing_tokens = set(tokenizer.all_special_tokens)
        tokens_to_add = {k: v for k, v in special_tokens.items() if v not in existing_tokens}

        if tokens_to_add:
            tokenizer.add_special_tokens(tokens_to_add)

        print("✅ Created minimal tokenizer workaround")
        return tokenizer
    except Exception as e:
        print(f"⚠️ Minimal tokenizer creation failed: {str(e)[:100]}...")

    # Strategy 4: Create absolute minimal tokenizer
    print("🔄 Creating absolute minimal tokenizer...")
    try:
        from transformers import PreTrainedTokenizerFast
        import json

        # Create minimal vocab
        vocab = {
            "<|pad|>": 0,
            "<|endoftext|>": 1,
            "<|startoftext|>": 2,
            "<|unk|>": 3,
        }

        # Add basic ASCII characters
        for i, char in enumerate("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 \n\t.,!?-", start=4):
            vocab[char] = i

        # Create tokenizer JSON structure
        tokenizer_json = {
            "version": "1.0",
            "truncation": {"direction": "Right", "max_length": 512, "strategy": "LongestFirst"},
            "padding": {"direction": "Right", "pad_id": 0, "pad_token": "<|pad|>", "pad_type_id": 0},
            "model": {
                "type": "BPE",
                "dropout": None,
                "unk_token": "<|unk|>",
                "continuing_subword_prefix": "",
                "end_of_word_suffix": "",
                "fuse_unk": False,
                "vocab": vocab,
                "merges": []
            }
        }

        # Save to temporary file
        import tempfile
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
            json.dump(tokenizer_json, f)
            temp_path = f.name

        # Load the tokenizer
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=temp_path)
        tokenizer.pad_token = "<|pad|>"
        tokenizer.eos_token = "<|endoftext|>"
        tokenizer.bos_token = "<|startoftext|>"

        # Clean up temp file
        os.unlink(temp_path)

        print("✅ Created absolute minimal tokenizer")
        return tokenizer
    except Exception as e:
        print(f"⚠️ Absolute minimal tokenizer failed: {str(e)[:100]}...")

    # Final fallback: return None to signal failure
    print("❌ All tokenizer loading strategies failed")
    return None

# ─── 2. High-Performance Dataset Loading ─────────────────────────────────────
def load_and_fix_dataset_parallel():
    """Load dataset with parallel processing"""
    print("📥 Loading dataset...")

    # Try multiple sources
    datasets_sources = [
        "huihui-ai/Guilherme34_uncensor-v2",
        "zxc4wewewe/offsec",
    ]

    for dataset_name in datasets_sources:
        try:
            print(f"🔄 Trying to load: {dataset_name}")
            dataset = load_dataset(dataset_name, streaming=False)  # Non-streaming for better control
            print(f"✅ Successfully loaded: {dataset_name}")

            # Ensure we have proper splits
            if "train" not in dataset and "test" not in dataset:
                # Convert single split to train/test
                keys = list(dataset.keys())
                if keys:
                    main_split = dataset[keys[0]]
                    dataset = main_split.train_test_split(test_size=0.1, seed=42)
                else:
                    raise ValueError("No valid splits found")

            return dataset
        except Exception as e:
            print(f"⚠️ Failed to load {dataset_name}: {str(e)[:100]}...")

    # Create minimal dummy dataset
    print("🔄 Creating dummy dataset for testing...")
    dummy_data = {
        "train": [
            {"prompt": "What is cybersecurity?", "response": "Cybersecurity involves protecting computer systems."},
            {"prompt": "How to prevent hacking?", "response": "Use strong passwords and keep software updated."},
            {"prompt": "What is encryption?", "response": "Encryption converts data into coded format for protection."},
        ] * 10,  # Repeat for more samples
        "test": [
            {"prompt": "What is a firewall?", "response": "A firewall monitors and controls network traffic."},
        ] * 5,
    }

    dataset = DatasetDict({
        split: Dataset.from_list(data)
        for split, data in dummy_data.items()
    })

    print("✅ Created dummy dataset")
    return dataset

# ─── 3. Ultra-Fast Tokenization with Error Handling ──────────────────────────
def parallel_tokenize_function(examples, tokenizer):
    """Ultra-fast tokenization with comprehensive error handling"""
    try:
        # Format: Prompt\n\nResponse\n
        full_texts = [
            f"{prompt}\n\n{response}{tokenizer.eos_token if hasattr(tokenizer, 'eos_token') else ''}"
            for prompt, response in zip(examples["prompt"], examples["response"])
        ]

        # Ultra-fast tokenization
        result = tokenizer(
            full_texts,
            truncation=True,
            max_length=MAX_LENGTH,
            padding=False,  # Dynamic padding
            return_tensors=None,
            verbose=False
        )

        # Labels for causal LM
        result["labels"] = [
            [-100 if token_id == tokenizer.pad_token_id else token_id for token_id in labels]
            if hasattr(tokenizer, 'pad_token_id') else labels
            for labels in result["input_ids"]
        ]

        return result
    except Exception as e:
        print(f"⚠️ Tokenization batch failed: {str(e)[:100]}...")
        # Return minimal valid result
        dummy_result = {
            "input_ids": [[1, 2, 3]] * len(examples["prompt"]),
            "attention_mask": [[1, 1, 1]] * len(examples["prompt"]),
            "labels": [[1, 2, 3]] * len(examples["prompt"]),
        }
        return dummy_result

# ─── 4. Memory-Efficient Dataset Processing ──────────────────────────────────
def process_dataset_efficient(dataset, tokenizer):
    """Process dataset with maximum efficiency and error handling"""

    def normalize_example_fast(example):
        """Ultra-fast normalization with fallbacks"""
        if not example:
            return {"prompt": "default prompt", "response": "default response"}

        try:
            # Fast path for standard format
            if "prompt" in example and "response" in example:
                p = str(example.get("prompt", "") or "default prompt")
                r = str(example.get("response", "") or "default response")
                return {"prompt": p.strip() or "default prompt", "response": r.strip() or "default response"}

            # Handle messages format
            if "messages" in example and isinstance(example["messages"], list):
                prompt, response = "", ""
                for msg in example["messages"]:
                    if isinstance(msg, dict):
                        role, content = str(msg.get("role", "")), str(msg.get("content", ""))
                        if role.lower() in ["user", "human"]:
                            prompt = content
                        elif role.lower() in ["assistant", "bot"]:
                            response = content
                return {"prompt": prompt or "default prompt", "response": response or "default response"}

            # Ultimate fallback
            text = str(example.get("text", example.get("content", "default text")))
            if "Assistant:" in text:
                parts = text.split("Assistant:", 1)
                return {"prompt": parts[0].replace("User:", "").strip() or "default prompt",
                        "response": parts[1].strip() or "default response"}

            return {"prompt": text[:200] or "default prompt",
                    "response": (text[-200:] if len(text) > 200 else text) or "default response"}
        except Exception:
            return {"prompt": "default prompt", "response": "default response"}

    print("⚡ Processing dataset efficiently...")

    # Process with error handling
    processed_splits = {}
    for split_name in dataset.keys():
        if hasattr(dataset[split_name], '__len__') and len(dataset[split_name]) > 0:
            try:
                print(f"🔄 Processing {split_name} split ({len(dataset[split_name])} samples)...")

                # Normalize with error handling
                normalized = dataset[split_name].map(
                    normalize_example_fast,
                    remove_columns=dataset[split_name].column_names if dataset[split_name].column_names else [],
                    num_proc=1,  # Conservative setting
                    desc=f"Normalizing {split_name}"
                )

                # Tokenize with error handling
                tokenized = normalized.map(
                    lambda x: parallel_tokenize_function(x, tokenizer),
                    batched=True,
                    batch_size=min(BATCH_SIZE_TOKENIZATION, len(normalized) // 4 + 1),
                    num_proc=1,  # Conservative setting
                    remove_columns=["prompt", "response"],
                    desc=f"Tokenizing {split_name}",
                    load_from_cache_file=False
                )

                processed_splits[split_name] = tokenized
                print(f"✅ {split_name}: {len(tokenized)} samples processed")

            except Exception as e:
                print(f"⚠️ Error processing {split_name}: {str(e)[:100]}...")
                # Create minimal dataset
                try:
                    dummy_tokens = tokenizer("test\n\ntest response", return_tensors=None)
                    dummy_tokens["labels"] = dummy_tokens["input_ids"].copy()
                    processed_splits[split_name] = Dataset.from_list([dummy_tokens] * min(10, len(dataset[split_name])))
                    print(f"✅ Created minimal {split_name} dataset")
                except:
                    # Absolute fallback
                    processed_splits[split_name] = Dataset.from_list([
                        {"input_ids": [1, 2, 3], "attention_mask": [1, 1, 1], "labels": [1, 2, 3]}
                    ] * 5)

    return DatasetDict(processed_splits) if processed_splits else None

# ─── 5. Optimized Model Loading ──────────────────────────────────────────────
def load_model_optimized(model_name, tokenizer):
    """Load model with maximum optimization and fallbacks"""
    print("🧠 Loading model with optimizations...")

    # Determine if we should use 8-bit loading
    use_8bit = psutil.virtual_memory().total < 16 * (1024**3)  # 8-bit if < 16GB RAM
    print(f"⚙️ 8-bit loading: {use_8bit} (RAM: {psutil.virtual_memory().total // (1024**3)}GB)")

    # Try multiple loading strategies
    loading_strategies = [
        {
            "name": "Primary (optimized)",
            "params": {
                "torch_dtype": torch.float16 if torch.cuda.is_available() else torch.float32,
                "device_map": "auto",
                "trust_remote_code": True,
                "low_cpu_mem_usage": True,
                "load_in_8bit": use_8bit,
            }
        },
        {
            "name": "Secondary (basic)",
            "params": {
                "device_map": "auto",
                "trust_remote_code": False,
                "low_cpu_mem_usage": True,
            }
        },
        {
            "name": "Fallback (minimal)",
            "params": {
                "low_cpu_mem_usage": True,
            }
        }
    ]

    for strategy in loading_strategies:
        try:
            print(f"🔄 Trying {strategy['name']} loading...")
            model = AutoModelForCausalLM.from_pretrained(model_name, **strategy["params"])

            # Resize embeddings if tokenizer is available
            if tokenizer:
                try:
                    model.resize_token_embeddings(len(tokenizer))
                    print("✅ Resized model embeddings to match tokenizer")
                except Exception as e:
                    print(f"⚠️ Could not resize embeddings: {str(e)[:50]}...")

            print(f"✅ Model loaded successfully with {strategy['name']}")
            return model
        except Exception as e:
            print(f"⚠️ {strategy['name']} failed: {str(e)[:100]}...")

    # Emergency fallback - create a minimal model
    print("🔄 Creating minimal model fallback...")
    try:
        from transformers import GPT2LMHeadModel
        model = GPT2LMHeadModel.from_pretrained("gpt2")
        if tokenizer:
            model.resize_token_embeddings(len(tokenizer))
        print("✅ Created minimal model fallback")
        return model
    except Exception as e:
        print(f"❌ All model loading strategies failed: {str(e)[:100]}...")
        return None

# ─── 6. Ultra-Fast Training Setup ────────────────────────────────────────────
def setup_ultra_fast_training(model, tokenizer, tokenized_dataset):
    """Setup training with maximum performance"""

    if not model or not tokenizer or not tokenized_dataset:
        print("❌ Cannot setup training - missing components")
        return None

    print("⚙️ Setting up ultra-fast training...")

    # Ensure we have data for training
    try:
        train_dataset = tokenized_dataset.get("train")
        eval_dataset = tokenized_dataset.get("test") or tokenized_dataset.get("train")

        if not train_dataset or len(train_dataset) == 0:
            print("❌ No training data available")
            return None

        # Limit dataset size for testing
        max_samples = 100
        if len(train_dataset) > max_samples:
            train_dataset = train_dataset.select(range(max_samples))
        if eval_dataset and len(eval_dataset) > max_samples // 10:
            eval_dataset = eval_dataset.select(range(min(max_samples // 10, len(eval_dataset))))
    except Exception as e:
        print(f"⚠️ Dataset preparation error: {str(e)[:100]}...")
        return None

    # Optimized training arguments
    training_args = TrainingArguments(
        output_dir=OUTPUT_DIR,

        # Conservative training settings for stability
        num_train_epochs=EPOCHS,
        per_device_train_batch_size=BATCH_SIZE,
        per_device_eval_batch_size=BATCH_SIZE,
        gradient_accumulation_steps=GRADIENT_ACCUMULATION,

        # Learning rate and schedule
        learning_rate=LEARNING_RATE,
        weight_decay=0.01,
        warmup_ratio=0.1,
        lr_scheduler_type="linear",

        # Logging and saving
        logging_dir=f"{OUTPUT_DIR}/logs",
        logging_steps=LOGGING_STEPS,

        save_steps=SAVE_STEPS,
        save_total_limit=1,

        # Evaluation
        eval_strategy="steps" if eval_dataset else "no",
        eval_steps=EVAL_STEPS if eval_dataset else None,
        load_best_model_at_end=False,  # Disable for stability

        # Performance settings
        fp16=torch.cuda.is_available(),
        bf16=False,
        dataloader_num_workers=1,  # Conservative setting
        dataloader_pin_memory=False,
        remove_unused_columns=False,

        # Memory optimization
        optim="adamw_torch",
        dataloader_drop_last=True,
        gradient_checkpointing=True,

        # Reporting
        report_to="none",
        run_name="stable_training",

        # Speed optimizations
        tf32=False,
    )

    # Data collator
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer,
        mlm=False,
        pad_to_multiple_of=8,
    )

    # Create trainer
    try:
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset if eval_dataset else None,
            data_collator=data_collator,
            processing_class=tokenizer,
            callbacks=[]
        )
        print("✅ Training setup completed successfully")
        return trainer
    except Exception as e:
        print(f"❌ Failed to create trainer: {str(e)[:100]}...")
        return None

# ─── 7. Main Execution Pipeline ──────────────────────────────────────────────
def main():
    """Main execution pipeline with maximum robustness"""
    print("🚀 STARTING ROBUST TRAINING PIPELINE")
    print(f"🔧 Workers: {NUM_WORKERS} | Batch Size: {BATCH_SIZE}")

    # 1. Load tokenizer with comprehensive fallback
    print("\n🔤 LOADING TOKENIZER WITH FALLBACKS...")
    tokenizer = load_tokenizer_robust(MODEL_NAME)

    if tokenizer is None:
        print("❌ CRITICAL: Could not load any tokenizer. Exiting.")
        return None

    print("✅ Tokenizer loaded successfully")
    print(f"   Vocabulary size: {len(tokenizer.get_vocab()) if hasattr(tokenizer, 'get_vocab') else 'unknown'}")
    print(f"   Special tokens: {tokenizer.special_tokens_map if hasattr(tokenizer, 'special_tokens_map') else 'none'}")

    # 2. Load dataset
    print("\n📥 LOADING DATASET...")
    dataset = load_and_fix_dataset_parallel()

    # 3. Process dataset efficiently
    print("\n⚡ PROCESSING DATASET...")
    tokenized_dataset = process_dataset_efficient(dataset, tokenizer)

    if tokenized_dataset is None:
        print("❌ Dataset processing failed completely")
        return None

    # 4. Load model with optimizations
    print("\n🧠 LOADING MODEL...")
    model = load_model_optimized(MODEL_NAME, tokenizer)

    if model is None:
        print("❌ Model loading failed completely")
        return None

    # 5. Setup training
    print("\n⚙️ SETTING UP TRAINING...")
    trainer = setup_ultra_fast_training(model, tokenizer, tokenized_dataset)

    if trainer is None:
        print("❌ Training setup failed")
        return None

    # 6. Start training
    print("\n🏃 STARTING TRAINING...")
    try:
        train_result = trainer.train()
        print("✅ TRAINING COMPLETED SUCCESSFULLY!")

        # Save everything
        print("\n💾 SAVING MODEL...")
        trainer.save_model(f"{OUTPUT_DIR}/final_model")
        tokenizer.save_pretrained(f"{OUTPUT_DIR}/final_model")
        trainer.save_state()
        print("✅ MODEL SAVED!")

    except Exception as e:
        print(f"⚠️ Training completed with issues: {str(e)[:200]}...")
        # Try emergency save
        try:
            trainer.save_model(f"{OUTPUT_DIR}/emergency_save")
            print("✅ Emergency save completed")
        except Exception as save_error:
            print(f"❌ Emergency save also failed: {str(save_error)[:100]}...")

    # 7. Simple inference test
    print("\n🧪 TESTING MODEL...")
    try:
        def simple_inference(prompt, max_tokens=32):
            try:
                model.eval()
                inputs = tokenizer(
                    f"{prompt}\n\n",
                    return_tensors="pt",
                    truncation=True,
                    max_length=128,
                    padding=True
                )

                if hasattr(model, 'device'):
                    inputs = {k: v.to(model.device) for k, v in inputs.items()}

                with torch.no_grad():
                    outputs = model.generate(
                        **inputs,
                        max_new_tokens=max_tokens,
                        temperature=0.7,
                        do_sample=True,
                        pad_token_id=tokenizer.pad_token_id if hasattr(tokenizer, 'pad_token_id') else 0,
                        eos_token_id=tokenizer.eos_token_id if hasattr(tokenizer, 'eos_token_id') else 1,
                    )

                response = tokenizer.decode(outputs[0], skip_special_tokens=True)
                return response.split('\n\n')[-1][:100] if '\n\n' in response else response[:100]
            except Exception as e:
                return f"[Inference Error: {str(e)[:50]}]"

        # Test with simple prompts
        test_prompts = [
            "What is cybersecurity?",
            "How to stay safe online?",
        ]

        for i, prompt in enumerate(test_prompts):
            result = simple_inference(prompt)
            print(f"📝 Test {i+1}: {result}")

    except Exception as e:
        print(f"⚠️ Inference testing failed: {str(e)[:100]}...")

    print("\n🎉 TRAINING PIPELINE COMPLETED!")
    return trainer

# ─── 8. Execute Everything ───────────────────────────────────────────────────
if __name__ == "__main__":
    print("🏁 STARTING EXECUTION...")

    try:
        trainer = main()
        if trainer:
            print("🎊 SUCCESS: Training pipeline completed!")
        else:
            print("💥 FAILED: Training pipeline could not complete")
    except Exception as e:
        print(f"💥 FATAL ERROR: {str(e)}")
        import traceback
        traceback.print_exc()
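
A note on the tokenization step above: it tokenizes with padding=False, so no pad ids are present when the labels are first built, and DataCollatorForLanguageModeling(mlm=False) derives labels from input_ids again at batch time, replacing padded positions with -100 so they are ignored by the loss. A self-contained sketch of that collator behavior (the stock gpt2 tokenizer is an assumption for illustration, not part of this repo):

from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tok = AutoTokenizer.from_pretrained("gpt2")
tok.pad_token = tok.eos_token  # GPT-2 ships without a pad token

collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)
batch = collator([tok("short"), tok("a somewhat longer example")])
print(batch["labels"])  # padded positions come back as -100 (pad == eos here,
                        # so a real trailing eos would be masked as well)
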
offsec_model/emergency_save/config.json
ADDED
@@ -0,0 +1,36 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 50258,
  "dtype": "bfloat16",
  "eos_token_id": 50256,
  "head_dim": 64,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 16,
  "num_key_value_heads": 8,
  "pad_token_id": 50257,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_parameters": {
    "factor": 32.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 8192,
    "rope_theta": 500000.0,
    "rope_type": "llama3"
  },
  "tie_word_embeddings": true,
  "transformers_version": "5.2.0",
  "use_cache": false,
  "vocab_size": 50259
}
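
This config is informative: the architecture is Llama (the shapes line up with a Llama-3.2-1B-class model: 16 layers, hidden size 2048, 8 KV heads, llama3 RoPE scaling), while the vocabulary is GPT-2-sized — 50259 is GPT-2's 50257 plus the two extra special tokens (<|pad|>, <|startoftext|>) that main.py's Strategy-3 tokenizer workaround adds. A hedged inspection sketch (local path assumed from this commit):

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("offsec_model/emergency_save")
print(cfg.model_type, cfg.num_hidden_layers, cfg.hidden_size)  # llama 16 2048
print(cfg.vocab_size)  # 50259 — GPT-2's 50257 plus the two added special tokens
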
offsec_model/emergency_save/generation_config.json
ADDED
@@ -0,0 +1,15 @@
{
  "bos_token_id": 50258,
  "do_sample": true,
  "eos_token_id": [
    50256,
    128001,
    128008,
    128009
  ],
  "max_length": 131072,
  "pad_token_id": 50257,
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "5.2.0"
}
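
Note the eos_token_id list: 50256 is the GPT-2 end-of-text id, while 128001/128008/128009 look inherited from a Llama-3-style base generation config and lie outside this checkpoint's 50259-token vocabulary. If the stray ids cause trouble at generation time, one possible cleanup (a sketch, not part of the commit; local path assumed):

from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("offsec_model/emergency_save")
ids = gen.eos_token_id if isinstance(gen.eos_token_id, list) else [gen.eos_token_id]
gen.eos_token_id = [i for i in ids if i < 50259]  # keep only in-vocab eos ids
print(gen.eos_token_id)  # expected [50256]
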
offsec_model/emergency_save/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:17c00be061d2370bea2a5766be8ef198a397aebb2fbf028120df35544aab5bc4
size 2152169848
offsec_model/emergency_save/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
offsec_model/emergency_save/tokenizer_config.json
ADDED
@@ -0,0 +1,12 @@
{
  "add_prefix_space": false,
  "backend": "tokenizers",
  "bos_token": "<|startoftext|>",
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "is_local": false,
  "model_max_length": 1024,
  "pad_token": "<|pad|>",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}
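
The pad and bos entries here (<|pad|>, <|startoftext|> on top of the stock GPT-2 special tokens) are exactly what main.py's Strategy-3 GPT-2 workaround adds, which suggests that fallback path produced this save. A quick hedged check (local path assumed from this commit):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("offsec_model/emergency_save")
print(tok.pad_token, tok.bos_token, tok.eos_token)  # <|pad|> <|startoftext|> <|endoftext|>
print(len(tok))  # expected 50259, matching the emergency-save config's vocab_size
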
offsec_model/emergency_save/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8fd7cb3878eb2fdddb36c1497aedf53b7b1f8d819f9ae5381cd6e224a52eaded
size 5201
offsec_model/final_model/config.json
ADDED
@@ -0,0 +1,36 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 50256,
  "dtype": "float32",
  "eos_token_id": 50256,
  "head_dim": 64,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 16,
  "num_key_value_heads": 8,
  "pad_token_id": 50256,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_parameters": {
    "factor": 32.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 8192,
    "rope_theta": 500000.0,
    "rope_type": "llama3"
  },
  "tie_word_embeddings": true,
  "transformers_version": "5.2.0",
  "use_cache": false,
  "vocab_size": 50257
}
offsec_model/final_model/generation_config.json
ADDED
@@ -0,0 +1,15 @@
{
  "bos_token_id": 50256,
  "do_sample": true,
  "eos_token_id": [
    50256,
    128001,
    128008,
    128009
  ],
  "max_length": 131072,
  "pad_token_id": 50256,
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "5.2.0"
}
offsec_model/final_model/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c039ccc714fc8d9c09e3bc21d41cc887fbd54a6eb8c8a19d8d4e50eb871dd51e
size 4304306480
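
The two weight files are mutually consistent: this float32 final_model (4,304,306,480 bytes) is almost exactly twice the bfloat16 emergency_save (2,152,169,848 bytes), and both agree with a back-of-envelope parameter count from the saved configs. A small check (norm weights and the safetensors header are omitted, so the totals are approximate; the two-token vocab difference between the saves is negligible):

# Rough parameter count implied by the saved config
h, layers, ffn, vocab, kv = 2048, 16, 8192, 50257, 512   # kv = num_key_value_heads * head_dim
per_layer = 2 * h * h + 2 * h * kv + 3 * h * ffn          # q/o proj + k/v proj + gate/up/down
total = vocab * h + layers * per_layer                    # tied embeddings counted once
print(total)                 # ~1.08e9 parameters
print(4 * total, 2 * total)  # ~4.30e9 bytes in fp32, ~2.15e9 in bf16
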
offsec_model/final_model/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
offsec_model/final_model/tokenizer_config.json
ADDED
@@ -0,0 +1,12 @@
{
  "add_prefix_space": false,
  "backend": "tokenizers",
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "is_local": false,
  "model_max_length": 1024,
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}
offsec_model/final_model/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9acb38bbe140170e14553c167a978d8012169c83bec71321047d6e95f8f5833d
size 5265
offsec_model/trainer_state.json
ADDED
@@ -0,0 +1,42 @@
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": null,
  "eval_steps": 500,
  "global_step": 0,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [],
  "logging_steps": 500,
  "max_steps": 0,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 0,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0,
  "train_batch_size": null,
  "trial_name": null,
  "trial_params": null
}
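
The state records a run that never advanced: global_step and max_steps are 0, epoch is null, and log_history is empty. The stored EarlyStoppingCallback (patience 2) matches the app.py Trainer configuration rather than the new main.py, which passes callbacks=[]. A quick hedged check (local path assumed from this commit):

import json

with open("offsec_model/trainer_state.json") as f:
    state = json.load(f)
print(state["global_step"], state["max_steps"], state["log_history"])  # 0 0 []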