PGCRYPT committed on
Commit
98e9d77
·
verified ·
1 Parent(s): cf666c4

Delete setup_runpod.sh

Browse files
Files changed (1) hide show
  1. setup_runpod.sh +0 -208
setup_runpod.sh DELETED
@@ -1,208 +0,0 @@
1
#!/bin/bash
# =============================================================================
# Blender Documentation Fine-tuning Setup for RunPod
# Model: Qwen 3.5 27B | Training: FP16
#
# Installs dependencies, verifies the GPU, and generates train_blender.py.
# =============================================================================

# Strict mode: abort on command failure (-e), on unset variables (-u),
# and when any stage of a pipeline fails (-o pipefail).
set -euo pipefail

echo "=========================================="
echo "Blender Qwen 3.5-27B Fine-tuning Setup"
echo "=========================================="
13
# =============================================================================
# Step 1: Install Dependencies (exact versions from Unsloth Colab)
# =============================================================================
echo "[1/5] Installing dependencies..."

# uv acts as a faster drop-in replacement for pip in the steps below.
pip install --upgrade uv

# PyTorch stack plus Unsloth installed straight from GitHub.
uv pip install -qqq \
  "torch==2.8.0" \
  "triton>=3.3.0" \
  "numpy" \
  "pillow" \
  "torchvision" \
  "bitsandbytes" \
  "unsloth_zoo[base] @ git+https://github.com/unslothai/unsloth-zoo" \
  "unsloth[base] @ git+https://github.com/unslothai/unsloth"

# Pinned training libraries and Unsloth extras.
# NOTE(review): confirm these pins are mutually compatible
# (transformers==5.2.0 alongside trl==0.22.2) — TODO verify.
uv pip install --upgrade --no-deps tokenizers
uv pip install -qqq trl==0.22.2
uv pip install -qqq unsloth unsloth_zoo
uv pip install -qqq transformers==5.2.0

# Attention / convolution kernel packages.
uv pip install --no-build-isolation flash-linear-attention
uv pip install -qqq causal_conv1d==1.6.0
uv pip install -qqq xformers==0.0.32.post2

echo "✓ Dependencies installed"
# =============================================================================
# Step 2: Verify GPU
# =============================================================================
echo "[2/5] Verifying GPU..."

# Under `set -e` a broken driver or torch install aborts the setup here,
# before any time is spent generating the training script.
nvidia-smi
python3 -c "import torch; print(f'CUDA: {torch.cuda.is_available()}'); print(f'GPUs: {torch.cuda.device_count()}'); print(f'PyTorch: {torch.__version__}')"
# =============================================================================
# Step 3: Create Training Script
# =============================================================================
echo "[3/5] Creating training script..."

# The quoted delimiter ('TRAINEOF') stops the shell from expanding anything
# inside the heredoc, so the Python source is written out verbatim.
cat > train_blender.py << 'TRAINEOF'
#!/usr/bin/env python3
"""
Blender Documentation Fine-tuning
Model: Qwen 3.5 27B (unsloth version - optimized)
Training: FP16 (full precision)

Expects train.jsonl / valid.jsonl in the working directory; writes
checkpoints and the final model under ./blender_qwen_output.
"""

import torch
from datasets import load_dataset
from unsloth import FastLanguageModel
from trl import SFTTrainer, SFTConfig

# =============================================================================
# Configuration
# =============================================================================
MODEL_NAME = "unsloth/Qwen3.5-27B"
MAX_SEQ_LENGTH = 2048
LEARNING_RATE = 2e-4
NUM_EPOCHS = 2
BATCH_SIZE = 2
GRADIENT_ACCUMULATION = 4
OUTPUT_DIR = "./blender_qwen_output"

# =============================================================================
# Load Model (FP16 - no quantization for best quality)
# =============================================================================
print("Loading model in FP16...")
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=MODEL_NAME,
    max_seq_length=MAX_SEQ_LENGTH,
    load_in_4bit=False,       # FP16 - best quality!
    dtype=torch.float16,      # explicit torch dtype (was the string "float16")
)

# =============================================================================
# Add LoRA Adapters
# =============================================================================
print("Adding LoRA adapters...")
model = FastLanguageModel.get_peft_model(
    model,
    r=64,
    lora_alpha=64,
    lora_dropout=0,
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj"
    ],
    bias="none",
    task_type="CAUSAL_LM",
)

# =============================================================================
# Load Dataset
# =============================================================================
print("Loading dataset...")
dataset = load_dataset("json", data_files={
    "train": "train.jsonl",
    "valid": "valid.jsonl"
})

print(f"Train samples: {len(dataset['train'])}")
print(f"Valid samples: {len(dataset['valid'])}")

# =============================================================================
# Training Arguments (FP16)
# =============================================================================
# trl >= 0.13 moved dataset_text_field / sequence length onto SFTConfig and
# removed them (and the 'tokenizer' kwarg) from SFTTrainer, so with the
# pinned trl==0.22.2 the previous TrainingArguments-based call would crash.
training_args = SFTConfig(
    output_dir=OUTPUT_DIR,
    num_train_epochs=NUM_EPOCHS,
    per_device_train_batch_size=BATCH_SIZE,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=GRADIENT_ACCUMULATION,
    learning_rate=LEARNING_RATE,
    fp16=True,  # FP16 training
    bf16=False,
    save_strategy="epoch",
    eval_strategy="epoch",
    save_total_limit=2,
    logging_steps=10,
    warmup_steps=100,
    weight_decay=0.01,
    lr_scheduler_type="cosine",
    seed=42,
    dataloader_num_workers=4,
    report_to="tensorboard",
    load_best_model_at_end=True,
    # SFT-specific options:
    # NOTE(review): assumes the "conversations" column is plain text;
    # if it holds chat-message lists, pre-format with the chat template.
    dataset_text_field="conversations",
    max_length=MAX_SEQ_LENGTH,
)

# =============================================================================
# Initialize Trainer
# =============================================================================
trainer = SFTTrainer(
    model=model,
    processing_class=tokenizer,  # trl 0.22 name for the old 'tokenizer' kwarg
    train_dataset=dataset["train"],
    eval_dataset=dataset["valid"],
    args=training_args,
)

# =============================================================================
# Train!
# =============================================================================
print("\n" + "="*50)
print("STARTING TRAINING - FP16 Mode")
print("="*50)
print(f"Model: {MODEL_NAME}")
print(f"Trainable params: {model.num_parameters(only_trainable=True):,}")
print(f"Epochs: {NUM_EPOCHS}")
print(f"Batch size: {BATCH_SIZE} x {GRADIENT_ACCUMULATION} = {BATCH_SIZE * GRADIENT_ACCUMULATION}")
print("="*50 + "\n")

trainer.train()

# =============================================================================
# Save Final Model
# =============================================================================
print("\nSaving model...")
trainer.save_model(f"{OUTPUT_DIR}/final")
tokenizer.save_pretrained(f"{OUTPUT_DIR}/final")

print("\n" + "="*50)
print("TRAINING COMPLETE!")
print("="*50)
print(f"Model saved to: {OUTPUT_DIR}/final")
TRAINEOF

chmod +x train_blender.py
echo "✓ Training script created"
-
190
# =============================================================================
# Step 4: Instructions
# =============================================================================
echo "[4/5] Setup complete!"

# Quoted delimiter keeps the $ in the cost estimate literal; output is
# identical to the previous echo-per-line version.
cat << 'EOF'

==========================================
NEXT STEPS:
==========================================

1. Upload train.jsonl and valid.jsonl to RunPod (via JupyterLab drag & drop)
2. Run training with:
 python train_blender.py

3. Estimated time: 3-4 hours
 Estimated cost: ~$15-25

Monitor with: tensorboard --logdir ./blender_qwen_output

==========================================
EOF