Bmcbob76 committed
Commit 17ff807 · verified · Parent: 4d66a08

Upload pod_train.py with huggingface_hub

Files changed (1): pod_train.py (+7 -7)
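
The commit message says the file was pushed with huggingface_hub. For reference, a minimal sketch of such an upload call (the target repo id is not shown on this page, so the value below is a placeholder):

```python
from huggingface_hub import upload_file

upload_file(
    path_or_fileobj="pod_train.py",   # local file to push
    path_in_repo="pod_train.py",      # destination path inside the repo
    repo_id="Bmcbob76/your-repo",     # placeholder; the actual repo id is not shown here
    commit_message="Upload pod_train.py with huggingface_hub",
)
```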
pod_train.py CHANGED
@@ -18,12 +18,12 @@ print(f"Data: {HF_DATA_REPO}")
 print(f"Output: {HF_MODEL_REPO}")
 start_time = time.time()
 
-# Install dependencies — upgrade torch first (RunPod image has 2.4.0, need 2.6+ for set_submodule)
+# Install dependencies — pin exact compatible versions for torch 2.4.x (RunPod image)
+# transformers<4.46 avoids set_submodule (needs torch 2.5+)
+# trl<0.12 avoids processing_class kwarg (needs transformers 4.46+)
 subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
-                       "torch>=2.6.0", "--index-url", "https://download.pytorch.org/whl/cu124"])
-subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
-                       "transformers", "peft", "datasets", "accelerate", "bitsandbytes",
-                       "huggingface_hub", "trl", "runpod"])
+                       "transformers==4.45.2", "peft==0.12.0", "datasets",
+                       "accelerate", "bitsandbytes", "huggingface_hub", "trl==0.11.4", "runpod"])
 
 from datasets import load_dataset
 from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig
@@ -74,8 +74,8 @@ model.print_trainable_parameters()
 # Training args
 output_dir = f"/workspace/{ADAPTER_NAME}-lora"
 num_examples = len(dataset)
-batch_size = 4
-grad_accum = 4  # effective batch = 16
+batch_size = 2
+grad_accum = 8  # effective batch = 16
 num_epochs = 3 if num_examples < 5000 else (2 if num_examples < 20000 else 1)
 warmup = min(100, num_examples // (batch_size * grad_accum))
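The first hunk drops the torch upgrade and instead pins libraries that match the torch 2.4.x already in the RunPod image: transformers 4.45.2 predates the `set_submodule` call (which needs torch 2.5+), and trl 0.11.4 predates the `processing_class` kwarg (which needs transformers 4.46+). A minimal sketch of a guard one could run before the install so a changed base image fails fast rather than erroring mid-training; the check itself is illustrative and not part of the committed script:

```python
import sys
import subprocess
from importlib.metadata import version  # stdlib since Python 3.8

# Pins chosen for a torch 2.4.x base image (see the commit's comments).
PINS = ["transformers==4.45.2", "peft==0.12.0", "trl==0.11.4"]

torch_ver = version("torch")  # e.g. "2.4.0+cu124"
major, minor = (int(p) for p in torch_ver.split("+")[0].split(".")[:2])
if (major, minor) != (2, 4):
    sys.exit(f"Base image ships torch {torch_ver}; these pins target 2.4.x. "
             "Re-derive compatible versions before training.")

subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
                       *PINS, "datasets", "accelerate", "bitsandbytes",
                       "huggingface_hub", "runpod"])
```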
 
 
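The second hunk halves the per-device batch and doubles gradient accumulation, so the effective batch stays 2 × 8 = 16 while peak activation memory roughly halves, a common remedy for CUDA out-of-memory errors on smaller GPUs. A sketch of how these values feed the training configuration, using the heuristics visible in the diff context (the TrainingArguments fields shown are standard transformers API; any other arguments the script passes are not visible in this commit):

```python
from transformers import TrainingArguments

num_examples = 12_000  # stand-in for len(dataset); not shown in this commit
batch_size = 2         # per-device micro-batch
grad_accum = 8         # effective batch = 2 * 8 = 16
num_epochs = 3 if num_examples < 5000 else (2 if num_examples < 20000 else 1)
warmup = min(100, num_examples // (batch_size * grad_accum))  # min(100, 750) = 100

args = TrainingArguments(
    output_dir="/workspace/example-lora",  # f"/workspace/{ADAPTER_NAME}-lora" in the script
    per_device_train_batch_size=batch_size,
    gradient_accumulation_steps=grad_accum,
    num_train_epochs=num_epochs,
    warmup_steps=warmup,
)
```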