davanstrien HF Staff Claude Opus 4.6 committed on
Commit
67f897d
·
1 Parent(s): 074eb92

Update repo URLs from uv-scripts/unsloth-jobs to unsloth/jobs

Browse files

Preparing for move to official unsloth org. Also includes
previously uncommitted formatting fixes and metadata_update additions.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

Files changed (5) hide show
  1. README.md +5 -5
  2. continued-pretraining.py +10 -4
  3. sft-gemma3-vlm.py +9 -3
  4. sft-lfm2.5.py +38 -11
  5. sft-qwen3-vl.py +3 -3
README.md CHANGED
@@ -80,7 +80,7 @@ Use `--text-column` if your column has a different name.
80
  View available options for any script:
81
 
82
  ```bash
83
- uv run https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/sft-lfm2.5.py --help
84
  ```
85
 
86
  ### LLM fine-tuning
@@ -89,7 +89,7 @@ Fine-tune [LFM2.5-1.2B-Instruct](https://huggingface.co/LiquidAI/LFM2.5-1.2B-Ins
89
 
90
  ```bash
91
  hf jobs uv run \
92
- https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/sft-lfm2.5.py \
93
  --flavor a10g-small --secrets HF_TOKEN --timeout 4h \
94
  -- --dataset mlabonne/FineTome-100k \
95
  --num-epochs 1 \
@@ -101,7 +101,7 @@ hf jobs uv run \
101
 
102
  ```bash
103
  hf jobs uv run \
104
- https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/sft-qwen3-vl.py \
105
  --flavor a100-large --secrets HF_TOKEN \
106
  -- --dataset your-username/dataset \
107
  --trackio-space your-username/trackio \
@@ -112,7 +112,7 @@ hf jobs uv run \
112
 
113
  ```bash
114
  hf jobs uv run \
115
- https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/continued-pretraining.py \
116
  --flavor a100-large --secrets HF_TOKEN \
117
  -- --dataset your-username/domain-corpus \
118
  --text-column content \
@@ -124,7 +124,7 @@ hf jobs uv run \
124
 
125
  ```bash
126
  hf jobs uv run \
127
- https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/sft-lfm2.5.py \
128
  --flavor a10g-small --secrets HF_TOKEN \
129
  -- --dataset mlabonne/FineTome-100k \
130
  --trackio-space your-username/trackio \
 
80
  View available options for any script:
81
 
82
  ```bash
83
+ uv run https://huggingface.co/datasets/unsloth/jobs/raw/main/sft-lfm2.5.py --help
84
  ```
85
 
86
  ### LLM fine-tuning
 
89
 
90
  ```bash
91
  hf jobs uv run \
92
+ https://huggingface.co/datasets/unsloth/jobs/raw/main/sft-lfm2.5.py \
93
  --flavor a10g-small --secrets HF_TOKEN --timeout 4h \
94
  -- --dataset mlabonne/FineTome-100k \
95
  --num-epochs 1 \
 
101
 
102
  ```bash
103
  hf jobs uv run \
104
+ https://huggingface.co/datasets/unsloth/jobs/raw/main/sft-qwen3-vl.py \
105
  --flavor a100-large --secrets HF_TOKEN \
106
  -- --dataset your-username/dataset \
107
  --trackio-space your-username/trackio \
 
112
 
113
  ```bash
114
  hf jobs uv run \
115
+ https://huggingface.co/datasets/unsloth/jobs/raw/main/continued-pretraining.py \
116
  --flavor a100-large --secrets HF_TOKEN \
117
  -- --dataset your-username/domain-corpus \
118
  --text-column content \
 
124
 
125
  ```bash
126
  hf jobs uv run \
127
+ https://huggingface.co/datasets/unsloth/jobs/raw/main/sft-lfm2.5.py \
128
  --flavor a10g-small --secrets HF_TOKEN \
129
  -- --dataset mlabonne/FineTome-100k \
130
  --trackio-space your-username/trackio \
continued-pretraining.py CHANGED
@@ -19,7 +19,7 @@ Run locally (if you have a GPU):
19
 
20
  Run on HF Jobs:
21
  hf jobs uv run \
22
- https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/continued-pretraining.py \
23
  --flavor a100-large --secrets HF_TOKEN \
24
  -- --max-steps 1000 --output-repo your-username/qwen-latin
25
 
@@ -56,7 +56,7 @@ def check_cuda():
56
  logger.error("CUDA is not available. This script requires a GPU.")
57
  logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
58
  logger.error(
59
- " hf jobs uv run https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/continued-pretraining.py --flavor a100-large ..."
60
  )
61
  sys.exit(1)
62
  logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
@@ -82,7 +82,7 @@ Examples:
82
 
83
  # HF Jobs with monitoring
84
  hf jobs uv run \\
85
- https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/continued-pretraining.py \\
86
  --flavor a100-large --secrets HF_TOKEN \\
87
  -- --max-steps 1000 --trackio-space username/trackio --output-repo username/qwen-latin
88
  """,
@@ -339,6 +339,12 @@ def main():
339
  model.push_to_hub(args.output_repo, tokenizer=tokenizer)
340
  print(f"Model available at: https://huggingface.co/{args.output_repo}")
341
 
 
 
 
 
 
 
342
  # Quick inference test
343
  print("\n" + "=" * 70)
344
  print("Quick inference test:")
@@ -389,7 +395,7 @@ if __name__ == "__main__":
389
  print("\nHF Jobs example:")
390
  print("\n hf jobs uv run \\")
391
  print(
392
- " https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/continued-pretraining.py \\"
393
  )
394
  print(" --flavor a100-large --secrets HF_TOKEN \\")
395
  print(" -- --max-steps 1000 --output-repo your-username/qwen-latin")
 
19
 
20
  Run on HF Jobs:
21
  hf jobs uv run \
22
+ https://huggingface.co/datasets/unsloth/jobs/raw/main/continued-pretraining.py \
23
  --flavor a100-large --secrets HF_TOKEN \
24
  -- --max-steps 1000 --output-repo your-username/qwen-latin
25
 
 
56
  logger.error("CUDA is not available. This script requires a GPU.")
57
  logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
58
  logger.error(
59
+ " hf jobs uv run https://huggingface.co/datasets/unsloth/jobs/raw/main/continued-pretraining.py --flavor a100-large ..."
60
  )
61
  sys.exit(1)
62
  logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
 
82
 
83
  # HF Jobs with monitoring
84
  hf jobs uv run \\
85
+ https://huggingface.co/datasets/unsloth/jobs/raw/main/continued-pretraining.py \\
86
  --flavor a100-large --secrets HF_TOKEN \\
87
  -- --max-steps 1000 --trackio-space username/trackio --output-repo username/qwen-latin
88
  """,
 
339
  model.push_to_hub(args.output_repo, tokenizer=tokenizer)
340
  print(f"Model available at: https://huggingface.co/{args.output_repo}")
341
 
342
+ # Update model card metadata with dataset info
343
+ from huggingface_hub import metadata_update
344
+
345
+ metadata_update(args.output_repo, {"datasets": [args.dataset]}, overwrite=True)
346
+ print(f" Model card updated with dataset: {args.dataset}")
347
+
348
  # Quick inference test
349
  print("\n" + "=" * 70)
350
  print("Quick inference test:")
 
395
  print("\nHF Jobs example:")
396
  print("\n hf jobs uv run \\")
397
  print(
398
+ " https://huggingface.co/datasets/unsloth/jobs/raw/main/continued-pretraining.py \\"
399
  )
400
  print(" --flavor a100-large --secrets HF_TOKEN \\")
401
  print(" -- --max-steps 1000 --output-repo your-username/qwen-latin")
sft-gemma3-vlm.py CHANGED
@@ -23,7 +23,7 @@ Run locally (if you have a GPU):
23
 
24
  Run on HF Jobs:
25
  hf jobs uv run \
26
- https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/sft-gemma3-vlm.py \
27
  --flavor a100-large --secrets HF_TOKEN \
28
  -- --max-steps 500 --output-repo your-username/vlm-finetuned
29
 
@@ -59,7 +59,7 @@ def check_cuda():
59
  logger.error("CUDA is not available. This script requires a GPU.")
60
  logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
61
  logger.error(
62
- " hf jobs uv run https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/sft-gemma3-vlm.py --flavor a100-large ..."
63
  )
64
  sys.exit(1)
65
  logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
@@ -335,6 +335,12 @@ def main():
335
  processor.push_to_hub(args.output_repo)
336
  print(f"Model available at: https://huggingface.co/{args.output_repo}")
337
 
 
 
 
 
 
 
338
  print("\n" + "=" * 70)
339
  print("Done!")
340
  print("=" * 70)
@@ -360,7 +366,7 @@ if __name__ == "__main__":
360
  print("\nHF Jobs example:")
361
  print("\n hf jobs uv run \\")
362
  print(
363
- " https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/sft-gemma3-vlm.py \\"
364
  )
365
  print(" --flavor a100-large --secrets HF_TOKEN \\")
366
  print(" -- --max-steps 500 --output-repo your-username/vlm-finetuned")
 
23
 
24
  Run on HF Jobs:
25
  hf jobs uv run \
26
+ https://huggingface.co/datasets/unsloth/jobs/raw/main/sft-gemma3-vlm.py \
27
  --flavor a100-large --secrets HF_TOKEN \
28
  -- --max-steps 500 --output-repo your-username/vlm-finetuned
29
 
 
59
  logger.error("CUDA is not available. This script requires a GPU.")
60
  logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
61
  logger.error(
62
+ " hf jobs uv run https://huggingface.co/datasets/unsloth/jobs/raw/main/sft-gemma3-vlm.py --flavor a100-large ..."
63
  )
64
  sys.exit(1)
65
  logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
 
335
  processor.push_to_hub(args.output_repo)
336
  print(f"Model available at: https://huggingface.co/{args.output_repo}")
337
 
338
+ # Update model card metadata with dataset info
339
+ from huggingface_hub import metadata_update
340
+
341
+ metadata_update(args.output_repo, {"datasets": [args.dataset]}, overwrite=True)
342
+ print(f" Model card updated with dataset: {args.dataset}")
343
+
344
  print("\n" + "=" * 70)
345
  print("Done!")
346
  print("=" * 70)
 
366
  print("\nHF Jobs example:")
367
  print("\n hf jobs uv run \\")
368
  print(
369
+ " https://huggingface.co/datasets/unsloth/jobs/raw/main/sft-gemma3-vlm.py \\"
370
  )
371
  print(" --flavor a100-large --secrets HF_TOKEN \\")
372
  print(" -- --max-steps 500 --output-repo your-username/vlm-finetuned")
sft-lfm2.5.py CHANGED
@@ -62,9 +62,7 @@ def check_cuda():
62
  if not torch.cuda.is_available():
63
  logger.error("CUDA is not available. This script requires a GPU.")
64
  logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
65
- logger.error(
66
- " hf jobs uv run sft-lfm2.5.py --flavor a10g-small ..."
67
- )
68
  sys.exit(1)
69
  logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
70
 
@@ -233,10 +231,14 @@ def main():
233
  print(f" Base model: {args.base_model}")
234
  print(f" Dataset: {args.dataset}")
235
  print(f" Num samples: {args.num_samples or 'all'}")
236
- print(f" Eval split: {args.eval_split if args.eval_split > 0 else '(disabled)'}")
 
 
237
  print(f" Seed: {args.seed}")
238
  print(f" Training: {duration_str}")
239
- print(f" Batch size: {args.batch_size} x {args.gradient_accumulation} = {args.batch_size * args.gradient_accumulation}")
 
 
240
  print(f" Learning rate: {args.learning_rate}")
241
  print(f" LoRA rank: {args.lora_r}")
242
  print(f" Max seq length: {args.max_seq_length}")
@@ -253,7 +255,9 @@ def main():
253
  # Set Trackio space if provided
254
  if args.trackio_space:
255
  os.environ["TRACKIO_SPACE_ID"] = args.trackio_space
256
- logger.info(f"Trackio dashboard: https://huggingface.co/spaces/{args.trackio_space}")
 
 
257
 
258
  # Import heavy dependencies
259
  from unsloth import FastLanguageModel
@@ -287,7 +291,16 @@ def main():
287
  model = FastLanguageModel.get_peft_model(
288
  model,
289
  r=args.lora_r,
290
- target_modules=["q_proj", "k_proj", "v_proj", "out_proj", "in_proj", "w1", "w2", "w3"],
 
 
 
 
 
 
 
 
 
291
  lora_alpha=args.lora_alpha,
292
  lora_dropout=0,
293
  bias="none",
@@ -421,13 +434,17 @@ def main():
421
  # 4. Train
422
  print(f"\n[4/5] Training for {duration_str}...")
423
  if args.num_epochs:
424
- print(f" (~{steps_per_epoch} steps/epoch, {int(steps_per_epoch * args.num_epochs)} total steps)")
 
 
425
  start = time.time()
426
 
427
  train_result = trainer.train()
428
 
429
  train_time = time.time() - start
430
- total_steps = train_result.metrics.get("train_steps", args.max_steps or steps_per_epoch * args.num_epochs)
 
 
431
  print(f"\nTraining completed in {train_time / 60:.1f} minutes")
432
  print(f" Speed: {total_steps / train_time:.2f} steps/s")
433
 
@@ -447,9 +464,13 @@ def main():
447
  if train_loss:
448
  ratio = eval_loss / train_loss
449
  if ratio > 1.5:
450
- print(f" Warning: Eval loss is {ratio:.1f}x train loss - possible overfitting")
 
 
451
  else:
452
- print(f" Eval/train ratio: {ratio:.2f} - model generalizes well")
 
 
453
  except Exception as e:
454
  print(f" Warning: Final evaluation failed: {e}")
455
  print(" Continuing to save model...")
@@ -475,6 +496,12 @@ def main():
475
  model.push_to_hub(args.output_repo, tokenizer=tokenizer)
476
  print(f"Adapter available at: https://huggingface.co/{args.output_repo}")
477
 
 
 
 
 
 
 
478
  print("\n" + "=" * 70)
479
  print("Done!")
480
  print("=" * 70)
 
62
  if not torch.cuda.is_available():
63
  logger.error("CUDA is not available. This script requires a GPU.")
64
  logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
65
+ logger.error(" hf jobs uv run sft-lfm2.5.py --flavor a10g-small ...")
 
 
66
  sys.exit(1)
67
  logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
68
 
 
231
  print(f" Base model: {args.base_model}")
232
  print(f" Dataset: {args.dataset}")
233
  print(f" Num samples: {args.num_samples or 'all'}")
234
+ print(
235
+ f" Eval split: {args.eval_split if args.eval_split > 0 else '(disabled)'}"
236
+ )
237
  print(f" Seed: {args.seed}")
238
  print(f" Training: {duration_str}")
239
+ print(
240
+ f" Batch size: {args.batch_size} x {args.gradient_accumulation} = {args.batch_size * args.gradient_accumulation}"
241
+ )
242
  print(f" Learning rate: {args.learning_rate}")
243
  print(f" LoRA rank: {args.lora_r}")
244
  print(f" Max seq length: {args.max_seq_length}")
 
255
  # Set Trackio space if provided
256
  if args.trackio_space:
257
  os.environ["TRACKIO_SPACE_ID"] = args.trackio_space
258
+ logger.info(
259
+ f"Trackio dashboard: https://huggingface.co/spaces/{args.trackio_space}"
260
+ )
261
 
262
  # Import heavy dependencies
263
  from unsloth import FastLanguageModel
 
291
  model = FastLanguageModel.get_peft_model(
292
  model,
293
  r=args.lora_r,
294
+ target_modules=[
295
+ "q_proj",
296
+ "k_proj",
297
+ "v_proj",
298
+ "out_proj",
299
+ "in_proj",
300
+ "w1",
301
+ "w2",
302
+ "w3",
303
+ ],
304
  lora_alpha=args.lora_alpha,
305
  lora_dropout=0,
306
  bias="none",
 
434
  # 4. Train
435
  print(f"\n[4/5] Training for {duration_str}...")
436
  if args.num_epochs:
437
+ print(
438
+ f" (~{steps_per_epoch} steps/epoch, {int(steps_per_epoch * args.num_epochs)} total steps)"
439
+ )
440
  start = time.time()
441
 
442
  train_result = trainer.train()
443
 
444
  train_time = time.time() - start
445
+ total_steps = train_result.metrics.get(
446
+ "train_steps", args.max_steps or steps_per_epoch * args.num_epochs
447
+ )
448
  print(f"\nTraining completed in {train_time / 60:.1f} minutes")
449
  print(f" Speed: {total_steps / train_time:.2f} steps/s")
450
 
 
464
  if train_loss:
465
  ratio = eval_loss / train_loss
466
  if ratio > 1.5:
467
+ print(
468
+ f" Warning: Eval loss is {ratio:.1f}x train loss - possible overfitting"
469
+ )
470
  else:
471
+ print(
472
+ f" Eval/train ratio: {ratio:.2f} - model generalizes well"
473
+ )
474
  except Exception as e:
475
  print(f" Warning: Final evaluation failed: {e}")
476
  print(" Continuing to save model...")
 
496
  model.push_to_hub(args.output_repo, tokenizer=tokenizer)
497
  print(f"Adapter available at: https://huggingface.co/{args.output_repo}")
498
 
499
+ # Update model card metadata with dataset info
500
+ from huggingface_hub import metadata_update
501
+
502
+ metadata_update(args.output_repo, {"datasets": [args.dataset]}, overwrite=True)
503
+ print(f" Model card updated with dataset: {args.dataset}")
504
+
505
  print("\n" + "=" * 70)
506
  print("Done!")
507
  print("=" * 70)
sft-qwen3-vl.py CHANGED
@@ -24,7 +24,7 @@ Epoch-based training (recommended for full datasets):
24
 
25
  Run on HF Jobs (1 epoch with eval):
26
  hf jobs uv run \
27
- https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/sft-qwen3-vl.py \
28
  --flavor a100-large --secrets HF_TOKEN --timeout 4h \
29
  -- --num-epochs 1 --eval-split 0.2 --output-repo your-username/vlm-finetuned
30
 
@@ -67,7 +67,7 @@ def check_cuda():
67
  logger.error("CUDA is not available. This script requires a GPU.")
68
  logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
69
  logger.error(
70
- " hf jobs uv run https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/sft-qwen3-vl.py --flavor a100-large ..."
71
  )
72
  sys.exit(1)
73
  logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
@@ -578,7 +578,7 @@ if __name__ == "__main__":
578
  print("\nHF Jobs example (1 epoch with eval):")
579
  print("\n hf jobs uv run \\")
580
  print(
581
- " https://huggingface.co/datasets/uv-scripts/unsloth-jobs/raw/main/sft-qwen3-vl.py \\"
582
  )
583
  print(" --flavor a100-large --secrets HF_TOKEN --timeout 4h \\")
584
  print(
 
24
 
25
  Run on HF Jobs (1 epoch with eval):
26
  hf jobs uv run \
27
+ https://huggingface.co/datasets/unsloth/jobs/raw/main/sft-qwen3-vl.py \
28
  --flavor a100-large --secrets HF_TOKEN --timeout 4h \
29
  -- --num-epochs 1 --eval-split 0.2 --output-repo your-username/vlm-finetuned
30
 
 
67
  logger.error("CUDA is not available. This script requires a GPU.")
68
  logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
69
  logger.error(
70
+ " hf jobs uv run https://huggingface.co/datasets/unsloth/jobs/raw/main/sft-qwen3-vl.py --flavor a100-large ..."
71
  )
72
  sys.exit(1)
73
  logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
 
578
  print("\nHF Jobs example (1 epoch with eval):")
579
  print("\n hf jobs uv run \\")
580
  print(
581
+ " https://huggingface.co/datasets/unsloth/jobs/raw/main/sft-qwen3-vl.py \\"
582
  )
583
  print(" --flavor a100-large --secrets HF_TOKEN --timeout 4h \\")
584
  print(