---
compute_environment: LOCAL_MACHINE
debug: false
deepspeed_config:
  deepspeed_multinode_launcher: standard
  zero3_save_16bit_model: true
  zero_optimization:
    stage: 3
    offload_optimizer:
      device: cpu
      pin_memory: true
      buffer_count: 4
    offload_param:
      device: cpu
      pin_memory: true
      buffer_count: 4
    model_parallel:
      enable: true
      tensor_parallel_size: 4
      pipeline_parallel_size: 1
    reduce_bucket_size: 2e5
    stage3_prefetch_bucket_size: 0.1e6
    stage3_max_live_parameters: 1e6
    stage3_max_reuse_distance: 1e5
    stage3_max_live_gradients: 1e6
    stage3_param_persistence_threshold: 1e4
    sub_group_size: 1e9

  activation_checkpointing:
    partition_activations: true
    contiguous_memory_optimization: true
    cpu_checkpointing: true

  bf16:
    enabled: false
  fp16:
    enabled: true

  optimizer:
    type: AdamW
    params:
      eight_bit: true
      lr: auto
      betas: auto
      eps: auto
      weight_decay: auto

  scheduler:
    type: WarmupDecayLR
    params:
      num_warmup_steps: auto
      num_training_steps: auto

  communication_data_type: fp16
  communication_bucket_size: 500000000
  dist_backend: nccl
  train_micro_batch_size_per_gpu: auto
  gradient_clipping: 1.0

distributed_type: DEEPSPEED
downcast_bf16: auto
machine_rank: 0
main_training_function: main
# NOTE(review): mixed_precision is bf16 here, but the deepspeed_config above
# enables fp16 and disables bf16 — confirm which precision is intended and
# align the two settings.
mixed_precision: bf16
rdzv_backend: static
same_network: true
use_cpu: auto