harm-bpo_gemma2b_random_68740f7b
Model Information
- Dataset: harm-bpo
- Base Model: gemma2b
- Query Strategy: random
- Alignment Method: odpo (online DPO)
- Training Hash: 68740f7b
Original Directory Name
harm-bpo_gemma2b-vanilla_deberta_online_dpo_random_query16_steps625_batch16_gpu1
Training Configuration
{
"output_dir": "outputs/harm-bpo_gemma2b-vanilla_deberta_online_dpo_random_query16_steps625_batch16_gpu1/",
"overwrite_output_dir": false,
"do_train": false,
"do_eval": true,
"do_predict": false,
"eval_strategy": "steps",
"prediction_loss_only": false,
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"per_gpu_train_batch_size": null,
"per_gpu_eval_batch_size": null,
"gradient_accumulation_steps": 1,
"eval_accumulation_steps": null,
"eval_delay": 0,
"torch_empty_cache_steps": null,
"learning_rate": 5e-05,
"weight_decay": 0.0,
"adam_beta1": 0.9,
"adam_beta2": 0.999,
"adam_epsilon": 1e-08,
"max_grad_norm": 1.0,
"num_train_epochs": 3.0,
"max_steps": 625,
"lr_scheduler_type": "linear",
"lr_scheduler_kwargs": {},
"warmup_ratio": 0.0,
"warmup_steps": 31,
"log_level": "passive",
"log_level_replica": "warning",
"log_on_each_node": true,
"logging_dir": "outputs/harm-bpo_gemma2b-vanilla_deberta_online_dpo_random_query16_steps625_batch16_gpu1/runs/Sep05_15-25-16_node30",
"logging_strategy": "steps",
"logging_first_step": false,
"logging_steps": 0.05,
"logging_nan_inf_filter": true,
"save_strategy": "steps",
"save_steps": 0.1,
"save_total_limit": null,
"save_safetensors": true,
"save_on_each_node": false,
"save_only_model": false,
"restore_callback_states_from_checkpoint": false,
"no_cuda": false,
"use_cpu": false,
"use_mps_device": false,
"seed": 1,
"data_seed": null,
"jit_mode_eval": false,
"use_ipex": false,
"bf16": false,
"fp16": false,
"fp16_opt_level": "O1",
"half_precision_backend": "auto",
"bf16_full_eval": false,
"fp16_full_eval": false,
"tf32": null,
"local_rank": 0,
"ddp_backend": null,
"tpu_num_cores": null,
"tpu_metrics_debug": false,
"debug": [],
"dataloader_drop_last": false,
"eval_steps": 0.05,
"dataloader_num_workers": 0,
"dataloader_prefetch_factor": null,
"past_index": -1,
"run_name": "outputs/harm-bpo_gemma2b-vanilla_deberta_online_dpo_random_query16_steps625_batch16_gpu1/",
"disable_tqdm": false,
"remove_unused_columns": true,
"label_names": null,
"load_best_model_at_end": false,
"metric_for_best_model": null,
"greater_is_better": null,
"ignore_data_skip": false,
"fsdp": [],
"fsdp_min_num_params": 0,
"fsdp_config": {
"min_num_params": 0,
"xla": false,
"xla_fsdp_v2": false,
"xla_fsdp_grad_ckpt": false
},
"fsdp_transformer_layer_cls_to_wrap": null,
"accelerator_config": "AcceleratorConfig(split_batches=False, dispatch_batches=None, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False)",
"deepspeed": null,
"label_smoothing_factor": 0.0,
"optim": "adamw_torch",
"optim_args": null,
"adafactor": false,
"group_by_length": false,
"length_column_name": "length",
"report_to": [
"wandb"
],
"ddp_find_unused_parameters": null,
"ddp_bucket_cap_mb": null,
"ddp_broadcast_buffers": null,
"dataloader_pin_memory": true,
"dataloader_persistent_workers": false,
"skip_memory_metrics": true,
"use_legacy_prediction_loop": false,
"push_to_hub": false,
"resume_from_checkpoint": null,
"hub_model_id": null,
"hub_strategy": "every_save",
"hub_token": null,
"hub_private_repo": null,
"hub_always_push": false,
"hub_revision": null,
"gradient_checkpointing": false,
"gradient_checkpointing_kwargs": null,
"include_inputs_for_metrics": false,
"include_for_metrics": [],
"eval_do_concat_batches": true,
"fp16_backend": "auto",
"push_to_hub_model_id": null,
"push_to_hub_organization": null,
"push_to_hub_token": null,
"mp_parameters": "",
"auto_find_batch_size": false,
"full_determinism": false,
"torchdynamo": null,
"ray_scope": "last",
"ddp_timeout": 10800,
"torch_compile": false,
"torch_compile_backend": null,
"torch_compile_mode": null,
"include_tokens_per_second": false,
"include_num_input_tokens_seen": false,
"neftune_noise_alpha": null,
"optim_target_modules": null,
"batch_eval_metrics": false,
"eval_on_start": false,
"use_liger_kernel": false,
"liger_kernel_config": null,
"eval_use_gather_object": false,
"average_tokens_across_devices": false,
"reward_model_path": "OpenAssistant/reward-model-deberta-v3-large-v2",
"judge": null,
"eval_judge": "OpenAssistant/reward-model-deberta-v3-large-v2",
"num_eval_prompts": 500,
"max_new_tokens": 64,
"max_length": 256,
"temperature": 0.9,
"missing_eos_penalty": 1.0,
"beta": 0.1,
"loss_type": "sigmoid",
"dataset_num_proc": null,
"disable_dropout": true,
"use_vllm": false,
"gpu_memory_utilization": 0.55,
"ds3_gather_for_generation": true,
"distributed_state": "Distributed environment: DistributedType.MULTI_GPU Backend: nccl\nNum processes: 1\nProcess index: 0\nLocal process index: 0\nDevice: cuda:0\n",
"_n_gpu": 1,
"__cached__setup_devices": "cuda:0",
"deepspeed_plugin": null
}
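For reference, the sketch below shows one plausible way these arguments map onto TRL's `OnlineDPOConfig`/`OnlineDPOTrainer`. It is an approximation rather than the exact training script used here: argument names can differ across TRL versions, and the base-model Hub id (`google/gemma-2b`), the prompt-dataset path, and the LoRA settings are assumptions, not values recorded on this card.

```python
# Hedged sketch of an online-DPO run mirroring the configuration above (TRL assumed).
from datasets import load_dataset
from peft import LoraConfig
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
)
from trl import OnlineDPOConfig, OnlineDPOTrainer

model_name = "google/gemma-2b"  # assumed Hub id for the "gemma2b" base model
reward_name = "OpenAssistant/reward-model-deberta-v3-large-v2"  # from reward_model_path above

model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
reward_model = AutoModelForSequenceClassification.from_pretrained(reward_name)
reward_tokenizer = AutoTokenizer.from_pretrained(reward_name)

# Placeholder: the "harm-bpo" prompt dataset has no Hub id on this card.
train_dataset = load_dataset("path/to/harm-bpo-prompts", split="train")

args = OnlineDPOConfig(
    output_dir="outputs/harm-bpo_online_dpo_sketch",
    per_device_train_batch_size=16,
    learning_rate=5e-5,
    max_steps=625,
    warmup_steps=31,
    beta=0.1,
    loss_type="sigmoid",
    max_new_tokens=64,
    max_length=256,
    temperature=0.9,
    missing_eos_penalty=1.0,
    reward_model_path=reward_name,
    seed=1,
)

trainer = OnlineDPOTrainer(
    model=model,
    reward_model=reward_model,
    args=args,
    train_dataset=train_dataset,
    processing_class=tokenizer,
    reward_processing_class=reward_tokenizer,
    # Placeholder LoRA settings; the values actually used are in adapter_config.json.
    peft_config=LoraConfig(task_type="CAUSAL_LM"),
)
trainer.train()
```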
Files Included
- adapter_config.json: LoRA adapter configuration
- adapter_model.safetensors: Model weights
- training_args.bin: Complete training arguments
- win_rates.json: Evaluation win rates (if available)
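To use these files, the LoRA adapter can be loaded on top of the base model with PEFT. The sketch below assumes `google/gemma-2b` as the base model and `activeDap/harm-bpo_gemma2b_random_68740f7b` as the adapter repo id (inferred from the card title and upload organization); substitute the actual ids if they differ.

```python
# Hedged sketch: load the adapter weights (adapter_model.safetensors) onto the base model.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "google/gemma-2b"  # assumed base model id
adapter_id = "activeDap/harm-bpo_gemma2b_random_68740f7b"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base_model, adapter_id)

prompt = "How do I stay safe online?"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```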
Upload Information
- Uploaded: 2025-09-07 12:48:54
- Organization: activeDap