{
  "output_dir": "/workspace/believe-it-or-not/data/finetunes/qwen_mo_deception/2026-02-08_01-06-54",
  "do_train": false,
  "do_eval": false,
  "do_predict": false,
  "eval_strategy": "no",
  "prediction_loss_only": false,
  "per_device_train_batch_size": 4,
  "per_device_eval_batch_size": 32,
  "gradient_accumulation_steps": 2,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "torch_empty_cache_steps": null,
  "learning_rate": 1e-05,
  "weight_decay": 0.0,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "num_train_epochs": 1,
  "max_steps": -1,
  "lr_scheduler_type": "linear",
  "lr_scheduler_kwargs": null,
  "warmup_ratio": null,
  "warmup_steps": 0,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "/workspace/believe-it-or-not/data/finetunes/qwen_mo_deception/2026-02-08_01-06-54/logs",
  "logging_strategy": "steps",
  "logging_first_step": false,
  "logging_steps": 10,
  "logging_nan_inf_filter": true,
  "save_strategy": "no",
  "save_steps": 500,
  "save_total_limit": null,
  "enable_jit_checkpoint": false,
  "save_on_each_node": false,
  "save_only_model": false,
  "restore_callback_states_from_checkpoint": false,
  "use_cpu": false,
  "seed": 42,
  "data_seed": null,
  "bf16": true,
  "fp16": false,
  "bf16_full_eval": false,
  "fp16_full_eval": false,
  "tf32": true,
  "local_rank": -1,
  "ddp_backend": null,
  "debug": [],
  "dataloader_drop_last": false,
  "eval_steps": 0.1,
  "dataloader_num_workers": 4,
  "dataloader_prefetch_factor": null,
  "run_name": null,
  "disable_tqdm": false,
  "remove_unused_columns": false,
  "label_names": null,
  "load_best_model_at_end": false,
  "metric_for_best_model": null,
  "greater_is_better": null,
  "ignore_data_skip": false,
  "fsdp": [],
  "fsdp_config": {
    "min_num_params": 0,
    "xla": false,
    "xla_fsdp_v2": false,
    "xla_fsdp_grad_ckpt": false
  },
  "accelerator_config": {
    "split_batches": false,
    "dispatch_batches": null,
    "even_batches": true,
    "use_seedable_sampler": true,
    "non_blocking": false,
    "gradient_accumulation_kwargs": null
  },
  "parallelism_config": null,
  "deepspeed": null,
  "label_smoothing_factor": 0.0,
  "optim": "adamw_torch",
  "optim_args": null,
  "group_by_length": true,
  "length_column_name": "length",
  "report_to": [],
  "project": "huggingface",
  "trackio_space_id": "trackio",
  "ddp_find_unused_parameters": null,
  "ddp_bucket_cap_mb": null,
  "ddp_broadcast_buffers": null,
  "dataloader_pin_memory": true,
  "dataloader_persistent_workers": false,
  "skip_memory_metrics": true,
  "push_to_hub": false,
  "resume_from_checkpoint": null,
  "hub_model_id": null,
  "hub_strategy": "every_save",
  "hub_token": "<HUB_TOKEN>",
  "hub_private_repo": null,
  "hub_always_push": false,
  "hub_revision": null,
  "gradient_checkpointing": false,
  "gradient_checkpointing_kwargs": null,
  "include_for_metrics": [],
  "eval_do_concat_batches": true,
  "auto_find_batch_size": false,
  "full_determinism": false,
  "ddp_timeout": 1800,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null,
  "include_num_input_tokens_seen": "no",
  "neftune_noise_alpha": null,
  "optim_target_modules": null,
  "batch_eval_metrics": false,
  "eval_on_start": false,
  "use_liger_kernel": false,
  "liger_kernel_config": null,
  "eval_use_gather_object": false,
  "average_tokens_across_devices": true,
  "use_cache": false,
  "lora_r": 64,
  "lora_alpha": 128,
  "lora_dropout": 0.05,
  "lora_bias": "none",
  "lora_task_type": "CAUSAL_LM",
  "lora_target_modules": [
    "q_proj",
    "k_proj",
    "v_proj",
    "o_proj",
    "down_proj",
    "up_proj",
    "gate_proj"
  ],
  "num_train_points": null
}