Upload folder using huggingface_hub
- adapter_config.json +40 -0
- adapter_model.safetensors +3 -0
- stats.json +1 -0
- train_config.json +1 -0
adapter_config.json
ADDED
@@ -0,0 +1,40 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_bias": false,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "down_proj",
+    "up_proj",
+    "q_b_proj",
+    "o_proj",
+    "kv_a_proj_with_mqa",
+    "kv_b_proj",
+    "gate_proj",
+    "q_a_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
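This is a standard PEFT LoRA adapter config: rank 8, alpha 16, dropout 0.05, targeting the DeepSeek-style MLA attention projections (q_a_proj/q_b_proj, kv_a_proj_with_mqa/kv_b_proj, o_proj) plus the MLP projections (gate/up/down). Because base_model_name_or_path is null, the base model has to be supplied explicitly at load time. A minimal loading sketch, assuming the DeepSeek-V3.1 base implied by base_model_dir in train_config.json below:

```python
# Minimal sketch of attaching this adapter with PEFT. The base checkpoint
# name is an assumption inferred from "base_model_dir" in train_config.json;
# adapter_config.json itself has base_model_name_or_path set to null, so the
# base must be passed in explicitly.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_MODEL = "deepseek-ai/DeepSeek-V3.1"  # assumed; not recorded in the adapter config
ADAPTER_DIR = "./"                        # directory containing adapter_config.json

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
base = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL, torch_dtype="auto", trust_remote_code=True
)

# Attaches the rank-8 (alpha=16) LoRA weights to the attention projections
# (q_a/q_b, kv_a_with_mqa/kv_b, o) and the MLP gate/up/down projections.
model = PeftModel.from_pretrained(base, ADAPTER_DIR)
model.eval()
```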
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c24812289ab2c2a484924572336154a929ae24423268afd8dcf31f6e59bb250
+size 6707819144
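The adapter weights themselves live in Git LFS; what is committed is only this three-line pointer (spec version, sha256 oid, and size: 6707819144 bytes, roughly 6.7 GB). A small sketch of reading that pointer format, in case you need to inspect it without an LFS checkout:

```python
# Minimal sketch: parse a Git LFS pointer file like the one above into a dict.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"],            # "sha256:<hex digest>"
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:5c24812289ab2c2a484924572336154a929ae24423268afd8dcf31f6e59bb250
size 6707819144"""

info = parse_lfs_pointer(pointer)
print(f"{info['size_bytes'] / 1e9:.2f} GB")  # ~6.71 GB of adapter weights
```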
stats.json
ADDED
@@ -0,0 +1 @@
+{"world_size": 8, "epochs": 1, "steps": 11, "seqs": 248, "tokens": 29976, "last_epoch_steps": 0, "last_epoch_seqs": 0, "last_epoch_tokens": 0, "total_seqs": 248, "nan_in_loss_seqs": 0, "experiment_tracking_run_id": null, "loss_ema": 1.045212610201402, "loss_sum": 11.497338712215424, "mtp_loss_ema": 0, "mtp_loss_sum": 0, "eval_losses_avg": [0.6676790714263916]}
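stats.json summarizes the run: 8 workers, 1 epoch, 11 steps over 248 sequences (29,976 tokens), no NaN losses, and one eval. A sketch of the arithmetic these fields support; the field meanings are inferred from their names:

```python
# Minimal sketch: derive summary numbers from stats.json. Field meanings are
# assumptions from the names; e.g. loss_sum appears to be the sum of per-step
# losses, since loss_sum / steps matches loss_ema here.
import json

with open("stats.json") as f:
    stats = json.load(f)

mean_loss = stats["loss_sum"] / stats["steps"]       # 11.497... / 11 ~ 1.045
tokens_per_step = stats["tokens"] / stats["steps"]   # 29976 / 11 ~ 2725
print(f"mean train loss ~ {mean_loss:.4f}")
print(f"final eval loss = {stats['eval_losses_avg'][-1]:.4f}")  # ~0.668
```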
train_config.json
ADDED
@@ -0,0 +1 @@
+{"comet": false, "comet_api_key": null, "comet_workspace": null, "comet_project": null, "comet_run_id": "fgp4puis", "wandb": false, "wandb_api_key": null, "wandb_entity": null, "wandb_project": null, "wandb_run_id": "fgp4puis", "base_model_dir": "/llm-downloader-destination/base/fireworks/deepseek-v3p1/hf", "output_model_dir": "gs://fireworks-artifacts-resendezcordell-oah2-bbb5dc/tuned-model-fgp4puis/ccce3b/ft-0121-dyck2-300/checkpoint", "checkpoint_dir": "/dev/shm/checkpoints", "gcs_checkpoint_dir": "gs://fireworks-artifacts-resendezcordell-oah2-bbb5dc/tuned-model-fgp4puis/ccce3b/ft-0121-dyck2-300/checkpoints/checkpoints", "max_checkpoints_to_keep": 1, "checkpoint_interval": 3600, "save_final_checkpoint": false, "train": true, "learning_rate": 0.0001, "learning_rate_warmup_steps": 0, "grad_accum_steps": 1, "epochs": 1, "early_stop": false, "seed": 42, "dataset_dir": "/mnt/staging/dataset", "eval_auto_carveout": true, "eval_dataset_dir": null, "train_limit": null, "max_context_len": 3200, "batch_size": 3200, "batch_size_samples": null, "max_data_workers": 0, "min_evals_per_epoch": 1, "max_evals_per_epoch": 5, "precision": null, "status_file": "gs://fireworks-fine-tuning-job-status/sftj-resendezcordell-oah2-fgp4puis-8a9346d0-bd7e-4e19-9f87-2d0eb438926f", "billing_file": "gs://fireworks-fine-tuning-metadata/sftj-resendezcordell-oah2-fgp4puis/billing-8a9346d0-bd7e-4e19-9f87-2d0eb438926f", "metrics_file": "gs://fireworks-fine-tuning-metadata/sftj-resendezcordell-oah2-fgp4puis/metrics.jsonl", "trainer_logs_file": null, "profile": null, "weight_sharding": null, "activation_sharding": null, "empty_weights": false, "nan_ratio_threshold": 0.05, "fast_api_port": 80, "optimizer": "adamw", "optimizer_weight_decay": 0.01, "target_shard_size_gb": null, "enable_fast_processor": false, "peft_addon_dir": null, "lora_rank": 8, "lora_dropout": 0.05, "template_kind": "conversation", "template": null, "mtp_config": {"enable_mtp": false, "freeze_base_model": false, "num_draft_tokens": 1}, "qat": true, "kld": false, "teft_tokens": [], "skip_dataset_filtering": false}
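train_config.json captures the full Fireworks fine-tuning job spec, from checkpoint/GCS paths to optimizer settings. A sketch that pulls out the hyperparameters most relevant to reproducing the run; field semantics are assumptions based on the names in this trainer config:

```python
# Minimal sketch: print the key hyperparameters from train_config.json.
# Field meanings are assumptions from the names in this trainer config.
import json

with open("train_config.json") as f:
    cfg = json.load(f)

for key in ("learning_rate", "epochs", "lora_rank", "lora_dropout",
            "max_context_len", "batch_size", "optimizer", "seed", "qat"):
    print(f"{key:>16}: {cfg[key]}")
# -> lr 1e-4, 1 epoch, rank-8 LoRA with 0.05 dropout, AdamW, seed 42,
#    and "qat": true (quantization-aware training enabled).
```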