diff --git a/checkpoint-1014/README.md b/checkpoint-1014/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-1014/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-1014/adapter_config.json b/checkpoint-1014/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-1014/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-1014/adapter_model.safetensors b/checkpoint-1014/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..055b976da43499fe7cddca707e6f663264900a3f
--- /dev/null
+++ b/checkpoint-1014/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f6c1169c3645e69f1ff1010360c4583bae334f639f0fc0894474475e922bd88
+size 26235704
diff --git a/checkpoint-1014/optimizer.pt b/checkpoint-1014/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f7373cecd714e1bfa072d5d92f734ea3c987c327
--- /dev/null
+++ b/checkpoint-1014/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f45993592b5aa72151274e216de6fdfc70dbbafd87606b15f0c08c0240f3bfa
+size 52563258
diff --git a/checkpoint-1014/rng_state.pth b/checkpoint-1014/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..5d968263657a30eea713bfe7c3ac18de2db73bca
--- /dev/null
+++ b/checkpoint-1014/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b4c4f0166ef9ca74027402d439827bc8ed2a02c96ba6ba9e51d0a9c0f291412
+size 14244
diff --git a/checkpoint-1014/scheduler.pt b/checkpoint-1014/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1f77c54062f4a69ad063b6b07011b2bfa2f4f58a
--- /dev/null
+++ b/checkpoint-1014/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb0733feb71b9d9a54913f6c84a00d40e2ea8313a09e349f8b824a911c153299
+size 1064
diff --git a/checkpoint-1014/trainer_state.json b/checkpoint-1014/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..b0df5948bf355fb612ab376099c21a4b867d1be8
--- /dev/null
+++ b/checkpoint-1014/trainer_state.json
@@ -0,0 +1,173 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 5.991137370753323,
+ "eval_steps": 500,
+ "global_step": 1014,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 48.92063522338867,
+ "learning_rate": 2.45e-05,
+ "loss": 352758333.44,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 6.408321380615234,
+ "learning_rate": 4.9500000000000004e-05,
+ "loss": 178610135.04,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 1.5189077854156494,
+ "learning_rate": 4.731947483588622e-05,
+ "loss": 0.4129,
+ "step": 150
+ },
+ {
+ "epoch": 1.1816838995568686,
+ "grad_norm": 2.1191959381103516,
+ "learning_rate": 4.458424507658643e-05,
+ "loss": 0.2667,
+ "step": 200
+ },
+ {
+ "epoch": 1.4771048744460857,
+ "grad_norm": 4.9505510330200195,
+ "learning_rate": 4.201312910284464e-05,
+ "loss": 27.1689,
+ "step": 250
+ },
+ {
+ "epoch": 1.7725258493353029,
+ "grad_norm": 0.197406604886055,
+ "learning_rate": 3.9332603938730855e-05,
+ "loss": 4.521,
+ "step": 300
+ },
+ {
+ "epoch": 2.06794682422452,
+ "grad_norm": 0.035883717238903046,
+ "learning_rate": 3.6597374179431074e-05,
+ "loss": 0.0013,
+ "step": 350
+ },
+ {
+ "epoch": 2.363367799113737,
+ "grad_norm": 0.02112976275384426,
+ "learning_rate": 3.386214442013129e-05,
+ "loss": 0.0006,
+ "step": 400
+ },
+ {
+ "epoch": 2.658788774002954,
+ "grad_norm": 0.005675207823514938,
+ "learning_rate": 3.112691466083151e-05,
+ "loss": 0.0003,
+ "step": 450
+ },
+ {
+ "epoch": 2.9542097488921715,
+ "grad_norm": 0.004977445118129253,
+ "learning_rate": 2.839168490153173e-05,
+ "loss": 0.0003,
+ "step": 500
+ },
+ {
+ "epoch": 3.2496307237813884,
+ "grad_norm": 0.0056141638197004795,
+ "learning_rate": 2.565645514223195e-05,
+ "loss": 0.0002,
+ "step": 550
+ },
+ {
+ "epoch": 3.5450516986706058,
+ "grad_norm": 0.0035467667039483786,
+ "learning_rate": 2.292122538293217e-05,
+ "loss": 0.0002,
+ "step": 600
+ },
+ {
+ "epoch": 3.8404726735598227,
+ "grad_norm": 0.0036520687863230705,
+ "learning_rate": 2.0185995623632387e-05,
+ "loss": 0.0001,
+ "step": 650
+ },
+ {
+ "epoch": 4.13589364844904,
+ "grad_norm": 0.002137696836143732,
+ "learning_rate": 1.7450765864332606e-05,
+ "loss": 0.0001,
+ "step": 700
+ },
+ {
+ "epoch": 4.431314623338257,
+ "grad_norm": 0.0029208394698798656,
+ "learning_rate": 1.4715536105032822e-05,
+ "loss": 0.0001,
+ "step": 750
+ },
+ {
+ "epoch": 4.726735598227474,
+ "grad_norm": 0.0022111597936600447,
+ "learning_rate": 1.1980306345733041e-05,
+ "loss": 0.0001,
+ "step": 800
+ },
+ {
+ "epoch": 5.022156573116692,
+ "grad_norm": 0.0025725914165377617,
+ "learning_rate": 9.245076586433261e-06,
+ "loss": 0.0001,
+ "step": 850
+ },
+ {
+ "epoch": 5.317577548005908,
+ "grad_norm": 0.0019709845073521137,
+ "learning_rate": 6.50984682713348e-06,
+ "loss": 0.0001,
+ "step": 900
+ },
+ {
+ "epoch": 5.612998522895126,
+ "grad_norm": 0.0022124536335468292,
+ "learning_rate": 3.774617067833698e-06,
+ "loss": 0.0001,
+ "step": 950
+ },
+ {
+ "epoch": 5.908419497784343,
+ "grad_norm": 0.0015069200890138745,
+ "learning_rate": 1.039387308533917e-06,
+ "loss": 0.0001,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.6016891541848064e+17,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1014/training_args.bin b/checkpoint-1014/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..81d7900185e43b427dbbbd817f4b048cc44538b8
--- /dev/null
+++ b/checkpoint-1014/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2685ba35aea51a31bdbfcc1cdf0ba01aa45e80527118c18242d090f068b2e114
+size 5304
diff --git a/checkpoint-169/README.md b/checkpoint-169/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-169/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-169/adapter_config.json b/checkpoint-169/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-169/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-169/adapter_model.safetensors b/checkpoint-169/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ef60f4b79fdd74e3712f9b9a873482760bfe9674
--- /dev/null
+++ b/checkpoint-169/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:615a63207f9114ee4c59e552ed3738d04136688b0f90f38a01126b8123d1d0f8
+size 26235704
diff --git a/checkpoint-169/optimizer.pt b/checkpoint-169/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..973d0a8c3978679d8e03360469816db6ba6ff48f
--- /dev/null
+++ b/checkpoint-169/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97879111c7be154c4f3f4324f704322d405dbca0187e901b48bf840f118cf040
+size 52563258
diff --git a/checkpoint-169/rng_state.pth b/checkpoint-169/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8c3d7eaea723ec61d2e0b66cbd197d16544f0d37
--- /dev/null
+++ b/checkpoint-169/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:671cb42ff173690da931c0dc9e5393050001d4311b9fa17e7f4b9742fc264993
+size 14244
diff --git a/checkpoint-169/scheduler.pt b/checkpoint-169/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..059c58a54740927a1624e5cc856cb8572f38d604
--- /dev/null
+++ b/checkpoint-169/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6152855008c66cc18d23fa50bfa87c3d5a7e1ede2ac197a9bde8c89ee98c3ab6
+size 1064
diff --git a/checkpoint-169/trainer_state.json b/checkpoint-169/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..cb62316bb8b92a805a82ef962cd8ea39090fd4fa
--- /dev/null
+++ b/checkpoint-169/trainer_state.json
@@ -0,0 +1,54 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9985228951255539,
+ "eval_steps": 500,
+ "global_step": 169,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 48.92063522338867,
+ "learning_rate": 2.45e-05,
+ "loss": 352758333.44,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 6.408321380615234,
+ "learning_rate": 4.9500000000000004e-05,
+ "loss": 178610135.04,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 1.5189077854156494,
+ "learning_rate": 4.731947483588622e-05,
+ "loss": 0.4129,
+ "step": 150
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 2.673267488980992e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-169/training_args.bin b/checkpoint-169/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..81d7900185e43b427dbbbd817f4b048cc44538b8
--- /dev/null
+++ b/checkpoint-169/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2685ba35aea51a31bdbfcc1cdf0ba01aa45e80527118c18242d090f068b2e114
+size 5304
diff --git a/checkpoint-338/README.md b/checkpoint-338/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-338/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-338/adapter_config.json b/checkpoint-338/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-338/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-338/adapter_model.safetensors b/checkpoint-338/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a52e04a45bd93bf468fd09ec28b1efaca76742d0
--- /dev/null
+++ b/checkpoint-338/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:832b691219be2035bb296fef2604f8efc240a40a0af364a338212166c94cc419
+size 26235704
diff --git a/checkpoint-338/optimizer.pt b/checkpoint-338/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6a4c19141a5639f9c70617fa8aadf3e4a5f8b87c
--- /dev/null
+++ b/checkpoint-338/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9da1d8b13c2c1e7e68640d6d791d85b160fb7790c7e5a83296988d7a269445f0
+size 52563258
diff --git a/checkpoint-338/rng_state.pth b/checkpoint-338/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..28a15ce2f3f6b96a48280686bd9a851a0d33ae3e
--- /dev/null
+++ b/checkpoint-338/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a407ae22d91865b0c7b2f37be1ef15d17cbdabfdef79af88be1868bd0b57a9e
+size 14244
diff --git a/checkpoint-338/scheduler.pt b/checkpoint-338/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6b18bdb01273eba367f5fe3783602a3a31241cb9
--- /dev/null
+++ b/checkpoint-338/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bff91783b31d88429603fb021362c0a001d2b3161d4433d953a87a05fc41e10
+size 1064
diff --git a/checkpoint-338/trainer_state.json b/checkpoint-338/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..5a1e32c0392a8e04ab9e1f2bb104b60094f280c0
--- /dev/null
+++ b/checkpoint-338/trainer_state.json
@@ -0,0 +1,75 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.9970457902511078,
+ "eval_steps": 500,
+ "global_step": 338,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 48.92063522338867,
+ "learning_rate": 2.45e-05,
+ "loss": 352758333.44,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 6.408321380615234,
+ "learning_rate": 4.9500000000000004e-05,
+ "loss": 178610135.04,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 1.5189077854156494,
+ "learning_rate": 4.731947483588622e-05,
+ "loss": 0.4129,
+ "step": 150
+ },
+ {
+ "epoch": 1.1816838995568686,
+ "grad_norm": 2.1191959381103516,
+ "learning_rate": 4.458424507658643e-05,
+ "loss": 0.2667,
+ "step": 200
+ },
+ {
+ "epoch": 1.4771048744460857,
+ "grad_norm": 4.9505510330200195,
+ "learning_rate": 4.201312910284464e-05,
+ "loss": 27.1689,
+ "step": 250
+ },
+ {
+ "epoch": 1.7725258493353029,
+ "grad_norm": 0.197406604886055,
+ "learning_rate": 3.9332603938730855e-05,
+ "loss": 4.521,
+ "step": 300
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.346534977961984e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-338/training_args.bin b/checkpoint-338/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..81d7900185e43b427dbbbd817f4b048cc44538b8
--- /dev/null
+++ b/checkpoint-338/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2685ba35aea51a31bdbfcc1cdf0ba01aa45e80527118c18242d090f068b2e114
+size 5304
diff --git a/checkpoint-507/README.md b/checkpoint-507/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-507/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-507/adapter_config.json b/checkpoint-507/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-507/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-507/adapter_model.safetensors b/checkpoint-507/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..00dc9d4ef2876b04bb0a91e6ac14465c5741d24e
--- /dev/null
+++ b/checkpoint-507/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08db07fcd350f0437cc629b2fb57ac2777ab0dcfb86acec8781b13f3e15964ad
+size 26235704
diff --git a/checkpoint-507/optimizer.pt b/checkpoint-507/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dcec39d0212d93f7936188c74052f50a9a5fbb87
--- /dev/null
+++ b/checkpoint-507/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b579c6ba9ed6ee79a0b414d3de4970bbb9eb0c861987060918aefe70b264eda
+size 52563258
diff --git a/checkpoint-507/rng_state.pth b/checkpoint-507/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..518eebd4ed0e965d0147b7f42843fa71967f44ac
--- /dev/null
+++ b/checkpoint-507/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e835266e3a87be80aca307cad0a349799b85090ac5a34a85e773d1c8dc2560a
+size 14244
diff --git a/checkpoint-507/scheduler.pt b/checkpoint-507/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c8f2ed85e02fe95ad8b5c486893d2dbbad2a79c9
--- /dev/null
+++ b/checkpoint-507/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:816a4c4af8e64cc4fece6135cd4d3c0c772e93ce22ebcf3ce15070629075e0f7
+size 1064
diff --git a/checkpoint-507/trainer_state.json b/checkpoint-507/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..579692b2bfb064386cf68bc638e9d56f82ec1914
--- /dev/null
+++ b/checkpoint-507/trainer_state.json
@@ -0,0 +1,103 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.9955686853766617,
+ "eval_steps": 500,
+ "global_step": 507,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 48.92063522338867,
+ "learning_rate": 2.45e-05,
+ "loss": 352758333.44,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 6.408321380615234,
+ "learning_rate": 4.9500000000000004e-05,
+ "loss": 178610135.04,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 1.5189077854156494,
+ "learning_rate": 4.731947483588622e-05,
+ "loss": 0.4129,
+ "step": 150
+ },
+ {
+ "epoch": 1.1816838995568686,
+ "grad_norm": 2.1191959381103516,
+ "learning_rate": 4.458424507658643e-05,
+ "loss": 0.2667,
+ "step": 200
+ },
+ {
+ "epoch": 1.4771048744460857,
+ "grad_norm": 4.9505510330200195,
+ "learning_rate": 4.201312910284464e-05,
+ "loss": 27.1689,
+ "step": 250
+ },
+ {
+ "epoch": 1.7725258493353029,
+ "grad_norm": 0.197406604886055,
+ "learning_rate": 3.9332603938730855e-05,
+ "loss": 4.521,
+ "step": 300
+ },
+ {
+ "epoch": 2.06794682422452,
+ "grad_norm": 0.035883717238903046,
+ "learning_rate": 3.6597374179431074e-05,
+ "loss": 0.0013,
+ "step": 350
+ },
+ {
+ "epoch": 2.363367799113737,
+ "grad_norm": 0.02112976275384426,
+ "learning_rate": 3.386214442013129e-05,
+ "loss": 0.0006,
+ "step": 400
+ },
+ {
+ "epoch": 2.658788774002954,
+ "grad_norm": 0.005675207823514938,
+ "learning_rate": 3.112691466083151e-05,
+ "loss": 0.0003,
+ "step": 450
+ },
+ {
+ "epoch": 2.9542097488921715,
+ "grad_norm": 0.004977445118129253,
+ "learning_rate": 2.839168490153173e-05,
+ "loss": 0.0003,
+ "step": 500
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 8.019802466942976e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-507/training_args.bin b/checkpoint-507/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..81d7900185e43b427dbbbd817f4b048cc44538b8
--- /dev/null
+++ b/checkpoint-507/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2685ba35aea51a31bdbfcc1cdf0ba01aa45e80527118c18242d090f068b2e114
+size 5304
diff --git a/checkpoint-677/README.md b/checkpoint-677/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-677/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-677/adapter_config.json b/checkpoint-677/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-677/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-677/adapter_model.safetensors b/checkpoint-677/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f09f5fb05642a179681d2bec63bdbe5215605116
--- /dev/null
+++ b/checkpoint-677/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c48dd43a7ed783d588cb55217f52f5f190dbf8457c2e204b12302791568c50ca
+size 26235704
diff --git a/checkpoint-677/optimizer.pt b/checkpoint-677/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1d9aa0a7d92a1accba251f62db6fc73ac91f19b9
--- /dev/null
+++ b/checkpoint-677/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b73a423b8f7ae1542809beff7b902c3479175fee46eafc8b56ca89a59f72530
+size 52563258
diff --git a/checkpoint-677/rng_state.pth b/checkpoint-677/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..346707572282eff6e90aeff25f77cd75d6fde795
--- /dev/null
+++ b/checkpoint-677/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6356d528bf3866558bd9dd722baf34c195d10152de27bd868852994ed582b79
+size 14244
diff --git a/checkpoint-677/scheduler.pt b/checkpoint-677/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..497d583b2340975c7ab172d3b212f3ca0a44c7ab
--- /dev/null
+++ b/checkpoint-677/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f786c07d3b43c6a738f2f29bb700557b19652c689c99b1edd8ab9894094d7c8
+size 1064
diff --git a/checkpoint-677/trainer_state.json b/checkpoint-677/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..38ff7c4125dffc6ef7a711db9d123e3362dcea0d
--- /dev/null
+++ b/checkpoint-677/trainer_state.json
@@ -0,0 +1,124 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.0,
+ "eval_steps": 500,
+ "global_step": 677,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 48.92063522338867,
+ "learning_rate": 2.45e-05,
+ "loss": 352758333.44,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 6.408321380615234,
+ "learning_rate": 4.9500000000000004e-05,
+ "loss": 178610135.04,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 1.5189077854156494,
+ "learning_rate": 4.731947483588622e-05,
+ "loss": 0.4129,
+ "step": 150
+ },
+ {
+ "epoch": 1.1816838995568686,
+ "grad_norm": 2.1191959381103516,
+ "learning_rate": 4.458424507658643e-05,
+ "loss": 0.2667,
+ "step": 200
+ },
+ {
+ "epoch": 1.4771048744460857,
+ "grad_norm": 4.9505510330200195,
+ "learning_rate": 4.201312910284464e-05,
+ "loss": 27.1689,
+ "step": 250
+ },
+ {
+ "epoch": 1.7725258493353029,
+ "grad_norm": 0.197406604886055,
+ "learning_rate": 3.9332603938730855e-05,
+ "loss": 4.521,
+ "step": 300
+ },
+ {
+ "epoch": 2.06794682422452,
+ "grad_norm": 0.035883717238903046,
+ "learning_rate": 3.6597374179431074e-05,
+ "loss": 0.0013,
+ "step": 350
+ },
+ {
+ "epoch": 2.363367799113737,
+ "grad_norm": 0.02112976275384426,
+ "learning_rate": 3.386214442013129e-05,
+ "loss": 0.0006,
+ "step": 400
+ },
+ {
+ "epoch": 2.658788774002954,
+ "grad_norm": 0.005675207823514938,
+ "learning_rate": 3.112691466083151e-05,
+ "loss": 0.0003,
+ "step": 450
+ },
+ {
+ "epoch": 2.9542097488921715,
+ "grad_norm": 0.004977445118129253,
+ "learning_rate": 2.839168490153173e-05,
+ "loss": 0.0003,
+ "step": 500
+ },
+ {
+ "epoch": 3.2496307237813884,
+ "grad_norm": 0.0056141638197004795,
+ "learning_rate": 2.565645514223195e-05,
+ "loss": 0.0002,
+ "step": 550
+ },
+ {
+ "epoch": 3.5450516986706058,
+ "grad_norm": 0.0035467667039483786,
+ "learning_rate": 2.292122538293217e-05,
+ "loss": 0.0002,
+ "step": 600
+ },
+ {
+ "epoch": 3.8404726735598227,
+ "grad_norm": 0.0036520687863230705,
+ "learning_rate": 2.0185995623632387e-05,
+ "loss": 0.0001,
+ "step": 650
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.0693069955923968e+17,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-677/training_args.bin b/checkpoint-677/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..81d7900185e43b427dbbbd817f4b048cc44538b8
--- /dev/null
+++ b/checkpoint-677/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2685ba35aea51a31bdbfcc1cdf0ba01aa45e80527118c18242d090f068b2e114
+size 5304
diff --git a/checkpoint-846/README.md b/checkpoint-846/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-846/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-846/adapter_config.json b/checkpoint-846/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-846/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-846/adapter_model.safetensors b/checkpoint-846/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b2fb51c994b45d6f167bd7d46c740948cdc4a567
--- /dev/null
+++ b/checkpoint-846/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dca49d3b5469c95216879f777f0b90b22bb1f92f4119de9e156a10834045d1e
+size 26235704
diff --git a/checkpoint-846/optimizer.pt b/checkpoint-846/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d0db68209d107a641ddfcf8cc18bdd66b6c1abd9
--- /dev/null
+++ b/checkpoint-846/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b3f923879942ca553ef159f92ceae654b9213668b419492fcc80b832137618f
+size 52563258
diff --git a/checkpoint-846/rng_state.pth b/checkpoint-846/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..421edff8f71c55b5598ad5b7f1a12958255a7b35
--- /dev/null
+++ b/checkpoint-846/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20c2eb11d2b683c85ee0e7da692f4b5b279e4d46d161a2bd6b46f8b649eb6709
+size 14244
diff --git a/checkpoint-846/scheduler.pt b/checkpoint-846/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..75b24e5825aab5551cd41fbb1d3a2379e0f6eb4d
--- /dev/null
+++ b/checkpoint-846/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:509d5523f4ec8fd6fe13f5106b365fe195ce0220750a9e6e1531529631f72fb4
+size 1064
diff --git a/checkpoint-846/trainer_state.json b/checkpoint-846/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..34a501d298c1c220ce66d62020d839bad253be99
--- /dev/null
+++ b/checkpoint-846/trainer_state.json
@@ -0,0 +1,145 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.998522895125554,
+ "eval_steps": 500,
+ "global_step": 846,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 48.92063522338867,
+ "learning_rate": 2.45e-05,
+ "loss": 352758333.44,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 6.408321380615234,
+ "learning_rate": 4.9500000000000004e-05,
+ "loss": 178610135.04,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 1.5189077854156494,
+ "learning_rate": 4.731947483588622e-05,
+ "loss": 0.4129,
+ "step": 150
+ },
+ {
+ "epoch": 1.1816838995568686,
+ "grad_norm": 2.1191959381103516,
+ "learning_rate": 4.458424507658643e-05,
+ "loss": 0.2667,
+ "step": 200
+ },
+ {
+ "epoch": 1.4771048744460857,
+ "grad_norm": 4.9505510330200195,
+ "learning_rate": 4.201312910284464e-05,
+ "loss": 27.1689,
+ "step": 250
+ },
+ {
+ "epoch": 1.7725258493353029,
+ "grad_norm": 0.197406604886055,
+ "learning_rate": 3.9332603938730855e-05,
+ "loss": 4.521,
+ "step": 300
+ },
+ {
+ "epoch": 2.06794682422452,
+ "grad_norm": 0.035883717238903046,
+ "learning_rate": 3.6597374179431074e-05,
+ "loss": 0.0013,
+ "step": 350
+ },
+ {
+ "epoch": 2.363367799113737,
+ "grad_norm": 0.02112976275384426,
+ "learning_rate": 3.386214442013129e-05,
+ "loss": 0.0006,
+ "step": 400
+ },
+ {
+ "epoch": 2.658788774002954,
+ "grad_norm": 0.005675207823514938,
+ "learning_rate": 3.112691466083151e-05,
+ "loss": 0.0003,
+ "step": 450
+ },
+ {
+ "epoch": 2.9542097488921715,
+ "grad_norm": 0.004977445118129253,
+ "learning_rate": 2.839168490153173e-05,
+ "loss": 0.0003,
+ "step": 500
+ },
+ {
+ "epoch": 3.2496307237813884,
+ "grad_norm": 0.0056141638197004795,
+ "learning_rate": 2.565645514223195e-05,
+ "loss": 0.0002,
+ "step": 550
+ },
+ {
+ "epoch": 3.5450516986706058,
+ "grad_norm": 0.0035467667039483786,
+ "learning_rate": 2.292122538293217e-05,
+ "loss": 0.0002,
+ "step": 600
+ },
+ {
+ "epoch": 3.8404726735598227,
+ "grad_norm": 0.0036520687863230705,
+ "learning_rate": 2.0185995623632387e-05,
+ "loss": 0.0001,
+ "step": 650
+ },
+ {
+ "epoch": 4.13589364844904,
+ "grad_norm": 0.002137696836143732,
+ "learning_rate": 1.7450765864332606e-05,
+ "loss": 0.0001,
+ "step": 700
+ },
+ {
+ "epoch": 4.431314623338257,
+ "grad_norm": 0.0029208394698798656,
+ "learning_rate": 1.4715536105032822e-05,
+ "loss": 0.0001,
+ "step": 750
+ },
+ {
+ "epoch": 4.726735598227474,
+ "grad_norm": 0.0022111597936600447,
+ "learning_rate": 1.1980306345733041e-05,
+ "loss": 0.0001,
+ "step": 800
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.336633744490496e+17,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-846/training_args.bin b/checkpoint-846/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..81d7900185e43b427dbbbd817f4b048cc44538b8
--- /dev/null
+++ b/checkpoint-846/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2685ba35aea51a31bdbfcc1cdf0ba01aa45e80527118c18242d090f068b2e114
+size 5304
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tokenizer.model b/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..80c6aa4e7dd57e0e78805f1ddec9b62f2132fa5d
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+}