ashishkumar-Conveyer committed on
Commit 518fe5a · verified · 1 Parent(s): 08954d6

End of training

README.md CHANGED
```diff
@@ -5,7 +5,7 @@ tags:
 - trl
 - sft
 - generated_from_trainer
-base_model: mistralai/Mistral-7B-Instruct-v0.2
+base_model: mistralai/Mistral-7B-v0.1
 model-index:
 - name: results
   results: []
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # results
 
-This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the None dataset.
+This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the None dataset.
 
 ## Model description
 
```
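This commit retargets the adapter from the instruct model to the base model. A minimal sketch of loading the adapter on its new base with PEFT; the repo id `ashishkumar-Conveyer/results` is an assumption inferred from the commit author and the model name, not stated in the diff:

```python
# Hypothetical repo id; substitute the actual adapter repository.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer = AutoTokenizer.from_pretrained("ashishkumar-Conveyer/results")
model = PeftModel.from_pretrained(base, "ashishkumar-Conveyer/results")
```

Loading via `AutoPeftModelForCausalLM` would instead resolve the base from `base_model_name_or_path` in adapter_config.json, which is why that field is updated in the same commit below.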
adapter_config.json CHANGED
```diff
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.2",
+  "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -19,11 +19,11 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "k_proj",
-    "gate_proj",
     "o_proj",
-    "v_proj"
+    "k_proj",
+    "v_proj",
+    "q_proj",
+    "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
```
adapter_model.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f9cb0d5026def69f3c5f4769224e8e20d7b6495f6817b641b3ae27246fc0593
+oid sha256:25960bddddd1acc3e75491446883e477d2d88da615af89f34fb548e785994bf3
 size 369142184
```
tokenizer.json CHANGED
```diff
@@ -1,6 +1,11 @@
 {
   "version": "1.0",
-  "truncation": null,
+  "truncation": {
+    "direction": "Right",
+    "max_length": 1024,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
   "padding": null,
   "added_tokens": [
     {
```
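The new `truncation` block caps inputs at 1024 tokens, truncating from the right. A minimal sketch of how such a block is typically written into tokenizer.json via the `tokenizers` library; whether this run set it explicitly or a trainer's max-sequence-length setting did so is an assumption:

```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
# Serializes as {"direction": "Right", "max_length": 1024,
#                "strategy": "LongestFirst", "stride": 0}
tokenizer.enable_truncation(max_length=1024, stride=0,
                            strategy="longest_first", direction="right")
tokenizer.save("tokenizer.json")
```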
tokenizer_config.json CHANGED
```diff
@@ -29,7 +29,6 @@
   },
   "additional_special_tokens": [],
   "bos_token": "<s>",
-  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "legacy": true,
```
training_args.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ba9ab488c2d0f89796048cdb78b12ac26b4e98c759f5978b02a9ef81b5ff7261
+oid sha256:0492489a5f3e030743abfcdf69f1cbb4008fb808e1d15b1c27e861a87fca4e50
 size 4856
```