anujsahani01 committed
Commit 5ce9049 · 1 Parent(s): e983c74

Training in progress, step 500

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ffae56b23b3c1d970b51a356e579834206abf7e6aad5dcf43ec0047070e8925
+oid sha256:8849ff4649849129dc8d7aeeeec5d29e600075b35de199072442c14020d8f06a
 size 1188025
added_tokens.json CHANGED
@@ -1,7 +1,7 @@
 {
   "<ASSISTANT_TASK:>": 32771,
   "<END_TASK>": 32772,
-  "<PAD>": 32768,
   "<SYSTEM_TASK:>": 32769,
-  "<USER_TASK:>": 32770
+  "<USER_TASK:>": 32770,
+  "<|PAD|>": 32768
 }
tokenizer.json CHANGED
@@ -14,7 +14,7 @@
   },
   {
     "id": 32768,
-    "content": "<PAD>",
+    "content": "<|PAD|>",
     "single_word": false,
     "lstrip": false,
     "rstrip": false,
tokenizer_config.json CHANGED
@@ -9,8 +9,8 @@
   "bos_token": "<|endoftext|>",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|endoftext|>",
-  "model_max_length": 3000,
-  "pad_token": "<PAD>",
+  "model_max_length": 5000,
+  "pad_token": "<|PAD|>",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>"
 }
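
The net effect of this commit is that the pad token is renamed from <PAD> to <|PAD|> (keeping id 32768) and model_max_length is raised from 3000 to 5000. A minimal sketch for checking that a loaded tokenizer reflects these changes, assuming "path/to/checkpoint" is a placeholder for this repo or a local clone:

```python
# Minimal sketch: verify the tokenizer changes from this commit.
# "path/to/checkpoint" is a placeholder; point it at this repo or a local clone.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")

# The pad token was renamed from "<PAD>" to "<|PAD|>"; its id (32768) is unchanged.
assert tokenizer.pad_token == "<|PAD|>"
assert tokenizer.pad_token_id == 32768

# model_max_length was raised from 3000 to 5000.
assert tokenizer.model_max_length == 5000
```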