Virus-Proton committed
Commit 1ddcfc4 · verified · 1 parent: a07a323

Training in progress, step 500

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "[MASK]": 128000
+}
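The new added_tokens.json registers [MASK] at id 128000, one slot past the 128,000-entry DeBERTa-v3 SentencePiece vocabulary (the config's vocab_size of 128100 leaves padding for such additions). A minimal sketch of what this mapping means in practice, assuming the commit's files have been downloaded to an illustrative local path:

from transformers import AutoTokenizer

# "./checkpoint" is a placeholder for wherever this repo's files live locally.
tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

# added_tokens.json pins [MASK] to id 128000.
assert tokenizer.convert_tokens_to_ids("[MASK]") == 128000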
config.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "_name_or_path": "lakshyakh93/deberta_finetuned_pii",
+  "_name_or_path": "microsoft/deberta-v3-base",
   "architectures": [
-    "DebertaForTokenClassification"
+    "DebertaV2ForTokenClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "hidden_act": "gelu",
@@ -42,7 +42,8 @@
   "layer_norm_eps": 1e-07,
   "max_position_embeddings": 512,
   "max_relative_positions": -1,
-  "model_type": "deberta",
+  "model_type": "deberta-v2",
+  "norm_rel_ebd": "layer_norm",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
   "pad_token_id": 0,
@@ -50,13 +51,15 @@
   "pooler_hidden_act": "gelu",
   "pooler_hidden_size": 768,
   "pos_att_type": [
-    "c2p",
-    "p2c"
+    "p2c",
+    "c2p"
   ],
   "position_biased_input": false,
+  "position_buckets": 256,
   "relative_attention": true,
+  "share_att_key": true,
   "torch_dtype": "float32",
   "transformers_version": "4.36.2",
   "type_vocab_size": 0,
-  "vocab_size": 50265
+  "vocab_size": 128100
 }
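Taken together, these config edits re-base the checkpoint from the DeBERTa-v1 PII model onto microsoft/deberta-v3-base: a v2-family architecture class and model_type, the v2 relative-attention options (norm_rel_ebd, position_buckets, share_att_key), and the much larger 128,100-entry vocabulary. A minimal sketch of how a config like the new one typically arises, assuming a fresh token-classification head on the v3 base (num_labels is illustrative; the real label set is not shown in this diff):

from transformers import AutoModelForTokenClassification

model = AutoModelForTokenClassification.from_pretrained(
    "microsoft/deberta-v3-base",
    num_labels=2,  # placeholder; the label count is not recorded in this commit
)
print(model.config.model_type)  # deberta-v2
print(model.config.vocab_size)  # 128100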
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:685e0734bbb9085bfffff53ae73473ff409ec40556f1470832dc3264d41e4e27
-size 554470860
+oid sha256:fbcec678037238add86033f25c42eb14dd60eb1ef696fd5a45396ff9f811ff28
+size 735390572
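The weights are stored through Git LFS, so the diff shows only the pointer file; the jump from ~554 MB to ~735 MB is consistent with the much larger embedding matrix (128,100 rather than 50,265 rows at hidden size 768). A minimal sketch for verifying a downloaded blob against the pointer above (the local filename is an assumption):

import hashlib

expected = "fbcec678037238add86033f25c42eb14dd60eb1ef696fd5a45396ff9f811ff28"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:            # illustrative local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected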
special_tokens_map.json CHANGED
@@ -1,46 +1,10 @@
 {
-  "bos_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "[MASK]",
-    "lstrip": true,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
   "unk_token": {
     "content": "[UNK]",
     "lstrip": false,
spm.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
+size 2464616
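spm.model is the SentencePiece model that the new DebertaV2Tokenizer requires; the slow tokenizer reads it directly. A minimal sketch, assuming the sentencepiece package and this commit's file:

import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="spm.model")
print(sp.vocab_size())  # expected: 128000 for the deberta-v3-base vocabulary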
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,11 +1,9 @@
 {
-  "add_bos_token": false,
-  "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
       "content": "[PAD]",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -13,7 +11,7 @@
     "1": {
       "content": "[CLS]",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -21,7 +19,7 @@
     "2": {
       "content": "[SEP]",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -34,27 +32,27 @@
       "single_word": false,
       "special": true
     },
-    "50264": {
+    "128000": {
       "content": "[MASK]",
-      "lstrip": true,
-      "normalized": true,
+      "lstrip": false,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
-  "additional_special_tokens": [],
   "bos_token": "[CLS]",
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_lower_case": false,
   "eos_token": "[SEP]",
-  "errors": "replace",
   "mask_token": "[MASK]",
-  "model_max_length": 512,
+  "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "tokenizer_class": "DebertaTokenizer",
+  "sp_model_kwargs": {},
+  "split_by_punct": false,
+  "tokenizer_class": "DebertaV2Tokenizer",
   "unk_token": "[UNK]",
-  "vocab_type": "gpt2"
+  "vocab_type": "spm"
 }
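The tokenizer definition switches from the BPE-based DebertaTokenizer (vocab_type "gpt2") to the SentencePiece-based DebertaV2Tokenizer (vocab_type "spm"). Note that model_max_length becomes the very-large sentinel transformers writes when no limit is stored, so callers should truncate to the model's 512-position budget themselves. A minimal sketch (path illustrative):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")  # placeholder path
# The tokenizer config no longer caps length, so cap it explicitly at
# max_position_embeddings = 512 from config.json:
enc = tokenizer("some long input ...", truncation=True, max_length=512)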
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:14b6028bea919006be9ad13fd6cba8ea1bf6d1e2ede0b44fafc34e7aa5ef0bcc
+oid sha256:36eaebafcaa405a4870d259d4be3467ac08fc2a7e955e976d0860e2d67c72fcc
 size 4283