SkyR committed
Commit 7bf32c9 (1 parent: 18a0ba2)

Training in progress, step 10000

.gitattributes CHANGED
@@ -26,3 +26,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
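tokenizer.json is now tracked by Git LFS, so a clone made without the LFS extension leaves a small text pointer in its place instead of the real file. A minimal sketch (plain Python; no assumptions beyond the pointer format visible in the diffs below) to detect that situation:

# Detect an un-smudged Git LFS pointer: a tiny text file that starts with
# "version https://git-lfs.github.com/spec/v1" instead of the real payload.
def is_lfs_pointer(path: str) -> bool:
    try:
        with open(path, "rb") as f:
            head = f.read(48)
    except OSError:
        return False
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

print(is_lfs_pointer("tokenizer.json"))  # True if `git lfs pull` has not run yet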
config.json CHANGED
@@ -1,11 +1,12 @@
 {
-  "_name_or_path": "bert-base-multilingual-cased",
+  "_name_or_path": "xlm-roberta-base",
   "architectures": [
-    "BertForTokenClassification"
+    "XLMRobertaForTokenClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
   "classifier_dropout": null,
-  "directionality": "bidi",
+  "eos_token_id": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
@@ -33,21 +34,17 @@
     "I-PER": 2,
     "O": 0
   },
-  "layer_norm_eps": 1e-12,
-  "max_position_embeddings": 512,
-  "model_type": "bert",
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "pad_token_id": 0,
-  "pooler_fc_size": 768,
-  "pooler_num_attention_heads": 12,
-  "pooler_num_fc_layers": 3,
-  "pooler_size_per_head": 128,
-  "pooler_type": "first_token_transform",
+  "output_past": true,
+  "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
   "transformers_version": "4.16.2",
-  "type_vocab_size": 2,
+  "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size": 119547
+  "vocab_size": 250002
 }
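The new config swaps the backbone from bert-base-multilingual-cased to xlm-roberta-base while keeping the token-classification label mapping (e.g. "O": 0, "I-PER": 2). A minimal loading sketch; the local path "." is an assumption standing in for a clone of this repo (substitute the Hub id if you prefer):

from transformers import AutoConfig, AutoModelForTokenClassification

# "." is a placeholder for a local clone of this repo (assumption).
# AutoModel dispatches to XLMRobertaForTokenClassification via the
# "architectures" field in the new config.json.
config = AutoConfig.from_pretrained(".")
model = AutoModelForTokenClassification.from_pretrained(".", config=config)

print(config.model_type)   # "xlm-roberta"
print(config.vocab_size)   # 250002
print(config.label2id)     # e.g. {"O": 0, ..., "I-PER": 2}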
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:68ae74512a7153fd1fbbeb902a1b3cfc8307027d019cb4fc953e32217c0c9e67
3
- size 709162481
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77e9f74f0c258d2fa52f645e81e87be141c3b13d5b9a92c7882628382088ae52
3
+ size 1109924593
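The weight file grows from ~709 MB to ~1.11 GB. A quick back-of-the-envelope check shows the delta is almost entirely the larger float32 token-embedding matrix (vocab 119547 → 250002 at hidden size 768); the few remaining kilobytes come from other shape changes such as the position embeddings:

# Sizes from the LFS pointers above; vocab and hidden size from config.json.
old_size, new_size = 709162481, 1109924593   # bytes
old_vocab, new_vocab = 119547, 250002
hidden, bytes_per_param = 768, 4             # float32

embedding_growth = (new_vocab - old_vocab) * hidden * bytes_per_param
print(new_size - old_size)   # 400762112
print(embedding_growth)      # 400757760 -> within ~4 KB of the actual delta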
runs/Apr11_16-18-24_17daf5136803/1649694564.9745529/events.out.tfevents.1649694564.17daf5136803.36.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6527564c92d7fd79261a19b080be66eb3d2eaae71c71fb83588bfe4e26421df3
+size 4802
runs/Apr11_16-18-24_17daf5136803/events.out.tfevents.1649694564.17daf5136803.36.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e84519b683b1a3de193bafcf6255101819e8c8306ddd48d6ae7954d7c279d6e
+size 7339
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
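sentencepiece.bpe.model is the raw SentencePiece model that XLMRobertaTokenizer wraps. To inspect the subword vocabulary directly, a sketch using the sentencepiece library (the sample string is arbitrary):

import sentencepiece as spm

# Load the raw BPE model added in this commit and tokenize a sample string.
sp = spm.SentencePieceProcessor(model_file="sentencepiece.bpe.model")
print(sp.get_piece_size())  # slightly below config.json's 250002; the HF
                            # tokenizer adds an offset and <mask> on top
print(sp.encode("Training in progress", out_type=str))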
special_tokens_map.json CHANGED
@@ -1 +1 @@
1
- {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
1
- {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-base-multilingual-cased", "tokenizer_class": "BertTokenizer"}
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "xlm-roberta-base", "tokenizer_class": "XLMRobertaTokenizer"}
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9dfa833616c7445978cccac83ed43395ca0f83afce10efcac9fee9221a95a917
+oid sha256:038ea22032858bf1a4b50cb504200b46abbfbd3320499b1c1393bdf6e60c6727
 size 3055