hung200504 committed
Commit a36f319
Parent: 1094ecf
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+base_model: hung200504/bert-base-uncased
 tags:
 - generated_from_trainer
 model-index:
@@ -11,7 +12,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # bert-base-uncased
 
-This model was trained from scratch on an unknown dataset.
+This model is a fine-tuned version of [hung200504/bert-base-uncased](https://huggingface.co/hung200504/bert-base-uncased) on an unknown dataset.
 
 ## Model description
 
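With `base_model` recorded, the card now describes a fine-tune rather than a from-scratch run. A minimal sketch of loading the checkpoint for extractive QA, assuming only the repo id from this diff and the `BertForQuestionAnswering` architecture declared in config.json below:

```python
# Minimal sketch: load this checkpoint for extractive question answering.
# The repo id comes from the diff; the task choice follows from the
# "BertForQuestionAnswering" architecture in config.json.
from transformers import pipeline

qa = pipeline("question-answering", model="hung200504/bert-base-uncased")
result = qa(
    question="What was the model fine-tuned from?",
    context="This model is a fine-tuned version of hung200504/bert-base-uncased.",
)
print(result["answer"], round(result["score"], 3))
```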
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "bert-base-uncased",
+  "_name_or_path": "hung200504/bert-base-uncased",
   "architectures": [
     "BertForQuestionAnswering"
   ],
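The `_name_or_path` field is provenance metadata written by `save_pretrained`: it records which checkpoint the weights were loaded from, and the update reflects that this run started from the hub copy rather than stock `bert-base-uncased`. A sketch for reading it back, assuming `transformers` is installed:

```python
# Sketch: read back the updated config fields from the hub.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("hung200504/bert-base-uncased")
print(config.architectures)   # ["BertForQuestionAnswering"]
print(config._name_or_path)   # id/path the config was loaded from
```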
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:73a308f6d954643a57f9de834ce965a93e99142d701929511198b1e54279d773
+oid sha256:40ee78709af067b2818403cc8c60b2d99be39d367ce56313acd7e273ccb38aac
 size 435640489
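Only the pointer changes here: weight files are stored via Git LFS, so the repo tracks a version line, a SHA-256 object id, and the byte size rather than the binary itself. The identical size with a new oid means the weights were retrained, not resized. A sketch for checking a locally downloaded file against the new pointer (the local path is an assumption):

```python
# Sketch: verify a downloaded pytorch_model.bin against the LFS pointer.
# Expected oid and size are taken from the new pointer in this commit.
import hashlib

EXPECTED_OID = "40ee78709af067b2818403cc8c60b2d99be39d367ce56313acd7e273ccb38aac"
EXPECTED_SIZE = 435640489

def verify(path: str) -> bool:
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

print(verify("pytorch_model.bin"))  # True if the download matches the pointer
```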
special_tokens_map.json CHANGED
@@ -1,4 +1,11 @@
 {
+  "additional_special_tokens": [
+    "[PAD]",
+    "[UNK]",
+    "[CLS]",
+    "[SEP]",
+    "[MASK]"
+  ],
   "cls_token": "[CLS]",
   "mask_token": "[MASK]",
   "pad_token": "[PAD]",
tokenizer_config.json CHANGED
@@ -41,16 +41,29 @@
       "special": true
     }
   },
-  "additional_special_tokens": [],
+  "additional_special_tokens": [
+    "[PAD]",
+    "[UNK]",
+    "[CLS]",
+    "[SEP]",
+    "[MASK]"
+  ],
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_lower_case": true,
   "mask_token": "[MASK]",
+  "max_length": 384,
   "model_max_length": 512,
+  "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
   "sep_token": "[SEP]",
+  "stride": 0,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "only_second",
   "unk_token": "[UNK]"
 }
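The new keys snapshot the encoding arguments used at training time: `truncation_strategy: "only_second"` keeps the first sequence (the question) intact and truncates only the second (the context), with a 384-token window, right-side padding, and no stride. A sketch of the equivalent direct tokenizer call, with made-up question/context strings:

```python
# Sketch: reproduce the saved encoding settings in a direct tokenizer call.
# truncation="only_second" trims the context but never the question.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("hung200504/bert-base-uncased")
enc = tok(
    "Which sequence gets truncated?",          # question (first sequence)
    "A very long supporting context. " * 100,  # context (second sequence)
    truncation="only_second",
    max_length=384,
    stride=0,
    padding="max_length",
)
print(len(enc["input_ids"]))  # 384
```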
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0418ded23c8e6196f2e312245468d90d8afa4e0597f0858b94ced5a222ea9096
+oid sha256:fa5ac1275396c00c658708e8a8fb0a7af79700bdfeea936c6a2a6388c75bdcdc
 size 4091
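`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` saves next to the weights, so its hash changes whenever any hyperparameter does. A sketch for inspecting a local copy (`weights_only=False` is an assumption for newer torch versions, since the file is a pickled object rather than a tensor dict):

```python
# Sketch: inspect the pickled TrainingArguments saved by Trainer.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```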