asahi417 committed on
Commit
7da1f53
·
1 Parent(s): 80556d4

model update

Browse files
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "tner_ckpt/wnut2017_roberta_large/best_model",
3
  "architectures": [
4
  "RobertaForTokenClassification"
5
  ],
 
1
  {
2
+ "_name_or_path": "tner_ckpt/wnut2017_roberta_large/model_rcsnba/epoch_5",
3
  "architectures": [
4
  "RobertaForTokenClassification"
5
  ],
eval/metric.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"micro/f1": 0.5375139977603584, "micro/f1_ci": {"90": [0.5084441265818846, 0.5659035599952082], "95": [0.5009032784561068, 0.5708361009044657]}, "micro/recall": 0.4448563484708063, "micro/precision": 0.6789250353606789, "macro/f1": 0.4734480458244917, "macro/f1_ci": {"90": [0.43917120642129315, 0.5024875758740787], "95": [0.43271191874216847, 0.5070716132538202]}, "macro/recall": 0.4020936892146829, "macro/precision": 0.59471614080646, "per_entity_metric": {"corporation": {"f1": 0.4065040650406504, "f1_ci": {"90": [0.29906542056074764, 0.5], "95": [0.2828282828282828, 0.5203767746632691]}, "precision": 0.43859649122807015, "recall": 0.3787878787878788}, "group": {"f1": 0.33913043478260874, "f1_ci": {"90": [0.2561576354679803, 0.410734126984127], "95": [0.23828756209579155, 0.42231075697211157]}, "precision": 0.6, "recall": 0.23636363636363636}, "location": {"f1": 0.6715867158671587, "f1_ci": {"90": [0.6072732714756062, 0.7298478561549101], "95": [0.5953245540300738, 0.73932245870086]}, "precision": 0.7520661157024794, "recall": 0.6066666666666667}, "person": {"f1": 0.6657342657342658, "f1_ci": {"90": [0.6245023518680807, 0.7022267180475272], "95": [0.6176350713695883, 0.706923626617258]}, "precision": 0.8321678321678322, "recall": 0.5547785547785548}, "product": {"f1": 0.27999999999999997, "f1_ci": {"90": [0.2127659574468085, 0.34907365310297794], "95": [0.1951111590761925, 0.3604741069287941]}, "precision": 0.3835616438356164, "recall": 0.2204724409448819}, "work_of_art": {"f1": 0.4777327935222672, "f1_ci": {"90": [0.4075471698113208, 0.546103563188926], "95": [0.3943463996832399, 0.5569649153228137]}, "precision": 0.5619047619047619, "recall": 0.4154929577464789}}}
eval/metric_span.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"micro/f1": 0.6304591265397536, "micro/f1_ci": {"90": [0.6024857090171492, 0.6556145650305563], "95": [0.5973100155318836, 0.6633070334683238]}, "micro/recall": 0.5217794253938832, "micro/precision": 0.7963224893917963, "macro/f1": 0.6304591265397536, "macro/f1_ci": {"90": [0.6024857090171492, 0.6556145650305563], "95": [0.5973100155318836, 0.6633070334683238]}, "macro/recall": 0.5217794253938832, "macro/precision": 0.7963224893917963}
eval/prediction.validation.json ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ad36d9f12ea8b172ebe53e89fafbcad6ffdb095ab22ecc951cc85939bcd17d7e
3
- size 1417424945
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22b77f78f7b8f90b4fb0c4fcef1f1be5fc7456f29e9057e53da0555048785769
3
+ size 1417430385
tokenizer_config.json CHANGED
@@ -6,7 +6,7 @@
6
  "errors": "replace",
7
  "mask_token": "<mask>",
8
  "model_max_length": 512,
9
- "name_or_path": "tner_ckpt/wnut2017_roberta_large/best_model",
10
  "pad_token": "<pad>",
11
  "sep_token": "</s>",
12
  "special_tokens_map_file": "tner_ckpt/wnut2017_roberta_large/model_rcsnba/epoch_5/special_tokens_map.json",
 
6
  "errors": "replace",
7
  "mask_token": "<mask>",
8
  "model_max_length": 512,
9
+ "name_or_path": "tner_ckpt/wnut2017_roberta_large/model_rcsnba/epoch_5",
10
  "pad_token": "<pad>",
11
  "sep_token": "</s>",
12
  "special_tokens_map_file": "tner_ckpt/wnut2017_roberta_large/model_rcsnba/epoch_5/special_tokens_map.json",
trainer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"dataset": ["tner/wnut2017"], "dataset_split": "train", "dataset_name": null, "local_dataset": null, "model": "roberta-large", "crf": true, "max_length": 128, "epoch": 15, "batch_size": 64, "lr": 1e-05, "random_seed": 42, "gradient_accumulation_steps": 1, "weight_decay": null, "lr_warmup_step_ratio": 0.1, "max_grad_norm": 10.0}