Upload folder using huggingface_hub
- config.json +52 -0
- model.safetensors +3 -0
- special_tokens_map.json +7 -0
- tokenizer.json +0 -0
- tokenizer_config.json +57 -0
- trainer_state.json +56 -0
- training_args.bin +3 -0
- vocab.txt +0 -0
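A commit like this is produced by huggingface_hub's upload_folder, which pushes every file in a local directory as a single commit and routes large binaries (model.safetensors, training_args.bin) through Git LFS, so only their pointer files appear in the diffs below. A minimal sketch of such a call; the local path and repo id are placeholders, not values taken from this page:

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default

# Upload the whole training output directory as a single commit.
# Files matched by the repo's LFS rules are stored as LFS objects;
# the diff then shows only their pointer files.
api.upload_folder(
    folder_path="./gatortron-ner-output",   # placeholder local path
    repo_id="your-username/your-model",     # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)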
config.json
ADDED
@@ -0,0 +1,52 @@
+{
+  "_name_or_path": "UFNLP/gatortronS",
+  "architectures": [
+    "MegatronBertForTokenClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "finetuning_task": "ner",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "B-IHC",
+    "1": "B-IHC_modifier",
+    "2": "B-anatomical_site",
+    "3": "B-diagnosis_descriptor",
+    "4": "B-pathological_diagnosis",
+    "5": "I-IHC",
+    "6": "I-IHC_modifier",
+    "7": "I-anatomical_site",
+    "8": "I-diagnosis_descriptor",
+    "9": "I-pathological_diagnosis",
+    "10": "O"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "B-IHC": 0,
+    "B-IHC_modifier": 1,
+    "B-anatomical_site": 2,
+    "B-diagnosis_descriptor": 3,
+    "B-pathological_diagnosis": 4,
+    "I-IHC": 5,
+    "I-IHC_modifier": 6,
+    "I-anatomical_site": 7,
+    "I-diagnosis_descriptor": 8,
+    "I-pathological_diagnosis": 9,
+    "O": 10
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "megatron-bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "tokenizer_type": "BertWordPieceCase",
+  "torch_dtype": "float32",
+  "transformers_version": "4.41.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 50176
+}
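config.json describes a 24-layer MegatronBERT encoder (hidden size 1024, 16 heads) initialized from UFNLP/gatortronS and fine-tuned for token classification over five pathology entity types in BIO tagging (IHC, IHC_modifier, anatomical_site, diagnosis_descriptor, pathological_diagnosis, plus O). A minimal inference sketch; the repo id and example sentence are placeholders, not part of this upload:

from transformers import pipeline

# aggregation_strategy="simple" merges B-/I- sub-word predictions
# into whole entity spans using the id2label map above.
ner = pipeline(
    "token-classification",
    model="your-username/your-model",  # placeholder repo id
    aggregation_strategy="simple",
)

print(ner("Invasive ductal carcinoma of the left breast, ER positive by IHC."))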
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:523648a9c59e9d88bf002885c54843bf52322ba0e3dbdbf405a94d412d147a78
+size 1416962564
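The diff shows only the Git LFS pointer; the ~1.4 GB weight file itself lives in LFS storage. A sketch of fetching the real blob with huggingface_hub (placeholder repo id):

from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the actual safetensors blob
# into the local cache, returning its path.
path = hf_hub_download(
    repo_id="your-username/your-model",  # placeholder
    filename="model.safetensors",
)
print(path)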
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
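Note that model_max_length here is the transformers sentinel for "no limit set" (an arbitrarily large integer), while config.json caps max_position_embeddings at 512, so long inputs need explicit truncation. A short sketch, again with a placeholder repo id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-username/your-model")  # placeholder

# The tokenizer itself reports no length limit, so enforce the
# model's real 512-position cap at encode time.
enc = tok("...a long pathology report...", truncation=True, max_length=512)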
trainer_state.json
ADDED
@@ -0,0 +1,56 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 1092,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "grad_norm": 2.766226053237915,
+      "learning_rate": 2.6223219590153115e-05,
+      "loss": 0.2037,
+      "step": 546
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 1.024935245513916,
+      "learning_rate": 0.0,
+      "loss": 0.0756,
+      "step": 1092
+    },
+    {
+      "epoch": 2.0,
+      "step": 1092,
+      "total_flos": 790377348130650.0,
+      "train_loss": 0.13962632832509694,
+      "train_runtime": 294.0097,
+      "train_samples_per_second": 14.836,
+      "train_steps_per_second": 3.714
+    }
+  ],
+  "logging_steps": 546,
+  "max_steps": 1092,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 2,
+  "save_steps": 546,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 790377348130650.0,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
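trainer_state.json records a short fine-tuning run: two epochs of 546 steps each (1092 total, per-device batch size 4), with the logged loss falling from 0.2037 to 0.0756 and the learning rate decaying to 0 by the final step. A small sketch that reads the log back out of the file as saved in a checkpoint directory:

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Print each logged training entry (the final summary entry has no "loss").
for entry in state["log_history"]:
    if "loss" in entry:
        print(f'epoch {entry["epoch"]}: loss={entry["loss"]:.4f}, '
              f'lr={entry["learning_rate"]:.2e}, grad_norm={entry["grad_norm"]:.3f}')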
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11659d2b5f04737019690a9c641aa1c6b431c463f45b7028731f1530b10d9117
+size 5112
vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff