toilachuoituyet committed on
Commit
5d2882c
·
verified ·
1 Parent(s): dce3a17

Upload project files

Browse files
Text_encoder/model_best/config.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "uie_base_en",
3
+ "architectures": [
4
+ "UIE"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 768,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 3072,
13
+ "layer_norm_eps": 1e-12,
14
+ "max_position_embeddings": 512,
15
+ "model_type": "bert",
16
+ "num_attention_heads": 12,
17
+ "num_hidden_layers": 12,
18
+ "pad_token_id": 0,
19
+ "position_embedding_type": "absolute",
20
+ "torch_dtype": "float32",
21
+ "transformers_version": "4.20.0",
22
+ "type_vocab_size": 4,
23
+ "use_cache": true,
24
+ "vocab_size": 30522
25
+ }
Text_encoder/model_best/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afea4b2ea4e7389c794ed4d71e169bb00284abb83d36e3c89f7883203c7887b0
3
+ size 456930115
Text_encoder/model_best/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
Text_encoder/model_best/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
Text_encoder/model_best/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "do_basic_tokenize": true,
4
+ "do_lower_case": true,
5
+ "mask_token": "[MASK]",
6
+ "name_or_path": "uie_base_en",
7
+ "never_split": null,
8
+ "pad_token": "[PAD]",
9
+ "sep_token": "[SEP]",
10
+ "special_tokens_map_file": "uie_base_en/special_tokens_map.json",
11
+ "strip_accents": null,
12
+ "tokenize_chinese_chars": true,
13
+ "tokenizer_class": "BertTokenizer",
14
+ "unk_token": "[UNK]"
15
+ }
Text_encoder/model_best/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoints/masc.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d0e1703c7c4c6c41b80ba2f83aff61be6a3803b1deec53f87088cc4f4387924
3
+ size 1223522938
checkpoints/mate.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:894fe8959e077a45a3d50fe1880abe88e7eb07db00ce4cec2870130d129fa654
3
+ size 1223403721
eval.sh CHANGED
@@ -1,15 +1,22 @@
1
  #!/usr/bin/env bash
2
- export CUDA_VISIBLE_DEVICES="1"
 
3
 
 
 
 
4
  # MATE evaluation
 
5
 
6
- CHECKPOINT_DIR="./checkpoints/MATE_2017"
7
- TEST_DATA="./finetune_dataset/twitter17/test"
8
 
9
  best_stats_values=(0 0 0 0 0 0 "None") # [Correct, Label, Prediction, Accuracy, Recall, F1, Model]
10
  declare -r COR=0 LABEL=1 PRED=2 ACC=3 REC=4 F1=5 MODEL=6
11
 
12
  for model in "${CHECKPOINT_DIR}"/*.pt; do
 
 
13
  output=$(python eval_tools.py \
14
  --MATE_model "${model}" \
15
  --test_ds "${TEST_DATA}" \
@@ -47,25 +54,26 @@ for model in "${CHECKPOINT_DIR}"/*.pt; do
47
  fi
48
  done
49
 
50
- echo -e "\nBest Model: ${best_stats_values[$MODEL]}"
 
51
  echo "F1 : ${best_stats_values[$F1]}"
52
  echo "Accuracy: ${best_stats_values[$ACC]}"
53
  echo "Recall : ${best_stats_values[$REC]}"
54
- echo "Correct : ${best_stats_values[$COR]}"
55
- echo "Label : ${best_stats_values[$LABEL]}"
56
- echo "Prediction: ${best_stats_values[$PRED]}"
57
-
58
 
59
 
60
- # MASC evaluation
 
 
61
 
62
- # CHECKPOINT_DIR="./checkpoints/MASC_2017"
63
- # TEST_DATA="./finetune_dataset/twitter17/test"
64
 
65
  # best_stats_values=(0 0 0 0 0 "None") # [Correct, Label, Prediction, Accuracy, Macro_F1, Model]
66
  # declare -r COR=0 LABEL=1 PRED=2 ACC=3 MacroF1=4 MODEL=5
67
 
68
  # for model in "${CHECKPOINT_DIR}"/*.pt; do
 
 
69
  # output=$(python eval_tools.py \
70
  # --MASC_model "${model}" \
71
  # --test_ds "${TEST_DATA}" \
@@ -88,7 +96,7 @@ echo "Prediction: ${best_stats_values[$PRED]}"
88
 
89
  # if [[ "${f1:-0}" =~ ^[0-9.]+$ ]]; then
90
  # is_better=$(awk -v f1="$f1" -v best="${best_stats_values[$MacroF1]}" 'BEGIN { print (f1 > best) ? 1 : 0 }')
91
-
92
  # if [ "$is_better" -eq 1 ]; then
93
  # best_stats_values[$COR]=${correct:-0}
94
  # best_stats_values[$LABEL]=${label:-0}
@@ -100,20 +108,20 @@ echo "Prediction: ${best_stats_values[$PRED]}"
100
  # fi
101
  # done
102
 
103
- # echo -e "\nBest Model: ${best_stats_values[$MODEL]}"
104
- # echo "F1 : ${best_stats_values[$MacroF1]}"
 
105
  # echo "Accuracy: ${best_stats_values[$ACC]}"
106
- # echo "Correct : ${best_stats_values[$COR]}"
107
- # echo "Label : ${best_stats_values[$LABEL]}"
108
- # echo "Prediction: ${best_stats_values[$PRED]}"
109
 
110
 
 
 
 
111
 
112
- # MABSA evaluation
113
  # python eval_tools.py \
114
- # --MATE_model ./DASCO/checkpoints/MATE_2017/best_f1:94.933.pt \
115
- # --MASC_model ./DASCO/checkpoints/MASC_2017/best_f1:77.616.pt \
116
- # --test_ds ./finetune_dataset/twitter17/test \
117
  # --task MABSA \
118
  # --gcn_layers 4 \
119
  # --device cuda:0
 
1
  #!/usr/bin/env bash
2
+ # Evaluation script for DASCO models
3
+ # Supports MATE, MASC, and MABSA evaluation
4
 
5
+ export CUDA_VISIBLE_DEVICES="0"
6
+
7
+ # ============================================
8
  # MATE evaluation
9
+ # ============================================
10
 
11
+ CHECKPOINT_DIR="./checkpoints/MATE_custom"
12
+ TEST_DATA="./finetune_dataset/custom/test"
13
 
14
  best_stats_values=(0 0 0 0 0 0 "None") # [Correct, Label, Prediction, Accuracy, Recall, F1, Model]
15
  declare -r COR=0 LABEL=1 PRED=2 ACC=3 REC=4 F1=5 MODEL=6
16
 
17
  for model in "${CHECKPOINT_DIR}"/*.pt; do
18
+ [ -f "$model" ] || continue # Skip if no .pt files found
19
+
20
  output=$(python eval_tools.py \
21
  --MATE_model "${model}" \
22
  --test_ds "${TEST_DATA}" \
 
54
  fi
55
  done
56
 
57
+ echo -e "\n========== MATE Best Results =========="
58
+ echo "Best Model: ${best_stats_values[$MODEL]}"
59
  echo "F1 : ${best_stats_values[$F1]}"
60
  echo "Accuracy: ${best_stats_values[$ACC]}"
61
  echo "Recall : ${best_stats_values[$REC]}"
 
 
 
 
62
 
63
 
64
+ # ============================================
65
+ # MASC evaluation (uncomment to use)
66
+ # ============================================
67
 
68
+ # CHECKPOINT_DIR="./checkpoints/MASC_custom"
69
+ # TEST_DATA="./finetune_dataset/custom/test"
70
 
71
  # best_stats_values=(0 0 0 0 0 "None") # [Correct, Label, Prediction, Accuracy, Macro_F1, Model]
72
  # declare -r COR=0 LABEL=1 PRED=2 ACC=3 MacroF1=4 MODEL=5
73
 
74
  # for model in "${CHECKPOINT_DIR}"/*.pt; do
75
+ # [ -f "$model" ] || continue
76
+ #
77
  # output=$(python eval_tools.py \
78
  # --MASC_model "${model}" \
79
  # --test_ds "${TEST_DATA}" \
 
96
 
97
  # if [[ "${f1:-0}" =~ ^[0-9.]+$ ]]; then
98
  # is_better=$(awk -v f1="$f1" -v best="${best_stats_values[$MacroF1]}" 'BEGIN { print (f1 > best) ? 1 : 0 }')
99
+ #
100
  # if [ "$is_better" -eq 1 ]; then
101
  # best_stats_values[$COR]=${correct:-0}
102
  # best_stats_values[$LABEL]=${label:-0}
 
108
  # fi
109
  # done
110
 
111
+ # echo -e "\n========== MASC Best Results =========="
112
+ # echo "Best Model: ${best_stats_values[$MODEL]}"
113
+ # echo "Macro F1: ${best_stats_values[$MacroF1]}"
114
  # echo "Accuracy: ${best_stats_values[$ACC]}"
 
 
 
115
 
116
 
117
+ # ============================================
118
+ # MABSA evaluation (uncomment to use)
119
+ # ============================================
120
 
 
121
  # python eval_tools.py \
122
+ # --MATE_model ./checkpoints/MATE_custom/best_f1:XX.XXX.pt \
123
+ # --MASC_model ./checkpoints/MASC_custom/best_f1:XX.XXX.pt \
124
+ # --test_ds ./finetune_dataset/custom/test \
125
  # --task MABSA \
126
  # --gcn_layers 4 \
127
  # --device cuda:0