asahi417 committed
Commit ce7523f · 1 Parent(s): 415b96f
experiments/model_finetuning_ner.py CHANGED
@@ -9,8 +9,9 @@ import json
 import logging
 import math
 import os
+import re
 from os.path import join as pj
-from shutil import copyfile
+from shutil import copyfile, rmtree
 from glob import glob
 
 import numpy as np
@@ -23,6 +24,28 @@ logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logg
 EVAL_STEP = 500
 RANDOM_SEED = 42
 N_TRIALS = 10
+URL_RE = re.compile(r"https?:\/\/[\w\.\/\?\=\d&#%_:/-]+")
+HANDLE_RE = re.compile(r"@\w+")
+
+
+def preprocess_bernice(text):
+    text = HANDLE_RE.sub("@USER", text)
+    text = URL_RE.sub("HTTPURL", text)
+    return text
+
+
+def preprocess_timelm(text):
+    text = HANDLE_RE.sub("@user", text)
+    text = URL_RE.sub("http", text)
+    return text
+
+
+def preprocess(model_name, text):
+    if model_name == "jhu-clsp/bernice":
+        return preprocess_bernice(text)
+    if "twitter-roberta-base" in model_name:
+        return preprocess_timelm(text)
+    return text
 
 
 def sigmoid(x):
@@ -66,8 +89,9 @@ def main(
         return new_labels
 
     def tokenize_and_align_labels(examples):
+        tokens = [[preprocess(model, w) for w in t] for t in examples["text_tokenized"]]
        tokenized_inputs = tokenizer(
-            examples["tokens"], truncation=True, is_split_into_words=True, padding="max_length", max_length=256
+            tokens, truncation=True, is_split_into_words=True, padding="max_length", max_length=256
        )
         all_labels = examples["ner_tags"]
         new_labels = []
@@ -174,6 +198,7 @@ def main(
         if not os.path.exists(f"{model_alias}/{os.path.basename(i)}"):
             copyfile(i, f"{model_alias}/{os.path.basename(i)}")
     repo.push_to_hub()
+    rmtree(model_alias)
 
 
 if __name__ == "__main__":
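
For a quick sense of what the new normalisation does, here is a minimal self-contained sketch using the same regexes the commit adds; the sample tweet is invented for illustration:

import re

URL_RE = re.compile(r"https?:\/\/[\w\.\/\?\=\d&#%_:/-]+")
HANDLE_RE = re.compile(r"@\w+")

tweet = "@asahi417 new results at https://example.com/paper?id=1"

# bernice-style placeholders (upper case)
print(URL_RE.sub("HTTPURL", HANDLE_RE.sub("@USER", tweet)))
# -> @USER new results at HTTPURL

# timelm-style placeholders (lower case)
print(URL_RE.sub("http", HANDLE_RE.sub("@user", tweet)))
# -> @user new results at http

Note that for NER the mapping runs per word of "text_tokenized", so handles and URLs are replaced in place without changing the word count, which keeps the alignment with "ner_tags" intact.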
experiments/model_finetuning_nerd.py CHANGED
@@ -9,6 +9,7 @@ import json
 import logging
 import math
 import os
+import re
 from os.path import join as pj
 from shutil import copyfile, rmtree
 from glob import glob
@@ -23,6 +24,28 @@ logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logg
 EVAL_STEP = 500
 RANDOM_SEED = 42
 N_TRIALS = 10
+URL_RE = re.compile(r"https?:\/\/[\w\.\/\?\=\d&#%_:/-]+")
+HANDLE_RE = re.compile(r"@\w+")
+
+
+def preprocess_bernice(text):
+    text = HANDLE_RE.sub("@USER", text)
+    text = URL_RE.sub("HTTPURL", text)
+    return text
+
+
+def preprocess_timelm(text):
+    text = HANDLE_RE.sub("@user", text)
+    text = URL_RE.sub("http", text)
+    return text
+
+
+def preprocess(model_name, text):
+    if model_name == "jhu-clsp/bernice":
+        return preprocess_bernice(text)
+    if "twitter-roberta-base" in model_name:
+        return preprocess_timelm(text)
+    return text
 
 
 def sigmoid(x):
@@ -45,7 +68,9 @@ def main(
     dataset = load_dataset(dataset, dataset_type)
     tokenized_datasets = dataset.map(
         lambda x: tokenizer(
-            [f"[target] {a}, [definition] {b}, [text] {c}" for a, b, c in zip(x["target"], x["definition"], x["text"])],
+            [
+                preprocess(model, f"[target] {a}, [definition] {b}, [text] {c}")
+                for a, b, c in zip(x["target"], x["definition"], x["text"])],
             padding="max_length",
             truncation=True,
             max_length=256),
@@ -107,7 +132,7 @@ def main(
     test_split = ["test"]
     if dataset_type.endswith("temporal"):
         test_split += ["test_1", "test_2", "test_3", "test_4"]
-    summary_file = pj(output_dir, "summary.json")
+    summary_file = pj(best_model_path, "summary.json")
     if os.path.exists(summary_file):
         with open(summary_file) as f:
             metric = json.load(f)
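
For the NERD script, each example is flattened into one prompt string before tokenization, and this commit routes that whole string through preprocess(). A small sketch with invented field values, assuming the preprocess() helper from the diff above is in scope:

target = "Apple"
definition = "the technology company"
text = "@user1 Apple just shipped a new phone https://example.com/news"
prompt = f"[target] {target}, [definition] {definition}, [text] {text}"
print(preprocess("jhu-clsp/bernice", prompt))
# -> [target] Apple, [definition] the technology company, [text] @USER Apple just shipped a new phone HTTPURL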
experiments/model_finetuning_topic.py CHANGED
@@ -9,6 +9,7 @@ import json
 import logging
 import math
 import os
+import re
 from os.path import join as pj
 from shutil import copyfile, rmtree
 from glob import glob
@@ -46,6 +47,28 @@ ID2LABEL = {v: k for k, v in LABEL2ID.items()}
 EVAL_STEP = 500
 RANDOM_SEED = 42
 N_TRIALS = 10
+URL_RE = re.compile(r"https?:\/\/[\w\.\/\?\=\d&#%_:/-]+")
+HANDLE_RE = re.compile(r"@\w+")
+
+
+def preprocess_bernice(text):
+    text = HANDLE_RE.sub("@USER", text)
+    text = URL_RE.sub("HTTPURL", text)
+    return text
+
+
+def preprocess_timelm(text):
+    text = HANDLE_RE.sub("@user", text)
+    text = URL_RE.sub("http", text)
+    return text
+
+
+def preprocess(model_name, text):
+    if model_name == "jhu-clsp/bernice":
+        return preprocess_bernice(text)
+    if "twitter-roberta-base" in model_name:
+        return preprocess_timelm(text)
+    return text
 
 
 def sigmoid(x):
@@ -67,7 +90,12 @@ def main(
     tokenizer = AutoTokenizer.from_pretrained(model)
     dataset = load_dataset(dataset, dataset_type)
     tokenized_datasets = dataset.map(
-        lambda x: tokenizer(x["text"], padding="max_length", truncation=True, max_length=256), batched=True
+        lambda x: tokenizer(
+            [preprocess(model, t) for t in x["text"]],
+            padding="max_length",
+            truncation=True,
+            max_length=256),
+        batched=True
     )
     tokenized_datasets = tokenized_datasets.rename_column("gold_label_list", "label")
     metric_accuracy = evaluate.load("accuracy", "multilabel")
@@ -79,6 +107,7 @@ def main(
         predictions = np.array([[int(sigmoid(j) > 0.5) for j in lo] for lo in logits])
         return metric_f1.compute(predictions=predictions, references=labels, average="micro")
 
+
     def compute_metric_all(eval_pred):
         logits, labels = eval_pred
         predictions = np.array([[int(sigmoid(j) > 0.5) for j in lo] for lo in logits])
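
The topic task is multi-label, so compute_metric thresholds each logit independently through a sigmoid instead of taking an argmax over classes. A self-contained numeric sketch of that step with made-up logits, assuming sigmoid is the standard logistic function:

import math

import numpy as np


def sigmoid(x):
    return 1 / (1 + math.exp(-x))


logits = np.array([[2.0, -1.5, 0.1], [-3.0, 0.0, 4.2]])
predictions = np.array([[int(sigmoid(j) > 0.5) for j in lo] for lo in logits])
print(predictions)
# [[1 0 1]
#  [0 0 1]]
# a logit of exactly 0.0 gives sigmoid 0.5, which the strict > comparison maps to 0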