xenoeye committed
Commit f519664 · 1 Parent(s): 8295342

Add model weights and example code.
Critique_NER/config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "_name_or_path": "t5-large",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 4096,
+   "d_kv": 64,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "relu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 24,
+   "num_heads": 16,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.18.0",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
Critique_NER/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4ce87e207dea8a2cc2eb1efdfaec552f210d4e289bb43a136e1a6dffd8570b4
+ size 2950844807
Critique_NUM/config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "_name_or_path": "t5-large",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 4096,
+   "d_kv": 64,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "relu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 24,
+   "num_heads": 16,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.18.0",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
Critique_NUM/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1fa6ac29dd9298707e28d765d1f679bed36730dad4cbcef41ea43d47ab02d41
+ size 2950844807
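
Both checkpoints share the standard t5-large configuration above, and the weight files are Git LFS pointers, so the actual ~2.95 GB binaries must be fetched before loading. A minimal loading sketch, assuming the repository is cloned locally with the LFS weights present; the "critique: " prefix and the "||| Facts:" separator are taken from tag_critiques.py below, while the example sentence itself is hypothetical:

import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Base t5-large tokenizer; the fine-tuned weights live in the checkpoint
# directories added in this commit (Critique_NER or Critique_NUM).
tokenizer = T5Tokenizer.from_pretrained("t5-large")
model = T5ForConditionalGeneration.from_pretrained("./Critique_NER")

# Hypothetical input in the format used by tag_critiques.py:
# "critique: <initial response> ||| Facts: <facts>"
text = "critique: The Eiffel Tower is in Berlin. ||| Facts: The Eiffel Tower is in Paris."
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    do_sample=False,  # greedy decoding
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])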
tag_critiques.py ADDED
@@ -0,0 +1,93 @@
+ import sys
+ import json
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
+ import torch
+
+ critic_model_num = "./Critique_NUM"  # NUM critique checkpoint
+ critic_model_ner = "./Critique_NER"  # NER critique checkpoint
+
+ finitres = open(sys.argv[1], "r")  # input: one JSON record per line
+
+ initres = finitres.readlines()
+ out_list = []
+
+ for i in range(len(initres)):
+
+     res_record = json.loads(initres[i].strip())
+     out_text = ""
+     out_text += res_record["initial_response"]
+     out_text += " ||| Facts: " + res_record["facts"]
+
+     res_record["text"] = out_text
+     out_list.append(res_record)
+
+ device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
+ # device = torch.device("cpu")
+
+ # Prepare the NUM critique model
+ tokenizer = T5Tokenizer.from_pretrained("t5-large")
+ model = T5ForConditionalGeneration.from_pretrained(critic_model_num).to(device)
+
+
+ task_prefix = "critique: "
+
+ for i in range(len(out_list)):
+
+     sentences = [out_list[i]["text"]]
+     # print(sentences)
+
+     inputs = tokenizer([task_prefix + sentence for sentence in sentences], return_tensors="pt", padding=True).to(device)
+
+     output_sequences = model.generate(
+         input_ids=inputs["input_ids"],
+         attention_mask=inputs["attention_mask"],
+         do_sample=False,  # greedy decoding for deterministic output
+     )
+
+     print(tokenizer.batch_decode(output_sequences, skip_special_tokens=True))
+     out_list[i]["critic_num"] = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)[0]
+
+     del inputs
+     del output_sequences
+
+
+ # Free GPU memory after using the first T5 model
+ del tokenizer
+ del model
+ torch.cuda.empty_cache()
+
+
+ # Prepare the NER critique model
+ tokenizer = T5Tokenizer.from_pretrained("t5-large")
+ model = T5ForConditionalGeneration.from_pretrained(critic_model_ner).to(device)
+
+ task_prefix = "critique: "
+
+ for i in range(len(out_list)):
+
+     sentences = [out_list[i]["text"]]
+
+     inputs = tokenizer([task_prefix + sentence for sentence in sentences], return_tensors="pt", padding=True).to(device)
+
+     output_sequences = model.generate(
+         input_ids=inputs["input_ids"],
+         attention_mask=inputs["attention_mask"],
+         do_sample=False,  # greedy decoding for deterministic output
+     )
+
+     print(tokenizer.batch_decode(output_sequences, skip_special_tokens=True))
+     out_list[i]["critic_ner"] = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)[0]
+
+     del inputs
+     del output_sequences
+
+
+ # Free GPU memory after using the second T5 model
+ del tokenizer
+ del model
+ torch.cuda.empty_cache()
+
+
+ with open(sys.argv[2], "w") as fmerged_critics:  # output: one JSON record per line
+     for i in out_list:
+         fmerged_critics.write(json.dumps(i) + "\n")
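
For reference, a minimal sketch of the input the script expects: one JSON object per line with "initial_response" and "facts" fields (field names are taken from the script; the values and file names below are hypothetical). The script is run as `python tag_critiques.py <input.jsonl> <output.jsonl>` and writes the same records back with added "critic_num" and "critic_ner" fields.

import json

# Hypothetical example record; only "initial_response" and "facts" are read by the script.
record = {
    "initial_response": "Mount Everest is 8,848 metres tall and lies in Nepal.",
    "facts": "Mount Everest is 8,848.86 metres tall; it lies on the Nepal-China border.",
}
with open("initial_responses.jsonl", "w") as f:  # hypothetical file name
    f.write(json.dumps(record) + "\n")

# Then, for example:
#   python tag_critiques.py initial_responses.jsonl critiques.jsonl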