Ashton2000 committed on
Commit
cd2fc0b
·
1 Parent(s): 29fc7eb

Neural Machine Translation Datasets

Browse files
Files changed (8) hide show
  1. bleu_scores.txt +100 -0
  2. dev.de +0 -0
  3. dev.en +0 -0
  4. test.de-en.de +0 -0
  5. test.de-en.en +0 -0
  6. test.en-de.de +0 -0
  7. test.en-de.en +0 -0
  8. test.py +115 -0
bleu_scores.txt ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 1074 24.60
2
+ 1104 23.51
3
+ 1256 20.52
4
+ 0976 20.71
5
+ 2352 20.69
6
+ 1456 22.34
7
+ 2300 22.58
8
+ 0241 21.54
9
+ 2885 22.78
10
+ 0591 23.85
11
+ 2821 17.48
12
+ 1285 22.63
13
+ 1822 16.89
14
+ 2820 23.92
15
+ 2384 21.89
16
+ 1564 19.85
17
+ 0578 23.98
18
+ 0929 19.46
19
+ 2594 11.70
20
+ 0333 16.95
21
+ 0927 21.74
22
+ 1722 23.67
23
+ 0661 21.97
24
+ 1434 20.65
25
+ 2116 20.36
26
+ 2466 22.00
27
+ 2869 18.70
28
+ 0398 21.32
29
+ 1057 23.51
30
+ 1909 16.51
31
+ 2398 23.15
32
+ 0048 22.89
33
+ 2767 20.96
34
+ 2587 24.75
35
+ 1746 22.17
36
+ 1960 24.92
37
+ 2888 21.07
38
+ 0673 25.83
39
+ 0285 18.71
40
+ 2409 20.42
41
+ 1584 23.82
42
+ 0618 22.27
43
+ 2499 25.51
44
+ 0804 17.57
45
+ 1091 25.48
46
+ 1076 2.67
47
+ 1687 26.50
48
+ 2787 25.36
49
+ 1937 24.80
50
+ 0988 24.41
51
+ 2524 22.39
52
+ 2694 21.90
53
+ 2251 19.92
54
+ 2363 21.86
55
+ 0899 19.04
56
+ 0115 23.71
57
+ 0481 19.76
58
+ 2159 21.99
59
+ 2712 19.82
60
+ 0726 23.68
61
+ 1095 23.94
62
+ 1339 20.46
63
+ 0780 18.27
64
+ 2984 21.10
65
+ 2286 24.68
66
+ 1017 21.94
67
+ 2093 23.06
68
+ 0800 20.50
69
+ 0488 20.80
70
+ 0387 26.64
71
+ 2222 21.79
72
+ 2684 22.92
73
+ 0082 23.53
74
+ 0996 18.98
75
+ 0750 22.44
76
+ 1786 21.07
77
+ 2874 16.21
78
+ 2590 25.26
79
+ 1857 26.15
80
+ 1507 23.44
81
+ 2436 19.25
82
+ 0248 18.56
83
+ 1277 17.32
84
+ 0311 15.09
85
+ 0500 17.38
86
+ 1814 17.59
87
+ 1296 22.22
88
+ 2188 17.49
89
+ 0699 21.94
90
+ 1938 18.48
91
+ 2024 20.55
92
+ 1138 23.76
93
+ 0558 23.29
94
+ 2678 17.22
95
+ 0469 20.20
96
+ 1916 23.57
97
+ 2522 22.12
98
+ 2782 23.23
99
+ 1743 22.52
100
+ 1393 24.79
dev.de ADDED
The diff for this file is too large to render. See raw diff
 
dev.en ADDED
The diff for this file is too large to render. See raw diff
 
test.de-en.de ADDED
The diff for this file is too large to render. See raw diff
 
test.de-en.en ADDED
The diff for this file is too large to render. See raw diff
 
test.en-de.de ADDED
The diff for this file is too large to render. See raw diff
 
test.en-de.en ADDED
The diff for this file is too large to render. See raw diff
 
test.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # This software may be used and distributed according to the terms of the GNU General Public License version 3.
3
+
4
+ from typing import Tuple
5
+ import os
6
+ import sys
7
+ import torch
8
+ import fire
9
+ import time
10
+ import json
11
+
12
+ from tqdm import tqdm
13
+
14
+ from pathlib import Path
15
+
16
+ from fairscale.nn.model_parallel.initialize import initialize_model_parallel
17
+
18
+ from llama import ModelArgs, Transformer, Tokenizer, LLaMA
19
+
20
+
21
def setup_model_parallel() -> Tuple[int, int]:
    """Initialise torch.distributed (NCCL) and fairscale model parallelism.

    Reads LOCAL_RANK and WORLD_SIZE from the environment (set by the
    launcher, e.g. torchrun), binds this process to its GPU, and seeds
    the RNG identically in every process so sampling stays in sync.

    Returns:
        (local_rank, world_size) for the current process.

    Raises:
        RuntimeError: if LOCAL_RANK / WORLD_SIZE are not set — the
            original -1 fallbacks would otherwise fail opaquely inside
            initialize_model_parallel / set_device.
    """
    local_rank = int(os.environ.get("LOCAL_RANK", -1))
    world_size = int(os.environ.get("WORLD_SIZE", -1))
    if local_rank < 0 or world_size < 0:
        raise RuntimeError(
            "LOCAL_RANK and WORLD_SIZE must be set in the environment "
            "(launch with torchrun / torch.distributed)."
        )

    torch.distributed.init_process_group("nccl")
    initialize_model_parallel(world_size)
    torch.cuda.set_device(local_rank)

    # seed must be the same in all processes so generation stays identical
    torch.manual_seed(1)
    return local_rank, world_size
32
+
33
+
34
def load(ckpt_dir: str, tokenizer_path: str, local_rank: int, world_size: int) -> LLaMA:
    """Load one model-parallel LLaMA shard and wrap it in a generator.

    Each rank loads exactly one checkpoint shard (checkpoints[local_rank]),
    so the number of *.pth files in ckpt_dir must equal world_size.

    Args:
        ckpt_dir: directory containing params.json and the *.pth shards.
        tokenizer_path: path to the sentencepiece tokenizer model.
        local_rank: rank of this process; indexes the shard list.
        world_size: total number of model-parallel processes.

    Returns:
        A ready-to-use LLaMA generator (model + tokenizer).

    Raises:
        ValueError: if the shard count does not match world_size.
    """
    start_time = time.time()
    checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
    # Explicit raise instead of `assert`: asserts vanish under `python -O`.
    if world_size != len(checkpoints):
        raise ValueError(
            f"Loading a checkpoint for MP={len(checkpoints)} but world size is {world_size}"
        )
    ckpt_path = checkpoints[local_rank]
    print("Loading")
    checkpoint = torch.load(ckpt_path, map_location="cpu")
    with open(Path(ckpt_dir) / "params.json", "r") as f:
        params = json.load(f)  # idiomatic: json.load over json.loads(f.read())

    model_args: ModelArgs = ModelArgs(max_seq_len=1536, max_batch_size=32, **params)
    tokenizer = Tokenizer(model_path=tokenizer_path)
    model_args.vocab_size = tokenizer.n_words
    # Build the model with fp16 CUDA parameters, then restore the default
    # tensor type so later allocations are not affected.
    torch.set_default_tensor_type(torch.cuda.HalfTensor)
    model = Transformer(model_args)
    torch.set_default_tensor_type(torch.FloatTensor)
    # strict=False: each shard's state dict only covers this rank's slice.
    model.load_state_dict(checkpoint, strict=False)

    generator = LLaMA(model, tokenizer)
    print(f"Loaded in {time.time() - start_time:.2f} seconds")
    return generator
57
+
58
+
59
def read(lang,
         score_path="/data/wyt/in-context/xglm/bleu_scores.txt",
         corpus_dir="/data/wyt/in-context/xglm/corpus",
         num_examples=16):
    """Build the few-shot translation prompt and load the test sources.

    Dev-set example pairs are ranked by their per-example BLEU score
    (read from ``score_path``, tab-separated "index<TAB>score" lines)
    and the best ``num_examples`` pairs are concatenated into a
    demonstration prompt.

    Args:
        lang: language pair, "de-en" or "en-de"; the first two letters
            are the source language, the last two the target.
        score_path: BLEU score file (defaults preserve the original
            hard-coded path).
        corpus_dir: directory holding dev.<lang> files and wmt19/ tests
            (defaults preserve the original hard-coded path).
        num_examples: number of top-scoring pairs put into the prompt.

    Returns:
        (demonstration, test_src_list): the prompt prefix and the list
        of source sentences to translate.
    """
    # index -> BLEU score of the dev pair at that index.
    # Original leaked this handle; `with` closes it deterministically.
    score_map = {}
    with open(score_path, 'r') as score_file:
        for line in score_file:
            fields = line.split("\t")  # split once, not twice per line
            score_map[int(fields[0])] = float(fields[1])

    # Dev-pair indices, best BLEU first.
    ranked = sorted(score_map.items(), key=lambda kv: kv[1], reverse=True)
    top_k = [idx for idx, _ in ranked]

    with open("%s/dev.%s" % (corpus_dir, lang[:2]), 'r') as f:
        src_list = [line.strip() for line in f]
    with open("%s/dev.%s" % (corpus_dir, lang[-2:]), 'r') as f:
        tgt_list = [line.strip() for line in f]

    # Original leaked this handle too.
    with open("%s/wmt19/test.%s.%s" % (corpus_dir, lang, lang[:2]), 'r') as f:
        test_src_list = [line.strip() for line in f if line != "\n"]

    demonstration = ("Translate these sentences from German to English:\n"
                     if lang == "de-en"
                     else "Translate these sentences from English to German:\n")
    for idx in top_k[:num_examples]:
        demonstration = demonstration + "%s\n%s\n" % (src_list[idx], tgt_list[idx])
    return demonstration, test_src_list
85
+
86
+
87
def main(ckpt_dir: str, tokenizer_path: str, lang: str, temperature: float = 0.8, top_p: float = 0.95):
    """Run few-shot MT with LLaMA and append extracted translations to a file.

    Args:
        ckpt_dir: checkpoint directory passed through to load().
        tokenizer_path: tokenizer model path passed through to load().
        lang: language pair ("de-en" or "en-de") passed through to read().
        temperature: sampling temperature for generation.
        top_p: nucleus-sampling threshold for generation.

    Side effects:
        Rank 0 appends one line per test sentence to
        "<lang>.<tgt>.temp<NN>.raw" (empty line when no translation was
        extracted). Non-zero ranks have stdout redirected to /dev/null.
    """
    local_rank, world_size = setup_model_parallel()
    if local_rank > 0:
        # Silence every rank except 0 so progress/logs print once.
        sys.stdout = open(os.devnull, 'w')

    print("temperature: ", temperature)
    generator = load(ckpt_dir, tokenizer_path, local_rank, world_size)

    demonstration, test_src_list = read(lang)
    step_len = 8  # generation batch size
    # Hoisted out of the loop; "%02d" truncates temperature*10 to an int.
    out_path = "%s.%s.temp%02d.raw" % (lang, lang[-2:], temperature * 10)
    for i in tqdm(range(0, len(test_src_list), step_len)):
        prompt_list = [demonstration + test_src + "\n" for test_src in test_src_list[i:i+step_len]]
        results = generator.generate(prompt_list, max_gen_len=1536, temperature=temperature, top_p=top_p)

        if local_rank == 0:
            # Original opened an append-mode handle per batch and never
            # closed it; `with` guarantees flush + close each batch.
            with open(out_path, 'a') as out_f:
                for res_idx, result in enumerate(results):
                    # The model echoes the prompt: locate the source
                    # sentence and take the line right after it.
                    start = result.find(test_src_list[i + res_idx])
                    if start == -1:
                        tgt = ""
                    else:
                        lines = result[start:].split("\n")
                        tgt = lines[1] if len(lines) > 1 else ""
                    out_f.write(tgt + "\n")
112
+
113
+
114
+ if __name__ == "__main__":
115
+ fire.Fire(main)