File size: 4,932 Bytes
901e06a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import logging
import argparse
from typing import List, Optional
from simuleval.data.dataloader import DATALOADER_DICT, GenericDataloader
from simuleval.evaluator.scorers import get_scorer_class


def add_dataloader_args(
    parser: argparse.ArgumentParser, cli_argument_list: Optional[List[str]] = None
):
    """Attach the selected dataloader's own CLI options to *parser*.

    Peeks at the arguments parsed so far to read ``--dataloader``; when the
    requested name is not registered in ``DATALOADER_DICT``, the generic
    dataloader's options are added instead.

    Args:
        parser: Parser to extend in place.
        cli_argument_list: Explicit argv to inspect; ``None`` means ``sys.argv``.
    """
    # argparse treats an explicit None exactly like the no-argument call.
    known_args, _ = parser.parse_known_args(cli_argument_list)
    loader_cls = DATALOADER_DICT.get(known_args.dataloader) or GenericDataloader
    loader_cls.add_args(parser)


def add_evaluator_args(parser: argparse.ArgumentParser):
    """Attach evaluator-related options (metrics, latency, remote, range).

    Args:
        parser: Parser to extend in place.
    """
    parser.add_argument(
        "--quality-metrics",
        nargs="+",
        default=["BLEU"],
        help="Quality metrics",
    )
    parser.add_argument(
        "--latency-metrics",
        nargs="+",
        default=["LAAL", "AL", "AP", "DAL", "ATD"],
        help="Latency metrics",
    )
    parser.add_argument(
        "--continue-unfinished",
        action="store_true",
        default=False,
        help="Continue the experiments in output dir.",
    )
    parser.add_argument(
        "--computation-aware",
        action="store_true",
        default=False,
        help="Include computational latency.",
    )
    parser.add_argument(
        "--no-use-ref-len",
        action="store_true",
        default=False,
        # Fixed copy-pasted help text: this flag controls reference-length
        # usage, not computational latency.
        help="Do not use the reference length for latency calculation.",
    )
    parser.add_argument(
        "--eval-latency-unit",
        type=str,
        default="word",
        choices=["word", "char"],
        help="Basic unit used for latency calculation, choose from "
        "words (detokenized) and characters.",
    )
    parser.add_argument(
        "--remote-address",
        default="localhost",
        help="Address to client backend",
    )
    parser.add_argument(
        "--remote-port",
        # type=int keeps CLI-supplied values consistent with the int default;
        # previously "--remote-port 8888" produced the string "8888".
        type=int,
        default=12321,
        help="Port to client backend",
    )
    parser.add_argument(
        "--no-progress-bar",
        action="store_true",
        default=False,
        help="Do not use progress bar",
    )
    parser.add_argument(
        "--start-index",
        type=int,
        default=0,
        help="Start index for evaluation.",
    )
    parser.add_argument(
        "--end-index",
        type=int,
        default=-1,
        help="The last index for evaluation.",
    )
    parser.add_argument("--output", type=str, default=None, help="Output directory")


def add_scorer_args(
    parser: argparse.ArgumentParser, cli_argument_list: Optional[List[str]] = None
):
    """Attach per-metric scorer options for every selected metric.

    Reads ``--latency-metrics`` and ``--quality-metrics`` from the arguments
    parsed so far and lets each corresponding scorer class extend *parser*.

    Args:
        parser: Parser to extend in place.
        cli_argument_list: Explicit argv to inspect; ``None`` means ``sys.argv``.
    """
    known_args, _ = parser.parse_known_args(cli_argument_list)

    # Latency scorers first, then quality scorers, matching the CLI layout.
    for category, metric_names in (
        ("latency", known_args.latency_metrics),
        ("quality", known_args.quality_metrics),
    ):
        for metric_name in metric_names:
            get_scorer_class(category, metric_name).add_args(parser)


def general_parser():
    """Build the top-level argument parser with the general SimulEval options.

    Returns:
        argparse.ArgumentParser: Parser pre-populated with agent, system,
        dataloader, logging, and device options.
    """
    p = argparse.ArgumentParser()

    # Evaluation modes.
    p.add_argument(
        "--remote-eval",
        action="store_true",
        help="Evaluate a standalone agent",
    )
    p.add_argument(
        "--standalone",
        action="store_true",
        help="",
    )
    p.add_argument("--slurm", action="store_true", default=False, help="Use slurm.")

    # Agent definition.
    p.add_argument("--agent", default=None, help="Agent file")
    p.add_argument(
        "--agent-class",
        default=None,
        help="The full string of class of the agent.",
    )

    # System directory and configuration.
    p.add_argument(
        "--system-dir",
        default=None,
        help="Directory that contains everything to start the simultaneous system.",
    )
    p.add_argument(
        "--system-config",
        default="main.yaml",
        help="Name of the config yaml of the system configs.",
    )

    p.add_argument("--dataloader", default=None, help="Dataloader to use")
    p.add_argument(
        "--log-level",
        type=str,
        default="info",
        # Accept the lowercase form of every level name the logging module knows.
        choices=list(map(str.lower, logging._levelToName.values())),
        help="Log level.",
    )
    p.add_argument(
        "--score-only",
        action="store_true",
        default=False,
        help="Only score the inference file.",
    )
    p.add_argument("--device", type=str, default="cpu", help="Device to run the model.")
    return p


def add_slurm_args(parser):
    """Attach SLURM job-submission options to *parser* in place."""
    parser.add_argument(
        "--slurm-partition", default="learnaccel,ust", help="Slurm partition."
    )
    parser.add_argument("--slurm-job-name", default="simuleval", help="Slurm job name.")
    # NOTE(review): help text reads "Slurm partition." but this option is the
    # job time limit — looks like a copy-paste slip; confirm before changing
    # the user-facing string.
    parser.add_argument("--slurm-time", default="10:00:00", help="Slurm partition.")