fasdfsa commited on
Commit
901e06a
·
1 Parent(s): 032a215
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. LICENSE +21 -0
  3. README.md +379 -0
  4. SimulEval/.github/workflows/main.yml +46 -0
  5. SimulEval/.gitignore +139 -0
  6. SimulEval/CHANGELOG.md +10 -0
  7. SimulEval/CODE_OF_CONDUCT.md +45 -0
  8. SimulEval/CONTRIBUTING.md +20 -0
  9. SimulEval/LICENSE +427 -0
  10. SimulEval/README.md +35 -0
  11. SimulEval/docs/Makefile +20 -0
  12. SimulEval/docs/conf.py +40 -0
  13. SimulEval/docs/index.rst +30 -0
  14. SimulEval/docs/installation.rst +21 -0
  15. SimulEval/docs/make.bat +41 -0
  16. SimulEval/docs/quick_start.rst +49 -0
  17. SimulEval/docs/requirements.txt +2 -0
  18. SimulEval/docs/tutorials/remote_evaluation.rst +44 -0
  19. SimulEval/docs/tutorials/speech_to_speech.rst +2 -0
  20. SimulEval/docs/tutorials/speech_to_text.rst +2 -0
  21. SimulEval/docs/user_guide/agent.rst +198 -0
  22. SimulEval/docs/user_guide/dataloader.rst +5 -0
  23. SimulEval/docs/user_guide/evaluator.rst +44 -0
  24. SimulEval/docs/user_guide/introduction.rst +35 -0
  25. SimulEval/examples/quick_start/Dockerfile +10 -0
  26. SimulEval/examples/quick_start/agent_pipeline.py +39 -0
  27. SimulEval/examples/quick_start/agent_with_configs.py +39 -0
  28. SimulEval/examples/quick_start/agent_with_new_metrics.py +47 -0
  29. SimulEval/examples/quick_start/dict.txt +26 -0
  30. SimulEval/examples/quick_start/first_agent.py +26 -0
  31. SimulEval/examples/quick_start/readme.md +47 -0
  32. SimulEval/examples/quick_start/source.txt +10 -0
  33. SimulEval/examples/quick_start/target.txt +10 -0
  34. SimulEval/examples/speech_to_speech/english_counter_agent.py +69 -0
  35. SimulEval/examples/speech_to_speech/eval.sh +6 -0
  36. SimulEval/examples/speech_to_speech/readme.md +66 -0
  37. SimulEval/examples/speech_to_speech/reference/de.txt +1 -0
  38. SimulEval/examples/speech_to_speech/reference/en.txt +1 -0
  39. SimulEval/examples/speech_to_speech/reference/ja.txt +1 -0
  40. SimulEval/examples/speech_to_speech/reference/zh.txt +1 -0
  41. SimulEval/examples/speech_to_speech/source.txt +1 -0
  42. SimulEval/examples/speech_to_speech/test.wav +3 -0
  43. SimulEval/setup.cfg +2 -0
  44. SimulEval/setup.py +43 -0
  45. SimulEval/simuleval/__init__.py +5 -0
  46. SimulEval/simuleval/agents/__init__.py +16 -0
  47. SimulEval/simuleval/agents/actions.py +60 -0
  48. SimulEval/simuleval/agents/agent.py +216 -0
  49. SimulEval/simuleval/agents/pipeline.py +90 -0
  50. SimulEval/simuleval/agents/service.py +59 -0
.gitattributes CHANGED
@@ -1,4 +1,6 @@
1
  *.7z filter=lfs diff=lfs merge=lfs -text
 
 
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.avro filter=lfs diff=lfs merge=lfs -text
4
  *.bin filter=lfs diff=lfs merge=lfs -text
 
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.pdf filter=lfs diff=lfs merge=lfs -text
3
+ *.mov filter=lfs diff=lfs merge=lfs -text
4
  *.arrow filter=lfs diff=lfs merge=lfs -text
5
  *.avro filter=lfs diff=lfs merge=lfs -text
6
  *.bin filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2024 ICTNLP
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # StreamSpeech
2
+
3
+ [![arXiv](https://img.shields.io/badge/arXiv-2406.03049-b31b1b.svg?logo=arXiv)](https://arxiv.org/abs/2406.03049)
4
+ [![project](https://img.shields.io/badge/%F0%9F%8E%A7%20Demo-Listen%20to%20StreamSpeech-orange.svg)](https://ictnlp.github.io/StreamSpeech-site/)
5
+ [![model](https://img.shields.io/badge/%F0%9F%A4%97%20-StreamSpeech_Models-blue.svg)](https://huggingface.co/ICTNLP/StreamSpeech_Models/tree/main)
6
+ [![Hits](https://hitscounter.dev/api/hit?url=https%3A%2F%2Fgithub.com%2Fictnlp%2FStreamSpeech&label=Visitors&icon=graph-up&color=%23dc3545)](https://hits.seeyoufarm.com)
7
+
8
+ [![twitter](https://img.shields.io/badge/Twitter-@Gorden%20Sun-black?logo=X&logoColor=black)](https://x.com/Gorden_Sun/status/1798742796524007845) [![twitter](https://img.shields.io/badge/Twitter-@imxiaohu-black?logo=X&logoColor=black)](https://x.com/imxiaohu/status/1798999363987124355)
9
+
10
+ > **Authors**: **[Shaolei Zhang](https://zhangshaolei1998.github.io/), [Qingkai Fang](https://fangqingkai.github.io/), [Shoutao Guo](https://scholar.google.com.hk/citations?user=XwHtPyAAAAAJ&hl), [Zhengrui Ma](https://scholar.google.com.hk/citations?user=dUgq6tEAAAAJ), [Min Zhang](https://scholar.google.com.hk/citations?user=CncXH-YAAAAJ), [Yang Feng*](https://people.ucas.edu.cn/~yangfeng?language=en)**
11
+
12
+
13
+ Code for ACL 2024 paper "[StreamSpeech: Simultaneous Speech-to-Speech Translation with Multi-task Learning](https://arxiv.org/pdf/2406.03049)".
14
+
15
+ <p align="center" width="100%">
16
+ <img src="./assets/streamspeech.png" alt="StreamSpeech" style="width: 70%; min-width: 300px; display: block; margin: auto;">
17
+ </p>
18
+ <p align="center">
19
+ 🎧 Listen to <a href="https://ictnlp.github.io/StreamSpeech-site/">StreamSpeech's translated speech</a> 🎧
20
+ </p>
21
+
22
+ 💡**Highlight**:
23
+ 1. StreamSpeech achieves **SOTA performance** on both offline and simultaneous speech-to-speech translation.
24
+ 2. StreamSpeech performs **streaming ASR**, **simultaneous speech-to-text translation** and **simultaneous speech-to-speech translation** via an "All in One" seamless model.
25
+ 3. StreamSpeech can present intermediate results (i.e., ASR or translation results) during simultaneous translation, offering a more comprehensive low-latency communication experience.
26
+
27
+ ## 🔥News
28
+ - [2025.06.17] We are excited to extend the "All-in-One" feature of StreamSpeech to more general multimodal interactions via developing **Stream-Omni**. 👉Refer to [paper](https://arxiv.org/abs/2506.13642), [code & demo](https://github.com/ictnlp/Stream-Omni), [model](https://huggingface.co/ICTNLP/stream-omni-8b) for more details.
29
+ - Stream-Omni is a GPT-4o-like language-vision-speech chatbot that simultaneously supports interactions across any combination of text, vision, and speech modalities.
30
+ - Stream-Omni can simultaneously produce intermediate textual results (e.g., ASR transcriptions and model responses) during speech interactions, like the advanced voice service of GPT-4o.
31
+
32
+ - [2024.06.17] Add [Web GUI demo](./demo), now you can experience StreamSpeech in your local browser.
33
+ - [2024.06.05] [Paper](https://arxiv.org/pdf/2406.03049), [code](https://github.com/ictnlp/StreamSpeech), [models](https://huggingface.co/ICTNLP/StreamSpeech_Models/tree/main) and [demo](https://ictnlp.github.io/StreamSpeech-site/) of StreamSpeech are available!
34
+
35
+ ## ⭐Features
36
+
37
+ ### Support 8 Tasks
38
+ - **Offline**: Speech Recognition (ASR)✅, Speech-to-Text Translation (S2TT)✅, Speech-to-Speech Translation (S2ST)✅, Speech Synthesis (TTS)✅
39
+ - **Simultaneous**: Streaming ASR✅, Simultaneous S2TT✅, Simultaneous S2ST✅, Real-time TTS✅ under any latency (with one model)
40
+
41
+ ### GUI Demo
42
+
43
+ https://github.com/ictnlp/StreamSpeech/assets/34680227/4d9bdabf-af66-4320-ae7d-0f23e721cd71
44
+ <p align="center">
45
+ Simultaneously provide ASR, translation, and synthesis results via a seamless model
46
+ </p>
47
+
48
+ ### Case
49
+
50
+ > **Speech Input**: [example/wavs/common_voice_fr_17301936.mp3](./example/wavs/common_voice_fr_17301936.mp3)
51
+ >
52
+ > **Transcription** (ground truth): jai donc lexpérience des années passées jen dirai un mot tout à lheure
53
+ >
54
+ > **Translation** (ground truth): i therefore have the experience of the passed years i'll say a few words about that later
55
+
56
+ | StreamSpeech | Simultaneous | Offline |
57
+ | ----------------------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
58
+ | **Speech Recognition** | jai donc expérience des années passé jen dirairai un mot tout à lheure | jai donc lexpérience des années passé jen dirairai un mot tout à lheure |
59
+ | **Speech-to-Text Translation** | i therefore have an experience of last years i will tell a word later | so i have the experience in the past years i'll say a word later |
60
+ | **Speech-to-Speech Translation** | <video src='https://github.com/zhangshaolei1998/StreamSpeech_dev/assets/34680227/ed41ba13-353b-489b-acfa-85563d0cc2cb' width="30%"/> | <video src='https://github.com/zhangshaolei1998/StreamSpeech_dev/assets/34680227/ca482ba6-76da-4619-9dfd-24aa2eb3339a' width="30%"/> |
61
+ | **Text-to-Speech Synthesis** (*incrementally synthesize speech word by word*) | <video src='https://github.com/zhangshaolei1998/StreamSpeech_dev/assets/34680227/294f1310-eace-4914-be30-5cd798e8592e' width="30%"/> | <video src='https://github.com/zhangshaolei1998/StreamSpeech_dev/assets/34680227/52854163-7fc5-4622-a5a6-c133cbd99e58' width="30%"/> |
62
+
63
+
64
+
65
+ ## ⚙Requirements
66
+
67
+ - Python == 3.10, PyTorch == 2.0.1, Install fairseq & SimulEval
68
+
69
+ ```bash
70
+ cd fairseq
71
+ pip install --editable ./ --no-build-isolation
72
+ cd SimulEval
73
+ pip install --editable ./
74
+ ```
75
+
76
+ ## 🚀Quick Start
77
+
78
+ ### 1. Model Download
79
+
80
+ #### (1) StreamSpeech Models
81
+
82
+ | Language | UnitY | StreamSpeech (offline) | StreamSpeech (simultaneous) |
83
+ | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
84
+ | Fr-En | unity.fr-en.pt [[Huggingface](https://huggingface.co/ICTNLP/StreamSpeech_Models/blob/main/unity.fr-en.pt)] [[Baidu](https://pan.baidu.com/s/10uGYgl0xTej9FP43iKx7Cg?pwd=nkvu)] | streamspeech.offline.fr-en.pt [[Huggingface](https://huggingface.co/ICTNLP/StreamSpeech_Models/blob/main/streamspeech.offline.fr-en.pt)] [[Baidu](https://pan.baidu.com/s/1GFckHGP5SNLuOEj6mbIWhQ?pwd=pwgq)] | streamspeech.simultaneous.fr-en.pt [[Huggingface](https://huggingface.co/ICTNLP/StreamSpeech_Models/blob/main/streamspeech.simultaneous.fr-en.pt)] [[Baidu](https://pan.baidu.com/s/1edCPFljogyDHgGXkUV8_3w?pwd=8gg3)] |
85
+ | Es-En | unity.es-en.pt [[Huggingface](https://huggingface.co/ICTNLP/StreamSpeech_Models/blob/main/unity.es-en.pt)] [[Baidu](https://pan.baidu.com/s/1RwIEHye8jjw3kiIgrCHA3A?pwd=hde4)] | streamspeech.offline.es-en.pt [[Huggingface](https://huggingface.co/ICTNLP/StreamSpeech_Models/blob/main/streamspeech.offline.es-en.pt)] [[Baidu](https://pan.baidu.com/s/1T89G4NC4J0Ofzcsc8Rt2Ww?pwd=yuhd)] | streamspeech.simultaneous.es-en.pt [[Huggingface](https://huggingface.co/ICTNLP/StreamSpeech_Models/blob/main/streamspeech.simultaneous.es-en.pt)] [[Baidu](https://pan.baidu.com/s/1NbLEVcYWHIdqqLD17P1s9g?pwd=p1pc)] |
86
+ | De-En | unity.de-en.pt [[Huggingface](https://huggingface.co/ICTNLP/StreamSpeech_Models/blob/main/unity.de-en.pt)] [[Baidu](https://pan.baidu.com/s/1Mg_PBeZ5acEDhl5wRJ_-7w?pwd=egvv)] | streamspeech.offline.de-en.pt [[Huggingface](https://huggingface.co/ICTNLP/StreamSpeech_Models/blob/main/streamspeech.offline.de-en.pt)] [[Baidu](https://pan.baidu.com/s/1mTE4eHuVLJPB7Yg9AackEg?pwd=6ga8)] | streamspeech.simultaneous.de-en.pt [[Huggingface](https://huggingface.co/ICTNLP/StreamSpeech_Models/blob/main/streamspeech.simultaneous.de-en.pt)] [[Baidu](https://pan.baidu.com/s/1DYPMg3mdDopLY70BYQTduQ?pwd=r7kw)] |
87
+
88
+ #### (2) Unit-based HiFi-GAN Vocoder
89
+
90
+ | Unit config | Unit size | Vocoder language | Dataset | Model |
91
+ | ----------------- | --------- | ---------------- | --------------------------------------------------- | ------------------------------------------------------------ |
92
+ | mHuBERT, layer 11 | 1000 | En | [LJSpeech](https://keithito.com/LJ-Speech-Dataset/) | [ckpt](https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/vocoder/code_hifigan/mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj/g_00500000), [config](https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/vocoder/code_hifigan/mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj/config.json) |
93
+
94
+ ### 2. Prepare Data and Config (only for test/inference)
95
+
96
+ #### (1) Config Files
97
+
98
+ Replace `/data/zhangshaolei/StreamSpeech` in files [configs/fr-en/config_gcmvn.yaml](./configs/fr-en/config_gcmvn.yaml) and [configs/fr-en/config_mtl_asr_st_ctcst.yaml](./configs/fr-en/config_mtl_asr_st_ctcst.yaml) with your local address of StreamSpeech repo.
99
+
100
+ #### (2) Test Data
101
+
102
+ Prepare test data following [SimulEval](https://github.com/facebookresearch/SimulEval) format. [example/](./example) provides an example:
103
+
104
+ - [wav_list.txt](./example/wav_list.txt): Each line records the path of a source speech.
105
+ - [target.txt](./example/target.txt): Each line records the reference text, e.g., target translation or source transcription (used to calculate the metrics).
106
+
107
+ ### 3. Inference with SimulEval
108
+
109
+ Run these scripts to perform inference with StreamSpeech on streaming ASR, simultaneous S2TT and simultaneous S2ST.
110
+
111
+ > `--source-segment-size`: set the chunk size (millisecond) to any value to control the latency
112
+
113
+ <details>
114
+ <summary>Simultaneous Speech-to-Speech Translation</summary>
115
+
116
+ `--output-asr-translation`: whether to output the intermediate ASR and translated text results during simultaneous speech-to-speech translation.
117
+
118
+ ```shell
119
+ export CUDA_VISIBLE_DEVICES=0
120
+
121
+ ROOT=/data/zhangshaolei/StreamSpeech # path to StreamSpeech repo
122
+ PRETRAIN_ROOT=/data/zhangshaolei/pretrain_models
123
+ VOCODER_CKPT=$PRETRAIN_ROOT/unit-based_HiFi-GAN_vocoder/mHuBERT.layer11.km1000.en/g_00500000 # path to downloaded Unit-based HiFi-GAN Vocoder
124
+ VOCODER_CFG=$PRETRAIN_ROOT/unit-based_HiFi-GAN_vocoder/mHuBERT.layer11.km1000.en/config.json # path to downloaded Unit-based HiFi-GAN Vocoder
125
+
126
+ LANG=fr
127
+ file=streamspeech.simultaneous.${LANG}-en.pt # path to downloaded StreamSpeech model
128
+ output_dir=$ROOT/res/streamspeech.simultaneous.${LANG}-en/simul-s2st
129
+
130
+ chunk_size=320 #ms
131
+ PYTHONPATH=$ROOT/fairseq simuleval --data-bin ${ROOT}/configs/${LANG}-en \
132
+ --user-dir ${ROOT}/researches/ctc_unity --agent-dir ${ROOT}/agent \
133
+ --source example/wav_list.txt --target example/target.txt \
134
+ --model-path $file \
135
+ --config-yaml config_gcmvn.yaml --multitask-config-yaml config_mtl_asr_st_ctcst.yaml \
136
+ --agent $ROOT/agent/speech_to_speech.streamspeech.agent.py \
137
+ --vocoder $VOCODER_CKPT --vocoder-cfg $VOCODER_CFG --dur-prediction \
138
+ --output $output_dir/chunk_size=$chunk_size \
139
+ --source-segment-size $chunk_size \
140
+ --quality-metrics ASR_BLEU --target-speech-lang en --latency-metrics AL AP DAL StartOffset EndOffset LAAL ATD NumChunks DiscontinuitySum DiscontinuityAve DiscontinuityNum RTF \
141
+ --device gpu --computation-aware \
142
+ --output-asr-translation True
143
+ ```
144
+
145
+ You should get the following outputs:
146
+
147
+ ```
148
+ fairseq plugins loaded...
149
+ fairseq plugins loaded...
150
+ fairseq plugins loaded...
151
+ fairseq plugins loaded...
152
+ 2024-06-06 09:45:46 | INFO | fairseq.tasks.speech_to_speech | dictionary size: 1,004
153
+ import agents...
154
+ Removing weight norm...
155
+ 2024-06-06 09:45:50 | INFO | agent.tts.vocoder | loaded CodeHiFiGAN checkpoint from /data/zhangshaolei/pretrain_models/unit-based_HiFi-GAN_vocoder/mHuBERT.layer11.km1000.en/g_00500000
156
+ 2024-06-06 09:45:50 | INFO | simuleval.utils.agent | System will run on device: gpu.
157
+ 2024-06-06 09:45:50 | INFO | simuleval.dataloader | Evaluating from speech to speech.
158
+ 0%| | 0/2 [00:00<?, ?it/s]
159
+ Streaming ASR:
160
+ Streaming ASR:
161
+ Streaming ASR: je
162
+ Simultaneous translation: i would
163
+ Streaming ASR: je voudrais
164
+ Simultaneous translation: i would like to
165
+ Streaming ASR: je voudrais soumettre
166
+ Simultaneous translation: i would like to sub
167
+ Streaming ASR: je voudrais soumettre cette
168
+ Simultaneous translation: i would like to submit
169
+ Streaming ASR: je voudrais soumettre cette idée
170
+ Simultaneous translation: i would like to submit this
171
+ Streaming ASR: je voudrais soumettre cette idée à la
172
+ Simultaneous translation: i would like to submit this idea to
173
+ Streaming ASR: je voudrais soumettre cette idée à la réflexion
174
+ Simultaneous translation: i would like to submit this idea to the
175
+ Streaming ASR: je voudrais soumettre cette idée à la réflexion de
176
+ Simultaneous translation: i would like to submit this idea to the reflection
177
+ Streaming ASR: je voudrais soumettre cette idée à la réflexion de lassemblée
178
+ Simultaneous translation: i would like to submit this idea to the reflection of
179
+ Streaming ASR: je voudrais soumettre cette idée à la réflexion de lassemblée nationale
180
+ Simultaneous translation: i would like to submit this idea to the reflection of the
181
+ Streaming ASR: je voudrais soumettre cette idée à la réflexion de lassemblée nationale
182
+ Simultaneous translation: i would like to submit this idea to the reflection of the national assembly
183
+ 50%|███████████████████████████████████████████████████████████████████████████████████ | 1/2 [00:04<00:04, 4.08s/it]
184
+ Streaming ASR:
185
+ Streaming ASR:
186
+ Streaming ASR:
187
+ Streaming ASR:
188
+ Streaming ASR: jai donc
189
+ Simultaneous translation: i therefore
190
+ Streaming ASR: jai donc
191
+ Streaming ASR: jai donc expérience des
192
+ Simultaneous translation: i therefore have an experience
193
+ Streaming ASR: jai donc expérience des années
194
+ Streaming ASR: jai donc expérience des années passé
195
+ Simultaneous translation: i therefore have an experience of last
196
+ Streaming ASR: jai donc expérience des années passé jen
197
+ Simultaneous translation: i therefore have an experience of last years
198
+ Streaming ASR: jai donc expérience des années passé jen dirairai
199
+ Simultaneous translation: i therefore have an experience of last years i will
200
+ Streaming ASR: jai donc expérience des années passé jen dirairai un mot
201
+ Simultaneous translation: i therefore have an experience of last years i will tell a
202
+ Streaming ASR: jai donc expérience des années passé jen dirairai un mot tout à lheure
203
+ Simultaneous translation: i therefore have an experience of last years i will tell a word
204
+ Streaming ASR: jai donc expérience des années passé jen dirairai un mot tout à lheure
205
+ Simultaneous translation: i therefore have an experience of last years i will tell a word later
206
+ 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:06<00:00, 3.02s/it]
207
+ 2024-06-06 09:45:56 | WARNING | simuleval.scorer.asr_bleu | Beta feature: Evaluating speech output. Faieseq is required.
208
+ 2024-06-06 09:46:12 | INFO | fairseq.tasks.audio_finetuning | Using dict_path : /data/zhangshaolei/.cache/ust_asr/en/dict.ltr.txt
209
+ Transcribing predictions: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:01<00:00, 1.63it/s]
210
+ 2024-06-06 09:46:21 | INFO | simuleval.sentence_level_evaluator | Results:
211
+ ASR_BLEU AL AL_CA AP AP_CA DAL DAL_CA StartOffset StartOffset_CA EndOffset EndOffset_CA LAAL LAAL_CA ATD ATD_CA NumChunks NumChunks_CA DiscontinuitySum DiscontinuitySum_CA DiscontinuityAve DiscontinuityAve_CA DiscontinuityNum DiscontinuityNum_CA RTF RTF_CA
212
+ 15.448 1724.895 2913.508 0.425 0.776 1358.812 3137.55 1280.0 2213.906 1366.0 1366.0 1724.895 2913.508 1440.146 3389.374 9.5 9.5 110.0 110.0 55.0 55.0 1 1 1.326 1.326
213
+
214
+ ```
215
+
216
+ Logs and evaluation results are stored in ` $output_dir/chunk_size=$chunk_size`:
217
+
218
+ ```
219
+ $output_dir/chunk_size=$chunk_size
220
+ ├── wavs/
221
+ │ ├── 0_pred.wav # generated speech
222
+ │ ├── 1_pred.wav
223
+ │ ├── 0_pred.txt # asr transcription for ASR-BLEU toolkit
224
+ │ ├── 1_pred.txt
225
+ ├── config.yaml
226
+ ├── asr_transcripts.txt # ASR-BLEU transcription results
227
+ ├── metrics.tsv
228
+ ├── scores.tsv
229
+ ├── asr_cmd.bash
230
+ └── instances.log # logs of Simul-S2ST
231
+ ```
232
+
233
+ </details>
234
+
235
+ <details>
236
+ <summary>Simultaneous Speech-to-Text Translation</summary>
237
+
238
+ ```shell
239
+ export CUDA_VISIBLE_DEVICES=0
240
+
241
+ ROOT=/data/zhangshaolei/StreamSpeech # path to StreamSpeech repo
242
+
243
+ LANG=fr
244
+ file=streamspeech.simultaneous.${LANG}-en.pt # path to downloaded StreamSpeech model
245
+ output_dir=$ROOT/res/streamspeech.simultaneous.${LANG}-en/simul-s2tt
246
+
247
+ chunk_size=320 #ms
248
+ PYTHONPATH=$ROOT/fairseq simuleval --data-bin ${ROOT}/configs/${LANG}-en \
249
+ --user-dir ${ROOT}/researches/ctc_unity --agent-dir ${ROOT}/agent \
250
+ --source example/wav_list.txt --target example/target.txt \
251
+ --model-path $file \
252
+ --config-yaml config_gcmvn.yaml --multitask-config-yaml config_mtl_asr_st_ctcst.yaml \
253
+ --agent $ROOT/agent/speech_to_text.s2tt.streamspeech.agent.py\
254
+ --output $output_dir/chunk_size=$chunk_size \
255
+ --source-segment-size $chunk_size \
256
+ --quality-metrics BLEU --latency-metrics AL AP DAL StartOffset EndOffset LAAL ATD NumChunks RTF \
257
+ --device gpu --computation-aware
258
+ ```
259
+ </details>
260
+
261
+ <details>
262
+ <summary>Streaming ASR</summary>
263
+
264
+ ```shell
265
+ export CUDA_VISIBLE_DEVICES=0
266
+
267
+ ROOT=/data/zhangshaolei/StreamSpeech # path to StreamSpeech repo
268
+
269
+ LANG=fr
270
+ file=streamspeech.simultaneous.${LANG}-en.pt # path to downloaded StreamSpeech model
271
+ output_dir=$ROOT/res/streamspeech.simultaneous.${LANG}-en/streaming-asr
272
+
273
+ chunk_size=320 #ms
274
+ PYTHONPATH=$ROOT/fairseq simuleval --data-bin ${ROOT}/configs/${LANG}-en \
275
+ --user-dir ${ROOT}/researches/ctc_unity --agent-dir ${ROOT}/agent \
276
+ --source example/wav_list.txt --target example/source.txt \
277
+ --model-path $file \
278
+ --config-yaml config_gcmvn.yaml --multitask-config-yaml config_mtl_asr_st_ctcst.yaml \
279
+ --agent $ROOT/agent/speech_to_text.asr.streamspeech.agent.py\
280
+ --output $output_dir/chunk_size=$chunk_size \
281
+ --source-segment-size $chunk_size \
282
+ --quality-metrics BLEU --latency-metrics AL AP DAL StartOffset EndOffset LAAL ATD NumChunks RTF \
283
+ --device gpu --computation-aware
284
+ ```
285
+ </details>
286
+
287
+ ## 🎈Develop Your Own StreamSpeech
288
+
289
+ ### 1. Data Preprocess
290
+
291
+ - Follow [`./preprocess_scripts`](./preprocess_scripts) to process CVSS-C data.
292
+
293
+ ### 2. Training
294
+
295
+ > [!Note]
296
+ > You can directly use the [downloaded StreamSpeech model](#1-model-download) for evaluation and skip training.
297
+
298
+ <p align="center" width="100%">
299
+ <img src="./assets/model.png" alt="model" style="width: 100%; min-width: 300px; display: block; margin: auto;">
300
+ </p>
301
+
302
+ - Follow [`researches/ctc_unity/train_scripts/train.simul-s2st.sh`](./researches/ctc_unity/train_scripts/train.simul-s2st.sh) to train StreamSpeech for simultaneous speech-to-speech translation.
303
+ - Follow [`researches/ctc_unity/train_scripts/train.offline-s2st.sh`](./researches/ctc_unity/train_scripts/train.offline-s2st.sh) to train StreamSpeech for offline speech-to-speech translation.
304
+ - We also provide some other StreamSpeech variants and baseline implementations.
305
+
306
+ | Model | --user-dir | --arch | Description |
307
+ | ----------------- | -------------------------- | --------------------------------- | ------------------------------------------------------------ |
308
+ | **Translatotron 2** | `researches/translatotron` | `s2spect2_conformer_modified` | [Translatotron 2](https://proceedings.mlr.press/v162/jia22b.html) |
309
+ | **UnitY** | `researches/translatotron` | `unity_conformer_modified` | [UnitY](https://aclanthology.org/2023.acl-long.872/) |
310
+ | **Uni-UnitY** | `researches/uni_unity` | `uni_unity_conformer` | Change all encoders in UnitY into unidirectional |
311
+ | **Chunk-UnitY** | `researches/chunk_unity` | `chunk_unity_conformer` | Change the Conformer in UnitY into Chunk-based Conformer |
312
+ | **StreamSpeech** | `researches/ctc_unity` | `streamspeech` | StreamSpeech |
313
+ | **StreamSpeech (cascade)** | `researches/ctc_unity` | `streamspeech_cascade` | Cascaded StreamSpeech of S2TT and TTS. TTS module can be used independently for real-time TTS given incremental text. |
314
+ | **HMT** | `researches/hmt` | `hmt_transformer_iwslt_de_en` | [HMT](https://openreview.net/forum?id=9y0HFvaAYD6): strong simultaneous text-to-text translation method |
315
+ | **DiSeg** | `researches/diseg` | `convtransformer_espnet_base_seg` | [DiSeg](https://aclanthology.org/2023.findings-acl.485/): strong simultaneous speech-to-text translation method |
316
+
317
+ > [!Tip]
318
+ > The `train_scripts/` and `test_scripts/` in directory `--user-dir` give the training and testing scripts for each model.
319
+ > Refer to official repo of [UnitY](https://github.com/facebookresearch/fairseq/blob/main/fairseq/models/speech_to_speech/s2s_conformer_unity.py), [Translatotron 2](https://github.com/facebookresearch/fairseq/blob/main/fairseq/models/speech_to_speech/s2s_conformer_translatotron2.py), [HMT](https://github.com/ictnlp/HMT) and [DiSeg](https://github.com/ictnlp/DiSeg) for more details.
320
+
321
+ ### 3. Evaluation
322
+
323
+ #### (1) Offline Evaluation
324
+
325
+ Follow [`pred.offline-s2st.sh`](./researches/ctc_unity/test_scripts/pred.offline-s2st.sh) to evaluate the offline performance of StreamSpeech on ASR, S2TT and S2ST.
326
+
327
+ #### (2) Simultaneous Evaluation
328
+
329
+ A trained StreamSpeech model can be used for streaming ASR, simultaneous speech-to-text translation and simultaneous speech-to-speech translation. We provide [agent/](./agent) for these three tasks:
330
+
331
+ - `agent/speech_to_speech.streamspeech.agent.py`: simultaneous speech-to-speech translation
332
+ - `agent/speech_to_text.s2tt.streamspeech.agent.py`: simultaneous speech-to-text translation
333
+ - `agent/speech_to_text.asr.streamspeech.agent.py`: streaming ASR
334
+
335
+ Follow [`simuleval.simul-s2st.sh`](./researches/ctc_unity/test_scripts/simuleval.simul-s2st.sh), [`simuleval.simul-s2tt.sh`](./researches/ctc_unity/test_scripts/simuleval.simul-s2tt.sh), [`simuleval.streaming-asr.sh`](./researches/ctc_unity/test_scripts/simuleval.streaming-asr.sh) to evaluate StreamSpeech.
336
+
337
+ ### 4. Our Results
338
+
339
+ Our project page ([https://ictnlp.github.io/StreamSpeech-site/](https://ictnlp.github.io/StreamSpeech-site/)) provides some translated speech generated by StreamSpeech, listen to it 🎧.
340
+
341
+ #### (1) Offline Speech-to-Speech Translation ( ASR-BLEU: quality )
342
+
343
+ <p align="center" width="100%">
344
+ <img src="./assets/offline_results.png" alt="offline" style="width: 100%; min-width: 300px; display: block; margin: auto;">
345
+ </p>
346
+
347
+ #### (2) Simultaneous Speech-to-Speech Translation ( AL: latency | ASR-BLEU: quality )
348
+
349
+ <p align="center" width="100%">
350
+ <img src="./assets/simultaneous_results.png" alt="simul" style="width: 100%; min-width: 300px; display: block; margin: auto;">
351
+ </p>
352
+
353
+ #### (3) Simultaneous Speech-to-Text Translation ( AL: latency | BLEU: quality )
354
+
355
+ <p align="center" width="100%">
356
+ <img src="./assets/s2tt.png" alt="simul" style="width: 38%; min-width: 300px; display: block; margin: auto;">
357
+ </p>
358
+
359
+ #### (4) Streaming ASR ( AL: latency | WER: quality )
360
+
361
+ <p align="center" width="100%">
362
+ <img src="./assets/asr.png" alt="simul" style="width: 50%; min-width: 300px; display: block; margin: auto;">
363
+ </p>
364
+
365
+ ## 🖋Citation
366
+
367
+ If you have any questions, please feel free to submit an issue or contact `zhangshaolei20z@ict.ac.cn`.
368
+
369
+ If our work is useful for you, please cite as:
370
+
371
+ ```
372
+ @inproceedings{streamspeech,
373
+ title={StreamSpeech: Simultaneous Speech-to-Speech Translation with Multi-task Learning},
374
+ author={Shaolei Zhang and Qingkai Fang and Shoutao Guo and Zhengrui Ma and Min Zhang and Yang Feng},
375
+ year={2024},
376
+ booktitle = {Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Long Papers)},
377
+ publisher = {Association for Computational Linguistics}
378
+ }
379
+ ```
SimulEval/.github/workflows/main.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2
+ # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3
+
4
+ name: build
5
+
6
+ on:
7
+ push:
8
+ branches:
9
+ - main
10
+ pull_request:
11
+
12
+ jobs:
13
+ build:
14
+
15
+ runs-on: ubuntu-latest
16
+ strategy:
17
+ matrix:
18
+ python-version: [3.7, 3.8]
19
+
20
+ steps:
21
+ - uses: actions/checkout@v2
22
+ - name: Set up Python ${{ matrix.python-version}}
23
+ uses: actions/setup-python@v2
24
+ with:
25
+ python-version: ${{ matrix.python-version }}
26
+ - name: Install dependencies
27
+ run: |
28
+ sudo apt-get install libsndfile1
29
+ python -m pip install --upgrade pip
30
+ pip install flake8 pytest black
31
+ pip install -e .
32
+ if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
33
+ - name: Lint with black
34
+ run: black --check --diff .
35
+ - name: Lint with flake8
36
+ run: |
37
+ # stop the build if there are Python syntax errors or undefined names
38
+ flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
39
+ # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
40
+ flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
41
+ - name: Test with pytest
42
+ run: |
43
+ pytest simuleval/test/test_agent.py
44
+ pytest simuleval/test/test_agent_pipeline.py
45
+ pytest simuleval/test/test_evaluator.py
46
+ pytest simuleval/test/test_remote_evaluation.py
SimulEval/.gitignore ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98
+ __pypackages__/
99
+
100
+ # Celery stuff
101
+ celerybeat-schedule
102
+ celerybeat.pid
103
+
104
+ # SageMath parsed files
105
+ *.sage.py
106
+
107
+ # Environments
108
+ .env
109
+ .venv
110
+ env/
111
+ venv/
112
+ ENV/
113
+ env.bak/
114
+ venv.bak/
115
+
116
+ # Spyder project settings
117
+ .spyderproject
118
+ .spyproject
119
+
120
+ # Rope project settings
121
+ .ropeproject
122
+
123
+ # mkdocs documentation
124
+ /site
125
+
126
+ # mypy
127
+ .mypy_cache/
128
+ .dmypy.json
129
+ dmypy.json
130
+
131
+ # Pyre type checker
132
+ .pyre/
133
+
134
+ # pytype static type analyzer
135
+ .pytype/
136
+
137
+ # Cython debug symbols
138
+ cython_debug/
139
+ .vscode
SimulEval/CHANGELOG.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ 1.0.0 (September 25, 2020)
2
+
3
+ * Initial release.
4
+
5
+ 1.0.1 (February 8, 2021)
6
+
7
+ * Change CLI command
8
+ * Change `simuleval-server` to `simuleval --server-only`
9
+ * Change `simuleval-client` to `simuleval --client-only`
10
+ * Fix some typos
SimulEval/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Open Source Code of Conduct
2
+
3
+ ## Our Pledge
4
+
5
+ In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
6
+
7
+ ## Our Standards
8
+
9
+ Examples of behavior that contributes to creating a positive environment include:
10
+
11
+ Using welcoming and inclusive language
12
+ Being respectful of differing viewpoints and experiences
13
+ Gracefully accepting constructive criticism
14
+ Focusing on what is best for the community
15
+ Showing empathy towards other community members
16
+ Examples of unacceptable behavior by participants include:
17
+
18
+ The use of sexualized language or imagery and unwelcome sexual attention or advances
19
+ Trolling, insulting/derogatory comments, and personal or political attacks
20
+ Public or private harassment
21
+ Publishing others’ private information, such as a physical or electronic address, without explicit permission
22
+ Other conduct which could reasonably be considered inappropriate in a professional setting
23
+
24
+ ## Our Responsibilities
25
+
26
+ Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
27
+
28
+ Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
29
+
30
+ ## Scope
31
+
32
+ This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
33
+
34
+ ## Enforcement
35
+
36
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at opensource-conduct@fb.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
37
+
38
+ Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project’s leadership.
39
+
40
+ ## Attribution
41
+
42
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
43
+ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
44
+
45
+ [homepage]: https://www.contributor-covenant.org
SimulEval/CONTRIBUTING.md ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Contributing to Facebook AI SimulEval
2
+ We want to make contributing to this project as easy and transparent as
3
+ possible.
4
+
5
+ ## Pull Requests
6
+ We actively welcome your pull requests.
7
+
8
+ 1. Fork the repo and create your branch from `master`.
9
+ 2. If you've added code that should be tested, add tests.
10
+ 3. If you've changed APIs, update the documentation.
11
+ 4. Ensure the test suite passes.
12
+ 5. Make sure your code lints.
13
+
14
+ ## Issues
15
+ We use GitHub issues to track public bugs. Please ensure your description is
16
+ clear and has sufficient instructions to be able to reproduce the issue.
17
+
18
+ ## License
19
+ By contributing to Facebook AI SimulEval, you agree that your contributions will
20
+ be licensed under the LICENSE file in the root directory of this source tree.
SimulEval/LICENSE ADDED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Attribution-ShareAlike 4.0 International
2
+
3
+ =======================================================================
4
+
5
+ Creative Commons Corporation ("Creative Commons") is not a law firm and
6
+ does not provide legal services or legal advice. Distribution of
7
+ Creative Commons public licenses does not create a lawyer-client or
8
+ other relationship. Creative Commons makes its licenses and related
9
+ information available on an "as-is" basis. Creative Commons gives no
10
+ warranties regarding its licenses, any material licensed under their
11
+ terms and conditions, or any related information. Creative Commons
12
+ disclaims all liability for damages resulting from their use to the
13
+ fullest extent possible.
14
+
15
+ Using Creative Commons Public Licenses
16
+
17
+ Creative Commons public licenses provide a standard set of terms and
18
+ conditions that creators and other rights holders may use to share
19
+ original works of authorship and other material subject to copyright
20
+ and certain other rights specified in the public license below. The
21
+ following considerations are for informational purposes only, are not
22
+ exhaustive, and do not form part of our licenses.
23
+
24
+ Considerations for licensors: Our public licenses are
25
+ intended for use by those authorized to give the public
26
+ permission to use material in ways otherwise restricted by
27
+ copyright and certain other rights. Our licenses are
28
+ irrevocable. Licensors should read and understand the terms
29
+ and conditions of the license they choose before applying it.
30
+ Licensors should also secure all rights necessary before
31
+ applying our licenses so that the public can reuse the
32
+ material as expected. Licensors should clearly mark any
33
+ material not subject to the license. This includes other CC-
34
+ licensed material, or material used under an exception or
35
+ limitation to copyright. More considerations for licensors:
36
+ wiki.creativecommons.org/Considerations_for_licensors
37
+
38
+ Considerations for the public: By using one of our public
39
+ licenses, a licensor grants the public permission to use the
40
+ licensed material under specified terms and conditions. If
41
+ the licensor's permission is not necessary for any reason--for
42
+ example, because of any applicable exception or limitation to
43
+ copyright--then that use is not regulated by the license. Our
44
+ licenses grant only permissions under copyright and certain
45
+ other rights that a licensor has authority to grant. Use of
46
+ the licensed material may still be restricted for other
47
+ reasons, including because others have copyright or other
48
+ rights in the material. A licensor may make special requests,
49
+ such as asking that all changes be marked or described.
50
+ Although not required by our licenses, you are encouraged to
51
+ respect those requests where reasonable. More_considerations
52
+ for the public:
53
+ wiki.creativecommons.org/Considerations_for_licensees
54
+
55
+ =======================================================================
56
+
57
+ Creative Commons Attribution-ShareAlike 4.0 International Public
58
+ License
59
+
60
+ By exercising the Licensed Rights (defined below), You accept and agree
61
+ to be bound by the terms and conditions of this Creative Commons
62
+ Attribution-ShareAlike 4.0 International Public License ("Public
63
+ License"). To the extent this Public License may be interpreted as a
64
+ contract, You are granted the Licensed Rights in consideration of Your
65
+ acceptance of these terms and conditions, and the Licensor grants You
66
+ such rights in consideration of benefits the Licensor receives from
67
+ making the Licensed Material available under these terms and
68
+ conditions.
69
+
70
+
71
+ Section 1 -- Definitions.
72
+
73
+ a. Adapted Material means material subject to Copyright and Similar
74
+ Rights that is derived from or based upon the Licensed Material
75
+ and in which the Licensed Material is translated, altered,
76
+ arranged, transformed, or otherwise modified in a manner requiring
77
+ permission under the Copyright and Similar Rights held by the
78
+ Licensor. For purposes of this Public License, where the Licensed
79
+ Material is a musical work, performance, or sound recording,
80
+ Adapted Material is always produced where the Licensed Material is
81
+ synched in timed relation with a moving image.
82
+
83
+ b. Adapter's License means the license You apply to Your Copyright
84
+ and Similar Rights in Your contributions to Adapted Material in
85
+ accordance with the terms and conditions of this Public License.
86
+
87
+ c. BY-SA Compatible License means a license listed at
88
+ creativecommons.org/compatiblelicenses, approved by Creative
89
+ Commons as essentially the equivalent of this Public License.
90
+
91
+ d. Copyright and Similar Rights means copyright and/or similar rights
92
+ closely related to copyright including, without limitation,
93
+ performance, broadcast, sound recording, and Sui Generis Database
94
+ Rights, without regard to how the rights are labeled or
95
+ categorized. For purposes of this Public License, the rights
96
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
97
+ Rights.
98
+
99
+ e. Effective Technological Measures means those measures that, in the
100
+ absence of proper authority, may not be circumvented under laws
101
+ fulfilling obligations under Article 11 of the WIPO Copyright
102
+ Treaty adopted on December 20, 1996, and/or similar international
103
+ agreements.
104
+
105
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
106
+ any other exception or limitation to Copyright and Similar Rights
107
+ that applies to Your use of the Licensed Material.
108
+
109
+ g. License Elements means the license attributes listed in the name
110
+ of a Creative Commons Public License. The License Elements of this
111
+ Public License are Attribution and ShareAlike.
112
+
113
+ h. Licensed Material means the artistic or literary work, database,
114
+ or other material to which the Licensor applied this Public
115
+ License.
116
+
117
+ i. Licensed Rights means the rights granted to You subject to the
118
+ terms and conditions of this Public License, which are limited to
119
+ all Copyright and Similar Rights that apply to Your use of the
120
+ Licensed Material and that the Licensor has authority to license.
121
+
122
+ j. Licensor means the individual(s) or entity(ies) granting rights
123
+ under this Public License.
124
+
125
+ k. Share means to provide material to the public by any means or
126
+ process that requires permission under the Licensed Rights, such
127
+ as reproduction, public display, public performance, distribution,
128
+ dissemination, communication, or importation, and to make material
129
+ available to the public including in ways that members of the
130
+ public may access the material from a place and at a time
131
+ individually chosen by them.
132
+
133
+ l. Sui Generis Database Rights means rights other than copyright
134
+ resulting from Directive 96/9/EC of the European Parliament and of
135
+ the Council of 11 March 1996 on the legal protection of databases,
136
+ as amended and/or succeeded, as well as other essentially
137
+ equivalent rights anywhere in the world.
138
+
139
+ m. You means the individual or entity exercising the Licensed Rights
140
+ under this Public License. Your has a corresponding meaning.
141
+
142
+
143
+ Section 2 -- Scope.
144
+
145
+ a. License grant.
146
+
147
+ 1. Subject to the terms and conditions of this Public License,
148
+ the Licensor hereby grants You a worldwide, royalty-free,
149
+ non-sublicensable, non-exclusive, irrevocable license to
150
+ exercise the Licensed Rights in the Licensed Material to:
151
+
152
+ a. reproduce and Share the Licensed Material, in whole or
153
+ in part; and
154
+
155
+ b. produce, reproduce, and Share Adapted Material.
156
+
157
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
158
+ Exceptions and Limitations apply to Your use, this Public
159
+ License does not apply, and You do not need to comply with
160
+ its terms and conditions.
161
+
162
+ 3. Term. The term of this Public License is specified in Section
163
+ 6(a).
164
+
165
+ 4. Media and formats; technical modifications allowed. The
166
+ Licensor authorizes You to exercise the Licensed Rights in
167
+ all media and formats whether now known or hereafter created,
168
+ and to make technical modifications necessary to do so. The
169
+ Licensor waives and/or agrees not to assert any right or
170
+ authority to forbid You from making technical modifications
171
+ necessary to exercise the Licensed Rights, including
172
+ technical modifications necessary to circumvent Effective
173
+ Technological Measures. For purposes of this Public License,
174
+ simply making modifications authorized by this Section 2(a)
175
+ (4) never produces Adapted Material.
176
+
177
+ 5. Downstream recipients.
178
+
179
+ a. Offer from the Licensor -- Licensed Material. Every
180
+ recipient of the Licensed Material automatically
181
+ receives an offer from the Licensor to exercise the
182
+ Licensed Rights under the terms and conditions of this
183
+ Public License.
184
+
185
+ b. Additional offer from the Licensor -- Adapted Material.
186
+ Every recipient of Adapted Material from You
187
+ automatically receives an offer from the Licensor to
188
+ exercise the Licensed Rights in the Adapted Material
189
+ under the conditions of the Adapter's License You apply.
190
+
191
+ c. No downstream restrictions. You may not offer or impose
192
+ any additional or different terms or conditions on, or
193
+ apply any Effective Technological Measures to, the
194
+ Licensed Material if doing so restricts exercise of the
195
+ Licensed Rights by any recipient of the Licensed
196
+ Material.
197
+
198
+ 6. No endorsement. Nothing in this Public License constitutes or
199
+ may be construed as permission to assert or imply that You
200
+ are, or that Your use of the Licensed Material is, connected
201
+ with, or sponsored, endorsed, or granted official status by,
202
+ the Licensor or others designated to receive attribution as
203
+ provided in Section 3(a)(1)(A)(i).
204
+
205
+ b. Other rights.
206
+
207
+ 1. Moral rights, such as the right of integrity, are not
208
+ licensed under this Public License, nor are publicity,
209
+ privacy, and/or other similar personality rights; however, to
210
+ the extent possible, the Licensor waives and/or agrees not to
211
+ assert any such rights held by the Licensor to the limited
212
+ extent necessary to allow You to exercise the Licensed
213
+ Rights, but not otherwise.
214
+
215
+ 2. Patent and trademark rights are not licensed under this
216
+ Public License.
217
+
218
+ 3. To the extent possible, the Licensor waives any right to
219
+ collect royalties from You for the exercise of the Licensed
220
+ Rights, whether directly or through a collecting society
221
+ under any voluntary or waivable statutory or compulsory
222
+ licensing scheme. In all other cases the Licensor expressly
223
+ reserves any right to collect such royalties.
224
+
225
+
226
+ Section 3 -- License Conditions.
227
+
228
+ Your exercise of the Licensed Rights is expressly made subject to the
229
+ following conditions.
230
+
231
+ a. Attribution.
232
+
233
+ 1. If You Share the Licensed Material (including in modified
234
+ form), You must:
235
+
236
+ a. retain the following if it is supplied by the Licensor
237
+ with the Licensed Material:
238
+
239
+ i. identification of the creator(s) of the Licensed
240
+ Material and any others designated to receive
241
+ attribution, in any reasonable manner requested by
242
+ the Licensor (including by pseudonym if
243
+ designated);
244
+
245
+ ii. a copyright notice;
246
+
247
+ iii. a notice that refers to this Public License;
248
+
249
+ iv. a notice that refers to the disclaimer of
250
+ warranties;
251
+
252
+ v. a URI or hyperlink to the Licensed Material to the
253
+ extent reasonably practicable;
254
+
255
+ b. indicate if You modified the Licensed Material and
256
+ retain an indication of any previous modifications; and
257
+
258
+ c. indicate the Licensed Material is licensed under this
259
+ Public License, and include the text of, or the URI or
260
+ hyperlink to, this Public License.
261
+
262
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
263
+ reasonable manner based on the medium, means, and context in
264
+ which You Share the Licensed Material. For example, it may be
265
+ reasonable to satisfy the conditions by providing a URI or
266
+ hyperlink to a resource that includes the required
267
+ information.
268
+
269
+ 3. If requested by the Licensor, You must remove any of the
270
+ information required by Section 3(a)(1)(A) to the extent
271
+ reasonably practicable.
272
+
273
+ b. ShareAlike.
274
+
275
+ In addition to the conditions in Section 3(a), if You Share
276
+ Adapted Material You produce, the following conditions also apply.
277
+
278
+ 1. The Adapter's License You apply must be a Creative Commons
279
+ license with the same License Elements, this version or
280
+ later, or a BY-SA Compatible License.
281
+
282
+ 2. You must include the text of, or the URI or hyperlink to, the
283
+ Adapter's License You apply. You may satisfy this condition
284
+ in any reasonable manner based on the medium, means, and
285
+ context in which You Share Adapted Material.
286
+
287
+ 3. You may not offer or impose any additional or different terms
288
+ or conditions on, or apply any Effective Technological
289
+ Measures to, Adapted Material that restrict exercise of the
290
+ rights granted under the Adapter's License You apply.
291
+
292
+
293
+ Section 4 -- Sui Generis Database Rights.
294
+
295
+ Where the Licensed Rights include Sui Generis Database Rights that
296
+ apply to Your use of the Licensed Material:
297
+
298
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
299
+ to extract, reuse, reproduce, and Share all or a substantial
300
+ portion of the contents of the database;
301
+
302
+ b. if You include all or a substantial portion of the database
303
+ contents in a database in which You have Sui Generis Database
304
+ Rights, then the database in which You have Sui Generis Database
305
+ Rights (but not its individual contents) is Adapted Material,
306
+
307
+ including for purposes of Section 3(b); and
308
+ c. You must comply with the conditions in Section 3(a) if You Share
309
+ all or a substantial portion of the contents of the database.
310
+
311
+ For the avoidance of doubt, this Section 4 supplements and does not
312
+ replace Your obligations under this Public License where the Licensed
313
+ Rights include other Copyright and Similar Rights.
314
+
315
+
316
+ Section 5 -- Disclaimer of Warranties and Limitation of Liability.
317
+
318
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
319
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
320
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
321
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
322
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
323
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
324
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
325
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
326
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
327
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
328
+
329
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
330
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
331
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
332
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
333
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
334
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
335
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
336
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
337
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
338
+
339
+ c. The disclaimer of warranties and limitation of liability provided
340
+ above shall be interpreted in a manner that, to the extent
341
+ possible, most closely approximates an absolute disclaimer and
342
+ waiver of all liability.
343
+
344
+
345
+ Section 6 -- Term and Termination.
346
+
347
+ a. This Public License applies for the term of the Copyright and
348
+ Similar Rights licensed here. However, if You fail to comply with
349
+ this Public License, then Your rights under this Public License
350
+ terminate automatically.
351
+
352
+ b. Where Your right to use the Licensed Material has terminated under
353
+ Section 6(a), it reinstates:
354
+
355
+ 1. automatically as of the date the violation is cured, provided
356
+ it is cured within 30 days of Your discovery of the
357
+ violation; or
358
+
359
+ 2. upon express reinstatement by the Licensor.
360
+
361
+ For the avoidance of doubt, this Section 6(b) does not affect any
362
+ right the Licensor may have to seek remedies for Your violations
363
+ of this Public License.
364
+
365
+ c. For the avoidance of doubt, the Licensor may also offer the
366
+ Licensed Material under separate terms or conditions or stop
367
+ distributing the Licensed Material at any time; however, doing so
368
+ will not terminate this Public License.
369
+
370
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
371
+ License.
372
+
373
+
374
+ Section 7 -- Other Terms and Conditions.
375
+
376
+ a. The Licensor shall not be bound by any additional or different
377
+ terms or conditions communicated by You unless expressly agreed.
378
+
379
+ b. Any arrangements, understandings, or agreements regarding the
380
+ Licensed Material not stated herein are separate from and
381
+ independent of the terms and conditions of this Public License.
382
+
383
+
384
+ Section 8 -- Interpretation.
385
+
386
+ a. For the avoidance of doubt, this Public License does not, and
387
+ shall not be interpreted to, reduce, limit, restrict, or impose
388
+ conditions on any use of the Licensed Material that could lawfully
389
+ be made without permission under this Public License.
390
+
391
+ b. To the extent possible, if any provision of this Public License is
392
+ deemed unenforceable, it shall be automatically reformed to the
393
+ minimum extent necessary to make it enforceable. If the provision
394
+ cannot be reformed, it shall be severed from this Public License
395
+ without affecting the enforceability of the remaining terms and
396
+ conditions.
397
+
398
+ c. No term or condition of this Public License will be waived and no
399
+ failure to comply consented to unless expressly agreed to by the
400
+ Licensor.
401
+
402
+ d. Nothing in this Public License constitutes or may be interpreted
403
+ as a limitation upon, or waiver of, any privileges and immunities
404
+ that apply to the Licensor or You, including from the legal
405
+ processes of any jurisdiction or authority.
406
+
407
+
408
+ =======================================================================
409
+
410
+ Creative Commons is not a party to its public
411
+ licenses. Notwithstanding, Creative Commons may elect to apply one of
412
+ its public licenses to material it publishes and in those instances
413
+ will be considered the “Licensor.” The text of the Creative Commons
414
+ public licenses is dedicated to the public domain under the CC0 Public
415
+ Domain Dedication. Except for the limited purpose of indicating that
416
+ material is shared under a Creative Commons public license or as
417
+ otherwise permitted by the Creative Commons policies published at
418
+ creativecommons.org/policies, Creative Commons does not authorize the
419
+ use of the trademark "Creative Commons" or any other trademark or logo
420
+ of Creative Commons without its prior written consent including,
421
+ without limitation, in connection with any unauthorized modifications
422
+ to any of its public licenses or any other arrangements,
423
+ understandings, or agreements concerning use of licensed material. For
424
+ the avoidance of doubt, this paragraph does not form part of the
425
+ public licenses.
426
+
427
+ Creative Commons may be contacted at creativecommons.org.
SimulEval/README.md ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SimulEval
2
+ [![](https://github.com/facebookresearch/SimulEval/workflows/build/badge.svg)](https://github.com/facebookresearch/SimulEval/actions)
3
+
4
+ SimulEval is a general evaluation framework for simultaneous translation on text and speech. Full documentation can be found [here](https://simuleval.readthedocs.io/en/v1.1.0/).
5
+
6
+ ## Installation
7
+ ```
8
+ git clone https://github.com/facebookresearch/SimulEval.git
9
+ cd SimulEval
10
+ pip install -e .
11
+ ```
12
+
13
+ ## Quick Start
14
+ Following is the evaluation of a [dummy agent](examples/quick_start) which operates wait-k (k = 3) policy and generates random words until the length of the generated words is the same as the number of all the source words.
15
+ ```shell
16
+ cd examples/quick_start
17
+ simuleval --source source.txt --target target.txt --agent first_agent.py
18
+ ```
19
+
20
+ # License
21
+
22
+ SimulEval is licensed under Creative Commons BY-SA 4.0.
23
+
24
+ # Citation
25
+
26
+ Please cite as:
27
+
28
+ ```bibtex
29
+ @inproceedings{simuleval2020,
30
+ title = {Simuleval: An evaluation toolkit for simultaneous translation},
31
+ author = {Xutai Ma and Mohammad Javad Dousti and Changhan Wang and Jiatao Gu and Juan Pino},
32
+ booktitle = {Proceedings of the EMNLP},
33
+ year = {2020},
34
+ }
35
+ ```
SimulEval/docs/Makefile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Minimal makefile for Sphinx documentation
2
+ #
3
+
4
+ # You can set these variables from the command line, and also
5
+ # from the environment for the first two.
6
+ SPHINXOPTS ?=
7
+ SPHINXBUILD ?= sphinx-build
8
+ SOURCEDIR = .
9
+ BUILDDIR = build
10
+
11
+ # Put it first so that "make" without argument is like "make help".
12
+ help:
13
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14
+
15
+ .PHONY: help Makefile
16
+
17
+ # Catch-all target: route all unknown targets to Sphinx using the new
18
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19
+ %: Makefile
20
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
SimulEval/docs/conf.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # Configuration file for the Sphinx documentation builder.
8
+ #
9
+ # For the full list of built-in configuration values, see the documentation:
10
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html
11
+
12
+ # -- Project information -----------------------------------------------------
13
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
14
+
15
+ project = "SimulEval"
16
+ copyright = "Facebook AI Research (FAIR)"
17
+ author = "Facebook AI Research (FAIR)"
18
+ release = "1.1.0"
19
+
20
+ # -- General configuration ---------------------------------------------------
21
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
22
+
23
+ extensions = [
24
+ "sphinx_rtd_theme",
25
+ "sphinx.ext.autodoc",
26
+ "sphinx.ext.intersphinx",
27
+ "sphinx.ext.viewcode",
28
+ "sphinx.ext.napoleon",
29
+ "sphinxarg.ext",
30
+ ]
31
+
32
+ # templates_path = ['_templates']
33
+ exclude_patterns = []
34
+
35
+
36
+ # -- Options for HTML output -------------------------------------------------
37
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
38
+
39
+ html_theme = "sphinx_rtd_theme"
40
+ # html_static_path = ['_static']
SimulEval/docs/index.rst ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ SimulEval documentation
2
+ =======================
3
+
4
+ SimulEval is a general evaluation framework for simultaneous translation.
5
+
6
+ .. toctree::
7
+ :maxdepth: 2
8
+ :glob:
9
+ :caption: Get started
10
+
11
+ installation
12
+ quick_start
13
+
14
+ .. toctree::
15
+ :maxdepth: 2
16
+ :caption: User's guide
17
+
18
+ user_guide/introduction
19
+ user_guide/agent
20
+ user_guide/evaluator
21
+ user_guide/dataloader
22
+
23
+ .. toctree::
24
+ :maxdepth: 2
25
+ :caption: Tutorials
26
+
27
+ tutorials/remote_evaluation
28
+ tutorials/speech_to_text
29
+ tutorials/speech_to_speech
30
+
SimulEval/docs/installation.rst ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Installation
2
+ ============
3
+
4
+ From pip
5
+ --------
6
+
7
+ .. code-block:: bash
8
+
9
+ pip install simuleval
10
+
11
+
12
+ From source
13
+ -----------
14
+
15
+ .. code-block:: bash
16
+
17
+ git clone https://github.com/facebookresearch/SimulEval.git
18
+ cd SimulEval
19
+ pip install -e .
20
+
21
+
SimulEval/docs/make.bat ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ :: Copyright (c) Facebook, Inc. and its affiliates.
2
+ :: All rights reserved.
3
+ ::
4
+ :: This source code is licensed under the license found in the
5
+ :: LICENSE file in the root directory of this source tree.
6
+
7
+ @ECHO OFF
8
+
9
+ pushd %~dp0
10
+
11
+ REM Command file for Sphinx documentation
12
+
13
+ if "%SPHINXBUILD%" == "" (
14
+ set SPHINXBUILD=sphinx-build
15
+ )
16
+ set SOURCEDIR=source
17
+ set BUILDDIR=build
18
+
19
+ %SPHINXBUILD% >NUL 2>NUL
20
+ if errorlevel 9009 (
21
+ echo.
22
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
23
+ echo.installed, then set the SPHINXBUILD environment variable to point
24
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
25
+ echo.may add the Sphinx directory to PATH.
26
+ echo.
27
+ echo.If you don't have Sphinx installed, grab it from
28
+ echo.https://www.sphinx-doc.org/
29
+ exit /b 1
30
+ )
31
+
32
+ if "%1" == "" goto help
33
+
34
+ %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
35
+ goto end
36
+
37
+ :help
38
+ %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
39
+
40
+ :end
41
+ popd
SimulEval/docs/quick_start.rst ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _first-agent:
2
+
3
+ Quick Start
4
+ ===========
5
+
6
+ This section will introduce a minimal example on how to use SimulEval for simultaneous translation evaluation.
7
+ The code in the example can be found in :code:`examples/quick_start`.
8
+
9
+ The agent in SimulEval is core for simultaneous evaluation.
10
+ It's a carrier of user's simultaneous system.
11
+ The user has to implement the agent based on their system for evaluation.
12
+ The example simultaneous system is a dummy wait-k agent, which
13
+
14
+ - Runs `wait-k <https://aclanthology.org/P19-1289/>`_ policy.
15
+ - Generates random characters when the policy decides to write.
16
+ - Stops the generation k predictions after source input. For simplicity, we just set :code:`k=3` in this example.
17
+
18
+ The implementation of this agent is shown as follows.
19
+
20
+ .. literalinclude:: ../examples/quick_start/first_agent.py
21
+ :language: python
22
+ :lines: 6-
23
+
24
+ There are two essential components for an agent:
25
+
26
+ - :code:`states`: The attribute keeps track of the source and target information.
27
+ - :code:`policy`: The method makes decisions when there is a new source segment.
28
+
29
+ Once the agent is implemented and saved at :code:`first_agent.py`,
30
+ run the following command for latency evaluation:
31
+
32
+ .. code-block:: bash
33
+
34
+ simuleval --source source.txt --target target.txt --agent first_agent.py
35
+
36
+ where :code:`--source` is the input file while :code:`--target` is the reference file.
37
+
38
+ By default, the SimulEval will give the following output --- one quality and three latency metrics.
39
+
40
+ .. code-block:: bash
41
+
42
+ 2022-12-05 13:43:58 | INFO | simuleval.cli | Evaluate system: DummyWaitkTextAgent
43
+ 2022-12-05 13:43:58 | INFO | simuleval.dataloader | Evaluating from text to text.
44
+ 2022-12-05 13:43:58 | INFO | simuleval.sentence_level_evaluator | Results:
45
+ BLEU AL AP DAL
46
+ 1.541 3.0 0.688 3.0
47
+
48
+ The average lagging is expected since we are running a wait-3 system where the source and target always have the same length.
49
+ Notice that we have a very low yet random BLEU score. It's because we randomly generate the output.
SimulEval/docs/requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ sphinx-argparse==0.4.0
2
+ -e .
SimulEval/docs/tutorials/remote_evaluation.rst ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Remote Evaluation
2
+ =================
3
+
4
+ Stand Alone Agent
5
+ -----------------
6
+ The agent can run in stand alone mode,
7
+ by using :code:`--standalone` option.
8
+ SimulEval will kick off a server that hosts the agent.
9
+ For instance, with the agent in :ref:`first-agent`,
10
+
11
+ .. code-block:: bash
12
+
13
+ > simuleval --standalone --remote-port 8888 --agent first_agent.py
14
+ 2022-12-06 19:12:26 | INFO | simuleval.cli | Evaluate system: DummyWaitkTextAgent
15
+ 2022-12-06 19:12:26 | INFO | simuleval.agent_server | Simultaneous Translation Server Started (process id 53902). Listening to port 8888
16
+
17
+ For detailed RESTful APIs, please see (TODO)
18
+
19
+ Docker
20
+ -----------------
21
+ You can also use a docker image to run the simuleval.
22
+ A minimal example of :code:`Dockerfile` is
23
+
24
+ .. literalinclude:: ../../examples/quick_start/Dockerfile
25
+ :language: docker
26
+
27
+ Build and run the docker image:
28
+
29
+ .. code-block:: bash
30
+
31
+ cd examples/quick_start && docker build -t simuleval_agent .
32
+ docker run -p 8888:8888 simuleval_agent:latest
33
+
34
+ Remote Evaluation
35
+ ------------------
36
+ If there is an agent server or docker image available
37
+ (let's say the one we just kicked off at localhost:8888),
38
+ we can start a remote evaluator as follows. For simplicity, we assume they are on the same machine.
39
+
40
+ .. code-block:: bash
41
+
42
+ simuleval --remote-eval --remote-port 8888 \
43
+ --source source.txt --target target.txt \
44
+ --source-type text --target-type text
SimulEval/docs/tutorials/speech_to_speech.rst ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ Speech-to-Speech
2
+ ================
SimulEval/docs/tutorials/speech_to_text.rst ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ Speech-to-Text
2
+ ==============
SimulEval/docs/user_guide/agent.rst ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Agent
2
+ =====
3
+
4
+ To evaluate the simultaneous translation system,
5
+ the users need to implement an agent class which operates the system logic.
6
+ This section will introduce how to implement an agent.
7
+
8
+ Source-Target Types
9
+ -------------------
10
+ First of all,
11
+ we must declare the source and target types of the agent class.
12
+ It can be done by inheriting from
13
+
14
+ - One of the following four built-in agent types
15
+
16
+ - :class:`simuleval.agents.TextToTextAgent`
17
+ - :class:`simuleval.agents.SpeechToTextAgent`
18
+ - :class:`simuleval.agents.TextToSpeechAgent`
19
+ - :class:`simuleval.agents.SpeechToSpeechAgent`
20
+
21
+ - Or :class:`simuleval.agents.GenericAgent`, with explicit declaration of :code:`source_type` and :code:`target_type`.
22
+
23
+ The following two examples are equivalent.
24
+
25
+ .. code-block:: python
26
+
27
+ from simuleval import simuleval
28
+ from simuleval.agents import GenericAgent
29
+
30
+ class MySpeechToTextAgent(GenericAgent):
31
+ source_type = "Speech"
32
+ target_type = "Text"
33
+ ....
34
+
35
+ .. code-block:: python
36
+
37
+ from simuleval.agents import SpeechToSpeechAgent
38
+
39
+ class MySpeechToTextAgent(SpeechToSpeechAgent):
40
+ ....
41
+
42
+ .. _agent_policy:
43
+
44
+ Policy
45
+ ------
46
+
47
+ The agent must have a :code:`policy` method which must return one of two actions, :code:`ReadAction` and :code:`WriteAction`.
48
+ For example, an agent with a :code:`policy` method should look like this
49
+
50
+ .. code-block:: python
51
+
52
+ class MySpeechToTextAgent(SpeechToSpeechAgent):
53
+ def policy(self):
54
+ if do_we_need_more_input(self.states):
55
+ return ReadAction()
56
+ else:
57
+ prediction = generate_a_token(self.states)
58
+ finished = is_sentence_finished(self.states)
59
+ return WriteAction(prediction, finished=finished)
60
+
61
+
62
+ ..
63
+ .. autoclass:: simuleval.agents.actions.WriteAction
64
+
65
+ ..
66
+ .. autoclass:: simuleval.agents.actions.ReadAction
67
+
68
+ States
69
+ ------------
70
+ Each agent has the attribute the :code:`states` to keep track of the progress of decoding.
71
+ The :code:`states` attribute will be reset at the beginning of each sentence.
72
+ SimulEval provide an built-in states :class:`simuleval.agents.states.AgentStates`,
73
+ which has some basic attributes such source and target sequences.
74
+ The users can also define customized states with :code:`Agent.build_states` method:
75
+
76
+ .. code-block:: python
77
+
78
+ from simuleval.agents.states import AgentStates
79
+ from dataclasses import dataclass
80
+
81
+ @dataclass
82
+ class MyComplicatedStates(AgentStates):
83
+ some_very_useful_variable: int
84
+
85
+ def reset(self):
86
+ super().reset()
87
+ # also remember to reset the value
88
+ some_very_useful_variable = 0
89
+
90
+ class MySpeechToTextAgent(SpeechToSpeechAgent):
91
+ def build_states(self):
92
+ return MyComplicatedStates(0)
93
+
94
+ def policy(self):
95
+ some_very_useful_variable = self.states.some_very_useful_variable
96
+ ...
97
+ self.states.some_very_useful_variable = new_value
98
+ ...
99
+
100
+ ..
101
+ .. autoclass:: simuleval.agents.states.AgentStates
102
+ :members:
103
+
104
+
105
+ Pipeline
106
+ --------
107
+ The simultaneous system can consist of several different components.
108
+ For instance, a simultaneous speech-to-text translation can have a streaming automatic speech recognition system and simultaneous text-to-text translation system.
109
+ SimulEval introduces the agent pipeline to support this function.
110
+ The following is a minimal example.
111
+ We concatenate two wait-k systems with different rates (:code:`k=2` and :code:`k=3`)
112
+ Note that if there is more than one agent class defined,
113
+ the :code:`@entrypoint` decorator has to be used to determine the entry point
114
+
115
+ .. literalinclude:: ../../examples/quick_start/agent_pipeline.py
116
+ :language: python
117
+ :lines: 7-
118
+
119
+ Customized Arguments
120
+ -----------------------
121
+
122
+ It is often the case that we need to pass some customized arguments for the system to configure different settings.
123
+ The agent class has a built-in static method :code:`add_args` for this purpose.
124
+ The following is an updated version of the dummy agent from :ref:`first-agent`.
125
+
126
+ .. literalinclude:: ../../examples/quick_start/agent_with_configs.py
127
+ :language: python
128
+ :lines: 6-
129
+
130
+ Then just simply pass the arguments through command line as follow.
131
+
132
+ .. code-block:: bash
133
+
134
+ simuleval \
135
+ --source source.txt --target target.txt \ # data arguments
136
+ --agent dummy_waitk_text_agent_v2.py \
137
+ --waitk 3 --vocab data/dict.txt # agent arguments
138
+
139
+ Load Agents from Python Class
140
+ -----------------------------
141
+
142
+ If you have the agent class in the python environment, for instance
143
+
144
+ .. literalinclude:: ../../examples/quick_start/agent_with_configs.py
145
+ :language: python
146
+ :lines: 6-
147
+
148
+ You can also start the evaluation with following command
149
+
150
+ .. code-block:: bash
151
+
152
+ simuleval \
153
+ --source source.txt --target target.txt \ # data arguments
154
+ --agent-class DummyWaitkTextAgent \
155
+ --waitk 3 --vocab data/dict.txt # agent arguments
156
+
157
+
158
+ Load Agents from Directory
159
+ --------------------------
160
+
161
+ Agent can also be loaded from a directory, which will be referred to as system directory.
162
+ The system directory should have everything required to start the agent. Again use the following agent as example
163
+
164
+ .. literalinclude:: ../../examples/quick_start/agent_with_configs.py
165
+ :language: python
166
+ :lines: 6-
167
+
168
+ and the system directory has
169
+
170
+ .. code-block:: bash
171
+
172
+ > ls ${system_dir}
173
+ main.yaml dict.txt
174
+
175
+ Where the `main.yaml` has all the command line options. The path will be the relative path to the `${system_dir}`.
176
+
177
+ .. code-block:: yaml
178
+
179
+ waitk: 3
180
+ vocab: dict.txt
181
+
182
+ The agent can then be started as following
183
+
184
+ .. code-block:: bash
185
+
186
+ simuleval \
187
+ --source source.txt --target target.txt \ # data arguments
188
+ --system-dir ${system_dir}
189
+
190
+ By default, the `main.yaml` will be read. You can also have multiple YAML files in the system directory and pass them through command line arguments
191
+
192
+ .. code-block:: bash
193
+ > ls ${system_dir}
194
+ main.yaml dict.txt v1.yaml
195
+
196
+ > simuleval \
197
+ --source source.txt --target target.txt \ # data arguments
198
+ --system-dir ${system_dir} --system-config v1.yaml
SimulEval/docs/user_guide/dataloader.rst ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Dataloader
2
+ ===========
3
+ There are two ways to load data.
4
+
5
+ .. autoclass:: simuleval.data.dataloader.GenericDataloader
SimulEval/docs/user_guide/evaluator.rst ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Evaluator
2
+ =========
3
+
4
+ The evaluation in SimulEval is implemented as the Evaluator shown below.
5
+ It runs on sentence level, and will score the translation on quality and latency.
6
+ The user can use :code:`--quality-metrics` and :code:`--latency-metrics` to choose the metrics.
7
+ The final results along with the logs will be saved at :code:`--output` if given.
8
+
9
+ .. autoclass:: simuleval.evaluator.evaluator.SentenceLevelEvaluator
10
+
11
+ Quality Scorers
12
+ ---------------
13
+
14
+ .. autoclass:: simuleval.evaluator.scorers.quality_scorer.SacreBLEUScorer
15
+ .. autoclass:: simuleval.evaluator.scorers.quality_scorer.ASRSacreBLEUScorer
16
+
17
+ Latency Scorers
18
+ ---------------
19
+
20
+ .. autoclass:: simuleval.evaluator.scorers.latency_scorer.ALScorer
21
+ :members:
22
+
23
+ .. autoclass:: simuleval.evaluator.scorers.latency_scorer.APScorer
24
+ :members:
25
+
26
+ .. autoclass:: simuleval.evaluator.scorers.latency_scorer.DALScorer
27
+ :members:
28
+
29
+ Customized Scorers
30
+ ------------------
31
+ To add customized scorers, the user can use :code:`@register_latency_scorer` or :code:`@register_quality_scorer` to decorate a scorer class.
32
+ and use :code:`--quality-metrics` and :code:`--latency-metrics` to call the scorer. For example:
33
+
34
+ .. literalinclude:: ../../examples/quick_start/agent_with_new_metrics.py
35
+ :lines: 6-
36
+
37
+ .. code-block:: bash
38
+
39
+ > simuleval --source source.txt --target target.txt --agent agent_with_new_metrics.py --latency-metrics RTF
40
+ 2022-12-06 12:56:01 | INFO | simuleval.cli | Evaluate system: DummyWaitkTextAgent
41
+ 2022-12-06 12:56:01 | INFO | simuleval.dataloader | Evaluating from text to text.
42
+ 2022-12-06 12:56:01 | INFO | simuleval.sentence_level_evaluator | Results:
43
+ BLEU RTF
44
+ 1.593 1.078
SimulEval/docs/user_guide/introduction.rst ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Introduction
2
+ ============
3
+ Different from an offline translation system, the evaluation of simultaneous translation requires incremental decoding with a streaming input.
4
+ Simultaneous evaluation introduces a front-end / back-end setup, shown as follows.
5
+
6
+ The back-end contains one or multiple user-defined agents which make decisions of whether to generate prediction at a certain point.
7
+ The agent can also be considered as a queue, where inputs keep being pushed in and the policy decides the timing to pop the output.
8
+
9
+ The front-end, on the other side, represents the source of input and the recipient of the system prediction.
10
+ In deployment, the front-end can be web page or cell phone app.
11
+ In SimulEval, the front-end is the evaluator, which feeds streaming input to the back-end, receives predictions and tracks the delays.
12
+ The front-end and back-end can run separately for different purpose.
13
+
14
+ The evaluation process can be summarized as the following pseudocode
15
+
16
+ .. code-block:: python
17
+
18
+ for instance in evaluator.instances:
19
+ while not instance.finished:
20
+ input_segment = instance.send_source()
21
+ prediction = agent.pushpop(input_segment)
22
+ if prediction is not None:
23
+ instance.receive_prediction(prediction)
24
+
25
+ results = [scorer.score() for scorer in evaluator.scorers]
26
+
27
+
28
+
29
+ The common usage of SimulEval is as follow
30
+
31
+ .. code-block:: bash
32
+
33
+ simuleval DATALOADER_OPTIONS EVALUATOR_OPTIONS --agent $AGENT_FILE AGENT_OPTIONS
34
+
35
+ We will introduce the usage of the toolkit based on these three major components: Agent, Dataloader and Evaluator.
SimulEval/examples/quick_start/Dockerfile ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.8
2
+ RUN apt-get update \
3
+ && apt-get upgrade -y \
4
+ && apt-get install -y \
5
+ && apt-get -y install apt-utils gcc libpq-dev libsndfile-dev
6
+ RUN git clone https://github.com/facebookresearch/SimulEval.git
7
+ WORKDIR SimulEval
8
+ RUN git checkout v1.1.0
9
+ RUN pip install -e .
10
+ CMD ["simuleval", "--standalone", "--remote-port", "8888", "--agent", "examples/quick_start/first_agent.py"]
SimulEval/examples/quick_start/agent_pipeline.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import random
8
+ from simuleval.utils import entrypoint
9
+ from simuleval.agents import TextToTextAgent
10
+ from simuleval.agents.actions import ReadAction, WriteAction
11
+ from simuleval.agents import AgentPipeline
12
+
13
+
14
+ class DummyWaitkTextAgent(TextToTextAgent):
15
+ waitk = 0
16
+ vocab = [chr(i) for i in range(ord("A"), ord("Z") + 1)]
17
+
18
+ def policy(self):
19
+ lagging = len(self.states.source) - len(self.states.target)
20
+
21
+ if lagging >= self.waitk or self.states.source_finished:
22
+ prediction = random.choice(self.vocab)
23
+
24
+ return WriteAction(prediction, finished=(lagging <= 1))
25
+ else:
26
+ return ReadAction()
27
+
28
+
29
+ class DummyWait2TextAgent(DummyWaitkTextAgent):
30
+ waitk = 2
31
+
32
+
33
+ class DummyWait4TextAgent(DummyWaitkTextAgent):
34
+ waitk = 4
35
+
36
+
37
+ @entrypoint
38
+ class DummyPipeline(AgentPipeline):
39
+ pipeline = [DummyWait2TextAgent, DummyWait4TextAgent]
SimulEval/examples/quick_start/agent_with_configs.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import random
8
+ from simuleval.utils import entrypoint
9
+ from simuleval.agents import TextToTextAgent
10
+ from simuleval.agents.actions import ReadAction, WriteAction
11
+ from argparse import Namespace, ArgumentParser
12
+
13
+
14
+ @entrypoint
15
+ class DummyWaitkTextAgent(TextToTextAgent):
16
+ def __init__(self, args: Namespace):
17
+ """Initialize your agent here.
18
+ For example loading model, vocab, etc
19
+ """
20
+ super().__init__(args)
21
+ self.waitk = args.waitk
22
+ with open(args.vocab) as f:
23
+ self.vocab = [line.strip() for line in f]
24
+
25
+ @staticmethod
26
+ def add_args(parser: ArgumentParser):
27
+ """Add customized command line arguments"""
28
+ parser.add_argument("--waitk", type=int, default=3)
29
+ parser.add_argument("--vocab", type=str)
30
+
31
+ def policy(self):
32
+ lagging = len(self.states.source) - len(self.states.target)
33
+
34
+ if lagging >= self.waitk or self.states.source_finished:
35
+ prediction = random.choice(self.vocab)
36
+
37
+ return WriteAction(prediction, finished=(lagging <= 1))
38
+ else:
39
+ return ReadAction()
SimulEval/examples/quick_start/agent_with_new_metrics.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import random
8
+ from statistics import mean
9
+ from simuleval.utils import entrypoint
10
+ from simuleval.evaluator.scorers.latency_scorer import (
11
+ register_latency_scorer,
12
+ LatencyScorer,
13
+ )
14
+ from simuleval.agents import TextToTextAgent
15
+ from simuleval.agents.actions import ReadAction, WriteAction
16
+
17
+
18
+ @register_latency_scorer("RTF")
19
+ class RTFScorer(LatencyScorer):
20
+ """Real time factor
21
+
22
+ Usage:
23
+ --latency-metrics RTF
24
+ """
25
+
26
+ def __call__(self, instances) -> float:
27
+ scores = []
28
+ for ins in instances.values():
29
+ scores.append(ins.delays[-1] / ins.source_length)
30
+
31
+ return mean(scores)
32
+
33
+
34
+ @entrypoint
35
+ class DummyWaitkTextAgent(TextToTextAgent):
36
+ waitk = 3
37
+ vocab = [chr(i) for i in range(ord("A"), ord("Z") + 1)]
38
+
39
+ def policy(self):
40
+ lagging = len(self.states.source) - len(self.states.target)
41
+
42
+ if lagging >= self.waitk or self.states.source_finished:
43
+ prediction = random.choice(self.vocab)
44
+
45
+ return WriteAction(prediction, finished=(lagging <= 1))
46
+ else:
47
+ return ReadAction()
SimulEval/examples/quick_start/dict.txt ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ A
2
+ B
3
+ C
4
+ D
5
+ E
6
+ F
7
+ G
8
+ H
9
+ I
10
+ J
11
+ K
12
+ L
13
+ M
14
+ N
15
+ O
16
+ P
17
+ Q
18
+ R
19
+ S
20
+ T
21
+ U
22
+ V
23
+ W
24
+ X
25
+ Y
26
+ Z
SimulEval/examples/quick_start/first_agent.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import random
8
+ from simuleval.utils import entrypoint
9
+ from simuleval.agents import TextToTextAgent
10
+ from simuleval.agents.actions import ReadAction, WriteAction
11
+
12
+
13
+ @entrypoint
14
+ class DummyWaitkTextAgent(TextToTextAgent):
15
+ waitk = 3
16
+ vocab = [chr(i) for i in range(ord("A"), ord("Z") + 1)]
17
+
18
+ def policy(self):
19
+ lagging = len(self.states.source) - len(self.states.target)
20
+
21
+ if lagging >= self.waitk or self.states.source_finished:
22
+ prediction = random.choice(self.vocab)
23
+
24
+ return WriteAction(prediction, finished=(lagging <= 1))
25
+ else:
26
+ return ReadAction()
SimulEval/examples/quick_start/readme.md ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Quick Start
2
+ Following are some minimal examples to use SimulEval. More details can be found [here](https://simuleval.readthedocs.io/en/v1.1.0/quick_start.html).
3
+
4
+ ## First Agent
5
+ To evaluate a text-to-text wait-3 system with random output:
6
+
7
+ ```
8
+ > simuleval --source source.txt --target target.txt --agent first_agent.py
9
+
10
+ 2022-12-05 13:43:58 | INFO | simuleval.cli | Evaluate system: DummyWaitkTextAgent
11
+ 2022-12-05 13:43:58 | INFO | simuleval.dataloader | Evaluating from text to text.
12
+ 2022-12-05 13:43:58 | INFO | simuleval.sentence_level_evaluator | Results:
13
+ BLEU AL AP DAL
14
+ 1.541 3.0 0.688 3.0
15
+
16
+ ```
17
+
18
+ ## Agent with Command Line Arguments
19
+ ```
20
+ simuleval --source source.txt --target target.txt --agent agent_with_configs.py --waitk 3 --vocab dict.txt
21
+ ```
22
+
23
+ ## Agent Pipeline
24
+ ```
25
+ simuleval --source source.txt --target target.txt --agent agent_pipeline.py
26
+ ```
27
+
28
+ ## Agent with New Metrics
29
+ ```
30
+ simuleval --source source.txt --target target.txt --agent agent_with_new_metrics.py
31
+ ```
32
+
33
+ ## Standalone Agent & Remote Evaluation
34
+ Start an agent server:
35
+ ```
36
+ simuleval --standalone --remote-port 8888 --agent agent_with_new_metrics.py
37
+ ```
38
+ Or with docker
39
+ ```
40
+ docker build -t simuleval_agent .
41
+ docker run -p 8888:8888 simuleval_agent:latest
42
+ ```
43
+
44
+ Start a remote evaluator:
45
+ ```
46
+ simuleval --remote-eval --source source.txt --target target.txt --source-type text --target-type text --remote-port 8888
47
+ ```
SimulEval/examples/quick_start/source.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ Z U S N B Y X Q L O T
2
+ M A J F P G O Y V R H M Z O T A
3
+ M O W A O I D H H B O F
4
+ N Q N I P C O H A A
5
+ G B O J H P W C I A L V
6
+ P T Z D E E N T B Y G Z R K
7
+ F S H U K R W K S B R K M B B Q F C O U
8
+ M H O L W Z G J Y X J B I
9
+ A V B F E S F E W Q C S
10
+ F N O I E Z B R S C V N S
SimulEval/examples/quick_start/target.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ Z U S N B Y X Q L O T
2
+ M A J F P G O Y V R H M Z O T A
3
+ M O W A O I D H H B O F
4
+ N Q N I P C O H A A
5
+ G B O J H P W C I A L V
6
+ P T Z D E E N T B Y G Z R K
7
+ F S H U K R W K S B R K M B B Q F C O U
8
+ M H O L W Z G J Y X J B I
9
+ A V B F E S F E W Q C S
10
+ F N O I E Z B R S C V N S
SimulEval/examples/speech_to_speech/english_counter_agent.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from simuleval.utils import entrypoint
2
+ from simuleval.data.segments import SpeechSegment
3
+ from simuleval.agents import SpeechToSpeechAgent
4
+ from simuleval.agents.actions import WriteAction, ReadAction
5
+ from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
6
+ from fairseq.models.text_to_speech.hub_interface import TTSHubInterface
7
+
8
+
9
+ class TTSModel:
10
+ def __init__(self):
11
+ models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
12
+ "facebook/fastspeech2-en-ljspeech",
13
+ arg_overrides={"vocoder": "hifigan", "fp16": False},
14
+ )
15
+ TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)
16
+ self.tts_generator = task.build_generator(models, cfg)
17
+ self.tts_task = task
18
+ self.tts_model = models[0]
19
+ self.tts_model.to("cpu")
20
+ self.tts_generator.vocoder.to("cpu")
21
+
22
+ def synthesize(self, text):
23
+ sample = TTSHubInterface.get_model_input(self.tts_task, text)
24
+ if sample["net_input"]["src_lengths"][0] == 0:
25
+ return [], 0
26
+ for key in sample["net_input"].keys():
27
+ if sample["net_input"][key] is not None:
28
+ sample["net_input"][key] = sample["net_input"][key].to("cpu")
29
+
30
+ wav, rate = TTSHubInterface.get_prediction(
31
+ self.tts_task, self.tts_model, self.tts_generator, sample
32
+ )
33
+ wav = wav.tolist()
34
+ return wav, rate
35
+
36
+
37
+ @entrypoint
38
+ class EnglishSpeechCounter(SpeechToSpeechAgent):
39
+ """
40
+ Incrementally feed text to this offline Fastspeech2 TTS model,
41
+ with a minimum numbers of phonemes every chunk.
42
+ """
43
+
44
+ def __init__(self, args):
45
+ super().__init__(args)
46
+ self.wait_seconds = args.wait_seconds
47
+ self.tts_model = TTSModel()
48
+
49
+ @staticmethod
50
+ def add_args(parser):
51
+ parser.add_argument("--wait-seconds", default=1, type=int)
52
+
53
+ def policy(self):
54
+ length_in_seconds = round(
55
+ len(self.states.source) / self.states.source_sample_rate
56
+ )
57
+ if not self.states.source_finished and length_in_seconds < self.wait_seconds:
58
+ return ReadAction()
59
+ samples, fs = self.tts_model.synthesize(f"{length_in_seconds} mississippi")
60
+
61
+ # A SpeechSegment has to be returned for speech-to-speech translation system
62
+ return WriteAction(
63
+ SpeechSegment(
64
+ content=samples,
65
+ sample_rate=fs,
66
+ finished=self.states.source_finished,
67
+ ),
68
+ finished=self.states.source_finished,
69
+ )
SimulEval/examples/speech_to_speech/eval.sh ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ simuleval \
2
+ --agent english_counter_agent.py --output output \
3
+ --source source.txt --target reference/en.txt --source-segment-size 1000\
4
+ --quality-metrics WHISPER_ASR_BLEU \
5
+ --target-speech-lang en --transcript-lowercase --transcript-non-punctuation --whisper-model-size large \
6
+ --latency-metrics StartOffset EndOffset ATD
SimulEval/examples/speech_to_speech/readme.md ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Simultaneous Speech-to-Speech Translation
2
+
3
+ This tutorial provides a minimal example on how to evaluate a simultaneous speech-to-speech translation system.
4
+
5
+ ### Requirements
6
+
7
+ To run this example, the following package is required
8
+
9
+ - [`whisper`](https://github.com/openai/whisper): for quality evaluation (`WHISPER_ASR_BLEU`).
10
+
11
+ ### Agent
12
+
13
+ The speech-to-speech agent ([english_counter_agent.py](english_counter_agent.py)) in this example is a counter, which generates a piece of audio every second after an initial wait.
14
+ The policy of the agent is shown below. The agent will wait for `self.wait_seconds` seconds,
15
+ and generate the audio of `{length_in_seconds} mississippi` every second afterward.
16
+
17
+ ```python
18
+ def policy(self):
19
+ length_in_seconds = round(
20
+ len(self.states.source) / self.states.source_sample_rate
21
+ )
22
+ if not self.states.source_finished and length_in_seconds < self.wait_seconds:
23
+ return ReadAction()
24
+ print(length_in_seconds)
25
+ samples, fs = self.tts_model.synthesize(f"{length_in_seconds} mississippi")
26
+
27
+ # A SpeechSegment has to be returned for speech-to-speech translation system
28
+ return WriteAction(
29
+ SpeechSegment(
30
+ content=samples,
31
+ sample_rate=fs,
32
+ finished=self.states.source_finished,
33
+ ),
34
+ finished=self.states.source_finished,
35
+ )
36
+ ```
37
+
38
+ Notice that for speech output agent, the `WriteAction` has to contain a `SpeechSegment` class.
39
+
40
+ ### Evaluation
41
+
42
+ The following command will start an evaluation
43
+
44
+ ```bash
45
+ simuleval \
46
+ --agent english_counter_agent.py --output output \
47
+ --source source.txt --target reference/en.txt --source-segment-size 1000\
48
+ --quality-metrics WHISPER_ASR_BLEU \
49
+ --target-speech-lang en --transcript-lowercase --transcript-non-punctuation\
50
+ --latency-metrics StartOffset EndOffset ATD
51
+ ```
52
+
53
+ For quality evaluation, we use ASR_BLEU, that is transcribing the speech output and compute BLEU score with the reference text. To use this feature, `whisper` has to be installed.
54
+
55
+ We use three metrics for latency evaluation
56
+
57
+ - `StartOffset`: The starting offset of translation comparing with source audio
58
+ - `EndOffset`: The ending offset of translation comparing with source audio
59
+ - `ATD`: Average Token Delay
60
+
61
+ The results of the evaluation should be as follows. The transcripts and alignments can be found in the `output` directory.
62
+
63
+ ```
64
+ WHISPER_ASR_BLEU StartOffset EndOffset ATD
65
+ 100.0 1000.0 1490.703 1248.261
66
+ ```
SimulEval/examples/speech_to_speech/reference/de.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ ein Mississippi zwei Mississippi drei Mississippi vier Mississippi fünf Mississippi sechs Mississippi sieben Mississippi
SimulEval/examples/speech_to_speech/reference/en.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ one mississippi two mississippi three mississippi four mississippi five mississippi six mississippi seven mississippi
SimulEval/examples/speech_to_speech/reference/ja.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ 1 ミシシッピ 2 ミシシッピ 3 ミシシッピ 4 ミシシッピ 5 ミシシッピ 6 ミシシッピ 7 ミシシッピ
SimulEval/examples/speech_to_speech/reference/zh.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ 一密西西比二密西西比三密西西比四密西西比五密西西比六密西西比七密西西比
SimulEval/examples/speech_to_speech/source.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ test.wav
SimulEval/examples/speech_to_speech/test.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:354152b48e1575f4ca3590b76700d46ef302dca9c183b5526f810c636c5d88f7
3
+ size 302124
SimulEval/setup.cfg ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [tool:pytest]
2
+ flake8-max-line-length = 127
SimulEval/setup.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import setuptools

setuptools.setup(
    python_requires=">3.7.0",
    name="simuleval",
    version="1.1.0",
    author="Xutai Ma",
    entry_points={
        "console_scripts": [
            "simuleval = simuleval.cli:main",
        ],
    },
    install_requires=[
        "pytest",
        "pytest-cov",
        "sacrebleu==2.3.1",
        "tornado",
        "soundfile",
        "pandas",
        "requests",
        "pytest-flake8",
        "textgrid",
        "tqdm==4.64.1",
        "pyyaml",
        "bitarray==2.6.0",
        "yt-dlp",
        "pydub",
    ],
    # FIX: the setup() keyword is `packages` (plural). The original used
    # `package=`, which setuptools does not recognize, so no packages were
    # actually included in the built distribution.
    packages=setuptools.find_packages(
        exclude=[
            "examples",
            "examples.*",
            "docs",
            "docs.*",
        ]
    ),
)
SimulEval/simuleval/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
SimulEval/simuleval/agents/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from .agent import ( # noqa
8
+ GenericAgent,
9
+ SpeechToTextAgent,
10
+ SpeechToSpeechAgent,
11
+ TextToSpeechAgent,
12
+ TextToTextAgent,
13
+ )
14
+ from .states import AgentStates # noqa
15
+ from .actions import Action, ReadAction, WriteAction # noqa
16
+ from .pipeline import AgentPipeline # noqa
SimulEval/simuleval/agents/actions.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import Union, List
8
+ from dataclasses import dataclass
9
+ from simuleval.data.segments import Segment
10
+
11
+
12
class Action:
    """
    Abstract Action class.

    Concrete subclasses (ReadAction, WriteAction) tell the evaluator whether
    the agent wants more source input or has produced a prediction.
    """

    def is_read(self) -> bool:
        """
        Whether the action is a read action

        Returns:
            bool: True if the action is a read action.
        """
        # FIX: the original `assert NotImplementedError` is a no-op — the
        # exception class object is truthy, so the assert always passes and
        # the method silently returns None. Raise so that subclasses which
        # forget to override is_read() fail loudly.
        raise NotImplementedError
25
+
26
+
27
class ReadAction(Action):
    """
    Action returned when the policy decides it needs one more source segment.

    Usage is simply ``return ReadAction()``.
    """

    def is_read(self) -> bool:
        # A ReadAction is, by definition, always a read.
        return True

    def __repr__(self) -> str:
        return "ReadAction()"
38
+
39
+
40
@dataclass
class WriteAction(Action):
    """
    Action returned when the policy has generated a prediction.

    Args:
        content (Union[str, List[float], Segment]): The prediction.
        finished (bool): Indicates if current sentence is finished.

    .. note:: For text the prediction is a str; for speech, it's a list.
    """

    content: Union[str, List[float], Segment]
    finished: bool

    def is_read(self) -> bool:
        # A WriteAction is never a read.
        return False

    def __repr__(self) -> str:
        return "WriteAction({}, finished={})".format(self.content, self.finished)
SimulEval/simuleval/agents/agent.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from inspect import signature
8
+ from argparse import Namespace, ArgumentParser
9
+ from simuleval.data.segments import Segment, TextSegment, SpeechSegment, EmptySegment
10
+ from typing import Optional
11
+ from .states import AgentStates
12
+ from .actions import Action
13
+
14
+
15
+ SEGMENT_TYPE_DICT = {"text": TextSegment, "speech": SpeechSegment}
16
+
17
+
18
class GenericAgent:
    """
    Generic Agent class.

    Subclasses must set :attr:`source_type` and :attr:`target_type`
    and implement :meth:`policy`.
    """

    source_type = None
    target_type = None

    def __init__(self, args: Optional[Namespace] = None) -> None:
        if args is not None:
            self.args = args
        # Both modality types must be declared by the subclass.
        assert self.source_type
        assert self.target_type
        self.device = "cpu"

        self.states = self.build_states()
        self.reset()

    def build_states(self) -> AgentStates:
        """
        Build states instance for agent

        Returns:
            AgentStates: agent states
        """
        return AgentStates()

    def reset(self) -> None:
        """
        Reset agent, called every time when a new sentence coming in.
        """
        self.states.reset()

    def policy(self, states: Optional[AgentStates] = None) -> Action:
        """
        The policy to make decision every time
        when the system has new input.
        The function has to return an Action instance

        Args:
            states (Optional[AgentStates]): an optional states for stateless agent

        Returns:
            Action: The actions to make at certain point.

        .. note:

            WriteAction means that the system has a prediction.
            ReadAction means that the system needs more source.
            When states are provided, the agent will become stateless and ignore self.states.
        """
        # FIX: was `assert NotImplementedError`, which always passes (the
        # exception class is truthy) and silently returned None. Raise so a
        # subclass that forgets to implement policy() fails loudly.
        raise NotImplementedError

    def push(
        self, source_segment: Segment, states: Optional[AgentStates] = None
    ) -> None:
        """
        The function to process the incoming information.

        Args:
            source_segment (Segment): incoming source segment
            states (Optional[AgentStates]): an optional states for stateless agent
        """
        if states is None:
            states = self.states
        states.update_source(source_segment)

    def pop(self, states: Optional[AgentStates] = None) -> Segment:
        """
        The function to generate system output.
        By default, it first runs policy,
        and then returns the output segment.
        If the policy decides to read,
        it will return an empty segment.

        Args:
            states (Optional[AgentStates]): an optional states for stateless agent

        Returns:
            Segment: segment to return.
        """
        # A zero-arg policy() is stateful; a policy(states) signature marks
        # the agent as stateless and self.states is ignored.
        if len(signature(self.policy).parameters) == 0:
            is_stateless = False
            if states:
                raise RuntimeError("Feeding states to stateful agents.")
        else:
            is_stateless = True

        if states is None:
            states = self.states

        if states.target_finished:
            return EmptySegment(finished=True)

        if is_stateless:
            action = self.policy(states)
        else:
            action = self.policy()

        if not isinstance(action, Action):
            raise RuntimeError(
                f"The return value of {self.policy.__qualname__} is not an {Action.__qualname__} instance"
            )
        if action.is_read():
            return EmptySegment()
        else:
            if isinstance(action.content, Segment):
                # NOTE(review): this path returns the segment without calling
                # states.update_target(), unlike the branch below — confirm
                # whether that asymmetry is intentional.
                return action.content

            segment = SEGMENT_TYPE_DICT[self.target_type](
                index=0, content=action.content, finished=action.finished
            )
            states.update_target(segment)
            return segment

    def pushpop(
        self, segment: Segment, states: Optional[AgentStates] = None
    ) -> Segment:
        """
        Operate pop immediately after push.

        Args:
            segment (Segment): input segment

        Returns:
            Segment: output segment
        """
        self.push(segment, states)
        return self.pop(states)

    @staticmethod
    def add_args(parser: ArgumentParser):
        """
        Add agent arguments to parser.
        Has to be a static method.

        Args:
            parser (ArgumentParser): cli argument parser
        """
        pass

    @classmethod
    def from_args(cls, args):
        """Alternate constructor from parsed cli arguments."""
        return cls(args)

    def to(self, device: str, *args, **kwargs) -> None:
        """
        Move agent to specified device.

        Args:
            device (str): Device to move agent to.
        """
        pass

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}[{self.source_type} -> {self.target_type}]"

    def __str__(self) -> str:
        return self.__repr__()
177
+
178
+
179
class SpeechToTextAgent(GenericAgent):
    """GenericAgent with fixed modalities: speech input, text output."""

    source_type: str = "speech"
    target_type: str = "text"
187
+
188
+
189
class SpeechToSpeechAgent(GenericAgent):
    """GenericAgent with fixed modalities: speech input, speech output."""

    source_type: str = "speech"
    target_type: str = "speech"
197
+
198
+
199
class TextToSpeechAgent(GenericAgent):
    """GenericAgent with fixed modalities: text input, speech output."""

    source_type: str = "text"
    target_type: str = "speech"
207
+
208
+
209
class TextToTextAgent(GenericAgent):
    """GenericAgent with fixed modalities: text input, text output."""

    source_type: str = "text"
    target_type: str = "text"
SimulEval/simuleval/agents/pipeline.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import List, Optional
8
+ from simuleval.data.segments import Segment
9
+ from .agent import GenericAgent, AgentStates
10
+
11
+
12
class AgentPipeline(GenericAgent):
    """A pipeline of agents

    Source segments are pushed through each module in order; the output of
    module *i-1* feeds module *i*.

    Attributes:
        pipeline (list): a list of agent classes.

    """

    pipeline: List = []

    def __init__(self, module_list: List[GenericAgent]) -> None:
        self.module_list = module_list
        self.check_pipeline_types()

    def check_pipeline_types(self):
        """
        Verify adjacent modules are compatible: each module's source_type
        must equal the previous module's target_type.

        Raises:
            RuntimeError: if adjacent module types mismatch.
        """
        if len(self.pipeline) > 1:
            for i in range(1, len(self.pipeline)):
                if (
                    self.module_list[i].source_type
                    != self.module_list[i - 1].target_type
                ):
                    # FIX: the message previously mixed self.pipeline (classes)
                    # with self.module_list (instances) and was missing the
                    # closing parenthesis after the last target_type.
                    raise RuntimeError(
                        f"{self.module_list[i]}.source_type({self.module_list[i].source_type}) "
                        f"!= {self.module_list[i - 1]}.target_type({self.module_list[i - 1].target_type})"
                    )

    @property
    def source_type(self) -> Optional[str]:
        # Pipeline consumes what its first module consumes.
        return self.module_list[0].source_type

    @property
    def target_type(self) -> Optional[str]:
        # Pipeline produces what its last module produces.
        return self.module_list[-1].target_type

    def reset(self) -> None:
        """Reset every module for a new sentence."""
        for module in self.module_list:
            module.reset()

    def build_states(self) -> List[AgentStates]:
        """Build one states object per module."""
        return [module.build_states() for module in self.module_list]

    def push(
        self, segment: Segment, states: Optional[List[Optional[AgentStates]]] = None
    ) -> None:
        """
        Push a segment through all modules but the last; each intermediate
        module runs pushpop so its output feeds the next module. The last
        module only receives a push (its pop happens in :meth:`pop`).

        Args:
            segment (Segment): input segment
            states (Optional[List[Optional[AgentStates]]]): per-module states
                for stateless operation.
        """
        if states is None:
            states = [None for _ in self.module_list]
        else:
            assert len(states) == len(self.module_list)

        for index, module in enumerate(self.module_list[:-1]):
            segment = module.pushpop(segment, states[index])
        self.module_list[-1].push(segment, states[-1])

    def pop(self, states: Optional[List[Optional[AgentStates]]] = None) -> Segment:
        """
        Pop an output segment from the last module.

        Args:
            states (Optional[List[Optional[AgentStates]]]): per-module states
                for stateless operation.

        Returns:
            Segment: output segment of the pipeline.
        """
        if states is None:
            last_states = None
        else:
            assert len(states) == len(self.module_list)
            last_states = states[-1]

        return self.module_list[-1].pop(last_states)

    @classmethod
    def add_args(cls, parser) -> None:
        """Collect cli arguments from every module class in the pipeline."""
        for module_class in cls.pipeline:
            module_class.add_args(parser)

    @classmethod
    def from_args(cls, args):
        """Instantiate every module class from parsed cli arguments."""
        assert len(cls.pipeline) > 0
        return cls([module_class.from_args(args) for module_class in cls.pipeline])

    def __repr__(self) -> str:
        # FIX: local variable typo `pipline_str` -> `pipeline_str`.
        pipeline_str = "\n\t".join(
            "\t".join(str(module).splitlines(True)) for module in self.module_list
        )
        return f"{self.__class__.__name__}(\n\t{pipeline_str}\n)"

    def __str__(self) -> str:
        return self.__repr__()
SimulEval/simuleval/agents/service.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ import os
6
+ import json
7
+ import logging
8
+ from tornado import web, ioloop
9
+ from simuleval.data.segments import segment_from_json_string
10
+ from simuleval import options
11
+
12
+ logger = logging.getLogger("simuleval.agent_server")
13
+
14
+
15
class SystemHandler(web.RequestHandler):
    """Base handler; GET / returns a JSON description of the system."""

    def initialize(self, system):
        self.system = system

    def get(self):
        info_payload = dict(info=str(self.system))
        self.write(json.dumps(info_payload))
21
+
22
+
23
class ResetHandle(SystemHandler):
    """POST /reset — reset the wrapped system for a new sentence."""

    def post(self):
        self.system.reset()
26
+
27
+
28
class OutputHandler(SystemHandler):
    """GET /output — pop one output segment from the system, returned as JSON."""

    def get(self):
        self.write(self.system.pop().json())
32
+
33
+
34
class InputHandler(SystemHandler):
    """PUT /input — push one source segment (JSON request body) into the system."""

    def put(self):
        incoming_segment = segment_from_json_string(self.request.body)
        self.system.push(incoming_segment)
38
+
39
+
40
def start_agent_service(system):
    """
    Serve *system* over HTTP on ``--remote-port`` (tornado).

    Routes: /reset (POST), /input (PUT), /output (GET), / (GET info).
    This call blocks forever in the IOLoop.
    """
    parser = options.general_parser()
    options.add_evaluator_args(parser)
    args, _ = parser.parse_known_args()

    routes = [
        (r"/reset", ResetHandle, {"system": system}),
        (r"/input", InputHandler, {"system": system}),
        (r"/output", OutputHandler, {"system": system}),
        (r"/", SystemHandler, {"system": system}),
    ]
    app = web.Application(routes, debug=False)

    # 1 GiB buffer: input segments (e.g. audio) can be large.
    app.listen(args.remote_port, max_buffer_size=1024**3)

    logger.info(
        f"Simultaneous Translation Server Started (process id {os.getpid()}). Listening to port {args.remote_port} "
    )
    ioloop.IOLoop.current().start()
+ ioloop.IOLoop.current().start()