polinaeterna commited on
Commit ·
f1cc9cf
1
Parent(s): 4395e8c
add scripts
Browse files- anton_old_script.py +54 -0
- generate_metadata_file.py +140 -0
anton_old_script.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Cut each Earnings-22 mp3 recording into per-sentence wav segments using the
# word-level timestamps from the matching ``.nlp`` alignment file, and collect
# a metadata.csv describing every saved segment.
import pandas as pd
import torchaudio
from glob import glob
import os
import csv
from tqdm import tqdm
import pandas  # NOTE(review): redundant (already imported as ``pd``); kept to avoid touching imports

# Punctuation marks treated as end-of-sentence.
sentence_bounds = {'!', '.', ';', '?', '…'}

files = list(sorted(glob("earnings22/media/*.mp3")))
metadata = []
for audio_file in tqdm(files):
    file_id = audio_file.split("/")[-1].split(".")[0]
    nlp_file = f"earnings22/aligned/{file_id}.nlp"

    speech, sr = torchaudio.load(audio_file)
    # Create the per-recording output directory once, not once per sentence.
    os.makedirs(f"earnings22/segmented/{file_id}", exist_ok=True)
    with open(nlp_file, "r") as nlp:
        start, end = None, None
        sentence = ""
        segment_id = 0
        csvreader = csv.DictReader(nlp, delimiter="|")
        for row in csvreader:
            punct = row["punctuation"].strip()
            sentence += row["token"]
            if punct:
                sentence += punct
            sentence += " "  # unconditional separator so unpunctuated words don't run together
            if start is None and row["ts"].strip():
                # 0.1 s lead-in padding, clamped at 0: a negative value would
                # produce a negative sample index below and silently slice
                # from the *end* of the waveform.
                start = max(float(row["ts"]) - 0.1, 0.0)
            if row["endTs"].strip():
                end = float(row["endTs"]) + 0.1  # 0.1 s tail padding
            if punct in sentence_bounds and start is not None and end is not None:
                # Sentence complete with both boundaries known: save the slice.
                segment = speech[:, int(start*sr):int(end*sr)+1].contiguous()
                torchaudio.save(f"earnings22/segmented/{file_id}/{segment_id}.wav",
                                segment, sr, encoding="PCM_S", bits_per_sample=16)

                data_row = {
                    "source_id": f"{file_id}",
                    "segment_id": segment_id,
                    "file": f"{file_id}/{segment_id}.wav",
                    "start_ts": start,
                    "end_ts": end,
                    "sentence": sentence.strip()
                }
                metadata.append(data_row)

                start, end = None, None
                sentence = ""
                segment_id += 1
pd.DataFrame(metadata).to_csv("earnings22/segmented/metadata.csv", index=False)
generate_metadata_file.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import csv
|
| 2 |
+
import argparse
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
SEPARATORS = {";", "!", ".", "?", "…"} #, '-', '–', ':'}
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def get_args():
    """Build and parse the command-line options for this script.

    Returns the parsed ``argparse.Namespace`` with attributes
    ``source_dir`` (str, required), ``output_filename`` (str, required)
    and ``split_on_speaker`` (bool flag, default False).
    """
    arg_parser = argparse.ArgumentParser("Prepare sharded audio archives")
    arg_parser.add_argument(
        "--source_dir",
        type=str,
        required=True,
        help="directory with original word-level transcriptions files",
    )
    arg_parser.add_argument(
        "--output_filename",
        type=str,
        required=True,
    )
    arg_parser.add_argument(
        "--split_on_speaker",
        action="store_true",
    )
    return arg_parser.parse_args()
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def generate_metadata_for_file(file, writer, split_on_speaker: bool, n_filtered_sentences: int) -> int:
    """Split one pipe-delimited ``.nlp`` transcription into sentences and
    write one CSV row per sentence via *writer*.

    Parameters:
        file: path-like with a ``.name`` attribute; read with
            ``csv.DictReader(delimiter="|")``. Rows are expected to carry at
            least the columns ``token``, ``punctuation``, ``ts``, ``endTs``
            and ``speaker`` — TODO confirm against the .nlp format.
        writer: a ``csv.DictWriter`` with fieldnames audio_id, segment_id,
            start_time, end_time, speaker, text.
        split_on_speaker: when True, a speaker change also terminates the
            current sentence (flushed up to the previous speaker's last token).
        n_filtered_sentences: running count of sentences dropped because of
            missing timestamps or mixed speakers; updated value is returned.

    Returns:
        The updated ``n_filtered_sentences`` counter.

    NOTE(review): the indentation of this body was reconstructed from a
    whitespace-mangled source — confirm branch nesting against the original.
    """
    audio_id = file.name.split(".")[0]
    with open(file) as csvfile:
        tokens = list(csv.DictReader(csvfile, delimiter="|"))
    # start_time, end_time = None, None
    sentence_tokens = []   # tokens accumulated for the in-progress sentence
    sentence_text = ""     # rendered text of the in-progress sentence
    segment_id = 0         # per-file running sentence index
    previous_speaker = tokens[0]["speaker"]
    for token in tokens:
        punct = token["punctuation"].strip()
        speaker = token["speaker"]
        sentence_tokens.append(token)
        if split_on_speaker and speaker != previous_speaker and len(sentence_tokens) > 1:
            # Speaker changed mid-sentence: flush everything up to the
            # previous speaker's last token ([-2]; [-1] is the new speaker's
            # first token, which starts the next sentence).
            start_time, end_time = sentence_tokens[0]["ts"].strip(), sentence_tokens[-2]["endTs"].strip()
            if start_time == "":
                # Missing start timestamp -> drop the sentence and restart.
                sentence_tokens = []
                sentence_text = ""
                previous_speaker = speaker
                n_filtered_sentences += 1
                continue
            if end_time == "":
                # Missing end timestamp -> drop the sentence and restart.
                sentence_tokens = []
                sentence_text = ""
                previous_speaker = speaker
                n_filtered_sentences += 1
                continue
            writer.writerow({
                "audio_id": audio_id,
                "segment_id": segment_id,
                "start_time": start_time,
                "end_time": end_time,
                "speaker": previous_speaker,  # the speaker the flushed text belongs to
                "text": sentence_text.strip(),
            })
            # The current token (new speaker) seeds the next sentence.
            sentence_tokens = [sentence_tokens[-1]]
            sentence_text = sentence_tokens[-1]["token"]
            segment_id += 1
        else:
            sentence_text += token["token"]
            if punct:
                sentence_text += punct
            # NOTE(review): separator placement reconstructed as unconditional
            # (matches word spacing in the output) — confirm against original.
            sentence_text += " "

        if punct in SEPARATORS:
            # Sentence-final punctuation: flush the whole accumulated sentence.
            start_time, end_time = sentence_tokens[0]["ts"].strip(), sentence_tokens[-1]["endTs"].strip()
            if start_time == "":
                sentence_tokens = []
                sentence_text = ""
                previous_speaker = speaker
                n_filtered_sentences += 1
                continue
            if end_time == "":
                sentence_tokens = []
                sentence_text = ""
                previous_speaker = speaker
                n_filtered_sentences += 1
                continue
            if len(list(set([t["speaker"] for t in sentence_tokens]))) > 1:
                if split_on_speaker:
                    # Unexpected: speaker changes should already have flushed above.
                    print("multiple speakers! they should have been filtered on the prev step, maybe something went wrong")
                    print(audio_id, sentence_tokens)
                else:
                    # NOTE(review): reconstructed as dropping only in the
                    # non-split mode (split mode merely warns) — verify.
                    print("multiple speakers, filtering them")
                    print(audio_id, sentence_tokens)
                    sentence_tokens = []
                    sentence_text = ""
                    previous_speaker = speaker
                    n_filtered_sentences += 1
                    continue

            writer.writerow({
                "audio_id": audio_id,
                "segment_id": segment_id,
                "start_time": start_time,
                "end_time": end_time,
                "speaker": speaker,
                "text": sentence_text.strip(),
            })
            sentence_tokens = []
            sentence_text = ""
            segment_id += 1

        previous_speaker = speaker

    return n_filtered_sentences
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def generate_metadata(source_dir, output_filename, split_on_speaker):
    """Collect every ``.nlp`` transcription under *source_dir* (recursively,
    in sorted order) into a single sentence-level metadata CSV at
    *output_filename*, then report how many sentences were filtered out."""
    nlp_paths = sorted(Path(source_dir).glob("**/*.nlp"))
    columns = ["audio_id", "segment_id", "start_time", "end_time", "speaker", "text"]
    with open(output_filename, "w") as out_file:
        csv_out = csv.DictWriter(out_file, fieldnames=columns)
        csv_out.writeheader()
        dropped = 0
        for nlp_path in nlp_paths:
            dropped = generate_metadata_for_file(nlp_path, csv_out, split_on_speaker, dropped)
    print("sentences filtered: ", dropped)
|
| 129 |
+
|
| 130 |
+
def main():
    """CLI entry point: parse the arguments and run metadata generation."""
    options = get_args()
    generate_metadata(
        options.source_dir,
        options.output_filename,
        options.split_on_speaker,
    )


if __name__ == "__main__":
    main()
|