# si-eval-50 / filter_and_upload.py
# (Hub page residue preserved as comments: uploaded by tsw0411, commit 0ddb42e, verified)
import json
from datasets import load_dataset, Dataset
def check_no_overlap_and_min_duration(utterances, min_duration=1.2):
    """Validate the timing of a conversation's utterances.

    Returns True only when every utterance has at least one word, spans at
    least ``min_duration`` seconds, and starts no earlier than the previous
    utterance ended. An empty utterance list is rejected.
    """
    # An empty conversation is not a valid sample.
    if not utterances:
        return False
    last_end = -float("inf")
    for utterance in utterances:
        word_list = utterance["words"]
        if not word_list:
            return False
        start = word_list[0]["start_time"]
        end = word_list[-1]["end_time"]
        # Reject if this utterance begins before the previous one finished,
        # or if it is shorter than the required minimum duration.
        if start < last_end or end - start < min_duration:
            return False
        last_end = end
    return True
def main():
    """Stream the humanify/si test splits, keep the first 50 valid samples,
    and push them to the humanify/si-eval-50 hub dataset.

    A sample is valid when its utterances have no overlaps and each lasts at
    least the minimum duration (see check_no_overlap_and_min_duration).
    The upload only happens when exactly 50 samples were collected.
    """
    ds1 = load_dataset("humanify/si", name="naturalistic", split="test", streaming=True)
    ds2 = load_dataset("humanify/si", name="improvised", split="test", streaming=True)
    # Iterate both streaming splits back-to-back without materializing them.
    from itertools import chain
    ds = chain(ds1, ds2)
    selected = []
    for sample in ds:
        utterances = json.loads(sample["utterances_json"])
        if check_no_overlap_and_min_duration(utterances):
            selected.append(sample)
            # Fixed: progress denominator said 100 but the target is 50.
            print(f"[{len(selected)}/50] Selected: {sample['conversation_id']}")
            if len(selected) >= 50:
                break
    print(f"\nTotal selected: {len(selected)}")
    if not selected:
        # Fixed: the original indexed selected[0] unconditionally and would
        # raise IndexError when no sample met the criteria.
        print("WARNING: Not enough samples meeting criteria!")
        return
    if len(selected) < 50:
        print("WARNING: Not enough samples meeting criteria!")
    # Transpose the list of row-dicts into the column-dict shape that
    # Dataset.from_dict expects.
    rows = {k: [] for k in selected[0].keys()}
    for s in selected:
        for k, v in s.items():
            rows[k].append(v)
    eval_ds = Dataset.from_dict(rows)
    print(len(eval_ds))
    if len(eval_ds) == 50:
        print(f"\nDataset info: {eval_ds}")
        print("Pushing to hub: humanify/si-eval-50 ...")
        eval_ds.push_to_hub("humanify/si-eval-50", split="test")
        print("Done!")
    else:
        # Skip the upload unless we hit exactly 50 samples.
        print(len(eval_ds))
# Run the filter-and-upload pipeline only when executed as a script,
# not when imported as a module.
if __name__ == "__main__":
    main()