#!/usr/bin/env python3
"""
Simple annotation setup for Voice Notes dataset
Creates task list from audio files and AI transcripts
"""
import json
import os
from pathlib import Path
def create_task_list():
    """Build the annotation task list and the dataset metadata skeleton.

    Scans ``audio/`` for ``.mp3``/``.wav`` files, pairs each with its AI
    transcript from ``aitranscripts/<id>.txt`` (empty string when missing),
    and writes two JSON files:

    - ``annotations/task_list.json`` -- editable entries for annotators
    - ``dataset_metadata.json``      -- per-file skeleton with every dataset
      field pre-filled with an empty default

    Returns:
        int: the number of annotation tasks created.
    """
    os.makedirs("annotations", exist_ok=True)

    # Collect supported audio files; sort the combined list for a stable,
    # reproducible task order. (Extension match is case-sensitive, as before.)
    audio_dir = Path("audio")
    audio_files = sorted(
        f for pattern in ("*.mp3", "*.wav") for f in audio_dir.glob(pattern)
    )

    tasks = []
    dataset_metadata = []
    for audio_file in audio_files:
        file_id = audio_file.stem
        transcript_file = Path("aitranscripts") / f"{file_id}.txt"

        # A missing transcript is tolerated: annotators start from scratch.
        # Explicit UTF-8 avoids platform-default-encoding surprises.
        ai_transcript = ""
        if transcript_file.exists():
            ai_transcript = transcript_file.read_text(encoding="utf-8").strip()

        tasks.append({
            "id": file_id,
            "audio_path": str(audio_file),
            "ai_transcript": ai_transcript,
            "corrected_transcript": "",
            "parameters": {
                "speaker_info": "",
                "audio_quality": "",
                "environment": "",
                "corrections_needed": [],
            },
            "status": "pending",
        })

        # Metadata entry mirrors the HF schema consumed by prepare_for_hf().
        dataset_metadata.append({
            "id": file_id,
            "audio": str(audio_file),
            "ai_transcript": ai_transcript,
            "corrected_transcript": "",
            "audio_challenges": [],
            "non_speaker_content": "",
            "conversation_languages": [],
            "recording_place": "",
            "microphone_type": "",
            "recording_environment": "",
            "audio_quality": 0,
            "content_type": [],
        })

    # Persist both files. ensure_ascii=False keeps non-English transcript
    # text human-readable in the JSON that annotators must hand-edit.
    with open("annotations/task_list.json", "w", encoding="utf-8") as f:
        json.dump(tasks, f, indent=2, ensure_ascii=False)
    with open("dataset_metadata.json", "w", encoding="utf-8") as f:
        json.dump(dataset_metadata, f, indent=2, ensure_ascii=False)

    print(f"Created {len(tasks)} annotation tasks")
    for task in tasks:
        print(f"- {task['id']}: {task['audio_path']}")
    return len(tasks)
def prepare_for_hf():
    """Convert completed annotations into a Hugging Face dataset.

    Reads ``annotations/task_list.json``, keeps tasks whose ``status`` is
    ``"completed"``, casts the ``audio`` column with ``datasets.Audio``, and
    saves the result to ``annotations/hf_dataset``.

    Returns:
        datasets.Dataset on success, or None when the ``datasets`` package
        is not installed, the task list file is missing, or no task has been
        completed yet.
    """
    # Keep the try minimal: only the import should be guarded. The original
    # wrapped the whole body, so a FileNotFoundError from the open() below
    # escaped as an unhandled traceback.
    try:
        from datasets import Dataset, Audio
    except ImportError:
        print("Install datasets: pip install datasets")
        return None

    task_file = Path("annotations/task_list.json")
    if not task_file.exists():
        print("annotations/task_list.json not found - run create_task_list() first")
        return None
    with open(task_file, encoding="utf-8") as f:
        tasks = json.load(f)

    completed = [t for t in tasks if t["status"] == "completed"]
    if not completed:
        print("No completed annotations found")
        return None

    # NOTE(review): create_task_list() stores annotation fields under
    # task["parameters"], not at the top level, so these .get() lookups fall
    # back to their defaults unless annotators add top-level keys by hand —
    # confirm the intended task_list.json schema with the annotation docs.
    hf_data = [
        {
            "audio": task["audio_path"],
            "ai_transcript": task["ai_transcript"],
            "corrected_transcript": task["corrected_transcript"],
            "audio_challenges": task.get("audio_challenges", []),
            "non_speaker_content": task.get("non_speaker_content", ""),
            "conversation_languages": task.get("conversation_languages", []),
            "recording_place": task.get("recording_place", ""),
            "microphone_type": task.get("microphone_type", ""),
            "recording_environment": task.get("recording_environment", ""),
            "audio_quality": task.get("audio_quality", 0),
            "content_type": task.get("content_type", []),
        }
        for task in completed
    ]

    dataset = Dataset.from_list(hf_data)
    dataset = dataset.cast_column("audio", Audio())
    dataset.save_to_disk("annotations/hf_dataset")
    print(f"HF dataset saved with {len(completed)} completed annotations")
    return dataset
if __name__ == "__main__":
    create_task_list()
    # Print the manual-annotation workflow guidance.
    for line in (
        "\nNext steps:",
        "1. Edit annotations/task_list.json",
        "2. Add corrected transcripts and parameters",
        "3. Set status to 'completed' when done",
        "4. Run prepare_for_hf() to create HF dataset",
    ):
        print(line)