|
|
|
|
|
""" |
|
|
Simple annotation setup for Voice Notes dataset |
|
|
Creates task list from audio files and AI transcripts |
|
|
""" |
|
|
|
|
|
import json |
|
|
import os |
|
|
from pathlib import Path |
|
|
|
|
|
def create_task_list():
    """Create the annotation task list and a dataset-metadata skeleton.

    Scans ``audio/`` for ``.mp3`` and ``.wav`` files, pairs each file with
    an optional AI transcript from ``aitranscripts/<id>.txt``, and writes:

    * ``annotations/task_list.json`` -- one annotation task per audio file
    * ``dataset_metadata.json``      -- one metadata skeleton per audio file

    Returns:
        int: number of annotation tasks created.
    """
    os.makedirs("annotations", exist_ok=True)

    # Collect every supported audio format in a single pass.
    audio_dir = Path("audio")
    audio_files = [p for pattern in ("*.mp3", "*.wav") for p in audio_dir.glob(pattern)]

    tasks = []
    dataset_metadata = []

    for audio_file in sorted(audio_files):
        file_id = audio_file.stem
        transcript_file = Path("aitranscripts") / f"{file_id}.txt"

        ai_transcript = ""
        if transcript_file.exists():
            # Explicit encoding: the platform default may not be UTF-8.
            ai_transcript = transcript_file.read_text(encoding="utf-8").strip()

        # Task entry for annotators to fill in by hand.
        # NOTE(review): prepare_for_hf() later reads extra top-level keys
        # (audio_challenges, recording_place, ...) from these tasks that are
        # only written to dataset_metadata below -- confirm intended schema.
        tasks.append({
            "id": file_id,
            "audio_path": str(audio_file),
            "ai_transcript": ai_transcript,
            "corrected_transcript": "",
            "parameters": {
                "speaker_info": "",
                "audio_quality": "",
                "environment": "",
                "corrections_needed": []
            },
            "status": "pending"
        })

        # Parallel metadata skeleton keyed by the same file id.
        dataset_metadata.append({
            "id": file_id,
            "audio": str(audio_file),
            "ai_transcript": ai_transcript,
            "corrected_transcript": "",
            "audio_challenges": [],
            "non_speaker_content": "",
            "conversation_languages": [],
            "recording_place": "",
            "microphone_type": "",
            "recording_environment": "",
            "audio_quality": 0,
            "content_type": []
        })

    with open("annotations/task_list.json", "w", encoding="utf-8") as f:
        json.dump(tasks, f, indent=2)

    with open("dataset_metadata.json", "w", encoding="utf-8") as f:
        json.dump(dataset_metadata, f, indent=2)

    print(f"Created {len(tasks)} annotation tasks")
    for task in tasks:
        print(f"- {task['id']}: {task['audio_path']}")

    return len(tasks)
|
|
|
|
|
def prepare_for_hf():
    """Prepare completed annotations for a Hugging Face dataset.

    Loads ``annotations/task_list.json``, keeps the tasks whose ``status``
    is ``"completed"``, and saves them to ``annotations/hf_dataset`` with
    the ``audio`` column cast to a decoded :class:`datasets.Audio` feature.

    Returns:
        The saved ``datasets.Dataset``, or ``None`` when the ``datasets``
        package is not installed or no task has been completed.
    """
    # Keep the try body minimal: only the import can raise ImportError,
    # so other failures (e.g. a missing task list) surface normally.
    try:
        from datasets import Dataset, Audio
    except ImportError:
        print("Install datasets: pip install datasets")
        return None

    with open("annotations/task_list.json", encoding="utf-8") as f:
        tasks = json.load(f)

    completed = [t for t in tasks if t["status"] == "completed"]

    if not completed:
        print("No completed annotations found")
        return None

    # NOTE(review): create_task_list() writes these metadata keys to
    # dataset_metadata.json, not into the task entries, so the .get()
    # defaults apply unless annotators add the keys to task_list.json by
    # hand -- confirm the intended annotation workflow.
    hf_data = []
    for task in completed:
        hf_data.append({
            "audio": task["audio_path"],
            "ai_transcript": task["ai_transcript"],
            "corrected_transcript": task["corrected_transcript"],
            "audio_challenges": task.get("audio_challenges", []),
            "non_speaker_content": task.get("non_speaker_content", ""),
            "conversation_languages": task.get("conversation_languages", []),
            "recording_place": task.get("recording_place", ""),
            "microphone_type": task.get("microphone_type", ""),
            "recording_environment": task.get("recording_environment", ""),
            "audio_quality": task.get("audio_quality", 0),
            "content_type": task.get("content_type", [])
        })

    dataset = Dataset.from_list(hf_data)
    # Cast the path column so HF decodes the audio files on access.
    dataset = dataset.cast_column("audio", Audio())

    dataset.save_to_disk("annotations/hf_dataset")
    print(f"HF dataset saved with {len(completed)} completed annotations")

    return dataset
|
|
|
|
|
if __name__ == "__main__":
    # Build the task list, then walk the operator through the manual steps.
    create_task_list()
    for message in (
        "\nNext steps:",
        "1. Edit annotations/task_list.json",
        "2. Add corrected transcripts and parameters",
        "3. Set status to 'completed' when done",
        "4. Run prepare_for_hf() to create HF dataset",
    ):
        print(message)