#!/usr/bin/env python3
"""
OpenTransformers Audio Crawler v1.0
Grabby hands for audio data - speech, music, sound effects.
Grabs audio files + metadata (title, description, duration, context)
Perfect for ASR, TTS, music generation training.
Usage:
python audio_crawler.py --seeds audio_seeds.txt --output /workspace/audio --max-gb 10
python audio_crawler.py --seed "https://freesound.org" --min-duration 5 --max-duration 300
"""
import asyncio
import aiohttp
import argparse
import hashlib
import json
import os
import re
import time
import random
import struct
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
from typing import Optional, Set, Dict, List, Tuple
from urllib.parse import urljoin, urlparse
from collections import defaultdict
# ============= AUDIO UTILITIES =============
AUDIO_EXTENSIONS = {'.mp3', '.wav', '.ogg', '.flac', '.m4a', '.aac', '.opus', '.wma'}
AUDIO_MIMES = {'audio/mpeg', 'audio/wav', 'audio/ogg', 'audio/flac', 'audio/mp4', 'audio/aac', 'audio/opus', 'audio/x-wav'}
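# Note: servers frequently mislabel audio (e.g. application/octet-stream for MP3s,
# video/mp4 for .m4a), so these MIME types are best treated as advisory, not a hard filter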
def get_audio_info_from_header(data: bytes) -> Dict:
"""Extract audio info from file header"""
info = {'format': 'unknown', 'duration': 0, 'sample_rate': 0, 'channels': 0}
if len(data) < 12:
return info
# MP3 (ID3 or sync word)
if data[:3] == b'ID3' or (data[0] == 0xff and (data[1] & 0xe0) == 0xe0):
        info['format'] = 'mp3'
        # Crude duration estimate from total size, assuming ~128 kbps CBR;
        # an exact figure would need a full frame parse
        info['duration'] = int(len(data) * 8 / 128_000)
# WAV/RIFF
elif data[:4] == b'RIFF' and data[8:12] == b'WAVE':
info['format'] = 'wav'
# Parse fmt chunk
i = 12
while i < len(data) - 8:
chunk_id = data[i:i+4]
chunk_size = struct.unpack('<I', data[i+4:i+8])[0]
if chunk_id == b'fmt ':
if chunk_size >= 16:
info['channels'] = struct.unpack('<H', data[i+10:i+12])[0]
info['sample_rate'] = struct.unpack('<I', data[i+12:i+16])[0]
break
            i += 8 + chunk_size + (chunk_size & 1)  # RIFF chunks are padded to even sizes
# OGG
elif data[:4] == b'OggS':
info['format'] = 'ogg'
# FLAC
elif data[:4] == b'fLaC':
info['format'] = 'flac'
if len(data) >= 22:
# STREAMINFO block
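            # (the spec mandates STREAMINFO as the first metadata block, starting at byte 8;
            # bytes 18-20 pack the 20-bit sample rate and the 3-bit channel count minus one)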
info['sample_rate'] = (data[18] << 12) | (data[19] << 4) | (data[20] >> 4)
info['channels'] = ((data[20] >> 1) & 0x7) + 1
    # M4A/AAC: ISO BMFF containers carry 'ftyp' at bytes 4-8 (the brand itself sits at 8-12)
    elif data[4:8] == b'ftyp':
        info['format'] = 'm4a'
return info
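# Worked example (illustrative, not part of the original): a hand-built WAV header
# parses as expected.
#   >>> import struct
#   >>> hdr = (b'RIFF' + struct.pack('<I', 28) + b'WAVE' + b'fmt '
#   ...        + struct.pack('<IHHIIHH', 16, 1, 2, 44100, 176400, 4, 16))
#   >>> get_audio_info_from_header(hdr)
#   {'format': 'wav', 'duration': 0, 'sample_rate': 44100, 'channels': 2}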
def extract_audio_links(html: str, base_url: str) -> List[Dict]:
"""Extract audio links with context from HTML"""
audio_links = []
# Direct audio file links
for match in re.finditer(r'href=["\']([^"\']+)["\']', html, re.I):
href = match.group(1)
lower = href.lower()
if any(lower.endswith(ext) for ext in AUDIO_EXTENSIONS):
try:
absolute = urljoin(base_url, href)
if absolute.startswith(('http://', 'https://')):
# Get surrounding text
start = max(0, match.start() - 150)
end = min(len(html), match.end() + 150)
context = re.sub(r'<[^>]+>', ' ', html[start:end])
context = re.sub(r'\s+', ' ', context).strip()
audio_links.append({
'url': absolute,
'context': context[:300],
'title': '',
'description': ''
})
            except Exception:
                pass
# HTML5 audio tags
for match in re.finditer(r'<audio[^>]*>.*?</audio>', html, re.I | re.DOTALL):
audio_tag = match.group(0)
# Find source
src_match = re.search(r'src=["\']([^"\']+)["\']', audio_tag, re.I)
if not src_match:
src_match = re.search(r'<source[^>]+src=["\']([^"\']+)["\']', audio_tag, re.I)
if src_match:
src = src_match.group(1)
try:
absolute = urljoin(base_url, src)
if absolute.startswith(('http://', 'https://')):
audio_links.append({
'url': absolute,
'context': '',
'title': '',
'description': ''
})
            except Exception:
                pass
# Podcast RSS/feed links (grab for later)
for match in re.finditer(r'<enclosure[^>]+url=["\']([^"\']+)["\']', html, re.I):
url = match.group(1)
if any(url.lower().endswith(ext) for ext in AUDIO_EXTENSIONS):
audio_links.append({
'url': url,
'context': 'podcast',
'title': '',
'description': ''
})
return audio_links
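# Worked example (illustrative): given <a href="/clips/rain.mp3">Rain on a tin roof</a>
# on https://example.com/sounds, this yields one entry with
# url='https://example.com/clips/rain.mp3' and context='Rain on a tin roof'.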
def extract_page_links(html: str, base_url: str) -> Set[str]:
"""Extract page links for crawling"""
links = set()
for match in re.finditer(r'href=["\']([^"\']+)["\']', html, re.I):
href = match.group(1)
if href.startswith(('#', 'javascript:', 'mailto:', 'tel:', 'data:')):
continue
lower = href.lower()
# Skip direct media files
if any(lower.endswith(ext) for ext in AUDIO_EXTENSIONS | {'.mp4', '.avi', '.mov', '.jpg', '.png', '.gif'}):
continue
try:
absolute = urljoin(base_url, href)
if absolute.startswith(('http://', 'https://')):
absolute = absolute.split('#')[0]
if len(absolute) < 500:
links.add(absolute)
        except Exception:
            pass
return links
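# Worked example (illustrative): <a href="/browse?page=2#top"> on https://example.com/browse
# becomes 'https://example.com/browse?page=2' (absolutized, fragment stripped).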
def get_domain(url: str) -> str:
try:
return urlparse(url).netloc.lower()
    except Exception:
        return ""
# ============= CRAWLER =============
class AudioCrawler:
USER_AGENTS = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36',
]
def __init__(
self,
output_dir: str = 'audio_data',
max_workers: int = 30,
max_pages: int = 50000,
max_bytes: int = 10 * 1024**3,
timeout: int = 60,
min_bytes: int = 50000, # 50KB minimum
max_bytes_per_file: int = 100 * 1024**2, # 100MB max per file
min_duration: int = 5, # seconds (if detectable)
max_duration: int = 600, # 10 min max
):
self.output_dir = Path(output_dir)
self.output_dir.mkdir(parents=True, exist_ok=True)
(self.output_dir / 'audio').mkdir(exist_ok=True)
self.max_workers = max_workers
self.max_pages = max_pages
self.max_bytes = max_bytes
self.timeout = timeout
self.min_bytes = min_bytes
self.max_bytes_per_file = max_bytes_per_file
self.min_duration = min_duration
self.max_duration = max_duration
self.page_queue: asyncio.Queue = asyncio.Queue()
self.seen_pages: Set[str] = set()
self.seen_audio: Set[str] = set()
self.seen_hashes: Set[str] = set()
self.running = True
self.stats = {'pages': 0, 'audio': 0, 'bytes': 0, 'skipped': 0, 'errors': 0}
self.start_time = time.time()
self.output_file = None
self.output_lock = asyncio.Lock()
def _report(self) -> str:
elapsed = time.time() - self.start_time
rate = self.stats['audio'] / elapsed if elapsed > 0 else 0
mb = self.stats['bytes'] / (1024**2)
return f"Pages: {self.stats['pages']} | Audio: {self.stats['audio']} | {mb:.1f} MB | {rate:.2f}/s | Skip: {self.stats['skipped']} | Err: {self.stats['errors']}"
async def _fetch_page(self, session: aiohttp.ClientSession, url: str) -> Tuple[Optional[str], Set[str]]:
try:
headers = {'User-Agent': random.choice(self.USER_AGENTS)}
async with session.get(url, headers=headers, timeout=self.timeout, ssl=False, allow_redirects=True) as resp:
ct = resp.headers.get('content-type', '').lower()
                if not any(t in ct for t in ('text/html', 'text/xml', 'application/xml', 'application/rss')):
return None, set()
html = await resp.text(errors='ignore')
links = extract_page_links(html, url)
return html, links
        except Exception:
            return None, set()
async def _fetch_audio(self, session: aiohttp.ClientSession, audio_info: Dict, source_page: str) -> Optional[Dict]:
url = audio_info['url']
if url in self.seen_audio:
return None
self.seen_audio.add(url)
try:
headers = {'User-Agent': random.choice(self.USER_AGENTS)}
async with session.get(url, headers=headers, timeout=self.timeout, ssl=False, allow_redirects=True) as resp:
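                # Hedged addition (not in the original): servers often answer audio-looking
                # URLs with an HTML error page, so reject clear non-audio types early.
                # AUDIO_MIMES would make a stricter whitelist, but mislabeling is common,
                # so only obvious mismatches are dropped.
                ct = resp.headers.get('content-type', '').split(';')[0].strip().lower()
                if ct in ('text/html', 'application/xhtml+xml'):
                    self.stats['skipped'] += 1
                    return None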
# Check content-length
cl = resp.headers.get('content-length')
if cl:
size = int(cl)
if size < self.min_bytes or size > self.max_bytes_per_file:
self.stats['skipped'] += 1
return None
                # Stream download in bounded chunks (servers may omit Content-Length)
                data = bytearray()
                async for chunk in resp.content.iter_chunked(64 * 1024):
                    data.extend(chunk)
                    if len(data) > self.max_bytes_per_file:
                        self.stats['skipped'] += 1
                        return None
                data = bytes(data)
                if len(data) < self.min_bytes:
                    self.stats['skipped'] += 1
                    return None
# Dedupe
audio_hash = hashlib.md5(data).hexdigest()
if audio_hash in self.seen_hashes:
self.stats['skipped'] += 1
return None
self.seen_hashes.add(audio_hash)
# Get audio info
info = get_audio_info_from_header(data)
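                # Hedged addition: enforce the duration bounds when an estimate exists
                # (duration 0 means "unknown", so formats without one pass through)
                if info['duration'] and not (self.min_duration <= info['duration'] <= self.max_duration):
                    self.stats['skipped'] += 1
                    return None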
# Determine extension
                ext = 'bin'
                url_path = urlparse(url).path.lower()  # ignore query strings when sniffing the extension
                for e in AUDIO_EXTENSIONS:
                    if url_path.endswith(e):
                        ext = e[1:]
                        break
if ext == 'bin':
ext = {'mp3': 'mp3', 'wav': 'wav', 'ogg': 'ogg', 'flac': 'flac', 'm4a': 'm4a'}.get(info['format'], 'mp3')
# Save file
fname = f"{audio_hash}.{ext}"
fpath = self.output_dir / 'audio' / fname
with open(fpath, 'wb') as f:
f.write(data)
result = {
'url': url,
'source_page': source_page,
'domain': get_domain(url),
'timestamp': datetime.utcnow().isoformat(),
'context': audio_info.get('context', ''),
'title': audio_info.get('title', ''),
'description': audio_info.get('description', ''),
                    'format': info['format'],
                    'duration': info['duration'],
'sample_rate': info['sample_rate'],
'channels': info['channels'],
'size_bytes': len(data),
'hash': audio_hash,
'file': fname
}
return result
        except Exception:
self.stats['errors'] += 1
return None
async def _worker(self, session: aiohttp.ClientSession, worker_id: int):
while self.running:
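            # Pull the next page URL; the 10 s timeout doubles as a shutdown check
            # so workers exit once the frontier drains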
try:
page_url = await asyncio.wait_for(self.page_queue.get(), timeout=10.0)
except asyncio.TimeoutError:
if self.page_queue.empty():
break
continue
if self.stats['bytes'] >= self.max_bytes or self.stats['pages'] >= self.max_pages:
self.running = False
break
html, links = await self._fetch_page(session, page_url)
if html:
self.stats['pages'] += 1
# Extract and fetch audio
audio_links = extract_audio_links(html, page_url)
for audio_info in audio_links:
if not self.running:
break
result = await self._fetch_audio(session, audio_info, page_url)
if result:
self.stats['audio'] += 1
self.stats['bytes'] += result['size_bytes']
async with self.output_lock:
self.output_file.write(json.dumps(result) + '\n')
# Queue new pages
for link in links:
if link not in self.seen_pages and self.stats['pages'] < self.max_pages:
self.seen_pages.add(link)
await self.page_queue.put(link)
            # Report progress every 50 successfully fetched pages
            if html and self.stats['pages'] % 50 == 0:
print(f"[{datetime.now().strftime('%H:%M:%S')}] {self._report()}")
self.page_queue.task_done()
async def crawl(self, seeds: List[str]):
print(f"OpenTransformers Audio Crawler starting")
print(f"Seeds: {len(seeds)} | Workers: {self.max_workers}")
print(f"Target: {self.max_pages} pages / {self.max_bytes/1024**3:.1f} GB")
print("-" * 60)
for seed in seeds:
self.seen_pages.add(seed)
await self.page_queue.put(seed)
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
output_path = self.output_dir / f'audio_{timestamp}.jsonl'
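        # Line-buffered (buffering=1) so each JSONL record is flushed as soon as it is written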
self.output_file = open(output_path, 'w', buffering=1)
connector = aiohttp.TCPConnector(limit=self.max_workers, ssl=False)
async with aiohttp.ClientSession(connector=connector) as session:
workers = [asyncio.create_task(self._worker(session, i)) for i in range(self.max_workers)]
await asyncio.gather(*workers)
self.output_file.close()
print("-" * 60)
print(f"DONE! {self._report()}")
print(f"Metadata: {output_path}")
audio_dir = self.output_dir / 'audio'
files = list(audio_dir.glob('*'))
print(f"Audio files: {len(files)} in {audio_dir}")
def main():
p = argparse.ArgumentParser(description='OpenTransformers Audio Crawler')
p.add_argument('--seeds', type=str, help='File with seed URLs')
p.add_argument('--seed', type=str, action='append', help='Single seed URL')
p.add_argument('--output', type=str, default='audio_data')
p.add_argument('--workers', type=int, default=30)
p.add_argument('--max-pages', type=int, default=50000)
p.add_argument('--max-gb', type=float, default=10.0)
p.add_argument('--timeout', type=int, default=60)
    p.add_argument('--min-kb', type=int, default=50)
    p.add_argument('--max-mb', type=int, default=100)
    p.add_argument('--min-duration', type=int, default=5, help='min seconds, enforced when detectable')
    p.add_argument('--max-duration', type=int, default=600, help='max seconds, enforced when detectable')
args = p.parse_args()
seeds = []
if args.seeds:
with open(args.seeds) as f:
seeds.extend(line.strip() for line in f if line.strip())
if args.seed:
seeds.extend(args.seed)
if not seeds:
print("ERROR: Provide --seeds file or --seed URLs")
return
crawler = AudioCrawler(
output_dir=args.output,
max_workers=args.workers,
max_pages=args.max_pages,
max_bytes=int(args.max_gb * 1024**3),
timeout=args.timeout,
min_bytes=args.min_kb * 1024,
        max_bytes_per_file=args.max_mb * 1024**2,
        min_duration=args.min_duration,
        max_duration=args.max_duration,
    )
asyncio.run(crawler.crawl(seeds))
if __name__ == '__main__':
main()