OpenTransformer committed on
Commit
5b32eaa
·
verified ·
1 Parent(s): c8d8679

Upload audio_crawler.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. audio_crawler.py +420 -0
audio_crawler.py ADDED
@@ -0,0 +1,420 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ OpenTransformers Audio Crawler v1.0
4
+ Grabby hands for audio data - speech, music, sound effects.
5
+
6
+ Grabs audio files + metadata (title, description, duration, tags)
7
+ Perfect for ASR, TTS, music generation training.
8
+
9
+ Usage:
10
+ python audio_crawler.py --seeds audio_seeds.txt --output /workspace/audio --max-gb 10
11
+ python audio_crawler.py --seed "https://freesound.org" --min-duration 5 --max-duration 300
12
+ """
13
+
14
+ import asyncio
15
+ import aiohttp
16
+ import argparse
17
+ import hashlib
18
+ import json
19
+ import os
20
+ import re
21
+ import time
22
+ import random
23
+ import struct
24
+ from dataclasses import dataclass, asdict
25
+ from datetime import datetime
26
+ from pathlib import Path
27
+ from typing import Optional, Set, Dict, List, Tuple
28
+ from urllib.parse import urljoin, urlparse
29
+ from collections import defaultdict
30
+
31
+ # ============= AUDIO UTILITIES =============
32
+
33
# File extensions treated as audio when classifying links (lowercase, dot included).
AUDIO_EXTENSIONS = {'.mp3', '.wav', '.ogg', '.flac', '.m4a', '.aac', '.opus', '.wma'}
# Audio MIME types.  NOTE(review): defined but never referenced in this file -
# presumably intended for Content-Type filtering; confirm before removing.
AUDIO_MIMES = {'audio/mpeg', 'audio/wav', 'audio/ogg', 'audio/flac', 'audio/mp4', 'audio/aac', 'audio/opus', 'audio/x-wav'}
35
+
36
def get_audio_info_from_header(data: bytes) -> Dict:
    """Sniff the container format and basic stream parameters from a file header.

    Args:
        data: Leading bytes of an audio file (the whole file also works).

    Returns:
        Dict with keys 'format' (str), 'duration', 'sample_rate', 'channels'
        (ints, left at 0 when not derivable from the header alone).
        Never raises on malformed or truncated input.
    """
    info = {'format': 'unknown', 'duration': 0, 'sample_rate': 0, 'channels': 0}

    if len(data) < 12:
        return info

    # MP3: ID3v2 tag or an MPEG frame sync word (11 set bits).
    if data[:3] == b'ID3' or (data[0] == 0xff and (data[1] & 0xe0) == 0xe0):
        info['format'] = 'mp3'
        # Duration would require walking frames / bitrate; left at 0 here.

    # WAV: RIFF container with form type WAVE; walk chunks to find 'fmt '.
    elif data[:4] == b'RIFF' and data[8:12] == b'WAVE':
        info['format'] = 'wav'
        i = 12
        while i < len(data) - 8:
            chunk_id = data[i:i+4]
            chunk_size = struct.unpack('<I', data[i+4:i+8])[0]
            if chunk_id == b'fmt ':
                # Guard against a truncated buffer before unpacking the fields.
                if chunk_size >= 16 and i + 16 <= len(data):
                    info['channels'] = struct.unpack('<H', data[i+10:i+12])[0]
                    info['sample_rate'] = struct.unpack('<I', data[i+12:i+16])[0]
                break
            # RIFF chunks are word-aligned: an odd size is followed by a pad byte.
            i += 8 + chunk_size + (chunk_size & 1)

    # OGG page header.
    elif data[:4] == b'OggS':
        info['format'] = 'ogg'

    # FLAC: 'fLaC' marker followed by the STREAMINFO metadata block.
    elif data[:4] == b'fLaC':
        info['format'] = 'flac'
        if len(data) >= 22:
            # STREAMINFO: sample rate is a 20-bit big-endian field at byte 18.
            info['sample_rate'] = (data[18] << 12) | (data[19] << 4) | (data[20] >> 4)
            info['channels'] = ((data[20] >> 1) & 0x7) + 1

    # MP4/M4A: ISO-BMFF files carry 'ftyp' at offset 4 (the brand string such
    # as 'M4A ' or 'mp42' follows at offset 8).  The previous check compared
    # brand strings at offset 4, which can never match a valid file.
    elif data[4:8] == b'ftyp':
        info['format'] = 'm4a'

    return info
81
+
82
+
83
def extract_audio_links(html: str, base_url: str, extensions: Optional[Set[str]] = None) -> List[Dict]:
    """Extract audio URLs (with nearby text context) from an HTML/RSS document.

    Three sources are scanned: direct <a href> links to audio files, HTML5
    <audio>/<source> tags, and podcast <enclosure> URLs.  Relative hrefs are
    resolved against *base_url*; only http(s) results are kept.

    Args:
        html: Raw page markup.
        base_url: Page URL used to resolve relative links.
        extensions: Optional override of the recognised audio extensions
            (defaults to the module-level AUDIO_EXTENSIONS).

    Returns:
        List of dicts with 'url', 'context', 'title', 'description' keys
        ('title'/'description' are left empty for later enrichment).
    """
    exts = tuple(AUDIO_EXTENSIONS if extensions is None else extensions)
    audio_links: List[Dict] = []

    # Direct audio file links: anchor hrefs ending in a known extension.
    for match in re.finditer(r'href=["\']([^"\']+)["\']', html, re.I):
        href = match.group(1)
        if href.lower().endswith(exts):
            try:
                absolute = urljoin(base_url, href)
            except ValueError:
                # urljoin rejects malformed URLs - skip, keep scanning the page.
                continue
            if absolute.startswith(('http://', 'https://')):
                # Keep ~150 chars of surrounding markup, flattened to plain text.
                start = max(0, match.start() - 150)
                end = min(len(html), match.end() + 150)
                context = re.sub(r'<[^>]+>', ' ', html[start:end])
                context = re.sub(r'\s+', ' ', context).strip()
                audio_links.append({
                    'url': absolute,
                    'context': context[:300],
                    'title': '',
                    'description': ''
                })

    # HTML5 <audio> tags: src on the tag itself or on a nested <source>.
    for match in re.finditer(r'<audio[^>]*>.*?</audio>', html, re.I | re.DOTALL):
        audio_tag = match.group(0)
        src_match = re.search(r'src=["\']([^"\']+)["\']', audio_tag, re.I)
        if not src_match:
            src_match = re.search(r'<source[^>]+src=["\']([^"\']+)["\']', audio_tag, re.I)
        if src_match:
            try:
                absolute = urljoin(base_url, src_match.group(1))
            except ValueError:
                continue
            if absolute.startswith(('http://', 'https://')):
                audio_links.append({
                    'url': absolute,
                    'context': '',
                    'title': '',
                    'description': ''
                })

    # Podcast RSS <enclosure> URLs (absolute in well-formed feeds).
    for match in re.finditer(r'<enclosure[^>]+url=["\']([^"\']+)["\']', html, re.I):
        url = match.group(1)
        if url.lower().endswith(exts):
            audio_links.append({
                'url': url,
                'context': 'podcast',
                'title': '',
                'description': ''
            })

    return audio_links
142
+
143
+
144
def extract_page_links(html: str, base_url: str, skip_extensions: Optional[Set[str]] = None) -> Set[str]:
    """Collect crawlable page URLs from anchor hrefs in *html*.

    Filters out in-page anchors and javascript:/mailto:/tel:/data: pseudo
    links, skips direct media files, resolves relative hrefs against
    *base_url*, strips fragments, and drops non-http(s) or overlong
    (>= 500 char) URLs.

    Args:
        html: Raw page markup.
        base_url: Page URL used to resolve relative links.
        skip_extensions: Optional override of the media extensions to skip
            (defaults to AUDIO_EXTENSIONS plus common video/image suffixes).

    Returns:
        Set of absolute candidate page URLs.
    """
    if skip_extensions is None:
        skip_extensions = AUDIO_EXTENSIONS | {'.mp4', '.avi', '.mov', '.jpg', '.png', '.gif'}
    skip = tuple(skip_extensions)

    links: Set[str] = set()
    for match in re.finditer(r'href=["\']([^"\']+)["\']', html, re.I):
        href = match.group(1)
        # Non-navigational pseudo-schemes and same-page anchors.
        if href.startswith(('#', 'javascript:', 'mailto:', 'tel:', 'data:')):
            continue
        # Direct media files are handled by the audio extractor, not crawled.
        if href.lower().endswith(skip):
            continue
        try:
            absolute = urljoin(base_url, href)
        except ValueError:
            # Malformed URL - not worth aborting the whole page for.
            continue
        if absolute.startswith(('http://', 'https://')):
            absolute = absolute.split('#')[0]  # drop the fragment
            if len(absolute) < 500:
                links.add(absolute)
    return links
164
+
165
+
166
def get_domain(url: str) -> str:
    """Return the lower-cased host part of *url*, or '' if it cannot be parsed."""
    try:
        return urlparse(url).netloc.lower()
    except ValueError:
        # urlparse raises ValueError on e.g. invalid IPv6 bracket syntax.
        return ""
171
+
172
+
173
+ # ============= CRAWLER =============
174
+
175
class AudioCrawler:
    """Asynchronous breadth-first crawler that harvests audio files.

    A pool of worker coroutines consumes page URLs from a shared queue.
    Each fetched HTML page has its audio links extracted and downloaded,
    deduplicated by URL and by MD5 of the content, saved under
    ``<output_dir>/audio/`` with a content-addressed name, and described by
    one JSON line in a timestamped ``audio_<ts>.jsonl`` manifest.  Crawling
    stops when the page or byte budget is reached or the queue stays empty.
    """

    # Chosen at random per request so requests resemble a regular browser.
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36',
    ]

    def __init__(
        self,
        output_dir: str = 'audio_data',           # root for audio/ and the .jsonl manifest
        max_workers: int = 30,                    # concurrent worker coroutines / connections
        max_pages: int = 50000,                   # stop after this many HTML pages
        max_bytes: int = 10 * 1024**3,            # global audio byte budget
        timeout: int = 60,                        # per-request timeout, seconds
        min_bytes: int = 50000,  # 50KB minimum
        max_bytes_per_file: int = 100 * 1024**2,  # 100MB max per file
        min_duration: int = 5,  # seconds (if detectable)
        max_duration: int = 600,  # 10 min max
    ):
        # Output layout: <output_dir>/audio/<md5>.<ext> plus a manifest file.
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)
        (self.output_dir / 'audio').mkdir(exist_ok=True)

        self.max_workers = max_workers
        self.max_pages = max_pages
        self.max_bytes = max_bytes
        self.timeout = timeout
        self.min_bytes = min_bytes
        self.max_bytes_per_file = max_bytes_per_file
        # NOTE(review): min_duration/max_duration are stored but never enforced
        # anywhere in this class - header sniffing does not yield a duration.
        # Confirm intent before relying on these limits.
        self.min_duration = min_duration
        self.max_duration = max_duration

        # Crawl frontier plus dedup sets: page URLs queued/seen, audio URLs
        # already attempted, and MD5 hashes of saved audio content.
        self.page_queue: asyncio.Queue = asyncio.Queue()
        self.seen_pages: Set[str] = set()
        self.seen_audio: Set[str] = set()
        self.seen_hashes: Set[str] = set()
        # Cooperative stop flag checked by every worker loop.
        self.running = True

        self.stats = {'pages': 0, 'audio': 0, 'bytes': 0, 'skipped': 0, 'errors': 0}
        self.start_time = time.time()

        # Opened lazily in crawl(); writes are serialized via output_lock.
        self.output_file = None
        self.output_lock = asyncio.Lock()

    def _report(self) -> str:
        """Return a one-line progress summary (pages, files, MB, rate, skips, errors)."""
        elapsed = time.time() - self.start_time
        rate = self.stats['audio'] / elapsed if elapsed > 0 else 0
        mb = self.stats['bytes'] / (1024**2)
        return f"Pages: {self.stats['pages']} | Audio: {self.stats['audio']} | {mb:.1f} MB | {rate:.2f}/s | Skip: {self.stats['skipped']} | Err: {self.stats['errors']}"

    async def _fetch_page(self, session: aiohttp.ClientSession, url: str) -> Tuple[Optional[str], Set[str]]:
        """Fetch one page and return (html, outgoing links), or (None, set()).

        Only HTML/XML/RSS responses are parsed (XML/RSS kept for podcast
        feeds).  All failures are swallowed so one bad page never kills a
        worker - this is deliberate best-effort crawling.
        """
        try:
            headers = {'User-Agent': random.choice(self.USER_AGENTS)}
            async with session.get(url, headers=headers, timeout=self.timeout, ssl=False, allow_redirects=True) as resp:
                ct = resp.headers.get('content-type', '').lower()
                if 'text/html' not in ct and 'text/xml' not in ct and 'application/rss' not in ct:
                    return None, set()
                html = await resp.text(errors='ignore')
                links = extract_page_links(html, url)
                return html, links
        except:
            # Intentional catch-all: treat any network/decoding error as "no page".
            return None, set()

    async def _fetch_audio(self, session: aiohttp.ClientSession, audio_info: Dict, source_page: str) -> Optional[Dict]:
        """Download one audio URL, save it to disk, and return its metadata record.

        Returns None when the URL was already attempted, the file falls
        outside the size bounds, the content is a duplicate (MD5), or any
        error occurs (counted in stats['errors']).
        """
        url = audio_info['url']

        # URL-level dedup before spending a request.
        if url in self.seen_audio:
            return None
        self.seen_audio.add(url)

        try:
            headers = {'User-Agent': random.choice(self.USER_AGENTS)}
            async with session.get(url, headers=headers, timeout=self.timeout, ssl=False, allow_redirects=True) as resp:

                # Cheap size gate from Content-Length before reading the body.
                cl = resp.headers.get('content-length')
                if cl:
                    size = int(cl)
                    if size < self.min_bytes or size > self.max_bytes_per_file:
                        self.stats['skipped'] += 1
                        return None

                # NOTE(review): despite the comment, this reads the full body
                # into memory rather than streaming chunks to disk.
                data = await resp.read()

                if len(data) < self.min_bytes:
                    self.stats['skipped'] += 1
                    return None

                # Content-level dedup by MD5 of the raw bytes.
                audio_hash = hashlib.md5(data).hexdigest()
                if audio_hash in self.seen_hashes:
                    self.stats['skipped'] += 1
                    return None
                self.seen_hashes.add(audio_hash)

                # Sniff format / sample rate / channels from the file header.
                info = get_audio_info_from_header(data)

                # Extension: prefer the URL suffix, else map the sniffed format
                # (defaulting to mp3 when neither identifies the file).
                ext = 'bin'
                for e in AUDIO_EXTENSIONS:
                    if url.lower().endswith(e):
                        ext = e[1:]
                        break
                if ext == 'bin':
                    ext = {'mp3': 'mp3', 'wav': 'wav', 'ogg': 'ogg', 'flac': 'flac', 'm4a': 'm4a'}.get(info['format'], 'mp3')

                # Save under a content-addressed name: <md5>.<ext>.
                fname = f"{audio_hash}.{ext}"
                fpath = self.output_dir / 'audio' / fname
                with open(fpath, 'wb') as f:
                    f.write(data)

                result = {
                    'url': url,
                    'source_page': source_page,
                    'domain': get_domain(url),
                    'timestamp': datetime.utcnow().isoformat(),
                    'context': audio_info.get('context', ''),
                    'title': audio_info.get('title', ''),
                    'description': audio_info.get('description', ''),
                    'format': info['format'],
                    'sample_rate': info['sample_rate'],
                    'channels': info['channels'],
                    'size_bytes': len(data),
                    'hash': audio_hash,
                    'file': fname
                }

                return result

        except Exception as e:
            self.stats['errors'] += 1
            return None

    async def _worker(self, session: aiohttp.ClientSession, worker_id: int):
        """Worker loop: pull a page URL, harvest its audio, enqueue new pages.

        Exits when the queue stays empty for 10 seconds or a global budget
        (max_bytes / max_pages) is reached; hitting a budget flips
        self.running so the other workers wind down too.
        """
        while self.running:
            try:
                page_url = await asyncio.wait_for(self.page_queue.get(), timeout=10.0)
            except asyncio.TimeoutError:
                # Queue stayed empty for the whole wait: assume crawl is done.
                if self.page_queue.empty():
                    break
                continue

            if self.stats['bytes'] >= self.max_bytes or self.stats['pages'] >= self.max_pages:
                self.running = False
                break

            html, links = await self._fetch_page(session, page_url)

            if html:
                self.stats['pages'] += 1

                # Extract and fetch audio sequentially for this page.
                audio_links = extract_audio_links(html, page_url)
                for audio_info in audio_links:
                    if not self.running:
                        break

                    result = await self._fetch_audio(session, audio_info, page_url)
                    if result:
                        self.stats['audio'] += 1
                        self.stats['bytes'] += result['size_bytes']

                        # Serialize manifest writes so JSON lines never interleave.
                        async with self.output_lock:
                            self.output_file.write(json.dumps(result) + '\n')

                # Queue new pages discovered on this one for the other workers.
                for link in links:
                    if link not in self.seen_pages and self.stats['pages'] < self.max_pages:
                        self.seen_pages.add(link)
                        await self.page_queue.put(link)

                # Periodic progress line every 50 pages.
                if self.stats['pages'] % 50 == 0:
                    print(f"[{datetime.now().strftime('%H:%M:%S')}] {self._report()}")

            self.page_queue.task_done()

    async def crawl(self, seeds: List[str]):
        """Run a full crawl: seed the queue, open outputs, spawn workers, report.

        Args:
            seeds: Starting page URLs (also marked as seen).
        """
        print(f"OpenTransformers Audio Crawler starting")
        print(f"Seeds: {len(seeds)} | Workers: {self.max_workers}")
        print(f"Target: {self.max_pages} pages / {self.max_bytes/1024**3:.1f} GB")
        print("-" * 60)

        for seed in seeds:
            self.seen_pages.add(seed)
            await self.page_queue.put(seed)

        # Line-buffered manifest so partial progress survives a crash.
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        output_path = self.output_dir / f'audio_{timestamp}.jsonl'
        self.output_file = open(output_path, 'w', buffering=1)

        connector = aiohttp.TCPConnector(limit=self.max_workers, ssl=False)
        async with aiohttp.ClientSession(connector=connector) as session:
            workers = [asyncio.create_task(self._worker(session, i)) for i in range(self.max_workers)]
            await asyncio.gather(*workers)

        self.output_file.close()

        print("-" * 60)
        print(f"DONE! {self._report()}")
        print(f"Metadata: {output_path}")
        audio_dir = self.output_dir / 'audio'
        files = list(audio_dir.glob('*'))
        print(f"Audio files: {len(files)} in {audio_dir}")
380
+
381
+
382
def main():
    """CLI entry point: parse arguments, collect seed URLs, run the crawler."""
    p = argparse.ArgumentParser(description='OpenTransformers Audio Crawler')
    p.add_argument('--seeds', type=str, help='File with seed URLs')
    p.add_argument('--seed', type=str, action='append', help='Single seed URL')
    p.add_argument('--output', type=str, default='audio_data')
    p.add_argument('--workers', type=int, default=30)
    p.add_argument('--max-pages', type=int, default=50000)
    p.add_argument('--max-gb', type=float, default=10.0)
    p.add_argument('--timeout', type=int, default=60)
    p.add_argument('--min-kb', type=int, default=50)
    p.add_argument('--max-mb', type=int, default=100)
    # Advertised in the module docstring usage but previously missing from the
    # parser; defaults match AudioCrawler.__init__.
    p.add_argument('--min-duration', type=int, default=5, help='Minimum audio duration, seconds')
    p.add_argument('--max-duration', type=int, default=600, help='Maximum audio duration, seconds')

    args = p.parse_args()

    # Seeds may come from a file (one URL per line), repeated --seed flags, or both.
    seeds = []
    if args.seeds:
        with open(args.seeds) as f:
            seeds.extend(line.strip() for line in f if line.strip())
    if args.seed:
        seeds.extend(args.seed)
    if not seeds:
        print("ERROR: Provide --seeds file or --seed URLs")
        return

    crawler = AudioCrawler(
        output_dir=args.output,
        max_workers=args.workers,
        max_pages=args.max_pages,
        max_bytes=int(args.max_gb * 1024**3),
        timeout=args.timeout,
        min_bytes=args.min_kb * 1024,
        max_bytes_per_file=args.max_mb * 1024**2,
        min_duration=args.min_duration,
        max_duration=args.max_duration,
    )

    asyncio.run(crawler.crawl(seeds))
417
+
418
+
419
# Script entry: run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()