Spaces:
Sleeping
Sleeping
| import requests | |
| from bs4 import BeautifulSoup | |
| import re | |
| import json | |
| from urllib.parse import urljoin, quote | |
| import time | |
class SoundgasmScraper:
    """Scraper for soundgasm.net audio pages.

    Soundgasm has no native search API, so :meth:`search_audio` goes through
    Google's ``site:`` operator and then scrapes each discovered page
    individually with :meth:`get_audio_info`.
    """

    # Seconds to wait on any HTTP request before giving up.  Without a
    # timeout, requests can hang indefinitely on a stalled connection.
    REQUEST_TIMEOUT = 10

    def __init__(self):
        self.base_url = "https://soundgasm.net"
        self.session = requests.Session()
        # Browser-like User-Agent reduces the chance of being blocked.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })

    def search_audio(self, query, max_results=10):
        """Search for audio on Soundgasm.net.

        Since Soundgasm doesn't have a built-in search, this uses Google's
        ``site:`` search and extracts soundgasm.net links from the results.

        Args:
            query: Free-text search terms.
            max_results: Maximum number of audio pages to fetch details for.

        Returns:
            List of info dicts as produced by :meth:`get_audio_info`.
            Returns whatever was collected so far (possibly empty) on error.
        """
        results = []
        search_query = f"site:soundgasm.net {query}"
        google_url = f"https://www.google.com/search?q={quote(search_query)}"
        try:
            response = self.session.get(google_url, timeout=self.REQUEST_TIMEOUT)
            # Don't try to parse an error page (e.g. a Google 429/captcha).
            response.raise_for_status()
            soup = BeautifulSoup(response.content, 'html.parser')

            # Extract Soundgasm links from the Google search results.
            soundgasm_links = []
            for link in soup.find_all('a', href=True):
                href = link.get('href')
                if href and 'soundgasm.net/u/' in href:
                    # Google wraps outbound links as /url?q=<target>&...
                    if href.startswith('/url?q='):
                        href = href.split('/url?q=')[1].split('&')[0]
                    if href.startswith('http') and 'soundgasm.net/u/' in href:
                        soundgasm_links.append(href)

            # Deduplicate while preserving discovery order; list(set(...))
            # would make the returned ordering non-deterministic.
            soundgasm_links = list(dict.fromkeys(soundgasm_links))[:max_results]

            # Fetch details for each discovered audio page.
            for link in soundgasm_links:
                audio_info = self.get_audio_info(link)
                if audio_info:
                    results.append(audio_info)
        except Exception as e:
            # Best-effort: report and return what we have so far.
            print(f"Search error: {e}")
        return results

    def get_audio_info(self, url):
        """Extract audio information from a Soundgasm page.

        Args:
            url: Full URL of a soundgasm.net audio page (``.../u/<user>/<title>``).

        Returns:
            Dict with keys ``title``, ``audio_title``, ``username``,
            ``description``, ``url``, ``audio_url`` and ``duration``
            (always ``None`` — would need to download the file),
            or ``None`` if the page could not be fetched/parsed.
        """
        try:
            response = self.session.get(url, timeout=self.REQUEST_TIMEOUT)
            response.raise_for_status()
            soup = BeautifulSoup(response.content, 'html.parser')

            # Page <title> doubles as the audio title.
            title_elem = soup.find('title')
            title = title_elem.text.strip() if title_elem else "Unknown Title"

            # Description lives in the jPlayer description div.
            description = ""
            desc_elem = soup.find('div', class_='jp-description')
            if desc_elem:
                description = desc_elem.get_text(strip=True)

            # The media URL is embedded in inline JavaScript as a quoted
            # ".m4a" path, so scan <script> bodies for it.
            audio_url = None
            for script in soup.find_all('script'):
                if script.string:
                    match = re.search(r'["\']([^"\']*\.m4a)["\']', script.string)
                    if match:
                        audio_url = match.group(1)
                        if not audio_url.startswith('http'):
                            audio_url = urljoin(self.base_url, audio_url)
                        break

            # Username comes from the /u/<username>/ URL segment.
            username = ""
            url_match = re.search(r'/u/([^/]+)/', url)
            if url_match:
                username = url_match.group(1)

            # Human-readable title derived from the URL slug.
            audio_title = ""
            title_match = re.search(r'/u/[^/]+/(.+)$', url)
            if title_match:
                audio_title = title_match.group(1).replace('-', ' ').replace('_', ' ')

            return {
                'title': title,
                'audio_title': audio_title,
                'username': username,
                'description': description,
                'url': url,
                'audio_url': audio_url,
                'duration': None  # Would need to download file to get duration
            }
        except Exception as e:
            print(f"Error getting audio info for {url}: {e}")
            return None

    def search_by_username(self, username):
        """Get all audios from a specific user.

        Args:
            username: Soundgasm user name (the ``/u/<username>`` segment).

        Returns:
            List of info dicts (see :meth:`get_audio_info`); empty on error.
        """
        user_url = f"{self.base_url}/u/{username}"
        try:
            response = self.session.get(user_url, timeout=self.REQUEST_TIMEOUT)
            response.raise_for_status()
            soup = BeautifulSoup(response.content, 'html.parser')

            # Every audio on the profile links to /u/<username>/<slug>;
            # exclude the bare profile link itself.
            audio_links = []
            for link in soup.find_all('a', href=True):
                href = link.get('href')
                if href and f'/u/{username}/' in href and href != f'/u/{username}':
                    audio_links.append(urljoin(self.base_url, href))

            results = []
            for link in audio_links:
                audio_info = self.get_audio_info(link)
                if audio_info:
                    results.append(audio_info)
            return results
        except Exception as e:
            print(f"Error searching by username {username}: {e}")
            return []
# Manual smoke test: run a live search and print what comes back.
if __name__ == "__main__":
    scraper = SoundgasmScraper()

    print("Testing search functionality...")
    found = scraper.search_audio("ASMR", max_results=3)

    for idx, item in enumerate(found, 1):
        print(f"\n--- Result {idx} ---")
        # Print the simple fields label-by-label.
        for label, key in (
            ("Title", "title"),
            ("Audio Title", "audio_title"),
            ("Username", "username"),
            ("URL", "url"),
            ("Audio URL", "audio_url"),
        ):
            print(f"{label}: {item[key]}")
        # Description may be empty; show a truncated preview when present.
        desc = item['description']
        print(f"Description: {desc[:100]}..." if desc else "No description")