# app.py
import asyncio
import html
import json
import random
import urllib.parse
from typing import Any, Dict, List

import aiohttp
from bs4 import BeautifulSoup as bs
from bs4 import NavigableString, Tag
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
from rich import print
from starlette.middleware.cors import CORSMiddleware


class BingScraper:
    # Rotating User-Agent list.
    USER_AGENTS = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 12_0) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.0 Safari/605.1.15",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:104.0) Gecko/20100101 Firefox/104.0",
        "Mozilla/5.0 (Linux; Android 10; SM-G975F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Mobile Safari/537.36",
    ]

    # Optional proxy list (leave empty for fastest performance).
    PROXIES = [
        # "http://proxy1.example.com:8080",
        # "http://proxy2.example.com:8080",
    ]

    def __init__(self):
        pass

    @classmethod
    def get_random_headers(cls):
        """Return HTTP headers with a random User-Agent."""
        return {
            "User-Agent": random.choice(cls.USER_AGENTS),
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
            "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
            "Cache-Control": "no-cache",
            "Pragma": "no-cache",
            # Session cookie captured from a real browser session; Bing uses it
            # to skip consent prompts and honour the ADLT=OFF (SafeSearch off)
            # preference at the end of the string.
            "Cookie": "MUID=12164D9908FD6B8C103F5871090F6AAA; _EDGE_V=1; MUIDB=12164D9908FD6B8C103F5871090F6AAA; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=26D35EBC20D44E418642BA10D7C14F42&dmnchg=1; ak_bmsc=17AB29B1FF4DE4D83B080A7E5CD24350~000000000000000000000000000000~YAAQNtjIFzUD3a2WAQAAnKvFuRtoko96GJe/vh01R588ZQYnYquFWtB0CzXeN5JGXZWgz7CwJqtckHuj3Z70qUhOcji4vkzhCMc/u91gnAIA0zCu7FcDeEJQgRx6n9MxhjrDAel2IIezGUgh+5ktFvDgUIO05s06PqDAtIUzuc9yTrbdAJi3iZvxFFKdGnbQkJ5krI9w3auWhY6i7JvcUPiDsbzzv0Chj1MxzRT1zdkP1B/JFtz+s5d8rUfagFpQporeRG/9gdid4qUPWvPHD6k98AdCTBYOysMHH2z9ErrD5PCO2mLK/RPrJSoqqN4d2mtnWeHNeF897PioJk0nOJw/IrseF0EgdsscKs7NVg/e3Mp27FTEIBduBRa93vvaabLMxg38; _UR=QS=0&TQS=0&Pn=0; BFBUSR=BFBHP=0; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyNS0wNS0xMFQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIlRucyI6MCwiRGZ0IjpudWxsLCJNdnMiOjAsIkZsdCI6MCwiSW1wIjoyLCJUb2JuIjowfQ==; _Rwho=u=d&ts=2025-05-10; ipv6=hit=1746877082438&t=4; _EDGE_S=F=1&SID=159E288249896C4D3A4D3D6A487B6D96&mkt=en-in; USRLOC=HS=1&ELOC=LAT=28.648426055908203|LON=77.1643295288086|N=Delhi%2C%20Delhi|ELT=1|; SRCHUSR=DOB=20250510&DS=1; MMCASM=ID=94A07BAF860D449DA2A4ABC8CAAC2538; _RwBf=r=0&ilt=9&ihpd=1&ispd=1&rc=24&rb=0&rg=200&pc=21&mtu=0&rbb=0&clo=0&v=9&l=2025-05-10T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&ard=0001-01-01T00:00:00.0000000&rwdbt=0&rwflt=0&rwaul2=0&g=&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2025-05-10T11:00:39.6623875+00:00&rwred=0&wls=&wlb=&wle=&ccp=&cpt=&lka=0&lkt=0&aad=0&TH=&cid=0&gb=; _SS=SID=159E288249896C4D3A4D3D6A487B6D96&R=24&RB=0&GB=0&RG=200&RP=21; SRCHHPGUSR=SRCHLANG=en&IG=00DEBA4A2B754BC983011302535541CA&PV=10.0.0&DM=1&BRW=S&BRH=M&CW=1094&CH=738&SCW=1177&SCH=2902&DPR=1.3&UTC=330&WTS=63882470261&PRVCW=1094&PRVCH=738&EXLTT=1&HV=1746874838&HVE=CfDJ8GtUudZcSi1Enm88WwQKtCf-s_3m7rtBIR85jW2Uv01W2IjDUasKRdncp2MkJ7Bl7PxVetzuZETt8bkyd54iRcMP8SVUsClaL2I5uvRiGiSldOKFjy7i69jYPS-egJOhCGf717H5WHFvCI4UwespMZgxZkdo8SVoBwOlx_yQKpA2qtpqV7t6wYd7etwY1FpUaA&BCML=0&BCSRLANG=&ADLT=OFF",
        }

    @classmethod
    def get_random_proxy(cls):
        """Return a random proxy if available."""
        if cls.PROXIES:
            return random.choice(cls.PROXIES)
        return None

    async def fetch_html(self, url, session):
        """Fetch HTML content asynchronously with minimal overhead."""
        headers = self.get_random_headers()
        proxy = self.get_random_proxy()
        try:
            async with session.get(url, headers=headers, proxy=proxy, timeout=30) as response:
                if response.status != 200:
                    print(f"Failed to fetch {url}: HTTP {response.status}")
                    return None
                return await response.text()
        except Exception as e:
            print(f"Error fetching {url}: {e}")
            return None
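    # Minimal smoke test for fetch_html (an illustrative sketch, not part of
    # the original module; the `_demo_fetch` name and the target URL are
    # assumptions made for this example):
    #
    #     async def _demo_fetch():
    #         scraper = BingScraper()
    #         async with aiohttp.ClientSession() as session:
    #             page = await scraper.fetch_html("https://www.bing.com/search?q=test", session)
    #             print(len(page) if page else "request failed")
    #
    #     asyncio.run(_demo_fetch())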
    # ===== Text Search Methods =====

    async def fetch_bing_text(self, query, session):
        """Fetch Bing search results HTML for the given query."""
        encoded_query = urllib.parse.quote_plus(query)
        url = f'https://www.bing.com/search?q={encoded_query}'
        return await self.fetch_html(url, session)

    def parse_text_results(self, html_content):
        """
        Parse the Bing HTML using BeautifulSoup to extract:
          - icon (if available)
          - URL
          - Title
          - Abstract (or fallback text)
          - additional columns (if available)
        """
        soup = bs(html_content, 'lxml')
        results = []
        result_no = 0
        main_containers = soup.find_all(id="b_results")
        for container in main_containers:
            list_results = container.find_all(class_='b_algo')
            for cont in list_results:
                result_no += 1
                try:
                    icon = 'https:' + cont.find_all(class_='rms_iac')[0].get('data-src')
                except Exception:
                    icon = None
                try:
                    URL = cont.find_all(class_='tilk')[0].get('href')
                except Exception:
                    # A result without a link is useless; skip it.
                    continue
                try:
                    Title = cont.find_all('h2')[0].get_text(strip=True)
                except Exception:
                    Title = "No Title"
                try:
                    abstract_elem = cont.find_all(class_='b_caption')
                    if abstract_elem and abstract_elem[0].get_text(strip=True):
                        Abstract = abstract_elem[0].get_text(strip=True)
                    else:
                        Abstract = cont.find_all(class_='b_algoSlug')[0].get_text(strip=True)
                except Exception:
                    Abstract = "No Abstract"
                other = []
                try:
                    for column in cont.find_all(class_='b_rc_gb_sub_column'):
                        for div in column.find_all('div'):
                            try:
                                sub_title = div.find_all(class_='b_rc_gb_sub_title')[0].get_text(strip=True)
                            except Exception:
                                sub_title = "No Sub-title"
                            try:
                                sub_description = div.find_all(class_='b_rc_gb_text_wrapper')[0].get_text(strip=True)
                            except Exception:
                                sub_description = ""
                            if sub_description:
                                other.append({'Title': sub_title, 'Description': sub_description})
                except Exception:
                    other = None
                results.append({
                    'no': result_no,
                    'icon': icon,
                    'URL': URL,
                    'Title': Title,
                    'Abstract': Abstract,
                    'other': other,
                })
        return results
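    # For reference, each record produced by parse_text_results has this shape
    # (field names come from the code above; the concrete values below are
    # made-up illustrations, not real output):
    #
    #     {
    #         'no': 1,
    #         'icon': 'https://.../favicon.ico',
    #         'URL': 'https://example.com/',
    #         'Title': 'Example Domain',
    #         'Abstract': 'This domain is for use in illustrative examples.',
    #         'other': [{'Title': '...', 'Description': '...'}],
    #     }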
    async def search_text(self, query, session):
        """Perform a text search (regular and news) and return parsed results."""
        real_query = query
        news_query = query + " news"
        # Run both searches concurrently.
        real_html_task = asyncio.create_task(self.fetch_bing_text(real_query, session))
        news_html_task = asyncio.create_task(self.fetch_bing_text(news_query, session))
        real_html, news_html = await asyncio.gather(real_html_task, news_html_task)
        if not real_html or not news_html:
            print("Failed to retrieve one or both text searches.")
            return None
        real_results = self.parse_text_results(real_html)
        news_results = self.parse_text_results(news_html)
        return {
            "query_results": real_results,
            "news_results": news_results,
        }

    # ===== Video Search Methods =====

    async def fetch_bing_video(self, query, session):
        """
        Fetch Bing video search results HTML for the given query.
        The URL below targets Bing's video search endpoint.
        """
        encoded_query = urllib.parse.quote_plus(query)
        url = f'https://www.bing.com/videos/search?q={encoded_query}&adlt=off'
        return await self.fetch_html(url, session)

    def parse_video_results(self, html_content):
        """
        Parse the Bing video search HTML to extract:
          - video URL (from the mmeta JSON)
          - title (from an element with class "mc_vtvc_title")
          - thumbnail URL (from the mmeta JSON)
          - duration (if available)
          - source (if available)
        """
        soup = bs(html_content, 'lxml')
        video_results = []
        result_no = 0
        # Find all divs with class "mc_vtvc" that carry an mmeta attribute.
        video_divs = soup.find_all("div", class_="mc_vtvc", attrs={"mmeta": True})
        for div in video_divs:
            result_no += 1
            mmeta_raw = div.get("mmeta")
            try:
                # Unescape and parse the JSON metadata.
                mmeta_json = json.loads(html.unescape(mmeta_raw))
            except Exception as e:
                mmeta_json = {}
                print(f"Error parsing mmeta JSON: {e}")
            # Retrieve the video URL and thumbnail from the metadata.
            video_url = mmeta_json.get("murl") or mmeta_json.get("pgurl")
            thumbnail = mmeta_json.get("turl")
            # The title is usually stored in an element with class "mc_vtvc_title".
            title_elem = div.find(class_="mc_vtvc_title")
            title = title_elem.get_text(strip=True) if title_elem else "No Title"
            # Look for a duration badge, if present.
            duration_elem = div.find(class_="mc_bc_rc")
            duration = duration_elem.get_text(strip=True) if duration_elem else None
            # Optionally, extract the video source if available.
            source_elem = div.find(class_="mc_vtvc_meta_source")
            source = source_elem.get_text(strip=True) if source_elem else None
            video_results.append({
                "no": result_no,
                "Video URL": video_url,
                "Title": title,
                "Thumbnail": thumbnail,
                "Duration": duration,
                "Source": source,
                "Meta": mmeta_json,  # Additional metadata if needed.
            })
        return video_results

    async def search_video(self, query, session):
        """Perform a video search and return parsed results."""
        html_content = await self.fetch_bing_video(query, session)
        if not html_content:
            print("Failed to retrieve video search results.")
            return None
        video_results = self.parse_video_results(html_content)
        return {
            "query": query,
            "video_results": video_results,
        }

    # ===== Image Search Methods =====

    async def fetch_bing_image(self, query, session):
        """Fetch Bing image search results HTML for the given query."""
        encoded_query = urllib.parse.quote_plus(query)
        url = f'https://www.bing.com/images/search?q={encoded_query}'
        return await self.fetch_html(url, session)
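    # End-to-end usage sketch combining text and video search on one shared
    # session (illustrative only -- the `demo` coroutine and its query string
    # are assumptions for this example, not part of the original module):
    #
    #     async def demo():
    #         scraper = BingScraper()
    #         async with aiohttp.ClientSession() as session:
    #             text, video = await asyncio.gather(
    #                 scraper.search_text("python asyncio", session),
    #                 scraper.search_video("python asyncio", session),
    #             )
    #         print(text["news_results"][:1] if text else "text search failed")
    #         print(video["video_results"][:1] if video else "video search failed")
    #
    #     asyncio.run(demo())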
    def parse_image_results(self, html_content):
        """
        Parse the Bing images HTML using BeautifulSoup to extract detailed
        image data. For each image result (
          • inside a