Greff3 committed
Commit eca2303 · verified · 1 Parent(s): f61fdcd

Update main.py

Files changed (1): main.py (+252 -160)
main.py CHANGED
@@ -1,36 +1,30 @@
- import asyncio
  import base64
- import json
  from concurrent.futures import ThreadPoolExecutor
- from typing import Any, Dict, List, Optional
- from urllib.parse import parse_qs, urlparse
-
- from bs4 import BeautifulSoup
- # This import will work correctly after running `pip install --upgrade curl_cffi`
- from curl_cffi.aio import AsyncSession
- from fastapi import FastAPI, HTTPException, Query
- from pydantic import BaseModel, Field
  from webscout.litagent import LitAgent

- # --- FastAPI App Definition ---
  app = FastAPI(
-     title="Snapzion Enhanced Search API",
-     description="An advanced FastAPI wrapper for Bing Search, featuring AI-powered summarization and metadata enrichment.",
-     version="2.0.1", # Version bump
  )

- # --- Pydantic Models for Clearer Responses ---

- class BaseSearchResult(BaseModel):
      url: str
      title: str
      description: str
-
- class EnhancedBingSearchResult(BaseSearchResult):
-     """Model for the enhanced search results with summary and metadata."""
-     summary: Optional[str] = Field(None, description="AI-generated summary of the page content.")
-     source: Optional[str] = Field(None, description="The domain name of the result URL.")
-     favicon: Optional[str] = Field(None, description="URL of the website's favicon.")

  class BingImageResult(BaseModel):
      title: str
@@ -45,14 +39,9 @@ class BingNewsResult(BaseModel):
      description: str
      source: str = ""

- # --- Enhanced BingSearch Library ---
-
  class BingSearch:
-     """
-     Bing search implementation rewritten for asynchronous performance and enhanced data retrieval.
-     """
-     _lit_agent_instance: Optional[LitAgent] = None
-     _executor = ThreadPoolExecutor(max_workers=10)

      def __init__(
          self,
@@ -69,70 +58,24 @@ class BingSearch:
          self.lang = lang
          self.sleep_interval = sleep_interval
          self._base_url = "https://www.bing.com"
-         self.session = AsyncSession(
              proxies=self.proxies,
              verify=self.verify,
              timeout=self.timeout,
              impersonate=impersonate
          )
          self.session.headers.update({
-             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
          })

-     @classmethod
-     def get_lit_agent(cls) -> LitAgent:
-         """Initializes LitAgent lazily."""
-         if cls._lit_agent_instance is None:
-             cls._lit_agent_instance = LitAgent()
-         return cls._lit_agent_instance
-
-     async def _summarize_content(self, html_content: str) -> str:
-         """Runs the synchronous summarize method in a thread pool."""
-         loop = asyncio.get_running_loop()
-         agent = self.get_lit_agent()
-         try:
-             summary = await loop.run_in_executor(
-                 self._executor, agent.summarize, html_content
-             )
-             return summary
-         except Exception as e:
-             print(f"Error during summarization: {e}")
-             return "Could not generate summary."
-
-
-     async def _enhance_result(self, result: BaseSearchResult) -> EnhancedBingSearchResult:
-         """Fetches page content, generates summary, and extracts metadata."""
-         enhanced_result = EnhancedBingSearchResult(**result.model_dump())
-         try:
-             parsed_url = urlparse(result.url)
-             enhanced_result.source = parsed_url.netloc
-
-             resp = await self.session.get(result.url, timeout=self.timeout)
-             resp.raise_for_status()
-             html = resp.text
-
-             summary = await self._summarize_content(html)
-             enhanced_result.summary = summary
-
-             soup = BeautifulSoup(html, "html.parser")
-             favicon_tag = soup.find("link", rel=lambda r: r and "icon" in r.lower())
-             if favicon_tag and favicon_tag.get("href"):
-                 favicon_url = favicon_tag["href"]
-                 if not favicon_url.startswith(('http://', 'https://', '//')):
-                     favicon_url = f"{parsed_url.scheme}://{parsed_url.netloc}{'/' if not favicon_url.startswith('/') else ''}{favicon_url}"
-                 elif favicon_url.startswith('//'):
-                     favicon_url = f"{parsed_url.scheme}:{favicon_url}"
-                 enhanced_result.favicon = favicon_url
-         except Exception as e:
-             print(f"Failed to enhance URL {result.url}: {e}")
-         return enhanced_result
-
      def _selectors(self, element):
          selectors = {
-             'links': 'ol#b_results > li.b_algo',
-             'next': 'a.sb_pagN'
          }
-         return selectors.get(element, '')

      def _first_page(self, query):
          url = f'{self._base_url}/search?q={query}&search=&form=QBLH'
@@ -141,26 +84,47 @@
      def _next_page(self, soup):
          selector = self._selectors('next')
          next_page_tag = soup.select_one(selector)
          if next_page_tag and next_page_tag.get('href'):
-             return {'url': self._base_url + next_page_tag['href'], 'data': None}
-         return {'url': None, 'data': None}

      def _get_url(self, tag):
-         # A more direct approach that is often sufficient
-         return tag.get('href', '')
-
-     async def text(
-         self, keywords: str, max_results: int = 10, enhanced: bool = False, **kwargs
-     ) -> List[BaseSearchResult | EnhancedBingSearchResult]:
          if not keywords:
              raise ValueError("Search keywords cannot be empty")

          fetched_results = []
          fetched_links = set()

-         async def fetch_page(url):
              try:
-                 resp = await self.session.get(url)
                  resp.raise_for_status()
                  return resp.text
              except Exception as e:
@@ -169,146 +133,274 @@
          current_url = self._first_page(keywords)['url']

          while current_url and len(fetched_results) < max_results:
-             html = await fetch_page(current_url)
              soup = BeautifulSoup(html, "html.parser")

              result_blocks = soup.select(self._selectors('links'))

              for result in result_blocks:
                  title_tag = result.find('h2')
-                 if not title_tag: continue

                  link_tag = title_tag.find('a')
-                 if not link_tag or not link_tag.has_attr('href'): continue

                  url_val = self._get_url(link_tag)
                  title = title_tag.get_text(strip=True)

                  desc_container = result.find('div', class_='b_caption')
-                 description = ""
                  if desc_container:
-                     p_tag = desc_container.find('p')
                      if p_tag:
                          description = p_tag.get_text(strip=True)

                  if url_val and title:
-                     if url_val in fetched_links: continue
-                     fetched_results.append(BaseSearchResult(url=url_val, title=title, description=description))
                      fetched_links.add(url_val)
-                     if len(fetched_results) >= max_results: break

-             if len(fetched_results) >= max_results: break

              next_page_info = self._next_page(soup)
              current_url = next_page_info['url']
              if current_url:
-                 await asyncio.sleep(self.sleep_interval)

-         results_to_return = fetched_results[:max_results]
-
-         if enhanced and results_to_return:
-             enhancement_tasks = [self._enhance_result(res) for res in results_to_return]
-             return await asyncio.gather(*enhancement_tasks)
-
-         return results_to_return

-     async def suggestions(self, query: str, **kwargs) -> List[str]:
-         if not query: raise ValueError("Query cannot be empty")
-         region = kwargs.get('region', 'en-US')
-         url = f"https://api.bing.com/osjson.aspx?query={query}&mkt={region}"
-         resp = await self.session.get(url)
-         resp.raise_for_status()
-         data = resp.json()
-         return data[1] if isinstance(data, list) and len(data) > 1 else []

-     async def images(self, keywords: str, max_results: int = 10, **kwargs) -> List[BingImageResult]:
-         if not keywords: raise ValueError("Keywords cannot be empty")
-         url = f"{self._base_url}/images/search?q={keywords}&count={max_results}"
-         resp = await self.session.get(url)
-         resp.raise_for_status()
-         soup = BeautifulSoup(resp.text, "html.parser")
          results = []
          for item in soup.select("a.iusc"):
              try:
-                 m_data = item.get("m")
-                 if not m_data: continue
-                 meta = json.loads(m_data)
-                 if meta.get("murl"):
-                     results.append(BingImageResult(title=meta.get("t", ""), image=meta.get("murl"), thumbnail=meta.get("turl", ""), url=meta.get("purl", ""), source=meta.get("surl", "")))
-                 if len(results) >= max_results: break
-             except Exception: continue
-         return results
-
-     async def news(self, keywords: str, max_results: int = 10, **kwargs) -> List[BingNewsResult]:
-         if not keywords: raise ValueError("Keywords cannot be empty")
-         url = f"{self._base_url}/news/search?q={keywords}"
-         resp = await self.session.get(url)
-         resp.raise_for_status()
          soup = BeautifulSoup(resp.text, "html.parser")
          results = []
-         for item in soup.select("div.news-card"):
-             a_tag = item.find("a", class_="title")
-             if not (a_tag and a_tag.has_attr('href')): continue
-             desc_tag = item.find("div", class_="snippet")
-             source_tag = item.find(attrs={"aria-label": "Publisher"})
-             results.append(BingNewsResult(title=a_tag.get_text(strip=True), url=a_tag['href'], description=desc_tag.get_text(strip=True) if desc_tag else "", source=source_tag.get_text(strip=True) if source_tag else ""))
-             if len(results) >= max_results: break
-         return results

- bing = BingSearch()

- # --- FastAPI Endpoints ---

- @app.get("/search", response_model=List[EnhancedBingSearchResult | BaseSearchResult], summary="Perform a standard or enhanced text search")
  async def text_search(
      query: str = Query(..., description="The search keywords."),
      max_results: int = Query(10, description="Maximum number of results to return."),
-     enhanced: bool = Query(False, description="Enable AI summarization and metadata fetching (slower but more detailed).")
  ):
      """
      Perform a text search on Bing.
-     - Set `enhanced=true` to get AI-powered summaries and additional metadata for each result.
      """
      try:
-         results = await bing.text(
              keywords=query,
              max_results=max_results,
-             enhanced=enhanced
          )
          return results
      except Exception as e:
          raise HTTPException(status_code=500, detail=str(e))

- @app.get("/suggestions", response_model=List[str], summary="Fetch search suggestions")
  async def get_suggestions(
      query: str = Query(..., description="The search query for which to fetch suggestions."),
  ):
      try:
-         return await bing.suggestions(query=query)
      except Exception as e:
          raise HTTPException(status_code=500, detail=str(e))

- @app.get("/images", response_model=List[BingImageResult], summary="Search for images")
  async def image_search(
      query: str = Query(..., description="The search keywords for images."),
      max_results: int = Query(10, description="Maximum number of image results to return."),
  ):
      try:
-         return await bing.images(keywords=query, max_results=max_results)
      except Exception as e:
          raise HTTPException(status_code=500, detail=str(e))

- @app.get("/news", response_model=List[BingNewsResult], summary="Search for news articles")
  async def news_search(
      query: str = Query(..., description="The search keywords for news."),
      max_results: int = Query(10, description="Maximum number of news results to return."),
  ):
      try:
-         return await bing.news(keywords=query, max_results=max_results)
      except Exception as e:
          raise HTTPException(status_code=500, detail=str(e))

-
  if __name__ == "__main__":
      import uvicorn
-     uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
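A note on the removed import: the old version pulled AsyncSession from curl_cffi.aio, with a comment promising the import would work after an upgrade. In the curl_cffi releases I am aware of, the async client is instead exposed as curl_cffi.requests.AsyncSession, which would explain why this commit falls back to the synchronous Session. A minimal sketch of the async pattern the removed code was attempting, under that assumption (the impersonation target is illustrative, not taken from this commit):

import asyncio
from curl_cffi.requests import AsyncSession  # assumed location of the async client

async def fetch(url: str) -> str:
    # One impersonated browser session per call; a real client would reuse it.
    async with AsyncSession(impersonate="chrome120") as session:
        resp = await session.get(url, timeout=10)
        resp.raise_for_status()
        return resp.text

if __name__ == "__main__":
    print(asyncio.run(fetch("https://www.bing.com"))[:200])

Also worth noting: the rewritten endpoints below stay async def but call the synchronous session directly, so slow Bing fetches will block the event loop; declaring them as plain def would let FastAPI run them in its thread pool instead.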
 
+ from fastapi import FastAPI, HTTPException, Query
+ from typing import List, Optional
+ from pydantic import BaseModel
+ from time import sleep
+ from curl_cffi.requests import Session
+ from urllib.parse import urlencode, unquote, urlparse, parse_qs
  import base64
+ from typing import Dict, Any
  from concurrent.futures import ThreadPoolExecutor
  from webscout.litagent import LitAgent
+ from bs4 import BeautifulSoup
+ import json

  app = FastAPI(
+     title="Snapzion Search API",
+     description="A FastAPI wrapper for the Search library with advanced features.",
+     version="1.0.0",
  )

+ # --- BingSearch Library Code ---
+ # The provided BingSearch code is integrated here directly.

+ class BingSearchResult(BaseModel):
      url: str
      title: str
      description: str
+     metadata: Dict[str, Any] = {}

  class BingImageResult(BaseModel):
      title: str

      description: str
      source: str = ""

  class BingSearch:
+     """Bing search implementation with configurable parameters and advanced features."""
+     _executor: ThreadPoolExecutor = ThreadPoolExecutor()

      def __init__(
          self,

          self.lang = lang
          self.sleep_interval = sleep_interval
          self._base_url = "https://www.bing.com"
+         self.session = Session(
              proxies=self.proxies,
              verify=self.verify,
              timeout=self.timeout,
              impersonate=impersonate
          )
+         # It's good practice to set a realistic User-Agent
          self.session.headers.update({
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
          })

+     # FIX: Updated selectors to be more robust against Bing UI changes.
      def _selectors(self, element):
          selectors = {
+             'links': 'ol#b_results > li',  # More generic selector for any list item in results
+             'next': 'a.sb_pagN'  # Selector for the "Next" page button
          }
+         return selectors[element]

      def _first_page(self, query):
          url = f'{self._base_url}/search?q={query}&search=&form=QBLH'

      def _next_page(self, soup):
          selector = self._selectors('next')
          next_page_tag = soup.select_one(selector)
+         url = None
          if next_page_tag and next_page_tag.get('href'):
+             url = self._base_url + next_page_tag['href']
+         return {'url': url, 'data': None}

      def _get_url(self, tag):
+         url = tag.get('href', '')
+         resp = url
+         try:
+             parsed_url = urlparse(url)
+             query_params = parse_qs(parsed_url.query)
+             if "u" in query_params:
+                 encoded_url = query_params["u"][0][2:]
+                 try:
+                     decoded_bytes = base64.urlsafe_b64decode(encoded_url + '===')
+                 except base64.binascii.Error as e:
+                     print(f"Error decoding Base64 string: {e}")
+                     return url
+                 resp = decoded_bytes.decode('utf-8')
+         except Exception as e:
+             print(f"Error decoding Base64 string: {e}")
+         return resp
+
+     # FIX: The entire text parsing logic is updated to handle modern Bing HTML structure.
+     def text(
+         self,
+         keywords: str,
+         region: str = None,
+         safesearch: str = "moderate",
+         max_results: int = 10,
+         unique: bool = True
+     ) -> List[BingSearchResult]:
          if not keywords:
              raise ValueError("Search keywords cannot be empty")

          fetched_results = []
          fetched_links = set()

+         def fetch_page(url):
              try:
+                 resp = self.session.get(url)
                  resp.raise_for_status()
                  return resp.text
              except Exception as e:

          current_url = self._first_page(keywords)['url']

          while current_url and len(fetched_results) < max_results:
+             html = fetch_page(current_url)
              soup = BeautifulSoup(html, "html.parser")

+             # Use the more generic selector for result blocks
              result_blocks = soup.select(self._selectors('links'))

              for result in result_blocks:
+                 # Find the title and link, which are usually in an <h2> tag
                  title_tag = result.find('h2')
+                 if not title_tag:
+                     continue

                  link_tag = title_tag.find('a')
+                 if not link_tag or not link_tag.has_attr('href'):
+                     continue

                  url_val = self._get_url(link_tag)
                  title = title_tag.get_text(strip=True)

+                 # Find the description, often in a div with class 'b_caption'
                  desc_container = result.find('div', class_='b_caption')
+                 description = ''
                  if desc_container:
+                     # Find the paragraph within the caption, or use the whole caption text
+                     desc_p = desc_container.find('p')
+                     if desc_p:
+                         description = desc_p.get_text(strip=True)
+                     else:
+                         description = desc_container.get_text(strip=True)
+
+                 # Fallback if no 'b_caption' is found
+                 if not description:
+                     p_tag = result.find('p')
                      if p_tag:
                          description = p_tag.get_text(strip=True)

                  if url_val and title:
+                     if unique and url_val in fetched_links:
+                         continue
+
+                     fetched_results.append(BingSearchResult(url=url_val, title=title, description=description))
                      fetched_links.add(url_val)
+
+                     if len(fetched_results) >= max_results:
+                         break

+             if len(fetched_results) >= max_results:
+                 break

+             # Find the next page URL
              next_page_info = self._next_page(soup)
              current_url = next_page_info['url']
              if current_url:
+                 sleep(self.sleep_interval)

+         return fetched_results[:max_results]

+     def suggestions(self, query: str, region: str = None) -> List[str]:
+         if not query:
+             raise ValueError("Search query cannot be empty")
+         params = {
+             "query": query,
+             "mkt": region if region else "en-US"
+         }
+         url = f"https://api.bing.com/osjson.aspx?{urlencode(params)}"
+         try:
+             resp = self.session.get(url)
+             resp.raise_for_status()
+             data = resp.json()
+             if isinstance(data, list) and len(data) > 1 and isinstance(data[1], list):
+                 return data[1]
+             return []
+         except Exception as e:
+             if hasattr(e, 'response') and e.response is not None:
+                 raise Exception(f"Bing suggestions failed with status {e.response.status_code}: {str(e)}")
+             else:
+                 raise Exception(f"Bing suggestions failed: {str(e)}")
+
+     def images(
+         self,
+         keywords: str,
+         region: str = None,
+         safesearch: str = "moderate",
+         max_results: int = 10
+     ) -> List[BingImageResult]:
+         if not keywords:
+             raise ValueError("Search keywords cannot be empty")
+         safe_map = {
+             "on": "Strict",
+             "moderate": "Moderate",
+             "off": "Off"
+         }
+         safe = safe_map.get(safesearch.lower(), "Moderate")
+         params = {
+             "q": keywords,
+             "count": max_results,
+             "setlang": self.lang,
+             "safeSearch": safe,
+         }
+         if region:
+             params["mkt"] = region
+         url = f"{self._base_url}/images/search?{urlencode(params)}"
+         try:
+             resp = self.session.get(url)
+             resp.raise_for_status()
+             html = resp.text
+         except Exception as e:
+             if hasattr(e, 'response') and e.response is not None:
+                 raise Exception(f"Bing image search failed with status {e.response.status_code}: {str(e)}")
+             else:
+                 raise Exception(f"Bing image search failed: {str(e)}")
+         soup = BeautifulSoup(html, "html.parser")
          results = []
          for item in soup.select("a.iusc"):
              try:
+                 m = item.get("m")
+                 meta = json.loads(m) if m else {}
+                 image_url = meta.get("murl", "")
+                 thumb_url = meta.get("turl", "")
+                 title = meta.get("t", "")
+                 page_url = meta.get("purl", "")
+                 source = meta.get("surl", "")
+                 if image_url:
+                     results.append(BingImageResult(title=title, image=image_url, thumbnail=thumb_url, url=page_url, source=source))
+                 if len(results) >= max_results:
+                     break
+             except Exception:
+                 continue
+         return results[:max_results]
+
+     def news(
+         self,
+         keywords: str,
+         region: str = None,
+         safesearch: str = "moderate",
+         max_results: int = 10,
+     ) -> List['BingNewsResult']:
+         if not keywords:
+             raise ValueError("Search keywords cannot be empty")
+         safe_map = {
+             "on": "Strict",
+             "moderate": "Moderate",
+             "off": "Off"
+         }
+         safe = safe_map.get(safesearch.lower(), "Moderate")
+         params = {
+             "q": keywords,
+             "form": "QBNH",
+             "safeSearch": safe,
+         }
+         if region:
+             params["mkt"] = region
+         url = f"{self._base_url}/news/search?{urlencode(params)}"
+         try:
+             resp = self.session.get(url)
+             resp.raise_for_status()
+         except Exception as e:
+             if hasattr(e, 'response') and e.response is not None:
+                 raise Exception(f"Bing news search failed with status {e.response.status_code}: {str(e)}")
+             else:
+                 raise Exception(f"Bing news search failed: {str(e)}")
          soup = BeautifulSoup(resp.text, "html.parser")
          results = []
+         for item in soup.select("div.news-card, div.card, div.newsitem, div.card-content, div.t_s_main"):
+             a_tag = item.find("a")
+             title = a_tag.get_text(strip=True) if a_tag else ''
+             url_val = a_tag['href'] if a_tag and a_tag.has_attr('href') else ''
+             desc_tag = item.find("div", class_="snippet") or item.find("div", class_="news-card-snippet") or item.find("div", class_="snippetText")
+             description = desc_tag.get_text(strip=True) if desc_tag else ''
+             source_tag = item.find("div", class_="source")
+             source = source_tag.get_text(strip=True) if source_tag else ''
+             if url_val and title:
+                 results.append(BingNewsResult(title=title, url=url_val, description=description, source=source))
+             if len(results) >= max_results:
+                 break
+         if not results:
+             for item in soup.select("a.title"):
+                 title = item.get_text(strip=True)
+                 url_val = item['href'] if item.has_attr('href') else ''
+                 description = ''
+                 source = ''
+                 if url_val and title:
+                     results.append(BingNewsResult(title=title, url=url_val, description=description, source=source))
+                 if len(results) >= max_results:
+                     break
+         return results[:max_results]


+ bing = BingSearch()

+ @app.get("/search", response_model=List[BingSearchResult])
  async def text_search(
      query: str = Query(..., description="The search keywords."),
+     region: Optional[str] = Query(None, description="The region for the search (e.g., 'en-US')."),
+     safesearch: str = Query("moderate", description="Safe search level ('on', 'moderate', 'off')."),
      max_results: int = Query(10, description="Maximum number of results to return."),
  ):
      """
      Perform a text search on Bing.
      """
      try:
+         results = bing.text(
              keywords=query,
+             region=region,
+             safesearch=safesearch,
              max_results=max_results,
          )
          return results
      except Exception as e:
          raise HTTPException(status_code=500, detail=str(e))

+ @app.get("/suggestions", response_model=List[str])
  async def get_suggestions(
      query: str = Query(..., description="The search query for which to fetch suggestions."),
+     region: Optional[str] = Query(None, description="The region for the suggestions (e.g., 'en-US')."),
  ):
+     """
+     Fetches search suggestions for a given query.
+     """
      try:
+         suggestions = bing.suggestions(query=query, region=region)
+         return suggestions
      except Exception as e:
          raise HTTPException(status_code=500, detail=str(e))

+ @app.get("/images", response_model=List[BingImageResult])
  async def image_search(
      query: str = Query(..., description="The search keywords for images."),
+     region: Optional[str] = Query(None, description="The region for the image search (e.g., 'en-US')."),
+     safesearch: str = Query("moderate", description="Safe search level ('on', 'moderate', 'off')."),
      max_results: int = Query(10, description="Maximum number of image results to return."),
  ):
+     """
+     Perform an image search on Bing.
+     """
      try:
+         results = bing.images(
+             keywords=query,
+             region=region,
+             safesearch=safesearch,
+             max_results=max_results,
+         )
+         return results
      except Exception as e:
          raise HTTPException(status_code=500, detail=str(e))

+ @app.get("/news", response_model=List[BingNewsResult])
  async def news_search(
      query: str = Query(..., description="The search keywords for news."),
+     region: Optional[str] = Query(None, description="The region for the news search (e.g., 'en-US')."),
+     safesearch: str = Query("moderate", description="Safe search level ('on', 'moderate', 'off')."),
      max_results: int = Query(10, description="Maximum number of news results to return."),
  ):
+     """
+     Perform a news search on Bing.
+     """
      try:
+         results = bing.news(
+             keywords=query,
+             region=region,
+             safesearch=safesearch,
+             max_results=max_results,
+         )
+         return results
      except Exception as e:
          raise HTTPException(status_code=500, detail=str(e))

  if __name__ == "__main__":
      import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=8000)
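For reference, a minimal sketch of exercising the rewritten endpoints once the server is running (python main.py serves on port 8000 per the __main__ block above; the requests dependency is an assumption here, any HTTP client would do):

import requests

BASE = "http://localhost:8000"

# Text search with the new region/safesearch query parameters.
resp = requests.get(f"{BASE}/search", params={
    "query": "fastapi",
    "region": "en-US",
    "safesearch": "moderate",
    "max_results": 5,
})
resp.raise_for_status()
for item in resp.json():
    print(item["title"], "->", item["url"])

# /suggestions returns a plain list of strings.
print(requests.get(f"{BASE}/suggestions", params={"query": "fastapi"}).json())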