import os
import json
import requests
from requests.exceptions import Timeout
from bs4 import BeautifulSoup
from tqdm import tqdm
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import pdfplumber
from io import BytesIO
import re
import string
from typing import Optional, Tuple
from nltk.tokenize import sent_tokenize


# Browser-like request headers to reduce the chance of simple bot blocking.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/58.0.3029.110 Safari/537.36',
    'Referer': 'https://www.google.com/',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.5',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1'
}

# Reuse a single session so connection pooling and headers apply to all requests.
session = requests.Session()
session.headers.update(headers)


def remove_punctuation(text: str) -> str:
    """Remove punctuation from the text."""
    return text.translate(str.maketrans("", "", string.punctuation))
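

# Doctest-style example (illustrative):
#     >>> remove_punctuation("Hello, world!")
#     'Hello world'

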
def f1_score(true_set: set, pred_set: set) -> float:
    """Calculate the F1 score between two sets of words."""
    intersection = len(true_set.intersection(pred_set))
    if not intersection:
        return 0.0
    precision = intersection / float(len(pred_set))
    recall = intersection / float(len(true_set))
    return 2 * (precision * recall) / (precision + recall)
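

# Worked example (illustrative): with true = {"a", "b", "c"} and
# pred = {"b", "c", "d"}, the overlap is 2, so precision = recall = 2/3 and
# F1 = 2 * (2/3 * 2/3) / (2/3 + 2/3) = 2/3.
#     >>> round(f1_score({"a", "b", "c"}, {"b", "c", "d"}), 4)
#     0.6667

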
def extract_snippet_with_context(full_text: str, snippet: str, context_chars: int = 2500) -> Tuple[bool, str]:
    """
    Extract the sentence that best matches the snippet, plus its surrounding context, from the full text.

    The core steps are: preprocess the text, score every sentence against the
    snippet with a word-level F1 score, pick the best-scoring sentence, and
    return a window of characters around it.

    Args:
        full_text (str): The full text extracted from the webpage.
        snippet (str): The snippet to match.
        context_chars (int): Number of characters to include before and after the snippet.

    Returns:
        Tuple[bool, str]: The first element indicates whether extraction was successful,
        the second element is the extracted context.
    """
    try:
        # Cap the text length to keep sentence tokenization and scoring fast.
        full_text = full_text[:50000]

        snippet = snippet.lower()
        snippet = remove_punctuation(snippet)
        snippet_words = set(snippet.split())

        best_sentence = None
        best_f1 = 0.2  # Minimum F1 a sentence must exceed to count as a match.

        sentences = sent_tokenize(full_text)

        for sentence in sentences:
            key_sentence = sentence.lower()
            key_sentence = remove_punctuation(key_sentence)
            sentence_words = set(key_sentence.split())
            f1 = f1_score(snippet_words, sentence_words)
            if f1 > best_f1:
                best_f1 = f1
                best_sentence = sentence

        if best_sentence:
            para_start = full_text.find(best_sentence)
            para_end = para_start + len(best_sentence)
            start_index = max(0, para_start - context_chars)
            end_index = min(len(full_text), para_end + context_chars)
            context = full_text[start_index:end_index]
            return True, context
        else:
            # No sentence cleared the threshold; fall back to the start of the text.
            return False, full_text[:context_chars * 2]
    except Exception as e:
        return False, f"Failed to extract snippet context due to {str(e)}"
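

# Usage sketch (illustrative text and snippet):
#     ok, ctx = extract_snippet_with_context(
#         "Dimethyl fumarate is the methyl ester of fumaric acid. It is used ...",
#         "methyl ester of fumaric acid",
#     )
#     if ok:
#         print(ctx)  # best-matching sentence plus up to 2500 chars on each side

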
def extract_text_from_url(url, use_jina=False, jina_api_key=None, snippet: Optional[str] = None):
    """
    Extract text from a URL. If a snippet is provided, extract the context related to it.

    Args:
        url (str): URL of a webpage or PDF.
        use_jina (bool): Whether to use Jina for extraction.
        jina_api_key (Optional[str]): API key for the Jina Reader service.
        snippet (Optional[str]): The snippet to search for.

    Returns:
        str: Extracted text or context.
    """
    try:
        print(f"extract_text_from_url use_jina: {use_jina}")
        if use_jina:
            jina_headers = {
                'Authorization': f'Bearer {jina_api_key}',
                'X-Return-Format': 'markdown',
            }
            response = requests.get(f'https://r.jina.ai/{url}', headers=jina_headers, timeout=20).text
            # Strip markdown link targets and collapse separator/space runs left by the converter.
            pattern = r"\(https?:.*?\)|\[https?:.*?\]"
            text = re.sub(pattern, "", response).replace('---', '-').replace('===', '=').replace('   ', ' ').replace('  ', ' ')
            print("Used Jina to extract text successfully")
        else:
            print("Not using Jina; fetching the page directly")
            response = session.get(url, timeout=20)
            response.raise_for_status()

            content_type = response.headers.get('Content-Type', '')
            if 'pdf' in content_type:
                print("Extracting text from PDF...")
                return extract_pdf_text(url)

            try:
                soup = BeautifulSoup(response.text, 'lxml')
            except Exception:
                print("lxml parser not found or failed, falling back to html.parser")
                soup = BeautifulSoup(response.text, 'html.parser')
            text = soup.get_text(separator=' ', strip=True)

        if snippet:
            success, context = extract_snippet_with_context(text, snippet)
            if success:
                return context
            else:
                return text
        else:
            # Without a snippet to anchor on, return a bounded prefix of the page text.
            return text[:8000]
    except requests.exceptions.HTTPError as http_err:
        return f"HTTP error occurred: {http_err}"
    except requests.exceptions.ConnectionError:
        return "Error: Connection error occurred"
    except requests.exceptions.Timeout:
        return "Error: Request timed out after 20 seconds"
    except Exception as e:
        return f"Unexpected error: {str(e)}"
def fetch_page_content(urls, max_workers=4, use_jina=False, jina_api_key=None, snippets: Optional[dict] = None):
    """
    Concurrently fetch content from multiple URLs.

    Extraction runs in a ThreadPoolExecutor so that slow pages do not block the rest.

    Args:
        urls (list): List of URLs to scrape.
        max_workers (int): Maximum number of concurrent threads.
        use_jina (bool): Whether to use Jina for extraction.
        jina_api_key (Optional[str]): API key for the Jina Reader service.
        snippets (Optional[dict]): A dictionary mapping URLs to their respective snippets.

    Returns:
        dict: A dictionary mapping URLs to the extracted content or context.
    """
    results = {}
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {
            executor.submit(extract_text_from_url, url, use_jina, jina_api_key, snippets.get(url) if snippets else None): url
            for url in urls
        }
        for future in tqdm(as_completed(futures), desc="Fetching URLs", total=len(urls)):
            url = futures[future]
            try:
                data = future.result()
                results[url] = data
            except Exception as exc:
                results[url] = f"Error fetching {url}: {exc}"
            time.sleep(0.2)  # Small pause between result handoffs to soften the request rate.
    return results
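

# Usage sketch (illustrative URLs):
#     pages = fetch_page_content(
#         ["https://example.com", "https://example.org"],
#         max_workers=4,
#         snippets={"https://example.com": "Example Domain"},
#     )
#     for url, content in pages.items():
#         print(url, content[:100])

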
# Route search API requests through a local proxy; adjust or remove as needed.
proxies = {
    "http": "http://127.0.0.1:7880",
    "https": "http://127.0.0.1:7880"
}


def bing_web_search(query, subscription_key, endpoint, market='en-US', language='en', timeout=20):
    """
    Perform a web search with a set timeout and return the JSON results.

    Note: the request format (X-API-KEY header, JSON POST body) follows the
    Serper.dev search API, so `endpoint` should be a Serper-compatible URL.

    Args:
        query (str): Search query.
        subscription_key (str): API key for the search service.
        endpoint (str): Endpoint of the search API.
        market (str): Market, e.g., "en-US" or "zh-CN".
        language (str): Language of the results, e.g., "en".
        timeout (int or float or tuple): Request timeout in seconds.
            Can be a float representing the total timeout,
            or a tuple (connect timeout, read timeout).

    Returns:
        dict: JSON response of the search results. Returns an empty dict if the
        request keeps failing or timing out after repeated retries.
    """
    payload = json.dumps({
        "q": query,
        "mkt": market,
        "setLang": language,
        "textDecorations": True,
        "textFormat": "HTML"
    })

    headers = {
        'X-API-KEY': subscription_key,
        'Content-Type': 'application/json'
    }
    error_cnt = 0
    while True:
        if error_cnt == 20:
            print(f"query: {query} has been tried {error_cnt} times without success; skipping it.")
            break
        try:
            response = requests.post(endpoint, headers=headers, data=payload, proxies=proxies, timeout=timeout)
            response.raise_for_status()
            search_results = response.json()
            return search_results
        except Timeout:
            error_cnt += 1
            print(f"error_cnt: {error_cnt}, web search request timed out ({timeout} seconds) for query: {query}")
            time.sleep(5)
        except requests.exceptions.RequestException as e:
            error_cnt += 1
            print(f"error_cnt: {error_cnt}, error occurred during web search request: {e}, payload: {payload}")
            time.sleep(5)
    return {}  # Give up after the retry budget is spent.
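

# Usage sketch (hypothetical key; Serper-compatible endpoint):
#     results = bing_web_search("dimethyl fumarate", "YOUR_API_KEY",
#                               "https://google.serper.dev/search")
#     print(len(results.get("organic", [])))

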
def extract_pdf_text(url):
    """
    Extract text from a PDF.

    Args:
        url (str): URL of the PDF file.

    Returns:
        str: Extracted text content or error message.
    """
    try:
        response = session.get(url, timeout=20)
        if response.status_code != 200:
            return f"Error: Unable to retrieve the PDF (status code {response.status_code})"

        with pdfplumber.open(BytesIO(response.content)) as pdf:
            full_text = ""
            for page in pdf.pages:
                text = page.extract_text()
                if text:
                    full_text += text

        # Normalize whitespace and cap the output at the first 600 words.
        cleaned_text = ' '.join(full_text.split()[:600])
        return cleaned_text
    except requests.exceptions.Timeout:
        return "Error: Request timed out after 20 seconds"
    except Exception as e:
        return f"Error: {str(e)}"
def extract_relevant_info(search_results):
    """
    Extract relevant information from Serper-format search results.

    Args:
        search_results (dict): JSON response from the search API.

    Returns:
        list: A list of dictionaries containing the extracted information.
    """
    useful_info = []

    if 'organic' in search_results:
        for idx, result in enumerate(search_results['organic']):
            info = {
                'id': idx + 1,  # 1-based rank of the result
                'title': result.get('title', ''),
                'url': result.get('link', ''),
                'site_name': result.get('siteName', ''),
                'date': result.get('datePublished', '').split('T')[0],
                'snippet': result.get('snippet', ''),
                'context': ''  # Filled in later with page context
            }
            useful_info.append(info)

    return useful_info
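

# Example output shape (illustrative values):
#     [{'id': 1, 'title': 'Dimethyl fumarate - Wikipedia',
#       'url': 'https://en.wikipedia.org/wiki/Dimethyl_fumarate',
#       'site_name': 'Wikipedia', 'date': '2024-01-01',
#       'snippet': '...', 'context': ''}]

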
if __name__ == "__main__":
    # Example query
    query = "Structure of dimethyl fumarate"

    # Read the API key from the environment rather than hard-coding a secret.
    BING_SUBSCRIPTION_KEY = os.environ.get("BING_SEARCH_V7_SUBSCRIPTION_KEY")
    if not BING_SUBSCRIPTION_KEY:
        raise ValueError("Please set the BING_SEARCH_V7_SUBSCRIPTION_KEY environment variable.")

    # Serper-compatible endpoint (see the bing_web_search docstring).
    bing_endpoint = "https://google.serper.dev/search"

    print("Performing Bing Web Search...")
    search_results = bing_web_search(query, BING_SUBSCRIPTION_KEY, bing_endpoint)

    print("Extracting relevant information from search results...")
    extracted_info = extract_relevant_info(search_results)

    print("Fetching and extracting context for each snippet...")
    for info in tqdm(extracted_info, desc="Processing Snippets"):
        full_text = extract_text_from_url(
            info['url'],
            use_jina=False,
            jina_api_key=os.environ.get("JINA_API_KEY"),  # only needed when use_jina=True
            snippet=info["snippet"],
        )
        if full_text and not full_text.startswith("Error"):
            success, context = extract_snippet_with_context(full_text, info['snippet'])
            if success:
                info['context'] = context
                print("-------------------")
                print(f"Snippet: {info['snippet']}\nContext: {context}")
            else:
                info['context'] = f"Could not extract context. Returning first 8000 chars: {full_text[:8000]}"
        else:
            info['context'] = f"Failed to fetch full text: {full_text}"