# app.py — Gradio Space: fetch a web page, extract its main content, render as Markdown.
import gradio as gr
import requests
import markdownify
import readabilipy.simple_json
from urllib.parse import urlparse, urlunparse
from protego import Protego
from typing import Tuple
# User agent announced on every outbound HTTP request
# (both page fetches and robots.txt probes).
DEFAULT_USER_AGENT = "Gradio Fetch App/1.0 (https://gradio.app)"
# --- Core helper functions ---
def extract_content_from_html(html: str) -> str:
    """Simplify raw HTML into Markdown text.

    Uses readabilipy to isolate the main article body, then markdownify to
    render it with ATX-style headings. Any failure is reported as a
    human-readable (Chinese) message string instead of raising, so the UI
    always has something to display.
    """
    try:
        article = readabilipy.simple_json.simple_json_from_html_string(
            html, use_readability=True
        )
        if not article or not article.get("content"):
            return "页面简化失败:无法从HTML中提取主要内容。"
        markdown_text = markdownify.markdownify(
            article["content"], heading_style=markdownify.ATX
        )
        return markdown_text if markdown_text.strip() else "页面简化后内容为空。"
    except Exception as e:
        return f"处理HTML时出错: {e}"
def get_robots_txt_url(url: str) -> str:
    """Return the well-known /robots.txt URL for the site hosting *url*."""
    parts = urlparse(url)
    # Keep only scheme + host; drop path, query and fragment.
    return urlunparse((parts.scheme, parts.netloc, "/robots.txt", "", "", ""))
def check_can_fetch(url: str, user_agent: str) -> Tuple[bool, str]:
    """Consult the site's robots.txt to decide whether *url* may be fetched.

    Returns:
        Tuple[bool, str]: ``(allowed, message)``. ``allowed`` is True when
        fetching should proceed — including when robots.txt is unreachable
        or missing (fail-open). ``message`` explains the decision in Chinese
        for display in the UI.
    """
    robots_url = get_robots_txt_url(url)
    try:
        resp = requests.get(robots_url, headers={"User-Agent": user_agent}, timeout=5)
    except requests.exceptions.RequestException as e:
        # Network trouble while probing robots.txt: fail open, keep fetching.
        return True, f"检查 {robots_url} 时发生网络错误: {e},将继续尝试抓取。"
    if resp.status_code >= 400:
        # A missing/forbidden robots.txt is treated as "no restrictions".
        return True, f"无法获取 {robots_url} (状态码: {resp.status_code}),将继续抓取。"
    rules = Protego.parse(resp.text)
    if rules.can_fetch(url, user_agent):
        return True, f"根据 {robots_url} 的规则,允许抓取。"
    return False, f"**抓取被拒绝**:该网站的 `robots.txt` 文件 ({robots_url}) 不允许我们的用户代理 ('{user_agent}') 访问此页面。"
# --- Main handler (Google-style docstring also serves as the MCP tool description) ---
def fetch_and_process_url(
    url: str,
    ignore_robots: bool,
    force_raw: bool,
    max_length: int,
    start_index: int
) -> str:
    """Fetch a web page and return its content, simplified to Markdown.

    Downloads the page at *url*, extracts the main article body and converts
    it to Markdown (unless *force_raw* is set or the response is not HTML),
    honours robots.txt unless told otherwise, and supports paging through
    long documents via *start_index* / *max_length*.

    Args:
        url (str): Full address to fetch; must start with http:// or https://.
        ignore_robots (bool): Skip the robots.txt permission check when True.
        force_raw (bool): Return raw HTML instead of simplified Markdown.
        max_length (int): Maximum number of characters to return per call.
        start_index (int): Character offset to start from (for paging).

    Returns:
        str: The processed page content, or a (Chinese) error message.
    """
    if not url or not url.strip().startswith(('http://', 'https://')):
        return "请输入一个有效的URL (以 http:// 或 https:// 开头)。"
    url = url.strip()
    # Bug fix: gr.Number / gr.Slider deliver floats, and slicing a str with a
    # float raises TypeError. Coerce to ints and clamp negatives to zero so a
    # bad offset can't produce a surprising negative-index slice.
    max_length = max(int(max_length), 0)
    start_index = max(int(start_index), 0)
    user_agent = DEFAULT_USER_AGENT
    if not ignore_robots:
        can_fetch, message = check_can_fetch(url, user_agent)
        if not can_fetch:
            return message
    try:
        response = requests.get(url, headers={"User-Agent": user_agent}, timeout=30, allow_redirects=True)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        return f"抓取URL时发生网络错误: {e}"
    page_raw = response.text
    content_type = response.headers.get("content-type", "").lower()
    is_html = "text/html" in content_type
    prefix = ""
    if is_html and not force_raw:
        content = extract_content_from_html(page_raw)
    else:
        # Raw mode, or a non-HTML payload: show the body as-is and warn
        # about the content type when it is not HTML.
        content = page_raw
        if not is_html:
            prefix = f"**注意**: 内容类型是 `{content_type}`,已显示为原始内容。\n\n---\n\n"
    original_length = len(content)
    if start_index >= original_length:
        content_to_show = "错误:起始索引超出了内容总长度。"
    else:
        end_index = start_index + max_length
        content_to_show = content[start_index:end_index]
        remaining_chars = original_length - (start_index + len(content_to_show))
        if remaining_chars > 0:
            # Tell the user how to page to the rest of the document.
            next_start = start_index + len(content_to_show)
            truncation_message = (
                f"\n\n---\n\n**内容被截断**。 "
                f"已显示字符数: {len(content_to_show)},"
                f"剩余字符数: {remaining_chars}。"
                f"要获取更多内容,请将**起始索引**设置为 **{next_start}** 再试。"
            )
            content_to_show += truncation_message
    return prefix + content_to_show
# --- Gradio UI definition ---
with gr.Blocks(theme=gr.themes.Soft()) as app:
    gr.Markdown("# 网页内容抓取与简化工具")
    gr.Markdown("输入一个网址,工具会抓取其内容,并默认提取正文转换为Markdown格式。")
    # Main input row: URL textbox plus the fetch trigger.
    with gr.Row():
        url_input = gr.Textbox(label="URL", placeholder="例如: https://www.google.com/about", scale=4)
        fetch_button = gr.Button("抓取内容", variant="primary", scale=1)
    # Advanced options, collapsed by default.
    with gr.Accordion("高级选项", open=False):
        with gr.Row():
            ignore_robots_checkbox = gr.Checkbox(label="忽略 robots.txt 限制", value=False)
            raw_checkbox = gr.Checkbox(label="显示原始HTML (不简化)", value=False)
        with gr.Row():
            # NOTE(review): these components emit floats at runtime; the
            # handler receives them as-is — confirm it tolerates float indices.
            max_length_slider = gr.Slider(minimum=500, maximum=50000, value=8000, step=500, label="返回内容的最大长度 (字符数)")
            start_index_number = gr.Number(label="起始索引", value=0)
    output_markdown = gr.Markdown(label="抓取结果")
    # Wire the button to the handler; input order must match the
    # fetch_and_process_url parameter order.
    fetch_button.click(
        fn=fetch_and_process_url,
        inputs=[url_input, ignore_robots_checkbox, raw_checkbox, max_length_slider, start_index_number],
        outputs=output_markdown
    )
if __name__ == "__main__":
    # mcp_server=True also exposes the handler as an MCP tool
    # (its docstring becomes the tool description).
    app.launch(mcp_server=True)