# HuggingFace Spaces app (status residue from page export: "Sleeping")
import streamlit as st
import requests
from bs4 import BeautifulSoup
import pandas as pd

st.set_page_config(page_title="台酒商品價格擷取器", layout="wide")
st.title("📦 台酒商品價格擷取工具")

# Product pages to scrape (URL-encoded Chinese product names on eshop.ttl.com.tw).
urls = [
    "https://eshop.ttl.com.tw/%E5%86%B7%E8%97%8F%E5%8F%B0%E9%85%92%E7%B4%B9%E8%88%88%E5%8E%9F%E5%91%B3%E9%A6%99%E8%85%B8%E7%A6%AE%E7%9B%92-%E7%B4%B9%E8%88%88%E5%8E%9F%E5%91%B32%E5%8C%85",
    "https://eshop.ttl.com.tw/%E5%8F%B0%E9%85%92%E8%8A%B1%E9%9B%95%E9%9B%9E%E9%BA%B54%E7%A2%97",
    "https://eshop.ttl.com.tw/%E6%BC%AB%E7%85%AE%E9%A3%9F%E5%85%89-%E9%B9%BD%E9%BA%B4%E8%8A%9D%E9%BA%BB%E6%8B%8C%E9%BA%B54%E5%8C%85_%E8%A2%8B%E5%85%A8%E7%B4%A0",
]


def _scrape_product(url: str) -> dict:
    """Fetch one product page and extract its title and price.

    Returns a row dict with keys 標題/價格/網址. On any failure
    (network error, HTTP error status, or changed page markup) a
    placeholder row is returned so one bad page doesn't abort the run.
    """
    try:
        response = requests.get(url, timeout=10)
        # Treat HTTP error pages (404/500/...) as failures instead of
        # silently parsing the error HTML.
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")
        # .find() returns None when the selectors no longer match; the
        # resulting AttributeError is caught below like any other failure.
        title = soup.find("h2", class_="main_title").get_text(strip=True)
        price = soup.find("span", class_="number").get_text(strip=True)
        return {"標題": title, "價格": price, "網址": url}
    except Exception:
        return {"標題": "錯誤: 無法擷取", "價格": "-", "網址": url}


with st.spinner("正在擷取資料..."):
    results = [_scrape_product(url) for url in urls]

df = pd.DataFrame(results)
st.success("✅ 擷取完成")
st.dataframe(df, use_container_width=True)

# utf-8-sig prepends a BOM so Excel detects UTF-8 and renders the
# Chinese column headers correctly.
csv = df.to_csv(index=False).encode("utf-8-sig")
st.download_button(
    label="📥 下載CSV",
    data=csv,
    file_name="ttl_products.csv",
    mime="text/csv",
)