Update app.py
app.py CHANGED
@@ -1,21 +1,246 @@
-2. Specify the **sheet name to read** (e.g. `代表地番`; auto-selected if unspecified)
-3. Adjust the access interval with **download interval (seconds)** (default: 1.0 s)
-4. When finished, the following artifacts can be downloaded:
-   - `combined_fit.xlsx` (merged Excel)
-   - `combined_fit.parquet` (the same content in Parquet)
-   - `raw_excels.zip` (full set of downloaded prefecture files)
-   - `combined_head.csv` (preview of the first 1000 rows)
+import os
+import re
+import time
+import zipfile
+import unicodedata
+from io import BytesIO
+from urllib.parse import urljoin, urlparse, parse_qs, unquote
+
+import gradio as gr
+import requests
+import pandas as pd
+from bs4 import BeautifulSoup
+
+PUBLIC_URL = "https://www.fit-portal.go.jp/PublicInfo"
+OUTDIR = "data_fit"  # saved under the repo root (the Space's persistent area)
+
+# -------------------- Utilities --------------------
+
+def normalize_filename(name: str) -> str:
+    name = unicodedata.normalize("NFKC", name)
+    name = re.sub(r'[\\/:*?"<>|]+', "_", name)
+    name = name.strip()
+    return name or "file"
+
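+# For reference (illustrative inputs, not taken from the site):
+#   normalize_filename('北海道:2024?.xlsx') -> '北海道_2024_.xlsx'
+#   normalize_filename('')                  -> 'file'
+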
+def guess_filename_from_headers(resp: requests.Response, fallback: str) -> str:
+    cd = resp.headers.get("Content-Disposition", "")
+    m = re.search(r'filename\*?=(?:UTF-8\'\')?"?([^";]+)"?', cd, flags=re.IGNORECASE)
+    if m:
+        try:
+            fn = unquote(m.group(1))
+        except Exception:
+            fn = m.group(1)
+        return normalize_filename(fn)
+    return normalize_filename(fallback)
+
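+# Sketch of what the header parsing above handles (example header assumed,
+# not captured from the server):
+#   Content-Disposition: attachment; filename*=UTF-8''%E5%8C%97%E6%B5%B7%E9%81%93.xlsx
+# -> unquote() yields '北海道.xlsx', which normalize_filename() then sanitizes.
+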
+def is_pref_link(a_tag) -> bool:
+    href = a_tag.get("href") or ""
+    return "servlet.FileDownload" in href and "file=" in href
+
+def extract_pref_name(a_tag) -> str:
+    txt = (a_tag.get_text() or "").strip()
+    return txt or "pref"
+
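+# Assumption behind the two helpers above: each prefecture row on PublicInfo
+# links to a URL containing "servlet.FileDownload" with a "file=" query
+# parameter, and the anchor text is the prefecture name.
+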
+def pick_sheet_name(xls_path: str, preferred: str | None) -> str | None:
+    try:
+        xl = pd.ExcelFile(xls_path)
+        if preferred and preferred in xl.sheet_names:
+            return preferred
+        # Prefer the 代表地番 (representative parcel number) sheet
+        for candidate in ["代表地番", "代表地番のみ", "代表地番シート"]:
+            if candidate in xl.sheet_names:
+                return candidate
+        return xl.sheet_names[0] if xl.sheet_names else None
+    except Exception:
+        return None
+
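+# Selection order illustrated (hypothetical workbooks):
+#   sheet_names = ['全地番', '代表地番'] -> returns '代表地番' (preferred candidate)
+#   sheet_names = ['Sheet1']            -> returns 'Sheet1' (first-sheet fallback)
+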
+def collect_pref_links(session: requests.Session) -> list[dict]:
+    r = session.get(PUBLIC_URL, timeout=60)
+    r.raise_for_status()
+    soup = BeautifulSoup(r.text, "html.parser")
+    links = []
+    for a in soup.find_all("a"):
+        if is_pref_link(a):
+            links.append({
+                "pref": extract_pref_name(a),
+                "href": urljoin(PUBLIC_URL, a.get("href")),
+            })
+    # Drop duplicates
+    seen = set()
+    uniq = []
+    for item in links:
+        key = (item["pref"], item["href"])
+        if key not in seen:
+            seen.add(key)
+            uniq.append(item)
+    return uniq
+
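+# Expected shape of the result (values illustrative only):
+#   [{"pref": "北海道",
+#     "href": "https://www.fit-portal.go.jp/servlet/servlet.FileDownload?file=..."},
+#    ...]
+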
+def download_one(session: requests.Session, url: str, outdir: str, pref: str) -> str:
+    os.makedirs(outdir, exist_ok=True)
+    qs = parse_qs(urlparse(url).query)
+    file_id = (qs.get("file", ["unknown"])[0])[:18]
+    with session.get(url, timeout=180, stream=True) as r:
+        r.raise_for_status()
+        fname = guess_filename_from_headers(r, f"{pref}_{file_id}.xlsx")
+        path = os.path.join(outdir, fname)
+        with open(path, "wb") as f:
+            for chunk in r.iter_content(chunk_size=1 << 15):
+                if chunk:
+                    f.write(chunk)
+    return path
+
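+# Notes on download_one(): the response is streamed in 32 KiB chunks
+# (chunk_size=1 << 15) so large workbooks never sit fully in memory, and the
+# saved name falls back to "<pref>_<file id>.xlsx" when no usable
+# Content-Disposition header is present.
+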
+def load_excel(xls_path: str, sheet_pref: str | None, pref_name: str) -> pd.DataFrame | None:
+    sheet = pick_sheet_name(xls_path, sheet_pref)
+    if not sheet:
+        return None
+    try:
+        df = pd.read_excel(xls_path, sheet_name=sheet, engine="openpyxl", dtype=str)
+        # Trim surrounding whitespace
+        for c in df.select_dtypes(include=["object"]).columns:
+            df[c] = df[c].str.strip()
+        df.insert(0, "都道府県", pref_name)
+        df.insert(1, "元ファイル", os.path.basename(xls_path))
+        df.insert(2, "読込シート", sheet)
+        return df
+    except Exception:
+        return None
+
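+# load_excel() reads everything as dtype=str so codes with leading zeros
+# survive intact, then prepends three provenance columns
+# (都道府県 / 元ファイル / 読込シート) so each row stays traceable after the merge.
+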
+def zip_paths(paths: list[str], out_zip: str) -> str:
+    with zipfile.ZipFile(out_zip, "w", compression=zipfile.ZIP_DEFLATED) as z:
+        for p in paths:
+            if os.path.exists(p):
+                z.write(p, arcname=os.path.basename(p))
+    return out_zip
+
+# -------------------- Main job (called from Gradio) --------------------
+
+def run_job(sheet_name, sleep_sec, limit, re_download, progress=gr.Progress(track_tqdm=False)):
+    """
+    sheet_name: e.g. "代表地番"; blank means auto-select
+    sleep_sec: interval between downloads
+    limit: only the first N entries (for testing)
+    re_download: re-fetch even if the file already exists
+    """
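+    # Return contract: (message, xlsx path, parquet path, zip path, preview
+    # csv path); the four paths are None on failure, matching the five Gradio
+    # outputs wired up below.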
+    progress(0, desc="Initializing…")
+
+    # Polite headers (to avoid complaints about a bare default User-Agent)
+    session = requests.Session()
+    session.headers.update({
+        "User-Agent": "Mozilla/5.0 (compatible; FITCollector/1.0; +https://huggingface.co/spaces)",
+        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+    })
+
+    # 1) Collect links
+    links = collect_pref_links(session)
+    if not links:
+        return ("Failed to detect the prefecture file links. The page layout may have changed, or access may be temporarily blocked.",
+                None, None, None, None)
+
+    if limit and limit > 0:
+        links = links[:int(limit)]
+
+    progress(0.1, desc=f"Found {len(links)} links")
+
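+    # Progress budget for the steps below (informal): downloads fill
+    # 0.1-0.7, reading fills 0.72-0.9, and 1.0 marks completion.
+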
+    # 2) Download
+    downloaded = []
+    for i, item in enumerate(links, start=1):
+        progress(0.1 + 0.6 * i / max(1, len(links)),
+                 desc=f"Downloading {i}/{len(links)}: {item['pref']}")
+        try:
+            # Reuse an existing file when possible (speed-up):
+            # prefer an existing xlsx for the same prefecture (loose match)
+            existing = None
+            if not re_download and os.path.isdir(OUTDIR):
+                for fn in os.listdir(OUTDIR):
+                    if fn.lower().endswith(".xlsx") and item["pref"] in fn:
+                        existing = os.path.join(OUTDIR, fn)
+                        break
+            if existing and os.path.exists(existing):
+                path = existing
+            else:
+                path = download_one(session, item["href"], OUTDIR, item["pref"])
+                time.sleep(float(sleep_sec))
+            downloaded.append({"pref": item["pref"], "path": path})
+        except Exception as e:
+            print(f"[WARN] download failed: {item['pref']} {e}")
+
+    if not downloaded:
+        return ("All downloads failed. Please check the network and any restrictions on the site side.",
+                None, None, None, None)
+
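+    # Failures are tolerated per prefecture: a single bad download is only
+    # logged above, and the run aborts only when nothing at all was fetched.
+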
+    # 3) Merge
+    frames = []
+    for i, it in enumerate(downloaded, start=1):
+        progress(0.72 + 0.18 * i / max(1, len(downloaded)),
+                 desc=f"Reading {i}/{len(downloaded)}: {os.path.basename(it['path'])}")
+        df = load_excel(it["path"], sheet_name if sheet_name else None, it["pref"])
+        if df is not None and len(df) > 0:
+            frames.append(df)
+
+    if not frames:
+        return ("The Excel files were downloaded, but none of them yielded readable data (please double-check the sheet name).",
+                None, None, None, None)
+
+    combined = pd.concat(frames, ignore_index=True)
+
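+    # Note: pd.concat above takes the union of columns across prefectures;
+    # columns missing from a given file are filled with NaN.
+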
+    # 4) Write outputs
+    os.makedirs(OUTDIR, exist_ok=True)
+    out_xlsx = os.path.join(OUTDIR, "combined_fit.xlsx")
+    out_parq = os.path.join(OUTDIR, "combined_fit.parquet")
+
+    with pd.ExcelWriter(out_xlsx, engine="openpyxl") as w:
+        combined.to_excel(w, index=False, sheet_name="combined")
+    combined.to_parquet(out_parq, index=False)
+
+    # 5) Extra: ZIP of the raw files, for users who also want
+    # the per-prefecture Excel files themselves
+    raw_zip = os.path.join(OUTDIR, "raw_excels.zip")
+    zip_paths([it["path"] for it in downloaded], raw_zip)
+
+    # 6) Done
+    progress(1.0, desc=f"Finished ({len(combined):,} rows)")
+
+    # gr.File components accept a path and offer it for download
+    msg = f"✅ Merge complete: rows = {len(combined):,}\n" \
+          f"・Excel: combined_fit.xlsx\n" \
+          f"・Parquet: combined_fit.parquet\n" \
+          f"・Raw ZIP: raw_excels.zip\n"
+
+    # Lightweight preview CSV (first rows only)
+    preview_csv = os.path.join(OUTDIR, "combined_head.csv")
+    combined.head(1000).to_csv(preview_csv, index=False)
+
+    return (msg, out_xlsx, out_parq, raw_zip, preview_csv)
+
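+# Reloading the artifacts later (illustrative sketch; paths assume OUTDIR above):
+#   import pandas as pd
+#   df = pd.read_parquet("data_fit/combined_fit.parquet")
+#   df.groupby("都道府県").size()   # rows per prefecture
+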
+# -------------------- Gradio UI --------------------
+
+with gr.Blocks(title="FIT public data (per-prefecture Excel): bulk download & merge") as demo:
+    gr.Markdown(
+        """
+        # FIT public data (per-prefecture Excel): bulk download & merge
+        - Fetches the per-prefecture Excel files from the public page and concatenates them vertically.
+        - A **sleep interval** between requests keeps the load on the server low.
+        - Outputs: `combined_fit.xlsx` / `combined_fit.parquet` / the raw files as `raw_excels.zip`
+        """
+    )
+    with gr.Row():
+        sheet = gr.Textbox(label="Sheet name to read (blank = auto)", placeholder="e.g. 代表地番 / 全地番")
+        sleep = gr.Slider(0.0, 5.0, value=1.0, step=0.1, label="Download interval (seconds)")
+    with gr.Row():
+        limit = gr.Number(value=None, precision=0, label="First N prefectures only (for testing; blank = all)")
+        reget = gr.Checkbox(label="Re-download even if files already exist", value=False)
+
+    run_btn = gr.Button("Run", variant="primary")
+    out_msg = gr.Markdown()
+    out_xlsx = gr.File(label="Merged Excel (combined_fit.xlsx)")
+    out_parq = gr.File(label="Merged Parquet (combined_fit.parquet)")
+    out_zip = gr.File(label="Downloaded prefecture Excel files (zip)")
+    out_preview = gr.File(label="Preview of the first 1000 rows (CSV)")
+
+    run_btn.click(fn=run_job,
+                  inputs=[sheet, sleep, limit, reget],
+                  outputs=[out_msg, out_xlsx, out_parq, out_zip, out_preview])
+
+if __name__ == "__main__":
+    # Enable the queue so the app stays stable with multiple concurrent users
+    demo.queue(max_size=20).launch()