Create app.py
app.py
ADDED
import gradio as gr
import requests
from bs4 import BeautifulSoup
import urllib.parse  # used to resolve the iframe path to an absolute URL
import re
import logging
import tempfile
import pandas as pd
import mecab  # python-mecab-ko library
import os
import time
import hmac
import hashlib
import base64

# Debugging (log) helper
def debug_log(message: str):
    print(f"[DEBUG] {message}")

# =============================================================================
# [Base code]: extract the title and body text from a Naver blog post
# =============================================================================
def scrape_naver_blog(url: str) -> str:
    debug_log("scrape_naver_blog started")
    debug_log(f"Requested URL: {url}")

    # Browser-like headers (to avoid being blocked as a crawler)
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/96.0.4664.110 Safari/537.36"
        )
    }

    try:
        # 1) Request the main Naver blog page
        response = requests.get(url, headers=headers, timeout=10)
        debug_log("HTTP GET request (main page) complete")

        if response.status_code != 200:
            debug_log(f"Request failed, status code: {response.status_code}")
            return f"An error occurred. Status code: {response.status_code}"

        soup = BeautifulSoup(response.text, "html.parser")
        debug_log("HTML parsing (main page) complete")

        # 2) Find the iframe tag (Naver blogs render the post inside iframe#mainFrame)
        iframe = soup.select_one("iframe#mainFrame")
        if not iframe:
            debug_log("Could not find the iframe#mainFrame tag.")
            return "Could not find the content iframe."

        iframe_src = iframe.get("src")
        if not iframe_src:
            debug_log("The iframe has no src attribute.")
            return "Could not find the src of the content iframe."

        # 3) If the iframe src is a relative path, resolve it to an absolute URL
        parsed_iframe_url = urllib.parse.urljoin(url, iframe_src)
        debug_log(f"iframe page request URL: {parsed_iframe_url}")

        # 4) Request the iframe page
        iframe_response = requests.get(parsed_iframe_url, headers=headers, timeout=10)
        debug_log("HTTP GET request (iframe page) complete")

        if iframe_response.status_code != 200:
            debug_log(f"iframe request failed, status code: {iframe_response.status_code}")
            return f"An error occurred in the iframe. Status code: {iframe_response.status_code}"

        iframe_soup = BeautifulSoup(iframe_response.text, "html.parser")
        debug_log("HTML parsing (iframe page) complete")

        # Extract the title
        title_div = iframe_soup.select_one('.se-module.se-module-text.se-title-text')
        title = title_div.get_text(strip=True) if title_div else "Could not find the title."
        debug_log(f"Extracted title: {title}")

        # Extract the body
        content_div = iframe_soup.select_one('.se-main-container')
        if content_div:
            content = content_div.get_text("\n", strip=True)
        else:
            content = "Could not find the body."
        debug_log("Body extraction complete")

        # Combine the results
        result = f"[Title]\n{title}\n\n[Body]\n{content}"
        debug_log("Title and body combined, ready to return")
        return result

    except Exception as e:
        debug_log(f"Error occurred: {str(e)}")
        return f"An error occurred while scraping: {str(e)}"

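# Usage sketch (assumptions: network access is available, the post is public,
# and Naver still serves SmartEditor ONE markup; the CSS selectors above may
# need updating if the blog layout changes):
#
#   text = scrape_naver_blog("https://blog.naver.com/ssboost/222983068507")
#   print(text[:200])   # "[Title]\n...\n\n[Body]\n..."
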
# =============================================================================
# [Reference code 1]: morphological analysis function (using Mecab)
# =============================================================================
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def analyze_text(text: str):
    logger.debug("Original text: %s", text)

    # 1. Keep Korean characters only (strip spaces, English, symbols, etc.)
    filtered_text = re.sub(r'[^κ°€-힣]', '', text)
    logger.debug("Filtered text (Korean only, spaces removed): %s", filtered_text)

    if not filtered_text:
        logger.debug("No valid Korean text remains.")
        return pd.DataFrame(columns=["Word", "Frequency"]), ""

    # 2. Morphological analysis with Mecab (keep nouns and compound nouns only)
    mecab_instance = mecab.MeCab()  # create the analyzer instance
    tokens = mecab_instance.pos(filtered_text)
    logger.debug("Morphological analysis result: %s", tokens)

    freq = {}
    for word, pos in tokens:
        if word and word.strip():
            if pos.startswith("NN"):  # noun part-of-speech tags
                freq[word] = freq.get(word, 0) + 1
                logger.debug("Word: %s, POS: %s, current count: %d", word, pos, freq[word])

    # 3. Sort by frequency in descending order
    sorted_freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)
    logger.debug("Word frequencies sorted in descending order: %s", sorted_freq)

    # 4. Build the result DataFrame
    df = pd.DataFrame(sorted_freq, columns=["Word", "Frequency"])
    logger.debug("Result DataFrame created, shape: %s", df.shape)

    # 5. Write an Excel file (stored as a temporary file)
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".xlsx")
    df.to_excel(temp_file.name, index=False, engine='openpyxl')
    temp_file.close()
    logger.debug("Excel file created: %s", temp_file.name)

    return df, temp_file.name

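# Usage sketch (assumptions: python-mecab-ko and its Korean dictionary are
# installed; "NN"-prefixed tags cover the tagset's noun classes such as
# NNG/NNP; the input string below is hypothetical):
#
#   df, xlsx_path = analyze_text("였늘 날씨가 정말 μ’‹μŠ΅λ‹ˆλ‹€")
#   print(df.head())    # Word / Frequency, sorted by descending frequency
#   print(xlsx_path)    # path to a temporary .xlsx file
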
# =============================================================================
# [Reference code 2]: keyword search volume and blog post count lookup
# =============================================================================
def generate_signature(timestamp, method, uri, secret_key):
    message = f"{timestamp}.{method}.{uri}"
    digest = hmac.new(secret_key.encode("utf-8"), message.encode("utf-8"), hashlib.sha256).digest()
    return base64.b64encode(digest).decode()

def get_header(method, uri, api_key, secret_key, customer_id):
    timestamp = str(round(time.time() * 1000))
    signature = generate_signature(timestamp, method, uri, secret_key)
    return {
        "Content-Type": "application/json; charset=UTF-8",
        "X-Timestamp": timestamp,
        "X-API-KEY": api_key,
        "X-Customer": str(customer_id),
        "X-Signature": signature
    }

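# Signature sketch: as implemented above, each request is signed with
# HMAC-SHA256 over "{timestamp}.{method}.{uri}" using the account secret key,
# then base64-encoded. Example with hypothetical credentials:
#
#   sig = generate_signature("1700000000000", "GET", "/keywordstool", "my-secret")
#   headers = get_header("GET", "/keywordstool", "my-api-key", "my-secret", "1234567")
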
def fetch_related_keywords(keyword):
    API_KEY = os.environ["NAVER_API_KEY"]
    SECRET_KEY = os.environ["NAVER_SECRET_KEY"]
    CUSTOMER_ID = os.environ["NAVER_CUSTOMER_ID"]

    BASE_URL = "https://api.naver.com"
    uri = "/keywordstool"
    method = "GET"
    headers = get_header(method, uri, API_KEY, SECRET_KEY, CUSTOMER_ID)
    params = {
        "hintKeywords": [keyword],
        "showDetail": "1"
    }
    response = requests.get(BASE_URL + uri, params=params, headers=headers, timeout=10)
    data = response.json()
    if "keywordList" not in data:
        return pd.DataFrame()
    df = pd.DataFrame(data["keywordList"])
    if len(df) > 100:
        df = df.head(100)

    def parse_count(x):
        # counts may arrive as strings with commas or non-numeric markers
        try:
            return int(str(x).replace(",", ""))
        except (ValueError, TypeError):
            return 0

    df["PC Monthly Searches"] = df["monthlyPcQcCnt"].apply(parse_count)
    df["Mobile Monthly Searches"] = df["monthlyMobileQcCnt"].apply(parse_count)
    df["Total Monthly Searches"] = df["PC Monthly Searches"] + df["Mobile Monthly Searches"]
    df.rename(columns={"relKeyword": "Keyword"}, inplace=True)
    result_df = df[["Keyword", "PC Monthly Searches", "Mobile Monthly Searches", "Total Monthly Searches"]]
    return result_df

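# Usage sketch (assumptions: NAVER_API_KEY, NAVER_SECRET_KEY and
# NAVER_CUSTOMER_ID hold valid Naver SearchAd credentials):
#
#   df = fetch_related_keywords("맛집")   # hypothetical keyword
#   print(df.head())   # Keyword plus PC / Mobile / Total monthly searches
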
def fetch_blog_count(keyword):
    client_id = os.environ["NAVER_SEARCH_CLIENT_ID"]
    client_secret = os.environ["NAVER_SEARCH_CLIENT_SECRET"]
    url = "https://openapi.naver.com/v1/search/blog.json"
    headers = {
        "X-Naver-Client-Id": client_id,
        "X-Naver-Client-Secret": client_secret
    }
    params = {"query": keyword, "display": 1}
    response = requests.get(url, headers=headers, params=params, timeout=10)
    if response.status_code == 200:
        data = response.json()
        return data.get("total", 0)
    else:
        return 0

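# Usage sketch (assumptions: NAVER_SEARCH_CLIENT_ID/SECRET hold valid Naver
# Open API credentials; "total" is the reported number of matching blog posts):
#
#   count = fetch_blog_count("제주 맛집")   # hypothetical keyword
#   print(count)
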
def create_excel_file(df):
    with tempfile.NamedTemporaryFile(suffix=".xlsx", delete=False) as tmp:
        excel_path = tmp.name
    df.to_excel(excel_path, index=False)
    return excel_path

def process_keyword(keywords: str, include_related: bool):
    """
    Splits the input into one keyword per line, looks up search volume for
    each keyword via the Naver SearchAd API, optionally appends related
    keywords for the first keyword, then attaches the blog post count for
    each keyword and returns a DataFrame plus an Excel file path.
    """
    input_keywords = [k.strip() for k in keywords.splitlines() if k.strip()]
    result_dfs = []

    for idx, kw in enumerate(input_keywords):
        df_kw = fetch_related_keywords(kw)
        if df_kw.empty:
            continue
        row_kw = df_kw[df_kw["Keyword"] == kw]
        if not row_kw.empty:
            result_dfs.append(row_kw)
        else:
            result_dfs.append(df_kw.head(1))
        if include_related and idx == 0:
            df_related = df_kw[df_kw["Keyword"] != kw]
            if not df_related.empty:
                result_dfs.append(df_related)

    if result_dfs:
        result_df = pd.concat(result_dfs, ignore_index=True)
        result_df.drop_duplicates(subset=["Keyword"], inplace=True)
    else:
        result_df = pd.DataFrame(columns=["Keyword", "PC Monthly Searches", "Mobile Monthly Searches", "Total Monthly Searches"])

    result_df["Blog Post Count"] = result_df["Keyword"].apply(fetch_blog_count)
    result_df.sort_values(by="Total Monthly Searches", ascending=False, inplace=True)

    return result_df, create_excel_file(result_df)

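# Usage sketch (assumptions: all NAVER_* environment variables above are set;
# the keywords below are hypothetical):
#
#   df, xlsx = process_keyword("맛집\n제주 맛집", include_related=True)
#   # df columns: Keyword, PC/Mobile/Total Monthly Searches, Blog Post Count,
#   # sorted by Total Monthly Searches in descending order
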
# =============================================================================
# Integrated pipeline: run morphological analysis on the blog content (text),
# then attach search volume and blog post counts for each keyword and return
# the final result.
# =============================================================================
def process_blog_content(text: str):
    debug_log("process_blog_content started")
    # 1. Run morphological analysis (uses [Reference code 1])
    df_morph, morph_excel = analyze_text(text)
    debug_log("Morphological analysis complete")

    if df_morph.empty:
        debug_log("Morphological analysis returned no results")
        return df_morph, None

    # 2. Collect the analyzed words (one per line, for the keyword lookup)
    keywords = "\n".join(df_morph["Word"].tolist())
    debug_log(f"Extracted word list: {keywords}")

    # 3. Look up search volume and blog post counts (uses [Reference code 2])
    df_keyword, keyword_excel = process_keyword(keywords, include_related=False)
    debug_log("Keyword lookup complete")

    # 4. Merge the morphology results with the keyword info on the word
    df_merged = pd.merge(df_morph, df_keyword, left_on="Word", right_on="Keyword", how="left")
    debug_log("Data merge complete")
    df_merged.drop(columns=["Keyword"], inplace=True)

    # 5. Write the merged result to an Excel file
    merged_excel = create_excel_file(df_merged)
    debug_log(f"Merged Excel file created: {merged_excel}")

    return df_merged, merged_excel

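# Usage sketch: the full pipeline can also be driven without the UI
# (assumption: the URL is any public Naver blog post):
#
#   content = scrape_naver_blog("https://blog.naver.com/ssboost/222983068507")
#   df, xlsx = process_blog_content(content)
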
# =============================================================================
# Gradio interface (Hugging Face Spaces Gradio environment)
# =============================================================================
with gr.Blocks() as demo:
    gr.Markdown("# Blog Post Morphological Analysis and Keyword Lookup")

    with gr.Tab("Blog content input and scraping"):
        with gr.Row():
            blog_url = gr.Textbox(label="Naver blog link", placeholder="e.g. https://blog.naver.com/ssboost/222983068507")
            fetch_button = gr.Button("Fetch blog content")
        blog_content = gr.Textbox(label="Blog content (title and body)", lines=10, placeholder="Fetch the blog content or paste it here directly.")
        # Clicking 'Fetch blog content' runs the scraper and fills blog_content
        fetch_button.click(fn=scrape_naver_blog, inputs=blog_url, outputs=blog_content)

    with gr.Tab("Run morphological analysis"):
        with gr.Row():
            analysis_button = gr.Button("Analyze morphemes")
        # interactive=True so the analysis result can be edited in place
        output_table = gr.Dataframe(label="Analysis result (morphemes and keyword info)", interactive=True)
        output_file = gr.File(label="Excel download")
        # Clicking 'Analyze morphemes' runs process_blog_content
        analysis_button.click(fn=process_blog_content, inputs=blog_content, outputs=[output_table, output_file])

if __name__ == "__main__":
    debug_log("Launching Gradio app")
    demo.launch()
    debug_log("Gradio app stopped")