| | import os |
| | import time |
| | import hmac |
| | import hashlib |
| | import base64 |
| | import requests |
| | import pandas as pd |
| | import tempfile |
| | import gradio as gr |
| |
|
| | |
def generate_signature(timestamp, method, uri, secret_key):
    """Build the Naver ads-API request signature.

    Signs the string "{timestamp}.{method}.{uri}" with HMAC-SHA256 using
    *secret_key* and returns the digest base64-encoded as a str.
    """
    payload = ".".join((timestamp, method, uri)).encode("utf-8")
    mac = hmac.new(secret_key.encode("utf-8"), payload, hashlib.sha256)
    return base64.b64encode(mac.digest()).decode()
| |
|
def get_header(method, uri, api_key, secret_key, customer_id):
    """Assemble the authentication headers for a Naver ads-API request.

    The timestamp is epoch milliseconds; the signature covers the
    timestamp, HTTP method and request URI (see generate_signature).
    """
    ts = str(round(time.time() * 1000))
    return {
        "Content-Type": "application/json; charset=UTF-8",
        "X-Timestamp": ts,
        "X-API-KEY": api_key,
        "X-Customer": str(customer_id),
        "X-Signature": generate_signature(ts, method, uri, secret_key),
    }
| |
|
| | |
def fetch_related_keywords(keyword):
    """Query the Naver ads keyword tool for *keyword*.

    Returns a DataFrame with columns
    ["μ 보ν€μλ", "PCμκ²μλ", "λͺ¨λ°μΌμκ²μλ", "ν νμκ²μλ"]
    (keyword plus monthly PC / mobile / total search counts), capped at
    100 rows.  Returns an empty DataFrame on any API failure.

    Raises KeyError if the NAVER_* credential env vars are unset.
    """
    API_KEY = os.environ["NAVER_API_KEY"]
    SECRET_KEY = os.environ["NAVER_SECRET_KEY"]
    CUSTOMER_ID = os.environ["NAVER_CUSTOMER_ID"]

    BASE_URL = "https://api.naver.com"
    uri = "/keywordstool"
    method = "GET"
    headers = get_header(method, uri, API_KEY, SECRET_KEY, CUSTOMER_ID)
    params = {
        "hintKeywords": [keyword],
        "showDetail": "1"
    }
    # Bound the request so a stalled API call cannot hang the UI thread.
    response = requests.get(BASE_URL + uri, params=params, headers=headers, timeout=10)
    if not response.ok:
        # Error responses may not even be JSON; treat as "no data".
        return pd.DataFrame()
    data = response.json()
    if "keywordList" not in data:
        return pd.DataFrame()
    # head(100) is a no-op on shorter frames, so the length check is implicit.
    df = pd.DataFrame(data["keywordList"]).head(100)

    def parse_count(x):
        # Counts arrive as strings like "1,230"; non-numeric placeholders
        # (e.g. "< 10") fall back to 0 instead of crashing the whole fetch.
        try:
            return int(str(x).replace(",", ""))
        except (ValueError, TypeError):
            return 0

    df["PCμκ²μλ"] = df["monthlyPcQcCnt"].apply(parse_count)
    df["λͺ¨λ°μΌμκ²μλ"] = df["monthlyMobileQcCnt"].apply(parse_count)
    df["ν νμκ²μλ"] = df["PCμκ²μλ"] + df["λͺ¨λ°μΌμκ²μλ"]
    df.rename(columns={"relKeyword": "μ 보ν€μλ"}, inplace=True)
    return df[["μ 보ν€μλ", "PCμκ²μλ", "λͺ¨λ°μΌμκ²μλ", "ν νμκ²μλ"]]
| |
|
| | |
def fetch_blog_count(keyword):
    """Return the total number of Naver blog posts matching *keyword*.

    Best-effort: any HTTP error status, network failure, or timeout
    yields 0 rather than an exception, so a single bad lookup cannot
    abort the whole keyword batch.
    """
    client_id = os.environ["NAVER_SEARCH_CLIENT_ID"]
    client_secret = os.environ["NAVER_SEARCH_CLIENT_SECRET"]
    url = "https://openapi.naver.com/v1/search/blog.json"
    headers = {
        "X-Naver-Client-Id": client_id,
        "X-Naver-Client-Secret": client_secret
    }
    # display=1: we only need the "total" field, not the hits themselves.
    params = {"query": keyword, "display": 1}
    try:
        # Bound the request; this runs once per keyword in a loop.
        response = requests.get(url, headers=headers, params=params, timeout=10)
    except requests.RequestException:
        return 0
    if response.status_code == 200:
        return response.json().get("total", 0)
    return 0
| |
|
| | |
def create_excel_file(df):
    """Write *df* to a temporary .xlsx file and return its path.

    Uses mkstemp and closes our file descriptor before pandas reopens
    the path — writing through a still-open NamedTemporaryFile handle
    fails on Windows, where an open temp file is exclusively locked.
    The file is deliberately not deleted so Gradio can serve it after
    this function returns; the OS temp dir owns eventual cleanup.
    """
    fd, excel_path = tempfile.mkstemp(suffix=".xlsx")
    os.close(fd)
    df.to_excel(excel_path, index=False)
    return excel_path
| |
|
| | |
def process_keyword(keywords: str, include_related: bool):
    """Look up Naver search volumes for newline-separated keywords.

    For every input keyword, fetch its stats row from the ads API (falling
    back to the first returned row when no exact match exists).  For the
    FIRST keyword only, append its related keywords when *include_related*
    is True.  Each resulting keyword then gets a blog-post count, and rows
    are sorted by total monthly searches, descending.

    Returns a tuple of (result DataFrame, path to an .xlsx export).
    """
    terms = [line.strip() for line in keywords.splitlines() if line.strip()]
    frames = []

    for position, term in enumerate(terms):
        stats = fetch_related_keywords(term)
        if stats.empty:
            continue

        # Prefer the exact-match row; otherwise take the API's top row.
        exact = stats[stats["μ 보ν€μλ"] == term]
        frames.append(exact if not exact.empty else stats.head(1))

        # Related keywords are only expanded for the first input term.
        if include_related and position == 0:
            related = stats[stats["μ 보ν€μλ"] != term]
            if not related.empty:
                frames.append(related)

    if frames:
        result_df = pd.concat(frames, ignore_index=True)
        result_df.drop_duplicates(subset=["μ 보ν€μλ"], inplace=True)
    else:
        result_df = pd.DataFrame(columns=["μ 보ν€μλ", "PCμκ²μλ", "λͺ¨λ°μΌμκ²μλ", "ν νμκ²μλ"])

    result_df["λΈλ‘κ·Έλ¬Έμμ"] = result_df["μ 보ν€μλ"].apply(fetch_blog_count)
    result_df.sort_values(by="ν νμκ²μλ", ascending=False, inplace=True)

    return result_df, create_excel_file(result_df)
| |
|
| | |
# --- Gradio UI ---------------------------------------------------------
# Single-page app: keyword textbox + "include related" checkbox on the
# left; result table and Excel download link on the right.
# NOTE(review): the Korean labels below are mojibake from a text-encoding
# / extraction artifact — confirm the exact bytes against the original
# file before shipping; they are reproduced here as-is.
with gr.Blocks(css=".gradio-container { max-width: 960px; margin: auto; }") as demo:
    gr.Markdown("# λ€μ΄λ² μ°κ΄κ²μμ΄, κ²μλ λ° λΈλ‘κ·Έ λ¬Έμμ μ‘°ν")
    gr.Markdown(
        "μ¬λ¬ ν€μλλ₯Ό **μν°**λ‘ κ΅¬λΆνμ¬ μλ ₯νμΈμ. κ° ν€μλμ λν κ²μλ μ 보λ₯Ό μ‘°ννλ©°, "
        "첫 λ²μ§Έ ν€μλμ λν΄ 'μ°κ΄κ²μμ΄ ν¬ν¨' μ΅μμ μ ννλ©΄ μ°κ΄κ²μμ΄ κ²°κ³Όλ ν¨κ» μ‘°νλ©λλ€. \n\n"
        "λν, κ° μ 보ν€μλμ λν λ€μ΄λ² λΈλ‘κ·Έ λ¬Έμμλ ν¨κ» μΆλ ₯λ©λλ€."
    )

    with gr.Row():
        with gr.Column(scale=1):
            # Multi-line input: one keyword per line.
            keyword_input = gr.Textbox(
                label="ν€μλ μλ ₯ (μ¬λ¬ κ°μΌ κ²½μ° μν°λ‘ ꡬλΆ)",
                lines=6,
                placeholder="μ:\nκ°μλνλΉλΌ\nμλ°μ€ν¬λ¦½νΈ"
            )
            # Expands related keywords for the first input term only
            # (see process_keyword).
            include_checkbox = gr.Checkbox(label="μ°κ΄κ²μμ΄ ν¬ν¨ (첫λ²μ§Έ ν€μλμ νν¨)", value=False)
            search_button = gr.Button("κ²μ", variant="primary")
        with gr.Column(scale=1):
            gr.Markdown("### κ²μ κ²°κ³Ό")
            df_output = gr.Dataframe(label="κ²°κ³Ό νμ΄λΈ")
            excel_output = gr.File(label="μμλ€μ΄λ‘λ")

    # Wire the button to the pipeline: (text, checkbox) -> (table, file).
    search_button.click(
        fn=process_keyword,
        inputs=[keyword_input, include_checkbox],
        outputs=[df_output, excel_output]
    )

demo.launch()
| |
|