"""Daily Paper Reader — Streamlit app that browses, summarizes and trends HF daily papers."""

import json
import glob
import ssl
import time
from datetime import datetime, timedelta, timezone
from pathlib import Path
from urllib.request import urlopen, Request
from urllib.error import HTTPError

import streamlit as st

# ---------------------------------------------------------------------------
# Page config
# ---------------------------------------------------------------------------
st.set_page_config(
    page_title="Daily Paper Reader",
    page_icon="📰",
    layout="wide",
    initial_sidebar_state="collapsed",
)

# ---------------------------------------------------------------------------
# Custom CSS – HuggingFace-inspired design
# ---------------------------------------------------------------------------
st.markdown(
    """ """,
    unsafe_allow_html=True,
)

# ---------------------------------------------------------------------------
# Data helpers
# ---------------------------------------------------------------------------
DATA_DIR = Path(__file__).resolve().parent.parent / "data"
HF_DATASET_REPO = "Elfsong/hf_paper_summary"
HF_TRENDING_REPO = "Elfsong/hf_paper_trending"


def _env_or_dotenv(name: str) -> str | None:
    """Look up *name* in the process environment, then in the repo-root .env file.

    Lines in .env are stripped before matching so indented entries still work.
    Returns None when the variable is absent in both places.
    """
    import os

    value = os.getenv(name, "")
    if value:
        return value
    env_path = Path(__file__).resolve().parent.parent / ".env"
    if env_path.exists():
        prefix = f"{name}="
        for raw in env_path.read_text().splitlines():
            line = raw.strip()
            if line.startswith(prefix):
                return line.split("=", 1)[1].strip()
    return None


def _get_hf_token() -> str | None:
    """HF write token from HF_TOKEN (env or .env); None disables dataset pushes."""
    return _env_or_dotenv("HF_TOKEN")


def _date_to_split(date_str: str) -> str:
    """Convert '2026-03-11' to 'date_2026_03_11' for a valid split name."""
    return "date_" + date_str.replace("-", "_")


def _split_to_date(split_name: str) -> str:
    """Convert 'date_2026_03_11' back to '2026-03-11'."""
    return split_name.replace("date_", "", 1).replace("_", "-")


def push_to_hf_dataset(papers: list[dict], date_str: str) -> None:
    """Push the papers list to the HF summary dataset as one split per date.

    Nested fields (analysis dicts, topic/keyword lists) are JSON-encoded so the
    Arrow schema stays flat. Silently no-ops when no HF token is available.
    """
    from datasets import Dataset

    token = _get_hf_token()
    if not token:
        return

    def _dump(value) -> str:
        # Flatten nested structures to JSON strings for a flat column schema.
        return json.dumps(value, ensure_ascii=False)

    rows = [
        {
            "title": p.get("title", ""),
            "paper_id": p.get("paper_id", ""),
            "hf_url": p.get("hf_url", ""),
            "arxiv_url": p.get("arxiv_url", ""),
            "pdf_url": p.get("pdf_url", ""),
            "authors": p.get("authors", []),
            "summary": p.get("summary", ""),
            "upvotes": p.get("upvotes", 0),
            "published_at": p.get("published_at", ""),
            "concise_summary": p.get("concise_summary", ""),
            "concise_summary_zh": p.get("concise_summary_zh", ""),
            "detailed_analysis": _dump(p.get("detailed_analysis", {})),
            "detailed_analysis_zh": _dump(p.get("detailed_analysis_zh", {})),
            "topics": _dump(p.get("topics", [])),
            "topics_zh": _dump(p.get("topics_zh", [])),
            "keywords": _dump(p.get("keywords", [])),
            "keywords_zh": _dump(p.get("keywords_zh", [])),
        }
        for p in papers
    ]
    ds = Dataset.from_list(rows)
    ds.push_to_hub(HF_DATASET_REPO, split=_date_to_split(date_str), token=token)


@st.cache_data(ttl=300, show_spinner=False)
def _list_dataset_splits() -> list[str]:
    """List available date splits from the HF dataset repo without loading data.

    Returns split names sorted newest-first; [] when the repo is missing or
    unreachable.
    """
    from huggingface_hub import HfApi

    api = HfApi(token=_get_hf_token())
    try:
        files = api.list_repo_files(HF_DATASET_REPO, repo_type="dataset")
    except Exception:
        # Repo may not exist yet or the network may be down — treat as empty.
        return []
    # Split dirs look like: data/date_2026_03_11-*.parquet or date_2026_03_11/...
    splits = set()
    for f in files:
        name = f.split("/")[-1]
        for part in name.replace(".parquet", "").replace(".arrow", "").split("-"):
            if part.startswith("date_"):
                splits.add(part)
                break
    return sorted(splits, reverse=True)


@st.cache_data(ttl=300, show_spinner=False)
def pull_from_hf_dataset(target_date: str | None = None) -> dict[str, list[dict]]:
    """Load a date split from the HF dataset. If target_date is None, load the latest.

    Returns {date_str: papers_list}, or {} when nothing is available.
    """
    from datasets import load_dataset

    token = _get_hf_token()
    splits = _list_dataset_splits()
    if not splits:
        return {}
    if target_date:
        target_split = _date_to_split(target_date)
        if target_split not in splits:
            return {}
        split_to_load = target_split
    else:
        split_to_load = splits[0]  # splits are sorted newest-first
    date_str = _split_to_date(split_to_load)
    try:
        ds = load_dataset(HF_DATASET_REPO, split=split_to_load, token=token)
    except Exception:
        return {}
    papers = []
    for row in ds:
        paper = dict(row)
        # JSON-encoded columns may be empty strings (or None) on old/partial
        # rows; fall back to an empty structure instead of letting
        # json.loads("") raise a JSONDecodeError.
        paper["detailed_analysis"] = json.loads(
            paper.get("detailed_analysis") or "{}"
        )
        paper["detailed_analysis_zh"] = json.loads(
            paper.get("detailed_analysis_zh") or "{}"
        )
        paper["topics"] = json.loads(paper.get("topics") or "[]")
        paper["topics_zh"] = json.loads(paper.get("topics_zh") or "[]")
        paper["keywords"] = json.loads(paper.get("keywords") or "[]")
        paper["keywords_zh"] = json.loads(paper.get("keywords_zh") or "[]")
        papers.append(paper)
    return {date_str: papers}


@st.cache_data(ttl=300, show_spinner=False)
def list_available_dates() -> list[str]:
    """Return available dates (YYYY-MM-DD) from HF dataset and local files, sorted descending."""
    dates = {_split_to_date(split) for split in _list_dataset_splits()}
    dates.update(find_json_files())
    return sorted(dates, reverse=True)


def find_json_files() -> dict[str, Path]:
    """Return {date_str: path} for all summarized JSON files, newest date first."""
    files: dict[str, Path] = {}
    for fp in glob.glob(str(DATA_DIR / "hf_papers_*_summarized.json")):
        p = Path(fp)
        # Stem looks like 'hf_papers_2026-03-11_summarized'; find the date token.
        for part in p.stem.split("_"):
            if len(part) != 10:
                continue
            try:
                # Strict parse — the old positional dash check ('-' at index 4
                # and 7) accepted non-digit garbage such as 'ab-cd-efgh'.
                datetime.strptime(part, "%Y-%m-%d")
            except ValueError:
                continue
            files[part] = p
            break
    return dict(sorted(files.items(), reverse=True))


def load_papers(source) -> list[dict]:
    """Load a papers list from a path (str/Path) or an open file-like object."""
    if isinstance(source, (str, Path)):
        with open(source, "r", encoding="utf-8") as f:
            return json.load(f)
    return json.loads(source.read())
# ---------------------------------------------------------------------------
# Crawl & summarize
# ---------------------------------------------------------------------------
# TLS context used for every outbound request in this module.
SSL_CTX = ssl.create_default_context()
try:
    import certifi

    SSL_CTX.load_verify_locations(certifi.where())
except ImportError:
    # No certifi available: fall back to unverified TLS rather than failing.
    SSL_CTX.check_hostname = False
    SSL_CTX.verify_mode = ssl.CERT_NONE

HF_API_URL = "https://huggingface.co/api/daily_papers"
HF_THUMB = "https://cdn-thumbnails.huggingface.co/social-thumbnails/papers/{pid}.png"

# System prompt for per-paper summarization: instructs Gemini to return a
# strict 8-key JSON object (EN/ZH summaries, analyses, topics, keywords).
SUMMARY_SYSTEM_PROMPT = """\
You are a senior AI researcher. Given a paper's title and abstract, produce a JSON object \
with exactly eight keys — English and Chinese versions of analyses, plus keywords and topics:

1. "concise_summary": A 2-4 sentence plain-language summary in English explaining WHAT the paper does \
and WHY it matters. Avoid jargon; end with the key result or takeaway.
2. "concise_summary_zh": The same concise summary translated into Chinese (简体中文).
3. "detailed_analysis": A longer analysis in English, structured as:
- "summary": 4-6 sentences. Go beyond restating the abstract — interpret the approach \
and explain how it fits into the broader research landscape.
- "pros": A list of 3-4 strengths (novelty, practical impact, methodology, etc.)
- "cons": A list of 2-3 weaknesses or limitations (scope, assumptions, scalability, etc.)
4. "detailed_analysis_zh": The same detailed analysis translated into Chinese (简体中文), \
with the same structure: "summary", "pros", "cons".
5. "topics": A list of 2-3 short topic labels categorizing the paper's research area \
(e.g. "Multimodal LLMs", "Efficient Fine-tuning", "Code Generation", "Vision-Language Models"). \
Use concise, recognizable labels.
6. "topics_zh": The same topic labels translated into Chinese (简体中文).
7. "keywords": A list of 4-6 specific technical keywords or terms central to the paper \
(e.g. "LoRA", "RLHF", "diffusion", "chain-of-thought", "MoE", "RAG", "DPO", "transformer"). \
Use canonical technical terms, not paraphrases. Include method names, model names, and key techniques.
8. "keywords_zh": The same keywords translated into Chinese where applicable \
(keep English acronyms and proper nouns as-is, e.g. "LoRA", "RLHF", "扩散模型", "思维链").

Reply with ONLY valid JSON — no markdown fences, no extra text."""

# System prompt for the multi-day trending analysis: strict 6-key JSON object.
TRENDING_SYSTEM_PROMPT = """\
You are a senior AI researcher. Given a collection of top papers from the last several days, \
identify the key research trends and produce a JSON object with exactly six keys:

1. "trending_summary": A 2-3 sentence English summary of the dominant research trends \
and themes across these papers. Focus on emerging patterns, hot topics, and notable shifts.
2. "trending_summary_zh": The same trending summary translated into Chinese (简体中文).
3. "top_topics": A list of 3-5 short topic labels (e.g. "Multimodal LLMs", "Efficient Fine-tuning") \
representing the most prominent themes, in English.
4. "top_topics_zh": The same topic labels translated into Chinese (简体中文).
5. "keywords": A list of 5-10 specific technical keywords or terms that appear frequently \
or are central to the papers (e.g. "LoRA", "RLHF", "diffusion", "chain-of-thought", "MoE", \
"RAG", "MLLM", "DPO"). Use the canonical technical term, not a paraphrase.
6. "keywords_zh": The same technical keywords translated into Chinese where applicable \
(keep English acronyms as-is, e.g. "LoRA", "RLHF", "扩散模型", "思维链").

Reply with ONLY valid JSON — no markdown fences, no extra text."""


def fetch_daily_papers(date_str: str) -> list[dict]:
    """Fetch the HF daily-papers listing for *date_str*, sorted by upvotes desc.

    Returns [] on HTTPError (e.g. no listing exists for that date).
    """
    url = f"{HF_API_URL}?date={date_str}"
    req = Request(url, headers={"User-Agent": "Mozilla/5.0"})
    try:
        with urlopen(req, timeout=30, context=SSL_CTX) as resp:
            data = json.loads(resp.read().decode())
    except HTTPError:
        return []
    papers = []
    for item in data:
        paper = item.get("paper", {})
        paper_id = paper.get("id", "")
        authors = [a.get("name", "") for a in paper.get("authors", [])]
        papers.append(
            {
                "title": paper.get("title", ""),
                "paper_id": paper_id,
                # The arXiv id doubles as the path segment on all three hosts.
                "hf_url": f"https://huggingface.co/papers/{paper_id}",
                "arxiv_url": f"https://arxiv.org/abs/{paper_id}",
                "pdf_url": f"https://arxiv.org/pdf/{paper_id}",
                "authors": authors,
                "summary": paper.get("summary", ""),
                "upvotes": paper.get("upvotes", 0),
                "published_at": paper.get("publishedAt", ""),
            }
        )
    papers.sort(key=lambda x: x["upvotes"], reverse=True)
    return papers


def _get_gemini_key() -> str:
    """Return the Gemini API key from the environment or .env; raise if missing."""
    import os

    api_key = os.getenv("GEMINI_API_KEY", "")
    if api_key:
        return api_key
    env_path = Path(__file__).resolve().parent.parent / ".env"
    if env_path.exists():
        for line in env_path.read_text().splitlines():
            if line.startswith("GEMINI_API_KEY="):
                return line.split("=", 1)[1].strip()
    raise RuntimeError(
        "GEMINI_API_KEY not found. Set it as a HF Space secret or in .env"
    )


def summarize_paper_gemini(
    title: str, abstract: str, pdf_url: str = ""
) -> dict:
    """Summarize one paper with Gemini; returns the parsed 8-key JSON dict.

    When *pdf_url* is given, the PDF bytes are attached as extra context; any
    download failure silently falls back to title+abstract only.
    """
    from google import genai

    api_key = _get_gemini_key()
    client = genai.Client(api_key=api_key)
    text_part = genai.types.Part.from_text(
        text=f"Title: {title}\n\nAbstract: {abstract}"
    )
    contents = [text_part]
    if pdf_url:
        try:
            pdf_data = urlopen(
                pdf_url, context=SSL_CTX, timeout=30
            ).read()
            pdf_part = genai.types.Part.from_bytes(
                data=pdf_data, mime_type="application/pdf"
            )
            contents.append(pdf_part)
        except Exception:
            pass  # fall back to text-only
    resp = client.models.generate_content(
        model="gemini-3.1-pro-preview",
        contents=contents,
        config=genai.types.GenerateContentConfig(
            system_instruction=SUMMARY_SYSTEM_PROMPT,
            temperature=0.3,
            max_output_tokens=16384,
            response_mime_type="application/json",
        ),
    )
    # raw_decode tolerates trailing text after the JSON object.
    decoder = json.JSONDecoder()
    result, _ = decoder.raw_decode(resp.text.strip())
    return result


def _paper_has_summary(paper: dict) -> bool:
    """Check if a paper already has a valid summary (not an error)."""
    cs = paper.get("concise_summary", "")
    return bool(cs) and not cs.startswith("Error:")


def _save_papers_local(papers: list[dict], path: Path) -> None:
    """Atomically save papers list to local JSON (write temp file, then rename)."""
    tmp = path.with_suffix(".tmp")
    with open(tmp, "w", encoding="utf-8") as f:
        json.dump(papers, f, ensure_ascii=False, indent=2)
    tmp.replace(path)


def crawl_and_summarize(date_str: str) -> Path | None:
    """Fetch, summarize and persist all papers for *date_str*.

    Resumable: previously summarized papers from the local JSON are merged in
    and skipped. Saves after every paper so an interrupted run can resume, and
    pushes the completed set to the HF dataset at the end.

    Returns the local output path, or None when no papers were found.
    """
    DATA_DIR.mkdir(parents=True, exist_ok=True)
    output_path = DATA_DIR / f"hf_papers_{date_str}_summarized.json"
    progress = st.progress(0, text="Fetching papers from HuggingFace...")
    papers = fetch_daily_papers(date_str)
    if not papers:
        progress.empty()
        st.error(f"No papers found for {date_str}")
        return None
    # Resume: load existing partial results and merge
    if output_path.exists():
        try:
            with open(output_path, "r", encoding="utf-8") as f:
                cached = {p["paper_id"]: p for p in json.load(f) if _paper_has_summary(p)}
            for paper in papers:
                pid = paper.get("paper_id", "")
                if pid in cached:
                    paper.update(cached[pid])
        except Exception:
            pass  # corrupted cache, start fresh
    total = len(papers)
    skipped = sum(1 for p in papers if _paper_has_summary(p))
    if skipped:
        st.toast(f"Resuming: {skipped}/{total} papers already summarized.", icon="⏩")
    for i, paper in enumerate(papers):
        # Skip already summarized papers
        if _paper_has_summary(paper):
            progress.progress(
                (i + 1) / (total + 1),
                text=f"Cached ({i+1}/{total}): {paper['title'][:60]}...",
            )
            continue
        progress.progress(
            (i + 1) / (total + 1),
            text=f"Summarizing ({i+1}/{total}): {paper['title'][:60]}...",
        )
        abstract = paper.get("summary", "")
        pdf_url = paper.get("pdf_url", "")
        if not abstract and not pdf_url:
            # Nothing to summarize from — record empty fields so the paper
            # is not retried on resume.
            paper["concise_summary"] = ""
            paper["concise_summary_zh"] = ""
            paper["detailed_analysis"] = {}
            paper["detailed_analysis_zh"] = {}
            paper["topics"] = []
            paper["topics_zh"] = []
            paper["keywords"] = []
            paper["keywords_zh"] = []
        else:
            try:
                result = summarize_paper_gemini(paper["title"], abstract, pdf_url)
                paper["concise_summary"] = result.get("concise_summary", "")
                paper["concise_summary_zh"] = result.get("concise_summary_zh", "")
                paper["detailed_analysis"] = result.get("detailed_analysis", {})
                paper["detailed_analysis_zh"] = result.get("detailed_analysis_zh", {})
                paper["topics"] = result.get("topics", [])
                paper["topics_zh"] = result.get("topics_zh", [])
                paper["keywords"] = result.get("keywords", [])
                paper["keywords_zh"] = result.get("keywords_zh", [])
            except Exception as e:
                # "Error:" prefix marks the paper as failed; _paper_has_summary
                # treats it as unsummarized so resume retries it.
                paper["concise_summary"] = f"Error: {e}"
                paper["concise_summary_zh"] = ""
                paper["detailed_analysis"] = {}
                paper["detailed_analysis_zh"] = {}
                paper["topics"] = []
                paper["topics_zh"] = []
                paper["keywords"] = []
                paper["keywords_zh"] = []
        # Save after each paper for resume support
        _save_papers_local(papers, output_path)
        if i < total - 1:
            time.sleep(1)  # light rate-limiting between API calls
    # Push to HuggingFace only after all papers are done
    progress.progress(0.95, text="Uploading to HuggingFace Dataset...")
    try:
        push_to_hf_dataset(papers, date_str)
    except Exception as e:
        st.warning(f"Failed to push to HF dataset: {e}")
    progress.progress(1.0, text="Done!")
    time.sleep(0.5)
    progress.empty()
    return output_path


# ---------------------------------------------------------------------------
# Trending summary
# ---------------------------------------------------------------------------
def _load_recent_papers(n_days: int = 5) -> tuple[list[dict], str, str]:
    """Load top papers from the most recent n_days splits.

    Returns (papers, earliest_date, latest_date); papers sorted by upvotes desc.
    """
    from datasets import load_dataset

    token = _get_hf_token()
    splits = _list_dataset_splits()[:n_days]
    all_papers = []
    loaded_dates = []
    for split in splits:
        try:
            ds = load_dataset(HF_DATASET_REPO, split=split, token=token)
            date = _split_to_date(split)
            loaded_dates.append(date)
            for row in ds:
                paper = dict(row)
                paper["_date"] = date  # tag each row with its source date
                all_papers.append(paper)
        except Exception:
            continue  # skip unreadable splits, keep the rest
    all_papers.sort(key=lambda p: p.get("upvotes", 0), reverse=True)
    earliest = min(loaded_dates) if loaded_dates else ""
    latest = max(loaded_dates) if loaded_dates else ""
    return all_papers, earliest, latest


def generate_trending_summary(papers: list[dict]) -> dict:
    """Call Gemini to produce a trending summary from recent papers."""
    from google import genai

    api_key = _get_gemini_key()
    client = genai.Client(api_key=api_key)
    # Build input: title + concise_summary + detailed analysis for each paper
    lines = []
    for p in papers:
        date = p.get("_date", "")
        title = p.get("title", "")
        summary = p.get("concise_summary", "") or p.get("summary", "")
        upvotes = p.get("upvotes", 0)
        parts = [f"[{date}] (upvotes: {upvotes}) {title}", summary]
        analysis = p.get("detailed_analysis", {})
        if isinstance(analysis, str):
            # Rows pulled straight from HF carry the analysis JSON-encoded.
            try:
                analysis = json.loads(analysis)
            except Exception:
                analysis = {}
        if analysis:
            if analysis.get("summary"):
                parts.append(f"Analysis: {analysis['summary']}")
            pros = analysis.get("pros", [])
            if pros:
                parts.append("Strengths: " + "; ".join(pros))
            cons = analysis.get("cons", [])
            if cons:
                parts.append("Limitations: " + "; ".join(cons))
        lines.append("\n".join(parts))
    content = "\n\n".join(lines)
    resp = client.models.generate_content(
        model="gemini-3.1-pro-preview",
        contents=content,
        config=genai.types.GenerateContentConfig(
            system_instruction=TRENDING_SYSTEM_PROMPT,
            temperature=0.3,
            max_output_tokens=4096*6,
            response_mime_type="application/json",
        ),
    )
    # raw_decode tolerates trailing text after the JSON object.
    decoder = json.JSONDecoder()
    result, _ = decoder.raw_decode(resp.text.strip())
    return result


def push_trending_to_hf(trending: dict, date_str: str) -> None:
    """Push trending summary to HF dataset (one single-row split per date).

    Silently no-ops when no HF token is available.
    """
    from datasets import Dataset

    token = _get_hf_token()
    if not token:
        return
    row = {
        "trending_summary": trending.get("trending_summary", ""),
        "trending_summary_zh": trending.get("trending_summary_zh", ""),
        # List fields are JSON-encoded to keep the schema flat.
        "top_topics": json.dumps(trending.get("top_topics", []), ensure_ascii=False),
        "top_topics_zh": json.dumps(
            trending.get("top_topics_zh", []), ensure_ascii=False
        ),
        "keywords": json.dumps(trending.get("keywords", []), ensure_ascii=False),
        "keywords_zh": json.dumps(trending.get("keywords_zh", []), ensure_ascii=False),
        "date_range": trending.get("date_range", ""),
        "generated_date": date_str,
    }
    ds = Dataset.from_list([row])
    split_name = _date_to_split(date_str)
    ds.push_to_hub(HF_TRENDING_REPO, split=split_name, token=token)


@st.cache_data(ttl=300, show_spinner=False)
def pull_trending_from_hf(target_date: str | None = None) -> dict | None:
    """Load trending summary from HF dataset.

    Returns the decoded dict, or None when missing/unreadable. With no
    target_date the most recent split is used.
    """
    from huggingface_hub import HfApi
    from datasets import load_dataset

    token = _get_hf_token()
    api = HfApi(token=token)
    try:
        files = api.list_repo_files(HF_TRENDING_REPO, repo_type="dataset")
    except Exception:
        return None
    # Same split-name discovery scheme as _list_dataset_splits.
    splits = set()
    for f in files:
        name = f.split("/")[-1]
        for part in name.replace(".parquet", "").replace(".arrow", "").split("-"):
            if part.startswith("date_"):
                splits.add(part)
                break
    splits = sorted(splits, reverse=True)
    if not splits:
        return None
    if target_date:
        target_split = _date_to_split(target_date)
        if target_split not in splits:
            return None
        split_to_load = target_split
    else:
        split_to_load = splits[0]
    try:
        ds = load_dataset(HF_TRENDING_REPO, split=split_to_load, token=token)
    except Exception:
        return None
    # Each split holds exactly one row (see push_trending_to_hf).
    row = dict(ds[0])
    row["top_topics"] = json.loads(row.get("top_topics", "[]"))
    row["top_topics_zh"] = json.loads(row.get("top_topics_zh", "[]"))
    row["keywords"] = json.loads(row.get("keywords", "[]"))
    row["keywords_zh"] = json.loads(row.get("keywords_zh", "[]"))
    return row


def get_or_generate_trending(date_str: str, status=None) -> tuple[dict | None, str]:
    """Get trending from HF cache, or generate and push it.

    *status* is an optional Streamlit element used for progress messages.
    Returns (trending_dict, date_range_str); (None, "") on failure.
    """
    if status:
        status.info("Checking cached trending summary...")
    trending = pull_trending_from_hf(target_date=date_str)
    if trending:
        date_range = trending.get("date_range", "")
        return trending, date_range
    # Generate fresh trending
    if status:
        status.info("Loading recent papers for trending analysis...")
    recent_papers, earliest, latest = _load_recent_papers(n_days=5)
    if not recent_papers:
        if status:
            status.warning("No recent papers available for trending analysis.")
        return None, ""
    date_range = f"{earliest} ~ {latest}" if earliest and latest else ""
    try:
        if status:
            status.info("Generating trending summary with Gemini...")
        trending = generate_trending_summary(recent_papers)
        trending["date_range"] = date_range
    except Exception as e:
        if status:
            status.error(f"Trending generation failed: {e}")
        return None, ""
    try:
        if status:
            status.info("Saving trending summary to HuggingFace...")
        push_trending_to_hf(trending, date_str)
    except Exception as e:
        # Best-effort cache write: still return the generated summary.
        if status:
            status.warning(f"HF push failed: {e}")
    return trending, date_range


# ---------------------------------------------------------------------------
# Summary dialog
# ---------------------------------------------------------------------------
@st.dialog("📄 Summary", width="large")
def show_summary(paper: dict) -> None:
    """Modal dialog showing links, topics/keywords, TL;DR and detailed analysis.

    Chinese fields are preferred (with English fallback) when the global
    language toggle is on.
    """
    st.markdown(f"### {paper.get('title', '')}")
    # Authors
    authors = paper.get("authors", [])
    if authors:
        st.caption(", ".join(authors))
    # Resource links
    # NOTE(review): the HTML body of this f-string appears to have been lost in
    # extraction — as written it renders nothing; restore the link markup.
    links_html = f""""""
    st.markdown(links_html, unsafe_allow_html=True)
    # Use global language toggle
    lang = st.session_state.get("global_lang_toggle", False)
    # Topics & Keywords
    if lang:
        topics = paper.get("topics_zh", []) or paper.get("topics", [])
        kws = paper.get("keywords_zh", []) or paper.get("keywords", [])
    else:
        topics = paper.get("topics", [])
        kws = paper.get("keywords", [])
    if topics or kws:
        lines = []
        if topics:
            # NOTE(review): span/div markup inside these f-strings was stripped
            # during extraction — presumably styled pill badges; confirm
            # against the original source.
            topic_spans = "".join(
                f'{t}' for t in topics
            )
            lines.append(f'{topic_spans}')
        if kws:
            kw_spans = "".join(
                f'{k}' for k in kws
            )
            lines.append(f'{kw_spans}')
        st.markdown(
            f'{"".join(lines)}',
            unsafe_allow_html=True,
        )
    # TL;DR
    if lang:
        concise = paper.get("concise_summary_zh", "") or paper.get(
            "concise_summary", ""
        )
    else:
        concise = paper.get("concise_summary", "")
    if concise:
        st.markdown("#### 📝 TL;DR")
        st.markdown(concise)
    # Detailed Analysis
    if lang:
        analysis = paper.get("detailed_analysis_zh", {}) or paper.get(
            "detailed_analysis", {}
        )
    else:
        analysis = paper.get("detailed_analysis", {})
    if analysis:
        st.divider()
        st.markdown("#### 🔬 Detailed Analysis" if not lang else "#### 🔬 详细分析")
        st.markdown(analysis.get("summary", ""))
        st.divider()
        # Two-column strengths / limitations layout.
        col_a, col_b = st.columns(2)
        with col_a:
            pros = analysis.get("pros", [])
            pros_html = "".join(f'{p}' for p in pros)
            label = "✓ Strengths" if not lang else "✓ 优势"
            st.markdown(
                f'{label}{pros_html}',
                unsafe_allow_html=True,
            )
        with col_b:
            cons = analysis.get("cons", [])
            cons_html = "".join(f'{c}' for c in cons)
            label = "✗ Limitations" if not lang else "✗ 不足"
            st.markdown(
                f'{label}{cons_html}',
                unsafe_allow_html=True,
            )


# ---------------------------------------------------------------------------
# Render paper card
# ---------------------------------------------------------------------------
def render_card(paper: dict, rank: int) -> None:
    """Render one grid card: thumbnail, clickable title (opens dialog), authors, topics.

    *rank* is only used to build a unique widget key for the title button.
    """
    pid = paper.get("paper_id", "")
    title = paper.get("title", "Untitled")
    authors = paper.get("authors", [])
    thumb_url = HF_THUMB.format(pid=pid)
    if authors:
        authors_str = ", ".join(authors)
    else:
        authors_str = "Unknown authors"
    with st.container(border=True):
        # Thumbnail
        st.image(thumb_url, width="stretch")
        # Title as clickable button
        if st.button(f"**{title}**", key=f"card-{rank}", use_container_width=True):
            show_summary(paper)
        # Authors
        lang = st.session_state.get("global_lang_toggle", False)
        if lang:
            topics = paper.get("topics_zh", []) or paper.get("topics", [])
        else:
            topics = paper.get("topics", [])
        # NOTE(review): HTML markup in these strings was stripped in extraction.
        topic_spans = "".join(
            f'{t}' for t in topics
        )
        html = f"""
{authors_str}
{topic_spans}
"""
        st.markdown(html, unsafe_allow_html=True)


# ---------------------------------------------------------------------------
# Main content
# ---------------------------------------------------------------------------
papers: list[dict] = []
yesterday_str = (datetime.now(timezone.utc) - timedelta(days=1)).strftime("%Y-%m-%d")

# --- Header row: date selector + language toggle ---
col_date, col_lang = st.columns([0.1, 0.9])
with col_date:
    available_dates = list_available_dates()
    # Default to the newest available date, else yesterday (UTC).
    selected_date = st.date_input(
        "Select date",
        value=(
            datetime.strptime(available_dates[0], "%Y-%m-%d").date()
            if available_dates
            else (datetime.now(timezone.utc) - timedelta(days=1)).date()
        ),
        format="YYYY-MM-DD",
        label_visibility="collapsed",
    )
    selected_date_str = selected_date.strftime("%Y-%m-%d")
with col_lang:
    # st.markdown("", unsafe_allow_html=True)
    use_zh = st.toggle("中文", key="global_lang_toggle")

latest_date = selected_date_str
# Load order: HF dataset split first, then local JSON fallback.
with st.spinner("Loading papers..."):
    hf_data = pull_from_hf_dataset(target_date=selected_date_str)
    if hf_data:
        papers = hf_data[selected_date_str]
    if not papers:
        json_files = find_json_files()
        if selected_date_str in json_files:
            papers = load_papers(json_files[selected_date_str])

# Check if loaded papers have incomplete summaries (interrupted collection)
needs_summarization = papers and any(not _paper_has_summary(p) for p in papers)
if not papers or needs_summarization:
    if not papers:
        st.balloons()
        st.toast(f"You are the first one to read papers on {selected_date_str}! We are collecting papers for you.", icon="📰")
    else:
        summarized = sum(1 for p in papers if _paper_has_summary(p))
        st.toast(f"Resuming summarization: {summarized}/{len(papers)} papers done.", icon="⏩")
    result_path = crawl_and_summarize(selected_date_str)
    if result_path:
        papers = load_papers(result_path)

if not papers:
    st.error("No papers found. Please check back later.")
    st.stop()

papers.sort(key=lambda p: p.get("upvotes", 0), reverse=True)
date_label = latest_date
lang = st.session_state.get("global_lang_toggle", False)

# --- Trending status (spinner under title, filled later) ---
trending_spinner = st.empty()
# --- Trending summary placeholder (filled after papers render) ---
trending_placeholder = st.empty()

# --- Render paper grid (3 columns) ---
NUM_COLS = 3
for row_start in range(0, len(papers), NUM_COLS):
    cols = st.columns(NUM_COLS, gap="medium")
    for col_idx, col in enumerate(cols):
        paper_idx = row_start + col_idx
        if paper_idx >= len(papers):
            break  # last row may be partially filled
        with col:
            render_card(papers[paper_idx], rank=paper_idx + 1)

# --- Trending summary (loaded after papers are displayed) ---
with trending_spinner.container():
    with st.spinner("Loading trending summary..."):
        trending, trending_date_range = get_or_generate_trending(
            selected_date_str, status=None
        )
trending_spinner.empty()

if trending:
    if lang:
        summary_text = trending.get("trending_summary_zh", "") or trending.get(
            "trending_summary", ""
        )
        topics = trending.get("top_topics_zh", []) or trending.get("top_topics", [])
        keywords = trending.get("keywords_zh", []) or trending.get("keywords", [])
    else:
        summary_text = trending.get("trending_summary", "")
        topics = trending.get("top_topics", [])
        keywords = trending.get("keywords", [])
    # NOTE(review): HTML markup in these strings was stripped in extraction.
    topics_html = " ".join(
        f'{t}' for t in topics
    )
    keywords_html = " ".join(
        f'{k}' for k in keywords
    )
    date_range_label = (
        f'({trending_date_range})' if trending_date_range else ""
    )
    trending_placeholder.markdown(
        f"""
{"🔥 趋势" if lang else "🔥 Trending"} {date_range_label}
{summary_text}
{topics_html}
{keywords_html}
""",
        unsafe_allow_html=True,
    )