"""
PaperCircle Papers API — HuggingFace Spaces
===========================================
Lightweight FastAPI app serving conference papers from a Parquet dataset via DuckDB.
Deployed on HuggingFace Spaces (free tier).
"""

import os
import json
import time
from contextlib import asynccontextmanager
from typing import Optional

import duckdb
from fastapi import FastAPI, Query, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from huggingface_hub import hf_hub_download

# =============================================================================
# Configuration
# =============================================================================

HF_DATASET_REPO = os.getenv("HF_DATASET_REPO", "ItsMaxNorm/pc-database")
PARQUET_PATH = os.getenv("PARQUET_PATH", "")

# =============================================================================
# Database
# =============================================================================

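# Module-level DuckDB connection and readiness flag, populated by init_database()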
db: Optional[duckdb.DuckDBPyConnection] = None
ready = False


def init_database():
    """Load Parquet into DuckDB and create FTS index."""
    global db, ready

    start = time.time()
    db = duckdb.connect(":memory:")

    # Find the parquet file
    parquet_file = None

    # Option 1: Local parquet file
    if PARQUET_PATH and os.path.exists(PARQUET_PATH):
        parquet_file = PARQUET_PATH
        print(f"[DB] Using local Parquet: {parquet_file}")

    # Option 2: Download from HF Hub
    elif HF_DATASET_REPO:
        print(f"[DB] Downloading dataset from HF Hub: {HF_DATASET_REPO}")
        parquet_file = hf_hub_download(
            repo_id=HF_DATASET_REPO,
            filename="papers.parquet",
            repo_type="dataset",
        )
        print(f"[DB] Downloaded to: {parquet_file}")

    # Option 3: Look in local data/ directory
    else:
        local_path = os.path.join(os.path.dirname(__file__), "data", "papers.parquet")
        if os.path.exists(local_path):
            parquet_file = local_path
            print(f"[DB] Using bundled Parquet: {parquet_file}")

    if not parquet_file:
        raise RuntimeError(
            "No Parquet file found. Set HF_DATASET_REPO or PARQUET_PATH env var, "
            "or place data/papers.parquet in the app directory."
        )

    # Load into DuckDB (the path comes from a trusted env var or from
    # hf_hub_download, so f-string interpolation into read_parquet is safe here)
    db.execute(f"""
        CREATE TABLE papers AS
        SELECT * FROM read_parquet('{parquet_file}')
    """)

    row_count = db.execute("SELECT COUNT(*) FROM papers").fetchone()[0]
    print(f"[DB] Loaded {row_count} papers in {time.time() - start:.1f}s")

    # Install and load FTS extension
    db.execute("INSTALL fts")
    db.execute("LOAD fts")

    # Create FTS index over title/abstract/tldr keyed by paper_id; it is
    # queried later via fts_main_papers.match_bm25() (the schema name is
    # derived from the indexed table's name)
    db.execute("""
        PRAGMA create_fts_index(
            'papers', 'paper_id',
            'title', 'abstract', 'tldr',
            overwrite=1
        )
    """)
    print(f"[DB] FTS index created in {time.time() - start:.1f}s total")

    ready = True


# =============================================================================
# App
# =============================================================================

@asynccontextmanager
async def lifespan(app: FastAPI):
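    # Runs synchronously: with lifespan enabled, uvicorn does not serve
    # requests until this returns, so the `ready` flag is mostly a defensive
    # extra for proxied setups that route traffic early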
    init_database()
    yield
    if db:
        db.close()


app = FastAPI(
    title="PaperCircle Papers API",
    version="1.0.0",
    lifespan=lifespan,
)

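# Wide-open CORS for a public, read-only API. Note that the CORS spec forbids
# combining a literal "*" origin with credentials; Starlette handles this by
# echoing the explicit request origin on credentialed requests.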
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# =============================================================================
# Conference Name Normalization
# =============================================================================

# Map of lowercase aliases → canonical names stored in the parquet
_CONFERENCE_ALIASES = {
    "nips": "NeurIPS",
    "neurips": "NeurIPS",
    "iclr": "ICLR",
    "icml": "ICML",
    "cvpr": "CVPR",
    "iccv": "ICCV",
    "eccv": "ECCV",
    "aaai": "AAAI",
    "ijcai": "IJCAI",
    "acl": "ACL",
    "emnlp": "EMNLP",
    "naacl": "NAACL",
    "coling": "COLING",
    "colm": "COLM",
    "icra": "ICRA",
    "iros": "IROS",
    "rss": "RSS",
    "corl": "CoRL",
    "kdd": "KDD",
    "www": "WWW",
    "aistats": "AISTATS",
    "uai": "UAI",
    "colt": "COLT",
    "acml": "ACML",
    "wacv": "WACV",
    "siggraph": "SIGGRAPH",
    "siggraphasia": "SIGGRAPHASIA",
    "acmmm": "ACMMM",
    "3dv": "3DV",
    "automl": "AutoML",
    "alt": "ALT",
    "ai4x": "AI4X",
}


def _normalize_conference(name: str) -> str:
    """Normalize conference name to match parquet data (uppercase)."""
    return _CONFERENCE_ALIASES.get(name.lower(), name.upper())


# =============================================================================
# Endpoints
# =============================================================================

@app.get("/")
async def root():
    return {"name": "PaperCircle Papers API", "status": "healthy" if ready else "loading", "docs": "/docs"}

@app.get("/health")
async def health():
    return {"status": "healthy" if ready else "loading", "ready": ready}


@app.get("/api/community/papers")
async def get_community_papers(
    page: int = Query(1, ge=1),
    limit: int = Query(20, ge=1, le=100),
    year: Optional[int] = None,
    conference: Optional[str] = None,
    source: Optional[str] = None,
    track: Optional[str] = None,
    status: Optional[str] = None,
    primary_area: Optional[str] = None,
    min_rating: Optional[float] = None,
    keywords: Optional[str] = None,
    sort_by: str = Query("year", regex="^(imported_at|year|rating|combined_score|recency|title|likes|views)$"),
):
    """Get paginated community papers with filters."""
    if not ready:
        raise HTTPException(status_code=503, detail="Database loading, please retry")

    offset = (page - 1) * limit

    where_clauses = []
    params = []

    if year is not None:
        where_clauses.append("year = ?")
        params.append(year)
    if conference:
        where_clauses.append("conference = ?")
        params.append(_normalize_conference(conference))
    if source:
        where_clauses.append("source = ?")
        params.append(source)
    if track:
        where_clauses.append("track = ?")
        params.append(track)
    if status:
        where_clauses.append("paper_status = ?")
        params.append(status)
    if primary_area:
        where_clauses.append("primary_area = ?")
        params.append(primary_area)
    if min_rating is not None:
        where_clauses.append("rating_avg >= ?")
        params.append(min_rating)
    if keywords:
        # Simple ILIKE substring search; note that keywords is stored as a
        # JSON string, so the pattern matches against its serialized text
        where_clauses.append("(title ILIKE ? OR abstract ILIKE ? OR keywords ILIKE ?)")
        pattern = f"%{keywords}%"
        params.extend([pattern, pattern, pattern])

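    # "1=1" keeps the WHERE clause syntactically valid when no filters are set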
    where_sql = " AND ".join(where_clauses) if where_clauses else "1=1"

    # Sort mapping; engagement fields the parquet lacks (imported_at, likes,
    # views) fall back to recency by year
    sort_map = {
        "year": "year DESC NULLS LAST",
        "imported_at": "year DESC NULLS LAST",
        "rating": "rating_avg DESC NULLS LAST",
        "recency": "year DESC NULLS LAST",
        "title": "title ASC",
        "combined_score": "rating_avg DESC NULLS LAST",
        "likes": "year DESC NULLS LAST",
        "views": "year DESC NULLS LAST",
    }
    order_sql = sort_map.get(sort_by, "year DESC NULLS LAST")

    # Get total count
    count_result = db.execute(
        f"SELECT COUNT(*) FROM papers WHERE {where_sql}", params
    ).fetchone()
    total = count_result[0]

    # Get papers
    rows = db.execute(
        f"""
        SELECT paper_id, title, authors, abstract, year, venue, conference,
               source, track, paper_status, primary_area, keywords, tldr,
               pdf_url, arxiv_id, rating_avg, github_url
        FROM papers
        WHERE {where_sql}
        ORDER BY {order_sql}
        LIMIT ? OFFSET ?
        """,
        params + [limit, offset],
    ).fetchall()

    columns = [
        "paper_id", "title", "authors", "abstract", "year", "venue", "conference",
        "source", "track", "paper_status", "primary_area", "keywords", "tldr",
        "pdf_url", "arxiv_id", "rating_avg", "github_url",
    ]

    papers = []
    for row in rows:
        paper = dict(zip(columns, row))
        # Parse JSON strings back to lists
        paper["authors"] = json.loads(paper["authors"]) if paper["authors"] else []
        paper["keywords"] = json.loads(paper["keywords"]) if paper["keywords"] else []
        papers.append(paper)

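    # Ceiling division; report at least one page even for an empty result set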
    total_pages = (total + limit - 1) // limit if total > 0 else 1

    return {
        "papers": papers,
        "total": total,
        "page": page,
        "limit": limit,
        "total_pages": total_pages,
    }


@app.get("/api/community/papers/{paper_id}")
async def get_community_paper(paper_id: str):
    """Get a single paper by paper_id."""
    if not ready:
        raise HTTPException(status_code=503, detail="Database loading")

    row = db.execute(
        """
        SELECT paper_id, title, authors, abstract, year, venue, conference,
               source, track, paper_status, primary_area, keywords, tldr,
               pdf_url, arxiv_id, rating_avg, github_url, bibtex
        FROM papers WHERE paper_id = ?
        """,
        [paper_id],
    ).fetchone()

    if not row:
        raise HTTPException(status_code=404, detail="Paper not found")

    columns = [
        "paper_id", "title", "authors", "abstract", "year", "venue", "conference",
        "source", "track", "paper_status", "primary_area", "keywords", "tldr",
        "pdf_url", "arxiv_id", "rating_avg", "github_url", "bibtex",
    ]
    paper = dict(zip(columns, row))
    paper["authors"] = json.loads(paper["authors"]) if paper["authors"] else []
    paper["keywords"] = json.loads(paper["keywords"]) if paper["keywords"] else []
    return paper


@app.get("/api/community/filters")
async def get_filter_options():
    """Get available filter options."""
    if not ready:
        raise HTTPException(status_code=503, detail="Database loading")

    years = [r[0] for r in db.execute(
        "SELECT DISTINCT year FROM papers WHERE year IS NOT NULL ORDER BY year DESC"
    ).fetchall()]

    conferences = [r[0] for r in db.execute(
        "SELECT DISTINCT conference FROM papers WHERE conference IS NOT NULL AND conference != '' ORDER BY conference"
    ).fetchall()]

    sources = [r[0] for r in db.execute(
        "SELECT DISTINCT source FROM papers WHERE source IS NOT NULL AND source != '' ORDER BY source"
    ).fetchall()]

    tracks = [r[0] for r in db.execute(
        "SELECT DISTINCT track FROM papers WHERE track IS NOT NULL AND track != '' ORDER BY track"
    ).fetchall()]

    statuses = [r[0] for r in db.execute(
        "SELECT DISTINCT paper_status FROM papers WHERE paper_status IS NOT NULL AND paper_status != '' ORDER BY paper_status"
    ).fetchall()]

    primary_areas = [r[0] for r in db.execute(
        "SELECT DISTINCT primary_area FROM papers WHERE primary_area IS NOT NULL AND primary_area != '' ORDER BY primary_area"
    ).fetchall()]

    return {
        "years": years,
        "conferences": conferences,
        "sources": sources,
        "tracks": tracks,
        "statuses": statuses,
        "primary_areas": primary_areas,
    }


@app.get("/api/search")
async def search_papers(
    query: str = Query(..., min_length=1),
    conferences: Optional[str] = None,
    start_year: Optional[int] = None,
    end_year: Optional[int] = None,
    limit: int = Query(50, ge=1, le=200),
    offset: int = Query(0, ge=0),
):
    """Full-text search with optional filters. conferences is comma-separated."""
    if not ready:
        raise HTTPException(status_code=503, detail="Database loading")

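    # e.g. "nips, iclr" -> ["NeurIPS", "ICLR"]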
    conf_list = [_normalize_conference(c.strip()) for c in conferences.split(",")] if conferences else None

    # Try FTS first; fall back to ILIKE if FTS errors out or returns no hits
    try:
        papers = _search_fts(query, conf_list, start_year, end_year, limit, offset)
        if papers:
            return {"papers": papers, "search_type": "fts", "count": len(papers)}
    except Exception as e:
        print(f"[Search] FTS failed: {e}, falling back to simple search")

    # Fallback to simple ILIKE search
    papers = _search_simple(query, conf_list, start_year, end_year, limit, offset)
    return {"papers": papers, "search_type": "simple", "count": len(papers)}


def _search_fts(query, conferences, start_year, end_year, limit, offset):
    """Full-text search using DuckDB FTS extension."""
    where_clauses = []
    params = []

    if conferences:
        placeholders = ",".join(["?" for _ in conferences])
        where_clauses.append(f"p.conference IN ({placeholders})")
        params.extend(conferences)
    if start_year is not None:
        where_clauses.append("p.year >= ?")
        params.append(start_year)
    if end_year is not None:
        where_clauses.append("p.year <= ?")
        params.append(end_year)

    extra_where = (" AND " + " AND ".join(where_clauses)) if where_clauses else ""

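    # DuckDB allows the `score` SELECT alias to be reused in WHERE (lateral
    # column aliases); rows with no BM25 match score NULL and are filtered out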
    rows = db.execute(
        f"""
        SELECT p.paper_id, p.title, p.authors, p.abstract, p.year, p.venue,
               p.conference, p.arxiv_id, p.pdf_url, p.rating_avg, p.keywords,
               p.tldr, p.primary_area,
               fts_main_papers.match_bm25(paper_id, ?) AS score
        FROM papers p
        WHERE score IS NOT NULL {extra_where}
        ORDER BY score DESC
        LIMIT ? OFFSET ?
        """,
        [query] + params + [limit, offset],
    ).fetchall()

    columns = [
        "paper_id", "title", "authors", "abstract", "year", "venue",
        "conference", "arxiv_id", "pdf_url", "rating_avg", "keywords",
        "tldr", "primary_area", "score",
    ]

    papers = []
    for row in rows:
        paper = dict(zip(columns, row))
        paper["authors"] = json.loads(paper["authors"]) if paper["authors"] else []
        paper["keywords"] = json.loads(paper["keywords"]) if paper["keywords"] else []
        papers.append(paper)

    return papers


def _search_simple(query, conferences, start_year, end_year, limit, offset):
    """Fallback ILIKE-based search."""
    where_clauses = ["(p.title ILIKE ? OR p.abstract ILIKE ? OR p.tldr ILIKE ?)"]
    pattern = f"%{query}%"
    params = [pattern, pattern, pattern]

    if conferences:
        placeholders = ",".join(["?" for _ in conferences])
        where_clauses.append(f"p.conference IN ({placeholders})")
        params.extend(conferences)
    if start_year is not None:
        where_clauses.append("p.year >= ?")
        params.append(start_year)
    if end_year is not None:
        where_clauses.append("p.year <= ?")
        params.append(end_year)

    where_sql = " AND ".join(where_clauses)

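    # Substring title matches rank first, then higher-rated and newer papers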
    rows = db.execute(
        f"""
        SELECT p.paper_id, p.title, p.authors, p.abstract, p.year, p.venue,
               p.conference, p.arxiv_id, p.pdf_url, p.rating_avg, p.keywords,
               p.tldr, p.primary_area
        FROM papers p
        WHERE {where_sql}
        ORDER BY
            CASE WHEN p.title ILIKE ? THEN 0 ELSE 1 END,
            p.rating_avg DESC NULLS LAST,
            p.year DESC NULLS LAST
        LIMIT ? OFFSET ?
        """,
        params + [pattern, limit, offset],
    ).fetchall()

    columns = [
        "paper_id", "title", "authors", "abstract", "year", "venue",
        "conference", "arxiv_id", "pdf_url", "rating_avg", "keywords",
        "tldr", "primary_area",
    ]

    papers = []
    for row in rows:
        paper = dict(zip(columns, row))
        paper["authors"] = json.loads(paper["authors"]) if paper["authors"] else []
        paper["keywords"] = json.loads(paper["keywords"]) if paper["keywords"] else []
        papers.append(paper)

    return papers


# =============================================================================
# Main
# =============================================================================

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
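
# Example requests (a sketch; assumes the server runs locally on the default
# port 7860 and that the dataset contains the filtered values):
#
#   GET /health
#   GET /api/community/papers?conference=nips&year=2024&min_rating=6&limit=5
#   GET /api/search?query=diffusion+models&conferences=neurips,iclr&start_year=2023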