Bromeo777 commited on
Commit
a218999
·
verified ·
1 Parent(s): b41efec

Add app/api/v1/maps.py

Browse files
Files changed (1) hide show
  1. app/api/v1/maps.py +105 -0
app/api/v1/maps.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import time
3
+ from enum import Enum
4
+ from typing import List
5
+ from fastapi import APIRouter, Depends, Query, HTTPException, status
6
+ from fastapi.responses import StreamingResponse
7
+ from pydantic import BaseModel, Field
8
+ from sqlalchemy.ext.asyncio import AsyncSession
9
+ from sqlalchemy import select
10
+
11
+ from app.api import deps
12
+ from app.models.user import User
13
+ from app.models.paper import Paper
14
+ from app.services.discovery.maps import discovery_map_service
15
+ from app.utils.converters import export_service
16
+
17
# Namespaced logger for this module; handlers and levels are configured by the
# application's logging setup, not here.
logger = logging.getLogger("rm_research.api.maps")

# Route collection for the Strategic Discovery Map endpoints — presumably
# included by the parent v1 API router with a /maps prefix (confirm at the
# include_router call site).
router = APIRouter()
19
+
20
class ExportFormat(str, Enum):
    """Citation formats accepted by the institutional export endpoint.

    Subclasses ``str`` so path-parameter values compare and serialize as
    plain strings, and so FastAPI can validate the ``{format}`` segment
    directly against the member values.
    """

    BIBTEX = "bibtex"  # BibTeX bibliography entries
    RIS = "ris"        # Research Information Systems tagged format
    CSV = "csv"        # comma-separated values spreadsheet
25
+
26
class ExportRequest(BaseModel):
    """Request body for bulk-exporting papers selected in a map view.

    Carries between 1 and 5,000 paper identifiers; both bounds are
    enforced by pydantic before the route handler runs.
    """

    # OpenAlex IDs of the papers to export (required, 1..5000 items).
    paper_ids: List[str] = Field(default=..., min_length=1, max_length=5000)
29
+
30
+ # --- 1. The Visualization Endpoint (WebGL Optimized) ---
31
+
32
@router.get("/generate", summary="Generate WebGL-ready graph data for large-scale discovery")
async def generate_discovery_map(
    seed_id: str = Query(..., description="The OpenAlex ID used as the map anchor"),
    limit: int = Query(1000, ge=1, le=50000, description="Max node count"),
    db: AsyncSession = Depends(deps.get_db),
    current_user: User = Depends(deps.get_current_active_user)
):
    """
    Fulfills Requirement 3.3: High-scale WebGL payloads for >10,000 nodes.

    💰 Subscription Gating:
    - Free: 1,000 nodes max.
    - Premium: Up to 50,000 nodes.

    Raises:
        HTTPException 500: when the map service fails for any reason.
    """
    # Clamp non-premium users to the free-tier ceiling; premium users keep the
    # requested limit (already capped at 50,000 by the Query validator above).
    effective_limit = limit if current_user.is_premium else min(limit, 1000)

    try:
        # Build WebGL payload (nodes/edges/metadata)
        # RESOLUTION: Stateless service call (Reviewer 1 #57)
        return await discovery_map_service.build_webgl_graph(db, seed_id, effective_limit)
    except HTTPException:
        # FIX: don't let the broad handler below rewrite a deliberate HTTP
        # error (e.g. a 404 from the service) into a generic 500.
        raise
    except Exception:
        # FIX: logger.exception already appends the traceback, so str(e) was
        # redundant; use lazy %-style args instead of an f-string so the
        # message is only formatted when the record is actually emitted.
        logger.exception("WebGL map generation failed for seed %s", seed_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Strategic Discovery Map engine failed to generate the network graph."
        )
58
+
59
+ # --- 2. The Institutional Export Endpoint ---
60
+
61
@router.post("/export/{format}", summary="Institutional metadata export")
async def export_discovery_map(
    format: ExportFormat,
    request: ExportRequest,
    db: AsyncSession = Depends(deps.get_db),
    current_user: User = Depends(deps.get_current_active_user)
):
    """
    Fulfills Phase 6: BibTeX, RIS, and CSV export for institutional use.

    RESOLUTION: Materialized Content Pattern (Reviewer 1 #71).
    Fetches and resolves all data before streaming to prevent DB connection leaks.

    Raises:
        HTTPException 404: when none of the requested papers exist locally.
    """
    # 1. Fetch metadata and close DB context immediately
    stmt = select(Paper).where(Paper.openalex_id.in_(request.paper_ids))
    result = await db.execute(stmt)
    papers = result.scalars().all()

    if not papers:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Specified papers were not found in the local repository."
        )

    # 2. Convert and Materialize (Safe up to 5k items in memory)
    # This ensures the DB session is released back to the pool before the stream starts.
    if format == ExportFormat.BIBTEX:
        content = export_service.to_bibtex(papers)
        media_type = "application/x-bibtex"
    elif format == ExportFormat.RIS:
        content = export_service.to_ris(papers)
        media_type = "application/x-research-info-systems"
    else:
        content = export_service.to_csv(papers)
        media_type = "text/csv; charset=utf-8"

    # 3. Stream pre-generated content
    # FIX: the Content-Disposition header previously hard-coded a literal
    # placeholder instead of interpolating `filename`, leaving the computed
    # name unused and giving every download the same bogus filename.
    filename = f"rm_export_{int(time.time())}.{format.value}"
    headers = {"Content-Disposition": f'attachment; filename="{filename}"'}

    return StreamingResponse(
        iter([content]),  # Pass as iterator to ensure compliance with StreamingResponse
        media_type=media_type,
        headers=headers
    )