File size: 9,980 Bytes
713632e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
df25ba9
713632e
 
df25ba9
03c33de
df25ba9
713632e
 
 
 
 
 
 
 
3a0873f
 
713632e
 
 
 
 
df25ba9
 
713632e
 
df25ba9
713632e
 
 
 
 
 
 
 
 
 
df25ba9
713632e
 
 
 
 
 
 
 
3a0873f
713632e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3a0873f
713632e
 
 
 
 
 
 
 
 
 
 
 
3a0873f
713632e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3a0873f
713632e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3a0873f
713632e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3a0873f
713632e
 
 
 
df25ba9
 
 
713632e
 
 
 
 
 
 
df25ba9
713632e
3a0873f
 
713632e
 
 
 
 
 
 
 
 
 
 
 
 
3a0873f
713632e
 
 
 
 
 
 
3a0873f
 
713632e
 
 
 
 
fdf05cf
 
 
3a0873f
fdf05cf
3a0873f
713632e
 
df25ba9
 
 
 
 
 
 
713632e
 
 
 
 
 
3a0873f
 
713632e
 
 
9dae1dd
c712954
9dae1dd
 
 
 
 
 
 
713632e
 
 
 
 
3a0873f
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
"""Main script: REST API initialization and endpoints.

This module defines the FastAPI application for:
- Retrieving available ML models and supported programming languages
- Classifying code comments using multi-label classification

The API is designed to be client-agnostic and supports concurrent requests
through asynchronous endpoint handlers and background thread execution
for CPU-bound ML inference tasks.
"""

import asyncio
import json
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager
from datetime import datetime, timezone
from functools import wraps
from http import HTTPStatus
from typing import Any, Callable, Dict

from fastapi import FastAPI, Request, Response
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse

from nygaardcodecommentclassification import config
from nygaardcodecommentclassification.api.controllers import PredictionController
from nygaardcodecommentclassification.api.schemas import PredictionRequest

# ---------------------------------------------------------------------------
# Global Resources
# ---------------------------------------------------------------------------
# Initialize the prediction controller (models loaded from MLflow on startup).
# Note: instantiation here is lightweight; the actual model loading happens in
# the lifespan startup hook below (controller.startup()).
controller = PredictionController()

# Thread pool for CPU-bound ML inference tasks
# This prevents blocking the async event loop during model predictions
_executor = ThreadPoolExecutor(max_workers=4)

# ---------------------------------------------------------------------------


@asynccontextmanager
async def lifespan(app: FastAPI) -> Any:
    """Async context manager for application lifecycle events.

    This handles:
    - Startup: Load all ML models into memory for fast inference
    - Shutdown: Release model resources and drain the inference thread pool

    Args:
        app: The FastAPI application instance

    Yields:
        None: Control back to the application after startup is complete
    """
    # Startup: load models into memory before serving any traffic
    controller.startup()
    try:
        yield
    finally:
        # Shutdown: release resources. Guarded by try/finally so cleanup runs
        # even if the server throws an exception into the generator (e.g. a
        # crash or cancellation while serving) — otherwise the executor's
        # worker threads would be leaked.
        controller.shutdown()
        _executor.shutdown(wait=True)


# ---------------------------------------------------------------------------
# FastAPI Application Definition
# ---------------------------------------------------------------------------
app = FastAPI(
    title="Nygaard Code Comment Classification API",
    description="""
    Multi-label classification API for code comments.
    """,
    version="1.0",
    lifespan=lifespan,  # hooks model load/unload into app startup/shutdown
)

# ---------------------------------------------------------------------------
# CORS Middleware Configuration
# ---------------------------------------------------------------------------
# Enable Cross-Origin Resource Sharing (CORS) for client-agnostic access.
# NOTE(review): per the CORS spec, browsers reject credentialed requests when
# the allowed origin is the wildcard "*" — confirm whether allow_credentials
# is actually needed here, or restrict allow_origins in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Restrict to specific domains in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# ---------------------------------------------------------------------------
# Response Decorator
# ---------------------------------------------------------------------------
def construct_response(f: Callable) -> Callable:
    """Decorator to enforce a consistent JSON response structure across all endpoints.

    This decorator wraps endpoint functions to provide:
    - Uniform response format with timestamp, method, URL, status, and data
    - Centralized error handling for ValueError (client errors) and Exception (server errors)
    - Automatic HTTP status code mapping

    Args:
        f: The endpoint function to wrap

    Returns:
        Wrapped function that returns a standardized response dict

    Response Structure:
        {
            "timestamp": "ISO 8601 timestamp (UTC, timezone-aware)",
            "method": "HTTP method (GET, POST, etc.)",
            "url": "Full request URL",
            "status-code": "HTTP status code",
            "message": "Status message or error description",
            "data": "Response payload (if successful)"
        }
    """

    @wraps(f)
    async def wrap(request: "Request", *args, **kwargs) -> Dict[str, Any]:
        # Initialize response with request metadata. Use a timezone-aware UTC
        # timestamp: naive datetime.now() depends on server-local time and
        # produces an ambiguous ISO 8601 string.
        response_struct: Dict[str, Any] = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "method": request.method,
            "url": str(request.url),
        }

        try:
            # Execute the wrapped endpoint function
            results = await f(request, *args, **kwargs)

            # If function returns a dict with status/message/data, use it directly
            if isinstance(results, dict) and "status-code" in results:
                response_struct.update(results)
            else:
                # Fallback for simple returns without explicit status
                response_struct["status-code"] = HTTPStatus.OK
                response_struct["message"] = HTTPStatus.OK.phrase
                response_struct["data"] = results

        except ValueError as e:
            # Client errors: invalid input, unsupported language/model, etc.
            response_struct["status-code"] = HTTPStatus.BAD_REQUEST
            response_struct["message"] = str(e)
        except Exception as e:
            # Server errors (boundary catch-all): model failures, config issues, etc.
            response_struct["status-code"] = HTTPStatus.INTERNAL_SERVER_ERROR
            response_struct["message"] = f"Internal Server Error: {str(e)}"

        return response_struct

    return wrap


# ---------------------------------------------------------------------------
# API Endpoints
# ---------------------------------------------------------------------------


@app.get("/models", tags=["Info"])
@construct_response
async def _get_models(request: Request) -> Dict[str, Any]:
    """List the available ML models, grouped by programming language.

    Returns:
        Dict containing:
        - status-code: HTTP 200 on success
        - message: Status description
        - data: Dict mapping languages to available model types

    Example Response:
        {
            "java": ["catboost"],
            "python": ["catboost"],
            "pharo": ["catboost"]
        }
    """
    return {
        "status-code": HTTPStatus.OK,
        "message": "Available models retrieved",
        "data": controller.get_models_info(),
    }


@app.get("/languages", tags=["Info"])
@construct_response
async def _get_languages(request: Request) -> Dict[str, Any]:
    """List the supported programming languages.

    Each supported language has its own trained classification model;
    this endpoint reports which languages the API can serve.

    Returns:
        Dict containing:
        - status-code: HTTP 200 on success
        - message: Status description
        - data: Dict with "languages" key holding the list of languages

    Example Response:
        {
            "languages": ["java", "python", "pharo"]
        }
    """
    supported = {"languages": config.LANGUAGES}
    return {
        "status-code": HTTPStatus.OK,
        "message": "Supported languages retrieved",
        "data": supported,
    }


@app.post("/predict", tags=["Prediction"])
@construct_response
async def _predict(
    request: Request, response: Response, payload: PredictionRequest
) -> Dict[str, Any]:
    """Classify code comments using multi-label classification.

    This endpoint performs ML inference to classify code comments into
    multiple categories.

    Args:
        request: The FastAPI request object
        response: The FastAPI response object
        payload: PredictionRequest containing:
            - texts: List of code comments
            - class_names: List of class names corresponding to each comment
            - language: Programming language ("java", "python", "pharo")
            - model_type: Model to use (default: "catboost")

    Returns:
        Dict containing:
        - status-code: HTTP 200 on success, 400 on invalid input, 500 on error
        - message: Status description
        - data: Dict with model_used, language, and results list

    Example Request:
        POST /predict
        {
            "texts": ["This method calculates fibonacci", "this is a deprecated function"],
            "class_names": ["MathUtils", "Utils"],
            "language": "java",
            "model_type": "catboost"
        }

    Example Response:
        {
            "results": [
                {"text": "This method calculates fibonacci", "class_name": "MathUtils", "labels": ["summary"]},
                {"text": "this is a deprecated function", "class_name": "Utils", "labels": ["deprecation"]}
            ]
        }
    """
    # Run CPU-bound model inference in the dedicated thread pool so the async
    # event loop stays responsive. get_running_loop() is the correct call from
    # inside a coroutine — get_event_loop() is deprecated here since Py 3.10.
    loop = asyncio.get_running_loop()
    results = await loop.run_in_executor(
        _executor,
        controller.predict,
        payload.texts,
        payload.class_names,
        payload.language,
        payload.model_type,
    )

    # Surface inference metadata to clients via custom response headers.
    response.headers["X-model"] = payload.model_type
    response.headers["X-language"] = payload.language

    # Flatten the per-comment label lists into a single header value.
    all_labels = [label for result in results for label in result["labels"]]
    response.headers["X-predicted-labels"] = json.dumps(all_labels)

    return {
        "status-code": HTTPStatus.OK,
        "message": "Prediction successful",
        "data": {
            "model_used": payload.model_type,
            "language": payload.language,
            "results": results,
        },
    }


@app.get("/", tags=["Info"])
async def _root(request: Request) -> RedirectResponse:
    """Redirect the bare root path to the interactive API documentation.

    Returns:
        RedirectResponse pointing at the auto-generated docs at /docs
    """
    return RedirectResponse(url="/docs")


# ---------------------------------------------------------------------------
# Entry Point
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Local/dev entry point; production deployments should invoke uvicorn
    # externally (e.g. `uvicorn module:app`). Import kept local so the module
    # can be imported without uvicorn installed.
    import uvicorn

    # Bind to all interfaces on port 7860.
    uvicorn.run(app, host="0.0.0.0", port=7860)