Jrine committed on
Commit
4eba38c
·
1 Parent(s): d1e15b6

Production-ready API with logging, validation, and docs

Browse files
Files changed (1) hide show
  1. app.py +274 -46
app.py CHANGED
@@ -1,19 +1,35 @@
1
- from typing import List, Optional
 
 
 
 
 
 
 
2
 
3
  import numpy as np
4
  from fastapi import FastAPI, HTTPException
5
  from fastapi.middleware.cors import CORSMiddleware
6
- from pydantic import BaseModel
7
  from sentence_transformers import SentenceTransformer
8
- from transformers import pipeline
 
 
 
 
 
 
9
 
 
10
  app = FastAPI(
11
  title="Learning Objective Taxonomy API",
12
- description="API for Bloom's, Dave's, and CO-PO analysis",
13
  version="1.0.0",
 
 
14
  )
15
 
16
- # CORS middleware for Next.js
17
  app.add_middleware(
18
  CORSMiddleware,
19
  allow_origins=["*"],
@@ -22,39 +38,174 @@ app.add_middleware(
22
  allow_headers=["*"],
23
  )
24
 
25
- # Load your models at startup
26
- print("Loading models...")
27
- try:
28
- blooms_model = pipeline("text-classification", model="Jrine/blooms")
29
- dave_model = pipeline("text-classification", model="Jrine/dave")
30
- coppo_model = SentenceTransformer("Jrine/co-po")
31
- print("✅ All models loaded successfully!")
32
- except Exception as e:
33
- print(f"❌ Error loading models: {e}")
34
- blooms_model = None
35
- dave_model = None
36
- coppo_model = None
37
 
38
 
39
  # Pydantic models
40
  class TextRequest(BaseModel):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  text: str
 
 
42
 
43
 
44
- class AnalyzeAllRequest(BaseModel):
 
 
45
  text: str
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
 
47
 
48
- @app.get("/")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  async def root():
 
50
  return {
51
  "message": "Learning Objective Taxonomy API",
52
  "version": "1.0.0",
 
53
  "endpoints": {
54
  "blooms": "/api/blooms",
55
  "dave": "/api/dave",
56
  "coppo": "/api/coppo",
57
  "analyze": "/api/analyze",
 
58
  },
59
  "status": {
60
  "blooms": blooms_model is not None,
@@ -64,111 +215,188 @@ async def root():
64
  }
65
 
66
 
67
- @app.post("/api/blooms")
 
 
 
 
 
 
68
  async def predict_blooms(request: TextRequest):
 
69
  if blooms_model is None:
70
- raise HTTPException(status_code=503, detail="Blooms model not loaded")
 
71
 
72
  try:
73
- results = blooms_model(request.text)
 
 
 
74
 
75
  return {
76
  "success": True,
77
  "model": "blooms-taxonomy",
78
  "text": request.text,
79
  "prediction": {
80
- "level": results[0]["label"],
81
- "confidence": results[0]["score"],
82
  "all_predictions": results,
83
  },
84
- "timestamp": None,
85
  }
86
  except Exception as e:
 
87
  raise HTTPException(status_code=500, detail=str(e))
88
 
89
 
90
- @app.post("/api/dave")
 
 
 
 
 
 
91
  async def predict_dave(request: TextRequest):
 
92
  if dave_model is None:
93
- raise HTTPException(status_code=503, detail="Dave model not loaded")
 
94
 
95
  try:
96
- results = dave_model(request.text)
 
 
 
97
 
98
  return {
99
  "success": True,
100
  "model": "dave-psychomotor",
101
  "text": request.text,
102
  "prediction": {
103
- "level": results[0]["label"],
104
- "confidence": results[0]["score"],
105
  "all_predictions": results,
106
  },
107
- "timestamp": None,
108
  }
109
  except Exception as e:
 
110
  raise HTTPException(status_code=500, detail=str(e))
111
 
112
 
113
- @app.post("/api/coppo")
 
 
 
 
 
 
114
  async def predict_coppo(request: TextRequest):
 
115
  if coppo_model is None:
 
116
  raise HTTPException(status_code=503, detail="CO-PO model not loaded")
117
 
118
  try:
 
119
  embeddings = coppo_model.encode(request.text)
120
 
 
 
 
 
 
 
 
 
 
121
  return {
122
  "success": True,
123
  "model": "co-po",
124
  "text": request.text,
125
- "embeddings": embeddings.tolist(),
126
- "timestamp": None,
127
  }
128
  except Exception as e:
 
129
  raise HTTPException(status_code=500, detail=str(e))
130
 
131
 
132
- @app.post("/api/analyze")
133
- async def analyze_all(request: AnalyzeAllRequest):
 
 
 
 
 
 
 
134
  if not all([blooms_model, dave_model, coppo_model]):
 
135
  raise HTTPException(status_code=503, detail="Not all models loaded")
136
 
137
  try:
 
 
138
  # Run all models
139
- blooms_results = blooms_model(request.text)
140
- dave_results = dave_model(request.text)
141
  coppo_embeddings = coppo_model.encode(request.text)
142
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
  return {
144
  "success": True,
145
  "text": request.text,
146
  "results": {
147
  "blooms": {
148
- "level": blooms_results[0]["label"],
149
- "confidence": blooms_results[0]["score"],
150
  "all_predictions": blooms_results,
151
  "model": "Jrine/blooms",
152
  },
153
  "dave": {
154
- "level": dave_results[0]["label"],
155
- "confidence": dave_results[0]["score"],
156
  "all_predictions": dave_results,
157
  "model": "Jrine/dave",
158
  },
159
  "coppo": {
160
- "embeddings": coppo_embeddings.tolist(),
161
  "model": "Jrine/co-po",
162
  },
163
  },
164
- "timestamp": None,
165
  }
166
  except Exception as e:
 
167
  raise HTTPException(status_code=500, detail=str(e))
168
 
169
 
170
- @app.get("/health")
 
 
 
 
 
 
171
  async def health():
 
172
  return {
173
  "status": "healthy",
174
  "models_loaded": {
@@ -182,4 +410,4 @@ async def health():
182
  if __name__ == "__main__":
183
  import uvicorn
184
 
185
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
1
+ """
2
+ Learning Objective Taxonomy API
3
+ FastAPI backend for Bloom's, Dave's, and CO-PO analysis
4
+ """
5
+
6
import logging
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

import numpy as np
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sentence_transformers import SentenceTransformer
from transformers import Pipeline, pipeline
16
+
17
# Logging setup: timestamped INFO-level records for the whole module.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
22
 
23
# FastAPI application with interactive docs enabled at /docs and /redoc.
app = FastAPI(
    title="Learning Objective Taxonomy API",
    description="API for Bloom's Taxonomy, Dave's Psychomotor, and CO-PO Mapping analysis",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
)
31
 
32
+ # CORS middleware
33
  app.add_middleware(
34
  CORSMiddleware,
35
  allow_origins=["*"],
 
38
  allow_headers=["*"],
39
  )
40
 
41
# Module-level model handles; all start as None and are populated by the
# startup hook (load_models). Endpoints treat None as "model unavailable".
blooms_model: Optional[Pipeline] = None
dave_model: Optional[Pipeline] = None
coppo_model: Optional[SentenceTransformer] = None
 
 
 
 
 
 
 
 
45
 
46
 
47
# Pydantic models
class TextRequest(BaseModel):
    """Request payload carrying the learning-objective text to analyze."""

    # Validated input: must be non-empty and at most 1000 characters.
    text: str = Field(
        ...,
        min_length=1,
        max_length=1000,
        description="Learning objective text to analyze",
    )
55
+
56
+
57
class PredictionResult(BaseModel):
    """Top prediction plus the full score list from one classifier."""

    level: Optional[str] = Field(None, description="Predicted taxonomy level")
    confidence: Optional[float] = Field(None, description="Confidence score (0-1)")
    all_predictions: List[Dict[str, Any]] = Field(
        default_factory=list, description="All prediction results"
    )


class SingleModelResponse(BaseModel):
    """Response envelope for a single classification endpoint."""

    success: bool
    model: str
    text: str
    prediction: PredictionResult
    timestamp: str


class EmbeddingResponse(BaseModel):
    """Response envelope for the CO-PO embedding endpoint."""

    success: bool
    model: str
    text: str
    embeddings: List[float]
    timestamp: str


class CombinedResults(BaseModel):
    """Per-model result payloads for the combined /api/analyze endpoint."""

    blooms: Dict[str, Any]
    dave: Dict[str, Any]
    coppo: Dict[str, Any]


class CombinedResponse(BaseModel):
    """Response envelope for the combined (all-models) analysis."""

    success: bool
    text: str
    results: CombinedResults
    timestamp: str


class HealthResponse(BaseModel):
    """Health-check payload: overall status plus per-model availability."""

    status: str
    models_loaded: Dict[str, bool]
97
+
98
+
99
# Helper functions
def normalize_results(results: Any) -> List[Dict[str, Any]]:
    """Coerce whatever a model returns into a list of dictionaries.

    Dict entries pass through unchanged; non-dict entries in a sequence
    become ``{"label": str(entry)}``; numpy arrays become ``{"value": ...}``
    records; anything non-iterable is wrapped as a single ``{"value": ...}``.
    """

    def wrap(entry: Any) -> Dict[str, Any]:
        # Non-dict entries are stringified under a "label" key.
        return entry if isinstance(entry, dict) else {"label": str(entry)}

    if isinstance(results, list):
        return [wrap(entry) for entry in results]

    if isinstance(results, dict):
        return [results]

    if isinstance(results, np.ndarray):
        try:
            return [{"value": entry.tolist()} for entry in results]
        except Exception:
            # 0-d arrays are not iterable; fall back to the whole array.
            return [{"value": results.tolist()}]

    try:
        return [wrap(entry) for entry in list(results)]
    except (TypeError, AttributeError):
        return [{"value": results}]
124
+
125
+
126
def extract_label_and_score(results: List[Dict[str, Any]]) -> Dict[str, Optional[Any]]:
    """Extract the top label and score from normalized results.

    Looks at the first entry only and returns ``{"label": ..., "score": ...}``
    with ``None`` placeholders when the list is empty, the keys are missing,
    or the score is not numeric.

    Bug fix: the previous ``first.get("score") or first.get("confidence")``
    treated a legitimate score of 0.0 as falsy and fell through to the
    "confidence" key (usually None). The keys are now checked explicitly
    against ``None`` so a 0.0 score is preserved.
    """
    if not results:
        return {"label": None, "score": None}

    first = results[0]
    label = first.get("label")

    # Prefer "score", fall back to "confidence" only when "score" is absent.
    score = first.get("score")
    if score is None:
        score = first.get("confidence")

    try:
        score = float(score) if score is not None else None
    except (ValueError, TypeError):
        # Non-numeric score values degrade gracefully to None.
        score = None

    return {"label": label, "score": score}
141
 
142
+
143
def get_timestamp() -> str:
    """Return the current UTC time as an ISO-8601 string with a "Z" suffix.

    Uses timezone-aware ``datetime.now(timezone.utc)`` instead of the
    deprecated naive ``datetime.utcnow()`` (deprecated since Python 3.12),
    while keeping the original offset-free ``...Z`` output format.
    """
    return datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z"
146
+
147
+
148
# Model loading at startup
@app.on_event("startup")
async def load_models():
    """Load all models at application startup.

    Each model is loaded in its own try/except so a single failure does
    not prevent the remaining models from becoming available; a failed
    model simply stays None and its endpoints return 503.
    """
    global blooms_model, dave_model, coppo_model

    banner = "=" * 60
    logger.info(banner)
    logger.info("Starting model loading...")
    logger.info(banner)

    try:
        logger.info("Loading Bloom's Taxonomy model (Jrine/blooms)...")
        blooms_model = pipeline("text-classification", model="Jrine/blooms")
        logger.info("✅ Bloom's model loaded successfully")
    except Exception as exc:
        logger.error(f"❌ Failed to load Bloom's model: {exc}")
        blooms_model = None

    try:
        logger.info("Loading Dave's Psychomotor model (Jrine/dave)...")
        dave_model = pipeline("text-classification", model="Jrine/dave")
        logger.info("✅ Dave's model loaded successfully")
    except Exception as exc:
        logger.error(f"❌ Failed to load Dave's model: {exc}")
        dave_model = None

    try:
        logger.info("Loading CO-PO mapping model (Jrine/co-po)...")
        coppo_model = SentenceTransformer("Jrine/co-po")
        logger.info("✅ CO-PO model loaded successfully")
    except Exception as exc:
        logger.error(f"❌ Failed to load CO-PO model: {exc}")
        coppo_model = None

    logger.info(banner)
    logger.info(
        f"Model loading complete - "
        f"Bloom's: {blooms_model is not None}, "
        f"Dave's: {dave_model is not None}, "
        f"CO-PO: {coppo_model is not None}"
    )
    logger.info(banner)
193
+
194
+
195
+ # API Endpoints
196
+ @app.get("/", tags=["Root"])
197
  async def root():
198
+ """API root endpoint with status information."""
199
  return {
200
  "message": "Learning Objective Taxonomy API",
201
  "version": "1.0.0",
202
+ "documentation": "/docs",
203
  "endpoints": {
204
  "blooms": "/api/blooms",
205
  "dave": "/api/dave",
206
  "coppo": "/api/coppo",
207
  "analyze": "/api/analyze",
208
+ "health": "/health",
209
  },
210
  "status": {
211
  "blooms": blooms_model is not None,
 
215
  }
216
 
217
 
218
@app.post(
    "/api/blooms",
    response_model=SingleModelResponse,
    tags=["Classification"],
    summary="Classify Bloom's Taxonomy Level",
    description="Classifies learning objectives into Bloom's 6 cognitive levels: Remember, Understand, Apply, Analyze, Evaluate, Create",
)
async def predict_blooms(request: TextRequest):
    """Predict Bloom's Taxonomy cognitive level for a learning objective."""
    # Guard: the model may have failed to load at startup.
    if blooms_model is None:
        logger.error("Bloom's model not available")
        raise HTTPException(status_code=503, detail="Bloom's model not loaded")

    try:
        logger.info(f"Bloom's prediction for: {request.text[:50]}...")
        predictions = normalize_results(blooms_model(request.text))
        top = extract_label_and_score(predictions)

        return {
            "success": True,
            "model": "blooms-taxonomy",
            "text": request.text,
            "prediction": {
                "level": top["label"],
                "confidence": top["score"],
                "all_predictions": predictions,
            },
            "timestamp": get_timestamp(),
        }
    except Exception as exc:
        logger.error(f"Bloom's prediction error: {exc}")
        raise HTTPException(status_code=500, detail=str(exc))
251
 
252
 
253
@app.post(
    "/api/dave",
    response_model=SingleModelResponse,
    tags=["Classification"],
    summary="Classify Dave's Psychomotor Level",
    description="Classifies learning objectives into Dave's 5 psychomotor levels: Imitation, Manipulation, Precision, Articulation, Naturalization",
)
async def predict_dave(request: TextRequest):
    """Predict Dave's Psychomotor motor skill level for a learning objective."""
    # Guard: the model may have failed to load at startup.
    if dave_model is None:
        logger.error("Dave's model not available")
        raise HTTPException(status_code=503, detail="Dave's model not loaded")

    try:
        logger.info(f"Dave's prediction for: {request.text[:50]}...")
        predictions = normalize_results(dave_model(request.text))
        top = extract_label_and_score(predictions)

        return {
            "success": True,
            "model": "dave-psychomotor",
            "text": request.text,
            "prediction": {
                "level": top["label"],
                "confidence": top["score"],
                "all_predictions": predictions,
            },
            "timestamp": get_timestamp(),
        }
    except Exception as exc:
        logger.error(f"Dave's prediction error: {exc}")
        raise HTTPException(status_code=500, detail=str(exc))
286
 
287
 
288
@app.post(
    "/api/coppo",
    response_model=EmbeddingResponse,
    tags=["Embeddings"],
    summary="Generate CO-PO Embeddings",
    description="Generates semantic embeddings for Course Outcome to Program Outcome mapping",
)
async def predict_coppo(request: TextRequest):
    """Generate CO-PO semantic embeddings for a course outcome."""
    # Guard: the model may have failed to load at startup.
    if coppo_model is None:
        logger.error("CO-PO model not available")
        raise HTTPException(status_code=503, detail="CO-PO model not loaded")

    try:
        logger.info(f"CO-PO embedding for: {request.text[:50]}...")
        vector = coppo_model.encode(request.text)

        # Coerce the encoder output into a plain Python list for JSON.
        if isinstance(vector, np.ndarray):
            emb_list = vector.tolist()
        elif hasattr(vector, "__iter__"):
            emb_list = list(vector)
        else:
            emb_list = [float(vector)]

        return {
            "success": True,
            "model": "co-po",
            "text": request.text,
            "embeddings": emb_list,
            "timestamp": get_timestamp(),
        }
    except Exception as exc:
        logger.error(f"CO-PO embedding error: {exc}")
        raise HTTPException(status_code=500, detail=str(exc))
324
 
325
 
326
@app.post(
    "/api/analyze",
    response_model=CombinedResponse,
    tags=["Combined Analysis"],
    summary="Analyze with All Models",
    description="Runs all three models (Bloom's, Dave's, CO-PO) on the input text and returns combined results",
)
async def analyze_all(request: TextRequest):
    """Analyze learning objective with all three models simultaneously."""
    # Combined analysis needs every model; otherwise report unavailability.
    if not all([blooms_model, dave_model, coppo_model]):
        logger.error("Not all models available for combined analysis")
        raise HTTPException(status_code=503, detail="Not all models loaded")

    try:
        logger.info(f"Combined analysis for: {request.text[:50]}...")

        # Run all models
        blooms_preds = normalize_results(blooms_model(request.text))
        dave_preds = normalize_results(dave_model(request.text))
        vector = coppo_model.encode(request.text)

        top_blooms = extract_label_and_score(blooms_preds)
        top_dave = extract_label_and_score(dave_preds)

        # Coerce the encoder output into a plain Python list for JSON.
        if isinstance(vector, np.ndarray):
            emb_list = vector.tolist()
        elif hasattr(vector, "__iter__"):
            emb_list = list(vector)
        else:
            emb_list = [float(vector)]

        return {
            "success": True,
            "text": request.text,
            "results": {
                "blooms": {
                    "level": top_blooms["label"],
                    "confidence": top_blooms["score"],
                    "all_predictions": blooms_preds,
                    "model": "Jrine/blooms",
                },
                "dave": {
                    "level": top_dave["label"],
                    "confidence": top_dave["score"],
                    "all_predictions": dave_preds,
                    "model": "Jrine/dave",
                },
                "coppo": {
                    "embeddings": emb_list,
                    "model": "Jrine/co-po",
                },
            },
            "timestamp": get_timestamp(),
        }
    except Exception as exc:
        logger.error(f"Combined analysis error: {exc}")
        raise HTTPException(status_code=500, detail=str(exc))
389
 
390
 
391
+ @app.get(
392
+ "/health",
393
+ response_model=HealthResponse,
394
+ tags=["Health"],
395
+ summary="Health Check",
396
+ description="Returns API health status and model availability",
397
+ )
398
  async def health():
399
+ """Health check endpoint for monitoring."""
400
  return {
401
  "status": "healthy",
402
  "models_loaded": {
 
410
if __name__ == "__main__":
    # Direct-execution entry point; binds all interfaces on port 7860.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860, log_level="info")