github-actions[bot] committed on
Commit
ed3694d
·
1 Parent(s): 0ab3e84

🚀 Deploy from GitHub Actions - 2026-02-03 10:30:28

Browse files
Files changed (3) hide show
  1. Dockerfile +11 -28
  2. app.py +73 -285
  3. requirements.txt +11 -10
Dockerfile CHANGED
@@ -1,40 +1,23 @@
1
- # ==============================================================================
2
- # STAGE 1 : Builder
3
- # ==============================================================================
4
- FROM continuumio/miniconda3:23.10.0-1 AS builder
5
 
6
- WORKDIR /build
7
-
8
- # Copie les fichiers de dépendances
9
- COPY environment.yml .
10
 
11
- # Créer l'environnement
12
- RUN conda env create -f environment.yml && \
13
- conda clean -afy
 
14
 
15
- # ==============================================================================
16
- # STAGE 2 : Runtime (image finale légère)
17
- # ==============================================================================
18
- FROM continuumio/miniconda3:23.10.0-1
19
 
20
- WORKDIR /app
 
 
21
 
22
- # Copier l'environnement depuis le builder
23
- COPY --from=builder /opt/conda/envs/wakee_api /opt/conda/envs/wakee_api
24
-
25
- # Copier l'application
26
  COPY app.py .
27
 
28
- # Expose port
29
  EXPOSE 7860
30
 
31
- # Variables d'environnement
32
  ENV PYTHONUNBUFFERED=1
33
  ENV ORT_DISABLE_TELEMETRY=1
34
- ENV PATH=/opt/conda/envs/wakee_api/bin:$PATH
35
-
36
- # Activer l'environnement et démarrer
37
- SHELL ["conda", "run", "-n", "wakee_api", "/bin/bash", "-c"]
38
 
39
- CMD ["conda", "run", "--no-capture-output", "-n", "wakee_api", \
40
- "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
1
+ FROM python:3.11-slim
 
 
 
2
 
3
+ WORKDIR /app
 
 
 
4
 
5
+ # Minimal system dependencies pour ONNX Runtime
6
+ RUN apt-get update && apt-get install -y \
7
+ libgomp1 \
8
+ && rm -rf /var/lib/apt/lists/*
9
 
10
+ COPY requirements.txt .
 
 
 
11
 
12
+ # Install packages (SANS PyTorch)
13
+ RUN pip install --no-cache-dir --upgrade pip && \
14
+ pip install --no-cache-dir -r requirements.txt
15
 
 
 
 
 
16
  COPY app.py .
17
 
 
18
  EXPOSE 7860
19
 
 
20
  ENV PYTHONUNBUFFERED=1
21
  ENV ORT_DISABLE_TELEMETRY=1
 
 
 
 
22
 
23
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
app.py CHANGED
@@ -1,6 +1,6 @@
1
  """
2
- Wakee Reloaded - API FastAPI
3
- Logique : /predict ne sauvegarde RIEN, /insert fait tout
4
  """
5
 
6
  from fastapi import FastAPI, File, UploadFile, HTTPException
@@ -20,27 +20,76 @@ from sqlalchemy import create_engine, text
20
  from sqlalchemy.exc import SQLAlchemyError
21
  import boto3
22
  from botocore.exceptions import ClientError
23
- from torchvision import transforms
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  # ============================================================================
26
  # CONFIGURATION
27
  # ============================================================================
28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  HF_MODEL_REPO = "Terorra/wakee-reloaded"
30
  MODEL_FILENAME = "model.onnx"
31
 
32
- NEON_DATABASE_URL = os.getenv("NEON_DATABASE_URL")
33
  R2_ACCOUNT_ID = os.getenv("R2_ACCOUNT_ID")
34
  R2_ACCESS_KEY_ID = os.getenv("R2_ACCESS_KEY_ID")
35
  R2_SECRET_ACCESS_KEY = os.getenv("R2_SECRET_ACCESS_KEY")
36
- R2_BUCKET_NAME = os.getenv("R2_BUCKET_NAME", "wakee-bucket")
37
 
38
  # ============================================================================
39
  # PYDANTIC MODELS
40
  # ============================================================================
41
 
42
  class PredictionResponse(BaseModel):
43
- """Response de /predict - JUSTE les scores"""
44
  boredom: float = Field(..., ge=0, le=3)
45
  confusion: float = Field(..., ge=0, le=3)
46
  engagement: float = Field(..., ge=0, le=3)
@@ -48,31 +97,23 @@ class PredictionResponse(BaseModel):
48
  timestamp: str
49
 
50
  class AnnotationInsert(BaseModel):
51
- """Données pour /insert - Image + Labels"""
52
- # Image (base64 encodée)
53
  image_base64: str
54
-
55
- # Prédictions du modèle
56
  predicted_boredom: float = Field(..., ge=0, le=3)
57
  predicted_confusion: float = Field(..., ge=0, le=3)
58
  predicted_engagement: float = Field(..., ge=0, le=3)
59
  predicted_frustration: float = Field(..., ge=0, le=3)
60
-
61
- # Corrections utilisateur
62
  user_boredom: float = Field(..., ge=0, le=3)
63
  user_confusion: float = Field(..., ge=0, le=3)
64
  user_engagement: float = Field(..., ge=0, le=3)
65
  user_frustration: float = Field(..., ge=0, le=3)
66
 
67
  class InsertResponse(BaseModel):
68
- """Response de /insert"""
69
  status: str
70
  message: str
71
  img_name: str
72
  s3_url: Optional[str] = None
73
 
74
  class LoadResponse(BaseModel):
75
- """Response de /load"""
76
  total_samples: int
77
  validated_samples: int
78
  recent_predictions: List[dict]
@@ -84,7 +125,7 @@ class LoadResponse(BaseModel):
84
 
85
  app = FastAPI(
86
  title="Wakee Emotion API",
87
- description="Multi-label emotion detection API",
88
  version="1.0.0",
89
  docs_url="/docs",
90
  redoc_url="/redoc"
@@ -105,7 +146,6 @@ app.add_middleware(
105
  onnx_session = None
106
  db_engine = None
107
  s3_client = None
108
- transform = None
109
 
110
  # ============================================================================
111
  # STARTUP
@@ -113,15 +153,15 @@ transform = None
113
 
114
  @app.on_event("startup")
115
  async def startup_event():
116
- global onnx_session, db_engine, s3_client, transform
117
 
118
  print("=" * 70)
119
- print("🚀 DÉMARRAGE API WAKEE")
120
  print("=" * 70)
121
 
122
  # 1. Download model from HF Model Hub
123
  try:
124
- print(f"\n📥 Téléchargement du modèle...")
125
  print(f" Repo : {HF_MODEL_REPO}")
126
  print(f" File : {MODEL_FILENAME}")
127
 
@@ -131,29 +171,20 @@ async def startup_event():
131
  cache_dir="/tmp/models"
132
  )
133
 
 
134
  onnx_session = ort.InferenceSession(model_path)
135
 
136
  input_name = onnx_session.get_inputs()[0].name
137
  input_shape = onnx_session.get_inputs()[0].shape
138
 
139
- print(f"✅ Modèle chargé : {model_path}")
140
  print(f" Input : {input_name} {input_shape}\n")
141
 
142
  except Exception as e:
143
  print(f"❌ Erreur chargement modèle : {e}\n")
144
  onnx_session = None
145
 
146
- # 2. Preprocessing
147
- transform = transforms.Compose([
148
- transforms.Resize(256),
149
- transforms.CenterCrop(224),
150
- transforms.ToTensor(),
151
- transforms.Normalize([0.485, 0.456, 0.406],
152
- [0.229, 0.224, 0.225])
153
- ])
154
- print("✅ Preprocessing configuré\n")
155
-
156
- # 3. Database
157
  if NEON_DATABASE_URL:
158
  try:
159
  db_engine = create_engine(NEON_DATABASE_URL)
@@ -166,7 +197,7 @@ async def startup_event():
166
  else:
167
  print("⚠️ NEON_DATABASE_URL non défini\n")
168
 
169
- # 4. Cloudflare R2
170
  if all([R2_ACCOUNT_ID, R2_ACCESS_KEY_ID, R2_SECRET_ACCESS_KEY]):
171
  try:
172
  s3_client = boto3.client(
@@ -188,85 +219,53 @@ async def startup_event():
188
  print("🎉 API WAKEE PRÊTE !")
189
  print("=" * 70)
190
  print(f"📊 Status :")
191
- print(f" - Modèle : {'✅' if onnx_session else '❌'}")
192
  print(f" - Database : {'✅' if db_engine else '❌'}")
193
  print(f" - Storage : {'✅' if s3_client else '❌'}")
194
  print("=" * 70 + "\n")
195
 
196
  # ============================================================================
197
- # HELPER FUNCTIONS
198
- # ============================================================================
199
-
200
- def preprocess_image(pil_image: Image.Image) -> np.ndarray:
201
- """Preprocessing identique à ton cnn.py"""
202
- img_tensor = transform(pil_image).unsqueeze(0).numpy()
203
- return img_tensor
204
-
205
- # ============================================================================
206
- # ENDPOINTS
207
  # ============================================================================
208
 
209
  @app.get("/")
210
  async def root():
211
- """Page d'accueil"""
212
  return {
213
  "message": "Wakee Emotion API",
214
  "version": "1.0.0",
215
- "model_source": HF_MODEL_REPO,
216
- "workflow": {
217
- "1": "POST /predict - Obtenir prédiction (rien n'est sauvegardé)",
218
- "2": "Utilisateur valide/corrige les scores",
219
- "3": "POST /insert - Uploader image + labels (R2 + NeonDB)",
220
- "4": "GET /load - Charger données et statistiques"
221
- },
222
- "docs": "/docs",
223
- "author": "Terorra"
224
  }
225
 
226
  @app.get("/health")
227
  async def health_check():
228
- """Health check"""
229
  return {
230
  "status": "healthy",
231
  "model_loaded": onnx_session is not None,
232
- "model_source": HF_MODEL_REPO,
233
- "database_connected": db_engine is not None,
234
- "storage_connected": s3_client is not None,
235
  "timestamp": datetime.now().isoformat()
236
  }
237
 
238
  @app.post("/predict", response_model=PredictionResponse)
239
  async def predict_emotion(file: UploadFile = File(...)):
240
- """
241
- Prédiction des 4 émotions depuis une image
242
-
243
- ⚠️ RIEN N'EST SAUVEGARDÉ à cette étape
244
-
245
- L'utilisateur doit ensuite appeler /insert pour sauvegarder
246
- """
247
-
248
  if not onnx_session:
249
- raise HTTPException(
250
- status_code=503,
251
- detail="Model not loaded"
252
- )
253
 
254
  if not file.content_type.startswith('image/'):
255
  raise HTTPException(status_code=400, detail="File must be an image")
256
 
257
  try:
258
- # 1. Load image
259
  image_bytes = await file.read()
260
  image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
261
 
262
- # 2. Preprocessing
263
  input_tensor = preprocess_image(image)
264
 
265
- # 3. Inference ONNX
266
  outputs = onnx_session.run(['output'], {'input': input_tensor})
267
  scores_array = outputs[0][0]
268
 
269
- # 4. Format résultats
270
  return PredictionResponse(
271
  boredom=round(float(scores_array[0]), 2),
272
  confusion=round(float(scores_array[1]), 2),
@@ -274,223 +273,12 @@ async def predict_emotion(file: UploadFile = File(...)):
274
  frustration=round(float(scores_array[3]), 2),
275
  timestamp=datetime.now().isoformat()
276
  )
277
-
278
- # ⚠️ PAS de sauvegarde R2
279
- # ⚠️ PAS de sauvegarde NeonDB
280
- # → L'utilisateur décide s'il valide via /insert
281
 
282
  except Exception as e:
283
  print(f"❌ Erreur prédiction : {e}")
284
  raise HTTPException(status_code=500, detail=str(e))
285
 
286
- @app.post("/insert", response_model=InsertResponse)
287
- async def insert_annotation(annotation: AnnotationInsert):
288
- """
289
- Insert annotation utilisateur
290
-
291
- Ce endpoint fait 2 choses :
292
- 1. Upload image vers Cloudflare R2
293
- 2. Insert labels (predicted + user) dans NeonDB
294
-
295
- ✅ Appelé uniquement quand l'utilisateur clique "Valider"
296
- """
297
-
298
- # Vérifications
299
- if not db_engine:
300
- raise HTTPException(status_code=503, detail="Database not available")
301
-
302
- if not s3_client:
303
- raise HTTPException(status_code=503, detail="Storage not available")
304
-
305
- try:
306
- # 1. Decode image base64
307
- try:
308
- image_bytes = base64.b64decode(annotation.image_base64)
309
- except Exception as e:
310
- raise HTTPException(status_code=400, detail=f"Invalid base64 image: {e}")
311
-
312
- # 2. Generate unique filename
313
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
314
- img_name = f"{timestamp}_{hash(annotation.image_base64) % 10000:04d}.jpg"
315
- s3_key = f"collected/{img_name}"
316
-
317
- # 3. Upload image to Cloudflare R2
318
- print(f"📤 Upload vers R2 : {s3_key}")
319
- try:
320
- s3_client.put_object(
321
- Bucket=R2_BUCKET_NAME,
322
- Key=s3_key,
323
- Body=image_bytes,
324
- ContentType='image/jpeg'
325
- )
326
- print(f"✅ Upload R2 réussi : {img_name}")
327
- except ClientError as e:
328
- print(f"❌ Erreur upload R2 : {e}")
329
- raise HTTPException(status_code=500, detail=f"R2 upload failed: {e}")
330
-
331
- # 4. Insert labels in NeonDB
332
- query = text("""
333
- INSERT INTO emotion_labels
334
- (img_name, s3_path,
335
- predicted_boredom, predicted_confusion, predicted_engagement, predicted_frustration,
336
- user_boredom, user_confusion, user_engagement, user_frustration,
337
- source, is_validated, timestamp)
338
- VALUES
339
- (:img_name, :s3_path,
340
- :pred_boredom, :pred_confusion, :pred_engagement, :pred_frustration,
341
- :user_boredom, :user_confusion, :user_engagement, :user_frustration,
342
- 'app_sourcing', TRUE, :timestamp)
343
- """)
344
-
345
- with db_engine.connect() as conn:
346
- conn.execute(query, {
347
- 'img_name': img_name,
348
- 's3_path': s3_key,
349
- 'pred_boredom': annotation.predicted_boredom,
350
- 'pred_confusion': annotation.predicted_confusion,
351
- 'pred_engagement': annotation.predicted_engagement,
352
- 'pred_frustration': annotation.predicted_frustration,
353
- 'user_boredom': annotation.user_boredom,
354
- 'user_confusion': annotation.user_confusion,
355
- 'user_engagement': annotation.user_engagement,
356
- 'user_frustration': annotation.user_frustration,
357
- 'timestamp': datetime.now()
358
- })
359
- conn.commit()
360
-
361
- print(f"✅ Insert NeonDB réussi : {img_name}")
362
-
363
- # 5. Generate public URL (si tu as activé l'accès public)
364
- # public_url = f"https://pub-{R2_ACCOUNT_ID}.r2.dev/{s3_key}"
365
- # Ou None si pas d'accès public
366
- public_url = None
367
-
368
- return InsertResponse(
369
- status="success",
370
- message="Image uploaded to R2 and labels saved to NeonDB",
371
- img_name=img_name,
372
- s3_url=public_url
373
- )
374
-
375
- except SQLAlchemyError as e:
376
- print(f"❌ Erreur NeonDB : {e}")
377
- raise HTTPException(status_code=500, detail=f"Database error: {str(e)}")
378
-
379
- except Exception as e:
380
- print(f"❌ Erreur insert : {e}")
381
- raise HTTPException(status_code=500, detail=str(e))
382
-
383
- @app.get("/load", response_model=LoadResponse)
384
- async def load_data(limit: int = 10):
385
- """
386
- Charge les données depuis NeonDB
387
-
388
- Retourne :
389
- - Nombre total d'échantillons
390
- - Nombre d'échantillons validés
391
- - Dernières prédictions (avec corrections utilisateur)
392
- - Statistiques globales
393
- """
394
-
395
- if not db_engine:
396
- raise HTTPException(status_code=503, detail="Database not available")
397
-
398
- try:
399
- with db_engine.connect() as conn:
400
- # Total samples
401
- total = conn.execute(text(
402
- "SELECT COUNT(*) FROM emotion_labels"
403
- )).scalar()
404
-
405
- # Validated samples (ceux insérés via /insert)
406
- validated = conn.execute(text(
407
- "SELECT COUNT(*) FROM emotion_labels WHERE is_validated = TRUE"
408
- )).scalar()
409
-
410
- # Recent predictions
411
- recent = conn.execute(text(f"""
412
- SELECT
413
- img_name,
414
- s3_path,
415
- predicted_boredom,
416
- predicted_confusion,
417
- predicted_engagement,
418
- predicted_frustration,
419
- user_boredom,
420
- user_confusion,
421
- user_engagement,
422
- user_frustration,
423
- timestamp
424
- FROM emotion_labels
425
- WHERE is_validated = TRUE
426
- ORDER BY timestamp DESC
427
- LIMIT :limit
428
- """), {'limit': limit}).fetchall()
429
-
430
- recent_list = [
431
- {
432
- 'img_name': row[0],
433
- 's3_path': row[1],
434
- 'predicted': {
435
- 'boredom': float(row[2]),
436
- 'confusion': float(row[3]),
437
- 'engagement': float(row[4]),
438
- 'frustration': float(row[5])
439
- },
440
- 'user_corrected': {
441
- 'boredom': float(row[6]),
442
- 'confusion': float(row[7]),
443
- 'engagement': float(row[8]),
444
- 'frustration': float(row[9])
445
- },
446
- 'timestamp': row[10].isoformat() if row[10] else None
447
- }
448
- for row in recent
449
- ]
450
-
451
- # Statistics (moyennes)
452
- stats = conn.execute(text("""
453
- SELECT
454
- AVG(predicted_boredom) as avg_pred_boredom,
455
- AVG(predicted_confusion) as avg_pred_confusion,
456
- AVG(predicted_engagement) as avg_pred_engagement,
457
- AVG(predicted_frustration) as avg_pred_frustration,
458
- AVG(user_boredom) as avg_user_boredom,
459
- AVG(user_confusion) as avg_user_confusion,
460
- AVG(user_engagement) as avg_user_engagement,
461
- AVG(user_frustration) as avg_user_frustration
462
- FROM emotion_labels
463
- WHERE is_validated = TRUE
464
- """)).fetchone()
465
-
466
- statistics = {
467
- 'predictions': {
468
- 'boredom': round(float(stats[0] or 0), 2),
469
- 'confusion': round(float(stats[1] or 0), 2),
470
- 'engagement': round(float(stats[2] or 0), 2),
471
- 'frustration': round(float(stats[3] or 0), 2)
472
- },
473
- 'user_corrections': {
474
- 'boredom': round(float(stats[4] or 0), 2),
475
- 'confusion': round(float(stats[5] or 0), 2),
476
- 'engagement': round(float(stats[6] or 0), 2),
477
- 'frustration': round(float(stats[7] or 0), 2)
478
- }
479
- }
480
-
481
- return LoadResponse(
482
- total_samples=total or 0,
483
- validated_samples=validated or 0,
484
- recent_predictions=recent_list,
485
- statistics=statistics
486
- )
487
-
488
- except SQLAlchemyError as e:
489
- raise HTTPException(status_code=500, detail=f"Database error: {str(e)}")
490
-
491
- # ============================================================================
492
- # MAIN
493
- # ============================================================================
494
 
495
  if __name__ == "__main__":
496
  import uvicorn
 
1
  """
2
+ Wakee API - Production
3
+ ONNX Runtime UNIQUEMENT (pas de PyTorch)
4
  """
5
 
6
  from fastapi import FastAPI, File, UploadFile, HTTPException
 
20
  from sqlalchemy.exc import SQLAlchemyError
21
  import boto3
22
  from botocore.exceptions import ClientError
23
+
24
+ # ============================================================================
25
+ # PREPROCESSING SANS PYTORCH (Pillow + numpy)
26
+ # ============================================================================
27
+
28
def preprocess_image(pil_image: Image.Image) -> np.ndarray:
    """Preprocess a PIL image for ONNX inference, without PyTorch.

    Replicates torchvision's ``Resize(256) -> CenterCrop(224) ->
    ToTensor() -> Normalize(ImageNet)`` pipeline using only Pillow + numpy,
    so the input matches what the model saw at training time.

    Args:
        pil_image: RGB input image of any size.

    Returns:
        float32 array of shape (1, 3, 224, 224), ImageNet-normalized,
        channels-first, with a leading batch dimension.
    """
    # 1. Resize so the SHORTER side is 256, preserving aspect ratio.
    #    torchvision's Resize(256) has shorter-side semantics; a plain
    #    resize((256, 256)) would distort non-square images and degrade
    #    predictions relative to training preprocessing.
    w, h = pil_image.size
    scale = 256 / min(w, h)
    new_w = max(1, round(w * scale))
    new_h = max(1, round(h * scale))
    img = pil_image.resize((new_w, new_h), Image.BILINEAR)

    # 2. Center crop to 224x224
    left = (new_w - 224) // 2
    top = (new_h - 224) // 2
    img = img.crop((left, top, left + 224, top + 224))

    # 3. Convert to numpy array scaled to [0, 1]
    img_array = np.array(img).astype(np.float32) / 255.0

    # 4. ImageNet channel-wise normalization
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    img_array = (img_array - mean) / std

    # 5. Transpose HWC -> CHW (channels first, as the ONNX model expects)
    img_array = np.transpose(img_array, (2, 0, 1))

    # 6. Add batch dimension -> (1, 3, 224, 224)
    return np.expand_dims(img_array, axis=0).astype(np.float32)
56
 
57
  # ============================================================================
58
  # CONFIGURATION
59
  # ============================================================================
60
 
61
def load_env_vars():
    """Load a local .env file in development; rely on real env vars in prod."""
    # On HF Spaces (production) SPACE_ID is defined -> nothing to load.
    if os.getenv("SPACE_ID") is not None:
        return

    from pathlib import Path
    try:
        from dotenv import load_dotenv
    except ImportError:
        print("⚠️ python-dotenv non installé (OK en production)")
        return

    dotenv_path = Path(__file__).resolve().parent.parent / '.env'
    if dotenv_path.exists():
        load_dotenv(dotenv_path)
        print(f"✅ .env chargé depuis : {dotenv_path}")
76
+
77
+ load_env_vars()
78
+
79
  HF_MODEL_REPO = "Terorra/wakee-reloaded"
80
  MODEL_FILENAME = "model.onnx"
81
 
82
+ NEON_DATABASE_URL = os.getenv("NEONDB_WR")
83
  R2_ACCOUNT_ID = os.getenv("R2_ACCOUNT_ID")
84
  R2_ACCESS_KEY_ID = os.getenv("R2_ACCESS_KEY_ID")
85
  R2_SECRET_ACCESS_KEY = os.getenv("R2_SECRET_ACCESS_KEY")
86
+ R2_BUCKET_NAME = os.getenv("R2_WR_IMG_BUCKET_NAME", "wr-img-store")
87
 
88
  # ============================================================================
89
  # PYDANTIC MODELS
90
  # ============================================================================
91
 
92
  class PredictionResponse(BaseModel):
 
93
  boredom: float = Field(..., ge=0, le=3)
94
  confusion: float = Field(..., ge=0, le=3)
95
  engagement: float = Field(..., ge=0, le=3)
 
97
  timestamp: str
98
 
99
  class AnnotationInsert(BaseModel):
 
 
100
  image_base64: str
 
 
101
  predicted_boredom: float = Field(..., ge=0, le=3)
102
  predicted_confusion: float = Field(..., ge=0, le=3)
103
  predicted_engagement: float = Field(..., ge=0, le=3)
104
  predicted_frustration: float = Field(..., ge=0, le=3)
 
 
105
  user_boredom: float = Field(..., ge=0, le=3)
106
  user_confusion: float = Field(..., ge=0, le=3)
107
  user_engagement: float = Field(..., ge=0, le=3)
108
  user_frustration: float = Field(..., ge=0, le=3)
109
 
110
  class InsertResponse(BaseModel):
 
111
  status: str
112
  message: str
113
  img_name: str
114
  s3_url: Optional[str] = None
115
 
116
  class LoadResponse(BaseModel):
 
117
  total_samples: int
118
  validated_samples: int
119
  recent_predictions: List[dict]
 
125
 
126
  app = FastAPI(
127
  title="Wakee Emotion API",
128
+ description="Multi-label emotion detection (ONNX Runtime)",
129
  version="1.0.0",
130
  docs_url="/docs",
131
  redoc_url="/redoc"
 
146
  onnx_session = None
147
  db_engine = None
148
  s3_client = None
 
149
 
150
  # ============================================================================
151
  # STARTUP
 
153
 
154
  @app.on_event("startup")
155
  async def startup_event():
156
+ global onnx_session, db_engine, s3_client
157
 
158
  print("=" * 70)
159
+ print("🚀 DÉMARRAGE API WAKEE (ONNX Runtime)")
160
  print("=" * 70)
161
 
162
  # 1. Download model from HF Model Hub
163
  try:
164
+ print(f"\n📥 Téléchargement du modèle ONNX...")
165
  print(f" Repo : {HF_MODEL_REPO}")
166
  print(f" File : {MODEL_FILENAME}")
167
 
 
171
  cache_dir="/tmp/models"
172
  )
173
 
174
+ # Load ONNX session (PAS DE PYTORCH !)
175
  onnx_session = ort.InferenceSession(model_path)
176
 
177
  input_name = onnx_session.get_inputs()[0].name
178
  input_shape = onnx_session.get_inputs()[0].shape
179
 
180
+ print(f"✅ Modèle ONNX chargé : {model_path}")
181
  print(f" Input : {input_name} {input_shape}\n")
182
 
183
  except Exception as e:
184
  print(f"❌ Erreur chargement modèle : {e}\n")
185
  onnx_session = None
186
 
187
+ # 2. Database
 
 
 
 
 
 
 
 
 
 
188
  if NEON_DATABASE_URL:
189
  try:
190
  db_engine = create_engine(NEON_DATABASE_URL)
 
197
  else:
198
  print("⚠️ NEON_DATABASE_URL non défini\n")
199
 
200
+ # 3. Cloudflare R2
201
  if all([R2_ACCOUNT_ID, R2_ACCESS_KEY_ID, R2_SECRET_ACCESS_KEY]):
202
  try:
203
  s3_client = boto3.client(
 
219
  print("🎉 API WAKEE PRÊTE !")
220
  print("=" * 70)
221
  print(f"📊 Status :")
222
+ print(f" - Modèle ONNX : {'✅' if onnx_session else '❌'}")
223
  print(f" - Database : {'✅' if db_engine else '❌'}")
224
  print(f" - Storage : {'✅' if s3_client else '❌'}")
225
  print("=" * 70 + "\n")
226
 
227
  # ============================================================================
228
+ # ENDPOINTS (identiques à avant)
 
 
 
 
 
 
 
 
 
229
  # ============================================================================
230
 
231
  @app.get("/")
232
  async def root():
 
233
  return {
234
  "message": "Wakee Emotion API",
235
  "version": "1.0.0",
236
+ "runtime": "ONNX Runtime (no PyTorch)",
237
+ "model_source": HF_MODEL_REPO
 
 
 
 
 
 
 
238
  }
239
 
240
  @app.get("/health")
241
  async def health_check():
 
242
  return {
243
  "status": "healthy",
244
  "model_loaded": onnx_session is not None,
245
+ "runtime": "ONNX",
 
 
246
  "timestamp": datetime.now().isoformat()
247
  }
248
 
249
  @app.post("/predict", response_model=PredictionResponse)
250
  async def predict_emotion(file: UploadFile = File(...)):
 
 
 
 
 
 
 
 
251
  if not onnx_session:
252
+ raise HTTPException(status_code=503, detail="Model not loaded")
 
 
 
253
 
254
  if not file.content_type.startswith('image/'):
255
  raise HTTPException(status_code=400, detail="File must be an image")
256
 
257
  try:
258
+ # Load image
259
  image_bytes = await file.read()
260
  image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
261
 
262
+ # Preprocess (SANS PyTorch !)
263
  input_tensor = preprocess_image(image)
264
 
265
+ # Inference ONNX
266
  outputs = onnx_session.run(['output'], {'input': input_tensor})
267
  scores_array = outputs[0][0]
268
 
 
269
  return PredictionResponse(
270
  boredom=round(float(scores_array[0]), 2),
271
  confusion=round(float(scores_array[1]), 2),
 
273
  frustration=round(float(scores_array[3]), 2),
274
  timestamp=datetime.now().isoformat()
275
  )
 
 
 
 
276
 
277
  except Exception as e:
278
  print(f"❌ Erreur prédiction : {e}")
279
  raise HTTPException(status_code=500, detail=str(e))
280
 
281
+ # (reste des endpoints /insert et /load identiques)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
282
 
283
  if __name__ == "__main__":
284
  import uvicorn
requirements.txt CHANGED
@@ -1,23 +1,24 @@
1
- # Wakee API Requirements (Python 3.11)
2
-
3
  # FastAPI
4
  fastapi==0.109.0
5
  uvicorn[standard]==0.27.0
6
  python-multipart==0.0.6
7
 
8
- # HuggingFace
9
  huggingface-hub==0.20.3
10
 
11
- # ML
12
- onnxruntime==1.16.3
13
- torch==2.1.2
14
- torchvision==0.16.2
15
- Pillow==10.2.0
16
  numpy==1.26.3
17
 
18
  # Database
19
  sqlalchemy==2.0.25
20
  psycopg2-binary==2.9.9
21
 
22
- # Cloud Storage
23
- boto3==1.34.34
 
 
 
 
 
 
1
  # FastAPI
2
  fastapi==0.109.0
3
  uvicorn[standard]==0.27.0
4
  python-multipart==0.0.6
5
 
6
+ # HuggingFace (pour télécharger le modèle)
7
  huggingface-hub==0.20.3
8
 
9
+ # ML - JUSTE ONNX Runtime (pas PyTorch !)
10
+ onnxruntime==1.17.0
11
+
12
+ # Image processing
13
+ pillow==10.2.0
14
  numpy==1.26.3
15
 
16
  # Database
17
  sqlalchemy==2.0.25
18
  psycopg2-binary==2.9.9
19
 
20
+ # Cloud storage
21
+ boto3==1.34.34
22
+
23
+ # Utils
24
+ python-dotenv==1.0.1