github-actions[bot] committed on
Commit
3e0359e
·
1 Parent(s): a8f2815

🚀 Deploy from GitHub Actions - 2026-02-09 13:54:08

Browse files
Files changed (2) hide show
  1. app.py +77 -18
  2. requirements.txt +5 -1
app.py CHANGED
@@ -160,31 +160,90 @@ async def startup_event():
160
  print("=" * 70)
161
  print("🚀 DÉMARRAGE API WAKEE (ONNX Runtime)")
162
  print("=" * 70)
163
-
164
- # 1. Download model from HF Model Hub
 
165
  try:
166
- print(f"\n📥 Téléchargement du modèle ONNX...")
167
- print(f" Repo : {HF_MODEL_REPO}")
168
- print(f" File : {MODEL_FILENAME}")
169
-
170
- model_path = hf_hub_download(
171
  repo_id=HF_MODEL_REPO,
172
- filename=MODEL_FILENAME,
173
  cache_dir="/tmp/models"
174
  )
175
-
176
- # Load ONNX session (PAS DE PYTORCH !)
177
- onnx_session = ort.InferenceSession(model_path)
178
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
  input_name = onnx_session.get_inputs()[0].name
180
  input_shape = onnx_session.get_inputs()[0].shape
181
-
182
- print(f"✅ Modèle ONNX chargé : {model_path}")
183
  print(f" Input : {input_name} {input_shape}\n")
184
-
185
- except Exception as e:
186
- print(f"❌ Erreur chargement modèle : {e}\n")
187
- onnx_session = None
188
 
189
  # 2. Database
190
  if NEON_DATABASE_URL:
 
160
  print("=" * 70)
161
  print("🚀 DÉMARRAGE API WAKEE (ONNX Runtime)")
162
  print("=" * 70)
163
+
164
+ onnx_session = None
165
+
166
  try:
167
+ print("\n📥 Tentative chargement ONNX depuis HF...")
168
+
169
+ onnx_path = hf_hub_download(
 
 
170
  repo_id=HF_MODEL_REPO,
171
+ filename="model.onnx",
172
  cache_dir="/tmp/models"
173
  )
174
+
175
+ onnx_session = ort.InferenceSession(onnx_path)
176
+ print("✅ ONNX chargé directement")
177
+
178
+ except Exception as e:
179
+ print(f"⚠️ ONNX indisponible: {e}")
180
+ print("🔁 Fallback → PyTorch .bin → conversion ONNX...")
181
+
182
+ try:
183
+ # -------------------------
184
+ # 1. Download .bin
185
+ # -------------------------
186
+ bin_path = hf_hub_download(
187
+ repo_id=HF_MODEL_REPO,
188
+ filename="pytorch_model.bin",
189
+ cache_dir="/tmp/models"
190
+ )
191
+
192
+ # -------------------------
193
+ # 2. Charger PyTorch
194
+ # -------------------------
195
+ import torch
196
+ from torchvision import models
197
+ import torch.nn as nn
198
+
199
+ NUM_CLASSES = 4
200
+ DEVICE = "cpu"
201
+
202
+ model = models.efficientnet_b4(weights=None)
203
+ model.classifier[1] = nn.Linear(
204
+ model.classifier[1].in_features,
205
+ NUM_CLASSES
206
+ )
207
+
208
+ state_dict = torch.load(bin_path, map_location=DEVICE)
209
+ model.load_state_dict(state_dict, strict=True)
210
+ model.eval()
211
+
212
+ print("✅ PyTorch chargé")
213
+
214
+ # -------------------------
215
+ # 3. Export ONNX local
216
+ # -------------------------
217
+ tmp_onnx = "/tmp/models/fallback_model.onnx"
218
+
219
+ dummy = torch.randn(1, 3, 224, 224)
220
+
221
+ torch.onnx.export(
222
+ model,
223
+ dummy,
224
+ tmp_onnx,
225
+ export_params=True,
226
+ opset_version=17,
227
+ do_constant_folding=False,
228
+ input_names=["input"],
229
+ output_names=["output"]
230
+ )
231
+
232
+ print("✅ Conversion ONNX locale OK")
233
+
234
+ # -------------------------
235
+ # 4. ORT session
236
+ # -------------------------
237
+ onnx_session = ort.InferenceSession(tmp_onnx)
238
+
239
+ except Exception as e2:
240
+ print(f"❌ Fallback PyTorch échoué : {e2}")
241
+ onnx_session = None
242
+
243
+ if onnx_session:
244
  input_name = onnx_session.get_inputs()[0].name
245
  input_shape = onnx_session.get_inputs()[0].shape
 
 
246
  print(f" Input : {input_name} {input_shape}\n")
 
 
 
 
247
 
248
  # 2. Database
249
  if NEON_DATABASE_URL:
requirements.txt CHANGED
@@ -6,7 +6,11 @@ python-multipart==0.0.6
6
  # HuggingFace (pour télécharger le modèle)
7
  huggingface-hub==0.20.3
8
 
9
- # ML - JUSTE ONNX Runtime (pas PyTorch !)
 
 
 
 
10
  # onnxruntime==1.17.0
11
  onnxruntime
12
 
 
6
  # HuggingFace (pour télécharger le modèle)
7
  huggingface-hub==0.20.3
8
 
9
+ # ML
10
+ # pytorch
11
+ --extra-index-url https://download.pytorch.org/whl/cpu
12
+ torch
13
+ torchvision
14
  # onnxruntime==1.17.0
15
  onnxruntime
16