SonicaB committed on
Commit
d35d98d
·
verified ·
1 Parent(s): 34dbda7

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. fusion-app/app_local.py +11 -21
  2. requirements.txt +1 -1
fusion-app/app_local.py CHANGED
@@ -274,41 +274,31 @@ def predict_image_audio_api(image, audio_path, alpha=0.7):
274
  return pred, probs, lat
275
 
276
  # ============= Wrapper Functions with Mode Selection =============
277
- def predict_video_wrapper(video, alpha, use_api, request: gr.Request):
278
  """
279
  Wrapper function that routes to local or API prediction based on use_api flag.
280
  When user logs in via LoginButton on HF Spaces, their token is available via request.
281
  """
282
  global USER_HF_TOKEN
283
  if use_api:
284
- # Get user's token from Gradio request context
285
- # When OAuth is enabled, request.username contains the logged-in username
286
- # The token is available via huggingface_hub.get_token()
287
- try:
288
- from huggingface_hub import get_token
289
- USER_HF_TOKEN = get_token()
290
- print(f"[DEBUG] Token retrieved: {USER_HF_TOKEN[:10] if USER_HF_TOKEN else 'None'}...")
291
- except Exception as e:
292
- print(f"[DEBUG] Failed to get token: {e}")
293
- USER_HF_TOKEN = None
294
  return predict_vid_api(video, alpha)
295
  else:
296
  return predict_vid(video, alpha)
297
 
298
- def predict_image_audio_wrapper(image, audio_path, alpha, use_api, request: gr.Request):
299
  """
300
  Wrapper function that routes to local or API prediction based on use_api flag.
301
  When user logs in via LoginButton on HF Spaces, their token is available via request.
302
  """
303
  global USER_HF_TOKEN
304
  if use_api:
305
- # Get user's token from Gradio request context
306
- try:
307
- from huggingface_hub import get_token
308
- USER_HF_TOKEN = get_token()
309
- print(f"[DEBUG] Token retrieved: {USER_HF_TOKEN[:10] if USER_HF_TOKEN else 'None'}...")
310
- except Exception as e:
311
- print(f"[DEBUG] Failed to get token: {e}")
312
  USER_HF_TOKEN = None
313
  return predict_image_audio_api(image, audio_path, alpha)
314
  else:
@@ -359,7 +349,7 @@ if not _is_testing:
359
  out_v1 = gr.Label(label="Prediction")
360
  out_v2 = gr.JSON(label="Probabilities")
361
  out_v3 = gr.JSON(label="Latency (ms)")
362
- btn_v.click(predict_video_wrapper, inputs=[v, alpha_v, use_api_mode], outputs=[out_v1, out_v2, out_v3])
363
 
364
  with gr.Tab("Image + Audio"):
365
  img = gr.Image(type="pil", height=240)
@@ -373,7 +363,7 @@ if not _is_testing:
373
  out_i1 = gr.Label(label="Prediction")
374
  out_i2 = gr.JSON(label="Probabilities")
375
  out_i3 = gr.JSON(label="Latency (ms)")
376
- btn_ia.click(predict_image_audio_wrapper, inputs=[img, aud, alpha_ia, use_api_mode], outputs=[out_i1, out_i2, out_i3])
377
 
378
  if __name__ == "__main__":
379
  demo.launch()
 
274
  return pred, probs, lat
275
 
276
  # ============= Wrapper Functions with Mode Selection =============
277
+ def predict_video_wrapper(video, alpha, use_api, oauth_token: gr.OAuthToken | None = None):
278
  """
279
  Wrapper function that routes to local or API prediction based on use_api flag.
280
  When user logs in via LoginButton on HF Spaces, their OAuth token is passed in via the oauth_token parameter.
281
  """
282
  global USER_HF_TOKEN
283
  if use_api:
284
+ if oauth_token is not None and getattr(oauth_token, "token", None):
285
+ USER_HF_TOKEN = oauth_token.token
286
+ else:
287
+ USER_HF_TOKEN = None
 
 
 
 
 
 
288
  return predict_vid_api(video, alpha)
289
  else:
290
  return predict_vid(video, alpha)
291
 
292
+ def predict_image_audio_wrapper(image, audio_path, alpha, use_api, oauth_token: gr.OAuthToken | None = None):
293
  """
294
  Wrapper function that routes to local or API prediction based on use_api flag.
295
  When user logs in via LoginButton on HF Spaces, their OAuth token is passed in via the oauth_token parameter.
296
  """
297
  global USER_HF_TOKEN
298
  if use_api:
299
+ if oauth_token is not None and getattr(oauth_token, "token", None):
300
+ USER_HF_TOKEN = oauth_token.token
301
+ else:
 
 
 
 
302
  USER_HF_TOKEN = None
303
  return predict_image_audio_api(image, audio_path, alpha)
304
  else:
 
349
  out_v1 = gr.Label(label="Prediction")
350
  out_v2 = gr.JSON(label="Probabilities")
351
  out_v3 = gr.JSON(label="Latency (ms)")
352
+ btn_v.click(predict_video_wrapper, inputs=[v, alpha_v, use_api_mode, login_btn], outputs=[out_v1, out_v2, out_v3])
353
 
354
  with gr.Tab("Image + Audio"):
355
  img = gr.Image(type="pil", height=240)
 
363
  out_i1 = gr.Label(label="Prediction")
364
  out_i2 = gr.JSON(label="Probabilities")
365
  out_i3 = gr.JSON(label="Latency (ms)")
366
+ btn_ia.click(predict_image_audio_wrapper, inputs=[img, aud, alpha_ia, use_api_mode, login_btn], outputs=[out_i1, out_i2, out_i3])
367
 
368
  if __name__ == "__main__":
369
  demo.launch()
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- gradio
2
  transformers
3
  torch
4
  torchaudio
 
1
+ gradio >= 4.29.0
2
  transformers
3
  torch
4
  torchaudio