Chris committed on
Commit
da22b37
Β·
1 Parent(s): 748b763

Final 6.1.3

Browse files
Files changed (2) hide show
  1. README.md +3 -0
  2. src/app.py +338 -28
README.md CHANGED
@@ -10,6 +10,9 @@ pinned: false
10
  hf_oauth: true
11
  # optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
12
  hf_oauth_expiration_minutes: 480
 
 
 
13
  short_description: Multi-Agent AI System for GAIA Benchmark Questions
14
  suggested_hardware: cpu-upgrade
15
  models:
 
10
  hf_oauth: true
11
  # optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
12
  hf_oauth_expiration_minutes: 480
13
+ # Required scopes for Qwen model access via Inference API
14
+ hf_oauth_scopes:
15
+ - inference-api
16
  short_description: Multi-Agent AI System for GAIA Benchmark Questions
17
  suggested_hardware: cpu-upgrade
18
  models:
src/app.py CHANGED
@@ -438,21 +438,63 @@ def check_oauth_scopes(oauth_token: str) -> Dict[str, any]:
438
  headers = {"Authorization": f"Bearer {oauth_token}"}
439
 
440
  # Test whoami endpoint (requires read scope)
441
- whoami_response = requests.get("https://huggingface.co/api/whoami", headers=headers, timeout=5)
442
- can_read = whoami_response.status_code == 200
 
 
 
 
 
 
 
 
 
 
 
 
443
 
444
  # Test inference capability by trying a simple model call
 
445
  can_inference = False
446
  try:
447
  # Try a very simple inference call to test scope
448
  inference_url = "https://api-inference.huggingface.co/models/microsoft/DialoGPT-medium"
449
  test_payload = {"inputs": "test", "options": {"wait_for_model": False, "use_cache": True}}
450
- inference_response = requests.post(inference_url, headers=headers, json=test_payload, timeout=10)
 
451
  # 200 = success, 503 = model loading (but scope works), 401/403 = no scope
452
  can_inference = inference_response.status_code in [200, 503]
453
- except:
 
 
 
 
 
 
 
 
 
 
454
  can_inference = False
455
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
456
  # Determine probable scopes based on capabilities
457
  probable_scopes = []
458
  if can_read:
@@ -460,9 +502,11 @@ def check_oauth_scopes(oauth_token: str) -> Dict[str, any]:
460
  if can_inference:
461
  probable_scopes.append("inference")
462
 
 
 
463
  # Get user info if available
464
  user_info = {}
465
- if can_read:
466
  try:
467
  user_data = whoami_response.json()
468
  user_info = {
@@ -470,7 +514,9 @@ def check_oauth_scopes(oauth_token: str) -> Dict[str, any]:
470
  "fullname": user_data.get("fullName", ""),
471
  "avatar": user_data.get("avatarUrl", "")
472
  }
473
- except:
 
 
474
  user_info = {}
475
 
476
  return {
@@ -483,6 +529,7 @@ def check_oauth_scopes(oauth_token: str) -> Dict[str, any]:
483
  }
484
 
485
  except Exception as e:
 
486
  return {
487
  "logged_in": True,
488
  "scopes": ["unknown"],
@@ -495,7 +542,7 @@ def check_oauth_scopes(oauth_token: str) -> Dict[str, any]:
495
  def format_auth_status(profile: gr.OAuthProfile | None) -> str:
496
  """Format authentication status for display in UI"""
497
 
498
- # Check for HF_TOKEN first
499
  hf_token = os.getenv("HF_TOKEN")
500
 
501
  if hf_token:
@@ -521,18 +568,34 @@ def format_auth_status(profile: gr.OAuthProfile | None) -> str:
521
  πŸ’‘ **Status**: Optimal configuration for GAIA benchmark performance with real AI agents.
522
  """
523
 
 
 
 
 
 
524
  if not profile:
525
- return """
 
 
 
 
 
 
 
 
 
526
  ### πŸ” Authentication Status: Not Logged In
527
 
528
  Please log in to access GAIA evaluation with Qwen models and LangGraph workflow.
529
 
 
 
530
  **What you need:**
531
- - πŸ”‘ HuggingFace login with `read` and `inference` permissions
532
  - πŸ€– Access to Qwen 2.5 models via HF Inference API
533
  - 🧠 LangGraph multi-agent system capabilities
534
 
535
- **πŸ”‘ OAuth Configuration**: Login requests both `read` and `inference` scopes for Qwen model access.
536
  **πŸ“ˆ Expected Performance**: 30%+ GAIA score with full LangGraph workflow and Qwen models.
537
  **⚠️ No Fallbacks**: System requires proper authentication - no simplified responses.
538
  """
@@ -540,7 +603,33 @@ Please log in to access GAIA evaluation with Qwen models and LangGraph workflow.
540
  username = profile.username
541
  oauth_token = getattr(profile, 'oauth_token', None) or getattr(profile, 'token', None)
542
 
543
- scope_info = check_oauth_scopes(oauth_token)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
544
 
545
  status_parts = [f"### πŸ” Authentication Status: Logged In as {username}"]
546
 
@@ -549,9 +638,19 @@ Please log in to access GAIA evaluation with Qwen models and LangGraph workflow.
549
  if user_info and user_info.get("fullname"):
550
  status_parts.append(f"**Full Name**: {user_info['fullname']}")
551
 
 
 
 
 
 
 
 
 
 
 
552
  # Safely access scopes
553
  scopes = scope_info.get("scopes", [])
554
- status_parts.append(f"**Detected Scopes**: {', '.join(scopes) if scopes else 'None detected'}")
555
  status_parts.append("")
556
  status_parts.append("**System Capabilities:**")
557
 
@@ -585,6 +684,17 @@ Please log in to access GAIA evaluation with Qwen models and LangGraph workflow.
585
  ])
586
 
587
  if not can_inference:
 
 
 
 
 
 
 
 
 
 
 
588
  status_parts.extend([
589
  "",
590
  "πŸ”‘ **Authentication Required**: Your OAuth session lacks inference permissions.",
@@ -592,6 +702,15 @@ Please log in to access GAIA evaluation with Qwen models and LangGraph workflow.
592
  "**Alternative**: Set HF_TOKEN as a Space secret for guaranteed Qwen model access.",
593
  "**Note**: System requires Qwen model access - no simplified fallbacks available."
594
  ])
 
 
 
 
 
 
 
 
 
595
  else:
596
  status_parts.extend([
597
  "",
@@ -1152,6 +1271,7 @@ Please log in to access GAIA evaluation features with full inference access.
1152
  # The scopes will be configured at the interface level
1153
  )
1154
  refresh_auth_button = gr.Button("πŸ”„ Refresh Auth Status", variant="secondary", scale=1)
 
1155
 
1156
  unit4_run_button = gr.Button(
1157
  "πŸ”’ Login Required for GAIA Evaluation",
@@ -1293,24 +1413,88 @@ Please log in to access GAIA evaluation features with full inference access.
1293
  return status, table, auth_status, csv_update, json_update, summary_update
1294
 
1295
  def refresh_auth_status(request: gr.Request):
1296
- """Refresh authentication status display"""
1297
- profile = getattr(request, 'oauth_profile', None)
1298
- return format_auth_status(profile)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1299
 
1300
  def check_login_state(request: gr.Request):
1301
- """Check if user is logged in and update UI accordingly"""
1302
- profile = getattr(request, 'oauth_profile', None)
1303
-
1304
- if profile:
1305
- # User is logged in - return updated auth status
1306
- auth_status = format_auth_status(profile)
1307
- # Enable the run button
1308
- button_update = gr.update(interactive=True, value="πŸš€ Run GAIA Evaluation & Submit All Answers")
1309
- return auth_status, button_update
1310
- else:
1311
- # User not logged in - show login required message
1312
- auth_status = format_auth_status(None)
1313
- # Disable the run button
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1314
  button_update = gr.update(interactive=False, value="πŸ”’ Login Required for GAIA Evaluation")
1315
  return auth_status, button_update
1316
 
@@ -1333,6 +1517,132 @@ Please log in to access GAIA evaluation features with full inference access.
1333
  outputs=[auth_status_display]
1334
  )
1335
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1336
  # Event handlers for manual testing
1337
  def process_and_update(question, file_input, show_reasoning):
1338
  """Process question with authentication check"""
 
438
  headers = {"Authorization": f"Bearer {oauth_token}"}
439
 
440
  # Test whoami endpoint (requires read scope)
441
+ logger.info("πŸ” Testing OAuth token with whoami endpoint...")
442
+ try:
443
+ whoami_response = requests.get("https://huggingface.co/api/whoami", headers=headers, timeout=10)
444
+ can_read = whoami_response.status_code == 200
445
+ logger.info(f"βœ… Whoami response: {whoami_response.status_code}")
446
+
447
+ if whoami_response.status_code == 401:
448
+ logger.warning("⚠️ OAuth token unauthorized for whoami endpoint")
449
+ elif whoami_response.status_code != 200:
450
+ logger.warning(f"⚠️ Unexpected whoami response: {whoami_response.status_code}")
451
+
452
+ except Exception as whoami_error:
453
+ logger.error(f"❌ Whoami test failed: {whoami_error}")
454
+ can_read = False
455
 
456
  # Test inference capability by trying a simple model call
457
+ logger.info("πŸ” Testing OAuth token with inference endpoint...")
458
  can_inference = False
459
  try:
460
  # Try a very simple inference call to test scope
461
  inference_url = "https://api-inference.huggingface.co/models/microsoft/DialoGPT-medium"
462
  test_payload = {"inputs": "test", "options": {"wait_for_model": False, "use_cache": True}}
463
+ inference_response = requests.post(inference_url, headers=headers, json=test_payload, timeout=15)
464
+
465
  # 200 = success, 503 = model loading (but scope works), 401/403 = no scope
466
  can_inference = inference_response.status_code in [200, 503]
467
+ logger.info(f"βœ… Inference response: {inference_response.status_code}")
468
+
469
+ if inference_response.status_code == 401:
470
+ logger.warning("⚠️ OAuth token unauthorized for inference endpoint - likely missing 'inference' scope")
471
+ elif inference_response.status_code == 403:
472
+ logger.warning("⚠️ OAuth token forbidden for inference endpoint - insufficient permissions")
473
+ elif inference_response.status_code not in [200, 503]:
474
+ logger.warning(f"⚠️ Unexpected inference response: {inference_response.status_code}")
475
+
476
+ except Exception as inference_error:
477
+ logger.error(f"❌ Inference test failed: {inference_error}")
478
  can_inference = False
479
 
480
+ # Alternative inference test - try Qwen model directly
481
+ if not can_inference:
482
+ logger.info("πŸ” Testing OAuth token with Qwen model directly...")
483
+ try:
484
+ qwen_url = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-7B-Instruct"
485
+ qwen_payload = {"inputs": "Hello", "options": {"wait_for_model": False}}
486
+ qwen_response = requests.post(qwen_url, headers=headers, json=qwen_payload, timeout=15)
487
+
488
+ qwen_inference = qwen_response.status_code in [200, 503]
489
+ if qwen_inference:
490
+ can_inference = True
491
+ logger.info(f"βœ… Qwen model response: {qwen_response.status_code}")
492
+ else:
493
+ logger.warning(f"⚠️ Qwen model response: {qwen_response.status_code}")
494
+
495
+ except Exception as qwen_error:
496
+ logger.error(f"❌ Qwen model test failed: {qwen_error}")
497
+
498
  # Determine probable scopes based on capabilities
499
  probable_scopes = []
500
  if can_read:
 
502
  if can_inference:
503
  probable_scopes.append("inference")
504
 
505
+ logger.info(f"πŸ“Š Final scope assessment: {probable_scopes}")
506
+
507
  # Get user info if available
508
  user_info = {}
509
+ if can_read and whoami_response.status_code == 200:
510
  try:
511
  user_data = whoami_response.json()
512
  user_info = {
 
514
  "fullname": user_data.get("fullName", ""),
515
  "avatar": user_data.get("avatarUrl", "")
516
  }
517
+ logger.info(f"βœ… User info retrieved: {user_info.get('name', 'unknown')}")
518
+ except Exception as user_error:
519
+ logger.warning(f"⚠️ Could not parse user info: {user_error}")
520
  user_info = {}
521
 
522
  return {
 
529
  }
530
 
531
  except Exception as e:
532
+ logger.error(f"❌ OAuth scope check failed: {e}")
533
  return {
534
  "logged_in": True,
535
  "scopes": ["unknown"],
 
542
  def format_auth_status(profile: gr.OAuthProfile | None) -> str:
543
  """Format authentication status for display in UI"""
544
 
545
+ # Check for HF_TOKEN first (best performance)
546
  hf_token = os.getenv("HF_TOKEN")
547
 
548
  if hf_token:
 
568
  πŸ’‘ **Status**: Optimal configuration for GAIA benchmark performance with real AI agents.
569
  """
570
 
571
+ # Check HuggingFace Spaces OAuth configuration
572
+ oauth_scopes = os.getenv("OAUTH_SCOPES")
573
+ oauth_client_id = os.getenv("OAUTH_CLIENT_ID")
574
+ has_inference_scope = oauth_scopes and "inference-api" in oauth_scopes
575
+
576
  if not profile:
577
+ oauth_status = ""
578
+ if oauth_client_id:
579
+ if has_inference_scope:
580
+ oauth_status = "**πŸ”‘ OAuth Configuration**: βœ… Space configured with `inference-api` scope"
581
+ else:
582
+ oauth_status = "**⚠️ OAuth Configuration**: Space OAuth enabled but missing `inference-api` scope"
583
+ else:
584
+ oauth_status = "**❌ OAuth Configuration**: Space not configured for OAuth (missing `hf_oauth: true` in README.md)"
585
+
586
+ return f"""
587
  ### πŸ” Authentication Status: Not Logged In
588
 
589
  Please log in to access GAIA evaluation with Qwen models and LangGraph workflow.
590
 
591
+ {oauth_status}
592
+
593
  **What you need:**
594
+ - πŸ”‘ HuggingFace login with `read` and `inference-api` permissions
595
  - πŸ€– Access to Qwen 2.5 models via HF Inference API
596
  - 🧠 LangGraph multi-agent system capabilities
597
 
598
+ **πŸ”‘ OAuth Scopes**: Login requests `inference-api` scope for Qwen model access.
599
  **πŸ“ˆ Expected Performance**: 30%+ GAIA score with full LangGraph workflow and Qwen models.
600
  **⚠️ No Fallbacks**: System requires proper authentication - no simplified responses.
601
  """
 
603
  username = profile.username
604
  oauth_token = getattr(profile, 'oauth_token', None) or getattr(profile, 'token', None)
605
 
606
+ # Try multiple methods to extract OAuth token
607
+ if not oauth_token:
608
+ for attr in ['access_token', 'id_token', 'bearer_token']:
609
+ token = getattr(profile, attr, None)
610
+ if token:
611
+ oauth_token = token
612
+ logger.info(f"πŸ”‘ Found OAuth token via {attr}")
613
+ break
614
+
615
+ # If still no token, check if profile has any token-like attributes
616
+ if not oauth_token and hasattr(profile, '__dict__'):
617
+ token_attrs = [attr for attr in profile.__dict__.keys() if 'token' in attr.lower()]
618
+ if token_attrs:
619
+ logger.info(f"πŸ” Available token attributes: {token_attrs}")
620
+ # Try the first available token attribute
621
+ oauth_token = getattr(profile, token_attrs[0], None)
622
+ if oauth_token:
623
+ logger.info(f"πŸ”‘ Using token from {token_attrs[0]}")
624
+
625
+ scope_info = check_oauth_scopes(oauth_token) if oauth_token else {
626
+ "logged_in": True,
627
+ "scopes": [],
628
+ "can_inference": False,
629
+ "can_read": False,
630
+ "user_info": {},
631
+ "message": "Logged in but no OAuth token found"
632
+ }
633
 
634
  status_parts = [f"### πŸ” Authentication Status: Logged In as {username}"]
635
 
 
638
  if user_info and user_info.get("fullname"):
639
  status_parts.append(f"**Full Name**: {user_info['fullname']}")
640
 
641
+ # HuggingFace Spaces OAuth Environment Status
642
+ if oauth_client_id:
643
+ if has_inference_scope:
644
+ status_parts.append("**🏠 Space OAuth**: βœ… Configured with `inference-api` scope")
645
+ else:
646
+ status_parts.append("**🏠 Space OAuth**: ⚠️ Missing `inference-api` scope in README.md")
647
+ status_parts.append(f"**Available Scopes**: {oauth_scopes}")
648
+ else:
649
+ status_parts.append("**🏠 Space OAuth**: ❌ Not configured (`hf_oauth: true` missing)")
650
+
651
  # Safely access scopes
652
  scopes = scope_info.get("scopes", [])
653
+ status_parts.append(f"**Detected Token Scopes**: {', '.join(scopes) if scopes else 'None detected'}")
654
  status_parts.append("")
655
  status_parts.append("**System Capabilities:**")
656
 
 
684
  ])
685
 
686
  if not can_inference:
687
+ if not has_inference_scope:
688
+ status_parts.extend([
689
+ "",
690
+ "πŸ”§ **Space Configuration Issue**: Add `inference-api` scope to README.md:",
691
+ "```yaml",
692
+ "hf_oauth_scopes:",
693
+ " - inference-api",
694
+ "```",
695
+ "**After updating**: Space will restart and request proper scopes on next login."
696
+ ])
697
+
698
  status_parts.extend([
699
  "",
700
  "πŸ”‘ **Authentication Required**: Your OAuth session lacks inference permissions.",
 
702
  "**Alternative**: Set HF_TOKEN as a Space secret for guaranteed Qwen model access.",
703
  "**Note**: System requires Qwen model access - no simplified fallbacks available."
704
  ])
705
+
706
+ # Add specific guidance if we couldn't find an OAuth token
707
+ if not oauth_token:
708
+ status_parts.extend([
709
+ "",
710
+ "πŸ” **OAuth Token Issue**: Could not extract OAuth token from your session.",
711
+ "**Troubleshooting**: Click 'πŸ” Debug OAuth' button above to investigate.",
712
+ "**Common Fix**: Logout and login again to refresh your OAuth session."
713
+ ])
714
  else:
715
  status_parts.extend([
716
  "",
 
1271
  # The scopes will be configured at the interface level
1272
  )
1273
  refresh_auth_button = gr.Button("πŸ”„ Refresh Auth Status", variant="secondary", scale=1)
1274
+ debug_auth_button = gr.Button("πŸ” Debug OAuth", variant="secondary", scale=1)
1275
 
1276
  unit4_run_button = gr.Button(
1277
  "πŸ”’ Login Required for GAIA Evaluation",
 
1413
  return status, table, auth_status, csv_update, json_update, summary_update
1414
 
1415
  def refresh_auth_status(request: gr.Request):
1416
+ """Refresh authentication status display with enhanced debugging"""
1417
+ try:
1418
+ # Get OAuth profile from request with debugging
1419
+ profile = getattr(request, 'oauth_profile', None)
1420
+
1421
+ # Try multiple ways to get user info
1422
+ user = getattr(request, 'user', None)
1423
+ session = getattr(request, 'session', None)
1424
+ headers = getattr(request, 'headers', {})
1425
+
1426
+ # Log debug information
1427
+ logger.info(f"πŸ” OAuth Debug - Profile: {profile is not None}")
1428
+ logger.info(f"πŸ” OAuth Debug - User: {user is not None}")
1429
+ logger.info(f"πŸ” OAuth Debug - Session: {session is not None}")
1430
+
1431
+ if profile:
1432
+ logger.info(f"βœ… Profile found: username={getattr(profile, 'username', 'unknown')}")
1433
+
1434
+ # Try different ways to extract OAuth token
1435
+ oauth_token = None
1436
+ for attr in ['oauth_token', 'token', 'access_token', 'id_token']:
1437
+ token = getattr(profile, attr, None)
1438
+ if token:
1439
+ oauth_token = token
1440
+ logger.info(f"πŸ”‘ Found token via {attr}: {len(token) if token else 0} chars")
1441
+ break
1442
+
1443
+ if not oauth_token and hasattr(profile, '__dict__'):
1444
+ logger.info(f"πŸ” Profile attributes: {list(profile.__dict__.keys())}")
1445
+ else:
1446
+ logger.warning("⚠️ No OAuth profile found in request")
1447
+
1448
+ # Check for HF_TOKEN as fallback
1449
+ hf_token = os.getenv("HF_TOKEN")
1450
+ if hf_token:
1451
+ logger.info("βœ… HF_TOKEN environment variable available")
1452
+ else:
1453
+ logger.info("ℹ️ No HF_TOKEN environment variable")
1454
+
1455
+ return format_auth_status(profile)
1456
+
1457
+ except Exception as e:
1458
+ logger.error(f"❌ Error in refresh_auth_status: {e}")
1459
+ return f"### ❌ Authentication Error\n\nError checking auth status: {str(e)}"
1460
 
1461
  def check_login_state(request: gr.Request):
1462
+ """Check if user is logged in and update UI accordingly with enhanced detection"""
1463
+ try:
1464
+ profile = getattr(request, 'oauth_profile', None)
1465
+
1466
+ # Enhanced profile detection
1467
+ if not profile:
1468
+ # Try alternative ways to get profile
1469
+ user = getattr(request, 'user', None)
1470
+ if user and hasattr(user, 'username'):
1471
+ # Create a minimal profile object if we have user info
1472
+ class MockProfile:
1473
+ def __init__(self, username):
1474
+ self.username = username
1475
+ self.oauth_token = None
1476
+ profile = MockProfile(user.username)
1477
+ logger.info(f"πŸ”„ Created mock profile for user: {user.username}")
1478
+
1479
+ if profile:
1480
+ # User is logged in - return updated auth status
1481
+ auth_status = format_auth_status(profile)
1482
+ # Enable the run button
1483
+ button_update = gr.update(interactive=True, value="πŸš€ Run GAIA Evaluation & Submit All Answers")
1484
+ logger.info(f"βœ… User detected as logged in: {getattr(profile, 'username', 'unknown')}")
1485
+ return auth_status, button_update
1486
+ else:
1487
+ # User not logged in - show login required message
1488
+ auth_status = format_auth_status(None)
1489
+ # Disable the run button
1490
+ button_update = gr.update(interactive=False, value="πŸ”’ Login Required for GAIA Evaluation")
1491
+ logger.info("ℹ️ No user login detected")
1492
+ return auth_status, button_update
1493
+
1494
+ except Exception as e:
1495
+ logger.error(f"❌ Error in check_login_state: {e}")
1496
+ # Return safe defaults
1497
+ auth_status = f"### ❌ Error\n\nError checking login state: {str(e)}"
1498
  button_update = gr.update(interactive=False, value="πŸ”’ Login Required for GAIA Evaluation")
1499
  return auth_status, button_update
1500
 
 
1517
  outputs=[auth_status_display]
1518
  )
1519
 
1520
+ # Debug OAuth information
1521
+ def debug_oauth_info(request: gr.Request):
1522
+ """Debug function to show OAuth information"""
1523
+ try:
1524
+ debug_info = []
1525
+ debug_info.append("# πŸ” OAuth Debug Information\n")
1526
+
1527
+ # Check HuggingFace Spaces OAuth Environment Variables
1528
+ debug_info.append("## 🏠 HuggingFace Spaces OAuth Environment")
1529
+ oauth_client_id = os.getenv("OAUTH_CLIENT_ID")
1530
+ oauth_client_secret = os.getenv("OAUTH_CLIENT_SECRET")
1531
+ oauth_scopes = os.getenv("OAUTH_SCOPES")
1532
+ openid_provider_url = os.getenv("OPENID_PROVIDER_URL")
1533
+
1534
+ debug_info.append(f"**OAUTH_CLIENT_ID**: {oauth_client_id is not None}")
1535
+ debug_info.append(f"**OAUTH_CLIENT_SECRET**: {oauth_client_secret is not None}")
1536
+ debug_info.append(f"**OAUTH_SCOPES**: {oauth_scopes}")
1537
+ debug_info.append(f"**OPENID_PROVIDER_URL**: {openid_provider_url}")
1538
+
1539
+ if oauth_scopes:
1540
+ scopes_list = oauth_scopes.split()
1541
+ debug_info.append(f"**Available Scopes**: {', '.join(scopes_list)}")
1542
+ debug_info.append(f"**Has inference-api scope**: {'inference-api' in scopes_list}")
1543
+
1544
+ # Check request attributes
1545
+ debug_info.append("\n## πŸ” Request Analysis")
1546
+ profile = getattr(request, 'oauth_profile', None)
1547
+ user = getattr(request, 'user', None)
1548
+ session = getattr(request, 'session', None)
1549
+
1550
+ debug_info.append(f"**Profile Object**: {profile is not None}")
1551
+ debug_info.append(f"**User Object**: {user is not None}")
1552
+ debug_info.append(f"**Session Object**: {session is not None}")
1553
+
1554
+ # Try to access Gradio's built-in OAuth info
1555
+ try:
1556
+ # Check if Gradio has OAuth info in the request state
1557
+ state = getattr(request, 'state', None)
1558
+ if state:
1559
+ debug_info.append(f"**Request State**: {type(state)}")
1560
+ if hasattr(state, '__dict__'):
1561
+ state_attrs = [attr for attr in state.__dict__.keys() if 'oauth' in attr.lower() or 'user' in attr.lower() or 'auth' in attr.lower()]
1562
+ if state_attrs:
1563
+ debug_info.append(f"**State Auth Attributes**: {', '.join(state_attrs)}")
1564
+ except Exception as state_error:
1565
+ debug_info.append(f"**State Check Error**: {state_error}")
1566
+
1567
+ if profile:
1568
+ debug_info.append(f"**Profile Type**: {type(profile)}")
1569
+ debug_info.append(f"**Profile Username**: {getattr(profile, 'username', 'N/A')}")
1570
+
1571
+ # Check all profile attributes
1572
+ if hasattr(profile, '__dict__'):
1573
+ attrs = list(profile.__dict__.keys())
1574
+ debug_info.append(f"**Profile Attributes**: {', '.join(attrs)}")
1575
+
1576
+ # Check for token attributes
1577
+ for attr in attrs:
1578
+ if 'token' in attr.lower():
1579
+ value = getattr(profile, attr, None)
1580
+ debug_info.append(f"**{attr}**: {len(str(value)) if value else 0} chars")
1581
+
1582
+ # Try to extract OAuth token using different methods
1583
+ oauth_token = None
1584
+ token_source = None
1585
+ for attr in ['oauth_token', 'token', 'access_token', 'id_token', 'bearer_token']:
1586
+ token = getattr(profile, attr, None)
1587
+ if token:
1588
+ oauth_token = token
1589
+ token_source = attr
1590
+ break
1591
+
1592
+ if oauth_token:
1593
+ debug_info.append(f"**Token Found**: Via {token_source}, {len(oauth_token)} chars")
1594
+
1595
+ # Test the token
1596
+ scope_info = check_oauth_scopes(oauth_token)
1597
+ debug_info.append(f"**Token Valid**: {scope_info.get('logged_in', False)}")
1598
+ debug_info.append(f"**Detected Scopes**: {', '.join(scope_info.get('scopes', []))}")
1599
+ debug_info.append(f"**Can Read**: {scope_info.get('can_read', False)}")
1600
+ debug_info.append(f"**Can Inference**: {scope_info.get('can_inference', False)}")
1601
+ else:
1602
+ debug_info.append("**Token Found**: ❌ No OAuth token found in profile")
1603
+
1604
+ if user:
1605
+ debug_info.append(f"**User Type**: {type(user)}")
1606
+ if hasattr(user, '__dict__'):
1607
+ user_attrs = list(user.__dict__.keys())
1608
+ debug_info.append(f"**User Attributes**: {', '.join(user_attrs)}")
1609
+
1610
+ # Check environment variables
1611
+ debug_info.append("\n## πŸ”§ Environment Variables")
1612
+ hf_token = os.getenv("HF_TOKEN")
1613
+ debug_info.append(f"**HF_TOKEN Available**: {hf_token is not None}")
1614
+ if hf_token:
1615
+ debug_info.append(f"**HF_TOKEN Length**: {len(hf_token)} chars")
1616
+
1617
+ # Check request headers for any auth info
1618
+ headers = getattr(request, 'headers', {})
1619
+ auth_headers = [h for h in headers.keys() if 'auth' in h.lower() or 'token' in h.lower()]
1620
+ if auth_headers:
1621
+ debug_info.append(f"**Auth Headers**: {', '.join(auth_headers)}")
1622
+
1623
+ # Gradio-specific OAuth checks
1624
+ debug_info.append("\n## 🎨 Gradio OAuth Integration")
1625
+ try:
1626
+ # Check if we're in an authenticated Gradio context
1627
+ import gradio as gr
1628
+ debug_info.append(f"**Gradio Version**: {gr.__version__}")
1629
+
1630
+ # Try to access OAuth through Gradio's context
1631
+ # This might work differently in newer Gradio versions
1632
+
1633
+ except Exception as gradio_error:
1634
+ debug_info.append(f"**Gradio OAuth Error**: {gradio_error}")
1635
+
1636
+ return "\n".join(debug_info)
1637
+
1638
+ except Exception as e:
1639
+ return f"# ❌ Debug Error\n\nError during OAuth debug: {str(e)}"
1640
+
1641
+ debug_auth_button.click(
1642
+ fn=debug_oauth_info,
1643
+ outputs=[auth_status_display]
1644
+ )
1645
+
1646
  # Event handlers for manual testing
1647
  def process_and_update(question, file_input, show_reasoning):
1648
  """Process question with authentication check"""